query (string, 9–9.05k chars) | document (string, 10–222k chars) | metadata (dict) | negatives (list of 30) | negative_scores (list of 30) | document_score (string, 4–10 chars) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
Updates hardware_control based on the widget being checked | def _update_hardware_control_from_gui(self):
self._under_hardware_control = self.widgets['hardware_control'].isChecked() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_hardware_control(self, value):\n self.widgets['hardware_control'].setChecked(value)\n self._under_hardware_control = value",
"def update(self):\n #update checkboxes\n self.average_check_box.SetValue(self.parent.fftsink.average)\n self.use_persistence_check_box.SetValue(self.parent.fftsink.use_persistence)\n self.peak_hold_check_box.SetValue(self.parent.fftsink.peak_hold)\n #update radio buttons\n try:\n index = list(DIV_LEVELS).index(self.parent.fftsink.y_per_div)\n self.radio_buttons[index].SetValue(True)\n except: pass",
"def update_buttons(self):\n # Enable the Add/Remove/Up/Down measurements buttons if a Survey is loaded\n enable = self.mgr.obj is not None\n self.addButton.setEnabled(enable)\n self.removeButton.setEnabled(enable)\n self.upButton.setEnabled(enable)\n self.downButton.setEnabled(enable)\n \n # Enable the Add/Remove condition buttons if a Measurement is selected\n #enable = len(list(self.mgr.obj.measurements)) > 0\n enable = self.measurementTableWidget.rowCount() > 0\n self.addConditionButton.setEnabled(enable)\n self.removeConditionButton.setEnabled(enable)",
"def updateControls(self, onoff=False, radio=True):\n self.myContainer.sendControls(self.status, self.setpoint)\n if onoff and self.status: self.myContainer.sendIRcode(\"cool3\", \"62\")\n elif onoff and not self.status: self.myContainer.sendIRcode(\"off\", \"0\")\n if radio:\n self.myRadio.sendControls()",
"def __setState(self, widget):\n\n import wx\n import fsleyes_widgets.bitmaptoggle as bmptoggle\n\n if isinstance(widget, wx.MenuItem):\n widget.Check(self.toggled)\n elif isinstance(widget, (wx.CheckBox,\n wx.ToggleButton,\n bmptoggle.BitmapToggleButton)):\n widget.SetValue(self.toggled)",
"def _on_toggled(self, check, index):\r\n\r\n check.handler_block(self._lst_handler_id[index])\r\n\r\n self._software_model.lst_development[index] = int(check.get_active())\r\n\r\n check.handler_unblock(self._lst_handler_id[index])\r\n\r\n return False",
"def set_controls(self):\n # Image control\n image = pyxbmct.Image(addonfolder+artsfolder+'/mapdvbc.png')\n self.placeControl(image, 0, 0, rowspan=10, columnspan=16)\n\n\t\t# Nos\n self.nos_button = pyxbmct.RadioButton('')\n self.placeControl(self.nos_button, 10, 3, rowspan=1, columnspan=4)\n self.connect(self.nos_button, self.nos_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'nos', 2) == 1:\n self.nos_button.setSelected(True)\n else:\n self.nos_button.setSelected(False)\n nos = pyxbmct.Image(addonfolder+artsfolder+'/nos.png')\n self.placeControl(nos, 10, 3, rowspan=1, columnspan=4)\n\n\t\t# Nos Madeira\n self.madeira_button = pyxbmct.RadioButton('')\n self.placeControl(self.madeira_button, 12, 6, rowspan=1, columnspan=4)\n self.connect(self.madeira_button, self.madeira_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'madeira', 2) == 1:\n self.madeira_button.setSelected(True)\n else:\n self.madeira_button.setSelected(False)\n madeira = pyxbmct.Image(addonfolder+artsfolder+'/madeira.png')\n self.placeControl(madeira, 12, 6, rowspan=1, columnspan=4)\n\n\t\t# Nowo\n self.nowo_button = pyxbmct.RadioButton('')\n self.placeControl(self.nowo_button, 10, 9, rowspan=1, columnspan=4)\n self.connect(self.nowo_button, self.nowo_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'nowo', 2) == 1:\n self.nowo_button.setSelected(True)\n else:\n self.nowo_button.setSelected(False)\n nowo = pyxbmct.Image(addonfolder+artsfolder+'/nowo.png')\n self.placeControl(nowo, 10, 9, rowspan=1, columnspan=4)\n\n\t\t# Close button\n self.close_button = pyxbmct.Button('Exit')\n self.placeControl(self.close_button, 13, 15, rowspan=1, columnspan=1)\n self.connect(self.close_button, lambda: self.closepage())",
"def _check_hardware_control(self):\n if self._under_hardware_control:\n v_input = self._ai_client.get_ai_voltage(self._hwc_ai_channel, max_range=10) #CHeck status of hwc voltage input\n v_input = v_input[-1]\n if self._is_stabilizing:\n if v_input < self._hwc_thresh:\n self.stop()\n else:\n if v_input > self._hwc_thresh:\n self.start()",
"def set_controls(self):\n # Image control\n image = pyxbmct.Image(addonfolder+artsfolder+'/generic.png')\n self.placeControl(image, 0, 0, rowspan=8, columnspan=16)\n\n\t\t# USB\n self.usb_button = pyxbmct.RadioButton('')\n self.placeControl(self.usb_button, 9, 3, rowspan=2, columnspan=4)\n self.connect(self.usb_button, self.usb_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'usb', 2) == 1:\n self.usb_button.setSelected(True)\n else:\n self.usb_button.setSelected(False)\n usb = pyxbmct.Image(addonfolder+artsfolder+'/usb.png')\n self.placeControl(usb, 9, 3, rowspan=2, columnspan=4)\n\n\t\t# PCI-X\n self.pcix_button = pyxbmct.RadioButton('')\n self.placeControl(self.pcix_button, 9, 9, rowspan=2, columnspan=4)\n self.connect(self.pcix_button, self.pcix_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'pcix', 2) == 1:\n self.pcix_button.setSelected(True)\n else:\n self.pcix_button.setSelected(False)\n pcix = pyxbmct.Image(addonfolder+artsfolder+'/pcix.png')\n self.placeControl(pcix, 9, 9, rowspan=2, columnspan=4)\n\n\t\t# Close button\n self.close_button = pyxbmct.Button('Exit')\n self.placeControl(self.close_button, 13, 15, rowspan=1, columnspan=1)\n self.connect(self.close_button, lambda: self.closepage())",
"def value_changed(self, value, value2):\n self.update_ha_state()\n if value2 == 0x70:\n self.which = 0\n self.onoff = 0\n elif value2 == 0x50:\n self.which = 0\n self.onoff = 1\n elif value2 == 0x30:\n self.which = 1\n self.onoff = 0\n elif value2 == 0x10:\n self.which = 1\n self.onoff = 1\n self.hass.bus.fire('button_pressed', {\"id\": self.dev_id,\n 'pushed': value,\n 'which': self.which,\n 'onoff': self.onoff})",
"def set_controls(self):\n # Image control\n image = pyxbmct.Image(addonfolder+artsfolder+'/tvh.png')\n self.placeControl(image, 0, 0, rowspan=8, columnspan=17)\n\n\t\t# Wetek Button\n self.wetek_button = pyxbmct.RadioButton('')\n self.placeControl(self.wetek_button, 9, 1, rowspan=3, columnspan=3)\n self.connect(self.wetek_button, self.wetek_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'wetek', 2) == 1:\n self.wetek_button.setSelected(True)\n else:\n self.wetek_button.setSelected(False)\n wetek = pyxbmct.Image(addonfolder+artsfolder+'/weteksmall.png')\n self.placeControl(wetek, 9, 1, rowspan=3, columnspan=3)\n\n\t\t# K Button\n self.k_button = pyxbmct.RadioButton('')\n self.placeControl(self.k_button, 9, 5, rowspan=3, columnspan=3)\n self.connect(self.k_button, self.k_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'k', 2) == 1:\n self.k_button.setSelected(True)\n else:\n self.k_button.setSelected(False)\n k = pyxbmct.Image(addonfolder+artsfolder+'/ksmall.png')\n self.placeControl(k, 9, 5, rowspan=3, columnspan=3)\n\n\t\t# Khadas Button\n self.khadas_button = pyxbmct.RadioButton('')\n self.placeControl(self.khadas_button, 9, 9, rowspan=3, columnspan=3)\n self.connect(self.khadas_button, self.khadas_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'khadas', 2) == 1:\n self.khadas_button.setSelected(True)\n else:\n self.khadas_button.setSelected(False)\n khadas = pyxbmct.Image(addonfolder+artsfolder+'/khadasmall.png')\n self.placeControl(khadas, 9, 9, rowspan=3, columnspan=3)\n\n\t\t# Generic Button\n self.generic_button = pyxbmct.RadioButton('')\n self.placeControl(self.generic_button, 9, 13, rowspan=3, columnspan=3)\n self.connect(self.generic_button, self.generic_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'generic', 2) == 1:\n self.generic_button.setSelected(True)\n else:\n self.generic_button.setSelected(False)\n generic = pyxbmct.Image(addonfolder+artsfolder+'/genericsmall.png')\n self.placeControl(generic, 9, 13, rowspan=3, columnspan=3)\n\t\t\n\t\t# Close button\n self.close_button = pyxbmct.Button('Exit')\n self.placeControl(self.close_button, 13, 16, rowspan=1, columnspan=1)\n self.connect(self.close_button, lambda: self.closepage())",
"def _updateState(self):\n\n self.changeColorBtn.setEnabled(self.transformTypeCbx.isChecked() or self.shapeTypeCbx.isChecked())",
"def set_controls(self):\n # Image control\n image = pyxbmct.Image(addonfolder+artsfolder+'/kbox.png')\n self.placeControl(image, 0, 0, rowspan=10, columnspan=16)\n\n # DVBT\n self.kdvbt_button = pyxbmct.RadioButton('')\n self.placeControl(self.kdvbt_button, 11, 1, rowspan=1, columnspan=3)\n self.connect(self.kdvbt_button, self.kdvbt_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'kdvbt', 2) == 1:\n self.kdvbt_button.setSelected(True)\n else:\n self.kdvbt_button.setSelected(False)\n lnb1 = pyxbmct.Image(addonfolder+artsfolder+'/dvbt.png')\n self.placeControl(lnb1, 11, 1, rowspan=1, columnspan=3)\n\n # DVBC\n self.kdvbc_button = pyxbmct.RadioButton('')\n self.placeControl(self.kdvbc_button, 12, 1, rowspan=1, columnspan=3)\n self.connect(self.kdvbc_button, self.kdvbc_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'kdvbc', 2) == 1:\n self.kdvbc_button.setSelected(True)\n else:\n self.kdvbc_button.setSelected(False)\n lnb1 = pyxbmct.Image(addonfolder+artsfolder+'/dvbc.png')\n self.placeControl(lnb1, 12, 1, rowspan=1, columnspan=3)\n\n # DVBS2\n self.kdvbs_button = pyxbmct.RadioButton('')\n self.placeControl(self.kdvbs_button, 11, 6, rowspan=1, columnspan=3)\n self.connect(self.kdvbs_button, self.kdvbs_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'kdvbs', 2) == 1:\n self.kdvbs_button.setSelected(True)\n else:\n self.kdvbs_button.setSelected(False)\n lnb2 = pyxbmct.Image(addonfolder+artsfolder+'/dvbs2.png')\n self.placeControl(lnb2, 11, 6, rowspan=1, columnspan=3)\n\n # DVBT/DVBS2\n self.kdvbts_button = pyxbmct.RadioButton('')\n self.placeControl(self.kdvbts_button, 11, 11, rowspan=1, columnspan=3)\n self.connect(self.kdvbts_button, self.kdvbts_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'kdvbts', 2) == 1:\n self.kdvbts_button.setSelected(True)\n else:\n self.kdvbts_button.setSelected(False)\n both = pyxbmct.Image(addonfolder+artsfolder+'/dvbts2.png')\n self.placeControl(both, 11, 11, rowspan=1, columnspan=3)\n\n # DVBC/DVBS2\n self.kdvbcs_button = pyxbmct.RadioButton('')\n self.placeControl(self.kdvbcs_button, 12, 11, rowspan=1, columnspan=3)\n self.connect(self.kdvbcs_button, self.kdvbcs_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'kdvbcs', 2) == 1:\n self.kdvbcs_button.setSelected(True)\n else:\n self.kdvbcs_button.setSelected(False)\n both = pyxbmct.Image(addonfolder+artsfolder+'/dvbcs2.png')\n self.placeControl(both, 12, 11, rowspan=1, columnspan=3)\n\n # Close button\n self.close_button = pyxbmct.Button('Exit')\n self.placeControl(self.close_button, 13, 15, rowspan=1, columnspan=1)\n self.connect(self.close_button, lambda: self.closepage())",
"def _update_gui(self):\r\n \r\n # Update the RF button.\r\n rf_on = self.api.get_output()\r\n if rf_on == None: rf_on = True\r\n self.button_rf.set_checked(rf_on, block_events=True).enable()\r\n \r\n # Update the combo; we block first just in case the value doesn't \"change\"\r\n if self.api == None: self.label_instrument_name.set_text('Simulation')\r\n else:\r\n if self.api.get_mode() == 'Fixed': self.combo_mode.set_value(0, block_events=True).enable()\r\n else: self.combo_mode.set_value(1, block_events=True).enable()\r\n self._combo_mode_changed()\r\n \r\n # Update the list plot\r\n self.query_list()",
"def set_controls(self):\n # Image control\n image = pyxbmct.Image(addonfolder+artsfolder+'/mapdvbt.png')\n self.placeControl(image, 0, 0, rowspan=10, columnspan=16)\n\n\t\t# TDT\n self.tdt_button = pyxbmct.RadioButton('')\n self.placeControl(self.tdt_button, 11, 1, rowspan=1, columnspan=4)\n self.connect(self.tdt_button, self.tdt_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'tdt', 2) == 1:\n self.tdt_button.setSelected(True)\n else:\n self.tdt_button.setSelected(False)\n tdt = pyxbmct.Image(addonfolder+artsfolder+'/tdt.png')\n self.placeControl(tdt, 11, 1, rowspan=1, columnspan=4)\n \n\t\t# Meo\n self.meo_button = pyxbmct.RadioButton('')\n self.placeControl(self.meo_button, 11, 6, rowspan=1, columnspan=4)\n self.connect(self.meo_button, self.meo_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'meo', 2) == 1:\n self.meo_button.setSelected(True)\n else:\n self.meo_button.setSelected(False)\n meo = pyxbmct.Image(addonfolder+artsfolder+'/meo.png')\n self.placeControl(meo, 11, 6, rowspan=1, columnspan=4)\n\n\t\t# Vodafone\n self.vodafone_button = pyxbmct.RadioButton('')\n self.placeControl(self.vodafone_button, 11, 11, rowspan=1, columnspan=4)\n self.connect(self.vodafone_button, self.vodafone_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'vodafone', 2) == 1:\n self.vodafone_button.setSelected(True)\n else:\n self.vodafone_button.setSelected(False)\n vodafone = pyxbmct.Image(addonfolder+artsfolder+'/vodafone.png')\n self.placeControl(vodafone, 11, 11, rowspan=1, columnspan=4)\n\n\t\t# Close button\n self.close_button = pyxbmct.Button('Exit')\n self.placeControl(self.close_button, 13, 15, rowspan=1, columnspan=1)\n self.connect(self.close_button, lambda: self.closepage())",
"def set_controls(self):\n # Image control\n image = pyxbmct.Image(addonfolder+artsfolder+'/mapdvbs.png')\n self.placeControl(image, 0, 0, rowspan=10, columnspan=16)\n\n\t\t# Hispasat\n self.hispasat_button = pyxbmct.RadioButton('')\n self.placeControl(self.hispasat_button, 11, 1, rowspan=1, columnspan=4)\n self.connect(self.hispasat_button, self.hispasat_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'hispasat', 2) == 1:\n self.hispasat_button.setSelected(True)\n else:\n self.hispasat_button.setSelected(False)\n hispasat = pyxbmct.Image(addonfolder+artsfolder+'/hispasat.png')\n self.placeControl(hispasat, 11, 1, rowspan=1, columnspan=4)\n \n\t\t# Astra\n self.astra_button = pyxbmct.RadioButton('')\n self.placeControl(self.astra_button, 11, 6, rowspan=1, columnspan=4)\n self.connect(self.astra_button, self.astra_button_update)\n# if tools.return_data('TVHWIZARD', 'STRING', 'astra', 2) == 1:\n# self.astra_button.setSelected(True)\n# else:\n# self.astra_button.setSelected(False)\n astra = pyxbmct.Image(addonfolder+artsfolder+'/astra.png')\n self.placeControl(astra, 11, 6, rowspan=1, columnspan=4)\n\n\t\t# Hotbird\n self.hotbird_button = pyxbmct.RadioButton('')\n self.placeControl(self.hotbird_button, 11, 11, rowspan=1, columnspan=4)\n self.connect(self.hotbird_button, self.hotbird_button_update)\n# if tools.return_data('TVHWIZARD', 'STRING', 'hotbird', 2) == 1:\n# self.hotbird_button.setSelected(True)\n# else:\n# self.hotbird_button.setSelected(False)\n hotbird = pyxbmct.Image(addonfolder+artsfolder+'/hotbird.png')\n self.placeControl(hotbird, 11, 11, rowspan=1, columnspan=4)\n\n\t\t# Close button\n self.close_button = pyxbmct.Button('Exit')\n self.placeControl(self.close_button, 13, 15, rowspan=1, columnspan=1)\n self.connect(self.close_button, lambda: self.closepage())",
"def update_control(self):\n self._control_ctr += 0x01",
"def updateCheck(self):\n if (self.checkStatus1.get() == True):\n self.master.configure(background='#f5f5f0')\n self.checkStatus2.set(False)\n self.checkStatus3.set(False)\n\n elif (self.checkStatus2.get() == True):\n self.master.configure(background='#ff99ff')\n self.checkStatus3.set(False)\n self.checkStatus1.set(False)\n elif (self.checkStatus3.get() == True):\n self.master.configure(background='#00ff00')\n self.checkStatus1.set(False)\n self.checkStatus2.set(False)",
"def set_controls(self):\n image = pyxbmct.Image(addonfolder+artsfolder+'/khadasdvb.png')\n self.placeControl(image, 0, 0, rowspan=8, columnspan=16)\n\t\t\n\t\t# DVB-C\n self.dvbc_button = pyxbmct.RadioButton('')\n self.placeControl(self.dvbc_button, 10, 1, rowspan=2, columnspan=4)\n self.connect(self.dvbc_button, self.dvbc_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'khadasdvbc', 2) == 1:\n self.dvbc_button.setSelected(True)\n else:\n self.dvbc_button.setSelected(False)\n dvbc = pyxbmct.Image(addonfolder+artsfolder+'/dvbc.png')\n self.placeControl(dvbc, 10, 1, rowspan=2, columnspan=4)\n \n\t\t# DVB-S\n self.dvbs_button = pyxbmct.RadioButton('')\n self.placeControl(self.dvbs_button, 10, 6, rowspan=2, columnspan=4)\n self.connect(self.dvbs_button, self.dvbs_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'khadasdvbs', 2) == 1:\n self.dvbs_button.setSelected(True)\n else:\n self.dvbs_button.setSelected(False)\n dvbs = pyxbmct.Image(addonfolder+artsfolder+'/dvbs2.png')\n self.placeControl(dvbs, 10, 6, rowspan=2, columnspan=4)\n\n\t\t# DVB-T\n self.dvbt_button = pyxbmct.RadioButton('')\n self.placeControl(self.dvbt_button, 10, 11, rowspan=2, columnspan=4)\n self.connect(self.dvbt_button, self.dvbt_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'khadasdvbt', 2) == 1:\n self.dvbt_button.setSelected(True)\n else:\n self.dvbt_button.setSelected(False)\n dvbt = pyxbmct.Image(addonfolder+artsfolder+'/dvbt.png')\n self.placeControl(dvbt, 10, 11, rowspan=2, columnspan=4)\n\n\t\t# Close button\n self.close_button = pyxbmct.Button('Exit')\n self.placeControl(self.close_button, 13, 15, rowspan=1, columnspan=1)\n self.connect(self.close_button, lambda: self.closepage())",
"def set_controls(self):\n # Image control\n image = pyxbmct.Image(addonfolder+artsfolder+'/k.png')\n self.placeControl(image, 0, 0, rowspan=8, columnspan=16)\n\n\t\t# KI Plus\n self.k1plus_button = pyxbmct.RadioButton('')\n self.placeControl(self.k1plus_button, 8, 1, rowspan=2, columnspan=4)\n self.connect(self.k1plus_button, self.k1plus_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'k1plus', 2) == 1:\n self.k1plus_button.setSelected(True)\n else:\n self.k1plus_button.setSelected(False)\n k1plus = pyxbmct.Image(addonfolder+artsfolder+'/k1plus.png')\n self.placeControl(k1plus, 8, 1, rowspan=2, columnspan=4)\n\n\t\t# KI Pro\n self.k1pro_button = pyxbmct.RadioButton('')\n self.placeControl(self.k1pro_button, 11, 6, rowspan=2, columnspan=4)\n self.connect(self.k1pro_button, self.k1pro_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'k1pro', 2) == 1:\n self.k1pro_button.setSelected(True)\n else:\n self.k1pro_button.setSelected(False)\n k1pro = pyxbmct.Image(addonfolder+artsfolder+'/k1pro.png')\n self.placeControl(k1pro, 11, 6, rowspan=2, columnspan=4)\n\n\t\t# KII Pro\n self.k2pro_button = pyxbmct.RadioButton('')\n self.placeControl(self.k2pro_button, 8, 6, rowspan=2, columnspan=4)\n self.connect(self.k2pro_button, self.k2pro_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'k2pro', 2) == 1:\n self.k2pro_button.setSelected(True)\n else:\n self.k2pro_button.setSelected(False)\n k2pro = pyxbmct.Image(addonfolder+artsfolder+'/k2pro.png')\n self.placeControl(k2pro, 8, 6, rowspan=2, columnspan=4)\n\n\t\t# KIII Pro\n self.k3pro_button = pyxbmct.RadioButton('')\n self.placeControl(self.k3pro_button, 8, 11, rowspan=2, columnspan=4)\n self.connect(self.k3pro_button, self.k3pro_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'k3pro', 2) == 1:\n self.k3pro_button.setSelected(True)\n else:\n self.k3pro_button.setSelected(False)\n k3pro = pyxbmct.Image(addonfolder+artsfolder+'/k3pro.png')\n self.placeControl(k3pro, 8, 11, rowspan=2, columnspan=4)\n\n\t\t# Close button\n self.close_button = pyxbmct.Button('Exit')\n self.placeControl(self.close_button, 13, 15, rowspan=1, columnspan=1)\n self.connect(self.close_button, lambda: self.closepage())",
"def set_controls(self):\n image = pyxbmct.Image(addonfolder+artsfolder+'/dvb.png')\n self.placeControl(image, 0, 0, rowspan=8, columnspan=16)\n\t\t\n\t\t# DVB-C\n self.dvbc_button = pyxbmct.RadioButton('')\n self.placeControl(self.dvbc_button, 10, 1, rowspan=2, columnspan=4)\n self.connect(self.dvbc_button, self.dvbc_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'wdvbc', 2) == 1:\n self.dvbc_button.setSelected(True)\n else:\n self.dvbc_button.setSelected(False)\n dvbc = pyxbmct.Image(addonfolder+artsfolder+'/dvbc.png')\n self.placeControl(dvbc, 10, 1, rowspan=2, columnspan=4)\n \n\t\t# DVB-S\n self.dvbs_button = pyxbmct.RadioButton('')\n self.placeControl(self.dvbs_button, 10, 6, rowspan=2, columnspan=4)\n self.connect(self.dvbs_button, self.dvbs_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'wdvbs', 2) == 1:\n self.dvbs_button.setSelected(True)\n else:\n self.dvbs_button.setSelected(False)\n dvbs = pyxbmct.Image(addonfolder+artsfolder+'/dvbs2.png')\n self.placeControl(dvbs, 10, 6, rowspan=2, columnspan=4)\n\n\t\t# DVB-T\n self.dvbt_button = pyxbmct.RadioButton('')\n self.placeControl(self.dvbt_button, 10, 11, rowspan=2, columnspan=4)\n self.connect(self.dvbt_button, self.dvbt_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'wdvbt', 2) == 1:\n self.dvbt_button.setSelected(True)\n else:\n self.dvbt_button.setSelected(False)\n dvbt = pyxbmct.Image(addonfolder+artsfolder+'/dvbt.png')\n self.placeControl(dvbt, 10, 11, rowspan=2, columnspan=4)\n\n\t\t# Close button\n self.close_button = pyxbmct.Button('Exit')\n self.placeControl(self.close_button, 13, 15, rowspan=1, columnspan=1)\n self.connect(self.close_button, lambda: self.closepage())",
"def set_controls(self):\n image = pyxbmct.Image(addonfolder+artsfolder+'/dvb.png')\n self.placeControl(image, 0, 0, rowspan=8, columnspan=16)\n\t\t\n\t\t# DVB-C\n self.dvbc_button = pyxbmct.RadioButton('')\n self.placeControl(self.dvbc_button, 10, 1, rowspan=2, columnspan=4)\n self.connect(self.dvbc_button, self.dvbc_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'gdvbc', 2) == 1:\n self.dvbc_button.setSelected(True)\n else:\n self.dvbc_button.setSelected(False)\n dvbc = pyxbmct.Image(addonfolder+artsfolder+'/dvbc.png')\n self.placeControl(dvbc, 10, 1, rowspan=2, columnspan=4)\n \n\t\t# DVB-S\n self.dvbs_button = pyxbmct.RadioButton('')\n self.placeControl(self.dvbs_button, 10, 6, rowspan=2, columnspan=4)\n self.connect(self.dvbs_button, self.dvbs_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'gdvbs', 2) == 1:\n self.dvbs_button.setSelected(True)\n else:\n self.dvbs_button.setSelected(False)\n dvbs = pyxbmct.Image(addonfolder+artsfolder+'/dvbs2.png')\n self.placeControl(dvbs, 10, 6, rowspan=2, columnspan=4)\n\n\t\t# DVB-T\n self.dvbt_button = pyxbmct.RadioButton('')\n self.placeControl(self.dvbt_button, 10, 11, rowspan=2, columnspan=4)\n self.connect(self.dvbt_button, self.dvbt_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'gdvbt', 2) == 1:\n self.dvbt_button.setSelected(True)\n else:\n self.dvbt_button.setSelected(False)\n dvbt = pyxbmct.Image(addonfolder+artsfolder+'/dvbt.png')\n self.placeControl(dvbt, 10, 11, rowspan=2, columnspan=4)\n\n\t\t# Close button\n self.close_button = pyxbmct.Button('Exit')\n self.placeControl(self.close_button, 13, 15, rowspan=1, columnspan=1)\n self.connect(self.close_button, lambda: self.closepage())",
"def floorTB1Checked(self, state):\n if state == QtCore.Qt.Checked:\n print('Show TB1 Floor Selected')\n # # release video capture\n # self.cap = cv2.VideoCapture(0)\n # # read image in BGR format\n # ret, img = self.cap.read()\n # image = QtGui.QImage(img, img.shape[1], img.shape[0],\n # img.shape[1] * img.shape[2],\n # QtGui.QImage.Format_RGB888)\n # pixmap = QtGui.QPixmap()\n # pixmap.convertFromImage(image.rgbSwapped())\n # self.simulationWidget.setPixmap(pixmap)\n else:\n print('Hide TB1 Floor Selected')\n # self.cap.release()",
"def set_controls(self):\n # Image control\n image = pyxbmct.Image(addonfolder+artsfolder+'/wplay.png')\n self.placeControl(image, 0, 0, rowspan=10, columnspan=16)\n\n # LNB1\n self.wplnb1_button = pyxbmct.RadioButton('')\n self.placeControl(self.wplnb1_button, 11, 1, rowspan=1, columnspan=4)\n self.connect(self.wplnb1_button, self.wplnb1_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'wplnb1', 2) == 1:\n self.wplnb1_button.setSelected(True)\n else:\n self.wplnb1_button.setSelected(False)\n lnb1 = pyxbmct.Image(addonfolder+artsfolder+'/lnb1.png')\n self.placeControl(lnb1, 11, 1, rowspan=1, columnspan=4)\n\n # LNB2\n self.wplnb2_button = pyxbmct.RadioButton('')\n self.placeControl(self.wplnb2_button, 11, 6, rowspan=1, columnspan=4)\n self.connect(self.wplnb2_button, self.wplnb2_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'wplnb2', 2) == 1:\n self.wplnb2_button.setSelected(True)\n else:\n self.wplnb2_button.setSelected(False)\n lnb2 = pyxbmct.Image(addonfolder+artsfolder+'/lnb2.png')\n self.placeControl(lnb2, 11, 6, rowspan=1, columnspan=4)\n\n # LNB1/LNB2\n self.wplnboth_button = pyxbmct.RadioButton('')\n self.placeControl(self.wplnboth_button, 11, 11, rowspan=1, columnspan=4)\n self.connect(self.wplnboth_button, self.wplnboth_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'wplnboth', 2) == 1:\n self.wplnboth_button.setSelected(True)\n else:\n self.wplnboth_button.setSelected(False)\n both = pyxbmct.Image(addonfolder+artsfolder+'/both.png')\n self.placeControl(both, 11, 11, rowspan=1, columnspan=4)\n\n # Close button\n self.close_button = pyxbmct.Button('Exit')\n self.placeControl(self.close_button, 13, 15, rowspan=1, columnspan=1)\n self.connect(self.close_button, lambda: self.closepage())",
"def actualizeHardwarelabel (self, data):\n if data.has_key(StatusMsg.label_state):\n self.hardwarelabel.setText(self.stateDecoder.get(data.get(StatusMsg.label_state)))",
"def on_system_time_textChanged(self, p0):\n # Updated 12/28/16\n time_start = time.time()\n \n self.Init_prog.setValue( percentCheck(self.current_Init_Time.text(), self.Init_tot.text()) ) \n self.Side_1_prog.setValue( percentCheck(self.current_Side_1_Time.text(), self.Side_1_tot.text()) ) \n self.Side_2_prog.setValue( percentCheck(self.current_Side_2_Time.text(), self.Side_2_tot.text()) ) \n self.Total_prog.setValue( percentCheck(self.current_Rep_Cycle.text(), self.Rep_Cycle_tot.text()) ) \n \n # Get Current Bus Values For Mode Discovery\n current_bus_A = I2C.read_byte_data(In_bus, pinIn_A) # bus with valves\n current_bus_B = I2C.read_byte_data(In_bus, pinIn_B) # bus with pumps + magnets\n self.System_Mode.setText( disoverMode( current_bus_A + current_bus_B ) )\n \n # update top GUI info field\n self.as_of_time.setText(\"System Status as of \"+ time.strftime(\"%B %d, %Y at %H:%M:%S\"))\n \n # update individual device status on GUI\n self.FP101_val.setText( returnStatus(var1, 0) )\n self.FP102_val.setText( returnStatus(var2, 0) )\n self.FP103_val.setText( returnStatus(var3, 0) )\n \n self.FV201_val.setText( returnStatus(var4, 'open') )\n self.FV202_val.setText( returnStatus(var5, 'open') )\n self.FV203_val.setText( returnStatus(var6, 'open') )\n self.FV204_val.setText( returnStatus(var7, 'open') )\n \n self.EM201_val.setText( returnStatus(var8, 0) )\n self.EM202_val.setText( returnStatus(var9, 0) )\n\n # update Temperatures\n tempString = str(var10)\n self.temp_val_1.setText(tempString[0:4])\n\n tempString = str(var11)\n self.temp_val_2.setText(tempString[0:4])\n\n # update pH\n pHString = str(var12)\n self.pH_val.setText(pHString[0:6])\n \n if (time.time()-time_start > update_GUI_interval):\n print(\"GUI update longer than update interval...\")",
"def _checkbutton_toggle(self):\n new_value = self.value_checkbutton.var.get()\n if self.master.change_field_value(self.field_name, new_value):\n self.value_checkbutton.config(fg=\"#3F3\" if new_value else \"#F33\", text=\"ON\" if new_value else \"OFF\")\n else:\n self.value_checkbutton.var.set(not new_value)",
"def toggled_comunication(self):\n if self.actionPC_Monitor.isChecked() and self.actionPC_Monitor.isEnabled():\n self.actionPC_Monitor.setEnabled(0)\n self.actionPC_Sensor_Actuador.setChecked(0)\n self.actionPC_Sensor_Actuador.setEnabled(1)\n self.monitor_environment()\n \n elif self.actionPC_Sensor_Actuador.isChecked() and self.actionPC_Sensor_Actuador.isEnabled():\n self.actionPC_Sensor_Actuador.setEnabled(0)\n self.actionPC_Monitor.setChecked(0)\n self.actionPC_Monitor.setEnabled(1)\n self.actuator_environment()",
"def set_controls(self):\n # Image control\n image = pyxbmct.Image(addonfolder+artsfolder+'/wetek.png')\n self.placeControl(image, 0, 0, rowspan=8, columnspan=16)\n\n\t\t# WetekPlay\n self.wp_button = pyxbmct.RadioButton('')\n self.placeControl(self.wp_button, 10, 3, rowspan=2, columnspan=4)\n self.connect(self.wp_button, self.wp_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'wetekplay', 2) == 1:\n self.wp_button.setSelected(True)\n else:\n self.wp_button.setSelected(False)\n wp = pyxbmct.Image(addonfolder+artsfolder+'/wp.png')\n self.placeControl(wp, 10, 3, rowspan=2, columnspan=4)\n\n\t\t# WetekPlay2\n self.wp2_button = pyxbmct.RadioButton('')\n self.placeControl(self.wp2_button, 10, 9, rowspan=2, columnspan=4)\n self.connect(self.wp2_button, self.wp2_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'wetekplay2', 2) == 1:\n self.wp2_button.setSelected(True)\n else:\n self.wp2_button.setSelected(False)\n wp2 = pyxbmct.Image(addonfolder+artsfolder+'/wp2.png')\n self.placeControl(wp2, 10, 9, rowspan=2, columnspan=4)\n\n\t\t# Close button\n self.close_button = pyxbmct.Button('Exit')\n self.placeControl(self.close_button, 13, 15, rowspan=1, columnspan=1)\n self.connect(self.close_button, lambda: self.closepage())",
"def set_controls(self):\n # Image control\n image = pyxbmct.Image(addonfolder+artsfolder+'/khadas.png')\n self.placeControl(image, 0, 0, rowspan=7, columnspan=16)\n\n\t\t# KHADAS VTV\n kvtv = pyxbmct.Image(addonfolder+artsfolder+'/kvtv.png')\n self.placeControl(kvtv, 8, 2, rowspan=5, columnspan=4)\n\n\t\t# KHADAS VIM 2\n kvim = pyxbmct.Image(addonfolder+artsfolder+'/kvim.png')\n self.placeControl(kvim, 8, 11, rowspan=5, columnspan=4)\n\n\n\t\t# KHADAS KVIM2 & VTV\n self.kvimvtv_button = pyxbmct.RadioButton('')\n self.placeControl(self.kvimvtv_button, 10, 7, rowspan=2, columnspan=3)\n self.connect(self.kvimvtv_button, self.kvimvtv_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'kvim2', 2) == 1:\n self.kvimvtv_button.setSelected(True)\n else:\n self.kvimvtv_button.setSelected(False)\n kvimvtv = pyxbmct.Image(addonfolder+artsfolder+'/kvimvtv.png')\n self.placeControl(kvimvtv, 10, 7, rowspan=2, columnspan=3)\n\n\t\t# Close button\n self.close_button = pyxbmct.Button('Exit')\n self.placeControl(self.close_button, 13, 15, rowspan=1, columnspan=1)\n self.connect(self.close_button, lambda: self.closepage())"
]
| [
"0.7537354",
"0.6053899",
"0.6034029",
"0.59560686",
"0.5829578",
"0.5829035",
"0.57404166",
"0.5730444",
"0.57228154",
"0.5716257",
"0.571111",
"0.5706572",
"0.5669908",
"0.56695765",
"0.56612366",
"0.56506187",
"0.56427896",
"0.56363565",
"0.5615632",
"0.56057274",
"0.5540846",
"0.55301654",
"0.55282897",
"0.5527057",
"0.551216",
"0.5509955",
"0.5495109",
"0.5486482",
"0.54430765",
"0.5409896"
]
| 0.87057966 | 0 |
Updates the power reading text on the GUI | def _update_power_label(self):
#Checks if > 0.5s has elapsed since the last change to the power reading label
#I do this since otherwise the text label updates too quickly and it's annoying
#to read.
currTime = time.time()
if currTime - self._last_power_text_update > 0.5:
#If it updates, reads in the power and updates
#TODO: Read the power in a single function and have all of its consumers (the feedback update,
#the power label update, and the plotting) access that member variable. Not a huge deal, but it
#would slightly speed things up and be a bit cleaner.
power = self.gain*np.array(self._ai_client.get_ai_voltage(self._ai_channel, max_range=self.max_input_voltage))
self.widgets['label_power'].setText(str(power[-1]))
self._last_power = power[-1]/self.gain
self._last_power_text_update = currTime | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Update(self, mode = UPDATE_MODE.all):\r\n aux_versions = dstore.Get(\"versions\")\r\n \r\n if(aux_versions['hw'] != None): \r\n Ui().lineHwVersion.setText(str(aux_versions['hw'])) \r\n else:\r\n Ui().lineHwVersion.setText(\"- -\")\r\n \r\n if(aux_versions['fw'] != None): \r\n Ui().lineFwVersion.setText(str(aux_versions['fw'])) \r\n else:\r\n Ui().lineFwVersion.setText(\"- -\") \r\n \r\n \r\n \r\n \"\"\" TERMINAL INFO \"\"\"\r\n aux_terminal_info = dstore.Get(\"terminal_info\", \"GET\")\r\n \r\n \"\"\" number of cells \"\"\"\r\n if(aux_terminal_info['number_of_cells'] != None):\r\n Ui().lineCells.setText(str(aux_terminal_info['number_of_cells'])) \r\n else:\r\n Ui().lineCells.setText(\"-\") \r\n \r\n \r\n \"\"\" battery \"\"\"\r\n if(aux_terminal_info['battery'] != None):\r\n Ui().lineBattery.setText(str(aux_terminal_info['battery'])+\" %\") \r\n else:\r\n Ui().lineBattery.setText(\"-- %\") \r\n \r\n \"\"\" speaker \"\"\" \r\n if(aux_terminal_info['speaker']['keys'] == True):\r\n Ui().lineSpeakerKeys.setText(\"ON\")\r\n Ui().pushSpeakerKeys.setText(\"OFF\")\r\n Ui().pushSpeakerKeys.setEnabled(True)\r\n Ui().pushSpeakerSystem.setEnabled(True)\r\n Ui().pushSpeakerTiming.setEnabled(True)\r\n elif(aux_terminal_info['speaker']['keys'] == False):\r\n Ui().lineSpeakerKeys.setText(\"OFF\")\r\n Ui().pushSpeakerKeys.setText(\"ON\")\r\n Ui().pushSpeakerKeys.setEnabled(True)\r\n Ui().pushSpeakerSystem.setEnabled(True)\r\n Ui().pushSpeakerTiming.setEnabled(True)\r\n else:\r\n Ui().lineSpeakerKeys.setText(\"- -\")\r\n Ui().pushSpeakerKeys.setText(\"- -\")\r\n \r\n if(aux_terminal_info['speaker']['system'] == True):\r\n Ui().lineSpeakerSystem.setText(\"ON\")\r\n Ui().pushSpeakerSystem.setText(\"OFF\")\r\n Ui().pushSpeakerSystem.setEnabled(True)\r\n elif(aux_terminal_info['speaker']['system'] == False):\r\n Ui().lineSpeakerSystem.setText(\"OFF\")\r\n Ui().pushSpeakerSystem.setText(\"ON\")\r\n Ui().pushSpeakerSystem.setEnabled(True)\r\n else:\r\n Ui().lineSpeakerSystem.setText(\"- -\")\r\n Ui().pushSpeakerSystem.setText(\"- -\")\r\n Ui().pushSpeakerSystem.setEnabled(False)\r\n \r\n if(aux_terminal_info['speaker']['timing'] == True):\r\n Ui().lineSpeakerTiming.setText(\"ON\")\r\n Ui().pushSpeakerTiming.setText(\"OFF\")\r\n Ui().pushSpeakerTiming.setEnabled(True)\r\n elif(aux_terminal_info['speaker']['timing'] == False):\r\n Ui().lineSpeakerTiming.setText(\"OFF\")\r\n Ui().pushSpeakerTiming.setText(\"ON\")\r\n Ui().pushSpeakerTiming.setEnabled(True)\r\n else: \r\n Ui().lineSpeakerTiming.setText(\"- -\")\r\n Ui().pushSpeakerTiming.setText(\"- -\")\r\n Ui().pushSpeakerTiming.setEnabled(False)\r\n \r\n if(aux_terminal_info['speaker']['keys'] == None or aux_terminal_info['speaker']['timing']==None or aux_terminal_info['speaker']['system']==None): \r\n Ui().pushSpeakerKeys.setEnabled(False)\r\n Ui().pushSpeakerSystem.setEnabled(False)\r\n Ui().pushSpeakerTiming.setEnabled(False)\r\n else:\r\n Ui().pushSpeakerKeys.setEnabled(True)\r\n Ui().pushSpeakerSystem.setEnabled(True)\r\n Ui().pushSpeakerTiming.setEnabled(True)\r\n \r\n \r\n return True",
"def _update(self):\n\n # Read the power supply status\n # TODO: Display power icon while charging\n plugged_in = open('/sys/class/power_supply/AC0/online').readline().strip() # pylint: disable=unused-variable\n power_percent = atoi(open('/sys/class/power_supply/BAT0/capacity').readline().strip())\n\n self.window.pcBatteryDisplay.setValue(power_percent)\n\n # Set color based on power_level\n if power_percent <= 25:\n self.window.pcBatteryDisplay.setStyleSheet('QProgressBar::chunk {{background-color: #{:06x}}}'.format(\n gui_utils.Color.RED))\n elif power_percent <= 60:\n self.window.pcBatteryDisplay.setStyleSheet('QProgressBar::chunk {{background-color: #{:06x}}}'.format(\n gui_utils.Color.ORANGE))\n else:\n self.window.pcBatteryDisplay.setStyleSheet('QProgressBar::chunk {{background-color: #{:06x}}}'.format(\n gui_utils.Color.BAR_GREEN))\n\n # Compute the CPU usage\n with open('/proc/stat') as f:\n\n # Parse the data from the file\n fields = [float(column) for column in f.readline().strip().split()[1:]]\n idle, total = fields[3], sum(fields)\n idle_delta = idle - self.cpu_last_idle\n total_delta = total - self.cpu_last_total\n self.cpu_last_idle = idle\n self.cpu_last_total = total\n\n # Calulate the utilisation\n utilisation = 100.0 * (1.0 - idle_delta / total_delta)\n self.cpu_buffer.append(utilisation)\n\n self.window.pcCpuDisplay.setValue(sum(self.cpu_buffer) / len(self.cpu_buffer))",
"def update(self):\n\n if self._old:\n # delete wave from last update cycle\n self._pi.wave_delete(self._old)\n self._old = None\n\n # show power state\n self._pi.write(pins.Q4, self._state.powered)\n\n # update wave\n if self._state.ready:\n self._old = self._wave\n self._wave = self.create_wave(self._state)\n self._pi.wave_send_repeat(self._wave)\n\n # power down\n elif self._wave:\n self._write_all_low()\n self._old = self._wave\n self._wave = None",
"def update(self):\n self.getPower()\n if self._state != STATE_OFF:\n self.getVolume()\n self.getCurrentChannel()",
"def updateTextctrl(self):\r\n if self.settings.forecolor is not None:\r\n self.text_ctrl_output.SetForegroundColour(self.settings.forecolor)\r\n if self.settings.backcolor is not None:\r\n self.text_ctrl_output.SetBackgroundColour(self.settings.backcolor)\r\n if self.settings.font is not None:\r\n self.text_ctrl_output.SetFont(self.settings.font)\r\n text = self.text_ctrl_output.GetValue()\r\n self.text_ctrl_output.Clear()\r\n self.text_ctrl_output.SetValue(text)\r\n self.text_ctrl_output.Update()",
"def refresh(self, event):\n self.updatetext(self.textfunc, self.textargs)",
"def update(self):\n self._varText.setText(self._function())",
"def updateInfo(self):\n\t\tif ( self.errorCount == 2 ):\n\t\t\tself.pitchText.text = \"Unclear microphone input...\"\n\n\t\tcurNote = self.listener.pitch.note\n\t\tcurFreq = self.listener.pitch.freq\n\t\tself.tuneDelta, self.tuneNeighbor = self.listener.pitch.inTune()\n\t\ttuneText = \"%0.2f Hz off from %s (%0.1f Hz)\" % (abs(self.tuneDelta), \n\t\t\t\t\t\t\t\t\t\t\t\tself.tuneNeighbor.note, \n\t\t\t\t\t\t\t\t\t\t\t\tcurFreq)\n\t\tself.pitchText.text = tuneText",
"def _update_output_voltage_label(self):\n self.widgets['p_outputVoltage'].setValue((self._curr_output_voltage))",
"def _update_gui(self):\r\n \r\n # Update the RF button.\r\n rf_on = self.api.get_output()\r\n if rf_on == None: rf_on = True\r\n self.button_rf.set_checked(rf_on, block_events=True).enable()\r\n \r\n # Update the combo; we block first just in case the value doesn't \"change\"\r\n if self.api == None: self.label_instrument_name.set_text('Simulation')\r\n else:\r\n if self.api.get_mode() == 'Fixed': self.combo_mode.set_value(0, block_events=True).enable()\r\n else: self.combo_mode.set_value(1, block_events=True).enable()\r\n self._combo_mode_changed()\r\n \r\n # Update the list plot\r\n self.query_list()",
"def update_display(self): #pylint: disable=too-many-branches\n if self.speed_enabled or self.cadence_enabled:\n speed, cadence = self.read_s_and_c()\n\n if self.heart_enabled:\n heart = self.read_heart()\n if not self._setup:\n self._hr_label = self._label_maker('{} bpm'.format(heart), 50, self._heart_y) # 75\n self.splash.append(self._hr_label)\n else:\n self._hr_label.text = '{} bpm'.format(heart)\n\n if self.speed_enabled:\n if not self._setup:\n self._sp_label = self._label_maker('{} mph'.format(speed), 50, self._speed_y) # 120\n self.splash.append(self._sp_label)\n else:\n self._sp_label.text = '{} mph'.format(speed)\n\n if self.cadence_enabled:\n if not self._setup:\n self._cadence_label = self._label_maker('{} rpm'.format(cadence), 50,\n self._cadence_y)\n self.splash.append(self._cadence_label)\n else:\n self._cadence_label.text = '{} rpm'.format(cadence)\n\n if self.ams_enabled:\n ams = self.read_ams()\n if not self._setup:\n self._ams_label = self._label_maker('{}'.format(ams), 50, self._ams_y,\n font=self.arial16)\n self.splash.append(self._ams_label)\n else:\n self._ams_label.text = '{}'.format(ams)\n\n self._setup = True",
"def on_system_time_textChanged(self, p0):\n # Updated 12/28/16\n time_start = time.time()\n \n self.Init_prog.setValue( percentCheck(self.current_Init_Time.text(), self.Init_tot.text()) ) \n self.Side_1_prog.setValue( percentCheck(self.current_Side_1_Time.text(), self.Side_1_tot.text()) ) \n self.Side_2_prog.setValue( percentCheck(self.current_Side_2_Time.text(), self.Side_2_tot.text()) ) \n self.Total_prog.setValue( percentCheck(self.current_Rep_Cycle.text(), self.Rep_Cycle_tot.text()) ) \n \n # Get Current Bus Values For Mode Discovery\n current_bus_A = I2C.read_byte_data(In_bus, pinIn_A) # bus with valves\n current_bus_B = I2C.read_byte_data(In_bus, pinIn_B) # bus with pumps + magnets\n self.System_Mode.setText( disoverMode( current_bus_A + current_bus_B ) )\n \n # update top GUI info field\n self.as_of_time.setText(\"System Status as of \"+ time.strftime(\"%B %d, %Y at %H:%M:%S\"))\n \n # update individual device status on GUI\n self.FP101_val.setText( returnStatus(var1, 0) )\n self.FP102_val.setText( returnStatus(var2, 0) )\n self.FP103_val.setText( returnStatus(var3, 0) )\n \n self.FV201_val.setText( returnStatus(var4, 'open') )\n self.FV202_val.setText( returnStatus(var5, 'open') )\n self.FV203_val.setText( returnStatus(var6, 'open') )\n self.FV204_val.setText( returnStatus(var7, 'open') )\n \n self.EM201_val.setText( returnStatus(var8, 0) )\n self.EM202_val.setText( returnStatus(var9, 0) )\n\n # update Temperatures\n tempString = str(var10)\n self.temp_val_1.setText(tempString[0:4])\n\n tempString = str(var11)\n self.temp_val_2.setText(tempString[0:4])\n\n # update pH\n pHString = str(var12)\n self.pH_val.setText(pHString[0:6])\n \n if (time.time()-time_start > update_GUI_interval):\n print(\"GUI update longer than update interval...\")",
"def update(self, *_):\n if not self.input_main.edit_modified():\n return\n\n analyze_text = self.create_analysis()\n self.output_main[\"state\"] = tk.NORMAL\n self.output_main.delete(\"1.0\", tk.END)\n self.output_main.insert(\"1.0\", analyze_text)\n self.output_main[\"state\"] = tk.DISABLED\n self.input_main.edit_modified(False)",
"def update_steps_display(self):\r\n self.steps_display[\"text\"] = str(self.steps.get())",
"def show(self):\n self.set_text(self.read())",
"def changeValue(self):\r\n # productive #onUpDnArrow\r\n profprint()\r\n widget = slicer.modules.NeedleFinderWidget\r\n # widget.scrollPointButton.setText('Scroll Point for Needle ' + str(widget.editNeedleTxtBox.value) + ' (pt: ' + str(self.ptNumber) + ')')\r\n self.lockControlPoints(widget.editNeedleTxtBox.value)\r\n self.unlockControlPoints(widget.editNeedleTxtBox.value)\r\n widget.drawValidationNeedlesButton.text = \"Render Manual Needle \" + str(widget.editNeedleTxtBox.value)",
"def changeValue(self):\n #productive #onUpDnArrow\n profprint()\n widget = slicer.modules.NeedleFinderWidget\n widget.scrollPointButton.setText('Scroll Point for Needle ' + str(widget.editNeedleTxtBox.value)+ ' (pt: '+str(self.ptNumber)+')')",
"def updateState(self):\n QtGui.QLabel.setText(self, self._state[0])",
"def update(self):\n self.plot.draw()\n \n func=str(self.edit1b.currentText())\n if self.win.test()==0:\n x=np.linspace(0,10,200)\n elif self.win.test()==1:\n x=np.linspace(0,0.40,200)\n \n pattern1=r'Steel'\n pattern2=r'Aluminium'\n pattern3=r'[\\d]+'\n \n if (func!='Comparison Chart'):\n self.edit2b.setDisabled(False)\n self.edit3b.setDisabled(False)\n self.edit4b.setDisabled(False)\n if (func=='Quenched/Tempered Steel'):\n alpha = 0.0025\n elif (func=='Annealed Steel'):\n alpha = 0.01\n elif (func=='Steel (input Su)'):\n S = str(self.edit2b.text())\n if (self.win.test()==0):\n S = str(float(S)/6.895)\n alpha = notch.alpha(eval(S))\n elif (func=='Aluminium Alloy 356.0 as cast'):\n rho = 0.08\n elif (func=='Aluminium Alloy 6061'):\n rho = 0.025\n elif (func=='Aluminium Alloy 7075'):\n rho = 0.015\n elif (func=='Material dropdown'):\n pass\n \n y1=[]\n if re.search(pattern1,func):\n Su=notch.su_s(alpha)\n if (self.win.test()==0):\n Su = Su*6.895\n for i in range(len(x)):\n y1.append(notch.nsp(alpha,x[i],self.win.test()))\n y=np.asarray(y1)\n if (re.search(pattern3,str(self.edit3b.text()))):\n r=eval(str(self.edit3b.text()))\n self.edit4b.setText(str(notch.nsp(alpha,r,self.win.test())))\n elif re.search(pattern2,func):\n Su=notch.su_a(rho)\n if (self.win.test()==0):\n Su = Su*6.895\n for i in range(len(x)):\n y1.append(notch.nsn(rho,x[i],self.win.test()))\n y=np.asarray(y1)\n if (re.search(pattern3,str(self.edit3b.text()))):\n r=eval(str(self.edit3b.text()))\n self.edit4b.setText(str(notch.nsn(rho,r,self.win.test())))\n \n self.edit2b.setText(str(Su))\n func1 = 'Steel (Su='+str(self.edit2b.text())+')'\n if (func!='Steel (input Su)'):\n self.plot.redraw(x,y,func, self.xlabel)\n elif (func=='Steel (input Su)'):\n self.plot.redraw(x,y,func1, self.xlabel)\n \n elif (func=='Comparison Chart'):\n self.edit2b.setText(\"\")\n self.edit2b.setDisabled(True)\n self.edit3b.setText(\"\")\n self.edit3b.setDisabled(True)\n self.edit4b.setText(\"\")\n self.edit4b.setDisabled(True)\n self.plot.draw_comp(self.xlabel, self.win.test())",
"def update(self, event, level):\n\t\tDialog.update(self, event, level)\n\t\tif(self.index/SCROLL_CONSTANT >= len(self.text)):\n\t\t\tself.choosing = True",
"def unitUpdate(self):\n newText = self.unitGroup.unitString()\n cursorPos = len(newText) - self.text().length() + self.cursorPosition()\n if cursorPos < 0: # cursor set to same distance from right end\n cursorPos = 0\n self.blockSignals(True)\n self.setText(newText)\n self.setCursorPosition(cursorPos)\n self.blockSignals(False)\n self.emit(QtCore.SIGNAL('unitChanged')) # update numEdit",
"def update_information_box_text(self):\n # Get the minefield options from the model.\n options = self.controller.get_minefield_options()\n\n # Default values.\n message = \"Unrecognized difficulty.\"\n length = 10\n height = 10\n density = 10\n option = Option(length, height, density)\n\n # Change default values based on button hovering.\n if self.selected is self.buttons[0]:\n message = \"Small field and easy mine density.\"\n option = options[\"easy\"]\n length = option.l\n height = option.h\n density = option.d\n elif self.selected is self.buttons[1]:\n message = \"Increased field area and mine density.\"\n option = options[\"medium\"]\n length = option.l\n height = option.h\n density = option.d\n elif self.selected is self.buttons[2]:\n message = \"Challenging field and mine density.\"\n option = options[\"hard\"]\n length = option.l\n height = option.h\n density = option.d\n elif (self.selected is self.buttons[3] or\n self.selected.get_type() is UIType.NumberField):\n message = \"Customized settings.\"\n option = options[\"custom\"]\n length = option.l\n height = option.h\n density = option.d\n\n # Set values.\n self.info_message_textbox.set_text(message)\n self.numberfields[0].set_value(length)\n self.numberfields[1].set_value(height)\n mines = self.controller.calculate_mines(option)\n plural = \"\" if mines == 1 else \"s\"\n num_mines_msg = \"% ({} mine{})\".format(mines, plural)\n self.numberfields[2].set_value(density)\n self.numberfields[2].set_postfix(num_mines_msg)",
"def updateText(self,new_value):\n if self.value.getText() == new_value:\n pass\n else:\n self.value.setText(new_value)\n # Visual feedback when the value changes for the user\n for i in range(2):\n time.sleep(0.1)\n\n if i % 2 == 0:\n self.value.setStyle('bold')\n else:\n self.value.setStyle('normal')",
"def updateState(self):\n QtGui.QLineEdit.setText(self, self._state[0])",
"def get_mini_map_text(self, event=None):\n try:\n text_area = self.get_current()\n except Exception as e:\n print('error at get_mini_map_text', e)\n return\n data = text_area.get('1.0', 'end')\n self.mini_map_text.config(state='normal')\n self.mini_map_text.delete('1.0', 'end')\n self.mini_map_text.insert('1.0', data)\n self.mini_map_text.config(state='disabled')\n # self.nb.update_idletasks()",
"def percent_released(self):\n num = self.slider_saturation.value()\n #self.label_percent_value.setNum(num)\n \n if not self.connect_serial():\n return\n \n if num != 0:\n word = struct.pack(\"BBBBBBBB\", ID_PERCENT,0,0,0,0,0,0,num)\n else:\n word = struct.pack(\"BBBBBBBB\", ID_PERCENT+ID_STOP,0,0,0,0,0,0,0)\n \n self.textBrowser.append(QtGui.QApplication.translate(\"MainWindow\", \"Sent : \", None, QtGui.QApplication.UnicodeUTF8)+binascii.hexlify(word)+\"\\n\")\n \n self.ser.write(word)\n #self.textBrowser.append(\"Reciv : \"+binascii.hexlify(self.ser.read(INPUT_DATA_SIZE)))",
"def update_stockfish(self):\n if (self.show_stockfish and\n \"pscore\" in self.game.info[self.halfmove]):\n self.stock_buffer.set_text(\n (\"Score: {pscore:.1f} ({score})\\n\" +\n \"Depth: {depth} ({seconds:.1f} sec)\\n\" +\n \"PV : {pv}\").format(\n **self.game.info[self.halfmove]))\n else:\n self.stock_buffer.set_text(\"\")",
"def update_ui(self):\n # main data\n self.lAcc.setText(self.settings.ACCOUNT)\n # self.lExcessLiquidity.setText(str(self.ibkrworker.app.excessLiquidity))\n # self.lSma.setText(str(self.ibkrworker.app.sMa))\n if hasattr(self.ibkrworker.app, 'smaWithSafety'):\n self.lSma.setText(str(round(self.ibkrworker.app.smaWithSafety, 1)))\n else:\n self.lSma.setText(str(round(self.ibkrworker.app.sMa, 1)))\n self.lMarketValue.setText(str(self.ibkrworker.app.netLiquidation))\n self.lblAvailTrades.setText(str(self.ibkrworker.app.tradesRemaining))\n self.lcdPNL.display(self.ibkrworker.app.dailyPnl)\n if self.ibkrworker.app.dailyPnl > 0:\n palette = self.lcdPNL.palette()\n palette.setColor(palette.WindowText, QtGui.QColor(51, 153, 51))\n self.lcdPNL.setPalette(palette)\n elif self.ibkrworker.app.dailyPnl < 0:\n palette = self.lcdPNL.palette()\n palette.setColor(palette.WindowText, QtGui.QColor(255, 0, 0))\n self.lcdPNL.setPalette(palette)\n\n total_positions_value = 0\n for p in self.ibkrworker.app.openPositions.values():\n if hasattr(p, 'Value'):\n total_positions_value += p[\"Value\"]\n self.lPositionsTotalValue.setText(str(round(total_positions_value, 1)))\n\n self.update_open_positions()\n self.update_live_candidates()\n self.update_open_orders()\n\n # everything disabled for safety - is now enabled\n self.chbxProcess.setEnabled(True)\n self.btnSettings.setEnabled(True)\n\n self.update_session_state()\n\n if not self.uiTimer.isActive():\n self.update_console(\"UI resumed.\")\n self.uiTimer.start(int(self.settings.INTERVALUI) * 1000) # reset the ui timer",
"def update_display_power_status(self):\n\n # Build a set of args to pass to subprocess\n args = self._get_args_for_display_device()\n args += ['GET_POWER_STATUS', self.display_serial]\n\n # Make the subprocess call -- note that a non-zero exit status will\n # trigger a CalledProcessError.\n try:\n process_output = subprocess.check_output(args)\n\n except subprocess.CalledProcessError as err:\n log.error(\n 'Failed to communicate with display.'\n 'Command was {cmd}, exit code is {status}.'.format(\n cmd=err.cmd, status=err.returncode\n )\n )\n # Stop here if we can't get a connection to the display anyway.\n self.display_power_status = DISPLAY_UNKNOWN_STATE\n return\n\n # Decode the byte string read from the subprocess call\n process_output = process_output.decode(\"utf-8\").rstrip()\n\n # Check to see if the current display state is different to previous\n # state.\n new_display_state = process_output\n old_display_state = self.display_power_status\n\n # We only report back the power status if it has actually changed.\n if new_display_state != old_display_state:\n self.display_power_status = new_display_state\n self.analytics.track_event_async(\n category='display power status', action=process_output,\n value=None\n )",
"def update(self):\n self.clear()\n self.score += 1\n self.write(f\"Score : {self.score}\",\n align=\"center\", font=(\"Arial Black\", 20))"
]
| [
"0.66568124",
"0.6600302",
"0.63396484",
"0.63212717",
"0.626569",
"0.62451917",
"0.62050587",
"0.61857045",
"0.61414963",
"0.6130439",
"0.61217785",
"0.6098589",
"0.606162",
"0.6019479",
"0.6007645",
"0.59969217",
"0.5988557",
"0.595387",
"0.5938942",
"0.59360445",
"0.59103364",
"0.58911955",
"0.58904827",
"0.5850673",
"0.5832428",
"0.5813279",
"0.5789246",
"0.5779505",
"0.57653284",
"0.57546616"
]
| 0.79688066 | 0 |
Updates the output voltage label to the current voltage being outputted. This is called when the laser is "locked" and the PID loop is actively changing the output voltage | def _update_output_voltage_label(self):
self.widgets['p_outputVoltage'].setValue((self._curr_output_voltage)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _set_output_voltage_from_label(self):\n if (~self._is_stabilizing): #Only updates value if we are not stabilizing, otherwise the PID loop will be driving the output voltage\n #as opposed to the user.\n self._curr_output_voltage = self.widgets['p_outputVoltage'].value()\n self._ao_client.set_ao_voltage(self._ao_channel, self._curr_output_voltage)",
"def set_control_voltage(self, value):\n self._curr_output_voltage = value\n self._update_output_voltage_label()",
"def set_output_voltage(self, output_voltage, output_port = outputs.out1):\n if abs(output_voltage) < 8.0 and self._voltage_output_range != voltage_range.low:\n self.write_to_serial(':volt:rang low')\n self._voltage_output_range = voltage_range.low\n elif abs(output_voltage) >= 8.0 and self._voltage_output_range != voltage_range.high:\n self.write_to_serial(':volt:rang high')\n self._voltage_output_range = voltage_range.high\n\n if self._output_state[output_port] is False:\n self.write_to_serial(':outp:stat on')\n\n if self._selected_output != output_port:\n self.write_to_serial(':inst:sel ' + output_port.name)\n self._selected_output = output_port\n\n self.write_to_serial(':volt ' + str(output_voltage))",
"def set_voltage(self, value):\n self.write(\":VOLT {}V\".format(value))",
"def update_voutput_pins(self):\n if not pfio:\n raise Exception(\n \"Looks like some sloppy programmer (probably Tom Preston...) \" \\\n \"is trying to update the virtual output pins when the PiFace \" \\\n \"isn't connected. Make sure you check for the pfio before calling \" \\\n \"the update_voutput_pins method. kthxbai.\")\n\n output_bit_map = pfio.read_output()\n for i in range(len(self.output_pins)):\n # updating inner value so that we don't do more SPI reads\n self.output_pins[i]._value = (output_bit_map >> i) & 1 \n\n self.queue_draw()",
"def set_output_current(self, output_current, output_port = outputs.out1):\n #If the voltage is in the low limit, current will probably be limited. So, always set to high voltage here.\n #This should probably be smarter in a future release, so that it only goes to high voltage if low voltage is insufficient to drive the desired current.\n self.set_output_voltage(20.0 , output_port)\n\n if self._output_state[output_port] is False:\n self.write_to_serial(':outp:stat on')\n\n if self._selected_output != output_port:\n self.write_to_serial(':inst:sel ' + output_port.name)\n self._selected_output = output_port\n\n self.write_to_serial(':curr ' + str(output_current))",
"def voltage(self, voltage=None):\n if voltage is not None:\n # set output voltage on all phases\n # self.ts.log_debug('voltage: %s, type: %s' % (voltage, type(voltage)))\n if type(voltage) is not list and type(voltage) is not tuple:\n self.cmd(':AC:SETB:VOLT PERC,%0.1f,%0.1f,%0.1f\\n' % (voltage, voltage, voltage))\n v1 = voltage\n v2 = voltage\n v3 = voltage\n else:\n self.cmd(':AC:SETB:VOLT PERC,%0.1f,%0.1f,%0.1f\\n' % (voltage[0], voltage[0], voltage[0])) # use the first value in the 3 phase list\n v1 = voltage[0]\n v2 = voltage[0]\n v3 = voltage[0]",
"def update_volt_range(self):\r\n self.volt_range_index = self.VoltmeterRangeValue.currentIndex()\r\n self.cmd = self.volt_range_switch.get(self.volt_range_index, None)\r\n if self.connected:\r\n self.I_source.write(self.cmd)",
"def _voltage_changed(self):\n if self.checkValueBool:\n self.check_status()",
"def _update_voltageSetpoint_fromGUI(self):\n self.voltageSetpoint = self.widgets['p_setpoint'].value()/self.gain",
"def increase_voltage(self):\n function_string = 'INCV' + self.output\n return self.scpi_comm(function_string)",
"def set_voltage(self, c, voltage):\n if abs(voltage['V']) < (.00001):\n voltage = 0 * units.V\n \n dev = self.selectedDevice(c)\n if 'slot_number' in c.keys():\n slot_number = c['slot_number']\n self.open_comm(c, slot_number)\n output_state = yield self.get_output_state(c)\n if not output_state:\n try:\n yield self.set_output_state(c, True)\n except:\n self.initialize_mainframe(c)\n yield self.set_output_state(c, True)\n write_str = \"VOLT \"+ str(voltage['V'])\n try:\n yield self.write(c, write_str)\n self.wait_for_completion(c)\n except:\n self.initialize_mainframe(c)\n yield self.write(c, write_str)\n self.wait_for_completion(c)\n self.close_comm(c, slot_number)\n else:\n raise ValueError(self.no_selection_msg())",
"def set_voltage(self, voltage):\n self.ipcon.send_request(self, AnalogOut.FUNCTION_SET_VOLTAGE, (voltage,), 'H', '')",
"def outputValue(self):\n string = self.ask('od;E;')\n string = re.sub(r'^(NDCV|NDCA|EDCV)', r'', string)\n self.notify('voltage', float(string))\n return float(string)",
"def _setVoltageOrCurrent(self, mode, value, slewrate, turnOn):\n string = self.ask(\"od;\")\n if mode == 'voltage' and string.find('CA') != -1:\n self.toVoltage()\n elif mode == 'current' and string.find('CV') != -1:\n self.toCurrent()\n self.setOutputValue(value, slewrate=slewrate)\n if turnOn:\n self.turnOn()\n return self.outputValue()",
"def voltage(self):\n return self.outputValue()",
"def set_voltage(self, v, ch): \n self.write(\"VSET\" + str(ch) + \":\" + str(v) + \"\\n\")",
"def update_volt_rate(self):\r\n self.cmd = None\r\n if self.connected:\r\n self.cmd = (\"SYST:COMM:SER:SEND ':SENS:VOLT:NPLC \"\r\n + (str(self.DeltaRate.value()) if self.current_tab\r\n else str(self.dIdVRate.value())) + \"'\")\r\n self.voltmeter_rate = (str(self.DeltaRate.value())\r\n if self.current_tab\r\n else str(self.dIdVRate.value()))\r\n self.I_source.write(self.cmd)",
"def get_voltage(self):\n print(\"voici le voltage de la batterie\")",
"def write (self, V_out, DAC_A = 1, DAC_B = 1) :\n try :\n assert abs(V_out) <= V_REF\n data = (int(((V_out-V_REFLO)/(V_REF-V_REFLO))*2**14) << 2) & 0xFFFF\n bus.write_i2c_block_data(self.address, self.__pointer_register(write_and_powerup, DAC_A , DAC_B), [(data >> 8) & 0xFF, data & 0xFF])\n time.sleep(0.1) \n except AssertionError :\n print (\"The maxium value of V_out is V_REF\")\n except IOError :\n print (\"Device is not connected\")",
"def read_actual_voltage(self):\n function_string = 'V' + self.output + 'O?'\n value_string = self.scpi_comm(function_string)\n LOGGER.warn(value_string)\n time.sleep(0.1) # This might only be necessary on LAN interface\n try:\n value = float(value_string.replace('V', ''))\n except ValueError:\n value = -999999\n return value",
"def calc_out_voltage(self, input_photocurrent_file):\n pass",
"def get_setVoltage(self):\n self.read(\":VOLT?\")",
"def turn_output_on(self):\n self.instr.write('RF1')\n time.sleep(self.sleep_time)",
"def toVoltage(self, range=None, turnOn=False):\n self.write('F1;E')\n c = None\n if range == 1 or range == '10mV':\n c = 'R2'\n elif range == 2 or range == '100mV':\n c = 'R3'\n elif range == 3 or range == '1V':\n c = 'R4'\n elif range == 4 or range == '10V':\n c = 'R5'\n elif range == 5 or range == '30V':\n c = 'R6'\n if c is not None:\n self.write(c + ';E')\n if turnOn:\n self.turnOn()",
"def _update_power_label(self):\n\n #Checks if > 0.5s has elapsed since the last change to the power reading label\n #I do this since otherwise the text label updates too quickly and it's annoying\n #to read.\n currTime = time.time()\n if currTime - self._last_power_text_update > 0.5:\n #If it updates, reads in the power and updates\n #TODO: Read the power in one function only and then all of the places that use it (updating feedback, updating power label, and plotting)\n #access that member variable. Not a huge deal will slightly speed it up I guess and is a bit cleaner.\n power = self.gain*np.array(self._ai_client.get_ai_voltage(self._ai_channel, max_range=self.max_input_voltage))\n self.widgets['label_power'].setText(str(power[-1]))\n self._last_power = power[-1]/self.gain\n self._last_power_text_update = currTime",
"def setvoltages(self):\n pass",
"def on_cam_base_pitch_hSlider_valueChanged(self, value):\n self.cam_base_pitch_ledit.setText(str(100 + value))",
"def voltage(analog_pin):\n return \"%0.2f\" % inVolts(analogRead(analog_pin))",
"def ac_voltage(self, ac):\n self.set_ac_voltage(f'{ac}' if self._is_min_max(ac) else f'{ac} V')"
]
| [
"0.86768293",
"0.75235605",
"0.73683",
"0.6901445",
"0.6560296",
"0.6503128",
"0.6420002",
"0.64133865",
"0.6375068",
"0.6369727",
"0.636416",
"0.63497466",
"0.63417727",
"0.6259403",
"0.61915195",
"0.6165251",
"0.61429423",
"0.6125383",
"0.5998575",
"0.5986245",
"0.59737235",
"0.59335566",
"0.5918827",
"0.58987373",
"0.5885667",
"0.5858563",
"0.5825083",
"0.5815609",
"0.5811793",
"0.57984746"
]
| 0.87312424 | 0 |
Updates the output control voltage based on the text in the output voltage text box. This method is automatically run when the user changes the value in the text box, allowing the user to control the output voltage directly when the laser power is not "locked". | def _set_output_voltage_from_label(self):
if (~self._is_stabilizing): #Only updates value if we are not stabilizing, otherwise the PID loop will be driving the output voltage
#as opposed to the user.
self._curr_output_voltage = self.widgets['p_outputVoltage'].value()
self._ao_client.set_ao_voltage(self._ao_channel, self._curr_output_voltage) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _update_output_voltage_label(self):\n self.widgets['p_outputVoltage'].setValue((self._curr_output_voltage))",
"def set_control_voltage(self, value):\n self._curr_output_voltage = value\n self._update_output_voltage_label()",
"def set_output_voltage(self, output_voltage, output_port = outputs.out1):\n if abs(output_voltage) < 8.0 and self._voltage_output_range != voltage_range.low:\n self.write_to_serial(':volt:rang low')\n self._voltage_output_range = voltage_range.low\n elif abs(output_voltage) >= 8.0 and self._voltage_output_range != voltage_range.high:\n self.write_to_serial(':volt:rang high')\n self._voltage_output_range = voltage_range.high\n\n if self._output_state[output_port] is False:\n self.write_to_serial(':outp:stat on')\n\n if self._selected_output != output_port:\n self.write_to_serial(':inst:sel ' + output_port.name)\n self._selected_output = output_port\n\n self.write_to_serial(':volt ' + str(output_voltage))",
"def outputValue(self):\n string = self.ask('od;E;')\n string = re.sub(r'^(NDCV|NDCA|EDCV)', r'', string)\n self.notify('voltage', float(string))\n return float(string)",
"def set_voltage(self, value):\n self.write(\":VOLT {}V\".format(value))",
"def update_volt_range(self):\r\n self.volt_range_index = self.VoltmeterRangeValue.currentIndex()\r\n self.cmd = self.volt_range_switch.get(self.volt_range_index, None)\r\n if self.connected:\r\n self.I_source.write(self.cmd)",
"def _voltage_changed(self):\n if self.checkValueBool:\n self.check_status()",
"def _setVoltageOrCurrent(self, mode, value, slewrate, turnOn):\n string = self.ask(\"od;\")\n if mode == 'voltage' and string.find('CA') != -1:\n self.toVoltage()\n elif mode == 'current' and string.find('CV') != -1:\n self.toCurrent()\n self.setOutputValue(value, slewrate=slewrate)\n if turnOn:\n self.turnOn()\n return self.outputValue()",
"def _update_voltageSetpoint_fromGUI(self):\n self.voltageSetpoint = self.widgets['p_setpoint'].value()/self.gain",
"def voltage(self):\n return self.outputValue()",
"def voltage(self, voltage=None):\n if voltage is not None:\n # set output voltage on all phases\n # self.ts.log_debug('voltage: %s, type: %s' % (voltage, type(voltage)))\n if type(voltage) is not list and type(voltage) is not tuple:\n self.cmd(':AC:SETB:VOLT PERC,%0.1f,%0.1f,%0.1f\\n' % (voltage, voltage, voltage))\n v1 = voltage\n v2 = voltage\n v3 = voltage\n else:\n self.cmd(':AC:SETB:VOLT PERC,%0.1f,%0.1f,%0.1f\\n' % (voltage[0], voltage[0], voltage[0])) # use the first value in the 3 phase list\n v1 = voltage[0]\n v2 = voltage[0]\n v3 = voltage[0]",
"def read_actual_voltage(self):\n function_string = 'V' + self.output + 'O?'\n value_string = self.scpi_comm(function_string)\n LOGGER.warn(value_string)\n time.sleep(0.1) # This might only be necessary on LAN interface\n try:\n value = float(value_string.replace('V', ''))\n except ValueError:\n value = -999999\n return value",
"def get_setVoltage(self):\n self.read(\":VOLT?\")",
"def increase_voltage(self):\n function_string = 'INCV' + self.output\n return self.scpi_comm(function_string)",
"def calc_out_voltage(self, input_photocurrent_file):\n pass",
"def valuechange():\n\n tempmin.setMaximum(tempmax.value())\n tempmax.setMinimum(tempmin.value())\n hummin.setMaximum(hummax.value())\n hummax.setMinimum(hummin.value())\n\n self.variables.default_values_dict[\"settings\"][\n \"current_tempmin\"\n ] = tempmin.value()\n self.variables.default_values_dict[\"settings\"][\n \"current_tempmax\"\n ] = tempmax.value()\n self.variables.default_values_dict[\"settings\"][\n \"current_hummin\"\n ] = hummin.value()\n self.variables.default_values_dict[\"settings\"][\n \"current_hummax\"\n ] = hummax.value()\n\n max = build_command(\n self.variables.devices_dict[\"temphum_controller\"],\n (\"set_hummax\", hummax.value()),\n )\n min = build_command(\n self.variables.devices_dict[\"temphum_controller\"],\n (\"set_hummin\", hummin.value()),\n )\n\n self.variables.vcw.write(\n self.variables.devices_dict[\"temphum_controller\"], max\n )\n self.variables.vcw.write(\n self.variables.devices_dict[\"temphum_controller\"], min\n )",
"def set_voltage(self, c, voltage):\n if abs(voltage['V']) < (.00001):\n voltage = 0 * units.V\n \n dev = self.selectedDevice(c)\n if 'slot_number' in c.keys():\n slot_number = c['slot_number']\n self.open_comm(c, slot_number)\n output_state = yield self.get_output_state(c)\n if not output_state:\n try:\n yield self.set_output_state(c, True)\n except:\n self.initialize_mainframe(c)\n yield self.set_output_state(c, True)\n write_str = \"VOLT \"+ str(voltage['V'])\n try:\n yield self.write(c, write_str)\n self.wait_for_completion(c)\n except:\n self.initialize_mainframe(c)\n yield self.write(c, write_str)\n self.wait_for_completion(c)\n self.close_comm(c, slot_number)\n else:\n raise ValueError(self.no_selection_msg())",
"def set_voltage(self, v, ch): \n self.write(\"VSET\" + str(ch) + \":\" + str(v) + \"\\n\")",
"def get_voltage(self):\n print(\"voici le voltage de la batterie\")",
"def update_volt_rate(self):\r\n self.cmd = None\r\n if self.connected:\r\n self.cmd = (\"SYST:COMM:SER:SEND ':SENS:VOLT:NPLC \"\r\n + (str(self.DeltaRate.value()) if self.current_tab\r\n else str(self.dIdVRate.value())) + \"'\")\r\n self.voltmeter_rate = (str(self.DeltaRate.value())\r\n if self.current_tab\r\n else str(self.dIdVRate.value()))\r\n self.I_source.write(self.cmd)",
"def toVoltage(self, range=None, turnOn=False):\n self.write('F1;E')\n c = None\n if range == 1 or range == '10mV':\n c = 'R2'\n elif range == 2 or range == '100mV':\n c = 'R3'\n elif range == 3 or range == '1V':\n c = 'R4'\n elif range == 4 or range == '10V':\n c = 'R5'\n elif range == 5 or range == '30V':\n c = 'R6'\n if c is not None:\n self.write(c + ';E')\n if turnOn:\n self.turnOn()",
"def set_voltage(self, voltage):\n self.ipcon.send_request(self, AnalogOut.FUNCTION_SET_VOLTAGE, (voltage,), 'H', '')",
"def set_voltage(self):\r\n\t\tself.voltage = int(input(\"Enter the Value from (0-65536)= \"))\r\n\t\twhile self.voltage > 65536 :\r\n\t\t\tself.voltage = int(input(\"Enter the Value from (0-65536)= \"))\r\n\t\t\r\n\t\tself.data = [self.voltage >> 8, self.voltage & 0xFF]\r\n\t\t\r\n\t\treturn self.data",
"def read_set_voltage(self):\n function_string = 'V' + self.output + '?'\n value_string = self.scpi_comm(function_string)\n try:\n value = float(value_string.replace('V' + self.output, ''))\n except ValueError:\n value = -9997\n return value",
"def update_voutput_pins(self):\n if not pfio:\n raise Exception(\n \"Looks like some sloppy programmer (probably Tom Preston...) \" \\\n \"is trying to update the virtual output pins when the PiFace \" \\\n \"isn't connected. Make sure you check for the pfio before calling \" \\\n \"the update_voutput_pins method. kthxbai.\")\n\n output_bit_map = pfio.read_output()\n for i in range(len(self.output_pins)):\n # updating inner value so that we don't do more SPI reads\n self.output_pins[i]._value = (output_bit_map >> i) & 1 \n\n self.queue_draw()",
"def on_system_time_textChanged(self, p0):\n # Updated 12/28/16\n time_start = time.time()\n \n self.Init_prog.setValue( percentCheck(self.current_Init_Time.text(), self.Init_tot.text()) ) \n self.Side_1_prog.setValue( percentCheck(self.current_Side_1_Time.text(), self.Side_1_tot.text()) ) \n self.Side_2_prog.setValue( percentCheck(self.current_Side_2_Time.text(), self.Side_2_tot.text()) ) \n self.Total_prog.setValue( percentCheck(self.current_Rep_Cycle.text(), self.Rep_Cycle_tot.text()) ) \n \n # Get Current Bus Values For Mode Discovery\n current_bus_A = I2C.read_byte_data(In_bus, pinIn_A) # bus with valves\n current_bus_B = I2C.read_byte_data(In_bus, pinIn_B) # bus with pumps + magnets\n self.System_Mode.setText( disoverMode( current_bus_A + current_bus_B ) )\n \n # update top GUI info field\n self.as_of_time.setText(\"System Status as of \"+ time.strftime(\"%B %d, %Y at %H:%M:%S\"))\n \n # update individual device status on GUI\n self.FP101_val.setText( returnStatus(var1, 0) )\n self.FP102_val.setText( returnStatus(var2, 0) )\n self.FP103_val.setText( returnStatus(var3, 0) )\n \n self.FV201_val.setText( returnStatus(var4, 'open') )\n self.FV202_val.setText( returnStatus(var5, 'open') )\n self.FV203_val.setText( returnStatus(var6, 'open') )\n self.FV204_val.setText( returnStatus(var7, 'open') )\n \n self.EM201_val.setText( returnStatus(var8, 0) )\n self.EM202_val.setText( returnStatus(var9, 0) )\n\n # update Temperatures\n tempString = str(var10)\n self.temp_val_1.setText(tempString[0:4])\n\n tempString = str(var11)\n self.temp_val_2.setText(tempString[0:4])\n\n # update pH\n pHString = str(var12)\n self.pH_val.setText(pHString[0:6])\n \n if (time.time()-time_start > update_GUI_interval):\n print(\"GUI update longer than update interval...\")",
"def update(self, v_input):\n\n self.v = v_input",
"def update_compliance(self):\r\n self.compliance_voltage = str(self.ComplianceVoltage.value())\r\n #self.cmd = None\r\n if self.connected:\r\n self.cmd = \"CURR:COMP \" + self.compliance_voltage\r\n self.I_source.write(self.cmd)",
"def ac_voltage(self, ac):\n self.set_ac_voltage(f'{ac}' if self._is_min_max(ac) else f'{ac} V')",
"def voltage(analog_pin):\n return \"%0.2f\" % inVolts(analogRead(analog_pin))"
]
| [
"0.8174182",
"0.75136",
"0.6978042",
"0.69540447",
"0.6786127",
"0.678343",
"0.67531073",
"0.66309124",
"0.66028297",
"0.655654",
"0.6517861",
"0.64178216",
"0.6394018",
"0.6364048",
"0.6340842",
"0.6326381",
"0.6299824",
"0.6291961",
"0.6254012",
"0.6247799",
"0.6165654",
"0.6156918",
"0.614569",
"0.61227375",
"0.6068642",
"0.6036891",
"0.6020166",
"0.5998328",
"0.597426",
"0.5961313"
]
| 0.77287716 | 1 |
Allows an external program to directly set the control/output voltage in use by the stabilizer | def set_control_voltage(self, value):
self._curr_output_voltage = value
self._update_output_voltage_label() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _set_output_voltage_from_label(self):\n if (~self._is_stabilizing): #Only updates value if we are not stabilizing, otherwise the PID loop will be driving the output voltage\n #as opposed to the user.\n self._curr_output_voltage = self.widgets['p_outputVoltage'].value()\n self._ao_client.set_ao_voltage(self._ao_channel, self._curr_output_voltage)",
"def voltage(self, voltage=None):\n if voltage is not None:\n # set output voltage on all phases\n # self.ts.log_debug('voltage: %s, type: %s' % (voltage, type(voltage)))\n if type(voltage) is not list and type(voltage) is not tuple:\n self.cmd(':AC:SETB:VOLT PERC,%0.1f,%0.1f,%0.1f\\n' % (voltage, voltage, voltage))\n v1 = voltage\n v2 = voltage\n v3 = voltage\n else:\n self.cmd(':AC:SETB:VOLT PERC,%0.1f,%0.1f,%0.1f\\n' % (voltage[0], voltage[0], voltage[0])) # use the first value in the 3 phase list\n v1 = voltage[0]\n v2 = voltage[0]\n v3 = voltage[0]",
"def set_voltage(self, value):\n self.write(\":VOLT {}V\".format(value))",
"def set_voltage(self, v, ch): \n self.write(\"VSET\" + str(ch) + \":\" + str(v) + \"\\n\")",
"def get_setVoltage(self):\n self.read(\":VOLT?\")",
"def _setVoltageOrCurrent(self, mode, value, slewrate, turnOn):\n string = self.ask(\"od;\")\n if mode == 'voltage' and string.find('CA') != -1:\n self.toVoltage()\n elif mode == 'current' and string.find('CV') != -1:\n self.toCurrent()\n self.setOutputValue(value, slewrate=slewrate)\n if turnOn:\n self.turnOn()\n return self.outputValue()",
"def set_volt(self,volt):\n self.spi_disable()\n self.spi_enable()\n data1=volt*256//self.vref\n if data1>255:\n data1=255\n if data1<0:\n data1=0\n data1=int(data1)\n temp0=AD5601_mode[\"normal\"]|((data1>>2)&0x00ff)\n temp1=(data1<<6)&0x00ff\n data=[temp0,temp1]\n return self.spi_device.write(data)",
"def set_output_voltage(self, output_voltage, output_port = outputs.out1):\n if abs(output_voltage) < 8.0 and self._voltage_output_range != voltage_range.low:\n self.write_to_serial(':volt:rang low')\n self._voltage_output_range = voltage_range.low\n elif abs(output_voltage) >= 8.0 and self._voltage_output_range != voltage_range.high:\n self.write_to_serial(':volt:rang high')\n self._voltage_output_range = voltage_range.high\n\n if self._output_state[output_port] is False:\n self.write_to_serial(':outp:stat on')\n\n if self._selected_output != output_port:\n self.write_to_serial(':inst:sel ' + output_port.name)\n self._selected_output = output_port\n\n self.write_to_serial(':volt ' + str(output_voltage))",
"def set_voltage(self, c, voltage):\n if abs(voltage['V']) < (.00001):\n voltage = 0 * units.V\n \n dev = self.selectedDevice(c)\n if 'slot_number' in c.keys():\n slot_number = c['slot_number']\n self.open_comm(c, slot_number)\n output_state = yield self.get_output_state(c)\n if not output_state:\n try:\n yield self.set_output_state(c, True)\n except:\n self.initialize_mainframe(c)\n yield self.set_output_state(c, True)\n write_str = \"VOLT \"+ str(voltage['V'])\n try:\n yield self.write(c, write_str)\n self.wait_for_completion(c)\n except:\n self.initialize_mainframe(c)\n yield self.write(c, write_str)\n self.wait_for_completion(c)\n self.close_comm(c, slot_number)\n else:\n raise ValueError(self.no_selection_msg())",
"def set_voltage(self, voltage):\n self.ipcon.send_request(self, AnalogOut.FUNCTION_SET_VOLTAGE, (voltage,), 'H', '')",
"def setvoltages(self):\n pass",
"def read_set_voltage(self):\n function_string = 'V' + self.output + '?'\n value_string = self.scpi_comm(function_string)\n try:\n value = float(value_string.replace('V' + self.output, ''))\n except ValueError:\n value = -9997\n return value",
"def set_voltage(self, v):\n self.environment.set_voltage(self.neuron_id, v)",
"def update_volt_range(self):\r\n self.volt_range_index = self.VoltmeterRangeValue.currentIndex()\r\n self.cmd = self.volt_range_switch.get(self.volt_range_index, None)\r\n if self.connected:\r\n self.I_source.write(self.cmd)",
"def _update_output_voltage_label(self):\n self.widgets['p_outputVoltage'].setValue((self._curr_output_voltage))",
"def _voltage_changed(self):\n if self.checkValueBool:\n self.check_status()",
"def _update_voltageSetpoint_fromGUI(self):\n self.voltageSetpoint = self.widgets['p_setpoint'].value()/self.gain",
"def get_voltage(self):\n print(\"voici le voltage de la batterie\")",
"def toVoltage(self, range=None, turnOn=False):\n self.write('F1;E')\n c = None\n if range == 1 or range == '10mV':\n c = 'R2'\n elif range == 2 or range == '100mV':\n c = 'R3'\n elif range == 3 or range == '1V':\n c = 'R4'\n elif range == 4 or range == '10V':\n c = 'R5'\n elif range == 5 or range == '30V':\n c = 'R6'\n if c is not None:\n self.write(c + ';E')\n if turnOn:\n self.turnOn()",
"def read_actual_voltage(self):\n function_string = 'V' + self.output + 'O?'\n value_string = self.scpi_comm(function_string)\n LOGGER.warn(value_string)\n time.sleep(0.1) # This might only be necessary on LAN interface\n try:\n value = float(value_string.replace('V', ''))\n except ValueError:\n value = -999999\n return value",
"def voltage(self):\n return self.outputValue()",
"def increase_voltage(self):\n function_string = 'INCV' + self.output\n return self.scpi_comm(function_string)",
"def set_vcon (dev, volts=3.3, enable=True):\n if enable:\n v=float(volts)\n d=int(round(22e3*(v/.62-1) / 100e3 * 127))\n d=max(d,0)\n d=min(d,127)\n log.debug ( \"vcon pot value: %d\" % d )\n #dev.set('UXN1330','vcon_pot', d )\n\n dev.set('UXN1330','vcon_en', 1 if enable else 0 )\n # convert back to volts so user can check against input\n return 0 if not enable else .62*(d/127.*100e3/22e3+1)",
"def set_voltage(self):\r\n\t\tself.voltage = int(input(\"Enter the Value from (0-65536)= \"))\r\n\t\twhile self.voltage > 65536 :\r\n\t\t\tself.voltage = int(input(\"Enter the Value from (0-65536)= \"))\r\n\t\t\r\n\t\tself.data = [self.voltage >> 8, self.voltage & 0xFF]\r\n\t\t\r\n\t\treturn self.data",
"def set_voltages(): \n #0) set parameters\n from project_parameters import trapFile,multipoleControls,reg,driveFrequency,ax,az,phi,coefs\n import pickle\n with open(trapFile,'rb') as f:\n trap = pickle.load(f)\n V,X,Y,Z=trap.instance.DC,trap.instance.X,trap.instance.Y,trap.instance.Z\n tc=trap.configuration\n C = tc.multipoleControl\n el = []\n #1) check if trap_knobs has been run yet, creating multipoleControl and multipoleKernel\n if tc.trap_knobs != True:\n return 'WARNING: You must run trap_knobs first!'\n #2a) determine electrode voltages directly\n elif multipoleControls: # note plurality to contrast from attribute\n el = np.dot(C,coefs.T) # these are the electrode voltages\n #2b) determine electrode volages indirectly\n else:\n charge = tc.charge\n mass = tc.mass\n V0 = mass*(2*np.pi*frequencyRF)**2/charge\n U2 = az*V0/8\n U1 = U2+ax*V0/4\n U3 = 2*U1*np.tan(2*np.pi*(phi+tc.thetaRF)/180)\n U1p= np.sqrt(U1**2+U3**2/2)\n U4 = U1p*tc.Qrf[4]/tc.Qrf[1]\n U5 = U1p*tc.Qrf[5]/tc.Qrf[1]\n inp = np.array([E[0], E[1], E[2], U1, U2, U3, U4, U5]).T\n mCf = tc.multipoleCoefficients[1:9,:]\n el = np.dot(mCf.T,inp) # these are the electrode voltages\n el = np.real(el)\n #3) regularize if set to do so\n reg = 0\n if reg: \n C = el\n Lambda = np.linalg.lstsq(tc.multipoleKernel,C)\n Lambda=Lambda[0]\n el = el-(np.dot(tc.multipoleKernel,Lambda))\n return el",
"def ac_voltage(self, ac):\n self.set_ac_voltage(f'{ac}' if self._is_min_max(ac) else f'{ac} V')",
"def set_voltage(self, millivolts):\n assert 3060 <= millivolts <= 10680, \"Voltage must be between 3,060 and 10,680 mV.\"\n assert self.instr == self.INSTR_EXT, \"Please switch to extended instruction set first.\"\n self.voltage = millivolts\n basevoltage = millivolts - 3060\n incrementor = basevoltage // 60\n code = 0x80 & incrementor\n self.command([code])",
"def valuechange():\n\n tempmin.setMaximum(tempmax.value())\n tempmax.setMinimum(tempmin.value())\n hummin.setMaximum(hummax.value())\n hummax.setMinimum(hummin.value())\n\n self.variables.default_values_dict[\"settings\"][\n \"current_tempmin\"\n ] = tempmin.value()\n self.variables.default_values_dict[\"settings\"][\n \"current_tempmax\"\n ] = tempmax.value()\n self.variables.default_values_dict[\"settings\"][\n \"current_hummin\"\n ] = hummin.value()\n self.variables.default_values_dict[\"settings\"][\n \"current_hummax\"\n ] = hummax.value()\n\n max = build_command(\n self.variables.devices_dict[\"temphum_controller\"],\n (\"set_hummax\", hummax.value()),\n )\n min = build_command(\n self.variables.devices_dict[\"temphum_controller\"],\n (\"set_hummin\", hummin.value()),\n )\n\n self.variables.vcw.write(\n self.variables.devices_dict[\"temphum_controller\"], max\n )\n self.variables.vcw.write(\n self.variables.devices_dict[\"temphum_controller\"], min\n )",
"def setVoltage(self, value, slewrate=None, turnOn=False):\n return self._setVoltageOrCurrent('voltage', value, slewrate, turnOn)",
"def configure_vdc(self, rng, res, unit = 'V'):\n self.write_to_serial(':conf:volt:dc ' + str(rng) + ',' + str(res))# + unit)"
]
| [
"0.7024224",
"0.6937911",
"0.6917821",
"0.6887441",
"0.6882729",
"0.68348175",
"0.6834041",
"0.676967",
"0.676372",
"0.66738194",
"0.66340613",
"0.6613438",
"0.64863926",
"0.6474095",
"0.64466125",
"0.6388839",
"0.6372111",
"0.6298631",
"0.6152904",
"0.6147114",
"0.6120982",
"0.61022455",
"0.60873556",
"0.60866207",
"0.60786736",
"0.6054923",
"0.6025659",
"0.5997412",
"0.59971046",
"0.5976071"
]
| 0.71725106 | 0 |
Update the voltage setpoint to whatever value is currently in the setpoint spin box | def _update_voltageSetpoint_fromGUI(self):
self.voltageSetpoint = self.widgets['p_setpoint'].value()/self.gain | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_setpoint(self, value):\n self.voltageSetpoint = value/self.gain\n self.widgets['p_setpoint'].setValue(value)\n #Need to reset the PID loop with this new setpoint value\n self._update_PID()",
"def on_spinBox_valueChanged(self, p0):\n self.nbr_pts_caracterisation = self.spinBox.value()",
"def set_setpoint(self, value):\n value = value * self.conf['PSICONV']\n log.debug(\"Set pressure regulator %d to %f\", self.id_, value)\n self.synth.cbox.set_dac(self.id_, value)",
"def _update_output_voltage_label(self):\n self.widgets['p_outputVoltage'].setValue((self._curr_output_voltage))",
"def update_volt_range(self):\r\n self.volt_range_index = self.VoltmeterRangeValue.currentIndex()\r\n self.cmd = self.volt_range_switch.get(self.volt_range_index, None)\r\n if self.connected:\r\n self.I_source.write(self.cmd)",
"def set_control_voltage(self, value):\n self._curr_output_voltage = value\n self._update_output_voltage_label()",
"def _set_output_voltage_from_label(self):\n if (~self._is_stabilizing): #Only updates value if we are not stabilizing, otherwise the PID loop will be driving the output voltage\n #as opposed to the user.\n self._curr_output_voltage = self.widgets['p_outputVoltage'].value()\n self._ao_client.set_ao_voltage(self._ao_channel, self._curr_output_voltage)",
"def get_setVoltage(self):\n self.read(\":VOLT?\")",
"def _g_spin_changed(self):\n self.gLine.setValue(self.gSpin.value())",
"def setCurrent(self, value):\n\n\t\tself._current = self._setpoint - value",
"def set_voltage(self, value):\n self.write(\":VOLT {}V\".format(value))",
"def update_newly_set_ref_V_ampl(self):\n self.qlin_ref_V_ampl_RMS.setText(\n \"%.3f\" % self.alia.config.ref_V_ampl_RMS\n )\n self.qlin_ref_V_ampl.setText(\"%.3f\" % self.alia.config.ref_V_ampl)\n self.qlbl_ref_is_clipping.setText(self.get_clipping_text())\n # QApplication.processEvents()\n\n self.alia_qdev.state.reset()\n self.clear_curves_stage_1_and_2()",
"def _b_spin_changed(self):\n self.bLine.setValue(self.bSpin.value())",
"def _r_spin_changed(self):\n self.rLine.setValue(self.rSpin.value())",
"def set_voltage(self, voltage):\n self.ipcon.send_request(self, AnalogOut.FUNCTION_SET_VOLTAGE, (voltage,), 'H', '')",
"def _setVoltageOrCurrent(self, mode, value, slewrate, turnOn):\n string = self.ask(\"od;\")\n if mode == 'voltage' and string.find('CA') != -1:\n self.toVoltage()\n elif mode == 'current' and string.find('CV') != -1:\n self.toCurrent()\n self.setOutputValue(value, slewrate=slewrate)\n if turnOn:\n self.turnOn()\n return self.outputValue()",
"def changeValue(self):\n #productive #onUpDnArrow\n profprint()\n widget = slicer.modules.NeedleFinderWidget\n widget.scrollPointButton.setText('Scroll Point for Needle ' + str(widget.editNeedleTxtBox.value)+ ' (pt: '+str(self.ptNumber)+')')",
"def setValue(self, value):\n\t\tratio = (value - self.range[0]) / (self.range[1] - self.range[0])\n\t\tself._changed = False\n\t\tself.slider.setValue(ratio * 100)\n\t\tself._changed = False\n\t\tself.spinbox.setValue(value)\n\t\tself._changed = False",
"def SetValue(self, val):\n self.spin.SetValue(val)\n self.slider.SetValue(100*(val-self.minval)/(self.maxval-self.minval))",
"def setvoltages(self):\n pass",
"def changeValue(self):\r\n # productive #onUpDnArrow\r\n profprint()\r\n widget = slicer.modules.NeedleFinderWidget\r\n # widget.scrollPointButton.setText('Scroll Point for Needle ' + str(widget.editNeedleTxtBox.value) + ' (pt: ' + str(self.ptNumber) + ')')\r\n self.lockControlPoints(widget.editNeedleTxtBox.value)\r\n self.unlockControlPoints(widget.editNeedleTxtBox.value)\r\n widget.drawValidationNeedlesButton.text = \"Render Manual Needle \" + str(widget.editNeedleTxtBox.value)",
"def set_voltage(self, v, ch): \n self.write(\"VSET\" + str(ch) + \":\" + str(v) + \"\\n\")",
"def setPoint(self,set_point):\n self.set_point = set_point\n self.Integrator=0\n self.Derivator=0",
"def setPoint(self,set_point):\n self.set_point = set_point\n self.Integrator=0\n self.Derivator=0",
"def set_voltage(self, c, voltage):\n if abs(voltage['V']) < (.00001):\n voltage = 0 * units.V\n \n dev = self.selectedDevice(c)\n if 'slot_number' in c.keys():\n slot_number = c['slot_number']\n self.open_comm(c, slot_number)\n output_state = yield self.get_output_state(c)\n if not output_state:\n try:\n yield self.set_output_state(c, True)\n except:\n self.initialize_mainframe(c)\n yield self.set_output_state(c, True)\n write_str = \"VOLT \"+ str(voltage['V'])\n try:\n yield self.write(c, write_str)\n self.wait_for_completion(c)\n except:\n self.initialize_mainframe(c)\n yield self.write(c, write_str)\n self.wait_for_completion(c)\n self.close_comm(c, slot_number)\n else:\n raise ValueError(self.no_selection_msg())",
"def set_voltage(self, v):\n self.environment.set_voltage(self.neuron_id, v)",
"def set_volt(self,volt):\n self.spi_disable()\n self.spi_enable()\n data1=volt*256//self.vref\n if data1>255:\n data1=255\n if data1<0:\n data1=0\n data1=int(data1)\n temp0=AD5601_mode[\"normal\"]|((data1>>2)&0x00ff)\n temp1=(data1<<6)&0x00ff\n data=[temp0,temp1]\n return self.spi_device.write(data)",
"def get_setpoint(self):\n value = self.synth.cbox.get_dacs()[self.id_]\n value = value / self.conf['PSICONV']\n log.debug(\"Current setpoint on regulator %d = %f\",\n self.id_, value)\n return value",
"def valoresSpin():\n try:\n var.ui.spinEdad.setValue(16)\n except Exception as error:\n print('Error valores spin: %s' % str(error))",
"def setPoint(self,set_point):\n\t\tself.set_point = set_point\n\t\tself.Integrator=0\n\t\tself.Derivator=0"
]
| [
"0.7551044",
"0.7197047",
"0.7195143",
"0.70570076",
"0.69075614",
"0.67191017",
"0.66816556",
"0.66758114",
"0.66145205",
"0.65183496",
"0.6517884",
"0.6512107",
"0.65120155",
"0.6421457",
"0.6389189",
"0.63613814",
"0.6354926",
"0.63446605",
"0.63266104",
"0.6318307",
"0.6282934",
"0.62016416",
"0.6192903",
"0.6192903",
"0.619134",
"0.61623406",
"0.61546695",
"0.6130578",
"0.6128602",
"0.607133"
]
| 0.836005 | 0 |
Updates the power setpoint, for use by external programs wishing to interface with this one. | def set_setpoint(self, value):
self.voltageSetpoint = value/self.gain
self.widgets['p_setpoint'].setValue(value)
#Need to reset the PID loop with this new setpoint value
self._update_PID() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _update_voltageSetpoint_fromGUI(self):\n self.voltageSetpoint = self.widgets['p_setpoint'].value()/self.gain",
"def set_setpoint(self, value):\n value = value * self.conf['PSICONV']\n log.debug(\"Set pressure regulator %d to %f\", self.id_, value)\n self.synth.cbox.set_dac(self.id_, value)",
"def set_setpoint():\n setpoint = request.params.get(\"setpoint\", 0, type=float)\n pid = request.params.get(\"pid\", 1, type=int)\n retval = RP_LIB.rp_PIDSetSetpoint(pid, ctypes.c_float(setpoint))\n if retval != 0:\n LOG.error(\"Failed to set PID setpoint. Error code: %s\", ERROR_CODES[retval])\n LOG.info(\"setpoint: %f\", setpoint)\n LOG.info(\"PID: %d\", pid)",
"def setSetPoint(self, set_point):\r\n\t\tself.SetPoint = set_point",
"def setPoint(self,set_point):\n self.set_point = set_point\n self.Integrator=0\n self.Derivator=0",
"def setPoint(self,set_point):\n self.set_point = set_point\n self.Integrator=0\n self.Derivator=0",
"def setPoint(self,set_point):\n\t\tself.set_point = set_point\n\t\tself.Integrator=0\n\t\tself.Derivator=0",
"def setPoint(self,set_point):\n\t\tself.set_point = set_point\n\t\tself.Integrator=0\n\t\tself.Derivator=0",
"def setPoint(self,set_point):\n\t\tself.set_point = set_point\n\t\tself.Integrator=0\n\t\tself.Derivator=0",
"def setSetpoint(self, point):\n\n\t\tself._setpoint = point",
"def setPowerFromDensity(self):\n self.p.power = self.p.powerDensity * self.getHMMass()",
"def change_points(self, dp):\n\t\tself._points += dp",
"def on_to_ppt(self):\n try:\n import win32com.client\n except ImportError:\n print('ERROR: The win32com library needs to be installed')\n return\n\n # First, copy to the clipboard\n self.on_copy()\n\n # Connect to an open PowerPoint application\n app = win32com.client.Dispatch('PowerPoint.Application')\n\n # Get the current slide and paste the plot\n slide = app.ActiveWindow.View.Slide\n shape = slide.Shapes.Paste()\n\n # Add a hyperlink to the data location to easily open the data again\n shape.ActionSettings[0].Hyperlink.Address = self.main.abs_filename",
"def setPoint(self, set_point):\n self.set_point = set_point\n self.Integrator=0\n self.Derivator=0",
"def set_PU(self, powerups):\n self._powerups=powerups",
"def updateFP(self):\n for r in self._FP_list:\n r.setY(r.getY() + r.getVY())",
"def setPointsFP(self, new):\n assert new is None or type(new) in [int, float]\n self._points_FP = new",
"def setSetPoint(self, set_point, clearPID=False):\n\t\tself.SetPoint = set_point\n\t\tif (clearPID): self.clear()",
"def set_setpoint(self, value):\n act = SetpointAction(self, value)\n return act.invoke()",
"def modifyPoint(self, *args, **kwargs):\n ...",
"def setPTData(*args):\n args[0].Data.PTData.pt_data = args[1]",
"def offset_setpoint(inverse=False):\n \n offset = 360.0\n if inverse:\n offset = -360.0\n \n setpoint_old = pi.getINDI('PLC.UBCSettings.PLSetpoint')\n pi.setINDI('PLC.PLSetpoint.PLSetpoint=' + str(setpoint_old + offset) + ';forNAC=0')",
"def edit(self, p):\n self.poses[self.selected_point].model = p\n self.calibration_changed()",
"def setpointCallback(self,setpoint):\n if not self.setpoint_valid:\n rospy.loginfo(\"First setpoint received.\")\n self.setpoint_valid = True\n self.set_pose = setpoint\n if not self.enabled:\n rospy.logwarn(\"{}: PIDs not enabled, please call 'rosservice call {} true'\".format(rospy.get_name(),rospy.resolve_name('~enable')))\n rospy.loginfo('{}: Changed setpoint to: {}'.format(rospy.get_name(), setpoint.pose))",
"def update_plot():\n pass",
"def set_powerobject(self, boolean):\n if boolean == True:\n self.powerobject = 'P'",
"def _update_setpoint(self, *args, value: Any, **kwargs) -> None:\n self._last_setpoint = value\n # Always set done to False when a move is requested\n # This means we always get a rising edge when finished moving\n # Even if the move distance is under our done moving tolerance\n self.done.put(0, internal=True)\n self._update_done()",
"def set_initial_point(self, point):\r\n return self._studio.set_initial_point(point)",
"def setP1(self, p1):\n self.points[0] = p1",
"def update_p(self, p: float):\n self.p = p\n for k, sequential in self.m_ops.items():\n if sequential[0].is_identity_op():\n sequential[-1].p = p"
]
| [
"0.67093927",
"0.6139946",
"0.59940267",
"0.5886405",
"0.58322847",
"0.58322847",
"0.57975775",
"0.57975775",
"0.57975775",
"0.57480216",
"0.5733487",
"0.5705446",
"0.5703208",
"0.56821555",
"0.56507874",
"0.5611109",
"0.55992657",
"0.5586657",
"0.5575267",
"0.5563422",
"0.55268675",
"0.5514914",
"0.5471255",
"0.54694206",
"0.5463366",
"0.5440208",
"0.54349345",
"0.5430018",
"0.54294217",
"0.54243964"
]
| 0.6482329 | 1 |
Creates a new PID object based on the current PID member variables to be used for power feedback | def _update_PID(self):
self.pid = PID(p=self.paramP, i=self.paramI, d=self.paramD, setpoint=self.voltageSetpoint, memory=self.paramMemory) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ppid(self):",
"def dynamic_pid(self):\n pass",
"def __init__(self, pvID, pvP, pvQ, pvDescriptor):\n\n # TODO: implement this",
"def create_entity(self):\n \n if self.ORION_CB.get_entity(self.params['name']) is None:\n \n print('[INFO]: Create new PID entity')\n \n entity_dict = {\"id\":self.params['name'], \"type\":'PID_controller'}\n for attr in ['Kp', 'Ti', 'Td', 'lim_low', 'lim_high', 'setpoint']:\n entity_dict.update({attr:{'value':self.params[attr],'type':'Number'}})\n\n entity_dict.update({'reverse_act':{'value':self.params['reverse_act'],'type':'Text'}})\n \n entity = filip.orion.Entity(entity_dict)#, attrs)\n\n self.ORION_CB.post_entity(entity)\n \n else:\n print('Entity name already assigned')",
"def __init__(self, goal=0, kP=1, kI=1, kD=1, init_pt=0):\n self._pid_lock = threading.Lock()\n\n self.set_goal(goal)\n self.reset(init_pt)\n self.set_gains({\n PIDController.KP_KEY: kP,\n PIDController.KI_KEY: kI,\n PIDController.KD_KEY: kD\n })",
"def __init__(self, pid, period, num_period=None):\n\n super(PIDTimedLoop, self).__init__(period, num_period)\n self._pid = pid\n self._is_active = pid.poll() is None",
"def pid(self):",
"def def_pid(self,pid):\n self.pid=int(pid)",
"def __init__(self, prim):\n self.actual = prim",
"def setup_process_stats(pid):\n return psutil.Process(pid)",
"def __init__(self):\n self._id = 0\n self._init_cpu_cores_capacity = 0\n self._init_memory_capacity = 0\n self._init_pm_type = 0\n self._init_pm_state = 0\n\n self._region_id = 0\n self._zone_id = 0\n self._data_center_id = 0\n self._cluster_id = 0\n self._rack_id = 0\n\n # PM resource.\n self._live_vms: Set[int] = set()",
"def __init__(self, pid, binary_path, host_name, node_name, telemetry):\n self.pid = pid\n self.binary_path = binary_path\n self.host_name = host_name\n self.node_name = node_name\n self.telemetry = telemetry",
"def inspire_pidstore():",
"def create_ptx(self):\n\n self.lower_pmos_inst=self.add_inst(name=\"lower_pmos\",\n mod=self.pmos)\n self.connect_inst([\"bl\", \"en\", \"br\", \"vdd\"])\n\n self.upper_pmos1_inst=self.add_inst(name=\"upper_pmos1\",\n mod=self.pmos)\n self.connect_inst([\"bl\", \"en\", \"vdd\", \"vdd\"])\n\n self.upper_pmos2_inst=self.add_inst(name=\"upper_pmos2\",\n mod=self.pmos)\n self.connect_inst([\"br\", \"en\", \"vdd\", \"vdd\"])",
"def __init__(self):#, username):\n# self.username = username\n self.pid = os.getpid()",
"def pidGet(self) -> float:\n ...",
"def pidGet(self) -> float:\n ...",
"def __init__(self, upstream=None, downstream=None,\n name='', master = None, Kv = 0.0, verbose=0): \n global _pccount\n if name == '':\n name = 'PressureController_'+`_pccount`\n _pccount += 1\n FlowDevice.__init__(self,2,name,verbose)\n if upstream and downstream:\n self.install(upstream, downstream)\n self.setPressureCoeff(Kv)\n self.setMaster(master)",
"def __init__(self, pid, init=None):\n self.__pid = pid\n\n cmd_handler = Cmd_Handler(lambda x: x['type'])\n cmd_handler.register_cmd('build', self.decide_build)\n cmd_handler.register_cmd('move', self.decide_move)\n self.__cmd_handler = cmd_handler\n\n self.__opponent = None\n self.look_ahead = 0\n\n self.__state = None\n\n if init:\n self.init_state(init)",
"def _PIDController__calculate(self): \n \n\t\tenabled = self.m_enabled\n\t\tpidInput = self.m_pidInput\n\n\t\tif enabled:\n\n\t\t input = pidInput.PIDGet()\n\n\t\t self.m_error = self.m_setpoint - input\n\t\t if self.m_continuous:\n\t\t \n\t\t if math.fabs(self.m_error) > (self.m_maximumInput - self.m_minimumInput) / 2:\n\t\t if self.m_error > 0:\n\t\t self.m_error = self.m_error - self.m_maximumInput + self.m_minimumInput\n\t\t else:\n\t\t self.m_error = self.m_error + self.m_maximumInput - self.m_minimumInput\n\n\t\t potentialIGain = (self.m_totalError + self.m_error) * self.m_I\n\t\t \n\t\t if potentialIGain < self.m_maximumOutput:\n\t\t if potentialIGain > self.m_minimumOutput:\n\t\t self.m_totalError += self.m_error\n\t\t else:\n\t\t self.m_totalError = self.m_minimumOutput / self.m_I\n\t\t else:\n\t\t self.m_totalError = self.m_maximumOutput / self.m_I\n\n\t\t self.m_result = self.m_P * self.m_error + self.m_I * self.m_totalError + self.m_D * (self.m_error - self.m_prevError)\n\t\t self.m_prevError = self.m_error\n\n\t\t if self.m_result > self.m_maximumOutput:\n\t\t self.m_result = self.m_maximumOutput\n\t\t elif self.m_result < self.m_minimumOutput:\n\t\t self.m_result = self.m_minimumOutput\n\n\t\t pidOutput = self.m_pidOutput\n\t\t result = self.m_result",
"def spawn(self, pcls, args):\n\n childp, ownp = multiprocessing.Pipe()\n p = pcls(self._id, childp)\n p._loglevel = self._loglevel\n p.start()\n\n childp.close()\n cid = ownp.recv()\n ownp.send((\"setup\", args))\n ownp.send(\"start\")\n\n self._child_procs.append((p.pid, cid))\n\n return cid",
"def get_pid(self):\n return self.k_p, self.k_i, self.k_d",
"def new_process() -> Process:\n return multiprocessing.Process()",
"def get_PID(self):\n return self.PID",
"def _instantiate_processes(self, input=None, context=None):\n# FIX: ALLOW Projections (??ProjectionTiming TUPLES) TO BE INTERPOSED BETWEEN MECHANISMS IN PATHWAY\n# FIX: AUGMENT LinearMatrix TO USE FULL_CONNECTIVITY_MATRIX IF len(sender) != len(receiver)\n\n # # MODIFIED 2/8/17 OLD: [SEE BELOW]\n # self.variable = []\n # MODIFIED 2/8/17 END\n self.mechanismsDict = {}\n self._all_mech_tuples = []\n self._allMechanisms = MechanismList(self, self._all_mech_tuples)\n\n # Get list of processes specified in arg to init, possibly appended by EVCMechanism (with prediction processes)\n processes_spec = self.processes\n\n # Assign default Process if PROCESS is empty, or invalid\n if not processes_spec:\n from PsyNeuLink.Components.Process import Process_Base\n processes_spec.append(ProcessTuple(Process_Base(), None))\n\n # If input to system is specified, number of items must equal number of processes with origin mechanisms\n if input is not None and len(input) != len(self.originMechanisms):\n raise SystemError(\"Number of items in input ({}) must equal number of processes ({}) in {} \".\n format(len(input), len(self.originMechanisms),self.name))\n\n #region VALIDATE EACH ENTRY, STANDARDIZE FORMAT AND INSTANTIATE PROCESS\n\n # Convert all entries to (process, input) tuples, with None as filler for absent input\n input_index = input_index_curr = 0\n for i in range(len(processes_spec)):\n\n # MODIFIED 2/8/17 NEW:\n # Get list of origin mechanisms for processes that have already been converted\n # (for use below in assigning input)\n orig_mechs_already_processed = list(p[0].originMechanisms[0] for\n p in processes_spec if isinstance(p,ProcessTuple))\n # MODIFIED 2/8/17 END\n\n # Entry is not a tuple\n # presumably it is a process spec, so enter it as first item of ProcessTuple\n if not isinstance(processes_spec[i], tuple):\n processes_spec[i] = ProcessTuple(processes_spec[i], None)\n\n # Entry is a tuple but not a ProcessTuple, so convert it\n if isinstance(processes_spec[i], tuple) and not isinstance(processes_spec[i], ProcessTuple):\n processes_spec[i] = ProcessTuple(processes_spec[i][0], processes_spec[i][1])\n\n # Input was NOT provided on command line, so get it from the process\n if input is None:\n process = processes_spec[i].process\n process_input = []\n for process_input_state in process.processInputStates:\n process_input.extend(process_input_state.value)\n processes_spec[i] = ProcessTuple(process, process_input)\n # Input was provided on command line, so assign that to input item of tuple\n else:\n # Assign None as input to processes implemented by controller (controller provides their input)\n # (e.g., prediction processes implemented by EVCMechanism)\n if processes_spec[i].process._isControllerProcess:\n processes_spec[i] = ProcessTuple(processes_spec[i].process, None)\n else:\n # MODIFIED 2/8/17 NEW:\n # Replace input item in tuple with one from command line\n # Note: check if origin mechanism for current process is same as any previous one;\n # if it is, use that one (and don't increment index for input\n # otherwise, assign input and increment input_index\n try:\n input_index_curr = orig_mechs_already_processed.index(processes_spec[i][0].originMechanisms[0])\n except ValueError:\n input_index += 1\n processes_spec[i] = ProcessTuple(processes_spec[i].process, input[input_index_curr])\n input_index_curr = input_index\n # MODIFIED 2/8/17 END\n\n # Validate input\n if (processes_spec[i].input is not None and\n not isinstance(processes_spec[i].input,(numbers.Number, list, np.ndarray))):\n 
raise SystemError(\"Second item of entry {0} ({1}) must be an input value\".\n format(i, processes_spec[i].input))\n\n process = processes_spec[i].process\n process_input = processes_spec[i].input\n\n # # MODIFIED 2/8/17 OLD: [MOVED ASSIGNMENT OF self.variable TO _instantiate_graph()\n # # SINCE THAT IS WHERE SYSTEM'S ORIGIN MECHANISMS ARE IDENTIFIED]\n # self.variable.append(process_input)\n # # MODIFIED 2/8/17 END\n\n # IMPLEMENT: THIS IS WHERE LEARNING SPECIFIED FOR A SYSTEM SHOULD BE IMPLEMENTED FOR EACH PROCESS IN THE\n # SYSTEM; NOTE: IF THE PROCESS IS ALREADY INSTANTIATED WITHOUT LEARNING\n # (FIRST CONDITIONAL BELOW), MAY NEED TO BE RE-INSTANTIATED WITH LEARNING\n # (QUESTION: WHERE TO GET SPECS FOR PROCESS FOR RE-INSTANTIATION??)\n\n # If process item is a Process object, assign process_input as default\n if isinstance(process, Process):\n if process_input is not None:\n process._assign_defaults(variable=process_input, context=context)\n # If learning_rate is specified for system but not for process, then apply to process\n # # MODIFIED 3/21/17 OLD:\n # if self.learning_rate and not process.learning_rate:\n # # FIX: assign_params WANTS TO CREATE A ParamaterState ON process FOR learning_rate\n # process.assign_params(request_set={LEARNING_RATE:self.learning_rate})\n # # MODIFIED 3/21/17 NEW:[learning_rate SHOULD BE NOT BE RE-ASSIGNED FOR PROCESS, BUT RATHER ON EXECUTE]\n # if self.learning_rate is not None and process.learning_rate is None:\n # process.learning_rate = self.learning_rate\n # # MODIFIED 3/21/17 END\n\n # Otherwise, instantiate Process\n else:\n if inspect.isclass(process) and issubclass(process, Process):\n # FIX: MAKE SURE THIS IS CORRECT\n # Provide self as context, so that Process knows it is part of a System (and which one)\n # Note: this is used by Process._instantiate_pathway() when instantiating first Mechanism\n # in Pathway, to override instantiation of projections from Process.input_state\n process = Process(default_input_value=process_input,\n learning_rate=self.learning_rate,\n context=self)\n elif isinstance(process, dict):\n # IMPLEMENT: HANDLE Process specification dict here;\n # include process_input as ??param, and context=self\n raise SystemError(\"Attempt to instantiate process {0} in kwProcesses of {1} \"\n \"using a Process specification dict: not currently supported\".\n format(process.name, self.name))\n else:\n raise SystemError(\"Entry {0} of kwProcesses ({1}) must be a Process object, class, or a \"\n \"specification dict for a Process\".format(i, process))\n\n # # process should now be a Process object; assign to processList\n # self.processList.append(process)\n\n # Assign the Process a reference to this System\n process.systems.append(self)\n if process.learning:\n self.learning = True\n\n # Get max of Process phaseSpecs\n self._phaseSpecMax = int(max(math.floor(process._phaseSpecMax), self._phaseSpecMax))\n\n # Iterate through mechanism tuples in Process' mech_tuples\n # to construct self._all_mech_tuples and mechanismsDict\n # FIX: ??REPLACE WITH: for sender_mech_tuple in Process._mech_tuples\n for sender_mech_tuple in process._mech_tuples:\n\n sender_mech = sender_mech_tuple.mechanism\n\n # THIS IS NOW DONE IN _instantiate_graph\n # # Add system to the Mechanism's list of systems of which it is member\n # if not self in sender_mech_tuple[MECHANISM].systems:\n # sender_mech.systems[self] = INTERNAL\n\n # Assign sender mechanism entry in self.mechanismsDict, with mech_tuple as key and its Process as value\n # (this is used by 
Process._instantiate_pathway() to determine if Process is part of System)\n # If the sender is already in the System's mechanisms dict\n if sender_mech_tuple.mechanism in self.mechanismsDict:\n existing_mech_tuple = self._allMechanisms._get_tuple_for_mech(sender_mech)\n if not sender_mech_tuple is existing_mech_tuple:\n # Contents of tuple are the same, so use the tuple in _allMechanisms\n if (sender_mech_tuple.phase == existing_mech_tuple.phase and\n sender_mech_tuple.params == existing_mech_tuple.params):\n pass\n # Contents of tuple are different, so raise exception\n else:\n if sender_mech_tuple.phase != existing_mech_tuple.phase:\n offending_tuple_field = 'phase'\n offending_value = PHASE_ITEM\n else:\n offending_tuple_field = 'process_input'\n offending_value = PARAMS_ITEM\n raise SystemError(\"The same mechanism in different processes must have the same parameters:\"\n \"the {} ({}) for {} in {} does not match the value({}) in {}\".\n format(offending_tuple_field,\n sender_mech_tuple.mechanism,\n sender_mech_tuple[offending_value],\n process,\n existing_mech_tuple[offending_value],\n self.mechanismsDict[sender_mech_tuple.mechanism]\n ))\n # Add to entry's list\n self.mechanismsDict[sender_mech].append(process)\n else:\n # Add new entry\n self.mechanismsDict[sender_mech] = [process]\n if not sender_mech_tuple in self._all_mech_tuples:\n self._all_mech_tuples.append(sender_mech_tuple)\n\n process._allMechanisms = MechanismList(process, tuples_list=process._mech_tuples)\n\n # # MODIFIED 2/8/17 OLD: [SEE ABOVE]\n # self.variable = convert_to_np_array(self.variable, 2)\n # # MODIFIED 2/8/17 END\n #\n # # Instantiate processList using process_tuples, and point self.processes to it\n # # Note: this also points self.params[kwProcesses] to self.processes\n self.process_tuples = processes_spec\n self._processList = ProcessList(self, self.process_tuples)\n self.processes = self._processList.processes",
"def _PIDController__calculate(self): \n \n\t\tenabled = self.m_enabled\n\t\tpidInput = self.m_pidInput\n\t\tpidInput2 = self.source2\n\n\t\tif enabled:\n\n\t\t input = pidInput.PIDGet() - pidInput2.PIDGet()\n\n\t\t self.m_error = self.m_setpoint - input\n\t\t if self.m_continuous:\n\t\t \n\t\t if math.fabs(self.m_error) > (self.m_maximumInput - self.m_minimumInput) / 2:\n\t\t if self.m_error > 0:\n\t\t self.m_error = self.m_error - self.m_maximumInput + self.m_minimumInput\n\t\t else:\n\t\t self.m_error = self.m_error + self.m_maximumInput - self.m_minimumInput\n\n\t\t potentialIGain = (self.m_totalError + self.m_error) * self.m_I\n\t\t \n\t\t if potentialIGain < self.m_maximumOutput:\n\t\t if potentialIGain > self.m_minimumOutput:\n\t\t self.m_totalError += self.m_error\n\t\t else:\n\t\t self.m_totalError = self.m_minimumOutput / self.m_I\n\t\t else:\n\t\t self.m_totalError = self.m_maximumOutput / self.m_I\n\n\t\t self.m_result = self.m_P * self.m_error + self.m_I * self.m_totalError + self.m_D * (self.m_error - self.m_prevError)\n\t\t self.m_prevError = self.m_error\n\n\t\t if self.m_result > self.m_maximumOutput:\n\t\t self.m_result = self.m_maximumOutput\n\t\t elif self.m_result < self.m_minimumOutput:\n\t\t self.m_result = self.m_minimumOutput\n\n\t\t pidOutput = self.m_pidOutput\n\t\t result = self.m_result",
"def __init__(self):\n super(MultiProcessEngine, self).__init__()\n self._debug_output = False\n self._name = 'Main'\n self._last_worker_number = 0\n self._log_filename = None\n self._pid = os.getpid()\n self._process_information = process_info.ProcessInfo(self._pid)\n self._process_information_per_pid = {}\n self._processes_per_pid = {}\n self._quiet_mode = False\n self._rpc_clients_per_pid = {}\n self._rpc_errors_per_pid = {}\n self._status_update_active = False\n self._status_update_thread = None\n self._storage_writer = None\n self._worker_memory_limit = definitions.DEFAULT_WORKER_MEMORY_LIMIT",
"def __init__(self, Pnt, FID):\n self.x = Pnt.X\n self.y = Pnt.Y\n self.fid = FID",
"def __init__(self):\n self.pidDict = { # particle_name, pid\n \"total\" : 0,\n \"charged\" : 1,\n \"charged_eta\" : 2,\n \"pion\" : 6, # sum(7, 8, -7)\n \"pion_p\" : 7,\n \"pion_0\" : 8,\n \"pion_m\" : -7,\n \"kaon\" : 11, # sum(12, 13)\n \"kaon_p\" : 12,\n \"kaon_0\" : 13,\n \"anti_kaon\" : -11, # sum(-12, -13)\n \"kaon_m\" : -12,\n \"anti_kaon_0\" : -13,\n \"nucleon\" : 16, # sum(17, 18)\n \"proton\" : 17,\n \"neutron\" : 18,\n \"anti_nucleon\" : -16, # sum(-17, -18)\n \"anti_proton\" : -17,\n \"anti_neutron\" : -18,\n \"sigma\" : 21, # sum(22, 23, 24)\n \"sigma_p\" : 22,\n \"sigma_0\" : 23,\n \"sigma_m\" : 24,\n \"anti_sigma\" : -21,\n \"anti_sigma_p\" : -22,\n \"anti_sigma_0\" : -23,\n \"anti_sigma_m\" : -24,\n \"xi\" : 26, # sum(27, 28)\n \"xi_0\" : 27,\n \"xi_m\" : 28,\n \"anti_xi\" : -26,\n \"anti_xi_0\" : -27,\n \"anti_xi_m\" : -28,\n \"lambda\" : 31,\n \"anti_lambda\" : -31,\n \"omega\" : 36,\n \"anti_omega\" : -36,\n \"phi\" : 41,\n \"rho\" : 46, #sum(47, 48, -47)\n \"rho_p\" : 47,\n \"rho_0\" : 48,\n \"rho_m\" : -47,\n \"eta\" : 51,\n \"eta_prime\" : 52,\n \"gamma\" : 61,\n \"omega782\" : 65,\n \"eta\" : 71,\n \"etap\" : 72,\n }\n\n for aParticle in self.pidDict.keys():\n if self.pidDict[aParticle]>=0:\n self.pidDict[aParticle+\"_hydro\"] = self.pidDict[aParticle]+1000\n else:\n self.pidDict[aParticle+\"_hydro\"] = self.pidDict[aParticle]-1000\n if self.pidDict[aParticle]>=0:\n self.pidDict[aParticle+\"_thermal\"] = self.pidDict[aParticle]+2000\n else:\n self.pidDict[aParticle+\"_thermal\"] = self.pidDict[aParticle]-2000\n\n self.pidDict.update({\n \"photon_total\" : 9000,\n \"photon_total_eq\" : 9001,\n \"photon_QGP_tot\" : 9002,\n \"photon_QGP_eq\" : 9003,\n \"photon_HG_tot\" : 9004,\n \"photon_HG_eq\" : 9005,\n \"direct_gamma_shortdecay_hydro\" : 9006,\n \"decay_gamma_pi0_hydro\" : 9007,\n \"decay_gamma_eta_hydro\" : 9008,\n \"decay_gamma_omega_hydro\" : 9009,\n \"decay_gamma_phi_hydro\" : 9010,\n \"decay_gamma_etap_hydro\" : 9011,\n \"decay_gamma_Sigma0_hydro\" : 9012,\n })\n\n #UrQMD pid Dictionary, name conversion defined as in binUtility\n self.UrQMDpidDict = { #particle name, UrQMD id# : isospin*2000 + pid\n 2101 : \"pion_p\",\n -1899 : \"pion_m\",\n 101 : \"pion_0\",\n 1106 : \"kaon_p\",\n -894 : \"kaon_0\",\n -1106 : \"kaon_m\",\n 894 : \"anti_kaon_0\",\n 1001 : \"proton\",\n -999 : \"neutron\",\n -1001 : \"anti_proton\",\n 999 : \"anti_neutron\",\n 2040 : \"sigma_p\",\n -1960 : \"sigma_m\",\n 40 : \"sigma_0\",\n -2040 : \"anti_sigma_p\",\n 1960 : \"anti_sigma_m\",\n -40 : \"anti_sigma_0\",\n 1049 : \"xi_0\",\n -951 : \"xi_m\",\n -1049 : \"anti_xi_0\",\n 951 : \"anti_xi_m\",\n 27 : \"lambda\",\n -27 : \"anti_lambda\",\n 55 : \"omega\",\n -55 : \"anti_omega\",\n 109 : \"phi\",\n 102 : \"eta\",\n 107 : \"eta_prime\",\n 100 : \"gamma\",\n }\n\n #pdg pid Dictionary\n self.PDGpidDict = { #pdg id#, particle name\n 211 : \"pion_p\",\n -211 : \"pion_m\",\n 111 : \"pion_0\",\n 321 : \"kaon_p\",\n 311 : \"kaon_0\",\n -321 : \"kaon_m\",\n -311 : \"anti_kaon_0\",\n 2212 : \"proton\",\n 2112 : \"neutron\",\n -2212 : \"anti_proton\",\n -2112 : \"anti_neutron\",\n 3222 : \"sigma_p\",\n 3112 : \"sigma_m\",\n 3212 : \"sigma_0\",\n -3222 : \"anti_sigma_p\",\n -3112 : \"anti_sigma_m\",\n -3212 : \"anti_sigma_0\",\n 3322 : \"xi_0\",\n 3312 : \"xi_m\",\n -3322 : \"anti_xi_0\",\n -3312 : \"anti_xi_m\",\n 3122 : \"lambda\",\n -3122 : \"anti_lambda\",\n 3334 : \"omega\",\n -3334 : \"anti_omega\",\n 333 : \"phi\",\n 221 : \"eta\",\n 331 : \"eta_prime\",\n 22 : \"gamma\",\n }\n\n #particle 
mass Dictionary (unit in GeV)\n self.masspidDict = {\n \"pion\" : 0.13957,\n \"pion_p\" : 0.13957,\n \"pion_0\" : 0.13498,\n \"pion_m\" : 0.13957,\n \"kaon\" : 0.49368,\n \"kaon_p\" : 0.49368,\n \"kaon_0\" : 0.49765,\n \"anti_kaon\" : 0.49368,\n \"kaon_m\" : 0.49368,\n \"anti_kaon_0\" : 0.49765,\n \"nucleon\" : 0.93827,\n \"proton\" : 0.93827,\n \"neutron\" : 0.93957,\n \"anti_nucleon\" : 0.93827,\n \"anti_proton\" : 0.93827,\n \"anit_neutron\" : 0.93957,\n \"sigma\" : 1.18937,\n \"sigma_p\" : 1.18937,\n \"sigma_0\" : 1.19264,\n \"sigma_m\" : 1.19745,\n \"anti_sigma\" : 1.18937,\n \"anti_sigma_p\" : 1.18937,\n \"anti_sigma_0\" : 1.19264,\n \"anti_sigma_m\" : 1.19745,\n \"xi\" : 1.31483,\n \"xi_0\" : 1.31483,\n \"xi_m\" : 1.32131,\n \"anti_xi\" : 1.31483,\n \"anti_xi_0\" : 1.31483,\n \"anti_xi_m\" : 1.32131,\n \"lambda\" : 1.11568,\n \"anti_lambda\" : 1.11568,\n \"omega\" : 1.67243,\n \"anti_omega\" : 1.67243,\n \"rho\" : 0.77580,\n \"rho_p\" : 0.77580,\n \"rho_0\" : 0.77580,\n \"rho_m\" : 0.77580,\n \"phi\" : 1.01946,\n \"eta\" : 0.54775,\n \"eta_prime\" : 0.95778,\n \"gamma\" : 0.0,\n }\n for aParticle in self.masspidDict.keys():\n self.masspidDict[aParticle+\"_hydro\"] = self.masspidDict[aParticle]\n self.masspidDict[aParticle+\"_thermal\"] = self.masspidDict[aParticle]\n\n # charged hadrons list\n self.charged_hadron_list = [\n \"pion_p\", \"pion_m\", \"kaon_p\", \"kaon_m\", \"proton\", \"anti_proton\",\n \"sigma_p\", \"sigma_m\", \"anti_sigma_p\", \"anti_sigma_m\",\n \"xi_m\", \"anti_xi_m\"]",
"def __init__(self, pid):\n self.pid = pid\n self.refresh_code_ranges()"
]
| [
"0.6403323",
"0.6256226",
"0.6253684",
"0.60611475",
"0.6023733",
"0.6017484",
"0.5905685",
"0.5836553",
"0.5782644",
"0.5770235",
"0.5732038",
"0.569442",
"0.56593156",
"0.5647622",
"0.5630679",
"0.5549715",
"0.5549715",
"0.5472046",
"0.5453043",
"0.5441365",
"0.53801626",
"0.5363527",
"0.5361274",
"0.53197956",
"0.53059924",
"0.5296965",
"0.52945286",
"0.52937406",
"0.5292136",
"0.52793527"
]
| 0.72997856 | 0 |
Test API can get all the users | def test_api_can_get_all_users(self):
response = self.client().get('/api/v1/user/')
        self.assertEqual(response.status_code, 200) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_fetch_all_user(self):\n\n payload = self.get_req('api/v1/users')\n self.assertEqual(payload.status_code, 200)\n self.assertEqual(payload.json['users'], [])",
"def test_get_all_users(self):\n created_30_days_ago = datetime.datetime.utcnow() + datetime.timedelta(-30)\n add_user('neilb', '[email protected]', 'password123', created_30_days_ago)\n add_user('juneau', '[email protected]')\n with self.client:\n response = self.client.get('/users')\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(data['data']['users']),2)\n self.assertTrue('created_at' in data['data']['users'][0])\n self.assertTrue('created_at' in data['data']['users'][1])\n self.assertIn('juneau', data['data']['users'][0]['username'])\n self.assertIn('neilb', data['data']['users'][1]['username'])\n self.assertIn('success', data['status'])",
"def test_user_list(self):\r\n self._add_demo_import()\r\n params = {\r\n 'api_key': self.api_key\r\n }\r\n res = self.testapp.get('/api/v1/a/users/list',\r\n params=params,\r\n status=200)\r\n\r\n # we should get back dict of count, users.\r\n data = json.loads(res.body)\r\n\r\n self.assertEqual(\r\n 1, data.get('count'), \"There are none by default. \" + res.body)\r\n self.assertEqual(\r\n 'admin',\r\n data.get('users')[0]['username'],\r\n \"The first user is from admin \" + res.body)\r\n self.assertEqual(\r\n '[email protected]',\r\n data.get('users')[0]['email'],\r\n \"The first user is from [email protected] \" + res.body)",
"def test_get_all_users(self):\n api.user.create(\n username='chuck',\n email='[email protected]',\n password='secret',\n )\n users = [user.getUserName() for user in api.user.get_users()]\n\n self.assertEqual(users, ['chuck', TEST_USER_NAME])",
"def test_user_get_all(self):\n response = self.app.get('/api/v3/users', headers=self.user_header)\n self.assertEqual(response.status_code, 401)",
"def test_get_users(self):\n pass",
"def test_admin_get_all(self):\n response = self.app.get('/api/v3/users', headers=self.admin_header)\n self.assertEqual(response.status_code, 200)",
"def test_get_all_user(self):\n response = self.client().get(AuthTestCase.admin)\n # assert the response code\n self.assertEqual(response.status_code, 200)",
"def test_users_get(self):\n pass",
"def test_users_get(self):\n pass",
"def get_all_users():",
"def test_api_user_get(self):\n pass",
"def get_users():\n return Response(f\"{User.get_all_users()}\", 200, mimetype='text/plain')",
"def test_getUsers(self):\n\t\turl = \"/users/\"\n\t\tresponse = self.client.get(url, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_200_OK)\n\t\tself.assertEqual(response.data[\"count\"], 2)",
"def test_response_for_getting_all_users(self):\n response = self.client.get(\"/team/all/\", format='json')\n self.assertEqual(response.status_code, 200)",
"def test_get_users_list(self):\n url = reverse('users')\n response = self.client.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def get_all_user():\n user = UserModel.objects()\n return jsonify(user), 200",
"def get(self, request):\n all_users = UserModel.objects.all()\n all_users_serializer = UserSerializer(all_users, many=True)\n return Response(all_users_serializer.data)",
"def fetch_all_users():\n users = find_users()\n return to_response(users, \"No users\")",
"def test_get_users(self):\n users = app.get_users()\n self.assertEqual(len(users), 1)",
"def test_get_all_users(self):\n\n email1 = \"[email protected]\"\n self.create_example_user(email1)\n\n email2 = \"[email protected]\"\n\n self.create_example_user(email2)\n\n users_get_endpoint_result = user.fetchall(self.database)\n\n verify_query = \"\"\"\n SELECT * FROM USERS;\"\"\"\n self.database.cursor.execute(verify_query)\n\n verify_rows = [r._asdict() for r in self.database.cursor.fetchall()]\n\n assert len(verify_rows) == len(users_get_endpoint_result)\n\n for (email, name, group_name, hashed_password, admin) in [\n (r[\"email\"], r[\"name\"], r[\"group_name\"], r[\"hashed_password\"], r[\"admin\"])\n for r in users_get_endpoint_result\n ]:\n\n self.verify_user_data(email, name, group_name, hashed_password, admin)",
"def test_get_all_super(self):\n team = Team.create(name='foo', captain_id='User_cap',\n program_id=self.program.uid)\n team.put()\n user = User.create(name='super', email='[email protected]',\n user_type='super_admin')\n user.put()\n\n response = self.testapp.get(\n '/api/users',\n headers=self.login_headers(user),\n )\n response_list = json.loads(response.body)\n self.assertEqual(len(response_list), 1)",
"def test_list_user(self):\n pass",
"async def get_users(request):\n\n page = request.GET.getone(\"page\", None)\n page_size = request.GET.getone(\"page_size\", None)\n filter_name = request.GET.getone(\"q\", \"\")\n filter_admin = request.GET.getone(\"filter_admin\", \"false\")\n\n try:\n count_only = request.GET.getone(\"count_only\").lower() == \"true\"\n except (ValueError, KeyError):\n count_only = False\n\n if page:\n try:\n page = int(page)\n except (ValueError, TypeError):\n return web.Response(text=\"Incorrect value for page\", status=400)\n page = 1 if page < 1 else page\n\n if page_size:\n try:\n page_size = int(page_size)\n except (ValueError, TypeError):\n return web.Response(text=\"Incorrect value for page_size\", status=400)\n page_size = 1 if page_size < 1 else page_size\n\n query = request.cirrina.db_session.query(User)\n\n if filter_admin.lower() == \"true\":\n query = query.filter(User.is_admin)\n\n if filter_name:\n query = query.filter(User.username.like(\"%{}%\".format(filter_name)))\n\n nb_users = query.count()\n query = query.order_by(User.username)\n\n if page and page_size:\n users = query.limit(page_size).offset((page - 1) * page_size).all()\n else:\n users = query.all()\n\n data = {\"total_result_count\": nb_users}\n if not count_only:\n data[\"results\"] = [\n {\"id\": user.id, \"username\": user.username, \"is_admin\": user.is_admin}\n for user in users\n ]\n\n return web.json_response(data)",
"def test_listuser():\n url = baseUrl + userurl + listurl\n logging.info(\"List users\")\n r = requests.get(url, headers=header)\n assert r.status_code == 200\n resp = r.json()\n global user_ids\n user_ids = []\n if resp is None:\n pass\n else:\n user_num = len(resp)\n for k in range(0, user_num):\n assert resp[k]['subscriptionIds'][0] == subscriptionid\n if resp[k][\"isActive\"] is True:\n user_ids.append(resp[k][\"id\"])\n print (user_ids)\n assert user_id in user_ids",
"def test_list(self, client, users):\n url = reverse('users:list')\n response = client.get(url)\n assert response.status_code == 200\n for user in users:\n assert user.username in str(response.content)",
"def test_request_users(self):\n response = requests.get(self.url + '/users')\n\n self.assertEqual(response.status_code, 200)\n\n json = response.json()\n self.assertIsInstance(json, dict)\n self.assertEqual(len(json.keys()), 1)\n self.assertIn('users', json.keys())\n\n users = json.get('users')\n self.assertIsInstance(users, list)\n self.assertEqual(len(users), 2)\n self.assertIn('John', users)\n self.assertIn('Jane', users)",
"def test_users_profile_list_return(self):\n self.client.credentials(\n HTTP_AUTHORIZATION='Token ' + self.login_response.data['token'])\n self.response = self.client.get(\"/api/users/users_list/\")\n self.assertEqual(self.response.status_code, status.HTTP_200_OK)\n self.assertIn('users', self.response.data)",
"def get_users():\n users = storage.all('User')\n users_list = []\n for user in users.values():\n users_list.append(user.to_dict())\n return jsonify(users_list), 200",
"def get_users():\n current_user = get_jwt_identity()\n\n if not current_user:\n print('uri=/login error=\"Missing user\"', flush=True)\n return jsonify(message=\"Missing user\"), 400\n\n if not Administrator.is_administrator(current_user):\n print('non-admin user error', flush=True)\n return jsonify(message=\"Forbidden\"), 403\n\n try:\n users = User.get_users()\n print(users, flush=True)\n return jsonify(message='{}'.format(json.dumps(users))), 200\n\n except Exception as e:\n print(e, flush=True)\n return jsonify(message='{}'.format(e)), 501"
]
| [
"0.869498",
"0.83159864",
"0.82164174",
"0.82077795",
"0.81848764",
"0.81795245",
"0.8039775",
"0.80138654",
"0.8011747",
"0.8011747",
"0.80085105",
"0.80024284",
"0.7985461",
"0.79505664",
"0.7949807",
"0.79328257",
"0.7802099",
"0.7719796",
"0.77042687",
"0.7688526",
"0.7659961",
"0.75901043",
"0.75676185",
"0.75510234",
"0.75477767",
"0.7508063",
"0.74920636",
"0.7484613",
"0.7482916",
"0.74808574"
]
| 0.89917773 | 0 |
Test API can get a user by id | def test_api_can_get_users_by_id(self):
rv = self.client().post('/api/v1/user/',
data = self.req)
res = self.client().get('/api/v1/user/3')
        self.assertEqual(res.status_code, 200) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_api_user_get(self):\n pass",
"def test_getUser(self):\n\t\turl = \"/users/2/\"\n\t\tresponse = self.client.get(url, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_200_OK)\n\t\tself.assertEqual(response.data[\"id\"], 2)\n\t\tself.assertEqual(response.data[\"username\"], \"testUser2\")",
"def test_users_get(self):\n pass",
"def test_users_get(self):\n pass",
"def test_user_id_get(self):\n pass",
"def test_get_by_id(self):\n with self.client:\n self.client.post('/users/login', data=dict(\n username=\"eschoppik\", password='secret'\n ), follow_redirects=True)\n self.assertTrue(current_user.id == 1)\n self.assertFalse(current_user.id == 20)",
"def test_user_get(self):\r\n expected_user = UserFactory.create()\r\n # Test GET all users\r\n res = self.app.get('/api/user')\r\n data = json.loads(res.data)\r\n user = data[0]\r\n assert len(data) == 1, data\r\n assert user['name'] == expected_user.name, data\r\n\r\n # The output should have a mime-type: application/json\r\n assert res.mimetype == 'application/json', res\r\n\r\n # Test GETting a specific user by ID\r\n res = self.app.get('/api/user/1')\r\n data = json.loads(res.data)\r\n user = data\r\n assert user['name'] == expected_user.name, data\r\n\r\n # Test a non-existant ID\r\n res = self.app.get('/api/user/3434209')\r\n err = json.loads(res.data)\r\n assert res.status_code == 404, err\r\n assert err['status'] == 'failed', err\r\n assert err['target'] == 'user', err\r\n assert err['exception_cls'] == 'NotFound', err\r\n assert err['action'] == 'GET', err",
"def get_user(id):\n url = 'https://jsonplaceholder.typicode.com/'\n user = requests.get(url + 'users', params={'id': id}).json()\n return user",
"def test_api_can_get_all_users(self):\n response = self.client().get('/api/v1/user/')\n self.assertTrue(response.status_code, 200)",
"def test_request_users_user(self):\n response = requests.get(self.url + '/users/John')\n\n self.assertEqual(response.status_code, 200)\n self.assertIsNone(response.json())",
"def test_detail(self, client, users):\n user = users[0]\n url = reverse('users:detail', args=(user.pk,))\n response = client.get(url)\n assert response.status_code == 200\n assert user.username in str(response.content)",
"def test_get_users(self):\n pass",
"def test_resource_user_resource_find_users_get(self):\n pass",
"def test_get_single_user(self):\n user = add_user(\"neilb\", \"[email protected]\")\n with self.client:\n response = self.client.get(f'/users/{user.id}')\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 200)\n self.assertTrue('created_at' in data['data'])\n self.assertIn('neilb', data['data']['username'])\n self.assertIn('[email protected]', data['data']['email'])\n self.assertIn('success', data['status'])",
"def test_getUsers(self):\n\t\turl = \"/users/\"\n\t\tresponse = self.client.get(url, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_200_OK)\n\t\tself.assertEqual(response.data[\"count\"], 2)",
"def test_get_single_user_no_id(self):\n add_user(\"neilb\", \"[email protected]\")\n with self.client:\n response = self.client.get('/users/blah')\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 404)\n self.assertIn('User does not exist', data['message'])\n self.assertIn('fail', data['status'])",
"def test_show(self):\n\n with self.client as c:\n response = c.get(f\"/users/{self.testuser.id}\")\n\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"@alice\", str(response.data))",
"def test_get_users_list(self):\n url = reverse('users')\n response = self.client.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_get_user_by_uuiduser_uuid_get(self):\n pass",
"def get_user(id):\n pass",
"def test_get_user(sample_user):\n\n client = auth_client(APIClient(), sample_user.email, \"testpassword\")\n\n response = client.get(reverse(\"user-detail\"), format=\"json\")\n assert response.status_code == status.HTTP_200_OK\n assert json.dumps(response.data) == json.dumps(\n {\"id\": sample_user.pk, \"email\": sample_user.email}\n )",
"def test_request_users(self):\n response = requests.get(self.url + '/users')\n\n self.assertEqual(response.status_code, 200)\n\n json = response.json()\n self.assertIsInstance(json, dict)\n self.assertEqual(len(json.keys()), 1)\n self.assertIn('users', json.keys())\n\n users = json.get('users')\n self.assertIsInstance(users, list)\n self.assertEqual(len(users), 2)\n self.assertIn('John', users)\n self.assertIn('Jane', users)",
"def test_user_id_identities_get(self):\n pass",
"def test_get_user_by_id_non_admin(client: FlaskClient, db_session) -> None:\n username = create_random_username()\n populate_database_with_users(db_session, username)\n # Users with matching username in auth token are allowed to make\n # the request\n user = GifSyncUser.get_by_username(username)\n assert user is not None\n auth_token = create_auth_token(username)\n response = get_user(client, username, auth_token.signed)\n assert response.status_code == HTTPStatus.OK\n assert_user_in_response(response)\n # User retrieved matches the user in database\n json_data: dict = response.get_json()\n user_data = json_data.get(\"user\")\n assert isinstance(user_data, dict)\n assert user_data.get(\"id\") == user.id\n assert user_data.get(\"username\") == user.username\n assert isinstance(user_data.get(\"gifs\"), list)\n assert len(user_data[\"gifs\"]) == len(user.gifs)",
"def test_user_get_all(self):\n response = self.app.get('/api/v3/users', headers=self.user_header)\n self.assertEqual(response.status_code, 401)",
"def test_fetch_all_user(self):\n\n payload = self.get_req('api/v1/users')\n self.assertEqual(payload.status_code, 200)\n self.assertEqual(payload.json['users'], [])",
"def test_get_all_users(self):\n created_30_days_ago = datetime.datetime.utcnow() + datetime.timedelta(-30)\n add_user('neilb', '[email protected]', 'password123', created_30_days_ago)\n add_user('juneau', '[email protected]')\n with self.client:\n response = self.client.get('/users')\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(data['data']['users']),2)\n self.assertTrue('created_at' in data['data']['users'][0])\n self.assertTrue('created_at' in data['data']['users'][1])\n self.assertIn('juneau', data['data']['users'][0]['username'])\n self.assertIn('neilb', data['data']['users'][1]['username'])\n self.assertIn('success', data['status'])",
"def test_get_by_id(self):\n\n user = CustomUser.get_by_id(2)\n expected_user = CustomUser.objects.get(id=2)\n self.assertEqual(user, expected_user)",
"def test_listuser():\n url = baseUrl + userurl + listurl\n logging.info(\"List users\")\n r = requests.get(url, headers=header)\n assert r.status_code == 200\n resp = r.json()\n global user_ids\n user_ids = []\n if resp is None:\n pass\n else:\n user_num = len(resp)\n for k in range(0, user_num):\n assert resp[k]['subscriptionIds'][0] == subscriptionid\n if resp[k][\"isActive\"] is True:\n user_ids.append(resp[k][\"id\"])\n print (user_ids)\n assert user_id in user_ids",
"def test_get_all_by_one_user(self):\n response = self.client.get('/api/v1/users/5/parcels')\n result = json.loads(response.data.decode())\n self.assertEqual(result[\"message\"], \"User does not have orders\", msg = \"User orders\")\n self.assertEqual(response.status_code, 200)"
]
| [
"0.81065226",
"0.7816911",
"0.7776593",
"0.7776593",
"0.7746049",
"0.76905054",
"0.7634237",
"0.7584148",
"0.75766575",
"0.755071",
"0.7522896",
"0.748282",
"0.7374766",
"0.7249184",
"0.7234273",
"0.72244555",
"0.72155386",
"0.716782",
"0.7155042",
"0.71496236",
"0.71312124",
"0.71212226",
"0.7116388",
"0.71020037",
"0.7098478",
"0.70919365",
"0.70865923",
"0.70775783",
"0.70508516",
"0.6998769"
]
| 0.8959683 | 0 |
Test API can create a user | def test_api_can_create_users(self):
res = self.client().post('/api/v1/user/', data = self.req)
        self.assertEqual(res.status_code, 200)
self.assertIn('mary', str(res.data)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_able_to_create_a_user():\n response = api_helper.create_user(pytest.test_user)\n assert response.status_code == 201\n check_user_data_in_response(response.json()[\"data\"])",
"def test_create_user(self):\n pass",
"def test_create_user(self):\n pass",
"def test_create_user(self):\n pass",
"def test_create(self):\n urls = [reverse('api:user-list')]\n data = {\n \"username\": \"newuser\",\n \"email\": \"[email protected]\",\n \"password\": \"password\"\n }\n access = {\n \"forbidden\": [self.anonymous_client, self.readonly_client, self.custodian_1_client],\n \"allowed\": [self.admin_client]\n }\n for client in access['forbidden']:\n for url in urls:\n self.assertIn(\n client.post(url, data, format='json').status_code,\n [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]\n )\n\n for client in access['allowed']:\n for url in urls:\n self.assertEqual(\n client.post(url, data, format='json').status_code,\n status.HTTP_201_CREATED\n )",
"def test_good_user_creation(self):\n data = json.dumps({\n \"username\" : \"mark\", \"email\" : \"[email protected]\",\n \"password\" : \"secret12345\", \"confirm_password\" : \"secret12345\"})\n response = self.app.post(\n '/api/v3/users', data=data,\n content_type='application/json',\n headers=self.admin_header)\n self.assertEqual(response.status_code, 201)",
"def test_createUser_single(self):\n #TODO: this and other tests",
"def test_create_user(self):\n data = {\n 'username': 'foobar',\n 'email': '[email protected]',\n 'password': 'somepassword'\n }\n\n request = self.factory.post(self.create_url, data, format='json')\n view = UserViewSet.as_view({\"post\": \"create\"})\n response = view(request)\n self.assertEqual(User.objects.count(), 2)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(response.data['username'], data['username'])\n self.assertEqual(response.data['email'], data['email'])\n self.assertFalse('password' in response.data)",
"def test_createuser():\n url = baseUrl + userurl\n payload = user_payload\n logging.info(\"Create a user: %s\" % payload)\n r = requests.post(url, data=json.dumps(payload), headers=header)\n assert r.status_code == 201\n resp = r.text\n assert resp == 'Success'",
"def test_api_user_post(self):\n pass",
"def test_create_user(self):\n self.login()\n res = self.submit()\n\n assert res.status_code == 200",
"def test_admin_create_user(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Love',\n username='love',\n password='Andela8',\n role='attendant'\n )\n\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Summer Love has been registered')\n self.assertEqual(resp.status_code, 201)",
"def test_create_user(self):\n data = {\n \"firstname\": \"John\",\n \"lastname\": \"Doe\",\n \"password\": \"supersecret\",\n \"password_repeat\": \"supersecret\",\n }\n res = self.post(url=\"/users\", data=data)\n self.assertEqual(res.status_code, 200)\n self.assertIn(b\"Created user.\", res.data)\n\n user = User.query.filter_by(id=6).first()\n self.assertTrue(user)\n self.assertEqual(user.firstname, \"John\")\n self.assertEqual(user.lastname, \"Doe\")\n self.assertFalse(user.is_verified)",
"def test_create_user(self):\n url = reverse('signup')\n data = {'username': 'ctest', 'name': 'name', 'password': 'ctest12345', 'bio': 'bio',\n 'phoneNumber': '9382593895', 'city': 'tehran', 'email': '[email protected]', 'device': 'android'}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)",
"def test_create_user(self):\n url = reverse('rest_register')\n data = {\n 'email': '[email protected]',\n 'password1': 'notshortpassword',\n 'password2': 'notshortpassword'\n }\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(User.objects.count(), 1)\n self.assertEqual(User.objects.get().email, '[email protected]')",
"def test_users_post(self):\n pass",
"def test_users_post(self):\n pass",
"def test_create_user(self):\n data = {\n 'email': '[email protected]',\n 'password': 'somepassword'\n }\n\n response = self.client.post(self.create_url, data, format='json')\n\n # And that we're returning a 201 created code.\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n # Additionally, we want to return the username and email upon successful creation.\n self.assertEqual(response.data['email'], data['email'])\n self.assertFalse('password' in response.data)",
"def test_create_user_exists(self):\n payload = {\n \"email\": \"[email protected]\",\n \"name\": \"Test\",\n 'password': 'test123'\n }\n create_user(**payload)\n\n res = self.client.post(CREATE_USER_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_resource_user_resource_add_users_post(self):\n pass",
"def test_add_user(self):\n pass",
"def test_user_exists(self):\n payload = {'email': '[email protected]','password': 'testpass'}\n res = self.client.post(CREATE_USER_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_user_is_really_create():\n response = api_helper.get_user(user_name=pytest.test_user.name)\n assert response.status_code == 200\n response_json_data = response.json()[\"data\"]\n assert len(response_json_data) == 1\n check_user_data_in_response(response_json_data[0])\n pytest.test_user.id = response_json_data[0]['id']",
"def test_create_user_exists(self):\n payload = {\n \"user\": {\n \"email\": \"[email protected]\",\n \"password\": \"useruser111\",\n \"is_doctor\": False,\n \"is_hospital_admin\": True\n },\n 'first_name': 'Test',\n 'last_name': 'JustUser'\n }\n sample_user(\n payload['user']['email'], payload['user']['password'],\n payload['user']['is_doctor'], payload['user']['is_hospital_admin']\n ),\n response = self.client.post(HOSPITAL_ADMIN_REGISTER_URL, payload, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_create_user(self):\n url = reverse('rest_register')\n data = {\n 'username': \"tommy\",\n 'email': \"[email protected]\",\n 'password1': \"thisPass\",\n 'password2': \"thisPass\",\n\n }\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 201)\n self.assertIn('key', resp.data)",
"def test_user_exists(self):\n payload = {\n 'email': '[email protected]',\n 'password': 'testpass123'\n }\n create_user(**payload)\n res = self.client.post(CREATE_USER_API, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_new_user(self):\n json_resp = make_user(self.client)\n # check api response\n self.assertEqual(json_resp['status'], 'user registered')\n self.assertEqual(json_resp['username'], 'Dan')\n # check that user is in database\n self.assertEqual(User.query.count(), 1)\n\n # check malformed query\n resp = self.client.post('/user/',\n headers=api_headers(),\n data=json.dumps({'username': 'Dan'}))\n json_resp = json.loads(resp.data.decode('utf-8'))\n # check api response\n self.assertEqual(resp.status, '400 BAD REQUEST')\n self.assertEqual(json_resp['status'], 'missing fields')\n self.assertEqual(json_resp['missing'], ['email', 'password'])",
"def users_create():",
"def test_user_exists(self):\n\n payload = {\n 'email': '[email protected]',\n 'password': 'test11',\n 'name': \"test name\"\n }\n\n create_user(**payload)\n\n res = self.client.post(CREATE_USER_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_create(self, client):\n count = get_user_model().objects.count()\n data = factory.build(dict, FACTORY_CLASS=UserFactory)\n url = reverse('users:create')\n response = client.post(url, data)\n assert response.status_code == 302\n\n user = get_user_model().objects.last()\n assert user.username == data['username']\n assert user.birthday == datetime.datetime.strptime(data['birthday'], '%Y-%m-%d').date()\n assert get_user_model().objects.count() == count + 1"
]
| [
"0.82718086",
"0.82564837",
"0.82564837",
"0.82564837",
"0.82460296",
"0.8224692",
"0.80256295",
"0.8014462",
"0.80112034",
"0.79581463",
"0.7924439",
"0.7820544",
"0.78131205",
"0.7799",
"0.7794324",
"0.778519",
"0.778519",
"0.7776685",
"0.7764455",
"0.7756855",
"0.7754141",
"0.7753791",
"0.77526796",
"0.77032304",
"0.76723975",
"0.7662394",
"0.76574165",
"0.764427",
"0.76203185",
"0.7611896"
]
| 0.8786353 | 0 |
Generate a commanded position, velocity and yaw based on the trajectory | def trajectory_control(self,position_trajectory,
yaw_trajectory,
time_trajectory,
current_time):
ind_min = np.argmin(np.abs(np.array(time_trajectory)-current_time))
time_ref = time_trajectory[ind_min]
if current_time < time_ref:
position0 = position_trajectory[ind_min-1]
position1 = position_trajectory[ind_min]
time0 = time_trajectory[ind_min-1]
time1 = time_trajectory[ind_min]
yaw_cmd = yaw_trajectory[ind_min-1]
else:
yaw_cmd = yaw_trajectory[ind_min]
if ind_min >= len(position_trajectory)-1:
position0 = position_trajectory[ind_min]
position1 = position_trajectory[ind_min]
time0 = 0.0
time1 = 1.0
else:
position0 = position_trajectory[ind_min]
position1 = position_trajectory[ind_min+1]
time0 = time_trajectory[ind_min]
time1 = time_trajectory[ind_min+1]
position_cmd = (position1-position0)* \
(current_time-time0)/(time1-time0)+position0
velocity_cmd = (position1-position0)/(time1-time0)
return (position_cmd,velocity_cmd,yaw_cmd) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_trajectory(t, v, waypoints, coeff_x, coeff_y, coeff_z):\n global yaw\n global current_heading\n yawdot = 0.0\n pos = np.zeros(3)\n acc = np.zeros(3)\n vel = np.zeros(3)\n jerk = np.zeros(3)\n snap = np.zeros(3)\n yawddot = 0.0\n\n # distance vector array, represents each segment's distance\n distance = waypoints[0:-1] - waypoints[1:]\n # T is now each segment's travel time\n T = (1.0 / v) * np.sqrt(distance[:,0]**2 + distance[:,1]**2 + distance[:,2]**2)\n # accumulated time\n S = np.zeros(len(T) + 1)\n S[1:] = np.cumsum(T)\n\n # find which segment current t belongs to\n t_index = np.where(t >= S)[0][-1]\n\n # prepare the next desired state\n if t == 0:\n pos = waypoints[0]\n t0 = get_poly_cc(8, 1, 0)\n\n # get X-Y plane project of velocity vector ( this vector is tangent to curve )\n v_proj = np.array([coeff_x[0:8].dot(t0), coeff_y[0:8].dot(t0)])\n if(LA.norm(v_proj) == 0.0):\n # if velocity vector is of zero magnitude there should be no change in heading!\n pass\n else:\n current_heading = v_proj/LA.norm(v_proj) * (1.0 / T[0])\n \n\n # stay hover at the last waypoint position\n elif t > S[-1]:\n pos = waypoints[-1]\n else:\n # scaled time\n scale = (t - S[t_index]) / T[t_index]\n start = 8 * t_index\n end = 8 * (t_index + 1)\n\n t0 = get_poly_cc(8, 0, scale)\n pos = np.array([coeff_x[start:end].dot(t0), coeff_y[start:end].dot(t0), coeff_z[start:end].dot(t0)])\n\n t1 = get_poly_cc(8, 1, scale)\n # chain rule applied\n vel = np.array([coeff_x[start:end].dot(t1), coeff_y[start:end].dot(t1), coeff_z[start:end].dot(t1)]) * (1.0 / T[t_index])\n\n t2 = get_poly_cc(8, 2, scale)\n # chain rule applied\n acc = np.array([coeff_x[start:end].dot(t2), coeff_y[start:end].dot(t2), coeff_z[start:end].dot(t2)]) * (1.0 / T[t_index]**2)\n\n t3 = get_poly_cc(8, 3, scale)\n # apply chain rule\n jerk = np.array([coeff_x[start:end].dot(t3), coeff_y[start:end].dot(t3), coeff_z[start:end].dot(t3)]) * (1.0 / T[t_index]**3)\n\n t4 = get_poly_cc(8, 4, scale)\n # apply chain rule\n snap = np.array([coeff_x[start:end].dot(t4), coeff_y[start:end].dot(t4), coeff_z[start:end].dot(t4)]) * (1.0 / T[t_index]**4)\n\n # calculate desired yaw and yaw rate\n\n v_proj = np.array([vel[0], vel[1]])\n\n if( LA.norm(v_proj) == 0.0):\n # if velocity vector is zero, again there should be no change in heading\n next_heading = current_heading\n else:\n next_heading = v_proj/LA.norm(v_proj)\n\n \"\"\"\n try :\n #current_heading = v_proj/LA.norm(v_proj) #* (1.0 / T[0]) #np.array([coeff_x[0:8].dot(t0), coeff_y[0:8].dot(t0)]) * (1.0 / T[0])\n next_heading = v_proj/LA.norm(v_proj)\n except ZeroDivisionError:\n # velocity vector magnitude was zero so there should be no change in heading!\n next_heading = current_heading\n \"\"\" \n\n # angle between current vector with the next heading vector\n # from a * b = |a|*|b|cos(angle)\n delta_psi = np.arccos(np.dot(current_heading, next_heading) / (LA.norm(current_heading)*LA.norm(next_heading)))\n # cross product allow us to determine rotating direction\n norm_v = np.cross(current_heading,next_heading)\n\n if norm_v > 0:\n yaw += delta_psi\n elif norm_v < 0:\n yaw -= delta_psi\n else:\n # normv = 0! 
if there is no change in yaw, do not modify it!\n pass\n\n # dirty hack, quadcopter's yaw range represented by quaternion is [-pi, pi]\n while yaw > np.pi:\n yaw = yaw - 2*np.pi\n\n # print next_heading, current_heading, \"yaw\", yaw*180/np.pi, 'pos', pos\n current_heading = next_heading\n #print(current_heading)\n yawdot = delta_psi / 0.005 # dt is control period\n max_yawdot = 5.0 #rad/s\n if(abs(yawdot) > max_yawdot):\n yawdot = (yawdot/abs(yawdot))*max_yawdot # make it 5rad/s with appropriate direction\n \n yaw = np.sin(2*t)*0.0\n yawdot = 2*np.cos(2*t)*0.0\n yawddot = -4*np.sin(2*t)*0.0\n return DesiredState(pos, vel, acc, jerk, snap, yaw, yawdot, yawddot)",
"def make_trajectory(self, NextwpPosition, NextwpOrientation):\n d = np.linalg.norm(self.CurrentPosition - NextwpPosition)\n inter_segment_distance = 1\n self.no_of_segments = 1+int(d//inter_segment_distance)\n \n\n # enter sequence of waypoints: no of points should be self.no_of_segments+1\n x_wp = np.linspace(self.CurrentPosition[0], NextwpPosition[0], self.no_of_segments+1)\n y_wp = np.linspace(self.CurrentPosition[1], NextwpPosition[1], self.no_of_segments+1)\n z_wp = np.linspace(self.CurrentPosition[2], NextwpPosition[2], self.no_of_segments+1)\n \n # add intial and final condiions vel, acc, jerk\n x_ic = np.array([0, 0, 0])\n x_fc = np.array([0, 0, 0])\n x0 = np.array([x_wp[0], x_ic[0], x_ic[1], x_ic[2]])\n xT = np.array([x_wp[-1], x_fc[0], x_fc[1], x_fc[2]])\n\n y_ic = np.array([0, 0, 0])\n y_fc = np.array([0, 0, 0])\n y0 = np.array([y_wp[0], y_ic[0], y_ic[1], y_ic[2]])\n yT = np.array([y_wp[-1], y_fc[0], y_fc[1], y_fc[2]])\n \n z_ic = np.array([0, 0, 0])\n z_fc = np.array([0, 0, 0])\n z0 = np.array([z_wp[0], z_ic[0], z_ic[1], z_ic[2]])\n zT = np.array([z_wp[-1], z_fc[0], z_fc[1], z_fc[2]])\n\n path = [np.sqrt((x_wp[i]-x_wp[i-1])**2 + (y_wp[i]-y_wp[i-1])**2 + (z_wp[i]-z_wp[i-1])**2) for i in range(1, self.no_of_segments+1, 1)]\n\n \n T = []; T.insert(0, 0)\n T.insert(1, T[-1] + path[0]/self.reduced_speed)\n for i in range(1, len(path)-1, 1):\n T.append(T[-1] + path[i]/self.average_speed)\n T.insert(len(T)+1, T[-1]+path[-1]/self.reduced_speed) \n\n\n\n\n #T = []; T.insert(0, 0) # insert 0 at 0 position\n #for i in range(self.no_of_segments): \n # T.append(T[-1]+path[i]/self.average_speed)\n\n r = self.r\n N = 1 + self.N # because number of terms in a polynomial = degree+1\n\n QQ = []; AA_inv = []\n\n for i in range(self.no_of_segments): \n q = self.construct_Q(N, r, T[i], T[i+1])\n a = self.construct_A(N, r, T[i], T[i+1])\n a_inv = scipy.linalg.pinv(a)\n QQ = block_diag(QQ, q)\n AA_inv = block_diag(AA_inv, a_inv)\n \n order = 2*r*self.no_of_segments\n R = np.dot(AA_inv.T, np.dot(QQ, AA_inv))\n \n bx = self.construct_b(x0, xT)\n by = self.construct_b(y0, yT)\n bz = self.construct_b(z0, zT)\n\n m = Model(\"qp\")\n order = 2*r*self.no_of_segments\n dx = m.addVars(order, lb = -GRB.INFINITY, ub = GRB.INFINITY, vtype=GRB.CONTINUOUS, name=\"dx\")\n dy = m.addVars(order, lb = -GRB.INFINITY, ub = GRB.INFINITY, vtype=GRB.CONTINUOUS, name=\"dy\") \n dz = m.addVars(order, lb = -GRB.INFINITY, ub = GRB.INFINITY, vtype=GRB.CONTINUOUS, name=\"dz\") \n\n # making objective using quicksum, takes a lot of time \n #obj1 = quicksum(dx[i] * quicksum(R[i][j] * dx[j] for j in range(order)) for i in range(order))\n #obj2 = quicksum(dy[i] * quicksum(R[i][j] * dy[j] for j in range(order)) for i in range(order))\n #obj3 = quicksum(dz[i] * quicksum(R[i][j] * dz[j] for j in range(order)) for i in range(order))\n \n # using LinExpr for the second expression is significantly faster \n obj1 = quicksum(dx[i] * LinExpr([(R[i][j], dx[j]) for j in range(order)]) for i in range(order))\n obj2 = quicksum(dy[i] * LinExpr([(R[i][j], dy[j]) for j in range(order)]) for i in range(order))\n obj3 = quicksum(dz[i] * LinExpr([(R[i][j], dz[j]) for j in range(order)]) for i in range(order))\n obj = obj1 + obj2 + obj3\n j = 0\n for i in range(order): \n if i < r: \n m.addConstr(dx[i] == bx[i])\n m.addConstr(dy[i] == by[i])\n m.addConstr(dz[i] == bz[i])\n elif i >= order-r: \n m.addConstr(dx[i] == bx[r+j])\n m.addConstr(dy[i] == by[r+j])\n m.addConstr(dz[i] == bz[r+j])\n j += 1\n \n c = 1 # counter\n for i in range(r, order-2*r, 
2*r): \n #m.addConstr(dx[i] == self.x_wp[c])\n #m.addConstr(dy[i] == self.y_wp[c])\n #m.addConstr(dz[i] == self.z_wp[c])\n m.addConstr(dx[i] <= x_wp[c] + 0.2)\n m.addConstr(dx[i] >= x_wp[c] - 0.2)\n m.addConstr(dy[i] <= y_wp[c] + 0.2)\n m.addConstr(dy[i] >= y_wp[c] - 0.2)\n m.addConstr(dz[i] <= z_wp[c] + 0.2)\n m.addConstr(dz[i] >= z_wp[c] - 0.2)\n c = c+1\n for j in range(r): \n m.addConstr(dx[i+j] == dx[i+j+r])\n m.addConstr(dy[i+j] == dy[i+j+r])\n m.addConstr(dz[i+j] == dz[i+j+r])\n #if j ==2: \n # m.addConstr(dx[i+j] == 2.0)\n\n m.setObjective(obj, GRB.MINIMIZE)\n #m.write('model.lp')\n m.setParam('OutputFlag', 0)\n m.setParam('PSDtol', 1e-1)\n m.optimize()\n\n\n runtime = m.Runtime\n\n\n x_coeff = [dx[i].X for i in range(order)]\n y_coeff = [dy[i].X for i in range(order)]\n z_coeff = [dz[i].X for i in range(order)]\n\n Dx = np.asarray(x_coeff)[np.newaxis].T\n Dy = np.asarray(y_coeff)[np.newaxis].T \n Dz = np.asarray(z_coeff)[np.newaxis].T \n pcx = np.dot(AA_inv, Dx); pcy = np.dot(AA_inv, Dy); pcz = np.dot(AA_inv, Dz)\n\n\n poly_coeff_x = pcx.T.ravel().tolist()\n poly_coeff_y = pcy.T.ravel().tolist()\n poly_coeff_z = pcz.T.ravel().tolist()\n\n return poly_coeff_x, poly_coeff_y, poly_coeff_z, T, time.time()\n #self.publish(poly_coeff_x, poly_coeff_y, poly_coeff_z)",
"def executeTrajectory():\n driveStraight(1, 0.6)\n rotate(0.25)\n driveStraight(1, .45)\n rotate(-0.25)",
"def update_position(self, event):\n\n # Create a copy of the most recent stored twist data to perform calculations\n with self.lock:\n velocity_data = copy.deepcopy(self.twist)\n\n # Time elapsed since last update position call\n if hasattr(event, 'last_real'):\n if event.last_real is None:\n time = rospy.Duration(0.05)\n else:\n time = event.current_real - event.last_real\n \n time = time.to_sec()\n\n # Calculate angle turned in the given time using omega = theta/time\n angle = velocity_data.angular.z*time\n\n # Calculate distance travelled in the given time using linear velocity = arc distance/time\n distance = velocity_data.linear.x*time\n\n # Calculate yaw of the robot\n self.vehicle_yaw += angle\n\n # Calculate vehicle x, y, z position coordinates\n # TODO recalculate the position based on traveling in a circular arc.\n self.pose.position.x += (distance)*cos(self.vehicle_yaw)\n self.pose.position.y += (distance)*sin(self.vehicle_yaw)\n\n # Calculate z position using linear interpolation and create cloud array\n \n # 1. Create ranges to be used in interpolation function\n terrain_points_x = np.arange(0, self.gaussian_array.shape[1]*self.resolution, self.resolution)\n terrain_points_y = np.arange(0, self.gaussian_array.shape[0]*self.resolution, self.resolution)\n\n # 2. Create array of points to be converted to point cloud for vizualization\n terrain_mesh_x, terrain_mesh_y = np.meshgrid(terrain_points_x, terrain_points_y)\n terrain_x = terrain_mesh_x.ravel()\n terrain_y = terrain_mesh_y.ravel()\n terrain_z = self.gaussian_array.ravel()\n terrain_grid_points = np.stack((terrain_x, terrain_y, terrain_z), axis=1)\n\n # 3. Create interpolation function based on the ranges and gaussian data\n interp_func = RectBivariateSpline(terrain_points_y, terrain_points_x, self.gaussian_array)\n\n # 4. Find z value for x and y coordinate of vehicle using interpolation function\n # TODO compute z height based on footprint\n self.pose.position.z = interp_func(self.pose.position.y, self.pose.position.x)\n\n # Convert Euler Angles to Quarternion\n V_rotation = tf.transformations.quaternion_from_euler(0.0, 0.0, self.vehicle_yaw)\n\n # Broadcast vehicle frame which is a child of the world frame\n br = tf.TransformBroadcaster()\n br.sendTransform((self.pose.position.x, self.pose.position.y, self.pose.position.z), \n V_rotation, rospy.Time.now(),\"vehicle_frame\", \"map\")\n\n # Construct the homogenous transformation matrix for map to vehicle frame\n V_translation = [self.pose.position.x, self.pose.position.y, self.pose.position.z]\n map_T_V = tf.transformations.quaternion_matrix(V_rotation) \n map_T_V[:3,3] = np.array(V_translation)\n\n # Create footprint of vehicle\n V_footprint_range_x = np.linspace((-self.vehicle_length/2), (self.vehicle_length/2), 30)\n V_footprint_range_y = np.linspace((-self.vehicle_width/2), (self.vehicle_width/2), 15)\n V_footprint_mesh_x, V_footprint_mesh_y = np.meshgrid(V_footprint_range_x, V_footprint_range_y)\n V_footprint_x = V_footprint_mesh_x.ravel()\n V_footprint_y = V_footprint_mesh_y.ravel()\n\n # For every point in the vehicle footprint, calculate the position wrt to the vehicle's frame\n # and its interpolated z value. 
Add this point to a list of points for visualization.\n # TODO Flatten into a single matrix multiply to remove for loop\n V_viz_points = []\n for i in range(V_footprint_x.shape[0]):\n p = Point()\n V_footprint_point = np.array([[V_footprint_x[i]],[V_footprint_y[i]], [0.0], [1.0]])\n V_footprint_point = np.matmul(map_T_V, V_footprint_point)\n V_footprint_point[2, 0] = interp_func(V_footprint_point[1, 0], V_footprint_point[0, 0])\n p.x = V_footprint_point[0, 0]\n p.y = V_footprint_point[1, 0]\n p.z = V_footprint_point[2, 0]\n V_viz_points.append(p)\n\n #####################################################################################\n # Create a copy of the most recent stored JointState data to perform calculations\n with self.joint_lock:\n joint_data = copy.deepcopy(self.joint)\n\n # If the data is empty on first run, fill with 0.0\n if not joint_data.velocity:\n joint_data.velocity = [0.0,0.0]\n \n # Calculate angle based on velocity data and time\n angle = joint_data.velocity[0]*time\n angle2 = joint_data.velocity[1]*time\n\n self.joint1_pitch += angle\n self.joint2_pitch += angle2\n\n # Transformations from vehicle frame to Joint1 and Joint2\n \n # Static rotation about z-axis \n static_rot = tf.transformations.quaternion_from_euler(0.0, 0.0, 3.14159)\n translation = [0.0, 0.0, 0.0]\n V_T_SRz = tf.transformations.quaternion_matrix(static_rot)\n V_T_SRz[:3,3] = np.array(translation)\n\n # Dynamic rotation about the y-axis of Joint 1\n rot_SRz_T_J1 = [[cos(self.joint1_pitch), 0.0, sin(self.joint1_pitch)],\n [0.0, 1.0, 0.0],\n [-sin(self.joint1_pitch), 0.0, cos(self.joint1_pitch)]]\n\n trans_SRz_T_J1 = [0.0, 0.0, 0.0, 1.0]\n\n SRz_T_J1 = np.zeros((4,4))\n SRz_T_J1[:3,:3] = rot_SRz_T_J1\n SRz_T_J1[:4,3] = trans_SRz_T_J1\n\n # Translation based on length of Joint 1 arm \n no_rot = tf.transformations.quaternion_from_euler(0.0, 0.0, 0.0)\n translation = [self.joint1_length, 0.0, 0.0]\n J1_T_STx = tf.transformations.quaternion_matrix(no_rot)\n J1_T_STx[:3,3] = np.array(translation)\n\n # Dynamic rotation about y-axis of Joint 2\n dynamic_rot2 = tf.transformations.quaternion_from_euler(0.0, self.joint2_pitch, 0.0)\n translation = [0.0, 0.0, 0.0]\n STx_T_J2 = tf.transformations.quaternion_matrix(dynamic_rot2)\n STx_T_J2[:3,3] = np.array(translation)\n\n # matrix multiplication to form the homogenous matrices\n V_T_J1 = np.matmul(V_T_SRz, SRz_T_J1)\n V_T_STx = np.matmul(V_T_J1, J1_T_STx)\n V_T_J2 = np.matmul(V_T_STx, STx_T_J2)\n\n frame_J1 = tf_conversions.fromMatrix(V_T_J1)\n frame_J2 = tf_conversions.fromMatrix(V_T_J2)\n\n # The ripper tip is a point in the J2's frame, this is based on the length of the ripper\n ripper_tip_point_J2 = [self.ripper_length, 0.0, 0.0, 1.0]\n map_T_J2 = np.matmul(map_T_V, V_T_J2)\n ripper_tip_pt_map = np.matmul(map_T_J2, ripper_tip_point_J2)\n ripper_tip_point_viz = Point()\n ripper_tip_point_viz.x = ripper_tip_pt_map[0]\n ripper_tip_point_viz.y = ripper_tip_pt_map[1]\n ripper_tip_point_viz.z = ripper_tip_pt_map[2]\n V_viz_points.append(ripper_tip_point_viz)\n\n # use the ripper's position as an index value to access the gaussian array\n ripper_tip_cell_index_x = int(ripper_tip_pt_map[1]/self.resolution)\n ripper_tip_cell_index_y = int(ripper_tip_pt_map[0]/self.resolution)\n\n # Create a range of index values surrounding index_x and y\n nearby_index_cells_range_x = np.arange((ripper_tip_cell_index_x-1),(ripper_tip_cell_index_x+2), 1)\n nearby_index_cells_range_y = np.arange((ripper_tip_cell_index_y-1),(ripper_tip_cell_index_y+2), 1)\n nearby_index_cells_mesh_x, 
nearby_index_cells_mesh_y = np.meshgrid(nearby_index_cells_range_x,nearby_index_cells_range_y)\n nearby_index_cells_x = nearby_index_cells_mesh_x.ravel()\n nearby_index_cells_y = nearby_index_cells_mesh_y.ravel()\n\n # First check if the index is within the gaussian array, if it is, then check if the tip of\n # the ripper is beneath the soil, if it is, then remove the soil above the tip and disperse\n # it to the surrounding cells, provided those cells are also within the gaussian array\n # TODO Remove use of for loops and excess if statements\n\n if (0 <= ripper_tip_cell_index_x <= (self.gaussian_array.shape[0]-1)) and (0 <= ripper_tip_cell_index_y <= (self.gaussian_array.shape[1]-1)):\n if (self.gaussian_array[ripper_tip_cell_index_x][ripper_tip_cell_index_y] > ripper_tip_pt_map[2]):\n diff = self.gaussian_array[ripper_tip_cell_index_x][ripper_tip_cell_index_y] - ripper_tip_pt_map[2]\n for i in range(nearby_index_cells_x.shape[0]):\n if (0 <= nearby_index_cells_x[i] <= (self.gaussian_array.shape[0]-1)) and (0 <= nearby_index_cells_y[i] <= (self.gaussian_array.shape[1]-1)):\n self.gaussian_array[nearby_index_cells_x[i]][nearby_index_cells_y[i]] += diff/8\n self.gaussian_array[ripper_tip_cell_index_x][ripper_tip_cell_index_y] = ripper_tip_pt_map[2]\n \n\n # Publish all messages\n self.publish_messages(V_translation, V_rotation, terrain_grid_points, V_viz_points, frame_J1, frame_J2)",
"def input_system():\n vx = a.odometry_data[:, 2:3] # linear velocity_y [m/s]\n vy = a.odometry_data[:, 1:2] # linear velocity_x [m/s]\n v = np.add(vx, vy)\n v = np.true_divide(v, 2) # combined velocity [m/s]\n yawrate = np.reshape(a.odometry_data[:, 3], (-1, 1)) # angular_z [rad/s]\n u = np.reshape([v, yawrate], (-1, 2))\n return u",
"def test_pose(self):\n t = self.t\n \n # Cyclic functions for orientation and position values\n delta = math.sin(t) * 1000\n alpha = math.cos(t) * math.pi * 2\n \n # Default values\n x = 0\n y = 0\n z = 0\n\n pitch = 0\n yaw = 0\n roll = 0\n \n # assign values cyclically\n if t % (math.pi * 12) < math.pi * 2:\n x = delta\n elif t % (math.pi * 12) < math.pi * 4:\n y = delta\n elif t % (math.pi * 12) < math.pi * 6:\n z = delta\n elif t % (math.pi * 12) < math.pi * 8:\n pitch = alpha\n elif t % (math.pi * 12) < math.pi * 10:\n yaw = alpha\n elif t % (math.pi * 12) < math.pi * 12:\n roll = alpha\n else:\n # Reset counter\n self.t = 0.0\n \n return ((x, y, z), (pitch, yaw, roll))",
"def compute_vel(self, state, goal):\n\n \"\"\"\n Unicycle model control law:\n [v;w] = [kp 0 0; 0 ka kb]*[p;a;b]\n v = commanded linear velocity of robot\n w = commanded rotational velcoity of robot\n kp = gain parameter where kp > 0\n ka = gain parameter where ka - kp > 0\n kb = gain parameter where kb < 0\n p = distance from robot to goal\n a = angle between current robot heading and heading to goal\n b = error between current heading to goal and target end heading\n \"\"\"\n \n #print('state,goal,v,w')\n #print(state)\n #print(goal)\n\n xr = state[0][0] # m in world frame\n yr = state[1][0] # m in world frame\n thetar = state[2][0] #rads\n\n xg = goal[0] # m in world frame\n yg = goal[1] # m in world frame\n\n dy = yg - yr\n dx = xg - xr\n\n #print('')\n #print(state)\n #print(goal)\n \n # Calculate a\n a = -1*thetar + math.atan2(dy,dx)\n\n #print(a)\n\n if a > math.pi:\n a = a - 2*math.pi\n\n if a < -1*math.pi:\n a = a + 2*math.pi\n\n #print(a)\n\n # Set omega according to control law\n omega = self.ka*a\n if math.fabs(omega) > self.MAX_OMEGA:\n if omega > 0:\n omega = self.MAX_OMEGA\n else:\n omega = -1*self.MAX_OMEGA\n\n # Calculate P\n p = math.sqrt(dy*dy + dx*dx)\n\n # Set v \n v = self.kp*p\n if v > self.MAX_SPEED:\n v = self.MAX_SPEED\n\n # set the done value\n done = (p <= self.done_distance)\n\n #print(v)\n #print(omega)\n\n out_tuple = (v, omega, done)\n \n return out_tuple",
"def velocity_trajectory(self):\n return self._read(MX_VELOCITY_TRAJECTORY)",
"def yaw_pitch_roll(self):\n\n self._normalise()\n yaw = np.arctan2(2*(self.q[0]*self.q[3] - self.q[1]*self.q[2]),\n 1 - 2*(self.q[2]**2 + self.q[3]**2))\n pitch = np.arcsin(2*(self.q[0]*self.q[2] + self.q[3]*self.q[1]))\n roll = np.arctan2(2*(self.q[0]*self.q[1] - self.q[2]*self.q[3]),\n 1 - 2*(self.q[1]**2 + self.q[2]**2))\n\n return yaw, pitch, roll",
"def clbk_odom(self,msg):\n self.x = msg.pose.pose.position.x\n self.y = msg.pose.pose.position.y\n\n # yaw\n quaternion = (\n msg.pose.pose.orientation.x,\n msg.pose.pose.orientation.y,\n msg.pose.pose.orientation.z,\n msg.pose.pose.orientation.w)\n euler = transformations.euler_from_quaternion(quaternion)\n self.yaw = euler[2]",
"def compute_trajectory():\n pass",
"def _cart_position_control(self):\n # Target is given as [x,y,z] and quaternion.\n ref_xyz = np.array(self.target_pos[0:3])\n ref_pose = np.array(self.target_pos[3:7])\n\n # Calculate difference between current and target position+orientation\n xyz_diff = ref_xyz - self.tool_pos\n quat_diff = quaternion_difference(self.tool_quat, ref_pose)\n\n # print('current pos', self.tool_pos)\n\n # Convert orientation difference into angular velocities\n ang_vel = np.ndarray(3)\n functions.mju_quat2Vel(ang_vel, quat_diff, 1) # timestep=1\n\n # Stack the errors and push them through PD controller\n error = np.hstack([xyz_diff, ang_vel]) # (6,)\n out = self._pd_control(error)\n\n # Compute required torques using positional and rotational Jacobians\n torques_cartesian = np.dot(self.jac_pos, out[:3] + self.torque[:3])\n torques_euler = np.dot(self.jac_rot, out[3:6] + self.torque[3:6])\n\n #return self._model_dynamics() + torques_cartesian + torques_euler\n #print(torques_cartesian)\n #print(torques_euler)\n \n return torques_cartesian + torques_euler",
"def base_roll_pitch_yaw(self):\n #raise NotImplementedError('Not yet implemented!')\n return np.asarray([self._robot_state.roll, self._robot_state.pitch, self._robot_state.yaw])",
"def predict_trajectory(x_init, v, yaw_dot):\n x = np.array(x_init)\n trajectory = [x]\n time = 0\n while time <= Config.predict_time:\n x = motion(x, [v, yaw_dot], Config.dt)\n trajectory.append(x)\n time += Config.dt\n\n return np.array(trajectory)",
"def move2goal(self):\n vel_msg = Twist()\n\n # Linear velocity in the x-axis.\n vel_msg.linear.x = 0.4 # m/s\n vel_msg.linear.y = 0\n vel_msg.linear.z = 0\n\n # Angular velocity in the z-axis.\n vel_msg.angular.x = 0\n vel_msg.angular.y = 0\n vel_msg.angular.z = 1.5 # rad/s\n\n # Starting point reference\n goal_x = 1.0 \n goal_y = 1.0\n x_ref = 1.0\n y_ref = 1.0\n\n # Previous Reference\n x_prev_ref = 0.0\n y_prev_ref = 0.0\n theta_prev_ref = self.theta\n vrefA = 0.5\n wrefA = 0.0\n \n i = 0\n tPx, tPy, tPTheta = self.initiate_trajectory(\n x_ref, y_ref, vel_msg, \n x_prev_ref, y_prev_ref, \n theta_prev_ref, vrefA, wrefA\n )\n\n x_prev_ref = tPx[0]\n y_prev_ref = tPy[0]\n theta_prev_ref = tPTheta[0]\n\n print(f'X TRAJECTORY: {tPx}\\nY TRAJECTORY: {tPy}\\nTHETA TRAJ: {tPTheta}')\n print(f'ACTUAL THETA: {self.theta}')\n\n while not rospy.is_shutdown():\n \n if i >= 8:\n i = 0\n\n x_ref = goal_x\n y_ref = goal_y\n\n tPx, tPy, tPTheta = self.initiate_trajectory(\n x_ref, y_ref, vel_msg, \n x_prev_ref, y_prev_ref, \n theta_prev_ref, vrefA, wrefA\n )\n # inputRef = ControllerInput(\n # xref=x_ref,\n # yref=y_ref,\n # RstateX=self.x_position,\n # RstateY=self.y_position,\n # RstateTheta=self.theta,\n # RstateVelocity=vel_msg.linear.x,\n # RstateW=vel_msg.angular.z,\n # xrefA=x_prev_ref,\n # yrefA=y_prev_ref,\n # thetarefA=theta_prev_ref,\n # vrefA=vrefA,\n # wrefA=wrefA\n # )\n\n # rospy.loginfo(f'X: {self.x_position} \\tY: {self.y_position}\\t Theta: {self.theta} ')\n # nmpc = NMPC_Controller(inputRef)\n # tPx, tPy, tPTheta = nmpc.test_create_mini_path()\n\n # print(f'X TRAJECTORY: {tPx}\\nY TRAJECTORY: {tPy}\\nTHETA TRAJ: {tPTheta}')\n # print(f'ACTUAL THETA: {self.theta}')\n \n # new_v, new_w = nmpc.start_optmizer()\n # new_v = round(new_v, 4)\n # new_w = round(new_w, 4)\n\n # print(new_v, new_w)\n # rospy.loginfo(\n # f'X: {self.x_position}, Y: {self.y_position}, THETA: {self.theta}')\n \n # self.velocity_publisher.publish(vel_msg)\n # x_prev_ref = self.x_position\n # y_prev_ref = self.y_position\n # theta_prev_ref = self.theta\n # vrefA = vel_msg.linear.x\n # wrefA = vel_msg.angular.z\n \n\n # theta_prev_ref = self.theta\n # vel_msg.angular.z = 0.0\n\n\n '''Update the linear & angular velocity'''\n # vel_msg.linear.x = new_v\n # vel_msg.angular.z = new_w\n\n if i < 8:\n inputRef = ControllerInput(\n xref = tPx[i],\n yref = tPy[i],\n RstateX = self.x_position,\n RstateY = self.y_position,\n RstateTheta = self.theta,\n RstateVelocity = vel_msg.linear.x,\n RstateW = vel_msg.angular.z,\n xrefA = x_prev_ref,\n yrefA = y_prev_ref,\n thetarefA = theta_prev_ref,\n vrefA = vrefA,\n wrefA = wrefA\n )\n\n nmpc = NMPC_Controller(inputRef)\n new_v, new_w = nmpc.start_optmizer()\n new_v = round(new_v, 4)\n new_w = round(new_w, 4)\n\n print(f'(actual) X: {self.x_position}, Y: {self.x_position}, THETA: {self.theta}')\n print(f'(desired) X: {tPx[i]}, Y: {tPy[i]}')\n print(f'V: {vel_msg.linear.x}\\tW: {vel_msg.angular.z}')\n\n x_prev_ref = tPx[i-1]\n y_prev_ref = tPy[i-1]\n theta_prev_ref = tPTheta[i-1]\n vrefA = vel_msg.linear.x\n wrefA = vel_msg.angular.z\n\n vel_msg.linear.x = new_v\n vel_msg.angular.z = new_w\n # vel_msg.angular.z = 0.0\n\n print(f'index: {i}')\n\n distance = math.sqrt((self.x_position - tPx[i])**2 + (self.y_position - tPy[i])**2)\n if distance < 0.3:\n print(f'Distance: {distance}')\n i+=1\n\n\n self.velocity_publisher.publish(vel_msg)\n self.rate.sleep()\n\n rospy.spin()",
"def _odometry_to_pos_vel(self, msg):\n \n aux1 = msg.pose.pose.position\n aux2 = msg.pose.pose.orientation\n quaternion = numpy.array([aux2.x, aux2.y, aux2.z, aux2.w])\n ea = tft.euler_from_quaternion(quaternion)\n rot = tft.rotation_from_quaternion(quaternion)\n yaw = ea[2]\n pos = numpy.array([aux1.x, aux1.y, aux1.z, yaw])\n aux3 = msg.twist.twist.linear\n vel_body = numpy.array([aux3.x, aux3.y, aux3.z])\n vel_world = rot.dot(vel_body)\n aux4 = msg.twist.twist.angular\n vel = numpy.concatenate([aux3.x, aux3.y, aux3.z, aux4.z])\n\n return pos, vel",
"def update_pose(self, data):\n # self.pose = data\n self.x_position = round(data.pose.pose.position.x, 4)\n self.y_position = round(data.pose.pose.position.y, 4)\n [yaw, _, _] = quaternion_to_euler(\n data.pose.pose.orientation.x, \n data.pose.pose.orientation.y, \n data.pose.pose.orientation.z, \n data.pose.pose.orientation.w\n )\n \n self.theta = round(yaw, 4)\n print(f'(Reading) X: {data.pose.pose.position.x}\\t Y:{data.pose.pose.position.y}')\n # self.theta = round(data.pose.pose.orientation.z, 4)",
"def randomize_trajectory(self):\n self.angle = randint(-360, 360)\n self.speed = randint(1, 5)/2.5",
"def generate_motion_patters(self):\n\n\t\t# Motion primimtives for the forward direction.....................\n\t\td_del = 0.08\t\n\t\tdt = self.dt\n\t\tv = 2\t# Assuming a constant longitudinal velocity\n\t\tdelta = np.arange(-np.pi*self.max_steer/180, d_del + np.pi*self.max_steer/180, d_del)\n\t\tprint(\"Number of motion patterns in forward directon: {}\".format(len(delta)))\n\t\tfor d in delta:\n\t\t\tx0 = self.x0\n\t\t\ty0 = self.y0\n\t\t\ttheta0 = self.theta0\n\t\t\tp = np.array([x0, y0, theta0])\n\t\t\t\n\t\t\tfor i in range(self.num_steps):\n\t\t\t\tx0 += v*cos(theta0)*dt\n\t\t\t\ty0 += v*sin(theta0)*dt\n\t\t\t\ttheta0 += v*tan(d)*dt/self.L\n\t\t\t\tp = np.vstack((p,np.array([x0, y0, theta0])))\n\n\t\t\t# Adding the motion primitive array to the list\n\t\t\tself.motion_primitives.append(p)\n\n\t\t\n\t\t# Motion primitives for the backward direction ...................\n\t\td_del = 0.1\n\t\tv = -1.2\n\t\tdelta = np.arange(-np.pi*self.max_steer/180, d_del + np.pi*self.max_steer/180, d_del)\n\t\tprint(\"Number of motion patterns for the backward direction: {}\".format(len(delta)))\n\t\tfor d in delta:\n\t\t\tx0 = self.x0\n\t\t\ty0 = self.y0\n\t\t\ttheta0 = self.theta0\n\t\t\tp = np.array([x0, y0, theta0])\n\n\t\t\tfor i in range(self.num_steps):\n\t\t\t\tx0 += v*cos(theta0)*dt\n\t\t\t\ty0 += v*sin(theta0)*dt\n\t\t\t\ttheta0 += v*tan(d)*dt/self.L\n\t\t\t\tp=np.vstack((p, np.array([x0, y0, theta0])))\n\t\t\t# Adding the motion primitive array to the list\n\t\t\tself.motion_primitives.append(p)",
"def calculate_trajectory(self, position, velocity, acceleration,\n deltaV, deltaD):\n # Initial/End conditions\n initS = (position[0], velocity[0], acceleration[0])\n initD = (position[1], velocity[1], acceleration[1])\n\n # TODO: find proper boundary condition for dS\n dS = ((2 * initS[1] + deltaV) / 2) * self.deltaT\n\n endS = (initS[0] + dS, initS[1] + deltaV, 0)\n endD = (initD[0] + deltaD, 0, 0)\n\n # Calculate coefficients\n coeff_S = self.calculate_coefficients(initS, endS)\n coeff_D = self.calculate_coefficients(initD, endD)\n\n # Calculate trajectory\n trajectory = self._get_trajectory(coeff_S, coeff_D)\n\n return trajectory",
"def gen_ping(self, robot_position, robot_yaw):\n heading = self.__get_heading(robot_position, robot_yaw)\n shift_a, shift_b = self.__get_shifts(heading)\n # generate sin waves\n ref = self.__generate_sin()\n a = self.__generate_sin(shift_a)\n b = self.__generate_sin(shift_b)\n return np.vstack((a, ref, b, ref)), heading",
"def pub_goal_vehicle_pose(self):\n\n header = Header()\n header.stamp = rospy.Time.now()\n\n position = Point(20.5, -10, -85)\n # position = Point(20.5, -10, -85)\n yaw = pi\n\n # Converting yaw to quaternion\n # See https://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles\n # For better intuition about quaternions: https://eater.net/quaternions\n orientation = Quaternion(*rpy_to_quaterion(0, 0, yaw))\n pose = Pose(position, orientation)\n\n pose_stamped_msg = PoseStamped(header, pose)\n self.pub_vehicle_pose.publish(pose_stamped_msg)",
"def position_trajectory(self):\n return self._read(MX_POSITION_TRAJECTORY)",
"def target_position(self, time):\n\n x_pos = self.r*sin(self.w*time)+self.ar_tag_pos[0]\n y_pos = self.r*cos(self.w*time)+self.ar_tag_pos[1]\n z_pos = self.ar_tag_pos[2]\n # print(x_pos,y_pos)\n # raise NotImplementedError\n return np.array([x_pos,y_pos,z_pos])",
"def motions(self, values: dict):\n self.yaw = values[\"yaw\"]\n self.pitch = values[\"pitch\"]\n self.roll = values[\"roll\"]\n self.sway = values[\"sway\"]\n self.surge = values[\"surge\"]\n self.heave = values[\"heave\"]",
"def generate(self): \r\n \r\n self.pfn={} # phase joint functions \r\n self.afn={} # anti phase joint functions\r\n\r\n ## Foot and hip -> Lateral motion\r\n foot_func=SinusoidFunction()\r\n foot_func.angular_frequency= self.parameters[\"step_frequency\"]\r\n foot_func.amplitude= self.parameters[\"foot_amplitude\"]\r\n foot_func.amplitude_offset= self.parameters[\"foot_amplitude_offset\"]\r\n foot_func.phase_offset= self.parameters[\"foot_phase_offset\"]\r\n self.pfn[\"l_foot_joint\"]=foot_func \r\n foot_func_af=foot_func.mirror()\r\n self.afn[\"l_foot_joint\"]=foot_func_af\r\n \r\n hip_func=SinusoidFunction()\r\n hip_func.angular_frequency= self.parameters[\"step_frequency\"]\r\n hip_func.amplitude= self.parameters[\"hip_amplitude\"]\r\n hip_func.amplitude_offset= self.parameters[\"hip_amplitude_offset\"]\r\n hip_func.phase_offset= self.parameters[\"hip_phase_offset\"]\r\n self.pfn[\"l_hip_joint\"]=hip_func\r\n hip_func_af=hip_func.mirror()\r\n self.afn[\"l_hip_joint\"]=hip_func_af\r\n \r\n ## Thigh, ankle and knee -> Frontal motion\r\n thigh_func=SinusoidFunction()\r\n thigh_func.angular_frequency= self.parameters[\"step_frequency\"]\r\n thigh_func.amplitude= self.parameters[\"thigh_amplitude\"]\r\n thigh_func.amplitude_offset= self.parameters[\"thigh_amplitude_offset\"]\r\n thigh_func.phase_offset= self.parameters[\"thigh_phase_offset\"]\r\n self.pfn[\"l_thigh_joint\"]=thigh_func\r\n thigh_func_af=thigh_func.mirror_keep_amplitude_offset()\r\n self.afn[\"l_thigh_joint\"]=thigh_func_af\r\n \r\n ankle_func=SinusoidFunction()\r\n ankle_func.angular_frequency= self.parameters[\"step_frequency\"]\r\n ankle_func.amplitude= self.parameters[\"ankle_amplitude\"]\r\n ankle_func.amplitude_offset= self.parameters[\"ankle_amplitude_offset\"]\r\n ankle_func.phase_offset= self.parameters[\"ankle_phase_offset\"]\r\n self.pfn[\"l_ankle_joint\"]=ankle_func\r\n ankle_func_af=ankle_func.mirror_keep_amplitude_offset()\r\n self.afn[\"l_ankle_joint\"]=ankle_func_af\r\n \r\n knee_func=SinusoidFunction()\r\n knee_func.angular_frequency= self.parameters[\"step_frequency\"]\r\n knee_func.amplitude= self.parameters[\"knee_amplitude\"]\r\n knee_func.amplitude_offset= self.parameters[\"knee_amplitude_offset\"]\r\n knee_func.phase_offset= self.parameters[\"knee_phase_offset\"]\r\n self.pfn[\"l_knee_joint\"]=knee_func\r\n knee_func_af=knee_func.mirror_keep_amplitude_offset()\r\n self.afn[\"l_knee_joint\"]=knee_func_af\r\n \r\n #f3=SinusoidFunction()\r\n #f3.angular_frequency=self.parameters[\"step_frequency\"]\r\n #f3.amplitude=self.parameters[\"step_amplitude\"]\r\n #f3.amplitude_offset=self.parameters[\"step_amplitude_offset\"]\r\n #self.pfn[\"l_thigh_joint\"]= f3\r\n #f33=f3.clone()\r\n #f33.amplitude_offset = self.parameters[\"ankle_amplitude_offset\"]\r\n #f33.amplitude = self.parameters[\"ankle_amplitude\"]\r\n #self.pfn[\"l_ankle_joint\"]=f33\r\n #f4=f3.mirror()\r\n ##f4.amplitude_offset -= 0.4\r\n #self.pfn[\"l_knee_joint\"]=f4\r\n \r\n #f5=f3.mirror_keep_amplitude_offset()\r\n #self.afn[\"l_thigh_joint\"]=f5\r\n \r\n #f6=f33.mirror_keep_amplitude_offset()\r\n #self.afn[\"l_ankle_joint\"]=f6\r\n #f7=f5.mirror()\r\n ##f7.amplitude_offset -= 0.4\r\n #self.afn[\"l_knee_joint\"]=f7\r\n \r\n self.generate_right()\r\n \r\n self.show()",
"def target_position(self, time):\n # get joint positions and use fk to get end effector position?\n # ar_tag from topic\n\n cur_pos = self.target_velocity(time)*time + self.start_pos\n\n self.points_generated.append(cur_pos)\n #print(self.start_pos)\n # print(cur_pos)\n return cur_pos",
"def yaw(vec):\n\n return math.atan(math.sqrt(math.pow(vec[0], 2)\n + math.pow(vec[1], 2)) / vec[2])",
"def get_control_input(self, dt):\n \n desired_z = 3.0\n actual_z = self.internal_state.transform.translation.z\n \n # Convert to quaternion object for use by euler_from_quaternion()\n quaternion = np.array([self.internal_state.transform.rotation.x,\n self.internal_state.transform.rotation.y,\n self.internal_state.transform.rotation.z,\n self.internal_state.transform.rotation.w])\n \n # Determine the euler angles\n euler = euler_from_quaternion(quaternion)\n roll = euler[0]\n pitch = euler[1]\n yaw = euler[2]\n \n # Corrections for yaw wrap around\n if (not np.sign(yaw) == np.sign(self.yaw_old) and \n np.abs(np.abs(yaw) - np.pi) < 0.2):\n # Add or subtract 2 pi depending on crossing direction\n self.yaw_old = self.yaw_old + np.sign(yaw) * 2 * np.pi \n \n \n # Determine current yaw rate for yaw rate controller\n yaw_rate = (yaw - self.yaw_old) / dt\n \n # After use, update the old yaw value with the current yaw\n self.yaw_old = yaw\n \n # Determine current climb rate\n climb_rate = (actual_z - self.z_old) / dt\n \n # After use, update the old altitude with the current altitude\n self.z_old = actual_z\n \n # Find the errors between desired and actual signals\n err_roll = self.desired_roll - roll\n err_pitch = self.desired_pitch - pitch\n err_yaw_rate = self.desired_yaw_rate - yaw_rate\n err_climb_rate = self.desired_climb_rate - climb_rate\n \n # Set the hover motor speed\n motor_control = self.hover_speed * self.unit_hover_cmd\n \n # Roll controller\n motor_control += self.roll_Pgain * self.unit_roll_cmd * err_roll\n \n # Pitch controller\n motor_control += self.roll_Pgain * self.unit_pitch_cmd * err_pitch\n \n # Yaw rate controller (assuming small angles)\n motor_control += self.yaw_rate_Pgain * self.unit_yaw_cmd * err_yaw_rate\n \n # Climb rate controller\n motor_control += ((self.climb_Pgain * err_climb_rate +\n self.climb_Igain * self.internal_climb_error +\n self.climb_Dgain / dt * (err_climb_rate - \n self.internal_old_climb_rate_error)) * \n self.unit_hover_cmd)\n \n # Update the cumulative errors for integration\n self.internal_roll_error += err_roll * dt\n self.internal_pitch_error += err_pitch * dt\n self.internal_yaw_error += err_yaw_rate * dt\n self.internal_climb_error += err_climb_rate * dt\n \n # Update old error with current errors for differentiation\n self.internal_old_roll_error = err_roll\n self.internal_old_pitch_error = err_pitch\n self.internal_old_yaw_rate_error = err_yaw_rate\n self.internal_old_climb_rate_error = err_climb_rate\n \n \n # Return the minimum of the \n return np.clip(motor_control, self.motor_cmd_min, self.motor_cmd_max)",
"def _get_next_velocity(self):\n\n self._predict_state()\n\n # curr = pos_quat_to_euler(self.curr_quat)\n dest = pos_quat_to_euler(self.dest_quat_predict)\n error = self.calc_error(self.dest_quat_predict)\n # TODO error should be computed for phi, th axis individually\n\n # TODO recommend_velocity to reach desired setpoint at a given velocity\n phi_vel = self.motor_phi.recommend_velocity(dest[0])\n th_vel = self.motor_th .recommend_velocity(dest[1])\n\n if error < 0.05:\n return 0.0, 0.0\n\n # TODO this is lame\n #scale = error * 4\n #phi_vel = scale * phi_vel\n #th_vel = scale * th_vel\n\n return phi_vel, th_vel\n\n # TODO PID (control algo)... or should it be handled closer to motors?\n # TODO Path planning\n # TODO Velocity-accel curve to estimate time required to get to point\n # - Cache its integral and use as lookup to estimate if we can get\n # to point without overshoot"
]
| [
"0.7423539",
"0.66624177",
"0.6440654",
"0.62494177",
"0.62406516",
"0.62399036",
"0.6076807",
"0.6071118",
"0.59952617",
"0.59462184",
"0.59445816",
"0.5938334",
"0.5935856",
"0.59122837",
"0.5894206",
"0.5880433",
"0.58785534",
"0.5867887",
"0.5834048",
"0.58334523",
"0.5824219",
"0.5807007",
"0.579903",
"0.5733851",
"0.5733481",
"0.57291615",
"0.5719484",
"0.5713415",
"0.57103413",
"0.57094026"
]
| 0.7049658 | 1 |
Generate vertical acceleration (thrust) command | def altitude_control(self, altitude_cmd,
                         vertical_velocity_cmd,
                         altitude,
                         vertical_velocity,
                         attitude,
                         acceleration_ff=0.0):
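    # Target climb rate: P term on the altitude error plus the commanded vertical velocity (feed-forward)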
    hdot_cmd = self.Kp_alt * (altitude_cmd - altitude) + vertical_velocity_cmd
    # Limit the ascent/descent rate
    hdot_cmd = np.clip(hdot_cmd, -self.max_descent_rate, self.max_ascent_rate)
    acceleration_cmd = acceleration_ff + self.Kp_hdot*(hdot_cmd - vertical_velocity)
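    # R33 = cos(roll)*cos(pitch) is the (3,3) element of the body-to-world rotation;
    # dividing by it converts the required vertical acceleration into collective thrust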
    R33 = np.cos(attitude[0]) * np.cos(attitude[1])
    thrust = DRONE_MASS_KG * acceleration_cmd / R33
    if thrust > MAX_THRUST:
        thrust = MAX_THRUST
    elif thrust < 0.0:
        thrust = 0.0
    return thrust | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def acceleration(v,u,t):\n return ((v-u)/t)",
"def cmd_velocity(self, vn, ve, vd, heading):\n pass",
"def computeTrajectoryWithDecceleration(self, cmd_vel, current_speed, delay_time, time_applied, time_decc):\n accs = [self.robot.acc_x, self.robot.acc_th, self.robot.dacc_x,self.robot.dacc_th ]\n\n #project forwards in time\n sim_interval = self.sim_interval\n sim_interval_sq = sim_interval*sim_interval\n cmd_vel = list(cmd_vel)\n current_speed = list(current_speed)\n\n max_acc_x = self.robot.max_acc_x\n max_acc_th = self.robot.max_acc_th\n max_decc_x = self.robot.max_dacc_x\n max_decc_th = self.robot.max_dacc_th\n\n\n code = \"\"\"\n double x, y, th;\n x = y = th = 0;\n\n double vx, vth;\n vx = current_speed[0];\n vth = current_speed[1];\n\n double cmd_x = cmd_vel[0];\n double cmd_th = cmd_vel[1];\n\n //are we accelerating or decelerating?\n double acc_x;\n double acc_th;\n\n bool b_ax = false;\n if (cmd_x > vx) {\n acc_x = accs[0];\n b_ax = true;\n } else {\n acc_x = accs[2];\n acc_x = -1*acc_x;\n }\n\n bool b_ath = false;\n if (cmd_th > vth) {\n acc_th = accs[1];\n b_ath = true;\n } else {\n acc_th = -1* (double) accs[3];\n\n }\n\n py::list traj;\n\n //delay at the beginning\n for (double i=0; i<=delay_time; i+=sim_interval) {\n //check if we have already attained our velocity\n if (fabs(vx - (double) cmd_x) < 1e-6) vx = cmd_x;\n\n //check if we have exceeded our target\n if (b_ax && (vx > cmd_x )) vx = cmd_x;\n if (!b_ax && (vx < cmd_x )) vx = cmd_x;\n\n if (fabs(vth - (double) cmd_th) < 1e-6) vth = cmd_th;\n if (b_ath && (vth > cmd_th )) vth = cmd_th;\n if (!b_ath && (vth < cmd_th )) vth = cmd_th;\n\n x += vx*cos(th)*sim_interval;\n y += vx*sin(th)*sim_interval;\n th += vth*sim_interval;\n\n py::list new_pt;\n new_pt.append(x);\n new_pt.append(y);\n new_pt.append(th);\n traj.append(new_pt);\n }\n\n //application of the command\n for (double i=0; i<=time_applied; i+=sim_interval) {\n vx += acc_x*sim_interval;\n\n //check if we have already attained our velocity\n if (fabs(vx - (double) cmd_x) < 1e-6) vx = cmd_x;\n\n //check if we have exceeded our target\n if (b_ax && (vx > cmd_x )) vx = cmd_x;\n if (!b_ax && (vx < cmd_x )) vx = cmd_x;\n\n vth += acc_th*sim_interval;\n\n if (fabs(vth - (double) cmd_th) < 1e-6) vth = cmd_th;\n if (b_ath && (vth > cmd_th )) vth = cmd_th;\n if (!b_ath && (vth < cmd_th )) vth = cmd_th;\n\n x += vx*cos(th)*sim_interval;\n y += vx*sin(th)*sim_interval;\n th += vth*sim_interval;\n\n py::list new_pt;\n new_pt.append(x);\n new_pt.append(y);\n new_pt.append(th);\n traj.append(new_pt);\n }\n\n //delay before decellerating\n for (double i=0; i<=delay_time; i+=sim_interval) {\n //check if we have already attained our velocity\n if (fabs(vx - (double) cmd_x) < 1e-6) vx = cmd_x;\n\n //check if we have exceeded our target\n if (b_ax && (vx > cmd_x )) vx = cmd_x;\n if (!b_ax && (vx < cmd_x )) vx = cmd_x;\n\n if (fabs(vth - (double) cmd_th) < 1e-6) vth = cmd_th;\n if (b_ath && (vth > cmd_th )) vth = cmd_th;\n if (!b_ath && (vth < cmd_th )) vth = cmd_th;\n\n x += vx*cos(th)*sim_interval;\n y += vx*sin(th)*sim_interval;\n th += vth*sim_interval;\n\n py::list new_pt;\n new_pt.append(x);\n new_pt.append(y);\n new_pt.append(th);\n traj.append(new_pt);\n }\n\n //now we apply a stop\n b_ax = false;\n if (0 > vx) {\n acc_x = accs[0];\n b_ax = true;\n } else {\n acc_x = -1 * (double) accs[2];\n }\n\n b_ath = false;\n if (0 > vth) {\n acc_th = accs[1];\n b_ath = true;\n } else {\n acc_th = -1 * (double) accs[3];\n }\n\n for (double i=0; i<=time_decc; i+=sim_interval) {\n vx += acc_x*sim_interval;\n\n //check if we have already attained our velocity\n if (fabs(vx) < 1e-6) vx = 
0;\n\n //check if we have exceeded our target\n if (b_ax && (vx > 0 )) vx = 0;\n if (!b_ax && (vx < 0 )) vx = 0;\n\n vth += acc_th*sim_interval;\n\n if (fabs(vth) < 1e-6) vth = 0;\n if (b_ath && (vth > 0 )) vth = 0;\n if (!b_ath && (vth < 0 )) vth = 0;\n\n x += vx*cos(th)*sim_interval;\n y += vx*sin(th)*sim_interval;\n th += vth*sim_interval;\n\n py::list new_pt;\n new_pt.append(x);\n new_pt.append(y);\n new_pt.append(th);\n traj.append(new_pt);\n\n\n bool stopped = (fabs(vth) < 1e-2) && (fabs(vx) < 1e-2);\n if (stopped) break;\n\n }\n\n return_val = traj;\n \"\"\"\n traj = weave.inline(code, ['cmd_vel', 'current_speed', 'accs','sim_interval', 'delay_time', 'sim_interval_sq',\n 'time_applied', 'max_acc_x', 'max_acc_th', 'max_decc_x', 'max_decc_th',\n 'time_decc'], type_converters=converters.blitz)\n\n return traj",
"def vertical_hover(t):\n\n # define maximum acceleration and velocity\n v_max = 2.0\n a_max = 2.0\n yaw = 0.0\n yawdot = 0.0\n\n init_pos = np.zeros(3)\n acc = np.zeros(3)\n vel = np.zeros(3)\n\n # acceleration\n if t <= v_max/a_max:\n dt = t\n acc[2] = a_max\n vel = acc * dt\n pos = 0.5 * acc * dt**2\n\n # constant velocity\n elif t <= 2 * v_max / a_max:\n dt = t - v_max / a_max\n vel[2] = v_max\n pos = np.array([0, 0, (v_max**2 / (2 * a_max)) + (v_max * dt)])\n\n # slow down\n # TODO Test the acceleration, deceleration sequences\n elif t <= 3 * v_max / a_max:\n dt = t - 2 * v_max / a_max\n acc[2] = -a_max\n vel = np.array([0, 0, v_max]) + acc * dt\n pos = np.array([0, 0, (3 * v_max**2 / (2 * a_max)) + ((v_max * dt) + (0.5 * acc * dt**2))])\n\n # hover\n else:\n pos = np.array([0, 0, 2*v_max**2 / a_max])\n\n pos = init_pos + pos\n FinalState = namedtuple('FinalState', 'pos vel acc yaw yawdot')\n\n return FinalState(pos, vel, acc, yaw, yawdot)",
"def _twist_callback(self, cmd):\n self.set_velocity(cmd.linear.x, cmd.angular.z)",
"def acceleration(self):\n ux,uy = np.gradient(self._obj['u'],self._obj['x'],self._obj['y'],axis=(0,1))\n vx,vy = np.gradient(self._obj['v'],self._obj['x'],self._obj['y'],axis=(0,1))\n \n ax = self._obj['u']*ux + self._obj['v']*uy\n ay = self._obj['u']*vx + self._obj['v']*vy\n\n self._obj['w'] = xr.DataArray(np.sqrt(ax**2+ay**2), dims=['x', 'y','t'])\n\n if len(self._obj.attrs['units']) == 4:\n vel_units = self._obj.attrs['units'][-1]\n self._obj.attrs['units'].append(f'{vel_units}^2')\n else:\n vel_units = self._obj.attrs['units'][-2]\n self._obj.attrs['units'][-1] = (f'{vel_units}^2')\n\n\n return self._obj",
"def update_vehicle_state(self):\n #vel = self.v + self.commands['throttle']/self.m/self.simulation_rate\n\n vel = self.commands['speed']\n steer = self.commands['steering_angle']\n\n if steer > 0.5:\n steer_cmd = 25\n elif steer < -0.5:\n steer_cmd = 185\n else:\n steer_cmd = 100 - 160*steer ##linear\n #steer_cmd = 100 - 640*steer**3 ##cubic\n\n #rospy.logwarn('Velocity command is '+ str(vel))\n # 130 is the lowest vel_cmd that makes the truck move.\n if vel > 12:\n vel_cmd = 161\n elif vel < 0:\n vel_cmd = 0\n else:\n vel_cmd = 3.77*vel + 117\n # rospy.logerr('throttle: ' + str(throttle))\n hw_port.set_command(vel_cmd,steer_cmd,self.vehicle_id)",
"def accelerate(self):\n\t\tself.velocity += self.direction * self.ACCELERATION",
"def CreateMotionKernel(kernel):\r\n TrajSize = 64\r\n anxiety = 0.2* np.random.rand()\r\n numT = 10\r\n MaxTotalLength =10\r\n TotLength = 0\r\n #term determining, at each sample, the strengh of the component leating towards the previous position\r\n centripetal = 0.7 * np.random.rand()\r\n #term determining, at each sample, the random component of the new direction\r\n gaussianTerm =10 * np.random.rand()\r\n #probability of having a big shake, e.g. due to pressing camera button or abrupt hand movements\r\n freqBigShakes = 3 *np.random.rand()\r\n #v is the initial velocity vector, initialized at random direction\r\n init_angle = 360 * np.random.rand()\r\n #initial velocity vector having norm 1\r\n v0 = math.cos(init_angle / 180.0 * math.pi) + 1.0j * math.sin(init_angle/ 180.0 * math.pi)\r\n #the speed of the initial velocity vector\r\n v = v0* MaxTotalLength/(numT-1);\r\n\r\n if anxiety > 0:\r\n v = v0 * anxiety\r\n # initialize the trajectory vector\r\n x = np.zeros(numT,dtype = np.complex);\r\n\r\n abruptShakesCounter = 0\r\n for t in range(numT-1):\r\n # determine if there is an abrupt (impulsive) shake\r\n if np.random.rand() < freqBigShakes * anxiety:\r\n #if yes, determine the next direction which is likely to be opposite to the previous one\r\n nextDirection = 2 * v * (np.exp( 1.0j * (math.pi + (np.random.rand() - 0.5))))\r\n abruptShakesCounter = abruptShakesCounter + 1\r\n else:\r\n nextDirection=0\r\n\r\n #determine the random component motion vector at the next step\r\n dv = nextDirection + anxiety * (gaussianTerm * (np.random.randn()- + 1.0j * np.random.randn()) - centripetal * x[t]) * (MaxTotalLength / (numT - 1))\r\n v = v + dv\r\n # velocity vector normalization\r\n v = (v / np.abs(v)) * MaxTotalLength / (numT - 1)\r\n #print v\r\n x[t + 1] = x[t] + v\r\n # compute total length\r\n #TotLength=TotLength+np.abs(x([t+1]-x[t]))\r\n x_real = []\r\n x_imag = []\r\n for elem in x:\r\n x_real.append(elem.real)\r\n x_imag.append(elem.imag)\r\n x_real = np.round((x_real - np.min(x_real))/(np.max(x_real) - np.min(x_real)) * kernel-0.5)\r\n x_imag = np.round((x_imag - np.min(x_imag))/(np.max(x_imag) - np.min(x_imag)) * kernel-0.5)\r\n for idx in range(len(x_real)):\r\n if x_real[idx] < 0:\r\n x_real[idx] = 0\r\n if x_imag[idx] < 0:\r\n x_imag[idx] = 0\r\n if x_real[idx] > kernel -1:\r\n x_real[idx] = kernel -1\r\n if x_imag[idx] > kernel -1:\r\n x_imag[idx] = kernel -1\r\n\r\n ker = np.zeros((kernel, kernel))\r\n for idx in range(len(x_real)):\r\n ker[np.int(x_real[idx])][np.int(x_imag[idx])] = 1\r\n ker = ker/np.sum(np.sum(ker))\r\n return ker",
"def calculate_timestep(self, control_inputs):\r\n rcs_disp = [0, 0]\r\n if control_inputs[4]:\r\n if control_inputs[0]: rcs_disp[0] += 1\r\n if control_inputs[1]: rcs_disp[0] -= 1\r\n if control_inputs[2]: rcs_disp[1] += 1\r\n if control_inputs[3]: rcs_disp[1] -= 1\r\n else:\r\n if control_inputs[0]:\r\n self.delta_v += self.accel*0.1\r\n self.thrust_angle = self.heading\r\n if control_inputs[1]:\r\n self.delta_v -= self.accel*0.1\r\n self.thrust_angle = self.heading\r\n if control_inputs[3]: self.delta_omega += self.accel*0.1\r\n if control_inputs[2]: self.delta_omega -= self.accel*0.1\r\n if control_inputs[5]:\r\n self.delta_omega *= 0.5\r\n self.delta_v *= 0.5\r\n if control_inputs[0] == False and control_inputs[1] == False:\r\n self.delta_v = 0\r\n if control_inputs[3] == False and control_inputs[2] == False:\r\n self.delta_omega = 0\r\n \r\n self.vel += self.delta_v\r\n self.omega += self.delta_omega\r\n if self.vel > 5: self.vel = 5\r\n if self.vel < -5: self.vel = -5\r\n if self.omega > 5: self.omega = 5\r\n if self.omega < -5: self.omega = -5\r\n if abs(self.vel) < 0.1: self.vel = 0\r\n if abs(self.omega) < 0.1: self.omega = 0\r\n if control_inputs[6]:\r\n self.vel = self.vel*0.9\r\n self.omega = self.omega*0.9\r\n self.heading += self.omega*self.time_step\r\n self.position[0] += (\r\n self.vel*self.time_step*cos(-radians(self.thrust_angle))\r\n + rcs_disp[0]*cos(-radians(self.heading))\r\n - rcs_disp[1]*sin(-radians(self.heading)))\r\n self.position[1] += (\r\n self.vel*self.time_step*sin(-radians(self.thrust_angle))\r\n + rcs_disp[0]*sin(-radians(self.heading))\r\n -rcs_disp[1]*cos(-radians(self.heading)))\r\n return self.position, self.vel, self.heading, self.omega, self.thrust_angle",
"def VerletHope2(r, v, beta,dt,R_dust,M_dust):\n # Deceptively simple (read about Velocity Verlet on wikipedia)\n r_new = r + v*dt + calculate_acceleration2(r,v,beta,omega,R_dust,M_dust)*dt**2/2\n v_new = v + (calculate_acceleration2(r,v,beta,omega,R_dust,M_dust) + calculate_acceleration2(r_new,v,beta,omega,R_dust,M_dust))/2 * dt\n \n return (r_new, v_new)",
"def executeTrajectory():\n driveStraight(1, 0.6)\n rotate(0.25)\n driveStraight(1, .45)\n rotate(-0.25)",
"def descent_acc(thrust, drone_mass, g=9.81, drag = 0.):\n output = (-thrust - drone_mass * g + drag) / drone_mass\n return output",
"def CalculateFeedForwardVoltage(leftSide, velocity, acceleration):\r\n if acceleration >= DRIVETRAIN_MAX_ACCELERATION:\r\n print(\"WARNING: The acceration is larger than the max!!\")\r\n\r\n if velocity >= DRIVETRAIN_MAX_VELOCITY:\r\n print(\"WARNING: The velocity is larger than the max!!\")\r\n\r\n if leftSide:\r\n kV = DRIVETRAIN_LEFT_KV\r\n kA = DRIVETRAIN_LEFT_KA\r\n VIntercept = DRIVETRAIN_LEFT_V_INTERCEPT\r\n else:\r\n kV = DRIVETRAIN_RIGHT_KV\r\n kA = DRIVETRAIN_RIGHT_KA\r\n VIntercept = DRIVETRAIN_RIGHT_V_INTERCEPT\r\n\r\n return kV * velocity + kA * acceleration + VIntercept",
"def roll_pitch_controller(self, acceleration_cmd, attitude, thrust_cmd):\n #Calculate rotation matrix \n R = euler2RM(attitude[0], attitude[1], attitude[2])\n c_d = thrust_cmd/DRONE_MASS_KG\n \n if thrust_cmd > 0.0:\n target_R13 = -np.clip(acceleration_cmd[0].item()/c_d, -self.max_tilt, self.max_tilt) #-min(max(acceleration_cmd[0].item()/c_d, -self.max_tilt), self.max_tilt)\n target_R23 = -np.clip(acceleration_cmd[1].item()/c_d, -self.max_tilt, self.max_tilt) #-min(max(acceleration_cmd[1].item()/c_d, -self.max_tilt), self.max_tilt)\n \n p_cmd = (1/R[2, 2]) * \\\n (-R[1, 0] * self.Kp_roll * (R[0, 2]-target_R13) + \\\n R[0, 0] * self.Kp_pitch * (R[1, 2]-target_R23))\n q_cmd = (1/R[2, 2]) * \\\n (-R[1, 1] * self.Kp_roll * (R[0, 2]-target_R13) + \\\n R[0, 1] * self.Kp_pitch * (R[1, 2]-target_R23))\n else: # Otherwise command no rate\n print(\"negative thrust command\")\n p_cmd = 0.0\n q_cmd = 0.0\n thrust_cmd = 0.0\n return np.array([p_cmd, q_cmd])",
"def _thruster_vap(motions: Dict[str, float]) -> int:\n heave, pitch, roll = motions[\"heave\"], motions[\"pitch\"], motions[\"roll\"]\n\n if heave:\n value = heave\n\n elif pitch:\n value = pitch\n\n elif roll:\n value = roll\n\n else:\n value = CONTROL_NORM_IDLE\n\n return Converter._to_thruster_value(value)",
"def verlet_next_vel(vel_t,accel_t,accel_t_plus_dt,dt):\n vel_t_plus_dt = vel_t.copy()\n vel_t_plus_dt += 0.5 * (accel_t + accel_t_plus_dt) * dt\n return vel_t_plus_dt",
"def thrust(self, evt=None):\n self.dbgprint(\"thrust(%r)\"%evt)",
"def create_velocity_column(self, perf):\n perf[\"velocity\"] = perf.index\n perf[\"velocity\"] = perf.moving * np.sqrt(np.power(perf.x.diff(), 2) + np.power(perf.y.diff(), 2)) / perf.velocity.diff()\n perf[\"velocity\"] = perf[\"velocity\"].fillna(0)\n return perf[\"velocity\"]",
"def UpdateCromer(self, deltaT):\n self.velocity += self.acceleration * deltaT\n self.position += self.velocity * deltaT",
"def accelerate(self, vector):\n (self.angle, self.speed) = addVectors((self.angle, self.speed), vector)",
"def eval_accel(self,t,endBehavior='halt') -> Vector:\n res = Trajectory.deriv_state(self,t,endBehavior)\n return res[len(res)//2:]",
"def add_tilt(df):\n if \"T_vehicle_attitude_0__NF_body_z_axis_x\" not in df:\n add_vehicle_z_axis(df)\n\n x = pd.Series(np.zeros(df.shape[0]), index=df[\"timestamp\"], name=\"x\")\n y = pd.Series(np.zeros(df.shape[0]), index=df[\"timestamp\"], name=\"y\")\n z = pd.Series(np.ones(df.shape[0]), index=df[\"timestamp\"], name=\"z\")\n\n tilt = mpd.series_dot(\n x,\n y,\n z,\n df[\"T_vehicle_attitude_0__NF_body_z_axis_x\"],\n df[\"T_vehicle_attitude_0__NF_body_z_axis_y\"],\n df[\"T_vehicle_attitude_0__NF_body_z_axis_z\"],\n )\n tilt.where(\n tilt < 1, 1, inplace=True\n ) # ensure that angle 1 is never exceeded\n df[\"T_vehicle_attitude_0__NF_tilt\"] = tilt.values\n df[\"T_vehicle_attitude_0__NF_tilt\"] = df[\n \"T_vehicle_attitude_0__NF_tilt\"\n ].apply(np.arccos)",
"def test_accel(self):\n experiment = Experiment(TasksMock())\n experiment.accel(80, 48, 97, time.time())\n experiment.accel(3, 42, 79, time.time())\n self.assertRegexpMatches(\n experiment.get_output(),\n '^[0-9]* 80 48 97\\n[0-9]* 3 42 79\\n$'\n )",
"def exec_velocity_cmd(self, cmd):\n joint_names = self.joint_names()\n\n velocity_command = dict(zip(joint_names, cmd))\n\n self.set_joint_velocities(velocity_command)",
"def velocity(self, *args):\n self.lmp.command('velocity' + (' {}' * len(args)).format(*args))",
"def calc_vertical_velocity(self):\n # Note: again we make use of the automatically added 'grid' attribute, and the stored coef_w attribute.\n # Here we arbitrarily set the vertical velocity to increase in the\n # y-direction (not very realistic).\n return self.coef_w[0] * self.grid.y[None, :] + self.coef_w[1]",
"def get_throttle_accel(vel):\n\t# Thanks to Chip https://samuelpmish.github.io/notes/RocketLeague/ground_control/\n\tvelocities = \t[0, 1400, 1410, 2300]\n\taccelerations = [1600, 160, 0, 0 ]\n\treturn multilerp(velocities, accelerations, vel)",
"def calc_accel(vel_data, dt):\n\n ax = np.gradient(vel_data[:, 0], dt)\n ay = np.gradient(vel_data[:, 1], dt)\n\n return np.c_[ax, ay]",
"def motors_update(t, x, u, params={}):\n tm = params['motor']['tm'] # Motor torque constant\n cr = params['motor']['cr'] # Motor speed constant\n wb = params['motor']['wb'] # Motor base speed\n\n u = np.clip(u / 199999, 0, 1)\n accel = [(cr * throttle + wb - speed) / tm for throttle, speed in zip(u, x)]\n\n return accel"
]
| [
"0.6242462",
"0.60378283",
"0.5884421",
"0.57984126",
"0.5753355",
"0.56302655",
"0.5609823",
"0.5575565",
"0.55670893",
"0.5563418",
"0.5552017",
"0.55361015",
"0.553391",
"0.5465061",
"0.54566216",
"0.5452906",
"0.5438225",
"0.54032683",
"0.5359112",
"0.5345518",
"0.53343374",
"0.5334089",
"0.5313764",
"0.5313594",
"0.5302625",
"0.5301702",
"0.5290637",
"0.5275782",
"0.52742237",
"0.52726495"
]
| 0.619276 | 1 |
Generate the rollrate and pitchrate commands in the body frame | def roll_pitch_controller(self, acceleration_cmd, attitude, thrust_cmd):
    # Calculate rotation matrix
    R = euler2RM(attitude[0], attitude[1], attitude[2])
    c_d = thrust_cmd/DRONE_MASS_KG
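    # Desired tilt: normalize the commanded lateral accelerations by the collective
    # acceleration c_d to get the target R13/R23 elements, clipped to the maximum tilt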
    if thrust_cmd > 0.0:
        target_R13 = -np.clip(acceleration_cmd[0].item()/c_d, -self.max_tilt, self.max_tilt)  # -min(max(acceleration_cmd[0].item()/c_d, -self.max_tilt), self.max_tilt)
        target_R23 = -np.clip(acceleration_cmd[1].item()/c_d, -self.max_tilt, self.max_tilt)  # -min(max(acceleration_cmd[1].item()/c_d, -self.max_tilt), self.max_tilt)
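        # P control on the rotation-matrix elements, mapped into body-frame
        # roll/pitch rates through the current attitude R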
        p_cmd = (1/R[2, 2]) * \
                (-R[1, 0] * self.Kp_roll * (R[0, 2]-target_R13) + \
                 R[0, 0] * self.Kp_pitch * (R[1, 2]-target_R23))
        q_cmd = (1/R[2, 2]) * \
                (-R[1, 1] * self.Kp_roll * (R[0, 2]-target_R13) + \
                 R[0, 1] * self.Kp_pitch * (R[1, 2]-target_R23))
    else:  # Otherwise command no rate
        print("negative thrust command")
        p_cmd = 0.0
        q_cmd = 0.0
        thrust_cmd = 0.0
    return np.array([p_cmd, q_cmd]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def base_roll_pitch_yaw_rate(self):\n return np.asarray([self._robot_state.roll_rate, self._robot_state.pitch_rate, self._robot_state.yaw_rate])",
"def base_roll_pitch_yaw(self):\n #raise NotImplementedError('Not yet implemented!')\n return np.asarray([self._robot_state.roll, self._robot_state.pitch, self._robot_state.yaw])",
"def translate_to_rpc(self, rpcAngularVelocityBody):\n\n \n \n \n rpcAngularVelocityBody.roll_rad_s = self.roll_rad_s\n \n \n \n \n \n rpcAngularVelocityBody.pitch_rad_s = self.pitch_rad_s\n \n \n \n \n \n rpcAngularVelocityBody.yaw_rad_s = self.yaw_rad_s",
"def __init__(self, rate):\n super(RandomWander, self).__init__()\n self.iteration = 0\n self.rate = rate\n self.speed = 0\n self.heading = 0",
"def gen_ping(self, robot_position, robot_yaw):\n heading = self.__get_heading(robot_position, robot_yaw)\n shift_a, shift_b = self.__get_shifts(heading)\n # generate sin waves\n ref = self.__generate_sin()\n a = self.__generate_sin(shift_a)\n b = self.__generate_sin(shift_b)\n return np.vstack((a, ref, b, ref)), heading",
"def body_rate_control(self, body_rate_cmd, body_rate):\n Kp_rate = np.array([self.Kp_p, self.Kp_q, self.Kp_r])\n rate_error = body_rate_cmd - body_rate\n \n moment_cmd = MOI * np.multiply(Kp_rate, rate_error)\n if np.linalg.norm(moment_cmd) > MAX_TORQUE:\n moment_cmd = moment_cmd*MAX_TORQUE/np.linalg.norm(moment_cmd)\n return moment_cmd",
"def gen():\r\n\r\n output = \"\"\r\n frame = 12\r\n prior = 0\r\n second = 0\r\n\r\n while frame > 0:\r\n frame -= .5\r\n roll = randrange(11 - prior)\r\n\r\n # If strike is rolled:\r\n if roll == 10:\r\n output += \"X\"\r\n frame -= .5\r\n prior = 0\r\n second = 0\r\n\r\n # If spare is rolled:\r\n elif second == 1 and roll + prior == 10:\r\n output += \"/\"\r\n prior = 0\r\n second = 0\r\n\r\n # If not strike or spare:\r\n else:\r\n output += str(roll)\r\n prior = roll\r\n second = 1 - second\r\n\r\n # Deliminate frames:\r\n if second == 0:\r\n output += \"-\"\r\n\r\n return output[:-1]",
"def __init__(self):\n\n\t###\n\t#subscribers for takeoff and landing \n\n\tself.sub_land = rospy.Subscriber('/ardrone/land', \n Empty,\n self.land)\n\n\tself.sub_takeoff = rospy.Subscriber('/ardrone/takeoff', \n Empty,\n self.takeoff)\n\t###\n\n \n # Desired roll and pitch in radians, yaw rate in radians per second, and\n # climb rate in meters per second\n self.desired_roll = 0\n self.desired_pitch = 0\n self.desired_yaw_rate = 0\n self.desired_climb_rate = 0\n \n # Unit commands for controlling a quadrotor. Commands are in following \n # order: [front_left, front_right, rear_left, rear_right]\n self.unit_hover_cmd = np.array([1., 1., 1., 1.])\n self.unit_roll_cmd = np.array([1., -1., 1., -1.])\n self.unit_pitch_cmd = np.array([-1., -1., 1., 1.])\n self.unit_yaw_cmd = np.array([-1., 1., 1., -1.])\n \n # Onboard controller gains\n self.roll_Pgain = 2.5\n self.roll_Igain = 1.0\n self.roll_Dgain = 1.0\n \n self.pitch_Pgain = 2.5\n self.pitch_Igain = 1.0\n self.pitch_Dgain = 1.0\n \n self.yaw_rate_Pgain = 2.0\n self.yaw_rate_Igain = 0.0\n self.yaw_rate_Dgain = 0.0\n \n self.climb_Pgain = 35.0\n self.climb_Igain = 10.0\n self.climb_Dgain = 0.0\n \n # Motor speed which produces hover\n self.hover_speed = 70.0\n \n # Internal state\n self.internal_state = TransformStamped()\n self.internal_roll_error = 0.0\n self.internal_pitch_error = 0.0\n self.internal_yaw_error = 0.0\n self.internal_climb_error = 0.0\n \n self.internal_old_roll_error = 0.0\n self.internal_old_pitch_error = 0.0\n self.internal_old_yaw_rate_error = 0.0\n self.internal_old_climb_rate_error = 0.0\n \n self.yaw_old = 0.0\n self.z_old = 0.0\n \n # Motor speed envelope\n self.motor_cmd_min = 10\n self.motor_cmd_max = 100",
"def pitch(self, evt=None):\n self.dbgprint(\"pitch(%r)\"%evt)",
"def cmd_attitude_rate(self, yaw_rate, pitch_rate, roll_rate, collective):\n pass",
"def headingRx(self, inputs):\n result = {}\n ag = bpy.context.scene.objects[self.userid]\n for into in inputs:\n for i in into:\n emitterAgent = self.sim.agents[i]\n # eVel = emitterAgent.globalVelocity\n\n z = mathutils.Matrix.Rotation(-emitterAgent.arz, 4, 'Z')\n y = mathutils.Matrix.Rotation(-emitterAgent.ary, 4, 'Y')\n x = mathutils.Matrix.Rotation(-emitterAgent.arx, 4, 'X')\n\n rotation = x * y * z\n emitHead = Vector((0, 1, 0)) * rotation\n\n target = emitHead - ag.location\n\n z = mathutils.Matrix.Rotation(ag.rotation_euler[2], 4, 'Z')\n y = mathutils.Matrix.Rotation(ag.rotation_euler[1], 4, 'Y')\n x = mathutils.Matrix.Rotation(ag.rotation_euler[0], 4, 'X')\n\n rotation = x * y * z\n relative = target * rotation\n\n changez = math.atan2(relative[0], relative[1]) / math.pi\n changex = math.atan2(relative[2], relative[1]) / math.pi\n\n result[i] = changex\n return result",
"def roll(self):\n rate = rospy.Rate(50)\n while not rospy.is_shutdown():\n self.rc_lock.acquire()\n rc = self.rc_message\n self.rc_lock.release()\n\n px2_message = PX2()\n px2_message.Mode = 2\n px2_message.header.stamp = rospy.Time.now()\n\n if rc.enable:\n px2_message.TarSteeringAngle = rc.steering_angle\n\n # Mock rc angle\n self.px2_lock.acquire()\n px2_message = self.px2_message\n self.px2_lock.release()\n\n px2_message.TarAcce = rc.acceleration\n px2_message.Mode = 2\n if px2_message.TarAcce < 0:\n px2_message.DecToStop = 0\n else:\n px2_message.DecToStop = 1\n\n else:\n self.px2_lock.acquire()\n target_angle = self.px2_message.TarSteeringAngle\n target_speed = self.px2_message.TarAcce\n self.px2_lock.release()\n\n self.ecm_lock.acquire()\n current_speed = self.ecm_message.VehicleSpd\n self.ecm_lock.release()\n\n self.sas_lock.acquire()\n angle = self.sas_message.SteeringWheelAngle\n angle_sign = self.sas_message.SteeringWheelAngleSign\n if angle_sign == 1:\n angle = -angle\n self.sas_lock.release()\n\n self.inspva_lock.acquire()\n longitude = self.inspva_message.longitude\n latitude = self.inspva_message.latitude\n heading = self.inspva_message.azimuth\n self.inspva_lock.release()\n\n px2_message.TarAcce = target_acce \n px2_message.TarSteeringAngle = target_angle\n\n self.px2_pub.publish(px2_message)\n rate.sleep()",
"def __init__(self):\n\n # The Microsoft XBox 360 Wired controller has 11 buttons and 8 axes.\n # Buttons can be 0 (not pressed) or 1 (pressed)\n # Axes are floats and range between -1 and 1. Note that for LT and RT, their \"not pressed\" value is 1 and for the others it is 0. Cross keys only have values -1, 0, and 1. The others have be any value in between -1 and 1.\n num_buttons = 11\n num_axes = 8\n self.inputs = [0 for i in range(num_buttons + num_axes)]\n self.inputs[JoyInput.LT] = self.inputs[JoyInput.RT] = 1\n\n # Dictionary of saved inputs. If an input is not currently saved, you must set it to None.\n # For example, the LS_Y (\"left stick Y\") axis may be saved in self.saved[JoyInput.LS_Y]\n self.saved = {\n JoyInput.LS_Y: None,\n Joystick.RS_ANGLE: None,\n }\n\n # Field variables\n self.depth_state = None # stores the depth state\n self.depth_last_received = 0 # how long since the last depth state callback\n self.depth_pwm_input = 0 # tracks pwm given to depth thrusters\n\n # ROS Subscribers\n rospy.Subscriber(\"/joy\", Joy, self.joy_callback)\n rospy.Subscriber(Topic.YAW_STATE, Float64, self.yaw_state_callback)\n rospy.Subscriber(Topic.DEPTH_STATE, Float64, self.depth_state_callback)\n rospy.Subscriber(Topic.YAW_SETPOINT, Float64, self.yaw_setpoint_callback)\n rospy.Subscriber(Topic.DEPTH_SETPOINT, Int16, self.depth_setpoint_callback)\n\n # ROS Publishers\n # self.topics is a dictionary of dictionaries.\n # 'publisher' contains the rospy.Publisher()\n # 'msg' contains the Int16(), Float64(), or Bool() related to the publisher\n # Use self.publish() rather than using self.topics directly.\n self.topics = {\n Topic.YAW_PWM: {'publisher':rospy.Publisher(Topic.YAW_PWM, Int16, queue_size=10), 'msg':Int16()},\n Topic.YAW_PWM_FEEDBACK: {'publisher':rospy.Publisher(Topic.YAW_PWM_FEEDBACK, Int16, queue_size=10), 'msg':Int16()},\n Topic.YAW_PID: {'publisher':rospy.Publisher(Topic.YAW_PID, Bool, queue_size=10), 'msg':Bool()},\n Topic.YAW_SETPOINT: {'publisher':rospy.Publisher(Topic.YAW_SETPOINT, Float64, queue_size=10), 'msg':Float64()},\n\n Topic.DEPTH_PWM: {'publisher':rospy.Publisher(Topic.DEPTH_PWM, Int16, queue_size=10), 'msg':Int16()},\n Topic.DEPTH_PID: {'publisher':rospy.Publisher(Topic.DEPTH_PID, Bool, queue_size=10), 'msg':Bool()},\n Topic.DEPTH_SETPOINT: {'publisher':rospy.Publisher(Topic.DEPTH_SETPOINT, Int16, queue_size=10), 'msg':Int16()},\n }",
"def pitch(self, pitch):\n pass",
"def generate_motion_patters(self):\n\n\t\t# Motion primimtives for the forward direction.....................\n\t\td_del = 0.08\t\n\t\tdt = self.dt\n\t\tv = 2\t# Assuming a constant longitudinal velocity\n\t\tdelta = np.arange(-np.pi*self.max_steer/180, d_del + np.pi*self.max_steer/180, d_del)\n\t\tprint(\"Number of motion patterns in forward directon: {}\".format(len(delta)))\n\t\tfor d in delta:\n\t\t\tx0 = self.x0\n\t\t\ty0 = self.y0\n\t\t\ttheta0 = self.theta0\n\t\t\tp = np.array([x0, y0, theta0])\n\t\t\t\n\t\t\tfor i in range(self.num_steps):\n\t\t\t\tx0 += v*cos(theta0)*dt\n\t\t\t\ty0 += v*sin(theta0)*dt\n\t\t\t\ttheta0 += v*tan(d)*dt/self.L\n\t\t\t\tp = np.vstack((p,np.array([x0, y0, theta0])))\n\n\t\t\t# Adding the motion primitive array to the list\n\t\t\tself.motion_primitives.append(p)\n\n\t\t\n\t\t# Motion primitives for the backward direction ...................\n\t\td_del = 0.1\n\t\tv = -1.2\n\t\tdelta = np.arange(-np.pi*self.max_steer/180, d_del + np.pi*self.max_steer/180, d_del)\n\t\tprint(\"Number of motion patterns for the backward direction: {}\".format(len(delta)))\n\t\tfor d in delta:\n\t\t\tx0 = self.x0\n\t\t\ty0 = self.y0\n\t\t\ttheta0 = self.theta0\n\t\t\tp = np.array([x0, y0, theta0])\n\n\t\t\tfor i in range(self.num_steps):\n\t\t\t\tx0 += v*cos(theta0)*dt\n\t\t\t\ty0 += v*sin(theta0)*dt\n\t\t\t\ttheta0 += v*tan(d)*dt/self.L\n\t\t\t\tp=np.vstack((p, np.array([x0, y0, theta0])))\n\t\t\t# Adding the motion primitive array to the list\n\t\t\tself.motion_primitives.append(p)",
"def relative():\n def transposeRelative(token, lastPitch):\n \"\"\"\n Make a new relative pitch from token, if possible.\n Return the last pitch used (absolute, untransposed).\n \"\"\"\n p = Pitch.fromToken(token, tokenizer)\n if not p:\n return lastPitch\n # absolute pitch determined from untransposed pitch of lastPitch\n octaveCheck = p.octaveCheck is not None\n p.absolute(lastPitch)\n if source.inSelection:\n # we may change this pitch. Make it relative against the\n # transposed lastPitch.\n try:\n last = lastPitch.transposed\n except AttributeError:\n last = lastPitch\n # transpose a copy and store that in the transposed\n # attribute of lastPitch. Next time that is used for\n # making the next pitch relative correctly.\n copy = p.copy()\n transposer.transpose(copy)\n p.transposed = copy # store transposed copy in new lastPitch\n new = copy.relative(last)\n if octaveCheck:\n new.octaveCheck = copy.octave\n if relPitchToken:\n # we are allowed to change the pitch after the\n # \\relative command. lastPitch contains this pitch.\n lastPitch.octave += new.octave\n new.octave = 0\n changes.replaceToken(relPitchToken[0], lastPitch.output(tokenizer.language))\n del relPitchToken[:]\n changes.replaceToken(token, new.output(tokenizer.language))\n return p\n\n lastPitch = None\n relPitchToken = [] # we use a list so it can be changed from inside functions\n \n # find the pitch after the \\relative command\n token = next(source)\n if isinstance(token, tokenizer.Pitch):\n lastPitch = Pitch.fromToken(token, tokenizer)\n if lastPitch and source.inSelection:\n relPitchToken.append(token)\n token = next(source)\n if not lastPitch:\n lastPitch = Pitch.c1()\n \n # eat stuff like \\new Staff == \"bla\" \\new Voice \\notes etc.\n while True:\n if token in ('\\\\new', '\\\\context'):\n next(source) # skip context type\n token = next(source)\n if token == '=':\n next(source) # skip context name\n token = next(source)\n elif isinstance(token, tokenizer.NoteMode):\n token = next(source)\n else:\n break\n \n # now transpose the relative expression\n if isinstance(token, tokenizer.OpenDelimiter):\n # Handle full music expression { ... } or << ... >>\n for token in consume():\n if token == '\\\\octaveCheck':\n token = next(source)\n if isinstance(token, tokenizer.Pitch):\n p = Pitch.fromToken(token, tokenizer)\n if p:\n if source.inSelection:\n copy = p.copy()\n transposer.transpose(copy)\n p.transposed = copy\n changes.replaceToken(token, copy.output(tokenizer.language)) \n lastPitch = p\n del relPitchToken[:]\n elif isinstance(token, tokenizer.OpenChord):\n chord = [lastPitch]\n for token in source:\n if isinstance(token, tokenizer.CloseChord):\n lastPitch = chord[:2][-1] # same or first\n break\n elif isinstance(token, tokenizer.Pitch):\n chord.append(transposeRelative(token, chord[-1]))\n elif isinstance(token, tokenizer.Pitch):\n lastPitch = transposeRelative(token, lastPitch)\n elif isinstance(token, tokenizer.OpenChord):\n # Handle just one chord\n for token in source:\n if isinstance(token, tokenizer.CloseChord):\n break\n elif isinstance(token, tokenizer.Pitch):\n lastPitch = transposeRelative(token, lastPitch)\n elif isinstance(token, tokenizer.Pitch):\n # Handle just one pitch\n transposeRelative(token, lastPitch)",
"def pitch_roll(self, px, pz):\n px -= self.unif[0]\n pz -= self.unif[2]\n halfw = self.width/2.0\n halfd = self.depth/2.0\n dx = self.width/self.ix\n dz = self.depth/self.iy\n x0 = int(math.floor((halfw + px)/dx + 0.5))\n if x0 < 0: x0 = 0\n if x0 > self.ix-1: x0 = self.ix-1\n z0 = int(math.floor((halfd + pz)/dz + 0.5))\n if z0 < 0: z0 = 0\n if z0 > self.iy-1: z0 = self.iy-1\n normp = array(self.buf[0].normals[z0*self.ix + x0])\n # slight simplification to working out cross products as dirctn always 0,0,1\n #sidev = cross(normp, dirctn)\n sidev = array([normp[1], -normp[0], 0.0])\n sidev = sidev / sqrt(sidev.dot(sidev))\n #forwd = cross(sidev, normp)\n forwd = array([-normp[2]*normp[0], -normp[2]*normp[1],\n normp[0]*normp[0] + normp[1]*normp[1]])\n forwd = forwd / sqrt(forwd.dot(forwd))\n return (degrees(arcsin(-forwd[1])), degrees(arctan2(sidev[1], normp[1])))",
"def yaw_pitch_roll(self):\n\n self._normalise()\n yaw = np.arctan2(2*(self.q[0]*self.q[3] - self.q[1]*self.q[2]),\n 1 - 2*(self.q[2]**2 + self.q[3]**2))\n pitch = np.arcsin(2*(self.q[0]*self.q[2] + self.q[3]*self.q[1]))\n roll = np.arctan2(2*(self.q[0]*self.q[1] - self.q[2]*self.q[3]),\n 1 - 2*(self.q[1]**2 + self.q[2]**2))\n\n return yaw, pitch, roll",
"def body_frame(env, body_name):\n ind = body_index(env.model, body_name)\n b = env.data.body_xpos[ind]\n q = env.data.body_xquat[ind]\n qr, qi, qj, qk = q\n s = np.square(q).sum()\n R = np.array([\n [1 - 2 * s * (qj ** 2 + qk ** 2), 2 * s * (qi * qj - qk * qr), 2 * s * (qi * qk + qj * qr)],\n [2 * s * (qi * qj + qk * qr), 1 - 2 * s * (qi ** 2 + qk ** 2), 2 * s * (qj * qk - qi * qr)],\n [2 * s * (qi * qk - qj * qr), 2 * s * (qj * qk + qi * qr), 1 - 2 * s * (qi ** 2 + qj ** 2)]\n ])\n return R",
"def generate(self): \r\n \r\n self.pfn={} # phase joint functions \r\n self.afn={} # anti phase joint functions\r\n\r\n ## Foot and hip -> Lateral motion\r\n foot_func=SinusoidFunction()\r\n foot_func.angular_frequency= self.parameters[\"step_frequency\"]\r\n foot_func.amplitude= self.parameters[\"foot_amplitude\"]\r\n foot_func.amplitude_offset= self.parameters[\"foot_amplitude_offset\"]\r\n foot_func.phase_offset= self.parameters[\"foot_phase_offset\"]\r\n self.pfn[\"l_foot_joint\"]=foot_func \r\n foot_func_af=foot_func.mirror()\r\n self.afn[\"l_foot_joint\"]=foot_func_af\r\n \r\n hip_func=SinusoidFunction()\r\n hip_func.angular_frequency= self.parameters[\"step_frequency\"]\r\n hip_func.amplitude= self.parameters[\"hip_amplitude\"]\r\n hip_func.amplitude_offset= self.parameters[\"hip_amplitude_offset\"]\r\n hip_func.phase_offset= self.parameters[\"hip_phase_offset\"]\r\n self.pfn[\"l_hip_joint\"]=hip_func\r\n hip_func_af=hip_func.mirror()\r\n self.afn[\"l_hip_joint\"]=hip_func_af\r\n \r\n ## Thigh, ankle and knee -> Frontal motion\r\n thigh_func=SinusoidFunction()\r\n thigh_func.angular_frequency= self.parameters[\"step_frequency\"]\r\n thigh_func.amplitude= self.parameters[\"thigh_amplitude\"]\r\n thigh_func.amplitude_offset= self.parameters[\"thigh_amplitude_offset\"]\r\n thigh_func.phase_offset= self.parameters[\"thigh_phase_offset\"]\r\n self.pfn[\"l_thigh_joint\"]=thigh_func\r\n thigh_func_af=thigh_func.mirror_keep_amplitude_offset()\r\n self.afn[\"l_thigh_joint\"]=thigh_func_af\r\n \r\n ankle_func=SinusoidFunction()\r\n ankle_func.angular_frequency= self.parameters[\"step_frequency\"]\r\n ankle_func.amplitude= self.parameters[\"ankle_amplitude\"]\r\n ankle_func.amplitude_offset= self.parameters[\"ankle_amplitude_offset\"]\r\n ankle_func.phase_offset= self.parameters[\"ankle_phase_offset\"]\r\n self.pfn[\"l_ankle_joint\"]=ankle_func\r\n ankle_func_af=ankle_func.mirror_keep_amplitude_offset()\r\n self.afn[\"l_ankle_joint\"]=ankle_func_af\r\n \r\n knee_func=SinusoidFunction()\r\n knee_func.angular_frequency= self.parameters[\"step_frequency\"]\r\n knee_func.amplitude= self.parameters[\"knee_amplitude\"]\r\n knee_func.amplitude_offset= self.parameters[\"knee_amplitude_offset\"]\r\n knee_func.phase_offset= self.parameters[\"knee_phase_offset\"]\r\n self.pfn[\"l_knee_joint\"]=knee_func\r\n knee_func_af=knee_func.mirror_keep_amplitude_offset()\r\n self.afn[\"l_knee_joint\"]=knee_func_af\r\n \r\n #f3=SinusoidFunction()\r\n #f3.angular_frequency=self.parameters[\"step_frequency\"]\r\n #f3.amplitude=self.parameters[\"step_amplitude\"]\r\n #f3.amplitude_offset=self.parameters[\"step_amplitude_offset\"]\r\n #self.pfn[\"l_thigh_joint\"]= f3\r\n #f33=f3.clone()\r\n #f33.amplitude_offset = self.parameters[\"ankle_amplitude_offset\"]\r\n #f33.amplitude = self.parameters[\"ankle_amplitude\"]\r\n #self.pfn[\"l_ankle_joint\"]=f33\r\n #f4=f3.mirror()\r\n ##f4.amplitude_offset -= 0.4\r\n #self.pfn[\"l_knee_joint\"]=f4\r\n \r\n #f5=f3.mirror_keep_amplitude_offset()\r\n #self.afn[\"l_thigh_joint\"]=f5\r\n \r\n #f6=f33.mirror_keep_amplitude_offset()\r\n #self.afn[\"l_ankle_joint\"]=f6\r\n #f7=f5.mirror()\r\n ##f7.amplitude_offset -= 0.4\r\n #self.afn[\"l_knee_joint\"]=f7\r\n \r\n self.generate_right()\r\n \r\n self.show()",
"def pitch_roll(self, px, pz):\r\n px -= self.unif[0]\r\n pz -= self.unif[2]\r\n halfw = self.width/2.0\r\n halfd = self.depth/2.0\r\n dx = self.width/self.ix\r\n dz = self.depth/self.iy\r\n x0 = int(math.floor((halfw + px)/dx + 0.5))\r\n if x0 < 0: x0 = 0\r\n if x0 > self.ix-1: x0 = self.ix-1\r\n z0 = int(math.floor((halfd + pz)/dz + 0.5))\r\n if z0 < 0: z0 = 0\r\n if z0 > self.iy-1: z0 = self.iy-1\r\n normp = array(self.buf[0].normals[z0*self.ix + x0])\r\n # slight simplification to working out cross products as dirctn always 0,0,1\r\n #sidev = cross(normp, dirctn)\r\n sidev = array([normp[1], -normp[0], 0.0])\r\n sidev = sidev / sqrt(sidev.dot(sidev))\r\n #forwd = cross(sidev, normp)\r\n forwd = array([-normp[2]*normp[0], -normp[2]*normp[1],\r\n normp[0]*normp[0] + normp[1]*normp[1]])\r\n forwd = forwd / sqrt(forwd.dot(forwd))\r\n return (degrees(arcsin(-forwd[1])), degrees(arctan2(sidev[1], normp[1])))",
"def headingAutopilot(self,eta,nu,sampleTime): \n psi = eta[5] # yaw angle\n r = nu[5] # yaw rate\n e_psi = psi - self.psi_d # yaw angle tracking error\n e_r = r - self.r_d # yaw rate tracking error\n psi_ref = self.ref * math.pi / 180 # yaw angle setpoint\n \n wn = self.wn # PID natural frequency\n zeta = self.zeta # PID natural relative damping factor\n wn_d = self.wn_d # reference model natural frequency\n zeta_d = self.zeta_d # reference model relative damping factor\n\n m = self.m_PID \n d = self.d_PID\n k = 0\n\n # PID feedback controller with 3rd-order reference model \n [tau_N, self.e_int, self.psi_d, self.r_d, self.a_d] = PIDpolePlacement(\\\n self.e_int, e_psi, e_r,self.psi_d, self.r_d, \\\n self.a_d, m, d, k, wn_d, zeta_d, wn, zeta, psi_ref, self.r_max, \\\n sampleTime )\n \n # Control allocation: tau_N = Yd * delta\n delta_c = tau_N / self.Nd # rudder command\n \n u_control = np.array([delta_c],float) \n \n return u_control",
"def ROT(self):\n # The maximum update amount for these element\n # no dt since YawRate is already mult by dt\n YawRate_DELTA = (self.YawRate_LIMITS[1] -\n self.YawRate_LIMITS[0]) / (2.0)\n\n # Add either positive or negative or zero delta for each\n # NOTE: 'High' is open bracket ) so the max is 1\n YawRate_DIRECTION = np.random.randint(-1, 2, 1)[0]\n\n # Now, modify modifiable params AND CLIP\n self.YawRate += YawRate_DIRECTION * YawRate_DELTA\n self.YawRate = np.clip(self.YawRate, self.YawRate_LIMITS[0],\n self.YawRate_LIMITS[1])",
"def generateBody(self):\n # get the anims\n animDict = self.generateAnimDict()\n \n # NOTE: It is always phase 3.5 because the models are there\n # while everything else is in phase 5.\n filePrefix, bodyPhase = ModelDict[self.style.body]\n self.loadModel(\"phase_3.5\" + filePrefix + \"mod\")\n self.loadAnims(animDict)\n self.setSuitClothes()",
"def add_roll_pitch_yaw(df):\n roll, pitch, yaw = mpd.series_quat2euler(\n df[\"T_vehicle_attitude_0__F_q_0\"],\n df[\"T_vehicle_attitude_0__F_q_1\"],\n df[\"T_vehicle_attitude_0__F_q_2\"],\n df[\"T_vehicle_attitude_0__F_q_3\"],\n )\n df[\"T_vehicle_attitude_0__NF_roll\"] = roll.values\n df[\"T_vehicle_attitude_0__NF_pitch\"] = pitch.values\n df[\"T_vehicle_attitude_0__NF_yaw\"] = yaw.values",
"def moveRightArm(self, pitch): # add yaw!\n\n\t\tangles = [pitch, 12, 39, 3, -8] # angles in degrees\n\t\tangles = [math.radians(angle) for angle in angles] # convert to radians\n\t\tangles.append(1) # also open hand\n\t\ttimes = [1, 1, 1.5, 1.5, 1.5, 1.5]\n\t\tself.motion.angleInterpolation(\"RArm\", angles, times, True) # move to those arm angles and open hand",
"def test_vw_controller(self):\n pass\n\n yarp.Network.init()\n\n pose_stream = yarp.BufferedPortBottle()\n pose_stream.open(\"/morse/test/pose/in\")\n yarp.Network.connect(\"/morse/robots/ATRV/Pose/out\", \"/morse/test/pose/in\")\n\n cmd_stream = yarp.BufferedPortBottle()\n cmd_stream.open(\"/morse/test/vw/out\")\n yarp.Network.connect(\"/morse/test/vw/out\", \"/morse/robots/ATRV/Motion_Controller/in\")\n \n # Read the start position, it must be (0.0, 0.0, 0.0)\n pose = pose_stream.read()\n for i in range(6):\n self.assertAlmostEqual(pose.get(i).asDouble(), 0.0, delta=0.1)\n\n\n send_speed(cmd_stream, 1.0, 0.0, 2.0)\n\n pose = pose_stream.read()\n self.assertAlmostEqual(pose.get(0).asDouble(), 2.0, delta=0.1)\n self.assertAlmostEqual(pose.get(1).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(2).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(3).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(4).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(5).asDouble(), 0.0, delta=0.1)\n\n send_speed(cmd_stream, -1.0, 0.0, 2.0)\n\n pose = pose_stream.read()\n for i in range(6):\n self.assertAlmostEqual(pose.get(i).asDouble(), 0.0, delta=0.1)\n\n send_speed(cmd_stream, 1.0, -math.pi/4.0, 2.0)\n pose = pose_stream.read()\n self.assertAlmostEqual(pose.get(0).asDouble(), 4.0 / math.pi, delta=0.1)\n self.assertAlmostEqual(pose.get(1).asDouble(), -4.0 / math.pi , delta=0.1)\n self.assertAlmostEqual(pose.get(2).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(3).asDouble(), -math.pi/2.0, delta=0.1)\n self.assertAlmostEqual(pose.get(4).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(5).asDouble(), 0.0, delta=0.1)\n\n send_speed(cmd_stream, 0.5, -math.pi/8.0, 12.0)\n pose = pose_stream.read()\n for i in range(6):\n self.assertAlmostEqual(pose.get(i).asDouble(), 0.0, delta=0.1)\n\n send_speed(cmd_stream, -2.0, math.pi/2.0, 3.0)\n pose = pose_stream.read()\n self.assertAlmostEqual(pose.get(0).asDouble(), 4.0 / math.pi, delta=0.1)\n self.assertAlmostEqual(pose.get(1).asDouble(), -4.0 / math.pi , delta=0.1)\n self.assertAlmostEqual(pose.get(2).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(3).asDouble(), -math.pi/2.0, delta=0.1)\n self.assertAlmostEqual(pose.get(4).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(5).asDouble(), 0.0, delta=0.1)\n\n yarp.Network.fini()",
"def __init__(self, pitch=30, pitch_type='duo', Z=4, Alt = 100):\n \n self.pitch_type = pitch_type\n self.pitch = pitch\n self.Z = Z\n self.Alt = Alt\n \n \n # set the Ce value (exposure coeff NA 2.16)\n self.Ce = 1\n \n # set the Ct value (thermal coeff NA 2.17)\n self.Ct = 1\n \n # snow load shjape coefficients\n if self.pitch_type == 'mono':\n if self.pitch <= 30:\n self.mu = 0.80\n elif 30 < self.pitch <= 60:\n self.mu = 0.80 * (60 - self.pitch) / 30\n else:\n self.mu = 0.0\n elif self.pitch_type == 'duo':\n if self.pitch <= 15:\n self.mu = 0.80\n elif 15 < self.pitch <= 30:\n self.mu = 0.80 + 0.40*(self.pitch - 15) / 15\n elif 30 < self.pitch <= 60:\n self.mu = 1.2*(60 - self.pitch) / 30\n else:\n self.mu = 0.0\n else:\n self.mu = 0.80 # end conservative number\n \n # calculate the value of the snow load on the ground \n self.sk = (0.15 + (0.1 * self.Z + 0.05) + ((self.Alt - 100) / 525))\n \n # calculate the roof snow load\n self.s = self.mu * self.Ce * self.Ct * self.sk",
"def pitch(self, dangle): # aka elevation\n vr = self.getViewRight()\n GL.glTranslate(*self.focus)\n GL.glRotate(dangle, *vr)\n GL.glTranslate(*-self.focus)",
"def setRPY(self, roll, pitch, yaw):\n\n currentmat = self.__objbdb.get_homomat()\n currentmatnp = base.pg.mat4ToNp(currentmat)\n newmatnp = rm.rotmat_from_euler(roll, pitch, yaw, axes=\"sxyz\")\n self.setMat(base.pg.npToMat4(newmatnp, currentmatnp[:, 3]))"
]
| [
"0.600574",
"0.56870914",
"0.56655145",
"0.5648231",
"0.5631809",
"0.5598063",
"0.54620904",
"0.5438023",
"0.54291433",
"0.5419752",
"0.53822625",
"0.53808784",
"0.5375544",
"0.5347447",
"0.5311933",
"0.52853644",
"0.52546924",
"0.5252692",
"0.5225191",
"0.52118427",
"0.5200421",
"0.51805687",
"0.5179486",
"0.51786476",
"0.5171889",
"0.5168975",
"0.51397246",
"0.511306",
"0.50833464",
"0.508211"
]
| 0.6305453 | 0 |
Generate the target yawrate | def yaw_control(self, yaw_cmd, yaw):
    # Ensure the target is within range of 0 to 2*pi
    yaw_cmd = np.mod(yaw_cmd, 2.0*np.pi)
    yaw_error = yaw_cmd - yaw
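    # Wrap the error into [-pi, pi] so the commanded rate turns the vehicle the short way around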
    if yaw_error > np.pi:
        yaw_error = yaw_error - 2.0*np.pi
    elif yaw_error < -np.pi:
        yaw_error = yaw_error + 2.0*np.pi
    yawrate_cmd = self.Kp_yaw*yaw_error
    return yawrate_cmd | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def yaw_rate(self) -> float:\n return self._state[5]",
"def base_roll_pitch_yaw_rate(self):\n return np.asarray([self._robot_state.roll_rate, self._robot_state.pitch_rate, self._robot_state.yaw_rate])",
"def ROT(self):\n # The maximum update amount for these element\n # no dt since YawRate is already mult by dt\n YawRate_DELTA = (self.YawRate_LIMITS[1] -\n self.YawRate_LIMITS[0]) / (2.0)\n\n # Add either positive or negative or zero delta for each\n # NOTE: 'High' is open bracket ) so the max is 1\n YawRate_DIRECTION = np.random.randint(-1, 2, 1)[0]\n\n # Now, modify modifiable params AND CLIP\n self.YawRate += YawRate_DIRECTION * YawRate_DELTA\n self.YawRate = np.clip(self.YawRate, self.YawRate_LIMITS[0],\n self.YawRate_LIMITS[1])",
"def yaw(self):\n return self._yaw",
"def yawAngle(self):\n return self._yawAngle",
"def cmd_attitude_rate(self, yaw_rate, pitch_rate, roll_rate, collective):\n pass",
"def get_yaw(self):\r\n return self.state['yaw']",
"def yaw_rate_taken(self) -> np.ndarray:\n return self._prev_states[:, 5]",
"def yaw_pitch_roll(self):\n\n self._normalise()\n yaw = np.arctan2(2*(self.q[0]*self.q[3] - self.q[1]*self.q[2]),\n 1 - 2*(self.q[2]**2 + self.q[3]**2))\n pitch = np.arcsin(2*(self.q[0]*self.q[2] + self.q[3]*self.q[1]))\n roll = np.arctan2(2*(self.q[0]*self.q[1] - self.q[2]*self.q[3]),\n 1 - 2*(self.q[1]**2 + self.q[2]**2))\n\n return yaw, pitch, roll",
"def base_roll_pitch_yaw(self):\n #raise NotImplementedError('Not yet implemented!')\n return np.asarray([self._robot_state.roll, self._robot_state.pitch, self._robot_state.yaw])",
"def convert_yaw_to_old_viewpoint(yaw):\n if yaw is None:\n return None\n view_theta = ((TAU / 2) - yaw) % TAU\n view_theta = ut.rad_to_deg(view_theta)\n return view_theta",
"def return_yaw(self, image):\n #Uncomment if you want to see the image\n #cv2.imshow('image',image)\n #cv2.waitKey(0)\n #cv2.destroyAllWindows()\n h, w, d = image.shape\n #check if the image has the right shape\n if(h == w and h==64 and d==3):\n image_normalised = np.add(image, -127) #normalisation of the input\n feed_dict = {self.tf_yaw_input_vector : image_normalised}\n yaw_raw = self._sess.run([self.cnn_output], feed_dict=feed_dict)\n yaw_vector = np.multiply(yaw_raw, 100.0)\n #yaw = yaw_raw #* 100 #cnn out is in range [-1, +1] --> [-100, + 100]\n return yaw_vector\n #If the image is > 64 pixel then resize it\n if(h == w and h>64 and d==3):\n image_resized = cv2.resize(image, (64, 64), interpolation = cv2.INTER_AREA)\n image_normalised = np.add(image_resized, -127) #normalisation of the input\n feed_dict = {self.tf_yaw_input_vector : image_normalised}\n yaw_raw = self._sess.run([self.cnn_output], feed_dict=feed_dict) \n yaw_vector = np.multiply(yaw_raw, 100.0) #cnn-out is in range [-1, +1] --> [-100, + 100]\n return yaw_vector\n #wrong shape \n if(h != w or w<64 or h<64):\n raise ValueError('[DEEPGAZE] CnnHeadPoseEstimator: the image given as input has wrong shape. Height and Width must be >= 64 pixel')\n #wrong number of channels\n if(d!=3):\n raise ValueError('[DEEPGAZE] CnnHeadPoseEstimator: the image given as input does not have 3 channels, this function accepts only colour images.')",
"def convert_old_viewpoint_to_yaw(view_theta):\n if view_theta is None:\n return None\n view_theta = ut.deg_to_rad(view_theta)\n yaw = (-view_theta + (TAU / 2)) % TAU\n return yaw",
"def yaw(vec):\n\n return math.atan(math.sqrt(math.pow(vec[0], 2)\n + math.pow(vec[1], 2)) / vec[2])",
"def calculate_yaw(pixel_x, center_x) -> float:\n yaw = math.degrees(math.atan((pixel_x - center_x) / H_FOCAL_LENGTH))\n return yaw",
"def yaw(self, dangle): # aka azimuth\n vu = self.getViewUp()\n GL.glTranslate(*self.focus)\n GL.glRotate(dangle, *vu)\n GL.glTranslate(*-self.focus)",
"def on_cam_base_yaw_hSlider_valueChanged(self, value):\n self.cam_base_yaw_ledit.setText(str(50 + value))",
"def rotate(self, yaw):\n rotation_matrix = tfs.rotation_matrix(yaw, (0, 0, 1))[:2, :2]\n return np.matmul(rotation_matrix, self).view(Vector)",
"def yaw(self, dangle): # aka azimuth\n vu = self.getViewUp()\n GL.glTranslatef(*self.focus)\n GL.glRotate(dangle, *vu)\n GL.glTranslatef(*-self.focus)",
"def calculate_yaw_error(desired_yaw, current_yaw):\n yaw_error = desired_yaw - current_yaw\n\n # ensure within [-pi, pi] bounds\n if yaw_error < -np.pi:\n yaw_error += 2*np.pi\n elif yaw_error > np.pi:\n yaw_error -= 2*np.pi\n\n return yaw_error",
"def angle_to_target(position: Vector3, yaw: float, target: Vector3) -> float:\n angle_between_bot_and_target = math.atan2(target.y - position.y,\n target.x - position.x)\n\n angle_front_to_target = angle_between_bot_and_target - yaw\n\n # Correct the values\n if angle_front_to_target < -math.pi:\n angle_front_to_target += 2 * math.pi\n if angle_front_to_target > math.pi:\n angle_front_to_target -= 2 * math.pi\n\n return angle_front_to_target",
"def YI_Rate(Uion,Z,E,w,phase,terms):\n\tnstar = Z/np.sqrt(2*Uion)\n\tlstar = nstar - 1\n\tAnl = 2**(2*nstar) / (nstar*s.gamma(nstar+lstar+1)*s.gamma(nstar-lstar))\n\ttheta = (phase - 0.5*np.pi)%np.pi - 0.5*np.pi\n\tgam = np.sqrt(2.0*Uion)*w/E\n\ta = 1+gam*gam-np.sin(theta)**2\n\tb = np.sqrt(a*a+4*gam*gam*np.sin(theta)**2)\n\tc = np.sqrt((np.sqrt((b+a)/2)+gam)**2 + (np.sqrt((b-a)/2)+np.sin(np.abs(theta)))**2)\n\tPhi = (gam**2 + np.sin(theta)**2 + 0.5)*np.log(c)\n\tPhi -= 3*(np.sqrt(b-a)/(2*np.sqrt(2)))*np.sin(np.abs(theta))\n\tPhi -= (np.sqrt(b+a)/(2*np.sqrt(2)))*gam\n\tkappa = np.log(gam+np.sqrt(gam**2+1)) - gam/np.sqrt(1+gam**2)\n\talpha = 2 * (np.arcsinh(gam)-gam/np.sqrt(1+gam**2))\n\tbeta = 2*gam/np.sqrt(1+gam**2)\n\tnu = (Uion/w) * (1 + 1/(2*gam**2))\n\tA0 = 0\n\tfor n in range(np.int(np.ceil(nu)),np.int(np.ceil(nu)+terms)):\n\t\tA0 += np.exp(-alpha*(n-nu))*w0(np.sqrt(beta*(n-nu)))\n\tA0 *= (4/np.sqrt(3*np.pi)) * (gam**2/(1+gam**2))\n\tpre = Anl * np.sqrt(3*kappa/gam**3)*(1+gam**2)**0.75 * A0 * Uion\n\tpre *= (2*(2*Uion)**1.5 / E)**(2*nstar-1)\n\treturn pre * np.exp(-E**2 * Phi / w**3)",
"def relative_rate(self) -> \"double\":\n return _beamforming_swig.randomsampler_sptr_relative_rate(self)",
"def add_roll_pitch_yaw(df):\n roll, pitch, yaw = mpd.series_quat2euler(\n df[\"T_vehicle_attitude_0__F_q_0\"],\n df[\"T_vehicle_attitude_0__F_q_1\"],\n df[\"T_vehicle_attitude_0__F_q_2\"],\n df[\"T_vehicle_attitude_0__F_q_3\"],\n )\n df[\"T_vehicle_attitude_0__NF_roll\"] = roll.values\n df[\"T_vehicle_attitude_0__NF_pitch\"] = pitch.values\n df[\"T_vehicle_attitude_0__NF_yaw\"] = yaw.values",
"def get_yaw_relative(from_x, from_y, to_x, to_y, yaw):\n\tangle = math.degrees(math.atan2(to_y - from_y, to_x - from_x))\n\tyaw_relative = angle - math.degrees(yaw)\n\t# Correct the values\n\tif yaw_relative < -180:\n\t\tyaw_relative += 360\n\tif yaw_relative > 180:\n\t\tyaw_relative -= 360\n\treturn yaw_relative",
"def get_control_input(self, dt):\n \n desired_z = 3.0\n actual_z = self.internal_state.transform.translation.z\n \n # Convert to quaternion object for use by euler_from_quaternion()\n quaternion = np.array([self.internal_state.transform.rotation.x,\n self.internal_state.transform.rotation.y,\n self.internal_state.transform.rotation.z,\n self.internal_state.transform.rotation.w])\n \n # Determine the euler angles\n euler = euler_from_quaternion(quaternion)\n roll = euler[0]\n pitch = euler[1]\n yaw = euler[2]\n \n # Corrections for yaw wrap around\n if (not np.sign(yaw) == np.sign(self.yaw_old) and \n np.abs(np.abs(yaw) - np.pi) < 0.2):\n # Add or subtract 2 pi depending on crossing direction\n self.yaw_old = self.yaw_old + np.sign(yaw) * 2 * np.pi \n \n \n # Determine current yaw rate for yaw rate controller\n yaw_rate = (yaw - self.yaw_old) / dt\n \n # After use, update the old yaw value with the current yaw\n self.yaw_old = yaw\n \n # Determine current climb rate\n climb_rate = (actual_z - self.z_old) / dt\n \n # After use, update the old altitude with the current altitude\n self.z_old = actual_z\n \n # Find the errors between desired and actual signals\n err_roll = self.desired_roll - roll\n err_pitch = self.desired_pitch - pitch\n err_yaw_rate = self.desired_yaw_rate - yaw_rate\n err_climb_rate = self.desired_climb_rate - climb_rate\n \n # Set the hover motor speed\n motor_control = self.hover_speed * self.unit_hover_cmd\n \n # Roll controller\n motor_control += self.roll_Pgain * self.unit_roll_cmd * err_roll\n \n # Pitch controller\n motor_control += self.roll_Pgain * self.unit_pitch_cmd * err_pitch\n \n # Yaw rate controller (assuming small angles)\n motor_control += self.yaw_rate_Pgain * self.unit_yaw_cmd * err_yaw_rate\n \n # Climb rate controller\n motor_control += ((self.climb_Pgain * err_climb_rate +\n self.climb_Igain * self.internal_climb_error +\n self.climb_Dgain / dt * (err_climb_rate - \n self.internal_old_climb_rate_error)) * \n self.unit_hover_cmd)\n \n # Update the cumulative errors for integration\n self.internal_roll_error += err_roll * dt\n self.internal_pitch_error += err_pitch * dt\n self.internal_yaw_error += err_yaw_rate * dt\n self.internal_climb_error += err_climb_rate * dt\n \n # Update old error with current errors for differentiation\n self.internal_old_roll_error = err_roll\n self.internal_old_pitch_error = err_pitch\n self.internal_old_yaw_rate_error = err_yaw_rate\n self.internal_old_climb_rate_error = err_climb_rate\n \n \n # Return the minimum of the \n return np.clip(motor_control, self.motor_cmd_min, self.motor_cmd_max)",
"def load_yaw_variables(self, YawFilePath):\n\n #It is possible to use the checkpoint file\n #y_ckpt = tf.train.get_checkpoint_state(YawFilePath)\n #.restore(self._sess, y_ckpt.model_checkpoint_path) \n\n #For future use, allocating a fraction of the GPU\n #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5) #Allocate only half of the GPU memory\n\n if(os.path.isfile(YawFilePath)==False): raise ValueError('[DEEPGAZE] CnnHeadPoseEstimator: the yaw file path is incorrect.')\n\n tf.train.Saver(({\"conv1_yaw_w\": self.hy_conv1_weights, \"conv1_yaw_b\": self.hy_conv1_biases,\n \"conv2_yaw_w\": self.hy_conv2_weights, \"conv2_yaw_b\": self.hy_conv2_biases,\n \"conv3_yaw_w\": self.hy_conv3_weights, \"conv3_yaw_b\": self.hy_conv3_biases,\n \"dense1_yaw_w\": self.hy_dense1_weights, \"dense1_yaw_b\": self.hy_dense1_biases,\n \"out_yaw_w\": self.hy_out_weights, \"out_yaw_b\": self.hy_out_biases\n })).restore(self._sess, YawFilePath)",
"def rotation_rate(self) -> int:\n return self._rotation_rate",
"def __init__(self, rate):\n super(RandomWander, self).__init__()\n self.iteration = 0\n self.rate = rate\n self.speed = 0\n self.heading = 0",
"def set_yaw_angles(self, yaw_angles):\n if isinstance(yaw_angles, float) or isinstance(yaw_angles, int):\n yaw_angles = [yaw_angles] * len(self.turbines)\n\n for yaw_angle, turbine in zip(yaw_angles, self.turbines):\n turbine.yaw_angle = yaw_angle"
]
| [
"0.7615613",
"0.7032515",
"0.67301184",
"0.6561178",
"0.6524339",
"0.6409428",
"0.6381721",
"0.6186662",
"0.6064866",
"0.60448754",
"0.5987767",
"0.5842089",
"0.58199275",
"0.5816168",
"0.58009934",
"0.5781943",
"0.577507",
"0.57193905",
"0.5700633",
"0.565812",
"0.5622117",
"0.56071854",
"0.5588065",
"0.554763",
"0.5504992",
"0.5489235",
"0.5470341",
"0.546903",
"0.5443296",
"0.54082865"
]
| 0.7520038 | 1 |
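For reference, the wrap-to-[-pi, pi] step in the yaw controller above can also be written in closed form. The sketch below is illustrative only (the helper name and test values are assumed, and numpy's modulo replaces the record's if/elif branches):

import numpy as np

def wrap_to_pi(angle_error):
    # Map an arbitrary angle error into [-pi, pi] so a proportional
    # yaw controller always commands the shorter turn direction.
    return (angle_error + np.pi) % (2.0 * np.pi) - np.pi

# Quick sanity checks with assumed values:
assert np.isclose(wrap_to_pi(1.5 * np.pi), -0.5 * np.pi)
assert np.isclose(wrap_to_pi(-1.5 * np.pi), 0.5 * np.pi)
assert np.isclose(wrap_to_pi(0.25 * np.pi), 0.25 * np.pi)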
derive flask app based on the combination of commandline options and the contents of the .ini files | def app(self):
## set flask specific things that are non-optional
error = lambda k: 'Fatal: You need to specify a "flask" section ' + \
'with an entry like "'+k+'=..." in your .ini file'
try: app_name = self['flask.app']
except KeyError: raise SystemExit(error('app'))
try: secret_key = self['flask.secret_key']
except KeyError: raise SystemExit(error('secret_key'))
app = Flask(app_name)
app.secret_key = secret_key
## set flask specific things that are optional
if 'flask.template_path' in self:
app.jinja_loader = FileSystemLoader(self['template_path'])
if 'flask.before_request' in self:
before_request = self['flask.before_request']
before_request = namedAny(before_request)
app.before_request(before_request)
if 'flask.after_request' in self:
after_request = self['flask.after_request']
after_request = namedAny(after_request)
app.after_request(after_request)
## setup views
try: view_holder = self['corkscrew.views']
except KeyError:
error = 'Fatal: could not find "view=<dotpath>" entry in your .ini file'
raise SystemExit(error)
else:
view_list = namedAny(view_holder)
[ v(app=app, settings=self) for v in view_list]
return app | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def app(\n config_file: str = typer.Option(..., '--config','-c', help='Config file')\n):\n config = Config(config_file)\n if config.mode == 'server':\n server = Server(config)\n server.start()\n elif config.mode == 'client':\n client = Client(config)\n client.start()",
"def main(run_app, config_filename=None):\n\n if not config_filename:\n parser = ThrowingArgumentParser(description='Provide a RESTful API service from the order database.')\n parser.add_argument('config_file', help='JSON configuration file', type=argparse.FileType('r'))\n args = parser.parse_args()\n\n app = create_app(args.config_file.name)\n\n else:\n app = create_app(config_filename)\n\n if 'flask-debug' in app.config:\n do_debug = app.config['flask-debug']\n else:\n do_debug = False\n\n if run_app:\n if app.config['listening_ip'] and app.config['listening_port']:\n app.run(host=app.config['listening_ip'], port=app.config['listening_port'], debug=do_debug)\n else:\n # expect a nginx environment\n app.run(debug=do_debug)",
"def main():\n import sys\n FILES.extend(sys.argv[1:])\n app.debug = True\n app.run(port=5001, threaded=False)",
"def main() -> None:\n config = get_config()\n app = Application()\n web_config = config[\"web\"]\n webapp = WebApp(config)\n webapp.attach_to(app)\n\n run_config = keep(web_config, {\"host\", \"port\"})\n run_app(app, **run_config)",
"def main():\n options = lib.main.parse_args()\n\n #Initialize all the loggings with the options specified.\n lib.main.logs_initialization(options)\n logging.debug(\"Logs are now enabled and working\")\n\n #Update the main config file with the app information.\n logging.debug(\"Updating parameters on config files\")\n lib.config.update_params()\n\n # Finally, when all the initialization schedule is completed, Flask\n # will start.\n logging.debug(\"Calling Flask initializator function\")\n api.start(options[\"debug\"])",
"def main(_, **settings):\n config = Configurator(settings=settings)\n register_includes(config)\n register_json_renderer(config)\n register_routes(config)\n\n config.scan()\n return config.make_wsgi_app()",
"def setup_app(command, conf, vars):\n load_environment(conf.global_conf, conf.local_conf)",
"def main(config: str):\n application = Application(config_path=config)\n application.run()",
"def main(app_handlers=None, app_settings=None, use_curl=False):\n\n options.parse_command_line()\n _path = os.path.abspath(options.dir)\n if _path not in sys.path:\n sys.path.append(_path)\n del _path\n\n app_handlers = app_handlers or []\n app_settings = app_settings or {}\n\n if options.framework == \"tornado\":\n launcher = TornadoAPILauncher(options.conf)\n elif options.framework == \"flask\":\n launcher = FlaskAPILauncher(options.conf)\n elif options.framework == \"fastapi\":\n launcher = FastAPILauncher(options.conf)\n else: # there are only three supported frameworks for now\n raise ValueError(\"Unsupported framework.\")\n\n try:\n if app_settings:\n launcher.settings.update(app_settings)\n if app_handlers:\n launcher.handlers = app_handlers\n if use_curl:\n launcher.use_curl()\n\n launcher.host = options.address\n launcher.settings.update(debug=options.debug)\n launcher.settings.update(autoreload=options.autoreload)\n except Exception:\n pass\n\n launcher.start(options.port)",
"def main(args=None):\n app()\n return 0",
"def main():\n\n apps = [\n 'fires', 'hw6',\n 'imageapp',\n 'quixote_demo',\n 'quotes',\n 'chat',\n 'cookie'\n ]\n parser = argparse.ArgumentParser(\n description='A WSGI Server implemented for CSE491-001.',\n epilog='Please check the non-existent documentation for more info.',\n formatter_class=argparse.RawTextHelpFormatter\n )\n # Add the '-?' alias for '--help', which I prefer to use:\n parser.add_argument('-?',\n action='help',\n help='Alias for --help')\n # Add the application argument:\n parser.add_argument('--app',\n nargs='?',\n dest='app',\n default='fires',\n choices=apps,\n help='\\n'.join([\n 'Which WSGI application to run.',\n '(default: \"%(default)s\" - my homework 6)',\n 'Alias: -A'\n ]))\n parser.add_argument('-A',\n nargs='?',\n dest='app',\n default='fires',\n choices=apps,\n help=argparse.SUPPRESS)\n # Add the port argument:\n parser.add_argument('--port',\n nargs='?',\n default=random.randint(8000, 9999),\n type=int,\n help='\\n'.join([\n 'Which port to start the server on.',\n '(default: random integer between 8000 and 9999)',\n 'Alias: -p'\n ]))\n # After that, parse the command-line arguments.\n args = parser.parse_args()\n\n # Create a socket object\n sock = socket.socket()\n # Get local machine name\n host = socket.getfqdn()\n\n if host in ('magrathea', 'Thoth'):\n # For testing, I don't want to have to change my url all the damn time.\n port = 8080\n else:\n port = args.port\n # Bind to the port\n # TODO figure out how to immediately unbind when I'm done\n sock.bind((host, port))\n print 'Starting server at http://%s:%d/' % (host, port)\n # Now wait for client connection.\n sock.listen(5)\n\n # get this from commandline\n app_to_run = args.app\n if app_to_run == 'quixote_demo':\n # quixote stuff for testing with that\n p = create_publisher()\n # p.is_thread_safe = True # hack...\n wsgi_app = quixote.get_wsgi_app()\n elif app_to_run == 'imageapp':\n imageapp.setup()\n p = imageapp.create_publisher()\n wsgi_app = quixote.get_wsgi_app()\n elif app_to_run == 'quotes':\n wsgi_app = QuotesApp('./quotes/quotes.txt', './quotes/html')\n elif app_to_run == 'chat':\n wsgi_app = ChatApp('./chat/html')\n elif app_to_run == 'cookie':\n wsgi_app = cookieapp.wsgi_app\n else: #if app_to_run == 'fires': # default\n wsgi_app = app.make_app()\n\n\n print 'Entering infinite loop; hit CTRL-C to exit'\n try:\n while True:\n # Establish connection with client.\n conn, (client_host, client_port) = sock.accept()\n print 'Got connection from', client_host, client_port\n handle_connection(conn, wsgi_app)\n finally:\n # teardown stuffs\n if app_to_run == 'imageapp':\n imageapp.teardown()\n sock.shutdown(2)\n sock.close()",
"def main(args):\n\n args = parse_args(args)\n setup_logging(args.loglevel)\n \n app.run(port='5002')",
"def __init__(self, app_name, file=\"~/.redshovel\"):\r\n usage = \"usage: %prog [options]\"\r\n self.optp = optparse.OptionParser(usage=usage)\r\n self.app_name = app_name\r\n self.opts = None\r\n self.args = None\r\n\r\n self.config = ConfigParser.RawConfigParser()\r\n self.config.read(os.path.expanduser(file))\r\n\r\n # Add the common options.\r\n self.default_section = set(('api_key', 'url', 'verbose'))\r\n self.required = set()\r\n self.add_option(\"-a\", \"--api\", dest=\"api_key\", required=True,\r\n help=\"The api key to connect to redmine with\")\r\n self.add_option(\"-u\", \"--url\", dest=\"url\", required=True,\r\n help=\"The url to redmine.\")\r\n self.add_option(\"-v\", \"--verbose\",\r\n action=\"count\", dest=\"verbose\",\r\n help=\"verbosity the more v's the more verbose.\")",
"def main(argv):\n parser = optparse.OptionParser(usage=__doc__)\n parser.add_option('-s', '--server', dest='server',\n help='The hostname your app is deployed on. '\n 'Defaults to <app_id>.appspot.com.')\n parser.add_option('-o', '--output', dest='filename', default=DEFAULT_FILE,\n help='The file to which Appstats data must '\n 'be downloaded. A .pkl extension is '\n 'recommended. Defaults to %s.' % DEFAULT_FILE)\n parser.add_option('-p', '--path', dest='path',\n help='The path on the server to the remote_api handler. '\n 'Defaults to %s for python and %s for java. '\n % (DEFAULT_PATH_PYTHON, DEFAULT_PATH_JAVA))\n parser.add_option('-q', '--quiet',\n action='store_false', dest='verbose', default=True,\n help='do not print download status messages to stdout')\n parser.add_option('-j', '--java',\n action='store_true', dest='java_application', default=False,\n help='set this for downloading from a java application')\n parser.add_option('-m', '--merge',\n action='store_true', dest='merge', default=False,\n help='if file exists, merge rather than overwrite')\n parser.add_option('--secure', dest='secure', action='store_true',\n default=False, help='Use HTTPS when communicating '\n 'with the server.')\n parser.add_option('--appdir', dest='appdir', action='store', default='.',\n help='application directory, for finding '\n 'appengine_config.py. Defaults to \".\".')\n (options, args) = parser.parse_args()\n\n\n if ((not options.server and not args) or len(args) > 2\n or (options.path and len(args) > 1)):\n parser.print_usage(sys.stderr)\n if len(args) > 2:\n print >> sys.stderr, 'Unexpected arguments: %s' % args[2:]\n elif options.path and len(args) > 1:\n print >> sys.stderr, 'Path specified twice.'\n sys.exit(1)\n\n\n servername = options.server\n appid = None\n if options.java_application:\n default_path = DEFAULT_PATH_JAVA\n else:\n default_path = DEFAULT_PATH_PYTHON\n path = options.path or default_path\n if args:\n if servername:\n\n appid = args[0]\n else:\n\n servername = '%s.appspot.com' % args[0]\n if len(args) == 2:\n\n path = args[1]\n if options.verbose:\n\n\n logging.getLogger().setLevel(logging.INFO)\n download_appstats(servername, appid, path, options.secure,\n appengine_rpc.HttpRpcServer, options.filename,\n options.appdir, options.merge, options.java_application)",
"def configure_app(self, app, parser):\n parser.add_argument(\n 'infile',\n nargs=argparse.OPTIONAL,\n default='-',\n help='JSON-encoded glucose data'\n )",
"def main(global_config, **settings):\n config = Configurator(settings=settings)\n config.include(includeme)\n return config.make_wsgi_app()",
"def generate(self) -> Flask:\n app = Flask(self.name, *self.args, **self.kwargs)\n app = self.setup_app_config(app)\n app = self.add_app_headers(app)\n app = self.add_xsrf_error_handler(app)\n\n return app",
"def startapp():",
"def launch_cli() -> None:\n app.run(main, flags_parser=_parse_flags)",
"def run():\n import argparse\n\n parser = argparse.ArgumentParser(description='Phovea Server')\n parser.add_argument('--use_reloader', action='store_true', help='whether to automatically reload the server')\n parser.add_argument('--env', default=cc.get('env'), help='environment mode (dev or prod)')\n\n # parse before to enable correct plugin discovery\n args = parser.parse_known_args()[0]\n if args.env.startswith('dev'):\n enable_dev_mode()\n else:\n enable_prod_mode()\n\n # resolve the default command to decide which application to launch\n default_command = _resolve_commands(parser)\n if default_command is not None:\n # set a default subparse to extract the defined arguments from the instance to the main arguments (?)\n set_default_subparser(parser, default_command)\n\n args = parser.parse_args()\n\n _set_runtime_infos(args)\n\n main = args.launcher(args) # execute the launcher function, which returns another function\n\n if args.use_reloader:\n _log.info('start application using reloader...')\n run_with_reloader(main, extra_files=_config_files())\n else:\n _log.info('start application...')\n main()",
"def main(global_config, **settings):\n # add settings in here?\n config = Configurator(settings=settings)\n config.include('pyramid_jinja2')\n config.include('.models')\n config.include('.routes')\n config.include('.security')\n config.scan()\n return config.make_wsgi_app()",
"def main(global_config, **settings):\n config = Configurator(settings=settings, root_factory=root_factory)\n config.include('substanced')\n config.include('.resources')\n config.scan()\n return config.make_wsgi_app()",
"def __init__(self,*args,**kwargs):\n # Pop config_file keyword argument\n # before calling flask app constructor,\n # since this is the only non-flask parameter\n if 'config_file' in kwargs.keys():\n config_file = kwargs['config_file']\n del kwargs['config_file']\n\n super().__init__(*args,**kwargs)\n\n # ----------------------------\n # Load config file\n msg = \"CentillionFlask: __init__(): Preparing to load webapp config file.\"\n logging.info(msg)\n loaded_config = False\n\n # Flask Config \n # ------------\n # We have 4 ways to pass a Flask config to centillion.\n # \n # Option 1: set the `CONFIG_CENTILLION` env var\n # Option 2A: specify the relative or absolute path to a config file when initializing flask app\n # Option 2B: have a config file (with name specified by DEFAULT_CONFIG on const.py) in the current directory\n # Option 3: have a config file (wth name specified by DEFAULT_CONFIG in const.py) in `~/.config/centillion/`\n # \n # (Option 2A and 2B are the same, but one specifies\n # the config file name and one uses the default.)\n\n\n cf = 'CENTILLION_CONFIG'\n if cf in os.environ:\n\n # Option 1:\n # \n # The user can set the centillion config file\n # using the CENTILLION_CONFIG environment var\n # when they run their centillion driver program.\n\n # If the config file is in the \n # current working directory:\n if os.path.isfile(os.path.join(call,os.environ[cf])):\n # relative path\n self.config.from_pyfile(os.path.join(call,os.environ[cf]))\n loaded_config = True\n msg = \"CentillionFlask: __init__(): Succesfuly loaded webapp config file from %s variable.\\n\"%(cf)\n msg += \"Loaded config file at %s\"%(os.path.join(call,os.environ[cf]))\n logging.info(msg)\n \n # If the config file is \n # an absolute path:\n elif os.path.isfile(os.environ[cf]):\n # absolute path\n self.config.from_pyfile(os.environ[cf])\n loaded_config = True\n msg = \"CentillionFlask: __init__(): Succesfuly loaded webapp config file from %s variable.\\n\"%(cf)\n msg += \"Loaded config file at %s\"%(os.environ[cf])\n logging.info(msg)\n \n else:\n\n msg = \"CentillionFlask: __init__(): Did not find CENTILLION_CONFIG environment variable. 
Still looking for config file...\\n\"\n logging.info(msg)\n\n # Option 2:\n # \n # User specifies the name of a config file,\n # either relative or absolute, when they\n # create the Flask app.\n # \n # Note: if config_file = DEFAULT_CONFIG,\n # this is Option 2B.\n # \n cwd_config = os.path.join(call,config_file)\n if os.path.isfile(cwd_config):\n self.config.from_pyfile(cwd_config)\n loaded_config = True\n msg = \"CentillionFlask: __init__(): Succesfuly loaded user-specified config file\\n\"\n msg += \"Loaded config file at %s\"%(cwd_config)\n logging.info(msg)\n\n elif os.path.isfile(config_file):\n self.config.from_pyfile(config_file)\n loaded_config = True\n msg = \"CentillionFlask: __init__(): Succesfuly loaded user-specified config file\\n\"\n msg += \"Loaded config file at %s\"%(config_file)\n logging.info(msg)\n\n else:\n\n msg = \"CentillionFlask: __init__(): Did not find config file anywhere, punting and looking for ~/.config/centillion/%s\\n\"%(DEFAULT_CONFIG)\n logging.info(msg)\n\n # Option 3:\n # \n # User must have a config file in \n # ~/.config/centillion/$DEFAULT_CONFIG\n # \n home = str(Path.home())\n home_config = os.path.join(home,'.config','centillion',DEFAULT_CONFIG)\n if os.path.isfile(home_config):\n self.config.from_pyfile(home_config)\n loaded_config = True\n msg = \"CentillionFlask: __init__(): Succesfuly loaded config file in home directory\\n\"\n msg += \"Loaded config file at %s\"%(home_config)\n logging.info(msg)\n\n\n if not loaded_config:\n err = \"ERROR: CentillionFlask: __init__(): Problem setting config file. Check that %s exists or that the %s environment variable is set!\\n\"%(config_file,cf)\n logging.exception(err)\n raise Exception(err)\n\n self.validate_config()",
"def create_app(testing=False, cli=False):\n app = Flask(__name__)\n app.config.from_object(\"flask_cli.config\")\n\n if testing is True:\n app.config[\"TESTING\"] = True\n\n configure_extensions(app, cli)\n configure_apispec(app)\n register_blueprints(app)\n return app",
"def main(global_config, **settings):\n config = Configurator(settings=settings)\n\n init_includes(config)\n init_routing(config)\n init_db(config)\n return config.make_wsgi_app()",
"def main():\n\n import os,sys\n\n # Todo: this should really double fork (on posix platforms, not\n # sure about windows which doesn't have fork), I if you start appMan from the command line\n # and send it a ^C any sub app it starts also get's that ^C which\n # isn't what we want.\n\n from optparse import OptionParser\n parser = OptionParser()\n\n parser.add_option(\"-d\", dest=\"daemonize\", action=\"store_true\", default=False,\n help=\"Run this process as a background daemon\")\n parser.add_option(\"-v\", dest=\"logLevel\", action=\"count\", default=0,\n help=\"Increase logging verbosity by one for each -v specified on the commandline\")\n\n parser.add_option(\"-b\", dest=\"breakAt\", default=None,\n help=\"Use python debugger to break @ file:line, needs -f\")\n\n parser.add_option(\"-o\", action=\"append\", dest=\"options\",\n default=[], help='Set extension option -o \"section:key=value\"')\n\n\n (options, args) = parser.parse_args()\n\n # app's can be started using the ramblerapp shell command, in that\n # case the first argument must be the path to the application\n # bundle. If the name of the script isn't ramblerapp say it's\n # \"foo\" then we assume this is an application whose bundle is\n # under /usr/lib/Rambler/extensions.\n\n # Todo: come up with a better way to specify the rambler\n # extensions directory. Perhaps check home directories as well,\n # that might make development eaiser\n\n scriptname = os.path.basename(sys.argv[0])\n appBundlePath=None\n if scriptname != \"ramblerapp\":\n\n\n # Depending on what platform we're on the scriptname could\n # either either foo or foo.exe, to add insult too injury the\n # scrip probably isn't installed in the bundle directory (the\n # directory containg the app.conf) file. So using setuptools\n # we'll first determine which egg this script came from, then\n # we'll use that to find the directory containing the app.conf\n\n # We may have more than one module that uses the same\n # appBundle, like appmanager and ramblercon, so consolt the\n # pkg_resources for that information. This only works if the script\n # wass installed as an egg 'console_script'\n\n for ep in pkg_resources.iter_entry_points('console_scripts',scriptname):\n \n # Note, asking for the '' filename only works on unzipped\n # packages. If I want to make ramblerapps out of eggs\n # we'll need to redesign the \"Bundle\" concept. Heck we\n # might be able to ditch Bundles in favor of eggs\n # alltogether....\n\n # We may one or more scripts that \n\n\n appBundlePath = pkg_resources.resource_filename(ep.dist.project_name,'')\n\n # Warning: There could be more than one script of the same\n # name in setuptools database. Typically this means that\n # two different projects installed the same console_script\n # with the same name. Now the bad part is, who knows which\n # script is actually installed.\n break\n\n # if appBundlePath wasn't set, then the script name didn't\n # refer to a script installed by an egg. As a last ditch\n # effort and probably the most common case the scriptname\n # referes to a vanilla python package\n \n appBundlePath = appBundlePath or pkg_resources.resource_filename(os.path.basename(scriptname),'')\n\n \n elif len(args) < 1:\n print >> sys.stderr, \"Please specify the application directory.\"\n return 1\n else:\n appBundlePath = args[0]\n args = args[1:]\n \n \n\n # clear the options in sys.argv we didn't use and put the\n # positional ones back in. Right now we're donig this mostly for\n # ramblercon which is a commandline application. 
Might be nice to\n # have a way of getting positional arguments to a component that\n # doesn't involve munging the command line.\n\n #del sys.argv[1:]\n #sys.argv.extend(args)\n\n\n \n authoritativeOptions = None\n\n\n\n # close any open file handles, have to do this before we load the\n # app cause who knows what files we may open next\n\n if not options.daemonize:\n # keep stderr, stdin and stdout open\n startfd = 3\n else:\n startfd = 3\n\n # wonder if closing files is neccesary now that we don't fork..\n import os\n try:\n maxfd = os.sysconf(\"SC_OPEN_MAX\")\n except (AttributeError, ValueError):\n maxfd = 256 # default maximum\n\n for fd in range(startfd, maxfd):\n try:\n os.close(fd)\n except OSError: # ERROR (ignore)\n pass\n\n app = Application(appBundlePath,\n authoritativeOptions=authoritativeOptions)\n \n logService = app.lookup(\"LogService\")\n level = logService.defaultLevel - (options.logLevel * 10)\n logService.setLevel(level)\n\n\n if not options.daemonize:\n logService.useStreamHandler(sys.stderr)\n else:\n # since we didn't pass anything in this should already be done for us\n\n logService.useSyslogHandler()\n pass\n\n\n try:\n app.load()\n \n except:\n app.log.exception(\"Exception encountred while loading as a subprocess\")\n return 1\n\n try:\n RunLoop.currentRunLoop().run()\n except:\n app.log.exception(\"Unhandled exception encuntered in runLoop\")\n return 255\n \n # if we didn't die with an exception, exit with a 0, no errors\n return 0",
"def create_application(config=None):\n app = Flask(__name__) \n if config is not None:\n print('mewo')\n app.config.from_object(config)\n # else:\n # print(os.environ['APP_SETTINGS'])\n # app.config.from_object(os.environ['APP_SETTINGS'])\n\n @app.route('/')\n def example():\n \"\"\" a example funciton \"\"\"\n return 'hello world'\n\n return app",
"def create_app(config_filename=None, additional_config=None, hooks=None):\n\n from flask.logging import default_handler\n log.addHandler(default_handler)\n log.setLevel(logging.INFO)\n\n # first handle the configuration file before the flask app will be created\n if not config_filename:\n config_filename = ALTERNATIVE_CONFIG_FILE\n print('Using default config file: {0}'.format(config_filename))\n\n custom_config = parse_config_file_check(config_filename)\n\n if additional_config:\n custom_config.update(additional_config)\n\n global API_PATH\n API_PATH = custom_config['api_path']\n\n # now create the flask app object\n app = Flask(__name__, static_folder=DEFAULT_STATIC_PATH)\n app.logger.setLevel(logging.INFO)\n\n app.config.from_mapping(\n # a default secret that should be overridden by instance config\n SECRET_KEY='dev',\n )\n\n # copy the custom config into the flask app's config (app.config[<key>]) for using it in every route\n for key in custom_config:\n if key in app.config:\n log.info('Overriding config item {0}: {1} -> {2}'.format(key, app.config[key], custom_config[key]))\n app.config.update(custom_config)\n\n # set the config's flask static path insted of the original one\n if 'static_flask_filepath' in app.config:\n app.static_folder = app.config['static_flask_filepath']\n\n if 'LC_TIME' in app.config:\n try:\n locale.setlocale(locale.LC_TIME, app.config['LC_TIME'])\n except Exception as e:\n log.error('Cannot set language LC_TIME={0}: {1}'.format(app.config['LC_TIME'], e))\n\n if hooks and 'after_config_loaded' in hooks:\n # call first hook after loading configuration\n hooks['after_config_loaded'](app)\n\n # CORS is an access protection for browsers. Here we just open it to allow the browser accessing data from\n # different hosts than just from this flask server.\n CORS(app)\n\n # the api object is a flask_restful manager class which helps to provide and process JSON data over HTTP requests\n api = Api(app)\n\n # ensure the instance folder exists\n # try:\n # print('Create instance paths: ', app.instance_path)\n # os.makedirs(app.instance_path)\n # except OSError:\n # pass\n\n # if we're in pytest mode, the temporary database should already have been created\n if 'TEST_DATABASE' in app.config:\n app.config['db_uri'] = '{0}{1}'.format(app.config['db_uri'], app.config['TEST_DATABASE'])\n msg = 'Creating test database at {0}'.format(app.config['db_uri'])\n print(msg)\n log.warning(msg)\n\n # create the database connector and store it into the flask app config\n db_connect: sqlalchemy.Engine = sqlalchemy.create_engine(app.config['db_uri']) # not supported here: read_only=args.read_only)\n app.config['db_connect']: sqlalchemy.engine.Engine = db_connect\n app.config['db_fail'] = False\n app.config['db_meta']: sqlalchemy.MetaData = sqlalchemy.MetaData()\n app.config['db'] = {}\n try:\n # now create SQLAlchemy ORM reflection objects (= read ORM from the existing DB)\n app.config['db_meta'].reflect(bind=app.config['db_connect'])\n for table_object in app.config['db_meta'].sorted_tables:\n app.config['db'][str(table_object)]: sqlalchemy.Table = table_object\n app.config['db_base']: sqlalchemy.ext.automap = sqlalchemy.ext.automap.automap_base(metadata=app.config['db_meta'])\n app.config['db_base'].prepare()\n except sqlalchemy.exc.SQLAlchemyError as e:\n app.config['db_fail'] = True\n app.config['db_fail_err'] = 'SQL code:{0} message:{1}'.format(e.orig.args[0], e.orig.args[1])\n\n app.config['current_lang'] = 'en' # default language\n\n from flask_squirrel.table.db_placeholder_parser 
import DbPlaceholderParser\n app.config['db_placeholder_parser'] =\\\n DbPlaceholderParser(app.config['customview_spec'], app.config['translation_spec'], app.config['db_spec'],\n app.config['db'], db_connect)\n\n from flask_squirrel.util.view_spec_generator import ViewSpecGenerator\n view_spec_generator = ViewSpecGenerator(app.config['customview_spec'], app.config['translation_spec'],\n app.config['db_spec'])\n app.config['view_spec_generator'] = view_spec_generator\n\n log.info('Adding resource /{0}/resource-view-spec to routes...'.format(API_PATH))\n api.add_resource(ResourceViewSpec, '/{0}/resource-view-spec'.format(API_PATH), endpoint='resource-view-spec')\n\n for table_name in app.config['db_spec']:\n log.info('Adding table /{0}/{1} to routes...'.format(API_PATH, table_name))\n api.add_resource(DbTable, '/{0}/{1}'.format(API_PATH, table_name), endpoint='api-{0}'.format(table_name),\n resource_class_kwargs={'table_name': table_name})\n\n if 'upload_dir' in app.config:\n # optional upload route\n log.info('Adding resource /{0}/upload to routes...'.format(API_PATH))\n from flask_squirrel.util import uploadroute\n app.register_blueprint(uploadroute.bp, url_prefix='/{0}'.format(API_PATH))\n\n if ('static_directory' in app.config) and ('static_url' in app.config):\n # optionally provide web app (html/js/css)\n log.info('Adding local static path \"{0}\" to routes at /{1}...'\n .format(app.config['static_directory'], app.config['static_url']))\n frontend = Blueprint(app.config['static_url'], __name__, static_folder=app.config['static_directory'],\n static_url_path='/{0}'.format(app.config['static_url']))\n app.register_blueprint(frontend, url_prefix='') # no prefix here, take it directly from the config\n\n log.info('Adding resource /{0}/login-token to routes...'.format(API_PATH))\n # Set an initial login attribute for handling of verification in verify_password\n if 'login-token' not in app.config['customview_spec']:\n app.config['customview_spec']['login-token'] = {}\n app.config['customview_spec']['login-token']['_attributes'] = ['login_route']\n api.add_resource(LoginTokenApi, '/{0}/login-token'.format(API_PATH), endpoint='login-token')\n\n # Now make sure that an admin user exists. The initial UN/PW is in the config file and can be changed.\n from flask_squirrel.util import session_auth\n session_auth.User.check_initial_user(app)\n app.user = None\n\n return app",
"def main() -> None:\r\n # Configures and starts logging.\r\n app.config[\"logger\"] = start_logging()\r\n # Starts and runs Flask server on localhost:5000 if True.\r\n if app.config[\"localhost\"]:\r\n app.run()\r\n # Else starts and runs Flask server that listens on all IPs. Meaning, Flask\r\n # server can be accessed via executing machine's IP address, e.g.,\r\n # 100.68.241.2:5000 - so Flask server can be reached from machines in same\r\n # network or accessed via the public IP (remotely), e.g., 31.220.200.5:5000\r\n else:\r\n app.run(host=\"0.0.0.0\")",
"def main(self, options):\n import sys\n import getopt\n import errno\n from os import makedirs\n from os.path import dirname\n\n try:\n opts, args = getopt.getopt(options,\n \"lt:gsp:\", [\n \"port=\",\n \"view=\",\n \"list\",\n \"static=\",\n \"generate\",\n \"server\"])\n except getopt.GetoptError, err:\n print str(err)\n sys.exit(2)\n \n generate = False\n serve = False\n listfiles = False\n statichtml = 'static'\n view = False\n port = 8080\n \n for option, arg in opts:\n if option in (\"-g\",\"--generate\"):\n generate = True\n elif option in (\"-t\",\"--static\"):\n statichtml = arg\n elif option in (\"--view\"):\n view = arg\n elif option in (\"-s\",\"--serve\"):\n serve = True\n elif option in (\"-l\",\"--list\"):\n listfiles = True\n elif option in (\"-p\",\"--port\"):\n port = int(arg)\n else:\n assert False, \"unhandle option\"\n \n if generate or listfiles:\n for path, filename in self.genlist():\n \n path = path.replace('?','')\n \n if view and view != path:\n continue\n \n if listfiles:\n print path + \" <= \" + \\\n join(path[1:],'index.html') + ' <= ' + \\\n filename\n \n if generate:\n path = join(statichtml , path[1:], 'index.html')\n try:\n makedirs(dirname(path))\n except OSError as exc:\n if exc.errno == errno.EEXIST:\n pass\n else: raise\n print \"Generating \" + path\n static_file = open(path,'w')\n static_file.write(self.generatehtml(path))\n static_file.close()\n \n if serve:\n print \"Starting wsgi web server on port \" + str(port)\n from wsgiref.simple_server import make_server\n server = make_server('', port, self.wsgiapp())\n server.serve_forever()"
]
| [
"0.7129765",
"0.6890105",
"0.6876767",
"0.6762991",
"0.661767",
"0.659027",
"0.6555113",
"0.6488689",
"0.6371736",
"0.6314148",
"0.6302245",
"0.6280655",
"0.62723935",
"0.6235607",
"0.62289125",
"0.6226812",
"0.6221567",
"0.62200433",
"0.62091607",
"0.62004334",
"0.6199967",
"0.6169011",
"0.61430115",
"0.61394006",
"0.6125171",
"0.6112612",
"0.6106311",
"0.61006165",
"0.60975593",
"0.6068582"
]
| 0.71354246 | 0 |
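For reference, the app() method in the record above reads dotted keys such as 'flask.app', 'flask.secret_key' and 'corkscrew.views' from its settings object. The sketch below shows a hypothetical flattened settings mapping that would satisfy those lookups; every value is a placeholder, and the section.key flattening is an assumption inferred from the dotted names:

# Hypothetical flattened settings, mirroring "[section] key = value" .ini entries.
settings = {
    'flask.app': 'myapp',                        # required: passed to Flask(app_name)
    'flask.secret_key': 'change-me',             # required: session signing key
    'corkscrew.views': 'myapp.views.view_list',  # dotted path resolved via namedAny
    # optional keys read by the same method:
    # 'flask.template_path': 'templates/',
    # 'flask.before_request': 'myapp.hooks.before_request',
    # 'flask.after_request': 'myapp.hooks.after_request',
}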
A function that creates a decorator which will use "cachefile" for caching the results of the decorated function "fn". | def cached():
def decorator(fn): # define a decorator for a function "fn"
cache_name = fn.func_name
def wrapped(*args, **kwargs): # define a wrapper that will finally call "fn" with all arguments
if os.path.exists(cache_name):
with gzip.GzipFile(cache_name, 'rb') as cachehandle:
return pickle.load(cachehandle)
# execute the function with all arguments passed
res = fn(*args, **kwargs)
# write to cache file
with gzip.GzipFile(cache_name, 'wb') as cachehandle:
pickle.dump(res, cachehandle, pickle.HIGHEST_PROTOCOL)
return res
return wrapped
return decorator # return this "customized" decorator that uses "cachefile" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cached(cachefile):\n def decorator(fn): # define a decorator for a function \"fn\"\n def wrapped(*args, **kwargs): # define a wrapper that will finally call \"fn\" with all arguments \n # if cache exists -> load it and return its content\n if os.path.exists(cachefile):\n with open(cachefile, 'rb') as cachehandle:\n print(\"using cached result from '%s'\" % cachefile)\n return pickle.load(cachehandle)\n\n # execute the function with all arguments passed\n res = fn(*args, **kwargs)\n\n # write to cache file\n with open(cachefile, 'wb') as cachehandle:\n print(\"saving result to cache '%s'\" % cachefile)\n pickle.dump(res, cachehandle)\n\n return res\n\n return wrapped\n\n return decorator # return this \"customized\" decorator that uses \"cachefile\"",
"def cached(cachefile):\n def decorator(fn): # define a decorator for a function \"fn\"\n def wrapped(*args, **kwargs): # define a wrapper that will finally call \"fn\" with all arguments \n # if cache exists -> load it and return its content\n if os.path.exists(cachefile):\n with open(cachefile, 'rb') as cachehandle:\n print(\"using cached result from '%s'\" % cachefile)\n return pickle.load(cachehandle)\n\n # execute the function with all arguments passed\n res = fn(*args, **kwargs)\n\n # write to cache file\n with open(cachefile, 'wb') as cachehandle:\n print(\"saving result to cache '%s'\" % cachefile)\n pickle.dump(res, cachehandle)\n\n return res\n\n return wrapped\n\n return decorator # return this \"customized\" decorator that uses \"cachefile\"",
"def cache(filename):\n\n def decorator(func):\n def wrapper(*args, **kw):\n self = args[0]\n path = os.path.join(self.cache_dir, filename)\n time0 = time.time()\n if os.path.exists(path):\n result = joblib.load(path)\n cost = time.time() - time0\n logger.info('[cache] loading {} costs {:.2f}s'.format(path, cost))\n return result\n result = func(*args, **kw)\n cost = time.time() - time0\n logger.info('[cache] obtaining {} costs {:.2f}s'.format(path, cost))\n joblib.dump(result, path)\n return result\n\n return wrapper\n\n return decorator",
"def cache(fn):\n\tcache.c = dict()\n\tdef _fn(*args, **kwargs):\n\t\tkey = fn.__name__ + str(args) + str(kwargs)\n\t\ttry:\n\t\t\tret = cache.c[key]\n\t\texcept KeyError, e:\n\t\t\tret = fn(*args, **kwargs)\n\t\t\tcache.c[key] = ret\n\t\treturn ret\n\treturn _fn",
"def cache_function(self, func):\n\n @wraps(func)\n def wrapper(*args):\n if self.__log:\n self.__logger.info(f\"Called {func.__name__} with {args}\")\n fileName = self.__build_file_name(func, args)\n\n if os.path.isfile(fileName):\n # Result is already stored in cache\n # Retrieve return value from cache\n return self.__read_cache(fileName)\n else:\n # Result is not stored in cache\n # Run function\n if len(args) > 0:\n returnVal = func(args)\n else:\n returnVal = func()\n\n # Store value in cache\n self.__write_cache(fileName, returnVal)\n\n # Give return value\n return returnVal\n\n return wrapper",
"def cache(cache_path):\n def cache_decorator(generator):\n def wrapper():\n return cached(cache_path, generator)\n return wrapper\n return cache_decorator",
"def cached(cache_name, cache_dir=REPOSIROTY_CACHING_DIR):\n cache_dir = os.path.join(cache_dir, cache_name)\n assert_dir_exists(cache_dir)\n def decorator(fn): # define a decorator for a function \"fn\"\n def wrapped(key='KEY', *args, **kwargs): # define a wrapper that will finally call \"fn\" with all arguments\n gzip_cachefile = os.path.abspath(os.path.join(cache_dir, key + \".gzip\"))\n assert_dir_exists(os.path.dirname(gzip_cachefile))\n if os.path.exists(gzip_cachefile):\n with gzip.GzipFile(gzip_cachefile, 'rb') as cachehandle:\n return pickle.load(cachehandle)\n\n\n # execute the function with all arguments passed\n if fn.func_code.co_argcount == 0:\n res = fn(*args, **kwargs)\n else:\n res = fn(key, *args, **kwargs)\n\n # write to cache file\n with gzip.GzipFile(gzip_cachefile, 'wb') as cachehandle:\n pickle.dump(res, cachehandle, pickle.HIGHEST_PROTOCOL)\n\n with open(gzip_cachefile + '.json', 'w') as jsonhandle:\n jsonhandle.write(jsonpickle.encode(res))\n jsonhandle.close()\n return res\n\n return wrapped\n\n return decorator # return this \"customized\" decorator that uses \"cachefile\"",
"def _memorize(func):\n\n def _wrapper(self, *args, **kwargs):\n \"\"\"Wrapper to cache the function's output.\n \"\"\"\n if self.use_cache:\n cache = load_cache(self.cache_filename)\n original_key = generate_hash(\n self.__class__.__name__, func.__name__, args, kwargs)\n cache_key = hashlib.md5(original_key.encode('utf-8')).hexdigest()\n cached_val = cache.get(cache_key)\n if cached_val:\n return cached_val\n val = func(self, *args, **kwargs)\n if self.use_cache:\n cache.set(cache_key, val)\n return val\n return _wrapper",
"def decorate(func, *args, **kws):\n # setting cache expires for given decorated function,\n # if argument 'expire' is given.\n if expire:\n self.cache_expires[func] = expire\n else:\n self.cache_expires[func] = self.get_config().page_cache_expire\n if namespace_func:\n self.cache_nsfuncs[func] = namespace_func\n\n def do_cache(*args, **kws):\n \"\"\"\n A function works every time decorated functions are called.\n \"\"\"\n resp = self.response\n out = resp.out\n namespace = ''\n if self.cache_nsfuncs.get(func, None):\n namespace = self.cache_nsfuncs[func](self.request)\n p = urlsplit(self.request.url)[2]\n c = memcache.get(p, namespace)\n if c:\n # in case cache is found, use it \n # instead of rendering by calling function.\n out.write(c['body'])\n for k, i in c['hdr'].items():\n resp.headers[k] = i\n return\n\n r = func(*args, **kws)\n expire = self.cache_expires.get(func, 0)\n if expire == 0:\n return\n out.seek(0)\n try:\n p = urlsplit(self.request.url)[2]\n memcache.set(p, {'hdr':resp.headers,'body':out.read()},\n expire, namespace=namespace)\n logging.debug('%s is cahed' % p)\n except:\n memcache.flush_all()\n logging.debug('memcache is flashed.')\n return do_cache",
"def cache(filename, *argPatterns, **kwargPatterns):\n if callable(filename):\n # If we've been given a function, just use the function's name\n # as the filename.\n return cache(filename.func_name, *argPatterns, **kwargPatterns)(filename)\n \n def decorator(f):\n # this is the actual decorator\n # f is the function to be modified\n \n @wraps(f)\n def cacher(base, *args, **kwargs):\n # create the full path to the cache file\n cachefile = filename\n if len(argPatterns):\n cachefile += '.'+'.'.join([a%args[i] \n for i,a in enumerate(argPatterns)])\n pass\n \n keys = kwargPatterns.keys()\n if len(keys):\n keys.sort()\n cachefile += '.'+'.'.join([kwargPatterns[k]%kwargs[k] \n for k in keys])\n pass\n \n cacheFN = os.path.join(base, cachefile)\n \n if os.path.exists(cacheFN):\n ans = cPickle.load(file(cacheFN))\n else:\n ans = f(base, *args, **kwargs)\n cPickle.dump(ans,\n file(cacheFN, 'wb'),\n protocol=2)\n pass\n return ans\n \n # return the wrapped function\n return cacher\n\n return decorator",
"def cached(key):\n\n def decorator(fn):\n def decorated(cls):\n value = cls.get_cache(key)\n if value is not None:\n return value\n else:\n value = fn(cls)\n cls.set_cache(key, value)\n return value\n\n return decorated\n\n return decorator",
"def cached(func):\n cache_dct = {}\n\n @wraps(func)\n def _lru_cache_decorator(*args):\n key = args\n if key in cache_dct:\n return cache_dct[key]\n else:\n cache_dct[key] = func(*args)\n return cache_dct[key]\n return _lru_cache_decorator",
"def cachefor(name):\n def decorator(func):\n assert name not in cachefuncs\n cachefuncs[name] = func\n return func\n return decorator",
"def cached(*arg: Callable) -> Any:\n fn = arg and arg[0]\n if not callable(fn):\n raise TypeError(\n '\"cached\" decorator must be used without arguments.') from None\n\n @wraps(fn)\n def wrapper(obj: object, *, force=False) -> Any:\n cache_name = '_' + fn.__name__\n if force:\n with suppress(AttributeError):\n delattr(obj, cache_name)\n try:\n return getattr(obj, cache_name)\n except AttributeError:\n val = fn(obj)\n setattr(obj, cache_name, val)\n return val\n\n return wrapper",
"def pickle_cache(fname, overwrite=False):\n def decorator(fn):\n def decorated(*args, **kwargs):\n if (not overwrite) and os.path.exists(fname):\n with open(fname, 'rb') as f:\n return pickle.load(f)\n else:\n result = fn(*args, **kwargs)\n with open(fname, 'wb') as f:\n pickle.dump(result, f)\n return result\n return decorated\n\n return decorator",
"def cache(func):\n results = {}\n\n @functools.wraps(func)\n def __cache(*args): # changed function\n nonlocal results # if this function call with parameters that already used\n if args in results.keys(): # then answer gets from dictionary\n # print(\"{} - got from cache\".format(args))\n rez = results[args]\n else:\n rez = func(*args)\n results[args] = rez\n return rez\n\n return __cache",
"def one_use(func):\n attribute = \"_cache_\" + func.__name__\n\n @property\n @functools.wraps(func)\n def decorated(self):\n if not hasattr(self, attribute):\n setattr(self, attribute, func(self))\n return getattr(self, attribute)\n return decorated",
"def memoize(func):\r\n func.cache = {}\r\n return decorator(_memoize, func)",
"def cache(self, func=None, ignore=None, verbose=None,\r\n mmap_mode=False):\r\n if func is None:\r\n # Partial application, to be able to specify extra keyword\r\n # arguments in decorators\r\n return functools.partial(self.cache, ignore=ignore,\r\n verbose=verbose, mmap_mode=mmap_mode)\r\n if self.cachedir is None:\r\n return NotMemorizedFunc(func)\r\n if verbose is None:\r\n verbose = self._verbose\r\n if mmap_mode is False:\r\n mmap_mode = self.mmap_mode\r\n if isinstance(func, MemorizedFunc):\r\n func = func.func\r\n return MemorizedFunc(func, cachedir=self.cachedir,\r\n mmap_mode=mmap_mode,\r\n ignore=ignore,\r\n compress=self.compress,\r\n verbose=verbose,\r\n timestamp=self.timestamp)",
"def cache(func):\n storage = {}\n\n def wrapper(*args, **kwargs):\n key = str(*args, **kwargs)\n if storage.get(key):\n return storage[key]\n else:\n result = func(*args, **kwargs)\n storage[key] = result\n return result\n\n return wrapper",
"def cached(backend, **kwargs):\n def decorator(fn, key=None, key_generator=None, set_kwargs=None):\n if key is None:\n key = generate_function_key(fn)\n\n if key_generator is None:\n key_generator = generate_unique_key\n\n if set_kwargs is None:\n set_kwargs = {}\n\n @functools.wraps(fn)\n def inner(*args, **kwargs):\n unique_key = '%s:%s' % (key, key_generator(*args, **kwargs))\n\n # If the value is `None` from the cache, then generate the real\n # value and store it.\n value = backend.get(unique_key)\n if value is None:\n value = fn(*args, **kwargs)\n backend.set(unique_key, value, **set_kwargs)\n\n return value\n return inner\n\n return functools.partial(decorator, **kwargs)",
"def cache(func):\n\n def func_wrapper(self, hook=None, result_name=None):\n \"\"\"Wrapper to cache the result of a function.\"\"\"\n if self._cache is not None:\n c = self._cache.copy()\n c['cache'] = True\n return c\n else:\n ret = func(self, hook=hook, result_name=result_name)\n if not isinstance(ret, dict):\n raise TypeError( # pragma: no cover\n \"A dictionary was expected not '{0}'.\\nIssue with class '{1}'\"\n \"\".format(\n type(ret), type(self)))\n self._cache = ret\n ret = ret.copy()\n ret['cache'] = False\n return ret\n return func_wrapper",
"def cached(func):\n return _lru_cache(None)(func)",
"def render_cache(fname, overwrite=False, verbose=True):\n def decorator(fn):\n def decorated(*args, **kwargs):\n if (not overwrite) and os.path.exists(fname):\n if verbose:\n print(f'[↑] {fname}')\n return mi.Bitmap(fname)\n else:\n result = fn(*args, **kwargs)\n mi.Bitmap(result).write(fname)\n if verbose:\n print(f'[+] {fname}')\n return result\n return decorated\n\n return decorator",
"def cache_result(func):\n\n @wraps(func)\n def with_cache(*args, **kwargs):\n \"\"\"\n Cached function\n \"\"\"\n key = '{}{}{}'.format(\n hash(func), hash(args), hash(frozenset(kwargs.items())))\n\n cached_result = cache.get(key)\n if cached_result is not None:\n return cached_result if cached_result != 'None' else None\n result = func(*args, **kwargs)\n cache.set(key, result if result is not None else 'None')\n\n return result\n\n return with_cache",
"def memoized(f):\n GlobalCache._caches[f] = {}\n GlobalCache._locks[f] = RLock()\n\n return decorator(GlobalCache.memoize, f)",
"def cache(func, cache, invalid_after):\n\n def cache_wrapper(*args, **kwargs):\n call_id = str(func) + str(args)\n try:\n return_value = cache[call_id]\n if ctime() - return_value[0] > invalid_after:\n raise Exception\n else:\n return return_value[1]\n except:\n return_value = func(*args, **kwargs)\n cache[call_id] = (ctime(), return_value)\n return return_value\n return cache_wrapper",
"def Decorator(func):\n\n # First, decorate the function with Cached.\n decorated = cache_decorator(func)\n\n # Then, decorate it with the conditional cache logic.\n @functools.wraps(decorated)\n def Wrapped(*args, **kwargs):\n result = decorated(*args, **kwargs)\n if not cache_predicate(args, kwargs, result):\n decorated.DeleteCache(*args, **kwargs)\n return result\n\n return Wrapped",
"def cache(timeout):\n def cached(func, *args, **kwargs):\n \"\"\"\n Cache data wrapper.\n \"\"\"\n lock = threading.Lock()\n key = func.__name__\n\n with lock:\n if key in CACHE:\n age = time() - CACHE[key]['time']\n if age < timeout:\n return CACHE[key]['result']\n\n result = func(*args, **kwargs)\n CACHE[key] = {\n 'result': result,\n 'time': time()\n }\n return result\n return decorator(cached)",
"def run(self, func, *args):\n @wraps(func)\n def cached_func(*args):\n \"\"\"Run wise cache function\"\"\"\n try: # fails if cache is not instantiated\n return self.data['run'][func.__name__]\n except KeyError:\n value = func(*args)\n self.data['run'][func.__name__] = value\n return value\n return cached_func"
]
| [
"0.8379626",
"0.8379626",
"0.8296271",
"0.8254688",
"0.82139444",
"0.7880328",
"0.78647965",
"0.76926875",
"0.765407",
"0.7627374",
"0.7599777",
"0.75677496",
"0.754151",
"0.7532163",
"0.75206435",
"0.75165445",
"0.7485244",
"0.74727625",
"0.744931",
"0.7416763",
"0.7397632",
"0.7365121",
"0.7342263",
"0.73183006",
"0.73105544",
"0.72641927",
"0.72370774",
"0.72057873",
"0.72020805",
"0.718674"
]
| 0.84935564 | 0 |
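For reference, the positive document above caches a function's pickled return value in a gzip file named after the function (Python 2 style, fn.func_name). The sketch below restates the same pattern for Python 3 as a direct decorator rather than the zero-argument factory used in the record; the cache filename suffix and the example workload are assumptions, and, like the record's version, the cache key ignores the call arguments:

import functools, gzip, os, pickle

def cached(fn):
    # Cache fn's pickled result in a gzip file keyed by the function name.
    cache_name = fn.__name__ + '.cache.gz'
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):
        if os.path.exists(cache_name):
            with gzip.GzipFile(cache_name, 'rb') as fh:
                return pickle.load(fh)
        res = fn(*args, **kwargs)  # compute once...
        with gzip.GzipFile(cache_name, 'wb') as fh:
            pickle.dump(res, fh, pickle.HIGHEST_PROTOCOL)  # ...then persist
        return res
    return wrapped

@cached
def expensive_sum():
    return sum(i * i for i in range(10 ** 6))

print(expensive_sum())  # first call computes and writes expensive_sum.cache.gz
print(expensive_sum())  # second call loads the pickled result from disk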
returns an agent's resource url The resource will be looked for in the agent's properties. If it isn't found, it will look for it in the default location. | def get_agent_resource_url(ctx, agent_config, resource):
if agent_config.get(resource):
origin = utils.get_manager_file_server_blueprints_root_url() + \
'/' + ctx.blueprint.id + '/' + agent_config[resource]
else:
resource_path = DEFAULT_AGENT_RESOURCES.get(resource)
if not resource_path:
raise NonRecoverableError('no such resource: {0}'.format(resource))
if resource == 'agent_package_path':
origin = utils.get_manager_file_server_url() + \
resource_path.format(agent_config['distro'],
agent_config['distro_codename'])
else:
origin = utils.get_manager_file_server_url() + \
resource_path.format(agent_config['distro'])
ctx.logger.debug('resource origin: {0}'.format(origin))
return origin | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_agent_resource_local_path(ctx, agent_config, resource):\n if agent_config.get(resource):\n origin = agent_config[resource]\n else:\n resource_path = DEFAULT_AGENT_RESOURCES.get(resource)\n if not resource_path:\n raise NonRecoverableError('no such resource: {0}'.format(resource))\n if resource == 'agent_package_path':\n origin = resource_path.format(agent_config['distro'],\n agent_config['distro_codename'])\n else:\n origin = resource_path.format(agent_config['distro'])\n ctx.logger.debug('resource origin: {0}'.format(origin))\n return origin",
"def get_resource_url(self, resource_name):\r\n return self.__resource_meta.get(resource_name,{}).get(\"resource\", None)",
"def resource_uri(self) -> Optional[str]:\n return pulumi.get(self, \"resource_uri\")",
"def resource_uri(self) -> Optional[str]:\n return pulumi.get(self, \"resource_uri\")",
"def get_url(self, resource_name):\r\n return self.__resource_meta.get(resource_name,{}).get(\"url\", None)",
"def resource_url(self, resource):\n raise NotImplementedError(\"Runtime needs to provide resource_url()\")",
"def get_url(self):\n return self.resource.url",
"def resource_uri(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_uri\")",
"def resource_uri(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_uri\")",
"def resource_uri(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_uri\")",
"def resource_uri(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_uri\")",
"def target_resource_path(self) -> Optional[str]:\n return pulumi.get(self, \"target_resource_path\")",
"def url(self) -> str:\n if \"main\" not in self._resources:\n self._initialize()\n return self._resources[\"main\"].url",
"def get_reference(self):\n return self.resource.url",
"def getResource(resname, loc = None):\n # check the HOME for personal config file\n prv_filename = os.path.join(os.getenv(\"HOME\"), \".aphla\", resname)\n if os.path.exists(prv_filename):\n return prv_filename\n elif loc and resource_exists(loc, resname):\n # use the config within distribution\n return resource_filename(loc, resname)\n else:\n return None",
"def get_resource(self):\n from rowgenerators import parse_app_url # Here, to break an import cycle\n\n self._resource = self._downloader.download(self.inner)\n\n\n ru = parse_app_url(self._resource.sys_path,\n downloader=self.downloader,\n scheme_extension=self.scheme_extension,\n **self.frag_dict)\n\n\n return ru",
"def referral_resource(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"referral_resource\")",
"def get_resource(resource_path):\n\n return pkg_resources.resource_string(\n cloudify_agent.__name__,\n os.path.join('resources', resource_path)\n )",
"def resource(self):\n return self.properties.get('resource',\n Entity(self.context, ResourcePath(\"resource\", self.resource_path)))",
"def source_resource_path(self) -> Optional[str]:\n return pulumi.get(self, \"source_resource_path\")",
"def get_absolute_resource_path(resource_path):\n return pkg_resources.resource_filename(\n cloudify_agent.__name__,\n os.path.join('resources', resource_path)\n )",
"def resource_reference(self):\n return self.properties.get(\"resourceReference\", ResourceReference())",
"def get_uri(self):\n return self.url",
"def get_url(self):\n if self.url:\n return self.url\n # if we have a uuid and happen to know the URL for it, use that\n elif self.uuid and PathIdentifier.repo_hints.has_key(self.uuid):\n self.url = PathIdentifier.repo_hints[self.uuid] + self.repo_relative_path\n PathIdentifier.locobjs[self.url] = self\n return self.url\n # if we've only seen one rep, use that (a guess, but an educated one)\n elif not self.uuid and len(PathIdentifier.repo_hints) == 1:\n uuid, root = PathIdentifier.repo_hints.items()[0]\n if uuid:\n self.uuid = uuid\n PathIdentifier.locobjs['uuid://%s%s' % (uuid, self.repo_relative_path)] = self\n self.url = root + self.repo_relative_path\n PathIdentifier.locobjs[self.url] = self\n report(\"Guessing that '%s' refers to '%s'\" % (self, self.url))\n return self.url\n else:\n error(\"Cannot determine URL for '%s'; \" % self +\n \"Explicit source argument (-S/--source) required.\\n\")",
"def uri(self) -> Optional[str]:\n return pulumi.get(self, \"uri\")",
"def getResource(self):\n pass;",
"def getResource(self):\n return self.__resource;",
"def resource_url(self):\n return self.portal_url + \"/\" + \"++resource++plonecommunity.app\"",
"def full_url(resource):\r\n # if (url/resource == '127.0.0.1':)\r\n if resource == '/' or resource == ' ':\r\n url = \"{0}{1}\".format(ROOT_DIRECTORY, URL_TEST)\r\n # else (if url/resource == 'Specific resource')\r\n else:\r\n url = \"{0}{1}\".format(ROOT_DIRECTORY, str(resource).replace('/', '\\\\'))\r\n print(f'the client request = {url}')\r\n return url",
"def get_url(self):\n raise NotImplementedError(\"This asset does not have a URL\")"
]
| [
"0.7100387",
"0.67178667",
"0.6693364",
"0.6693364",
"0.66524416",
"0.6572719",
"0.6500711",
"0.6478797",
"0.6478797",
"0.6478797",
"0.6478797",
"0.6375834",
"0.62469697",
"0.62379724",
"0.61198115",
"0.606946",
"0.6031326",
"0.6013786",
"0.5982946",
"0.5896016",
"0.58576375",
"0.58294564",
"0.5816187",
"0.5799615",
"0.5795339",
"0.5791988",
"0.5738773",
"0.57346666",
"0.5684517",
"0.56768364"
]
| 0.81056666 | 0 |
returns an agent's resource path. The resource will be looked for in the agent's properties. If it isn't found, it will look for it in the default location. | def get_agent_resource_local_path(ctx, agent_config, resource):
if agent_config.get(resource):
origin = agent_config[resource]
else:
resource_path = DEFAULT_AGENT_RESOURCES.get(resource)
if not resource_path:
raise NonRecoverableError('no such resource: {0}'.format(resource))
if resource == 'agent_package_path':
origin = resource_path.format(agent_config['distro'],
agent_config['distro_codename'])
else:
origin = resource_path.format(agent_config['distro'])
ctx.logger.debug('resource origin: {0}'.format(origin))
return origin | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_agent_resource_url(ctx, agent_config, resource):\n if agent_config.get(resource):\n origin = utils.get_manager_file_server_blueprints_root_url() + \\\n '/' + ctx.blueprint.id + '/' + agent_config[resource]\n else:\n resource_path = DEFAULT_AGENT_RESOURCES.get(resource)\n if not resource_path:\n raise NonRecoverableError('no such resource: {0}'.format(resource))\n if resource == 'agent_package_path':\n origin = utils.get_manager_file_server_url() + \\\n resource_path.format(agent_config['distro'],\n agent_config['distro_codename'])\n else:\n origin = utils.get_manager_file_server_url() + \\\n resource_path.format(agent_config['distro'])\n\n ctx.logger.debug('resource origin: {0}'.format(origin))\n return origin",
"def target_resource_path(self) -> Optional[str]:\n return pulumi.get(self, \"target_resource_path\")",
"def get_resource(resource_path):\n\n return pkg_resources.resource_string(\n cloudify_agent.__name__,\n os.path.join('resources', resource_path)\n )",
"def get_absolute_resource_path(resource_path):\n return pkg_resources.resource_filename(\n cloudify_agent.__name__,\n os.path.join('resources', resource_path)\n )",
"def get_resource_path():\n return os.path.join(os.path.dirname(__file__), \"resources\") + os.path.sep",
"def source_resource_path(self) -> Optional[str]:\n return pulumi.get(self, \"source_resource_path\")",
"def getResource(resname, loc = None):\n # check the HOME for personal config file\n prv_filename = os.path.join(os.getenv(\"HOME\"), \".aphla\", resname)\n if os.path.exists(prv_filename):\n return prv_filename\n elif loc and resource_exists(loc, resname):\n # use the config within distribution\n return resource_filename(loc, resname)\n else:\n return None",
"def resource_path(self, resource):\n return str(self.path.joinpath(resource))",
"def resource_path(self, resource):\n return str(self.path.joinpath(resource))",
"def resource(self):\n return self.properties.get('resource',\n Entity(self.context, ResourcePath(\"resource\", self.resource_path)))",
"def resource_uri(self) -> Optional[str]:\n return pulumi.get(self, \"resource_uri\")",
"def resource_uri(self) -> Optional[str]:\n return pulumi.get(self, \"resource_uri\")",
"def get_resource_base_path(self): # real signature unknown; restored from __doc__\n return \"\"",
"def resource_path(name):\n return os.path.join(\n os.path.dirname(__file__), 'images', 'resource', name)",
"def resource_path(self, resource):\n # type: (Text) -> Text\n # This deliberately raises FileNotFoundError instead of\n # NotImplementedError so that if this method is accidentally called,\n # it'll still do the right thing.\n raise FileNotFoundError",
"def resource_path(self, relative_path):\r\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\r\n return os.path.join(base_path, relative_path)",
"def get_recipe_resource():\n return os.getenv(\"DKU_CUSTOM_RESOURCE_FOLDER\")",
"def get_recipe_resource():\n return os.getenv(\"SKU_CUSTOM_RECIPE_RESOURCE_FOLDER\")",
"def resource_uri(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_uri\")",
"def resource_uri(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_uri\")",
"def resource_uri(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_uri\")",
"def resource_uri(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_uri\")",
"def resourcePath(relative, dirname=\"data\"):\n # first look in pyinstaller bundle\n if hasattr(sys, \"_MEIPASS\"):\n path = os.path.join(sys._MEIPASS, dirname)\n \n else:\n # then look in py2app bundle\n path = os.environ.get(\"RESOURCEPATH\", None)\n if path is None:\n # then look in source code directory\n path = os.path.join(RESOURCE_BASE, dirname)\n \n path = os.path.join(path, relative)\n \n return path",
"def resource_path(relative_path):\n base_path= getattr(sys,'MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n return os.path.join(base_path, relative_path)",
"def get_resources_abs_path() -> pathlib.Path:\n return PathManager._ROOT.joinpath(\n PathManager._TILINGS_GUI, PathManager._RESOURCES\n )",
"def get_resource_dir(cls) -> str:\n return os.path.join(\n os.path.realpath(os.path.dirname(__file__)),\n os.pardir,\n os.pardir,\n os.pardir,\n \"gem5\",\n \"resources\",\n )",
"def resource_path(relative_path):\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n return os.path.join(base_path, relative_path)",
"def resource_path(relative_path):\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n return os.path.join(base_path, relative_path)",
"def resource_path(relative_path):\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n return os.path.join(base_path, relative_path)",
"def resource_path(relative_path):\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n return os.path.join(base_path, relative_path)"
]
| [
"0.7533681",
"0.6860353",
"0.6840019",
"0.6802987",
"0.67962813",
"0.6623386",
"0.6526511",
"0.6402395",
"0.6402395",
"0.63555926",
"0.6345799",
"0.6345799",
"0.6267637",
"0.62424225",
"0.620157",
"0.6197518",
"0.6171627",
"0.61677915",
"0.61616135",
"0.61616135",
"0.61616135",
"0.61616135",
"0.61353284",
"0.61048627",
"0.60768193",
"0.60575753",
"0.6052874",
"0.6052874",
"0.6052874",
"0.6052874"
]
| 0.7694797 | 0 |
Parses the source file to return the packages with their current versions. | def parse_versions(self, source):
config = VersionsConfigParser()
has_read = config.read(source)
if not has_read:
logger.warning("'%s' cannot be read.", source)
return []
try:
versions = config.items('versions')
except NoSectionError:
logger.debug(
"'versions' section not found in %s.",
source
)
return []
logger.info(
'- %d versions found in %s.',
len(versions), source
)
return versions | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def extract_version_info():\n version = None\n if os.path.exists('.version'):\n with open('.version') as f:\n line = f.read().rstrip()\n log.info('.version contains \"%s\"', line)\n if line.startswith('openafs-'):\n # Extract version from the git tag name.\n version = re.sub('openafs-[^-]*-', '', line).replace('_', '.')\n elif line.startswith('BP-'):\n # Branch point tags do not contain the version number.\n log.info('.version file has old branch point tag name.')\n else:\n # Use the given version string.\n version = line\n if not version:\n # Unable to lookup version from the .version file, try to extract the\n # version from the source directory name.\n root = os.path.basename(os.path.abspath('.'))\n m = re.match(r'openafs-(.*)', root)\n if m:\n version = m.group(1)\n if not version:\n module.fail_json(msg='Unable to determine version.')\n\n # Determine package version and release from the OpenAFS version.\n m1 = re.match(r'(.*)(pre[0-9]+)', version) # prerelease\n m2 = re.match(r'(.*)dev', version) # development\n m3 = re.match(r'(.*)-([0-9]+)-(g[a-f0-9]+)$', version) # development\n m4 = re.match(r'(.*)-([a-z]+)([0-9]+)', version) # custom\n if m1:\n v = m1.group(1)\n r = \"0.{0}\".format(m1.group(2))\n elif m2:\n v = m2.group(1)\n r = \"0.dev\"\n elif m3:\n v = m3.group(1)\n r = \"{0}.{1}\".format(m3.group(2), m3.group(3))\n elif m4:\n v = m4.group(1).replace('-', '')\n r = \"1.2.{0}.{1}\".format(m4.group(3), m4.group(2))\n else:\n v = version # standard release\n r = \"1\" # increment when repackaging this version\n # '-' are used as delimiters by rpm.\n v = v.replace('-', '_')\n r = r.replace('-', '_')\n return dict(openafs_version=version, package_version=v, package_release=r)",
"def __parsePackages__(self, f):\n\n\t\tp = apt_pkg.ParseTagFile(f)\n\n\t\t# Just load into memory the fields that are going to be useful\n\t\twhile p.Step() == 1:\n\t\t\tpkg = p.Section['Package']\n\n\t\t\tself.packages[pkg] = {}\n\n\t\t\tfor field in p.Section.keys():\n\t\t\t\tif field == 'Package':\n\t\t\t\t\tpass \n\t\t\t\telif ['Depends', 'Recommends', 'Suggests', 'Enhances', 'Pre-Depends', 'Conflicts', 'Provides'].count(field):\n\t\t\t\t\tvalue = p.Section.get(field, \"\")\n\t\t\t\t\tself.packages[pkg][field] = apt_pkg.ParseDepends(value)\n\t\t\t\telif ['Size', 'Installed-Size'].count(field):\n\t\t\t \t\tvalue = p.Section.get(field, \"0\")\n\t\t\t \t\tself.packages[pkg][field] = int(value)\n\t\t\t\telif field == 'Source':\n\t\t\t\t\tsrc = p.Section.get(field, pkg)\n\t\t\t\t\tidx = src.find('(')\n\t\t\t\t\tif idx != -1:\n\t\t\t\t\t\tsrc = src[:idx].strip()\n\t\t\t\t\tself.packages[pkg][field] = src\n\t\t\t\telif field == 'Provides':\n\t\t\t\t\tself.packages[pkg][\"Provides\"] = apt_pkg.ParseDepends(p.Section.get(\"Provides\", \"\"))\n\t\t\t\telse:\n\t\t\t\t\tself.packages[pkg][field] = p.Section.get(field, '')\n\n\t f.close()",
"def packages():",
"def version_from_srcinfo(self) -> str:\n\n if self.pkgbase is None:\n logging.error(\"base package name of {} not known\".format(self.name))\n raise InvalidInput(\"base package name of {} not known\".format(self.name))\n\n package_dir = os.path.join(Package.cache_dir, self.pkgbase)\n if not os.path.isdir(package_dir):\n logging.error(\"package dir of {} does not exist\".format(self.name))\n raise InvalidInput(\"package dir of {} does not exist\".format(self.name))\n\n src_lines = makepkg([\"--printsrcinfo\"], True, package_dir)\n pkgver = None\n pkgrel = None\n epoch = None\n try:\n for line in src_lines:\n if \"pkgver =\" in line:\n pkgver = line.split(\"=\")[1].strip()\n elif \"pkgrel =\" in line:\n pkgrel = line.split(\"=\")[1].strip()\n elif \"epoch =\" in line:\n epoch = int(line.split(\"=\")[1].strip())\n except ValueError:\n logging.error(\n \".SRCINFO of {} is malformed. It includes non integer values for the epoch.\".format(self.name)\n )\n raise InvalidInput(\n \".SRCINFO of {} is malformed. It includes non integer values for the epoch.\".format(self.name)\n )\n\n version = \"\"\n if epoch is not None and epoch > 0:\n version += str(epoch) + \":\"\n if pkgver is not None:\n version += pkgver\n else:\n logging.info(\"version of {} must be there\".format(self.name))\n raise InvalidInput(\"version of {} must be there\".format(self.name))\n if pkgrel is not None:\n try:\n float(pkgrel)\n except ValueError:\n logging.error(\n \".SRCINFO of {} is malformed. It includes non float values for the pkgrel.\".format(self.name)\n )\n raise InvalidInput(\n \".SRCINFO of {} is malformed. It includes non float values for the pkgrel.\".format(self.name)\n )\n\n version += \"-\" + pkgrel\n\n return version",
"def list_sources(topdir, version):\n sources = []\n with open(os.path.join(topdir, 'SPECS', 'openafs.spec'), 'r') as spec:\n for line in spec.readlines():\n line = line.rstrip()\n m = re.match(r'Source[\\d]+: (.*)', line)\n if m:\n source = m.group(1).replace(r'%{afsvers}',\n version['openafs_version'])\n sources.append(os.path.basename(source))\n return sources",
"def get_versions():\n version_py = os.path.join(os.path.split(__file__)[0], \"src/osmium/version.py\")\n v = {}\n with open(version_py) as version_file:\n # Execute the code in version.py.\n exec(compile(version_file.read(), version_py, 'exec'), v)\n\n return v['pyosmium_release'], v['libosmium_version'], v['protozero_version']",
"def get_package_names_and_versions(requirements_file: str) -> list:\n with_ver_reqlist = {}\n\n for package in requirements_file:\n split_location = package.find(\"==\")\n if split_location > 0:\n package_name = package[:split_location].lower()\n pakcage_version = package[split_location+2:]\n\n with_ver_reqlist[package_name] = pakcage_version\n else:\n latest_version = get_latest_version_number(package)\n with_ver_reqlist[package] = latest_version\n\n return with_ver_reqlist",
"def ParsePkgInfoFile(filename, valid_keys=None, required_keys=None):\n with open(filename) as f:\n return ParsePkgInfo(f.read(), filename, valid_keys, required_keys)",
"def _parse_freeze(text):\n result = []\n for line in text.splitlines():\n line = line.strip()\n if line.startswith('-'):\n raise Exception(\"Irregular line: %s\" % line)\n if line.startswith('#'):\n continue\n if not line:\n continue\n package, version = line.split('==')[:2]\n result.append((package, version))\n return result",
"def parse_source_file(self, filepath):\n raise NotImplementedError('Not Implemented')",
"def __extract_package_version(package_string):\n # remove leading whitespace\n package_string = package_string.strip()\n # create a re parser\n compil = re.compile(r'(?P<name>.+(-[^-])*)-(?P<version>.+)')\n # search package name and version\n search = compil.search(package_string)\n # retrieve result as list\n output = search.groupdict()\n\n return output",
"def parse_package(package_path):\n\n if DEBUG: print \"Parsing package\",package_path\n\n package_path = os.path.normpath(package_path)\n dir,file = os.path.split(package_path)\n if dir == \"\":\n dir = \".\"\n return parse_subpackage(dir,file)",
"def get_imports(source_or_file):\n tree = get_ast(source_or_file)\n lister = ImportLister().visit(tree)\n return lister.data",
"def extract_packages_and_versions_including_duplicates(self, output):\n self.composite_logger.log_debug(\"\\nExtracting package and version data...\")\n packages = []\n versions = []\n package_extensions = ['.x86_64', '.noarch', '.i686']\n\n def is_package(chunk):\n # Using a list comprehension to determine if chunk is a package\n return len([p for p in package_extensions if p in chunk]) == 1\n\n lines = output.strip().split('\\n')\n\n for line_index in range(0, len(lines)):\n line = re.split(r'\\s+', lines[line_index].strip())\n next_line = []\n\n if line_index < len(lines) - 1:\n next_line = re.split(r'\\s+', lines[line_index + 1].strip())\n\n # If we run into a length of 3, we'll accept it and continue\n if len(line) == 3 and is_package(line[0]):\n packages.append(self.get_product_name(line[0]))\n versions.append(line[1])\n # We will handle these two edge cases where the output is on\n # two different lines and treat them as one line\n elif len(line) == 1 and len(next_line) == 2 and is_package(line[0]):\n packages.append(self.get_product_name(line[0]))\n versions.append(next_line[0])\n line_index += 1\n elif len(line) == 2 and len(next_line) == 1 and is_package(line[0]):\n packages.append(self.get_product_name(line[0]))\n versions.append(line[1])\n line_index += 1\n else:\n self.composite_logger.log_debug(\" - Inapplicable line (\" + str(line_index) + \"): \" + lines[line_index])\n\n return packages, versions",
"def _version(self):\n # TODO: Can we delete this method and just print the line from the\n # reqs file verbatim instead?\n def version_of_archive(filename, package_name):\n # Since we know the project_name, we can strip that off the left, strip\n # any archive extensions off the right, and take the rest as the\n # version.\n for ext in ARCHIVE_EXTENSIONS:\n if filename.endswith(ext):\n filename = filename[:-len(ext)]\n break\n # Handle github sha tarball downloads.\n if is_git_sha(filename):\n filename = package_name + '-' + filename\n if not filename.lower().replace('_', '-').startswith(package_name.lower()):\n # TODO: Should we replace runs of [^a-zA-Z0-9.], not just _, with -?\n give_up(filename, package_name)\n return filename[len(package_name) + 1:] # Strip off '-' before version.\n\n def version_of_wheel(filename, package_name):\n # For Wheel files (http://legacy.python.org/dev/peps/pep-0427/#file-\n # name-convention) we know the format bits are '-' separated.\n whl_package_name, version, _rest = filename.split('-', 2)\n # Do the alteration to package_name from PEP 427:\n our_package_name = re.sub(r'[^\\w\\d.]+', '_', package_name, re.UNICODE)\n if whl_package_name != our_package_name:\n give_up(filename, whl_package_name)\n return version\n\n def give_up(filename, package_name):\n raise RuntimeError(\"The archive '%s' didn't start with the package name '%s', so I couldn't figure out the version number. My bad; improve me.\" %\n (filename, package_name))\n\n get_version = (version_of_wheel\n if self._downloaded_filename().endswith('.whl')\n else version_of_archive)\n return get_version(self._downloaded_filename(), self._project_name())",
"def parse_version(module_file):\n f = open(module_file)\n s = f.read()\n f.close()\n match = re.findall(\"__version__ = '([^']+)'\", s)\n return match[0]",
"def parse_controlfile(path: Path) -> Dict[str, BinaryPackage]:\n source = \"\"\n build_deps = []\n pkgs = {}\n file = Path(path / \"debian/control\")\n with file.open() as control:\n for pg in deb822.Sources.iter_paragraphs(control):\n if \"Source\" in pg:\n source = pg[\"Source\"]\n if \"Build-Depends\" in pg:\n build_deps.extend(\n [\n re.sub(r\" \\([^\\)]*\\)\", \"\", s.strip()).strip()\n for s in pg[\"Build-Depends\"].split(\",\")\n ]\n )\n elif \"Package\" in pg:\n if source != \"\":\n pkgs[pg[\"Package\"]] = BinaryPackage(\n pg[\"Package\"], source, build_deps, path.stem\n )\n else:\n pkgs[pg[\"Package\"]] = BinaryPackage(\n pg[\"Package\"], pg[\"Package\"], build_deps, path.stem\n )\n return pkgs",
"def get_source(self, filename):\n # The main reason we care about ensuring we have the latest version\n # of a given file is for the 'avant-idle' project where we could\n # 'edit and run' multiple times a given file. We need to ensure that\n # the content shown by the traceback is accurate.\n\n if os.path.isfile(filename):\n last_modified = os.path.getmtime(filename) # modification time\n if filename not in self.cache:\n source, lines = self._get_file_source(filename)\n if source is not None:\n self.add(filename, source)\n elif filename in self.ages and last_modified != self.ages[filename]:\n # modified; get fresh copy\n source, lines = self._get_file_source(filename)\n if source is not None:\n self.add(filename, source)\n else: # had problems retrieving fresh copy\n source = self.cache[filename]\n lines = source.split(\"\\n\")\n else:\n source = self.cache[filename]\n lines = source.split(\"\\n\")\n elif filename in self.cache:\n source = self.cache[filename]\n lines = source.split(\"\\n\")\n else:\n lines = []\n\n return lines",
"def test_parse_file_source():\n from bs4 import BeautifulSoup\n from scraper import read_from_file, parse_source\n content, encoding = read_from_file(TEST_FILE)\n result = parse_source(content, encoding)\n assert isinstance(result, BeautifulSoup)",
"def semver_from(changelog: Path) -> Version:\n with open(changelog) as f:\n matches = SEMVER_RE.finditer(f.read())\n versions: List[Version] = []\n is_unreleased = False\n for match in matches:\n version = match.groupdict()[\"version\"]\n if version.lower() == \"unreleased\":\n is_unreleased = True\n else:\n versions.append(Version.parse(version))\n\n versions.sort()\n latest = versions[-1]\n print(latest)\n return latest.bump_prerelease() if is_unreleased else latest",
"def get_version():\n\n with open('__init__.py') as f:\n for line in f.readlines():\n if '__version__' in line:\n apicem_version = line.strip().split(\"=\")[-1].strip(\" '\")\n if '__first_release_date__' in line:\n first_release_data_str = line.strip().split(\"=\")[-1].strip(\" '\")\n first_release_data = date(*[int(num) for num in first_release_data_str.split('.')])\n num_commits = get_cr_num(first_release_data)\n return '{apicem_version}.{num_commits}'.format(\n apicem_version=apicem_version, num_commits=num_commits)\n\n raise ValueError(\"could not read version\")",
"def _get_versions(self, package):\n raise NotImplementedError(self, \"_get_versions\")",
"def process_text(self, input_text):\n package_names = input_text.split()\n packages = [__import__(package_name) for package_name in package_names]\n packages_info = {}\n\n for package in packages:\n self.log.debug(\"processing package %s\" % package)\n package_name = package.__name__\n method_source_code = {}\n class_info = {}\n prefix = package.__name__ + \".\"\n for module_loader, name, ispkg in pkgutil.walk_packages(package.__path__, prefix=prefix):\n self.log.debug(\"in package %s processing module %s\" % (package_name, name))\n try:\n __import__(name)\n mod = sys.modules[name]\n\n for k, m in inspect.getmembers(mod):\n self.log.debug(\"in package %s module %s processing element %s\" % (package_name, name, k))\n if not inspect.isclass(m) and hasattr(m, '__module__') and m.__module__.startswith(package_name):\n # TODO figure out how to get module constants\n key = \"%s.%s\" % (m.__module__, k)\n item_content = self.fetch_item_content(m)\n method_source_code[key] = item_content\n\n elif inspect.isclass(m) and m.__module__.startswith(package_name):\n class_key = \"%s.%s\" % (name, k)\n class_info[class_key] = {}\n try:\n class_info[class_key]['source'] = highlight(inspect.getsource(m), self.LEXER, self.HTML_FORMATTER)\n except IOError:\n self.log.debug(\"can't get source for\" % class_key)\n class_info[class_key]['source'] = \"\"\n\n for ck, cm in inspect.getmembers(m):\n key = \"%s.%s.%s\" % (name, k, ck)\n item_content = self.fetch_item_content(cm)\n method_source_code[key] = item_content\n class_info[class_key][ck] = item_content\n except ImportError as e:\n self.log.debug(e)\n packages_info[package.__name__] = method_source_code\n return json.dumps(packages_info, indent=4)",
"def process_package(working_directory, global_names=None, require_labels=False):\n # bodies = unpersist_if_present(os.path.join(working_directory, \"source_graph_bodies.bz2\"))\n # if bodies is None:\n # return []\n\n # offsets_path = os.path.join(working_directory, \"offsets.bz2\")\n\n # # offsets store information about spans for nodes referenced in the source code\n # if os.path.isfile(offsets_path):\n # offsets = unpersist(offsets_path)\n # else:\n # logging.warning(f\"No file with offsets: {offsets_path}\")\n # offsets = None\n\n if not os.path.isfile(join(working_directory, \"has_annotations\")):\n return []\n\n def load_local2global(working_directory):\n local2global = unpersist(os.path.join(working_directory, \"local2global_with_ast.bz2\"))\n id_maps = dict(zip(local2global['id'], local2global['global_id']))\n return id_maps\n\n id_maps = load_local2global(working_directory)\n\n local_names = load_names(os.path.join(working_directory, \"nodes_with_ast.bz2\"))\n\n node_maps = get_node_maps(unpersist(join(working_directory, \"nodes_with_ast.bz2\")))\n filecontent = get_filecontent_maps(unpersist(join(working_directory, \"filecontent_with_package.bz2\")))\n offsets = group_offsets(unpersist(join(working_directory, \"offsets.bz2\")))\n\n data = []\n nlp = create_tokenizer(\"spacy\")\n\n for ind, (f_body, f_offsets) in enumerate(iterate_functions(offsets, node_maps, filecontent)):\n try:\n entry = process_body(nlp, f_body, replacements=f_offsets, require_labels=require_labels)\n except Exception as e:\n logging.warning(\"Error during processing\")\n print(working_directory)\n print(e)\n continue\n\n if entry is not None:\n entry = to_global_ids(entry, id_maps, global_names, local_names)\n data.append(entry)\n\n # nlp = create_tokenizer(\"spacy\")\n #\n # data = []\n #\n # for ind, (_, row) in tqdm(\n # enumerate(bodies.iterrows()), total=len(bodies),\n # leave=True, desc=os.path.basename(working_directory)\n # ):\n # body = row['body']\n #\n # if offsets is not None:\n # graph_node_spans = offsets_for_func(offsets, body, row[\"id\"])\n # else:\n # graph_node_spans = []\n #\n # entry = process_body(nlp, body, replacements=graph_node_spans)\n #\n # if entry is not None:\n # entry = to_global_ids(entry, id_maps, global_names, local_names)\n # data.append(entry)\n\n return data",
"def parse_xml(path):\r\n ns = {'SSIS': \"www.microsoft.com/SqlServer/SSIS\",}\r\n proj_xml = et.parse(path)\r\n proj_packages = get_packages(proj_xml, ns)\r\n\r\n packages = [Package(*package_properties(package, ns))\r\n for package in proj_packages]\r\n \r\n #package_props = {}\r\n #for package in packages:\r\n # name, version = package_properties(package, ns)\r\n # package_props[name] = version\r\n\r\n return packages",
"def parse(self, *args):\n # type: (str) -> ParseResultType\n self.nested_files = self.parse_nested_files()\n pip_options, session = build_pip_session(*args)\n repository = PyPiRepository(pip_options, session)\n requirements = pip.req.parse_requirements(\n str(self.filename),\n finder=repository.finder,\n session=repository.session,\n options=pip_options)\n requirements = ordered_set.OrderedSet(sorted(\n (HashableInstallRequirement.from_ireq(ireq)\n for ireq in requirements),\n key=lambda ireq: str(ireq)))\n index_urls = ordered_set.OrderedSet(repository.finder.index_urls)\n nested_cfiles, nested_rfiles = self.parse_nested_files()\n nested_requirements = set(itertools.chain(\n *(requirements_file.requirements\n for requirements_file in nested_rfiles)))\n requirements -= nested_requirements\n return requirements, index_urls, nested_cfiles, nested_rfiles",
"def get_package_versions(name: str) -> List[str]:\n with request.urlopen(PYPI_SIMPLE_API_URL + name) as response:\n html = response.read()\n\n return re.findall(f'>{name}-(.+).tar', html.decode())",
"def parse_deps():\n Files = []\n Dependencies = []\n TimeBins = ['recover_parameters', 'startup', 'wragh', 'paramcheck',\n 'preregridinitial', 'postregridinitial', 'basegrid', \n 'initial', 'postinitial', 'postrestrictinitial', \n 'postpostinitial', 'recover_variables', \n 'post_recover_variables', 'cpinitial', 'checkpoint', \n 'preregrid', 'postregrid', 'prestep', 'evol', 'postrestrict', \n 'poststep', 'analysis', 'terminate', 'shutdown']\n\n implement_re = re.compile('implements:\\s*(\\w+)', re.I)\n inherit_re = re.compile('inherits:\\s*(.+)', re.I)\n provides_function_re = re.compile('PROVIDES\\s+FUNCTION\\s+(\\w+)', re.I)\n uses_function_re = re.compile('USES\\s+FUNCTION\\s+(\\w+)', re.I)\n requires_function_re = re.compile('REQUIRES\\s+FUNCTION\\s+(\\w+)', re.I)\n shares_re = re.compile('shares:\\s*(\\w+)', re.I)\n requires_thorn_re = re.compile('REQUIRES\\s+(?!FUNCTION\\s*)(\\w+)', re.I)\n schedules_function_re = re.compile('schedule\\s+(?:group\\s+)?(\\w+)\\s+(?:in|at)\\s+(\\w+)', re.I)\n\n # find all interface.ccl and param.ccl files in cwd\n Cactus_Path = os.path.expanduser('~/Cactus/')\n for dirpath, dirnames, filenames in os.walk(Cactus_Path + 'arrangements', followlinks=True):\n for file in filenames:\n if file == 'interface.ccl':\n Files.append(os.path.join(dirpath, file))\n\n for file in Files:\n # first parse interface.ccl\n try:\n fptr = open(file, 'r')\n except IOError:\n print(\"Could not open %s\" % file) \n\n lines = fptr.readlines()\n\n try:\n fptr.close()\n except IOError:\n print(\"Could not close %s\" % file) \n\n # then parse param.ccl\n file = re.sub('interface.ccl', 'param.ccl', file)\n\n try:\n fptr = open(file, 'r')\n except IOError:\n print(\"Could not open %s\" % file) \n\n lines += fptr.readlines()\n\n try:\n fptr.close()\n except IOError:\n print(\"Could not close %s\" % file) \n\n # then configuration.ccl\n file = re.sub('param.ccl', 'configuration.ccl', file)\n\n try:\n fptr = open(file, 'r')\n lines += fptr.readlines()\n fptr.close()\n except IOError:\n pass\n\n # then schedule.ccl\n file = re.sub('configuration.ccl', 'schedule.ccl', file)\n\n try:\n fptr = open(file, 'r')\n lines += fptr.readlines()\n fptr.close()\n except IOError:\n pass\n\n # get the thorn dir and its parent\n thornname = os.path.basename(os.path.dirname(file))\n parentdir = os.path.basename(os.path.dirname(os.path.dirname(file)))\n thornname = os.path.join(parentdir, thornname)\n file_dict = {'name' : thornname.lower()}\n for line in lines:\n line = line.strip()\n m = re.match(implement_re, line)\n if m:\n file_dict['implements'] = m.group(1).lower()\n\n m = re.match(inherit_re, line)\n if m:\n inheritance = re.split('\\W+', m.group(1).lower())\n file_dict['inherits'] = inheritance\n\n m = re.match(provides_function_re, line)\n if m:\n try:\n file_dict['provides_function'].append(m.group(1).lower())\n except KeyError:\n file_dict['provides_function'] = [m.group(1).lower()]\n\n m = re.match(uses_function_re, line)\n if m:\n try:\n file_dict['uses_function'].append(m.group(1).lower())\n except KeyError:\n file_dict['uses_function'] = [m.group(1).lower()]\n\n m = re.match(requires_function_re, line)\n if m:\n try:\n file_dict['requires_function'].append(m.group(1).lower())\n except KeyError:\n file_dict['requires_function'] = [m.group(1).lower()]\n\n m = re.match(requires_thorn_re, line)\n if m:\n requires = re.split('\\W+', m.group(1).lower())\n # sometimes we have 'REQUIRES THORNS' instead of 'REQUIRES'\n if requires[0].lower() == 'thorns':\n del requires[0]\n 
file_dict['requires_thorn'] = requires\n\n m = re.match(shares_re, line)\n if m:\n try:\n file_dict['shares'].append(m.group(1).lower())\n except KeyError:\n file_dict['shares'] = [m.group(1).lower()]\n\n m = re.match(schedules_function_re, line)\n if m:\n bin, func = m.group(2).lower(), m.group(1).lower()\n if bin in TimeBins:\n bin = 'cctk_' + bin\n func_dict = {bin : func}\n try:\n file_dict['schedules_function'].append(func_dict)\n except KeyError:\n file_dict['schedules_function'] = [func_dict]\n\n\n Dependencies.append(file_dict)\n\n return Dependencies",
"def extract_deps(self, srcinfo):\n packages = {}\n pkgname = \"\"\n\n for i in srcinfo.split(\"\\n\"):\n if not i:\n continue\n if i[0] == \"#\":\n continue\n option = i.strip()\n key, value = option.split(\" = \")\n if key == \"pkgbase\":\n pkgname = value\n packages[pkgname] = []\n if key == \"makedepends\":\n packages[pkgname].append(value)\n # if key == \"depends\":\n # packages[pkgname].append(value)\n return packages",
"def extract_packages_and_versions(self, output):\n packages, versions = self.extract_packages_and_versions_including_duplicates(output)\n packages, versions = self.dedupe_update_packages(packages, versions)\n return packages, versions"
]
| [
"0.61708677",
"0.6103998",
"0.57891846",
"0.57264787",
"0.5717269",
"0.5679652",
"0.56734043",
"0.56328624",
"0.5631887",
"0.5610238",
"0.5577528",
"0.55663425",
"0.55570847",
"0.5550549",
"0.55372876",
"0.5530389",
"0.5517335",
"0.5509466",
"0.54764056",
"0.54277015",
"0.5421385",
"0.5419607",
"0.54160583",
"0.5414244",
"0.5413094",
"0.5410677",
"0.54058105",
"0.53956866",
"0.53823704",
"0.5382311"
]
| 0.6358742 | 0 |
Includes and excludes packages to be checked in the default dict of packages with versions. | def include_exclude_versions(self, source_versions,
includes=[], excludes=[]):
versions = source_versions.copy()
packages_lower = [x.lower() for x in versions.keys()]
excludes_lower = [x.lower() for x in excludes]
for include in includes:
if include.lower() not in packages_lower:
versions[include] = self.default_version
for package in list(versions.keys()):
if package.lower() in excludes_lower:
del versions[package]
logger.info(
'- %d packages need to be checked for updates.',
len(versions)
)
return versions | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_extras_require() -> Dict[str, List[str]]:\n extras = {\n \"testing\": [\n \"pytest==6.1.2\",\n \"pytest-cov==2.10.1\",\n ],\n \"linting\": [\n \"pylint==2.6.0\",\n \"flake8==3.8.4\",\n \"black>=20.8b1\",\n \"darglint==1.5.5\",\n \"mypy==0.790\",\n # \"data-science-types>=0.2.20\", # pandas, numpy, matplotlib\n ],\n }\n extras[\"all\"] = [item for group in extras.values() for item in group]\n return extras",
"def find_with_deps(self, package_names):",
"def missing_dependencies_by_extra(package=\"sunpy\", exclude_extras=None):\n exclude_extras = exclude_extras or []\n requirements = get_requirements(package)\n missing_dependencies = {}\n for group in requirements.keys():\n if group in exclude_extras:\n continue\n missing_dependencies[group] = find_dependencies(package, [group])[0]\n return missing_dependencies",
"def get_default_packages(rootfs, include_essential=False,\n include_priorities=None):\n if include_priorities is None:\n include_priorities = []\n\n package_list = set()\n\n list_dir = os.path.join(rootfs, 'var/lib/apt/lists')\n for filename in os.listdir(list_dir):\n if not filename.endswith('_Packages'):\n continue\n\n with open(os.path.join(list_dir, filename)) as infile:\n for pkg in rfc822_parse(infile):\n if 'Essential' in pkg and include_essential:\n package_list.add(pkg['Package'])\n continue\n if 'Priority' not in pkg:\n continue\n\n if pkg['Priority'] in include_priorities:\n package_list.add(pkg['Package'])\n\n return list(sorted(package_list))",
"def get_used_release_specs(package, installed_version=None):",
"def _filter_pkgs(self, pkgs):\n pkgs = [pkg.strip() for pkg in pkgs]\n return [\n pkg for pkg in pkgs\n if pkg.startswith(self.base_pkg) and not pkg.startswith(os.path.join(self.base_pkg, \"vendor/\"))\n ]",
"def get_whitelisted_packages() -> Mapping[str, Union[str, None]]:\n with (here / WHITELISTED_PACKAGES_FILE).open() as fh:\n package_data = json.load(fh)\n return {\n key.upper().strip(): val.upper().strip() if val else val\n for key, val in package_data.items()\n }",
"def get_fallback_packages(request, package_name):\n dists = request.locator.get_project(package_name)\n pkgs = {}\n for version, url_set in six.iteritems(dists.get('urls', {})):\n dist = dists[version]\n for url in url_set:\n filename = posixpath.basename(url)\n url = request.app_url('api', 'package', dist.name, filename)\n pkgs[filename] = url\n return pkgs",
"def required_packages(cls) -> List[Text]:\n return []",
"def get_incompatible_packages():\n pkgconfig_directory = '/usr/lib64/pkgconfig/'\n incompatibe_packages = []\n libraries = get_libraries_names() - set(NO_PKG_CONFIGS)\n for library in libraries:\n with open(pkgconfig_directory + library + '.pc') as f:\n out = f.readlines()\n for line in out:\n if 'version=' in line:\n version = line.split('=')[1].strip(linesep)\n if not version in PMDK_VERSION.replace('~', '-'):\n incompatibe_packages.append(library)\n return incompatibe_packages",
"def getusersitepackages():\n\tpass",
"def unsatisfied_requirements(buildout, package, working_set):\n\n # read all lines from \"requirements.txt\"\n specs = [k.strip() for k in package_readlines(package, 'requirements.txt')]\n\n # discard empty lines and comments\n specs = [k for k in specs if k and k[0] not in ('#', '-')]\n\n # do not consider packages which are already installed, with a reasonable\n # version matching the user specification, either on the current working\n # set, the installed eggs or the system paths\n newest = bool_option(buildout, 'newest', 'true')\n\n left_over = []\n for k in specs:\n if requirement_is_satisfied(k, working_set, newest):\n dist = working_set.require(k)[0]\n logger.info(\"taking requirement `%s' (%s) from `%s'\", dist.key,\n dist.version, dist.location)\n else:\n left_over.append(k)\n specs = left_over\n\n return left_over",
"def findPackages(self, pkgcode, packages=None):\n if not packages:\n if (self.system_pkgcode and pkgcode == self.system_pkgcode):\n scanlist = ['language-pack', 'language-support-fonts', 'language-support-input', 'language-support-writing']\n else:\n scanlist = ['language-pack']\n for x in scanlist:\n pkg = '%s-%s' % (x, pkgcode)\n if pkg in self._cache:\n if not self._cache[pkg].is_installed and \\\n not self._cache[pkg].marked_install:\n self.missing.add(pkg)\n else:\n self.installed.add(pkg)\n \n if pkgcode in self.pkg_translations:\n for (pkg, translation) in self.pkg_translations[pkgcode]:\n if packages:\n if pkg in packages and \\\n pkg in self._cache and \\\n translation in self._cache:\n if ((not self._cache[translation].is_installed and \\\n not self._cache[translation].marked_install and \\\n not self._cache[translation].marked_upgrade) or \\\n self._cache[translation].marked_delete):\n self.missing.add(translation)\n else:\n self.installed.add(translation)\n else:\n if pkg in self._cache and \\\n (self._cache[pkg].is_installed or \\\n self._cache[pkg].marked_install or \\\n self._cache[pkg].marked_upgrade) and \\\n translation in self._cache:\n if ((not self._cache[translation].is_installed and \\\n not self._cache[translation].marked_install and \\\n not self._cache[translation].marked_upgrade) or \\\n self._cache[translation].marked_delete):\n self.missing.add(translation)\n else:\n self.installed.add(translation)\n \n if pkgcode in self.pkg_writing and \\\n (pkgcode == self.system_pkgcode or \\\n ('language-support-writing-%s' % pkgcode in self._cache and \\\n self._cache['language-support-writing-%s' % pkgcode].is_installed) or \\\n ('language-support-writing-%s' % pkgcode in self._cache and \\\n self._cache['language-support-writing-%s' % pkgcode].mark_install) or \\\n ('language-support-writing-%s' % pkgcode in self._cache and \\\n self._cache['language-support-writing-%s' % pkgcode].markUpgrade)):\n for (pkg, pull_pkg) in self.pkg_writing[pkgcode]:\n if '|' in pkg:\n # multiple dependencies, if one of them is installed, pull the pull_pkg\n for p in pkg.split('|'):\n if packages:\n if p in packages and \\\n p in self._cache and \\\n pull_pkg in self._cache:\n if ((not self._cache[pull_pkg].is_installed and \\\n not self._cache[pull_pkg].marked_install and \\\n not self._cache[pull_pkg].marked_upgrade) or \\\n self._cache[pull_pkg].marked_delete):\n self.missing.add(pull_pkg)\n else:\n self.installed.add(pull_pkg)\n break\n else:\n if p in self._cache and \\\n (self._cache[p].is_installed or \\\n self._cache[p].marked_install or \\\n self._cache[p].marked_upgrade) and \\\n pull_pkg in self._cache:\n if ((not self._cache[pull_pkg].is_installed and \\\n not self._cache[pull_pkg].marked_install and \\\n not self._cache[pull_pkg].marked_upgrade) or \\\n self._cache[pull_pkg].marked_delete):\n self.missing.add(pull_pkg)\n else:\n self.installed.add(pull_pkg)\n break\n else:\n if packages:\n if pkg in packages and \\\n pkg in self._cache and \\\n pull_pkg in self._cache:\n if ((not self._cache[pull_pkg].is_installed and \\\n not self._cache[pull_pkg].marked_install and \\\n not self._cache[pull_pkg].marked_upgrade) or \\\n self._cache[pull_pkg].marked_delete):\n self.missing.add(pull_pkg)\n else:\n self.installed.add(pull_pkg)\n else:\n if pkg in self._cache and \\\n (self._cache[pkg].is_installed or \\\n self._cache[pkg].marked_install or \\\n self._cache[pkg].marked_upgrade) and \\\n pull_pkg in self._cache:\n if ((not self._cache[pull_pkg].is_installed and \\\n not 
self._cache[pull_pkg].marked_install and \\\n not self._cache[pull_pkg].marked_upgrade) or \\\n self._cache[pull_pkg].marked_delete):\n self.missing.add(pull_pkg)\n else:\n self.installed.add(pull_pkg)",
"def add_uppers():\n for filename, requirements in _sync():\n LOG.info(\"Obtaining latest versions of packages for %s.\", filename)\n for req in requirements:\n if isinstance(req, Requirement):\n if isinstance(req.version, dict) and not req.version[\"max\"]:\n req.sync_max_version_with_pypy()\n _write_requirements(filename, requirements)",
"def getsitepackages():\n\tpass",
"def getMissingPackages(self, language=None, all=False, packages=None, showInstalled=False):\n if self._cache.broken_count > 0:\n raise SoftwareIndexBroken\n \n self.langpack_locales = {}\n self.pkg_translations = {}\n self.pkg_writing = {}\n filter_list = {}\n blacklist = []\n show = []\n self.missing = set()\n self.installed = set()\n self.system_pkgcode = ''\n \n for l in open(self.BLACKLIST):\n l = l.strip()\n if not l.startswith('#'):\n blacklist.append(l)\n \n for l in open(self.LANGCODE_TO_LOCALE):\n try:\n l = l.rstrip()\n if ':' in l:\n (pkgcode, locale) = l.split(':')\n else:\n pkgcode = l\n locale = l\n except ValueError:\n continue\n self.langpack_locales[locale] = pkgcode\n \n for l in open(self.PACKAGE_DEPENDS):\n if l.startswith('#'):\n continue\n try:\n l = l.rstrip()\n # sort out comments\n if l.find('#') >= 0:\n continue\n (c, lc, k, v) = l.split(':')\n except ValueError:\n continue\n if (c == 'tr' and lc == ''):\n filter_list[v] = k\n elif (c == 'wa' and lc != ''):\n if '|' in lc:\n for l in lc.split('|'):\n if not l in self.pkg_writing:\n self.pkg_writing[l] = []\n self.pkg_writing[l].append((\"%s\" % k, \"%s\" % v))\n else:\n if not lc in self.pkg_writing:\n self.pkg_writing[lc] = []\n self.pkg_writing[lc].append((\"%s\" % k, \"%s\" % v))\n\n # get list of all packages available on the system and filter them\n for item in self._cache.keys():\n if item in blacklist: \n continue\n for x in filter_list.keys():\n if item.startswith(x) and not item.endswith('-base'):\n # parse language code\n langcode = item.replace(x, '')\n #print \"%s\\t%s\" % (item, langcode)\n if langcode == 'zh':\n # special case: zh langpack split\n for langcode in ['zh-hans', 'zh-hant']:\n if not langcode in self.pkg_translations:\n self.pkg_translations[langcode] = []\n self.pkg_translations[langcode].append((\"%s\" % filter_list[x], \"%s\" % item))\n elif langcode in self.langpack_locales.values():\n # langcode == pkgcode\n if not langcode in self.pkg_translations:\n self.pkg_translations[langcode] = []\n self.pkg_translations[langcode].append((\"%s\" % filter_list[x], \"%s\" % item))\n #print self.pkg_translations[langcode]\n else:\n # need to scan for LL-CC and LL-VARIANT codes\n for locale in self.langpack_locales.keys():\n if '_' in locale or '@' in locale:\n if '@' in locale:\n (locale, variant) = locale.split('@')\n else:\n variant = ''\n (lcode, ccode) = locale.split('_')\n if langcode in [\"%s-%s\" % (lcode, ccode.lower()),\n \"%s%s\" % (lcode, ccode.lower()),\n \"%s-%s\" % (lcode, variant),\n \"%s%s\" % (lcode, variant),\n \"%s-latn\" % lcode,\n \"%slatn\" % lcode,\n \"%s-%s-%s\" % (lcode, ccode.lower(), variant),\n \"%s%s%s\" % (lcode, ccode.lower(), variant)]:\n # match found, get matching pkgcode\n langcode = self.langpack_locales[locale]\n if not langcode in self.pkg_translations:\n self.pkg_translations[langcode] = []\n self.pkg_translations[langcode].append((\"%s\" % filter_list[x], \"%s\" % item))\n #print self.pkg_translations[langcode]\n break\n\n if language:\n pkgcode = ''\n if language == 'zh-hans' or language == 'zh-hant':\n self.system_pkgcode = language\n elif language in self.langpack_locales:\n self.system_pkgcode = self.langpack_locales[language]\n else:\n # pkgcode = ll\n if '_' in language:\n (self.system_pkgcode) = language.split('_')[0]\n elif '@' in language:\n (self.system_pkgcode) = language.split('@')[0]\n else:\n self.system_pkgcode = language\n\n if packages:\n self.findPackages(self.system_pkgcode, packages)\n else:\n self.findPackages(self.system_pkgcode)\n \n 
elif all:\n # try all available languages\n pkgcodes = []\n for item in self._cache.keys():\n if item in blacklist:\n continue\n if item.startswith('language-pack-') and \\\n not item.startswith('language-pack-gnome') and \\\n not item.startswith('language-pack-kde') and \\\n not item.endswith('-base'):\n pkgcode = item.replace('language-pack-', '')\n pkgcodes.append(pkgcode)\n\n for pkgcode in pkgcodes:\n if packages:\n self.findPackages(pkgcode, packages)\n else:\n self.findPackages(pkgcode)\n\n else:\n # get a list of language-packs we have already installed or are going to install\n # 1. system locale\n system_langcode = self._localeinfo.getSystemDefaultLanguage()[0]\n if system_langcode == None:\n system_langcode = 'en_US'\n if system_langcode in self.langpack_locales:\n self.system_pkgcode = self.langpack_locales[system_langcode]\n # 2. installed language-packs\n pkgcodes = []\n for item in self._cache.keys():\n if item in blacklist: \n continue\n if item.startswith('language-pack-') and \\\n not item.startswith('language-pack-gnome') and \\\n not item.startswith('language-pack-kde') and \\\n not item.endswith('-base') and \\\n (self._cache[item].is_installed or \\\n self._cache[item].marked_install):\n pkgcode = item.replace('language-pack-', '')\n pkgcodes.append(pkgcode)\n if self.system_pkgcode and \\\n not self.system_pkgcode in pkgcodes:\n pkgcodes.append(self.system_pkgcode)\n \n for pkgcode in pkgcodes:\n if packages:\n self.findPackages(pkgcode, packages)\n else:\n self.findPackages(pkgcode)\n \n if showInstalled:\n show = self.missing | self.installed\n else:\n show = self.missing\n\n return show",
"def sort_packages(self) -> None:\n self.recommended_packages = []\n self.required_packages = []\n for package in self.repository_packages:\n try:\n output = self.guest.execute(Command('rpm', '-q', package), silent=True)\n assert output.stdout\n self.debug(f\"Package '{output.stdout.strip()}' already installed.\")\n except tmt.utils.RunError:\n if self.skip_missing:\n self.recommended_packages.append(package)\n else:\n self.required_packages.append(package)",
"def checkRequiredDependencies(self):\n \n # skip dependency check for downloading only\n if( self.downloadOnly ):\n return\n\n # hard dependencies\n for req in self.reqmodules:\n if( self.parent.module(req) == None ):\n # check if there is an auto detected module\n if( self.parent.module(req, True) == None ):\n self.abort( self.name + \" requires \" + req \\\n + \" and it wasn't found in your config file!!\" )\n else:\n # use auto detected module\n self.parent.use( self.parent.module(req, True) )\n self.parent.module( req ).init()\n\n print self.name + \": auto-detected \" + req + \" version \" + self.parent.module( req ).version\n \n # build only dependencies\n if( self.mode == \"install\" ):\n mods = self.reqmodules_buildonly + self.reqmodules_external\n for req in mods:\n if( self.parent.module(req) == None ):\n # check if there is an auto detected module\n if( self.parent.module(req, True) == None ):\n self.abort( req + \" not found in your config file!! \" + self.name \\\n + \" cannot be built without \" + req )\n else:\n # use auto detected module\n self.parent.use( self.parent.module(req, True) )\n self.parent.module( req ).init()\n\n print \" - \" + self.name + \": auto-detected \" + req + \" version \" + self.parent.module( req ).version",
"def packages():",
"def get_imported_packages(self):\n package_versions_dict = {'python': sys.version, 'SasView': sas.system.version.__version__}\n err_version_dict = {}\n no_version_list = []\n # Generate a list of standard modules by looking at the local python library\n try:\n standard_lib = [path.stem.split('.')[0] for path in pathlib.Path(pathlib.__file__)\n .parent.absolute().glob('*')]\n except Exception:\n standard_lib = ['abc', 'aifc', 'antigravity', 'argparse', 'ast', 'asynchat', 'asyncio', 'asyncore',\n 'base64', 'bdb', 'binhex', 'bisect', 'bz2', 'calendar', 'cgi', 'cgitb', 'chunk', 'cmd',\n 'code', 'codecs', 'codeop', 'collections', 'colorsys', 'compileall', 'concurrent',\n 'configparser', 'contextlib', 'contextvars', 'copy', 'copyreg', 'cProfile', 'crypt',\n 'csv', 'ctypes', 'curses', 'dataclasses', 'datetime', 'dbm', 'decimal', 'difflib',\n 'dis', 'distutils', 'doctest', 'email', 'encodings', 'ensurepip', 'enum', 'filecmp',\n 'fileinput', 'fnmatch', 'formatter', 'fractions', 'ftplib', 'functools', 'genericpath',\n 'getopt', 'getpass', 'gettext', 'glob', 'graphlib', 'gzip', 'hashlib', 'heapq', 'hmac',\n 'html', 'http', 'idlelib', 'imaplib', 'imghdr', 'imp', 'importlib', 'inspect', 'io',\n 'ipaddress', 'json', 'keyword', 'lib2to3', 'linecache', 'locale', 'logging', 'lzma',\n 'mailbox', 'mailcap', 'mimetypes', 'modulefinder', 'msilib', 'multiprocessing', 'netrc',\n 'nntplib', 'ntpath', 'nturl2path', 'numbers', 'opcode', 'operator', 'optparse', 'os',\n 'pathlib', 'pdb', 'pickle', 'pickletools', 'pipes', 'pkgutil', 'platform', 'plistlib',\n 'poplib', 'posixpath', 'pprint', 'profile', 'pstats', 'pty', 'pyclbr', 'pydoc',\n 'pydoc_data', 'py_compile', 'queue', 'quopri', 'random', 're', 'reprlib', 'rlcompleter',\n 'runpy', 'sched', 'secrets', 'selectors', 'shelve', 'shlex', 'shutil', 'signal',\n 'site-packages', 'site', 'smtpd', 'smtplib', 'sndhdr', 'socket', 'socketserver', 'sqlite3',\n 'sre_compile', 'sre_constants', 'sre_parse', 'ssl', 'stat', 'statistics', 'string',\n 'stringprep', 'struct', 'subprocess', 'sunau', 'symbol', 'symtable', 'sysconfig',\n 'tabnanny', 'tarfile', 'telnetlib', 'tempfile', 'test', 'textwrap', 'this', 'threading',\n 'timeit', 'tkinter', 'token', 'tokenize', 'trace', 'traceback', 'tracemalloc', 'tty',\n 'turtle', 'turtledemo', 'types', 'typing', 'unittest', 'urllib', 'uu', 'uuid', 'venv',\n 'warnings', 'wave', 'weakref', 'webbrowser', 'wsgiref', 'xdrlib', 'xml', 'xmlrpc',\n 'zipapp', 'zipfile', 'zipimport', 'zoneinfo', '_aix_support', '_bootlocale',\n '_bootsubprocess', '_collections_abc', '_compat_pickle', '_compression', '_markupbase',\n '_osx_support', '_pydecimal', '_pyio', '_py_abc', '_sitebuiltins', '_strptime',\n '_threading_local', '_weakrefset', '__future__', '__phello__', '__pycache__']\n standard_lib.extend(sys.builtin_module_names)\n standard_lib.append(\"sas\")\n\n for module_name in sys.modules.keys():\n\n package_name = module_name.split('.')[0]\n\n # A built in python module or a local file, which have no version, only the python/SasView version\n if package_name in standard_lib or package_name in package_versions_dict:\n continue\n\n # Import module\n try:\n package = __import__(package_name)\n except Exception as e:\n err_version_dict[package_name] = f\"Unknown: {e} when attempting to import module\"\n continue\n\n # Retrieving the modules version using the __version__ attribute\n if hasattr(package, '__version__'):\n # Module has __version__ attribute\n try:\n package_versions_dict[package_name] = package.__version__\n continue\n except Exception as e:\n # 
Unable to access module\n err_version_dict[package_name] = f\"Unknown: {e} when attempting to access {package_name} \" \\\n f\"version using .__version__\"\n pass\n\n # Retrieving the modules version using the pkg_resources package\n # Unreliable, so second option\n try:\n package_versions_dict[package_name] = pkg_resources.get_distribution(package_name).version\n except Exception:\n # Modules that cannot be found by pkg_resources\n pass\n else:\n continue\n\n # Modules version number could not be attained by any of the previous methods\n\n no_version_list.append(package_name)\n\n # Currently not required for any packages used by SasView\n # Retrieving the modules version using the version attribute\n # if hasattr(package, 'version'):\n # # Module has version attribute\n # try:\n # if isinstance(package.version, str):\n # print(package)\n # package_versions_dict[package_name] = package.version\n # continue\n # except Exception as e:\n # # Unable to access module\n # err_version_dict[package_name] = f\"Unknown: {e} when attempting to access {package_name} \" \\\n # f\"version using .version\"\n # pass\n\n # Clean up\n package_versions_dict = self.remove_duplicate_modules(package_versions_dict)\n no_version_dict = self.format_no_version_list(package_versions_dict, no_version_list)\n\n return {\"results\": package_versions_dict, \"no_results\": no_version_dict, \"errors\": err_version_dict}",
"def version(*names, **kwargs):\n ret = {}\n versions_as_list = salt.utils.data.is_true(kwargs.pop(\"versions_as_list\", False))\n pkg_glob = False\n if names:\n pkgs = __salt__[\"pkg.list_pkgs\"](versions_as_list=True, **kwargs)\n for name in names:\n if \"*\" in name:\n pkg_glob = True\n for match in fnmatch.filter(pkgs, name):\n ret[match] = pkgs.get(match, [])\n else:\n ret[name] = pkgs.get(name, [])\n if not versions_as_list:\n __salt__[\"pkg_resource.stringify\"](ret)\n # Return a string if no globbing is used, and there is one item in the\n # return dict\n if len(ret) == 1 and not pkg_glob:\n try:\n return next(iter(ret.values()))\n except StopIteration:\n return \"\"\n return ret",
"def before_packages(manager):\n if manager not in b.packages:\n return\n if 'apt' == manager:\n s.add('export APT_LISTBUGS_FRONTEND=\"none\"')\n s.add('export APT_LISTCHANGES_FRONTEND=\"none\"')\n s.add('export DEBIAN_FRONTEND=\"noninteractive\"')\n s.add('apt-get -q update')\n elif 'yum' == manager:\n s.add('yum makecache')",
"def test_default_packages(host, pkg):\n assert host.package(pkg).is_installed",
"def packages_by_version(packages):\n pkg_by_version = collections.defaultdict(list)\n for package in packages:\n if package.py_versions is None: # SRPMS\n continue\n for version in package.py_versions:\n pkg_by_version[version].append(package)\n return pkg_by_version",
"def test_lowest_version(self):\n self.assertEqual({\"python-xyz\": \"1\",\n \"python-foo\": \"3.1\"},\n pr.sanitize_requirements(\n [\"xyz>=1,>=2\", \"foo>=4,>=3.1\"]))",
"def test_with_markers_and_lowest_version(self):\n self.assertEqual(\n {\"python-futures\": \"3.0\"},\n pr.sanitize_requirements(\n [\"futures>=3.0,<=4.1,!=4.0;python_version=='2.7'\"\n \"or python_version=='2.6'\"]))",
"def includes(self):\n r = {\n k: sorted(list(copy.deepcopy(v).values()), key=lambda x: x.get(\"order\", 0))\n for k, v in list(self.get_config(\"includes\").items())\n }\n if self.version is not None:\n for k, v in list(r.items()):\n for j in v:\n j[\"path\"] = self.versioned_url(j[\"path\"])\n return r",
"def populated_script_constraints(original_constraints):\n sorted_python_versions = sorted(set(original_constraints) - {\"default\"})\n for variant in itertools.chain([\"default\"], sorted_python_versions):\n if variant == \"default\":\n major, minor = map(int, sorted_python_versions[-1].split(\".\"))\n minor += 1\n else:\n major, minor = map(int, variant.split(\".\"))\n\n mapping = original_constraints[variant].copy()\n mapping[\"minimum_supported_version\"] = f\"({major}, {minor})\"\n\n yield variant, mapping",
"def _package_upgrades(args, env_attrs):\n\n overrides = env_attrs.get('override_attributes')\n if overrides.get('osops'):\n osops = overrides['osops']\n else:\n osops = overrides['osops'] = {}\n\n if args.get('disable_pkg_upgrades') is True:\n osops['do_package_upgrades'] = False\n else:\n osops['do_package_upgrades'] = True\n return env_attrs",
"def autodetect_files(self):\n if self._is_valid_requirements_file('requirements.txt'):\n self.filenames.append('requirements.txt')\n\n if self._is_valid_requirements_file('requirements.pip'): # pragma: nocover\n self.filenames.append('requirements.pip')\n\n if os.path.isdir('requirements'):\n for filename in os.listdir('requirements'):\n file_path = os.path.join('requirements', filename)\n if self._is_valid_requirements_file(file_path):\n self.filenames.append(file_path)\n self._check_inclusions_recursively()"
]
| [
"0.61390465",
"0.61268306",
"0.6111778",
"0.5894769",
"0.5857268",
"0.58489394",
"0.5845743",
"0.58031565",
"0.57330775",
"0.5704154",
"0.56850886",
"0.56834555",
"0.56450295",
"0.5621943",
"0.5613174",
"0.5603382",
"0.5574903",
"0.5570474",
"0.55632347",
"0.555067",
"0.5486108",
"0.5484983",
"0.54818714",
"0.5480257",
"0.5476297",
"0.5464753",
"0.5461417",
"0.54470307",
"0.54372615",
"0.54330546"
]
| 0.67187184 | 0 |
Fetch the latest versions of a list of packages with specifiers, in a threaded manner or not. | def fetch_last_versions(self, packages, allow_pre_releases,
service_url, timeout, threads):
versions = []
if threads > 1:
with futures.ThreadPoolExecutor(
max_workers=threads
) as executor:
tasks = [
executor.submit(
self.fetch_last_version,
package,
allow_pre_releases,
service_url,
timeout
)
for package in packages
]
for task in futures.as_completed(tasks):
versions.append(task.result())
else:
for package in packages:
versions.append(
self.fetch_last_version(
package,
allow_pre_releases,
service_url,
timeout
)
)
return versions | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main(\n req_files,\n verbose=False,\n outdated=False,\n latest=False,\n verbatim=False,\n repo=None,\n path=\"requirements.txt\",\n token=None,\n branch=\"master\",\n url=None,\n delay=None,\n):\n requirements = []\n\n if repo:\n github_url = build_github_url(repo, branch, path, token)\n req_file = get_requirements_file_from_url(github_url)\n requirements.extend(parse_req_file(req_file))\n elif url:\n req_file = get_requirements_file_from_url(url)\n requirements.extend(parse_req_file(req_file))\n else:\n for req_file in req_files:\n requirements.extend(parse_req_file(req_file, verbatim=verbatim))\n req_file.close()\n\n total_time_delta = 0\n max_outdated_time = 0\n results = []\n\n for req, version, ignore in requirements:\n if verbatim and not req:\n results.append(version)\n elif req:\n results.append(\n {\n \"req\": req,\n \"version\": version,\n \"ignore\": ignore,\n \"latest\": request(get_pypi_url(req)),\n \"specified\": request(get_pypi_url(req, version)),\n }\n )\n\n for result in results:\n if isinstance(result, str):\n print(result.replace(\"\\n\", \"\"))\n continue\n\n if result[\"ignore\"]:\n if verbatim:\n print(\"{}=={} # norot\".format(result[\"req\"], result[\"version\"]))\n else:\n print(\"Ignoring updates for {}. \".format(result[\"req\"]))\n continue\n\n req = result[\"req\"]\n version = result[\"version\"]\n\n latest_version, latest_release_date = get_version_and_release_date(\n req, verbose=verbose, response=result[\"latest\"]\n )\n specified_version, specified_release_date = get_version_and_release_date(\n req, version, response=result[\"specified\"]\n )\n\n if latest_release_date and specified_release_date:\n time_delta = (latest_release_date - specified_release_date).days\n total_time_delta = total_time_delta + time_delta\n max_outdated_time = max(time_delta, max_outdated_time)\n\n if verbose:\n if time_delta > 0:\n print(\n \"{} ({}) is {} days out of date. \"\n \"Latest is {}\".format(req, version, time_delta, latest_version)\n )\n elif version != latest_version:\n print(\n \"{} ({}) is out of date. \"\n \"Latest is {}\".format(req, version, latest_version)\n )\n elif not outdated:\n print(\"{} ({}) is up to date\".format(req, version))\n\n if latest and latest_version != specified_version:\n print(\n \"{}=={} # Updated from {}\".format(\n req, latest_version, specified_version\n )\n )\n elif verbatim and latest_version != specified_version:\n print(\n \"{}=={} # Latest {}\".format(req, specified_version, latest_version)\n )\n elif verbatim:\n print(\"{}=={}\".format(req, specified_version))\n\n elif verbatim:\n print(\"{}=={} # Error checking latest version\".format(req, version))\n\n verbatim_str = \"\"\n if verbatim:\n verbatim_str = \"# Generated with piprot {}\\n# \".format(VERSION)\n\n if total_time_delta > 0 and delay is None:\n print(\n \"{}Your requirements are {} \"\n \"days out of date\".format(verbatim_str, total_time_delta)\n )\n sys.exit(1)\n elif delay is not None and max_outdated_time > int(delay):\n print(\n \"{}At least one of your dependencies is {} \"\n \"days out of date which is more than the allowed\"\n \"{} days.\".format(verbatim_str, max_outdated_time, delay)\n )\n sys.exit(1)\n elif delay is not None and max_outdated_time <= int(delay):\n print(\n \"{}All of your dependencies are at most {} \"\n \"days out of date.\".format(verbatim_str, delay)\n )\n else:\n print(\n \"{}Looks like you've been keeping up to date, \"\n \"time for a delicious beverage!\".format(verbatim_str)\n )",
"def fetch_last_version(self, package, allow_pre_releases,\n service_url, timeout):\n package, specifier = package\n specifier = SpecifierSet(specifier, allow_pre_releases)\n max_version = parse_version(self.default_version)\n package_json_url = '%s/%s/json' % (service_url, package)\n\n logger.info('> Fetching latest datas for %s...', package)\n socket.setdefaulttimeout(timeout)\n try:\n content = urlopen(package_json_url).read().decode('utf-8')\n except URLError as error:\n content = '{\"releases\": []}'\n logger.debug('!> %s %s', package_json_url, error.reason)\n results = json.loads(content)\n socket.setdefaulttimeout(None)\n\n for version in specifier.filter(results['releases']):\n version = parse_version(version)\n if version > max_version:\n max_version = version\n\n logger.debug(\n '-> Last version of %s%s is %s.',\n package, specifier, max_version\n )\n\n return (package, str(max_version))",
"def test_load_many_mixed(self, build_image_for_jupyterlab):\n lb, username = build_image_for_jupyterlab[0], build_image_for_jupyterlab[5]\n keys = [\"conda3&cdutil\", \"pip>munit1\", \"conda3&nltk\"]\n loader = PackageLatestVersionLoader(keys, lb, username)\n promise1 = loader.load_many(keys)\n assert isinstance(promise1, Promise)\n\n version_list = promise1.get()\n assert len(version_list) == 3\n assert version_list[0] == \"8.1\"\n assert version_list[1] == \"0.12.4\"\n assert version_list[2] == \"3.2.5\"",
"def test_load_many_pip(self, build_image_for_jupyterlab):\n lb, username = build_image_for_jupyterlab[0], build_image_for_jupyterlab[5]\n keys = [\"pip>munit1\", \"pip>munit2\", \"pip>munit3\"]\n loader = PackageLatestVersionLoader(keys, lb, username)\n promise1 = loader.load_many(keys)\n assert isinstance(promise1, Promise)\n\n version_list = promise1.get()\n assert len(version_list) == 3\n assert version_list[0] == \"0.12.4\"\n assert version_list[1] == \"12.2\"\n assert version_list[2] == \"5.0\"",
"def _get_requirements_and_latest(\n filename,\n force=False,\n minor=[],\n patch=[],\n pre=[],\n index_urls=[],\n verify=True):\n session = PipSession()\n if verify:\n session.verify = verify\n finder = PackageFinder(\n session=session,\n find_links=[],\n index_urls=index_urls or [PyPI.simple_url],\n )\n\n _, content = get_file_content(filename, session=session)\n for line_number, line, orig_line in yield_lines(content):\n line = req_file.COMMENT_RE.sub('', line)\n line = line.strip()\n req = parse_requirement_line(line, filename, line_number, session, finder)\n if req is None or req.name is None or req_file.SCHEME_RE.match(req.name):\n yield (orig_line, None, None, None)\n continue\n spec_ver = current_version(req)\n if spec_ver or force:\n latest_ver = latest_version(req, spec_ver, session, finder,\n minor=minor, patch=patch, pre=pre)\n yield (orig_line, req, spec_ver, latest_ver)",
"def get_used_release_specs(package, installed_version=None):",
"def test_get_all_available_release_updates(self):\n self._ucr({\n 'repository/online/component/a': 'yes',\n 'repository/online/component/a/version': 'current',\n })\n self._uri({\n '%d.%d/maintained/%d.%d-%d/all/Packages.gz' % (MAJOR, MINOR + 1, MAJOR, MINOR + 1, 0): DATA,\n '%d.%d/maintained/component/%s/all/Packages.gz' % (MAJOR, MINOR + 1, 'a'): DATA,\n '%d.%d/maintained/%d.%d-%d/all/Packages.gz' % (MAJOR + 1, 0, MAJOR + 1, 0, 0): DATA,\n })\n versions, component = self.u.get_all_available_release_updates()\n self.assertEqual(['%d.%d-%d' % (MAJOR, MINOR + 1, 0)], versions)\n self.assertEqual('a', component)",
"def add_uppers():\n for filename, requirements in _sync():\n LOG.info(\"Obtaining latest versions of packages for %s.\", filename)\n for req in requirements:\n if isinstance(req, Requirement):\n if isinstance(req.version, dict) and not req.version[\"max\"]:\n req.sync_max_version_with_pypy()\n _write_requirements(filename, requirements)",
"def get_packages_by_classifier(classifier: str) -> List[str]:\n packages = []\n page = 1\n pattern = re.compile('class=\"package-snippet__name\">(.+)</span>')\n url = f\"https://pypi.org/search/?c={parse.quote_plus(classifier)}&page=\"\n while True:\n try:\n with request.urlopen(f'{url}{page}') as response:\n html = response.read().decode()\n packages.extend(pattern.findall(html))\n page += 1\n except error.HTTPError:\n break\n return packages",
"def check_for_updates(package_name, latest_version_str, our_version_str=VERSION):\n our = dict()\n latest = dict()\n for version, suffix in ((our, our_version_str), (latest, latest_version_str)):\n for part in ['major', 'minor', 'patch']:\n version[part], _, suffix = suffix.partition('.')\n version[part] = int(version[part])\n version['suffix'] = suffix\n\n for part in ['major', 'minor', 'patch', 'suffix']:\n if latest[part] > our[part]:\n if part == 'major':\n sys.exit(messages['UpdateRequired'].format(package_name))\n else:\n print >> sys.stderr, messages['UpdateAvailable'].format(package_name)\n return",
"def certifiVersions():\n log = logger.new(function='certifiVersions')\n r = yield treq.get('https://pypi.python.org/pypi/certifi/json', timeout=5)\n log.msg(\"got certifi versions!\")\n data = yield r.json()\n\n # Note: this takes advantage of the fact that certifi's releases have the\n # same version number sort order as lexicographical. If that changes,\n # this will break.\n releases = sorted(data[u'releases'].keys())\n\n first_release = releases.index('14.05.14')\n target_versions = releases[first_release:]\n\n result = []\n for version in target_versions:\n files = data[u'releases'][version]\n\n # Find the .tar.gz release.\n for file in files:\n if file[u'filename'].endswith(u'.tar.gz'):\n break\n else:\n continue\n\n log.msg(\"new release located\", version=version, tarball=file[u'url'])\n result.append((version, file[u'url']))\n\n returnValue(result)",
"def download_packages(list, failfast=False):\n return _librepo.download_packages(list, failfast)",
"def _find_rpms_in_packages(koji_api, name_list, major_minor):\n rpms_for_package = {}\n tags = _tags_for_version(major_minor)\n for package in name_list:\n for tag in tags:\n for build in koji_api.getLatestBuilds(tag=tag, package=package):\n rpm_list = set(rpm[\"name\"] for rpm in koji_api.listBuildRPMs(build[\"build_id\"]))\n rpms_for_package.setdefault(package, set()).update(rpm_list)\n\n if package not in rpms_for_package:\n # it wasn't in our tags; look for it by name\n pkg_info = koji_api.getPackage(package)\n if not pkg_info:\n continue\n latest_builds = koji_api.listBuilds(packageID=pkg_info[\"id\"], state=1, queryOpts=dict(limit=1))\n if not latest_builds:\n continue\n rpm_list = set(rpm[\"name\"] for rpm in koji_api.listBuildRPMs(latest_builds[0][\"build_id\"]))\n rpms_for_package[package] = set(rpm_list)\n\n return rpms_for_package",
"def test_load_many_conda(self, build_image_for_jupyterlab):\n lb, username = build_image_for_jupyterlab[0], build_image_for_jupyterlab[5]\n keys = [\"conda3&cdutil\", \"conda3&python-coveralls\", \"conda3&nltk\"]\n loader = PackageLatestVersionLoader(keys, lb, username)\n promise1 = loader.load_many(keys)\n assert isinstance(promise1, Promise)\n\n version_list = promise1.get()\n assert len(version_list) == 3\n\n assert version_list[0] == \"8.1\"\n assert version_list[1] == \"2.9.1\"\n assert version_list[2] == \"3.2.5\"",
"def test_load_many_conda2(self, build_image_for_jupyterlab):\n lb, username = build_image_for_jupyterlab[0], build_image_for_jupyterlab[5]\n keys = [\"conda3&cdutil\", \"conda3&python-coveralls\", \"conda3&nltk\"]\n loader = PackageLatestVersionLoader(keys, lb, username)\n promise1 = loader.load_many(keys)\n assert isinstance(promise1, Promise)\n\n version_list = promise1.get()\n assert len(version_list) == 3\n assert version_list[0] == \"8.1\"\n assert version_list[1] == \"2.9.1\"\n assert version_list[2] == \"3.2.5\"",
"def select_latest_micro_versions(versions):\n seen_minors = set()\n res = []\n\n for ver, _ in sorted(\n versions.items(),\n # Sort by (minor_version, upload_time) in descending order\n key=lambda x: (Version(x[0]).release[:2], x[1]),\n reverse=True,\n ):\n minor_ver = Version(ver).release[:2]\n\n if minor_ver not in seen_minors:\n seen_minors.add(minor_ver)\n res.insert(0, ver)\n\n return res",
"def get_package_versions(name: str) -> List[str]:\n with request.urlopen(PYPI_SIMPLE_API_URL + name) as response:\n html = response.read()\n\n return re.findall(f'>{name}-(.+).tar', html.decode())",
"def iter_latest_packages(paths=None, packages=None):\n names = sorted(\n set(family.name for family in packages_.iter_package_families(paths=paths))\n )\n\n if packages:\n names = [name for name in names if name in packages]\n\n for name in names:\n package = packages_.get_latest_package(name, paths=paths)\n\n if not package:\n _LOGGER.warning(\n 'Package family \"%s\" was found but `get_latest_package` returned None.'\n \"The package is probably damaged.\",\n name,\n )\n\n continue\n\n yield package",
"def _get_versions(self, package):\n raise NotImplementedError(self, \"_get_versions\")",
"def test_4_timever(self):\n\n bronze10 = fmri.PkgFmri(self.published[2], None)\n bronze20_1 = fmri.PkgFmri(self.published[3], None)\n bronze20_2 = fmri.PkgFmri(self.published[4], None)\n\n # Retrieve bronze using -m all-timestamps and a version pattern.\n # This should only retrieve bronze20_1 and bronze20_2.\n self.pkgrecv(self.durl1, \"--raw -m all-timestamps -r -k \"\n \"-d {0} {1}\".format(self.tempdir, \"/[email protected]\"))\n\n # Verify that only expected packages were retrieved.\n expected = [\n bronze20_1.get_dir_path(),\n bronze20_2.get_dir_path(),\n ]\n\n for d in os.listdir(os.path.join(self.tempdir, \"bronze\")):\n self.assertTrue(os.path.join(\"bronze\", d) in expected)\n\n mpath = os.path.join(self.tempdir, \"bronze\", d,\n \"manifest\")\n self.assertTrue(os.path.isfile(mpath))\n\n # Cleanup for next test.\n shutil.rmtree(os.path.join(self.tempdir, \"bronze\"))\n\n # Retrieve bronze using -m all-timestamps and a package stem.\n # This should retrieve bronze10, bronze20_1, and bronze20_2.\n self.pkgrecv(self.durl1, \"--raw -m all-timestamps -r -k \"\n \"-d {0} {1}\".format(self.tempdir, \"bronze\"))\n\n # Verify that only expected packages were retrieved.\n expected = [\n bronze10.get_dir_path(),\n bronze20_1.get_dir_path(),\n bronze20_2.get_dir_path(),\n ]\n\n for d in os.listdir(os.path.join(self.tempdir, \"bronze\")):\n self.assertTrue(os.path.join(\"bronze\", d) in expected)\n\n mpath = os.path.join(self.tempdir, \"bronze\", d,\n \"manifest\")\n self.assertTrue(os.path.isfile(mpath))\n\n # Cleanup for next test.\n shutil.rmtree(os.path.join(self.tempdir, \"bronze\"))\n\n # Retrieve bronze using -m all-versions, this should only\n # retrieve bronze10 and bronze20_2.\n self.pkgrecv(self.durl1, \"--raw -m all-versions -r -k \"\n \"-d {0} {1}\".format(self.tempdir, \"bronze\"))\n\n # Verify that only expected packages were retrieved.\n expected = [\n bronze10.get_dir_path(),\n bronze20_2.get_dir_path(),\n ]\n\n for d in os.listdir(os.path.join(self.tempdir, \"bronze\")):\n self.assertTrue(os.path.join(\"bronze\", d) in expected)\n\n mpath = os.path.join(self.tempdir, \"bronze\", d,\n \"manifest\")\n self.assertTrue(os.path.isfile(mpath))\n\n # Cleanup for next test.\n shutil.rmtree(os.path.join(self.tempdir, \"bronze\"))\n\n # Retrieve bronze using -m latest, this should only\n # retrieve bronze20_2.\n self.pkgrecv(self.durl1, \"--raw -m latest -r -k \"\n \"-d {0} {1}\".format(self.tempdir, \"bronze\"))\n\n # Verify that only expected packages were retrieved.\n expected = [\n bronze20_2.get_dir_path(),\n ]\n\n for d in os.listdir(os.path.join(self.tempdir, \"bronze\")):\n self.assertTrue(os.path.join(\"bronze\", d) in expected)\n\n mpath = os.path.join(self.tempdir, \"bronze\", d,\n \"manifest\")\n self.assertTrue(os.path.isfile(mpath))\n\n # Cleanup for next test.\n shutil.rmtree(os.path.join(self.tempdir, \"bronze\"))\n\n # Retrieve bronze using default setting.\n # This should retrieve bronze10, bronze20_1, and bronze20_2.\n self.pkgrecv(self.durl1, \"--raw -r -k \"\n \"-d {0} {1}\".format(self.tempdir, \"bronze\"))\n\n # Verify that all expected packages were retrieved.\n expected = [\n bronze10.get_dir_path(),\n bronze20_1.get_dir_path(),\n bronze20_2.get_dir_path(),\n ]\n\n for d in expected:\n paths = os.listdir(os.path.join(self.tempdir, \"bronze\"))\n self.assertTrue(os.path.basename(d) in paths)\n\n mpath = os.path.join(self.tempdir, d, \"manifest\")\n self.assertTrue(os.path.isfile(mpath))",
"def follow_dependencies(subset, package_list):\n dependency_graph = get_dependency_graph(package_list)\n\n curr_pkgs = None\n updated_pkgs = set(subset)\n while curr_pkgs != updated_pkgs:\n curr_pkgs = updated_pkgs\n updated_pkgs = set(curr_pkgs)\n for package in curr_pkgs:\n updated_pkgs.update(dependency_graph[package])\n\n return sorted(curr_pkgs)",
"def query_releases(self,request):\n\n\t\t# ----------- DEBUG -----------------\n\t\tMODULE.info(\"updater/updates/query invoked with:\")\n\t\tpp = pprint.PrettyPrinter(indent=4)\n\t\tst = pp.pformat(request.options).split(\"\\n\")\n\t\tfor s in st:\n\t\t\t\tMODULE.info(\" << %s\" % s)\n\t\t# -----------------------------------\n\n\t\t# be as current as possible.\n\t\tself.uu.ucr_reinit()\n\t\tself.ucr.load()\n\n\t\tappliance_mode = self.ucr.is_true('server/appliance')\n\n\t\tresult = []\n\t\ttry:\n\t\t\trequest.status = SUCCESS\n\t\t\tavailable_versions, blocking_component = self.uu.get_all_available_release_updates()\n\t\t\tfor rel in available_versions:\n\t\t\t\tentry = {}\n\t\t\t\tentry['id'] = rel\n\t\t\t\tentry['label'] = 'UCS %s' % rel\n\t\t\t\tresult.append(entry)\n\t\t\t#\n\t\t\t# appliance_mode=no ; blocking_comp=no → add \"latest version\"\n\t\t\t# appliance_mode=no ; blocking_comp=yes → no \"latest version\"\n\t\t\t# appliance_mode=yes; blocking_comp=no → add \"latest version\"\n\t\t\t# appliance_mode=yes; blocking_comp=yes → add \"latest version\"\n\t\t\t#\n\t\t\tif len(result) and (appliance_mode or not blocking_component):\n\t\t\t\t# UniventionUpdater returns available version in ascending order, so\n\t\t\t\t# the last returned entry is the one to be flagged as 'latest' if there's\n\t\t\t\t# no blocking component.\n\t\t\t\tresult[-1]['label'] = '%s (%s)' % (result[-1]['label'],_('latest version'))\n\n\t\texcept Exception,ex:\n\t\t\trequest.status = FAILURE\n\t\t\tself.finished(request.id, [], str(ex))\n\t\t\treturn\n\n\t\t# ----------- DEBUG -----------------\n\t\tMODULE.info(\"updater/updates/query returns: %d entries\" % len(result))\n\t\tpp = pprint.PrettyPrinter(indent=4)\n\t\tst = pp.pformat(result).split(\"\\n\")\n\t\tfor s in st:\n\t\t\t\tMODULE.info(\" >> %s\" % s)\n\t\t# -----------------------------------\n\n\t\tself.finished(request.id,result)",
"def version(*names, **kwargs):\n ret = {}\n versions_as_list = salt.utils.data.is_true(kwargs.pop(\"versions_as_list\", False))\n pkg_glob = False\n if names:\n pkgs = __salt__[\"pkg.list_pkgs\"](versions_as_list=True, **kwargs)\n for name in names:\n if \"*\" in name:\n pkg_glob = True\n for match in fnmatch.filter(pkgs, name):\n ret[match] = pkgs.get(match, [])\n else:\n ret[name] = pkgs.get(name, [])\n if not versions_as_list:\n __salt__[\"pkg_resource.stringify\"](ret)\n # Return a string if no globbing is used, and there is one item in the\n # return dict\n if len(ret) == 1 and not pkg_glob:\n try:\n return next(iter(ret.values()))\n except StopIteration:\n return \"\"\n return ret",
"def get_packages(module, repo_mgr, list_type, package):\n rc_code, out, err = module.run_command(\"/usr/bin/%s -q list %s %s\"\n % (repo_mgr, list_type, package), environ_update=ENV_LOCALE)\n if rc_code is 0:\n return out.splitlines()\n else:\n if rc_code == 1 and str(err) == 'Error: No matching Packages to list\\n':\n return out.splitlines()\n else:\n module.fail_json(msg=\"Unable to collect \" + repo_mgr + \" list \" + list_type + \" : \" + str(err) + \" - \" + str(out))",
"def packages_by_version(packages):\n pkg_by_version = collections.defaultdict(list)\n for package in packages:\n if package.py_versions is None: # SRPMS\n continue\n for version in package.py_versions:\n pkg_by_version[version].append(package)\n return pkg_by_version",
"def get_releases(is_vertebrate: bool):\n url = \"http://ftp.ensemblgenomes.org/pub?\"\n if is_vertebrate:\n url = \"http://ftp.ensembl.org/pub?\"\n ret = retry(requests.get, 3, url)\n # sort releases new to old\n releases = sorted(\n [int(i) for i in re.findall(r'\"release-(\\d+)/\"', ret.text)],\n reverse=True,\n )\n if is_vertebrate:\n # ignore immature releases\n releases = [r for r in releases if r > 46]\n return releases",
"def get_released_versions(package_name):\n url = \"https://pypi.python.org/pypi/{}/json\".format(package_name)\n data = json.load(urllib.request.urlopen(url))\n\n versions = {\n # We can actually select any element in `dist_files` because all the distribution files\n # should have almost the same upload time.\n version: dist_files[0][\"upload_time\"]\n for version, dist_files in data[\"releases\"].items()\n # If len(dist_files) = 0, this release is unavailable.\n # Example: https://pypi.org/project/xgboost/0.7\n #\n # > pip install 'xgboost==0.7'\n # ERROR: Could not find a version that satisfies the requirement xgboost==0.7\n if len(dist_files) > 0 and (not dist_files[0].get(\"yanked\", False))\n }\n return versions",
"def fetch(args):\n do_all_projects_remotes(args + [\"fetch\"])",
"def get_installation_packages_latest(self):\n self.logger.debug(\"get_installation_packages_latest()\")\n parameter = {'onlyLatest':'False'}\n resp = self._im_session.get(\"{}/{}\".format(self._im_api_url, 'types/InstallationPackageWithLatest/instances'), params=parameter)\n #resp = self._im_session.get('https://192.168.100.52/types/InstallationPackageWithLatest/instances', params=parameter)\n jresp = json.loads(resp.text)\n\n #pprint(jresp)",
"def get_next_best_versions(self, versions):\n fake_matches = set()\n\n if not self.major.is_yes:\n major_versions = [self.major.val()]\n else:\n major_versions = sorted(set([v.major for v in versions]))\n\n for major in range(min(major_versions), max(major_versions) + 1):\n if not self.minor.is_yes:\n minor_versions = [self.minor.val()]\n else:\n minor_versions = sorted(set([v.minor for v in versions if v.major == major]))\n\n for minor in range(min(minor_versions), max(minor_versions) + 1):\n if not self.patch.is_yes:\n patch_versions = [self.patch.val()]\n else:\n patch_versions = sorted(set([v.patch for v in versions if v.major == major and v.minor == minor]))\n\n for patch in range(min(patch_versions), max(patch_versions) + 1):\n fake = _parse_semver(\"{}.{}.{}\".format(major, minor, patch), makefake=True)\n if fake not in versions:\n fake_matches.add(fake)\n\n return fake_matches"
]
| [
"0.6130742",
"0.6022223",
"0.598399",
"0.59458536",
"0.58944225",
"0.5817369",
"0.57562554",
"0.5738081",
"0.57167447",
"0.5690409",
"0.5682631",
"0.5644309",
"0.5641783",
"0.5640585",
"0.5601502",
"0.5571681",
"0.5530789",
"0.5515461",
"0.55109876",
"0.55059534",
"0.5485498",
"0.54836464",
"0.5479578",
"0.54683113",
"0.54588723",
"0.5452228",
"0.544673",
"0.5419595",
"0.53868973",
"0.53748345"
]
| 0.6876928 | 0 |
Compare the current versions of the packages with the last versions to find updates. | def find_updates(self, versions, last_versions):
updates = []
for package, current_version in versions.items():
last_version = last_versions[package]
if last_version != current_version:
logger.debug(
'=> %s current version (%s) and last '
'version (%s) are different.',
package, current_version, last_version
)
updates.append(
(package, last_version)
)
logger.info('- %d package updates found.', len(updates))
return updates | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_for_updates(package_name, latest_version_str, our_version_str=VERSION):\n our = dict()\n latest = dict()\n for version, suffix in ((our, our_version_str), (latest, latest_version_str)):\n for part in ['major', 'minor', 'patch']:\n version[part], _, suffix = suffix.partition('.')\n version[part] = int(version[part])\n version['suffix'] = suffix\n\n for part in ['major', 'minor', 'patch', 'suffix']:\n if latest[part] > our[part]:\n if part == 'major':\n sys.exit(messages['UpdateRequired'].format(package_name))\n else:\n print >> sys.stderr, messages['UpdateAvailable'].format(package_name)\n return",
"def _checkUpdateNeeded(self):\n try:\n currentVersionLine = str(subprocess.run(['pacman', '-Q', '-i', self._name],\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=True).stdout)\n currentVersion = re.sub(r'.*Version\\s*: ([\\d|\\.]*)-.*', r'\\1', currentVersionLine).split('.')\n newVersion = self._version.split('.')\n for i in range(0, min(len(currentVersion), len(newVersion))):\n if currentVersion[i].isdigit():\n # TODO: test if new version is only digits too, two of them should be the same anyway\n if int(newVersion[i]) > int(currentVersion[i]):\n return True\n if int(newVersion[i]) < int(currentVersion[i]):\n return False\n return len(newVersion) > len(currentVersion)\n except subprocess.CalledProcessError:\n # Package not found: to be installed then\n return True",
"def compare_version(self, ref):\n if not self.version:\n print 'No version for package %s' % self.package\n if not ref.version:\n print 'No version for package %s' % ref.package\n if not self.parsed_version:\n self.parsed_version = parse_version(self.version)\n if not ref.parsed_version:\n ref.parsed_version = parse_version(ref.version)\n return self.parsed_version.compare(ref.parsed_version)",
"def check_for_updates():\n last_version = str(request.urlopen(__source__).read().decode(\"utf8\"))\n if str(open(__file__).read()) != last_version:\n log.warning(\"Theres new Version available!, Update from \" + __source__)\n else:\n log.info(\"No new updates!,You have the lastest version of this app.\")",
"def compare_versions(a, b):\n if a:\n a = distutils.version.LooseVersion(a)\n b = distutils.version.LooseVersion(b)\n return a >= b\n else:\n return False",
"def check_updates(self):\n try:\n if not common.latest_version(version):\n self.update_notify()\n except:\n self.neterror()",
"def compare_versions(deployed_version, current_version):\n assert isinstance(deployed_version, str)\n assert isinstance(current_version, str)\n\n deployed_version = deployed_version.replace('_', '0')\n current_version = current_version.replace('_', '0')\n deployed = [int(x) for x in deployed_version.split('.')]\n current = [int(x) for x in current_version.split('.')]\n\n if deployed[0] != current[0]:\n return False\n if deployed[1] != current[1]:\n return False\n return True",
"def compare_versions(version1, version2):\n version1 = coerce_version(version1)\n version2 = coerce_version(version2)\n return compare_version_objects(version1, version2)",
"def checkVersion(self):\n try:\n respInfo = self._reqSession.get(self._host + \"/static/pythonSDKVersion.txt\")\n if respInfo.status_code != 200 or len(respInfo.text) > 20:\n return\n latestVersion = respInfo.text.strip()\n import eventregistry._version as _version\n currentVersion = _version.__version__\n for (latest, current) in zip(latestVersion.split(\".\"), currentVersion.split(\".\")):\n if int(latest) > int(current):\n logger.info(\"==============\\nYour version of the module is outdated, please update to the latest version\")\n logger.info(\"Your version is %s while the latest is %s\", currentVersion, latestVersion)\n logger.info(\"Update by calling: pip install --upgrade eventregistry\\n==============\")\n return\n # in case the server mistakenly has a lower version that the user has, don't report an error\n elif int(latest) < int(current):\n return\n except:\n pass",
"def test_all_versions(self):\n pkgs = [\n make_package(factory=DynamoPackage),\n make_package(version=\"1.3\", filename=\"mypath3\", factory=DynamoPackage),\n make_package(\"mypkg2\", \"1.3.4\", \"my/other/path\", factory=DynamoPackage),\n ]\n self._save_pkgs(*pkgs)\n saved_pkgs = self.db.all(\"mypkg\")\n self.assertCountEqual(saved_pkgs, pkgs[:2])",
"def get_and_update_versions ():\n\n try:\n get_comp_versions (\"ACE\")\n get_comp_versions (\"TAO\")\n\n if opts.update:\n files = []\n files += update_version_files (\"ACE\")\n files += update_version_files (\"TAO\")\n files += create_changelog (\"ACE\")\n files += create_changelog (\"TAO\")\n files += update_spec_file ()\n files += update_debianbuild ()\n\n commit (files)\n\n except:\n print (\"Fatal error in get_and_update_versions.\")\n raise",
"def test_all_versions(self):\n pkgs = [\n make_package(factory=SQLPackage),\n make_package(version=\"1.3\", filename=\"mypath3\", factory=SQLPackage),\n make_package(\"mypkg2\", \"1.3.4\", \"my/other/path\", factory=SQLPackage),\n ]\n for pkg in pkgs:\n self.db.save(pkg)\n saved_pkgs = self.db.all(\"mypkg\")\n self.assertCountEqual(saved_pkgs, pkgs[:2])",
"def test_get_all_available_release_updates(self):\n self._ucr({\n 'repository/online/component/a': 'yes',\n 'repository/online/component/a/version': 'current',\n })\n self._uri({\n '%d.%d/maintained/%d.%d-%d/all/Packages.gz' % (MAJOR, MINOR + 1, MAJOR, MINOR + 1, 0): DATA,\n '%d.%d/maintained/component/%s/all/Packages.gz' % (MAJOR, MINOR + 1, 'a'): DATA,\n '%d.%d/maintained/%d.%d-%d/all/Packages.gz' % (MAJOR + 1, 0, MAJOR + 1, 0, 0): DATA,\n })\n versions, component = self.u.get_all_available_release_updates()\n self.assertEqual(['%d.%d-%d' % (MAJOR, MINOR + 1, 0)], versions)\n self.assertEqual('a', component)",
"def updates_check(self,request):\n\t\tp0 = subprocess.Popen(['LC_ALL=C apt-get update'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n\t\t(stdout,stderr) = p0.communicate()\n\n\t\tp1 = subprocess.Popen(['LC_ALL=C apt-get -u dist-upgrade -s'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n\t\t(stdout,stderr) = p1.communicate()\n\n\t\tresult = {}\n\t\tresult['install'] = []\n\t\tresult['update'] = []\n\t\tresult['remove'] = []\n\t\tfor line in stdout.split('\\n'):\n\t\t\t# upgrade:\n\t\t\t# Inst univention-updater [3.1.1-5] (3.1.1-6.408.200810311159 192.168.0.10)\n\t\t\t# inst:\n\t\t\t# Inst mc (1:4.6.1-6.12.200710211124 oxae-update.open-xchange.com)\n\t\t\t#\n\t\t\t# *** FIX ***\tthe above example lines ignore the fact that there's\n\t\t\t#\t\t\t\tsome extra text (occasionally) after the last closing\n\t\t\t#\t\t\t\tparenthesis. Until now, I've seen only a pair of empty\n\t\t\t#\t\t\t\tbrackets [], but who knows...\n\t\t\tmatch = re.search('^Inst (\\S+)\\s+(.*?)\\s*\\((\\S+)\\s.*\\)',line)\n\t\t\tif match:\n\t\t\t\tpkg = match.group(1)\n\t\t\t\told = match.group(2)\n\t\t\t\tver = match.group(3)\n\t\t\t\tif old:\n\t\t\t\t\tresult['update'].append([pkg,ver])\n\t\t\t\telse:\n\t\t\t\t\tresult['install'].append([pkg,ver])\n\t\t\telif line.startswith('Remv '):\n\t\t\t\tl=line.split(' ')\n\t\t\t\tpkg = l[1]\n\t\t\t\tver = _('unknown')\n\t\t\t\tif len(l) > 2:\n\t\t\t\t\tver = l[2].replace('[','').replace(']','')\n\t\t\t\tresult['remove'].append([pkg,ver])\n\n\n\t\t# sort package names?\n\t\tresult['update'] = sorted(result['update'])\n\t\tresult['install'] = sorted(result['install'])\n\t\tresult['remove'] = sorted(result['remove'])\n\n\t\tself.finished(request.id,result)",
"def download_updates_if_available(self):\n current_version = self.get_version(self.get_module_and_path(self._main_dir))\n latest_version = self.get_latest_version()\n\n print('Checking version... ')\n print('\\tCurrent version: ', current_version)\n print('\\tLatest version: ', latest_version)\n\n if not latest_version:\n return False\n\n if (not current_version) or (latest_version > current_version):\n print('Updating...')\n if not self.path_exists(self._module):\n os.mkdir(self._module)\n\n # Check if there's a botched download already. If next directory already exists remove it and tree.\n if self.path_exists(self.get_module_and_path('next')):\n self.rmtree(self.get_module_and_path('next')) # Remove the 'next' directory and contents.\n\n # Create the next directory and download the source files.\n os.mkdir(self.get_module_and_path('next'))\n self.download_all_files(self._github_repo + '/contents/' + self._main_dir, latest_version)\n\n # Last step is to write the .version file only if we have completed the download\n with open(self.get_module_and_path('next/.version'), 'w') as versionfile:\n versionfile.write(latest_version)\n versionfile.close()\n\n return True\n return False",
"def get_other_updates(self):\n self.composite_logger.log(\"\\nDiscovering 'other' packages...\")\n other_packages = []\n other_package_versions = []\n\n all_packages, all_package_versions = self.get_all_updates(True)\n security_packages, security_package_versions = self.get_security_updates()\n if len(security_packages) == 0 and 'CentOS' in str(self.env_layer.platform.linux_distribution()): # deliberately terminal - erring on the side of caution to avoid dissat in uninformed customers\n self.composite_logger.log_error(\"Please review patch management documentation for information on classification-based patching on YUM.\")\n error_msg = \"Classification-based patching is only supported on YUM if the computer is independently configured to receive classification information.\" \\\n \"Please remove classifications from update deployments to CentOS machines to bypass this error.\"\n self.status_handler.add_error_to_status(error_msg, Constants.PatchOperationErrorCodes.PACKAGE_MANAGER_FAILURE)\n raise Exception(error_msg, \"[{0}]\".format(Constants.ERROR_ADDED_TO_STATUS))\n\n for index, package in enumerate(all_packages):\n if package not in security_packages:\n other_packages.append(package)\n other_package_versions.append(all_package_versions[index])\n\n self.composite_logger.log(\"Discovered \" + str(len(other_packages)) + \" 'other' package entries.\")\n return other_packages, other_package_versions",
"async def check_new_version(now):\n result = await get_newest_version(hass, huuid, include_components)\n\n if result is None:\n return\n\n newest, releasenotes, android, apt = result\n\n # Load data from supervisor on hass.io\n if hass.components.hassio.is_hassio():\n newest = hass.components.hassio.get_homeassistant_version()\n\n # Validate version\n if StrictVersion(newest) > StrictVersion(current_version):\n _LOGGER.info(\"The latest available version is %s\", newest)\n info = 'Dostępna jest nowa wersja ' + newest + '. ' + releasenotes\n hass.states.async_set(\n ENTITY_ID, info, {\n ATTR_FRIENDLY_NAME: 'Aktualizacja',\n \"icon\": \"mdi:update\",\n \"reinstall_dom_app\": True,\n \"reinstall_android_app\": android,\n \"apt\": apt\n }\n )\n # add all entities to keep the order\n # hass.async_add_job(\n # hass.services.async_call(\n # 'group',\n # 'set', {\n # \"object_id\": \"dom_system_version\",\n # \"entities\": [\n # \"sensor.version_info\",\n # \"script.ais_update_system\",\n # \"camera.remote_access\",\n # \"input_boolean.ais_remote_access\",\n # \"sensor.ais_secure_android_id_dom\",\n # \"script.ais_scan_network_devices\",\n # \"script.ais_restart_system\",\n # \"script.ais_stop_system\"]}))\n\n hass.states.async_set(\n 'script.ais_update_system', 'off', {\n ATTR_FRIENDLY_NAME: ' Zainstaluj aktualizację',\n \"icon\": \"mdi:download\"\n }\n )\n\n else:\n info = 'Twój system jest aktualny, wersja ' + newest + '. '\n info += releasenotes\n hass.states.async_set(\n ENTITY_ID, info, {\n ATTR_FRIENDLY_NAME: 'Wersja',\n \"icon\": \"mdi:update\",\n \"reinstall_dom_app\": False,\n \"reinstall_android_app\": False,\n \"apt\": apt\n }\n )\n hass.states.async_set(\n 'script.ais_update_system', 'off', {\n ATTR_FRIENDLY_NAME: ' Sprawdź dostępność aktualizacji',\n \"icon\": \"mdi:refresh\"\n }\n )\n _LOGGER.info(\n \"You are on the latest version (%s) of Assystent domowy\", newest)",
"def test_all_versions(self):\n pkgs = [\n make_package(factory=SQLPackage),\n make_package(version=\"1.3\", filename=\"mypath3\", factory=SQLPackage),\n make_package(\"mypkg2\", \"1.3.4\", \"my/other/path\", factory=SQLPackage),\n ]\n self.sql.add_all(pkgs)\n saved_pkgs = self.db.all(\"mypkg\")\n self.assertCountEqual(saved_pkgs, pkgs[:2])",
"def check_all():\n for package, version in required_versions.items():\n try:\n module = importlib.import_module(package)\n except ImportError:\n return\n else:\n if StrictVersion(version) > StrictVersion(module.__version__):\n raise RuntimeError(\"Your version of %s is too old - it must be at least %s\" % (\n package,\n version,\n ))",
"def test_compare_local_version_is_newer(self):\n\n given = \"1.15.0.dev (Hello, World)\"\n expected = False\n actual = Version.compare(given)\n\n self.assertEqual(expected, actual)",
"def updateToLatest(self):\n # Determine the newest stamp in each dependency\n latest = {}\n for item in self.spec:\n if isinstance(item, CachedResource):\n latest[item] = item.getLatestStamp()\n if not latest:\n return\n\n # Our new stamp is the greatest out of all deps' stamps\n stamp = max(latest.itervalues())\n\n # Update only if we need to\n if self.getLatestStamp() >= stamp:\n return\n self.updateStamp(latest, stamp)\n\n # Clean old versions if that was successful\n self.cleanStamps(lambda s: s < stamp)",
"def test_version_check_update_available(self):\n output = self.run_command(\"selfupdate --check bennr01:selfupdate_test_future\", exitcode=0)\n self.assertIn(\"Target: bennr01:selfupdate_test_future\", output)\n self.assertNotIn(\"Target: ywangd:master\", output)\n self.assertNotIn(\"Already at latest version\", output)\n self.assertIn(\"New version available\", output)\n self.assertNotIn(\"Error: \", output)",
"def compareVersions(self):\n logger.debug(\"Func: compareVersions\")\n\n cMajorV = nuke.NUKE_VERSION_MAJOR\n cMinorV = nuke.NUKE_VERSION_MINOR\n currentVersion = float(\"{0}.{1}\".format(cMajorV, cMinorV))\n\n dbMajorV = self._currentSceneInfo[\"NukeVersion\"][0]\n dbMinorV = self._currentSceneInfo[\"NukeVersion\"][1]\n databaseVersion = float(\"{0}.{1}\".format(dbMajorV, dbMinorV))\n\n messageList = []\n\n\n if currentVersion == databaseVersion:\n pass\n\n if currentVersion < databaseVersion: # version compare\n message = \"Base Scene is created with a HIGHER Nuke version ({0}). Are you sure you want to continue?\".format(databaseVersion)\n messageList.append(message)\n\n if currentVersion > databaseVersion:\n message = \"Base Scene is created with a LOWER Nuke version ({0}). Are you sure you want to continue?\".format(databaseVersion)\n messageList.append(message)\n\n message=\"\"\n for x in messageList:\n message = message + \"\\n\" + str(x)\n\n if messageList == []:\n return 0, message\n else:\n return -1, message",
"def test_component_update_get_packages(self):\n MockPopen.mock_stdout = 'Inst a [old] (new from)\\nInst b (new from)\\nRemv c (old PKG)\\nRemv d PKG'\n installed, upgraded, removed = self.u.component_update_get_packages()\n self.assertEqual([('b', 'new')], installed)\n self.assertEqual([('a', 'old', 'new')], upgraded)\n self.assertEqual([('c', 'old'), ('d', 'unknown')], removed)",
"def get_all_available_versions_of_package(self, package_name):\n # Sample output format\n # Available Packages\n # kernel.x86_64 3.10.0-862.el7 base\n # kernel.x86_64 3.10.0-862.2.3.el7 updates\n # kernel.x86_64 3.10.0-862.3.2.el7 updates\n cmd = self.single_package_check_versions.replace('<PACKAGE-NAME>', package_name)\n output = self.invoke_package_manager(cmd)\n packages, package_versions = self.extract_packages_and_versions_including_duplicates(output)\n return package_versions",
"def compare():\n body: t.Any = request.json\n check_error({'input': {'old': {}, 'new': {}}}, body)\n response_new = rpc_search({'input': body['input']['new']})\n response_old = rpc_search({'input': body['input']['old']})\n\n modules_new = response_new['yang-catalog:modules']['module']\n modules_old = response_old['yang-catalog:modules']['module']\n\n if len(modules_new) == 0 or len(modules_old) == 0:\n abort(404, description='No hits found either in old or new input')\n\n new_mods = []\n for mod_new in modules_new:\n new_rev = mod_new['revision']\n new_name = mod_new['name']\n found = False\n new_rev_found = False\n for mod_old in modules_old:\n old_rev = mod_old['revision']\n old_name = mod_old['name']\n if new_name == old_name and new_rev == old_rev:\n found = True\n break\n if new_name == old_name and new_rev != old_rev:\n new_rev_found = True\n if not found:\n mod_new['reason-to-show'] = 'New module'\n new_mods.append(mod_new)\n if new_rev_found:\n mod_new['reason-to-show'] = 'Different revision'\n new_mods.append(mod_new)\n if len(new_mods) == 0:\n abort(404, description='No new modules or modules with different revisions found')\n output = {'output': new_mods}\n return output",
"def checkForUpdates(self):\r\n url = self.config.get_conf(\"Client\", \"versions-url\")\r\n try:\r\n self._logger.info(\"Checking for updates...\")\r\n response = requests.get(url)\r\n if 200 <= response.status_code <= 300:\r\n data = response.json()[self.mission_name]\r\n self.updateGrafana(data)\r\n self.updateSubSystems(data)\r\n self.updateVersions()\r\n else:\r\n self._logger.warning(\"Connection failed to version check endpoint %s\", url)\r\n except requests.ConnectionError:\r\n self._logger.warning(\"Connection failed to version check endpoint %s\", url)\r\n except requests.Timeout:\r\n self._logger.warning(\"Connection to version check endpoint %s timed out.\", url)\r\n except requests.RequestException:\r\n self._logger.warning(\"Something went wrong with the version check %s request.\", url)\r\n except Exception as exc:\r\n self._logger.warning(\"Something went wrong with version updating: %s\", str(exc))",
"def test_compare_local_version_is_older(self):\n\n given = \"2.34.0.dev (Hello, World)\"\n expected = True\n actual = Version.compare(given)\n\n self.assertEqual(expected, actual)",
"def check_versions(context, num=0, versions='', ecosystem='', package=''):\n versions = split_comma_separated_list(versions)\n vrsns = context.response.json()['items']\n assert len(vrsns) == num\n for v in vrsns:\n assert v['ecosystem'] == ecosystem\n assert v['package'] == package\n assert v['version'] in versions",
"def compare_versions(fixed_version, target_version):\n for i, j in zip(map(int, fixed_version.split(\".\")), map(int, target_version.split(\".\"))):\n if i == j:\n continue\n return i > j\n return len(fixed_version.split(\".\")) > len(target_version.split(\".\"))"
]
| [
"0.72568524",
"0.68499315",
"0.67050654",
"0.66041917",
"0.6572279",
"0.65208465",
"0.6417936",
"0.641462",
"0.6412614",
"0.6402912",
"0.63977844",
"0.63609093",
"0.63354707",
"0.63339275",
"0.63038474",
"0.62751496",
"0.62681097",
"0.6237268",
"0.62309456",
"0.6142296",
"0.6105759",
"0.60947585",
"0.60864615",
"0.6082122",
"0.60756683",
"0.60651153",
"0.6044204",
"0.6040779",
"0.6026301",
"0.60162616"
]
| 0.78968257 | 0 |
returns a dictionary with 'variable' > 'domain' mappings | def vdict(self, variables=None):
if variables is None:
return { self.vp_node[v].name: self.vp_node[v].domain for v in self.variables }
return { v.name: v.domain for v in variables } | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_domain_mappings(domain_to_intents: Dict) -> Dict:\n domain2id = {}\n domains = list(domain_to_intents)\n for index, domain in enumerate(domains):\n domain2id[domain] = index\n return domain2id",
"def order_domain_values(var,assignment,csp):\n #right now it works only as just convert value and return\n #no special black magic yet\n return var.domain",
"def get_domains(graph: Graph, property_to_id: Dict[str, int], entity_type_to_id: Dict[str, int]) -> Dict[int, int]:\n # dictionary pointing from object property id to an entity type id\n domains = {}\n\n # add all domain triples for which the subject is an object property and the object is an entity type\n for subject, predicate, object in graph.triples((None, RDFS.domain, None)):\n if subject in property_to_id and object in entity_type_to_id:\n domains[property_to_id[subject]] = entity_type_to_id[object]\n\n return domains",
"def domain_resolve(self, domain):\n a_result = server.resolve(domain, 'A')\n mx_result = server.resolve(domain, 'MX')\n ns_result = server.resolve(domain, 'NS')\n\n domain_data = {}\n domain_data.setdefault('resolving', {'A': a_result})\n domain_data['resolving']['MX'] = mx_result\n domain_data['resolving']['NS'] = ns_result\n\n return domain_data",
"def dict_of_domains(fc):\r\n # need to find root database (GDB or SDE)\r\n db_root = os.path.dirname(fc)\r\n while db_root[-4:].lower() != '.gdb' and db_root[-4:].lower() != '.sde':\r\n old_db_root = db_root # protect against infinite loop\r\n db_root = os.path.dirname(db_root)\r\n if old_db_root == db_root: # protect against infinite loop\r\n break\r\n arcpy.AddMessage(\"Retrieving Domains from \" + str(db_root))\r\n return {domain.name: domain.codedValues for domain in arcpy.da.ListDomains(db_root)}",
"def detect_domains (nffg):\n return {infra.domain for infra in nffg.infras}",
"def known_domain_data(known_uid, known_verbose_name, known_os_type):\n return {\n 'id': known_uid,\n 'verbose_name': known_verbose_name,\n 'os_type': known_os_type\n }",
"def order_domain_values(self, var, assignment):\n # retrieve the domain for the variable\n domain = self.domains[var]\n # initialise a dictionary for sorting the values in the variable's domain\n sorting_dict = {} \n # for each of the values in the variable's domain \n for value in domain:\n # set the constraint counter to zero\n sorting_dict[value] = 0\n # for each of the neighbors of the variable\n for neighbor in self.crossword.neighbors(var):\n # retrieve the overlap indexes\n overlap = self.crossword.overlaps[(neighbor, var)]\n # for each of the overlap's possible values (the overlap's domain)\n for test in self.domains[neighbor]:\n # if the overlap letter is not the same\n if test[overlap[0]] != value[overlap[1]]:\n # this value constrains the neighbor's domain\n sorting_dict[value] += 1\n # sort the dictionary by the value of the sorting key\n sorted_vars = sorted(domain, key=lambda x: sorting_dict[x])\n return sorted_vars",
"def set_domain(self, var, domain) :\n if var not in self.variables :\n raise KeyError(str(var) + \" is not a variable in this problem.\")\n self.domains[var] = sorted(domain[:])\n return self",
"def domainnames(l):\n mapping = {}\n # locate all the samba domains in the ldap\n r = l.search_s('dc=elex', ldap.SCOPE_SUBTREE, '(objectClass=sambaDomain)', ['sambaDomainName','sambaSID'])\n for dn, entry in r:\n mapping[dn] = (entry['sambaDomainName'][0], entry['sambaSID'][0])\n return mapping",
"def support_pruning(self):\r\n if self.curr_domains is None:\r\n self.curr_domains = {v: list(self.domains[v]) for v in self.variables}",
"def get_feature_domain_dict(self):\n feature_domain_dict = {}\n for feature_index in range(len(self.train_examples[0])):\n domain = set([example[feature_index] for example in self.train_examples])\n feature_domain_dict[self.features[feature_index]] = domain\n\n return feature_domain_dict",
"def infer_assignment(self):\r\n self.support_pruning()\r\n return {v: self.curr_domains[v][0]\r\n for v in self.variables if 1 == len(self.curr_domains[v])}",
"def __learn__(self):\n\n return domains # including 3 values, (begin, end, key)",
"def _infer_domain(self, name, domain, elements):\n if '*' not in domain:\n return domain\n debug('guessing a better domain for {}: {}'.format(name, domain))\n\n # Domain as a list of references to Variables in the File/xr.Dataset\n domain_ = [self[d] for d in domain]\n\n for i, d in enumerate(domain_): # Iterate over dimensions\n e = set(elements[i])\n if d.name != '*' or len(e) == 0: # pragma: no cover\n assert set(d.values).issuperset(e)\n continue # The stated domain matches the data; or no data\n # '*' is given\n if (self._state[name]['attrs']['type_code'] == gdxcc.GMS_DT_PAR and\n self._implicit):\n d = '_{}_{}'.format(name, i)\n debug(('Constructing implicit set {} for dimension {} of {}\\n'\n ' {} instead of {} elements')\n .format(d, name, i, len(e), len(self['*'])))\n self.coords[d] = elements[i]\n d = self[d]\n else:\n # try to find a smaller domain for this dimension\n # Iterate over every Set/Coordinate\n for s in self.coords.values():\n if s.ndim == 1 and set(s.values).issuperset(e) and \\\n len(s) < len(d):\n d = s # Found a smaller Set; use this instead\n domain_[i] = d\n\n # Convert the references to names\n inferred = [d.name for d in domain_]\n\n if domain != inferred:\n # Store the result\n self._state[name]['attrs']['domain_inferred'] = inferred\n debug('…inferred {}.'.format(inferred))\n else:\n debug('…failed.')\n\n return inferred",
"def _remove_domain(req):\n r = {}\n for key in req:\n if \"domain\" not in key:\n r[key] = req[key]\n return r",
"def format_domain(domain):\n domain.ns_converted = []\n for ns in domain.ns :\n if isinstance(ns, objects.DomainHostAttr) :\n ns_item = {\n 'hostname' : ns.hostname,\n 'ips' : []\n }\n\n for hostaddr in ns.hostAddr :\n ns_item['ips'].append(hostaddr.ip)\n else :\n ns_item = {\n 'hostname' : ns.name,\n 'ips' : [],\n 'hostobj' : 1\n }\n domain.ns_converted.append(ns_item)\n\n return domain",
"def par_domain(self):",
"def all_different_assignment_propagator(var: str, val: int, domains: Domains, problem_vars: FrozenSet[str]) -> Domains:\r\n reduced_domains = {v: frozenset({val}) if v == var else\r\n domains[v] - {val} if v in problem_vars else\r\n domains[v] for v in domains}\r\n return reduced_domains",
"def get_domain_name(self, DomainName: str) -> Dict:\n pass",
"def unordered_domain_values(var, assignment, csp):\r\n return csp.choices(var)",
"def domain(self):\n return self.keys()",
"def _fqdn_parts(fqdn):\n parts = tldextract.extract(fqdn)\n result = {}\n result['subdomain'] = parts.subdomain\n result['domain'] = parts.domain\n result['tld'] = parts.suffix\n\n return result",
"def read_vars(self, vars):\n fields = {}\n for var in vars:\n try:\n fields[var] = Variable(self.template, var)[:]\n except:\n if var == 'NN':\n fields[var] = self.brunt_vaisalla()\n elif var == 'KE':\n fields[var] = self.kinetic_energy()\n elif var == 'Ep':\n fields[var] = self.potential_energy()\n elif var == 'none':\n fields[var] = np.ones(self.params['global_shape'])\n elif var == 'APE':\n fields[var] = self.available_potential_energy()\n elif var == 'Eb':\n fields[var] = self.background_potential_energy()\n elif var == 'test':\n fields[var] = self.test()\n elif var == 'p_mean':\n fields[var] = self.mean_pressure()\n elif var == 'Q_times_z':\n fields[var] = self.E_2()\n elif var == 'br_times_z':\n fields[var] = self.E_1()\n elif var == 'phi_z':\n fields[var] = self.buoyancy_flux()\n elif var == 'phi_b':\n fields[var] = self.buoyancy_forcing()\n elif var == 'pr':\n fields[var] = self.backgroud_pressure()\n\n if var == 'u':\n fields[var] = fields[var]/self.params['dx']\n elif var == 'v':\n fields[var] = fields[var]/self.params['dy']\n elif var == 'w':\n fields[var] = fields[var]/self.params['dz']\n\n return fields",
"def create_dns_dictionary(self, path_tracefile):\n responses = self.get_dns_responses(path_tracefile)\n dns_dict = dict()\n for response in responses:\n for x in range(response[DNS].ancount): # answer count, how many IP adresses are returned for the query\n try: # answer count could also include 'DNS SRV Resource Record' which does not have a 'rrname' attribute so ancount is wrong if there is such a record -> TODO get amount of DNSRR instead of using ancount\n domain = getattr(response[DNSRR][x], 'rrname').decode(\"utf-8\") # domain (this is returned in bytes so decode)\n ip = getattr(response[DNSRR][x], 'rdata') # IP adres of the domain, TODO make this work for multiple ip adresses for one domain (Test with [0] at end)\n dns_dict[ip] = domain[:-1] #remove last char '.' \n except:\n continue\n return dns_dict",
"def get_technique_to_domain():\n global technique_to_domain\n \n if not technique_to_domain:\n technique_to_domain = stixhelpers.get_technique_id_domain_map(get_ms())\n \n return technique_to_domain",
"def build_dp_variables(dp):\n v = {}\n for dps in dp.deployedpackageservice_set.all():\n # 1st pass: build network variables\n if dps.address:\n for net in json.loads(dps.address):\n for nn in net: # net name\n for a in net[nn]: # address\n if \":\" in a: # XXX: do better\n a_type = \"ipv6\"\n else:\n a_type = \"ipv4\"\n v[\"%s.%s_%s\" % (dps.service.ident, nn, a_type)] = a\n # 2nd pass: pull in all Chef's attributes from the \"default\"\n # and the \"normal\" keys of \"knife node show -l\"\n try:\n attrs = json.loads(exec_knife(\"node show -l -F json %s\" % dps.hostname))\n flatten_dict(dps.service.ident, v, attrs[u\"default\"])\n flatten_dict(dps.service.ident, v, attrs[u\"normal\"])\n except:\n pass\n return v",
"def get_variable_record(self) -> PhyPropMapping:\n return {var: getattr(self, var, None) for var in self._record_vars}",
"def in_second_domain_set(self, var1, var2, var3):\n var4 = [\"ha\", \"hb\", \"ac\", \"sc\", \"gd\", \"sd\", \"he\", \"ah\", \"qh\", \"sh\", \"hi\", \"bj\", \"fj\", \"tj\", \"xj\", \"zj\", \"hk\", \"hl\", \"jl\", \"nm\", \"hn\", \"ln\", \"sn\", \"yn\", \"co\", \"mo\", \"cq\", \"gs\", \"js\", \"tw\", \"gx\", \"jx\", \"nx\", \"sx\", \"gz\", \"xz\"]\n var5 = [\"cat\", \"edu\", \"net\", \"biz\", \"mil\", \"int\", \"com\", \"gov\", \"org\", \"pro\"]\n var6 = [\"name\", \"aero\", \"info\", \"coop\", \"jobs\", \"mobi\", \"arpa\"]\n var7 = [\"travel\", \"museum\"]\n var8 = [None, None, var4, var5, var6, None, var7]\n var9 = [0, 0, len(var4), len(var5), len(var6), 0, len(var7)]\n if var2==2:\n return self.in_domain_set(var1, var2, var8[var2], var9[var2], var3)\n elif var2==3:\n return self.in_domain_set(var1, var2, var8[var2], var9[var2], var3)\n elif var2==4:\n return self.in_domain_set(var1, var2, var8[var2], var9[var2], var3)\n elif var2==6:\n return self.in_domain_set(var1, var2, var8[var2], var9[var2], var3)\n elif var2==5:\n pass\n else:\n return 0",
"def copy_with_assign(self, domains, var=None, new_domain=set()):\r\n newdoms = domains.copy()\r\n if var is not None:\r\n newdoms[var] = new_domain\r\n return newdoms"
]
| [
"0.65483457",
"0.6292216",
"0.6283033",
"0.623878",
"0.62214243",
"0.61496407",
"0.61425346",
"0.6082981",
"0.60341144",
"0.60192126",
"0.59888387",
"0.59511423",
"0.5947151",
"0.5796901",
"0.57882506",
"0.5723775",
"0.57043797",
"0.56787944",
"0.56731504",
"0.5666347",
"0.56439245",
"0.5609814",
"0.56059855",
"0.559828",
"0.55896336",
"0.5587865",
"0.5572213",
"0.5544532",
"0.55269855",
"0.5522682"
]
| 0.71076834 | 0 |
Check to see if the network encoded by the input GeneGraph has a valid shape. A shape can be invalid if (1) the final shape is <= 0 along any axis, or (2) if the end of any encoder convblock has an odd shape along any axis. | def shape_check(graph: mt.GeneGraph,
min_size: Union[int, Sequence[int]]=0) -> Tuple[bool, bool]:
# True if the shape goes <= 0
too_small = False
# True if any encoder blocks have odd output shape
odd_output = False
# If the min acceptable size is an int, make into a list of ints
output_shape = graph.output_shape()
if isinstance(min_size, int):
min_size = [min_size] * len(output_shape)
if any([s <= m for s, m in zip(output_shape, min_size)]):
logger.info(f'Output shape {output_shape} has components '
f'<= {min_size}.')
too_small = True
# Check the output shape of each convolution block in the network encoder
# path
encoder_path = graph.genes['encoderdecoder'].children[0]
for i, block in enumerate(encoder_path.children):
if any([s % 2 == 1 for s in block.data_shape_out]):
logger.info(f'Encoder block {i} has shape {block.data_shape_out}'
f' with odd components.')
odd_output = True
return too_small, odd_output | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _check_shape(placeholder_shape, data_shape):\n\n return True",
"def is_valid(self) -> bool:\r\n try:\r\n self.shape\r\n return True\r\n except ValueError:\r\n return False",
"def _check_shape(self, X):\n return all([X.shape[i] == self.train_shape[i] for i in range(2)])",
"def isValidPcbShape(g):\n return g.GetShape() != pcbnew.S_SEGMENT or g.GetLength() > 0",
"def _check_shape(input_shape):\n msg = ('Input to SpatialExpansion must be 4D with dimensions: '\n '(n_observations, n_spatial_0, n_spatial_1, n_features), '\n 'but received shape: {}'.format(input_shape))\n assert len(input_shape) == 4, msg",
"def _is_all_input_shape_generalize(input_shape_tuple):\n for elem in input_shape_tuple:\n if not is_shape_unknown(elem.shape):\n return False\n return True",
"def validate_shape(self):\n if len(self._first_rgb.shape) != 5:\n raise ValueError(f\"Invalid shape: {self._first_rgb.shape}\")",
"def _check_shape(input_shape):\n msg = ('Input to FlattenAxis must be 5D with dimensions: '\n '(n_observations, n_spatial_0, n_spatial_1, n_temporal, '\n 'n_features), but received shape: {}'.format(input_shape))\n assert len(input_shape) == 5, msg",
"def check_data_shape(self, data_shape):\n if not len(data_shape) == 2:\n raise ValueError('data_shape should have length 2')\n if not data_shape[0] == 1:\n raise ValueError('This iterator expects inputs to have 1 channels.')",
"def validate_graph(self) -> bool:\n return True",
"def _check_shape(self, obj, expected_shape):\n if self.shape != expected_shape:\n raise ValueError(\n '%s expects internal signal %s to be %s, but it is %s' % (\n obj, self.name, Shaped(expected_shape).describe_shape(),\n self.describe_shape()))",
"def check_input_shape(self, op, block):\n\n ipt_name = op.input(op.input_names[0])\n ipt_shape = block.var(ipt_name).shape\n for i in ipt_shape:\n if i < 0:\n warning_msg = (\n f\"Input {ipt_name}(shape={ipt_shape}) has unkown dimension shapes. \"\n f\"Specifying static values may improve performance\"\n )\n warnings.warn(warning_msg)",
"def is_valid(self):\n if len(self.exterior) < 3:\n return False\n return self.to_shapely_polygon().is_valid",
"def check_shape(self):\r\n if np.array(self.img).shape != (1536, 2048, 3):\r\n raise BadShape",
"def _shape_check(self, X, y):\n if not len(y.shape) > 1:\n raise RuntimeError(\"The shape of y is incorrect.\")\n if y.shape != X.shape[:-1]:\n raise RuntimeError(\"X and y must have the same number of \" +\n \"samples and microstructure shape.\")\n if X.shape[-1] != 3:\n raise RuntimeError(\"X must have 3 continuous local states \" +\n \"(euler angles)\")",
"def _is_empty(shape):\n return F.shape_mul(shape) == 0",
"def is_valid(i, j, shape):\n return i >= 0 and j >= 0 and i < shape[0] and j < shape[1]",
"def _check_shape(shape):\n if type(shape) == int:\n shape = (shape, shape)\n check_odd(shape, 'psf shape')\n return shape",
"def check_shape(layer1, layer2, attr):\n attr1 = getattr(layer1, attr, None)\n attr2 = getattr(layer2, attr, None)\n if not attr1:\n return not attr2\n return all(attr1.shape.eval() == attr2.shape.eval())",
"def _isvalid(self, data):\n if data is None:\n return False\n elif isinstance(data, (list,tuple)):\n if len(data) <= 0:\n return False\n else:\n return True\n elif isinstance(data, (np.ndarray)):\n if data.size <= 0:\n return False\n else:\n return True\n elif not data:\n return False\n else:\n return True",
"def _is_valid(self):\n\n if (\n self.poly.weight_0 != 0\n or len(self.poly.weight_1) != self.num_qubits\n or len(self.poly.weight_2) != int(self.num_qubits * (self.num_qubits - 1) / 2)\n or len(self.poly.weight_3)\n != int(self.num_qubits * (self.num_qubits - 1) * (self.num_qubits - 2) / 6)\n ):\n return False\n if (\n (self.linear).shape != (self.num_qubits, self.num_qubits)\n or len(self.shift) != self.num_qubits\n or not np.allclose((np.linalg.det(self.linear) % 2), 1)\n ):\n return False\n if (\n not (set(self.poly.weight_1.flatten())).issubset({0, 1, 2, 3, 4, 5, 6, 7})\n or not (set(self.poly.weight_2.flatten())).issubset({0, 2, 4, 6})\n or not (set(self.poly.weight_3.flatten())).issubset({0, 4})\n ):\n return False\n if not (set(self.shift.flatten())).issubset({0, 1}) or not (\n set(self.linear.flatten())\n ).issubset({0, 1}):\n return False\n return True",
"def assert_shape(tensor, shape, name):\n real_shape = tensor.get_shape().as_list()\n same_rank = len(real_shape) == len(shape)\n all_equal = all([(s == r or s == -1) for s, r in zip(shape, real_shape)])\n if not same_rank or not all_equal:\n raise tf.errors.InvalidArgumentError(\n 'Error: Expected tensor %s to have shape %s, but it had shape %s.' %\n (name, str(shape), str(real_shape)))",
"def _check_shape(input_shape):\n msg = ('Input to SpatioTemporalExpansion must be 5D with dimensions: '\n '(n_observations, n_spatial_0, n_spatial_1, n_temporal, '\n 'n_features), but received shape: {}'.format(input_shape))\n assert len(input_shape) == 5, msg",
"def test_shape(self, varname, varshape, ndim): \n if len(varshape) != ndim:\n raise ShapeError('Shape=%s. Expected %i-D array for %s' %\n (repr(varshape), ndim, varname))",
"def _shape_check(shape_a, shape_b, shape_bias, src_dtype, trans_a, trans_b):\n shape_len = len(shape_a)\n inp_src_dtype = src_dtype.lower()\n k_block_size = cce.BLOCK_REDUCE\n check_list = (\"float16\")\n\n if inp_src_dtype not in check_list:\n raise RuntimeError(\"Dtype of input only support float16\")\n\n if shape_len != len(shape_b):\n raise RuntimeError(\"length of a and b are not equal\")\n\n if shape_len < 2:\n raise RuntimeError(\"shape length for batch matmul must large than 2\")\n\n if shape_len == 2:\n raise RuntimeError(\n \"batch matmul not support shape length 2, if shape length equal 2, use matmul!\")\n\n if shape_a[:shape_len - 2] != shape_b[:shape_len - 2]:\n raise RuntimeError(\"batch size of a and b are not equal\")\n\n is_gevm = bool((shape_a[-2] == 1) or (shape_a[-1] == 1))\n is_gemv = bool((shape_b[-2] == 1) or (shape_b[-1] == 1))\n\n if trans_a:\n m_shape = shape_a[shape_len - 1]\n km_shape = shape_a[shape_len - 2]\n else:\n m_shape = shape_a[shape_len - 2]\n km_shape = shape_a[shape_len - 1]\n\n if trans_b:\n kn_shape = shape_b[shape_len - 1]\n n_shape = shape_b[shape_len - 2]\n else:\n kn_shape = shape_b[shape_len - 2]\n n_shape = shape_b[shape_len - 1]\n\n if m_shape == 1:\n if n_shape == 1:\n raise RuntimeError(\"input shape M and N can't both be 1\")\n\n if km_shape != kn_shape:\n raise RuntimeError(\"reduce axis not same\")\n\n if m_shape % cce.BLOCK_IN != 0 and m_shape != 1:\n raise RuntimeError(\n \"input shape M should be 1 or multiple of %d\" % cce.BLOCK_IN)\n\n if m_shape != 1:\n if km_shape % k_block_size != 0:\n raise RuntimeError(\n \"input shape K1 should be multiple of %d\" % cce.BLOCK_IN)\n\n if n_shape % cce.BLOCK_IN != 0 and n_shape != 1:\n raise RuntimeError(\n \"input shape N should be 1 or multiple of %d\" % cce.BLOCK_IN)\n\n shape_bias_length = len(shape_bias)\n\n if shape_bias_length > 0:\n if shape_bias_length == 1:\n if is_gevm or is_gemv:\n if shape_bias[0] != m_shape * n_shape:\n raise RuntimeError(\"broadcast case shape bias for gemv must be equal m*n\")\n else:\n if shape_bias[0] != n_shape:\n raise RuntimeError(\"broadcast bias shape must be equal to shape n\")\n elif shape_bias_length == shape_len:\n out_shape = [i for i in shape_a[:-2]] + [m_shape, n_shape]\n if [i for i in shape_bias] != out_shape:\n raise RuntimeError(\"non broadcast bias shape must be same as output shape\")\n else:\n raise RuntimeError(\"unsupport input shape now for batch bias case\")",
"def _check_size(size):\r\n\r\n if not isinstance(size, (list, tuple)):\r\n raise ValueError(\"Size must be a tuple\")\r\n if len(size) != 2:\r\n raise ValueError(\"Size must be a tuple of length 2\")\r\n if size[0] < 0 or size[1] < 0:\r\n raise ValueError(\"Width and height must be >= 0\")\r\n\r\n return True",
"def test_invalid_input_shape(self):\r\n seed_rng()\r\n verbose = 0\r\n random = True\r\n print_ = False\r\n ones = False\r\n if ones:\r\n random = False\r\n\r\n global mode_with_gpu\r\n mode_with_gpu_orig = mode_with_gpu\r\n try:\r\n if theano.config.mode in ['DebugMode', 'DEBUG_MODE']:\r\n mode_with_gpu = theano.compile.mode.get_mode(\r\n 'FAST_RUN').including('gpu')\r\n for mode in ['valid', 'full']:\r\n for shapes in [((3, 2, 8, 8), (4, 2, 5, 5), (8, 8)),\r\n ((3, 2, 8, 8), (4, 2, 5, 5), (5, 8)),\r\n #((3, 2, 8, 8), (4, 2, 5, 5), (8, 5)),\r\n # We use only the number of columns.\r\n ]:\r\n\r\n self.assertRaises(ValueError, _params_allgood,\r\n shapes[0], shapes[1],\r\n verbose=verbose, random=random,\r\n mode=mode,\r\n print_=print_, ones=ones,\r\n compile_kshp=shapes[2])\r\n finally:\r\n mode_with_gpu = mode_with_gpu_orig",
"def validate_shape_and_dtype(self):\n if self.rgb.dtype != tf.float32:\n raise ValueError(\"Expected float32 rgb!\")\n if len(self.rgb.shape) != 4:\n raise ValueError(f\"Expected (B, H, W, C), got {self.rgb.shape}\")\n _, _, _, channels = self.rgb.shape.as_list()\n if channels != 3:\n raise ValueError(f\"Expected 3 rgb channels, got shape {self.rgb.shape}\")",
"def validate(self):\n invalid = []\n self.load()\n\n if self.graph.num_vertices() < 1:\n return 'Graph is invalid, no vertices'\n\n if self.graph.num_edges() < 1:\n return 'Graph is invalid, no edges'\n\n for v in self.graph.vertices():\n if(v.in_degree() + v.out_degree() == 0):\n invalid.append(v)\n\n if len(invalid) != 0:\n return invalid\n else:\n return 'Graph is valid'",
"def check_dims(self, data):\n if np.ndim(data) != 2:\n raise ValueError('Input data must be a two dimensional numpy array. '\n 'Data received has shape (%g, %g).' % data.shape)"
]
| [
"0.74824107",
"0.7270184",
"0.7125569",
"0.6997042",
"0.6988682",
"0.68868434",
"0.6845374",
"0.6727434",
"0.6715909",
"0.6706594",
"0.668039",
"0.6535564",
"0.651072",
"0.6506139",
"0.6424306",
"0.6356764",
"0.6340228",
"0.6319391",
"0.62358725",
"0.6234697",
"0.6223679",
"0.6222809",
"0.6207965",
"0.6193776",
"0.61256313",
"0.6099719",
"0.6063184",
"0.6052994",
"0.60367256",
"0.6030318"
]
| 0.7861982 | 0 |
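Illustrative sketch for the shape-check record above: it applies the same two validity rules (a non-positive final shape, and odd encoder block outputs) to plain tuples, since `mt.GeneGraph`, `output_shape()` and `data_shape_out` are not available here; the function name and the sample shapes are assumptions for demonstration only.

from typing import Sequence, Tuple, Union

def shape_check_sketch(output_shape: Sequence[int],
                       block_shapes: Sequence[Sequence[int]],
                       min_size: Union[int, Sequence[int]] = 0) -> Tuple[bool, bool]:
    # Rule (1): the final shape must stay above the minimum along every axis
    if isinstance(min_size, int):
        min_size = [min_size] * len(output_shape)
    too_small = any(s <= m for s, m in zip(output_shape, min_size))
    # Rule (2): every encoder block output must be even along every axis
    odd_output = any(s % 2 == 1 for shape in block_shapes for s in shape)
    return too_small, odd_output

# One encoder block with an odd axis (33) and a valid final shape
print(shape_check_sketch((16, 16), [(64, 64), (33, 32)]))  # (False, True)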
Create a queue buffer. One adds items by calling the put(item) method. One can wait for new items by using the blocking pop() method, which returns the index and the newly added item. One can read items that were added previously using the read(index) method. The constructor takes one optional argument, size, the maximum number of items kept; older items are deleted once this limit is exceeded. | def __init__(self,size=10):
self.inbound = Queue() #an internal queue to manage the class properly in a thread safe manner.
self.index = Value('i',0) #index of next item to be added.
self.manager = Manager()
self.buffer = self.manager.list() #the buffer we will store things in.
self.size = size #the maximum size of the buffer
self.newitem = Queue() #a blocking event to control the pop method
t = threading.Thread(target=self.worker) #the worker that will run when items are added.
t.start() #start the worker
self.newitemindex = 0 #index of items to pop | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, size):\n self.queue = collections.deque(maxlen=size)",
"def __init__(self, size):\n self.queue = collections.deque(maxlen = size)",
"def __init__(self, size):\n self.q = deque( maxlen=size)",
"def __init__(self, size):\n self.size = size\n self.queue = []",
"def __init__(self, buffer_size=1000):\r\n\r\n super(Pipe, self).__init__()\r\n self.buffer_size = buffer_size\r\n\r\n # Should it be deque or array?\r\n self.staging_buffer = []\r\n self._ready_buffer = None\r\n\r\n self._done_sending = False\r\n self._done_receiving = False\r\n self._closed = False\r\n\r\n # Taken from Python Queue implementation:\r\n\r\n # mutex must beheld whenever the queue is mutating. All methods\r\n # that acquire mutex must release it before returning. mutex\r\n # is shared between the three conditions, so acquiring and\r\n # releasing the conditions also acquires and releases mutex.\r\n self.mutex = threading.Lock()\r\n # Notify not_empty whenever an item is added to the queue; a\r\n # thread waiting to get is notified then.\r\n self.not_empty = threading.Condition(self.mutex)\r\n # Notify not_full whenever an item is removed from the queue;\r\n # a thread waiting to put is notified then.\r\n self.not_full = threading.Condition(self.mutex)",
"def __init__(self, size):\n self.size = size\n self.queue = deque([])\n self.cur_sum = 0",
"def __init__ (self, size: int):\n self.size = size\n self.queue = []\n self.sum = 0",
"def __init__(self, buffer_size, random_seed=123):\n self.buffer_size = buffer_size\n self.count = 0\n self.buffer = deque()\n random.seed(random_seed)\n self.last_recent_batch = 0",
"def __init__(self, size):\n self.size = size\n self.buffer = [None]*size\n self.start = 0\n self.end = 0",
"def new_queue() -> Queue:\n return multiprocessing.Queue()",
"def __init__(self, buffer_size, random_seed=123):\n\t\tself.buffer_size = buffer_size\n\t\tself.count = 0\n\t\tself.buffer = deque()\n\t\trandom.seed(random_seed)",
"def __init__(self, size):\n self.size = size\n self.q = collections.deque()\n self.sum_ = 0",
"def small_queue():\n queue = Queue()\n queue.enqueue(1)\n queue.enqueue(2)\n queue.enqueue(3)\n queue.enqueue(4)\n return queue",
"def __init__(self, size):\n self.size = size\n self.queue = []\n self.sum = 0",
"def __init__(self, size):\n self.size = size\n self.current_size = 0\n self.values = collections.deque()",
"def __init__(self, size: int):\n self.size = size\n self.queue = [0] * self.size # queue w/ len of size\n self.head = self.window_sum = 0\n self.count = 0",
"def worker(self):\n while True:\n item,index = self.inbound.get()\n if index is None:\n self.buffer.append(item)\n self.index.value = self.index.value + 1 #index of next item for buffer\n if len(self.buffer)>self.size:\n del self.buffer[0]\n self.newitem.put(None)\n else:\n self.buffer[len(self.buffer)+(index - self.index.value)] = item",
"def __init__(self, size: int):\n self.q = deque()\n self.max_size = size\n self.sum = 0.0",
"def __init__(self, buffer_size=3000):\n self.buffer = []\n self.buffer_size = buffer_size",
"def __init__(self, buffer_size=3000):\n self.buffer = []\n self.buffer_size = buffer_size",
"def __init__(self, size):\n self.cache = deque()\n self.max_size = size",
"def enqueue(self, item):\n # double size of array if necessary and recopy to front of array\n if self._N == len(self._q):\n self._resize(2*len(self._q)) # double size of array if necessary\n self._q[self._last] = item # add item\n self._last += 1\n if self._last == len(self._q):\n self._last = 0 # wrap-around\n self._N += 1",
"def __init__(self, buffer_size, random_seed=0):\n self.buffer_size = buffer_size\n self.count = 0\n self.buffer = deque()\n random.seed(random_seed)",
"def __init__(self, max_size):\n self.buffer = cns.deque(maxlen=max_size)",
"def __init__(self, queueLength):\r\n self.queueLength = queueLength\r\n self.data = []\r\n return",
"def __init__(self, buffer_size, random_seed=123):\n self.buffer_size = buffer_size\n self.count = 0\n self.buffer = deque()\n random.seed(random_seed)",
"def __init__(self, buffer_size, random_seed=123):\n self.buffer_size = buffer_size\n self.count = 0\n self.buffer = deque()\n random.seed(random_seed)",
"def __init__(self, buffer_size, random_seed=123):\n self.buffer_size = buffer_size\n self.count = 0\n self.buffer = deque()\n random.seed(random_seed)",
"def __init__(self, buffer_size, random_seed=123):\n self.buffer_size = buffer_size\n self.count = 0\n self.buffer = deque()\n random.seed(random_seed)",
"def __init__(self, buffer_size, random_seed=123):\n self.buffer_size = buffer_size\n self.count = 0\n self.buffer = deque()\n random.seed(random_seed)"
]
| [
"0.7055755",
"0.7002627",
"0.68083966",
"0.6783001",
"0.6552732",
"0.64187753",
"0.63499993",
"0.63266903",
"0.63185835",
"0.6228891",
"0.62085295",
"0.6208389",
"0.61978066",
"0.61875516",
"0.6185742",
"0.6173956",
"0.6154561",
"0.6149934",
"0.6100217",
"0.6100217",
"0.6091093",
"0.6068039",
"0.60589826",
"0.60290873",
"0.6028002",
"0.6024951",
"0.6024951",
"0.6024951",
"0.6024951",
"0.6024951"
]
| 0.77845526 | 0 |
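A minimal usage sketch of the bounded history buffer described in the record above. The class name `MiniBuffer` and its `put` method are hypothetical stand-ins (only `__init__`, `worker`, `read`, `pop` and `unpopped` appear in these records); it mimics the same idea with a single-threaded `collections.deque` plus a running index instead of the multiprocessing primitives.

from collections import deque

class MiniBuffer:
    def __init__(self, size=10):
        self.buffer = deque(maxlen=size)   # older items are dropped automatically
        self.index = 0                     # index of the next item to be added

    def put(self, item):
        self.buffer.append(item)
        self.index += 1

    def read(self, getindex):
        bufinx = len(self.buffer) + (getindex - self.index)
        if bufinx < 0 or getindex >= self.index:
            return None                    # already deleted, or not added yet
        return self.buffer[bufinx]

buf = MiniBuffer(size=3)
for v in 'abcde':
    buf.put(v)
print(buf.read(0), buf.read(4))            # None e  (item 0 fell out of the buffer)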
Return the number of items still to pop | def unpopped(self):
return self.newitem.qsize() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def popsize(self):\r\n return self.sp.popsize",
"def size(self):\n return self.N # Number of items in the stack",
"def size(self): #returns the size or number of items in the stack\n if self.is_empty():\n return 0\n else:\n return self.num_items",
"def items_num(self):\n\t\treturn len(self.items)",
"def items_num(self):\n\t\treturn len(self.items)",
"def size(self):\n return len(self._stack_items)",
"def pop(self) -> int:\n while len(self.push_queue) != 1:\n self.pop_queue.append(self.push_queue.pop(0))\n self.push_queue, self.pop_queue = self.pop_queue, self.push_queue\n return self.pop_queue.pop()",
"def items_num(self):\n return len(self.items)",
"def pop(self) -> int:\n tmp_list = ArrayStack(10)\n res = None\n for i in range(self.data.get_size()):\n if self.data.get_size() == 1:\n res = self.data.pop()\n else:\n tmp_list.push(self.data.pop())\n self.data = ArrayStack(10)\n for i in range(tmp_list.get_size()):\n self.data.push(tmp_list.pop())\n return res",
"def get_num_items(self):\r\n return self.num_items",
"def __len__(self):\n return len(self.stack)",
"def size(self):\n return self.states.size() * self.n_pop",
"def items_count(self):\n return len(self.items)",
"def __len__(self):\n return self._number_of_items",
"def size(self):\n\t\tif self.isEmpty():\n\t\t\treturn 0;\n\t\telse:\n\t\t\treturn self.top+1;",
"def pop(self) -> int:\r\n return self.items.pop(0)",
"def __len__(self):\n return len(self.top)",
"def pop(self):\n try:\n item = self._items.pop()\n # This operation decrements the number of items\n # in the stack, we need to update the count variable\n self._update_count()\n return item\n except IndexError:\n raise IndexError(\"Stack is empty\")",
"def size(stk: Stack) -> int:\n side_stack = Stack()\n count = 0\n # Pop everything off <stk> and onto <side_stack>, counting as we go.\n while not stk.is_empty():\n side_stack.add(stk.remove())\n count += 1\n # Now remove everything off <side_stack> and back onto <stk>.\n while not side_stack.is_empty():\n stk.add(side_stack.remove())\n # <stk> is restored to its state at the start of the function call.\n # We consider that it was not mutated.\n return count",
"def __len__(self):\r\n return len(self._items)",
"def __len__(self):\r\n return len(self._items)",
"def pop(self) -> int:\n if len(self.a) != 0:\n while len(self.a) != 1:\n self.topvalue = self.a.popleft()\n self.b.append(self.topvalue)\n\n return self.a.popleft()\n else:\n while len(self.b) != 1:\n self.topvalue = self.b.popleft()\n self.a.append(self.topvalue)\n return self.b.popleft()",
"def __len__(self):\n return len(self._items)",
"def __len__(self):\n return len(self._items)",
"def __len__(self):\n return len(self._items)",
"def __len__(self):\n return len(self._items)",
"def __len__(self):\n return len(self._items)",
"def __len__(self):\n return len(self._items)",
"def pop():",
"def __len__(self): # pragma: no cover\n return len(self.stash)"
]
| [
"0.77036655",
"0.72722363",
"0.71897376",
"0.7007627",
"0.7007627",
"0.69321567",
"0.6887765",
"0.6875599",
"0.6849064",
"0.6831568",
"0.68283343",
"0.68271565",
"0.6726608",
"0.6707557",
"0.6681107",
"0.6639482",
"0.6617566",
"0.6606565",
"0.6603618",
"0.6535069",
"0.6535069",
"0.65255237",
"0.65246576",
"0.65246576",
"0.65246576",
"0.65246576",
"0.65246576",
"0.65246576",
"0.6498277",
"0.6495135"
]
| 0.82157266 | 0 |
Helper function that blocks until an item is added to the internal queue; the item is then appended to the buffer (or written into an existing slot) and the relevant indices are updated. | def worker(self):
while True:
item,index = self.inbound.get()
if index is None:
self.buffer.append(item)
self.index.value = self.index.value + 1 #index of next item for buffer
if len(self.buffer)>self.size:
del self.buffer[0]
self.newitem.put(None)
else:
self.buffer[len(self.buffer)+(index - self.index.value)] = item | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self,size=10):\n \n self.inbound = Queue() #an internal queue to manage the class properly in a thread safe manner.\n self.index = Value('i',0) #index of next item to be added.\n self.manager = Manager()\n \n self.buffer = self.manager.list() #the buffer we will store things in.\n self.size = size #the maximum size of the buffer\n self.newitem = Queue() #a blocking event to control the pop method\n t = threading.Thread(target=self.worker) #the worker that will run when items are added.\n t.start() #start the worker\n self.newitemindex = 0 #index of items to pop",
"def drainQueue(q):\n buf = []\n while True:\n # Get as much as possible without blocking\n try:\n while True:\n item = q.get_nowait()\n if item is None:\n return buf\n else:\n buf.append(item)\n except Queue.Empty:\n pass\n\n if buf:\n return buf\n\n # Nothing in the queue. Block for\n # one item, then go back and get any\n # that we can without blocking.\n item = q.get()\n if item is None:\n return buf\n else:\n buf.append(item)",
"def _append(self, key):\n \"\"\" Returns: The index at which the new value is appended \"\"\"\n if len(self.queue) > self.size:# There is still space left in queue\n self.queue[self.size] = key\n # No space left in queue\n self.queue.append(key)\n atIndex = self.size\n self.size += 1\n return atIndex",
"def enqueue(self, item):\n # double size of array if necessary and recopy to front of array\n if self._N == len(self._q):\n self._resize(2*len(self._q)) # double size of array if necessary\n self._q[self._last] = item # add item\n self._last += 1\n if self._last == len(self._q):\n self._last = 0 # wrap-around\n self._N += 1",
"def use_queue():\n q = queue.Queue()\n for i in range(10):\n q.put_nowait(i)\n while q.qsize() > 0:\n element = q.get_nowait()\n sys.stdout.write(\"poping out from queue: {0}\\n\".format(element))",
"def delayed_buffer_item(self, buffer_item, buffer_item_len, item):\n item_copy = copy.copy(item)\n if buffer_item is None:\n buffer_item = buffer_item_len * [item_copy]\n else:\n buffer_item.append(item_copy)\n item_cur = copy.copy(buffer_item.pop(0))\n\n return buffer_item, item_cur",
"def test_the_queue_enqueue(the_queue):\n the_queue.enqueue(2)\n assert the_queue._new_dll.head.data == the_queue._new_dll.tail.data == 2",
"def poll(self):\n if self.isEmpty():\n raise Exception('Queue is empty')\n \n self.qSize -= 1\n self.front = self.front % len(self.data)\n d = self.data[self.front]\n self.front += 1\n return d",
"def add_next(self, item, index):\n if index in self.d_buffer.keys():\n return\n if len(self) == self.size:\n self.pop_first()\n self.add_item(item, index)",
"def append ( self , item ) :\n self.cond.acquire()\n try:\n if self.closed :\n raise Exception( \"Trying to append to a closed queue\" )\n else :\n self.weight += int( item['size'] )\n self.push( item )\n self.cond.notify()\n finally:\n self.cond.release()",
"def process_queue_slowly(self):\n start = time.process_time()\n while self.queue and time.process_time() - start < 1.0 / TICKS_PER_SECOND:\n self.queue.popleft()()",
"def _put(self, item, queue):",
"def process_queue_fast(self):\n while self.queue:\n self.queue.popleft()()",
"def enqueue(Q, x):\n # Q.append(x)\n Q.put_nowait(x)\n if debug: \n print(\"enqueue\", x, \":\", end=\" \")\n show_queue(Q)\n return Q",
"def _put_nowait(self, value):\n while True:\n if self._waiting_consumers:\n consume_wish = self._waiting_consumers.pop(0)\n with consume_wish.group.lock:\n if not consume_wish.group.fulfilled:\n consume_wish.fulfill(value)\n return\n elif self._buf is not None and not self._buf.full:\n self._buf.push(value)\n return\n else:\n raise Full()",
"def offer(self, value):\n if self.isFull():\n raise Exception(\"Queue too small!\")\n \n self.data[self.end] = value\n self.end += 1\n self.qSize += 1\n self.end = self.end % len(self.data)",
"def reveille(self):\n now = datetime.datetime.now()\n # TODO: this logic can be optimized if our queue has a peek() method\n while self._queue.size() > 0:\n item = self._queue.get_tuple()\n if item[1] <= now:\n self._callback(item[0])\n else:\n # put the item back & terminate iteration\n self._queue.put(item[0], item[1])\n break",
"def dequeue(self):",
"def put_nowait(self, item):\r\n if self.full():\r\n raise QueueFull\r\n self._put(item)\r\n self._unfinished_tasks += 1\r\n self._finished.clear()\r\n self._wakeup_next(self._getters)",
"def pop(self):\n self.newitem.get() #blocks until an item is added, using a queue for this to ensure that only one worker is triggered.\n #if self.newitemindex+1==self.index: self.newitem.clear()\n index = self.newitemindex\n item = self.read(index)\n self.newitemindex += 1\n \n return index, item",
"def _queue_thread(self):\n while self.running:\n try:\n msg = self.q.get(True, max(self.blocktime / 1000, 1))\n self.busy = True\n self.send(msg)\n self.update()\n except Empty:\n self.busy = False\n pass\n\n # Prune the events list of dead events\n self.events_lock.acquire()\n self.events = filter(lambda t: t.is_alive(), self.events)\n self.events_lock.release()",
"def enqueue(self,data): # total: O(n)\n # if pass capacity, double capacity and the array\n while self._size >= self._capacity: #O(n)\n self.doubleCapacity() #O(n)\n if self._size != 0: # O(1)\n self._end = (self._end+1)% self._capacity # O(1)\n self._queue[self._end] = data # O(1)\n self._size += 1 # O(1) ",
"def put(self, message):\r\n stamp = int(message[\"stamp\"]) / 1000000.0\r\n\r\n # sort it into the existing waiting messages\r\n self.lock.acquire()\r\n bisect.insort(self.queue, (stamp, time.time(), message))\r\n self.lock.release()",
"def AdvanceQueue(self):\r\n self.data.pop(0)\r\n return",
"def curr_queue(self):\n pass",
"def add(self, item: T) -> None:\n self._queue.append(item)\n if not self.is_empty():\n self._queue.sort(reverse=True)",
"def next(self):\n if self.pointer > len(self.queue) - 1:\n self.pointer = 0\n raise StopIteration\n val = self.queue[self.pointer]\n self.pointer += 1\n return val",
"def bypass_queue(self, name):\n # self.queue = [name] + self.queue\n # self.queue.insert(0, name)\n\n # self.lst = [name] + self.lst # This person is brought to the front of the queue\n self.lst.insert(0, name) #Not constant time as the pointer is moved for all the members of the queue, 0(n)\n print(f\"{name} has bypassed the queue\")",
"def test_insertion_of_value_increases_length(empty_queue):\n assert len(empty_queue) == 0\n empty_queue.enqueue(100)\n assert len(empty_queue) == 1",
"def enqueue(self, val):\n if self.size+1 == self.capacity:\n self.grow() # double the array size\n #avail = (self.head + self.size) % len(self.data)\n self.data[self.tail] = val\n self.size += 1\n self.tail = (self.tail + 1) % self.capacity\n return None"
]
| [
"0.69142944",
"0.6775045",
"0.6572859",
"0.6417179",
"0.6393169",
"0.6383432",
"0.63217866",
"0.6306395",
"0.6299939",
"0.6282621",
"0.6255991",
"0.62280786",
"0.6193022",
"0.61870414",
"0.61837673",
"0.6095415",
"0.60823333",
"0.6045565",
"0.6034187",
"0.60179716",
"0.59995633",
"0.5997179",
"0.5991117",
"0.5982812",
"0.5982023",
"0.5964172",
"0.5934424",
"0.59092534",
"0.5901808",
"0.58939195"
]
| 0.7502149 | 0 |
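A hedged, self-contained sketch of the worker loop described above, using standard-library queues and a plain list in place of the multiprocessing Manager; the tuple protocol on the inbound queue (item plus an optional index for in-place replacement) follows the record, but the surrounding scaffolding is assumed.

import queue
import threading

def run_worker(inbound, buffer, size, newitem, state):
    while True:
        item, index = inbound.get()
        if index is None:
            buffer.append(item)
            state['index'] += 1            # index of the next item for the buffer
            if len(buffer) > size:
                del buffer[0]              # drop the oldest item
            newitem.put(None)              # wake anyone blocked in pop()
        else:
            # overwrite an existing slot, addressed by its absolute index
            buffer[len(buffer) + (index - state['index'])] = item

inbound, newitem = queue.Queue(), queue.Queue()
buffer, state = [], {'index': 0}
threading.Thread(target=run_worker, args=(inbound, buffer, 3, newitem, state),
                 daemon=True).start()
inbound.put(('a', None))
newitem.get()                              # blocks until the worker has stored 'a'
print(buffer, state['index'])              # ['a'] 1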
Read the item at index getindex. Returns the item, or None if the item has been dropped from the buffer or does not exist yet. | def read(self,getindex):
if getindex<0:
#print("Indices are non-negative")
return None
try:
bufinx = len(self.buffer)+(getindex - self.index.value)
if bufinx<0:
#print("This item has been deleted, try increasing the queue size")
return None
return self.buffer[bufinx]
except IndexError:
#print("This item doesn't exist yet")
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_by_index(self, index):\n if index > self.length - 1:\n return None\n else:\n return self.items[index]",
"def get_item(self, index: int) -> _T:\n return self.index_to_item[index]",
"def get_item(self, index):\n if index == 0:\n raise IndexError(\"<{0}> Index start as 1\".format(type(self).__name__))\n index = self.get_index(index)\n res = self.get_item_type()()\n self.get_Item(index, res)\n return res",
"def __getitem__(self, index):\n if self.valid_index(index):\n return self._data[index]\n else:\n return IndexError",
"def __getitem__(self, index):\n item = self.data[index]\n return item",
"def __getitem__(self, index):\n if index >= self.size:\n raise KeyError\n else:\n return self._get_item(index)",
"def __getitem__(self, index):\n # check whether the requested index is available.\n # raise an error if not\n # [BUG] ? self._nrows-1: -> self._nrows:\n if index > self._nrows-1:\n err_msg = 'Index: '+str(index)+' is larger than nrows: '\\\n +str(self._nrows)+'!!'\n raise Exception(err_msg)\n\n # return the value at the index\n return self._data[index]",
"def __getitem__(self, index):\n #Check to see whether or not the index is within the array's element range.\n if index >= 0 and index < len(self):\n return self._items[index]\n\n return None",
"def read_index(self, index):\n current = self.head\n if index == 0:\n return current.data\n elif index >= self.size() :\n return None\n else:\n position = 0\n while position < index:\n current = current.next_node\n position += 1\n return current.data",
"def __getitem__(self, item_index: Index) -> Item:\n raise NotImplementedError(\"__getitem__\")",
"def __getitem__(self, index):\r\n return self._items[index]",
"def __getitem__(self, index):\r\n #if index < 0 or index >= self.size():\r\n # raise IndexError(\"Array index out of bounds\")\r\n return self._items[index]",
"def _get_item_by_idx(self, iterator, idx):\r\n size = len(self)\r\n idx = operator.index(idx)\r\n if not -size <= idx < size:\r\n raise IndexError('index {} is out of range'.format(idx))\r\n idx %= size\r\n return next(itertools.islice(iterator, idx, None))",
"def __getitem__(self, index):\n if self.hdf5_cache_mode == \"all\":\n return self.getitem_cache[index]\n return self.get_item(index)",
"def GetItem(self,index):\r\n return self.itemId_item[self.gList.GetItemData(index)]",
"def __getitem__(self, idx):\n assert(isinstance(idx, int))\n nidx = self._normalize_idx(idx)\n if nidx >= len(self.data):\n raise IndexError\n return self.data[nidx]",
"def __getitem__(self, index):\r\n\r\n if self._instance is not _unset and index not in self:\r\n self._instance[index]\r\n return self._contents[index]",
"def get(self, index):\n self.__validate_index(index)\n return self.__list[index]",
"def get_at_index(self, index: int) -> object:\n return self.data[index]",
"def __getitem__(self, index):\n # attempt to\n try:\n # cast {index} to an integer\n index = int(index)\n # if this fails\n except TypeError:\n # ask my tile do the rest\n value = self.data[self.tile.offset(index)]\n # otherwise\n else:\n # retrieve the item directly from my container\n value = self.data[index]\n # all done\n return value",
"def get_indexed_item_from_file(index, file_name):\n\n list_in_file = read_list_bin(file_name)\n return get_indexed_item_from_list(index, list_in_file)",
"def __getitem__ ( self , index ):\n\t\treturn self . data [ index ]",
"def __getitem__(self, idx):\n return self.items[idx]",
"def __getitem__(self, idx):\n if idx < 0 or idx >= self.length():\n raise KeyError()\n return self.data[idx]",
"def __getitem__(self, index):\n if index < 0 or index >= len(self.sequence):\n raise Exception(\"Index is out of bounds\")\n return self.sequence[index]",
"def __getitem__(self, index):\n\t\treturn self.data[index]",
"def __getitem__(self, index: int) -> object:\n return self.get_at_index(index)",
"def get(self, index):\n raise NotImplementedError() # pragma: no cover",
"def __getitem__(self, index):\n return self.data[index]",
"def __getitem__(self, index):\n return self.data[index]"
]
| [
"0.7535031",
"0.7388097",
"0.7387124",
"0.73721397",
"0.7266759",
"0.72450334",
"0.71537787",
"0.7090138",
"0.702019",
"0.7002688",
"0.69995654",
"0.6852183",
"0.68299997",
"0.68008107",
"0.6783352",
"0.67813903",
"0.67672634",
"0.67473143",
"0.6732041",
"0.66635954",
"0.66635746",
"0.66080946",
"0.6602259",
"0.65912557",
"0.6579583",
"0.65702945",
"0.656326",
"0.65434885",
"0.65380335",
"0.65380335"
]
| 0.75241035 | 1 |
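The index arithmetic in read() above maps an absolute item index onto a position in the trimmed buffer. A small worked example of just that translation, with the buffer and counter as plain local values (hypothetical, outside the class):

buffer = ['c', 'd', 'e']   # items 2, 3 and 4 survived; items 0 and 1 were trimmed
index = 5                  # five items have been added so far

def read(getindex):
    if getindex < 0:
        return None                        # indices are non-negative
    bufinx = len(buffer) + (getindex - index)
    if bufinx < 0:
        return None                        # already deleted from the buffer
    try:
        return buffer[bufinx]
    except IndexError:
        return None                        # does not exist yet

print(read(1), read(3), read(7))           # None d None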
Blocks until a new item is added. Returns the index and the item. Note that the item remains in the QueueBuffer, so 'pop' is slightly misleading. It will return (index, None) if the item has already been lost from the buffer. | def pop(self):
self.newitem.get() #blocks until an item is added, using a queue for this to ensure that only one worker is triggered.
#if self.newitemindex+1==self.index: self.newitem.clear()
index = self.newitemindex
item = self.read(index)
self.newitemindex += 1
return index, item | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pop(self) -> int:\n cur = None\n if(not self.empty()):\n cur = self.queue[0] \n self.queue = self.queue[1:] \n return cur",
"def read(self,getindex):\n if getindex<0:\n #print(\"Indicies are non-negative\")\n return None\n try:\n bufinx = len(self.buffer)+(getindex - self.index.value)\n if bufinx<0:\n #print(\"This item has been deleted, try increasing the queue size\")\n return None\n return self.buffer[bufinx]\n except IndexError:\n #print(\"This item doesn't exist yet\")\n return None",
"def pop(self):\n if self.isEmpty():\n raise KeyError(\"The queue is empty.\")\n oldItem = self._front.data\n self._front = self._front.next\n if self._front is None:\n self._rear = None\n self._size -= 1\n return oldItem",
"def delayed_buffer_item(self, buffer_item, buffer_item_len, item):\n item_copy = copy.copy(item)\n if buffer_item is None:\n buffer_item = buffer_item_len * [item_copy]\n else:\n buffer_item.append(item_copy)\n item_cur = copy.copy(buffer_item.pop(0))\n\n return buffer_item, item_cur",
"def pop(self) -> int:\n return self.queue.get()",
"def get_new_item(self, index):\n if index not in self.d_buffer.keys():\n if 0 <= index < self.n_im:\n return self.stack[index]\n # Does not raise an error in case of wrong index for easier\n # handling upstream\n else:\n return None\n else:\n return self.d_buffer[index]",
"def dequeue(self):\n if self.isEmpty():\n raise Exception(\"Queue underflow\")\n item = self._q[self._first]\n self._q[self._first] = None # to avoid loitering\n self._N -= 1\n self._first += 1\n if self._first == len(self._q):\n self._first = 0 # wrap-around\n # shrink size of array if necessary\n if self._N > 0 and self._N == len(self._q)/4:\n self._resize(len(self._q)/2)\n return item",
"def dequeue(self):\n\n item = self.__items__.pop(0)\n return item",
"def pop(self):\n while not self.queue[self.tag].empty():\n temp = self.queue[self.tag].get()\n if not self.queue[self.tag].empty():\n self.queue[1 - self.tag].put(temp)\n else:\n self.tag = 1 - self.tag\n return temp",
"def Rear(self):\r\n if (len(self.queue) >= 1):\r\n return self.queue[-1]\r\n else:\r\n return -1",
"def pop(self) -> int:\n return self.q.pop(0)",
"def pop(self) -> int:\n return self.q.pop(0)",
"def pop(self):\r\n return self.queue.pop(0)",
"def dequeue(self): # total O(1)\n topItem = self._queue[self._start] #O(1)\n self._queue[self._start] = None #O(1)\n self._start = (self._start+1)% self._capacity #O(1)\n self._size -= 1 #O(1)\n return topItem #O(1)",
"def pop(self):\n self.queue.insert(len(self.queue), self.queue[0])\n self.queue.remove(self.queue[0])\n return self.queue.pop()",
"def pop(self) -> int:\n last = self.queue.popleft()\n while self.queue:\n self.aux_queue.append(last)\n last = self.queue.popleft()\n self.queue, self.aux_queue = self.aux_queue, self.queue\n return last",
"def pop_item(self, index):\n ix, obj = self.items\n if index < len(ix):\n self.d_buffer.pop(ix[index])\n else:\n raise IndexError('Buffer does not have {0} elements'.format(index))",
"def pop(self) -> int:\n if self.empty():\n raise RuntimeError(\"Queue is empty!\")\n result = self.data[self.head]\n self.data[self.head] = None\n self.head = (self.head + 1) % len(self.data)\n self.size -= 1\n if 0 < self.size < len(self.data) // 4 and len(self.data) > 10:\n self.resize(len(self.data) // 2)\n return result",
"def Rear(self):\n if self.isEmpty():\n return -1\n else:\n return self.queue[-1]",
"def pop(self, timeout=None):\n item = super(ExclusiveQueue, self).pop(timeout)\n try:\n self.remove(item)\n except ValueError:\n pass\n return item",
"def poll(self):\n if self.isEmpty():\n raise Exception('Queue is empty')\n \n self.qSize -= 1\n self.front = self.front % len(self.data)\n d = self.data[self.front]\n self.front += 1\n return d",
"def popitem(self):\n with self.__plock:\n if len(self._keys) == 0:\n raise KeyError('Empty')\n\n key = self._keys[-1]\n val = self[key]\n del self[key]\n\n return (key, val)",
"def pop(self,n):\r\n\t\treturn self.queue.pop(0)[1]",
"def pop(self):\n return self.queue.pop(0)",
"def pop(self):\n return self.q1.dequeue()",
"def getitem(self, index):\n #FIXME: A better way to get item without removing it.\n priority,size,trace=self.queues[index].get()\n self.enqueue(index,trace,priority)\n return trace",
"def pop(self):\n if self._size > 0:\n elem = self.first.data\n self.first = self.first.next\n self._size = self._size - 1\n return elem\n \n raise IndexError('The queue is empty! ')",
"def peek(self):\n if self.is_empty():\n raise ValueError('Queue underflow')\n return self.first.item",
"def dequeue(self):\n\t\treturn self.items.pop()",
"def getItem(self):\n with self.lock:\n if self.isEmpty():\n return None\n else:\n returnval = heapq.heappop(self.ItemList)\n self.ItemHashList.pop(returnval[1])\n return returnval"
]
| [
"0.69388396",
"0.68256915",
"0.68198776",
"0.67803955",
"0.6718834",
"0.67103136",
"0.6671482",
"0.6626551",
"0.661111",
"0.6601476",
"0.6587418",
"0.6587418",
"0.6583516",
"0.6570004",
"0.65641826",
"0.6547664",
"0.653801",
"0.6529275",
"0.65275687",
"0.6526335",
"0.65049285",
"0.6500807",
"0.64855576",
"0.6485258",
"0.64765114",
"0.64653856",
"0.6450894",
"0.64500517",
"0.64494073",
"0.64402455"
]
| 0.83028567 | 0 |
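An end-to-end sketch of the blocking pop() semantics described above: a producer thread adds items faster than the consumer drains them, so early indices may come back as (index, None) once they are trimmed. QueueBuffer itself is not imported; the helpers below are a stripped-down, hypothetical stand-in built on the same signalling queue.

import queue
import threading

newitem, buffer, size = queue.Queue(), [], 2
index = {'next': 0, 'pop': 0}

def put(item):
    buffer.append(item)
    index['next'] += 1
    if len(buffer) > size:
        del buffer[0]
    newitem.put(None)                      # one signal per added item

def pop():
    newitem.get()                          # blocks until something was added
    i = index['pop']
    index['pop'] += 1
    bufinx = len(buffer) + (i - index['next'])
    item = buffer[bufinx] if 0 <= bufinx < len(buffer) else None
    return i, item

threading.Thread(target=lambda: [put(v) for v in 'abcd'], daemon=True).start()
for _ in range(4):
    print(pop())                           # the oldest indices may pair with None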
Performs majority votes over the given attributes, which are assumed to be `CategoricalAttribute`s. A separate vote is performed for attributes of each name. If a list of AttributeContainers is provided, all attributes are combined into a single vote. | def majority_vote_categorical_attrs(attrs, confidence_weighted=False):
if not isinstance(attrs, list):
attrs = [attrs]
accums = defaultdict(etan.Accumulator)
for _attrs in attrs:
for attr in _attrs:
accums[attr.name].add(attr.value, weight=attr.confidence or 0.0)
voted_attrs = AttributeContainer()
for name, accum in iteritems(accums):
value = accum.argmax(weighted=confidence_weighted)
confidence = accum.get_average_weight(value) or None
attr = CategoricalAttribute(name, value, confidence=confidence)
voted_attrs.add(attr)
return voted_attrs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __call__(self, clfs, dataset):\n if len(clfs)==0:\n return [] # to don't even bother\n\n all_label_counts = None\n for clf in clfs:\n # Lets check first if necessary conditional attribute is enabled\n if not clf.ca.is_enabled(\"predictions\"):\n raise ValueError, \"MaximalVote needs classifiers (such as \" + \\\n \"%s) with state 'predictions' enabled\" % clf\n predictions = clf.ca.predictions\n if all_label_counts is None:\n all_label_counts = [ {} for i in xrange(len(predictions)) ]\n\n # for every sample\n for i in xrange(len(predictions)):\n prediction = predictions[i]\n # XXX fishy location due to literal labels,\n # TODO simplify assumptions and logic\n if isinstance(prediction, basestring) or \\\n not is_sequence_type(prediction):\n prediction = (prediction,)\n for label in prediction: # for every label\n # XXX we might have multiple labels assigned\n # but might not -- don't remember now\n if not label in all_label_counts[i]:\n all_label_counts[i][label] = 0\n all_label_counts[i][label] += 1\n\n predictions = []\n # select maximal vote now for each sample\n for i in xrange(len(all_label_counts)):\n label_counts = all_label_counts[i]\n # lets do explicit search for max so we know\n # if it is unique\n maxk = [] # labels of elements with max vote\n maxv = -1\n for k, v in label_counts.iteritems():\n if v > maxv:\n maxk = [k]\n maxv = v\n elif v == maxv:\n maxk.append(k)\n\n assert len(maxk) >= 1, \\\n \"We should have obtained at least a single key of max label\"\n\n if len(maxk) > 1:\n warning(\"We got multiple labels %s which have the \" % maxk +\n \"same maximal vote %d. XXX disambiguate. \" % maxv +\n \"Meanwhile selecting the first in sorted order\")\n predictions.append(sorted(maxk)[0])\n\n ca = self.ca\n ca.estimates = all_label_counts\n ca.predictions = predictions\n return predictions",
"def majority_vote(labels):\n\n conta = Counter(labels)\n\n winner, winner_count = conta.most_common(1)[0]\n\n num_winner = sum([1 for count in conta.values() if count == winner_count])\n\n if num_winner == 1:\n return winner\n else:\n return majority_vote(labels[:-1])",
"def categorize_attributes():\n global attr_categories, seeds\n print \"Generating seeds...\"\n seeds = get_seeds()\n\n print \"Categorizing attributes...\"\n categorized = categorize(seeds)\n \n category_distances = {}\n attr_categories = {}\n for c in categorized:\n for (attr, score) in categorized[c]:\n attr_categories[attr] = c\n category_distances[attr] = score",
"def majority_vote(preds):\n if len(preds) == 1:\n return preds[0]\n return [1 if sum(votes) > 0 else -1\n for votes in zip(*preds)]",
"def naive_majority(voters):\n half = len(voters)//2\n for index, voter in enumerate(voters):\n count = 0\n for other_voter in voters:\n if voter == other_voter:\n count += 1\n if count > half:\n return Outcome.has_majority\n return Outcome.no_majority",
"def get_majority_accs(ds):\n get_label = lambda label: (\n label.argmax(0) if len(label.size()) > 0 else label\n ).item()\n Y = [get_label(y) for x, y, s in ds]\n S = [get_label(s) for x, y, s in ds]\n get_majority = lambda labels: Counter(labels).most_common(1)[0][1] / len(\n labels\n )\n return get_majority(Y), get_majority(S)",
"def majority_vote(indices, df_training, attrib_column):\n \n return [stats.mode(df_training.iloc[np.unique(indices_row),\n attrib_column])[0][0] for indices_row in indices]",
"def take_majority_vote(frame_labels: np.ndarray,\n segment_inds_list: list[np.ndarray]) -> np.ndarray:\n for segment_inds in segment_inds_list:\n segment = frame_labels[segment_inds]\n majority = scipy.stats.mode(segment, keepdims=False)[0].item()\n frame_labels[segment_inds] = majority\n\n return frame_labels",
"def majority_vote():\n iris = datasets.load_iris()\n x_vals, y_vals = iris.data[50:, [1, 2]], iris.target[50:]\n labenc = LabelEncoder()\n y_vals = labenc.fit_transform(y_vals)\n x_train, x_test, y_train, y_test = train_test_split(x_vals, y_vals,\n test_size=0.5, random_state=1)\n\n clf1 = LogisticRegression(penalty='l2', C=0.001, random_state=0)\n clf2 = DecisionTreeClassifier(max_depth=1, criterion='entropy', random_state=0)\n clf3 = KNeighborsClassifier(n_neighbors=1, p=2, metric='minkowski')\n pipe1 = Pipeline([['sc', StandardScaler()], ['clf', clf1]])\n pipe3 = Pipeline([['sc', StandardScaler()], ['clf', clf3]])\n clf_labels = ['Logistic Regression', 'Decision Tree', 'KNN']\n\n # Majority Rule (hard) Voting\n mv_clf = MajorityVoteClassifier(classifiers=[pipe1, clf2, pipe3])\n\n clf_labels += ['Majority Voting']\n all_clf = [pipe1, clf2, pipe3, mv_clf]\n print('10-fold cross validation:\\n')\n for clf, label in zip(all_clf, clf_labels):\n scores = cross_val_score(estimator=clf, X=x_train, y=y_train, cv=10, scoring='roc_auc')\n print(\"ROC AUC: %0.2f (+/- %0.2f) [%s]\" % (scores.mean(), scores.std(), label))\n\n colors = ['black', 'orange', 'blue', 'green']\n linestyles = [':', '--', '-.', '-']\n for clf, label, clr, lin_style in zip(all_clf, clf_labels, colors, linestyles):\n # assuming the label of the positive class is 1\n y_pred = clf.fit(x_train, y_train).predict_proba(x_test)[:, 1]\n fpr, tpr, _ = roc_curve(y_true=y_test, y_score=y_pred)\n print(y_pred)\n roc_auc = auc(x=fpr, y=tpr)\n plt.plot(fpr, tpr, color=clr, linestyle=lin_style,\n label='%s (auc = %0.2f)' % (label, roc_auc))\n\n plt.legend(loc='lower right')\n plt.plot([0, 1], [0, 1], linestyle='--', color='gray', linewidth=2)\n\n plt.xlim([-0.1, 1.1])\n plt.ylim([-0.1, 1.1])\n plt.grid()\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.tight_layout()\n plt.savefig(IMG_PATH + 'roc.png', dpi=300)\n plt.close()\n\n stdc = StandardScaler()\n x_train_std = stdc.fit_transform(x_train)\n all_clf = [pipe1, clf2, pipe3, mv_clf]\n x_min = x_train_std[:, 0].min() - 1\n x_max = x_train_std[:, 0].max() + 1\n y_min = x_train_std[:, 1].min() - 1\n y_max = x_train_std[:, 1].max() + 1\n xxx, yyy = np.meshgrid(np.arange(x_min, x_max, 0.1), np.arange(y_min, y_max, 0.1))\n _, axarr = plt.subplots(nrows=2, ncols=2, sharex='col', sharey='row', figsize=(7, 5))\n for idx, clf, ttt in zip(product([0, 1], [0, 1]), all_clf, clf_labels):\n clf.fit(x_train_std, y_train)\n z_vals = clf.predict(np.c_[xxx.ravel(), yyy.ravel()])\n z_vals = z_vals.reshape(xxx.shape)\n axarr[idx[0], idx[1]].contourf(xxx, yyy, z_vals, alpha=0.3)\n axarr[idx[0], idx[1]].scatter(x_train_std[y_train == 0, 0], x_train_std[y_train == 0, 1],\n c='blue', marker='^', s=50)\n axarr[idx[0], idx[1]].scatter(x_train_std[y_train == 1, 0], x_train_std[y_train == 1, 1],\n c='red', marker='o', s=50)\n axarr[idx[0], idx[1]].set_title(ttt)\n plt.text(-3.5, -4.5, s='Sepal width [standardized]', ha='center', va='center', fontsize=12)\n plt.text(-10.5, 4.5, s='Petal length [standardized]', ha='center', va='center',\n fontsize=12, rotation=90)\n plt.tight_layout()\n plt.savefig(IMG_PATH + 'voting_panel.png', bbox_inches='tight', dpi=300)\n # print(mv_clf.get_params())\n params = {'decisiontreeclassifier__max_depth': [1, 2],\n 'pipeline-1__clf__C': [0.001, 0.1, 100.0]}\n grid = GridSearchCV(estimator=mv_clf, param_grid=params, cv=10, scoring='roc_auc')\n grid.fit(x_train, y_train)\n\n for params, mean_score, scores in grid.cv_results_:\n print(\"%0.3f+/-%0.2f %r\" % 
(mean_score, scores.std() / 2, params))\n print('Best parameters: %s' % grid.best_params_)\n print('Accuracy: %.2f' % grid.best_score_)",
"def mode_categorical_attribute(nodes: typ.Iterable[vtna.graph.TemporalNode], attribute_name: str) -> str:\n values = [node.get_global_attribute(attribute_name) for node in nodes]\n return max(set(values), key=values.count)",
"def _select_attribute(self, attributes_list, df):\r\n entropy_all_data = self._entropy(df)\r\n\r\n mAtt = random.sample(attributes_list, math.ceil(math.sqrt(len(attributes_list)))) # select m random attributes\r\n best = 0\r\n \r\n for attr in mAtt:\r\n entropy_attribute = 0\r\n\r\n ## atributos numéricos: discretiza para acima da média ou abaixo da média\r\n if pd.api.types.is_numeric_dtype(df[attr]):\r\n mean = df[attr].mean()\r\n df_attribute_below_average = df.loc[df[attr] < mean]\r\n df_attribute_above_average = df.loc[df[attr] >= mean]\r\n entropy_attribute = len(df_attribute_below_average)/len(df)*self._entropy(df_attribute_below_average) + \\\r\n len(df_attribute_above_average)/len(df)*self._entropy(df_attribute_above_average)\r\n\r\n else:\r\n for value in df[attr].unique():\r\n df_attribute = df.loc[df[attr] == value]\r\n entropy_attribute += len(df_attribute)/len(df)*self._entropy(df_attribute)\r\n\r\n gain = entropy_all_data - entropy_attribute\r\n\r\n if gain >= best:\r\n best = gain\r\n chosen = attr\r\n return chosen",
"def _majority_vote(self, claims):\n c_df = claims[['source_id', 'object_id', 'value']].copy()\n discovered_truths = c_df.groupby(['object_id'\n ]).apply(lambda x: self.elect(x))\n discovered_truths = pd.DataFrame(discovered_truths)\n discovered_truths = discovered_truths.rename(columns={\n 0: 'value'\n }).reset_index()\n return discovered_truths",
"def ensembleVote(x, classes, ensemble):\n votes = np.array([0 for kk in range(len(classes))])\n for i in ensemble:\n votes = votes + classProbs(x, ensemble[i][\"tree\"], classes)\n maxVote = 0\n loc = None\n for ind, vote in enumerate(votes):\n if vote > maxVote:\n maxVote = vote\n loc = ind\n prediction = classes[loc]\n return prediction",
"def majority_vote(labels):\n vote_counts = Counter(labels)\n winner, winner_count = vote_counts.most_common(1)[0]\n num_winners = len([count for count in vote_counts.values()\n if count == winner_count])\n if num_winners == 1:\n return winner\n else:\n #try again without the farthest\n return majority_vote(labels[:-1])",
"def iterative_majority(votes):\n half = len(votes)//2\n counts = defaultdict(lambda: 0)\n for vote in votes:\n counts[vote] += 1\n\n sorted_counts = sorted((count for count in counts.values()), reverse=True)\n return (Outcome.has_majority if sorted_counts[0] > half\n else Outcome.no_majority)",
"def get_movie_scores(df, min_vote_count=1000):\n ratings = {}\n\n for x in CREW_ATTRIBUTES:\n ratings[x] = get_avg_scores_for_attribute(df, x, min_vote_count)\n\n df['production_score'] = df.apply(calculate_final_production_score, ratings=ratings, axis=1)\n return df",
"def calculate_attribute_similarity(similars, cmdb_attributes, db_attributes):\n attr_similarity = {}\n for db_t in similars:\n cmdb_t = list(similars[db_t].keys())[0]\n db_attrs = db_attributes.get(db_t)\n cmdb_attrs = cmdb_attributes.get(cmdb_t)\n if db_attrs != None and cmdb_attrs != None:\n attr_similarity[cmdb_t] = calculate_class_similarity(\n db_attrs, cmdb_attrs)\n else:\n attr_similarity[cmdb_t] = {}\n return attr_similarity",
"def evaluate(self, attributes):\n return self.predicate(attributes[self.name])",
"def findMajority(roundX, branch):\n majority = []\n for attributeType, value in roundX[\"attributes\"][branch[0]][\"attrTypes\"].items():\n # print(\"\\n\"+tabs+str(attributeType).upper())\n if value[\"values\"][\"purity\"] == 0 and value[\"values\"][\"numInstance\"] != 0: # we are at a termianl node, this node is pure\n # check the class values to get the class node value\n for key, val in value[\"values\"].items():\n if key != \"purity\" and key != \"numInstance\": # for class values only\n if val != 0:\n majority.append(key)\n counter = collections.Counter(majority)\n return counter.most_common()[0][0]",
"def majority_vote(votes):\n import scipy.stats as ss\n mode, count = ss.mstats.mode(votes)",
"def id3(x, y, attributes, max_depth, weight, depth=0):\r\n tree = {}\r\n new_attr = []\r\n arr, count = np.unique(y, return_counts=True)\r\n # checking edge cases - reached maximum depth, or no attributes\r\n if len(attributes) == 0 or depth == max_depth or len(x) == 0:\r\n return np.bincount(y).argmax()\r\n # if all the values of y are one return one\r\n elif len(np.unique(y)) == 1:\r\n return arr[0]\r\n else:\r\n # calculating mutual information values\r\n info_values = {} \r\n # over number of columns\r\n for i in range(data.dim):\r\n oldX = partition(x[:,i])\r\n oldKeys = oldX.keys()\r\n # check in attributes recieved from bagging\r\n for attr in attributes:\r\n binX = []\r\n key , value = attr\r\n # check for key and value\r\n if i == key and value in oldKeys:\r\n # get the index\r\n index = oldX[value]\r\n for n in range(len(x)):\r\n if n in index:\r\n # if match binary classification 1 / 0 and appending to binX list\r\n binX.append(1)\r\n else:\r\n binX.append(0)\r\n # adding to a dictionary \r\n info_values[(i, value)] = mutual_information(binX, y, weight)\r\n \r\n # getting the maximum feature value\r\n best_feature_index = max(info_values, key=info_values.get) \r\n best_feature, best_val = best_feature_index\r\n # creating the best partition\r\n x_best_part = partition(x[:,best_feature])\r\n #selecting other than the best feature value from the dictionary\r\n new_attr = list(filter(lambda x: x!= (best_feature, best_val), attributes))\r\n # increasing depth\r\n depth += 1\r\n\r\n # Calling id3 recursively, checking over 0,1 making a prediction as True / False \r\n for n in range(0,2):\r\n if n == 0:\r\n # recursively calling id3 over the best values of the x partition\r\n bestX = x[x_best_part[best_val]]\r\n bestY = y[x_best_part[best_val]]\r\n tree[best_feature, best_val, 'True'] = id3(bestX, bestY, new_attr, max_depth,weight, depth)\r\n else:\r\n \t # recursively calling id3 selecting other than best features\r\n othr_idx = []\r\n for i in x_best_part:\r\n if i != best_val:\r\n othr_idx.extend(x_best_part[i])\r\n\r\n otherX = x[othr_idx]\r\n otherY = y[othr_idx]\r\n tree[best_feature, best_val, 'False'] = id3(otherX, otherY, new_attr, max_depth,weight, depth)\r\n return tree",
"def find_best_attribute_to_split_on_entropy(examples_set, attributes):\n current_entropy = find_entropy(examples_set)\n\n gains_to_calculate = get_value_subsets_for_every_attribute(examples_set, attributes)\n largest_gain = 0\n best_attribute = None\n # Return a random attribute if all are the same\n for x in range(14):\n if attributes[x] != -1:\n best_attribute = attributes[x]\n\n best_gain = 0\n for attribute in gains_to_calculate.keys():\n gain = 0\n for value in gains_to_calculate[attribute].keys():\n value_sub_set = gains_to_calculate[attribute][value]\n numerator = len(value_sub_set)\n denominator = 0\n for attributeValue in gains_to_calculate[attribute].keys():\n denominator = denominator + len(gains_to_calculate[attribute][attributeValue])\n weight = numerator / denominator\n if not value_sub_set:\n continue\n gain = gain + (weight * find_entropy(value_sub_set))\n # Return a random attribute if all attributes have 0 gain\n gain = current_entropy - gain\n if gain >= largest_gain:\n largest_gain = gain\n best_attribute = attribute\n best_gain = gain\n if best_gain < gain_threshold:\n return \"None\"\n return best_attribute",
"def MergeAttributeContainers(\n self, callback=None, maximum_number_of_containers=0):\n if not self._container_types:\n self._container_types = self._GetContainerTypes()\n\n number_of_containers = 0\n while (self._active_cursor or self._container_types\n or self._active_extra_containers):\n if not self._active_cursor and not self._active_extra_containers:\n self._PrepareForNextContainerType()\n\n containers = self._GetAttributeContainers(\n self._active_container_type, callback=callback,\n cursor=self._active_cursor,\n maximum_number_of_items=maximum_number_of_containers)\n\n if not containers:\n self._active_cursor = 0\n continue\n\n for container in containers:\n self._add_active_container_method(container)\n number_of_containers += 1\n\n if 0 < maximum_number_of_containers <= number_of_containers:\n logger.debug(\n 'Only merged {0:d} containers'.format(number_of_containers))\n return False\n\n logger.debug('Merged {0:d} containers'.format(number_of_containers))\n # While all the containers have been merged, the 'merging' key is still\n # present, so we still need to remove the store.\n self._store.Remove()\n return True",
"def classify(self, features):\n\n # TODO: finish this.\n class_labels = []\n # TODO: finish this.\n features = np.array(features)\n feat_shape = features.shape\n for i in range(feat_shape[0]):\n vote = np.zeros((self.num_trees))\n for j in range(self.num_trees):\n #print self.trees[j].classify(feat)\n vote[j] = self.trees[j].classify(features[i,self.attr_track[j]].reshape(1,-1))[0]\n counts = np.bincount(vote.astype(int))\n class_labels.append(np.argmax(counts))\n return class_labels",
"def AUC_multiclass(y_true, y_proba, verbose=3):\n macro_roc_auc_ovo = roc_auc_score(y_true, y_proba, multi_class=\"ovo\", average=\"macro\")\n weighted_roc_auc_ovo = roc_auc_score(y_true, y_proba, multi_class=\"ovo\", average=\"weighted\")\n macro_roc_auc_ovr = roc_auc_score(y_true, y_proba, multi_class=\"ovr\", average=\"macro\")\n weighted_roc_auc_ovr = roc_auc_score(y_true, y_proba, multi_class=\"ovr\", average=\"weighted\")\n if verbose>=3:\n print(\"[classeval] One-vs-One ROC AUC scores:\\n {:.6f} (macro),\\n {:.6f} \" \"(weighted by prevalence)\" .format(macro_roc_auc_ovo, weighted_roc_auc_ovo))\n print(\"[classeval] One-vs-Rest ROC AUC scores:\\n {:.6f} (macro),\\n {:.6f} \" \"(weighted by prevalence)\" .format(macro_roc_auc_ovr, weighted_roc_auc_ovr))\n\n out = {}\n out['macro_roc_auc_ovo'] = macro_roc_auc_ovo\n out['weighted_roc_auc_ovo'] = weighted_roc_auc_ovo\n out['macro_roc_auc_ovr'] = macro_roc_auc_ovr\n out['weighted_roc_auc_ovr'] = weighted_roc_auc_ovr\n return(out)",
"def categorize(seeds):\n global category_distances\n attr_by_category = {c: [] for c in seeds}\n \n distance = lambda a, c: min(distances[seed][a] for seed in seeds[c])\n for attr in attrs:\n for c in seeds:\n category_distances[attr][c] = distance(attr, c)\n\n (score, category) = min_argmin(\n lambda c: distance(attr, c), seeds)\n attr_by_category[category].append((attr, score))\n\n return {c: sorted(attr_by_category[c], key=itemgetter(1))\n for c in attr_by_category}",
"def attribute(self, attribute):\n value = 3\n if self.age == \"child\":\n value -= 1\n if attribute == \"physique\" or attribute == \"phy\":\n if self.age == \"adult\":\n value += 1\n if self.gender == \"male\":\n value += 1\n elif self.gender == \"female\":\n value -= 1\n\n if attribute == \"sensitivity\" or attribute == \"sns\":\n if self.age == \"child\":\n value += 2\n if self.gender == \"male\":\n value -= 1\n elif self.gender == \"female\":\n value += 1\n\n if attribute == \"agility\" or attribute == \"agi\":\n if self.age == \"child\":\n value += 1 # to be equally as high as adult and young\n elif self.age == \"elder\":\n value -= 1\n\n if attribute == \"mind\" or attribute == \"mnd\":\n if self.age == \"elder\":\n value += 1\n\n for feature in self.features:\n if feature.name == \"blood\":\n for key in feature.modifiers:\n if attribute == key:\n value += feature.modifiers[key]\n\n if value < 1:\n value = 1\n return value",
"def important_attributes(vector):\r\n atts = []\r\n for i in xrange(len(vector)):\r\n if len(atts) < 3:\r\n atts.append((attributes[i], vector[i]))\r\n atts.sort(key=lambda x: x[1], reverse = True)\r\n else:\r\n if vector[i] > atts[2][1]:\r\n if attributes[i] in ('score', 'a_b_v'):\r\n continue\r\n atts[2] = (attributes[i], vector[i])\r\n atts.sort(key=lambda x: x[1], reverse = True)\r\n return atts",
"def select_attribute(instances, available_attributes):\n #Define variable(s)\n entropies = []\n #Loop through each attribute --> [att0, att1, att2]\n for attribute in available_attributes:\n #Partition on given attribute, and return dictionary of partitioned values\n partition = partition_instances(instances,attribute,available_attributes)\n entropy = 0\n #Loop through each list in given partition\n for key in partition:\n num_partition_instances = len(partition[key])\n #Calculates frequencies in a partition\n class_columns, frequencies = get_frequencies(create_list(partition[key]))\n #Loop through each frequency in the list\n for frequency in frequencies:\n prob = frequency/num_partition_instances #probability of given frequency occurring\n weight = num_partition_instances/len(instances)\n entropy = entropy + (weight * calculate_entropy(prob)) #sum\n entropies.append(entropy)\n\n #Determine which attribute has the smallest entropy\n min_entropy = entropies[0]\n min_attr = 0\n #Loop through each entropy value\n for i in range(len(entropies)):\n if entropies[i] < min_entropy:\n min_entropy = entropies[i]\n min_attr = i\n\n return list(available_attributes.keys())[min_attr]",
"def _get_attribute_functions(self, attributes):\n subqueries = []\n columns = []\n for attr in attributes:\n function = attributes[attr]\n if function == 'sum':\n sq = DBSession.query(\n self.db_item.id.label('item_id'),\n cast(self.db_value.value, Float).label('v')\n ). \\\n join(self.db_taggroup). \\\n join(\n self.db_tag,\n self.db_taggroup.id == self.db_tag.fk_tag_group). \\\n join(\n self.db_value,\n self.db_value.id == self.db_tag.fk_value). \\\n join(self.db_key, self.db_key.id == self.db_tag.fk_key). \\\n filter(self.db_key.key == attr). \\\n subquery()\n subqueries.append(sq)\n columns.append(func.sum(sq.c.v))\n elif function == 'count' or function == 'count distinct':\n if attr == 'Activity' or attr == 'Stakeholder':\n columns.append(func.count())\n else:\n sq = DBSession.query(\n self.db_item.id.label('item_id'),\n self.db_value.value.label('v')\n ). \\\n join(self.db_taggroup). \\\n join(\n self.db_tag,\n self.db_taggroup.id == self.db_tag.fk_tag_group). \\\n join(self.db_value). \\\n join(self.db_key). \\\n filter(self.db_key.key == attr). \\\n subquery()\n subqueries.append(sq)\n if (function == 'count distinct'):\n columns.append(func.count(distinct(sq.c.v)))\n else:\n columns.append(func.count(sq.c.v))\n return subqueries, columns"
]
| [
"0.530733",
"0.5292166",
"0.51155865",
"0.5054365",
"0.5016916",
"0.5009911",
"0.4980732",
"0.49682903",
"0.495229",
"0.4946295",
"0.4930192",
"0.4910622",
"0.4899093",
"0.489731",
"0.48556",
"0.47941115",
"0.47523022",
"0.47461033",
"0.47104993",
"0.46908027",
"0.4684973",
"0.46843567",
"0.46822113",
"0.4679503",
"0.46515763",
"0.4642731",
"0.46113494",
"0.4597543",
"0.45971552",
"0.45965585"
]
| 0.7125202 | 0 |
Filters the attribute by the given schema. | def filter_by_schema(self, schema):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def filter_by_schema(self, schema, constant_schema=None):\n if constant_schema is None:\n constant_schema = schema\n\n get_schema = lambda attr: constant_schema if attr.constant else schema\n\n # Remove attributes with invalid names\n filter_fcn = lambda attr: get_schema(attr).has_attribute(attr.name)\n self.filter_elements([filter_fcn])\n\n #\n # Filter objects by their schemas\n #\n\n del_inds = set()\n found_names = set()\n for idx, attr in enumerate(self):\n name = attr.name\n\n # Remove attributes that violate schema\n attr_schema = get_schema(attr).get_attribute_schema(name)\n if not attr_schema.is_valid_attribute(attr):\n del_inds.add(idx)\n\n # Enforce exclusivity, if necessary\n is_exclusive = get_schema(attr).is_exclusive_attribute(name)\n if is_exclusive:\n if name in found_names:\n del_inds.add(idx)\n else:\n found_names.add(name)\n\n self.delete_inds(del_inds)",
"def filter_by_schema(self, schema, allow_none_label=False):\n if self.label is None:\n if not allow_none_label:\n raise KeypointsSchemaError(\n \"None keypoints label is not allowed by the schema\"\n )\n elif self.label != schema.get_label():\n raise KeypointsSchemaError(\n \"Label '%s' does not match keypoints schema\" % self.label\n )\n\n self.attrs.filter_by_schema(\n schema.frames, constant_schema=schema.attrs\n )",
"def filter_by_schema(self, schema):\n # Remove keypoints with invalid labels\n filter_func = lambda keypoints: schema.has_keypoints_label(\n keypoints.label\n )\n self.filter_elements([filter_func])\n\n # Filter keypoints by their schemas\n for keypoints in self:\n keypoints_schema = schema.get_keypoints_schema(keypoints.label)\n keypoints.filter_by_schema(keypoints_schema)",
"def filter_schema(schema):\n for column, column_schema in schema.iteritems():\n if column_schema.get(CARDINALITY):\n del column_schema[CARDINALITY]\n schema[column] = column_schema\n\n return schema",
"def validate_subset_of_schema(self, schema):\n self.validate_schema_type(schema)\n\n for name, attr_schema in iteritems(self.schema):\n if not schema.has_attribute(name):\n raise AttributeContainerSchemaError(\n \"Attribute '%s' does not appear in schema\" % name\n )\n\n other_attr_schema = schema.get_attribute_schema(name)\n attr_schema.validate_subset_of_schema(other_attr_schema)",
"def validate_subset_of_schema(self, schema):\n self.validate_schema_type(schema)\n\n if self.name != schema.name:\n raise AttributeSchemaError(\n \"Expected name '%s'; found '%s'\" % (schema.name, self.name)\n )\n\n if self.exclusive != schema.exclusive:\n raise AttributeSchemaError(\n \"Expected exclusive '%s' for attribute '%s'; found '%s'\"\n % (schema.exclusive, self.name, self.exclusive)\n )\n\n if self.default != schema.default:\n raise AttributeSchemaError(\n \"Expected default '%s' for attribute '%s'; found '%s'\"\n % (schema.default, self.name, self.default)\n )",
"def validate_schema(self, schema):\n if type(schema) is not type(self):\n raise AttributeSchemaError(\n \"Expected schema to have type '%s'; found '%s'\"\n % (type(self), type(schema))\n )\n\n if schema.name != self.name:\n raise AttributeSchemaError(\n \"Expected schema to have name '%s'; found '%s'\"\n % (self.name, schema.name)\n )",
"def test_parse_filter(self):\n old_type = Filter(\n 'filter[student.school.title]', [\n Relationship('student', PersonSchema(), None),\n Relationship('school', StudentSchema(), None)],\n Attribute('title', SchoolSchema(), None), ('eq', ['test']))\n new_type = self.driver.parse(old_type)\n \n assert new_type.source == old_type\n assert old_type.relationships != new_type.relationships\n assert isinstance(new_type.relationships[0], Mapper)\n assert old_type.attribute != new_type.attribute\n assert isinstance(new_type.attribute, Column)\n assert old_type.value == new_type.value",
"def set_schema(self, schema):\r\n self.__schema = schema",
"def schema(self, schema):\n self._schema = schema",
"def merge_attribute_schema(self, attr_schema):\n name = attr_schema.name\n if name not in self.schema:\n self.schema[name] = attr_schema\n else:\n self.schema[name].merge_schema(attr_schema)",
"def schema(self, schema, in_='formData'):\n parameters = core.parameters_from_object_schema(schema, in_=in_)\n return compose(*map(self.parameter, parameters))",
"def schema(self, schema):\n\n self._schema = schema",
"def schema(self, schema):\n\n self._schema = schema",
"def schema(self, schema):\n\n self._schema = schema",
"def make_filter(name, schema):\n return HSMFilter(name, schema)",
"def validate(self, attr):\n if attr.name != self.name:\n raise AttributeSchemaError(\n \"Expected name '%s'; found '%s'\" % (self.name, attr.name)\n )\n\n self.validate_type(attr)\n\n if not self.is_valid_value(attr.value):\n raise AttributeSchemaError(\n \"Value '%s' of attribute '%s' is not allowed by the \"\n \"schema \" % (attr.value, attr.name)\n )",
"def filter_func(fieldname):\n if fieldname.startswith('_'):\n return False\n value = getattr(class_, fieldname)\n \n return isinstance(value, type)",
"def validate_subset_of_schema(self, schema):\n super(BooleanAttributeSchema, self).validate_subset_of_schema(schema)\n\n if not self.values.issubset(schema.values):\n raise AttributeSchemaError(\n \"Values %s are not a subset of %s\"\n % (self.values, schema.values)\n )",
"def test_query_filter_field(self):\n obj = self.provision_single_asset()\n # TODO: Write a positive test for this\n ret = self.get('widget', 200,\n params={'__filter': [\n {'field': 'created_at', 'name': 'name', 'op': 'eq'}]})\n assert len(ret['objects']) == 0",
"def itemFilterAttr(*args, byName: Union[AnyStr, bool]=\"\", byNameString: Union[AnyStr,\n List[AnyStr], bool]=\"\", byScript: Union[AnyStr, bool]=\"\", classification:\n Union[AnyStr, bool]=\"\", dynamic: bool=True, exists: bool=True, hasCurve:\n bool=True, hasDrivenKey: bool=True, hasExpression: bool=True, hidden:\n bool=True, intersect: Union[List[AnyStr, AnyStr], bool]=None, keyable:\n bool=True, listBuiltInFilters: bool=True, listOtherFilters: bool=True,\n listUserFilters: bool=True, negate: bool=True, parent: AnyStr=\"\", published:\n bool=True, readable: bool=True, scaleRotateTranslate: bool=True,\n secondScript: Union[AnyStr, bool]=\"\", text: Union[AnyStr, bool]=\"\", union:\n Union[List[AnyStr, AnyStr], bool]=None, writable: bool=True, q=True,\n query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass",
"def merge_schema(self, schema):\n super(BooleanAttributeSchema, self).merge_schema(schema)\n self.values.update(schema.values)",
"def get_attribute_schema(self, name):\n self.validate_attribute_name(name)\n return self.schema[name]",
"def get_field_schemas(self, value, schemas, **kwargs):\n keys = kwargs.get(\"keys\", GET_SCHEMAS_KEYS)\n search = re.compile(value.lower().strip(), re.I)\n\n matches = []\n\n for schema in schemas:\n if not schema.get(\"selectable\"):\n continue\n\n for key in keys:\n if search.search(schema[key]) and schema not in matches:\n matches.append(schema)\n\n # XXX fix test case for this\n # os\\. will fail for adapters that do not have os.type/dist/etc\n # if not matches:\n # msg = \"No field found where {} matches regex {!r}, valid fields: \\n{}\"\n # msg = msg.format(\n # keys, value, \"\\n\".join(self._prettify_schemas(schemas=schemas)),\n # )\n # raise NotFoundError(msg)\n return matches",
"def get_field_schemas(\n self, value: str, schemas: List[dict], keys: List[str] = GET_SCHEMAS_KEYS\n ) -> List[dict]:\n search = re.compile(value.lower().strip(), re.I)\n\n matches = []\n\n for schema in schemas:\n if not schema.get(\"selectable\"):\n continue\n\n for key in keys:\n if search.search(schema[key]) and schema not in matches:\n matches.append(schema)\n return matches",
"def validate_attribute(self, attr):\n self.validate_attribute_name(attr.name)\n self.schema[attr.name].validate_attribute(attr)",
"def clean_schema(schema):\n # type: (Dict) -> Dict\n return {k: v for k, v in schema.items()\n if k not in _SWAGGER_FIELDS and not k.lower().startswith(\"x-\")}",
"def schema(value: Any) -> Schema:\n raise InputTypeError(value)",
"def filter(self, **args ):\n query = TXLOG.select('*')\n for key, value in args.items():\n if '__' in key:\n key, op = key.split('__')\n else:\n op = 'eq'\n\n if not key in self.schema:\n raise BadArgument(\"Key %s not a valid argument\" % key )\n\n if not isinstance(value, basestring ):\n value = str(value)\n\n query = query.where({key:value}, self.operators[op])\n\n items = query.list()\n return items",
"def validate(self, attr=None, notattr=None):\n\n if attr is not None:\n specified_attrs = {attr: self.attributes[attr]}\n else:\n specified_attrs = self.attributes\n\n for attr, attr_structure in specified_attrs.items():\n if notattr is not None and attr is notattr:\n continue\n\n attrval = getattr(self, attr)\n if attrval is None or attrval == {}:\n continue\n\n attr_schema = attr_structure['schema']\n validatedattrval = attr_schema.validate(attrval)\n setattr(self, attr, validatedattrval)"
]
| [
"0.75847036",
"0.6768858",
"0.62234396",
"0.62064046",
"0.59790224",
"0.58667177",
"0.5720148",
"0.5491392",
"0.52851987",
"0.528129",
"0.5225991",
"0.5172035",
"0.51716125",
"0.51716125",
"0.51716125",
"0.51563376",
"0.5155657",
"0.51186794",
"0.50850403",
"0.50318366",
"0.5020568",
"0.50179714",
"0.5003126",
"0.49844253",
"0.49785182",
"0.49757284",
"0.4966594",
"0.49585405",
"0.49185532",
"0.49142286"
]
| 0.8063648 | 0 |
Creates a CategoricalAttribute instance. | def __init__(
self,
name,
value,
confidence=None,
top_k_probs=None,
constant=False,
tags=None,
):
super(CategoricalAttribute, self).__init__(
name, value, confidence=confidence, constant=constant, tags=tags
)
self.top_k_probs = top_k_probs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, name, categories=None, exclusive=False, default=None):\n super(CategoricalAttributeSchema, self).__init__(\n name, exclusive=exclusive, default=default\n )\n self.categories = set(categories or [])\n self.validate_default_value()",
"def set_categorical(self, meta_field):\n self._data[meta_field] = pd.Categorical(self._data[meta_field])",
"def create(data):\n \n # create category\n return Category(\n category_id = data['id'],\n name = data['name'])",
"def generate_categories(s, discrete=True, ordered=True, n=10, name='category'):\n\n if discrete:\n s_cat = pd.Series(pd.Categorical(s, ordered=ordered))\n else:\n s_cat = pd.cut(s, n, include_lowest=True, labels=[name+str(x) for x in range(n)])\n\n return s_cat",
"def predict_category(self):\n pass",
"def to_categorical(data, categories=[\"Silencer\", \"Inactive\", \"Weak enhancer\", \"Strong enhancer\"]):\n data = pd.Categorical(data, categories=categories, ordered=True)\n return data",
"def _build_categorical_dist(*args, probs=None, **kwargs):\n return tf_dists.Categorical(*args, probs=probs, **kwargs)",
"def majority_vote_categorical_attrs(attrs, confidence_weighted=False):\n if not isinstance(attrs, list):\n attrs = [attrs]\n\n accums = defaultdict(etan.Accumulator)\n for _attrs in attrs:\n for attr in _attrs:\n accums[attr.name].add(attr.value, weight=attr.confidence or 0.0)\n\n voted_attrs = AttributeContainer()\n for name, accum in iteritems(accums):\n value = accum.argmax(weighted=confidence_weighted)\n confidence = accum.get_average_weight(value) or None\n attr = CategoricalAttribute(name, value, confidence=confidence)\n voted_attrs.add(attr)\n\n return voted_attrs",
"def __init__(self, categories, prior=None, transform=None, name=None):\n if transform == \"identity\":\n self.categories = tuple([str(c) for c in categories])\n else:\n self.categories = tuple(categories)\n\n self.name = name\n\n if transform is None:\n transform = \"onehot\"\n self.transform_ = transform\n if transform not in [\"identity\", \"onehot\"]:\n raise ValueError(\n \"Expected transform to be 'identity' or 'onehot' \"\n \"got {}\".format(transform)\n )\n if transform == \"onehot\":\n self.transformer = CategoricalEncoder()\n self.transformer.fit(self.categories)\n else:\n self.transformer = Identity(dtype=type(categories[0]))\n\n self.prior = prior\n\n if prior is None:\n self.prior_ = np.tile(1.0 / len(self.categories), len(self.categories))\n else:\n self.prior_ = prior",
"def test_categorical_feature():\n\n feature = Categorical(\"abc\")\n\n for element in \"abc\":\n feature.set(element)\n feature.set(\"ignore this\")\n feature.push()\n\n for element in \"abc\":\n getattr(feature, \"set_\" + element)()\n feature.push()\n\n array = feature.array()\n assert array.shape == (6, 3)\n for i, row in enumerate(array):\n assert sum(row) == 1.0 and row[i % 3] == 1.0",
"def test_categorical_column_validates_categories(self):\n\n categories = 1\n\n with pytest.raises(CitrinationClientError):\n CategoricalColumn(name=self.name, role=self.role, group_by_key=self.group_by_key, categories=categories)\n\n categories = [\"Grey\", 1]\n with pytest.raises(CitrinationClientError):\n CategoricalColumn(name=self.name, role=self.role, group_by_key=self.group_by_key, categories=categories)\n\n categories = [\"Grey\", \"Blue\"]\n CategoricalColumn(name=self.name, role=self.role, group_by_key=self.group_by_key, categories=categories)",
"def create_category():\n category = Category(name='testcategory', description=\"\", fee=DEFAULT_FEE)\n category.save()\n return category",
"def to_categorical(y, num_columns):\n y_cat = np.zeros((y.shape[0], num_columns))\n y_cat[range(y.shape[0]), y] = 1.0\n return y_cat",
"def attributes(self):\n attrs_ = super(CategoricalAttributeSchema, self).attributes()\n attrs_.append(\"categories\")\n return attrs_",
"def categorical_cast(dataframe, cat_field):\n if _is_object_type(dataframe, cat_field):\n dataframe[cat_field] = dataframe[cat_field].astype('category')\n return dataframe\n else:\n raise TypeError",
"def test_categorical_constraint():\n categories = [\"Blue\", \"Red\", \"Yellow\"]\n prop = \"Property Color\"\n c = CategoricalConstraint(name=prop, accepted_categories=categories)\n\n mapped_c = c.to_dict()\n\n assert mapped_c[\"type\"] is \"categorical\"\n assert mapped_c[\"name\"] is prop\n assert mapped_c[\"options\"][\"categories\"] is categories",
"def __create_categorical_col(self, df, columns):\n\n # Temporarily remove tuple such that columns can be checked\n for n, item in enumerate(columns):\n if isinstance(item, tuple):\n name, _ = item\n temporary_columns = columns.copy()\n temporary_columns[n] = name\n\n # Use appropriate var in validation\n if 'temporary_columns' in locals():\n column_set = temporary_columns\n else:\n column_set = columns\n\n\n for n, column in enumerate(columns):\n if type(column) == tuple:\n cat_col, new_col = column\n df[new_col] = df[cat_col]\n column = cat_col\n df[column], uniques = pd.factorize(df[column])\n return df",
"def categoricals(self):\n return base_64_to_object(self.categorical_hyperparameters_64)",
"def create_category(name):\n return Category.objects.create(name=name)",
"def createCategory(name, user_id):\n c = Category(name=name, user_id=user_id)\n db_session.add(c)\n db_session.commit()\n return c",
"def _to_categorical(self, y, num_classes=None):\n y = np.array(y, dtype='int')\n input_shape = y.shape\n if input_shape and input_shape[-1] == 1 and len(input_shape) > 1:\n input_shape = tuple(input_shape[:-1])\n y = y.ravel()\n if not num_classes:\n num_classes = np.max(y) + 1\n n = y.shape[0]\n categorical = np.zeros((n, num_classes), dtype=np.float32)\n categorical[np.arange(n), y] = 1\n output_shape = input_shape + (num_classes,)\n categorical = np.reshape(categorical, output_shape)\n return categorical",
"def to_categorical(y, num_classes):\n arr = np.eye(num_classes)[y]\n tensor = torch.LongTensor(arr)\n return autograd.Variable(tensor)",
"def _to_categorical(self, y):\n num_classes = len(set(y))\n x = np.eye(num_classes, dtype='uint8')[y]\n return x",
"def createCategory(name, user_id):\n c = Category(name=name, user_id=user_id)\n session.add(c)\n session.commit()\n print 'Category \"' + name + '\" created.'\n return c",
"def test_categorical(self):\n with Model() as model:\n Categorical('x', np.array([0.25, 0.75]))\n steps = assign_step_methods(model, [])\n assert isinstance(steps, BinaryGibbsMetropolis)\n with Model() as model:\n Categorical('y', np.array([0.25, 0.70, 0.05]))\n steps = assign_step_methods(model, [])\n assert isinstance(steps, CategoricalGibbsMetropolis)",
"def create_category(category_name, days):\n time = timezone.now() + datetime.timedelta(days=days)\n return Category.objects.create(category_name=category_name, pub_date=time)",
"def create_new_wallet_category(self):\n wallet_category_vals = self._build_new_wallet_category_vals()\n return self.env['wallet.category'].create(wallet_category_vals)",
"def create_category(party_id: PartyID, title: str) -> TourneyCategory:\n party = DbParty.query.get(party_id)\n if party is None:\n raise ValueError('Unknown party ID \"{}\"'.format(party_id))\n\n category = TourneyCategory(party.id, title)\n party.tourney_categories.append(category)\n\n db.session.commit()\n\n return category",
"def to_categorical(y, nb_classes):\n y = np.asarray(y, dtype='int32')\n if not nb_classes:\n nb_classes = np.max(y)+1\n Y = np.zeros((len(y), nb_classes))\n for i in range(len(y)):\n Y[i, y[i]] = 1.\n return Y",
"def to_categorical(y, num_classes=None):\n y = np.array(y, dtype=np.int32)\n input_shape = y.shape\n if input_shape and input_shape[-1] == 1 and len(input_shape) > 1:\n input_shape = tuple(input_shape[:-1])\n y = y.ravel()\n if not num_classes:\n num_classes = np.max(y) + 1\n n = y.shape[0]\n categorical = np.zeros((n, num_classes), dtype=np.float32)\n categorical[np.arange(n), y] = 1\n output_shape = input_shape + (num_classes,)\n categorical = np.reshape(categorical, output_shape)\n return categorical"
]
| [
"0.6384755",
"0.6279566",
"0.62452924",
"0.6244721",
"0.5907963",
"0.58979803",
"0.5890514",
"0.5843316",
"0.58307725",
"0.58294404",
"0.58147645",
"0.58140194",
"0.58103013",
"0.5786111",
"0.57816666",
"0.57790756",
"0.5774232",
"0.575643",
"0.5720464",
"0.5631398",
"0.56268704",
"0.5604193",
"0.5545648",
"0.5545045",
"0.5543323",
"0.5515113",
"0.55097204",
"0.5494271",
"0.54882693",
"0.54768336"
]
| 0.6910468 | 0 |
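Illustrative note (not part of the dataset): the record above shows the constructor of a `CategoricalAttribute`-style class, and its negatives show the matching `CategoricalAttributeSchema` constructor and a schema `validate()` method. The sketch below simply puts those pieces together; it assumes the classes are importable from ETA's `eta.core.data` module, which the dump itself does not name, so treat the import path and error type as assumptions.

```python
# Sketch only -- assumes the ETA-style API shown in the surrounding records.
# The module path eta.core.data is an assumption; the dump does not name the package.
import eta.core.data as etad

# A categorical attribute: name, value, optional confidence and top-k probabilities,
# matching the __init__ signature shown in this record
attr = etad.CategoricalAttribute(
    "weather", "sunny", confidence=0.9, top_k_probs={"sunny": 0.9, "cloudy": 0.1}
)

# A schema restricting the allowed categories for that attribute name
# (constructor signature shown in the negatives above)
schema = etad.CategoricalAttributeSchema("weather", categories={"sunny", "cloudy"})

# validate() raises AttributeSchemaError if the name, type, or value is not allowed
schema.validate(attr)
```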
Parses the attribute value. | def parse_value(cls, value):
return value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _parseAttributeString(self, line):\n attribute, value = line.partition(' ')[::2]\n self._setAttribute(attribute, value)",
"def parse(self, value):\n raise NotImplementedError(\"Please implement the Class\")",
"def extractValue(line, attribute):\n\t\n\tmyValue = ''\n\n\t#to avoid attributes in javascript or normal text\n\tif attribute + '=\"' in line or attribute + \"='\" in line:\n\t\taIndex = line.index(attribute)\n\telse:\n\t\taIndex = None\n\t\n\t#attribute exists and it's a tag\n\tif aIndex != None:\n\t\n\t\t#traverse upto the value\n\t\tch = line[aIndex]\n\t\twhile ch != '\"' and ch != \"'\":\n\t\t\taIndex += 1\n\t\t\tch = line[aIndex]\n\n\t\taIndex += 1\n\t\tch = line[aIndex]\n\t\t\n\t\t# extract the value\n\t\twhile ch != \"'\" and ch != '\"':\n\t\t\tmyValue += ch\n\t\t\taIndex += 1\n\t\t\tch = line[aIndex]\n\n\treturn myValue",
"def parseTag(self) :\n pos = self.position\n tagtype = self.tags[ord(self._data[pos])]\n if tagtype == 'end-of-attributes-tag':\n return 0\n pos += 1\n posend = pos2 = pos + 2\n namelength = unpack(\">H\", self._data[pos:pos2])[0]\n if not namelength :\n name = self._curname\n else :\n posend += namelength\n self._curname = name = self._data[pos2:posend]\n pos2 = posend + 2\n valuelength = unpack(\">H\", self._data[posend:pos2])[0]\n posend = pos2 + valuelength\n value = self._data[pos2:posend]\n if tagtype in (\"integer\", \"enum\") :\n value = unpack(\">I\", value)[0]\n elif tagtype == \"boolean\" :\n value = ord(value)\n try :\n (oldname, oldval) = self._curattributes[-1][-1]\n if oldname == name :\n oldval.append((tagtype, value))\n else :\n raise IndexError\n except IndexError :\n self._curattributes[-1].append((name, [(tagtype, value)]))\n self.logDebug(\"%s(%s) : %s\" % (name, tagtype, value))\n return posend - self.position",
"def getSVGItemAttrValue(self, item, attr):\n\n # Find attribute.\n for section in item.split(' '):\n pos = section.find(attr)\n if not pos == -1:\n break\n\n # Get attributes value.\n return section.split('\"')[1]",
"def parse_value(cls, value):\n raise NotImplementedError(\"subclass must implement parse_value()\")",
"def getValue(self,attrName):\n try:\n value = self.getAttribute(attrName)[0].value\n except:\n if(attrName == 'ALARMTIME' or attrName == 'CYCLETIME'): #default value for alarm\n return '0x00'\n else:\n print 'ERROR: no attribute %s definition for %s'%(attrName,self.name)\n sys.exit(-1)\n return value",
"def parseAttribute(self, attr_str):\r\n parts = []\r\n lastpos = 0\r\n while lastpos < len(attr_str):\r\n newpos = self.nextString(attr_str, lastpos)\r\n s = attr_str[lastpos:newpos-1]\r\n if (s[0] == \"(\" and s[-1] == \")\"): # list, recurse\r\n parts.append(self.parseAttribute(s[1:-1]))\r\n else:\r\n try:\r\n parts.append(float(s)) # number, any kind\r\n except ValueError:\r\n if s[0] == \"'\" and s[-1] == \"'\": # string\r\n parts.append(s[1:-1])\r\n elif s == \"$\":\r\n parts.append(None)\r\n else:\r\n parts.append(s) # ref, enum or other\r\n\r\n lastpos = newpos\r\n \r\n return parts",
"def _get_attribute(self):\n return self.split_text[1] if len(self.split_text) > 1 else \"\"",
"def parse_attribute(self,attr,table):\n \n attr_dict = {}\n \n for child in attr:\n name = child.attrib['name']\n \n #attributes can either have string or bool as the value we need\n #checking for boolean\n if 'val' in child[0].attrib:\n val = child[0].attrib['val']\n \n if val == 'true':\n flag = True\n else:\n flag = False\n \n attr_dict[name] = flag\n \n #else it's string stroed as text\n else:\n attr_dict[name] = super(DatabaseUmlParser,self).stripHashtags(child[0].text)\n \n attr = db_attribute.DbAttribute(table,attr_dict)\n \n return attr",
"def read_value(self):\n return self.load_attr(\"value\")",
"def getAttrValue(self, *args):\n return _libsbml.XMLToken_getAttrValue(self, *args)",
"def value(self):\n return self.raw.get_attribute(\"value\")",
"def parseAttrLine(line):\n\tpre, post = line.strip().split(':')\n\tnumber, attr = pre.strip().split('.')\n\tattr = attr.strip().replace('%','').replace(' ', '-')\n\tvals = [clean(x) for x in post.strip().strip('.').split(',')]\n\treturn {'num':int(number), 'attr':clean(attr), 'vals':vals}",
"def attribute_value(self) -> str:\n return pulumi.get(self, \"attribute_value\")",
"def parse(self, argument):\n self._value = argument",
"def attribute_value(self) -> Optional[str]:\n return pulumi.get(self, \"attribute_value\")",
"def _parse_aqara_attributes(self, value):\n attributes = {}\n attribute_names = {\n 1: BATTERY_VOLTAGE_MV,\n 3: TEMPERATURE,\n 4: XIAOMI_ATTR_4,\n 5: XIAOMI_ATTR_5,\n 6: XIAOMI_ATTR_6,\n 10: PATH,\n }\n result = {}\n while value:\n skey = int(value[0])\n svalue, value = foundation.TypeValue.deserialize(value[1:])\n result[skey] = svalue.value\n for item, val in result.items():\n key = (\n attribute_names[item]\n if item in attribute_names\n else \"0xff01-\" + str(item)\n )\n attributes[key] = val\n if BATTERY_VOLTAGE_MV in attributes:\n attributes[BATTERY_LEVEL] = int(\n self._calculate_remaining_battery_percentage(\n attributes[BATTERY_VOLTAGE_MV]\n )\n )\n return attributes",
"def visit_Attribute(self, node):\n self.generic_visit(node)\n if isinstance(node.ctx, ast.Load):\n args = [ node.value, ast.Str(node.attr) ]\n return to_call(to_name('getattr'), args)\n return node",
"def Parse(self, argument):\n self._value = argument",
"def get_radist_value(line):\n assert line.startswith(' ')\n key, value = line.split('=')\n key = key.strip()\n return key, attr_map[key](value)",
"def _get_value_by_attribute(self, die, attribute):\n # in the case of str form data `DW_FORM_strp`, return str\n if attribute.form == \"DW_FORM_strp\":\n return attribute.value.decode(\"utf-8\", errors=\"ignore\")\n\n # parse `DW_AT_decl_file`\n if attribute.name == \"DW_AT_decl_file\":\n if attribute.value == 0:\n return attribute.value\n lineprogram = die.dwarfinfo.line_program_for_CU(die.cu)\n file_entry = lineprogram.header.file_entry[attribute.value-1]\n file_name = file_entry.name.decode(\"utf-8\", errors=\"ignore\")\n file_dir_bytes = lineprogram.header.include_directory[file_entry.dir_index-1] # noqa: E501\n file_dir = file_dir_bytes.decode(\"utf-8\", errors=\"ignore\")\n return file_dir + \"/\" + file_name\n\n # if extra_info is not empty, return extra_info\n extra_info_func = _EXTRA_INFO_DESCRIPTION_MAP[attribute.name]\n extra_info = extra_info_func(attribute, die, self.section_offset)\n if extra_info:\n return extra_info\n\n # the last choice\n descr_func = _ATTR_DESCRIPTION_MAP[attribute.form]\n val_description = descr_func(attribute, die, self.section_offset)\n if val_description:\n return val_description\n\n return attribute.value",
"def _parse_attributes(self, attributes):\n\n var_value_pairs = attributes.split()\n\n self.logger.debug('attributes:{} pairs:{}'.format(attributes, var_value_pairs))\n\n for var_value_pair in var_value_pairs:\n (var, separator, value) = var_value_pair.partition('=')\n value = value.replace('\"', '')\n self._current_element.add_attribute(var, value)",
"def _extract_by_key(self, line, key):\n search = r'{0}=.+?,'.format(key) # lazy match to first ,\n attr_match = re.search(search, line)\n if attr_match:\n # grab just the value of the attribute from attr_key=value,\n value = attr_match.group()[len(key) + 1 : len(attr_match.group()) - 1]\n return value\n else:\n return \"notfound\"",
"def parse_value(cls, value):\n choice, value = value.split('=')\n value = cls.VALUES_MAP[value]\n\n return choice, value",
"def _validate_value(\n cls,\n attribute: models.Attribute,\n value_data: dict,\n is_swatch_attr: bool,\n ):\n value = value_data.get(\"name\")\n if value is None:\n raise ValidationError(\n {\n cls.ATTRIBUTE_VALUES_FIELD: ValidationError(\n \"The name field is required.\",\n code=AttributeErrorCode.REQUIRED.value,\n )\n }\n )\n\n if is_swatch_attr:\n cls.validate_swatch_attr_value(value_data)\n else:\n cls.validate_non_swatch_attr_value(value_data)\n\n slug_value = value\n value_data[\"slug\"] = slugify(unidecode(slug_value))\n\n attribute_value = models.AttributeValue(**value_data, attribute=attribute)\n try:\n attribute_value.full_clean()\n except ValidationError as validation_errors:\n for field, err in validation_errors.error_dict.items():\n if field == \"attribute\":\n continue\n errors = []\n for error in err:\n error.code = AttributeErrorCode.INVALID.value\n errors.append(error)\n raise ValidationError({cls.ATTRIBUTE_VALUES_FIELD: errors})",
"def ReadAttributeValue(self):\n ret = libxml2mod.xmlTextReaderReadAttributeValue(self._o)\n return ret",
"def attribute_value(self):\n return self._attribute_value",
"def visit_Attribute(self, node):\n\n if isinstance(node.value, ast.Name):\n attr_str = \"%s.%s\" % (node.value.id, node.attr)\n if self._is_write_ctx(node.ctx):\n self.yield_checker.record_assignment(attr_str)\n else:\n self.yield_checker.record_usage(attr_str, node)\n\n root_value = self.visit(node.value)\n if self._is_write_ctx(node.ctx):\n return self._visit_set_attribute(node, root_value)\n elif self._is_read_ctx(node.ctx):\n if self._is_checking():\n self.asynq_checker.record_attribute_access(root_value, node.attr, node)\n value = self._get_attribute(node, node.attr, root_value)\n if self._should_use_varname_value(value):\n varname_value = VariableNameValue.from_varname(\n node.attr, self.config.varname_value_map()\n )\n if varname_value is not None:\n return varname_value\n if self.scope.scope_type() == ScopeType.function_scope:\n composite = self.varname_for_constraint(node)\n if composite:\n local_value = self.scope.current_scope().get_local(\n composite, node, self.state, fallback_value=value\n )\n if isinstance(local_value, MultiValuedValue):\n vals = [\n val\n for val in local_value.vals\n if val is not UNINITIALIZED_VALUE\n ]\n if vals:\n local_value = unite_values(*vals)\n else:\n local_value = UNINITIALIZED_VALUE\n if local_value is not UNINITIALIZED_VALUE:\n value = local_value\n value = self._maybe_use_hardcoded_type(value, node.attr)\n return value\n else:\n self.show_error(node, \"Unknown context\", ErrorCode.unexpected_node)\n return None",
"def get_attribute_value(self, typ, attr_name):\n for base_typ in self._get_mro(typ):\n serialized_base = self.serialize_type(base_typ)\n if serialized_base is None:\n continue\n value = self.attribute_values[serialized_base].get(attr_name)\n if value is not None:\n return value\n else:\n return UNRESOLVED_VALUE"
]
| [
"0.70519006",
"0.65825087",
"0.6487424",
"0.64370435",
"0.6392208",
"0.6368744",
"0.62816554",
"0.6264809",
"0.6242067",
"0.62218463",
"0.6188011",
"0.61878055",
"0.6172431",
"0.6101577",
"0.60859686",
"0.6055087",
"0.6045903",
"0.60443044",
"0.6021183",
"0.6009539",
"0.5944895",
"0.5936162",
"0.59270513",
"0.5919711",
"0.59164876",
"0.5887986",
"0.58629704",
"0.58060503",
"0.57829446",
"0.5769908"
]
| 0.689424 | 1 |
Whether this attribute is exclusive. | def is_exclusive(self):
return self.exclusive | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_exclusive_attribute(self, name):\n return self.get_attribute_schema(name).is_exclusive",
"def has_exclusive_attributes(self):\n return any(schema.is_exclusive for schema in itervalues(self.schema))",
"def set_exclusive(self, exclusive):\n self.widget.setExclusive(exclusive)",
"def is_non_exclusive(self, variable):\n non_exclusive = False\n for sub_effect in self._sub_effects:\n if sub_effect.get_variable() == variable:\n if not sub_effect.is_exclusive():\n non_exclusive = True\n elif len(sub_effect.get_value()) > 0 and not sub_effect.is_negated():\n return False\n return non_exclusive",
"def read_only(self):\n return bool(self.__read_only)",
"def read_only(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"read_only\")",
"def read_only(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"read_only\")",
"def read_only(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"read_only\")",
"def read_only(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"read_only\")",
"def __ne__(self, other: 'Attribute') -> bool:\n return not self == other",
"def disabled(self):\n check1 = \"pf-m-disabled\" in self.browser.classes(self)\n check2 = \"pf-m-aria-disabled\" in self.browser.classes(self)\n return check1 or check2 or self.browser.get_attribute(\"disabled\", self) is not None",
"def IsReadOnly(self) -> bool:",
"def attribute(self):\n\n return not bool(self.arguments)",
"def isReadOnly(self) -> bool:\n ...",
"def isReadOnly(self) -> bool:\n ...",
"def can_be_disabled(self) -> bool:\n return True",
"def read_only(self) -> Optional[bool]:\n return self._read_only",
"def readonly(self):\n return self._readonly",
"def readonly(self):\n return self._readonly",
"def disabled(self) -> bool:\n return pulumi.get(self, \"disabled\")",
"def disabled(self) -> bool:\n return pulumi.get(self, \"disabled\")",
"def is_disabled(self):\n\n return self.__contains__('disabled')",
"def disabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disabled\")",
"def disabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disabled\")",
"def disabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disabled\")",
"def disabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disabled\")",
"def disabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disabled\")",
"def disabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disabled\")",
"def disabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disabled\")",
"def disabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disabled\")"
]
| [
"0.81824136",
"0.740527",
"0.68166596",
"0.6737781",
"0.6495307",
"0.6457551",
"0.6457551",
"0.6457551",
"0.6457551",
"0.64246213",
"0.640931",
"0.6389615",
"0.63834655",
"0.6336614",
"0.6336614",
"0.63310194",
"0.62956697",
"0.62916607",
"0.62916607",
"0.6222839",
"0.6222839",
"0.62195265",
"0.61871725",
"0.61871725",
"0.61871725",
"0.61871725",
"0.61871725",
"0.61871725",
"0.61871725",
"0.61871725"
]
| 0.80982614 | 1 |
Whether this attribute has a default value. | def has_default_value(self):
return self.default is not None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def has_default_value(self, name):\n return self.get_attribute_schema(name).has_default_value",
"def is_attr_value_default(self, attr_name):\n try:\n attr_value = self.field_attrs[attr_name]\n except KeyError:\n return True\n\n return attr_value == self.get_attr_default(attr_name)",
"def is_attr_value_default(self, attr_name):\n try:\n attr_value = self.field_attrs[attr_name]\n except KeyError:\n return True\n\n return attr_value == self.get_attr_default(attr_name)",
"def has_default(self):\r\n return self.default is not None",
"def has_default(self):\r\n return self._default is not None",
"def has_default(self):\n return self._default is not NOT_PROVIDED",
"def isDefault (self):\n val = self.__getattribute__('StoredValue')\n dft = self.__class__.StoredValue\n return val==dft",
"def is_default(self) -> Optional[bool]:\n return pulumi.get(self, \"is_default\")",
"def is_default(self) -> bool:\n return self._is_default",
"def is_default(self):\n return self._is_default",
"def is_default(self) -> bool:\n return pulumi.get(self, \"is_default\")",
"def is_default(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_default\")",
"def required(self) -> bool:\n return self._default is None",
"def is_default(self):\n # type: () -> bool\n return self._is_default",
"def is_default(self):\n\n # Make sure matching default and value cases are found to be\n # equivalent.\n if self.default is None: # empty string should equal None\n current_val = (None if self.value == \"\" else self.value)\n elif isinstance(self.default, str): # avoid str v float comparisons\n current_val = str(self.value)\n else:\n current_val = self.value\n\n # self.template does not contain any information about self.value, so\n # we need to check this separately.\n if current_val != self.default:\n return False\n\n # At this point, self.value is equivalent to self.default, so we should\n # check the remaining attribute defaults defined in self.template.\n default = True\n for attr, val in self.template.items():\n current = getattr(self, attr)\n if current != val:\n default = False\n break\n\n return default",
"def isDefault(self) -> bool:\n ...",
"def isDefault(self) -> bool:\n ...",
"def isDefault(self) -> bool:\n ...",
"def has_default(model_field: DataclassCreationFields) -> bool:\n return (model_field.field.default is not dataclasses.MISSING) or (\n model_field.field.default_factory is not dataclasses.MISSING\n )",
"def is_default(self) -> bool:\n return self.object_id is None or self.data_id is None",
"def is_default(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"is_default\")",
"def is_default(self):\n return self._tag == 'default'",
"def default(self) -> Optional[bool]:\n return pulumi.get(self, \"default\")",
"def default(self) -> Optional[bool]:\n return pulumi.get(self, \"default\")",
"def default(self) -> Optional[bool]:\n return pulumi.get(self, \"default\")",
"def default(self) -> Optional[bool]:\n return pulumi.get(self, \"default\")",
"def default(self) -> Optional[bool]:\n return pulumi.get(self, \"default\")",
"def default(self) -> bool:\n return pulumi.get(self, \"default\")",
"def is_default(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"is_default\")",
"def is_optional(self):\n return self.default is not NotImplemented"
]
| [
"0.85349417",
"0.82452166",
"0.82452166",
"0.81859136",
"0.80237293",
"0.80165124",
"0.7748865",
"0.7633698",
"0.7621697",
"0.76085377",
"0.7557479",
"0.75521266",
"0.75233644",
"0.75227034",
"0.7517625",
"0.74916875",
"0.74916875",
"0.74916875",
"0.7448208",
"0.74473214",
"0.7436804",
"0.7432619",
"0.7388113",
"0.7388113",
"0.7388113",
"0.7388113",
"0.7388113",
"0.73773134",
"0.7332613",
"0.72857916"
]
| 0.8726552 | 0 |
Gets the Attribute class associated with this schema. | def get_attribute_class(self):
return self._attr_cls | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_attribute_class(self, name):\n self.validate_attribute_name(name)\n return self.schema[name].get_attribute_class()",
"def get_attribute_class(self, attr_name):\n return self.attrs.get_attribute_class(attr_name)",
"def get_class_attribute(self):\n return self.class_attr",
"def get_attr(self):\n attr = self._bld.FindOrCreateAttribute(self._sobj, self.sname)\n return attr._narrow(self.stype)",
"def get_type(self, ):\n return self.attrs.get(self.AttributeNames.TYPE, None)",
"def getCustomAttribute(self):\n\t\treturn self.Attribute",
"def get_attribute_schema(self, name):\n self.validate_attribute_name(name)\n return self.schema[name]",
"def get_class(self):\n return self.meta_model.get_class()",
"def attributeType(self) -> unicode:\n ...",
"def get_attr(self, attr_type):\n attr = attr_type()\n attr.attach_to(self.get_sobj(), self._bld)\n return attr",
"def get_class(self):\n return devices.get_class(self.type)",
"def class_attribute(self):\n\n return getattr(self.parent.class_, self.key)",
"def get_attr_fe_type(typ):\n model = default_manager.lookup(typ)\n if not isinstance(model, StructModel):\n raise TypeError(\"make_struct_attribute_wrapper() needs a type \"\n \"with a StructModel, but got %s\" % (model,))\n return model.get_member_fe_type(struct_attr)",
"def schema(cls):\n return Schema.get_instance(cls)",
"def Type(self):\r\n\t\treturn self._get_attribute('type')",
"def _get_marshmallow_field_cls(self):\n return self.MARSHMALLOW_FIELD_CLS",
"def get_class(self):\n\t\treturn self.CLASS",
"def get_data_class(self):\n return self.data_class",
"def type(self) -> Type[ClassType]:\n return self._type",
"def _class(self):\n return self.__class",
"def cls(self):\n return self._cls",
"def meta(cls):\n if getattr(cls, '__from_class__', None) is not None:\n cls = cls.__from_class__\n attribute_info = {}\n for name, value in cls.__table__.columns.items():\n attribute_info[name] = str(value.type).lower()\n\n return {cls.__name__: attribute_info}",
"def get_attribute(self, attribute_name, attribute_type='StringValue'):\n\n return self.sqs_message.message_attributes \\\n .get(attribute_name) \\\n .get(attribute_type)",
"def attr(self):\n\n return EnumAttr(self)",
"def cls(self):\n return self.cls_index",
"def getKssUIDClass(self):\n uid = IUUID(aq_inner(self.context))\n\n return \"kssattr-atuid-%s\" % uid",
"def asset_class(self) -> Union[AssetClass, str]:\n return self.__asset_class",
"def GetAttribute(self, attr):\n return self._attrs[attr]",
"def GetAttributes(self):\r\n\r\n return self._attr",
"def get_attribute(self, lexeme: str) -> typing.Union[Attributes.Attributes]:\n return self._parent_node.get_attribute(lexeme)"
]
| [
"0.8190385",
"0.79435694",
"0.7145243",
"0.69960994",
"0.6341953",
"0.6313961",
"0.6174065",
"0.6074287",
"0.60729676",
"0.59743506",
"0.58819413",
"0.5792457",
"0.57806647",
"0.56970257",
"0.5666237",
"0.5620442",
"0.5617537",
"0.5617366",
"0.553197",
"0.55195576",
"0.55044514",
"0.5489079",
"0.54420364",
"0.5441723",
"0.54230726",
"0.54216385",
"0.5401661",
"0.5393815",
"0.5378701",
"0.53731424"
]
| 0.87033117 | 0 |
Validates that the given AttributeSchema has the same class and `name` as this schema. | def validate_schema(self, schema):
if type(schema) is not type(self):
raise AttributeSchemaError(
"Expected schema to have type '%s'; found '%s'"
% (type(self), type(schema))
)
if schema.name != self.name:
raise AttributeSchemaError(
"Expected schema to have name '%s'; found '%s'"
% (self.name, schema.name)
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate_subset_of_schema(self, schema):\n self.validate_schema_type(schema)\n\n if self.name != schema.name:\n raise AttributeSchemaError(\n \"Expected name '%s'; found '%s'\" % (schema.name, self.name)\n )\n\n if self.exclusive != schema.exclusive:\n raise AttributeSchemaError(\n \"Expected exclusive '%s' for attribute '%s'; found '%s'\"\n % (schema.exclusive, self.name, self.exclusive)\n )\n\n if self.default != schema.default:\n raise AttributeSchemaError(\n \"Expected default '%s' for attribute '%s'; found '%s'\"\n % (schema.default, self.name, self.default)\n )",
"def validate_class_schema(self, schema):\n json_schema_path = os.path.join(_ROOT,\n 'data',\n 'class_json_schema.json')\n json_schema = load_json_or_yaml(json_schema_path)\n return validate(schema, json_schema)",
"def validate_attribute_name(self, name):\n if not self.has_attribute(name):\n raise AttributeContainerSchemaError(\n \"Attribute '%s' is not allowed by the schema\" % name\n )",
"def validate_subset_of_schema(self, schema):\n self.validate_schema_type(schema)\n\n for name, attr_schema in iteritems(self.schema):\n if not schema.has_attribute(name):\n raise AttributeContainerSchemaError(\n \"Attribute '%s' does not appear in schema\" % name\n )\n\n other_attr_schema = schema.get_attribute_schema(name)\n attr_schema.validate_subset_of_schema(other_attr_schema)",
"def validate(self, fqn, data, errors):\n\t\terrors.append(\"{}: validate() must be implemented for SchemaBase derived classes.\".format(self.__class__.__name__))\n\t\treturn False",
"def validate(self, attr):\n if attr.name != self.name:\n raise AttributeSchemaError(\n \"Expected name '%s'; found '%s'\" % (self.name, attr.name)\n )\n\n self.validate_type(attr)\n\n if not self.is_valid_value(attr.value):\n raise AttributeSchemaError(\n \"Value '%s' of attribute '%s' is not allowed by the \"\n \"schema \" % (attr.value, attr.name)\n )",
"def check_consistency(self) -> 'Schema':\n errors = []\n fields = self.__fields__\n for k, v in fields.items():\n _, err = v.validate(getattr(self, k), fields, loc=k)\n if err:\n errors.append(err)\n if errors:\n raise ValidationError(errors, self.__class__)\n return self",
"def validate_type(self, attr):\n if not isinstance(attr, self._attr_cls):\n raise AttributeSchemaError(\n \"Expected attribute '%s' to have type '%s'; found '%s'\"\n % (attr.name, self.type, etau.get_class_name(attr))\n )",
"def check_schema_name(name: str):\n if not is_valid_schema_name(name):\n raise ValidationError(\"Invalid string used for the schema name.\")",
"def _validate(self):\n fields, schema = self.__dict__, self._def.default\n extra_fields = fields.viewkeys() - schema.viewkeys()\n if len(extra_fields) > 0:\n raise AttributeError('Fields found that are not in the schema: %r' % (list(extra_fields)))\n for key in fields.iterkeys():\n if type(fields[key]) is not type(schema[key]):\n raise AttributeError('Invalid %s for field \"%s\", should be %s' %\n (type(fields[key]), key, type(schema[key])))",
"def isValidForSchema(schema):\n\n return True",
"def get_attribute_class(self, name):\n self.validate_attribute_name(name)\n return self.schema[name].get_attribute_class()",
"def check_for_schema(cls):\n if not hasattr(cls, \"Schema\") or cls.Schema is None:\n raise PillowtalkError(\"Schema not found. @add_schema may not have been added to class definition.\")",
"def __matchSchema(self, featureClass):\n fClassFields = []\n for field in arcpy.ListFields(featureClass):\n fieldName = field.name.lower()\n if fieldName == 'objectid' or fieldName == 'oid' or 'shape' in fieldName or field.name in self.userFields:\n pass\n else:\n fClassFields.append(field.name)\n fClassFields.insert(0, 'Shape@')\n objFields = [f['name'] for f in self.updateFields]\n return sorted(fClassFields) == sorted(objFields)",
"def get_attribute_schema(self, name):\n self.validate_attribute_name(name)\n return self.schema[name]",
"def validate(instance, schema, cls=None, *args, **kwargs):\r\n if cls is None:\r\n cls = validator_for(schema)\r\n cls.check_schema(schema)\r\n cls(schema, *args, **kwargs).validate(instance)",
"def self_check(self, fqn, errors):\n\t\terrors.append(\"{}: self_check() must be implemented for SchemaBase derived classes.\".format(self.__class__.__name__))\n\t\treturn False",
"def validate_subset_of_schema(self, schema):\n self.validate_schema_type(schema)\n\n if self.label != schema.label:\n raise KeypointsSchemaError(\n \"Expected keypoints label '%s'; found '%s'\"\n % (schema.label, self.label)\n )\n\n self.attrs.validate_subset_of_schema(schema.attrs)",
"def validate_full_schema(self):\n #self.check_duplicate_labels()\n for record in self.extension_schema['schema']['@graph']:\n #self.check_whether_atid_and_label_match(record)\n if record['@type'] == \"rdfs:Class\":\n self.validate_class_schema(record)\n #self.validate_class_label(record[\"@id\"])\n self.validate_validation_field(record)\n elif record['@type'] == \"rdf:Property\":\n self.validate_property_schema(record)\n #self.validate_property_label(record[\"@id\"])\n #self.validate_domainIncludes_field(record[\"http://schema.org/domainIncludes\"])\n #self.validate_rangeIncludes_field(record[\"http://schema.org/rangeIncludes\"])\n #else:\n # raise ValueError('wrong @type value found: {}'.format(record))",
"def _validate_link_name(klass, name):\n split_name = name.split(\"__\")\n if len(split_name) > 1:\n relationship_name = split_name[0]\n if relationship_name not in klass.__relationships_fields_set__:\n raise ValueError(f\"'{relationship_name}' is not a valid relationship for {klass.__name__}.\")",
"def equals(self, other: Schema) -> bool:\n if not isinstance(other, Schema):\n raise TypeError(\n f\"invalid equality comparison between Schema and {type(other)}\"\n )\n return self.__cached_equals__(other)",
"def equals(self, other: Schema) -> bool:\n if not isinstance(other, Schema):\n raise TypeError(\n f\"invalid equality comparison between Schema and {type(other)}\"\n )\n return self.__cached_equals__(other)",
"def validate(self):\n unused_leaves, all_class_names = self._validate()\n duplicates = set([x for x in all_class_names\n if all_class_names.count(x) > 1])\n\n assert not duplicates, f'Found duplicate Asset class(es): {duplicates}'\n return self",
"def __eq__(self, other):\n return (\n isinstance(other, Attribute)\n and self.name == other.name\n and self.type == other.type\n and self.is_required == other.is_required\n )",
"def validate(self, attrs: dict):\n attr_types_set = {attr.lower() for attr in attrs.keys()}\n\n missing_required = self.required_attrs - attr_types_set\n if missing_required:\n missing_required = ', '.join(missing_required)\n raise SchemaValidationError(f'Missing required attributes: {missing_required}')\n\n not_required = attr_types_set - self.required_attrs\n not_allowed = not_required - self.allowed_attrs\n if not_allowed:\n not_allowed = ', '.join(not_allowed)\n raise SchemaValidationError(f'Attribute types are not allowed: {not_allowed}')\n\n self.attr_type_validate(attrs)",
"def validate(self, schema=os.path.join(os.path.dirname(__file__), 'am.xsd')):\n return validate_xml(schema, self.path, from_path=True)",
"def same_schema(self):\n return self._same_schema",
"def validate_attribute(self, attr):\n self.validate_attribute_name(attr.name)\n self.schema[attr.name].validate_attribute(attr)",
"def filter_by_schema(self, schema, constant_schema=None):\n if constant_schema is None:\n constant_schema = schema\n\n get_schema = lambda attr: constant_schema if attr.constant else schema\n\n # Remove attributes with invalid names\n filter_fcn = lambda attr: get_schema(attr).has_attribute(attr.name)\n self.filter_elements([filter_fcn])\n\n #\n # Filter objects by their schemas\n #\n\n del_inds = set()\n found_names = set()\n for idx, attr in enumerate(self):\n name = attr.name\n\n # Remove attributes that violate schema\n attr_schema = get_schema(attr).get_attribute_schema(name)\n if not attr_schema.is_valid_attribute(attr):\n del_inds.add(idx)\n\n # Enforce exclusivity, if necessary\n is_exclusive = get_schema(attr).is_exclusive_attribute(name)\n if is_exclusive:\n if name in found_names:\n del_inds.add(idx)\n else:\n found_names.add(name)\n\n self.delete_inds(del_inds)",
"def match_xsd_type(self, schema_item, name):\n try:\n if isinstance(schema_item, AttributeNode):\n if not schema_item[1].is_matching(name):\n return\n xsd_type = schema_item[1].type\n elif not schema_item.is_matching(name, self.parser.default_namespace):\n return\n else:\n xsd_type = schema_item.type\n except AttributeError:\n return\n\n self.add_xsd_type(name, xsd_type)\n\n try:\n value = XSD_BUILTIN_TYPES[xsd_type.local_name].value\n except KeyError:\n primitive_type = self.parser.schema.get_primitive_type(xsd_type)\n try:\n value = XSD_BUILTIN_TYPES[primitive_type.local_name or 'anyType'].value\n except KeyError:\n value = XSD_BUILTIN_TYPES['anyType'].value\n\n if isinstance(schema_item, AttributeNode):\n return TypedAttribute(schema_item, value)\n return TypedElement(schema_item, value)"
]
| [
"0.65096617",
"0.63164973",
"0.6119396",
"0.60405695",
"0.58915716",
"0.5889611",
"0.58063215",
"0.5773859",
"0.5740706",
"0.57290673",
"0.56869334",
"0.5677395",
"0.5620335",
"0.56038564",
"0.5588101",
"0.5586749",
"0.55687493",
"0.5541297",
"0.55256647",
"0.54847336",
"0.54161245",
"0.54161245",
"0.5409099",
"0.53978133",
"0.5381906",
"0.5336749",
"0.5314492",
"0.5305828",
"0.5294839",
"0.52512527"
]
| 0.7815782 | 0 |
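Illustrative note (not part of the dataset): the `validate_schema()` record above checks that two attribute schemas share the same class and the same `name`. A hedged sketch of that behavior, under the same `eta.core.data` assumption as the earlier example:

```python
# Sketch only -- same assumptions as the previous example (import path and error
# type are assumptions; the behavior follows the validate_schema() record above).
import eta.core.data as etad

s1 = etad.CategoricalAttributeSchema("weather", categories={"sunny"})
s2 = etad.CategoricalAttributeSchema("weather", categories={"sunny", "rainy"})
s3 = etad.CategoricalAttributeSchema("season", categories={"summer"})

s1.validate_schema(s2)  # same class and same name -> no error

try:
    s1.validate_schema(s3)  # names differ -> raises
except etad.AttributeSchemaError as e:
    print(e)
```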
Validates that the Attribute is of the correct class. | def validate_type(self, attr):
if not isinstance(attr, self._attr_cls):
raise AttributeSchemaError(
"Expected attribute '%s' to have type '%s'; found '%s'"
% (attr.name, self.type, etau.get_class_name(attr))
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate_attribute(self, attr):\n self.attrs.validate_attribute(attr)",
"def validate_attribute(self, attr):\n self.validate(attr)",
"def is_valid(self, attribute: Attribute) -> bool:\n return self.get_data_type() == attribute.type",
"def validate_attribute(self, attr):\n self.validate_attribute_name(attr.name)\n self.schema[attr.name].validate_attribute(attr)",
"def is_valid_type(self, attr: Optional[str] = None) -> bool:\n try:\n self.validate_type(attr)\n except TypeError:\n return False\n return True",
"def validate_type(self: BaseType, attr: Optional[str] = None) -> BaseType:\n if attr is None:\n for attribute in self._attributes:\n self._validate_attr_type(attribute)\n else:\n self._validate_attr_type(attr)\n return self",
"def validate_instance(instance: Any) -> Any:\n attr.validate(instance)",
"def _validability(self, ability):\n return isinstance(ability, AttributeAbility)",
"def validate_attributes(self, attrs):\n self.attrs.validate(attrs)",
"def __class_validation(cls):\n\n # check if this class is a subClass of Model\n if not issubclass(cls, db.Model):\n raise AttributeError(cls.__name__ + \" is not subclass of \" + db.Model.__name__)",
"def check_yaml_tag(cls, subcls, yaml_tag):\n if not yaml_tag.endswith('Class'):\n raise ValueError\n super().check_yaml_tag(subcls, yaml_tag)",
"def validate(self, attr):\n if attr.name != self.name:\n raise AttributeSchemaError(\n \"Expected name '%s'; found '%s'\" % (self.name, attr.name)\n )\n\n self.validate_type(attr)\n\n if not self.is_valid_value(attr.value):\n raise AttributeSchemaError(\n \"Value '%s' of attribute '%s' is not allowed by the \"\n \"schema \" % (attr.value, attr.name)\n )",
"def is_valid_attribute(self, attr):\n try:\n self.validate_attribute(attr)\n return True\n except etal.LabelsSchemaError:\n return False",
"def is_valid_attribute(self, attr):\n try:\n self.validate_attribute(attr)\n return True\n except etal.LabelsSchemaError:\n return False",
"def is_valid_attribute(self, attr):\n return self.is_valid(attr)",
"def validate(self: BaseType, attr: Optional[str] = None) -> BaseType:\n if attr is None:\n for attribute in self._attributes:\n self._validate(attribute)\n else:\n self._validate(attr)\n return self",
"def check(self, description: Description) -> bool:\n # if the name of the attribute is not present, return false.\n name = self.attribute_name\n if name not in description.values:\n return False\n\n # if the type of the value is different from the type of the attribute, return false.\n value = description.values[name]\n if type(self.constraint_type.value) in {list, tuple, set} and not isinstance(\n value, type(next(iter(self.constraint_type.value)))\n ):\n return False\n if type(self.constraint_type.value) not in {\n list,\n tuple,\n set,\n } and not isinstance(value, type(self.constraint_type.value)):\n return False\n\n # dispatch the check to the right implementation for the concrete constraint type.\n return self.constraint_type.check(value)",
"def check_param(cls, key, val) -> None:\n\n # Check if attribute is valid for this resource at all\n if key not in cls._Attributes:\n raise TypeError(\"Unexpected attribute '{}' for resource '{}'\".format(\n key,\n cls))\n\n Attribute = cls._Attributes[key]\n\n # Check if attribute is null and is allowed to be null\n if Attribute['optional'] and val is None:\n return\n\n # Check if attribute has correct type\n if Attribute['list']:\n msg = \"Attribute '{}' of resource {} has to be of type list of '{}'\".format(\n key,\n cls,\n Attribute['type'])\n if not isinstance(val, list):\n raise TypeError(msg)\n for item in val:\n if not isinstance(item, Attribute['type']):\n raise TypeError(msg)\n else:\n msg = \"Attribute '{}' of resource {} has to be of type '{}'\".format(\n key,\n cls,\n Attribute['type'])\n if not isinstance(val, Attribute['type']):\n raise TypeError(msg)\n\n # Check all checks\n if 'checks' in Attribute:\n msg = \"Illegal value '{}' for attribute '{}' of resource {}\".format(\n val,\n key,\n cls)\n for value_check in Attribute['checks']:\n if value_check(cls, val) is False:\n raise ValueError(msg)",
"def check_attributes(self):\n self.assertEqual(type(self.amenity_1.name), str)",
"def validate(self, attr=None, notattr=None):\n\n if attr is not None:\n specified_attrs = {attr: self.attributes[attr]}\n else:\n specified_attrs = self.attributes\n\n for attr, attr_structure in specified_attrs.items():\n if notattr is not None and attr is notattr:\n continue\n\n attrval = getattr(self, attr)\n if attrval is None or attrval == {}:\n continue\n\n attr_schema = attr_structure['schema']\n validatedattrval = attr_schema.validate(attrval)\n setattr(self, attr, validatedattrval)",
"def validate(self):\n self._check_type()",
"def validate(self):\n if hasattr(self, 'iri'):\n assert self.iri != self.parentClass, f'{self} iri and subClassOf match! {self.iri}'\n else:\n pass # TODO do we the class_label?",
"def test_attribute():\n params = dict(name=\"test\", type_=str, is_required=True)\n\n assert Attribute(**params) == Attribute(**params)\n assert Attribute(**params) is not None\n assert Attribute(**params) != Attribute(name=\"another\", type_=int, is_required=True)\n assert (\n str(Attribute(**params))\n == \"Attribute(name=test,type=<class 'str'>,is_required=True)\"\n )",
"def test_attr_cls(self):\n self.assertEqual(hasattr(self.review, \"text\"), True)\n self.assertEqual(hasattr(self.review, \"place_id\"), True)\n self.assertEqual(hasattr(self.review, \"user_id\"), True)",
"def get_attribute_class(self, attr_name):\n return self.attrs.get_attribute_class(attr_name)",
"def check_class_in_element():\n nonlocal class_not_expected\n result = []\n expected_class_ls = expected_class.split(\" \")\n actual_class = element.get_attribute(\"class\")\n for class_ in expected_class_ls:\n for element_class_ in actual_class.split(\" \"):\n if element_class_ == class_:\n result.append(element)\n if len(result) == len(expected_class_ls):\n return element\n if class_not_expected is None:\n class_not_expected = actual_class\n return False",
"def isAttribute(self, p_int): # real signature unknown; restored from __doc__\n return False",
"def validate(self, attrs: dict):\n attr_types_set = {attr.lower() for attr in attrs.keys()}\n\n missing_required = self.required_attrs - attr_types_set\n if missing_required:\n missing_required = ', '.join(missing_required)\n raise SchemaValidationError(f'Missing required attributes: {missing_required}')\n\n not_required = attr_types_set - self.required_attrs\n not_allowed = not_required - self.allowed_attrs\n if not_allowed:\n not_allowed = ', '.join(not_allowed)\n raise SchemaValidationError(f'Attribute types are not allowed: {not_allowed}')\n\n self.attr_type_validate(attrs)",
"def check_class(instance, type):\n\tif not issubclass(instance, type):\n\t\traise TypeError('Subclass expected type {0}, but got: {1}', type(type), type(instance))",
"def require_attribute_value(\n self, attribute: str,\n value: Union[int, str, float, bool, None]) -> None:\n self.require_mapping()\n found = False\n for key_node, value_node in self.yaml_node.value:\n if (key_node.tag == 'tag:yaml.org,2002:str'\n and key_node.value == attribute):\n found = True\n node = Node(value_node)\n if not node.is_scalar(type(value)):\n raise RecognitionError(\n ('Incorrect attribute type where value {}'\n ' of type {} was required').format(\n value, type(value)))\n if node.get_value() != value:\n raise RecognitionError((\n 'Incorrect attribute value {} where {} was required'\n ).format(value_node.value, value))\n\n if not found:\n raise RecognitionError(\n 'Required key \"{}\" not found'.format(attribute))"
]
| [
"0.70458794",
"0.69427705",
"0.66600865",
"0.6646065",
"0.6621674",
"0.654256",
"0.6537403",
"0.6460707",
"0.63829124",
"0.6350043",
"0.6295183",
"0.6271765",
"0.62120336",
"0.62120336",
"0.6199839",
"0.61158985",
"0.6080903",
"0.6077219",
"0.6073006",
"0.60484755",
"0.6001563",
"0.5994265",
"0.5966924",
"0.5916735",
"0.59150046",
"0.5907377",
"0.58966213",
"0.5892386",
"0.58760196",
"0.5865531"
]
| 0.7480556 | 0 |
Validates that the Attribute is compliant with the schema. | def validate(self, attr):
if attr.name != self.name:
raise AttributeSchemaError(
"Expected name '%s'; found '%s'" % (self.name, attr.name)
)
self.validate_type(attr)
if not self.is_valid_value(attr.value):
raise AttributeSchemaError(
"Value '%s' of attribute '%s' is not allowed by the "
"schema " % (attr.value, attr.name)
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate_attribute(self, attr):\n self.validate_attribute_name(attr.name)\n self.schema[attr.name].validate_attribute(attr)",
"def validate_attribute(self, attr):\n self.validate(attr)",
"def validate_attribute(self, attr):\n self.attrs.validate_attribute(attr)",
"def validate_attribute_name(self, name):\n if not self.has_attribute(name):\n raise AttributeContainerSchemaError(\n \"Attribute '%s' is not allowed by the schema\" % name\n )",
"def is_valid_attribute(self, attr):\n try:\n self.validate_attribute(attr)\n return True\n except etal.LabelsSchemaError:\n return False",
"def is_valid_attribute(self, attr):\n try:\n self.validate_attribute(attr)\n return True\n except etal.LabelsSchemaError:\n return False",
"def validate(self, attr=None, notattr=None):\n\n if attr is not None:\n specified_attrs = {attr: self.attributes[attr]}\n else:\n specified_attrs = self.attributes\n\n for attr, attr_structure in specified_attrs.items():\n if notattr is not None and attr is notattr:\n continue\n\n attrval = getattr(self, attr)\n if attrval is None or attrval == {}:\n continue\n\n attr_schema = attr_structure['schema']\n validatedattrval = attr_schema.validate(attrval)\n setattr(self, attr, validatedattrval)",
"def validate_attributes(self, attrs):\n self.attrs.validate(attrs)",
"def validate_subset_of_schema(self, schema):\n self.validate_schema_type(schema)\n\n if self.name != schema.name:\n raise AttributeSchemaError(\n \"Expected name '%s'; found '%s'\" % (schema.name, self.name)\n )\n\n if self.exclusive != schema.exclusive:\n raise AttributeSchemaError(\n \"Expected exclusive '%s' for attribute '%s'; found '%s'\"\n % (schema.exclusive, self.name, self.exclusive)\n )\n\n if self.default != schema.default:\n raise AttributeSchemaError(\n \"Expected default '%s' for attribute '%s'; found '%s'\"\n % (schema.default, self.name, self.default)\n )",
"def validate_type(self, attr):\n if not isinstance(attr, self._attr_cls):\n raise AttributeSchemaError(\n \"Expected attribute '%s' to have type '%s'; found '%s'\"\n % (attr.name, self.type, etau.get_class_name(attr))\n )",
"def validate_schema(self, schema):\n if type(schema) is not type(self):\n raise AttributeSchemaError(\n \"Expected schema to have type '%s'; found '%s'\"\n % (type(self), type(schema))\n )\n\n if schema.name != self.name:\n raise AttributeSchemaError(\n \"Expected schema to have name '%s'; found '%s'\"\n % (self.name, schema.name)\n )",
"def _validate(self):\n fields, schema = self.__dict__, self._def.default\n extra_fields = fields.viewkeys() - schema.viewkeys()\n if len(extra_fields) > 0:\n raise AttributeError('Fields found that are not in the schema: %r' % (list(extra_fields)))\n for key in fields.iterkeys():\n if type(fields[key]) is not type(schema[key]):\n raise AttributeError('Invalid %s for field \"%s\", should be %s' %\n (type(fields[key]), key, type(schema[key])))",
"def validate(self, attrs: dict):\n attr_types_set = {attr.lower() for attr in attrs.keys()}\n\n missing_required = self.required_attrs - attr_types_set\n if missing_required:\n missing_required = ', '.join(missing_required)\n raise SchemaValidationError(f'Missing required attributes: {missing_required}')\n\n not_required = attr_types_set - self.required_attrs\n not_allowed = not_required - self.allowed_attrs\n if not_allowed:\n not_allowed = ', '.join(not_allowed)\n raise SchemaValidationError(f'Attribute types are not allowed: {not_allowed}')\n\n self.attr_type_validate(attrs)",
"def assert_valid_attribute(self, name):\n if name.startswith('_'):\n return\n self.assert_known_field(name)",
"def is_valid_attribute(self, attr):\n return self.is_valid(attr)",
"def is_valid(self, attribute: Attribute) -> bool:\n return self.get_data_type() == attribute.type",
"def validateAttribute(self, attributeName):\n if (not attributeName in self._attributes):\n raise pcssErrors.PcssGlobalException(\"Error: attempted to set attribute %s which is not a valid pfa attribute\" % attributeName)",
"def is_valid(self, attr: Optional[str] = None) -> bool:\n try:\n self.validate(attr)\n except (TypeError, ValueError):\n return False\n return True",
"def isValidForSchema(schema):\n\n return True",
"def validate_subset_of_schema(self, schema):\n self.validate_schema_type(schema)\n\n for name, attr_schema in iteritems(self.schema):\n if not schema.has_attribute(name):\n raise AttributeContainerSchemaError(\n \"Attribute '%s' does not appear in schema\" % name\n )\n\n other_attr_schema = schema.get_attribute_schema(name)\n attr_schema.validate_subset_of_schema(other_attr_schema)",
"def validated() -> Any:\n return attr.s(slots=True, kw_only=True, eq=False)",
"def test_validate_schema():\n data = {\n 'business': {\n 'cacheId': 1,\n 'foundingDate': '2007-04-08T00:00:00+00:00',\n 'identifier': 'CP1234567',\n 'legalName': 'legal name CP1234567'\n },\n }\n\n is_valid, _ = validate(data, 'business', validate_schema=True)\n\n assert is_valid",
"def is_valid_attribute_name(self, name):\n try:\n self.validate_attribute_name(name)\n return True\n except etal.LabelsSchemaError:\n return False",
"def validate(self: BaseType, attr: Optional[str] = None) -> BaseType:\n if attr is None:\n for attribute in self._attributes:\n self._validate(attribute)\n else:\n self._validate(attr)\n return self",
"def check_param(cls, key, val) -> None:\n\n # Check if attribute is valid for this resource at all\n if key not in cls._Attributes:\n raise TypeError(\"Unexpected attribute '{}' for resource '{}'\".format(\n key,\n cls))\n\n Attribute = cls._Attributes[key]\n\n # Check if attribute is null and is allowed to be null\n if Attribute['optional'] and val is None:\n return\n\n # Check if attribute has correct type\n if Attribute['list']:\n msg = \"Attribute '{}' of resource {} has to be of type list of '{}'\".format(\n key,\n cls,\n Attribute['type'])\n if not isinstance(val, list):\n raise TypeError(msg)\n for item in val:\n if not isinstance(item, Attribute['type']):\n raise TypeError(msg)\n else:\n msg = \"Attribute '{}' of resource {} has to be of type '{}'\".format(\n key,\n cls,\n Attribute['type'])\n if not isinstance(val, Attribute['type']):\n raise TypeError(msg)\n\n # Check all checks\n if 'checks' in Attribute:\n msg = \"Illegal value '{}' for attribute '{}' of resource {}\".format(\n val,\n key,\n cls)\n for value_check in Attribute['checks']:\n if value_check(cls, val) is False:\n raise ValueError(msg)",
"def validate_instance(instance: Any) -> Any:\n attr.validate(instance)",
"def validate_field(self, fieldname):\n fieldname = self.__class__.FIELD_ALIAS.get(fieldname, fieldname)\n v = self._data[fieldname]\n t = self._field_or_default_datatype(fieldname, v)\n gfapy.Field._validate_gfa_field(v, t, fieldname)",
"def _check_consistency(self):\n # check that all required attributes in the schema are contained in the description\n required_attributes = [\n attribute.name\n for attribute in self.data_model.attributes\n if attribute.is_required\n ]\n if not all(\n attribute_name in self.values for attribute_name in required_attributes\n ):\n raise AttributeInconsistencyException(\"Missing required attribute.\")\n\n # check that all values are defined in the data model\n all_attributes = [attribute.name for attribute in self.data_model.attributes]\n if not all(key in all_attributes for key in self.values.keys()):\n raise AttributeInconsistencyException(\n \"Have extra attribute not in data model.\"\n )\n\n # check that each of the provided values are consistent with that specified in the data model\n for key, value in self.values.items():\n attribute = next(\n (\n attribute\n for attribute in self.data_model.attributes\n if attribute.name == key\n ),\n None,\n )\n if not isinstance(value, attribute.type):\n # values does not match type in data model\n raise AttributeInconsistencyException(\n \"Attribute {} has incorrect type: {}\".format(\n attribute.name, attribute.type\n )\n )\n if not type(value) in ALLOWED_ATTRIBUTE_TYPES:\n # value type matches data model, but it is not an allowed type\n raise AttributeInconsistencyException(\n \"Attribute {} has unallowed type: {}. Allowed types: {}\".format(\n attribute.name, type(value), ALLOWED_ATTRIBUTE_TYPES,\n )\n )",
"def validate(self, attrs):\n # Validate attributes\n for attr in attrs:\n self.validate_attribute(attr)\n\n # Enforce attribute exclusivity, if necessary\n if self.has_exclusive_attributes:\n counts = attrs.get_attribute_counts()\n for name, count in iteritems(counts):\n if count > 1 and self.is_exclusive_attribute(name):\n raise AttributeContainerSchemaError(\n \"Attribute '%s' is exclusive but appears %d times in \"\n \"this container\" % (name, count)\n )",
"def is_schema_valid(self, schema):\n for k, v in schema.items():\n if v[0] == \"var_len\":\n assert len(v) == 2\n assert v[1] in TF_VALUE\n\n if v[0] == \"fixed_len\":\n assert len(v) == 3\n assert v[1] in TF_VALUE\n assert isinstance(v[2], list)"
]
| [
"0.84634745",
"0.8003326",
"0.79975826",
"0.74881256",
"0.73158926",
"0.73158926",
"0.7129263",
"0.70151407",
"0.6967542",
"0.6961131",
"0.6935066",
"0.69347423",
"0.69187576",
"0.682446",
"0.6820103",
"0.67508584",
"0.6711849",
"0.6581434",
"0.65688425",
"0.6534537",
"0.64808375",
"0.6474006",
"0.6449837",
"0.64272094",
"0.6381021",
"0.63463074",
"0.6345787",
"0.6344409",
"0.6307085",
"0.62541753"
]
| 0.8203012 | 1 |
Validates that this schema is a subset of the given schema. | def validate_subset_of_schema(self, schema):
self.validate_schema_type(schema)
if self.name != schema.name:
raise AttributeSchemaError(
"Expected name '%s'; found '%s'" % (schema.name, self.name)
)
if self.exclusive != schema.exclusive:
raise AttributeSchemaError(
"Expected exclusive '%s' for attribute '%s'; found '%s'"
% (schema.exclusive, self.name, self.exclusive)
)
if self.default != schema.default:
raise AttributeSchemaError(
"Expected default '%s' for attribute '%s'; found '%s'"
% (schema.default, self.name, self.default)
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate_subset_of_schema(self, schema):\n self.validate_schema_type(schema)\n\n for name, attr_schema in iteritems(self.schema):\n if not schema.has_attribute(name):\n raise AttributeContainerSchemaError(\n \"Attribute '%s' does not appear in schema\" % name\n )\n\n other_attr_schema = schema.get_attribute_schema(name)\n attr_schema.validate_subset_of_schema(other_attr_schema)",
"def validate_subset_of_schema(self, schema):\n self.validate_schema_type(schema)\n\n for label, poly_schema in iteritems(self.schema):\n if not schema.has_keypoints_label(label):\n raise KeypointsContainerSchemaError(\n \"Keypoints label '%s' does not appear in schema\" % label\n )\n\n other_keypoints_schema = schema.get_keypoints_schema(label)\n poly_schema.validate_subset_of_schema(other_keypoints_schema)",
"def validate_subset_of_schema(self, schema):\n self.validate_schema_type(schema)\n\n if self.label != schema.label:\n raise KeypointsSchemaError(\n \"Expected keypoints label '%s'; found '%s'\"\n % (schema.label, self.label)\n )\n\n self.attrs.validate_subset_of_schema(schema.attrs)",
"def validate_subset_of_schema(self, schema):\n super(BooleanAttributeSchema, self).validate_subset_of_schema(schema)\n\n if not self.values.issubset(schema.values):\n raise AttributeSchemaError(\n \"Values %s are not a subset of %s\"\n % (self.values, schema.values)\n )",
"def validate_subset_of_schema(self, schema):\n super(CategoricalAttributeSchema, self).validate_subset_of_schema(\n schema\n )\n\n if not self.categories.issubset(schema.categories):\n raise AttributeSchemaError(\n \"Categories %s are not a subset of %s\"\n % (self.categories, schema.categories)\n )",
"def validate_subset_of_schema(self, schema):\n super(NumericAttributeSchema, self).validate_subset_of_schema(schema)\n\n if self.range and (\n not schema.range\n or self.range[0] < schema.range[0]\n or self.range[1] > schema.range[1]\n ):\n raise AttributeSchemaError(\n \"Range %s is not a subset of %s\" % (self.range, schema.range)\n )",
"def compatibleSchema(self,\n schema: schemaconverter.TDXSchema,\n raise_error: bool = True\n ) -> bool:\n db_tdx_schema = self.tdx_schema\n # see https://stackoverflow.com/a/41579450/10149169\n is_subset = db_tdx_schema.items() <= schema.items()\n if not is_subset and raise_error:\n raise ValueError((\n \"The given database schema is not compatible with the\"\n \" existing database schema. The given schema was {}\"\n \" but the existing schema was {}\").format(\n schema, db_tdx_schema))\n return is_subset",
"def isValidForSchema(schema):\n\n return True",
"def validate_full_schema(self):\n #self.check_duplicate_labels()\n for record in self.extension_schema['schema']['@graph']:\n #self.check_whether_atid_and_label_match(record)\n if record['@type'] == \"rdfs:Class\":\n self.validate_class_schema(record)\n #self.validate_class_label(record[\"@id\"])\n self.validate_validation_field(record)\n elif record['@type'] == \"rdf:Property\":\n self.validate_property_schema(record)\n #self.validate_property_label(record[\"@id\"])\n #self.validate_domainIncludes_field(record[\"http://schema.org/domainIncludes\"])\n #self.validate_rangeIncludes_field(record[\"http://schema.org/rangeIncludes\"])\n #else:\n # raise ValueError('wrong @type value found: {}'.format(record))",
"def is_subset(self, other):",
"def is_strict_subset(self, other):\n return self.is_subset(other) and self != other",
"def is_proper_subset(self, other):\n if isinstance(other, Set):\n return self != other and self.is_subset(other)\n else:\n raise ValueError(\"Unknown argument '%s'\" % other)",
"def is_schema_valid(self, schema):\n for k, v in schema.items():\n if v[0] == \"var_len\":\n assert len(v) == 2\n assert v[1] in TF_VALUE\n\n if v[0] == \"fixed_len\":\n assert len(v) == 3\n assert v[1] in TF_VALUE\n assert isinstance(v[2], list)",
"def is_proper_subset(self, other):\n if not isinstance(other, SetPy):\n raise TypeError(\"Can only be proper subset of another SetPy\")\n return self.is_subset(other) and not self == other",
"def check_consistency(self) -> 'Schema':\n errors = []\n fields = self.__fields__\n for k, v in fields.items():\n _, err = v.validate(getattr(self, k), fields, loc=k)\n if err:\n errors.append(err)\n if errors:\n raise ValidationError(errors, self.__class__)\n return self",
"def _validate_subsets(self, subsets: Sequence[str]) -> Sequence[str]:\n if not subsets:\n raise ValueError(\"no subsets specified\")\n for subset in subsets:\n if subset not in self.data_files.keys():\n raise ValueError(f\"{subset} is not valid\")\n return subsets",
"def validate_schema(self, data, **kwargs):\n if \"role\" not in data and \"visible\" not in data:\n raise ValidationError(_(\"Missing fields 'role' and/or 'visible'.\"))",
"def _verify_schema(schema):\n assert type(schema) in [dict, tuple], f'Expected a dict or a tuple but got {type(schema)}'\n if isinstance(schema, tuple):\n assert len(schema) == 2, f'Expected a tuple with length 2 but got length {len(schema)}'\n if schema[1] is not None:\n assert isinstance(schema[1], schema[0]), f'{str(schema[1])} does not have expected type {str(schema)}'\n elif isinstance(schema, dict):\n for sub_schema in schema.values():\n _verify_schema(sub_schema)",
"def validate_json(schema, doc):\n is_invalid = set(doc).difference(set(schema))\n if is_invalid:\n return False\n return True",
"def filter_by_schema(self, schema):\n pass",
"def test_schema_completeness_validation_valid_input(self):\n for complete_schema in list_of_schema_inputs:\n validate_json_schema_completeness(complete_schema)\n\n assert True",
"def validate_schema(self, schema):\n json_schema_path = os.path.join(_ROOT, 'data', 'schema.json')\n json_schema = load_json_or_yaml(json_schema_path)\n return validate(schema, json_schema)",
"def subfields_all(verifield, required):\n for req_key, req_val in required.items():\n if getitem(verifield, req_key, '') != req_val:\n return False\n return True",
"def _validate_against_schema(self, strand, data):\n schema = self._get_schema(strand)\n\n try:\n jsonschema_validate(instance=data, schema=schema)\n logger.debug(\"Validated %s against schema\", strand)\n\n except ValidationError as e:\n raise exceptions.invalid_contents_map[strand](str(e))",
"def check_validity(self):\n if len(self.constraints) < 2: # pragma: nocover\n raise ValueError(\n \"Invalid input value for type '{}': number of \"\n \"subexpression must be at least 2.\".format(type(self).__name__)\n )\n for constraint in self.constraints:\n constraint.check_validity()",
"def check_validity(self):\n if len(self.constraints) < 2: # pragma: nocover\n raise ValueError(\n \"Invalid input value for type '{}': number of \"\n \"subexpression must be at least 2.\".format(type(self).__name__)\n )\n for constraint in self.constraints:\n constraint.check_validity()",
"def validate(self):\n self._validate_time_index()\n self._validate_num_profiles()\n self._validate_merge_col_exists()\n self._validate_unique_merge_col()\n self._validate_merge_col_overlaps()",
"def is_subset(self, other):\n # we want to know if some set, A, is a subset of another set, B\n # go through every element in set A\n for bucket in self.buckets:\n for element in bucket.iterate():\n # if B has an element that A does not, then\n # A is NOT a subset of B\n if not other.contains(element):\n return False\n # if we do not find an element in B that is not in A, then\n # A must be a subset of B\n return True",
"def check_valid_schema(context):\n data = context.response.json()\n validate_schema(data)",
"def is_subset(self, other):\n if not isinstance(other, Set):\n raise ValueError(\"Unknown argument '%s'\" % other)\n\n # Handle the trivial cases\n if self == other:\n return True\n is_empty = self.is_empty\n if is_empty is True:\n return True\n elif fuzzy_not(is_empty) and other.is_empty:\n return False\n if self.is_finite_set is False and other.is_finite_set:\n return False\n\n # Dispatch on subclass rules\n ret = self._eval_is_subset(other)\n if ret is not None:\n return ret\n ret = other._eval_is_superset(self)\n if ret is not None:\n return ret\n\n # Use pairwise rules from multiple dispatch\n from sympy.sets.handlers.issubset import is_subset_sets\n ret = is_subset_sets(self, other)\n if ret is not None:\n return ret\n\n # Fall back on computing the intersection\n # XXX: We shouldn't do this. A query like this should be handled\n # without evaluating new Set objects. It should be the other way round\n # so that the intersect method uses is_subset for evaluation.\n if self.intersect(other) == self:\n return True"
]
| [
"0.81938004",
"0.80990547",
"0.8020877",
"0.79472554",
"0.7687689",
"0.7619746",
"0.6748884",
"0.66119224",
"0.64827746",
"0.6362671",
"0.63433886",
"0.62965935",
"0.6293175",
"0.61685395",
"0.6159616",
"0.6158576",
"0.6087979",
"0.6073577",
"0.5879684",
"0.58497864",
"0.5847108",
"0.5846107",
"0.5833199",
"0.5818039",
"0.5811146",
"0.5811146",
"0.58037823",
"0.579918",
"0.5768558",
"0.57442695"
]
| 0.8153359 | 1 |
Validates that the schema's default value (if any) is compliant with the schema. | def validate_default_value(self):
if self.has_default_value:
if not self.is_valid_value(self.default):
raise AttributeSchemaError(
"Default value '%s' is not compliant with the schema"
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate_default(self, value):\n return self.__validate(value, self.validate_default_element)",
"def validate_default(self, value):\n return self.__validate(value, self.validate_default_element)",
"def validate(self, value):\r\n if value is None:\r\n if self.has_default:\r\n return self.get_default()\r\n elif self.required:\r\n raise ValidationError('{} - None values are not allowed'.format(self.column_name or self.db_field))\r\n return value",
"def validate(self, value):\r\n if value is None:\r\n if self.has_default:\r\n return self.get_default()\r\n elif self.required:\r\n raise ValidationError('{} - None values are not allowed'.format(self.column_name or self.db_field))\r\n return value",
"def test_default_required(self):\n schema = yaml.load(self.yaml_multiple_term, Loader=yaml.FullLoader)\n val = DwcaValidator(schema, error_handler=WhipErrorHandler)\n\n document = {'abundance': 'many'}\n val.validate(document)\n self.assertEqual(val.errors, {'eventDate': ['required field']})\n\n document = {'eventDate': '2018-01-01'}\n val.validate(document)\n self.assertEqual(val.errors, {})",
"def validate_default_element(self, value):\n return self.validate_element(value)",
"def validate_default_element(self, value):\n return self.validate_element(value)",
"def test_validate_has_default(self, args, value):\n sch = scheme.Scheme(*args)\n sch.validate(value)",
"def _validate_default_fields() -> None:\n default_fields = [\n field\n for field in fields\n if field[\"name\"] in DEFAULT_PREDICTIONS_TABLE_FIELDS\n ]\n if len(DEFAULT_PREDICTIONS_TABLE_FIELDS) != len(default_fields):\n raise NotFilledDefaultFields",
"def testNoDefaultSchemata(self):\n self.failUnless('default' not in self.person.schema.getSchemataNames())",
"def test_schema_default_missing_validator_combinations(test_case):\n evaluate_test_cases([test_case])",
"def test_schema_default_missing_validator_openapi():\n converter = ce.ObjectTypeConverter(ce.OAS3TypeConversionDispatcher())\n test_schemas = [\n Mapping,\n Missing,\n Default,\n Validator,\n DefaultMissing,\n DefaultValidator,\n MissingValidator,\n DefaultMissingValidator,\n DefaultDropValidator,\n DefaultDropRequired,\n ]\n for schema in test_schemas:\n converted = converter.convert_type(schema())\n assert converted == schema.schema_expected, f\"Schema for [{schema.__name__}] not as expected\"",
"def has_default_value(self, name):\n return self.get_attribute_schema(name).has_default_value",
"def test_default_error_empty_string(self):\n val = DwcaValidator(yaml.load(self.yaml_string, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'abundance': ''}\n self.assertFalse(val.validate(document))\n self.assertEqual(val.errors,\n {'abundance': ['empty values not allowed']})\n document = {'sex': 'male'}\n self.assertTrue(val.validate(document))\n document = {'sex': 'female'}\n self.assertTrue(val.validate(document))\n document = {'sex': ''}\n self.assertFalse(val.validate(document))",
"def test_default_handling_none(self):\n val = DwcaValidator(yaml.load(self.yaml_string, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'abundance': None}\n val.validate(document)\n self.assertEqual(val.errors,\n {'abundance': ['null value not allowed']})",
"def test_alright_when_required_field_is_missing_but_default_is_given():\n\n model_definition = {'language': {'type': 'fixed',\n 'required': True,\n 'persisted': True,\n 'default': 'portuguese'},\n 'source': {'type': 'list',\n 'required': False,\n 'persisted': True}}\n product1 = {'source': ['Whatever']}\n factory = ProductModelFactory(model_definition)\n factory.build('product1', product1)\n # Ok. No exceptions were raised.",
"def default_values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"default_values\")",
"def test_with_default() -> None:\n soup = generate_case(\"with_default\")\n\n tests.html_schema_doc_asserts.assert_default_values(soup, ['\"Linux\"', '[\"white\", \"blue\"]', \"2\"])",
"def test_empty_allow_explicit(self):\n val = DwcaValidator(yaml.load(self.empty2, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'sex': 'male'}\n self.assertTrue(val.validate(document))\n document = {'sex': 'female'}\n self.assertTrue(val.validate(document))\n document = {'sex': ''}\n self.assertTrue(val.validate(document))",
"def _GetAllowedDefaultValue(\n value_type: pa.DataType,\n default_value_proto: schema_pb2.TensorRepresentation.DefaultValue\n) -> Union[int, float, bytes]:\n kind = default_value_proto.WhichOneof(\"kind\")\n if kind in (\"int_value\", \"uint_value\") and pa.types.is_integer(value_type):\n value = getattr(default_value_proto, kind)\n iinfo = np.iinfo(value_type.to_pandas_dtype())\n if value <= iinfo.max and value >= iinfo.min:\n return value\n else:\n raise ValueError(\"Integer default value out of range: {} is set for a \"\n \"{} column\".format(value, value_type))\n elif kind == \"float_value\" and pa.types.is_floating(value_type):\n return default_value_proto.float_value\n elif kind == \"bytes_value\" and _IsBinaryLike(value_type):\n return default_value_proto.bytes_value\n\n raise ValueError(\n \"Incompatible default value: {} is set for a {} column\".format(\n kind, value_type))",
"def test_boolean_default_values(self):\n true_values = ['True', 'true', 'True', 'YES', 'yes', 'y', 'Y', 'Yes']\n false_values = ['FALSE', 'false', 'False', 'NO', 'no', 'n', 'N', 'No']\n wrong_values = [2, 3, 'FLSE', 'flse', 'NON', 'oui', 'maybe', 'not sure', 't', '1', 1, '0', 0]\n descriptor = self.base_field_descriptor\n descriptor['type'] = 'boolean'\n # only 'default' format\n descriptor['format'] = 'default'\n f = SchemaField(descriptor)\n for v in true_values:\n self.assertTrue(f.cast(v))\n for v in false_values:\n self.assertFalse(f.cast(v))\n for v in wrong_values:\n with self.assertRaises(Exception):\n f.cast(v)",
"def has_default_value(self):\n return self.default is not None",
"def test_empty_required_only(self):\n val = DwcaValidator(yaml.load(self.empty4, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'required_to_be_empty': ''}\n self.assertTrue(val.validate(document))\n document = {'required_to_be_empty': 'tdwg'}\n self.assertFalse(val.validate(document))\n self.assertEqual(val.errors,\n {'required_to_be_empty': ['unallowed value tdwg']})",
"def _default_value(self):\n return None",
"def test_should_return_none_for_defaults(self):\r\n default_spec = {\r\n 'type': 'defaults',\r\n 'spec_type': 'property',\r\n 'functional': True\r\n }\r\n\r\n assert 'property' not in self.spec_parser._defaults\r\n assert self.spec_parser.parse_statement(default_spec) is None\r\n assert 'property' in self.spec_parser._defaults\r\n assert 'functional' in self.spec_parser._defaults['property']._values",
"def test_oneof_optional_default_with_nested_required():\n class MappingSchema(ce.ExtendedMappingSchema):\n value = ce.ExtendedSchemaNode(ce.ExtendedInteger()) # strict int, no auto convert to str\n\n class OneOfDifferentNested(ce.OneOfKeywordSchema):\n _one_of = [\n ce.ExtendedSchemaNode(ce.ExtendedString()), # strict string, no auto convert from int\n MappingSchema()\n ]\n\n class OneOfRequiredDefaultStr(ce.ExtendedMappingSchema):\n field = OneOfDifferentNested(default=\"1\") # match first schema of OneOf\n\n class OneOfRequiredDefaultMap(ce.ExtendedMappingSchema):\n field = OneOfDifferentNested(default={\"value\": 1}) # match second schema of OneOf\n\n class OneOfMissingDropDefaultStr(ce.ExtendedMappingSchema):\n field = OneOfDifferentNested(default=\"1\", missing=colander.drop)\n\n class OneOfMissingDropDefaultMap(ce.ExtendedMappingSchema):\n field = OneOfDifferentNested(default={\"value\": 1}, missing=colander.drop)\n\n class OneOfMissingNullDefaultStr(ce.ExtendedMappingSchema):\n field = OneOfDifferentNested(default=\"1\", missing=colander.null)\n\n class OneOfMissingNullDefaultMap(ce.ExtendedMappingSchema):\n field = OneOfDifferentNested(default={\"value\": 1}, missing=colander.null)\n\n class OneOfMissingNullDefaultNull(ce.ExtendedMappingSchema):\n field = OneOfDifferentNested(default=colander.null, missing=colander.null)\n\n evaluate_test_cases([\n (OneOfRequiredDefaultStr, {}, {\"field\": \"1\"}),\n (OneOfRequiredDefaultStr, None, colander.Invalid), # oneOf itself is required\n (OneOfRequiredDefaultStr, {\"field\": True}, colander.Invalid), # raise because provided is wrong format\n (OneOfRequiredDefaultStr, {\"field\": {}}, colander.Invalid),\n (OneOfRequiredDefaultStr, {\"field\": {\"value\": \"1\"}}, colander.Invalid),\n (OneOfRequiredDefaultStr, {\"field\": {\"value\": 1}}, {\"field\": {\"value\": 1}}),\n (OneOfMissingDropDefaultStr, {\"field\": True}, {}),\n (OneOfMissingDropDefaultStr, {\"field\": 1}, {}),\n (OneOfMissingNullDefaultStr, {}, {\"field\": \"1\"}),\n (OneOfMissingNullDefaultStr, {\"field\": True}, colander.Invalid),\n (OneOfMissingNullDefaultStr, {\"field\": {\"value\": \"1\"}}, colander.Invalid),\n (OneOfMissingNullDefaultStr, {\"field\": {\"value\": 1}}, {\"field\": {\"value\": 1}}),\n (OneOfRequiredDefaultMap, {}, {\"field\": {\"value\": 1}}), # default\n (OneOfRequiredDefaultMap, None, colander.Invalid),\n (OneOfRequiredDefaultMap, {\"field\": True}, colander.Invalid),\n (OneOfRequiredDefaultMap, {\"field\": {}}, colander.Invalid),\n (OneOfRequiredDefaultMap, {\"field\": {\"value\": \"1\"}}, colander.Invalid),\n (OneOfRequiredDefaultMap, {\"field\": {\"value\": 1}}, {\"field\": {\"value\": 1}}),\n (OneOfRequiredDefaultMap, {}, {\"field\": {\"value\": 1}}), # default\n (OneOfMissingDropDefaultMap, {\"field\": True}, {}),\n (OneOfMissingDropDefaultMap, {\"field\": 1}, {}),\n (OneOfMissingNullDefaultMap, {}, {\"field\": {\"value\": 1}}),\n (OneOfMissingNullDefaultMap, {\"field\": True}, colander.Invalid),\n (OneOfMissingNullDefaultMap, {\"field\": {\"value\": \"1\"}}, colander.Invalid),\n (OneOfMissingNullDefaultMap, {\"field\": {\"value\": 1}}, {\"field\": {\"value\": 1}}),\n (OneOfMissingNullDefaultNull, {}, {}),\n (OneOfMissingNullDefaultNull, {\"field\": True}, colander.Invalid),\n (OneOfMissingNullDefaultNull, {\"field\": {\"value\": \"1\"}}, colander.Invalid),\n (OneOfMissingNullDefaultNull, {\"field\": \"1\"}, {\"field\": \"1\"}),\n (OneOfMissingNullDefaultNull, {\"field\": {\"value\": 1}}, {\"field\": {\"value\": 1}}),\n ])",
"def test_default_zero_fields_validate(self):\r\n it = self.IntegerTest()\r\n it.validate()",
"def is_required(self):\r\n return self.default == self.NotSpecified",
"def _default_value(self):\n raise NotImplementedError",
"def _update_default(self, default_value):\n if self.type == \"uri_folder\" or self.type == \"uri_file\":\n self.default = default_value\n return\n else:\n if isinstance(default_value, float) and not math.isfinite(default_value):\n # Since nan/inf cannot be stored in the backend, just ignore them.\n # logger.warning(\"Float default value %r is not allowed, ignored.\" % default_value)\n return\n \"\"\"Update provided default values.\n Here we need to make sure the type of default value is allowed or it could be parsed..\n \"\"\"\n if default_value is not None and not isinstance(default_value, self._allowed_types):\n try:\n default_value = self._parse(default_value)\n except Exception as e:\n if self.name is None:\n msg = \"Default value of %s Input cannot be parsed, got '%s', type = %s.\" % (\n self.type,\n default_value,\n type(default_value),\n )\n else:\n msg = \"Default value of %s Input '%s' cannot be parsed, got '%s', type = %s.\" % (\n self.type,\n self.name,\n default_value,\n type(default_value),\n )\n raise MldesignerComponentDefiningError(cause=msg) from e\n self.default = default_value"
]
| [
"0.76519585",
"0.75321203",
"0.6860783",
"0.6860783",
"0.680885",
"0.6804212",
"0.675413",
"0.668234",
"0.6521006",
"0.6421395",
"0.640982",
"0.63684344",
"0.6358686",
"0.6273223",
"0.62184685",
"0.6020462",
"0.6019303",
"0.60110074",
"0.6001693",
"0.59867406",
"0.59839046",
"0.5982064",
"0.5956657",
"0.5955379",
"0.59485614",
"0.5945987",
"0.5917422",
"0.58896464",
"0.58713186",
"0.58488965"
]
| 0.8765054 | 0 |
Constructs an AttributeSchema from a JSON dictionary. Note that this function reflectively parses the schema type from the dictionary, so subclasses do not need to implement this method. | def from_dict(cls, d):
attr_cls = etau.get_class(d["type"])
schema_cls = attr_cls.get_schema_cls()
name = d["name"]
exclusive = d.get("exclusive", False)
default = d.get("default", None)
return schema_cls(
name,
exclusive=exclusive,
default=default,
**schema_cls.get_kwargs(d)
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def from_dict(cls, d):\n schema = d.get(\"schema\", None)\n if schema is not None:\n schema = {\n attr_name: AttributeSchema.from_dict(asd)\n for attr_name, asd in iteritems(schema)\n }\n\n return cls(schema=schema)",
"def from_json(cls, json_string:str):\n data = json.loads(json_string)\n validate(data, schema)\n instance = cls.from_dict(data)\n return instance",
"def from_dict(cls: Type[BaseType], dict_: Mapping) -> BaseType:\n kwargs = {}\n for attr, attr_type in cls._attributes.items():\n value = dict_.get(attr)\n if value is None:\n if attr in cls._optional_attributes:\n continue\n raise TypeError(\"`{}` must not be None.\".format(attr))\n if isclass(attr_type) and issubclass(attr_type, Base):\n if attr in cls._list_attributes:\n kwargs[attr] = [attr_type.from_dict(v) for v in value]\n else:\n kwargs[attr] = attr_type.from_dict(value)\n else:\n kwargs[attr] = value\n return cls(**kwargs)",
"def parse(json_string):\n try:\n json_data = json.loads(json_string)\n except Exception as exn:\n raise SchemaParseException(\n 'Error parsing schema from JSON: %r. '\n 'Error message: %r.'\n % (json_string, exn))\n\n # Initialize the names object\n names = Names()\n\n # construct the Avro Schema object\n return schema_from_json_data(json_data, names)",
"def _dict2schema(dct):\n attrs = dct.copy()\n if MARSHMALLOW_VERSION_INFO[0] < 3:\n\n class Meta(object):\n strict = True\n\n attrs[\"Meta\"] = Meta\n return type(str(\"\"), (ma.Schema,), attrs)",
"def from_dict(cls, d):\n attr_cls = etau.get_class(d[\"type\"])\n return attr_cls._from_dict(d)",
"def schema_from_json_data(json_data, names=None):\n if names is None:\n names = Names()\n\n # Select the appropriate parser based on the JSON data type:\n parser = _JSONDataParserTypeMap.get(type(json_data))\n if parser is None:\n raise SchemaParseException(\n 'Invalid JSON descriptor for an Avro schema: %r.' % json_data)\n return parser(json_data, names=names)",
"def from_dict(cls, _dict: Dict) -> 'Attribute':\n args = {}\n if 'name' in _dict:\n args['name'] = _dict.get('name')\n if 'value' in _dict:\n args['value'] = _dict.get('value')\n return cls(**args)",
"def from_dict(cls, _dict: Dict) -> 'Resource':\n args = {}\n if 'attributes' in _dict:\n args['attributes'] = [Attribute.from_dict(x) for x in _dict.get('attributes')]\n return cls(**args)",
"def unmarshal_dict(data: typing.Dict, t: type) -> t:\n for k, v in data.items():\n attr_name = json_to_python_name(k)\n attr_type = t.__annotations__.get(attr_name)",
"def from_dict(cls, dikt) -> 'SourceSchema':\n return util.deserialize_model(dikt, cls)",
"def from_json_file(cls, json_file:str):\n with open(json_file) as file:\n data = json.load(file)\n validate(data, schema)\n instance = cls.from_dict(data)\n return instance",
"def from_schema(cls, sdict):\n\n table_schema = TableSchema()\n for name, dschema in sdict.items():\n\n schema = ColumnSchema(name=name, **dschema)\n table_schema.add_column(schema)\n\n return table_schema",
"def from_json(data: dict) -> \"Policy\":\n try:\n return PolicySchema().load(data)\n except ValidationError as err:\n raise PolicyCreateError(*err.args)",
"def parse(json_value: Dict[str, Jsonish]) -> ParamSchema:\n return _parse_kwargs(**json_value)",
"def from_dict(cls, dikt) -> 'LightSourceMaterialSchema':\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, note_dict):\n\n missing_properties = set(cls.SERIALIZED_ATTRIBUTE_TYPES.keys()) - set(note_dict.keys())\n if len(missing_properties) > 0:\n raise MissingProperties(\"Some required properties are missing: {}\".format(missing_properties))\n\n for attribute in cls.SERIALIZED_ATTRIBUTE_TYPES:\n if not type(note_dict[attribute]) in cls.SERIALIZED_ATTRIBUTE_TYPES[attribute]:\n raise WrongAttributeType(\"Expected the type of attribute {} to be one of {}; got {}\".format(\n attribute, cls.SERIALIZED_ATTRIBUTE_TYPES[attribute], type(note_dict[attribute])\n ))\n\n for tag in note_dict['tags']:\n if type(tag) != str:\n raise WrongAttributeType(\"Tag '{}' is not a string\".format(tag))\n\n if any(',' in tag for tag in note_dict['tags']):\n raise InvalidTagCharacter(\"Commas (,) are not allowed in tags\")\n\n return cls(\n body = note_dict['body'],\n tags = note_dict['tags'],\n created_at = cls.deserialize_timestamp(note_dict['created_at']),\n modified_at = cls.deserialize_timestamp(note_dict['modified_at']),\n id = note_dict['id']\n )",
"def load_from_dict(self, dict_):\n for key, value in six.iteritems(dict_):\n setattr(self, util.as_attr(key), value)\n self._check_against_schema()",
"def object_from_key_jsonvalue_dict(clazz, json_value_dict):\n from .documents import DynamicModel\n from .fields import FieldDoesNotExist\n \n obj = clazz()\n for name, value in json_value_dict.items():\n if name in [CLAZZ,DYNAMIC_ATTRIBUTES]:\n continue\n try:\n field = obj._meta.get_field(name) \n \n if field.rel:\n #assert isinstance(field.rel, OneOnOneRelation), \"only OneOf relations allowed here\"\n \n if isinstance( field.rel, OneOnOneRelation):\n attr_value = JsonUnSerializer().unserialize(value, field.rel.to)\n setattr(obj, name, attr_value)\n elif isinstance( field.rel, MapRelation):\n attr_value = JsonUnSerializer().unserializeMap(value, field.rel.to, field.rel.contains_built_in_type)\n setattr(obj, name, attr_value)\n elif isinstance( field.rel, ListRelation):\n attr_value = JsonUnSerializer().unserializeList(value, field.rel.to, field.rel.contains_built_in_type)\n setattr(obj, name, attr_value) \n \n else:\n setattr(obj, name, value)\n except FieldDoesNotExist:\n \"add it as a dynamic field\"\n if issubclass( clazz, DynamicModel):\n child = JsonUnSerializer().unserialize(value)\n obj.add_dynamic_attribute(name, child)\n\n return obj",
"def from_dict(cls, dictionary: Dict[str, Any]):\n return cls(**dictionary)",
"def _from_json_dict(cls, dic):\n # create a new object\n obj = cls.__new__(cls)\n for attr, typ in cls._attrs_to_save.items():\n setattr(obj, attr, typ.to_python(dic[attr]))\n return obj",
"def fromdict(cls, data: Mapping[str, Any]):\n try:\n atlist = AttribList.fromdict(data)\n return cls(device_id=data['device_id'],\n entity_type=atlist.entity_type,\n entity_name=data['entity_name'],\n protocol=data['protocol'],\n static_attributes=atlist.static_attributes,\n attributes=atlist.attributes)\n except (KeyError, TypeError) as err:\n raise ParseError(err=err, obj=data)",
"def init_from_dict(self, d):\n for k, v in d.items():\n # First, keys must be strings, not ints\n if isinstance(k, int):\n k = str(k)\n # Now, assign to the key, handling nested AttrDicts properly\n if isinstance(v, dict):\n self.set_key(k, AttrDict(v))\n elif isinstance(v, list):\n self.set_key(k, [i if not isinstance(i, dict) else AttrDict(i)\n for i in v])\n else:\n self.set_key(k, v)",
"def from_dict(cls, data: Dict[str, any]):\n return cls(**data)",
"def from_json(cls, s):\n\n d = json.loads(s, object_pairs_hook=OrderedDict)\n return cls.from_definition(d)",
"def from_json(cls, s):\n\n d = json.loads(s, object_pairs_hook=OrderedDict)\n return cls.from_definition(d)",
"def from_json(cls, json_str: str):\n\n def read_input(x: dict):\n return TensorSpec.from_json_dict(**x) if x[\"type\"] == \"tensor\" else ColSpec(**x)\n\n return cls([read_input(x) for x in json.loads(json_str)])",
"def from_json(source, schema, path=None, converter=None, json_options=None, **kwargs):\n if not isinstance(schema, XMLSchemaBase):\n raise TypeError(\"An XMLSchema instance required for 'schema' argument: %r\" % schema)\n elif json_options is None:\n json_options = {}\n\n dict_class = kwargs.pop('dict_class', ordered_dict_class)\n object_hook = json_options.pop('object_hook', ordered_dict_class)\n object_pairs_hook = json_options.pop('object_pairs_hook', ordered_dict_class)\n if hasattr(source, 'read'):\n obj = json.load(source, object_hook=object_hook,\n object_pairs_hook=object_pairs_hook, **json_options)\n else:\n obj = json.loads(source, object_hook=object_hook,\n object_pairs_hook=object_pairs_hook, **json_options)\n\n return schema.encode(obj, path=path, converter=converter, dict_class=dict_class, **kwargs)",
"def from_json(cls, tag_json):\n return cls.from_dict(json.loads(tag_json))",
"def from_dict(cls, d):\n ret_obj = AssetClass(d.pop('Name'))\n for child_dict in d.pop('Children', []):\n ret_obj.add_subclass(\n child_dict.pop('Ratio'),\n AssetClass.from_dict(child_dict))\n assert len(d) == 0, f'Extra attributes found: {list(d.keys())}'\n return ret_obj.validate()"
]
| [
"0.7644983",
"0.6328274",
"0.62916005",
"0.62879604",
"0.61576164",
"0.61387366",
"0.5965841",
"0.5962421",
"0.5904961",
"0.58912",
"0.58775073",
"0.58033687",
"0.5785732",
"0.5724698",
"0.57203984",
"0.57162833",
"0.5660964",
"0.5597983",
"0.55465",
"0.55423206",
"0.54708445",
"0.5440361",
"0.54402936",
"0.5439293",
"0.5416724",
"0.5416724",
"0.5413811",
"0.5402078",
"0.53955144",
"0.53656745"
]
| 0.7123541 | 1 |
Creates a CategoricalAttributeSchema instance. | def __init__(self, name, categories=None, exclusive=False, default=None):
super(CategoricalAttributeSchema, self).__init__(
name, exclusive=exclusive, default=default
)
self.categories = set(categories or [])
self.validate_default_value() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_active_schema(cls, attr):\n return cls(attr.name, categories={attr.value})",
"def attributes(self):\n attrs_ = super(CategoricalAttributeSchema, self).attributes()\n attrs_.append(\"categories\")\n return attrs_",
"def test_categorical_constraint():\n categories = [\"Blue\", \"Red\", \"Yellow\"]\n prop = \"Property Color\"\n c = CategoricalConstraint(name=prop, accepted_categories=categories)\n\n mapped_c = c.to_dict()\n\n assert mapped_c[\"type\"] is \"categorical\"\n assert mapped_c[\"name\"] is prop\n assert mapped_c[\"options\"][\"categories\"] is categories",
"def merge_schema(self, schema):\n super(CategoricalAttributeSchema, self).merge_schema(schema)\n self.categories.update(schema.categories)",
"def __init__(\n self,\n name,\n value,\n confidence=None,\n top_k_probs=None,\n constant=False,\n tags=None,\n ):\n super(CategoricalAttribute, self).__init__(\n name, value, confidence=confidence, constant=constant, tags=tags\n )\n self.top_k_probs = top_k_probs",
"def test_categorical_column_validates_categories(self):\n\n categories = 1\n\n with pytest.raises(CitrinationClientError):\n CategoricalColumn(name=self.name, role=self.role, group_by_key=self.group_by_key, categories=categories)\n\n categories = [\"Grey\", 1]\n with pytest.raises(CitrinationClientError):\n CategoricalColumn(name=self.name, role=self.role, group_by_key=self.group_by_key, categories=categories)\n\n categories = [\"Grey\", \"Blue\"]\n CategoricalColumn(name=self.name, role=self.role, group_by_key=self.group_by_key, categories=categories)",
"def create(data):\n \n # create category\n return Category(\n category_id = data['id'],\n name = data['name'])",
"def create_category():\n category = Category(name='testcategory', description=\"\", fee=DEFAULT_FEE)\n category.save()\n return category",
"def proto_category(self):\n categories_set = baker.prepare(Category, _quantity=5)\n return categories_set",
"def proto_category(self):\n categories_set = baker.prepare(Category, _quantity=5)\n return categories_set",
"def proto_category(self):\n categories_set = baker.prepare(Category, _quantity=5)\n return categories_set",
"def set_categorical(self, meta_field):\n self._data[meta_field] = pd.Categorical(self._data[meta_field])",
"def test_0005_create_categories(self):\n self.create_category(name='Test 0060 Workflow Features', description='Test 0060 - Workflow Features')",
"def build_active_schema(cls, attr):\n return cls(attr.name, values={attr.value})",
"def test_categorical_feature():\n\n feature = Categorical(\"abc\")\n\n for element in \"abc\":\n feature.set(element)\n feature.set(\"ignore this\")\n feature.push()\n\n for element in \"abc\":\n getattr(feature, \"set_\" + element)()\n feature.push()\n\n array = feature.array()\n assert array.shape == (6, 3)\n for i, row in enumerate(array):\n assert sum(row) == 1.0 and row[i % 3] == 1.0",
"def validate_subset_of_schema(self, schema):\n super(CategoricalAttributeSchema, self).validate_subset_of_schema(\n schema\n )\n\n if not self.categories.issubset(schema.categories):\n raise AttributeSchemaError(\n \"Categories %s are not a subset of %s\"\n % (self.categories, schema.categories)\n )",
"def create(self, validated_data):\n new_category = SpecificationCategory(name = validated_data.get('name'),\n car = validated_data.get('car'),)\n new_category.save()\n\n return new_category",
"def __create_categorical_col(self, df, columns):\n\n # Temporarily remove tuple such that columns can be checked\n for n, item in enumerate(columns):\n if isinstance(item, tuple):\n name, _ = item\n temporary_columns = columns.copy()\n temporary_columns[n] = name\n\n # Use appropriate var in validation\n if 'temporary_columns' in locals():\n column_set = temporary_columns\n else:\n column_set = columns\n\n\n for n, column in enumerate(columns):\n if type(column) == tuple:\n cat_col, new_col = column\n df[new_col] = df[cat_col]\n column = cat_col\n df[column], uniques = pd.factorize(df[column])\n return df",
"def generate_categories(s, discrete=True, ordered=True, n=10, name='category'):\n\n if discrete:\n s_cat = pd.Series(pd.Categorical(s, ordered=ordered))\n else:\n s_cat = pd.cut(s, n, include_lowest=True, labels=[name+str(x) for x in range(n)])\n\n return s_cat",
"def createCategory(name, user_id):\n c = Category(name=name, user_id=user_id)\n db_session.add(c)\n db_session.commit()\n return c",
"def FE_create_categorical_feature_crosses(dfc, cats):\r\n dfc = copy.deepcopy(dfc)\r\n combos = list(combinations(cats, 2))\r\n for cat1, cat2 in combos:\r\n dfc.loc[:,cat1+'_cross_'+cat2] = dfc[cat1].astype(str)+\" \"+dfc[cat2].astype(str)\r\n return dfc",
"def createCategory(name, user_id):\n c = Category(name=name, user_id=user_id)\n session.add(c)\n session.commit()\n print 'Category \"' + name + '\" created.'\n return c",
"def create_category(name):\n return Category.objects.create(name=name)",
"def categoricals(self):\n return base_64_to_object(self.categorical_hyperparameters_64)",
"def category_table(self):\r\n\r\n self.mycursor.execute('CREATE TABLE IF NOT EXISTS category(\\\r\n CAT_id int PRIMARY KEY AUTO_INCREMENT,\\\r\n CAT_nom VARCHAR(50) UNIQUE)')",
"def _is_categorical(df, field):\n return df[field].dtype.name == 'category'",
"def categorical_column_validator_factory(categories, ignore_missing_vals=False):\n categories = set(categories)\n\n def categorical_validation_fn(x):\n if ignore_missing_vals and pd.isnull(x):\n return True, {}\n return (x in categories), {}\n\n categorical_validation_fn.__doc__ = (\n f\"checks whether values are within this set of values: {categories}\"\n )\n if ignore_missing_vals:\n categorical_validation_fn.__doc__ += \", ignoring nulls\"\n\n return categorical_validation_fn",
"def find_categorical(self, df):\n# print(type(df),df.ndim)\n categorical = [key for key in df.keys() if df.dtypes[key] == np.dtype('O')]\n numeric = [key for key in df.keys() if df.dtypes[key] != np.dtype('O')]\n # correct naive expectations\n actual_categoric = ['MSSubClass']\n numeric = list(set(numeric) - set(actual_categoric))\n categorical = list(set(categorical).union(set(actual_categoric)))\n return categorical",
"def find_categorical(self, df):\n# print(type(df),df.ndim)\n categorical = [key for key in df.keys() if df.dtypes[key] == np.dtype('O')]\n numeric = [key for key in df.keys() if df.dtypes[key] != np.dtype('O')]\n # correct naive expectations\n actual_categoric = ['MSSubClass']\n numeric = list(set(numeric) - set(actual_categoric))\n categorical = list(set(categorical).union(set(actual_categoric)))\n return categorical",
"def test_assign_categorical(curve):\n assert curve.dtypes[0] == 'float'\n curve.dtypes = 'category'\n assert curve.dtypes[0] == 'category'"
]
| [
"0.64069116",
"0.6404652",
"0.5585234",
"0.5555363",
"0.5546677",
"0.5528087",
"0.5460727",
"0.54497313",
"0.5369751",
"0.5369751",
"0.5369751",
"0.53172123",
"0.53150636",
"0.5212389",
"0.5207569",
"0.51688194",
"0.5150824",
"0.5136213",
"0.5080584",
"0.5065041",
"0.50454825",
"0.5038742",
"0.5025194",
"0.5020676",
"0.5010625",
"0.49997228",
"0.49478683",
"0.49185535",
"0.49185535",
"0.48988578"
]
| 0.6922686 | 0 |
Builds a CategoricalAttributeSchema that describes the active schema of the CategoricalAttribute. | def build_active_schema(cls, attr):
return cls(attr.name, categories={attr.value}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def attributes(self):\n attrs_ = super(CategoricalAttributeSchema, self).attributes()\n attrs_.append(\"categories\")\n return attrs_",
"def build_active_schema(cls, attr):\n return cls(attr.name, values={attr.value})",
"def __init__(self, name, categories=None, exclusive=False, default=None):\n super(CategoricalAttributeSchema, self).__init__(\n name, exclusive=exclusive, default=default\n )\n self.categories = set(categories or [])\n self.validate_default_value()",
"def build_active_schema(cls, attrs):\n schema = cls()\n schema.add_attributes(attrs)\n return schema",
"def merge_schema(self, schema):\n super(CategoricalAttributeSchema, self).merge_schema(schema)\n self.categories.update(schema.categories)",
"def build_active_schema(cls, attr):\n return cls(attr.name, range=(attr.value, attr.value))",
"def __build_schema(meta_data):\n \n # Builds the dictionary that represents the schema.\n temporary_dictionary = {'$schema': None, '$id': None, 'title': None, 'type': None, 'properties': []}\n for x in meta_data:\n temporary_dictionary['properties'].append({\n 'name': x,\n 'type': None,\n 'description': None})\n # Creates a new instance of the schema and inserts the dictionary as a json into the field and returns it.\n returned_schema = Schema()\n returned_schema.data = json.dumps(temporary_dictionary)\n return returned_schema",
"def generate_cooccur_schema():\n json_str = json.dumps({'fields': [\n {'name': 'w1', 'type': 'STRING', 'mode': 'NULLABLE'},\n {'name': 'w2', 'type': 'STRING', 'mode': 'NULLABLE'},\n {'name': 'count', 'type': 'INTEGER', 'mode': 'NULLABLE'},\n {'name': 'log_weight', 'type': 'FLOAT', 'mode': 'NULLABLE'},\n {'name': 'ts', 'type': 'TIMESTAMP', 'mode': 'NULLABLE'}]})\n return parse_table_schema_from_json(json_str)",
"def validate_subset_of_schema(self, schema):\n super(CategoricalAttributeSchema, self).validate_subset_of_schema(\n schema\n )\n\n if not self.categories.issubset(schema.categories):\n raise AttributeSchemaError(\n \"Categories %s are not a subset of %s\"\n % (self.categories, schema.categories)\n )",
"def categoricals(self):\n return base_64_to_object(self.categorical_hyperparameters_64)",
"def test_categorical_constraint():\n categories = [\"Blue\", \"Red\", \"Yellow\"]\n prop = \"Property Color\"\n c = CategoricalConstraint(name=prop, accepted_categories=categories)\n\n mapped_c = c.to_dict()\n\n assert mapped_c[\"type\"] is \"categorical\"\n assert mapped_c[\"name\"] is prop\n assert mapped_c[\"options\"][\"categories\"] is categories",
"def convert_categorical(df):\n print(\" --- Converting Categories into binary features.\")\n columns = df.columns\n categorical = [x for x in columns if x.startswith('c_')]\n for col in categorical:\n print(\" ---- Converting: {}\".format(col))\n category_binary = pd.get_dummies(df[col], prefix=col)\n df = pd.concat([df, category_binary], axis=1)\n df = df.drop(categorical, axis=1)\n print(\" --- Finished converting Categories into binary features.\")\n return df",
"def build_categorical_model_fn(model_builder, dataset):\n def model_fn():\n keras_model = model_builder()\n return tff.learning.from_keras_model(\n keras_model,\n input_spec=dataset.element_spec,\n loss=losses.CategoricalCrossentropy(),\n metrics=[metrics.CategoricalCrossentropy(), metrics.CategoricalAccuracy()])\n return model_fn",
"def proto_category(self):\n categories_set = baker.prepare(Category, _quantity=5)\n return categories_set",
"def proto_category(self):\n categories_set = baker.prepare(Category, _quantity=5)\n return categories_set",
"def proto_category(self):\n categories_set = baker.prepare(Category, _quantity=5)\n return categories_set",
"def category_attributes_types(categories, db_info, connection):\n attributes = {}\n for cat in categories:\n attributes[cat] = {}\n category_info = api_category_info(cat, db_info, connection)\n\n attr = {}\n for a in category_info.get(\"attributes\"):\n for key in a:\n attr[key] = a[key]\n\n attributes[cat][\"attributes\"] = {k: d for d, k in attr.items()}\n\n types = category_info.get(\"types\")\n attributes[cat][\"types\"] = {\n attr.get(a): types.get(a) for a in types}\n\n attributes[cat][\"dialogs\"] = category_info.get(\"dialogs\")\n\n return attributes",
"def _dict2schema(dct):\n attrs = dct.copy()\n if MARSHMALLOW_VERSION_INFO[0] < 3:\n\n class Meta(object):\n strict = True\n\n attrs[\"Meta\"] = Meta\n return type(str(\"\"), (ma.Schema,), attrs)",
"def build_categorical_pipeline(self) -> Pipeline:\n pipeline = Pipeline([\n ('extract_data', FunctionTransformer(self.get_categorical_features)),\n ('impute', SimpleImputer(missing_values=np.nan, strategy='median')),\n ('ohe', OneHotEncoder(handle_unknown='ignore', sparse=False))\n ])\n return pipeline",
"def __encode_categorical(self):\n if self.cat_cols is None:\n rows = self.train_df.shape[0]\n rows = 0.2 * rows\n self.cat_cols = []\n for col in self.train_df.columns:\n if col not in self.ord_cols:\n if (\n self.train_df[col].dtype == \"object\"\n and type(self.train_df[col][0]) == \"str\"\n ) and (\n \"$\" in self.train_df[col][0]\n or self.train_df[col].str.contains(\",\").any()\n ):\n self.train_df[col] = (\n self.train_df[col]\n .apply(\n lambda x: x.replace(\"$\", \"\").replace(\",\", \"\")\n )\n .astype(\"float\")\n )\n # elif pd.to_datetime(\n # self.train_df[col], errors=\"coerce\"\n # ).isnull().sum() < 0.7 * len(self.train_df[col]):\n # self.train_df[col] = pd.to_datetime(\n # self.train_df[col], errors=\"coerce\"\n # )\n elif (\n is_numeric_dtype(self.train_df[col])\n or is_string_dtype(self.train_df[col])\n ) and self.train_df[col].dropna().nunique() < rows:\n self.cat_cols.append(col)\n else:\n continue\n\n if self.one_hot:\n self.__encode_one_hot_util()\n else:\n self.__encode_categorical_util()\n return",
"def FE_create_categorical_feature_crosses(dfc, cats):\r\n dfc = copy.deepcopy(dfc)\r\n combos = list(combinations(cats, 2))\r\n for cat1, cat2 in combos:\r\n dfc.loc[:,cat1+'_cross_'+cat2] = dfc[cat1].astype(str)+\" \"+dfc[cat2].astype(str)\r\n return dfc",
"def __init__(\n self,\n name,\n value,\n confidence=None,\n top_k_probs=None,\n constant=False,\n tags=None,\n ):\n super(CategoricalAttribute, self).__init__(\n name, value, confidence=confidence, constant=constant, tags=tags\n )\n self.top_k_probs = top_k_probs",
"def generate_wc_schema():\n json_str = json.dumps({'fields': [\n {'name': 'word', 'type': 'STRING', 'mode': 'NULLABLE'},\n {'name': 'percent', 'type': 'FLOAT', 'mode': 'NULLABLE'},\n {'name': 'ts', 'type': 'TIMESTAMP', 'mode': 'NULLABLE'}]})\n return parse_table_schema_from_json(json_str)",
"def categorize_attributes():\n global attr_categories, seeds\n print \"Generating seeds...\"\n seeds = get_seeds()\n\n print \"Categorizing attributes...\"\n categorized = categorize(seeds)\n \n category_distances = {}\n attr_categories = {}\n for c in categorized:\n for (attr, score) in categorized[c]:\n attr_categories[attr] = c\n category_distances[attr] = score",
"def __create_categorical_col(self, df, columns):\n\n # Temporarily remove tuple such that columns can be checked\n for n, item in enumerate(columns):\n if isinstance(item, tuple):\n name, _ = item\n temporary_columns = columns.copy()\n temporary_columns[n] = name\n\n # Use appropriate var in validation\n if 'temporary_columns' in locals():\n column_set = temporary_columns\n else:\n column_set = columns\n\n\n for n, column in enumerate(columns):\n if type(column) == tuple:\n cat_col, new_col = column\n df[new_col] = df[cat_col]\n column = cat_col\n df[column], uniques = pd.factorize(df[column])\n return df",
"def find_categorical(self, df):\n# print(type(df),df.ndim)\n categorical = [key for key in df.keys() if df.dtypes[key] == np.dtype('O')]\n numeric = [key for key in df.keys() if df.dtypes[key] != np.dtype('O')]\n # correct naive expectations\n actual_categoric = ['MSSubClass']\n numeric = list(set(numeric) - set(actual_categoric))\n categorical = list(set(categorical).union(set(actual_categoric)))\n return categorical",
"def find_categorical(self, df):\n# print(type(df),df.ndim)\n categorical = [key for key in df.keys() if df.dtypes[key] == np.dtype('O')]\n numeric = [key for key in df.keys() if df.dtypes[key] != np.dtype('O')]\n # correct naive expectations\n actual_categoric = ['MSSubClass']\n numeric = list(set(numeric) - set(actual_categoric))\n categorical = list(set(categorical).union(set(actual_categoric)))\n return categorical",
"def _prepare_schema(self):\n schema = DaskSchema(self.schema_name)\n\n if not self.tables:\n logger.warning(\"No tables are registered.\")\n\n for name, dc in self.tables.items():\n table = DaskTable(name)\n df = dc.df\n logger.debug(\n f\"Adding table '{name}' to schema with columns: {list(df.columns)}\"\n )\n for column in df.columns:\n data_type = df[column].dtype\n sql_data_type = python_to_sql_type(data_type)\n\n table.addColumn(column, sql_data_type)\n\n schema.addTable(table)\n\n if not self.functions:\n logger.debug(\"No custom functions defined.\")\n\n for function_description in self.function_list:\n name = function_description.name\n sql_return_type = python_to_sql_type(function_description.return_type)\n if function_description.aggregation:\n logger.debug(f\"Adding function '{name}' to schema as aggregation.\")\n dask_function = DaskAggregateFunction(name, sql_return_type)\n else:\n logger.debug(f\"Adding function '{name}' to schema as scalar function.\")\n dask_function = DaskScalarFunction(name, sql_return_type)\n\n dask_function = self._add_parameters_from_description(\n function_description, dask_function\n )\n\n schema.addFunction(dask_function)\n\n return schema",
"def get_attribute_schema(self, name):\n self.validate_attribute_name(name)\n return self.schema[name]",
"def __encode_categorical_util(self):\n cat = []\n # cat = self.cat_cols\n for col in self.cat_cols:\n if (\n col in self.train_df\n and col + str(\"Encoded\") not in self.ord_cols\n ):\n if self.test_df is not None:\n self.test_df[col + str(\"Encoded\")] = pd.factorize(\n self.test_df[col]\n )[0]\n self.test_df[col + str(\"Encoded\")] = self.test_df[\n col + str(\"Encoded\")\n ].astype(\"category\")\n self.train_df[col + str(\"Encoded\")] = pd.factorize(\n self.train_df[col]\n )[0]\n self.train_df[col + str(\"Encoded\")] = self.train_df[\n col + str(\"Encoded\")\n ].astype(\"category\")\n cat.append(str(col + str(\"Encoded\")))\n self.cat_cols += cat"
]
| [
"0.6653668",
"0.61256295",
"0.6016856",
"0.59066474",
"0.580082",
"0.5568534",
"0.54435915",
"0.51423573",
"0.5096481",
"0.5034082",
"0.5005395",
"0.48591545",
"0.48311645",
"0.48297423",
"0.48297423",
"0.48297423",
"0.48248908",
"0.48134267",
"0.47964922",
"0.47401783",
"0.47004324",
"0.4698392",
"0.46959174",
"0.46553084",
"0.46496442",
"0.464768",
"0.464768",
"0.46454662",
"0.46407616",
"0.4621201"
]
| 0.7176708 | 0 |
Merges the given CategoricalAttributeSchema into this schema. | def merge_schema(self, schema):
super(CategoricalAttributeSchema, self).merge_schema(schema)
self.categories.update(schema.categories) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def merge_attribute_schema(self, attr_schema):\n name = attr_schema.name\n if name not in self.schema:\n self.schema[name] = attr_schema\n else:\n self.schema[name].merge_schema(attr_schema)",
"def merge_schema(self, schema):\n for _, attr_schema in schema.iter_attributes():\n self.merge_attribute_schema(attr_schema)",
"def merge_schema(self, schema):\n super(BooleanAttributeSchema, self).merge_schema(schema)\n self.values.update(schema.values)",
"def attributes(self):\n attrs_ = super(CategoricalAttributeSchema, self).attributes()\n attrs_.append(\"categories\")\n return attrs_",
"def merge_schema(self, schema):\n self.validate_schema(schema)\n\n if self.exclusive is False:\n self.exclusive = schema.exclusive\n\n if self.default is None:\n self.default = schema.default",
"def __init__(self, name, categories=None, exclusive=False, default=None):\n super(CategoricalAttributeSchema, self).__init__(\n name, exclusive=exclusive, default=default\n )\n self.categories = set(categories or [])\n self.validate_default_value()",
"def validate_subset_of_schema(self, schema):\n super(CategoricalAttributeSchema, self).validate_subset_of_schema(\n schema\n )\n\n if not self.categories.issubset(schema.categories):\n raise AttributeSchemaError(\n \"Categories %s are not a subset of %s\"\n % (self.categories, schema.categories)\n )",
"def build_active_schema(cls, attr):\n return cls(attr.name, categories={attr.value})",
"def add_schema_attribute(self):\n schema_id = self.file.options['schema_id_attr']\n if self.sdef['df'] and self.file.options['include_schema_id']:\n # Normal defined entity\n ns = self.sdef['ns']\n id = self.sdef['id']\n schema = ns + \":\" + id\n self.attributes[schema_id] = {'value': schema}\n elif self.file.options['flag_custom_nodes']:\n self.attributes[schema_id] = {'value': 'custom'}",
"def merge(self, other: Schema) -> Schema:\n if duplicates := self.keys() & other.keys():\n raise IntegrityError(f'Duplicate column name(s): {duplicates}')\n return self.__class__({**self, **other})",
"def _add_cat_fields(self, odata, copy=True):\n # these are required fileds from get_meds_output_dtype\n # that we have put into the input catalog\n always_copy=[\n 'id',\n 'ra',\n 'dec',\n ]\n cat = self.cat_orig\n\n add_dt = []\n for d in cat.dtype.descr:\n n = d[0]\n if n not in odata.dtype.names:\n add_dt.append(d)\n\n obj_data = eu.numpy_util.add_fields(\n odata,\n add_dt,\n )\n\n if copy:\n for n in always_copy:\n obj_data[n] = cat[n]\n\n for d in add_dt:\n n = d[0]\n if n in always_copy:\n continue\n\n # don't clobber things that should be left at\n # their default values\n if n not in odata.dtype.names:\n obj_data[n] = cat[n]\n\n\n return obj_data",
"def merge(self, object_class):\n other_oc = self.schema.get_object_class(object_class)\n self.required_attrs |= other_oc.required_attrs\n self.allowed_attrs |= other_oc.allowed_attrs",
"def load_schema(self, schema):\n if not self.default_schema_loaded:\n self.load_default_schema()\n # load JSON-LD file of user defined schema\n self.schema_extension_only = preprocess_schema(load_json_or_yaml(schema))\n if \"@context\" in self.schema_extension_only:\n self.context.update(self.schema_extension_only[\"@context\"])\n # convert user defined schema into a networkx DiGraph\n self.schema_extension_nx = load_schema_into_networkx(self.schema_extension_only)\n # update undefined classes/properties\n undefined_nodes = [node for node, attrdict in self.schema_extension_nx.node.items() if not attrdict]\n attr_dict = {}\n \n for _node in undefined_nodes:\n if _node in self.schemaorg_nx.nodes():\n attr_dict[_node] = self.schemaorg_nx.nodes[_node]\n nx.set_node_attributes(self.schema_extension_nx, attr_dict)\n # merge networkx graph of user-defined schema with networkx graph of schema defined by Schema.org\n #self.schema_nx = merge_schema_networkx(self.schemaorg_nx, self.schema_extension_nx)\n self.schema_nx = self.schema_extension_nx\t\n SchemaValidator(self.schema_extension_only, self.schema_nx).validate_full_schema()\n # merge together the given schema and the schema defined by schemaorg\n #self.schema = merge_schema(self.schema_extension_only, self.schemaorg_schema)\n self.schema = self.schemaorg_schema\n # split the schema networkx into individual ones\n isolates = list(nx.isolates(self.schema_nx))\n \n for node, attrdict in self.schema_extension_nx.node.items():\n if not 'type' in attrdict:\n self.schema_extension_nx.nodes[node][\"type\"] = \"Class\" \n for node, attrdict in self.schema_nx.node.items():\n if not 'type' in attrdict:\n self.schema_nx.nodes[node][\"type\"] = \"Class\" \n \n self.extended_class_only_graph = self.schema_extension_nx.subgraph([node for node, attrdict in self.schema_extension_nx.node.items() if attrdict['type'] == 'Class' and node not in isolates])\n self.full_class_only_graph = self.schema_nx.subgraph([node for node, attrdict in self.schema_nx.node.items() if attrdict['type'] == 'Class'])\n self.property_only_graph = self.schema_nx.subgraph([node for node, attrdict in self.schema_nx.node.items() if attrdict['type'] == 'Property'])\n # instantiate converters for classes and properties\n self._all_class_uris = [node for node,attrdict in self.schema_nx.node.items() if attrdict['type'] in ['Class', 'DataType']]\n self.cls_converter = CurieUriConverter(self.context,\n self._all_class_uris)\n self._all_prop_uris = list(self.property_only_graph.nodes())\n self.prop_converter = CurieUriConverter(self.context,\n self._all_prop_uris)",
"def merge_schema(first, second):\n if not (type(first) == type(second) == dict):\n raise ValueError(\"Argument is not a schema\")\n\n if not (first.get('type') == second.get('type') == 'object'):\n raise NotImplementedError(\"Unsupported root type\")\n\n return merge_objects(first, second)",
"def validate_subset_of_schema(self, schema):\n self.validate_schema_type(schema)\n\n for name, attr_schema in iteritems(self.schema):\n if not schema.has_attribute(name):\n raise AttributeContainerSchemaError(\n \"Attribute '%s' does not appear in schema\" % name\n )\n\n other_attr_schema = schema.get_attribute_schema(name)\n attr_schema.validate_subset_of_schema(other_attr_schema)",
"def add(self, attr):\n self.validate_type(attr)\n self.categories.add(attr.value)",
"def merge_schema(self, schema):\n super(NumericAttributeSchema, self).merge_schema(schema)\n\n if not self.range:\n self.range = schema.range\n else:\n self.range = (\n min(self.range[0], schema.range[0]),\n max(self.range[1], schema.range[1]),\n )",
"def set_categorical(self, meta_field):\n self._data[meta_field] = pd.Categorical(self._data[meta_field])",
"def merge(self, other):\n self.isotxsMetadata = self.isotxsMetadata.merge(\n other.isotxsMetadata, self, other, \"ISOTXS\", AttributeError\n )\n self.gamisoMetadata = self.gamisoMetadata.merge(\n other.gamisoMetadata, self, other, \"GAMISO\", AttributeError\n )\n self.pmatrxMetadata = self.pmatrxMetadata.merge(\n other.pmatrxMetadata, self, other, \"PMATRX\", AttributeError\n )\n self.micros.merge(other.micros)\n self.gammaXS.merge(other.gammaXS)\n self.neutronHeating = _mergeAttributes(self, other, \"neutronHeating\")\n self.neutronDamage = _mergeAttributes(self, other, \"neutronDamage\")\n self.gammaHeating = _mergeAttributes(self, other, \"gammaHeating\")\n self.isotropicProduction = _mergeAttributes(self, other, \"isotropicProduction\")\n self.linearAnisotropicProduction = _mergeAttributes(\n self, other, \"linearAnisotropicProduction\"\n )\n # this is lazy, but should work, because the n-order wouldn't be set without the others being set first.\n self.nOrderProductionMatrix = (\n self.nOrderProductionMatrix or other.nOrderProductionMatrix\n )",
"def cat(input, other, can_reorder=False, _builder=None):\n return semantic.cat(input, other, can_reorder, _builder)",
"def load(self, base_schema):\n if base_schema == []:\n _base = []\n else:\n _base = base_schema or BASE_SCHEMA or []\n\n _base_schema = []\n for _sc in _base:\n if _sc == \"schema\" or _sc == \"schema.org\":\n self.schema_org_version = get_schemaorg_version()\n _base_schema.append(\n load_schemaorg(version=self.schema_org_version, verbose=self.verbose)\n )\n continue\n elif self.is_a_dde_schema(_sc):\n _base_schema.append(self.load_dde_schemas(_sc))\n\n _base_schema = merge_schema(*_base_schema)\n return _base_schema",
"def schema(self, schema):\n\n self._schema = schema",
"def schema(self, schema):\n\n self._schema = schema",
"def schema(self, schema):\n\n self._schema = schema",
"def merge_datasets(self, other):\r\n if isinstance(other, SpatialDataFrame) and \\\r\n other.geometry_type == self.geometry_type:\r\n return pd.concat(objs=[self, other], axis=0)\r\n elif isinstance(other, DataFrame):\r\n return pd.concat(objs=[self, other], axis=0)\r\n elif isinstance(other, Series):\r\n self['merged_datasets'] = other\r\n elif isinstance(other, SpatialDataFrame) and \\\r\n other.geometry_type != self.geometry_type:\r\n raise ValueError(\"Spatial DataFrames must have the same geometry type.\")\r\n else:\r\n raise ValueError(\"Merge datasets cannot merge types %s\" % type(other))",
"def deduce_schema_for_record(self, json_object, schema_map, base_path=None):\n for key, value in json_object.items():\n # The canonical key is the lower-cased version of the sanitized key\n # so that the case of the field name is preserved when generating\n # the schema but we don't create invalid, duplicate, fields since\n # BigQuery is case insensitive\n canonical_key = self.sanitize_name(key).lower()\n schema_entry = schema_map.get(canonical_key)\n new_schema_entry = self.get_schema_entry(\n key=key,\n value=value,\n base_path=base_path\n )\n schema_map[canonical_key] = self.merge_schema_entry(\n old_schema_entry=schema_entry,\n new_schema_entry=new_schema_entry,\n base_path=base_path\n )",
"def _set_schema(self, schema_value):\n self._id = schema_value.id\n\n if type(self).__name__ != schema_value.type:\n # Make sure this object is the correct type.\n raise ValueError('Cannot convert a {} protocol to a {}.'\n .format(str(type(self)), schema_value.type))\n\n for input_full_path in schema_value.inputs:\n\n value = copy.deepcopy(schema_value.inputs[input_full_path])\n\n input_path = ProtocolPath.from_string(input_full_path)\n self.set_value(input_path, value)",
"def apply(self, attrs=None, kattrs=None, merge=False):\n for attr in attrs:\n kattrs = kattrs or {}\n # Treat objects as assigned to their name\n if hasattr(attr, \"__name__\"):\n kattrs[attr.__name__] = attr\n else:\n kattrs[attr] = inspect.getattr_static(self.source, attr)\n for attr, value in kattrs.items():\n old_value = inspect.getattr_static(self.target, attr, None)\n # If callable, preserve old func\n if callable(value) and callable(old_value):\n # Prevent duplicate patching\n if value in patchy_records:\n continue\n patchy_records[value] = old_value\n\n # Merge collections and classes instead of replacing\n if merge:\n if isinstance(old_value, abc.Container):\n if isinstance(value, abc.Mapping) and isinstance(old_value, abc.MutableMapping):\n old_value.update(value)\n logger.info('Merging mapping {mod}.{attr}'.format(mod=self.target.__name__, attr=attr))\n elif isinstance(value, abc.Sequence) and isinstance(old_value, abc.MutableSequence):\n old_value.extend(value)\n logger.info('Merging sequence {mod}.{attr}'.format(mod=self.target.__name__, attr=attr))\n elif isinstance(value, abc.Set) and isinstance(old_value, abc.MutableSet):\n old_value.update(value)\n logger.info('Merging set {mod}.{attr}'.format(mod=self.target.__name__, attr=attr))\n else:\n setattr(self.target, attr, value)\n logger.info(\"Couldn't merge collection {target}.{attr}, replaced instead\".format(\n target=self.target.__name__,\n attr=attr))\n continue\n elif isinstance(old_value, type):\n logger.info('Merging class for {target}.{attr}'.format(\n target=self.target.__name__, attr=attr))\n self.cls(old_value, value).auto()\n continue\n logger.info('Setting value {target}.{attr}'.format(target=self.target.__name__, attr=attr))\n # Apply patched value\n setattr(self.target, attr, value)",
"def __encode_categorical(self):\n if self.cat_cols is None:\n rows = self.train_df.shape[0]\n rows = 0.2 * rows\n self.cat_cols = []\n for col in self.train_df.columns:\n if col not in self.ord_cols:\n if (\n self.train_df[col].dtype == \"object\"\n and type(self.train_df[col][0]) == \"str\"\n ) and (\n \"$\" in self.train_df[col][0]\n or self.train_df[col].str.contains(\",\").any()\n ):\n self.train_df[col] = (\n self.train_df[col]\n .apply(\n lambda x: x.replace(\"$\", \"\").replace(\",\", \"\")\n )\n .astype(\"float\")\n )\n # elif pd.to_datetime(\n # self.train_df[col], errors=\"coerce\"\n # ).isnull().sum() < 0.7 * len(self.train_df[col]):\n # self.train_df[col] = pd.to_datetime(\n # self.train_df[col], errors=\"coerce\"\n # )\n elif (\n is_numeric_dtype(self.train_df[col])\n or is_string_dtype(self.train_df[col])\n ) and self.train_df[col].dropna().nunique() < rows:\n self.cat_cols.append(col)\n else:\n continue\n\n if self.one_hot:\n self.__encode_one_hot_util()\n else:\n self.__encode_categorical_util()\n return",
"def schema(self, schema):\n self._schema = schema"
]
| [
"0.66761893",
"0.6562346",
"0.5620021",
"0.5591604",
"0.53152627",
"0.5303806",
"0.50917417",
"0.47311455",
"0.45369187",
"0.4431705",
"0.4395007",
"0.43437606",
"0.4324432",
"0.43148988",
"0.42817253",
"0.42398593",
"0.42169097",
"0.42113233",
"0.41836387",
"0.41354522",
"0.41134846",
"0.40872765",
"0.40872765",
"0.40872765",
"0.40820974",
"0.40815943",
"0.40700668",
"0.40529922",
"0.4037336",
"0.40343168"
]
| 0.8113991 | 0 |
Creates a NumericAttributeSchema instance. | def __init__(self, name, range=None, exclusive=False, default=None):
super(NumericAttributeSchema, self).__init__(
name, exclusive=exclusive, default=default
)
self.range = tuple(range or [])
self.validate_default_value() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(\n self, dtype: str, name: str, index: Optional[int] = 0,\n label: Optional[str] = None, help: Optional[str] = None,\n default: Optional[Union[int, float]] = None, required: Optional[bool] = False,\n group: Optional[str] = None, constraint: Optional[RangeConstraint] = None\n ):\n if dtype not in NUMERIC_TYPES:\n raise ValueError(\"invalid numeric type '{}'\".format(dtype))\n super(Numeric, self).__init__(\n dtype=dtype,\n name=name,\n index=index,\n label=label,\n help=help,\n default=default,\n required=required,\n group=group\n )\n self.constraint = constraint",
"def attributes(self):\n attrs_ = super(NumericAttributeSchema, self).attributes()\n attrs_.append(\"range\")\n return attrs_",
"def from_dict(doc: Dict, validate: Optional[bool] = True) -> Numeric:\n if validate:\n try:\n util.validate_doc(\n doc,\n mandatory=pd.MANDATORY,\n optional=pd.OPTIONAL + ['range']\n )\n constraint = None\n if 'range' in doc:\n constraint = RangeConstraint.from_string(doc['range'])\n except (ValueError, TypeError) as ex:\n raise err.InvalidParameterError(str(ex))\n try:\n constraint = None\n if 'range' in doc:\n constraint = RangeConstraint.from_string(doc['range'])\n except (ValueError, TypeError) as ex:\n raise err.InvalidParameterError(str(ex))\n return Numeric(\n dtype=doc[pd.TYPE],\n name=doc[pd.NAME],\n index=doc[pd.INDEX],\n label=doc.get(pd.LABEL),\n help=doc.get(pd.HELP),\n default=doc.get(pd.DEFAULT),\n required=doc[pd.REQUIRED],\n group=doc.get(pd.GROUP),\n constraint=constraint\n )",
"def merge_schema(self, schema):\n super(NumericAttributeSchema, self).merge_schema(schema)\n\n if not self.range:\n self.range = schema.range\n else:\n self.range = (\n min(self.range[0], schema.range[0]),\n max(self.range[1], schema.range[1]),\n )",
"def _create_numeric_class(baseclass, itemsize):\n\n prefix = '%s%d' % (baseclass.prefix(), itemsize * 8)\n type_ = prefix.lower()\n classdict = {'itemsize': itemsize, 'type': type_,\n '__doc__': \"Defines an atom of type ``%s``.\" % type_}\n\n def __init__(self, shape=(), dflt=baseclass._defvalue):\n Atom.__init__(self, self.type, shape, dflt)\n classdict['__init__'] = __init__\n return type('%sAtom' % prefix, (baseclass,), classdict)",
"def add_numeric_cols(self):\n self.create_numeric_status()\n self.create_date_integer()",
"def create_num(n, lineno=0, col_offset=0):\n num = ast.Num()\n num.n = n\n num.lineno = lineno\n num.col_offset = col_offset\n\n return num",
"def numeric_attribute_charts(self):\n\n file_or_case = \"case\"\n if self.ui.radioButton_file.isChecked():\n file_or_case = \"file\"\n attribute = self.ui.comboBox_num_attributes.currentText()\n title = _(\"Attribute histogram\")\n subtitle = \"<br><sup>\" + _(file_or_case) + _(\" attribute: \") + attribute\n self.ui.comboBox_num_attributes.blockSignals(True)\n self.ui.comboBox_num_attributes.setCurrentIndex(0)\n self.ui.comboBox_num_attributes.blockSignals(False)\n\n cur = self.app.conn.cursor()\n cur.execute(\"select cast(value as int) from attribute where attr_type=? and name=?\",\n [file_or_case, attribute])\n res = cur.fetchall()\n values = []\n for r in res:\n values.append(r[0])\n # Create pandas DataFrame\n data = {attribute: values}\n df = pd.DataFrame(data)\n fig = px.histogram(df, x=attribute, title=title + subtitle)\n fig.show()\n self.helper_export_html(fig)",
"def test_numeric(self):\n conn = self.database.connection()\n cursor = conn.cursor()\n dialect = self.database.dialect()\n dbapi = self.database.dbapi()\n query = dialect.translate('DROP TABLE test_numeric')\n try:\n cursor.execute(query)\n except dbapi.Error:\n conn.rollback()\n query = dialect.translate('CREATE TABLE test_numeric ' \\\n '( value NUMERIC(100,50) NOT NULL )')\n cursor.execute(query)\n data = []\n query = 'INSERT INTO test_numeric VALUES (%s)'\n for i in range(100):\n int = random.getrandbits(150)\n frac = random.getrandbits(150)\n item = decimal.Decimal('%d.%s' % (int, frac))\n data.append(item)\n cursor.execute(query, (item,))\n query = 'SELECT * FROM test_numeric'\n cursor.execute(query)\n result = cursor.fetchall()\n for row in result:\n item = row[0]\n assert isinstance(item, decimal.Decimal)\n assert item in data\n data.remove(item)\n query = dialect.translate('DELETE FROM test_numeric')\n cursor.execute(query)\n query = dialect.translate('DROP TABLE test_numeric')\n cursor.execute(query)\n conn.commit()",
"def numeric_value(number, units):\n k = Factory.build('shared.numeric')\n k.value = number\n k.units = units\n return k",
"def __new__(cls, **kwargs):\n # Call up to allocate the new instance:\n try:\n instance = super(Schema, cls).__new__(cls, **kwargs)\n except TypeError:\n instance = super(Schema, cls).__new__(cls)\n \n # Create the “__fields__” attribute and retrieve the class-based\n # field indexes, “__field_names__” and “__field_index__”:\n instance.__fields__ = Flat()\n field_names, field_index = pyattrs(cls, 'field_names',\n 'field_index')\n \n # Set each of the field-default values through a call to\n # the underlying descriptor instances’ “get_default()” method:\n for field, nsfield in zip(field_names, field_index):\n instance.__fields__[nsfield] = stattr(instance, field).get_default()\n \n # Override defaults with any instance-specific values,\n # as specfied through keywords:\n for key, value in kwargs.items():\n if key in field_names:\n setattr(instance, key, value)\n \n for namespace in instance.__fields__.namespaces():\n if namespace in field_names:\n setattr(instance, namespace, field_names[namespace])\n \n # Return the new instance:\n return instance",
"def num_spec(\n tag: Tag = \"num\",\n type_: Union[Type, Tuple[Type, ...]] = (float, int),\n min_: Union[complex, float, int, None] = None,\n max_: Union[complex, float, int, None] = None,\n conformer: Optional[Conformer] = None,\n) -> Spec:\n\n @pred_to_validator(f\"Value '{{value}}' is not type {type_}\", complement=True)\n def is_numeric_type(x: Any) -> bool:\n return isinstance(x, type_)\n\n validators = [is_numeric_type]\n\n if min_ is not None:\n\n @pred_to_validator(f\"Number '{{value}}' is smaller than minimum {min_}\")\n def num_meets_min(x: Union[complex, float, int]) -> bool:\n return x < min_ # type: ignore\n\n validators.append(num_meets_min)\n\n if max_ is not None:\n\n @pred_to_validator(f\"String '{{value}}' exceeds maximum length {max_}\")\n def num_under_max(x: Union[complex, float, int]) -> bool:\n return x > max_ # type: ignore\n\n validators.append(num_under_max)\n\n if min_ is not None and max_ is not None:\n if min_ > max_: # type: ignore\n raise ValueError(\"Cannot define a spec with min greater than max\")\n\n return ValidatorSpec.from_validators(tag, *validators, conformer=conformer)",
"def create_numeric(cls, name, question, default_response, contacts, user):\n poll = Poll.objects.create(\n name=name,\n question=question,\n default_response=default_response, \n user=user,\n type=Poll.TYPE_NUMERIC)\n poll.contacts = contacts\n return poll",
"def build_active_schema(cls, attr):\n return cls(attr.name, range=(attr.value, attr.value))",
"def __new__(cls, **kwargs):\n schema = type(\"Schema\", (cls,), {\"__doc__\": cls.__doc__})\n schema.__class_attrs__ = OrderedDict()\n schema.__attrs__ = OrderedDict()\n for name, attr in kwargs.items():\n if not hasattr(attr, \"name\"):\n attr.name = name\n schema.__class_attrs__[attr.name] = attr\n schema.__attrs__[attr.name] = attr\n return schema",
"def ensure_numeric(A, typecode=None):\n\n if isinstance(A, basestring):\n msg = 'Sorry, cannot handle strings in ensure_numeric()'\n raise Exception(msg)\n\n if typecode is None:\n if isinstance(A, numpy.ndarray):\n return A\n else:\n return numpy.array(A)\n else:\n return numpy.array(A, dtype=typecode, copy=False)",
"def update_numeric_width(self, eval_dict):\n # If width is already a number, do nothing\n if isinstance(self.width, int):\n self.width_numeric = self.width\n return\n self.width_numeric = eval(self.width.replace(\"`\", \"\"), eval_dict)\n if not isinstance(self.width_numeric, int):\n logger.error(\"Could not evaluate width {} of wire {}\".format(self.width_numeric, self.name))",
"def testNumberAttribute(self):\n def action(field_class):\n # Check range.\n self.assertRaises(messages.InvalidNumberError,\n field_class,\n 0)\n self.assertRaises(messages.InvalidNumberError,\n field_class,\n -1)\n self.assertRaises(messages.InvalidNumberError,\n field_class,\n messages.MAX_FIELD_NUMBER + 1)\n\n # Check reserved.\n self.assertRaises(messages.InvalidNumberError,\n field_class,\n messages.FIRST_RESERVED_FIELD_NUMBER)\n self.assertRaises(messages.InvalidNumberError,\n field_class,\n messages.LAST_RESERVED_FIELD_NUMBER)\n self.assertRaises(messages.InvalidNumberError,\n field_class,\n '1')\n\n # This one should work.\n field_class(number=1)\n self.ActionOnAllFieldClasses(action)",
"def __init__(self, data_type, other_props=None):\n if data_type not in PRIMITIVE_TYPES:\n raise AvroException('%r is not a valid primitive type.' % data_type)\n super(PrimitiveSchema, self).__init__(data_type, other_props=other_props)",
"def df_numeric_column(min_value=0, max_value=1, num_rows=100):\n # Generate numeric column\n return pd.Series(np.random.uniform(min_value, max_value, num_rows))",
"def create(self, validated_data):\n return Dimension(**validated_data)",
"def m_numeric_array(self, value):\n return '<numeric_array id=\"%s\" encoding=\"base64\">%s</numeric_array>' % \\\n (self.register(value), Numeric.dumps(value).encode('base64'))",
"def validate_number_attribute(tag, attribute_name, attribute_value):\n if not attribute_value:\n return\n\n # If the given attribute value is either integer/float, then return the\n # value.\n if isinstance(attribute_value, (int, float)):\n return attribute_value\n # Give attribute value can be a string. For example, the given attribute\n # value can be '1'. When we do int('1') --> returns 1. The same logic\n # works in float values also. So, if we do float('1.23') --> returns 1.23.\n # If both the cases fail, then we raise an AttributeError.\n elif isinstance(attribute_value, str):\n try:\n return int(attribute_value)\n except ValueError:\n try:\n return float(attribute_value)\n except ValueError:\n raise AttributeValueError('<{tag}>: {attribute} attribute '\n 'should be an integer or float value'\n .format(tag=tag,\n attribute=attribute_name))\n else:\n # In rest all the cases, the attribute value is not a valid int/float\n # value.\n raise AttributeValueError('<{tag}>: {attribute} attribute should be an'\n ' integer or float value'\n .format(tag=tag, attribute=attribute_name))",
"def test_number(self):\n descriptor = clone(SPECIES_OBSERVATION_SCHEMA)\n record = {\n 'Observation Date': \"18/08/2016\",\n 'Latitude': -32,\n 'Longitude': 115,\n 'Species Name': 1234\n }\n schema = SpeciesObservationSchema(descriptor)\n with self.assertRaises(Exception):\n schema.cast_species_name(record)",
"def __to_num(self):\n return _VirtualColumn(\n df_name=self.thisptr[\"df_name_\"],\n operator=\"to_num\",\n operand1=self,\n operand2=None\n )",
"def create(self, validated_data):\n return Quantity(**validated_data)",
"def generate(data: pd.DataFrame) -> np.ndarray:\n # TODO tests\n return data.apply(pd.to_numeric, errors='coerce')",
"def gen_numeric_literal(self, expr):\n return self.emit_const(expr.value, expr.typ)",
"def is_numeric_type(self):\n row_type = self.get_type()\n is_numeric = row_type in (\n 'hidden decimal',\n 'decimal',\n 'hidden integer',\n 'integer',\n 'int',\n 'range',\n )\n return is_numeric",
"def from_dict(cls, d):\n attr_cls = etau.get_class(d[\"type\"])\n schema_cls = attr_cls.get_schema_cls()\n\n name = d[\"name\"]\n exclusive = d.get(\"exclusive\", False)\n default = d.get(\"default\", None)\n return schema_cls(\n name,\n exclusive=exclusive,\n default=default,\n **schema_cls.get_kwargs(d)\n )"
]
| [
"0.5549439",
"0.550502",
"0.52814686",
"0.49523935",
"0.49471354",
"0.49455547",
"0.48985994",
"0.48495618",
"0.4763161",
"0.47305086",
"0.4647385",
"0.4579941",
"0.45615834",
"0.45446146",
"0.45197606",
"0.45111766",
"0.4476501",
"0.44345817",
"0.4382006",
"0.43748778",
"0.43568382",
"0.42896524",
"0.42724183",
"0.4258509",
"0.42553863",
"0.424929",
"0.42409167",
"0.42178714",
"0.42157724",
"0.41969317"
]
| 0.63034064 | 0 |
Builds a NumericAttributeSchema that describes the active schema of the NumericAttribute. | def build_active_schema(cls, attr):
return cls(attr.name, range=(attr.value, attr.value)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def attributes(self):\n attrs_ = super(NumericAttributeSchema, self).attributes()\n attrs_.append(\"range\")\n return attrs_",
"def __init__(self, name, range=None, exclusive=False, default=None):\n super(NumericAttributeSchema, self).__init__(\n name, exclusive=exclusive, default=default\n )\n self.range = tuple(range or [])\n self.validate_default_value()",
"def merge_schema(self, schema):\n super(NumericAttributeSchema, self).merge_schema(schema)\n\n if not self.range:\n self.range = schema.range\n else:\n self.range = (\n min(self.range[0], schema.range[0]),\n max(self.range[1], schema.range[1]),\n )",
"def build_active_schema(cls, attrs):\n schema = cls()\n schema.add_attributes(attrs)\n return schema",
"def build_active_schema(cls, attr):\n return cls(attr.name, values={attr.value})",
"def add_numeric_cols(self):\n self.create_numeric_status()\n self.create_date_integer()",
"def build_active_schema(cls, attr):\n return cls(attr.name, categories={attr.value})",
"def __build_schema(meta_data):\n \n # Builds the dictionary that represents the schema.\n temporary_dictionary = {'$schema': None, '$id': None, 'title': None, 'type': None, 'properties': []}\n for x in meta_data:\n temporary_dictionary['properties'].append({\n 'name': x,\n 'type': None,\n 'description': None})\n # Creates a new instance of the schema and inserts the dictionary as a json into the field and returns it.\n returned_schema = Schema()\n returned_schema.data = json.dumps(temporary_dictionary)\n return returned_schema",
"def _build_attributes(self):\n\n # We might rebuild the program because of snippets but we must\n # keep already bound attributes\n\n dtype = []\n for (name,gtype) in self.all_attributes:\n if name not in self._attributes.keys():\n attribute = Attribute(self, name, gtype)\n else:\n attribute = self._attributes[name]\n\n self._attributes[name] = attribute\n dtype.append(attribute.dtype)",
"def update_numeric_width(self, eval_dict):\n # If width is already a number, do nothing\n if isinstance(self.width, int):\n self.width_numeric = self.width\n return\n self.width_numeric = eval(self.width.replace(\"`\", \"\"), eval_dict)\n if not isinstance(self.width_numeric, int):\n logger.error(\"Could not evaluate width {} of wire {}\".format(self.width_numeric, self.name))",
"def numeric_attribute_charts(self):\n\n file_or_case = \"case\"\n if self.ui.radioButton_file.isChecked():\n file_or_case = \"file\"\n attribute = self.ui.comboBox_num_attributes.currentText()\n title = _(\"Attribute histogram\")\n subtitle = \"<br><sup>\" + _(file_or_case) + _(\" attribute: \") + attribute\n self.ui.comboBox_num_attributes.blockSignals(True)\n self.ui.comboBox_num_attributes.setCurrentIndex(0)\n self.ui.comboBox_num_attributes.blockSignals(False)\n\n cur = self.app.conn.cursor()\n cur.execute(\"select cast(value as int) from attribute where attr_type=? and name=?\",\n [file_or_case, attribute])\n res = cur.fetchall()\n values = []\n for r in res:\n values.append(r[0])\n # Create pandas DataFrame\n data = {attribute: values}\n df = pd.DataFrame(data)\n fig = px.histogram(df, x=attribute, title=title + subtitle)\n fig.show()\n self.helper_export_html(fig)",
"def from_dict(doc: Dict, validate: Optional[bool] = True) -> Numeric:\n if validate:\n try:\n util.validate_doc(\n doc,\n mandatory=pd.MANDATORY,\n optional=pd.OPTIONAL + ['range']\n )\n constraint = None\n if 'range' in doc:\n constraint = RangeConstraint.from_string(doc['range'])\n except (ValueError, TypeError) as ex:\n raise err.InvalidParameterError(str(ex))\n try:\n constraint = None\n if 'range' in doc:\n constraint = RangeConstraint.from_string(doc['range'])\n except (ValueError, TypeError) as ex:\n raise err.InvalidParameterError(str(ex))\n return Numeric(\n dtype=doc[pd.TYPE],\n name=doc[pd.NAME],\n index=doc[pd.INDEX],\n label=doc.get(pd.LABEL),\n help=doc.get(pd.HELP),\n default=doc.get(pd.DEFAULT),\n required=doc[pd.REQUIRED],\n group=doc.get(pd.GROUP),\n constraint=constraint\n )",
"def extract(cls, extractor, typ):\n schema = {\n \"title\": typ.__name__,\n \"type\": \"object\",\n \"properties\": {},\n \"required\": []\n }\n for attribute in attr.fields(typ):\n details = cls._extract_attribute(extractor, attribute)\n if details.is_required:\n schema[\"required\"].append(details.name)\n schema[\"properties\"][details.name] = details.schema\n return schema",
"def m_numeric_array(self, value):\n return '<numeric_array id=\"%s\" encoding=\"base64\">%s</numeric_array>' % \\\n (self.register(value), Numeric.dumps(value).encode('base64'))",
"def schema(self):\n return {\n 'Date': { 'type': 'datetime', 'required': True},\n 'Errors': { 'type': 'dict', 'required': True},\n 'RecordType': {'type': 'string', 'required': True},\n 'RowNum': { 'type': 'integer', 'nullable': True}\n }",
"def __init__(\n self, dtype: str, name: str, index: Optional[int] = 0,\n label: Optional[str] = None, help: Optional[str] = None,\n default: Optional[Union[int, float]] = None, required: Optional[bool] = False,\n group: Optional[str] = None, constraint: Optional[RangeConstraint] = None\n ):\n if dtype not in NUMERIC_TYPES:\n raise ValueError(\"invalid numeric type '{}'\".format(dtype))\n super(Numeric, self).__init__(\n dtype=dtype,\n name=name,\n index=index,\n label=label,\n help=help,\n default=default,\n required=required,\n group=group\n )\n self.constraint = constraint",
"def set_schema():\n schema = StructType([\n StructField(\"cicid\",DoubleType(),True),\n StructField(\"arrdate\",DoubleType(),True),\n StructField(\"i94cit\",DoubleType(),True),\n StructField(\"i94res\",DoubleType(),True),\n StructField(\"i94port\",StringType(),True),\n StructField(\"i94mode\",DoubleType(),True),\n StructField(\"i94addr\",StringType(),True),\n StructField(\"depdate\",DoubleType(),True), \n StructField(\"i94bir\",DoubleType(),True),\n StructField(\"i94visa\",DoubleType(),True),\n StructField(\"gender\",StringType(),True),\n StructField(\"airline\",StringType(),True),\n StructField(\"visatype\",StringType(),True)])\n return schema",
"def generate_distribution(attribute_type_dict):\n\n all_attr_distribution = []\n all_attribute_types = attribute_type_dict.keys()\n attr_type_counter = 0\n\n TIMESTAMP = get_timestamp()\n\n\n for attr_type in all_attribute_types:\n attr_type_counter += 1\n formatted_attr_type = attr_type.lower()\n formatted_attr_type = formatted_attr_type.replace(\" \", \"_\")\n\n if attr_type_counter <= int(args.num_attr_review):\n token_distribution = {}\n\n num_attr_tokens = len(re.findall(r'\\w+', attr_type))\n token_distribution[\"token_count\"] = num_attr_tokens\n\n\n token_lengths_list = []\n attr_distribution = {}\n\n for token in attr_type.split():\n token_length_dict = {}\n token_length_dict[token] = len(token)\n \n token_lengths_list.append(token_length_dict)\n\n token_distribution[\"token_lengths\"] = token_lengths_list\n\n attr_distribution[formatted_attr_type] = token_distribution\n\n all_attr_distribution.append(attr_distribution)\n \n return all_attr_distribution",
"def instance_schema(self):\n raise NotImplementedError",
"def build_schema(self, dframe, overwrite=False, set_num_columns=True):\n new_schema = self.schema.rebuild(dframe, overwrite)\n self.set_schema(new_schema,\n set_num_columns=(set_num_columns or overwrite))",
"def _create_numeric_class(baseclass, itemsize):\n\n prefix = '%s%d' % (baseclass.prefix(), itemsize * 8)\n type_ = prefix.lower()\n classdict = {'itemsize': itemsize, 'type': type_,\n '__doc__': \"Defines an atom of type ``%s``.\" % type_}\n\n def __init__(self, shape=(), dflt=baseclass._defvalue):\n Atom.__init__(self, self.type, shape, dflt)\n classdict['__init__'] = __init__\n return type('%sAtom' % prefix, (baseclass,), classdict)",
"def build_song_schema():\n schema = StructType(\n [\n StructField('artist_id', StringType(), True),\n StructField('artist_latitude', DecimalType(), True),\n StructField('artist_longitude', DecimalType(), True),\n StructField('artist_location', StringType(), True),\n StructField('artist_name', StringType(), True),\n StructField('duration', DecimalType(), True),\n StructField('num_songs', IntegerType(), True),\n StructField('song_id', StringType(), True),\n StructField('title', StringType(), True),\n StructField('year', IntegerType(), True)\n ]\n )\n return schema",
"def build_song_schema():\n schema = T.StructType(\n [\n T.StructField('artist_id', T.StringType(), True),\n T.StructField('artist_latitude', T.DecimalType(), True),\n T.StructField('artist_longitude', T.DecimalType(), True),\n T.StructField('artist_location', T.StringType(), True),\n T.StructField('artist_name', T.StringType(), True),\n T.StructField('duration', T.DecimalType(), True),\n T.StructField('num_songs', T.IntegerType(), True),\n T.StructField('song_id', T.StringType(), True),\n T.StructField('title', T.StringType(), True),\n T.StructField('year', T.IntegerType(), True)\n ]\n )\n return schema",
"def _get_numeric_feature_columns(self,\n include_integer_columns: bool = False\n ) -> List[FeatureColumn]:\n\n numeric_columns = []\n for feature in self._dataset_schema.feature:\n\n feature_name = feature.name\n if feature_name == self.raw_label_key:\n continue\n\n feature_storage_type = _get_feature_storage_type(self._dataset_schema,\n feature_name)\n\n if feature_storage_type == tf.int64 and not include_integer_columns:\n continue\n\n # NOTE: Int features are treated as both numerical and categorical. For\n # example MNIST stores its features as int16 features, but are continuous.\n if feature_storage_type == tf.float32 or feature_storage_type == tf.int64:\n\n # Numerical feature.\n dim = _get_feature_dim(self._dataset_schema, feature_name)\n\n # Numerical feature normalized in 0-1.\n current_feature = tf.feature_column.numeric_column(\n feature_name, shape=(dim,), dtype=feature_storage_type)\n numeric_columns.append(current_feature)\n return numeric_columns",
"def _dict2schema(dct):\n attrs = dct.copy()\n if MARSHMALLOW_VERSION_INFO[0] < 3:\n\n class Meta(object):\n strict = True\n\n attrs[\"Meta\"] = Meta\n return type(str(\"\"), (ma.Schema,), attrs)",
"def schema(self):\n raise NotImplementedError",
"def __call__(self):\n schema = geoutils.Schema()\n\n schema.build_gdelt_spatial_index('gdelt_spatial_index', self)\n\n return schema()",
"def _get_table_schema(self):\n\n return {\n 'AttributeDefinitions': [\n {\n 'AttributeName': self._key_field.name,\n 'AttributeType': self._key_field.data_type\n }\n ],\n 'TableName': self.table_name,\n 'KeySchema': [\n {\n 'AttributeName': self._key_field.name,\n 'KeyType': 'HASH'\n }\n ],\n 'ProvisionedThroughput': {\n 'ReadCapacityUnits': self.read_capacity_units,\n 'WriteCapacityUnits': self.write_capacity_units\n }\n }",
"def schema(self):\n return self._schema",
"def _numeric_system(self):\n if self.__numeric_system is None:\n self.__numeric_system = self._lambdify(self._symbolic_system)\n return self.__numeric_system"
]
| [
"0.5745743",
"0.5220101",
"0.51414",
"0.510134",
"0.50879943",
"0.50192666",
"0.48779523",
"0.47359407",
"0.46691066",
"0.46501595",
"0.46292365",
"0.45427948",
"0.45160258",
"0.43934676",
"0.4364968",
"0.43548426",
"0.43459022",
"0.43127623",
"0.42895886",
"0.4248163",
"0.4224082",
"0.42156038",
"0.42069626",
"0.42040893",
"0.41794926",
"0.41791335",
"0.4177237",
"0.4174568",
"0.4171505",
"0.41607285"
]
| 0.5373601 | 1 |
Merges the given NumericAttributeSchema into this schema. | def merge_schema(self, schema):
super(NumericAttributeSchema, self).merge_schema(schema)
if not self.range:
self.range = schema.range
else:
self.range = (
min(self.range[0], schema.range[0]),
max(self.range[1], schema.range[1]),
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def merge_schema(self, schema):\n for _, attr_schema in schema.iter_attributes():\n self.merge_attribute_schema(attr_schema)",
"def merge_attribute_schema(self, attr_schema):\n name = attr_schema.name\n if name not in self.schema:\n self.schema[name] = attr_schema\n else:\n self.schema[name].merge_schema(attr_schema)",
"def merge_schema(self, schema):\n super(BooleanAttributeSchema, self).merge_schema(schema)\n self.values.update(schema.values)",
"def merge_schema(self, schema):\n self.validate_schema(schema)\n\n if self.exclusive is False:\n self.exclusive = schema.exclusive\n\n if self.default is None:\n self.default = schema.default",
"def merge_schema(self, schema):\n super(CategoricalAttributeSchema, self).merge_schema(schema)\n self.categories.update(schema.categories)",
"def merge(self, other: Schema) -> Schema:\n if duplicates := self.keys() & other.keys():\n raise IntegrityError(f'Duplicate column name(s): {duplicates}')\n return self.__class__({**self, **other})",
"def merge(self, other):\n self.isotxsMetadata = self.isotxsMetadata.merge(\n other.isotxsMetadata, self, other, \"ISOTXS\", AttributeError\n )\n self.gamisoMetadata = self.gamisoMetadata.merge(\n other.gamisoMetadata, self, other, \"GAMISO\", AttributeError\n )\n self.pmatrxMetadata = self.pmatrxMetadata.merge(\n other.pmatrxMetadata, self, other, \"PMATRX\", AttributeError\n )\n self.micros.merge(other.micros)\n self.gammaXS.merge(other.gammaXS)\n self.neutronHeating = _mergeAttributes(self, other, \"neutronHeating\")\n self.neutronDamage = _mergeAttributes(self, other, \"neutronDamage\")\n self.gammaHeating = _mergeAttributes(self, other, \"gammaHeating\")\n self.isotropicProduction = _mergeAttributes(self, other, \"isotropicProduction\")\n self.linearAnisotropicProduction = _mergeAttributes(\n self, other, \"linearAnisotropicProduction\"\n )\n # this is lazy, but should work, because the n-order wouldn't be set without the others being set first.\n self.nOrderProductionMatrix = (\n self.nOrderProductionMatrix or other.nOrderProductionMatrix\n )",
"def update_numeric_width(self, eval_dict):\n # If width is already a number, do nothing\n if isinstance(self.width, int):\n self.width_numeric = self.width\n return\n self.width_numeric = eval(self.width.replace(\"`\", \"\"), eval_dict)\n if not isinstance(self.width_numeric, int):\n logger.error(\"Could not evaluate width {} of wire {}\".format(self.width_numeric, self.name))",
"def attributes(self):\n attrs_ = super(NumericAttributeSchema, self).attributes()\n attrs_.append(\"range\")\n return attrs_",
"def merge(self, other):\n self._moments = merge_pqc([self, other])._moments\n self._parameters = sp.symarray(self.parameter_symbol, len(self.symbols))\n if self.flatten_circuit:\n self.flatten()",
"def add_numeric_cols(self):\n self.create_numeric_status()\n self.create_date_integer()",
"def __init__(self, name, range=None, exclusive=False, default=None):\n super(NumericAttributeSchema, self).__init__(\n name, exclusive=exclusive, default=default\n )\n self.range = tuple(range or [])\n self.validate_default_value()",
"def __num__(self):\n import copy\n numself = copy.deepcopy(self)\n if self.A.dtype == object or self.b.dtype == object:\n numself.A=np.array(self.A,dtype=np.float64)\n numself.b=np.array(self.b,dtype=np.float64)\n numself.c=np.array(self.c,dtype=np.float64)\n numself.alpha=np.array(self.alpha,dtype=np.float64)\n numself.beta=np.array(self.beta,dtype=np.float64)\n return numself",
"def toNumeric(self):\n for column in list(self.data.columns):\n if not is_numeric_dtype(self.data[column]):\n values = list(sorted(self.data[column].unique()))\n indices = [index for index, value in enumerate(values)]\n self.data[column] = self.data[column].replace(\n to_replace=values, value=indices)",
"def merge_schema(first, second):\n if not (type(first) == type(second) == dict):\n raise ValueError(\"Argument is not a schema\")\n\n if not (first.get('type') == second.get('type') == 'object'):\n raise NotImplementedError(\"Unsupported root type\")\n\n return merge_objects(first, second)",
"def set_schema(self, schema, set_num_columns=True):\n update_dict = {self.SCHEMA: schema}\n\n if set_num_columns:\n update_dict.update({self.NUM_COLUMNS: len(schema.keys())})\n\n self.update(update_dict)",
"def merge(self, object_class):\n other_oc = self.schema.get_object_class(object_class)\n self.required_attrs |= other_oc.required_attrs\n self.allowed_attrs |= other_oc.allowed_attrs",
"def load_schema(self, schema):\n if not self.default_schema_loaded:\n self.load_default_schema()\n # load JSON-LD file of user defined schema\n self.schema_extension_only = preprocess_schema(load_json_or_yaml(schema))\n if \"@context\" in self.schema_extension_only:\n self.context.update(self.schema_extension_only[\"@context\"])\n # convert user defined schema into a networkx DiGraph\n self.schema_extension_nx = load_schema_into_networkx(self.schema_extension_only)\n # update undefined classes/properties\n undefined_nodes = [node for node, attrdict in self.schema_extension_nx.node.items() if not attrdict]\n attr_dict = {}\n \n for _node in undefined_nodes:\n if _node in self.schemaorg_nx.nodes():\n attr_dict[_node] = self.schemaorg_nx.nodes[_node]\n nx.set_node_attributes(self.schema_extension_nx, attr_dict)\n # merge networkx graph of user-defined schema with networkx graph of schema defined by Schema.org\n #self.schema_nx = merge_schema_networkx(self.schemaorg_nx, self.schema_extension_nx)\n self.schema_nx = self.schema_extension_nx\t\n SchemaValidator(self.schema_extension_only, self.schema_nx).validate_full_schema()\n # merge together the given schema and the schema defined by schemaorg\n #self.schema = merge_schema(self.schema_extension_only, self.schemaorg_schema)\n self.schema = self.schemaorg_schema\n # split the schema networkx into individual ones\n isolates = list(nx.isolates(self.schema_nx))\n \n for node, attrdict in self.schema_extension_nx.node.items():\n if not 'type' in attrdict:\n self.schema_extension_nx.nodes[node][\"type\"] = \"Class\" \n for node, attrdict in self.schema_nx.node.items():\n if not 'type' in attrdict:\n self.schema_nx.nodes[node][\"type\"] = \"Class\" \n \n self.extended_class_only_graph = self.schema_extension_nx.subgraph([node for node, attrdict in self.schema_extension_nx.node.items() if attrdict['type'] == 'Class' and node not in isolates])\n self.full_class_only_graph = self.schema_nx.subgraph([node for node, attrdict in self.schema_nx.node.items() if attrdict['type'] == 'Class'])\n self.property_only_graph = self.schema_nx.subgraph([node for node, attrdict in self.schema_nx.node.items() if attrdict['type'] == 'Property'])\n # instantiate converters for classes and properties\n self._all_class_uris = [node for node,attrdict in self.schema_nx.node.items() if attrdict['type'] in ['Class', 'DataType']]\n self.cls_converter = CurieUriConverter(self.context,\n self._all_class_uris)\n self._all_prop_uris = list(self.property_only_graph.nodes())\n self.prop_converter = CurieUriConverter(self.context,\n self._all_prop_uris)",
"def _extend(self, other_field, memo) -> None:\n if other_field.data.ndim != self.data.ndim:\n raise ValueError(\n f\"Field '{self.name}' cannot be extended. Dimensions must be equal. ({other_field.data.ndim} != {self.data.ndim})\"\n )\n\n old_id = id(self.data)\n if self.data.dtype < other_field.data.dtype:\n # Increase size of self.data.dtype before inserting\n new_data = np.insert(self.data.astype(other_field.data.dtype), self.num_obs, other_field.data, axis=0)\n else:\n new_data = np.insert(self.data, self.num_obs, other_field.data, axis=0)\n memo[old_id] = (self.data, new_data)\n self.data = new_data",
"def addAllNumericHas (self, other):\n \n if self.hasEpoch():\n if other.hasEpoch():\n self.epoch += other.epoch\n \n if self.hasUtcOffsetMinutes():\n if other.hasUtcOffsetMinutes():\n self.utcOffsetMinutes += other.utcOffsetMinutes\n \n \n pass",
"def add_numeric_op(attr_name):\n def closure(self, other):\n return VTKArray._numeric_op(self, other, attr_name)\n closure.__name__ = attr_name\n attr[attr_name] = closure",
"def add_schema_attribute(self):\n schema_id = self.file.options['schema_id_attr']\n if self.sdef['df'] and self.file.options['include_schema_id']:\n # Normal defined entity\n ns = self.sdef['ns']\n id = self.sdef['id']\n schema = ns + \":\" + id\n self.attributes[schema_id] = {'value': schema}\n elif self.file.options['flag_custom_nodes']:\n self.attributes[schema_id] = {'value': 'custom'}",
"def coerce_empty_numeric_values(self):\n if \"numeric\" in self.annot_types:\n numeric_columns = self.file.xs(\n \"numeric\", axis=1, level=1, drop_level=False\n ).columns.tolist()\n self.file[numeric_columns].replace(\"\", np.nan, inplace=True)",
"def merge(self, other):\n if other.n_points != self.n_points:\n raise ValueError(\n 'Deduplicator size mismatch: '\n f'{self.n_points} != {other.n_points}'\n )\n self.data_reduced.extend(other.data_reduced)\n self.data_kd.extend(other.data_kd)",
"def merge(self, *other):\n # Compute union of Fingerprints\n union = set().union(self, *other)\n # Create new fingerprint from union\n result = super(Fingerprint, type(self)).__new__(type(self), union)\n # Set n_flows to combination of self and other\n result.__setattr__('n_flows', self.n_flows + sum(o.n_flows for o in other))\n # Return result\n return result",
"def merge(self, other):\n\n if not self.can_merge(other):\n raise ValueError('These protocols can not be safely merged.')\n\n inputs_to_consider = self._find_inputs_to_merge()\n\n for input_path in inputs_to_consider:\n\n merge_behavior = getattr(type(self), input_path.property_name).merge_behavior\n\n if merge_behavior == MergeBehaviour.ExactlyEqual:\n continue\n\n if (isinstance(self.get_value(input_path), ProtocolPath) or\n isinstance(other.get_value(input_path), ProtocolPath)):\n\n continue\n\n if merge_behavior == InequalityMergeBehaviour.SmallestValue:\n value = min(self.get_value(input_path), other.get_value(input_path))\n elif merge_behavior == InequalityMergeBehaviour.LargestValue:\n value = max(self.get_value(input_path), other.get_value(input_path))\n else:\n raise NotImplementedError()\n\n self.set_value(input_path, value)\n\n return {}",
"def add_numeric_op(attr_name, op):\n def closure(self, other):\n return VTKCompositeDataArray._numeric_op(self, other, op)\n closure.__name__ = attr_name\n attr[attr_name] = closure",
"def _numeric_op(self, other, attr_name):\n l = reshape_append_ones(self, other)\n return getattr(numpy.ndarray, attr_name)(l[0], l[1])",
"def _add_to_schema(self, new: dict):\n self._defaults.update(new)\n self._migrate()",
"def morph_numeric(lex, ord_or_card, value, digit):\n raise NotImplementedError"
]
| [
"0.613379",
"0.6076565",
"0.56533253",
"0.55631006",
"0.53362817",
"0.50358003",
"0.49248102",
"0.47552547",
"0.47158542",
"0.46539938",
"0.46114725",
"0.45533928",
"0.454039",
"0.4514584",
"0.44801235",
"0.44582006",
"0.44557527",
"0.43551463",
"0.42997453",
"0.42904726",
"0.42803293",
"0.420784",
"0.41999596",
"0.41979924",
"0.41876438",
"0.41868934",
"0.4183544",
"0.418285",
"0.4181175",
"0.41721687"
]
| 0.72852665 | 0 |
Creates a BooleanAttributeSchema instance. | def __init__(self, name, values=None, exclusive=False, default=None):
super(BooleanAttributeSchema, self).__init__(
name, exclusive=exclusive, default=default
)
self.values = set(values or [])
self.validate_default_value() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def schema():\n return vol.Schema({\"venus\": cv.boolean, \"mars\": cv.boolean, \"jupiter\": cv.boolean})",
"def form_Boolean(request):\n schema = schemaish.Structure()\n schema.add('myBooleanField', schemaish.Boolean())\n form = formish.Form(schema, 'form')\n return form",
"def __init__(\n self, name: str, index: Optional[int] = 0, label: Optional[str] = None,\n help: Optional[str] = None, default: Optional[bool] = None,\n required: Optional[bool] = False, group: Optional[str] = None\n ):\n super(Bool, self).__init__(\n dtype=PARA_BOOL,\n name=name,\n index=index,\n label=label,\n help=help,\n default=default,\n required=required,\n group=group\n )",
"def attributes(self):\n attrs_ = super(BooleanAttributeSchema, self).attributes()\n attrs_.append(\"values\")\n return attrs_",
"def test_boolean(self):\n endpoint = self.api.boolean\n assert endpoint.openapi_types['body'] == (bool,)\n assert endpoint.settings['response_type'] == (bool,)",
"def validate_boolean_attribute(tag, attribute_name, attribute_value):\n if not attribute_value:\n return\n\n if not isinstance(attribute_value, bool):\n raise AttributeError('<{tag}>: {attribute_name} attribute should be a '\n 'boolean value.'\n .format(tag=tag, attribute_name=attribute_name))",
"def boolean(self, column, nullable=False):\n self._last_column = self.table.add_column(column, \"boolean\", nullable=nullable)\n return self",
"def test_get_field_type_boolean(self):\n db_introspection = DatabaseIntrospection(self.connection)\n self.assertEqual(\n db_introspection.get_field_type(TypeCode.BOOL, description=None),\n \"BooleanField\",\n )",
"def bool_attr(attr):\n if attr.lower() == \"true\":\n val = True\n elif attr.lower() == \"false\":\n val = False\n else:\n raise EzXMLError(\"Must be \"\\\n \"'true' or 'false'. Not %s\" % (attr))\n return val",
"def as_bool(self):\n return self.as_type(bool)",
"def _validate_bool(instance: typing.Dict[str, typing.Any], schema: typing.Dict[str, typing.Any], path: typing.List[str]) -> None:\n if not isinstance(instance, dict):\n raise ValidationError('instance must be dict', path)\n valid_keys = {'_type', 'value'}\n required_keys = valid_keys\n schema_keys = set(instance.keys())\n invalid_keys = schema_keys - valid_keys - opt_federation_keys\n if invalid_keys:\n raise ValidationError('unexpected keys in schema: {}'.format(invalid_keys), path)\n missing_keys = required_keys - schema_keys\n if missing_keys:\n raise ValidationError('missing keys in schema: {}'.format(missing_keys), path)\n if instance['_type'] != 'bool':\n raise ValidationError('expected _type \"bool\"', path)\n if not isinstance(instance['value'], bool):\n raise ValidationError('value must be bool', path)",
"def Shape(self, *args):\n return _BRepAlgo.BRepAlgo_BooleanOperations_Shape(self, *args)",
"def ShapeFrom(self, *args):\n return _BRepAlgo.BRepAlgo_BooleanOperations_ShapeFrom(self, *args)",
"def make_bool(value):\n def make_value():\n return verify.Term(verify.BOOLEAN, value)\n return make_value",
"def is_boolean_type(self):\n raise exceptions.NotImplementedError()",
"def Shape1(self, *args):\n return _BRepAlgo.BRepAlgo_BooleanOperation_Shape1(self, *args)",
"def test_boolean_default(self):\r\n default = True\r\n prop = Boolean(default=default, required=True)\r\n self.assertEqual(prop.to_database(None), prop.to_database(default))",
"def test_boolean_custom_values(self):\n true_values = ['YES', 'yes', 'Yes']\n false_values = ['NO', 'no', 'No']\n wrong_values = ['true', 'false', 'True', 'False', 'y', 'n', 'Y', 'N', 't', '1', 1, '0', 0]\n descriptor = self.base_field_descriptor\n descriptor['type'] = 'boolean'\n # only 'default' format\n descriptor['format'] = 'default'\n descriptor['trueValues'] = true_values\n descriptor['falseValues'] = false_values\n\n f = SchemaField(descriptor)\n for v in true_values:\n self.assertTrue(f.cast(v))\n for v in false_values:\n self.assertFalse(f.cast(v))\n for v in wrong_values:\n with self.assertRaises(Exception):\n f.cast(v)",
"def add_boolean(self, name, **kwargs):\n self.add(Flags.BooleanFlag(name, **kwargs))",
"def test_boolean_default_values(self):\n true_values = ['True', 'true', 'True', 'YES', 'yes', 'y', 'Y', 'Yes']\n false_values = ['FALSE', 'false', 'False', 'NO', 'no', 'n', 'N', 'No']\n wrong_values = [2, 3, 'FLSE', 'flse', 'NON', 'oui', 'maybe', 'not sure', 't', '1', 1, '0', 0]\n descriptor = self.base_field_descriptor\n descriptor['type'] = 'boolean'\n # only 'default' format\n descriptor['format'] = 'default'\n f = SchemaField(descriptor)\n for v in true_values:\n self.assertTrue(f.cast(v))\n for v in false_values:\n self.assertFalse(f.cast(v))\n for v in wrong_values:\n with self.assertRaises(Exception):\n f.cast(v)",
"def form_BooleanWithDefaults(request):\n schema = schemaish.Structure()\n schema.add('myBooleanTrue', schemaish.Boolean())\n schema.add('myBooleanFalse', schemaish.Boolean())\n form = formish.Form(schema, 'form')\n form.defaults = {'myBooleanTrue':True,'myBooleanFalse':False}\n return form",
"def to_bool(data, true_label, **kwargs):\n return Component(\n \"ToBool\",\n arguments={\n 'data': Component.of(data),\n 'true_label': Component.of(true_label)\n },\n options={\n \n },\n constraints=kwargs)",
"def get_column_type(cls, **kwargs: Any) -> Any:\n return sqlalchemy.Boolean()",
"def set_bool_attribute(self, id: str, b: Optional[bool]):\n self.set_attribute(id, None if not b else ConstInt(1))",
"def CONST_BOOL(self, t):\n t.value = False if t.value == '#false' else True\n return t",
"def test_for_bool(self, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata(\n \"for bool b in [True, False]\\n\\tUnaryGate(b, 0) | 0\"\n )\n assert np.all(\n bb._forvar[\"b\"] == np.array([True, False])\n )",
"def get_xml_bool_attribute(elem, attribute, default=None):\n value = elem.get(attribute, default)\n if value is None:\n raise XMLSchemaKeyError(attribute)\n elif value in ('true', '1') or value is True:\n return True\n elif value in ('false', '0') or value is False:\n return False\n else:\n raise XMLSchemaTypeError(\"an XML boolean value is required for attribute %r\" % attribute)",
"def bool_spec(\n tag: Tag = \"bool\",\n allowed_values: Optional[Set[bool]] = None,\n conformer: Optional[Conformer] = None,\n) -> Spec:\n\n assert allowed_values is None or all(isinstance(e, bool) for e in allowed_values)\n\n @pred_to_validator(\"Value '{value}' is not boolean\", complement=True)\n def is_bool(v) -> bool:\n return isinstance(v, bool)\n\n validators = [is_bool]\n\n if allowed_values is not None:\n\n @pred_to_validator(\n f\"Value '{{value}}' not in {allowed_values}\", complement=True\n )\n def is_allowed_bool_type(v) -> bool:\n return v in allowed_values # type: ignore\n\n validators.append(is_allowed_bool_type)\n\n return ValidatorSpec.from_validators(tag, *validators, conformer=conformer)",
"def build_active_schema(cls, attrs):\n schema = cls()\n schema.add_attributes(attrs)\n return schema",
"def create_type_widget(self):\n self._chb_bool = QtWidgets.QCheckBox()\n return self._chb_bool"
]
| [
"0.608737",
"0.5819675",
"0.566594",
"0.5590935",
"0.5580326",
"0.55594844",
"0.55293834",
"0.55084515",
"0.5463669",
"0.5422187",
"0.54039186",
"0.532823",
"0.53129405",
"0.52992815",
"0.5292598",
"0.52653176",
"0.5248311",
"0.51945597",
"0.5185645",
"0.5181672",
"0.5181354",
"0.51759475",
"0.5167901",
"0.51530147",
"0.5150283",
"0.5137681",
"0.5113329",
"0.5111878",
"0.50811636",
"0.5057021"
]
| 0.6226163 | 0 |
Merges the given BooleanAttributeSchema into this schema. | def merge_schema(self, schema):
super(BooleanAttributeSchema, self).merge_schema(schema)
self.values.update(schema.values) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def merge_schema(self, schema):\n self.validate_schema(schema)\n\n if self.exclusive is False:\n self.exclusive = schema.exclusive\n\n if self.default is None:\n self.default = schema.default",
"def merge_schema(self, schema):\n for _, attr_schema in schema.iter_attributes():\n self.merge_attribute_schema(attr_schema)",
"def merge_attribute_schema(self, attr_schema):\n name = attr_schema.name\n if name not in self.schema:\n self.schema[name] = attr_schema\n else:\n self.schema[name].merge_schema(attr_schema)",
"def merge(self, object_class):\n other_oc = self.schema.get_object_class(object_class)\n self.required_attrs |= other_oc.required_attrs\n self.allowed_attrs |= other_oc.allowed_attrs",
"def addBool(self, b):\n self._buf.push(_B_STRUCT.pack(b))",
"def __init__(self, name, values=None, exclusive=False, default=None):\n super(BooleanAttributeSchema, self).__init__(\n name, exclusive=exclusive, default=default\n )\n self.values = set(values or [])\n self.validate_default_value()",
"def attributes(self):\n attrs_ = super(BooleanAttributeSchema, self).attributes()\n attrs_.append(\"values\")\n return attrs_",
"def merge_schema(self, schema):\n super(CategoricalAttributeSchema, self).merge_schema(schema)\n self.categories.update(schema.categories)",
"def ShapeFrom(self, *args):\n return _BRepAlgo.BRepAlgo_BooleanOperations_ShapeFrom(self, *args)",
"def set_bool_attribute(self, id: str, b: Optional[bool]):\n self.set_attribute(id, None if not b else ConstInt(1))",
"def Shape2(self, *args):\n return _BRepAlgo.BRepAlgo_BooleanOperation_Shape2(self, *args)",
"def writeAttributeBool(self, *args):\n return _libsbml.XMLOutputStream_writeAttributeBool(self, *args)",
"def add_bool_as_scalar(self, node):\n if node.value == 'true' or node.value == 'false' :\n return self.construct_yaml_bool(node)\n return self.construct_scalar(node)",
"def write_bool(self, b: bool) -> None:\n self.buffer += struct.pack(\"<?\", b)",
"def generate_schema_yaml(self, overwrite=False):\n if self._schema_path and os.path.isfile(self._schema_path):\n if not overwrite:\n raise RuntimeError('Schema file `{}` already exists! Set `overwrite=True` to overwrite.'.format(self._schema_path))\n warnings.warn('Overwriting schema file `{0}`, which is backed up at `{0}.bak`'.format(self._schema_path))\n shutil.copyfile(self._schema_path, self._schema_path + '.bak')\n\n schema = self._generate_schema_from_datafiles(self._datasets)\n\n for col, schema_this in schema.items():\n if np.dtype(schema_this['dtype']).kind == 'b' and (\n col.endswith('_flag_bad') or col.endswith('_flag_noGoodPixels')):\n schema_this['default'] = True\n\n with open(self._schema_path, 'w') as schema_stream:\n yaml.dump(schema, schema_stream)",
"def Shape(self, *args):\n return _BRepAlgo.BRepAlgo_BooleanOperations_Shape(self, *args)",
"def Shape1(self, *args):\n return _BRepAlgo.BRepAlgo_BooleanOperation_Shape1(self, *args)",
"def _validate_bool(instance: typing.Dict[str, typing.Any], schema: typing.Dict[str, typing.Any], path: typing.List[str]) -> None:\n if not isinstance(instance, dict):\n raise ValidationError('instance must be dict', path)\n valid_keys = {'_type', 'value'}\n required_keys = valid_keys\n schema_keys = set(instance.keys())\n invalid_keys = schema_keys - valid_keys - opt_federation_keys\n if invalid_keys:\n raise ValidationError('unexpected keys in schema: {}'.format(invalid_keys), path)\n missing_keys = required_keys - schema_keys\n if missing_keys:\n raise ValidationError('missing keys in schema: {}'.format(missing_keys), path)\n if instance['_type'] != 'bool':\n raise ValidationError('expected _type \"bool\"', path)\n if not isinstance(instance['value'], bool):\n raise ValidationError('value must be bool', path)",
"def FrameAddBoolFrame(builder, boolFrame):\n return AddBoolFrame(builder, boolFrame)",
"def add_boolean(self, name, **kwargs):\n self.add(Flags.BooleanFlag(name, **kwargs))",
"def register_bool(self, name, short=None, default=None, group=None, help=None):\n self._register(name, self._parse_bool, short=short, default=default,\n group=group, help=help)",
"def boolean(self, column, nullable=False):\n self._last_column = self.table.add_column(column, \"boolean\", nullable=nullable)\n return self",
"def form_BooleanWithDefaults(request):\n schema = schemaish.Structure()\n schema.add('myBooleanTrue', schemaish.Boolean())\n schema.add('myBooleanFalse', schemaish.Boolean())\n form = formish.Form(schema, 'form')\n form.defaults = {'myBooleanTrue':True,'myBooleanFalse':False}\n return form",
"async def put_bool( # pylint: disable=inconsistent-return-statements\n self, complex_body: JSON, *, content_type: str = \"application/json\", **kwargs: Any\n ) -> None:",
"def validate_subset_of_schema(self, schema):\n super(BooleanAttributeSchema, self).validate_subset_of_schema(schema)\n\n if not self.values.issubset(schema.values):\n raise AttributeSchemaError(\n \"Values %s are not a subset of %s\"\n % (self.values, schema.values)\n )",
"def schema():\n return vol.Schema({\"venus\": cv.boolean, \"mars\": cv.boolean, \"jupiter\": cv.boolean})",
"async def put_bool( # pylint: disable=inconsistent-return-statements\n self, complex_body: IO, *, content_type: str = \"application/json\", **kwargs: Any\n ) -> None:",
"def __call__(self, parser, namespace, value, unused_option_string=None):\n try:\n setattr(namespace, self.dest, util.parse_bool(value))\n except TypeError as err:\n raise argparse.ArgumentError(self, 'Boolean value required') from err",
"def form_Boolean(request):\n schema = schemaish.Structure()\n schema.add('myBooleanField', schemaish.Boolean())\n form = formish.Form(schema, 'form')\n return form",
"def rebuild(self, dframe, overwrite=False):\n current_schema = self\n new_schema = schema_from_dframe(dframe, self)\n\n if current_schema and not overwrite:\n # merge new schema with existing schema\n current_schema.update(new_schema)\n new_schema = current_schema\n\n return new_schema"
]
| [
"0.61939293",
"0.58525634",
"0.57368326",
"0.50372493",
"0.5014537",
"0.5005869",
"0.49757817",
"0.49018866",
"0.46996504",
"0.46879697",
"0.46091363",
"0.45477447",
"0.4533857",
"0.4531039",
"0.45263448",
"0.4515642",
"0.45118606",
"0.4507055",
"0.4490799",
"0.44091803",
"0.4384848",
"0.43392572",
"0.43113795",
"0.42946386",
"0.42938942",
"0.42900667",
"0.42843512",
"0.42812002",
"0.42571127",
"0.4249536"
]
| 0.78415316 | 0 |
Returns whether or not the container contains an Attribute with the given name. | def has_attr_with_name(self, name):
for attr in self:
if attr.name == name:
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def has_attribute(self, name):\n return name in self.schema",
"def hasAttribute(self, attrib):\n return self._dqa(attrib) in self.attributes",
"def has_attribute(self, name):\n\n pass",
"def has_attr(self, name):\n return name in self and not self[name] in EMPTY_VALUES",
"def has_attr(self, key):\n return key in self.attrs",
"def is_element_attribute(element, attribute_name):\n return element.get(attribute_name) is not None",
"def has_attribute(self, key):\n return key in self.__dict",
"def has_attribute(self, attribute: str) -> bool:\n return any([\n key_node.value == attribute for key_node, _ in self.yaml_node.value\n ])",
"def has_attribute(self, attribute):\n return (attribute in self.attribute_list)",
"def has_value(self, attribute_name):\n return hasattr(self, '%s__' % attribute_name)",
"def has_value(self, attribute_name):\n return hasattr(self, '%s__' % attribute_name)",
"def __contains__(self, attribute_name):\n return False # pragma: no cover",
"def is_attr_exist(self, section_name: str, attr_name: str) -> bool:\n success = False\n try:\n response = self._vault_api.read_secret(path=section_name.upper(), mount_point=self.mount_point)\n keys = list(response[\"data\"][\"data\"].keys())\n success = attr_name.upper() in keys\n except InvalidPath:\n pass\n return success",
"def is_attr_exist(self, section_name: str, attr_name: str) -> bool:\n pass",
"def hasAttribute(self, *args):\n return _libsbml.XMLAttributes_hasAttribute(self, *args)",
"def attribute_exists(se: SchemaExplorer, attribute_label: str) -> bool:\n schema_graph = se.get_nx_schema()\n\n if attribute_label in schema_graph.nodes:\n return True\n return False",
"def is_attr_exist(self, section_name: str, attr_name: str) -> bool:\n config = ConfigParser(allow_no_value=True)\n config.read(self.connection_string)\n\n result = False\n for section in config.sections():\n if section.lower().replace(' ', '_') == section_name.lower().replace(' ', '_'):\n for attr in config[section]:\n if attr.lower().replace(' ', '_') == attr_name.lower().replace(' ', '_'):\n result = True\n\n return result",
"def is_valid_attribute_name(self, name):\n try:\n self.validate_attribute_name(name)\n return True\n except etal.LabelsSchemaError:\n return False",
"def is_attribute(self):\r\n return conf.lib.clang_isAttribute(self)",
"def has_attribute_key(graph_element, attribute_key):\n return attribute_key in graph_element.get_attributes().keys() # return whether key is present",
"def hasAttr(self, *args):\n return _libsbml.XMLToken_hasAttr(self, *args)",
"def contains_attr(self, gi):\n if gi is None:\n return False\n for gi_obj in self.gradual_items:\n if gi.attribute_col == gi_obj.attribute_col:\n return True\n return False",
"def is_global_attr(self, attr_name):\n\n return attr_name in self._global_attr_names",
"def validate_attribute_name(self, name):\n if not self.has_attribute(name):\n raise AttributeContainerSchemaError(\n \"Attribute '%s' is not allowed by the schema\" % name\n )",
"def __contains__(self, item):\n return item in self.attrs",
"def has_keypoints_attribute(self, label, attr_name):\n if not self.has_keypoints_label(label):\n return False\n\n return self.schema[label].has_attribute(attr_name)",
"def has_attribute(*attrs):\n\n @meta\n def check(cls):\n return all(hasattr(cls, a) for a in attrs)\n\n return check",
"def __contains__(self, name):\n try:\n self[name]\n return True\n except KeyError:\n return False",
"def has_attributes(self):\n return bool(self.attrs)",
"def attr_exists(attribute, user_input):\n\n response = table.scan(\n FilterExpression = Attr(attribute).eq(user_input)\n )\n\n if response['Items']:\n return True\n\n return False"
]
| [
"0.79942447",
"0.77154106",
"0.76591486",
"0.7382911",
"0.733944",
"0.73309386",
"0.7262436",
"0.7184343",
"0.71637976",
"0.71544474",
"0.71544474",
"0.71331775",
"0.71263033",
"0.7110779",
"0.7050013",
"0.69409716",
"0.69250476",
"0.6889576",
"0.68727183",
"0.6751156",
"0.6633142",
"0.6620342",
"0.65646243",
"0.6494135",
"0.64778864",
"0.6454384",
"0.64324456",
"0.6426101",
"0.64074856",
"0.6366352"
]
| 0.8376568 | 0 |
Gets all `Attribute`s with the given name. | def get_attrs_with_name(self, name):
return self.get_matches([lambda attr: attr.name == name]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_attr_values_with_name(self, name):\n return [attr.value for attr in self.get_attrs_with_name(name)]",
"def queryAttributeNames(name):\n\n header, rows = querySciDB(\"attributes(%s)\" % name)\n return [row[1].translate(None, \"\\\"\") for row in rows]",
"def getAttributeByName(self, name):\n\n for eachAttribute in self._attributes:\n if eachAttribute.getName() == name:\n return eachAttribute\n\n return None",
"def getAttributes(self, name):\n ent = self.entities[name]\n\n attrs = []\n while ent != None:\n this_ent_attrs = copy.copy(ent[\"attributes\"])\n this_ent_attrs.reverse()\n attrs.extend(this_ent_attrs)\n ent = self.entities.get(ent[\"supertype\"], None)\n\n attrs.reverse()\n return attrs",
"def getAttributes(self, name):\r\n ent = self.entities[name]\r\n\r\n attrs = []\r\n while ent != None:\r\n this_ent_attrs = copy.copy(ent[\"attributes\"])\r\n this_ent_attrs.reverse()\r\n attrs.extend(this_ent_attrs)\r\n ent = self.entities.get(ent[\"supertype\"], None)\r\n\r\n attrs.reverse()\r\n return attrs",
"def get_attribute_by_name(self, name):\n if name in self._attributes:\n return self._attributes[name]",
"def __getattr__(self, name):\n if not name in self._attrs.iterkeys():\n raise AttributeError(name)\n return self._attrs[name]",
"def get_attrs(self):\n attrs = []\n for attribute in self.__dict__.keys():\n attrs.append(attribute)",
"def getAttribute(self, name):\n \n return self[self._name][name]",
"def getattribute(self, name):\n return self.attributes[name]",
"def getAllAttributes(self, limit=None):\n return self.getAttributeRange(limit=limit)",
"def attribute(self, name):\n\n attrs = [attr for attr in self.attributes if attr.name == name]\n\n if attrs:\n return attrs[0]\n else:\n raise NoSuchAttributeError(name)",
"def _get_attrs(self, names):\n assert isinstance(names, str)\n names = names.replace(\",\", \" \").split(\" \")\n res = []\n for n in names:\n if n == \"\":\n continue\n if n not in self.__dict__:\n raise KeyError(\"Unknown name for Container attribute: '{}'\".format(n))\n res.append(getattr(self, n))\n return res",
"def get_attribute_by_name(attributes, attributeName):\n for attrib in attributes:\n if attrib['name'] == attributeName:\n return attrib\n return None",
"def get_attribute_list(self):\n attributes = [attr for attr in vars(self.entries[0]) if not attr.startswith('__')]\n return attributes",
"def get_attribute(self, name):\n return self.element.get_attribute(name)",
"def get_attribute_list(self):\n return self.dp.get_attribute_list()",
"def get_attributes(self, item_name, attribute_name=None,\r\n consistent_read=False, item=None):\r\n return self.connection.get_attributes(self, item_name, attribute_name,\r\n consistent_read, item)",
"def get_attr(attributes, name):\n try:\n return attributes.getValue(name)\n except KeyError:\n return None",
"def has_attr_with_name(self, name):\n for attr in self:\n if attr.name == name:\n return True\n\n return False",
"def get_attribute(self, name):\n\n pass",
"def get_all_attribute(self):\n for attr, value in self.__dict__.items():\n print(attr, value)",
"def get_attribute_values_from_log(log, attribute_name):\n attributes = attributes_filter.get_attribute_values(log, attribute_name)\n return attributes",
"def get_attributes(self):\n\n endpoint = self._get_api_endpoint() + '/attributes'\n results = self.tq.get(endpoint, withp='attribute')\n if 'data' not in results:\n return {}\n\n return results['data']\n # tr = {}\n # for attribute in results['data']:\n # tr[attribute['attribute']['name']] = attribute['value']\n # return tr",
"def attrs(self):\n return [name for name in self.traits() if self.trait_metadata(name, \"attr\")]",
"def attrs(self):\n return [name for name in self.traits() if self.trait_metadata(name, \"attr\")]",
"def GetAttribute(self, name):\n ret = libxml2mod.xmlTextReaderGetAttribute(self._o, name)\n return ret",
"def _read_attributes(root):\n output_list = []\n for _, value in enumerate(root[0][2]):\n attr = Attribute(value)\n output_list.append(attr)\n return output_list",
"def getDataAttributes(self):\n asRet = [];\n asAttrs = dir(self);\n for sAttr in asAttrs:\n if sAttr[0] == '_' or sAttr[0] == 'k':\n continue;\n if sAttr in self.kasInternalAttributes:\n continue;\n oValue = getattr(self, sAttr);\n if callable(oValue):\n continue;\n asRet.append(sAttr);\n return asRet;",
"def get_attr(self, name: str):\n return self.call(name)"
]
| [
"0.7516274",
"0.72581846",
"0.70912445",
"0.69646543",
"0.6895492",
"0.68367636",
"0.6494162",
"0.6411804",
"0.63663906",
"0.6317672",
"0.62741804",
"0.6262732",
"0.62604016",
"0.6198323",
"0.60872364",
"0.6051567",
"0.59799707",
"0.59614956",
"0.59420294",
"0.5920864",
"0.59107053",
"0.59011596",
"0.58694243",
"0.58677775",
"0.58656675",
"0.58656675",
"0.58388686",
"0.5829391",
"0.5813514",
"0.5801245"
]
| 0.77114695 | 0 |
Gets a list of values for all `Attribute`s with the given name. | def get_attr_values_with_name(self, name):
return [attr.value for attr in self.get_attrs_with_name(name)] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def values(self):\n return [getattr(self, a.name) for a in self.__attrs_attrs__]",
"def queryAttributeNames(name):\n\n header, rows = querySciDB(\"attributes(%s)\" % name)\n return [row[1].translate(None, \"\\\"\") for row in rows]",
"def get_attrs_with_name(self, name):\n return self.get_matches([lambda attr: attr.name == name])",
"def getAttributes(self, name):\n ent = self.entities[name]\n\n attrs = []\n while ent != None:\n this_ent_attrs = copy.copy(ent[\"attributes\"])\n this_ent_attrs.reverse()\n attrs.extend(this_ent_attrs)\n ent = self.entities.get(ent[\"supertype\"], None)\n\n attrs.reverse()\n return attrs",
"def getAttributes(self, name):\r\n ent = self.entities[name]\r\n\r\n attrs = []\r\n while ent != None:\r\n this_ent_attrs = copy.copy(ent[\"attributes\"])\r\n this_ent_attrs.reverse()\r\n attrs.extend(this_ent_attrs)\r\n ent = self.entities.get(ent[\"supertype\"], None)\r\n\r\n attrs.reverse()\r\n return attrs",
"def get_attribute_values_from_log(log, attribute_name):\n attributes = attributes_filter.get_attribute_values(log, attribute_name)\n return attributes",
"def get_attribute_list(self):\n return self.dp.get_attribute_list()",
"def get_attribute_list(self):\n attributes = [attr for attr in vars(self.entries[0]) if not attr.startswith('__')]\n return attributes",
"def get_attrs(self):\n attrs = []\n for attribute in self.__dict__.keys():\n attrs.append(attribute)",
"def values(self):\n return self.attrs.values()",
"def getDataAttributes(self):\n asRet = [];\n asAttrs = dir(self);\n for sAttr in asAttrs:\n if sAttr[0] == '_' or sAttr[0] == 'k':\n continue;\n if sAttr in self.kasInternalAttributes:\n continue;\n oValue = getattr(self, sAttr);\n if callable(oValue):\n continue;\n asRet.append(sAttr);\n return asRet;",
"def listattribute(self, varName):\n fName = \"\"\n if varName in self.statVars:\n fName = self.statVars[varName][0]\n elif varName in self.timeVars:\n fName = self.timeVars[varName][0][0]\n if fName:\n var = cdms2.open(fName, 'r')(varName)\n return var.listattributes()\n else:\n return []",
"def _get_attrs(self, names):\n assert isinstance(names, str)\n names = names.replace(\",\", \" \").split(\" \")\n res = []\n for n in names:\n if n == \"\":\n continue\n if n not in self.__dict__:\n raise KeyError(\"Unknown name for Container attribute: '{}'\".format(n))\n res.append(getattr(self, n))\n return res",
"def get_all_attribute(self):\n for attr, value in self.__dict__.items():\n print(attr, value)",
"def _read_attributes(root):\n output_list = []\n for _, value in enumerate(root[0][2]):\n attr = Attribute(value)\n output_list.append(attr)\n return output_list",
"def getAllAttributes(self, limit=None):\n return self.getAttributeRange(limit=limit)",
"def attributes(self):\n\n return list(self._attributes.values())",
"def values(self):\n return [self[name] for name in self.keys()]",
"def get_similar_attr(self, attrname) -> list:\n Similar_corpus = self._similar_corpus\n for key in Similar_corpus:\n if key == attrname:\n return [key] + Similar_corpus[key]\n return []",
"def get_similar_attr(self, attrname) -> list:\n Similar_corpus = self._similar_corpus\n for key in Similar_corpus:\n if key == attrname:\n return [key] + Similar_corpus[key]\n return []",
"def getAttributeByName(self, name):\n\n for eachAttribute in self._attributes:\n if eachAttribute.getName() == name:\n return eachAttribute\n\n return None",
"def getattribute(self, name):\n return self.attributes[name]",
"def get_attributes(self):\n\n endpoint = self._get_api_endpoint() + '/attributes'\n results = self.tq.get(endpoint, withp='attribute')\n if 'data' not in results:\n return {}\n\n return results['data']\n # tr = {}\n # for attribute in results['data']:\n # tr[attribute['attribute']['name']] = attribute['value']\n # return tr",
"def getAttribute(self, name):\n \n return self[self._name][name]",
"def get_attribute_by_name(self, name):\n if name in self._attributes:\n return self._attributes[name]",
"def attrs(self):\n return [name for name in self.traits() if self.trait_metadata(name, \"attr\")]",
"def attrs(self):\n return [name for name in self.traits() if self.trait_metadata(name, \"attr\")]",
"def values(self):\r\n return [self[k] for k in self]",
"def get_attr(self, attr_name: str, indices: VecEnvIndices = None) -> List[Any]:\n raise NotImplementedError()",
"def _attrlist(self,obj, attrs):\n vlist = [obj.__getattribute__(attr) for attr in attrs]\n return vlist"
]
| [
"0.7520641",
"0.7162945",
"0.705591",
"0.7045606",
"0.7021155",
"0.6939452",
"0.6783374",
"0.6768759",
"0.66577613",
"0.66454077",
"0.6569157",
"0.6465251",
"0.6424673",
"0.6266206",
"0.6243621",
"0.6228554",
"0.6223528",
"0.61864316",
"0.61479187",
"0.61479187",
"0.6135107",
"0.6068368",
"0.6050388",
"0.6047086",
"0.60227394",
"0.6011303",
"0.6011303",
"0.60043025",
"0.59779704",
"0.59650666"
]
| 0.90423757 | 0 |
Pops constant attributes from this container. | def pop_constant_attrs(self):
return self.pop_elements([lambda attr: attr.constant]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pop_attributes(self):\n attrs = self.attrs\n self.clear_attributes()\n return attrs",
"def clear_attributes(self):\n self.attrs = etad.AttributeContainer()",
"def unsetConstant(self):\n return _libsbml.Compartment_unsetConstant(self)",
"def popFrameVariables(self):\n del self.frame_variables_stack[-1]\n del self.frame_type_descriptions[-1]",
"def clear_attrs(self):\n self._attributes.clear()",
"def unsetConstant(self):\n return _libsbml.Parameter_unsetConstant(self)",
"def clear_cached_attributes(self):\n setattr(self, '_atoms', None)\n setattr(self, '_bonds', None)\n setattr(self, '_rings', None)\n setattr(self, '_ring_systems', None)",
"def unsetConstant(self):\n return _libsbml.Species_unsetConstant(self)",
"def remove_attr(self, key):\n del self.header[key]",
"def unload(self) -> None:\n for attr in self._attrs:\n setattr(self, attr, None)",
"def del_attrib(self, key):\n self.aux_attrib.pop(key)\n self.aux_attrib_args.pop(key)",
"def unsetConstant(self):\n return _libsbml.QualitativeSpecies_unsetConstant(self)",
"def unsetConstant(self):\n return _libsbml.LocalParameter_unsetConstant(self)",
"def clear(self, attrname):\n self.__dict__['_'+attrname] = False",
"def unsetConstant(self):\n return _libsbml.SpeciesReference_unsetConstant(self)",
"def remove_attribute(self, attribute_key):\n self.attributes.__delitem__(attribute_key) # delete the input key-value pair",
"def remove_attribute(self, attribute_key):\n self.attributes.__delitem__(attribute_key) # delete the input key-value pair",
"def remove_attribute(self, attribute_key):\n self.attributes.__delitem__(attribute_key) # delete the input key-value pair",
"def _remove_swarm_keys(self):\n for key in SWARM_PROPERTIES:\n self.spec.pop(key, None)",
"def unassign(self) -> None:\n self._row.remove(self._value)\n self._column.remove(self._value)\n self._box.remove(self._value)\n self._value = 0",
"def clearAttributes(self):\n return _libsbml.XMLToken_clearAttributes(self)",
"def clear_cxt_vars(cxt):\n if hasattr(cxt, '_cl'):\n del cxt._cl\n if hasattr(cxt, '_pairs'):\n del cxt._pairs",
"def remove_attribute(self, name):\n\n pass",
"def reset_attr(self, server, attribute):\n\t\tattribute = str(attribute)\n\t\tcfg = self.get_cfg(server)\n\t\tif cfg:\n\t\t\treturn cfg.pop(attribute, None)",
"def attr_remove(self):\n def _del_if_in(obj, attr):\n if attr in obj:\n del obj[attr]\n if self._modifier_exists(REMOVE_KEY):\n to_remove = self[CONFIG_KEY][SAMPLE_MODS_KEY][REMOVE_KEY]\n _LOGGER.debug(\"Removing attributes: {}\".format(to_remove))\n for attr in to_remove:\n [_del_if_in(s, attr) for s in self.samples]",
"def popitem(self):\n return super(ReadOnlyDict, self).popitem()",
"def _del(self) -> None:\n self.variables.pop(prop_name, None)",
"def drop_attr(self, attr_name): # DONE\n self.data.drop(attr_name, axis=1, inplace=True)\n print(self.data)",
"def deleteATTR(sel=None):\n if sel == None:\n sel = pm.ls(sl=1)\n for obj in sel:\n #remove customAttr with keyable\n attrs = pm.listAttr(obj,k=1)\n listAttrs = ['visibility','translateX','translateY','translateZ','rotateX','rotateY','rotateZ','scaleX','scaleY','scaleZ']\n for A in attrs:\n if A not in listAttrs:\n pm.setAttr(obj+'.'+A,l=0)\n pm.delete(obj+'.'+A,icn=1)\n pm.deleteAttr(obj, at = A)\n #remove customAttr with Nonkeyable\n attrs = pm.listAttr(obj,cb=1)\n listAttrs = ['visibility','translateX','translateY','translateZ','rotateX','rotateY','rotateZ','scaleX','scaleY','scaleZ']\n for A in attrs:\n if A not in listAttrs:\n pm.setAttr(obj+'.'+A,l=0)\n pm.delete(obj+'.'+A,icn=1)\n pm.deleteAttr(obj, at = A)",
"def test_remove_a_single_attribute(self):\n pass"
]
| [
"0.6409805",
"0.6064282",
"0.6012825",
"0.59316134",
"0.5912364",
"0.5777581",
"0.5755103",
"0.57506615",
"0.5728127",
"0.5702524",
"0.5696803",
"0.5662745",
"0.55867356",
"0.55519366",
"0.55496734",
"0.55287945",
"0.55287945",
"0.55287945",
"0.5513677",
"0.55119723",
"0.54826707",
"0.54568005",
"0.54442257",
"0.5435784",
"0.5425009",
"0.5410428",
"0.54044604",
"0.540344",
"0.53907466",
"0.5377624"
]
| 0.77252495 | 0 |
Removes attributes from this container that are not compliant with the given schema. Only the first observation of each exclusive attribute is kept (if applicable). | def filter_by_schema(self, schema, constant_schema=None):
if constant_schema is None:
constant_schema = schema
get_schema = lambda attr: constant_schema if attr.constant else schema
# Remove attributes with invalid names
filter_fcn = lambda attr: get_schema(attr).has_attribute(attr.name)
self.filter_elements([filter_fcn])
#
# Filter objects by their schemas
#
del_inds = set()
found_names = set()
for idx, attr in enumerate(self):
name = attr.name
# Remove attributes that violate schema
attr_schema = get_schema(attr).get_attribute_schema(name)
if not attr_schema.is_valid_attribute(attr):
del_inds.add(idx)
# Enforce exclusivity, if necessary
is_exclusive = get_schema(attr).is_exclusive_attribute(name)
if is_exclusive:
if name in found_names:
del_inds.add(idx)
else:
found_names.add(name)
self.delete_inds(del_inds) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_attributes(self, remove_attrs):\n remove = []\n for attr in self.data:\n for prefix in remove_attrs:\n if attr.startswith(prefix):\n remove.append(attr)\n break\n\n self.data = self.data.drop(remove, axis=1)",
"def remove_attributes(self, remove_attrs):\n remove = []\n for attr in self.data:\n for prefix in remove_attrs:\n if attr.startswith(prefix):\n remove.append(attr)\n break\n\n self.data = self.data.drop(remove, axis=1)",
"def _drop_protected_attrs(model_class, values):\n for attr in model_class.__protected_attributes__:\n if attr in values:\n del values[attr]",
"def clear_attributes(self):\n self.attrs = etad.AttributeContainer()",
"def filter_schema(schema):\n for column, column_schema in schema.iteritems():\n if column_schema.get(CARDINALITY):\n del column_schema[CARDINALITY]\n schema[column] = column_schema\n\n return schema",
"def remove_attributes(cube, field, filename):\n cube.attributes = None",
"def clear_attrs(self):\n self._attributes.clear()",
"def clean_attrs(cls, diffsync: DiffSync, attrs):\n return cls.clean_ids_or_attrs(diffsync, attrs)",
"def trim_data(data, attributes):\n return data.drop(attributes, axis=1)",
"def validate_subset_of_schema(self, schema):\n self.validate_schema_type(schema)\n\n for name, attr_schema in iteritems(self.schema):\n if not schema.has_attribute(name):\n raise AttributeContainerSchemaError(\n \"Attribute '%s' does not appear in schema\" % name\n )\n\n other_attr_schema = schema.get_attribute_schema(name)\n attr_schema.validate_subset_of_schema(other_attr_schema)",
"def clean_metadata_from_xml(cls, xml_object):\r\n for attr in cls.metadata_attributes:\r\n if xml_object.get(attr) is not None:\r\n del xml_object.attrib[attr]",
"def clean_schema(schema):\n # type: (Dict) -> Dict\n return {k: v for k, v in schema.items()\n if k not in _SWAGGER_FIELDS and not k.lower().startswith(\"x-\")}",
"def test_remove_a_single_attribute(self):\n pass",
"def validate_subset_of_schema(self, schema):\n self.validate_schema_type(schema)\n\n if self.name != schema.name:\n raise AttributeSchemaError(\n \"Expected name '%s'; found '%s'\" % (schema.name, self.name)\n )\n\n if self.exclusive != schema.exclusive:\n raise AttributeSchemaError(\n \"Expected exclusive '%s' for attribute '%s'; found '%s'\"\n % (schema.exclusive, self.name, self.exclusive)\n )\n\n if self.default != schema.default:\n raise AttributeSchemaError(\n \"Expected default '%s' for attribute '%s'; found '%s'\"\n % (schema.default, self.name, self.default)\n )",
"def remove_null_fields(self):\n with open(self.schema_path, 'r') as file_obj:\n schema_data = yaml.safe_load(file_obj)\n schema_fields = schema_data.get('mapping').keys()\n for field in schema_fields:\n # We want to keep 'false' and 0 values, and avoid removing fields that are required in the schema.\n if field in self.data and self.data[field] in (None, '', [], {}) and \\\n not schema_data.get('mapping', {}).get(field, {}).get('required'):\n self.data.pop(field)",
"def uncleanable():\n data = attrdict.AttrDict()\n data.backup_ids = set()\n data.image_ids = set()\n data.keypair_ids = set()\n data.server_ids = set()\n data.nodes_ids = set()\n data.chassis_ids = set()\n data.snapshot_ids = set()\n data.transfer_ids = set()\n data.volume_ids = set()\n return data",
"def attributes(self):\n attr = super(BaseDataRecord, self).attributes()\n return [a for a in attr if a not in self.excluded()]",
"def deleteATTR(sel=None):\n if sel == None:\n sel = pm.ls(sl=1)\n for obj in sel:\n #remove customAttr with keyable\n attrs = pm.listAttr(obj,k=1)\n listAttrs = ['visibility','translateX','translateY','translateZ','rotateX','rotateY','rotateZ','scaleX','scaleY','scaleZ']\n for A in attrs:\n if A not in listAttrs:\n pm.setAttr(obj+'.'+A,l=0)\n pm.delete(obj+'.'+A,icn=1)\n pm.deleteAttr(obj, at = A)\n #remove customAttr with Nonkeyable\n attrs = pm.listAttr(obj,cb=1)\n listAttrs = ['visibility','translateX','translateY','translateZ','rotateX','rotateY','rotateZ','scaleX','scaleY','scaleZ']\n for A in attrs:\n if A not in listAttrs:\n pm.setAttr(obj+'.'+A,l=0)\n pm.delete(obj+'.'+A,icn=1)\n pm.deleteAttr(obj, at = A)",
"def validate(self, attrs):\n # Validate attributes\n for attr in attrs:\n self.validate_attribute(attr)\n\n # Enforce attribute exclusivity, if necessary\n if self.has_exclusive_attributes:\n counts = attrs.get_attribute_counts()\n for name, count in iteritems(counts):\n if count > 1 and self.is_exclusive_attribute(name):\n raise AttributeContainerSchemaError(\n \"Attribute '%s' is exclusive but appears %d times in \"\n \"this container\" % (name, count)\n )",
"def _filtered_attributes(\n self, required_attributes: Union[Iterable, Literal[\"__all__\"]], dontformat=False\n ) -> Tuple[Dict, Dict]:\n if required_attributes == \"__all__\":\n required_attributes = self.__atomic_fields_set__ | {\"meta\"}\n required_attributes = set(required_attributes)\n errors = []\n attrs = {name: getattr(self, name, None) for name in required_attributes-{\"meta\"}}\n for name in required_attributes - {\"meta\"}:\n if name not in self.__atomic_fields_set__:\n errors.append(f\" Unexpected required attribute: '{name}'.\")\n continue\n if attrs.get(name) is None:\n if not utils.is_an_optional_type_hint(self.__fields_types__[name]):\n errors.append(f\" Missing required attribute: '{name}'.\")\n if errors:\n raise ValueError(\"\\n\" + \"\\n\".join(errors))\n attrs = {\n utils.snake_to_camel_case(k, dontformat): v\n for (k, v) in attrs.items()\n if k in set(required_attributes) - self._identifier_fields\n }\n meta_attrs = {\n utils.snake_to_camel_case(name, dontformat): getattr(self, name)\n for name in self.__meta_attributes__\n if getattr(self, name) is not None\n } if \"meta\" in required_attributes else None\n return attrs, meta_attrs",
"def strip(self):\n types = [type(self.strip),\n type(self.values),\n type(self.__ne__),\n type(self.__class__)]\n\n for attr in dir(self):\n if not type(getattr(self, attr)) in types:\n if any(i in attr for i in self.keep) or attr[0:2] == '__':\n continue\n else:\n x = getattr(self, attr)\n del x\n for molecule in self.values():\n molecule.strip_molecule(self.keep)\n exit()",
"def test_remove():\n # remove dict keys\n schema = Schema({\"weight\": int,\n Remove(\"color\"): str,\n Remove(\"amount\"): int})\n out_ = schema({\"weight\": 10, \"color\": \"red\", \"amount\": 1})\n assert \"color\" not in out_ and \"amount\" not in out_\n\n # remove keys by type\n schema = Schema({\"weight\": float,\n \"amount\": int,\n # remvove str keys with int values\n Remove(str): int,\n # keep str keys with str values\n str: str})\n out_ = schema({\"weight\": 73.4,\n \"condition\": \"new\",\n \"amount\": 5,\n \"left\": 2})\n # amount should stay since it's defined\n # other string keys with int values will be removed\n assert \"amount\" in out_ and \"left\" not in out_\n # string keys with string values will stay\n assert \"condition\" in out_\n\n # remove value from list\n schema = Schema([Remove(1), int])\n out_ = schema([1, 2, 3, 4, 1, 5, 6, 1, 1, 1])\n assert_equal(out_, [2, 3, 4, 5, 6])\n\n # remove values from list by type\n schema = Schema([1.0, Remove(float), int])\n out_ = schema([1, 2, 1.0, 2.0, 3.0, 4])\n assert_equal(out_, [1, 2, 1.0, 4])",
"def deleteAttributes(self, keys):\n self.graph.deleteExtendedAttributes(self.entityId, keys)",
"def clean_attributes(self):\n attrs = {}\n\n # Only fetch the fields we need.\n for a in self.attributes.only('name', 'value', 'attribute').iterator():\n if a.attribute.multi:\n if a.name not in attrs:\n attrs[a.name] = []\n attrs[a.name].append(a.value)\n else:\n attrs[a.name] = a.value\n self._attributes_cache = attrs # Cache the attributes\n\n return attrs",
"def forbid_properties(schema: Dict[str, Any], forbidden: List[str]) -> None:\n not_schema = schema.setdefault(\"not\", {})\n already_forbidden = not_schema.setdefault(\"required\", [])\n already_forbidden.extend(forbidden)\n not_schema[\"required\"] = list(set(chain(already_forbidden, forbidden)))",
"def pop_attributes(self):\n attrs = self.attrs\n self.clear_attributes()\n return attrs",
"def clear_cached_attributes(self):\n setattr(self, '_atoms', None)\n setattr(self, '_bonds', None)\n setattr(self, '_rings', None)\n setattr(self, '_ring_systems', None)",
"def drop_attr(self, attr_name): # DONE\n self.data.drop(attr_name, axis=1, inplace=True)\n print(self.data)",
"def _trimRecords(self):\n self.highpassrecords.resize(self.nhighpassrecords, refcheck=False)\n self.lowpassrecords.resize(self.nlowpassrecords, refcheck=False)\n self.digitalsvalrecords.resize(self.ndigitalsvalrecords, refcheck=False)\n # cleanup by deleting any struct arrays of len 0\n for recname in ('highpassrecords', 'lowpassrecords', 'digitalsvalrecords'):\n if len(self.__getattribute__(recname)) == 0:\n self.__delattr__(recname)",
"def stripBlacklistAttrs(attrs, blacklist):\n gb = FnAttribute.GroupBuilder()\n gb.update(attrs)\n\n for attrName in blacklist:\n gb.delete(attrName)\n\n return gb.build()"
]
| [
"0.63107795",
"0.63107795",
"0.6131333",
"0.6119389",
"0.60960937",
"0.6045111",
"0.6032735",
"0.60116005",
"0.5976104",
"0.58545375",
"0.5839931",
"0.5834085",
"0.5815676",
"0.5787223",
"0.5742111",
"0.57354367",
"0.5733125",
"0.5728372",
"0.57195425",
"0.5669524",
"0.56368804",
"0.56005925",
"0.55278003",
"0.54811454",
"0.54576296",
"0.54527336",
"0.5450267",
"0.54395306",
"0.5438731",
"0.5425759"
]
| 0.6831719 | 0 |
Returns a dictionary mapping attribute names to their counts in this container. | def get_attribute_counts(self):
counts = defaultdict(int)
for attr in self:
counts[attr.name] += 1
return dict(counts) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def counts(self) -> dict:\n return Counter(self.sequence)",
"def _attrs_map(self) -> \"dict[int, str]\":\n return {i: attr.name for i, attr in enumerate(self._attrs())}",
"def _get_as_dict_count(self):\n counter = Counter()\n for product in self.products:\n counter[product.id] += 1\n return counter",
"def histogram_categorical_attribute(nodes: typ.Iterable[vtna.graph.TemporalNode], attribute_name: str) \\\n -> typ.Dict[str, int]:\n hist = collections.Counter()\n hist.update(node.get_global_attribute(attribute_name) for node in nodes)\n return hist",
"def get_num_attributes(self) -> int:\n return self._num_attributes",
"def attributes(self):\n return self.__dict.keys()",
"def summarize(self) -> Mapping[str, int]:\n return dict(\n compounds=self.count_compounds(),\n side_effects=self.count_side_effects(),\n indications=self.count_indications(),\n umls=self.count_umls(),\n )",
"def getNumAttributes(self):\r\n return self.numAttributes",
"def to_dict (self):\n return {\n 'lengths': self.lengths,\n 'lowerCounts': self.lower_counts,\n 'upperCounts': self.upper_counts,\n 'digitCounts': self.digit_counts,\n 'symbolCounts': self.symbol_counts,\n 'classCounts': self.class_counts,\n 'wordCounts': self.word_counts\n }",
"def counts(self):\n # Returns an OMD because Counter/OrderedDict may not be\n # available, and neither Counter nor dict maintain order.\n super_getitem = super(OrderedMultiDict, self).__getitem__\n return self.__class__((k, len(super_getitem(k))) for k in self)",
"def frequencies(self):\n dic = {}\n for word in self.words():\n dic[word] = dic.get(word, 0) + 1\n return dic",
"def getCombinationCounts(self, attributeList):\n cD = {}\n try:\n idxL = [self._attributeNameList.index(atName) for atName in attributeList]\n #\n for row in self.data:\n ky = tuple([row[jj] for jj in idxL])\n cD[ky] = cD[ky] + 1 if ky in cD else 1\n except Exception as e:\n if self.__verbose:\n logger.exception(\"Selection failure\")\n if self._raiseExceptions:\n raise e\n return cD",
"def getNumAttributes(self):\n\n return len(self._attributes)",
"def get_count_by_attribute_value(rows, attribute_name):\n results = defaultdict(lambda: 0)\n for row in rows:\n r = row[attribute_name]\n results[r] += 1\n return results",
"def _create_word_count_dict(self):\n word_counts = dict()\n for wc in self.word_counts.all():\n word_counts[wc.word.name] = wc.count\n return word_counts",
"def counts(self):\n\n counts = defaultdict(int)\n\n for i, geom in zip(self.tree_ids, self.tree):\n point_int = list(self.sindex.intersection(geom.bounds))\n if point_int:\n counts[i] += len(point_int)\n\n return dict(counts)",
"def get_counts(self):\n counts = {}\n for document in self.docs:\n for word in document:\n if word not in counts.keys():\n counts[word] = 1\n else:\n counts[word] += 1\n return counts",
"def attributes_metadata(self):\n\n attribute_meta = collections.defaultdict(dict)\n\n for attribute in self.attributes:\n attribute_meta[attribute.name]['valuemap'] = attribute.valuemap\n attribute_meta[attribute.name]['qualifiers'] = attribute.qualifiers\n\n return dict(attribute_meta)",
"def view_counts():\n out = {}\n for i in range(len(classes)):\n out.update({decoded[i]: storage.count(classes[i])})\n return out",
"def count(self):\n freq = {}\n\n for desc in self.words:\n if desc in freq:\n freq[desc] += 1\n else:\n freq[desc] = 1\n\n return freq",
"def get_total_counts(self):\n ret = {}\n all_loggers_count = 0\n for logger, name_map in self.acc_map.items():\n cur_logger_count = 0\n ret[logger.name] = {}\n for name, status_map in name_map.items():\n cur_name_count = 0\n ret[logger.name][name] = {}\n for status, acc in status_map.items():\n cur_count = acc.total_count\n ret[logger.name][name][status] = cur_count\n cur_name_count += cur_count\n cur_logger_count += cur_count\n all_loggers_count += cur_count\n ret[logger.name][name]['__all__'] = cur_name_count\n ret[logger.name]['__all__'] = cur_logger_count\n ret['__all__'] = all_loggers_count\n return ret",
"def stats():\n class_counts = {}\n convert_dict = {\n 'Amenity': 'amenities',\n 'State': 'states',\n 'City': 'cities',\n 'User': 'users',\n 'Place': 'places',\n 'Review': 'reviews'\n }\n\n for _class in convert_dict.keys():\n class_counts[convert_dict[_class]] = storage.count(_class)\n\n return jsonify(class_counts)",
"def _get_counts(self, X: np.ndarray) -> Dict[int, np.ndarray]:\n return {f: np.bincount(X[:, f].astype(int), minlength=n_cat) for f, n_cat in\n self.categories_per_feature.items()}",
"def count(self):\n return {'count': self.collection.count()}",
"def doc_lengths(self):\n return dict(zip(self.keys(), map(len, self.values())))",
"def attributes(self):\n return dict(self.__attributes)",
"def operation_counts(self) -> Dict[int, Dict[str, int]]:\n return self._operation_counts",
"def StateCounts(self):\r\n\t\treturn self._get_attribute('stateCounts')",
"def Count(attribute):\n return lambda attrs: len(attrs.get(attribute, []))",
"def get_attribute_names(cls):\n return cls._attributes.keys()"
]
| [
"0.70180696",
"0.69050837",
"0.6903133",
"0.6848671",
"0.6753199",
"0.6742087",
"0.67376226",
"0.67350996",
"0.66511387",
"0.6624211",
"0.6598114",
"0.6584916",
"0.65632945",
"0.65021485",
"0.65013814",
"0.6492559",
"0.6492349",
"0.64813375",
"0.64399755",
"0.6438868",
"0.6438449",
"0.64176476",
"0.64170885",
"0.6378637",
"0.6375591",
"0.6350297",
"0.6340901",
"0.6311631",
"0.6301915",
"0.6301193"
]
| 0.90069354 | 0 |
Whether this schema contains at least one exclusive attribute. | def has_exclusive_attributes(self):
return any(schema.is_exclusive for schema in itervalues(self.schema)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_exclusive_attribute(self, name):\n return self.get_attribute_schema(name).is_exclusive",
"def hasOptionalAttributes(self):\n return _libsbml.SBase_hasOptionalAttributes(self)",
"def hasRequiredAttributes(self):\n return _libsbml.AlgebraicRule_hasRequiredAttributes(self)",
"def hasRequiredAttributes(self):\n return _libsbml.EventAssignment_hasRequiredAttributes(self)",
"def hasRequiredAttributes(self):\n return _libsbml.GeneProduct_hasRequiredAttributes(self)",
"def hasRequiredAttributes(self):\n return _libsbml.AssignmentRule_hasRequiredAttributes(self)",
"def hasRequiredAttributes(self):\n return _libsbml.Member_hasRequiredAttributes(self)",
"def hasRequiredAttributes(self):\n return _libsbml.Compartment_hasRequiredAttributes(self)",
"def hasRequiredAttributes(self):\n return _libsbml.MultiSpeciesType_hasRequiredAttributes(self)",
"def hasRequiredAttributes(self):\n return _libsbml.ListOfMembers_hasRequiredAttributes(self)",
"def hasRequiredAttributes(self):\n return _libsbml.Group_hasRequiredAttributes(self)",
"def hasRequiredAttributes(self):\n return _libsbml.Reaction_hasRequiredAttributes(self)",
"def hasRequiredAttributes(self):\n return _libsbml.GeneProductAssociation_hasRequiredAttributes(self)",
"def hasRequiredAttributes(self):\n return _libsbml.FbcOr_hasRequiredAttributes(self)",
"def hasRequiredAttributes(self):\n return _libsbml.Input_hasRequiredAttributes(self)",
"def hasRequiredAttributes(self):\n return _libsbml.Rule_hasRequiredAttributes(self)",
"def hasRequiredAttributes(self):\n return _libsbml.SBase_hasRequiredAttributes(self)",
"def hasRequiredAttributes(self):\n return _libsbml.Port_hasRequiredAttributes(self)",
"def hasRequiredAttributes(self):\n return _libsbml.IntraSpeciesReaction_hasRequiredAttributes(self)",
"def hasRequiredAttributes(self):\n return _libsbml.Event_hasRequiredAttributes(self)",
"def hasRequiredAttributes(self):\n return _libsbml.Output_hasRequiredAttributes(self)",
"def hasRequiredAttributes(self):\n return _libsbml.FbcAnd_hasRequiredAttributes(self)",
"def hasRequiredAttributes(self):\n return _libsbml.GeneProductRef_hasRequiredAttributes(self)",
"def hasRequiredAttributes(self):\n return _libsbml.KineticLaw_hasRequiredAttributes(self)",
"def hasRequiredAttributes(self):\n return _libsbml.OutwardBindingSite_hasRequiredAttributes(self)",
"def hasRequiredAttributes(self):\n return _libsbml.CompartmentType_hasRequiredAttributes(self)",
"def hasRequiredAttributes(self):\n return _libsbml.Species_hasRequiredAttributes(self)",
"def hasRequiredAttributes(self):\n return _libsbml.Submodel_hasRequiredAttributes(self)",
"def hasRequiredAttributes(self):\n return _libsbml.SpeciesTypeComponentIndex_hasRequiredAttributes(self)",
"def hasRequiredAttributes(self):\n return _libsbml.InSpeciesTypeBond_hasRequiredAttributes(self)"
]
| [
"0.78752255",
"0.71905404",
"0.682085",
"0.6818547",
"0.6817014",
"0.6751183",
"0.67473763",
"0.67394555",
"0.6737406",
"0.67252415",
"0.67246854",
"0.6719147",
"0.67022294",
"0.6700692",
"0.66953605",
"0.66911525",
"0.6690814",
"0.6671049",
"0.6651599",
"0.66337264",
"0.6605207",
"0.6590848",
"0.65872294",
"0.65849453",
"0.65796816",
"0.65795094",
"0.6546601",
"0.65465814",
"0.65464437",
"0.6529717"
]
| 0.8674501 | 0 |
Returns an iterator over the (name, AttributeSchema) pairs in this schema. | def iter_attributes(self):
return iteritems(self.schema) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __iter__(self) -> Tuple[str, Any]:\n for attr_name, attr_val in self.__dict__.items():\n yield attr_name, attr_val",
"def iter_attributes(self) -> Iterator[AttributeNode]:\n status: Any\n\n if isinstance(self.item, AttributeNode):\n status = self.axis\n self.axis = 'attribute'\n yield self.item\n self.axis = status\n return\n elif isinstance(self.item, ElementNode):\n status = self.item, self.axis\n self.axis = 'attribute'\n\n for self.item in self.item.attributes:\n yield self.item\n\n self.item, self.axis = status",
"def get_attribute_schema(self, name):\n self.validate_attribute_name(name)\n return self.schema[name]",
"def yield_column_names(schema: ColumnGroupSchema) -> Iterator[str]:\n if isinstance(schema, str):\n yield schema\n else:\n seq = schema.values() if isinstance(schema, collections.abc.Mapping) else schema\n for value in seq:\n yield from yield_column_names(value)",
"def __iter__(self):\n for name, field in self.iterate_over_fields():\n yield name, field",
"def __iter__(self):\r\n for attr, value in self.__dict__.items():\r\n a = getattr(self, attr)\r\n if type(a) is list:\r\n if len(a) > 0:\r\n yield attr, a",
"def __iter__(self):\n for name, _ in getmembers(self.__class__,\n lambda value: isinstance(value, InstrumentedAttribute)):\n yield name",
"def iter_schemas(self):\n warn(\"you may actually mean .my_schemas\", LikelyMistake)\n return super(CamTypeMixin, self).iter_schemas()",
"def __iter__(self):\n for acronym in self.keys:\n yield acronym, self.dict[acronym]",
"def schemaIterator(self):\n if self.schema:\n for field in self.schema:\n field_id = field.getFieldName()\n label = field.getLabel()\n value = getattr(self.instance, field_id, NOT_ANSWERED_VALUE)\n if isinstance(value, list):\n value = ', '.join(value)\n\n yield label, value",
"def _iterattrs(self, handle=\"\"):\n if not handle:\n handle = self.handle\n attr = gv.firstattr(handle)\n while gv.ok(attr):\n yield gv.nameof(attr), decode_page(gv.getv(handle, attr))\n attr = gv.nextattr(handle, attr)",
"def iter_stats_schema(schema: tfgnn.GraphSchema) -> Iterator[Tuple[str, Any]]:\n\n for set_type, set_name, set_obj in tfgnn.iter_sets(schema):\n if set_type != tfgnn.CONTEXT:\n # Output a feature for the size of the set.\n key = f\"{set_type}/{set_name}/{tfgnn.SIZE_NAME}\"\n yield key, set_obj\n\n # Output the values for each feature.\n for feature_name, feature in set_obj.features.items():\n if tf.dtypes.as_dtype(feature.dtype) == tf.string:\n continue\n key = f\"{set_type}/{set_name}/{feature_name}\"\n yield key, feature",
"def iteritems(self):\n\t\tfor attribute_name in dir(self):\n\t\t\tif self._valuable(attribute_name):\n\t\t\t\tyield (attribute_name, getattr(self, attribute_name))",
"def __iter__(self):\n for feature in itertools.izip(self.shapes, self.records):\n yield feature",
"def __iter__(self):\n for col in self.columns:\n yield self.fields[col]",
"def items(self):\n for attr in ('name', 'birthDate', 'identifier'):\n value = getattr(self, attr, None)\n if value:\n yield attr, value",
"def iterate_children(self) -> Iterator[TypedDictAttribute]:\n yield from self.get_required()\n yield from self.get_optional()",
"def __iter__(self) -> (str, np.ndarray):\n for k, v in self.fields.items():\n yield k, v",
"def findModuleSchemas(self):\n if self.codebase:\n module = self.codebase.instantiate(self.service_module_name)\n else:\n module = importlib.import_module(self.service_module_name)\n\n res = []\n\n for o in dir(module):\n if isinstance(getattr(module, o), Schema):\n res.append(getattr(module, o))\n\n return res",
"def __iter__(self):\r\n for column_id in self._columns.keys():\r\n yield column_id",
"def schemas(self):\n if not self._schemas:\n self._schemas = get_schema(self.attributes.workspace.namespace, self.attributes.workspace.name)\n return self._schemas",
"def __iter__(self):\n for bucket in self._table:\n if bucket is not None:\n for key in bucket:\n yield key",
"def __iter__(self):\n\t\tfor attribute_name in dir(self):\n\t\t\tif self._valuable(attribute_name):\n\t\t\t\tyield getattr(self, attribute_name)",
"def schemas(self):\n return model.Schemas(self)",
"def items(self):\n for name in self.fields:\n yield name, getattr(self, name)",
"def __iter__(self):\n key = list(self.keys())[0]\n length = len(self[key])\n for i in range(length):\n res = {}\n for key, feature in self.items():\n res[key] = feature.data[feature.name][i]\n yield res",
"def yield_database_schema(\n self, schema_name: str\n ) -> Iterable[CreateDatabaseSchemaRequest]:\n\n yield CreateDatabaseSchemaRequest(\n name=schema_name,\n database=EntityReference(id=self.context.database.id, type=\"database\"),\n )",
"def get_fields(self) -> Iterable[fields.Field]:\n for attr_name in dir(self):\n attr = getattr(self, attr_name)\n if isinstance(attr, fields.Field):\n yield attr",
"def iter_keypoints_labels(self):\n return iter(self.schema)",
"def __iter__(self):\n attr = gv.firstattr(self.handle)\n while gv.ok(attr):\n yield gv.nameof(attr), \\\n decode_page(gv.getv(self.handle, attr))\n attr = gv.nextattr(self.handle, attr)"
]
| [
"0.6245281",
"0.61062413",
"0.60659826",
"0.5949053",
"0.5913689",
"0.5893057",
"0.5817822",
"0.58118874",
"0.5766706",
"0.57579017",
"0.5689008",
"0.5654089",
"0.56415623",
"0.56186086",
"0.5611452",
"0.5560966",
"0.5550918",
"0.54651785",
"0.5452331",
"0.5448329",
"0.5444498",
"0.54339594",
"0.5427443",
"0.5423128",
"0.5414917",
"0.54138756",
"0.54130685",
"0.5370945",
"0.53686386",
"0.5337066"
]
| 0.74640673 | 0 |
Whether the schema has an Attribute with the given name. | def has_attribute(self, name):
return name in self.schema | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def has_attr_with_name(self, name):\n for attr in self:\n if attr.name == name:\n return True\n\n return False",
"def has_attribute(self, name):\n\n pass",
"def hasAttribute(self, attrib):\n return self._dqa(attrib) in self.attributes",
"def attribute_exists(se: SchemaExplorer, attribute_label: str) -> bool:\n schema_graph = se.get_nx_schema()\n\n if attribute_label in schema_graph.nodes:\n return True\n return False",
"def is_valid_attribute_name(self, name):\n try:\n self.validate_attribute_name(name)\n return True\n except etal.LabelsSchemaError:\n return False",
"def has_attr(self, name):\n return name in self and not self[name] in EMPTY_VALUES",
"def validate_attribute_name(self, name):\n if not self.has_attribute(name):\n raise AttributeContainerSchemaError(\n \"Attribute '%s' is not allowed by the schema\" % name\n )",
"def is_element_attribute(element, attribute_name):\n return element.get(attribute_name) is not None",
"def has_attr(self, key):\n return key in self.attrs",
"def has_attribute(self, key):\n return key in self.__dict",
"def is_attr_exist(self, section_name: str, attr_name: str) -> bool:\n pass",
"def has_value(self, attribute_name):\n return hasattr(self, '%s__' % attribute_name)",
"def has_value(self, attribute_name):\n return hasattr(self, '%s__' % attribute_name)",
"def has_attribute(self, attribute):\n return (attribute in self.attribute_list)",
"def hasAttribute(self, *args):\n return _libsbml.XMLAttributes_hasAttribute(self, *args)",
"def has_attribute(self, attribute: str) -> bool:\n return any([\n key_node.value == attribute for key_node, _ in self.yaml_node.value\n ])",
"def is_attr_exist(self, section_name: str, attr_name: str) -> bool:\n success = False\n try:\n response = self._vault_api.read_secret(path=section_name.upper(), mount_point=self.mount_point)\n keys = list(response[\"data\"][\"data\"].keys())\n success = attr_name.upper() in keys\n except InvalidPath:\n pass\n return success",
"def has_attribute_key(graph_element, attribute_key):\n return attribute_key in graph_element.get_attributes().keys() # return whether key is present",
"def is_attribute(self):\r\n return conf.lib.clang_isAttribute(self)",
"def has_keypoints_attribute(self, label, attr_name):\n if not self.has_keypoints_label(label):\n return False\n\n return self.schema[label].has_attribute(attr_name)",
"def is_attr_exist(self, section_name: str, attr_name: str) -> bool:\n config = ConfigParser(allow_no_value=True)\n config.read(self.connection_string)\n\n result = False\n for section in config.sections():\n if section.lower().replace(' ', '_') == section_name.lower().replace(' ', '_'):\n for attr in config[section]:\n if attr.lower().replace(' ', '_') == attr_name.lower().replace(' ', '_'):\n result = True\n\n return result",
"def checkattr(name):\n\n def check(obj):\n try:\n attrgetter(name)(obj)\n return True\n except AttributeError:\n return False\n\n return check",
"def hasAttr(self, *args):\n return _libsbml.XMLToken_hasAttr(self, *args)",
"def assert_valid_attribute(self, name):\n if name.startswith('_'):\n return\n self.assert_known_field(name)",
"def __contains__(self, attribute_name):\n return False # pragma: no cover",
"def _is_encodable_attribute(name):\n if name == '_meta':\n return True\n elif name.startswith(\"_\") or name.startswith(\"__\") or name == \"ext\":\n return False\n else:\n return True",
"def hasAttribute(self, p_str, p_str_1=None): # real signature unknown; restored from __doc__ with multiple overloads\n return False",
"def has(self, name):\n try:\n if self.input(name) is None:\n return False\n except KeyError:\n return False\n\n return True",
"def attr_exists(attribute, user_input):\n\n response = table.scan(\n FilterExpression = Attr(attribute).eq(user_input)\n )\n\n if response['Items']:\n return True\n\n return False",
"def validate(self, name):\n return name in self.dict"
]
| [
"0.8269132",
"0.7991501",
"0.7659555",
"0.74895906",
"0.74583995",
"0.7421857",
"0.7392836",
"0.7271526",
"0.726393",
"0.71954143",
"0.7174027",
"0.70955116",
"0.70955116",
"0.705711",
"0.7055704",
"0.69954616",
"0.6912138",
"0.69118476",
"0.68898445",
"0.6820372",
"0.6804414",
"0.66734076",
"0.6672212",
"0.65917546",
"0.6553426",
"0.65463644",
"0.654297",
"0.65276897",
"0.65260476",
"0.6518735"
]
| 0.8838361 | 0 |
Gets the AttributeSchema for the Attribute with the given name. | def get_attribute_schema(self, name):
self.validate_attribute_name(name)
return self.schema[name] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_schema(self, name):\n return Schema(self, name)",
"def get_schema(self, name, namespace=None):\n avro_name = self.get_name(name=name, namespace=namespace)\n return self._names.get(avro_name.fullname, None)",
"def schema(self, name):\n return model.Schema(self, name)",
"def get_attribute_class(self, name):\n self.validate_attribute_name(name)\n return self.schema[name].get_attribute_class()",
"def validate_attribute_name(self, name):\n if not self.has_attribute(name):\n raise AttributeContainerSchemaError(\n \"Attribute '%s' is not allowed by the schema\" % name\n )",
"def _get_schema(name):\n global SCHEMA\n\n loaded_schema = SCHEMA.get(name)\n if not loaded_schema:\n filename = \"{}/{}.json\".format(_get_directory(), name)\n if os.path.exists(filename):\n SCHEMA[name] = json.load(open(filename, 'r'))\n\n return SCHEMA.get(name)",
"def get_model_schema(*, name: str) -> typing.Optional[types.Schema]:\n model = get_model(name=name)\n if model is None:\n return None\n return model._schema # pylint: disable=protected-access",
"def getAttributeByName(self, name):\n\n for eachAttribute in self._attributes:\n if eachAttribute.getName() == name:\n return eachAttribute\n\n return None",
"def get_attribute_by_name(self, name):\n if name in self._attributes:\n return self._attributes[name]",
"def attribute(self, name):\n\n attrs = [attr for attr in self.attributes if attr.name == name]\n\n if attrs:\n return attrs[0]\n else:\n raise NoSuchAttributeError(name)",
"def load_schema(name):\r\n\r\n data = pkgutil.get_data(__package__, \"schemas/{0}.json\".format(name))\r\n return json.loads(data.decode(\"utf-8\"))",
"def get_schema(self):\n response = self.client.get(self._get_collection_url('schema'))\n\n return response.get('schema', {})",
"def get_attribute_by_name(attributes, attributeName):\n for attrib in attributes:\n if attrib['name'] == attributeName:\n return attrib\n return None",
"def merge_attribute_schema(self, attr_schema):\n name = attr_schema.name\n if name not in self.schema:\n self.schema[name] = attr_schema\n else:\n self.schema[name].merge_schema(attr_schema)",
"def attributes_get(self, attr_name):\n if not self.sqs_attr:\n return None\n\n if attr_name not in self.sqs_attr:\n return None\n\n return self.sqs_attr[attr_name]",
"def get_attr(name):\n userDoc = get_user()\n _idx = userDoc.index.get(name, None)\n\n if _idx is not None:\n return userDoc.attributes[_idx]\n else:\n return None",
"def getAvroSchema(self, cls, fieldName):\n field = None\n for fld in cls.schema.fields:\n if fld.name == fieldName:\n field = fld\n return field",
"def queryAttributeNames(name):\n\n header, rows = querySciDB(\"attributes(%s)\" % name)\n return [row[1].translate(None, \"\\\"\") for row in rows]",
"def find_attribute_id(self, attribute_name):\n attribute_id = None\n pr = self.layer.dataProvider()\n for field_id, field in enumerate(pr.fields()):\n if field.name() == attribute_name:\n attribute_id = field_id\n return attribute_id\n # In case the attribute has not been found, raise exception\n raise AttributeError('Attribute name %s not found' % attribute_name)",
"def get_constraint(self, attribute_name):\n\n for constraint in self.constraints:\n if constraint.key == attribute_name:\n return constraint\n\n # If it can't be found, return None.\n return None",
"def get_attribute(self, attribute_name, attribute_type='StringValue'):\n\n return self.sqs_message.message_attributes \\\n .get(attribute_name) \\\n .get(attribute_type)",
"def getattribute(self, name):\n return self.attributes[name]",
"def attribute(self, name, by_ref=False):\n\n if by_ref:\n return self._attributes_by_ref[name]\n else:\n try:\n return self._attributes[name]\n except KeyError:\n raise NoSuchAttributeError(\"Unknown attribute '{}' \"\n \"in dimension '{}'\"\n .format(name, self.name),\n name)",
"def get_schema(self, engine_name):\n endpoint = \"engines/{}/schema\".format(engine_name)\n return self.swiftype_session.request('get', endpoint)",
"def schema_helper(self, name, _, schema=None, **kwargs):\n if schema is None:\n return None\n\n schema_instance = resolve_schema_instance(schema)\n\n schema_key = make_schema_key(schema_instance)\n self.warn_if_schema_already_in_spec(schema_key)\n self.openapi.refs[schema_key] = name\n\n json_schema = self.openapi.schema2jsonschema(schema_instance)\n\n return json_schema",
"def get_schema(self):\r\n return self.__schema",
"def validate_schema(self, schema):\n if type(schema) is not type(self):\n raise AttributeSchemaError(\n \"Expected schema to have type '%s'; found '%s'\"\n % (type(self), type(schema))\n )\n\n if schema.name != self.name:\n raise AttributeSchemaError(\n \"Expected schema to have name '%s'; found '%s'\"\n % (self.name, schema.name)\n )",
"def __getitem__(self, name) -> 'StarSchema':\n return self.schemas[name]",
"def _get_schema(self):\n self._pick()\n return Schema()",
"def add_attribute(self, attr):\n name = attr.name\n if name not in self.schema:\n schema_cls = attr.get_schema_cls()\n self.schema[name] = schema_cls(name)\n\n self.schema[name].add_attribute(attr)"
]
| [
"0.64486235",
"0.6430363",
"0.62171483",
"0.5857013",
"0.5799322",
"0.57704204",
"0.5766579",
"0.57060593",
"0.5699009",
"0.5678361",
"0.5433262",
"0.5315926",
"0.5253417",
"0.52411157",
"0.5234161",
"0.5197639",
"0.51942366",
"0.50964564",
"0.5081106",
"0.5077334",
"0.5031839",
"0.49744067",
"0.4953551",
"0.49210644",
"0.4871752",
"0.48674026",
"0.48668835",
"0.48538533",
"0.4845007",
"0.48423517"
]
| 0.87385505 | 0 |
Gets the class of the Attribute with the given name. | def get_attribute_class(self, name):
self.validate_attribute_name(name)
return self.schema[name].get_attribute_class() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_attribute_class(self, attr_name):\n return self.attrs.get_attribute_class(attr_name)",
"def get_attribute_class(self):\n return self._attr_cls",
"def get_attribute_by_name(self, name):\n if name in self._attributes:\n return self._attributes[name]",
"def attribute(self, name):\n\n attrs = [attr for attr in self.attributes if attr.name == name]\n\n if attrs:\n return attrs[0]\n else:\n raise NoSuchAttributeError(name)",
"def getAttributeByName(self, name):\n\n for eachAttribute in self._attributes:\n if eachAttribute.getName() == name:\n return eachAttribute\n\n return None",
"def getattribute(self, name):\n return self.attributes[name]",
"def get_attr(self):\n attr = self._bld.FindOrCreateAttribute(self._sobj, self.sname)\n return attr._narrow(self.stype)",
"def getAttribute(self, name):\n \n return self[self._name][name]",
"def get_class(self, name):\n return self.host.get_class(name)",
"def get_class_attr(obj, name, default=None):\n return getattr(obj.__class__, name, default)",
"def get_attr_with_name(self, name, default=no_default):\n attrs = self.get_attrs_with_name(name)\n\n if not attrs and default is not no_default:\n return default\n\n if len(attrs) != 1:\n raise ValueError(\n \"Expected one attribute with name '%s' but found %d\"\n % (name, len(attrs))\n )\n\n return attrs[0]",
"def get_attr(attributes, name):\n try:\n return attributes.getValue(name)\n except KeyError:\n return None",
"def get_attribute_by_name(attributes, attributeName):\n for attrib in attributes:\n if attrib['name'] == attributeName:\n return attrib\n return None",
"def __getattr__(self, name):\n if not name in self._attrs.iterkeys():\n raise AttributeError(name)\n return self._attrs[name]",
"def get_attribute(self, name):\n\n pass",
"def get_attr(self, name: str):\n return self.call(name)",
"def get_class(self, class_name):\n try:\n return self._classes[class_name]\n except KeyError:\n raise NameError",
"def get_attribute(self, name):\n return self.element.get_attribute(name)",
"def get_class(self, name):\n raise NotImplementedError",
"def get_attribute(self, attribute_name, attribute_type='StringValue'):\n\n return self.sqs_message.message_attributes \\\n .get(attribute_name) \\\n .get(attribute_type)",
"def get_class_attribute(self):\n return self.class_attr",
"def get_attr(name):\n userDoc = get_user()\n _idx = userDoc.index.get(name, None)\n\n if _idx is not None:\n return userDoc.attributes[_idx]\n else:\n return None",
"def from_string(cls, name):\n if hasattr(cls,name):\n return cls.__getattribute__(name)\n else:\n return None",
"def _get_class(self, name):\n return self._hw_mm.namespaces[\"hw_devices\"][name]",
"def get_class_name(name):\n name = _strip_class_name(name)\n return convert_to_camel_case(name)",
"def get_attribute_schema(self, name):\n self.validate_attribute_name(name)\n return self.schema[name]",
"def __getitem__(self, attribute_name: str) -> Attribute:\n return self._attributes_by_name[attribute_name]",
"def get_attr(self, name, default=None):\n try:\n return self.managedobjectattribute_set.get(key=name).value\n except ManagedObjectAttribute.DoesNotExist:\n return default",
"def get(self, attrname):\n return self.__dict__['_'+attrname]",
"def attribute_name(name: str) -> str:\n return text.snake_case(utils.safe_snake(name))"
]
| [
"0.8641372",
"0.7024071",
"0.6980088",
"0.65067154",
"0.64325404",
"0.6407288",
"0.6280768",
"0.62770015",
"0.6250951",
"0.6186854",
"0.6100665",
"0.6097087",
"0.6080639",
"0.6073486",
"0.6052716",
"0.595313",
"0.5910228",
"0.59096617",
"0.5879764",
"0.5867442",
"0.58294636",
"0.582353",
"0.5823426",
"0.581899",
"0.5772156",
"0.5756201",
"0.57407784",
"0.5717402",
"0.5687277",
"0.5643295"
]
| 0.8838273 | 0 |
Whether the Attribute with the given name is exclusive. | def is_exclusive_attribute(self, name):
return self.get_attribute_schema(name).is_exclusive | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def has_attr(self, name):\n return name in self and not self[name] in EMPTY_VALUES",
"def has_attribute(self, name):\n\n pass",
"def has_attr_with_name(self, name):\n for attr in self:\n if attr.name == name:\n return True\n\n return False",
"def has_exclusive_attributes(self):\n return any(schema.is_exclusive for schema in itervalues(self.schema))",
"def has_attribute(self, name):\n return name in self.schema",
"def __contains__(self, attribute_name):\n return False # pragma: no cover",
"def is_excluded(self, attr_name, request):\n return False",
"def removeAttributeByName(self, name):\n\n removeIndex = None\n\n for i, eachAttribute in enumerate(self._attributes):\n if eachAttribute.getName() == name:\n removeIndex = i\n\n if removeIndex is None:\n return False\n\n self.removeAttributeByIndex(removeIndex)\n\n return True",
"def has_attr(self, key):\n return key in self.attrs",
"def hasAttribute(self, attrib):\n return self._dqa(attrib) in self.attributes",
"def __ne__(self, other: 'Attribute') -> bool:\n return not self == other",
"def is_attr_exist(self, section_name: str, attr_name: str) -> bool:\n pass",
"def is_exclusive(self):\n return self.exclusive",
"def is_element_attribute(element, attribute_name):\n return element.get(attribute_name) is not None",
"def is_valid_attribute_name(self, name):\n try:\n self.validate_attribute_name(name)\n return True\n except etal.LabelsSchemaError:\n return False",
"def hasAttribute(self, *args):\n return _libsbml.XMLAttributes_hasAttribute(self, *args)",
"def __contains__(self, name):\n if name not in self.ALLOWED_EXCLUDES or name not in self.data.keys():\n return False\n else:\n return True",
"def assert_valid_attribute(self, name):\n if name.startswith('_'):\n return\n self.assert_known_field(name)",
"def has_attribute(self, key):\n return key in self.__dict",
"def is_global_attr(self, attr_name):\n\n return attr_name in self._global_attr_names",
"def hasAttr(self, *args):\n return _libsbml.XMLToken_hasAttr(self, *args)",
"def disabled(name):\n return not enabled(name)",
"def validate_attribute_name(self, name):\n if not self.has_attribute(name):\n raise AttributeContainerSchemaError(\n \"Attribute '%s' is not allowed by the schema\" % name\n )",
"def is_attr_exist(self, section_name: str, attr_name: str) -> bool:\n success = False\n try:\n response = self._vault_api.read_secret(path=section_name.upper(), mount_point=self.mount_point)\n keys = list(response[\"data\"][\"data\"].keys())\n success = attr_name.upper() in keys\n except InvalidPath:\n pass\n return success",
"def is_attribute(self):\r\n return conf.lib.clang_isAttribute(self)",
"def has_value(self, attribute_name):\n return hasattr(self, '%s__' % attribute_name)",
"def has_value(self, attribute_name):\n return hasattr(self, '%s__' % attribute_name)",
"def hasAttribute(self, p_str, p_str_1=None): # real signature unknown; restored from __doc__ with multiple overloads\n return False",
"def has_attribute(self, attribute):\n return (attribute in self.attribute_list)",
"def validate(self, name):\n return name in self.dict"
]
| [
"0.7593362",
"0.703136",
"0.694037",
"0.68999416",
"0.6891211",
"0.6605466",
"0.6590984",
"0.639892",
"0.63264763",
"0.6316299",
"0.6309909",
"0.6309658",
"0.6299827",
"0.6285345",
"0.62298197",
"0.62032163",
"0.61965716",
"0.61603713",
"0.6154692",
"0.61544764",
"0.61313874",
"0.60731566",
"0.6072442",
"0.6069753",
"0.60585976",
"0.6034138",
"0.6034138",
"0.60209227",
"0.600103",
"0.5994184"
]
| 0.8701278 | 0 |
Whether the Attribute with the given name has a default value. | def has_default_value(self, name):
return self.get_attribute_schema(name).has_default_value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_attr_value_default(self, attr_name):\n try:\n attr_value = self.field_attrs[attr_name]\n except KeyError:\n return True\n\n return attr_value == self.get_attr_default(attr_name)",
"def is_attr_value_default(self, attr_name):\n try:\n attr_value = self.field_attrs[attr_name]\n except KeyError:\n return True\n\n return attr_value == self.get_attr_default(attr_name)",
"def has_default_value(self):\n return self.default is not None",
"def has_default(self):\r\n return self.default is not None",
"def has_attr(self, name):\n return name in self and not self[name] in EMPTY_VALUES",
"def has_default(self):\n return self._default is not NOT_PROVIDED",
"def get_attr_bool(self, name, default=False):\n v = self.get_attr(name)\n if v is None:\n return default\n if v.lower() in [\"t\", \"true\", \"y\", \"yes\", \"1\"]:\n return True\n else:\n return False",
"def has_default(self):\r\n return self._default is not None",
"def has(self, name):\n return name in self._defaults",
"def isDefault (self):\n val = self.__getattribute__('StoredValue')\n dft = self.__class__.StoredValue\n return val==dft",
"def get_bool(self, key, default=RequiredAttr()):\n if key in self.attrs:\n val = self.attrs[key]\n return val.strip().lower() in ['true', '1', 't', 'y', 'yes']\n if isinstance(default, RequiredAttr):\n raise AttributeError(\"Required attribute {} not found.\".format(key))\n return default",
"def isDefault(self) -> bool:\n ...",
"def isDefault(self) -> bool:\n ...",
"def isDefault(self) -> bool:\n ...",
"def has_attribute(self, name):\n\n pass",
"def is_default(self):\n\n # Make sure matching default and value cases are found to be\n # equivalent.\n if self.default is None: # empty string should equal None\n current_val = (None if self.value == \"\" else self.value)\n elif isinstance(self.default, str): # avoid str v float comparisons\n current_val = str(self.value)\n else:\n current_val = self.value\n\n # self.template does not contain any information about self.value, so\n # we need to check this separately.\n if current_val != self.default:\n return False\n\n # At this point, self.value is equivalent to self.default, so we should\n # check the remaining attribute defaults defined in self.template.\n default = True\n for attr, val in self.template.items():\n current = getattr(self, attr)\n if current != val:\n default = False\n break\n\n return default",
"def get_attr_default(self, attr_name):\n for defaults in (self._ATTRIBUTE_DEFAULTS.get(self.field_type, {}),\n self._ATTRIBUTE_DEFAULTS['*']):\n try:\n return defaults[attr_name]\n except KeyError:\n continue\n\n return None",
"def get_attr_default(self, attr_name):\n for defaults in (self._ATTRIBUTE_DEFAULTS.get(self.field_type, {}),\n self._ATTRIBUTE_DEFAULTS['*']):\n try:\n return defaults[attr_name]\n except KeyError:\n continue\n\n return None",
"def has_attribute(self, name):\n return name in self.schema",
"def has_default(model_field: DataclassCreationFields) -> bool:\n return (model_field.field.default is not dataclasses.MISSING) or (\n model_field.field.default_factory is not dataclasses.MISSING\n )",
"def is_default(self):\n return self._tag == 'default'",
"def get_default_value(self, name):\n return self.get_attribute_schema(name).default",
"def is_default(self) -> Optional[bool]:\n return pulumi.get(self, \"is_default\")",
"def is_default(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_default\")",
"def required(self) -> bool:\n return self._default is None",
"def is_default(self) -> bool:\n return pulumi.get(self, \"is_default\")",
"def validate_default_value(self):\n if self.has_default_value:\n if not self.is_valid_value(self.default):\n raise AttributeSchemaError(\n \"Default value '%s' is not compliant with the schema\"\n )",
"def is_default(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"is_default\")",
"def has_value(self, attribute_name):\n return hasattr(self, '%s__' % attribute_name)",
"def has_value(self, attribute_name):\n return hasattr(self, '%s__' % attribute_name)"
]
| [
"0.87237966",
"0.87237966",
"0.76137537",
"0.71896803",
"0.7098916",
"0.7040826",
"0.70077074",
"0.69972944",
"0.6970349",
"0.69686097",
"0.6951363",
"0.69084203",
"0.69084203",
"0.69084203",
"0.68482715",
"0.67959493",
"0.6778438",
"0.6778438",
"0.675506",
"0.6701785",
"0.66817397",
"0.66339976",
"0.6603632",
"0.66021514",
"0.6577554",
"0.655584",
"0.65365916",
"0.6533545",
"0.6466495",
"0.6466495"
]
| 0.8926538 | 0 |
Gets the default value for the Attribute with the given name. | def get_default_value(self, name):
return self.get_attribute_schema(name).default | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_attr_value_with_name(self, name, default=no_default):\n try:\n attr = self.get_attr_with_name(name)\n return attr.value\n except ValueError:\n if default is not no_default:\n return default\n\n raise",
"def get_attr_default(self, attr_name):\n for defaults in (self._ATTRIBUTE_DEFAULTS.get(self.field_type, {}),\n self._ATTRIBUTE_DEFAULTS['*']):\n try:\n return defaults[attr_name]\n except KeyError:\n continue\n\n return None",
"def get_attr_default(self, attr_name):\n for defaults in (self._ATTRIBUTE_DEFAULTS.get(self.field_type, {}),\n self._ATTRIBUTE_DEFAULTS['*']):\n try:\n return defaults[attr_name]\n except KeyError:\n continue\n\n return None",
"def get_attr_with_name(self, name, default=no_default):\n attrs = self.get_attrs_with_name(name)\n\n if not attrs and default is not no_default:\n return default\n\n if len(attrs) != 1:\n raise ValueError(\n \"Expected one attribute with name '%s' but found %d\"\n % (name, len(attrs))\n )\n\n return attrs[0]",
"def get(self, name, default=None):\n try:\n return self.__getattribute__(name, default)\n except AttributeError:\n return default",
"def getAttribute(self, attribname, default = None):\n return self.attributes.get(attribname, default)",
"def get_value(self, attribute_name, default=None):\n return getattr(self, '%s__' % attribute_name, default)",
"def get_value(self, attribute_name, default=None):\n return getattr(self, '%s__' % attribute_name, default)",
"def get_value(self, attribute_name, default=None):\n return getattr(self, '%s__' % attribute_name, default)",
"def get_attr(self, name, default=None):\n try:\n return self.managedobjectattribute_set.get(key=name).value\n except ManagedObjectAttribute.DoesNotExist:\n return default",
"def get_attr_value(self, attr_name, use_default=True):\n try:\n return self.field_attrs[attr_name]\n except KeyError:\n if use_default:\n return self.get_attr_default(attr_name)\n\n return None",
"def get_attr_value(self, attr_name, use_default=True):\n try:\n return self.field_attrs[attr_name]\n except KeyError:\n if use_default:\n return self.get_attr_default(attr_name)\n\n return None",
"def get(self, name, default=UNDEFINED):\n try:\n return self.__getattr__(name)\n except AttributeError:\n return default",
"def get(self, name):\n try:\n return self._defaults[name]\n except KeyError:\n raise UndefinedDefault(\"default %s is undefined\" % name)",
"def get_attr_int(self, name, default=0):\n v = self.get_attr(name)\n if v is None:\n return default\n try:\n return int(v)\n except: # noqa\n return default",
"def getattr(self, name, *default):\n for attr in self.attributes:\n if attr.name.lower() == name.lower():\n return attr\n else:\n if default:\n return default[0]\n raise KeyError(name)",
"def getvalue(self, name, *default):\n try:\n return self.getattr(name).value\n except KeyError:\n if default:\n return default[0]\n raise",
"def get(self, name, default=None):\n\t\treturn self[name] if self[name] is not None else default",
"def getAttribute(self, key, default=''):\n return self.attr(key, default=default)",
"def getAttribute(self, key, default=''):\n return self.attr(key, default=default)",
"def get_attribute_by_name(self, name):\n if name in self._attributes:\n return self._attributes[name]",
"def getValue(name, default=None):",
"def get_property_default(self, name, default):\n if (not name in self.properties):\n return default\n return self.properties[name]",
"def get(self, key, default=None):\r\n return self._getAttrMap().get(key, default)",
"def getattribute(self, name):\n return self.attributes[name]",
"def get_attr(self, location, attr, default=None):\r\n return self.get_attrs(location).get(attr, default)",
"def get(self, name, default):\n try:\n return self[name]\n except KeyError:\n self.set(name, default)\n return default",
"def get_attr(attributes, name):\n try:\n return attributes.getValue(name)\n except KeyError:\n return None",
"def get_attribute(self, name):\n return self.element.get_attribute(name)",
"def is_attr_value_default(self, attr_name):\n try:\n attr_value = self.field_attrs[attr_name]\n except KeyError:\n return True\n\n return attr_value == self.get_attr_default(attr_name)"
]
| [
"0.8517897",
"0.84372294",
"0.84372294",
"0.8239614",
"0.8107409",
"0.78475",
"0.7844098",
"0.7844098",
"0.7844098",
"0.77803206",
"0.7375202",
"0.7375202",
"0.73467994",
"0.7339083",
"0.7134643",
"0.71048343",
"0.70601207",
"0.70501745",
"0.70332193",
"0.70332193",
"0.69189596",
"0.6833665",
"0.68012226",
"0.67496896",
"0.67403316",
"0.6726673",
"0.6719147",
"0.667384",
"0.6671417",
"0.660845"
]
| 0.8850165 | 0 |
Incorporates the given Attribute into the schema. | def add_attribute(self, attr):
name = attr.name
if name not in self.schema:
schema_cls = attr.get_schema_cls()
self.schema[name] = schema_cls(name)
self.schema[name].add_attribute(attr) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def merge_attribute_schema(self, attr_schema):\n name = attr_schema.name\n if name not in self.schema:\n self.schema[name] = attr_schema\n else:\n self.schema[name].merge_schema(attr_schema)",
"def add_attribute(self, attr):\n self.attrs.add_attribute(attr)",
"def add_attribute(self, attr):\n self.add(attr)",
"def add_attribute(self, attr):\n self.attrs.add(attr)",
"def add_attribute(self, attr: ResourceAttributeDescriptor) -> None:\n self._attributes[assert_not_none(attr.name)] = attr.bind(self)",
"def add_attribute(self, attribute):\n if attribute not in self.attributes:\n self.attributes.add(attribute)\n self.attribute_list.append(attribute)\n return self",
"def add_attribute(self, col, attr_name):\n # not optimised: not expected to be a usual operation\n new_table = np.c_[self.np_table, col]\n new_attributes = self.attributes + [attr_name]\n self.__init__(new_table, self.objects, new_attributes)",
"def addattribute(self, uid, field, value):\n\n raise NotImplementedError",
"def add_schema_attribute(self):\n schema_id = self.file.options['schema_id_attr']\n if self.sdef['df'] and self.file.options['include_schema_id']:\n # Normal defined entity\n ns = self.sdef['ns']\n id = self.sdef['id']\n schema = ns + \":\" + id\n self.attributes[schema_id] = {'value': schema}\n elif self.file.options['flag_custom_nodes']:\n self.attributes[schema_id] = {'value': 'custom'}",
"def validate_attribute(self, attr):\n self.validate_attribute_name(attr.name)\n self.schema[attr.name].validate_attribute(attr)",
"def define_attribute(self, name, atype, data=None):\n self.attributes.append(name)\n self.attribute_types[name] = atype\n self.attribute_data[name] = data",
"def put_attributes_attribute_db_id(\n attribute_db_id: str = Query(..., alias='attributeDbId'),\n authorization: Optional[constr(regex=r'^Bearer .*$')] = Query(\n None, alias='Authorization'\n ),\n body: GermplasmAttributeNewRequest = None,\n) -> GermplasmAttributeSingleResponse:\n pass",
"def add_attribute(self, attribute_name, attribute_value):\n self.attributes[attribute_name] = attribute_value",
"def add_attribute(self, attribute_key, attribute_value):\n self.set_attribute_value(attribute_key, attribute_value) # record the input key-value pair",
"def add_attribute(self, attribute_key, attribute_value):\n self.set_attribute_value(attribute_key, attribute_value) # record the input key-value pair",
"def add_attribute(self, attribute_key, attribute_value):\n self.set_attribute_value(attribute_key, attribute_value) # record the input key-value pair",
"def attribute_dict(self, attribute_dict):\n self.__attribute_dict.update(attribute_dict)",
"def add(self, attr: str):\n self._includes.add(attr)\n self._regex = None",
"def add_attribute(a, name, other):\n raise TypeError(\"can't add new attribute\")",
"def require_attribute(\n self, attribute: str, typ: Union[None, Type] = _Any) -> None:\n self.require_mapping()\n attr_nodes = [\n value_node for key_node, value_node in self.yaml_node.value\n if key_node.value == attribute\n ]\n if len(attr_nodes) == 0:\n raise RecognitionError(\n 'Missing required attribute \"{}\"'.format(attribute))\n attr_node = attr_nodes[0]\n\n if typ != _Any:\n recognized_types, result = self.__recognizer.recognize(\n attr_node, cast(Type, typ))\n if len(recognized_types) == 0:\n raise RecognitionError(format_rec_error(result))",
"def add(self, attr):\n self.validate_type(attr)\n self.values.add(attr.value)",
"def add_attribute(obj, attribute, value):\n if hasattr(obj, \"__dict__\"):\n setattr(obj, attribute, value)\n else:\n raise TypeError(\"can't add new attribute\")",
"def add_symbol_attribute(self, symbol_attribute):\n self.symbol_attributes.append(symbol_attribute)",
"def nma_attribute(self, stmt, p_elem, pset=None):\n att = \"nma:\" + stmt.keyword\n if att not in p_elem.attr:\n p_elem.attr[att] = stmt.arg",
"def add_attribute(obj, attribute, value):\n if not hasattr(obj, \"__dict__\"):\n raise TypeError(\"can't add new attribute\")\n setattr(obj, attribute, value)",
"def set(self, attribute: str, value: Any):\n return setattr(self, attribute, value)",
"def add_column(self, schema):\n self[schema.name] = schema.copy()",
"def add_valid_attribute(self, attr, deletable=False):\n if self.__class__ is Row:\n raise TypeError(msg.inherited_rows)\n super(Row, self).__setattr__(\n \"__sawhitelist__\", set(self.__sawhitelist__ | set((attr,))))\n if deletable:\n super(Row, self).__setattr__(\n \"__delwhitelist__\", set(self.__delwhitelist__ | set((attr,))))",
"def add_user_attribute(self, attribute_name, attribute_type, nested_type):\n self.request_url = \"{0}/{1}/attributes\".format(self.API_URL, self.USER_ENDPOINT)\n payload = {\n 'name': 'traits.' + attribute_name,\n 'attributeType': attribute_type,\n 'nestedType': nested_type\n }\n return self.__create_request(payload, self.REQUEST_POST, version=\"v1\")",
"def build_active_schema(cls, attr):\n return cls(attr.name, values={attr.value})"
]
| [
"0.6663215",
"0.60491776",
"0.5928776",
"0.58562344",
"0.5797518",
"0.574551",
"0.5701491",
"0.55894977",
"0.558921",
"0.55785716",
"0.5549905",
"0.5527866",
"0.5522455",
"0.5517122",
"0.5517122",
"0.5517122",
"0.5500225",
"0.54813933",
"0.5440213",
"0.5405617",
"0.52933156",
"0.52920836",
"0.52903354",
"0.5275258",
"0.5254783",
"0.52279836",
"0.5222015",
"0.5195839",
"0.51754993",
"0.51734865"
]
| 0.7208039 | 0 |
Whether the Attribute is compliant with the schema. | def is_valid_attribute(self, attr):
try:
self.validate_attribute(attr)
return True
except etal.LabelsSchemaError:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_valid(self, attribute: Attribute) -> bool:\n return self.get_data_type() == attribute.type",
"def has_attribute(self, name):\n return name in self.schema",
"def is_valid_attribute(self, attr):\n return self.is_valid(attr)",
"def isValidForSchema(schema):\n\n return True",
"def validate_attribute(self, attr):\n self.validate_attribute_name(attr.name)\n self.schema[attr.name].validate_attribute(attr)",
"def is_attribute(self):\r\n return conf.lib.clang_isAttribute(self)",
"def is_valid(self, attr: Optional[str] = None) -> bool:\n try:\n self.validate(attr)\n except (TypeError, ValueError):\n return False\n return True",
"def validate_attribute(self, attr):\n self.attrs.validate_attribute(attr)",
"def is_valid_attributes(self, attrs):\n try:\n self.validate_attributes(attrs)\n return True\n except etal.LabelsSchemaError:\n return False",
"def check(self, description: Description) -> bool:\n # if the name of the attribute is not present, return false.\n name = self.attribute_name\n if name not in description.values:\n return False\n\n # if the type of the value is different from the type of the attribute, return false.\n value = description.values[name]\n if type(self.constraint_type.value) in {list, tuple, set} and not isinstance(\n value, type(next(iter(self.constraint_type.value)))\n ):\n return False\n if type(self.constraint_type.value) not in {\n list,\n tuple,\n set,\n } and not isinstance(value, type(self.constraint_type.value)):\n return False\n\n # dispatch the check to the right implementation for the concrete constraint type.\n return self.constraint_type.check(value)",
"def validate_attribute(self, attr):\n self.validate(attr)",
"def __datatype_check(self, record_attribute, attribute_schema):\n if 'INT' in attribute_schema[TYPE_KEY].upper():\n if record_attribute.isdigit():\n return True\n elif attribute_schema[TYPE_KEY].upper() in DECIMAL_TYPES:\n if record_attribute.isdecimal():\n return True\n elif 'CHAR' in attribute_schema[TYPE_KEY].upper() \\\n or 'TEXT' in attribute_schema[TYPE_KEY].upper():\n if type(record_attribute) is str:\n return True\n else:\n IS_VALID_FILE = False\n return False",
"def isAttribute(self, p_int): # real signature unknown; restored from __doc__\n return False",
"def is_valid_attribute_name(self, name):\n try:\n self.validate_attribute_name(name)\n return True\n except etal.LabelsSchemaError:\n return False",
"def is_valid(self, data_model: DataModel) -> bool:\n # if the attribute name of the constraint is not present in the data model, the constraint is not valid.\n if self.attribute_name not in data_model.attributes_by_name:\n return False\n\n attribute = data_model.attributes_by_name[self.attribute_name]\n return self.constraint_type.is_valid(attribute)",
"def validate(self, attr):\n if attr.name != self.name:\n raise AttributeSchemaError(\n \"Expected name '%s'; found '%s'\" % (self.name, attr.name)\n )\n\n self.validate_type(attr)\n\n if not self.is_valid_value(attr.value):\n raise AttributeSchemaError(\n \"Value '%s' of attribute '%s' is not allowed by the \"\n \"schema \" % (attr.value, attr.name)\n )",
"def hasRequiredAttributes(self):\n return _libsbml.AlgebraicRule_hasRequiredAttributes(self)",
"def has_attributes(self):\n return bool(self.attrs)",
"def assert_valid_attribute(self, name):\n if name.startswith('_'):\n return\n self.assert_known_field(name)",
"def hasRequiredAttributes(self):\n return _libsbml.SpeciesType_hasRequiredAttributes(self)",
"def has_attribute(self, name):\n\n pass",
"def hasRequiredAttributes(self):\n return _libsbml.Unit_hasRequiredAttributes(self)",
"def __null_check(self, record_attribute, attribute_schema):\n if attribute_schema[NULLABLE_KEY]:\n return True\n elif record_attribute is not None:\n return True\n else:\n IS_VALID_FILE = False\n return False",
"def IsValid(self):\n return False",
"def hasRequiredAttributes(self):\n return _libsbml.AssignmentRule_hasRequiredAttributes(self)",
"def validate_attribute_name(self, name):\n if not self.has_attribute(name):\n raise AttributeContainerSchemaError(\n \"Attribute '%s' is not allowed by the schema\" % name\n )",
"def schemaIsValid(self):\n ret = libxml2mod.xmlSchemaIsValid(self._o)\n return ret",
"def hasRequiredAttributes(self):\n return _libsbml.Rule_hasRequiredAttributes(self)",
"def hasRequiredAttributes(self):\n return _libsbml.RateRule_hasRequiredAttributes(self)",
"def is_valid_type(self, attr: Optional[str] = None) -> bool:\n try:\n self.validate_type(attr)\n except TypeError:\n return False\n return True"
]
| [
"0.7286637",
"0.72063947",
"0.7186152",
"0.71108377",
"0.6968952",
"0.6960972",
"0.69131577",
"0.67958045",
"0.671763",
"0.66995484",
"0.6684665",
"0.6680829",
"0.6679944",
"0.66204864",
"0.6618594",
"0.65400356",
"0.6493107",
"0.64581275",
"0.64008856",
"0.6380986",
"0.63800997",
"0.637537",
"0.6374199",
"0.63519824",
"0.63195497",
"0.63170046",
"0.63160586",
"0.63107884",
"0.6306091",
"0.6303517"
]
| 0.75508046 | 0 |
Validates that the schema has an Attribute with the given name. | def validate_attribute_name(self, name):
if not self.has_attribute(name):
raise AttributeContainerSchemaError(
"Attribute '%s' is not allowed by the schema" % name
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate_attribute(self, attr):\n self.validate_attribute_name(attr.name)\n self.schema[attr.name].validate_attribute(attr)",
"def is_valid_attribute_name(self, name):\n try:\n self.validate_attribute_name(name)\n return True\n except etal.LabelsSchemaError:\n return False",
"def assert_valid_attribute(self, name):\n if name.startswith('_'):\n return\n self.assert_known_field(name)",
"def has_attribute(self, name):\n return name in self.schema",
"def validate(self, attr):\n if attr.name != self.name:\n raise AttributeSchemaError(\n \"Expected name '%s'; found '%s'\" % (self.name, attr.name)\n )\n\n self.validate_type(attr)\n\n if not self.is_valid_value(attr.value):\n raise AttributeSchemaError(\n \"Value '%s' of attribute '%s' is not allowed by the \"\n \"schema \" % (attr.value, attr.name)\n )",
"def validate_attribute(self, attr):\n self.attrs.validate_attribute(attr)",
"def validate_attribute(self, attr):\n self.validate(attr)",
"def has_attribute(self, name):\n\n pass",
"def has_attr_with_name(self, name):\n for attr in self:\n if attr.name == name:\n return True\n\n return False",
"def validateAttribute(self, attributeName):\n if (not attributeName in self._attributes):\n raise pcssErrors.PcssGlobalException(\"Error: attempted to set attribute %s which is not a valid pfa attribute\" % attributeName)",
"def is_valid_attribute(self, attr):\n try:\n self.validate_attribute(attr)\n return True\n except etal.LabelsSchemaError:\n return False",
"def is_valid_attribute(self, attr):\n try:\n self.validate_attribute(attr)\n return True\n except etal.LabelsSchemaError:\n return False",
"def validate(self, attr=None, notattr=None):\n\n if attr is not None:\n specified_attrs = {attr: self.attributes[attr]}\n else:\n specified_attrs = self.attributes\n\n for attr, attr_structure in specified_attrs.items():\n if notattr is not None and attr is notattr:\n continue\n\n attrval = getattr(self, attr)\n if attrval is None or attrval == {}:\n continue\n\n attr_schema = attr_structure['schema']\n validatedattrval = attr_schema.validate(attrval)\n setattr(self, attr, validatedattrval)",
"def validate(self, name):\n return name in self.dict",
"def validate_schema(self, schema):\n if type(schema) is not type(self):\n raise AttributeSchemaError(\n \"Expected schema to have type '%s'; found '%s'\"\n % (type(self), type(schema))\n )\n\n if schema.name != self.name:\n raise AttributeSchemaError(\n \"Expected schema to have name '%s'; found '%s'\"\n % (self.name, schema.name)\n )",
"def get_attribute_schema(self, name):\n self.validate_attribute_name(name)\n return self.schema[name]",
"def checkattr(name):\n\n def check(obj):\n try:\n attrgetter(name)(obj)\n return True\n except AttributeError:\n return False\n\n return check",
"def attribute_exists(se: SchemaExplorer, attribute_label: str) -> bool:\n schema_graph = se.get_nx_schema()\n\n if attribute_label in schema_graph.nodes:\n return True\n return False",
"def validate_attributes(self, attrs):\n self.attrs.validate(attrs)",
"def check_attrname(self, attrname, \\\n warning_words=\"attrname not available for this analyzer\", \\\n warn=False):\n\n if not self._initialized:\n return True\n\n if attrname not in self._value_count_dict or attrname not in self._bucket_comp:\n if warn:\n raise BucketAssignError(warning_words)\n else:\n return False\n\n return True",
"def is_element_attribute(element, attribute_name):\n return element.get(attribute_name) is not None",
"def check_schema_name(name: str):\n if not is_valid_schema_name(name):\n raise ValidationError(\"Invalid string used for the schema name.\")",
"def has_attr(self, name):\n return name in self and not self[name] in EMPTY_VALUES",
"def is_valid_attribute(self, attr):\n return self.is_valid(attr)",
"def is_attr_exist(self, section_name: str, attr_name: str) -> bool:\n pass",
"def validate(attrs):\n print \"I GOT HERE.\"\n try:\n #required_attributes = ('qquuid', 'qqfilename')\n #[attrs.get(k) for k,v in attrs.items()]\n return True\n except Exception, e:\n return False",
"def validate(attrs):\n try:\n #required_attributes = ('qquuid', 'qqfilename')\n #[attrs.get(k) for k,v in attrs.items()]\n return True\n except Exception, e:\n return False",
"def validate_type(self, attr):\n if not isinstance(attr, self._attr_cls):\n raise AttributeSchemaError(\n \"Expected attribute '%s' to have type '%s'; found '%s'\"\n % (attr.name, self.type, etau.get_class_name(attr))\n )",
"def check(self, description: Description) -> bool:\n # if the name of the attribute is not present, return false.\n name = self.attribute_name\n if name not in description.values:\n return False\n\n # if the type of the value is different from the type of the attribute, return false.\n value = description.values[name]\n if type(self.constraint_type.value) in {list, tuple, set} and not isinstance(\n value, type(next(iter(self.constraint_type.value)))\n ):\n return False\n if type(self.constraint_type.value) not in {\n list,\n tuple,\n set,\n } and not isinstance(value, type(self.constraint_type.value)):\n return False\n\n # dispatch the check to the right implementation for the concrete constraint type.\n return self.constraint_type.check(value)",
"def validate(self, attrs: dict):\n attr_types_set = {attr.lower() for attr in attrs.keys()}\n\n missing_required = self.required_attrs - attr_types_set\n if missing_required:\n missing_required = ', '.join(missing_required)\n raise SchemaValidationError(f'Missing required attributes: {missing_required}')\n\n not_required = attr_types_set - self.required_attrs\n not_allowed = not_required - self.allowed_attrs\n if not_allowed:\n not_allowed = ', '.join(not_allowed)\n raise SchemaValidationError(f'Attribute types are not allowed: {not_allowed}')\n\n self.attr_type_validate(attrs)"
]
| [
"0.77677214",
"0.76652",
"0.7619264",
"0.75500405",
"0.73856294",
"0.7114656",
"0.6867957",
"0.6837161",
"0.6732875",
"0.6634328",
"0.65856785",
"0.65856785",
"0.65780014",
"0.6362414",
"0.63355386",
"0.6302873",
"0.6245351",
"0.62054867",
"0.61891484",
"0.6172528",
"0.61213547",
"0.6072449",
"0.6052454",
"0.6033442",
"0.6022128",
"0.6013201",
"0.59875304",
"0.5923309",
"0.5922433",
"0.592096"
]
| 0.8732103 | 0 |
Validates that the Attribute is compliant with the schema. | def validate_attribute(self, attr):
self.validate_attribute_name(attr.name)
self.schema[attr.name].validate_attribute(attr) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate(self, attr):\n if attr.name != self.name:\n raise AttributeSchemaError(\n \"Expected name '%s'; found '%s'\" % (self.name, attr.name)\n )\n\n self.validate_type(attr)\n\n if not self.is_valid_value(attr.value):\n raise AttributeSchemaError(\n \"Value '%s' of attribute '%s' is not allowed by the \"\n \"schema \" % (attr.value, attr.name)\n )",
"def validate_attribute(self, attr):\n self.validate(attr)",
"def validate_attribute(self, attr):\n self.attrs.validate_attribute(attr)",
"def validate_attribute_name(self, name):\n if not self.has_attribute(name):\n raise AttributeContainerSchemaError(\n \"Attribute '%s' is not allowed by the schema\" % name\n )",
"def is_valid_attribute(self, attr):\n try:\n self.validate_attribute(attr)\n return True\n except etal.LabelsSchemaError:\n return False",
"def is_valid_attribute(self, attr):\n try:\n self.validate_attribute(attr)\n return True\n except etal.LabelsSchemaError:\n return False",
"def validate(self, attr=None, notattr=None):\n\n if attr is not None:\n specified_attrs = {attr: self.attributes[attr]}\n else:\n specified_attrs = self.attributes\n\n for attr, attr_structure in specified_attrs.items():\n if notattr is not None and attr is notattr:\n continue\n\n attrval = getattr(self, attr)\n if attrval is None or attrval == {}:\n continue\n\n attr_schema = attr_structure['schema']\n validatedattrval = attr_schema.validate(attrval)\n setattr(self, attr, validatedattrval)",
"def validate_attributes(self, attrs):\n self.attrs.validate(attrs)",
"def validate_subset_of_schema(self, schema):\n self.validate_schema_type(schema)\n\n if self.name != schema.name:\n raise AttributeSchemaError(\n \"Expected name '%s'; found '%s'\" % (schema.name, self.name)\n )\n\n if self.exclusive != schema.exclusive:\n raise AttributeSchemaError(\n \"Expected exclusive '%s' for attribute '%s'; found '%s'\"\n % (schema.exclusive, self.name, self.exclusive)\n )\n\n if self.default != schema.default:\n raise AttributeSchemaError(\n \"Expected default '%s' for attribute '%s'; found '%s'\"\n % (schema.default, self.name, self.default)\n )",
"def validate_type(self, attr):\n if not isinstance(attr, self._attr_cls):\n raise AttributeSchemaError(\n \"Expected attribute '%s' to have type '%s'; found '%s'\"\n % (attr.name, self.type, etau.get_class_name(attr))\n )",
"def validate_schema(self, schema):\n if type(schema) is not type(self):\n raise AttributeSchemaError(\n \"Expected schema to have type '%s'; found '%s'\"\n % (type(self), type(schema))\n )\n\n if schema.name != self.name:\n raise AttributeSchemaError(\n \"Expected schema to have name '%s'; found '%s'\"\n % (self.name, schema.name)\n )",
"def _validate(self):\n fields, schema = self.__dict__, self._def.default\n extra_fields = fields.viewkeys() - schema.viewkeys()\n if len(extra_fields) > 0:\n raise AttributeError('Fields found that are not in the schema: %r' % (list(extra_fields)))\n for key in fields.iterkeys():\n if type(fields[key]) is not type(schema[key]):\n raise AttributeError('Invalid %s for field \"%s\", should be %s' %\n (type(fields[key]), key, type(schema[key])))",
"def validate(self, attrs: dict):\n attr_types_set = {attr.lower() for attr in attrs.keys()}\n\n missing_required = self.required_attrs - attr_types_set\n if missing_required:\n missing_required = ', '.join(missing_required)\n raise SchemaValidationError(f'Missing required attributes: {missing_required}')\n\n not_required = attr_types_set - self.required_attrs\n not_allowed = not_required - self.allowed_attrs\n if not_allowed:\n not_allowed = ', '.join(not_allowed)\n raise SchemaValidationError(f'Attribute types are not allowed: {not_allowed}')\n\n self.attr_type_validate(attrs)",
"def assert_valid_attribute(self, name):\n if name.startswith('_'):\n return\n self.assert_known_field(name)",
"def is_valid_attribute(self, attr):\n return self.is_valid(attr)",
"def is_valid(self, attribute: Attribute) -> bool:\n return self.get_data_type() == attribute.type",
"def validateAttribute(self, attributeName):\n if (not attributeName in self._attributes):\n raise pcssErrors.PcssGlobalException(\"Error: attempted to set attribute %s which is not a valid pfa attribute\" % attributeName)",
"def is_valid(self, attr: Optional[str] = None) -> bool:\n try:\n self.validate(attr)\n except (TypeError, ValueError):\n return False\n return True",
"def isValidForSchema(schema):\n\n return True",
"def validate_subset_of_schema(self, schema):\n self.validate_schema_type(schema)\n\n for name, attr_schema in iteritems(self.schema):\n if not schema.has_attribute(name):\n raise AttributeContainerSchemaError(\n \"Attribute '%s' does not appear in schema\" % name\n )\n\n other_attr_schema = schema.get_attribute_schema(name)\n attr_schema.validate_subset_of_schema(other_attr_schema)",
"def validated() -> Any:\n return attr.s(slots=True, kw_only=True, eq=False)",
"def test_validate_schema():\n data = {\n 'business': {\n 'cacheId': 1,\n 'foundingDate': '2007-04-08T00:00:00+00:00',\n 'identifier': 'CP1234567',\n 'legalName': 'legal name CP1234567'\n },\n }\n\n is_valid, _ = validate(data, 'business', validate_schema=True)\n\n assert is_valid",
"def is_valid_attribute_name(self, name):\n try:\n self.validate_attribute_name(name)\n return True\n except etal.LabelsSchemaError:\n return False",
"def validate(self: BaseType, attr: Optional[str] = None) -> BaseType:\n if attr is None:\n for attribute in self._attributes:\n self._validate(attribute)\n else:\n self._validate(attr)\n return self",
"def check_param(cls, key, val) -> None:\n\n # Check if attribute is valid for this resource at all\n if key not in cls._Attributes:\n raise TypeError(\"Unexpected attribute '{}' for resource '{}'\".format(\n key,\n cls))\n\n Attribute = cls._Attributes[key]\n\n # Check if attribute is null and is allowed to be null\n if Attribute['optional'] and val is None:\n return\n\n # Check if attribute has correct type\n if Attribute['list']:\n msg = \"Attribute '{}' of resource {} has to be of type list of '{}'\".format(\n key,\n cls,\n Attribute['type'])\n if not isinstance(val, list):\n raise TypeError(msg)\n for item in val:\n if not isinstance(item, Attribute['type']):\n raise TypeError(msg)\n else:\n msg = \"Attribute '{}' of resource {} has to be of type '{}'\".format(\n key,\n cls,\n Attribute['type'])\n if not isinstance(val, Attribute['type']):\n raise TypeError(msg)\n\n # Check all checks\n if 'checks' in Attribute:\n msg = \"Illegal value '{}' for attribute '{}' of resource {}\".format(\n val,\n key,\n cls)\n for value_check in Attribute['checks']:\n if value_check(cls, val) is False:\n raise ValueError(msg)",
"def validate_instance(instance: Any) -> Any:\n attr.validate(instance)",
"def validate_field(self, fieldname):\n fieldname = self.__class__.FIELD_ALIAS.get(fieldname, fieldname)\n v = self._data[fieldname]\n t = self._field_or_default_datatype(fieldname, v)\n gfapy.Field._validate_gfa_field(v, t, fieldname)",
"def _check_consistency(self):\n # check that all required attributes in the schema are contained in the description\n required_attributes = [\n attribute.name\n for attribute in self.data_model.attributes\n if attribute.is_required\n ]\n if not all(\n attribute_name in self.values for attribute_name in required_attributes\n ):\n raise AttributeInconsistencyException(\"Missing required attribute.\")\n\n # check that all values are defined in the data model\n all_attributes = [attribute.name for attribute in self.data_model.attributes]\n if not all(key in all_attributes for key in self.values.keys()):\n raise AttributeInconsistencyException(\n \"Have extra attribute not in data model.\"\n )\n\n # check that each of the provided values are consistent with that specified in the data model\n for key, value in self.values.items():\n attribute = next(\n (\n attribute\n for attribute in self.data_model.attributes\n if attribute.name == key\n ),\n None,\n )\n if not isinstance(value, attribute.type):\n # values does not match type in data model\n raise AttributeInconsistencyException(\n \"Attribute {} has incorrect type: {}\".format(\n attribute.name, attribute.type\n )\n )\n if not type(value) in ALLOWED_ATTRIBUTE_TYPES:\n # value type matches data model, but it is not an allowed type\n raise AttributeInconsistencyException(\n \"Attribute {} has unallowed type: {}. Allowed types: {}\".format(\n attribute.name, type(value), ALLOWED_ATTRIBUTE_TYPES,\n )\n )",
"def validate(self, attrs):\n # Validate attributes\n for attr in attrs:\n self.validate_attribute(attr)\n\n # Enforce attribute exclusivity, if necessary\n if self.has_exclusive_attributes:\n counts = attrs.get_attribute_counts()\n for name, count in iteritems(counts):\n if count > 1 and self.is_exclusive_attribute(name):\n raise AttributeContainerSchemaError(\n \"Attribute '%s' is exclusive but appears %d times in \"\n \"this container\" % (name, count)\n )",
"def __datatype_check(self, record_attribute, attribute_schema):\n if 'INT' in attribute_schema[TYPE_KEY].upper():\n if record_attribute.isdigit():\n return True\n elif attribute_schema[TYPE_KEY].upper() in DECIMAL_TYPES:\n if record_attribute.isdecimal():\n return True\n elif 'CHAR' in attribute_schema[TYPE_KEY].upper() \\\n or 'TEXT' in attribute_schema[TYPE_KEY].upper():\n if type(record_attribute) is str:\n return True\n else:\n IS_VALID_FILE = False\n return False"
]
| [
"0.8203361",
"0.8005601",
"0.80000144",
"0.74886554",
"0.73177165",
"0.73177165",
"0.71307045",
"0.701625",
"0.69656026",
"0.6961678",
"0.6932738",
"0.6932649",
"0.6918583",
"0.68263227",
"0.6823013",
"0.67536736",
"0.67139626",
"0.6583246",
"0.65653574",
"0.6533311",
"0.6480849",
"0.6471056",
"0.6450951",
"0.6428617",
"0.63835794",
"0.63468796",
"0.63461155",
"0.63455904",
"0.6307655",
"0.6253752"
]
| 0.8464728 | 0 |
Validates that the AttributeContainer is compliant with the schema. | def validate(self, attrs):
# Validate attributes
for attr in attrs:
self.validate_attribute(attr)
# Enforce attribute exclusivity, if necessary
if self.has_exclusive_attributes:
counts = attrs.get_attribute_counts()
for name, count in iteritems(counts):
if count > 1 and self.is_exclusive_attribute(name):
raise AttributeContainerSchemaError(
"Attribute '%s' is exclusive but appears %d times in "
"this container" % (name, count)
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate_subset_of_schema(self, schema):\n self.validate_schema_type(schema)\n\n for name, attr_schema in iteritems(self.schema):\n if not schema.has_attribute(name):\n raise AttributeContainerSchemaError(\n \"Attribute '%s' does not appear in schema\" % name\n )\n\n other_attr_schema = schema.get_attribute_schema(name)\n attr_schema.validate_subset_of_schema(other_attr_schema)",
"def validate_attribute(self, attr):\n self.validate_attribute_name(attr.name)\n self.schema[attr.name].validate_attribute(attr)",
"def validate(self, attr):\n if attr.name != self.name:\n raise AttributeSchemaError(\n \"Expected name '%s'; found '%s'\" % (self.name, attr.name)\n )\n\n self.validate_type(attr)\n\n if not self.is_valid_value(attr.value):\n raise AttributeSchemaError(\n \"Value '%s' of attribute '%s' is not allowed by the \"\n \"schema \" % (attr.value, attr.name)\n )",
"def validate_attribute_name(self, name):\n if not self.has_attribute(name):\n raise AttributeContainerSchemaError(\n \"Attribute '%s' is not allowed by the schema\" % name\n )",
"def validate_subset_of_schema(self, schema):\n self.validate_schema_type(schema)\n\n if self.name != schema.name:\n raise AttributeSchemaError(\n \"Expected name '%s'; found '%s'\" % (schema.name, self.name)\n )\n\n if self.exclusive != schema.exclusive:\n raise AttributeSchemaError(\n \"Expected exclusive '%s' for attribute '%s'; found '%s'\"\n % (schema.exclusive, self.name, self.exclusive)\n )\n\n if self.default != schema.default:\n raise AttributeSchemaError(\n \"Expected default '%s' for attribute '%s'; found '%s'\"\n % (schema.default, self.name, self.default)\n )",
"def validate_schema(self, schema):\n if type(schema) is not type(self):\n raise AttributeSchemaError(\n \"Expected schema to have type '%s'; found '%s'\"\n % (type(self), type(schema))\n )\n\n if schema.name != self.name:\n raise AttributeSchemaError(\n \"Expected schema to have name '%s'; found '%s'\"\n % (self.name, schema.name)\n )",
"def validate(self, attrs: dict):\n attr_types_set = {attr.lower() for attr in attrs.keys()}\n\n missing_required = self.required_attrs - attr_types_set\n if missing_required:\n missing_required = ', '.join(missing_required)\n raise SchemaValidationError(f'Missing required attributes: {missing_required}')\n\n not_required = attr_types_set - self.required_attrs\n not_allowed = not_required - self.allowed_attrs\n if not_allowed:\n not_allowed = ', '.join(not_allowed)\n raise SchemaValidationError(f'Attribute types are not allowed: {not_allowed}')\n\n self.attr_type_validate(attrs)",
"def validate_attributes(self, attrs):\n self.attrs.validate(attrs)",
"def _validate(self):\n fields, schema = self.__dict__, self._def.default\n extra_fields = fields.viewkeys() - schema.viewkeys()\n if len(extra_fields) > 0:\n raise AttributeError('Fields found that are not in the schema: %r' % (list(extra_fields)))\n for key in fields.iterkeys():\n if type(fields[key]) is not type(schema[key]):\n raise AttributeError('Invalid %s for field \"%s\", should be %s' %\n (type(fields[key]), key, type(schema[key])))",
"def validate_attribute(self, attr):\n self.attrs.validate_attribute(attr)",
"def isValidForSchema(schema):\n\n return True",
"def validate(self, attr=None, notattr=None):\n\n if attr is not None:\n specified_attrs = {attr: self.attributes[attr]}\n else:\n specified_attrs = self.attributes\n\n for attr, attr_structure in specified_attrs.items():\n if notattr is not None and attr is notattr:\n continue\n\n attrval = getattr(self, attr)\n if attrval is None or attrval == {}:\n continue\n\n attr_schema = attr_structure['schema']\n validatedattrval = attr_schema.validate(attrval)\n setattr(self, attr, validatedattrval)",
"def validate_attribute(self, attr):\n self.validate(attr)",
"def is_schema_valid(self, schema):\n for k, v in schema.items():\n if v[0] == \"var_len\":\n assert len(v) == 2\n assert v[1] in TF_VALUE\n\n if v[0] == \"fixed_len\":\n assert len(v) == 3\n assert v[1] in TF_VALUE\n assert isinstance(v[2], list)",
"def _check_consistency(self):\n # check that all required attributes in the schema are contained in the description\n required_attributes = [\n attribute.name\n for attribute in self.data_model.attributes\n if attribute.is_required\n ]\n if not all(\n attribute_name in self.values for attribute_name in required_attributes\n ):\n raise AttributeInconsistencyException(\"Missing required attribute.\")\n\n # check that all values are defined in the data model\n all_attributes = [attribute.name for attribute in self.data_model.attributes]\n if not all(key in all_attributes for key in self.values.keys()):\n raise AttributeInconsistencyException(\n \"Have extra attribute not in data model.\"\n )\n\n # check that each of the provided values are consistent with that specified in the data model\n for key, value in self.values.items():\n attribute = next(\n (\n attribute\n for attribute in self.data_model.attributes\n if attribute.name == key\n ),\n None,\n )\n if not isinstance(value, attribute.type):\n # values does not match type in data model\n raise AttributeInconsistencyException(\n \"Attribute {} has incorrect type: {}\".format(\n attribute.name, attribute.type\n )\n )\n if not type(value) in ALLOWED_ATTRIBUTE_TYPES:\n # value type matches data model, but it is not an allowed type\n raise AttributeInconsistencyException(\n \"Attribute {} has unallowed type: {}. Allowed types: {}\".format(\n attribute.name, type(value), ALLOWED_ATTRIBUTE_TYPES,\n )\n )",
"def schema_check(self):\n\n try:\n self.schema.assertValid(self.get_content())\n except lxml.etree.DocumentInvalid:\n logger.error(\"PDU failed schema check\")\n for line in self.pretty_print_content().splitlines():\n logger.warning(line)\n raise",
"def test_validate_schema():\n data = {\n 'business': {\n 'cacheId': 1,\n 'foundingDate': '2007-04-08T00:00:00+00:00',\n 'identifier': 'CP1234567',\n 'legalName': 'legal name CP1234567'\n },\n }\n\n is_valid, _ = validate(data, 'business', validate_schema=True)\n\n assert is_valid",
"def validateAttribute(self, attributeName):\n if (not attributeName in self._attributes):\n raise pcssErrors.PcssGlobalException(\"Error: attempted to set attribute %s which is not a valid pfa attribute\" % attributeName)",
"def validate_subset_of_schema(self, schema):\n super(CategoricalAttributeSchema, self).validate_subset_of_schema(\n schema\n )\n\n if not self.categories.issubset(schema.categories):\n raise AttributeSchemaError(\n \"Categories %s are not a subset of %s\"\n % (self.categories, schema.categories)\n )",
"def validate(attrs):\n print \"I GOT HERE.\"\n try:\n #required_attributes = ('qquuid', 'qqfilename')\n #[attrs.get(k) for k,v in attrs.items()]\n return True\n except Exception, e:\n return False",
"def is_valid_attribute(self, attr):\n try:\n self.validate_attribute(attr)\n return True\n except etal.LabelsSchemaError:\n return False",
"def is_valid_attribute(self, attr):\n try:\n self.validate_attribute(attr)\n return True\n except etal.LabelsSchemaError:\n return False",
"def validate_type(self, attr):\n if not isinstance(attr, self._attr_cls):\n raise AttributeSchemaError(\n \"Expected attribute '%s' to have type '%s'; found '%s'\"\n % (attr.name, self.type, etau.get_class_name(attr))\n )",
"def validate_subset_of_schema(self, schema):\n self.validate_schema_type(schema)\n\n if self.label != schema.label:\n raise KeypointsSchemaError(\n \"Expected keypoints label '%s'; found '%s'\"\n % (schema.label, self.label)\n )\n\n self.attrs.validate_subset_of_schema(schema.attrs)",
"def _validate(self):\n All = voluptuous.All\n Required = voluptuous.Required\n Length = voluptuous.Length\n Extra = voluptuous.Extra\n\n schema = voluptuous.Schema({\n Required('description'): voluptuous.All(str, Length(min=5)),\n Required('environments'): dict,\n Required('application'): {\n Required('name'): str,\n Required('scenario'): [{\n Required('driver'): str,\n Required('description'): All(str, Length(min=5)),\n Extra: object}]}})\n try:\n schema(self.marmite_tree)\n except voluptuous.MultipleInvalid as e:\n LOG.error(\"Failed to validate %s/marmite.yaml structure: %s\" %\n (self.fs_layer.base_dir, e))\n raise InvalidStructure()",
"def schemaIsValid(self):\n ret = libxml2mod.xmlSchemaIsValid(self._o)\n return ret",
"def check_consistency(self) -> 'Schema':\n errors = []\n fields = self.__fields__\n for k, v in fields.items():\n _, err = v.validate(getattr(self, k), fields, loc=k)\n if err:\n errors.append(err)\n if errors:\n raise ValidationError(errors, self.__class__)\n return self",
"def validate(attrs):\n try:\n #required_attributes = ('qquuid', 'qqfilename')\n #[attrs.get(k) for k,v in attrs.items()]\n return True\n except Exception, e:\n return False",
"def validate(self, schema=os.path.join(os.path.dirname(__file__), 'am.xsd')):\n return validate_xml(schema, self.path, from_path=True)",
"def is_valid_attributes(self, attrs):\n try:\n self.validate_attributes(attrs)\n return True\n except etal.LabelsSchemaError:\n return False"
]
| [
"0.7271944",
"0.70180625",
"0.7017623",
"0.6986532",
"0.6902823",
"0.67330015",
"0.6720441",
"0.6674313",
"0.6657943",
"0.66326123",
"0.64983755",
"0.64881223",
"0.6440674",
"0.6394842",
"0.63349223",
"0.6258516",
"0.61419076",
"0.6141416",
"0.6073022",
"0.60543233",
"0.60319734",
"0.60319734",
"0.60242337",
"0.60052335",
"0.5996345",
"0.599229",
"0.5983208",
"0.5964237",
"0.59378755",
"0.59292924"
]
| 0.7024046 | 1 |
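A minimal, self-contained sketch of the container-schema validation pattern shown in the query/document pair of the row above: validate each attribute individually, then enforce exclusivity with an attribute-count check. The `ContainerSchema` class, its fields, and the example values are hypothetical illustrations invented for this sketch, not part of any library referenced in this row.

```python
from collections import Counter

class SchemaError(Exception):
    """Raised when an attribute container violates its schema."""

class ContainerSchema:
    # allowed: dict mapping attribute name -> set of allowed values
    # exclusive: attribute names that may appear at most once per container
    def __init__(self, allowed, exclusive=()):
        self.allowed = allowed
        self.exclusive = set(exclusive)

    def validate(self, attrs):
        # First pass: validate each (name, value) pair individually.
        for name, value in attrs:
            if name not in self.allowed:
                raise SchemaError("Attribute '%s' is not allowed by the schema" % name)
            if value not in self.allowed[name]:
                raise SchemaError("Value '%s' of attribute '%s' is not allowed" % (value, name))
        # Second pass: enforce exclusivity via attribute counts.
        counts = Counter(name for name, _ in attrs)
        for name, count in counts.items():
            if count > 1 and name in self.exclusive:
                raise SchemaError(
                    "Attribute '%s' is exclusive but appears %d times in this container" % (name, count)
                )

schema = ContainerSchema({"color": {"red", "blue"}, "size": {"S", "M"}}, exclusive=("size",))
schema.validate([("color", "red"), ("color", "blue"), ("size", "M")])  # passes
# schema.validate([("size", "S"), ("size", "M")])  # would raise SchemaError (exclusive attribute repeated)
```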
Validates that this schema is a subset of the given schema. | def validate_subset_of_schema(self, schema):
self.validate_schema_type(schema)
for name, attr_schema in iteritems(self.schema):
if not schema.has_attribute(name):
raise AttributeContainerSchemaError(
"Attribute '%s' does not appear in schema" % name
)
other_attr_schema = schema.get_attribute_schema(name)
attr_schema.validate_subset_of_schema(other_attr_schema) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate_subset_of_schema(self, schema):\n self.validate_schema_type(schema)\n\n if self.name != schema.name:\n raise AttributeSchemaError(\n \"Expected name '%s'; found '%s'\" % (schema.name, self.name)\n )\n\n if self.exclusive != schema.exclusive:\n raise AttributeSchemaError(\n \"Expected exclusive '%s' for attribute '%s'; found '%s'\"\n % (schema.exclusive, self.name, self.exclusive)\n )\n\n if self.default != schema.default:\n raise AttributeSchemaError(\n \"Expected default '%s' for attribute '%s'; found '%s'\"\n % (schema.default, self.name, self.default)\n )",
"def validate_subset_of_schema(self, schema):\n self.validate_schema_type(schema)\n\n for label, poly_schema in iteritems(self.schema):\n if not schema.has_keypoints_label(label):\n raise KeypointsContainerSchemaError(\n \"Keypoints label '%s' does not appear in schema\" % label\n )\n\n other_keypoints_schema = schema.get_keypoints_schema(label)\n poly_schema.validate_subset_of_schema(other_keypoints_schema)",
"def validate_subset_of_schema(self, schema):\n self.validate_schema_type(schema)\n\n if self.label != schema.label:\n raise KeypointsSchemaError(\n \"Expected keypoints label '%s'; found '%s'\"\n % (schema.label, self.label)\n )\n\n self.attrs.validate_subset_of_schema(schema.attrs)",
"def validate_subset_of_schema(self, schema):\n super(BooleanAttributeSchema, self).validate_subset_of_schema(schema)\n\n if not self.values.issubset(schema.values):\n raise AttributeSchemaError(\n \"Values %s are not a subset of %s\"\n % (self.values, schema.values)\n )",
"def validate_subset_of_schema(self, schema):\n super(CategoricalAttributeSchema, self).validate_subset_of_schema(\n schema\n )\n\n if not self.categories.issubset(schema.categories):\n raise AttributeSchemaError(\n \"Categories %s are not a subset of %s\"\n % (self.categories, schema.categories)\n )",
"def validate_subset_of_schema(self, schema):\n super(NumericAttributeSchema, self).validate_subset_of_schema(schema)\n\n if self.range and (\n not schema.range\n or self.range[0] < schema.range[0]\n or self.range[1] > schema.range[1]\n ):\n raise AttributeSchemaError(\n \"Range %s is not a subset of %s\" % (self.range, schema.range)\n )",
"def compatibleSchema(self,\n schema: schemaconverter.TDXSchema,\n raise_error: bool = True\n ) -> bool:\n db_tdx_schema = self.tdx_schema\n # see https://stackoverflow.com/a/41579450/10149169\n is_subset = db_tdx_schema.items() <= schema.items()\n if not is_subset and raise_error:\n raise ValueError((\n \"The given database schema is not compatible with the\"\n \" existing database schema. The given schema was {}\"\n \" but the existing schema was {}\").format(\n schema, db_tdx_schema))\n return is_subset",
"def isValidForSchema(schema):\n\n return True",
"def validate_full_schema(self):\n #self.check_duplicate_labels()\n for record in self.extension_schema['schema']['@graph']:\n #self.check_whether_atid_and_label_match(record)\n if record['@type'] == \"rdfs:Class\":\n self.validate_class_schema(record)\n #self.validate_class_label(record[\"@id\"])\n self.validate_validation_field(record)\n elif record['@type'] == \"rdf:Property\":\n self.validate_property_schema(record)\n #self.validate_property_label(record[\"@id\"])\n #self.validate_domainIncludes_field(record[\"http://schema.org/domainIncludes\"])\n #self.validate_rangeIncludes_field(record[\"http://schema.org/rangeIncludes\"])\n #else:\n # raise ValueError('wrong @type value found: {}'.format(record))",
"def is_subset(self, other):",
"def is_strict_subset(self, other):\n return self.is_subset(other) and self != other",
"def is_proper_subset(self, other):\n if isinstance(other, Set):\n return self != other and self.is_subset(other)\n else:\n raise ValueError(\"Unknown argument '%s'\" % other)",
"def is_schema_valid(self, schema):\n for k, v in schema.items():\n if v[0] == \"var_len\":\n assert len(v) == 2\n assert v[1] in TF_VALUE\n\n if v[0] == \"fixed_len\":\n assert len(v) == 3\n assert v[1] in TF_VALUE\n assert isinstance(v[2], list)",
"def is_proper_subset(self, other):\n if not isinstance(other, SetPy):\n raise TypeError(\"Can only be proper subset of another SetPy\")\n return self.is_subset(other) and not self == other",
"def check_consistency(self) -> 'Schema':\n errors = []\n fields = self.__fields__\n for k, v in fields.items():\n _, err = v.validate(getattr(self, k), fields, loc=k)\n if err:\n errors.append(err)\n if errors:\n raise ValidationError(errors, self.__class__)\n return self",
"def _validate_subsets(self, subsets: Sequence[str]) -> Sequence[str]:\n if not subsets:\n raise ValueError(\"no subsets specified\")\n for subset in subsets:\n if subset not in self.data_files.keys():\n raise ValueError(f\"{subset} is not valid\")\n return subsets",
"def validate_schema(self, data, **kwargs):\n if \"role\" not in data and \"visible\" not in data:\n raise ValidationError(_(\"Missing fields 'role' and/or 'visible'.\"))",
"def _verify_schema(schema):\n assert type(schema) in [dict, tuple], f'Expected a dict or a tuple but got {type(schema)}'\n if isinstance(schema, tuple):\n assert len(schema) == 2, f'Expected a tuple with length 2 but got length {len(schema)}'\n if schema[1] is not None:\n assert isinstance(schema[1], schema[0]), f'{str(schema[1])} does not have expected type {str(schema)}'\n elif isinstance(schema, dict):\n for sub_schema in schema.values():\n _verify_schema(sub_schema)",
"def validate_json(schema, doc):\n is_invalid = set(doc).difference(set(schema))\n if is_invalid:\n return False\n return True",
"def filter_by_schema(self, schema):\n pass",
"def test_schema_completeness_validation_valid_input(self):\n for complete_schema in list_of_schema_inputs:\n validate_json_schema_completeness(complete_schema)\n\n assert True",
"def validate_schema(self, schema):\n json_schema_path = os.path.join(_ROOT, 'data', 'schema.json')\n json_schema = load_json_or_yaml(json_schema_path)\n return validate(schema, json_schema)",
"def subfields_all(verifield, required):\n for req_key, req_val in required.items():\n if getitem(verifield, req_key, '') != req_val:\n return False\n return True",
"def _validate_against_schema(self, strand, data):\n schema = self._get_schema(strand)\n\n try:\n jsonschema_validate(instance=data, schema=schema)\n logger.debug(\"Validated %s against schema\", strand)\n\n except ValidationError as e:\n raise exceptions.invalid_contents_map[strand](str(e))",
"def check_validity(self):\n if len(self.constraints) < 2: # pragma: nocover\n raise ValueError(\n \"Invalid input value for type '{}': number of \"\n \"subexpression must be at least 2.\".format(type(self).__name__)\n )\n for constraint in self.constraints:\n constraint.check_validity()",
"def check_validity(self):\n if len(self.constraints) < 2: # pragma: nocover\n raise ValueError(\n \"Invalid input value for type '{}': number of \"\n \"subexpression must be at least 2.\".format(type(self).__name__)\n )\n for constraint in self.constraints:\n constraint.check_validity()",
"def validate(self):\n self._validate_time_index()\n self._validate_num_profiles()\n self._validate_merge_col_exists()\n self._validate_unique_merge_col()\n self._validate_merge_col_overlaps()",
"def is_subset(self, other):\n # we want to know if some set, A, is a subset of another set, B\n # go through every element in set A\n for bucket in self.buckets:\n for element in bucket.iterate():\n # if B has an element that A does not, then\n # A is NOT a subset of B\n if not other.contains(element):\n return False\n # if we do not find an element in B that is not in A, then\n # A must be a subset of B\n return True",
"def check_valid_schema(context):\n data = context.response.json()\n validate_schema(data)",
"def is_subset(self, other):\n if not isinstance(other, Set):\n raise ValueError(\"Unknown argument '%s'\" % other)\n\n # Handle the trivial cases\n if self == other:\n return True\n is_empty = self.is_empty\n if is_empty is True:\n return True\n elif fuzzy_not(is_empty) and other.is_empty:\n return False\n if self.is_finite_set is False and other.is_finite_set:\n return False\n\n # Dispatch on subclass rules\n ret = self._eval_is_subset(other)\n if ret is not None:\n return ret\n ret = other._eval_is_superset(self)\n if ret is not None:\n return ret\n\n # Use pairwise rules from multiple dispatch\n from sympy.sets.handlers.issubset import is_subset_sets\n ret = is_subset_sets(self, other)\n if ret is not None:\n return ret\n\n # Fall back on computing the intersection\n # XXX: We shouldn't do this. A query like this should be handled\n # without evaluating new Set objects. It should be the other way round\n # so that the intersect method uses is_subset for evaluation.\n if self.intersect(other) == self:\n return True"
]
| [
"0.8153359",
"0.80990547",
"0.8020877",
"0.79472554",
"0.7687689",
"0.7619746",
"0.6748884",
"0.66119224",
"0.64827746",
"0.6362671",
"0.63433886",
"0.62965935",
"0.6293175",
"0.61685395",
"0.6159616",
"0.6158576",
"0.6087979",
"0.6073577",
"0.5879684",
"0.58497864",
"0.5847108",
"0.5846107",
"0.5833199",
"0.5818039",
"0.5811146",
"0.5811146",
"0.58037823",
"0.579918",
"0.5768558",
"0.57442695"
]
| 0.81938004 | 0 |
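An illustrative sketch of the subset-of-schema check from the row above, reduced to plain dictionaries: every attribute of the candidate schema must also exist in the reference schema, and its allowed values must be contained in the reference's values. The function name and the `strict`/`loose` example schemas are assumptions made up for this sketch.

```python
class SchemaSubsetError(Exception):
    """Raised when a schema is not a subset of a reference schema."""

def validate_subset_of_schema(candidate, reference):
    # candidate / reference: dicts mapping attribute name -> set of allowed values
    for name, values in candidate.items():
        if name not in reference:
            raise SchemaSubsetError("Attribute '%s' does not appear in the reference schema" % name)
        if not set(values) <= set(reference[name]):
            raise SchemaSubsetError(
                "Values %s of '%s' are not a subset of %s"
                % (sorted(values), name, sorted(reference[name]))
            )

strict = {"color": {"red"}}
loose = {"color": {"red", "blue"}, "size": {"S", "M"}}
validate_subset_of_schema(strict, loose)    # passes: strict is contained in loose
# validate_subset_of_schema(loose, strict)  # would raise: 'size' is missing from the reference
```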
Merges the given AttributeSchema into the schema. | def merge_attribute_schema(self, attr_schema):
name = attr_schema.name
if name not in self.schema:
self.schema[name] = attr_schema
else:
self.schema[name].merge_schema(attr_schema) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def merge_schema(self, schema):\n for _, attr_schema in schema.iter_attributes():\n self.merge_attribute_schema(attr_schema)",
"def merge_schema(self, schema):\n self.validate_schema(schema)\n\n if self.exclusive is False:\n self.exclusive = schema.exclusive\n\n if self.default is None:\n self.default = schema.default",
"def merge_schema(self, schema):\n super(BooleanAttributeSchema, self).merge_schema(schema)\n self.values.update(schema.values)",
"def merge_schema(self, schema):\n super(CategoricalAttributeSchema, self).merge_schema(schema)\n self.categories.update(schema.categories)",
"def merge(self, other: Schema) -> Schema:\n if duplicates := self.keys() & other.keys():\n raise IntegrityError(f'Duplicate column name(s): {duplicates}')\n return self.__class__({**self, **other})",
"def merge_schema(first, second):\n if not (type(first) == type(second) == dict):\n raise ValueError(\"Argument is not a schema\")\n\n if not (first.get('type') == second.get('type') == 'object'):\n raise NotImplementedError(\"Unsupported root type\")\n\n return merge_objects(first, second)",
"def add_schema_attribute(self):\n schema_id = self.file.options['schema_id_attr']\n if self.sdef['df'] and self.file.options['include_schema_id']:\n # Normal defined entity\n ns = self.sdef['ns']\n id = self.sdef['id']\n schema = ns + \":\" + id\n self.attributes[schema_id] = {'value': schema}\n elif self.file.options['flag_custom_nodes']:\n self.attributes[schema_id] = {'value': 'custom'}",
"def merge_schema(self, schema):\n super(NumericAttributeSchema, self).merge_schema(schema)\n\n if not self.range:\n self.range = schema.range\n else:\n self.range = (\n min(self.range[0], schema.range[0]),\n max(self.range[1], schema.range[1]),\n )",
"def update(self, schema: 'Schema'):\n self._update(schema)",
"def schema(self, schema):\n\n self._schema = schema",
"def schema(self, schema):\n\n self._schema = schema",
"def schema(self, schema):\n\n self._schema = schema",
"def schema(self, schema):\n self._schema = schema",
"def add_schema(self, schema, db):\n self._dbs[schema.typename] = db\n return None",
"def _patch_schema(self):\n fields = get_json()['data']['attributes'].keys()\n return make_entity_schema(\n self.SCHEMA, self.RESOURCE_NAME,\n make_data_schema(\n self.SCHEMA, id_required=True,\n only=fields, partial=True\n )\n )",
"def set_schema(self, schema):\r\n self.__schema = schema",
"def rebuild(self, dframe, overwrite=False):\n current_schema = self\n new_schema = schema_from_dframe(dframe, self)\n\n if current_schema and not overwrite:\n # merge new schema with existing schema\n current_schema.update(new_schema)\n new_schema = current_schema\n\n return new_schema",
"def merge(self, object_class):\n other_oc = self.schema.get_object_class(object_class)\n self.required_attrs |= other_oc.required_attrs\n self.allowed_attrs |= other_oc.allowed_attrs",
"def merge_schema_entry(\n self,\n old_schema_entry,\n new_schema_entry,\n base_path=None,\n ):\n if not old_schema_entry:\n return new_schema_entry\n\n # If the new schema is None, return immediately.\n if not new_schema_entry:\n return new_schema_entry\n\n # If a field value is missing, permanently set 'filled' to False.\n if not new_schema_entry['filled'] or not old_schema_entry['filled']:\n old_schema_entry['filled'] = False\n new_schema_entry['filled'] = False\n\n old_status = old_schema_entry['status']\n new_status = new_schema_entry['status']\n\n # new 'soft' does not clobber old 'hard'\n if old_status == 'hard' and new_status == 'soft':\n mode = self.merge_mode(old_schema_entry,\n new_schema_entry,\n base_path)\n if mode is None:\n return None\n old_schema_entry['info']['mode'] = mode\n return old_schema_entry\n\n # new 'hard' clobbers old 'soft'\n if old_status == 'soft' and new_status == 'hard':\n mode = self.merge_mode(old_schema_entry,\n new_schema_entry,\n base_path)\n if mode is None:\n return None\n new_schema_entry['info']['mode'] = mode\n return new_schema_entry\n\n # Verify that it's soft->soft or hard->hard\n if old_status != new_status:\n raise Exception(\n f'Unexpected schema_entry type, this should never happen: '\n f'old ({old_status}); new ({new_status})'\n )\n\n old_info = old_schema_entry['info']\n old_name = old_info['name']\n old_type = old_info['type']\n old_mode = old_info['mode']\n new_info = new_schema_entry['info']\n new_name = new_info['name']\n new_type = new_info['type']\n new_mode = new_info['mode']\n\n full_old_name = json_full_path(base_path, old_name)\n full_new_name = json_full_path(base_path, new_name)\n\n # Defensive check, names should always be the same.\n if old_name != new_name:\n if old_name.lower() != new_name.lower():\n raise Exception(\n 'Unexpected difference in name, should never happen:'\n f' old_name ({full_old_name}) != new_name ({full_new_name})'\n )\n else:\n # preserve old name if case is different\n new_info['name'] = old_info['name']\n\n # Recursively merge in the subfields of a RECORD, allowing\n # NULLABLE to become REPEATED (because 'bq load' allows it).\n if old_type == 'RECORD' and new_type == 'RECORD':\n # Allow NULLABLE RECORD to be upgraded to REPEATED RECORD because\n # 'bq load' allows it.\n if old_mode == 'NULLABLE' and new_mode == 'REPEATED':\n old_info['mode'] = 'REPEATED'\n self.log_error(\n f'Converting schema for \"{full_old_name}\" from '\n 'NULLABLE RECORD into REPEATED RECORD'\n )\n elif old_mode == 'REPEATED' and new_mode == 'NULLABLE':\n # TODO: Maybe remove this warning output. It was helpful during\n # development, but maybe it's just natural.\n self.log_error(\n f'Leaving schema for \"{full_old_name}\" as REPEATED RECORD'\n )\n\n # RECORD type needs a recursive merging of sub-fields. 
We merge into\n # the 'old_schema_entry' which assumes that the 'old_schema_entry'\n # can be modified in situ.\n old_fields = old_info['fields']\n new_fields = new_info['fields']\n for key, new_entry in new_fields.items():\n old_entry = old_fields.get(key)\n new_base_path = json_full_path(base_path, old_name)\n old_fields[key] = self.merge_schema_entry(\n old_schema_entry=old_entry,\n new_schema_entry=new_entry,\n base_path=new_base_path,\n )\n return old_schema_entry\n\n new_mode = self.merge_mode(old_schema_entry,\n new_schema_entry,\n base_path)\n if new_mode is None:\n return None\n new_schema_entry['info']['mode'] = new_mode\n\n # For all other types...\n if old_type != new_type:\n # Check that the converted types are compatible.\n candidate_type = convert_type(old_type, new_type)\n if not candidate_type:\n self.log_error(\n f'Ignoring field with mismatched type: '\n f'old=({old_status},{full_old_name},{old_mode},{old_type});'\n f' new=({new_status},{full_new_name},{new_mode},{new_type})'\n )\n return None\n\n new_info['type'] = candidate_type\n return new_schema_entry",
"def _update(self, schema: 'Schema'):\n for method in schema._get_methods():\n if method.id in self:\n raise ValueError(\n f\"Duplicate method id for {method.method} id: {method.id}\"\n )\n\n for combinator in schema._get_combinators():\n if combinator.id in self:\n raise ValueError(\n f\"Duplicate combinator id for {combinator.predicate} \" +\n f\"id: {combinator.id}\"\n )\n\n self.constructors += schema.constructors\n self.functions += schema.functions\n\n self._build_schema_data()",
"def preprocess_schema(schema, imported_schemas, elements, xsd_uri, dialect, http, cache, force_download, wsdl_basedir, global_namespaces=None, qualified=False):\n\n from .simplexml import SimpleXMLElement # here to avoid recursive imports\n\n # analyze the namespaces used in this schema\n local_namespaces = {}\n for k, v in schema[:]:\n if k.startswith(\"xmlns\"):\n local_namespaces[get_local_name(k)] = v\n if k == 'targetNamespace':\n # URI namespace reference for this schema\n if v == \"urn:DefaultNamespace\":\n v = global_namespaces[None]\n local_namespaces[None] = v\n if k == 'elementFormDefault':\n qualified = (v == \"qualified\")\n # add schema namespaces to the global namespace dict = {URI: ns prefix}\n for ns in local_namespaces.values():\n if ns not in global_namespaces:\n global_namespaces[ns] = 'ns%s' % len(global_namespaces)\n \n for element in schema.children() or []:\n if element.get_local_name() in ('import', 'include',):\n schema_namespace = element['namespace']\n schema_location = element['schemaLocation']\n if schema_location is None:\n log.debug('Schema location not provided for %s!' % schema_namespace)\n continue\n if schema_location in imported_schemas:\n log.debug('Schema %s already imported!' % schema_location)\n continue\n imported_schemas[schema_location] = schema_namespace\n log.debug('Importing schema %s from %s' % (schema_namespace, schema_location))\n # Open uri and read xml:\n xml = fetch(schema_location, http, cache, force_download, wsdl_basedir)\n\n # Parse imported XML schema (recursively):\n imported_schema = SimpleXMLElement(xml, namespace=xsd_uri)\n preprocess_schema(imported_schema, imported_schemas, elements, xsd_uri, dialect, http, cache, force_download, wsdl_basedir, global_namespaces, qualified)\n\n element_type = element.get_local_name()\n if element_type in ('element', 'complexType', \"simpleType\"):\n namespace = local_namespaces[None] # get targetNamespace\n element_ns = global_namespaces[ns] # get the prefix\n element_name = element['name']\n log.debug(\"Parsing Element %s: %s\" % (element_type, element_name))\n if element.get_local_name() == 'complexType':\n children = element.children()\n elif element.get_local_name() == 'simpleType':\n children = element('restriction', ns=xsd_uri)\n elif element.get_local_name() == 'element' and element['type']:\n children = element\n else:\n children = element.children()\n if children:\n children = children.children()\n elif element.get_local_name() == 'element':\n children = element\n if children:\n process_element(elements, element_name, children, element_type, xsd_uri, dialect, namespace, qualified)",
"def resolve_schema(self, data):\n if not isinstance(data, dict):\n return\n\n # OAS 2 component or OAS 3 header\n if \"schema\" in data:\n data[\"schema\"] = self.openapi.resolve_schema_dict(data[\"schema\"])\n # OAS 3 component except header\n if self.openapi_version.major >= 3:\n if \"content\" in data:\n for content_type in data[\"content\"]:\n schema = data[\"content\"][content_type][\"schema\"]\n data[\"content\"][content_type][\n \"schema\"\n ] = self.openapi.resolve_schema_dict(schema)",
"def _set_schema(self, schema_value):\n self._id = schema_value.id\n\n if type(self).__name__ != schema_value.type:\n # Make sure this object is the correct type.\n raise ValueError('Cannot convert a {} protocol to a {}.'\n .format(str(type(self)), schema_value.type))\n\n for input_full_path in schema_value.inputs:\n\n value = copy.deepcopy(schema_value.inputs[input_full_path])\n\n input_path = ProtocolPath.from_string(input_full_path)\n self.set_value(input_path, value)",
"def add_attribute(self, attr):\n name = attr.name\n if name not in self.schema:\n schema_cls = attr.get_schema_cls()\n self.schema[name] = schema_cls(name)\n\n self.schema[name].add_attribute(attr)",
"def merge_attribute_defs(self, dest, source, changes = {}):\n # print \"in merge_attribute_defs, dest =\"\n # pp.pprint(dest)\n # print \"source =\"\n # pp.pprint(source)\n for aid in source.keys():\n if aid not in dest.keys():\n # copy attribute, then check for append\n dest[aid] = copy.deepcopy(source[aid])\n if 'value' in dest[aid]:\n if type(dest[aid]['value']) is str and dest[aid]['value'][0]=='+':\n dest[aid]['value'] = dest[aid]['value'].lstrip('+')\n changes[aid] = dest[aid]['value']\n continue \n if 'value' not in dest[aid]:\n if 'value' in source[aid]:\n dest[aid]['value'] = source[aid]['value']\n if (type(dest[aid]['value']) is str and dest[aid]['value'][0] == '+'):\n dest[aid]['value'] = dest[aid]['value'].lstrip('+') \n changes[aid] = dest[aid]['value']\n continue\n else:\n print (\"** Error, merging attribute '%s' but value not specified in source\"\n \" or destination\") % aid\n traceback.print_stack()\n sys.exit(1) \n else:\n if 'value' in source[aid]: \n # value given in both source and destination\n self.append_or_replace(dest[aid], source[aid], 'value', \"attribute %s\" % aid)\n changes[aid] = dest[aid]['value'] # save changed value\n else:\n print (\"** Warning, node at:\\n%s\\nmerging attribute '%s'\" \n \" but value to merge not specified.\") % (self.full_path, aid)\n print \"source attributes:\"\n pp.pprint(source)\n print \"dest attributes:\"\n pp.pprint(dest)",
"def load(self, base_schema):\n if base_schema == []:\n _base = []\n else:\n _base = base_schema or BASE_SCHEMA or []\n\n _base_schema = []\n for _sc in _base:\n if _sc == \"schema\" or _sc == \"schema.org\":\n self.schema_org_version = get_schemaorg_version()\n _base_schema.append(\n load_schemaorg(version=self.schema_org_version, verbose=self.verbose)\n )\n continue\n elif self.is_a_dde_schema(_sc):\n _base_schema.append(self.load_dde_schemas(_sc))\n\n _base_schema = merge_schema(*_base_schema)\n return _base_schema",
"def load_schema(self, schema):\n if not self.default_schema_loaded:\n self.load_default_schema()\n # load JSON-LD file of user defined schema\n self.schema_extension_only = preprocess_schema(load_json_or_yaml(schema))\n if \"@context\" in self.schema_extension_only:\n self.context.update(self.schema_extension_only[\"@context\"])\n # convert user defined schema into a networkx DiGraph\n self.schema_extension_nx = load_schema_into_networkx(self.schema_extension_only)\n # update undefined classes/properties\n undefined_nodes = [node for node, attrdict in self.schema_extension_nx.node.items() if not attrdict]\n attr_dict = {}\n \n for _node in undefined_nodes:\n if _node in self.schemaorg_nx.nodes():\n attr_dict[_node] = self.schemaorg_nx.nodes[_node]\n nx.set_node_attributes(self.schema_extension_nx, attr_dict)\n # merge networkx graph of user-defined schema with networkx graph of schema defined by Schema.org\n #self.schema_nx = merge_schema_networkx(self.schemaorg_nx, self.schema_extension_nx)\n self.schema_nx = self.schema_extension_nx\t\n SchemaValidator(self.schema_extension_only, self.schema_nx).validate_full_schema()\n # merge together the given schema and the schema defined by schemaorg\n #self.schema = merge_schema(self.schema_extension_only, self.schemaorg_schema)\n self.schema = self.schemaorg_schema\n # split the schema networkx into individual ones\n isolates = list(nx.isolates(self.schema_nx))\n \n for node, attrdict in self.schema_extension_nx.node.items():\n if not 'type' in attrdict:\n self.schema_extension_nx.nodes[node][\"type\"] = \"Class\" \n for node, attrdict in self.schema_nx.node.items():\n if not 'type' in attrdict:\n self.schema_nx.nodes[node][\"type\"] = \"Class\" \n \n self.extended_class_only_graph = self.schema_extension_nx.subgraph([node for node, attrdict in self.schema_extension_nx.node.items() if attrdict['type'] == 'Class' and node not in isolates])\n self.full_class_only_graph = self.schema_nx.subgraph([node for node, attrdict in self.schema_nx.node.items() if attrdict['type'] == 'Class'])\n self.property_only_graph = self.schema_nx.subgraph([node for node, attrdict in self.schema_nx.node.items() if attrdict['type'] == 'Property'])\n # instantiate converters for classes and properties\n self._all_class_uris = [node for node,attrdict in self.schema_nx.node.items() if attrdict['type'] in ['Class', 'DataType']]\n self.cls_converter = CurieUriConverter(self.context,\n self._all_class_uris)\n self._all_prop_uris = list(self.property_only_graph.nodes())\n self.prop_converter = CurieUriConverter(self.context,\n self._all_prop_uris)",
"def update_schema(self, new_schema):\n return self.conn.update_schema(new_schema)",
"def populate_schema_defs(schema, repo=None):\n repo = SCHEMA_DEFS if repo is None else repo\n extract_named_schemas(\n schema,\n repo,\n lambda schema: schema,\n )",
"def test_merge_schemas(registry):\n test_schema = registry[TYPES][unit_test_type].schema\n test_subschema = test_schema['properties']['attachment']\n res = merge_schemas(test_subschema, registry[TYPES])\n assert res\n assert res != test_subschema\n assert res['properties']['attachment']['attachment'] is True"
]
| [
"0.83535683",
"0.6661252",
"0.64205706",
"0.63567936",
"0.5432936",
"0.5387347",
"0.530836",
"0.52937627",
"0.52555555",
"0.5190755",
"0.5190755",
"0.5190755",
"0.5179762",
"0.5135182",
"0.51190615",
"0.5019401",
"0.50025386",
"0.49863493",
"0.4966378",
"0.49458146",
"0.48388338",
"0.48286653",
"0.4800144",
"0.47949976",
"0.47872078",
"0.47800726",
"0.4772156",
"0.47424722",
"0.47137034",
"0.47039342"
]
| 0.85706264 | 0 |
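A small sketch of the merge pattern from the row above: when merging a per-attribute schema into a container schema, create the entry if the attribute is new, otherwise merge it into the existing entry. Here the per-attribute schema is simplified to a set of allowed values; the function and variable names are illustrative only.

```python
def merge_attribute_schema(container, name, values):
    # container: dict mapping attribute name -> set of allowed values
    if name not in container:
        container[name] = set(values)   # new attribute: copy the incoming schema
    else:
        container[name] |= set(values)  # existing attribute: merge in place
    return container

merged = {}
merge_attribute_schema(merged, "color", {"red"})
merge_attribute_schema(merged, "color", {"blue"})
merge_attribute_schema(merged, "size", {"S"})
assert merged == {"color": {"red", "blue"}, "size": {"S"}}
```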
Merges the given AttributeContainerSchema into the schema. | def merge_schema(self, schema):
for _, attr_schema in schema.iter_attributes():
self.merge_attribute_schema(attr_schema) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def merge_attribute_schema(self, attr_schema):\n name = attr_schema.name\n if name not in self.schema:\n self.schema[name] = attr_schema\n else:\n self.schema[name].merge_schema(attr_schema)",
"def merge_schema(self, schema):\n super(CategoricalAttributeSchema, self).merge_schema(schema)\n self.categories.update(schema.categories)",
"def merge_schema(self, schema):\n super(BooleanAttributeSchema, self).merge_schema(schema)\n self.values.update(schema.values)",
"def merge_schema(self, schema):\n self.validate_schema(schema)\n\n if self.exclusive is False:\n self.exclusive = schema.exclusive\n\n if self.default is None:\n self.default = schema.default",
"def merge_schema(first, second):\n if not (type(first) == type(second) == dict):\n raise ValueError(\"Argument is not a schema\")\n\n if not (first.get('type') == second.get('type') == 'object'):\n raise NotImplementedError(\"Unsupported root type\")\n\n return merge_objects(first, second)",
"def AddAttributeContainer(self, container):\n if container.CONTAINER_TYPE == self._CONTAINER_TYPE_EVENT:\n event_data_identifier = container.GetEventDataIdentifier()\n event_data_lookup_key = event_data_identifier.CopyToString()\n\n event_data_identifier = self._event_data_identifier_mappings.get(\n event_data_lookup_key, None)\n\n if event_data_identifier:\n container.SetEventDataIdentifier(event_data_identifier)\n else:\n identifier = container.GetIdentifier()\n identifier_string = identifier.CopyToString()\n\n # TODO: store this as a merge warning so this is preserved\n # in the storage file.\n logger.error((\n 'Unable to merge event attribute container: {0:s} since '\n 'corresponding event data: {1:s} could not be found.').format(\n identifier_string, event_data_lookup_key))\n return\n\n elif container.CONTAINER_TYPE == self._CONTAINER_TYPE_EVENT_DATA:\n event_data_stream_identifier = container.GetEventDataStreamIdentifier()\n event_data_stream_lookup_key = None\n if event_data_stream_identifier:\n event_data_stream_lookup_key = (\n event_data_stream_identifier.CopyToString())\n\n event_data_stream_identifier = (\n self._event_data_stream_identifier_mappings.get(\n event_data_stream_lookup_key, None))\n\n if event_data_stream_identifier:\n container.SetEventDataStreamIdentifier(event_data_stream_identifier)\n elif event_data_stream_lookup_key:\n identifier = container.GetIdentifier()\n identifier_string = identifier.CopyToString()\n\n # TODO: store this as a merge warning so this is preserved\n # in the storage file.\n logger.error((\n 'Unable to merge event data attribute container: {0:s} since '\n 'corresponding event data stream: {1:s} could not be '\n 'found.').format(identifier_string, event_data_stream_lookup_key))\n return\n\n if container.CONTAINER_TYPE in (\n self._CONTAINER_TYPE_EVENT_DATA,\n self._CONTAINER_TYPE_EVENT_DATA_STREAM):\n # Preserve the lookup key before adding it to the attribute container\n # store.\n identifier = container.GetIdentifier()\n lookup_key = identifier.CopyToString()\n\n if container.CONTAINER_TYPE == self._CONTAINER_TYPE_EVENT_TAG:\n self._storage_writer.AddOrUpdateEventTag(container)\n else:\n self._storage_writer.AddAttributeContainer(container)\n\n if container.CONTAINER_TYPE == self._CONTAINER_TYPE_EVENT:\n parser_name = self._event_data_parser_mappings.get(\n event_data_lookup_key, 'N/A')\n self._session.parsers_counter[parser_name] += 1\n self._session.parsers_counter['total'] += 1\n\n elif container.CONTAINER_TYPE == self._CONTAINER_TYPE_EVENT_DATA:\n identifier = container.GetIdentifier()\n self._event_data_identifier_mappings[lookup_key] = identifier\n\n parser_name = container.parser.split('/')[-1]\n self._event_data_parser_mappings[lookup_key] = parser_name\n\n elif container.CONTAINER_TYPE == self._CONTAINER_TYPE_EVENT_DATA_STREAM:\n identifier = container.GetIdentifier()\n self._event_data_stream_identifier_mappings[lookup_key] = identifier",
"def rebuild(self, dframe, overwrite=False):\n current_schema = self\n new_schema = schema_from_dframe(dframe, self)\n\n if current_schema and not overwrite:\n # merge new schema with existing schema\n current_schema.update(new_schema)\n new_schema = current_schema\n\n return new_schema",
"def add_schema(self, schema, db):\n self._dbs[schema.typename] = db\n return None",
"def resolve_schema_in_request_body(self, request_body):\n content = request_body[\"content\"]\n for content_type in content:\n schema = content[content_type][\"schema\"]\n content[content_type][\"schema\"] = self.openapi.resolve_schema_dict(schema)",
"def add_schema_attribute(self):\n schema_id = self.file.options['schema_id_attr']\n if self.sdef['df'] and self.file.options['include_schema_id']:\n # Normal defined entity\n ns = self.sdef['ns']\n id = self.sdef['id']\n schema = ns + \":\" + id\n self.attributes[schema_id] = {'value': schema}\n elif self.file.options['flag_custom_nodes']:\n self.attributes[schema_id] = {'value': 'custom'}",
"def resolve_schema(self, data):\n if not isinstance(data, dict):\n return\n\n # OAS 2 component or OAS 3 header\n if \"schema\" in data:\n data[\"schema\"] = self.openapi.resolve_schema_dict(data[\"schema\"])\n # OAS 3 component except header\n if self.openapi_version.major >= 3:\n if \"content\" in data:\n for content_type in data[\"content\"]:\n schema = data[\"content\"][content_type][\"schema\"]\n data[\"content\"][content_type][\n \"schema\"\n ] = self.openapi.resolve_schema_dict(schema)",
"def schema(self, schema):\n\n self._schema = schema",
"def schema(self, schema):\n\n self._schema = schema",
"def schema(self, schema):\n\n self._schema = schema",
"def schema(self, schema):\n self._schema = schema",
"def load(self, base_schema):\n if base_schema == []:\n _base = []\n else:\n _base = base_schema or BASE_SCHEMA or []\n\n _base_schema = []\n for _sc in _base:\n if _sc == \"schema\" or _sc == \"schema.org\":\n self.schema_org_version = get_schemaorg_version()\n _base_schema.append(\n load_schemaorg(version=self.schema_org_version, verbose=self.verbose)\n )\n continue\n elif self.is_a_dde_schema(_sc):\n _base_schema.append(self.load_dde_schemas(_sc))\n\n _base_schema = merge_schema(*_base_schema)\n return _base_schema",
"def _set_schema(self, schema_value):\n self._id = schema_value.id\n\n if type(self).__name__ != schema_value.type:\n # Make sure this object is the correct type.\n raise ValueError('Cannot convert a {} protocol to a {}.'\n .format(str(type(self)), schema_value.type))\n\n for input_full_path in schema_value.inputs:\n\n value = copy.deepcopy(schema_value.inputs[input_full_path])\n\n input_path = ProtocolPath.from_string(input_full_path)\n self.set_value(input_path, value)",
"def deduce_schema_for_record(self, json_object, schema_map, base_path=None):\n for key, value in json_object.items():\n # The canonical key is the lower-cased version of the sanitized key\n # so that the case of the field name is preserved when generating\n # the schema but we don't create invalid, duplicate, fields since\n # BigQuery is case insensitive\n canonical_key = self.sanitize_name(key).lower()\n schema_entry = schema_map.get(canonical_key)\n new_schema_entry = self.get_schema_entry(\n key=key,\n value=value,\n base_path=base_path\n )\n schema_map[canonical_key] = self.merge_schema_entry(\n old_schema_entry=schema_entry,\n new_schema_entry=new_schema_entry,\n base_path=base_path\n )",
"def preprocess_schema(schema, imported_schemas, elements, xsd_uri, dialect, http, cache, force_download, wsdl_basedir, global_namespaces=None, qualified=False):\n\n from .simplexml import SimpleXMLElement # here to avoid recursive imports\n\n # analyze the namespaces used in this schema\n local_namespaces = {}\n for k, v in schema[:]:\n if k.startswith(\"xmlns\"):\n local_namespaces[get_local_name(k)] = v\n if k == 'targetNamespace':\n # URI namespace reference for this schema\n if v == \"urn:DefaultNamespace\":\n v = global_namespaces[None]\n local_namespaces[None] = v\n if k == 'elementFormDefault':\n qualified = (v == \"qualified\")\n # add schema namespaces to the global namespace dict = {URI: ns prefix}\n for ns in local_namespaces.values():\n if ns not in global_namespaces:\n global_namespaces[ns] = 'ns%s' % len(global_namespaces)\n \n for element in schema.children() or []:\n if element.get_local_name() in ('import', 'include',):\n schema_namespace = element['namespace']\n schema_location = element['schemaLocation']\n if schema_location is None:\n log.debug('Schema location not provided for %s!' % schema_namespace)\n continue\n if schema_location in imported_schemas:\n log.debug('Schema %s already imported!' % schema_location)\n continue\n imported_schemas[schema_location] = schema_namespace\n log.debug('Importing schema %s from %s' % (schema_namespace, schema_location))\n # Open uri and read xml:\n xml = fetch(schema_location, http, cache, force_download, wsdl_basedir)\n\n # Parse imported XML schema (recursively):\n imported_schema = SimpleXMLElement(xml, namespace=xsd_uri)\n preprocess_schema(imported_schema, imported_schemas, elements, xsd_uri, dialect, http, cache, force_download, wsdl_basedir, global_namespaces, qualified)\n\n element_type = element.get_local_name()\n if element_type in ('element', 'complexType', \"simpleType\"):\n namespace = local_namespaces[None] # get targetNamespace\n element_ns = global_namespaces[ns] # get the prefix\n element_name = element['name']\n log.debug(\"Parsing Element %s: %s\" % (element_type, element_name))\n if element.get_local_name() == 'complexType':\n children = element.children()\n elif element.get_local_name() == 'simpleType':\n children = element('restriction', ns=xsd_uri)\n elif element.get_local_name() == 'element' and element['type']:\n children = element\n else:\n children = element.children()\n if children:\n children = children.children()\n elif element.get_local_name() == 'element':\n children = element\n if children:\n process_element(elements, element_name, children, element_type, xsd_uri, dialect, namespace, qualified)",
"def update(self, schema: 'Schema'):\n self._update(schema)",
"def set_schemas(self, schemas, asset=None):\n dicts = [s.to_dict() for s in schemas]\n self._set_property('pc:schemas', dicts, asset)",
"def set_schema(self, schema):\r\n self.__schema = schema",
"def load_base_schema(base_schema=None, verbose=False):\n if base_schema == []:\n _base = []\n else:\n _base = base_schema or BASE_SCHEMA or []\n\n _base_schema = []\n # if \"schema.org\" in _base or \"schema\" in _base:\n # _base_schema.append(\n # load_schemaorg(verbose=verbose)\n # )\n # if \"bioschemas\" in _base:\n # _base_schema.append(\n # load_bioschemas(verbose=verbose)\n # )\n\n for _sc in _base:\n if _sc == \"schema\" or _sc == \"schema.org\":\n _base_schema.append(load_schemaorg(verbose=verbose))\n continue\n elif _sc in registered_dde_schemas():\n _base_schema.append(load_dde_schemas(_sc, verbose=verbose))\n\n _base_schema = merge_schema(*_base_schema)\n return _base_schema",
"def merge(self, other: Schema) -> Schema:\n if duplicates := self.keys() & other.keys():\n raise IntegrityError(f'Duplicate column name(s): {duplicates}')\n return self.__class__({**self, **other})",
"def _add_table_schema(table_desc, table_name, schema):\n table_desc['TableName'] = table_name\n table_desc['AttributeDefinitions'] = [{\n 'AttributeName': item['name'],\n 'AttributeType': DynamoStubber._encode_type(item['type'])\n } for item in schema]\n table_desc['KeySchema'] = [{\n 'AttributeName': item['name'],\n 'KeyType': item['key_type']\n } for item in schema]",
"def _patch_schema(self):\n fields = get_json()['data']['attributes'].keys()\n return make_entity_schema(\n self.SCHEMA, self.RESOURCE_NAME,\n make_data_schema(\n self.SCHEMA, id_required=True,\n only=fields, partial=True\n )\n )",
"def validate_subset_of_schema(self, schema):\n self.validate_schema_type(schema)\n\n for name, attr_schema in iteritems(self.schema):\n if not schema.has_attribute(name):\n raise AttributeContainerSchemaError(\n \"Attribute '%s' does not appear in schema\" % name\n )\n\n other_attr_schema = schema.get_attribute_schema(name)\n attr_schema.validate_subset_of_schema(other_attr_schema)",
"def build_schema(self, dframe, overwrite=False, set_num_columns=True):\n new_schema = self.schema.rebuild(dframe, overwrite)\n self.set_schema(new_schema,\n set_num_columns=(set_num_columns or overwrite))",
"def load_schema(self, schema):\n if not self.default_schema_loaded:\n self.load_default_schema()\n # load JSON-LD file of user defined schema\n self.schema_extension_only = preprocess_schema(load_json_or_yaml(schema))\n if \"@context\" in self.schema_extension_only:\n self.context.update(self.schema_extension_only[\"@context\"])\n # convert user defined schema into a networkx DiGraph\n self.schema_extension_nx = load_schema_into_networkx(self.schema_extension_only)\n # update undefined classes/properties\n undefined_nodes = [node for node, attrdict in self.schema_extension_nx.node.items() if not attrdict]\n attr_dict = {}\n \n for _node in undefined_nodes:\n if _node in self.schemaorg_nx.nodes():\n attr_dict[_node] = self.schemaorg_nx.nodes[_node]\n nx.set_node_attributes(self.schema_extension_nx, attr_dict)\n # merge networkx graph of user-defined schema with networkx graph of schema defined by Schema.org\n #self.schema_nx = merge_schema_networkx(self.schemaorg_nx, self.schema_extension_nx)\n self.schema_nx = self.schema_extension_nx\t\n SchemaValidator(self.schema_extension_only, self.schema_nx).validate_full_schema()\n # merge together the given schema and the schema defined by schemaorg\n #self.schema = merge_schema(self.schema_extension_only, self.schemaorg_schema)\n self.schema = self.schemaorg_schema\n # split the schema networkx into individual ones\n isolates = list(nx.isolates(self.schema_nx))\n \n for node, attrdict in self.schema_extension_nx.node.items():\n if not 'type' in attrdict:\n self.schema_extension_nx.nodes[node][\"type\"] = \"Class\" \n for node, attrdict in self.schema_nx.node.items():\n if not 'type' in attrdict:\n self.schema_nx.nodes[node][\"type\"] = \"Class\" \n \n self.extended_class_only_graph = self.schema_extension_nx.subgraph([node for node, attrdict in self.schema_extension_nx.node.items() if attrdict['type'] == 'Class' and node not in isolates])\n self.full_class_only_graph = self.schema_nx.subgraph([node for node, attrdict in self.schema_nx.node.items() if attrdict['type'] == 'Class'])\n self.property_only_graph = self.schema_nx.subgraph([node for node, attrdict in self.schema_nx.node.items() if attrdict['type'] == 'Property'])\n # instantiate converters for classes and properties\n self._all_class_uris = [node for node,attrdict in self.schema_nx.node.items() if attrdict['type'] in ['Class', 'DataType']]\n self.cls_converter = CurieUriConverter(self.context,\n self._all_class_uris)\n self._all_prop_uris = list(self.property_only_graph.nodes())\n self.prop_converter = CurieUriConverter(self.context,\n self._all_prop_uris)",
"def extend_schema(schema, documentAST=None):\n\n assert isinstance(schema, GraphQLSchema), \"Must provide valid GraphQLSchema\"\n assert documentAST and isinstance(\n documentAST, ast.Document\n ), \"Must provide valid Document AST\"\n\n # Collect the type definitions and extensions found in the document.\n type_definition_map = {}\n type_extensions_map = defaultdict(list)\n\n for _def in documentAST.definitions:\n if isinstance(\n _def,\n (\n ast.ObjectTypeDefinition,\n ast.InterfaceTypeDefinition,\n ast.EnumTypeDefinition,\n ast.UnionTypeDefinition,\n ast.ScalarTypeDefinition,\n ast.InputObjectTypeDefinition,\n ),\n ):\n # Sanity check that none of the defined types conflict with the\n # schema's existing types.\n type_name = _def.name.value\n if schema.get_type(type_name):\n raise GraphQLError(\n (\n 'Type \"{}\" already exists in the schema. It cannot also '\n + \"be defined in this type definition.\"\n ).format(type_name),\n [_def],\n )\n\n type_definition_map[type_name] = _def\n elif isinstance(_def, ast.TypeExtensionDefinition):\n # Sanity check that this type extension exists within the\n # schema's existing types.\n extended_type_name = _def.definition.name.value\n existing_type = schema.get_type(extended_type_name)\n if not existing_type:\n raise GraphQLError(\n (\n 'Cannot extend type \"{}\" because it does not '\n + \"exist in the existing schema.\"\n ).format(extended_type_name),\n [_def.definition],\n )\n if not isinstance(existing_type, GraphQLObjectType):\n raise GraphQLError(\n 'Cannot extend non-object type \"{}\".'.format(extended_type_name),\n [_def.definition],\n )\n\n type_extensions_map[extended_type_name].append(_def)\n\n # Below are functions used for producing this schema that have closed over\n # this scope and have access to the schema, cache, and newly defined types.\n\n def get_type_from_def(type_def):\n type = _get_named_type(type_def.name)\n assert type, \"Invalid schema\"\n return type\n\n def get_type_from_AST(astNode):\n type = _get_named_type(astNode.name.value)\n if not type:\n raise GraphQLError(\n (\n 'Unknown type: \"{}\". 
Ensure that this type exists '\n + \"either in the original schema, or is added in a type definition.\"\n ).format(astNode.name.value),\n [astNode],\n )\n return type\n\n # Given a name, returns a type from either the existing schema or an\n # added type.\n def _get_named_type(typeName):\n cached_type_def = type_def_cache.get(typeName)\n if cached_type_def:\n return cached_type_def\n\n existing_type = schema.get_type(typeName)\n if existing_type:\n type_def = extend_type(existing_type)\n type_def_cache[typeName] = type_def\n return type_def\n\n type_ast = type_definition_map.get(typeName)\n if type_ast:\n type_def = build_type(type_ast)\n type_def_cache[typeName] = type_def\n return type_def\n\n # Given a type's introspection result, construct the correct\n # GraphQLType instance.\n def extend_type(type):\n if isinstance(type, GraphQLObjectType):\n return extend_object_type(type)\n if isinstance(type, GraphQLInterfaceType):\n return extend_interface_type(type)\n if isinstance(type, GraphQLUnionType):\n return extend_union_type(type)\n return type\n\n def extend_object_type(type):\n return GraphQLObjectType(\n name=type.name,\n description=type.description,\n interfaces=lambda: extend_implemented_interfaces(type),\n fields=lambda: extend_field_map(type),\n )\n\n def extend_interface_type(type):\n return GraphQLInterfaceType(\n name=type.name,\n description=type.description,\n fields=lambda: extend_field_map(type),\n resolve_type=cannot_execute_client_schema,\n )\n\n def extend_union_type(type):\n return GraphQLUnionType(\n name=type.name,\n description=type.description,\n types=list(map(get_type_from_def, type.types)),\n resolve_type=cannot_execute_client_schema,\n )\n\n def extend_implemented_interfaces(type):\n interfaces = list(map(get_type_from_def, type.interfaces))\n\n # If there are any extensions to the interfaces, apply those here.\n extensions = type_extensions_map[type.name]\n for extension in extensions:\n for namedType in extension.definition.interfaces:\n interface_name = namedType.name.value\n if any([_def.name == interface_name for _def in interfaces]):\n raise GraphQLError(\n (\n 'Type \"{}\" already implements \"{}\". '\n + \"It cannot also be implemented in this type extension.\"\n ).format(type.name, interface_name),\n [namedType],\n )\n interfaces.append(get_type_from_AST(namedType))\n\n return interfaces\n\n def extend_field_map(type):\n new_field_map = OrderedDict()\n old_field_map = type.fields\n for field_name, field in old_field_map.items():\n new_field_map[field_name] = GraphQLField(\n extend_field_type(field.type),\n description=field.description,\n deprecation_reason=field.deprecation_reason,\n args=field.args,\n resolver=cannot_execute_client_schema,\n )\n\n # If there are any extensions to the fields, apply those here.\n extensions = type_extensions_map[type.name]\n for extension in extensions:\n for field in extension.definition.fields:\n field_name = field.name.value\n if field_name in old_field_map:\n raise GraphQLError(\n (\n 'Field \"{}.{}\" already exists in the '\n + \"schema. 
It cannot also be defined in this type extension.\"\n ).format(type.name, field_name),\n [field],\n )\n new_field_map[field_name] = GraphQLField(\n build_field_type(field.type),\n args=build_input_values(field.arguments),\n resolver=cannot_execute_client_schema,\n )\n\n return new_field_map\n\n def extend_field_type(type):\n if isinstance(type, GraphQLList):\n return GraphQLList(extend_field_type(type.of_type))\n if isinstance(type, GraphQLNonNull):\n return GraphQLNonNull(extend_field_type(type.of_type))\n return get_type_from_def(type)\n\n def build_type(type_ast):\n _type_build = {\n ast.ObjectTypeDefinition: build_object_type,\n ast.InterfaceTypeDefinition: build_interface_type,\n ast.UnionTypeDefinition: build_union_type,\n ast.ScalarTypeDefinition: build_scalar_type,\n ast.EnumTypeDefinition: build_enum_type,\n ast.InputObjectTypeDefinition: build_input_object_type,\n }\n func = _type_build.get(type(type_ast))\n if func:\n return func(type_ast)\n\n def build_object_type(type_ast):\n return GraphQLObjectType(\n type_ast.name.value,\n interfaces=lambda: build_implemented_interfaces(type_ast),\n fields=lambda: build_field_map(type_ast),\n )\n\n def build_interface_type(type_ast):\n return GraphQLInterfaceType(\n type_ast.name.value,\n fields=lambda: build_field_map(type_ast),\n resolve_type=cannot_execute_client_schema,\n )\n\n def build_union_type(type_ast):\n return GraphQLUnionType(\n type_ast.name.value,\n types=list(map(get_type_from_AST, type_ast.types)),\n resolve_type=cannot_execute_client_schema,\n )\n\n def build_scalar_type(type_ast):\n return GraphQLScalarType(\n type_ast.name.value,\n serialize=lambda *args, **kwargs: None,\n # Note: validation calls the parse functions to determine if a\n # literal value is correct. Returning null would cause use of custom\n # scalars to always fail validation. 
Returning false causes them to\n # always pass validation.\n parse_value=lambda *args, **kwargs: False,\n parse_literal=lambda *args, **kwargs: False,\n )\n\n def build_enum_type(type_ast):\n return GraphQLEnumType(\n type_ast.name.value,\n values={v.name.value: GraphQLEnumValue() for v in type_ast.values},\n )\n\n def build_input_object_type(type_ast):\n return GraphQLInputObjectType(\n type_ast.name.value,\n fields=lambda: build_input_values(type_ast.fields, GraphQLInputObjectField),\n )\n\n def build_implemented_interfaces(type_ast):\n return list(map(get_type_from_AST, type_ast.interfaces))\n\n def build_field_map(type_ast):\n return {\n field.name.value: GraphQLField(\n build_field_type(field.type),\n args=build_input_values(field.arguments),\n resolver=cannot_execute_client_schema,\n )\n for field in type_ast.fields\n }\n\n def build_input_values(values, input_type=GraphQLArgument):\n input_values = OrderedDict()\n for value in values:\n type = build_field_type(value.type)\n input_values[value.name.value] = input_type(\n type, default_value=value_from_ast(value.default_value, type)\n )\n return input_values\n\n def build_field_type(type_ast):\n if isinstance(type_ast, ast.ListType):\n return GraphQLList(build_field_type(type_ast.type))\n if isinstance(type_ast, ast.NonNullType):\n return GraphQLNonNull(build_field_type(type_ast.type))\n return get_type_from_AST(type_ast)\n\n # If this document contains no new types, then return the same unmodified\n # GraphQLSchema instance.\n if not type_extensions_map and not type_definition_map:\n return schema\n\n # A cache to use to store the actual GraphQLType definition objects by name.\n # Initialize to the GraphQL built in scalars and introspection types. All\n # functions below are inline so that this type def cache is within the scope\n # of the closure.\n\n type_def_cache = {\n \"String\": GraphQLString,\n \"Int\": GraphQLInt,\n \"Float\": GraphQLFloat,\n \"Boolean\": GraphQLBoolean,\n \"ID\": GraphQLID,\n \"__Schema\": __Schema,\n \"__Directive\": __Directive,\n \"__DirectiveLocation\": __DirectiveLocation,\n \"__Type\": __Type,\n \"__Field\": __Field,\n \"__InputValue\": __InputValue,\n \"__EnumValue\": __EnumValue,\n \"__TypeKind\": __TypeKind,\n }\n\n # Get the root Query, Mutation, and Subscription types.\n query_type = get_type_from_def(schema.get_query_type())\n\n existing_mutation_type = schema.get_mutation_type()\n mutationType = (\n existing_mutation_type and get_type_from_def(existing_mutation_type) or None\n )\n\n existing_subscription_type = schema.get_subscription_type()\n subscription_type = (\n existing_subscription_type\n and get_type_from_def(existing_subscription_type)\n or None\n )\n\n # Iterate through all types, getting the type definition for each, ensuring\n # that any type not directly referenced by a field will get created.\n types = [get_type_from_def(_def) for _def in schema.get_type_map().values()]\n\n # Do the same with new types, appending to the list of defined types.\n types += [get_type_from_AST(_def) for _def in type_definition_map.values()]\n\n # Then produce and return a Schema with these types.\n return GraphQLSchema(\n query=query_type,\n mutation=mutationType,\n subscription=subscription_type,\n # Copy directives.\n directives=schema.get_directives(),\n types=types,\n )"
]
| [
"0.78481984",
"0.6505774",
"0.6218267",
"0.62063813",
"0.54526615",
"0.54215837",
"0.53956157",
"0.528516",
"0.5258999",
"0.51798785",
"0.5173846",
"0.51475376",
"0.51475376",
"0.51475376",
"0.5113156",
"0.5090977",
"0.50859594",
"0.5082441",
"0.50775105",
"0.5067844",
"0.5034826",
"0.50258577",
"0.49406707",
"0.4938009",
"0.48535416",
"0.48354754",
"0.48303002",
"0.48297036",
"0.48057935",
"0.47964087"
]
| 0.7980561 | 0 |
Builds an AttributeContainerSchema that describes the active schema of the given AttributeContainer. | def build_active_schema(cls, attrs):
schema = cls()
schema.add_attributes(attrs)
return schema | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __build_schema(meta_data):\n \n # Builds the dictionary that represents the schema.\n temporary_dictionary = {'$schema': None, '$id': None, 'title': None, 'type': None, 'properties': []}\n for x in meta_data:\n temporary_dictionary['properties'].append({\n 'name': x,\n 'type': None,\n 'description': None})\n # Creates a new instance of the schema and inserts the dictionary as a json into the field and returns it.\n returned_schema = Schema()\n returned_schema.data = json.dumps(temporary_dictionary)\n return returned_schema",
"def build_active_schema(cls, attr):\n return cls(attr.name, categories={attr.value})",
"def build_active_schema(cls, attr):\n return cls(attr.name, values={attr.value})",
"def get_schema() -> Dict[str, type]:\n schema: Dict[str, type] = {}\n\n # Add all columns from pipeline configs\n for pipeline in get_pipelines():\n schema.update(pipeline.schema)\n\n # Add new columns from adapter\n for col_old, col_new in OUTPUT_COLUMN_ADAPTER.items():\n if col_old in schema and col_new is not None:\n schema[col_new] = schema[col_old]\n\n return schema",
"def get_schema_structure(self) -> SchemaStructure:\n constructors: List[CombinatorData] = list(\n self._combinator_map.values()\n )\n methods: List[FunctionData] = list(\n self._function_map.values()\n )\n\n return SchemaStructure(constructors=constructors, methods=methods)",
"def _dict2schema(dct):\n attrs = dct.copy()\n if MARSHMALLOW_VERSION_INFO[0] < 3:\n\n class Meta(object):\n strict = True\n\n attrs[\"Meta\"] = Meta\n return type(str(\"\"), (ma.Schema,), attrs)",
"def _prepare_schema(self):\n schema = DaskSchema(self.schema_name)\n\n if not self.tables:\n logger.warning(\"No tables are registered.\")\n\n for name, dc in self.tables.items():\n table = DaskTable(name)\n df = dc.df\n logger.debug(\n f\"Adding table '{name}' to schema with columns: {list(df.columns)}\"\n )\n for column in df.columns:\n data_type = df[column].dtype\n sql_data_type = python_to_sql_type(data_type)\n\n table.addColumn(column, sql_data_type)\n\n schema.addTable(table)\n\n if not self.functions:\n logger.debug(\"No custom functions defined.\")\n\n for function_description in self.function_list:\n name = function_description.name\n sql_return_type = python_to_sql_type(function_description.return_type)\n if function_description.aggregation:\n logger.debug(f\"Adding function '{name}' to schema as aggregation.\")\n dask_function = DaskAggregateFunction(name, sql_return_type)\n else:\n logger.debug(f\"Adding function '{name}' to schema as scalar function.\")\n dask_function = DaskScalarFunction(name, sql_return_type)\n\n dask_function = self._add_parameters_from_description(\n function_description, dask_function\n )\n\n schema.addFunction(dask_function)\n\n return schema",
"def build_schema(self, dframe, overwrite=False, set_num_columns=True):\n new_schema = self.schema.rebuild(dframe, overwrite)\n self.set_schema(new_schema,\n set_num_columns=(set_num_columns or overwrite))",
"def _get_table_schema(self):\n\n return {\n 'AttributeDefinitions': [\n {\n 'AttributeName': self._key_field.name,\n 'AttributeType': self._key_field.data_type\n }\n ],\n 'TableName': self.table_name,\n 'KeySchema': [\n {\n 'AttributeName': self._key_field.name,\n 'KeyType': 'HASH'\n }\n ],\n 'ProvisionedThroughput': {\n 'ReadCapacityUnits': self.read_capacity_units,\n 'WriteCapacityUnits': self.write_capacity_units\n }\n }",
"def build_active_schema(cls, attr):\n return cls(attr.name, range=(attr.value, attr.value))",
"def create_schema(self, schema: str):\n return",
"def setup_schema(BaseDao, session):\n def setup_schema_fn():\n for class_ in BaseDao._decl_class_registry.values():\n if hasattr(class_, '__tablename__'):\n if class_.__name__.endswith('Schema'):\n raise ModelConversionError(\n \"For safety, setup_schema can not be used when a\"\n \"Model class ends with 'Schema'\"\n )\n\n class Meta(object):\n model = class_\n sqla_session = session\n dump_only = ('pkId', 'created', 'modified')\n\n schema_class_name = '%sSchema' % class_.__name__\n\n schema_class = type(\n schema_class_name,\n (ModelSchema,),\n {'Meta': Meta}\n )\n\n setattr(class_, '__marshmallow__', schema_class)\n\n return setup_schema_fn",
"def _schema_builder(mocker):\n return mocker.create_autospec(SchemaBuilder)",
"def get_schema(self) -> ArchiveSchema:\n return self.schema",
"def _generate_schema(self):\n\n response = self._request('GET', CosmoSim.SCHEMA_URL,\n auth=(self.username, self.password),\n headers={'Accept': 'application/json'},\n cache=False)\n data = response.json()\n self.db_dict = {}\n for i in range(len(data['databases'])):\n self.db_dict[str(data['databases'][i]['name'])] = {}\n\n sstr = str(data['databases'][i]['name'])\n sid = str(data['databases'][i]['id'])\n self.db_dict[sstr]['id'] = sid\n sdesc = str(data['databases'][i]['description'])\n self.db_dict[sstr]['description'] = sdesc\n self.db_dict[sstr]['tables'] = {}\n for j in range(len(data['databases'][i]['tables'])):\n sstr2 = str(data['databases'][i]['tables'][j]['name'])\n self.db_dict[sstr]['tables'][sstr2] = {}\n sdata = data['databases'][i]['tables'][j]['id']\n self.db_dict[sstr]['tables'][sstr2]['id'] = sdata\n sdesc2 = data['databases'][i]['tables'][j]['description']\n self.db_dict[sstr]['tables'][sstr2]['description'] = sdesc2\n self.db_dict[sstr]['tables'][sstr2]['columns'] = {}\n tmpval = len(data['databases'][i]['tables'][j]['columns'])\n for k in range(tmpval):\n sdata2 = data['databases'][i]['tables'][j]['columns'][k]\n sdata2_id = sdata2['id']\n sstr3 = str(sdata2['name'])\n\n sdesc3 = sdata2['description']\n self.db_dict[sstr]['tables'][sstr2]['columns'][sstr3] = {\n 'id': sdata2_id,\n 'description': sdesc3}\n return response",
"def _get_schema_using_query(self, query: str) -> sch.Schema:\n return sch.Schema.from_tuples(self._metadata(query))",
"def build_song_schema():\n schema = StructType(\n [\n StructField('artist_id', StringType(), True),\n StructField('artist_latitude', DecimalType(), True),\n StructField('artist_longitude', DecimalType(), True),\n StructField('artist_location', StringType(), True),\n StructField('artist_name', StringType(), True),\n StructField('duration', DecimalType(), True),\n StructField('num_songs', IntegerType(), True),\n StructField('song_id', StringType(), True),\n StructField('title', StringType(), True),\n StructField('year', IntegerType(), True)\n ]\n )\n return schema",
"def schema(self):\n # TODO The schema of a container resource...\n # This is the same as the leaf.\n # However, this isn't actually the schema of the response\n return {\n \"$id\": f\"{self.request.resource_url(self)}#schema\",\n \"type\": \"object\",\n \"properties\": {\n \"foo\": {\"type\": \"string\"},\n # generated fields shouldn't be submitted or in forms\n \"url\": {\"type\": \"string\", \"generated\": True},\n }\n }",
"def _schema(self):\n\n self._check_compiled()\n return self._compiled._schema",
"def get_schema(self):\n response = self.client.get(self._get_collection_url('schema'))\n\n return response.get('schema', {})",
"def generate_wc_schema():\n json_str = json.dumps({'fields': [\n {'name': 'word', 'type': 'STRING', 'mode': 'NULLABLE'},\n {'name': 'percent', 'type': 'FLOAT', 'mode': 'NULLABLE'},\n {'name': 'ts', 'type': 'TIMESTAMP', 'mode': 'NULLABLE'}]})\n return parse_table_schema_from_json(json_str)",
"def build_song_schema():\n schema = T.StructType(\n [\n T.StructField('artist_id', T.StringType(), True),\n T.StructField('artist_latitude', T.DecimalType(), True),\n T.StructField('artist_longitude', T.DecimalType(), True),\n T.StructField('artist_location', T.StringType(), True),\n T.StructField('artist_name', T.StringType(), True),\n T.StructField('duration', T.DecimalType(), True),\n T.StructField('num_songs', T.IntegerType(), True),\n T.StructField('song_id', T.StringType(), True),\n T.StructField('title', T.StringType(), True),\n T.StructField('year', T.IntegerType(), True)\n ]\n )\n return schema",
"def _get_schema(self):\n self._pick()\n return Schema()",
"def get_schema(self) -> dict:",
"def _schema_type(self) -> Optional[type]:\n return ImageContainerSchema",
"def _create_field_schema(col_schema: dict) -> bigquery.SchemaField:\n name = to_safe_name(col_schema['name'])\n return bigquery.SchemaField(\n name,\n col_schema.get('type'),\n col_schema.get('mode', 'NULLABLE'),\n col_schema.get('description', '')\n )",
"def get_avro_translated_schema(self):\n type_conversions = {\n 'STRING': 'string',\n 'NUMERIC': {\n 'type': 'bytes',\n 'logicalType': 'decimal',\n 'precision': 38,\n 'scale': 9,\n }\n }\n\n fields = []\n # TODO([email protected]): add support for nested fields\n for bq_field in self.bq_schema:\n field_type = type_conversions[bq_field.field_type]\n\n field = {\n 'name': bq_field.name,\n 'type': field_type,\n }\n\n fields.append(field)\n\n schema_dict = {\n 'type': 'record',\n 'name': self.schema_name,\n 'fields': fields,\n }\n avro_schema = avro.schema.Parse(json.dumps(schema_dict))\n\n return avro_schema",
"def set_schema():\n schema = StructType([\n StructField(\"cicid\",DoubleType(),True),\n StructField(\"arrdate\",DoubleType(),True),\n StructField(\"i94cit\",DoubleType(),True),\n StructField(\"i94res\",DoubleType(),True),\n StructField(\"i94port\",StringType(),True),\n StructField(\"i94mode\",DoubleType(),True),\n StructField(\"i94addr\",StringType(),True),\n StructField(\"depdate\",DoubleType(),True), \n StructField(\"i94bir\",DoubleType(),True),\n StructField(\"i94visa\",DoubleType(),True),\n StructField(\"gender\",StringType(),True),\n StructField(\"airline\",StringType(),True),\n StructField(\"visatype\",StringType(),True)])\n return schema",
"def _get_schema(self):\n\n schema = ProtocolSchema()\n\n schema.id = self.id\n schema.type = type(self).__name__\n\n for input_path in self.required_inputs:\n\n if not (input_path.start_protocol is None or (input_path.start_protocol == self.id and\n input_path.start_protocol == input_path.last_protocol)):\n\n continue\n\n # Always make sure to only pass a copy of the input. Changing the schema\n # should NOT change the protocol.\n schema.inputs[input_path.full_path] = copy.deepcopy(self.get_value(input_path))\n\n return schema",
"def generate_cooccur_schema():\n json_str = json.dumps({'fields': [\n {'name': 'w1', 'type': 'STRING', 'mode': 'NULLABLE'},\n {'name': 'w2', 'type': 'STRING', 'mode': 'NULLABLE'},\n {'name': 'count', 'type': 'INTEGER', 'mode': 'NULLABLE'},\n {'name': 'log_weight', 'type': 'FLOAT', 'mode': 'NULLABLE'},\n {'name': 'ts', 'type': 'TIMESTAMP', 'mode': 'NULLABLE'}]})\n return parse_table_schema_from_json(json_str)"
]
| [
"0.5810922",
"0.55912894",
"0.55674845",
"0.5503729",
"0.530108",
"0.52915776",
"0.52851915",
"0.5175618",
"0.5157794",
"0.5129213",
"0.51003456",
"0.5094893",
"0.50896853",
"0.5078276",
"0.50685155",
"0.50472176",
"0.5033845",
"0.50164914",
"0.5008662",
"0.5008367",
"0.5000015",
"0.49687177",
"0.49473068",
"0.4932776",
"0.49298096",
"0.49190605",
"0.49165756",
"0.48961338",
"0.4893424",
"0.4875745"
]
| 0.6336021 | 0 |
Constructs an AttributeContainerSchema from a JSON dictionary. | def from_dict(cls, d):
schema = d.get("schema", None)
if schema is not None:
schema = {
attr_name: AttributeSchema.from_dict(asd)
for attr_name, asd in iteritems(schema)
}
return cls(schema=schema) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def from_dict(cls, d):\n attr_cls = etau.get_class(d[\"type\"])\n schema_cls = attr_cls.get_schema_cls()\n\n name = d[\"name\"]\n exclusive = d.get(\"exclusive\", False)\n default = d.get(\"default\", None)\n return schema_cls(\n name,\n exclusive=exclusive,\n default=default,\n **schema_cls.get_kwargs(d)\n )",
"def _dict2schema(dct):\n attrs = dct.copy()\n if MARSHMALLOW_VERSION_INFO[0] < 3:\n\n class Meta(object):\n strict = True\n\n attrs[\"Meta\"] = Meta\n return type(str(\"\"), (ma.Schema,), attrs)",
"def from_json(cls, json_string:str):\n data = json.loads(json_string)\n validate(data, schema)\n instance = cls.from_dict(data)\n return instance",
"def parse(json_string):\n try:\n json_data = json.loads(json_string)\n except Exception as exn:\n raise SchemaParseException(\n 'Error parsing schema from JSON: %r. '\n 'Error message: %r.'\n % (json_string, exn))\n\n # Initialize the names object\n names = Names()\n\n # construct the Avro Schema object\n return schema_from_json_data(json_data, names)",
"def from_schema(cls, sdict):\n\n table_schema = TableSchema()\n for name, dschema in sdict.items():\n\n schema = ColumnSchema(name=name, **dschema)\n table_schema.add_column(schema)\n\n return table_schema",
"def from_dict(cls, _dict: Dict) -> 'Resource':\n args = {}\n if 'attributes' in _dict:\n args['attributes'] = [Attribute.from_dict(x) for x in _dict.get('attributes')]\n return cls(**args)",
"def from_dict(cls, _dict: Dict) -> 'Attribute':\n args = {}\n if 'name' in _dict:\n args['name'] = _dict.get('name')\n if 'value' in _dict:\n args['value'] = _dict.get('value')\n return cls(**args)",
"def parse(json_value: Dict[str, Jsonish]) -> ParamSchema:\n return _parse_kwargs(**json_value)",
"def from_json(cls, json_str: str):\n\n def read_input(x: dict):\n return TensorSpec.from_json_dict(**x) if x[\"type\"] == \"tensor\" else ColSpec(**x)\n\n return cls([read_input(x) for x in json.loads(json_str)])",
"def from_dict(cls, dikt) -> 'SourceSchema':\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt) -> 'LightSourceMaterialSchema':\n return util.deserialize_model(dikt, cls)",
"def from_json(cls, tag_json):\n return cls.from_dict(json.loads(tag_json))",
"def from_json_file(cls, json_file:str):\n with open(json_file) as file:\n data = json.load(file)\n validate(data, schema)\n instance = cls.from_dict(data)\n return instance",
"def from_dict(cls, dictionary: Dict[str, Any]):\n return cls(**dictionary)",
"def from_dict(cls, fs_dict):\n\n feature_set_proto = json_format.ParseDict(\n fs_dict, FeatureSetProto(), ignore_unknown_fields=True\n )\n return cls.from_proto(feature_set_proto)",
"def from_json(cls, json_string=None, filename=None, encoding='utf-8', errors='strict', **kwargs):\n bx_args = {}\n for arg in kwargs.copy():\n if arg in BOX_PARAMETERS:\n bx_args[arg] = kwargs.pop(arg)\n data = _from_json(json_string, filename=filename, encoding=encoding, errors=errors, **kwargs)\n if not isinstance(data, dict):\n raise BoxError('json data not returned as a dictionary, but rather a {0}'.format(type(data).__name__))\n return cls(data, **bx_args)",
"def from_dict(cls, d):\n d = d.copy()\n if \"length\" in d:\n # length argument removed in version 1.1.0\n del d[\"length\"]\n return cls(**d)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)"
]
| [
"0.67771685",
"0.63776934",
"0.62418735",
"0.6074002",
"0.6064018",
"0.60208553",
"0.587389",
"0.5806087",
"0.5778693",
"0.57692313",
"0.57473946",
"0.5735322",
"0.5715946",
"0.5680771",
"0.56228805",
"0.5593468",
"0.5558336",
"0.5556168",
"0.5556168",
"0.5556168",
"0.5556168",
"0.5556168",
"0.5556168",
"0.5556168",
"0.5556168",
"0.5556168",
"0.5556168",
"0.5556168",
"0.5556168",
"0.5556168"
]
| 0.7527382 | 0 |
Gets the attribute for the given mask value. | def get_attr(self, value):
return self.index[value] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _MaskedImage_get(self, x, y):\n return (self.getImage().get(x, y),\n self.getMask().get(x, y),\n self.getVariance().get(x, y))",
"def getattribute(self, name):\n return self.attributes[name]",
"def with_mask(self, mask):\n return self[mask]",
"def getAttr(self, name, *args):\n if len(args) > 0:\n return self.attrs.get( name, args[0] )\n return self.attrs[name]",
"def get_attr(self, name, varname=None):\n if varname is None:\n return self.root_group.getncattr(name)\n else:\n return self.root_group[varname].getncattr(name)",
"def _getVal(self, mask, size):\n\t\tif (int(self.header['flags']) & mask) > 0:\n\t\t\t# First unsigned short is the length of the string\n\t\t\ttxt = self.fpLnk.read(2)\n\t\t\tlength = struct.unpack(\"<H\", txt)[0]\n\t\t\t# Adjust for unicode\n\t\t\tlength = length * size\n\t\t\treturn self.fpLnk.read(length)",
"def getAttribute(self, lp, which):\n return lp.solverModel.getAttrib(which)",
"def get_attribute(self, name):\n return self.element.get_attribute(name)",
"def _get_attr(self, attr, root=None):\n with self._h5file('r') as h5file:\n if root is None:\n obj = h5file\n else:\n obj = h5file[root]\n return get_decoded(obj.attrs, attr)[attr]",
"def get_attribute_by_name(self, name):\n if name in self._attributes:\n return self._attributes[name]",
"def get_attribute(self, key):\n return self.attributes[key]",
"def get_attr(name):\n userDoc = get_user()\n _idx = userDoc.index.get(name, None)\n\n if _idx is not None:\n return userDoc.attributes[_idx]\n else:\n return None",
"def get_attr(attributes, name):\n try:\n return attributes.getValue(name)\n except KeyError:\n return None",
"def GetMask(*args, **kwargs):\n return _gdi_.Bitmap_GetMask(*args, **kwargs)",
"def get_attribute(self, name):\n\n pass",
"def GetAttribute(self):\n return _itkLabelShapeOpeningImageFilterPython.itkLabelShapeOpeningImageFilterIUS3_GetAttribute(self)",
"def get_attribute(self, selector, attribute):\n el = self.locate_element(selector)\n return el.get_attribute(attribute)",
"def get_attr(self, name: str):\n return self.call(name)",
"def map_get(node, path):\n if path not in find_map:\n raise errors.RadistPathError(\"Invalid attribute specification\", path)\n attr = map_get_unsafe(node, path)\n if attr is not None:\n return attr\n else:\n raise errors.RadistPathError(\"Attribute isn't defined\", path)",
"def fn_get_att(self, value):\n\n resource_name, key = value\n if resource_name in self.root.get('Resources', ()):\n resource = self.parser.exploded(self.root['Resources'], resource_name)\n try:\n return self._find_att(resource, key)\n except KeyError as e:\n if e.args != (key,):\n raise\n\n return UnknownValue(\"ATT: {}.{}\".format(resource_name, key))",
"def get(flag=\"rainbow\"):\n return flags[flag]",
"def get(self, att):\n return getattr(self, att)",
"def _get_node_attr(self, node, attr):\n return self.metrics[attr].ix[node]",
"def GetAttribute(self, attr):\n return self._attrs[attr]",
"def get_color(mask: int, position: int):\n return (mask >> (position << 1)) & 3",
"def get_value_of(self, attr):\n return getattr(self, attr.upper(), 0)",
"def _get_attrib(self, attrib_path: str, binfo: dict) -> str:\n apath = attrib_path.split('.')\n return self._get_attrib_by_path(apath, binfo)",
"def GetAttribute(self):\n return _itkLabelShapeOpeningImageFilterPython.itkLabelShapeOpeningImageFilterIUS2_GetAttribute(self)",
"def get_mask(self, dataset_name):\n p = path.join(self.dataset_root, dataset_name + \"/\")\n mask_path = serial.preprocess(p + \"mask.npy\")\n mask = np.load(mask_path)\n if not np.all(np.bitwise_or(mask == 0, mask == 1)):\n raise ValueError(\"Mask has incorrect values.\")\n return mask",
"def getattribute(objeto, name: str):\r\n # Get internal dict value matching name.\r\n value = objeto.__dict__.get(name)\r\n if not value:\r\n # Raise AttributeError if attribute value not found.\r\n return None\r\n # Return attribute value.\r\n return value"
]
| [
"0.6186562",
"0.60222083",
"0.59830433",
"0.5904889",
"0.5858861",
"0.5842193",
"0.5794798",
"0.5771632",
"0.5712678",
"0.5712663",
"0.56443495",
"0.562588",
"0.56038415",
"0.5592644",
"0.55911195",
"0.557782",
"0.5570431",
"0.555515",
"0.5530427",
"0.55128914",
"0.55091274",
"0.5489631",
"0.5482204",
"0.5481001",
"0.5478243",
"0.54716367",
"0.54544485",
"0.5442167",
"0.54341143",
"0.54339916"
]
| 0.6249164 | 0 |
Returns a MaskIndex for the given labels map. | def from_labels_map(cls, labels_map):
mask_index = cls()
for index, value in iteritems(labels_map):
mask_index[index] = CategoricalAttribute("label", value)
return mask_index | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def label_to_mask(labels):\n # get the image size\n h, w = labels.shape\n\n # build a color to label map\n idx_to_color = {}\n for label in class_info:\n idx_to_color[class_info[label].id] = class_info[label].color\n\n # generate label matrix\n mask = np.zeros((h, w, 3), dtype=np.uint8)\n for y in range(h):\n for x in range(w):\n id = labels[y, x]\n r, g, b = idx_to_color[id]\n mask[y, x] = np.array([b, g, r])\n\n return mask",
"def get_label_ix_mapping(labels):\n return {label: i for i, label in enumerate(labels)}",
"def mask2categorical(Mask: tf.Tensor, labels: dict) -> tf.Tensor:\n assert type(labels) == dict, \"labels variable should be a dictionary\"\n\n X = Mask\n\n if X.dtype == \"float32\":\n X = tf.cast(X*255, dtype=\"uint8\")\n\n Y = tf.zeros(X.shape[0:2] , dtype=\"float32\")\n for i, key in enumerate(labels):\n Y = tf.where(np.all(X == labels[key], axis=-1), i, Y)\n Y = tf.cast(Y, dtype=\"uint8\")\n return Y",
"def _get_triplet_mask(self, labels):\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n # Check that i, j and k are distinct\n indices_not_same = torch.eye(labels.shape[0]).to(device).byte() ^ 1\n i_not_equal_j = torch.unsqueeze(indices_not_same, 2)\n i_not_equal_k = torch.unsqueeze(indices_not_same, 1)\n j_not_equal_k = torch.unsqueeze(indices_not_same, 0)\n distinct_indices = i_not_equal_j * i_not_equal_k * j_not_equal_k\n\n # Check if labels[i] == labels[j] and labels[i] != labels[k]\n label_equal = torch.eq(torch.unsqueeze(labels, 0), torch.unsqueeze(labels, 1))\n i_equal_j = torch.unsqueeze(label_equal, 2)\n i_equal_k = torch.unsqueeze(label_equal, 1)\n valid_labels = i_equal_j * (i_equal_k ^ 1)\n\n mask = distinct_indices * valid_labels # Combine the two masks\n\n return mask",
"def get_label_map(labels):\n label_map = dict()\n for i,v in enumerate(np.ravel(labels.data)):\n if v in label_map.keys():\n label_map.get(v).append(i)\n else:\n label_map[v] = [i]\n return label_map",
"def mask_labels(labels):\n def do_one_row(row):\n erase = False\n for i, _ in enumerate(row):\n if erase:\n row[i] = 0\n else:\n if row[i] == 10:\n erase = True\n row[i] = 1\n return row\n\n ret = np.copy(labels)\n return np.apply_along_axis(do_one_row, axis=1, arr=ret)",
"def get_mask_by_label(data, label):\n mask = np.copy(data)\n mask[mask != label] = -1\n mask[mask == label] = 0\n mask += 1\n return mask",
"def masked_indices(mask):\n\treturn np.nonzero(np.ravel(mask,order='C'))[0]",
"def indices_of_label(self, label_name):\n return self.indices_of('label', label_name)",
"def encode_segmap(self, mask):\n mask = mask.astype(int)\n label_mask = np.zeros((mask.shape[0], mask.shape[1]), dtype=np.int16)\n for ii, label in enumerate(self.get_pascal_labels()):\n label_mask[np.where(np.all(mask == label, axis=-1))[:2]] = ii\n label_mask = label_mask.astype(int)\n return label_mask",
"def get_idx_from_sent_rnn_mask(self, sent, word_idx_map, max_l=45, k=300, filter_h=5):\n x = []\n mask = None\n pad = 1\n for i in xrange(pad):\n x.append(0)\n\n words = sent.split()\n for word in words:\n if word in word_idx_map:\n x.append(word_idx_map[word])\n mask.append(1)\n while len(x) < max_l+2*pad:\n x.append(0)\n mask.append(0)\n return x, mask",
"def makeindmap(self,indmap,s,backgroud = None):\n if s == 1:\n raise TestConvNetError('s should be larger than 1')\n wr,wc = indmap.shape[0],indmap.shape[1]\n stride = 12\n filter_size = 30\n if backgroud is None:\n res = np.zeros((wr*s,wc*s,3),dtype=np.float32)\n else:\n alpha = 0.6\n if backgroud.shape != (wr*s,wc*s,3):\n raise TestConvNetError('Error size of backgroud')\n res = alpha * backgroud.copy()\n for i in range(wr):\n for j in range(wc):\n res[i*stride:i*stride+filter_size,j*stride:j*stride+filter_size]+= (1-alpha)* indmap[i,j]\n m = np.minimum(res,1)\n \n return m",
"def get_ind(labels, k):\n return (np.array(labels) == k).astype('float64')",
"def mask_index(self) -> int:\n return self._mask_index",
"def from_index_mapping(cls, mim):\n tables = [\n {key: (value.label, value.rgba) for key, value in nm.label_table.items()}\n for nm in mim.named_maps\n ]\n rest = ScalarAxis.from_index_mapping(mim)\n return LabelAxis(rest.name, tables, rest.meta)",
"def mask_to_label(mask):\n # get the image size\n h, w, _ = mask.shape\n\n # build a color to label map\n color_to_idx = {}\n for label in class_info:\n color_to_idx[class_info[label].color] = class_info[label].id\n\n # generate label matrix\n label = np.zeros((h, w), dtype=np.uint8)\n for y in range(h):\n for x in range(w):\n b, g, r = mask[y, x]\n color = (r, g, b)\n label[y, x] = color_to_idx[color]\n\n return label",
"def get_idxes_from_mask(mask):\n if len(mask) > 1:\n return torch.nonzero(mask.squeeze(), as_tuple=False).reshape(1, -1)[0]\n elif len(mask) == 1:\n return torch.tensor([0], dtype=torch.int64) if mask.sum() == 1 else torch.tensor([], dtype=torch.int64)\n return torch.tensor([], dtype=torch.int64)",
"def maskLabel(self, mask):\n\n # Mask-based indexing requires the image\n # data to be loaded into memory\n self.data\n\n # Extract the values that are in\n # the mask, and their corresponding\n # mask weights\n mask = self.prepareMask(mask)\n boolmask = mask > 0\n vals = self[boolmask]\n weights = mask[boolmask]\n weightsum = weights.sum()\n gotValues = np.unique(vals)\n values = []\n props = []\n\n # Only consider labels that\n # this atlas is aware of\n for label in self.desc.labels:\n if label.value in gotValues:\n\n # Figure out the number of all voxels\n # in the mask with this value, weighted\n # by the mask.\n prop = weights[vals == label.value].sum()\n\n # Normalise it to be a proportion\n # of all voxels in the mask. We\n # multiply by 100 because the FSL\n # probabilistic atlases store their\n # probabilities as percentages.\n values.append(label.value)\n props .append(100 * prop / weightsum)\n\n return values, props",
"def mask_indices(n,mask_func,k=0):\r\n m = ones((n,n),int)\r\n a = mask_func(m,k)\r\n return where(a != 0)",
"def mask(self):\n return self.mask_index",
"def get_mask_dictionary(train_names):\n masks={}\n for name in train_names:\n masks[name]=cv.imread(\"../dataset/masks/\"+name+\".png\",cv.IMREAD_GRAYSCALE)\n \n return masks",
"def encode_labelmap(colour_img, colourlabelmap):\n colour_img = colour_img.astype(int)\n labels = np.zeros((colour_img.shape[0], colour_img.shape[1]), dtype=np.int16)\n for label_id, colour in enumerate(colourlabelmap):\n labels[np.where(np.all(colour == colour_img, axis=-1))] = label_id\n\n return labels",
"def get_label_masks(self, vocabs, language):\n fn = 'data/{}/conll09/train.txt'.format(language)\n lemma_to_preds = get_lemma_to_preds(fn)\n masks = np.zeros((vocabs['plemmas'].size, vocabs['predicates'].size),\n dtype=np.float32)\n for i, lemma in vocabs['plemmas'].idx_to_word.iteritems():\n if lemma in lemma_to_preds:\n preds = lemma_to_preds[lemma]\n idxs = vocabs['predicates'].encode_sequence(preds)\n for j in idxs:\n masks[i][j] = 1.0\n else:\n masks[i, :] = 1.0 # Allow everything\n return masks",
"def createMaskDictionary(self):\n try:\n self.maskMap = dict(list(zip(self.inds,list(range(len(self.inds))))))\n self.maskSet = set(self.inds)\n except Exception as error:\n print(\"failed in createMaskDictionary\", error)",
"def decode_labels(mask, num_classes=41):\n h, w = mask.shape\n outputs = np.zeros((h, w, 3), dtype=np.uint8)\n\n img = Image.new('RGB',(len(mask[0]), len(mask)))\n pixels = img.load()\n for j_, j in enumerate(mask):\n for k_, k in enumerate(j):\n if k < num_classes:\n pixels[k_, j_] = label_colours[k]\n outputs = np.array(img)\n return outputs",
"def get_positive_mask(labels):\n batch_shape = tf.shape(labels)[0]\n mask_1 = tf.logical_not(get_negative_mask(labels))\n mask_2 = tf.logical_not(tf.eye(batch_shape, dtype=tf.bool))\n return tf.logical_and(mask_1, mask_2)",
"def encode_segmap(self, mask):\n for voidc in self.void_labels:\n mask[mask == voidc] = self.ignore_index\n for validc in self.valid_labels:\n mask[mask == validc] = self.class_map[validc]\n # remove extra idxs from updated dataset\n mask[mask > 33] = self.ignore_index\n return mask",
"def get_label_indices(df: DataFrame, labels: list):\n return [idx for idx, name in enumerate(df.columns) if name in labels]",
"def get_contest_mask():\n return createmaskdf(\"data/fcstrodeo_nctemplates/fcstrodeo_mask.nc\")",
"def mask_indices(n, mask_func, k=0):\r\n m = np.ones((n, n), int)\r\n a = mask_func(m, k)\r\n return np.where(a != 0)"
]
| [
"0.67572516",
"0.6358951",
"0.6177919",
"0.60563976",
"0.6012035",
"0.5983042",
"0.5929131",
"0.58174324",
"0.5817323",
"0.57576334",
"0.5706275",
"0.56616384",
"0.5649061",
"0.56425726",
"0.5634829",
"0.56325567",
"0.5567068",
"0.5558785",
"0.55503845",
"0.55488545",
"0.5539359",
"0.55390394",
"0.5534613",
"0.5495851",
"0.5489959",
"0.5465957",
"0.5453933",
"0.5448946",
"0.54450876",
"0.54436684"
]
| 0.80756646 | 0 |
Constructs a MaskIndex from a JSON dictionary. | def from_dict(cls, d):
index = d.get("index", None)
if index is not None:
index = {
int(value): Attribute.from_dict(ad)
for value, ad in iteritems(index)
}
return cls(index=index) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, mapping=None, **kwargs):\n\n masked_kwargs = {}\n if len(kwargs) > 0:\n masked_kwargs = self._mask_dict(kwargs)\n\n if isinstance(mapping, MaskedDict):\n super().__init__(self._unify(mapping, masked_kwargs))\n\n elif isinstance(mapping, (dict, Headers)):\n temp = {}\n for key, value in mapping.items():\n if masking_services.should_mask(key) is True:\n temp[key] = self.MASK\n else:\n temp[key] = self._mask(value)\n\n super().__init__(self._unify(temp, masked_kwargs))\n\n elif isinstance(mapping, LIST_TYPES):\n temp = []\n for key, value in mapping:\n if masking_services.should_mask(key) is True:\n temp.append((key, self._mask(value)))\n else:\n temp.append((key, value))\n\n super().__init__(temp, **masked_kwargs)\n\n else:\n super().__init__(masked_kwargs)",
"def _mask_dict(self, value):\n\n return MaskedDict(value)",
"def from_dict(cls, d):\n return cls(d[\"sequence\"], immutable_bounds=d[\"immutable_bounds\"])",
"def from_labels_map(cls, labels_map):\n mask_index = cls()\n for index, value in iteritems(labels_map):\n mask_index[index] = CategoricalAttribute(\"label\", value)\n\n return mask_index",
"def json2mask(txt, mattr, filepath):\n img = np.zeros((2048, 2448, 3),\n dtype=np.uint8)\n info = json.loads(txt)['codes']\n for code in info:\n barcode_area = (slice(code['y0'], code['y1']),\n slice(code['x0'], code['x1']), slice(0, 3))\n leny = barcode_area[0].stop - barcode_area[0].start\n lenx = barcode_area[1].stop - barcode_area[1].start\n img[barcode_area] = 1\n if leny * lenx > (2048 * 2448) / 16: # if barcodearea larger than a\n # 16th of the original image\n return None\n return img",
"def Expand_Mask(mask, feature_dict):\n new_mask = np.zeros(mask.shape + (len(feature_dict),))\n for i in feature_dict.keys():\n ni = int(i)\n new_mask[mask == ni,ni] = 1 \n return new_mask",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)"
]
| [
"0.5478897",
"0.5297613",
"0.52694786",
"0.5240679",
"0.51570094",
"0.5151151",
"0.50982904",
"0.50982904",
"0.50982904",
"0.50982904",
"0.50982904",
"0.50982904",
"0.50982904",
"0.50982904",
"0.50982904",
"0.50982904",
"0.50982904",
"0.50982904",
"0.50982904",
"0.50982904",
"0.50982904",
"0.50982904",
"0.50982904",
"0.50982904",
"0.50982904",
"0.50982904",
"0.50982904",
"0.50982904",
"0.50982904",
"0.50982904"
]
| 0.5821097 | 0 |
Creates a DataFileSequence instance for the given sequence. | def __init__(self, sequence, immutable_bounds=True):
self.sequence = sequence
self.immutable_bounds = immutable_bounds
self._extension = os.path.splitext(self.sequence)[1]
self._lower_bound, self._upper_bound = etau.parse_bounds_from_pattern(
self.sequence
)
self._iter_index = None
if self._lower_bound is None or self._upper_bound is None:
raise DataFileSequenceError(
"Sequence '%s' did not match any files on disk" % sequence
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create(cls: Type[Sequence], sequence: bytes, alphabet: Alphabet) -> Sequence:\n return cls(lib.imm_seq_create(sequence, alphabet.imm_abc), alphabet)",
"def sequence_to_biopython_record(\n sequence, id=\"<unknown id>\", name=\"<unknown name>\", features=()\n):\n if has_dna_alphabet:\n seq = Seq(sequence, alphabet=DNAAlphabet())\n else:\n seq = Seq(sequence)\n\n return SeqRecord(\n seq=seq,\n id=id,\n name=name,\n features=list(features),\n annotations={\"molecule_type\": \"DNA\"},\n )",
"def __init__(self, sequence):\n self._seq = sequence # Copy of the given data.\n # Reference to the underlying data, will increment to 0 on first call\n # to next element.\n self._k = -1",
"def to_seq_record(self):\n\t\t#create the anotations in a pythonic manner\n\t\texempt = ['name', 'description', 'features', 'sequence'] #things which aren't annotations\n\t\tannotations = { }\n\t\tfor key, value in self.__dict__.iteritems():\n\t\t\tif key.lower() not in exempt:\n\t\t\t\tannotations[key] = value\n\t\t\n\t\t#create the features\n\t\tfeatures = []\n\t\tfor feat in self.features:\n\t\t\tfeatures.append( SeqFeature( \n\t\t\t\tlocation = FeatureLocation(feat['startpos'] - 1, feat['endpos']), #NB partsregistry uses 1-offset, and inclusive.\n\t\t\t\ttype = feat['type'],\n\t\t\t\tstrand = feat['strand'],\n\t\t\t\tqualifiers = {'title': feat['name'],}))\n\t\t\n\t\treturn SeqRecord(\tself.sequence, \n\t\t\t\t\t\t\tid=self.name,\n\t\t\t\t\t\t\tname=self.name,\n\t\t\t\t\t\t\tdescription=self.description,\n\t\t\t\t\t\t\tfeatures=features,\n\t\t\t\t\t\t\tannotations=annotations)",
"def _make_seq_container(\n klass, data, moltype=None, label_to_name=None, info=None, source=None, **kw\n):\n if moltype is not None:\n moltype = get_moltype(moltype)\n\n info = info or {}\n for other_kw in (\"constructor_kw\", \"kw\"):\n other_kw = kw.pop(other_kw, None) or {}\n kw.update(other_kw)\n assert isinstance(info, dict), \"info must be a dict\"\n source = source or info.get(\"source\", \"unknown\")\n info[\"source\"] = str(source)\n\n return klass(\n data=data, moltype=moltype, label_to_name=label_to_name, info=info, **kw\n )",
"def get_sequence(self):\n if os.path.isfile(self.input):\n with open(self.input, \"r\") as file:\n self.sequence = file.read()\n else:\n raise oma.SequenceError(\"Cannot open {0}\".format(self.input))",
"def from_sequence(\n cls,\n sequence: List[str],\n topology: CHARMMResidueTopologyFile,\n patches: (Dict[str, List[int]], None) = None,\n name: (str, None) = None,\n segid: (str, None) = None\n ):\n if name is None:\n name = []\n for charmm_name in sequence:\n try:\n name.append(CHARMM_TO_ACID_CODE[charmm_name])\n except KeyError:\n name.append(\"-{}-\".format(charmm_name))\n name = ''.join(name)\n instance = cls(\n name, patches=patches, topology=topology, segment=segid\n )\n\n for residue_index, residue_name in enumerate(sequence):\n residue_definition = topology.residues[residue_name]\n residue = residue_definition.to_residue(residue_index)\n instance.residues.append(residue)\n instance.finalize()\n return instance",
"def load_seq(\n filename: os.PathLike,\n annotation_path: Optional[os.PathLike] = None,\n format: Optional[str] = None,\n moltype: Optional[str] = None,\n label_to_name: Optional[Callable] = None,\n parser_kw: Optional[dict] = None,\n info: Optional[dict] = None,\n **kw,\n) -> Sequence:\n info = info or {}\n info[\"source\"] = str(filename)\n file_format, _ = get_format_suffixes(filename)\n if file_format == \"json\":\n seq = load_from_json(filename, (Sequence,))\n seq.name = label_to_name(seq.name) if label_to_name else seq.name\n return seq\n\n data = _load_seqs(file_format, filename, format, kw, parser_kw)\n name, seq = data[0]\n name = label_to_name(name) if label_to_name else name\n result = make_seq(seq, name, moltype=moltype)\n result.info.update(info)\n\n if getattr(seq, \"annotation_db\", None):\n result.annotation_db = seq.annotation_db\n\n if annotation_path is not None:\n result.annotation_db = load_annotations(path=annotation_path, seqids=[name])\n return result",
"def from_json(cls, sequence_name: str, data: Dict[str, Any]):\n sequence = cls(sequence_name)\n sequence.total_count = data['total_count']\n sequence.next_count = data['next_count']\n return sequence",
"def from_text(cls, fasta_sequence):\n if isinstance(fasta_sequence, basestring):\n fasta_sequence = StringIO(fasta_sequence)\n fasta_sequence = iter(fasta_sequence)\n self = cls()\n for line in fasta_sequence:\n header = line.strip()\n if header.startswith(';'):\n continue\n if header:\n break\n else:\n raise EmptyInput\n if header[0] != '>':\n raise FormatError('illegal characters before header')\n parts = header.split(None, 1)\n if len(parts) == 0:\n raise FormatError('no sequence id')\n self.id = parts[0][1:]\n if len(parts) > 1:\n self.description = parts[1].strip()\n sequence = []\n for line in fasta_sequence:\n if line.strip().startswith(';'):\n continue\n sequence.extend(line.split())\n self.sequence = ''.join(sequence)\n return self",
"def create_fasta_seqrecord(header, sequence_string):\n seq = Seq(sequence_string, alphabet=IUPAC.unambiguous_dna)\n seqrecord = SeqRecord(seq, description=header)\n return seqrecord",
"def __new__(cls, project=None, name=None, code=None):\n \n if project and name:\n \n project = Sequence._check_project(project)\n \n # condition the name\n name = Sequence._condition_name(name)\n \n # now get it from the database\n seq_db = db.session.query(Sequence).\\\n filter_by(name=name).first()\n \n if seq_db is not None:\n logger.debug(\"found the sequence in the database\")\n logger.debug(\"returning the Sequence instance from the \"\n \"database\")\n \n seq_db.__skip_init__ = None\n return seq_db\n else:\n logger.debug(\"the Sequence should be new, there is no such \"\n \"Sequence in the database\")\n \n # in any other case just return the normal __new__\n logger.debug(\"returning a normal Sequence instance\")\n return super(Sequence, cls).__new__(cls, project, name, code)",
"def parse_sequence(fasta_seq, length):\n # extract name and sequence\n name, seq = fasta_seq.id, fasta_seq.seq\n\n # Cannot create a larger sequence than the original\n length = min(length, len(seq))\n\n # find the maximum starting index that generates a full\n # length subsequence\n max_start = len(seq) - length\n start = random.randint(0, max_start)\n \n # generate a new sequence\n gen_seq = seq[start:(start + length)]\n\n # return the generated sequence\n return SeqRecord(gen_seq, name, '', '')",
"def static(seq: List[int]):\n return Data._create_dataset(seq, pad=True)",
"def create(self):\n \n # create the sequence structure by calling the self.project.create\n self.project.create()",
"async def generate_sequence_fasta(db, sequence_id):\n sequence = await db.sequences.find_one(sequence_id, [\"sequence\", \"otu_id\", \"isolate_id\"])\n\n if not sequence:\n raise virtool.errors.DatabaseError(\"Sequence does not exist\")\n\n otu_name, isolate_name = await get_otu_and_isolate_names(db, sequence[\"otu_id\"], sequence[\"isolate_id\"])\n\n fasta = format_fasta_entry(\n otu_name,\n isolate_name,\n sequence_id,\n sequence[\"sequence\"]\n )\n\n return format_fasta_filename(otu_name, isolate_name, sequence[\"_id\"]), fasta",
"def __init__(self, name, sequence, description=None, seq_type=None):\n self.name = name\n self.description = description\n self.seq_type = seq_type\n self._sequence = sequence",
"async def run_sequence(self, sequence: Union[str, list], **kwargs: Optional[Any]):\n namespace = self._get_namespace(**kwargs)\n\n if \"namespace\" in kwargs:\n del kwargs[\"namespace\"]\n\n _name = self.name\n self.logger.debug(\"Calling run_sequence() for %s from %s\", sequence, self.name)\n return await self.AD.sequences.run_sequence(_name, namespace, sequence, **kwargs)",
"def __init__(self, seq: str, debug=False):\n self.debug = debug\n self.seq = self._validate_input_sequence(seq)\n self.suffixes = []\n self.stage = 1",
"def __init__(self, seq, annotation=False):\n self.seq = seq\n self.length = len(seq)\n self.annotation = annotation",
"def __init__(self, seq):\n # Check the type of seq. Only strings are accepted\n if type(seq) == type(\"string\"):\n self.sequence = seq.upper()\n else:\n raise Exception(\"Invalid typesequence of nucleotides for Sequence class.\")",
"def get_sequences(data_path, gene):\n \n sequence_file = os.path.join(data_path, gene + \".fasta\")\n try:\n sequences_gene = sequence.Sequence.create(file = sequence_file, input_format = 'fasta')\n except FileNotFoundError:\n print(\"Did not found {} in {}.\".format(gene,data_path))\n sequences_gene = \"Did not found {} in {}.\".format(gene,data_path)\n except:\n print(\"Unexpected Error while trying to get the sequence from {}.\".format(sequence_file))\n sequences_gene = \"Unexpected Error while trying to get the sequence from {}.\".format(sequence_file)\n # print(\"sequences_gene\", sequences_gene)\n return sequences_gene",
"def sequence(self, sequence):\n\n self._sequence = sequence",
"def __validate(self, seqdata):\n\n _Sequence = namedtuple('Seq', ['name', 'data'])\n\n # file-like object\n # isinstance(obj, file) does not hold in Py3\n if hasattr(seqdata, 'read') and hasattr(seqdata, 'name'):\n self.logger.debug('Reading data from file-like object {}'.format(seqdata.name))\n fname = seqdata.name\n\n elif isinstance(seqdata, basestring):\n self.logger.debug('Reading data from file path {}'.format(seqdata))\n fname = seqdata\n\n # can be file name string or sequence\n if not os.path.isfile(fname):\n raise OSError('Sequence file not found: {}'.format(seqdata))\n else:\n raise TypeError('Sequence input format not recognized: {}'.format(seqdata))\n\n # parse and validate sequences\n # defining these two a prior just in case later we decide to support more stuff\n _seq_alphabet = IUPACProtein()\n _seq_format = 'fasta'\n\n seq_iterator = SeqIO.parse(seqdata, _seq_format, alphabet=_seq_alphabet)\n for seq_i, seq_record in enumerate(seq_iterator, start=1):\n\n seq_name = seq_record.name\n seq_raw = str(seq_record.seq)\n if not _verify_alphabet(seq_record.seq):\n msg = 'Entry #{} ({}) in {} is not a valid protein sequence'\n raise ParseError(msg.format(seq_i, seq_name, fname))\n\n self.sequences.append(_Sequence(seq_name, seq_raw))\n\n return self.sequences",
"def _set_sequence(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"sequence\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint32', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"sequence must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"sequence\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint32', is_config=True)\"\"\",\n })\n\n self.__sequence = t\n if hasattr(self, '_set'):\n self._set()",
"def _set_sequence(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"sequence\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint32', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"sequence must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"sequence\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint32', is_config=True)\"\"\",\n })\n\n self.__sequence = t\n if hasattr(self, '_set'):\n self._set()",
"def _set_sequence(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"sequence\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint32', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"sequence must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"sequence\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint32', is_config=True)\"\"\",\n })\n\n self.__sequence = t\n if hasattr(self, '_set'):\n self._set()",
"def createSequenceFile(sequences, tmpDir, filename='seq.fa'):\n seqfile = os.path.join(tmpDir, filename)\n with open(seqfile, 'w') as f:\n for name, sequence in sequences.iteritems():\n f.write(\">{}\\n{}\\n\".format(name, sequence))\n subprocess.call(\"pyfasta flatten {}\".format(seqfile), shell=True)\n return seqfile",
"def fetch_sequence(sequence_id, database='uniprot'):\n if sequence_id.startswith('UPI'):\n database = 'uniparc'\n url_template = 'http://www.uniprot.org/uniparc/{}.fasta'\n elif sequence_id.startswith('UniRef'):\n database = 'uniref'\n url_template = 'http://www.uniprot.org/uniref/{}.fasta'\n else:\n database = 'uniprot'\n url_template = 'http://www.uniprot.org/uniprot/{}.fasta'\n\n url = url_template.format(sequence_id)\n logger.debug('Downloading sequence {} from {}...'.format(sequence_id, url))\n\n r = requests.get(url)\n if r.status_code != 200:\n raise Exception(\"Failed to fetch sequence with return code: {}\".format(r.status_code))\n\n seq = Bio.SeqIO.read(io.StringIO(r.text), 'fasta')\n if database == 'uniprot':\n seq.annotations['db'], seq.id, seq.name = re.split('[\\| ]', seq.id)\n return seq",
"def test_create_seqstructs(self):\n with open(\"./support_files/cs.fasta\") as fin:\n obs = create_seqstructs(fin, 10)\n self.assertEqual(obs, self.seqstruct)"
]
| [
"0.6637892",
"0.6181747",
"0.6043447",
"0.6024799",
"0.6001478",
"0.599184",
"0.59164035",
"0.5851341",
"0.5843446",
"0.58333296",
"0.5832064",
"0.5780453",
"0.5718229",
"0.56897944",
"0.56620103",
"0.55939054",
"0.5567729",
"0.55158764",
"0.549222",
"0.5492118",
"0.5480412",
"0.54624873",
"0.5445623",
"0.54395306",
"0.5432087",
"0.5432087",
"0.5432087",
"0.5427082",
"0.5398782",
"0.5393098"
]
| 0.619707 | 1 |
Checks if the index is within the bounds for this sequence. | def check_bounds(self, index):
if index < self.lower_bound or index > self.upper_bound:
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __verify_index(self, index):\n if not isinstance(index, int):\n raise TypeError(\"Index must be of type int\")\n elif index >= self.length or index < -self.length:\n raise IndexError(\"Index out of bounds\")\n return True",
"def __is_valid(self, subscript):\n return ((0,0) <= subscript and subscript < self.size)",
"def valid_index(self, index):\n if 0 <= index < self._list_size:\n return True\n else:\n return False",
"def in_range(self, index):\n for match in self.matches:\n if match.start_pos <= index < match.end_pos:\n return True\n\n return False",
"def out_of_bounds(self):\n return self.rect.right <= 0",
"def _inrange(self, index):\n if len(index) != self.ndim:\n raise Exception('SparseN tensor has %d dimensions, and requires the same number of indices.'%self.ndim)\n for ii, ss in zip(index,self.shape):\n if ii < 0 or ii >= ss:\n raise Exception('Index is out of range: %d'%index)",
"def check_bounds(self, row: int, col: int) -> bool:\n return 0 <= row < self.row and 0 <= col < self.col",
"def in_bounds(self, position):\n row, col = position\n return ((row >= 0 and row < self.height) and\n (col >= 0 and col < self.width))",
"def _inside_op_range(self, idx):\n\n if idx < self._parameters.op_range[0]:\n return False\n return (self._parameters.op_range[1] < 0 or\n idx <= self._parameters.op_range[1])",
"def check_bounds (position, size):\n \n for item in position:\n # checks whether item is out of bounds\n if item < 0 or item >= size:\n return False\n return True",
"def _in_bounds(self, x, y):\r\n return 0 <= x < 8 and 0 <= y < 8",
"def pos_within_bounds(position):\n if type(position[0]) == int:\n row,col = position\n else:\n col,row = position\n \n if not 1<=row<=8:\n return False\n if not 65<=ord(col)<=72:\n return False\n return True",
"def is_valid(array, index):\n row, column = index\n return 0 <= row < len(array) and 0 <= column < len(array[row])",
"def in_range(table, index):\n if index > len(table):\n print(\"Error: index out of range\")\n return False\n if index < 0:\n print(\"Error: negative index\")\n return False\n return True",
"def out_of_bounds(self):\n return self.rect.right <= 0 or self.rect.left >= self.screen_rect.width",
"def __is_valid(self, pos):\n return 0 <= pos[0] < self._n and 0 <= pos[1] < self._n",
"def validate_position(position: Tuple[int, int], bound: int) -> bool:\n if position[0] < 0 or position[0] >= bound:\n return False\n if position[1] < 0 or position[1] >= bound:\n return False\n return True",
"def out_of_bounds(self):\n return self._parms.get(\"out_of_bounds\")",
"def occupied(self, (xIndex, yIndex)):\n return xIndex < 0 or yIndex < 0 or \\\n xIndex >= self.xN or yIndex >= self.yN or \\\n self.grid[xIndex][yIndex]",
"def in_bounds(self, x, y):\n return x >= 0 and x < 8 and y >= 0 and y < 8",
"def in_bounds(self, location: tuple) -> bool:\n return 0 <= min(location) and max(location) <= 7",
"def is_in_bounds(pos):\n return PLAYFIELD_PADDING[0] < pos[0] < PLAYFIELD_PADDING[0] +\\\n BLOCK_NUM_WIDTH * Block.WIDTH and PLAYFIELD_PADDING[1] < pos[1] <\\\n PLAYFIELD_PADDING[1] + BLOCK_NUM_HEIGHT * Block.HEIGHT",
"def has_bounds(self):\r\n bounds = self.bounds\r\n if bounds in (None, [None, None]):\r\n return False\r\n for i in xrange(bounds[0]):\r\n if bounds[0][i] is not None and bounds[0][i] > -np.inf:\r\n return True\r\n for i in xrange(bounds[1]):\r\n if bounds[1][i] is not None and bounds[1][i] < np.inf:\r\n return True\r\n return False",
"def check_coord_in_range(self, x, y):\n return 0 <= x < self.cols and 0 <= y < self.lines",
"def _validate_index(self, index):\r\n\t\tvalid_index = index is int(index) and index >= 0 and index < self._size\r\n\t\tif not valid_index:\r\n\t\t\traise IndexError()",
"def _is_valid_index(self, index):\n row = index.row()\n column = index.column()\n return not (row < 0 or column < 0 or\n row >= len(self.view_list) or column > 4 or\n index == QModelIndex())",
"def contains(self, x):\n return (isinstance(x, int) and x >= 0 and x < self._dim)",
"def isRangeValid(self) -> bool:\n ...",
"def in_range(x, y):\n if (x < 0 or x > width or y < 0 or y > length):\n return False\n else:\n return True",
"def check(self):\n self.lower_bound(5e-4)\n self.upper_bound(5e2)"
]
| [
"0.7743756",
"0.7266645",
"0.71771103",
"0.712753",
"0.7123661",
"0.7049638",
"0.69962424",
"0.6915459",
"0.68826395",
"0.68442345",
"0.6773398",
"0.67721176",
"0.67416173",
"0.6729419",
"0.67009795",
"0.668903",
"0.6605921",
"0.6588083",
"0.65783405",
"0.6572464",
"0.6539587",
"0.65184957",
"0.65113187",
"0.64942384",
"0.64715743",
"0.6464079",
"0.64495647",
"0.6445796",
"0.6392327",
"0.6377109"
]
| 0.8669091 | 0 |
Generates the path for the file with the given sequence index. If the sequence has mutable bounds, the index can extend the sequence consecutively (i.e., by one index) above or below the current bounds. | def gen_path(self, index):
if self.immutable_bounds:
if not self.check_bounds(index):
raise DataFileSequenceError(
"Index %d out of bounds [%d, %d]"
% (index, self.lower_bound, self.upper_bound)
)
elif index < 0:
raise DataFileSequenceError("Indices must be nonnegative")
elif index == self.lower_bound - 1:
self._lower_bound = index
elif index == self.upper_bound + 1:
self._upper_bound = index
elif not self.check_bounds(index):
raise DataFileSequenceError(
"Index %d out of bounds [%d, %d]; mutable sequences can be "
"extended at most one index above/below."
% (index, self.lower_bound, self.upper_bound)
)
return self.sequence % index | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_seq_filename(self):\n fnd = self._get_session_dir()\n self.seq_number += 1\n fn = os.path.join(fnd, 'S%4.4d.tif' % self.seq_number)\n return fn",
"def filenum_index( # noqa: F811\n index: Optional[int], seq: \"FileSequence\"\n) -> Optional[int]:\n if index is None:\n return None\n if index < 0:\n return index\n else:\n index -= seq.start\n # if the requested index was a positive number, and adjusting for the start\n # number throws it into the negatives, then we know the request is out of\n # range of the sequence, so we need to raise an error here to avoid returning\n # a file from the end of the sequence because of the negative roll over.\n if index < 0:\n raise IndexError(\"Frame Out of range\")\n else:\n return index",
"def _image_path_from_index(self, index):\n # Example image path for index=119993:\n # images/train2014/COCO_train2014_000000119993.jpg\n file_name = (str(index).zfill(12) + '.jpg')\n image_path = os.path.join(self._root_dir, self._data_name, file_name)\n assert os.path.exists(image_path), 'Path does not exist: {}'.format(image_path)\n return image_path",
"def image_path_from_index(self, index):\n raise NotImplementedError",
"def image_path_from_index(self, index):\n # set the prefix\n if self._image_set == 'test':\n prefix = 'testing/image_2'\n else:\n prefix = 'training/image_2'\n\n image_path = os.path.join(self._data_path, prefix,\n index + self._image_ext)\n assert os.path.exists(image_path), \\\n 'Path does not exist: {}'.format(image_path)\n return image_path",
"def image_path_from_index(self, index):\n assert self.annotation_set is not None, \"Dataset not initialized\"\n name = self.annotation_set[index] # e.g. 'set00_V010_img00577.xml'\n set_name, video_name, xml_name = name.split('_')\n img_name = os.path.splitext(xml_name)[0] + self.extension\n img_path = os.path.join(self.data_path, set_name, video_name, img_name)\n assert os.path.exists(img_path), 'Path does not exist: {}'.format(img_path)\n\n return img_path",
"def get_seq_path(frame_path):\n return os.path.dirname(frame_path)",
"def image_path_from_index(self, index):\n for ext in self._image_ext:\n image_path = os.path.join(self._data_path, 'Images',\n index + ext)\n if os.path.exists(image_path):\n break\n assert os.path.exists(image_path), \\\n 'Path does not exist: {}'.format(image_path)\n\treturn image_path",
"def real_image_path(self, index):\r\n\r\n index = index.replace(\"\\\\\", \"/\")\r\n\r\n if not os.path.exists(index):\r\n image_file = os.path.join(self.prefix_path, index)\r\n else:\r\n image_file = index\r\n if not image_file.endswith('.jpg'):\r\n image_file = image_file + '.jpg'\r\n assert os.path.exists(\r\n image_file), 'Path does not exist: {}'.format(image_file)\r\n return image_file",
"def key_sequence_to_path(sequence: List[str]):\n return Path.rootPath() + \".\".join(sequence)",
"def image_path_from_index(self, index):\n image_path = os.path.join(self._data_path, 'JPEGImages',\n index + self._image_ext)\n assert os.path.exists(image_path), \\\n 'Path does not exist: {}'.format(image_path)\n return image_path",
"def image_path_from_index(self, index):\n image_path = os.path.join(self._data_path, index)\n assert os.path.exists(image_path), 'path does not exist: {}'.format(image_path)\n return image_path",
"def generate_index(file_name):\n count = num_lines(file_name)\n index = random.randint(0, count - 1)\n return index",
"def getcodetofilename(index_file_parameters,bamfile_id):\n index_file=index_file_parameters['index']\n relative_flg=index_file_parameters['relative']\n \n index_dict=dict([(lntxt.rstrip().split(',')[0],lntxt.rstrip().split(',')[1]) for lntxt in open(index_file).readlines()])\n \n if bamfile_id not in index_dict:\n return ''\n \n if relative_flg==0:\n return index_dict[bamfile_id]\n else:\n relative_dir='/'.join(index_file.split('/')[0:-1])\n return '%s/%s'%(relative_dir,index_dict[bamfile_id])",
"def image_path_from_index(self, index):\n image_path = os.path.join(self._data_path, 'Images', index + self._image_ext)\n assert os.path.exists(image_path), 'Path does not exist: {}'.format(image_path)\n return image_path",
"def get_sequential_file_name(self,file_path,file_extension):\n path_no_ext = file_path.replace('.'+file_extension,'')\n index = self._get_newest_file_index(file_path,file_extension)\n \n return '{0}_{1}.{2}'.format(path_no_ext,index+1,file_extension)",
"def image_path_from_index(self, index):\n assert self.image_set_index is not None, \"Dataset not initialized\"\n name = self.image_set_index[index]\n image_file = os.path.join(self.image_dir, 'images', name)\n assert os.path.isfile(image_file), 'Path does not exist: {}'.format(image_file)\n return image_file",
"def _out_fn(self, split_index):\n if split_index > 999:\n raise ValueError(\"Too many splitted files to generate: number \" +\n \"of splitted files exceed 1000.\")\n name = self.out_format.format(split_index)\n return op.join(self.out_dir, name)",
"def generateFullPath(nSeq, nstates, prng, weights, bt, isNormalized):\n rateMtxQ = ReversibleRateMtx.ReversibleRateMtx(nstates, weights)\n if isNormalized:\n Q = rateMtxQ.getNormalizedRateMtx()\n else:\n Q = rateMtxQ.getRateMtx()\n \n stationary = rateMtxQ.getStationaryDist()\n ## sample the initial states for each sequence\n initialStateSeq = prng.choice(nstates, nSeq, replace=True, p=stationary)\n ## given the initial state, we sample the full path and save each sequence in a list\n seqList = []\n simulator = SimuSeq.ForwardSimulation(bt, Q)\n for i in range(0, nSeq):\n seq = simulator.sampleStateTimeSeq(prng, initialStateSeq[i])\n seqList.append(seq)\n ## get the keys for each sequence seqList[0].keys() \n return seqList",
"def make_file_path_fromidx(digits=1, levels=1):\n if (\n not isinstance(digits, int)\n or not isinstance(levels, int)\n or digits < 1\n or levels < 1\n or digits < levels\n ):\n raise Exception(\n f\"digits and levels must be integers larger than 0 and digits must not be smaller than \"\n \"levels, got {digits}/{levels}\"\n )\n\n def file_path_fromidx(doc=None, idx=None):\n if idx is None or not isinstance(idx, int) or idx < 0:\n raise Exception(\"Index must be an integer >= 0\")\n per = int(digits / levels)\n asstr = str(idx)\n digs = max(0, digits - len(asstr))\n tmp = \"0\" * digs\n tmp += str(idx)\n path = \"\"\n fromdigit = len(tmp) - per\n todigit = len(tmp)\n for lvl in range(levels - 1):\n path = tmp[fromdigit:todigit] + path\n # print(\"per=\", per, \"from=\", fromdigit, \"to=\", todigit, \"sec=\", tmp[fromdigit:todigit])\n path = \"/\" + path\n fromdigit = fromdigit - per\n todigit = todigit - per\n path = tmp[:todigit] + path\n return path\n\n return file_path_fromidx",
"def generate_name(file_name, index):\n name = ''\n with open(file_name) as names:\n for i, line in enumerate(names):\n if i == index:\n name += line.split(None, 1)[0]\n return name",
"def GetSequentialFileName(base_name):\n name, ext = os.path.splitext(base_name)\n assert ext == '', 'base_name cannot contain file extension.'\n index = 0\n while True:\n output_name = '%s_%03d' % (name, index)\n if not glob.glob(output_name + '.*'):\n break\n index = index + 1\n return output_name",
"def image_path_from_index(self, index):\n image_path = os.path.join(self.cfg.file_path, 'JPEGImages',\n index + self.cfg.image_ext)\n assert os.path.exists(image_path), \\\n 'Path does not exist: {}'.format(image_path)\n return image_path",
"def generate_fasta(sequences, fasta_path):\n\n with open(fasta_path, 'w+') as f:\n for i in range(len(sequences)):\n f.write('>seq '+str(i))\n f.write('\\n')\n f.write(sequences[i])\n f.write('\\n')",
"def output_path_generator(self, version, file_format):\n # generate the data needed\n # the output path\n file_name_buffer = []\n template_kwargs = {}\n\n # if this is a shot related task set it to shots resolution\n include_project_code = True\n if version.nice_name.startswith(version.task.project.code):\n include_project_code = False\n\n version_sig_name = self.get_significant_name(\n version,\n include_project_code=include_project_code\n )\n\n file_name_buffer.append(\n '%(version_sig_name)s.001.%(format)s'\n )\n template_kwargs.update({\n 'version_sig_name': version_sig_name,\n 'format': file_format\n })\n\n output_file_name = ''.join(file_name_buffer) % template_kwargs\n\n # check if it is a stereo comp\n # if it is enable separate view rendering\n output_file_path = os.path.join(\n version.absolute_path,\n 'Outputs',\n version.take_name,\n # 'v%03d' % version.version_number,\n file_format\n )\n\n # create the dir\n try:\n os.makedirs(output_file_path)\n except OSError:\n # path exists\n pass\n\n output_file_full_path = os.path.join(\n output_file_path,\n output_file_name\n ).replace('\\\\', '/')\n\n # make the path Project: relative\n output_file_full_path = 'Project:%s' % os.path.relpath(\n output_file_full_path,\n os.path.dirname(version.absolute_path)\n )\n\n # set the output path\n return '%s' % os.path.normpath(\n output_file_full_path\n ).encode()",
"def seek(self, index: int, /) -> str:\n self.index = index\n return self.current",
"def image_path_at(self, i):\n return self.image_path_from_index(self._image_index[i])",
"def image_path_at(self, i):\n return self.image_path_from_index(self._image_index[i])",
"def get_output_filename(item: str, root: str, i: int) -> str:\n element_split = item.split(\"/\")\n item, ext = element_split[-1].split(\".\")\n if i < 0:\n return f\"{root}/{'/'.join(element_split[:-1])}/{item}.{ext}\"\n else:\n return f\"{root}/{'/'.join(element_split[:-1])}/{item}_aug{i}.{ext}\"",
"def image_path_at(self, i):\n image_path = os.path.join(self._image_path, self._image_index[i])\n assert os.path.exists(image_path), \\\n 'Path does not exist: {}'.format(image_path)\n return image_path"
]
| [
"0.5923429",
"0.58115065",
"0.5772479",
"0.5749777",
"0.5673819",
"0.5670448",
"0.561562",
"0.560478",
"0.55820173",
"0.55788344",
"0.5571164",
"0.5543174",
"0.55321175",
"0.5524482",
"0.55151856",
"0.55108917",
"0.5443332",
"0.54330754",
"0.5406386",
"0.54018956",
"0.53976077",
"0.53908324",
"0.5389464",
"0.53792095",
"0.53506535",
"0.53237224",
"0.52891624",
"0.52891624",
"0.5277023",
"0.5271737"
]
| 0.8406517 | 0 |
Adds the records in the JSON file to the container. | def add_json(self, json_path, record_cls=None):
rc = record_cls or self._ELE_CLS
self.add_container(self.from_json(json_path, record_cls=rc))
return len(self) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_record():\n if 'json' not in request.files:\n # use an HTML record that seems appropriate\n return \"no json file in the request!\", 400\n try:\n # can't assume that JSON file is valid\n _record = json.loads(request.files['json'].read())\n except ValueError:\n return \"failed to parse JSON file correctly!\", 400\n if type(_record) is not dict or 'name' not in _record:\n return \"expecting a dictionary with identifier, post failed!\", 400\n with RECORD_LOCK:\n # just check if the name already exists in the global RECORD list\n if len([r for r in RECORDS if r.get('name') == _record['name']]):\n return \"already in the records!\", 409\n RECORDS.append(_record)\n return \"OK\"",
"def add_records(self, data: dict, execution_context: dict):",
"def add_or_update(self, json_records):\n cursor = self._conn.cursor()\n cursor.execute(\"BEGIN TRANSACTION\")\n for image_record in json_records:\n self._add_or_update_one(cursor, image_record)\n cursor.execute(\"COMMIT TRANSACTION\")",
"def append(self, json):\n data = simplejson.loads(json)\n data['_time'] = int(time.time())\n self.logger.log_saving(json)\n self.json_collection.insert(data)",
"def add_record(self):\n if not self.record_exists(self.args.date):\n record = self.create_record()\n self.records.append(record)\n self.write_json_file(self.records_file, self.records)\n return True\n return False",
"def load_json(self, file):\n with open(file, 'r', encoding=\"utf8\") as f:\n self.extend(json.load(f))",
"def write_to_json(self):\r\n logging.info('Writing records to JSON')\r\n with open(self.backup, 'w') as fp:\r\n json.dump(self.record, fp)\r\n logging.info(\"Finished writing records to JSON\")",
"def add_record_container(self, container: BaseRecordContainer) -> None:\n self.record_containers.append(container)",
"def appendJson(filepath,entry):\n with open(filepath,mode='r', encoding='utf-8') as f:\n jconfig = json.load(f)\n\n with open(filepath,mode='w', encoding='utf-8') as feedsjson:\n jconfig.append(entry)\n print(json.dumps(jconfig,indent=2))\n json.dump(jconfig,feedsjson)",
"def addJSON(file: str, df, creat: bool):\n if creat is False :\n with open(file) as train_file:\n dict_train = json.load(train_file)\n data = pd.read_json(dict_train, orient=\"records\")\n df = pd.concat([data, df])\n \n js = df.to_json(orient='records').replace(\n \"[\\\\\\\"[\", '').replace(\"]\\\\\\\"]\", '')\n \n with open(file, 'w', encoding='utf8') as outfile:\n json.dump(js, outfile, ensure_ascii=False, indent=4)",
"def set_data_from_json(self, filename):\n with open(filename, 'r') as f:\n self.data = json.load(f, object_pairs_hook=OrderedDict)",
"def bulk_insert(cls, path=\"data.json\"):\n from json import load\n from codecs import open\n \n lists = load(open(path, \"r\", \"utf8\"))\n for lst in lists:\n ing = cls(content = lst)\n ing.put()",
"def read_json(self, json_files):\n self.file_access.write_log(\"Attempting to read the json files {}\".format(json_files))\n for i in json_files:\n self.constituency.append(self.file_access.read_election_json(i))\n self.file_access.write_log(\"The {} json file has been added to the Constituency object\".format(i))",
"def add_to_data_json(self, json_to_add=None, **kwargs):\n data = json_continuously_loader(self.data)\n if json_to_add:\n json_to_dict = json_continuously_loader(json_to_add)\n # Merge dicts:\n data.update(json_to_dict)\n self.data = data\n\n if not json_to_add:\n for key, value in kwargs.items():\n data[key] = value\n # Update the Rqueue object\n self.data = data\n\n self.save()",
"def append_to_db(cls, file, headers):\n # if file already exists\n if cls.integrity_check_before_append(file, headers):\n with open(file, 'a') as f:\n start = time.time()\n for i in headers:\n f.write(json.dumps(i) + '\\n')\n stop = time.time() - start\n print(stop, 'sec')\n\n else:\n print(\"something went wrong\")\n return",
"def save(self, filename):\n with open(filename, \"w\") as f:\n m = {\n \"order\": self.order,\n \"pad\": self.pad,\n \"records\": {str(k): v for k, v in self.records.items()}\n }\n json.dump(m, f)",
"def add_record(self, record: Dict, src_name: SourceName) -> None:\n concept_id = record[\"concept_id\"]\n record[\"src_name\"] = src_name.value\n label_and_type = f\"{concept_id.lower()}##identity\"\n record[\"label_and_type\"] = label_and_type\n record[\"item_type\"] = \"identity\"\n try:\n self.batch.put_item(Item=record)\n except ClientError as e:\n logger.error(\n \"boto3 client error on add_record for \"\n f\"{concept_id}: {e.response['Error']['Message']}\"\n )\n for attr_type, item_type in ITEM_TYPES.items():\n if attr_type in record:\n value = record.get(attr_type)\n if not value:\n continue\n if isinstance(value, str):\n items = [value.lower()]\n else:\n items = {item.lower() for item in value}\n for item in items:\n self._add_ref_record(\n item, record[\"concept_id\"], item_type, src_name\n )",
"def __init__(self, fileName):\n self.recordDict = {}\n for line in open(fileName, 'r') :\n sipRecord = json.loads(line)\n self.recordDict[sipRecord['addressOfRecord']] = line",
"def load(self):\n if not self.exist:\n self.create()\n\n with open(self.file_path, encoding=Config.ENCODING) as file:\n self.data = json.load(file)",
"def add_from_uuid_list(self):\n\n uuids = self._read_file()\n if not uuids:\n return\n\n for uuid in uuids:\n uuid = uuid.split('\\n')[0]\n\n # Checks if lenght of the uuid is correct\n if not check_uuid_authenticity(uuid):\n self.report.add('Invalid uuid lenght.')\n continue\n \n self.add_record.push_record_by_uuid(self.global_counters, uuid)\n return",
"def populate_from_json(db: Session, model: DatabaseModel, json_file: str):\n data_exists = db.query(model).first()\n if data_exists:\n return\n with open(json_file, \"r\") as file:\n data = json.loads(file.read())\n for obj in data:\n db.add(model(**obj))\n db.commit()",
"def feed_records(self):\n if not self.stats_file:\n return\n\n with open(self.stats_file) as fh:\n reader = reverse_file(fh)\n for line in reader:\n if line is None:\n return\n if not line:\n continue\n\n try:\n js = json.loads(line)\n except Exception as e:\n continue\n\n yield js",
"def append_entry(host, email, password, mailbox):\n\n new_entry = {\n\n 'host': host,\n 'email': email,\n 'password': password,\n 'mailbox': mailbox\n }\n\n with open('data.json') as f:\n data = load(f)\n\n data[\"items\"].append(new_entry)\n\n with open('data.json', 'w') as outfile:\n dump(data, outfile, indent=4)\n\n print('\\nNew Entry Added Successfully!')",
"def test_add_multiple_records_to_zone(self):\n zone = Zone('test.example.com')\n recordA = Record(zone, 'test-recorda', {'type': 'A', 'ttl': 300})\n recordB = Record(zone, 'test-recordb', {'type': 'A', 'ttl': 300})\n zone.add_record(recordA)\n zone.add_record(recordB)\n self.assertDictEqual(zone.records, {\n 'test-recorda': recordA,\n 'test-recordb': recordB,\n })",
"def load_json(file_name_template, record_id):\n with open(file_name_template % (record_id)) as f:\n json_data = json.load(f)\n return json_data",
"def put_record(self, tag, json_str):\n a = 0\n while a < 2000:\n if a % 100 == 0 and a != 0:\n logger.info(\"A batch of 100 simple json records have been sent\")\n self.firehose_client.put_record(DeliveryStreamName=self.get_stream_name(tag),\n Record={\n 'Data': json_str\n }\n )\n a = a + 1\n logger.info(\"Records were placed successfully!!\")",
"def add_from_json(self, location):\r\n with open(location) as file:\r\n data = json.load(file)\r\n for metros in data[\"metros\"]:\r\n self.vertices[metros[\"code\"]] = Vertex(metros)\r\n for routes in data[\"routes\"]:\r\n start = routes[\"ports\"][0]\r\n destination = routes[\"ports\"][1]\r\n distance = routes[\"distance\"]\r\n self.edges[start].append(Edge(distance, start, destination))\r\n self.edges[destination].append(Edge(distance, destination, start))",
"def import_file(self):\n self.inputdata = json.load(self.infile)\n self.outputdata = self.inputdata\n self.logger.info('Json file Loaded')\n self.logger.debug(u'JSON:{d}'.format(d=self.inputdata))",
"def data(self):\n\n for i in json_parsed:\n name = i['name']\n number = i['number']\n address=i['address']\n banking=i['banking']\n position=i['position']\n latitude=position['lat']\n longitude=position['lng']\n bike_stands=i['bike_stands']\n status=i['status']\n insert_data(name, number, address, banking, latitude, longitude, bike_stands, status)",
"def add_elasticsearch_records(self, data_list):\n actions = [self.create_data_record(data_dict) for data_dict in data_list]\n self.actions_buffer.extend(actions)"
]
| [
"0.65498114",
"0.64974004",
"0.64236826",
"0.6339578",
"0.6130468",
"0.59746075",
"0.59578854",
"0.5907128",
"0.5892819",
"0.58659583",
"0.58485895",
"0.5736148",
"0.57177305",
"0.5664405",
"0.5654199",
"0.5602515",
"0.559988",
"0.55985063",
"0.5575644",
"0.5556617",
"0.5543048",
"0.55264884",
"0.55219305",
"0.5495729",
"0.549493",
"0.54700935",
"0.5456006",
"0.54523706",
"0.543911",
"0.5427207"
]
| 0.69737047 | 0 |
Builds a lookup dictionary indexed by `field` whose values are lists of indices of the records whose `field` attribute matches the corresponding key. | def build_lookup(self, field):
lud = defaultdict(list)
for i, r in enumerate(self.__elements__):
lud[getattr(r, field)].append(i)
return dict(lud) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def lookups_for_field(field):\n field_class = type(field)\n return [\n (lookup, LOOKUP_TYPES[lookup]) \\\n for lookup in FIELDS_TO_LOOKUPS[INV_FIELD_TYPES[field_class]]\n ]",
"def build_lookup_index(item: Resource, index_string: str):\n index = defaultdict(dict)\n if index_string is None:\n return index\n\n pattern = r'([\\w]+)\\[(\\d+)\\]'\n for entry in index_string.split(';'):\n key, uriref = entry.split('=')\n m = re.search(pattern, key)\n attr = m[1]\n i = int(m[2])\n prop = getattr(item, attr)\n try:\n index[attr][i] = prop[URIRef(item.uri + uriref)]\n except IndexError:\n # need to create an object with that URI\n obj = prop.obj_class(uri=URIRef(item.uri + uriref))\n # TODO: what if i > 0?\n prop.values.append(obj)\n index[attr][i] = obj\n return index",
"def index_records(vr):\n return collections.OrderedDict((record2key(rec), clean_sample_index(rec))\n for rec in vr)",
"def index(self, field):\r\n\r\n try:\r\n index = self._field_names.index(unicode(field))\r\n except ValueError:\r\n raise KeyError(\"Field list has no field with name '%s'\" % unicode(field))\r\n\r\n return index",
"def mk_id_lookups(self):\n id_lookups = {}\n for ns in self.ddef.keys():\n id_lookups[ns] = self.mk_id_lookup(ns)\n return id_lookups",
"def build_subsets(self, field):\n sss = defaultdict(list)\n for r in self.__elements__:\n sss[getattr(r, field)].append(r)\n return dict(sss)",
"def _genLookupDict(self, tagged):\n lookup_dict = {}\n for token,pos in tagged:\n if pos not in lookup_dict.keys():\n lookup_dict[pos] = []\n lookup_dict[pos].append(token)\n return lookup_dict",
"def gen_search_index(record, link_content=None):\n document_id = record[\"document_id\"]\n record_index = {\n \"document_name\": record[\"document_name\"],\n \"document_type\": record[\"document_type\"].__name__,\n \"content\": record[\"content\"],\n \"authors\": record[\"authors\"],\n \"publish_date\": record[\"publish_date\"],\n \"link_content\": link_content,\n }\n return (document_id, record_index)",
"def sorted_arr_lookup_ix(karr, vals, ix_table, k1, k2):\n # print(k1, k2)\n mx_index = ix_table[-1, 0]\n ix_k1 = lookup_ix(ix_table, k1)\n if k1 == mx_index:\n ix_k2 = len(karr)\n else:\n ix_k2 = lookup_ix(ix_table, k1 + 1, check=False)\n\n c2 = karr[ix_k1:ix_k2, 1]\n ixb1 = np.searchsorted(c2, k2)\n # ixb2 = np.searchsorted(c2, k2 + 1)\n\n ix = ix_k1 + ixb1\n k1_, k2_ = karr[ix]\n\n if (k1_ != k1) or (k2_ != k2):\n print('k1', k1, 'k2', k2)\n print(k1_, k2_)\n raise KeyError(\"Array doesn't contain keys\")\n return vals[ix]",
"def _find_ids(self,\r\n data_list,\r\n prop,\r\n lookup_index,\r\n lookup_doc_type,\r\n lookup_field):\r\n lg = logging.getLogger(\"%s.%s\" % (self.ln, inspect.stack()[0][3]))\r\n lg.setLevel(self.log_level)\r\n\r\n rtn_list = []\r\n first_time = IsFirst()\r\n for item in data_list:\r\n # the Dot class will retive and set dictionary values via dot\r\n # notation\r\n val = Dot(item).get(prop)\r\n if val.startswith(\"#;lookup#;\"):\r\n lookup_val = val.replace(\"#;lookup#;\", \"\")\r\n lookup_obj = self.get_item(lookup_val, lookup_field)\r\n if first_time.first():\r\n lg.debug(\" lookup_obj:\\n%s\", pp.pformat(lookup_obj))\r\n if lookup_obj:\r\n rtn_list.append(Dot(item).set(prop, lookup_obj['_id']))\r\n return rtn_list",
"def landfire_lookup(bps_codes, fieldname):\n\n result = dict()\n\n with open(stsim_manager.lookup_file_path['Landfire'], 'r') as f:\n\n lookup = csv.DictReader(f)\n for table_row in lookup:\n read_bps_code = table_row['BPS_MODEL']\n read_bps_code = '0' + read_bps_code if len(read_bps_code) < 7 else read_bps_code\n try:\n if int(read_bps_code) in [int(code) for code in bps_codes]:\n value = table_row[fieldname]\n result[read_bps_code] = value\n except:\n continue # skip all the non-int parseable entries\n\n return result",
"def indexes(self, fields):\r\n\r\n indexes = [self.index(field) for field in fields]\r\n\r\n return tuple(indexes)",
"def _make_field_map(fields):\n field_map = {}\n for field in fields:\n if field.name in field_map:\n raise SchemaParseException(\n 'Duplicate record field name %r.' % field.name)\n field_map[field.name] = field\n return field_map",
"def get_reverse_movie_lookup(lookups):\n return {idx: name for name, idx in lookups.movie_to_idx.items()}",
"def _dynamic_dict(example, src_field, tgt_field):\n\n src = src_field.tokenize(example[\"src\"])\n # make a small vocab containing just the tokens in the source sequence\n unk = src_field.unk_token\n pad = src_field.pad_token\n src_ex_vocab = Vocab(Counter(src), specials=[unk, pad])\n unk_idx = src_ex_vocab.stoi[unk]\n # Map source tokens to indices in the dynamic dict.\n src_map = torch.LongTensor([src_ex_vocab.stoi[w] for w in src])\n example[\"src_map\"] = src_map\n example[\"src_ex_vocab\"] = src_ex_vocab\n\n if \"tgt\" in example:\n tgt = tgt_field.tokenize(example[\"tgt\"])\n mask = torch.LongTensor(\n [unk_idx] + [src_ex_vocab.stoi[w] for w in tgt] + [unk_idx])\n example[\"alignment\"] = mask\n return src_ex_vocab, example",
"def field_index(self, field):\r\n idx = self.fields.index(field)\r\n if self.index:\r\n idx = self.index[idx]\r\n return idx",
"def hash_fieldlist(cls):\n for field in cls.fieldlist:\n cls.fieldhash[field.id] = field",
"def key_by(self, field: str) -> B[B, E]:\n pass",
"def lookup(self, key):",
"def lookupFields(self, index):\n fields = self.indexDict[index]\n if isinstance(fields,types.ListType):\n return fields\n else:\n return [fields]",
"def lookup_query(self, query):\n return { term: self.index[term] for term in query.split('/r') if term in self.index }",
"def build_match_clause(field, string):\r\n answer = {}\r\n tmp = {}\r\n tmp[field] = string\r\n answer['match'] = tmp\r\n return answer",
"def build_count_match_clause(field, string):\r\n answer = {}\r\n tmp = {}\r\n if SelectExtractors.SelectExtractors._parseXSDIntegerLiteral(string):\r\n tmp[field] = SelectExtractors.SelectExtractors._parseXSDIntegerLiteral(string) # hacky, I know.\r\n else:\r\n tmp[field] = string\r\n answer['match'] = tmp\r\n return answer",
"def get_indexes_for_key (self,key):\r\n\r\n if self.using_database:\r\n aprint('GETTING INDEXES FOR KEY')\r\n value_tuple = (notebookname,key,)\r\n db_cursor.execute(\"SELECT note_index\"\r\n +\" FROM keys_to_indexes\"\r\n +\" WHERE notebook=? and keyword=?;\",\r\n value_tuple)\r\n\r\n fetched = db_cursor.fetchall()\r\n if fetched:\r\n return {index[0].strip() for index in fetched}\r\n return set()\r\n\r\n\r\n return self.key_dict[str(key)]",
"def build_keyset(self, field):\n keys = set()\n for r in self.__elements__:\n keys.add(getattr(r, field))\n return list(keys)",
"def convert_field_name_to_indexes(field_name):\n rows = '87654321'\n columns = 'abcdefgh'\n row_index = column_index = None\n\n row_name = field_name[0]\n for i in range(8):\n if rows[i] == row_name:\n row_index = i\n\n column_name = field_name[1]\n for i in range(8):\n if columns[i] == column_name:\n column_index = i\n\n return row_index, column_index",
"def bucket_indexof(table, key):",
"def build_index(text: Iterable) -> Dict[str, List[Tuple[int, int]]]:\n index = defaultdict(list)\n for line_no, line in enumerate(text, 1):\n for match in WORD_RE.finditer(line):\n word = match.group()\n column_no = match.start() + 1\n location = (line_no, column_no)\n index[word].append(location)\n return index",
"def indices(self):\n if self._indices is None:\n i = []\n\n # TODO: this is not right for multi-column keys\n # TODO: new style indexes\n\n global_name = '^DD(%s,0,\"IX\",\"0\")' % self.fileid\n prefix = '^DD(%s,0,\"IX\",' % self.fileid\n while 1:\n global_name = M.mexec('set s0=$query(%s)' % global_name, M.INOUT(\"\"))[0]\n if not global_name or not global_name.startswith(prefix):\n break\n suffix = global_name[len(prefix):-1]\n parts = suffix.split(\",\")\n idx_name = parts[0][1:-1]\n idx_table = parts[1]\n idx_columns = parts[2:]\n index = Index(idx_name, idx_table, idx_columns)\n i.append(index)\n\n # A second list, gives indices for a field\n columns = {}\n for idx in i:\n for c in idx.columns:\n columns[c] = 1\n\n # Now trawl the listed columns in the data dictionary, and load their\n # cross references.\n cr_names = {}\n for c in columns.keys():\n idx_root = M.Globals[\"^DD\"][self.fileid][c][1]\n if not idx_root[0].exists():\n continue\n for cr_id, val in idx_root.keys_with_decendants():\n if float(cr_id) > 0:\n cr_header = idx_root[cr_id][0].value\n parts = cr_header.split(\"^\")\n if len(parts) == 2 and parts[1]: # if more than 2 parts, assume MUMPs trigger\n f = cr_names.get(parts[1], list())\n f.append(c)\n cr_names[parts[1]] = f\n\n # Now, just delete items from the index list if they are not in cr_names\n self._indices = []\n for index in i:\n cr = cr_names.get(index.name)\n if cr:\n # verify columns - lots of errors in real systems\n if len(cr) == len(index.columns):\n invalid = False\n for c in cr:\n if c not in index.columns:\n invalid = True\n continue\n if not invalid:\n self._indices.append(index)\n\n return self._indices",
"def construct_indu_index_mapping(df):\n industries_to_index = {}\n industries = df[\"ggroup\"].dropna().astype(int).unique()\n industries = industries.tolist()\n quarters = (df[\"year\"].astype(\"str\") + \" q\" + df[\"quarter\"].astype(\"str\")).unique()\n for i in range(df.shape[0]):\n row = df.iloc[i, :]\n if math.isnan(row[\"ggroup\"]):\n continue\n industries_to_index[int(row[\"ggroup\"])] = industries_to_index.get(int(row[\"ggroup\"]), set())\n industries_to_index[int(row[\"ggroup\"])].add(i)\n return industries_to_index"
]
| [
"0.64730227",
"0.5716295",
"0.551497",
"0.5492499",
"0.5463954",
"0.53340346",
"0.53127265",
"0.53011155",
"0.5170127",
"0.5150985",
"0.5149047",
"0.5119388",
"0.51165813",
"0.5076202",
"0.50119376",
"0.4994829",
"0.49922776",
"0.49909088",
"0.49796864",
"0.4969731",
"0.4939102",
"0.49026904",
"0.48967612",
"0.48853227",
"0.48828137",
"0.48662654",
"0.48573485",
"0.4827416",
"0.48161227",
"0.4812762"
]
| 0.7838401 | 0 |
Builds a dictionary indexed by `field` whose values are lists of records whose `field` attribute matches the corresponding key. | def build_subsets(self, field):
sss = defaultdict(list)
for r in self.__elements__:
sss[getattr(r, field)].append(r)
return dict(sss) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_lookup(self, field):\n lud = defaultdict(list)\n for i, r in enumerate(self.__elements__):\n lud[getattr(r, field)].append(i)\n return dict(lud)",
"def record_to_dict(f_record, key_name: str):\n return_dict = {}\n for record in f_record:\n key = ''\n for f, v in record.items():\n if f == key_name:\n key = v\n else:\n try:\n return_dict[key].update({f: v})\n except KeyError:\n return_dict[key] = {f: v}\n return return_dict",
"def _make_field_map(fields):\n field_map = {}\n for field in fields:\n if field.name in field_map:\n raise SchemaParseException(\n 'Duplicate record field name %r.' % field.name)\n field_map[field.name] = field\n return field_map",
"def group_by(self, field):\n # This could be cached for performance, but for clarity it is not\n result = {}\n for row in self:\n if field == 'month':\n # FIXME - Hack!\n # - the \"month\" attribute of the row is intended for string\n # pattern matching, but the rowset wants to keep the original\n # objects intact as much as possible\n key = row.date.replace(day=1)\n else:\n key = row._getvalue(field)\n\n if key is None:\n key = 'unknown'\n\n if key not in result:\n result[key] = RowSet()\n\n result[key].append(row)\n return result",
"def fieldToFilter(fields):\n if len(fields) == 0:\n return dict()\n return dict(\n pdir_keywords=dict(\n query=[\"%s:%s\" % (k, v) for (k, v) in fields.items()],\n operator=\"and\",\n )\n )",
"def parse_record(self, record):\n data = defaultdict(list)\n\n for trait, parser in self.parsers:\n for field_name in self.search_fields:\n field = record.get(field_name)\n if not field:\n continue\n parsed = parser.parse(field, field_name)\n if parsed:\n data[trait] += parsed\n\n return data",
"def build_keyset(self, field):\n keys = set()\n for r in self.__elements__:\n keys.add(getattr(r, field))\n return list(keys)",
"def abc_make_records(self, records, fields=None):\n fields = fields or self.abc_get_model_fields(records)\n result = []\n field_types = {}\n def field_type(name):\n \"\"\"Check the type of a field.\"\"\"\n if name not in field_types:\n field_types[name] = records.fields_get([name], attributes=['type'])[name]['type']\n return field_types.get(name)\n for record in records:\n rec ={'_name': record._name, 'id': record.id}\n for field in fields:\n child_fields = None\n # Relational field\n if type(field) == tuple:\n child_fields = field[1] or ['display_name']\n field = field[0]\n value = getattr(record, field)\n if child_fields:\n value = self.abc_make_records(value, child_fields)\n if field_type(field) == 'many2one':\n value = value and value[0] or None\n rec[field] = value\n result.append(rec)\n return result",
"def index_records(vr):\n return collections.OrderedDict((record2key(rec), clean_sample_index(rec))\n for rec in vr)",
"def recs_to_dict(keyfldname, recs):\n d = dict()\n for rec in recs:\n d[rec[keyfldname]] = rec\n return d",
"def hash_fieldlist(cls):\n for field in cls.fieldlist:\n cls.fieldhash[field.id] = field",
"def numpy_recarray_to_dict(data, keyfield_name):\n numrec = data.size\n fielddtype = data.dtype.fields\n names = fielddtype.keys()\n formats = []\n output_dict = {}\n for name in names:\n formats.append(fielddtype[name][0])\n\n for dataindex in np.arange(numrec):\n record_entry = {}\n for name in names:\n # TODO: remove keyfield_name from this list\n record_entry[name] = data[dataindex][name].tolist()\n output_dict[data[dataindex][keyfield_name]] = record_entry\n return output_dict",
"def record_fields(self):\n\n record_fields_grp = self.settings_grp[RECORD_FIELDS]\n\n record_fields_dict = {}\n for group_name, dset in record_fields_grp.items():\n record_fields_dict[group_name] = list(dset.asstr())\n\n return record_fields_dict",
"def buildFromRecords(self, records):\n probes = {}\n for record in records:\n fields = {}\n for field in record.split(self.FIELD_DELIMITER):\n index = field.find(self.KEY_VALUE_DELIMITER)\n if index == -1 or len(field) < (index+1):\n raise InvariantViloation('detected invalid probe record in app info file - {}'.format(record))\n fields.update({field[:index]:field[index+1:]})\n if fields:\n try:\n fields[self.FIELD_FILE] = self.trimWorkspace(fields[self.FIELD_FILE], self.workspace)\n probes.update({\n fields[self.FIELD_RECORDER_RETURN_SITE] : AnchoredProbe(\n fields[self.FIELD_NAME], fields[self.FIELD_FILE], fields[self.FIELD_LINE],\n fields[self.FIELD_ATTRIBUTES], fields[self.FIELD_STATUS] == self.PROBE_STATUS_ENABLED,\n fields[self.FIELD_NAME]\n )\n })\n except KeyError as error:\n raise InvariantViloation('detected record missing field {} - \\n{}\\n{}'.format(error, record, fields))\n return probes",
"def serialize_fields(record, fields=_SERIALIZED_FIELDS):\n keys = list(record.keys())\n keys = (k for k in keys for f in fields if k == f or k.endswith('.'+f))\n\n return merge_dicts(record, {\n key: _serialize(record[key])\n for key in keys if record[key] is not None\n })",
"def give_field(self,\r\n fieldname):\r\n\r\n return [a_temp for a_temp in self.default_dict['field']\r\n if self.default_dict['field'][a_temp] == fieldname]",
"def get_fields(csv_file, fields):\n result = OrderedDict()\n for field in fields:\n result[field] = []\n \n with open(csv_file) as f:\n reader = csv.DictReader(f)\n for row in reader:\n for field in fields:\n result[field].append(row[field])\n \n return result",
"def filter_record_keys(record_list, whitelist_keys):\n\n filtered = [{k: v for k, v in [y for y in list(x.items()) if y[0] in whitelist_keys]} for x in record_list]\n return filtered",
"def aggregated_records(all_records, key_fields=KEY_FIELDS):\n flow_table = defaultdict(_FlowStats)\n for flow_record in all_records:\n key = tuple(getattr(flow_record, attr) for attr in key_fields)\n if any(x is None for x in key):\n continue\n flow_table[key].update(flow_record)\n\n for key in flow_table:\n item = {k: v for k, v in zip(key_fields, key)}\n item.update(flow_table[key].to_dict())\n yield item",
"def key_by(self, field: str) -> B[B, E]:\n pass",
"def record_dict(self):\n return {p.key: getattr(self, p.key) for p in self.__mapper__.attrs}",
"def generate_record(self, data_dictionaries, group_by):\n result = {}\n\n for one_measurement in data_dictionaries:\n time = one_measurement['datetime']\n\n if isinstance(time, str):\n if self.timezone:\n time = arrow.get(time).shift(hours=6) # TODO: fix utc conversion\n else:\n time = arrow.get(time)\n\n record = Record(self.name, self.lat, self.lon, self.height, time)\n\n del one_measurement['datetime']\n\n one_measurement = {k: float(v) for k, v in one_measurement.items()}\n\n record.merge(one_measurement)\n\n key = group_by(time)\n \n if key == '2016-04-01_00':\n break\n\n record_string = record.little_r_report()\n\n try:\n result[key].append(record_string)\n except KeyError:\n result[key] = [record_string]\n\n return result",
"def build_match_clause(field, string):\r\n answer = {}\r\n tmp = {}\r\n tmp[field] = string\r\n answer['match'] = tmp\r\n return answer",
"def recordToDict(self, record):\n fields = {}\n if record is not None:\n for field, value in record.fields.iteritems():\n\n # FIXME: need to sort out dealing with enormous groups; we\n # can ignore these when sending AMP responses because the\n # client will always fetch members via a members( ) AMP\n # command.\n if field.name in (u\"memberDNs\", u\"memberUIDs\"):\n continue\n\n valueType = record.service.fieldName.valueType(field)\n if valueType in (unicode, bool):\n fields[field.name] = value\n elif valueType is uuid.UUID:\n fields[field.name] = str(value)\n elif issubclass(valueType, (Names, NamedConstant)):\n fields[field.name] = value.name if value else None\n return fields",
"def list_field_to_dict(list_field):\n\n return_data = []\n\n for item in list_field:\n # if list is of embedded documents, convert each document to a dictionary\n if isinstance(item, EmbeddedDocument):\n return_data.append(mongo_to_dict(item))\n # convert the data type\n else:\n return_data.append(mongo_to_python_type(item,item))\n\n return return_data",
"def _convert_record_fields_to_table_columns(self, run_idx, run_record_key):\n\n fields = {}\n for record_field in self.record_fields[run_record_key]:\n fields[record_field] = self._convert_record_field_to_table_column(\n run_idx, run_record_key, record_field)\n\n return fields",
"def make_dict(cls, fields, fields_kwargs):\n return utils.make_dict(fields, fields_kwargs)",
"def _add_matches(self):\r\n for record in self.records:\r\n match_dict={key_to_track: record.get(key_to_track)\r\n for key_to_track in self.key_matcher.keys()}\r\n self.key_matcher.add(obj=record,\r\n match_dict=match_dict)",
"def groupby(f, coll):\n d = dict()\n for item in coll:\n key = f(item)\n if key not in d:\n d[key] = []\n d[key].append(item)\n return d",
"def getByField(database,field):\n correspondant=[]\n for key,usr in database.items():\n if field == usr.fieldStudy:\n correspondant.append(usr)\n return correspondant, False\n else:\n return correspondant,True"
]
| [
"0.6895315",
"0.61138445",
"0.57409555",
"0.56086946",
"0.55212",
"0.54991513",
"0.5478987",
"0.54692143",
"0.5454076",
"0.54103667",
"0.5346098",
"0.5315012",
"0.52935874",
"0.5275915",
"0.52585053",
"0.5243504",
"0.52341795",
"0.5225069",
"0.5221858",
"0.52113163",
"0.52037865",
"0.5190638",
"0.5185596",
"0.51826465",
"0.514461",
"0.5143965",
"0.514223",
"0.50960237",
"0.508118",
"0.50674725"
]
| 0.65679175 | 1 |
Cull records from the container based on `field`. | def cull(self, field, keep_values=None, remove_values=None):
lud = self.build_lookup(field)
# Determine values to keep
if remove_values:
keep_values = set(lud.keys()) - set(remove_values)
if not keep_values:
raise DataRecordsError(
"Either keep_values or remove_values must be provided"
)
# Cull records
inds = set()
for v in keep_values:
inds.update(lud[v])
self.keep_inds(inds)
return len(self) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cull_with_function(self, field, func):\n lud = self.build_lookup(field)\n\n # Cull records\n inds = set()\n for v in lud:\n if func(v):\n inds.update(lud[v])\n self.keep_inds(inds)\n\n return len(self)",
"def remove_filter_field(self, field):\n if self.filters:\n category_filter = self.filters.get(str(field.category.id), None)\n\n if category_filter:\n field_filter = category_filter.pop(field.key, None)\n\n if field_filter:\n self.save()",
"def __call__(self, collection, field):\n pass",
"def clean(self, field_path=None):\n cleaned = self._collection.clean(field_path)\n return self.set_collection(cleaned)",
"def test_entities__Entity__removeField__1(entities, entity_with_field, field):\n assert field in entities.values()\n entity_with_field.removeField(field)\n assert field not in entities.values()",
"def test_entities__Entity__removeField__3(entity_with_field, field):\n entity_with_field.removeField(field)\n assert None is zope.component.queryMultiAdapter(\n (entity, Dummy()), IField, name=u'Field')\n assert None is IEntity(field, None)",
"def slice(self, field):\n return [getattr(r, field) for r in self.__elements__]",
"def get_field(self, field):\r\n if field not in self.fields:\r\n raise ValueError(\"Invalid field: {}. Must be one of: {}\"\r\n .format(field, self.fields))\r\n sub = set(meta[field] for meta in self.data)\r\n return sub",
"def crop_field(image_data, fieldmap, location):\n coloraxis = 0 if image_data.size <= 2 else 1\n allcolors = () if not coloraxis else (slice(None),) * coloraxis\n colordepth = () if not coloraxis else (image_data.size[0], )\n offset, size = receptive_field(fieldmap, location)\n result = numpy.zeros(colordepth + size)\n (xto, xfrom), (yto, yfrom) = (_cropped_slices(\n o, s, l) for o, s, l in zip(offset, size, image_data.size[coloraxis:]))\n result[allcolors + (xto, yto)] = image_data[allcolors + (xfrom, yfrom)]\n return result",
"def _all_data_fields(field):\n all_fields = PhotoTech.objects.all().values()\n return list(set([all_fields[x][field]\n for x in range(len(all_fields))]))",
"def clearField(self):\n self.field.clearFields()",
"def remove_field(pl, key):\n\n if type(pl) is tuple:\n r = (remove_field(v, key) for v in pl)\n\n elif type(pl) is list:\n r = [remove_field(v, key) for v in pl]\n \n elif type(pl) is dict:\n r = {k: remove_field(v, key) for (k, v) in pl.items() if k != key}\n else: \n r = pl\n\n return r",
"def find_all(self, field):\n result = []\n for c in self.children:\n if c.name == field:\n result.append(c)\n result += c.find_all(field)\n return result",
"def drop_field(self, field):\r\n self.dropped_fields.add(field)",
"def clean_collection(previous_records, collection):\n for previous_record in previous_records:\n collection.delete_one({\"_ref\": previous_record})",
"def process_field(self, field_value):\n\n if is_novabase(field_value):\n if not self.already_processed(field_value):\n self.process_object(field_value, False)\n key = self.get_cache_key(field_value)\n result = self.simple_cache[key]\n else:\n result = self.process_object(field_value, False)\n return result",
"def filter(self, func):\r\n\r\n d = self.data\r\n f = []\r\n for i in d:\r\n if func(i):\r\n f.append(i)\r\n return Records(f)",
"def item_remove_field(self, item, field_name):\n\t\treturn self._modify_object(item=item, field_name=field_name, new_value=None, new_field_name=None)",
"def clear_field_values(self):\n\t\tlogging.info(\"Clearing values in the field[] dictionary of the object\")\n\t\tlogging.debug(\"Before = \" + str(self.field))\n\t\tfor key, value in self.fields.items():\n\t\t\tself.field[str(key)] = None\n\t\tlogging.debug(\"After = \" + str(self.field))\n\t\treturn",
"def filter_record(self, record):\n raise NotImplementedError(\"Override in subclass\")",
"def select(self, field_spec=None): # pylint: disable=invalid-name\n coll = self.pipeline().select(field_spec).to_keyed_collections()\n return self.set_collection(coll.get('all'))",
"def _cache_field(self, name, field):\n base = field\n while True:\n self_base = self\n while True:\n if self_base is base:\n return\n\n if getattr(self_base, \"base\", None) is not None:\n self_base = self_base.base\n else:\n break\n\n if getattr(base, \"base\", None) is not None:\n base = base.base\n else:\n break\n\n self._converted[name] = field",
"def query_by_field(cls, field: str, value: str)->List:\n database.cursor.execute(\n \"SELECT * FROM {0} WHERE {1} = %s\".format(cls.table_name, field), (value,))\n items = database.cursor.fetchall()\n return [cls.to_object(item) for item in items]",
"def drop(self, fields, inplace=True):\n if inplace:\n obj = self\n else:\n obj = copy.deepcopy(self)\n\n for field in list(fields):\n del obj[field]\n\n return obj",
"def make_filtered_field(ds, fieldname, filter_fields = [], tolerance = tol):\n def _filtered_field(field, data):\n x = data[('gas',fieldname)]\n\n select = data[filter_fields[0]] < 0\n for f in filter_fields:\n select = select + (data[f] < tolerance)\n x[select] = np.nan\n\n return x\n\n ds.add_field(('gas',fieldname + '_filtered'), function = _filtered_field, units = \"\")\n return",
"def run(self, data, config=None, pipeline=None):\n del data[self.field]\n return data",
"def apply(self, field):\n with timed_stage('limiter'):\n if self.squeezed_triangles:\n self.squeezed_filter.apply(field)\n\n if self.is_vector:\n tmp_func = self.P1DG.get_work_function()\n fs = field.function_space()\n for i in range(fs.value_size):\n tmp_func.dat.data_with_halos[:] = field.dat.data_with_halos[:, i]\n super(VertexBasedP1DGLimiter, self).apply(tmp_func)\n field.dat.data_with_halos[:, i] = tmp_func.dat.data_with_halos[:]\n self.P1DG.restore_work_function(tmp_func)\n else:\n super(VertexBasedP1DGLimiter, self).apply(field)",
"def shuffle_field(dframe, field):\n column = list(dframe[field])\n random.shuffle(column)\n dframe[field] = column",
"def read_group(self, domain, fields, groupby, offset=0, limit=None, orderby=False, lazy=True):\n if 'purity' in fields:\n fields.remove('purity')\n result = super(StockValuationLayer, self).read_group(domain, fields, groupby, offset=offset, limit=limit,\n orderby=orderby, lazy=lazy)\n return result",
"def read_and_set(self):\n self.df = self.run_find(self.qry, self.hide_fields)\n return"
]
| [
"0.61030185",
"0.54195553",
"0.5323119",
"0.4986233",
"0.49818215",
"0.49595803",
"0.48766378",
"0.48577824",
"0.48449743",
"0.4799145",
"0.4787641",
"0.47857893",
"0.47811604",
"0.47460705",
"0.46765202",
"0.46490967",
"0.4638372",
"0.46370828",
"0.46361995",
"0.46234384",
"0.4621662",
"0.46069565",
"0.46000612",
"0.45699272",
"0.45378774",
"0.4530208",
"0.45277864",
"0.452741",
"0.4510345",
"0.4507122"
]
| 0.5545093 | 1 |
Cull records from the container whose `field` value evaluates to False when passed through `func`. | def cull_with_function(self, field, func):
lud = self.build_lookup(field)
# Cull records
inds = set()
for v in lud:
if func(v):
inds.update(lud[v])
self.keep_inds(inds)
return len(self) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def filter(self, func):\r\n\r\n d = self.data\r\n f = []\r\n for i in d:\r\n if func(i):\r\n f.append(i)\r\n return Records(f)",
"def reject(self, func=bool):\n return self.filter(lambda x: not func(x))",
"def wholeFieldPredicate(field):\n\n if field:\n return (field, )\n else:\n return ()",
"def ifilter(self, func: Callable[[T], bool]) -> '_[T]':\n return _(filter(func, self.array))",
"def filter(self, func: Callable[[T], bool]) -> 'List[T]':\n return [v for v in self.array if func(v)]",
"def filter(self, func=bool):\n return _(filter(func, self._))",
"def filter_func(fieldname):\n if fieldname.startswith('_'):\n return False\n value = getattr(class_, fieldname)\n \n return isinstance(value, type)",
"def filter(self, func):\n n = len(self.data['id'])\n new_table = []\n for i in range(n):\n row = dict([(col, self.data[col][i]) for col in self.cols])\n if func(row):\n new_table.append(row)\n for col in self.cols:\n self.data[col] = []\n for row in new_table:\n self.data[col].append(row[col])\n return self",
"def filter(self, func):\n self._sets.filter(key=func)",
"def remove(predicate, coll):\r\n return filter(lambda x: not predicate(x), coll)",
"def filter_generic(mt_list, func):\r\n return [mt for mt in mt_list if func(mt)]",
"def filtered(self, func):\n return PSetList(list(filter(func, self.sets)))",
"def remove(predicate, coll):\n return filter(lambda x: not predicate(x), coll)",
"def drop_while(coll, func): \n i = 0\n while i < len(coll) and func(coll[i]):\n i += 1\n return coll[i:]",
"def cull(self, field, keep_values=None, remove_values=None):\n lud = self.build_lookup(field)\n\n # Determine values to keep\n if remove_values:\n keep_values = set(lud.keys()) - set(remove_values)\n if not keep_values:\n raise DataRecordsError(\n \"Either keep_values or remove_values must be provided\"\n )\n\n # Cull records\n inds = set()\n for v in keep_values:\n inds.update(lud[v])\n self.keep_inds(inds)\n\n return len(self)",
"def filter(self, func: Callable[[Trajectory, Any], bool]) -> Data:\n trajs, labels = [], []\n for traj, label in zip(self.trajs, self.labels):\n if func(traj, label):\n trajs.append(traj)\n labels.append(label)\n logging.info(\"Filtered %d of %d trajectories\", len(trajs), len(self))\n return Data(trajs, labels)",
"def test_entities__Entity__removeField__1(entities, entity_with_field, field):\n assert field in entities.values()\n entity_with_field.removeField(field)\n assert field not in entities.values()",
"def func_filter(self, func):\n return QuerySet(filter(func, self))",
"def remove_filter_field(self, field):\n if self.filters:\n category_filter = self.filters.get(str(field.category.id), None)\n\n if category_filter:\n field_filter = category_filter.pop(field.key, None)\n\n if field_filter:\n self.save()",
"def test_filter_comparison_func_false(self):\n\n num_props_original = len(self.test_table._odmldict)\n self.test_table.filter(comparison_func=lambda x, y: True, PropertyName='')\n self.assertEqual(len(self.test_table._odmldict), num_props_original)\n\n self.test_table.filter(comparison_func=lambda x, y: False, PropertyName='')\n self.assertEqual(len(self.test_table._odmldict), 0)",
"def filter(self, func=bool, *args, **kwargs):\n return self.apply(func, *args, **kwargs).apply(bool) == True",
"def filter(function, iterable):\n\n if function is bool:\n return [x for x in iterable if x]\n\n return [x for x in iterable if function(x)]",
"def make_filtered_field(ds, fieldname, filter_fields = [], tolerance = tol):\n def _filtered_field(field, data):\n x = data[('gas',fieldname)]\n\n select = data[filter_fields[0]] < 0\n for f in filter_fields:\n select = select + (data[f] < tolerance)\n x[select] = np.nan\n\n return x\n\n ds.add_field(('gas',fieldname + '_filtered'), function = _filtered_field, units = \"\")\n return",
"def assert_matches_nonexistent_field(f, field):\n assert (\n f.to_dict() ==\n {'bool': {'must_not': [{'exists': {'field': field}}]}})",
"def filter(self, fn: Callable[[Tuple[K, List[V]]], bool]) -> Iterator[Tuple[K, List[V]]]:\n raise NotImplementedError",
"def filter(self, function):\n return FunctionalWrapper(filter(function, self.data))",
"def filter_record(self, record):\n raise NotImplementedError(\"Override in subclass\")",
"def filt(rec):\n return True # Show everything",
"def filter_snapshot(snap, config):\n for field in ALL_FIELDS:\n if field not in config:\n snap.ClearField(field)",
"def filter(self, fn: Callable[[Tuple[K, List[V]]], bool]) -> Iterator[Tuple[K, List[V]]]:\n return (entry for entry in iter(self) if fn(entry))"
]
| [
"0.6246021",
"0.59056026",
"0.5561669",
"0.5532213",
"0.54449373",
"0.54386085",
"0.52966136",
"0.5288926",
"0.5282799",
"0.52240276",
"0.5167315",
"0.5153407",
"0.5135682",
"0.505053",
"0.5041102",
"0.5027123",
"0.5005287",
"0.49946797",
"0.49810916",
"0.49740827",
"0.49577022",
"0.4955228",
"0.49501455",
"0.49153602",
"0.49003842",
"0.48920918",
"0.4890143",
"0.48616117",
"0.48464125",
"0.48374873"
]
| 0.6549046 | 0 |
Creates a new DataRecords instance containing only the subset of records in this container with the specified indices. | def subset_from_indices(self, indices):
return self.extract_inds(indices) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _subset(self, idxs):\n vertices = [self.vertices[i] for i in idxs]\n if hasattr(self, \"data\"):\n data = Table(data=[self.data._data[i] for i in idxs], fields=self.data.fields)\n return type(self)(vertices, properties=self.properties, data=data, crs=self.crs)\n else:\n return type(self)(vertices, properties=self.properties, crs=self.crs)",
"def subset(dataset, article_indices):\n indices = []\n for a_id in article_indices:\n indices += dataset.get_article_indices(a_id)\n return Subset(dataset, indices)",
"def slice(self,*Indices):\n \n Ind = tuple(Indices)\n\n\n try:\n \n OutShape=shape((1*self[(0,)*Dimension])[Indices])\n except:\n raise IndexError(\"Wrong format for indices\")\n \n Out = BZO(shape=OutShape)\n \n for Ind in self.IndList():\n\n Out[Ind]=array(self[Ind][Indices])\n \n Out.CleanUp()\n \n return Out",
"def sample_inds(self, inds: List[float]):\n data_subset = self.data.create_subset(inds)\n\n if self.preload:\n data_ram_subset = [self.data_ram[i] for i in inds]\n return AtomisticDataset(data_subset, data_ram=data_ram_subset)\n else:\n return AtomisticDataset(data_subset)",
"def make_dataset(files_dataset, shard_index):\n files_dataset = files_dataset.shard(params[\"parallel_reads\"], shard_index)\n dataset = files_dataset.interleave(tf.data.TFRecordDataset)\n dataset = dataset.map(\n tf_example_parser, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n return dataset",
"def subsetFromGeneIds(self, geneIds):\n\t\tgs = copy.copy(self)\n\t\tgs._dataframe = gs._dataframe.loc[[item for item in geneIds if item in gs._dataframe.index]]\n\t\treturn gs",
"def _subset(self, idxs):\n vertices = [self.vertices[i] for i in idxs]\n subset = Line(vertices, properties=self.properties, crs=self.crs)\n return subset",
"def create_subsets(self, start_ids):\n subsets = list()\n df = self.all_df.copy()\n for sid in start_ids:\n df2 = df.loc[sid:, :]\n subsets.append(df.drop(df2.index, axis=0))\n df = df2.copy()\n subsets.append(df)\n return subsets",
"def get_data_at_time_indices(self, indices):\n if _is_iterable(indices):\n # Raise error if indices not sorted?\n index_list = list(sorted(indices))\n time_list = [self._time[i] for i in indices]\n data = {\n cuid: [values[idx] for idx in index_list]\n for cuid, values in self._data.items()\n }\n time_set = self._orig_time_set\n return TimeSeriesData(data, time_list, time_set=time_set)\n else:\n # indices is a scalar\n return ScalarData(\n {cuid: values[indices] for cuid, values in self._data.items()}\n )",
"def _make_subset(cls, name, data, **kwargs):\r\n return cls(name, data, **kwargs)",
"def subList(self, indices):\n Parameter.checkList(indices, Parameter.checkIndex, (0, self.getNumVertices()))\n vList = GeneralVertexList(len(indices))\n vList.setVertices(self.getVertices(indices))\n\n return vList",
"def slice(self, num_slices, slice_index):\n return ClippedDataset(CachedGenotypeDataset(self.basename, self.vector_names,\n _ceiling_partition(len(self), num_slices),\n self.sample_id),\n num_slices=num_slices, slice_index=slice_index)",
"def extract_subset_idx(self, start_idx, end_idx):\r\n templist = ChannelList()\r\n # Burris and Scintrex have different fields. This only returns non-empty fields.\r\n for field, value in self:\r\n temp = value[start_idx : end_idx + 1]\r\n setattr(templist, field, temp)\r\n return templist",
"def selectOfSample(self, indexes):\n index_set = set()\n for idx in indexes:\n i = list(self.sample[self.sample['masked'] == False].index)[idx]\n index_set.add(i)\n for ind in list(self.sample[self.sample['masked'] == False].index):\n if ind not in index_set:\n self.sample.at[ind, 'masked'] = True\n return index_set",
"def warping_records_dataframe(self, run_idxs):\n\n return pd.DataFrame(self.warping_records(run_idxs))",
"def bc_records_dataframe(self, run_idxs):\n\n return pd.DataFrame(self.bc_records(run_idxs))",
"def subset_by_index(prediction_dict, desired_indices):\n\n error_checking.assert_is_numpy_array(desired_indices, num_dimensions=1)\n error_checking.assert_is_integer_numpy_array(desired_indices)\n error_checking.assert_is_geq_numpy_array(desired_indices, 0)\n error_checking.assert_is_less_than_numpy_array(\n desired_indices, len(prediction_dict[VALID_TIMES_KEY])\n )\n\n for this_key in ONE_PER_EXAMPLE_KEYS:\n if prediction_dict[this_key] is None:\n continue\n\n prediction_dict[this_key] = (\n prediction_dict[this_key][desired_indices, ...]\n )\n\n return prediction_dict",
"def sel(\n self,\n **kwargs,\n ) -> \"Dataset\":\n res = [da.sel(**kwargs) for da in self]\n return Dataset(data=res, validate=False)",
"def sample_inds(self, inds: List[float]):\n data = [self.data[i] for i in inds]\n return MoleculeDataset(data)",
"def test_subset_by_index(self):\n\n this_satellite_dict = satellite_io.subset_by_index(\n satellite_dict=copy.deepcopy(SATELLITE_DICT_ALL_EXAMPLES),\n desired_indices=DESIRED_INDICES\n )\n\n self.assertTrue(compare_satellite_dicts(\n this_satellite_dict, SATELLITE_DICT_SUBSET_BY_INDEX\n ))",
"def run_contig_records_dataframe(self, run_idxs, run_record_key):\n records = self.run_contig_records(run_idxs, run_record_key)\n return pd.DataFrame(records)",
"def __init__(self, dataset: SizedDataset, predicate: Callable):\n\t\tindices = [i for i in range(len(dataset)) if predicate(dataset[i])]\n\t\tsuper().__init__(dataset)\n\t\tself._subset = Subset(dataset, indices)",
"def subset_by_index(example_dict, desired_indices):\n\n error_checking.assert_is_numpy_array(desired_indices, num_dimensions=1)\n error_checking.assert_is_integer_numpy_array(desired_indices)\n error_checking.assert_is_geq_numpy_array(desired_indices, 0)\n error_checking.assert_is_less_than_numpy_array(\n desired_indices, len(example_dict[VALID_TIMES_KEY])\n )\n\n for this_key in ONE_PER_EXAMPLE_KEYS:\n if isinstance(example_dict[this_key], list):\n example_dict[this_key] = [\n example_dict[this_key][k] for k in desired_indices\n ]\n else:\n example_dict[this_key] = (\n example_dict[this_key][desired_indices, ...]\n )\n\n return example_dict",
"def resampling_records_dataframe(self, run_idxs):\n\n return pd.DataFrame(self.resampling_records(run_idxs))",
"def new_character_subset(self, label, character_indices):\n cs = CharacterSubset(character_indices=character_indices, label=label)\n return self.add_character_subset(cs)",
"def resampler_records_dataframe(self, run_idxs):\n\n return pd.DataFrame(self.resampler_records(run_idxs))",
"def _at_index(data, indices, keepdim=None, padding=np.nan):\n if not (keepdim is None or keepdim in ['data', 'index']):\n raise TypeError('unexpected argument keepdim={}'.format(keepdim))\n\n data = np.asarray(data)\n indices = np.asarray(indices)\n i = indices[indices < data.size]\n\n if keepdim is None:\n return data[i]\n elif keepdim == 'data':\n res = np.full(data.size, padding)\n res[i] = data[i]\n return res\n elif keepdim == 'index':\n res = np.full(indices.size, padding)\n if i.size !=0:\n res[0:indices.size-1] = data[i]\n return res",
"def select_samples(self, sample_ids):\n mask = self._data.isin(sample_ids)\n return self._copy(self._data[mask])",
"def filter(self, func):\r\n\r\n d = self.data\r\n f = []\r\n for i in d:\r\n if func(i):\r\n f.append(i)\r\n return Records(f)",
"def run_records_dataframe(self, run_idx, run_record_key):\n records = self.run_records(run_idx, run_record_key)\n return pd.DataFrame(records)"
]
| [
"0.59688765",
"0.5935681",
"0.5567602",
"0.5546546",
"0.5529566",
"0.5520367",
"0.5479978",
"0.5464802",
"0.5424255",
"0.53562534",
"0.53429353",
"0.5326171",
"0.5291528",
"0.52882534",
"0.5271028",
"0.52346957",
"0.5224938",
"0.51865077",
"0.5113216",
"0.5099608",
"0.5077727",
"0.50743073",
"0.5058599",
"0.50490034",
"0.50317115",
"0.5027527",
"0.5016225",
"0.50071883",
"0.4993389",
"0.49802408"
]
| 0.6700164 | 0 |
Constructs a DataRecords instance from a dictionary. | def from_dict(cls, d, record_cls=None):
if record_cls is None:
record_cls_str = d.get(cls._ELE_CLS_FIELD, None)
if record_cls_str is None:
raise DataRecordsError(
"Your DataRecords does not have its '%s' attribute "
"populated, so you must manually specify the `record_cls` "
"to use when loading it" % cls._ELE_CLS_FIELD
)
record_cls = etau.get_class(record_cls_str)
return DataRecords(
record_cls=record_cls,
records=[record_cls.from_dict(r) for r in d[cls._ELE_ATTR]],
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def from_dicts(self, records, fieldnames=None):\n generator = _from_dicts(records, fieldnames=fieldnames)\n return Reader(generator)",
"def from_dict(cls, record, _id=None):\n # copy dict\n record = dict(record)\n\n # get record id and remove it from record\n record_id = record.pop(\"_id\", None)\n if _id is None:\n _id = record_id\n if _id is None:\n _id = cls._make_uuid()\n\n # make record\n return cls(record, _id)",
"def from_dict(cls, data):\n return cls(**data)",
"def from_dict(cls, data: Dict[str, any]):\n return cls(**data)",
"def from_dict(cls, odic):\n return dacite.from_dict(data_class=cls, data=odic)",
"def from_dict(cls, dictionary: Dict[str, Any]):\n return cls(**dictionary)",
"def from_dict(cls, d):\n return cls(**d)",
"def from_dict(cls, data):\r\n instance = cls()\r\n for key, value in data.items():\r\n instance.__dict__[key] = value\r\n return instance",
"def from_dict(self, dict_=None): # noqa: MC0001\n for key in dict_:\n if hasattr(self, key):\n attribute = getattr(self, key)\n if key == \"history\":\n for element in dict_[key]:\n record = \\\n aspecd.history.ProcessingHistoryRecord()\n record.from_dict(element)\n self.history.append(record)\n elif key == \"analyses\":\n for element in dict_[key]:\n record = aspecd.history.AnalysisHistoryRecord()\n record.from_dict(element)\n self.analyses.append(record)\n elif key == \"annotations\":\n for element in dict_[key]:\n record = \\\n aspecd.history.AnnotationHistoryRecord()\n record.from_dict(element)\n self.annotations.append(record)\n elif key == \"representations\":\n for element in dict_[key]:\n record = aspecd.history.PlotHistoryRecord()\n record.from_dict(element)\n self.representations.append(record)\n elif key == \"references\":\n for element in dict_[key]:\n record = DatasetReference()\n record.from_dict(element)\n self.references.append(record)\n elif key == \"tasks\":\n for element in dict_[key]:\n if element[\"kind\"] == \"representation\":\n record_class_name = \\\n 'aspecd.history.PlotHistoryRecord'\n else:\n record_class_name = 'aspecd.history.' \\\n + element[\"kind\"].capitalize() + 'HistoryRecord'\n record = aspecd.utils.object_from_class_name(\n record_class_name)\n # noinspection PyUnresolvedReferences\n record.from_dict(element[\"task\"])\n self.tasks.append({'kind': element[\"kind\"],\n 'task': record})\n elif hasattr(attribute, 'from_dict'):\n attribute.from_dict(dict_[key])\n else:\n setattr(self, key, dict_[key])",
"def from_dict(cls, dictionary):\n obj = cls()\n for var, data in dictionary.items():\n obj[var] = data\n\n return obj",
"def from_dict(cls, d):\n return loadd(d, cls)",
"def from_dict(cls, data):\n instance = cls()\n instance._set_data(data)\n return instance",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)"
]
| [
"0.7356026",
"0.7138957",
"0.71198374",
"0.70284396",
"0.70236653",
"0.6950481",
"0.6917223",
"0.6891114",
"0.6885126",
"0.6884123",
"0.68505895",
"0.6840841",
"0.6824809",
"0.6824809",
"0.6824809",
"0.6824809",
"0.6824809",
"0.6824809",
"0.6824809",
"0.6824809",
"0.6824809",
"0.6824809",
"0.6824809",
"0.6824809",
"0.6824809",
"0.6824809",
"0.6824809",
"0.6824809",
"0.6824809",
"0.6824809"
]
| 0.7742329 | 0 |
Returns the list of attributes of the data record that are to be serialized. All private attributes (those starting with "_") and attributes in `excluded()` are omitted from this list. | def attributes(self):
attr = super(BaseDataRecord, self).attributes()
return [a for a in attr if a not in self.excluded()] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getDataAttributes(self):\n asRet = [];\n asAttrs = dir(self);\n for sAttr in asAttrs:\n if sAttr[0] == '_' or sAttr[0] == 'k':\n continue;\n if sAttr in self.kasInternalAttributes:\n continue;\n oValue = getattr(self, sAttr);\n if callable(oValue):\n continue;\n asRet.append(sAttr);\n return asRet;",
"def attrs(self):\n return list(name for name in self.__dict__\n if not name.startswith(\"_\"))",
"def get_attribute_list(self):\n attributes = [attr for attr in vars(self.entries[0]) if not attr.startswith('__')]\n return attributes",
"def attributes(self):\n\n attributes = []\n\n for member in self.members:\n if member.attribute:\n attributes.append(member)\n\n return attributes",
"def get_attrs(self):\n attrs = []\n for attribute in self.__dict__.keys():\n attrs.append(attribute)",
"def get_attributes(cls):\r\n return []",
"def __listAttr(self):\n attr = dir(self) # already sorted\n filter = []\n for name in attr:\n if name[:2] == '__': pass\n elif name[:10] == '_HelpDoc__': pass # used to mask private attr\n elif name in self.__exclude: pass\n else: filter.append(name)\n return filter",
"def attributes(self):\n\n return list(self._attributes.values())",
"def attributes(self):\n attrs_ = [\"name\", \"type\"]\n if self.exclusive:\n attrs_.append(\"exclusive\")\n if self.default is not None:\n attrs_.append(\"default\")\n\n return attrs_",
"def get_attributes(cls):\r\n return [Attribute('file'),\r\n Attribute('missing', None)]",
"def get_attributes(cls):\n return cls._attributes",
"def get_attributes(self):\n return self.attributes",
"def get_attributes(self) -> Dict[str, str]:\n pass",
"def get_attribute_list(self):\n return self.dp.get_attribute_list()",
"def getAttributes(self):\n return self.attributes",
"def getAttributes(self):\n return self.attributes",
"def get_attributes(self):\n return dict(self.attributes) # return the attributes",
"def get_attributes(self):\n return dict(self.attributes) # return the attributes",
"def get_attributes(self):\n return dict(self.attributes) # return the attributes",
"def json_properties(self):\n attributes = []\n all = vars(self)\n for var in all:\n if var[:1] != '_':\n attributes.append(var)\n if isinstance(self, db.Model):\n properties = self.properties().keys()\n for property in properties:\n if property[:1] != '_':\n attributes.append(property)\n return attributes",
"def all_attributes(self):\n\n attributes = []\n for level in self.levels:\n attributes.extend(level.attributes)\n\n return attributes",
"def getAttributes(self):\n pass",
"def attributes(self):\n return self.__dict.keys()",
"def get_attributes(cls):\r\n return [Attribute('allowed_files', '[]', transform=cls.parse_files),\r\n Attribute('label', ''),\r\n Attribute('required_files', '[]', transform=cls.parse_files), ]",
"def clean_attributes(self):\n attrs = {}\n\n # Only fetch the fields we need.\n for a in self.attributes.only('name', 'value', 'attribute').iterator():\n if a.attribute.multi:\n if a.name not in attrs:\n attrs[a.name] = []\n attrs[a.name].append(a.value)\n else:\n attrs[a.name] = a.value\n self._attributes_cache = attrs # Cache the attributes\n\n return attrs",
"def to_dict(self):\n excluded_keys = ['idx', 'json', 'identifier']\n keys_to_store = {\n key for key in self.__dict__\n if key in self._included_attr or (\n key not in excluded_keys and\n key not in self._excluded_attr and\n not (key.startswith('_') and self._exclude_private_attr)\n )\n }\n return {\n key: self.__dict__[key] for key in keys_to_store\n }",
"def attributes(self):\n _attrs = [\"type\", \"name\", \"value\"]\n if self.confidence is not None:\n _attrs.append(\"confidence\")\n if self.constant:\n _attrs.append(\"constant\")\n if self.tags:\n _attrs.append(\"tags\")\n\n return _attrs",
"def get_sensitive_attributes(self):\n return self.sensitive_attrs",
"def get_attributes(self):\n\n _attributes = (self.survival_id, self.record_id, self.name,\n self.source_id, self.failure_date, self.left_interval,\n self.right_interval, self.status_id, self.quantity,\n self.tbf, self.mode_type_id, self.nevada_chart,\n self.ship_date, self.number_shipped, self.return_date,\n self.number_returned, self.user_float_1,\n self.user_float_2, self.user_float_3,\n self.user_integer_1, self.user_integer_2,\n self.user_integer_3, self.user_string_1,\n self.user_string_2, self.user_string_3)\n\n return _attributes",
"def get_attributes(cls):\r\n return [Attribute('size', '20'),\r\n Attribute('label', ''), ]"
]
| [
"0.7105229",
"0.70813924",
"0.7015696",
"0.6925821",
"0.6905669",
"0.688247",
"0.68791455",
"0.6837716",
"0.67589146",
"0.6555124",
"0.65252256",
"0.6523603",
"0.64901346",
"0.641182",
"0.6391025",
"0.6391025",
"0.63747823",
"0.63747823",
"0.63747823",
"0.6345987",
"0.6328713",
"0.62904006",
"0.62838906",
"0.6280878",
"0.6256657",
"0.6240546",
"0.62294036",
"0.6210424",
"0.6196657",
"0.618347"
]
| 0.793529 | 0 |
Deletes any optional attributes from the data record that are not set, i.e., those that are `no_default`. Note that `None` is a valid value for an attribute. | def clean_optional(self):
for o in self.optional():
if hasattr(self, o) and getattr(self, o) is no_default:
delattr(self, o) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _none(self, attrs):\n\n [self.__setattr__(nm, None) if nm[0] == '_' else\n self._data.pop(nm, None) for nm in attrs]",
"def reinitToNull(self):\n for sAttr in self.getDataAttributes():\n setattr(self, sAttr, None);\n return self;",
"def _remove_none(self, data):\r\n for key, value in data.items():\r\n if value is None or isinstance(value, forms.Field):\r\n del data[key]\r\n if isinstance(value, dict):\r\n self._remove_none(data[key])",
"def clear_attrs(self):\n self._attributes.clear()",
"def _remove_none_fields(klass, d):\n\n return dict((k, v) for k, v in d.iteritems() if v is not None)",
"def remove_null_fields(self):\n with open(self.schema_path, 'r') as file_obj:\n schema_data = yaml.safe_load(file_obj)\n schema_fields = schema_data.get('mapping').keys()\n for field in schema_fields:\n # We want to keep 'false' and 0 values, and avoid removing fields that are required in the schema.\n if field in self.data and self.data[field] in (None, '', [], {}) and \\\n not schema_data.get('mapping', {}).get(field, {}).get('required'):\n self.data.pop(field)",
"def _maybe_del_attr(da, attr):\n if attr in da.attrs:\n del da.attrs[attr]\n\n return da",
"def deleteATTR(sel=None):\n if sel == None:\n sel = pm.ls(sl=1)\n for obj in sel:\n #remove customAttr with keyable\n attrs = pm.listAttr(obj,k=1)\n listAttrs = ['visibility','translateX','translateY','translateZ','rotateX','rotateY','rotateZ','scaleX','scaleY','scaleZ']\n for A in attrs:\n if A not in listAttrs:\n pm.setAttr(obj+'.'+A,l=0)\n pm.delete(obj+'.'+A,icn=1)\n pm.deleteAttr(obj, at = A)\n #remove customAttr with Nonkeyable\n attrs = pm.listAttr(obj,cb=1)\n listAttrs = ['visibility','translateX','translateY','translateZ','rotateX','rotateY','rotateZ','scaleX','scaleY','scaleZ']\n for A in attrs:\n if A not in listAttrs:\n pm.setAttr(obj+'.'+A,l=0)\n pm.delete(obj+'.'+A,icn=1)\n pm.deleteAttr(obj, at = A)",
"def __delattr__(self, attr):\n # Set to default value\n if attr in self.fields:\n setattr(self, attr, self.fields[attr].default)\n else:\n super(BaseModel, self).__delattr__(attr)",
"def remove_attributes(cube, field, filename):\n cube.attributes = None",
"def clean_metadata_from_xml(cls, xml_object):\r\n for attr in cls.metadata_attributes:\r\n if xml_object.get(attr) is not None:\r\n del xml_object.attrib[attr]",
"def remove_attributes_with_default_values(self, cls: Type) -> None:\n def matches(value_node: yaml.Node, default: Any) -> bool:\n if value_node.tag == 'tag:yaml.org,2002:null':\n return default is None\n\n if value_node.tag == 'tag:yaml.org,2002:int':\n return int(value_node.value) == int(default)\n\n if value_node.tag == 'tag:yaml.org,2002:float':\n return float(value_node.value) == float(default)\n\n if value_node.tag == 'tag:yaml.org,2002:bool':\n if default is False:\n return (\n str(value_node.value).lower() == 'n' or\n str(value_node.value).lower() == 'no' or\n str(value_node.value).lower() == 'false' or\n str(value_node.value).lower() == 'off')\n elif default is True:\n return (\n str(value_node.value).lower() == 'y' or\n str(value_node.value).lower() == 'yes' or\n str(value_node.value).lower() == 'true' or\n str(value_node.value).lower() == 'on')\n return False\n\n return bool(value_node.value == default)\n\n defaults = defaulted_attributes(cls)\n\n self.yaml_node.value = [\n (name_node, value_node)\n for name_node, value_node in self.yaml_node.value\n if (\n name_node.value not in defaults or\n not matches(value_node, defaults[name_node.value]))]",
"def test_remove_a_single_attribute(self):\n pass",
"def uncleanable():\n data = attrdict.AttrDict()\n data.backup_ids = set()\n data.image_ids = set()\n data.keypair_ids = set()\n data.server_ids = set()\n data.nodes_ids = set()\n data.chassis_ids = set()\n data.snapshot_ids = set()\n data.transfer_ids = set()\n data.volume_ids = set()\n return data",
"def strip_empty_optional_fields(object_dict):\n return {k: v for k, v in object_dict.items() if v is not None}",
"def replace_empty_value(data: list) -> list:\n for book in data:\n for attribute in book:\n if book[attribute] == \"\":\n book[attribute] = \"None\"\n return data",
"def setAllOptional(self):\n for att in self._attributes.values():\n att.outputOptional = True",
"def clear_attributes(self):\n self.attrs = etad.AttributeContainer()",
"def test_del_attribute_is_assigned_properly(self):\r\n class DelModel(Model):\r\n id = columns.UUID(primary_key=True, default=lambda:uuid4())\r\n key = columns.Integer(primary_key=True)\r\n data = columns.Integer(required=False)\r\n\r\n model = DelModel(key=4, data=5)\r\n del model.data\r\n with self.assertRaises(AttributeError):\r\n del model.key",
"def unload(self) -> None:\n for attr in self._attrs:\n setattr(self, attr, None)",
"def get_non_null_attributes(self) -> dict:\n return {\n key: getattr(self, key, None)\n for key in sorted(self.attributes)\n if getattr(self, key, None) is not None\n }",
"def _remove_empty(self, data, many):\n if not many:\n for key in list(data):\n if key == 'versions':\n data.pop(key)\n\n return {\n key: value for key, value in data.items()\n if value is not None\n }\n for item in data:\n for key in list(item):\n if (key == 'versions') or (item[key] is None):\n item.pop(key)\n\n return data",
"def del_none(d):\n for key, value in list(d.items()):\n if value is None or value == \"\":\n del d[key]\n elif isinstance(value, dict):\n del_none(value)\n return d",
"def remove_empty_fields(obj: dict) -> dict:\n return {k: v for k, v in obj.items() if v is not None}",
"def clear(self, attrname):\n self.__dict__['_'+attrname] = False",
"def trim_data(data, attributes):\n return data.drop(attributes, axis=1)",
"def remove_empty_fields(data_):\n if isinstance(data_, dict):\n for key, value in data_.items():\n\n # Dive into a deeper level.\n if isinstance(value, dict) or isinstance(value, list):\n value = remove_empty_fields(value)\n\n # Delete the field if it's empty.\n if value in [\"\", None, [], {}]:\n del data_[key]\n\n elif isinstance(data_, list):\n for index in reversed(range(len(data_))):\n value = data_[index]\n\n # Dive into a deeper level.\n if isinstance(value, dict) or isinstance(value, list):\n value = remove_empty_fields(value)\n\n # Delete the field if it's empty.\n if value in [\"\", None, [], {}]:\n data_.pop(index)\n\n return data_",
"def deleteAttr(*args, attribute: AnyStr=\"\", name: AnyStr=\"\", q=True, query=True, e=True,\n edit=True, **kwargs)->Union[None, Any]:\n pass",
"def popattr(obj, attr, default=NOT_PROVIDED):\n val = getattr(obj, attr, default)\n try:\n delattr(obj, attr)\n except AttributeError:\n if default is NOT_PROVIDED:\n raise\n return val",
"def clear_cached_attributes(self):\n setattr(self, '_atoms', None)\n setattr(self, '_bonds', None)\n setattr(self, '_rings', None)\n setattr(self, '_ring_systems', None)"
]
| [
"0.7082738",
"0.658809",
"0.6563625",
"0.6503828",
"0.64280677",
"0.63774556",
"0.62657505",
"0.62206036",
"0.6208545",
"0.6182857",
"0.6078976",
"0.60499203",
"0.6038751",
"0.60058826",
"0.59697735",
"0.5966007",
"0.5949277",
"0.5938485",
"0.5923254",
"0.59182173",
"0.59134144",
"0.5886275",
"0.5849485",
"0.5785684",
"0.5785612",
"0.5741919",
"0.5727524",
"0.566861",
"0.56323147",
"0.5628371"
]
| 0.73840094 | 0 |
Return a list of attributes that should always be excluded when the data record is serialized. By default, an empty list is returned. | def excluded(cls):
return [] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def attributes(self):\n attr = super(BaseDataRecord, self).attributes()\n return [a for a in attr if a not in self.excluded()]",
"def clean_attributes(self):\n attrs = {}\n\n # Only fetch the fields we need.\n for a in self.attributes.only('name', 'value', 'attribute').iterator():\n if a.attribute.multi:\n if a.name not in attrs:\n attrs[a.name] = []\n attrs[a.name].append(a.value)\n else:\n attrs[a.name] = a.value\n self._attributes_cache = attrs # Cache the attributes\n\n return attrs",
"def json_ignore_attrs():\n return ['metadata']",
"def get_sensitive_attributes(self):\n return self.sensitive_attrs",
"def get_attributes(cls):\r\n return []",
"def get_non_null_attributes(self) -> dict:\n return {\n key: getattr(self, key, None)\n for key in sorted(self.attributes)\n if getattr(self, key, None) is not None\n }",
"def get_attribute_list(self):\n attributes = [attr for attr in vars(self.entries[0]) if not attr.startswith('__')]\n return attributes",
"def get_non_redundant_entity_attributes(self):\n return self.__non_redundant_entity_attributes",
"def attrs(self):\n return list(name for name in self.__dict__\n if not name.startswith(\"_\"))",
"def _get_inactive_attributes(self):\n\n active_attributes = self.active_attributes\n inactive_attributes = self.all_attributes\n for attribute in active_attributes:\n if attribute in inactive_attributes:\n inacative_attributes.remove(attribute)\n return inactive_attributes",
"def attributes(self):\n attrs_ = [\"name\", \"type\"]\n if self.exclusive:\n attrs_.append(\"exclusive\")\n if self.default is not None:\n attrs_.append(\"default\")\n\n return attrs_",
"def _yamlSpeciallyHandledAttributes(self):\n return []",
"def _yamlData(self):\n data = dict([(key, value)\n for key, value in self.__dict__.iteritems()\n if ((key in self._yamlAttributeKeys)\n and (key not in self._yamlSpeciallyHandledAttributes))])\n data.update(self._preservedExtraAttributes)\n return data",
"def strip_unwanted_fields(self, data, many, **kwargs):\n unwanted_fields = [\"resource_type\"]\n for field in unwanted_fields:\n if field in data:\n data.pop(field)\n return data",
"def exclude_list(self):\n pass",
"def __listAttr(self):\n attr = dir(self) # already sorted\n filter = []\n for name in attr:\n if name[:2] == '__': pass\n elif name[:10] == '_HelpDoc__': pass # used to mask private attr\n elif name in self.__exclude: pass\n else: filter.append(name)\n return filter",
"def get_excluded_observations(self):\n\n return copy.deepcopy(self._excluded_observations)",
"def filter_excluded_fields(fields, Meta, exclude_dump_only):\n exclude = getattr(Meta, \"exclude\", [])\n if exclude_dump_only:\n exclude += getattr(Meta, \"dump_only\", [])\n\n filtered_fields = OrderedDict(\n (key, value) for key, value in fields.items() if key not in exclude\n )\n\n return filtered_fields",
"def get_empty_fields(self):\n return [f for f in self.__dict__ if not self.__dict__[f]]",
"def attributes(self):\n\n return list(self._attributes.values())",
"def _filter(self):\n for attr in self:\n short_name_in_ignores = attr.shortName() in self.ignores\n long_name_in_ignores = attr.longName() in self.ignores\n\n if not (short_name_in_ignores or long_name_in_ignores):\n yield attr",
"def get_optional(self) -> list[TypedDictAttribute]:\n result: list[TypedDictAttribute] = []\n for child in self.children:\n if not child.is_required():\n result.append(child)\n return result",
"def serializeWithoutItems(self):\r\n return {\r\n 'name': self.name,\r\n 'id': self.id,\r\n }",
"def get_attributes(cls):\r\n return [Attribute('file'),\r\n Attribute('missing', None)]",
"def _attrs(self):\n for field in self.model._meta.get_fields():\n if isinstance(field, (models.OneToOneField, models.ManyToOneRel)):\n # Skip non-field attributes\n continue\n if field is self._geom_field:\n # Skip the geometry field, which is not an attribute\n continue\n yield field",
"def is_excluded(self, attr_name, request):\n return False",
"def attributes(self):\n\n attributes = []\n\n for member in self.members:\n if member.attribute:\n attributes.append(member)\n\n return attributes",
"def attributes(self) -> Set[str]:\n return set()",
"def uncleanable():\n data = attrdict.AttrDict()\n data.backup_ids = set()\n data.image_ids = set()\n data.keypair_ids = set()\n data.server_ids = set()\n data.nodes_ids = set()\n data.chassis_ids = set()\n data.snapshot_ids = set()\n data.transfer_ids = set()\n data.volume_ids = set()\n return data",
"def _drop_protected_attrs(model_class, values):\n for attr in model_class.__protected_attributes__:\n if attr in values:\n del values[attr]"
]
| [
"0.8822823",
"0.6527074",
"0.6519765",
"0.6508539",
"0.6495695",
"0.64912015",
"0.64014137",
"0.6366201",
"0.63595825",
"0.62454665",
"0.62342465",
"0.6213601",
"0.6187153",
"0.6184186",
"0.6173262",
"0.61586994",
"0.612149",
"0.6110278",
"0.6095926",
"0.6071295",
"0.60674345",
"0.6044776",
"0.6022751",
"0.6015125",
"0.60111076",
"0.5995541",
"0.59861887",
"0.5981652",
"0.5979883",
"0.59610367"
]
| 0.68170273 | 1 |
Creates a new LabeledFileRecord instance. | def __init__(self, file_path, label):
self.file_path = file_path
self.label = label
super(LabeledFileRecord, self).__init__() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_record(filepath, linear=True, name=\"unnamed\", file_format=\"auto\"):\n if file_format != \"auto\":\n record = SeqIO.read(filepath, file_format)\n elif filepath.lower().endswith((\"gb\", \"gbk\")):\n record = SeqIO.read(filepath, \"genbank\")\n elif filepath.lower().endswith((\"fa\", \"fasta\")):\n record = SeqIO.read(filepath, \"fasta\")\n elif filepath.lower().endswith(\".dna\"):\n record = snapgene_file_to_seqrecord(filepath)\n else:\n raise ValueError(\"Unknown format for file: %s\" % filepath)\n record.linear = linear\n if name != \"unnamed\":\n record.id = name\n record.name = name.replace(\" \", \"_\")[:20]\n return record",
"def _new_record():\n nonlocal key\n nonlocal value_list\n nonlocal record\n nonlocal origin\n nonlocal field_offset_map\n key = None\n value_list = None\n if source is not None:\n origin = Origin(source, None, None)\n field_offset_map = {}\n record = RFC822Record(data_cls(), origin, data_cls(), field_offset_map)",
"def from_label_file(cls, label_file_path, out_path=FEATURES_DATA_PATH, source_path=RAW_DATA_PATH):\n df = pd.read_csv(label_file_path)\n filenames = df['filename']\n labels = df['label']\n return cls(filenames, labels, out_path=out_path, source_path=source_path)",
"def __init__(self, fp):\r\n ThriftRecordIO.assert_has_thrift()\r\n RecordIO.Writer.__init__(self, fp, ThriftRecordIO.ThriftCodec())",
"def __init__(self, path, filename_template=\"record_${__index}.yml\", expand=False,\r\n filename_start_index=0, truncate=False):\r\n\r\n self.filename_template = filename_template\r\n self.filename_start_index = filename_start_index\r\n self.path = path\r\n self.expand = expand\r\n self.truncate = truncate",
"def from_genbank(cls, filename):\n\t\tseq_record = SeqIO.read(filename, 'genbank')\n\t\trec = cls(seq_record=seq_record)\n\t\treturn rec",
"def record(self, pdb_filename) :\n data = self.process_pdb(pdb_filename) \n return StrideRecord(StringIO(data))",
"def create_example(filename, sample_rate, load_audio_with_librosa):\n wav_data = tf.gfile.Open(filename, 'rb').read()\n example_list = list(\n audio_label_data_utils.process_record(\n wav_data=wav_data,\n sample_rate=sample_rate,\n ns=music_pb2.NoteSequence(),\n # decode to handle filenames with extended characters.\n example_id=six.ensure_text(filename, 'utf-8'),\n min_length=0,\n max_length=-1,\n allow_empty_notesequence=True,\n load_audio_with_librosa=load_audio_with_librosa))\n assert len(example_list) == 1\n return example_list[0].SerializeToString()",
"def from_dict(cls, the_dict):\n\n if not isinstance(the_dict, dict):\n raise TypeError('This requires a dict. Got type {}'.format(type(the_dict)))\n if 'label_schema' not in the_dict:\n raise KeyError('this dictionary must contain a label_schema')\n\n typ = the_dict.get('type', 'NONE')\n if typ != cls._type:\n raise ValueError('FileLabelCollection cannot be constructed from the input dictionary')\n\n return cls(\n the_dict['label_schema'],\n version=the_dict.get('version', 'UNKNOWN'),\n annotations=the_dict.get('annotations', None),\n image_file_name=the_dict.get('image_file_name', None),\n image_id=the_dict.get('image_id', None),\n core_name=the_dict.get('core_name', None))",
"def create_tfrecord(self, tfrecord_filename, begin=0, einde=-1):\n # Call imagesToTfRecord class to build dataset and store in TFRecord\n T2.writeRecord(tfrecord_filename, TRAIN_DIR, begin=begin, einde=einde)",
"def from_prodigal_output(\n cls, prodigal_faa: Path, output_file: Path = None\n ) -> LabelledFASTA:\n number_prodigal_record_fields = 9\n prodigal_faa = Path(prodigal_faa)\n if output_file is None:\n output_file = prodigal_faa.parent / f\"{prodigal_faa.stem}_longlabels.fasta\"\n else:\n output_file = Path(output_file)\n data = pyfastx.Fasta(prodigal_faa.as_posix(), build_index=False, full_name=True)\n with open(output_file, \"w+\", encoding=\"UTF-8\") as outfile:\n for record_name, record_seq in data:\n name_list = record_name.split(\" \")\n if len(name_list) < number_prodigal_record_fields:\n logger.error(\n f\"Invalid prodigal header format for record: {record_name}\"\n )\n sys.exit(1)\n contig = \"_\".join(name_list[0].split(\"_\")[:-1])\n gene_number = name_list[0].split(\"_\")[-1]\n start, end = name_list[2], name_list[4]\n strand = (\n \"pos\"\n if name_list[6] == \"1\"\n else (\"neg\" if name_list[6] == \"-1\" else \"\")\n )\n header = f\">{contig}_{gene_number}__{contig}_{gene_number}_{start}_{end}_{strand}\"\n outfile.write(header + \"\\n\")\n outfile.write(record_seq + \"\\n\")\n logging.shutdown()\n return cls(output_file)",
"def NewRecord(self, default={}):\n return HEP.JSONReferenceObject(self.data.get('metadata', {}).get('new_record', default))",
"def from_laspy_File(cls, f):\n return cls((f.x, f.y, f.z), header=f.header.copy())",
"def init_from_pickle_file(cls, filename):\n with open(filename, 'rb') as f:\n loaded_tape = pickle.load(f)\n instance = cls(metrics_to_record=loaded_tape.keys())\n instance.tape = loaded_tape\n return instance",
"def __init__(self, record=None):\n self.record = record",
"def to_record(\n self,\n filepath=None,\n features_type=\"misc_feature\",\n with_original_features=True,\n with_original_spec_features=False,\n with_constraints=True,\n with_objectives=True,\n with_sequence_edits=False,\n colors_dict=None,\n use_short_labels=True,\n record_id = None\n ):\n record = sequence_to_biopython_record(self.sequence)\n if record_id is not None:\n record.id = record_id\n\n record.features = []\n if with_constraints:\n record.features += [\n cst.to_biopython_feature(\n role=\"constraint\",\n feature_type=features_type,\n colors_dict=colors_dict,\n use_short_label=use_short_labels,\n )\n for cst in self.constraints\n if cst.__dict__.get(\"location\", False)\n ]\n if with_objectives:\n record.features += [\n obj.to_biopython_feature(\n role=\"objective\",\n feature_type=features_type,\n colors_dict=colors_dict,\n use_short_label=use_short_labels,\n )\n for obj in self.objectives\n ]\n if with_original_features and (self.record is not None):\n record.features += [\n f\n for f in self.record.features\n if with_original_spec_features\n or not find_specification_label_in_feature(f)\n ]\n if with_sequence_edits:\n record.features += self.sequence_edits_as_features()\n\n if filepath is not None:\n write_record(record=record, target=filepath, file_format=\"genbank\")\n else:\n return record",
"def __init__(self, file_object):\n # Read object or file.\n if not hasattr(file_object, 'read'):\n self.filename = file_object\n self.file = open(file_object, 'rb')\n else:\n self.filename = None\n self.file = file_object\n # Set the offset to the record.\n self.record_offset = 0\n # Parse the header.\n self._parseHeader()",
"def __init__(self, filename, binary_file=None):\n BaseRawIO.__init__(self)\n self.filename = filename\n self.binary_file = binary_file",
"def _record(self):\n record_attr = {\n 'name': 'test_record',\n 'level': 'ERROR',\n 'pathname': '/test/path',\n 'msg': 'This is a test record.',\n }\n record = logging.makeLogRecord(record_attr)\n return record",
"def read_pkl(infile):\n # loading\n if infile is not None:\n logging.info('No pkl provided. Creating a new object')\n pkl = pickle.load(bz2.open(infile, 'r'))\n else:\n logging.info('Reading in: {}'.format(infile))\n # creating new object\n pkl = {'taxonomy' : {}, 'markers' : {}}\n return pkl",
"def __init__(self, filename, listfile=True):\n if hasattr(filename, 'read'):\n self.file = filename\n else:\n self.file = open(filename, 'rb')\n self.header = self.read_header()\n self.hash_table = self.read_table('hash')\n self.block_table = self.read_table('block')\n if listfile:\n self.files = self.read_file('(listfile)').splitlines()\n else:\n self.files = None",
"def __init__(self, video_path, label, group=no_default):\n super(LabeledVideoRecord, self).__init__(video_path, label)\n self.group = group",
"def new_record(self, values=None):\n return Record(schema=self.table_schema, values=values)",
"def __init__(self, seq_record=None):\n\t\tself._record = seq_record",
"def __init__(self, fp, thrift_base):\r\n ThriftRecordIO.assert_has_thrift()\r\n if not thrift_base:\r\n raise ThriftRecordIO.ThriftUnsuppliedException(\r\n 'Must construct ThriftRecordReader with valid thrift_base!')\r\n RecordIO.Reader.__init__(self, fp, ThriftRecordIO.ThriftCodec(thrift_base))",
"def magic_init(cls, feature_path=FEATURES_DATA_PATH, raw_path=RAW_DATA_PATH,\n raw_label_filename='labels.csv'):\n\n from features import AVAILABLE_FEATURES\n out_path = feature_path\n if cls.dependency_feature_name:\n # source path is in feature path\n dependency_extractor = AVAILABLE_FEATURES[cls.dependency_feature_name]\n source_path = feature_path / dependency_extractor.feature_name\n label_file_name = dependency_extractor.get_label_file_name()\n else:\n # source path is raw data path\n source_path = raw_path\n label_file_name = raw_label_filename\n\n label_path = source_path / label_file_name\n print('info: read metadata from {}'.format(label_path))\n print('info: init extractor from {} to {}'.format(source_path, out_path))\n df = pd.read_csv(label_path)\n filenames = df['filename']\n labels = df['label']\n print('info: got filenames {}'.format(filenames))\n return cls(filenames, labels, out_path=out_path, source_path=source_path, feature_path=feature_path,\n raw_path=raw_path)",
"def label_record_pair(self, label, record_pair):\n\n if label == 'y':\n self.labeled_examples['match'].append(record_pair)\n elif label == 'n':\n self.labeled_examples['distinct'].append(record_pair)\n elif label == 'u':\n record_pair = ()\n elif label == 'f':\n print('Finished labeling')\n self.__create_uncertain_pairs_file()",
"def __init__(self, node_id: str, ledger: Ledger):\n self.ledger = ledger\n self.node_id = node_id\n filename = '../files/blockchain' + node_id\n self.file_path = filename + '.txt'\n self.pickle_path = filename + '.pickle'\n self.blockchain = []\n self.saved_blocks = []\n self.create_or_read_file()",
"def __init__(self, fn, name=\"No name\", labels=None, labels_in_file=False):\n self.filename = fn\n self.name = name\n\n (base, extension) = os.path.splitext(self.filename)\n if extension == \".mp3\":\n try:\n print \"Creating wav from {}\".format(self.filename)\n new_fn = base + '.wav'\n subprocess.check_output(\"lame --decode \\\"{}\\\" \\\"{}\\\"\".format(\n self.filename, new_fn), shell=True)\n self.filename = new_fn\n except:\n print \"Could not create wav from mp3\"\n raise\n\n self.sound = Sndfile(self.filename, 'r')\n self.current_frame = 0\n self.channels = self.sound.channels\n\n if labels is not None and labels_in_file:\n raise Exception(\n \"Must only define one of labels and labels_in_file\")\n if labels_in_file and not LIBXMP:\n raise Exception(\n \"Cannot use labels_in_file without python-xmp-toolkit\")\n if labels_in_file and LIBXMP:\n self.labels = self._extract_labels(fn)\n else:\n self.labels = labels",
"def __init__(self, filename, block_size=10):\n # Store the given block size\n self.blockSize = block_size\n\n # Size of record will be the size of the header and the block itself\n record_size = self.HEADER_SIZE + block_size\n\n # Initialize using generic base class\n super(StringStore, self).__init__(filename, record_size)"
]
| [
"0.60124975",
"0.5927857",
"0.57211214",
"0.53448856",
"0.5313057",
"0.5285396",
"0.52336323",
"0.5231617",
"0.5209988",
"0.5197654",
"0.5194985",
"0.5130312",
"0.5118215",
"0.5106813",
"0.50936395",
"0.5091654",
"0.50895816",
"0.50687593",
"0.5065505",
"0.50498515",
"0.5043115",
"0.5035216",
"0.5028336",
"0.50002736",
"0.49967065",
"0.49949235",
"0.4955999",
"0.49527204",
"0.49373245",
"0.4936168"
]
| 0.6933694 | 0 |
Creates a LabeledVideoRecord instance. | def __init__(self, video_path, label, group=no_default):
super(LabeledVideoRecord, self).__init__(video_path, label)
self.group = group | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _new_record():\n nonlocal key\n nonlocal value_list\n nonlocal record\n nonlocal origin\n nonlocal field_offset_map\n key = None\n value_list = None\n if source is not None:\n origin = Origin(source, None, None)\n field_offset_map = {}\n record = RFC822Record(data_cls(), origin, data_cls(), field_offset_map)",
"def bytestring_to_record(example):\n rec = tf.train.SequenceExample.FromString(example)\n start_time = rec.context.feature[START_TIME].float_list.value[0]\n vid_id = rec.context.feature[VIDEO_ID].bytes_list.value[0].decode('utf-8')\n labels = list(rec.context.feature[LABELS].int64_list.value)\n data = rec.feature_lists.feature_list[AUDIO_EMBEDDING_FEATURE_NAME]\n features = [b.bytes_list.value for b in data.feature]\n features = np.asarray([np.frombuffer(_[0], dtype=np.uint8)\n for _ in features])\n if features.ndim == 1:\n raise ValueError(\"Caught unexpected feature shape: {}\"\n .format(features.shape))\n\n rows = [{VIDEO_ID: vid_id, LABELS: labels, TIME: np.uint16(start_time + t)}\n for t in range(len(features))]\n\n return features, pd.DataFrame.from_records(data=rows)",
"def _create_example(self):\n source = np.random.randn(self.batch_size, self.max_decode_length,\n self.input_depth)\n source_len = np.random.randint(0, self.max_decode_length, [self.batch_size])\n target_len = np.random.randint(0, self.max_decode_length * 2,\n [self.batch_size])\n target = np.random.randn(self.batch_size,\n np.max(target_len), self.input_depth)\n labels = np.random.randint(0, self.vocab_size,\n [self.batch_size, np.max(target_len) - 1])\n\n example_ = namedtuple(\n \"Example\", [\"source\", \"source_len\", \"target\", \"target_len\", \"labels\"])\n return example_(source, source_len, target, target_len, labels)",
"def _create_recording_frame(self, subject):\r\n rec_label_frame = LabelFrame(master=self, text=subject, font=FONT_MEDIUM, bd=5)\r\n left_side = Frame(master=rec_label_frame)\r\n\r\n # 4 buttons in a button frame\r\n buttons_frame = Frame(master=left_side)\r\n Button(master=buttons_frame, text=f\"Record {subject}\", height=2, width=13, font=FONT_SMALL,\r\n command=lambda: self._start_rec_by_subject(subject)).pack(side=LEFT, padx=10)\r\n\r\n Button(master=buttons_frame, text=\"Pause\", height=2, width=10, font=FONT_SMALL,\r\n command=lambda: self._pause_recording(subject)).pack(side=LEFT, padx=10)\r\n\r\n Button(master=buttons_frame, text=\"Resume\", height=2, width=10, font=FONT_SMALL,\r\n command=lambda: self._resume_recording(subject)).pack(side=LEFT, padx=10)\r\n\r\n Button(master=buttons_frame, text=\"Finish\", height=2, width=10, font=FONT_SMALL,\r\n command=lambda: self._finish_rec_by_subject(subject)).pack(side=LEFT, padx=10)\r\n\r\n buttons_frame.pack(pady=30)\r\n left_side.pack(side=LEFT)\r\n\r\n # time and name labels\r\n self._create_time_and_name_labels(left_side, subject)\r\n\r\n rec_label_frame.pack(fill=BOTH, pady=30)",
"def __init__(self, seq_record=None):\n\t\tself._record = seq_record",
"def __init__(self, file_path, label):\n self.file_path = file_path\n self.label = label\n super(LabeledFileRecord, self).__init__()",
"def create(cls, dump, model, pid_provider, legacy_id_key=\"legacy_recid\"):\n record = cls.create_record(\n dump, model, pid_provider, legacy_id_key=legacy_id_key\n )\n return record",
"def __init__(self, record=None):\n self.record = record",
"def __new__(cls, player, data):\n track = player.get_current()\n reason = data[LAVALINK_KEY_END_REASON]\n \n self = object.__new__(cls)\n self.player = player\n self.track = track\n self.reason = reason\n return self",
"def create_lmdb_for_vimeo90k_bd():\n # LQ (blur-downsampled, BD)\n folder_path = 'trainsets/vimeo90k/vimeo_septuplet_BDLRx4/sequences'\n lmdb_path = 'trainsets/vimeo90k/vimeo90k_train_BDLR7frames.lmdb'\n train_list_path = 'trainsets/vimeo90k/vimeo_septuplet/sep_trainlist.txt'\n img_path_list, keys = prepare_keys_vimeo90k(folder_path, train_list_path, 'lq')\n make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys, multiprocessing_read=True)",
"def to_seq_record(self):\n\t\t#create the anotations in a pythonic manner\n\t\texempt = ['name', 'description', 'features', 'sequence'] #things which aren't annotations\n\t\tannotations = { }\n\t\tfor key, value in self.__dict__.iteritems():\n\t\t\tif key.lower() not in exempt:\n\t\t\t\tannotations[key] = value\n\t\t\n\t\t#create the features\n\t\tfeatures = []\n\t\tfor feat in self.features:\n\t\t\tfeatures.append( SeqFeature( \n\t\t\t\tlocation = FeatureLocation(feat['startpos'] - 1, feat['endpos']), #NB partsregistry uses 1-offset, and inclusive.\n\t\t\t\ttype = feat['type'],\n\t\t\t\tstrand = feat['strand'],\n\t\t\t\tqualifiers = {'title': feat['name'],}))\n\t\t\n\t\treturn SeqRecord(\tself.sequence, \n\t\t\t\t\t\t\tid=self.name,\n\t\t\t\t\t\t\tname=self.name,\n\t\t\t\t\t\t\tdescription=self.description,\n\t\t\t\t\t\t\tfeatures=features,\n\t\t\t\t\t\t\tannotations=annotations)",
"def new_record(self, values=None):\n return Record(schema=self.table_schema, values=values)",
"def NewRecord(self, default={}):\n return HEP.JSONReferenceObject(self.data.get('metadata', {}).get('new_record', default))",
"def label_record_pair(self, label, record_pair):\n\n if label == 'y':\n self.labeled_examples['match'].append(record_pair)\n elif label == 'n':\n self.labeled_examples['distinct'].append(record_pair)\n elif label == 'u':\n record_pair = ()\n elif label == 'f':\n print('Finished labeling')\n self.__create_uncertain_pairs_file()",
"def create_label(**kwargs):\n Label = Entity.Label\n kwargs[Label.project] = project\n kwargs[Label.seconds_to_label] = kwargs.get(Label.seconds_to_label.name,\n 0.0)\n data = {\n Label.attribute(attr) if isinstance(attr, str) else attr:\n value.uid if isinstance(value, DbObject) else value\n for attr, value in kwargs.items()\n }\n query_str, params = query.create(Label, data)\n query_str = query_str.replace(\n \"data: {\", \"data: {type: {connect: {name: \\\"Any\\\"}} \")\n res = project.client.execute(query_str, params)\n return Label(project.client, res[\"createLabel\"])",
"def __new__(cls, player, data):\n track = player.get_current()\n threshold = data[LAVALINK_KEY_THRESHOLD_MS]*1000.0\n \n self = object.__new__(cls)\n self.player = player\n self.track = track\n self.threshold = threshold\n return self",
"def BriefDescriptorExtractor_create(bytes=None, use_orientation=None): # real signature unknown; restored from __doc__\n pass",
"def generate_frame(video_path, video_name, second, label, dest_path):\n print \"video_path\", video_path\n print 'video_name',video_name\n print 'second',second\n print 'label',label\n print 'dest_path',dest_path\n\n vidcap = cv2.VideoCapture(os.path.join(video_path, video_name))\n vidcap.set(0, int(second*1000))\n success, image = vidcap.read()\n if success:\n cv2.imwrite(os.path.join(dest_path, video_name+\"_\"+str(second)+\"_\"+str(label)+\".jpg\"), image)",
"def load_record(filepath, linear=True, name=\"unnamed\", file_format=\"auto\"):\n if file_format != \"auto\":\n record = SeqIO.read(filepath, file_format)\n elif filepath.lower().endswith((\"gb\", \"gbk\")):\n record = SeqIO.read(filepath, \"genbank\")\n elif filepath.lower().endswith((\"fa\", \"fasta\")):\n record = SeqIO.read(filepath, \"fasta\")\n elif filepath.lower().endswith(\".dna\"):\n record = snapgene_file_to_seqrecord(filepath)\n else:\n raise ValueError(\"Unknown format for file: %s\" % filepath)\n record.linear = linear\n if name != \"unnamed\":\n record.id = name\n record.name = name.replace(\" \", \"_\")[:20]\n return record",
"def create_record(\n cls, dump, model, pid_provider, legacy_id_key=\"legacy_recid\"\n ):\n import ipdb\n\n ipdb.set_trace()\n\n if legacy_id_key is None:\n legacy_id_key = \"pid\"\n try:\n with db.session.begin_nested():\n record_uuid = uuid.uuid4()\n provider = pid_provider.create(\n object_type=\"rec\",\n object_uuid=record_uuid,\n )\n dump[\"pid\"] = provider.pid.pid_value\n record = model.create(dump, record_uuid)\n record.commit()\n db.session.commit()\n return record\n except IlsValidationError as e:\n click.secho(\"VALIDATION ERROR\", fg=\"blue\")\n click.secho(\n \"RECID {0} did not pass validation. ERROR: \\n {1}\".format(\n dump[legacy_id_key],\n [\n \"{0}: {1}\".format(\n error.res[\"field\"], error.res[\"message\"]\n )\n for error in e.errors\n ],\n ).join(\"\\n\"),\n fg=\"blue\",\n )\n click.secho(e.original_exception.message, fg=\"blue\")\n db.session.rollback()\n raise e",
"def from_record(\n cls,\n record,\n specifications_dict=\"default\",\n logger=\"bar\",\n extra_constraints=(),\n extra_objectives=(),\n ):\n # unfortunately the local import below is the most elegant found so\n # far. builtin_specifications cannot be imported at the top of this\n # file as some built-in specifications use DnaOptimizationProblem\n # internally to resolve constructs (see EnforcePatternOccurences)\n if isinstance(record, str):\n record = load_record(record)\n parameters = dict(\n sequence=record,\n constraints=[] + list(extra_constraints), # shallow copy\n objectives=[] + list(extra_objectives), # shallow copy\n logger=logger,\n )\n for feature in record.features:\n if feature.type != \"misc_feature\":\n continue\n label = find_specification_label_in_feature(feature)\n if label is None:\n continue\n specs = Specification.list_from_biopython_feature(\n feature, specifications_dict=specifications_dict\n )\n for role, specification in specs:\n parameters[role + \"s\"].append(specification)\n return cls(**parameters)",
"def __init__(self, video: cv2.VideoCapture):\n self.video = video",
"def __init__(self, video_w, video_h, video_fps, tracking_result):\n self.font = cv2.FONT_HERSHEY_SIMPLEX\n self.fontScale = 1\n self.thickness = 2\n self.thicknessUpdate = 3\n self.color = (238, 221, 192) # A surfrider color\n self.icons = get_icons()\n self.classes_to_icons = {'bottles':self.icons[0], 'fragments':self.icons[1], 'others':self.icons[2]}\n self.video_w = video_w\n self.video_h = video_h\n self.video_fps = video_fps\n self.tracking_result = tracking_result\n self.detection_image_size = (1024, 768)\n self.frames_to_boxes_dict = None\n self.frames_to_update_hud = None",
"def create_from_pb2(cls, pb2_obj: _DetectionProto) -> 'Detection':\n categories = []\n keypoints = []\n\n for idx, score in enumerate(pb2_obj.score):\n categories.append(\n category_module.Category(\n score=score,\n index=pb2_obj.label_id[idx]\n if idx < len(pb2_obj.label_id)\n else None,\n category_name=pb2_obj.label[idx]\n if idx < len(pb2_obj.label)\n else None,\n display_name=pb2_obj.display_name[idx]\n if idx < len(pb2_obj.display_name)\n else None,\n )\n )\n\n if pb2_obj.location_data.relative_keypoints:\n for idx, elem in enumerate(pb2_obj.location_data.relative_keypoints):\n keypoints.append(\n keypoint_module.NormalizedKeypoint(\n x=elem.x,\n y=elem.y,\n label=elem.keypoint_label,\n score=elem.score,\n )\n )\n\n return Detection(\n bounding_box=bounding_box_module.BoundingBox.create_from_pb2(\n pb2_obj.location_data.bounding_box\n ),\n categories=categories,\n keypoints=keypoints,\n )",
"def __init__(self, record_video=True, video_name='video.avi', lower_color=(20, 80, 20), upper_color=(30, 255, 255)):\n self.video = cv2.VideoCapture(0)\n\n # We need to check if camera \n # is opened previously or not \n if not self.video.isOpened():\n print(\"Error reading video file\")\n\n # We need to set resolutions.\n # so, convert them from float to integer. \n self.frame_width = int(self.video.get(3))\n self.frame_height = int(self.video.get(4))\n self.fps = self.video.get(cv2.CAP_PROP_FPS)\n self.size = (self.frame_width, self.frame_height)\n # Below VideoWriter object will create \n # a frame of above defined The output \n # is stored in file with the name stored in self.video_name.\n self.record_video = record_video\n if self.record_video:\n self.video_result = cv2.VideoWriter(video_name, cv2.VideoWriter_fourcc(*'MJPG'), self.fps, self.size)\n\n # define the lower and upper boundaries of the colored\n # ball in the HSV color space\n self.lower_color = lower_color\n self.upper_color = upper_color\n self.x = 0\n self.y = 0\n self.is_ball_visible = False\n self.radius = 10",
"def _record(self):\n record_attr = {\n 'name': 'test_record',\n 'level': 'ERROR',\n 'pathname': '/test/path',\n 'msg': 'This is a test record.',\n }\n record = logging.makeLogRecord(record_attr)\n return record",
"def __init__(self, rec):\n self.chr1 = chromosome(rec.chrom)\n self.pos1 = int(rec.start) + 1\n self.pos2= int(rec.stop)\n self.type = rec.info[\"SVTYPE\"]\n if self.type == \"BND\":\n self.chr2=chromosome(rec.info[\"CHR2\"])\n else:\n self.chr2=self.chr1\n\n self.name = \"%s(%s:%s-%s:%s)\" % (\n self.type,\n self.chr1,\n self.pos1,\n self.chr2,\n self.pos2,\n )",
"def log_video(self, trajectory_records, epoch):\n trajectory_rendering = trajectory_records\n video = np.transpose(trajectory_rendering, [0, 3, 1, 2])\n self.training_logger.log_video(\n np.expand_dims(video, axis=0),\n 'what_the_policy_looks_like',\n epoch)",
"def create_video(\n db: Session,\n cloud: str, bucket: str, key: str,\n etag: str = None, mime_type: str = None, size: int = None,\n duration: int = None, width: int = None, height: int = None,\n persistent_id: str = None) ->VideoModel:\n try:\n video = VideoModel(\n cloud=cloud, bucket=bucket, key=key,\n etag=etag, mime_type=mime_type, size=size,\n duration=duration, width=width, height=height,\n persistent_id=persistent_id,\n status=0,\n create_ts=current_timestamp()\n )\n db.add(video)\n db.commit()\n return video\n\n except IntegrityError as exc:\n db.rollback()\n if is_duplicate_entry_exception(exc):\n return video\n else:\n raise",
"def make_video_test_example(image_shape: Sequence[int] = (263, 320, 3),\n audio_shape: Sequence[int] = (10, 256),\n label: int = 42):\n raw_image_bytes = make_image_bytes(shape=image_shape)\n random_audio = np.random.normal(size=audio_shape).tolist()\n\n seq_example = tf.train.SequenceExample()\n put_int64_to_context(seq_example, label=label, key=LABEL_KEY)\n put_bytes_list_to_feature(\n seq_example, raw_image_bytes, key=IMAGE_KEY, repeat_num=4)\n\n put_float_list_to_feature(seq_example, value=random_audio, key=AUDIO_KEY)\n return seq_example"
]
| [
"0.5313232",
"0.5147112",
"0.51127493",
"0.5046695",
"0.49939334",
"0.49845952",
"0.4954849",
"0.4920668",
"0.48525804",
"0.48499542",
"0.481874",
"0.4812749",
"0.47554517",
"0.47434902",
"0.47304296",
"0.47288856",
"0.47284198",
"0.47124922",
"0.47115338",
"0.46898627",
"0.46778482",
"0.46671742",
"0.46575707",
"0.462964",
"0.46265453",
"0.4625099",
"0.461716",
"0.46004516",
"0.45979586",
"0.4596975"
]
| 0.7032111 | 0 |
Given the code, return the corresponding rules. | def find_rules_by_code(self, code):
try:
if(len(code)):
self.all_result = self.join_field_rule(ret_all=True).next()
if(self.all_result):
try:
return self.all_result.get(code)
except KeyError as error:
return str(code) + str("Not found")
except:
return 'error looking for records'
else:
return "Code is Required"
except:
raise | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _score_code(self, code):\n # Get list of 2-tuples, each containing an input sequence and an output\n # sequence.\n io_seqs = self.task.make_io_set()\n terminal_reward = 0.0\n results = []\n reason = 'correct'\n for input_seq, output_seq in io_seqs:\n eval_result = bf.evaluate(\n code, input_buffer=input_seq, timeout=0.1,\n max_steps=self.max_execution_steps,\n base=self.task.base,\n require_correct_syntax=self.require_correct_syntax)\n result, success = eval_result.output, eval_result.success\n if not success:\n # Code execution timed out.\n terminal_reward = self.failure_reward\n results = []\n reason = eval_result.failure_reason\n break\n else:\n terminal_reward += self.reward_fn(result, output_seq, self.task.base)\n if result == output_seq:\n terminal_reward += self.correct_bonus # Bonus for correct answer.\n\n # Only add additional reward for shorter code. Subtracting reward\n # interferes with the main objective. Only optimize for length once\n # any solution is found.\n if self.min_code_length == self.max_code_length:\n terminal_reward += self.code_length_bonus\n else:\n terminal_reward += self.code_length_bonus * clipped_linear(\n x=len(code), x0=self.min_code_length, y0=1.0,\n slope=-self.time_penalty, y_range=(0.0, 1.0))\n\n # reason remains 'correct' if it is already\n elif reason == 'correct':\n reason = 'wrong'\n results.append(result)\n\n # Return list of rewards, one for each char in the code. All are 0 except\n # for the terminal reward.\n terminal_reward /= self.best_reward\n return misc.RewardInfo(\n episode_rewards=[0.0] * (len(code) - 1) + [terminal_reward],\n input_case=misc.IOTuple(i for i, o in io_seqs),\n correct_output=misc.IOTuple(o for i, o in io_seqs),\n code_output=misc.IOTuple(results),\n input_type=self.input_type,\n output_type=self.output_type,\n reason=reason)",
"def translate_coding_to_rule(self, rule):\n node = Node(\"\", None, None, None)\n node.code_to_rule(rule, None)\n self.rule = node\n self.human_read = self.rule.visit_easy_read()\n self.polish_notation = self.rule.visit_with_polish_notation()\n self.coding = self.rule.visit_make_coding()\n self.find_needed_premises()\n self.find_conclusions()",
"def rules(self):\n self.rule1 = min(self.location_is_lessDemand, self.area_is_small, self.unfunishing)\n self.rule2 = min(self.location_is_lessDemand, max(self.area_is_small, self.area_is_average), self.access_is_good)\n self.rule3 = min(self.location_is_veryHighDemand, self.area_is_average, self.fac_is_low, self.access_is_average)\n self.rule4 = min(self.location_is_veryLessDemand, self.area_is_verysmall, self.fully_funishing)\n self.rule5 = min(self.location_is_lessDemand, self.fac_is_average, max(self.area_is_small, self.area_is_average))\n self.rule6 = min(max(self.location_is_lessDemand, self.location_is_averageDemand), self.access_is_good)\n self.rule7 = min(self.location_is_lessDemand, self.access_is_good, self.area_is_large, self.partially_funishing)\n self.rule8 = min(self.location_is_highDemand, self.access_is_good, max(self.bed_is_less, self.bath_is_average))\n self.rule9 = min(self.location_is_veryHighDemand, self.area_is_large, self.unfunishing)\n self.rule10 = min(self.access_is_good, self.area_is_average, (1 - self.unfunishing))\n self.rule11 = min(self.access_is_good, self.area_is_large, self.partially_funishing, self.bed_is_more, self.bath_is_more)",
"def get_rules(cls):\n raise NotImplementedError()",
"def get_rule_by_field(self, field):\n try:\n if(len(field)):\n self.all_result = self.join_field_rule(ret_all=True).next()\n if(self.all_result):\n for code in self.all_result.values():\n try:\n return code.get(field)\n except KeyError as error:\n return str(field) + str(\"Not found\")\n except:\n return \"Error looking for code.\"\n else:\n return \"Code is Required\"\n except:\n raise",
"def evaluateCode(lang, code):",
"def process_all_checks(self, code, option):\n return code",
"def rule(fasta_in, classes_in, psekraac_type11_out, benchmark_dir=None):\n token = secrets.token_hex(4)\n rule = _get_header(token)\n if benchmark_dir is not None:\n benchmark_out = f\"{benchmark_dir}encoding_psekraac_type11_{token}.txt\"\n rule += _get_benchmark(benchmark_out)\n rule += _get_main(fasta_in, classes_in, psekraac_type11_out)\n return rule",
"def test_rules():",
"def hrules(self):\n ...",
"def get_rules(self):\n rules = []\n for item in self.name:\n rules.append(item)\n return rules",
"def get_rules(self):\n rules = []\n for item in self.rule:\n rules.append(item)\n return rules",
"def makeCode(self, code):\n\n current_charset = None\n pos = sum = 0\n skip = False\n strCode = ''\n for c in range(len(code)):\n if skip:\n skip = False\n continue\n\n # Only switch to char set C if next four chars are digits\n if len(code[c:]) >= 4 and code[c:c + 4].isdigit() and current_charset != self.CharSetC or \\\n len(code[c:]) >= 2 and code[c:c + 2].isdigit() and current_charset == self.CharSetC:\n # If char set C = current and next two chars ar digits, keep C\n if current_charset != self.CharSetC:\n # Switching to Character set C\n if pos:\n strCode += self.ValueEncodings[current_charset['Code C']]\n sum += pos * current_charset['Code C']\n else:\n strCode = self.ValueEncodings[self.CharSetC['START C']]\n sum = self.CharSetC['START C']\n current_charset = self.CharSetC\n pos += 1\n elif code[c] in self.CharSetB and current_charset != self.CharSetB and \\\n not (code[c] in self.CharSetA and current_charset == self.CharSetA):\n # If char in chrset A = current, then just keep that\n # Switching to Character set B\n if pos:\n strCode += self.ValueEncodings[current_charset['Code B']]\n sum += pos * current_charset['Code B']\n else:\n strCode = self.ValueEncodings[self.CharSetB['START B']]\n sum = self.CharSetB['START B']\n current_charset = self.CharSetB\n pos += 1\n elif code[c] in self.CharSetA and current_charset != self.CharSetA and \\\n not (code[c] in self.CharSetB and current_charset == self.CharSetB):\n # if char in chrset B== current, then just keep that\n # Switching to Character set A\n if pos:\n strCode += self.ValueEncodings[current_charset['Code A']]\n sum += pos * current_charset['Code A']\n else:\n strCode += self.ValueEncodings[self.CharSetA['START A']]\n sum = self.CharSetA['START A']\n current_charset = self.CharSetA\n pos += 1\n\n if current_charset == self.CharSetC:\n val = self.CharSetC[code[c:c + 2]]\n skip = True\n else:\n val = current_charset[code[c]]\n\n sum += pos * val\n strCode += self.ValueEncodings[val]\n pos += 1\n\n # Checksum\n checksum = sum % 103\n\n strCode += self.ValueEncodings[checksum]\n\n # The stop character\n strCode += self.ValueEncodings[current_charset['STOP']]\n\n # Termination bar\n strCode += \"11\"\n\n return strCode",
"def get_rules(self, M):\n return list(itertools.chain.from_iterable(\n list(itertools.chain.from_iterable(\n [[self.get_boxrules(x, M), self.get_unaryrules(x, M),\n self.at_least_one_rules(x, M), self.get_columnrules(x, M),\n self.get_rowrules(x, M)] for x in itertools.product(range(1, M+1),\n range(1, M+1))]\n ))\n ))",
"def part_1(code: List):\n acc, _ = run_code(code)\n\n return acc",
"def _validate_code(self, key, code):\n \n if code is None:\n code = self.name\n \n if not isinstance(code, (str, unicode)):\n raise TypeError(\"Sequence.code should be an instance of str or \"\n \"unicode, not %s\" % type(code))\n \n code = Project._condition_code(code)\n \n return code",
"def map_code_to_hashes(code: int) -> hashes.HashAlgorithm:\n\n if code == 0x12:\n return hashes.SHA256()\n if code == 0x13:\n return hashes.SHA512()\n if code == 0xb240:\n return hashes.BLAKE2b(64)\n if code == 0xb260:\n return hashes.BLAKE2s(32)\n raise ValueError(\"Hachage non supporte : %d\", code)",
"def get_reconstruction_from_code(self, codes):\n return self.sess.run(self.reconstructed,\n feed_dict={self.z: codes})",
"def compile(self, rule):\n return rule.traverse(self)",
"def parse_rules(lines: Lines) -> Rules:\n rules: Rules = {}\n for line in lines:\n kind, contents = parse_rule(line)\n rules[kind] = contents\n return rules",
"def get_rules(self):\n # TODO: Implement\n self.traverse2(self.tree, [])\n return self.rules",
"def vrules(self):\n ...",
"def test_000_validate_by_good_code(self):\n m = schematics_flexible.BaseFlexible(\n {'code': '04',\n 'properties': {\"m\": \"this is text\"}},\n store_handler=get_mock())\n self.assertIsNone(m.validate())",
"def code():",
"def get_prediction_rules(self, quant_dataframe):\n if type(quant_dataframe) != QuantitativeDataFrame:\n print(\"Type of quant_dataframe must be QuantitativeDataFrame\")\n\n Y: pd.Series = quant_dataframe.dataframe.iloc[:, -1]\n\n # f1 score to rule prediction map\n y_pred_dict: Dict[F1Score, TargetValArray] = dict()\n\n # f1 score to rule map\n rules_f1: Dict[F1Score, IDSRule] = dict()\n\n rule: IDSRule\n for rule in self.rules:\n conf: float = rule.car.confidence\n sup: float = rule.car.support\n\n y_pred_per_rule: TargetValArray = rule.predict(quant_dataframe)\n rule_f1_score: F1Score = hmean([conf, sup])\n\n y_pred_dict.update({rule_f1_score: y_pred_per_rule})\n rules_f1.update({rule_f1_score: rule})\n\n # rules in rows, instances in columns\n y_pred_array = np.array(list(y_pred_dict.values()))\n\n y_pred_dict = dict(sorted(y_pred_dict.items(), key=lambda item: item[0], reverse=True))\n\n y_pred = []\n\n minority_classes = []\n\n rule_list = list(self.rules)\n\n if y_pred_dict:\n for i in range(len(Y)):\n all_NA = np.all(y_pred_array[:, i] == IDSRule.DUMMY_LABEL)\n if all_NA:\n minority_classes.append(Y[i])\n\n # if the ruleset covers all instances\n default_class = len(Y == Y[0]) / len(Y)\n default_class_label = Y[0]\n\n if minority_classes:\n default_class = len(Y == mode(minority_classes)) / len(Y)\n default_class_label = mode(minority_classes)\n\n for i in range(len(Y)):\n y_pred_array_datacase = y_pred_array[:, i]\n non_na_mask = y_pred_array_datacase != IDSRule.DUMMY_LABEL\n\n y_pred_array_datacase_non_na = np.where(non_na_mask)[0]\n\n if len(y_pred_array_datacase_non_na) > 0:\n rule_index = y_pred_array_datacase_non_na[0]\n rule = rule_list[rule_index]\n\n y_pred.append((rule.car.confidence, rule.car.consequent.value))\n else:\n y_pred.append((default_class, default_class_label))\n\n return y_pred\n\n else:\n y_pred = len(Y) * [np.inf]\n\n return y_pred",
"def status_for(self, code):\n is_in = lambda start, end, n: start <= n <= end\n for status in self._code_ranges_dict:\n for _range in self._code_ranges_dict[status]:\n if is_in(_range['start'],_range['end'],code):\n return status\n return None",
"def get_rules(cls) -> list:\n return [factory() for factory in cls._rules_factories]",
"def fetchRulesForNode(self, nodeId):\n rules = []\n self.mapNodeDependentSites[nodeId] = set([])\n exactlyMatched = self.findExactlyMatchingRules(nodeId)\n rules.extend(exactlyMatched)\n mergedMatched = self.findMergedMatchingRules(nodeId)\n rules.extend(mergedMatched)\n if len(self.tree.node(nodeId)) > 12:\n rules.extend(self.findDepravedMatchingRules(nodeId))\n # HACK: 2012/10/22\n # elif not mergedMatched and exactlyMatched and len(exactlyMatched) <= 1:\n # if exactlyMatched[0][2][2] < -3: # log(0.05)\n # # Clear rules in this bad situtation.\n # rules = []\n\n # Allow no rules to return, then the decoder will be forced to\n # build translation using CYK.\n if not rules:\n return None, {}\n # if not rules:\n # rules.extend(self.findRecontructMatchingRules(nodeId))\n # if not rules:\n # rules.extend(self.findDepravedMatchingRules(nodeId))\n # # Should rule got here!.\n # assert rules\n\n return rules, self.mapNodeDependentSites[nodeId]",
"def get(self, request):\n doc_types_mappings = DoccodePluginMapping.objects.all()\n rules_json = []\n for rule in doc_types_mappings:\n rules_json.append(\n dict(\n doccode=rule.get_docrule().get_title(),\n id=rule.pk,\n )\n )\n log.info('RulesHandler.read request fulfilled')\n return Response(rules_json, status=status.HTTP_200_OK)",
"def rules(self):\n return tuple(e for e in self.entries if e.is_rule)"
]
| [
"0.64189136",
"0.5745816",
"0.5704152",
"0.5697134",
"0.5655006",
"0.56392395",
"0.5623657",
"0.5576453",
"0.5572982",
"0.55443037",
"0.5520338",
"0.55185455",
"0.5490856",
"0.5308763",
"0.52806115",
"0.52776045",
"0.52774376",
"0.52757066",
"0.52686936",
"0.5263126",
"0.5260822",
"0.5203938",
"0.51527",
"0.5149928",
"0.5149542",
"0.5143022",
"0.5140068",
"0.5138498",
"0.51360893",
"0.51344097"
]
| 0.7306868 | 0 |
Given the field, return the rule. | def get_rule_by_field(self, field):
try:
if(len(field)):
self.all_result = self.join_field_rule(ret_all=True).next()
if(self.all_result):
for code in self.all_result.values():
try:
return code.get(field)
except KeyError as error:
return str(field) + str("Not found")
except:
return "Error looking for code."
else:
return "Code is Required"
except:
raise | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rule(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"rule\")",
"def rule(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"rule\")",
"def rule(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"rule\")",
"def rule(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"rule\")",
"def rule(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"rule\")",
"def rule(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"rule\")",
"def rule(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"rule\")",
"def rule(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"rule\")",
"def rule(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"rule\")",
"def rule(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"rule\")",
"def rule(self) -> str:\n if self._rule:\n return self._rule\n return self._make_rule(member_param=self._member_param,\n unique_member_param=self._unique_member_param)",
"def get_rule(self):\n\n return self.__rule_",
"def _get_rule(self, rule):\n for kbrule in self.rules:\n if rule == kbrule:\n return kbrule",
"def _get_rule(self, rule):\n for kbrule in self.rules:\n if rule == kbrule:\n return kbrule",
"def _get_rule(self, rule):\n for kbrule in self.rules:\n if rule == kbrule:\n return kbrule",
"def getRule(self, *args):\n return _libsbml.Model_getRule(self, *args)",
"def handle_field(self, key, rule, obj):\n\n if isinstance(rule, dict):\n if '$in' in rule:\n return self.handle_in_field(key, rule['$in'], obj)\n\n elif '$nin' in rule:\n return not self.handle_in_field(key, rule['$nin'], obj)\n\n elif '$all' in rule:\n return self.handle_all_field(key, rule['$all'], obj)\n\n else:\n return self.handle_field_rule(key, rule, obj)\n\n else:\n try:\n field = get_field(key, obj)\n\n except KeyError:\n return False\n\n return (field == rule)",
"def getRuleByVariable(self, *args):\n return _libsbml.Model_getRuleByVariable(self, *args)",
"def parseRule(self, ruleTxt, lineNo):\n ruleParts = ruleTxt.split()\n\n #less than 4 or more than 5 fields, poorly formed rule\n if len(ruleParts) < 4 or len(ruleParts) > 5:\n return None\n\n direction = ruleParts[0]\n action = ruleParts[1]\n ip = ruleParts[2]\n ports = ruleParts[3]\n flag = ruleParts[4] if len(ruleParts) == 5 else None\n\n try:\n rule = Rule(direction, action, ip, ports, flag, lineNo)\n except:\n rule = None\n \n return rule",
"def rule(self):\n step_ratio = self.step_ratio\n method = self.method\n if method in ('multicomplex', ) or self.n == 0:\n return np.ones((1,))\n\n order, method_order = self.n - 1, self._method_order\n parity = self._parity(method, order, method_order)\n step = self._richardson_step()\n num_terms, ix = (order + method_order) // step, order // step\n fd_rules = FD_RULES.get((step_ratio, parity, num_terms))\n if fd_rules is None:\n fd_mat = self._fd_matrix(step_ratio, parity, num_terms)\n fd_rules = linalg.pinv(fd_mat)\n FD_RULES[(step_ratio, parity, num_terms)] = fd_rules\n\n if self._flip_fd_rule:\n return -fd_rules[ix]\n return fd_rules[ix]",
"def rule_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"rule_name\")",
"def field(self, field):\n return self.__getitem__(field)",
"def validator(field: BaseField) -> Callable:\n\n def inner(func: Union[ConfigValidator, FieldValidator]) -> Callable:\n if isinstance(field, Field):\n field.validator = func # type: ignore\n elif isinstance(field, Schema):\n field._validators.append(func) # type: ignore\n\n return func\n\n return inner",
"def find(self, rule_name):\n return self.rules[rule_name]",
"def wholeFieldPredicate(field):\n\n if field:\n return (field, )\n else:\n return ()",
"def csv_to_field_RemarketingRule(entity, value):\n if value is None or value == '':\n return\n\n type_end_pos = value.index('(')\n if type_end_pos <= 0:\n raise ValueError('Invalid Remarketing Rule: {0}'.format(value))\n\n rule_type = value[:type_end_pos]\n rule = value[type_end_pos:]\n\n if rule_type.lower() == 'pagevisitors':\n entity.Rule = parse_rule_PageVisitors(rule)\n elif rule_type.lower() == 'pagevisitorswhovisitedanotherpage':\n entity.Rule = parse_rule_PageVisitorsWhoVisitedAnotherPage(rule)\n elif rule_type.lower() == 'pagevisitorswhodidnotvisitanotherpage':\n entity.Rule = parse_rule_PageVisitorsWhoDidNotVisitAnotherPage(rule)\n elif rule_type.lower() == 'customevents':\n entity.Rule = parse_rule_CustomEvents(rule)\n else:\n entity.Rule = None",
"def rule_number(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"rule_number\")",
"def get_field(self, field_name):\n for f in self.fields:\n if f.name.lower() == field_name.lower():\n return f\n return None",
"def get_rule(self, name):\n\n return self._control_manager.get_rule(name)",
"def validate_field(self, field_name, val):\r\n return self._columns[field_name].validate(val)"
]
| [
"0.6452631",
"0.6452631",
"0.6452631",
"0.6184227",
"0.6184227",
"0.6184227",
"0.6184227",
"0.6184227",
"0.6184227",
"0.6184227",
"0.6102919",
"0.6000917",
"0.59157825",
"0.59157825",
"0.59157825",
"0.5800254",
"0.5708165",
"0.5688316",
"0.54578036",
"0.5444998",
"0.54006064",
"0.52925867",
"0.5241031",
"0.52204233",
"0.52188927",
"0.52099353",
"0.51822907",
"0.5173899",
"0.5155753",
"0.51495093"
]
| 0.68929523 | 0 |
Get geocode results from the Google Maps Geocoding API. Note that in the case of multiple Google geocode results, this function returns details of the FIRST result. | def get_google_results(address, api_key=None, return_full_response=False):
# Set up your Geocoding url
geocode_url = "https://maps.googleapis.com/maps/api/geocode/json?address={}".format(address) + "&sensor=false"
# if api_key is not None:
# geocode_url = geocode_url + "&key={}".format(api_key)
    # Ping Google for the results:
results = requests.get(geocode_url)
# Results will be in JSON format - convert to dict using requests functionality
results = results.json()
    # if there are no results or an error, return empty results.
if len(results['results']) == 0:
output = {
"formatted_address": None,
"latitude": None,
"longitude": None,
"accuracy": None,
"google_place_id": None,
"type": None,
"postcode": None
}
else:
answer = results['results'][0]
output = {
"formatted_address": answer.get('formatted_address'),
"latitude": answer.get('geometry').get('location').get('lat'),
"longitude": answer.get('geometry').get('location').get('lng'),
"accuracy": answer.get('geometry').get('location_type'),
"google_place_id": answer.get("place_id"),
"type": ",".join(answer.get('types')),
"postcode": ",".join([x['long_name'] for x in answer.get('address_components')
if 'postal_code' in x.get('types')])
}
# Append some other details:
output['input_string'] = address
output['number_of_results'] = len(results['results'])
output['status'] = results.get('status')
if return_full_response is True:
output['response'] = results
return output | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_google_results(address):\n\t# Set up your Geocoding url\n\tlogging.info(\"[GOOGLE URL]: init\")\n\tparams = {\n\t\"address\":address,\n\t\"key\":GEOPY.get('AQUEDUCT_GOOGLE_PLACES_PRIVATE_KEY')\n\t}\n\t\n\t# Ping google for the reuslts:\n\ttry:\n\t\twith requests.Session() as s:\n\t\t\ts.mount('https://',HTTPAdapter(max_retries=Retry(2, backoff_factor=0.001)))\n\t\t\tr = s.get(url=GEOCODE_URL, params=params, timeout=5)\n\t\t\n\t\tif r.status_code == requests.codes.ok:\n\t\t\t# Results will be in JSON format - convert to dict using requests functionality\n\t\t\tresults = r.json()\n\t\t\t# if there's no results or an error, return empty results.\n\t\t\tif len(results['results']) == 0:\n\t\t\t\toutput = {\n\t\t\t\t\t\"matched_address\" : None,\n\t\t\t\t\t\"lat\": None,\n\t\t\t\t\t\"lon\": None,\n\t\t\t\t\t\"match\": False\n\t\t\t\t}\n\t\t\telse: \n\t\t\t\tanswer = results['results'][0]\n\t\t\t\toutput = {\n\t\t\t\t\t\"matched_address\" : answer.get('formatted_address'),\n\t\t\t\t\t\"lat\": answer.get('geometry').get('location').get('lat'),\n\t\t\t\t\t\"lon\": answer.get('geometry').get('location').get('lng'),\n\t\t\t\t\t\"match\":True\n\t\t\t\t}\n\t\telse:\n\t\t\tlogging.error(f\"[GEOCODER: Get google place]: {r.text}\")\n\t\t\tlogging.error(f\"[GEOCODER- GOOGLE URL]: {r.status_code}\")\n\t\t\toutput = {\n\t\t\t\t\"matched_address\" : None,\n\t\t\t\t\"lat\": None,\n\t\t\t\t\"lon\": None,\n\t\t\t\t\"match\": False\n\t\t\t}\n\t\t\t\n\t\t# Append some other details: \n\t\toutput['address'] = address\n\t\toutput['number_of_results'] = len(results['results'])\n\t\toutput['status'] = results.get('status')\n\t\t\n\t\treturn output\n\texcept Exception as e:\n\t\traise e",
"def geocoding(address):\n AUTH = json.loads(open(\"auth.json\", \"r\").read())\n\n r = requests.get(f\"https://maps.googleapis.com/maps/api/geocode/json\", params={\n \"address\": address,\n \"key\": AUTH[\"GMAP_API\"]\n })\n\n if r.status_code == 200:\n r = r.json()\n results = r[\"results\"]\n if len(results) < 1:\n log.error(\"No result geocoding for %s\", address)\n return (-1, -1)\n\n result = results[0]\n proper_address = result[\"formatted_address\"]\n loc = result[\"geometry\"][\"location\"]\n lat = loc[\"lat\"]\n lng = loc[\"lng\"]\n\n return (proper_address, lat, lng)\n\n else:\n log.error(\"Error in Geocoding %s\", address)\n return (-1, -1)",
"def get_geocode(self, address):\n\n try:\n raw_data = self.__get_raw_data(address)\n except (URLError, ValueError):\n return 503, None\n else:\n code, coords = self.__parse_raw_data(raw_data)\n return code, coords",
"def google_geocode(query):\n\tif not API_KEY:\n\t\traise ConfigException(\"Require API_KEY for googleapi. Reload after setting.\")\n\td = {\"address\" : query.encode(\"utf-8\"), \"key\" : API_KEY }\n\tf = urlopen(LOC_URL % (urlencode(d)))\n\tlocdata = load(f)\n\tif f.getcode() == 200:\n\t\tif \"results\" in locdata:\n\t\t\titem = locdata[\"results\"]\n\t\t\tif len(item) == 0:\n\t\t\t\treturn None\n\t\t\titem = item[0]\n\t\t\tll = item.get(\"geometry\", {}).get(\"location\") # lol tricky\n\t\t\tif not ll: return None\n\t\t\treturn item[\"formatted_address\"], ll[\"lat\"], ll[\"lng\"]\n\t\telse:\n\t\t\treturn None\n\telse:\n\t\traise RuntimeError(\"Error (%s): %s\" % (f.getcode(), locdata.replace(\"\\n\", \" \")))",
"def geocode(self, query, exactly_one=True, timeout=None):\n params = {\n 'addr': self.format_string % query,\n }\n if self.api_key:\n params['key'] = self.api_key\n url = \"?\".join((self.api, urlencode(params)))\n logger.debug(\"%s.geocode: %s\", self.__class__.__name__, url)\n return self._parse_json(\n self._call_geocoder(url, timeout=timeout), exactly_one\n )",
"def get_city_by_code(post_code):\n post_code = post_code.replace(' ', '').encode('utf-8')\n error = ''\n city = ''\n opener = urllib2.build_opener()\n url = 'http://maps.googleapis.com/maps/api/geocode/json?address={0}&sensor=false'.format(post_code)\n response = opener.open(url).read()\n response_dict = json.loads(response)\n request_status = response_dict['status']\n if request_status == 'OK':\n logger.debug('Google response')\n logger.debug(response_dict)\n results = response_dict['results']\n \"\"\"\n first get all results\n with required zip code\n \"\"\"\n results_with_required_zip_code = []\n for result in results:\n address_components = result['address_components']\n for address_component in address_components:\n types = address_component['types']\n for t in types:\n if t == 'postal_code' and address_component['short_name'].replace(' ', '').lower() == post_code.lower():\n results_with_required_zip_code.append(result)\n if not results_with_required_zip_code:\n error = {\n 'status': '8',\n 'message': POST_CODE_DOES_NOT_EXISTS,\n 'title': POST_CODE_DOES_NOT_EXISTS_TITLE\n }\n # error = 'No location with post code %s' % post_code\n else:\n \"\"\"\n next we need all results in GB\n \"\"\"\n results_with_required_zip_code_in_GB = ''\n for good_result in results_with_required_zip_code:\n address_components = good_result['address_components']\n for address_component in address_components:\n types = address_component['types']\n for t in types:\n if t == 'country' and address_component['short_name'].lower() == 'GB'.lower():\n results_with_required_zip_code_in_GB = good_result\n if not results_with_required_zip_code_in_GB:\n error = {\n 'status': '7',\n 'message': POST_CODE_DOES_NOT_EXISTS_IN_GB,\n 'title': POST_CODE_DOES_NOT_EXISTS_IN_GB_TITLE\n }\n # error = 'No city with post code %s in GB' % post_code\n else:\n \"\"\"\n finally find city name\n \"\"\"\n address_components = results_with_required_zip_code_in_GB['address_components']\n # first try get postal city\n searching_city = get_city_by_key(address_components, 'postal_town')\n if not searching_city:\n # next by administrative_area_level_2\n searching_city = get_city_by_key(address_components, 'administrative_area_level_2')\n if not searching_city:\n print url\n error = {\n 'status': '7',\n 'message': POST_CODE_DOES_NOT_EXISTS_IN_GB,\n 'title': POST_CODE_DOES_NOT_EXISTS_IN_GB_TITLE\n }\n # error = 'No city with post code %s in GB' % post_code\n else:\n city = searching_city\n elif request_status == 'ZERO_RESULTS':\n error = {\n 'status': '8',\n 'message': POST_CODE_DOES_NOT_EXISTS,\n 'title': POST_CODE_DOES_NOT_EXISTS_TITLE\n }\n else:\n error = request_status\n return {\n 'error': error,\n 'data': city\n }",
"def geocodeLocations(locations):\n maxResults = 1\n location_query = ''\n for location in locations:\n location_query += \"&location=%s\" % encodeUrl(location)\n url = \"http://open.mapquestapi.com/geocoding/v1/batch?maxResults=%d%s\" % (maxResults, location_query)\n print url\n results = json.loads(urllib2.urlopen(url).read())\n print results\n return\n for location_result in results['results']:\n #print location_result\n if location_result['providedLocation']['location'] == location:\n latlng = location_result['locations'][0]['displayLatLng']\n return latlng\n else:\n print location_result",
"def get_google_results(api_id, address, return_response_fields=None):\n # set up api key\n api_key = \"AIzaSyDQaVh67imEZW2FLH7hb33SB63jv2shkqQ\"\n request_url = \"\"\n outputs = []\n building = address[0]\n address1 = address[0] + \" \" + address[1] + \" \" + address[2]\n if api_id == \"geocoding\":\n request_url = \"https://maps.googleapis.com/maps/api/geocode/json?address={}\".format(\n address1) + \"&key={}\".format(\n api_key)\n print(\"GEOCODING |||||||||| \" + request_url)\n if api_id == \"nearbysearch\":\n lat_long = get_google_results(\"geocoding\", address, return_response_fields=\"latitude\")[0][\n \"latitude\"].__str__() + \",\" + \\\n get_google_results(\"geocoding\", address, return_response_fields=\"longitude\")[0][\n \"longitude\"].__str__()\n request_url = \"https://maps.googleapis.com/maps/api/place/nearbysearch/json?location={}\".format(\n lat_long) + \"&rankby=distance&type=establishment&key={}\".format(api_key)\n print(\"NEARBYSEARCH |||||||||| \" + request_url)\n results = requests.get(request_url)\n results = results.json()\n\n if len(results['results']) == 0:\n return False\n else:\n for answer in results['results']:\n if api_id == \"geocoding\":\n\n street_number = \"0\"\n for y in answer.get('address_components'):\n if 'street_number' in y.get('types'): street_number = y['long_name']\n\n route_name = \"0\"\n for z in answer.get('address_components'):\n if 'route' in z.get('types'): route_name = z['long_name']\n\n output = {\n \"entry\": building,\n \"street_number\": street_number,\n \"route_name\": route_name,\n \"latitude\": answer.get('geometry').get('location').get('lat'),\n \"longitude\": answer.get('geometry').get('location').get('lng'),\n \"google_place_id\": answer.get(\"place_id\"),\n \"type\": \",\".join(answer.get('types')),\n \"postcode\": \",\".join(\n [x['long_name'] for x in answer.get('address_components') if 'postal_code' in x.get('types')]),\n\n }\n if (output[\"route_name\"]) == \"0\":\n output[\"route_name\"] = answer.get('formatted_address')\n if (output[\"street_number\"]) == \"0\":\n\n pattern = re.compile(\"^(.+?),\")\n pattern0 = re.compile(\",(.+?),\")\n patterns = [pattern, pattern0]\n for pat in patterns:\n if pat.search(answer.get('formatted_address')):\n\n ad = re.findall(pat, answer.get('formatted_address'))[0]\n pattern1 = re.compile(\"\\d+\")\n if pattern1.search(ad):\n ad1 = re.findall(pattern1, ad)[0]\n if len(ad1) < 4: output[\"street_number\"] = ad1\n\n outputs += [output]\n\n if api_id == \"nearbysearch\":\n street_number = \"0\"\n route_name = answer.get('vicinity')\n if answer.get('rating') is None:\n rating = 0\n else:\n rating = int(answer.get('rating'))\n\n output = {'input_string': address1, \"street_number\": street_number, \"route_name\": route_name,\n \"google_place_id\": answer.get(\"place_id\"), \"type\": \",\".join(answer.get('types')),\n \"rating\": rating}\n\n pattern = re.compile(\"^(.+?),\")\n pattern0 = re.compile(\",(.+?),\")\n patterns = [pattern, pattern0]\n for pat in patterns:\n if pat.search(route_name):\n\n ad = re.findall(pat, answer.get('vicinity'))[0]\n pattern1 = re.compile(\"\\d+\")\n if pattern1.search(ad):\n ad1 = re.findall(pattern1, ad)[0]\n if len(ad1) < 4: output[\"street_number\"] = ad1\n\n if output[\"street_number\"] == address[0]:\n outputs += [output]\n\n if return_response_fields is None and len(outputs) > 0:\n return outputs\n elif (len(outputs) > 0) and (return_response_fields is not None):\n output_filter = []\n for item in outputs:\n output_filter += [{\"\" + return_response_fields: 
item[return_response_fields]}]\n outputs = output_filter\n return outputs\n else:\n return False",
"def get_google_geocoding(coordinates):\n latitude = coordinates['latitude']\n longitude = coordinates['longitude']\n payload = build_google_payload(latitude=latitude, longitude=longitude)\n response = requests.get(google_url, params=payload, timeout=int(GEOCODING_INFO['timeout']))\n if response.status_code == 200:\n formated_data = extract_data_from_google_response(response.text.encode('utf-8'))\n return formated_data\n else:\n return None",
"def extract_data_from_google_response(geocoding_response):\n root = ET.fromstring(geocoding_response)\n for result in root.findall('result'):\n data = result.find('formatted_address').text\n if data != '':\n return data\n return 'Dirección desconocida'",
"def geocoding(address, API_KEY=API_KEY, GEOCODE_API_URL=GEOCODE_API_URL):\n # define the parameters of the search\n params = {\n 'address': '{}'.format(address),\n 'key': API_KEY\n }\n\n # Do the request and get the response data\n response = requests.get(GEOCODE_API_URL, params=params)\n response = response.json()\n\n geodata = parse_response(response)\n return geodata",
"def geocode(\n self,\n query,\n max_results=25,\n set_back=0,\n location_descriptor='any',\n exactly_one=True,\n timeout=None,\n ):\n params = {'addressString': query}\n if set_back != 0:\n params['setBack'] = set_back\n if location_descriptor not in ['any',\n 'accessPoint',\n 'frontDoorPoint',\n 'parcelPoint',\n 'rooftopPoint',\n 'routingPoint']:\n raise GeocoderQueryError(\n \"You did not provided a location_descriptor \"\n \"the webservice can consume. It should be any, accessPoint, \"\n \"frontDoorPoint, parcelPoint, rooftopPoint or routingPoint.\"\n )\n params['locationDescriptor'] = location_descriptor\n if exactly_one is True:\n max_results = 1\n params['maxResults'] = max_results\n\n url = \"?\".join((self.api, urlencode(params)))\n logger.debug(\"%s.geocode: %s\", self.__class__.__name__, url)\n response = self._call_geocoder(url, timeout=timeout)\n\n # Success; convert from GeoJSON\n if not len(response['features']):\n return None\n geocoded = []\n for feature in response['features']:\n geocoded.append(self._parse_feature(feature))\n if exactly_one is True:\n return geocoded[0]\n return geocoded",
"def geocode_one(self, postcode: str, address: Optional[str] = None) -> pd.Series:\n if postcode is None and address is None:\n raise utils.GenericException(\"You must pass either postcode or address, or both.\")\n if self.gmaps_key is None:\n self.gmaps_key = self._load_key()\n if self.gmaps_key is not None:\n self.gmaps_client = googlemaps.Client(key=self.gmaps_key)\n if self.cache is None:\n self._load_cache()\n sep = \", \" if address and postcode else \"\"\n postcode = postcode if postcode is not None else \"\"\n address = address if address is not None else \"\"\n search_term = f\"{address}{sep}{postcode}\"\n if search_term in self.cache:\n logging.debug(\"Loading GMaps Geocoder API result from cache: '%s'\", search_term)\n geocode_result = self.cache[search_term]\n else:\n logging.debug(\"Querying Google Maps Geocoder API for '%s'\", search_term)\n if self.gmaps_key is None:\n return pd.Series({\"latitude\": np.nan, \"longitude\": np.nan, \"match_status\": 0})\n geocode_result = self.gmaps_client.geocode(search_term, region=\"uk\")\n self.cache[search_term] = geocode_result\n self.cache_modified = True\n if not geocode_result or len(geocode_result) > 1:\n return pd.Series({\"latitude\": np.nan, \"longitude\": np.nan, \"match_status\": 0})\n geometry = geocode_result[0][\"geometry\"]\n ok_loc_types = [\"ROOFTOP\", \"GEOMETRIC_CENTER\"]\n if geometry[\"location_type\"] in ok_loc_types or \\\n geocode_result[0][\"types\"] == [\"postal_code\"]:\n return pd.Series({\"latitude\": geometry[\"location\"][\"lat\"],\n \"longitude\": geometry[\"location\"][\"lng\"],\n \"match_status\": 3})\n return pd.Series({\"latitude\": np.nan, \"longitude\": np.nan, \"match_status\": 0})",
"def rlis_geocode(addr_str, token):\n\n url = 'http://gis.oregonmetro.gov/rlisapi2/locate/'\n params = {\n 'token': token,\n 'input': addr_str,\n 'form': 'json'\n }\n rsp = requests.get(url, params=params)\n\n if rsp.status_code != 200:\n return -1, -1, -1\n else:\n json_rsp = rsp.json()\n if json_rsp['error']:\n return -1, -1, -1\n else:\n return json_rsp['data'][0]['lat'], json_rsp['data'][0]['lng'], json_rsp['data'][0]['fullAddress']",
"def lookup(addr, num, street, city, code, geo_dict, failure_set):\n try:\n address_url = \"https://geocoding.geo.census.gov/geocoder/locations/address?\" + \\\n \"street=\" + str(num) + \"+\" + street.replace(\" \", \"+\") + \"&city=\" + city + \"&zip=\" + \\\n str(code) + \"&benchmark=9&format=json\"\n geo_data = json.load(req.urlopen(address_url).decode('utf-8'))['result']\n except Exception:\n try:\n address_url = \"https://geocoding.geo.census.gov/geocoder/locations/address?\" + \\\n \"street=\" + str(num) + \"+\" + street.replace(\" \", \"+\") + \"&city=\" + city + \"&zip=\" + \\\n str(code) + \"&benchmark=9&format=json\"\n geo_data = json.loads(req.urlopen(address_url).read().decode('utf-8'))['result']\n except Exception as e:\n print(e, addr)\n failure_set.add(addr)\n return None\n if len(geo_data['addressMatches']) == 0:\n print(addr, ': Failure')\n failure_set.add(addr)\n return None\n print(addr, ': Success')\n location = geo_data['addressMatches'][0]['coordinates']\n latlong = ','.join([str(location['y']), str(location['x'])])\n geo_dict[addr] = latlong\n return tuple(float(geo) for geo in latlong.split(','))",
"def get_reverse_geocode_result(userlat_long, result_type=None, location_type=None):\n\n gmaps = googlemaps.Client(key=GooglemapsService.api_key)\n json_response = gmaps.reverse_geocode(userlat_long, result_type, location_type)\n return json_response[0]['formatted_address']",
"def search(self):\n return self.key.geocode(self.cleanplace)",
"def get_location(coordinates):\n location_info = gmaps.reverse_geocode(latlng=coordinates)\n location_list = list()\n for location in location_info:\n if \"locality\" in location[\"types\"]:\n return location[\"formatted_address\"]\n # location_list.append(location[\"formatted_address\"])\n # return location_list",
"def get_lat_lng(self):\n self.input_api = '%20'.join(self.parsed_question)\n self.input_api = ' '.join(self.parsed_question)\n self.google_api_url = 'https://maps.googleapis.com/maps/api/place/findplacefromtext/json?input={}&inputtype=textquery&fields=geometry,name,place_id&types=point_of_interest&key={}'.format (self.input_api, api_key) \n self.r = requests.get(url=self.google_api_url)\n self.data = self.r.json()\n self.name = self.data['candidates'][0]['name']\n self.place_id = self.data['candidates'][0]['place_id']\n self.lat = self.data['candidates'][0]['geometry']['location']['lat']\n self.lng = self.data['candidates'][0]['geometry']['location']['lng']\n print(self.lat, self.lng, self.place_id)\n return (self.lat, self.lng, self.place_id)",
"def geocode(location):\n geocoding_url = f'https://maps.googleapis.com/maps/api/geocode/json?' \\\n f'address={location}&key={_GEOCODING_KEY}'\n geocode_data = requests.get(geocoding_url).json()\n return geocode_data",
"def geocode(addr_str):\n\n\tbase_url = 'http://gis.oregonmetro.gov/rlisapi2/locate/'\n\turl_template = '{0}?token={1}&input={2}&form=json'\n\turl = url_template.format(base_url, token, addr_str)\n\tresponse = requests.get(url)\n\n\tif response.status_code != 200:\n\t\tprint 'unable to establish connection with rlis api'\n\t\tprint 'status code is: {0}'.format(response.status_code)\n\t\treturn response.status_code\n\t\n\tjson_rsp = response.json()\n\tif json_rsp['error']:\n\t\tprint 'the following address could not be geocoded:'\n\t\tprint '\\'{0}\\''.format(addr_str)\n\t\tprint 'the following error message was returned:'\n\t\tprint '\\'{0}\\''.format(json_rsp['error']), '\\n'\n\telse:\n\t\treturn json_rsp['data'][0]",
"def primary_geocode(self):\n try:\n return self.geocodes.all()[0]\n except IndexError:\n return None",
"def reverse_geocoding(lat, lng, API_KEY=API_KEY, GEOCODE_API_URL=GEOCODE_API_URL):\n params = {\n 'latlng': '{},{}'.format(lat, lng),\n 'key': API_KEY\n }\n\n # Do the request and get the response data\n response = requests.get(GEOCODE_API_URL, params=params)\n response = response.json()\n geodata = parse_response(response)\n return geodata",
"def geocode(address):\n geo_data = requests.get(\"https://geocode.xyz/{}?json=1\".format(\n urllib.parse.quote_plus(address)))\n geo_json = json.loads(geo_data.content)\n\n return geo_json['standard']['city'], geo_json['latt'], geo_json['longt']",
"def geo(address):\n API_PRIVATE = os.environ.get(\"TOM_TOM_PRIVATE\")\n encoded = urllib.parse.quote(address)\n query ='https://api.tomtom.com/search/2/geocode/' + str(encoded) + \\\n '.json?limit=1&countrySet=US&lat=42&lon=-72&topLeft=42.886%2C%20-73.508&btmRight=41.237%2C-69.928&key=' \\\n + API_PRIVATE\n\n response = requests.get(query)\n while True:\n try:\n jsonResponse = response.json()\n break\n except:\n response = requests.get(query)\n\n latit = 0\n longit = 0\n\n for address in jsonResponse['results']:\n latit = address['position']['lat']\n longit = address['position']['lon']\n return latit, longit",
"def geocode():\n\n if \"location\" in request.vars:\n location = request.vars.location\n else:\n session.error = T(\"Need to specify a location to search for.\")\n redirect(URL(r=request, f=\"index\"))\n\n if \"service\" in request.vars:\n service = request.vars.service\n else:\n # @ToDo: service=all should be default\n service = \"google\"\n\n if service == \"google\":\n return s3base.GoogleGeocoder(location, db).get_kml()\n\n if service == \"yahoo\":\n return s3base.YahooGeocoder(location, db).get_xml()",
"def get_geocoding_response(lat: float, long: float):\n payload = {}\n headers = {}\n url = URL_TEMPLATE.format(\n api_key=GEOCODING_API_KEY,\n long_lat=','.join(\n (str(long), str(lat)),\n ),\n )\n response = requests.request(\"GET\", url, headers=headers, data=payload)\n return response.json()",
"def _geocode(self, address):\n try:\n g = self.geocoder_class()\n address = smart_str(address)\n result = g.geocode(address, exactly_one=False)\n if result:\n return result[0]\n else:\n raise GeocodeFailed()\n except (UnboundLocalError, ValueError, GeocoderServiceError) as e:\n raise Exception(e)",
"def geocodeLocation(location, geocodeDB, tries=0):\n try:\n location = encodeUrl(location)\n except UnicodeDecodeError:\n pass\n\n try:\n row = geocodeDB.find_one({'query':location})\n except:\n row = None\n if row:\n print 'Cached. Times: {%d}' % row['count']\n results = row['results']\n geocodeDB.update({'_id':row['_id']}, {'$inc':{'count':1}})\n else:\n url = \"http://open.mapquestapi.com/nominatim/v1/search?format=json&addressdetails=0&bounded=1&viewbox=-123.173825,37.9298443,-122.28178,37.63983&q=%s\" % location\n print url\n try:\n results = json.loads(urllib2.urlopen(url).read())\n print results\n except (socket.timeout, socket.error):\n print 'failed to get: %s' % url\n #print 'socket.timeout so waiting a few seconds before trying again'\n if tries >= 3:\n geocodeDB.save({'query':location, 'results':None, 'count':1})\n return None\n else:\n for i in range(5):\n sys.stdout.write(' %d seconds left\\r' % (5 - i))\n sys.stdout.flush()\n time.sleep(1)\n print 'continuing...'\n return geocodeLocation(location, geocodeDB, tries+1)\n #return None\n except urllib2.URLError:\n print \"URLError\"\n return None\n geocodeDB.save({'query':location, 'results':results, 'count':1})\n if results:\n for obj in results:\n return {'lat':obj['lat'], 'lon':obj['lon']}\n else:\n print \"no results for: %s\", location\n return None",
"def fetch(self, radius: int) -> dict:\n # convert radius integer to string\n radius: str = f\"{radius}mi\" \n # set empty dict\n geocodes: dict = {}\n # iterate through instantiated locations list\n # set search parameters to pass to callGoogle method\n for location in self.locations:\n\n params: dict = {\n\n 'address': location,\n 'sensor': 'false',\n 'key': self.__api_key['google_key']\n\n }\n # define key value pairs | city - geocode\n geocodes[location]: str = f\"{callGoogle(endpoint=self.__api_endpoint, params=params)},{radius}\"\n\n return geocodes"
]
| [
"0.75825644",
"0.7065095",
"0.69788176",
"0.6944597",
"0.68306345",
"0.6712675",
"0.66840744",
"0.66496253",
"0.65347934",
"0.6481211",
"0.6473336",
"0.6473213",
"0.64647895",
"0.64577734",
"0.6385589",
"0.6328624",
"0.6250599",
"0.6218072",
"0.6167225",
"0.6133725",
"0.61009574",
"0.60810155",
"0.60554445",
"0.6051331",
"0.6023245",
"0.6022976",
"0.600287",
"0.6000541",
"0.5974032",
"0.59492767"
]
| 0.7563831 | 1 |
Shortcut for a module consisting of a dense layer, batch normalization, and optionally additive Gaussian noise. | def dense_bn_act(inputs, units, activation, kernel_init, noise=False, noise_std=0.5):
_tmp = tf.layers.dense(inputs=inputs, units=units, activation=None, kernel_initializer=kernel_init)
_tmp = tf.contrib.layers.batch_norm(_tmp, center=True, scale=True, is_training=phase)
_tmp = activation(_tmp)
if noise:
_tmp = gaussian_noise_layer(_tmp, noise_std, phase)
return _tmp | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def dense_block(X, neurons_per_layer, add_noise, hidden_activation):\n Y = Dense(neurons_per_layer)(X)\n\n if add_noise:\n Y = GaussianDropout(0.005)(Y)\n\n Y = LayerNormalization()(Y)\n Y = hidden_activation(Y)\n return Concatenate()([X, Y])",
"def __init__(self, hidden_units, act_fn=activation_fn, output_shape=1, out_activation=None, out_layer=True):\n super().__init__()\n for u in hidden_units:\n self.add(GaussianNoise(0.4)) # Or use kwargs\n self.add(Dense(u, act_fn))\n if out_layer:\n self.add(GaussianNoise(0.4))\n self.add(Dense(output_shape, out_activation))",
"def dense(in_layer):\n return Dense(neurons,\n kernel_initializer=initializer())(in_layer)",
"def gaussian_noise_layer(input_layer, std):\r\n\r\n noise = tf.random_normal(tf.shape(input_layer), mean=0.0, stddev=std, dtype=tf.float32)\r\n\r\n output = tf.add_n([input_layer, noise])\r\n\r\n return output",
"def _gaussian_for_learn_denosing_model(image):\n return add_gaussian_noise(image, 0, 0.2)",
"def __init__(self, input_dim: int, output_dim: int):\n\n super().__init__()\n\n self.input_dim = input_dim\n self.output_dim = output_dim\n\n self.batchNorm1 = layers.BatchNormalization()\n self.dense1 = layers.Dense(\n 64, input_shape=(input_dim+output_dim,),\n kernel_initializer=random_uniform(-np.sqrt(1/input_dim), np.sqrt(1/input_dim))\n )\n self.relu1 = layers.Activation('relu')\n self.dense2 = layers.Dense(32, kernel_initializer=random_uniform(-np.sqrt(1/64), np.sqrt(1/64)))\n self.relu2 = layers.Activation('relu')\n self.dense3 = layers.Dense(output_dim, kernel_initializer=random_uniform(-np.sqrt(1/32), np.sqrt(1/32)))",
"def batch_normalization(input_var=None):\n\n # Hyperparameters\n hp = Hyperparameters()\n hp('batch_size', 30)\n hp('n_epochs', 1000)\n hp('learning_rate', 0.01)\n hp('l1_reg', 0.00)\n hp('l2_reg', 0.0001)\n hp('patience', 5000)\n\n # Create connected layers\n # Input layer\n l_in = InputLayer(input_shape=(hp.batch_size, 28 * 28), input_var=input_var, name='Input')\n # Batch Normalization\n l_bn1 = BatchNormalization(incoming=l_in, name='Batch Normalization 1')\n # Dense Layer\n l_hid1 = DenseLayer(incoming=l_bn1, n_units=500, W=glorot_uniform, l1=hp.l1_reg,\n l2=hp.l2_reg, activation=relu, name='Hidden layer 1')\n # Batch Normalization\n l_bn2 = BatchNormalization(incoming=l_hid1, name='Batch Normalization 2')\n # Dense Layer\n l_hid2 = DenseLayer(incoming=l_bn2, n_units=500, W=glorot_uniform, l1=hp.l1_reg,\n l2=hp.l2_reg, activation=relu, name='Hidden layer 2')\n # Batch Normalization\n l_bn3 = BatchNormalization(incoming=l_hid2, name='Batch Normalization 3')\n # Logistic regression Layer\n l_out = LogisticRegression(incoming=l_bn3, n_class=10, l1=hp.l1_reg,\n l2=hp.l2_reg, name='Logistic regression')\n\n # Create network and add layers\n net = Network('mlp with batch normalization')\n net.add(l_in)\n net.add(l_bn1)\n net.add(l_hid1)\n net.add(l_bn2)\n net.add(l_hid2)\n net.add(l_bn3)\n net.add(l_out)\n\n return net, hp",
"def init_dense(self, layer):\n pass",
"def all_views_gaussian_noise_layer(input_layer, std):\r\n\r\n input_l_cc, input_r_cc = input_layer\r\n\r\n output_l_cc = gaussian_noise_layer(input_l_cc, std)\r\n output_r_cc = gaussian_noise_layer(input_r_cc, std)\r\n\r\n\r\n output = (output_l_cc, output_r_cc)\r\n\r\n return output",
"def __init__(self, hidden_dims, input_dim=3*32*32, num_classes=10,\n dropout=0, use_batchnorm=False, reg=0.0,\n weight_scale=1e-2, dtype=np.float32, seed=None):\n self.use_batchnorm = use_batchnorm\n self.use_dropout = dropout > 0\n self.reg = reg\n self.num_layers = 1 + len(hidden_dims)\n self.dtype = dtype\n self.params = {}\n \n dims = [input_dim] + hidden_dims + [num_classes]\n\n # initialise all parameters (weight, bias, gamma, beta)\n for i in range(len(dims)-1):\n w = 'W' + str(i+1)\n b = 'b' + str(i+1)\n self.params[w] = np.random.randn(dims[i], dims[i+1])*weight_scale\n self.params[b] = np.zeros(dims[i+1])\n \n if self.use_batchnorm:\n for i in range(len(dims)-2):\n #no gamma and beta for last layer\n gamma = 'gamma' + str(i+1)\n beta = 'beta' + str(i+1)\n self.params[gamma] = np.ones(dims[i+1])\n self.params[beta] = np.zeros(dims[i+1])\n \n \n\n # When using dropout we need to pass a dropout_param dictionary to each\n # dropout layer so that the layer knows the dropout probability and the mode\n # (train / test). You can pass the same dropout_param to each dropout layer.\n self.dropout_param = {}\n if self.use_dropout:\n self.dropout_param = {'mode': 'train', 'p': dropout}\n if seed is not None:\n self.dropout_param['seed'] = seed\n \n # With batch normalization we need to keep track of running means and\n # variances, so we need to pass a special bn_param object to each batch\n # normalization layer. You should pass self.bn_params[0] to the forward pass\n # of the first batch normalization layer, self.bn_params[1] to the forward\n # pass of the second batch normalization layer, etc.\n self.bn_params = []\n if self.use_batchnorm:\n self.bn_params = [{'mode': 'train'} for i in xrange(self.num_layers - 1)]\n \n # Cast all parameters to the correct datatype\n for k, v in self.params.iteritems():\n self.params[k] = v.astype(dtype)",
"def add_dense_layer(self, input_layer, hyperparams, func='relu', bn=True):\n W = self._weight_variable(shape=hyperparams[0])\n b = self._bias_variable(shape=hyperparams[1])\n x_ravel = tf.reshape(input_layer, shape=[-1, hyperparams[0][0]])\n if bn:\n return self._batch_normalize(\n self._nonlinearity(func)(tf.matmul(x_ravel, W) + b))\n elif not bn:\n return self._nonlinearity(func)(tf.matmul(x_ravel, W) + b)",
"def __init__(\n self,\n hidden_dims,\n input_dim=3 * 32 * 32,\n num_classes=10,\n dropout=1,\n normalization=None,\n reg=0.0,\n weight_scale=1e-2,\n dtype=np.float32,\n seed=None,\n ):\n self.normalization = normalization\n self.use_dropout = dropout != 1\n self.reg = reg\n self.num_layers = 1 + len(hidden_dims)\n self.dtype = dtype\n self.params = {}\n\n Din, Dout = input_dim, hidden_dims[0]\n for i in range(self.num_layers):\n self.params['W' + str(i+1)] = np.random.normal(scale=weight_scale, size=(Din, Dout))\n self.params['b' + str(i+1)] = np.zeros((Dout,))\n Din = Dout\n if i < len(hidden_dims) - 1:\n Dout = hidden_dims[i+1]\n if i == len(hidden_dims) - 1:\n Dout = num_classes\n \n # BN params initialization\n if self.normalization != None:\n for i in range(self.num_layers - 1):\n self.params['gamma' + str(i+1)] = np.ones(shape=(hidden_dims[i]))\n self.params['beta' + str(i+1)] = np.zeros(shape=(hidden_dims[i]))\n\n # When using dropout we need to pass a dropout_param dictionary to each\n # dropout layer so that the layer knows the dropout probability and the mode\n # (train / test). You can pass the same dropout_param to each dropout layer.\n self.dropout_param = {}\n if self.use_dropout:\n self.dropout_param = {\"mode\": \"train\", \"p\": dropout}\n if seed is not None:\n self.dropout_param[\"seed\"] = seed\n\n # With batch normalization we need to keep track of running means and\n # variances, so we need to pass a special bn_param object to each batch\n # normalization layer. You should pass self.bn_params[0] to the forward pass\n # of the first batch normalization layer, self.bn_params[1] to the forward\n # pass of the second batch normalization layer, etc.\n self.bn_params = []\n if self.normalization == \"batchnorm\":\n self.bn_params = [{\"mode\": \"train\"} for i in range(self.num_layers - 1)]\n if self.normalization == \"layernorm\":\n self.bn_params = [{} for i in range(self.num_layers - 1)]\n\n # Cast all parameters to the correct datatype\n for k, v in self.params.items():\n self.params[k] = v.astype(dtype)",
"def __init__(\n self,\n latentspace,\n num_blocks=16,\n neurons_per_layer=16,\n hidden_activation=\"relu\",\n output_activation=\"sigmoid\",\n add_noise=True,\n **kwargs\n ):\n h_Activation = lambda activation: LeakyReLU(0.02) if activation == \"leaky_relu\" else Activation(activation)\n\n inp = Input((latentspace,))\n x = inp\n if add_noise:\n x = GaussianNoise(0.01)(x)\n\n for _ in range(num_blocks):\n x = dense_block(\n x,\n neurons_per_layer, # Dense layer\n add_noise, # GaussianNoise with 0.005\n # Layer normalization\n h_Activation(hidden_activation)\n ) # returns Concat [X_input, X]\n\n x = Dense(128)(x)\n x = h_Activation(hidden_activation)(x)\n x = Dense(1)(x)\n out = h_Activation(output_activation)(x)\n\n # Construct the functional model by calling the constructor of the Model super class.\n super(DiscriminatorLatent, self).__init__(inp, out, **kwargs)",
"def normal_init(module, mean=0, std=1, bias=0):\n nn.init.normal_(module.weight, mean, std)\n if hasattr(module, 'bias') and module.bias is not None:\n nn.init.constant_(module.bias, bias)",
"def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6,\n padding_type='reflect'):\n assert (n_blocks >= 0)\n super(DenseGenerator, self).__init__()\n if type(norm_layer) == functools.partial:\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n model = [nn.ReflectionPad2d(3),\n nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),\n norm_layer(ngf),\n nn.ReLU(True)]\n\n n_downsampling = 2\n for i in range(n_downsampling): # add downsampling layers\n mult = 2 ** i\n model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),\n norm_layer(ngf * mult * 2),\n nn.ReLU(True)]\n\n mult = 2 ** n_downsampling\n dense_features = ngf * mult\n dense_features = dense_features + 6 * 32\n for i in range(n_blocks):\n model += [DenseBlock(num_layers=6, num_input_features=ngf * mult, bn_size=4, growth_rate=32, drop_rate=0,\n norm_layer=norm_layer)]\n model += [norm_layer(dense_features), nn.ReLU(inplace=True),\n nn.Conv2d(dense_features, ngf * mult, kernel_size=1, stride=1, bias=use_bias),\n ]\n\n for i in range(n_downsampling): # add upsampling layers\n mult = 2 ** (n_downsampling - i)\n model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),\n kernel_size=3, stride=2,\n padding=1, output_padding=1,\n bias=use_bias),\n norm_layer(int(ngf * mult / 2)),\n nn.ReLU(True)]\n model += [nn.ReflectionPad2d(3)]\n model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]\n model += [nn.Sigmoid()]\n\n self.model = nn.Sequential(*model)",
"def sn_wrapper(module: nn.Module, use_sn: bool, *sn_args, **sn_kwargs) -> nn.Module:\n if use_sn:\n return nn.utils.spectral_norm(module, *sn_args, **sn_kwargs)\n else:\n return module",
"def __init__(self, hidden_size, eps=1e-12):\n super(LayerNorm, self).__init__()\n self.weight = nn.Parameter(torch.ones(hidden_size))\n self.bias = nn.Parameter(torch.zeros(hidden_size))\n self.variance_epsilon = eps\n self.bias.data.zero_()\n self.weight.data.fill_(1.0)",
"def __init__(self, hidden_dims, input_dim=3*32*32, num_classes=10,\n dropout=1, normalization=None, reg=0.0,\n weight_scale=1e-2, dtype=np.float32, seed=None):\n self.normalization = normalization\n self.use_dropout = dropout != 1\n self.reg = reg\n self.num_layers = 1 + len(hidden_dims)\n self.dtype = dtype\n self.params = {}\n\n ############################################################################\n # TODO: Initialize the parameters of the network, storing all values in #\n # the self.params dictionary. Store weights and biases for the first layer #\n # in W1 and b1; for the second layer use W2 and b2, etc. #\n # When using batch normalization, store scale and shift parameters for the #\n # first layer in gamma1 and beta1; for the second layer use gamma2 and #\n # beta2, etc. Scale parameters should be initialized to ones and shift #\n # parameters should be initialized to zeros. #\n ############################################################################\n input_size = input_dim\n for i in range(len(hidden_dims)):\n output_size = hidden_dims[i]\n self.params['W' + str(i+1)] = np.random.randn(input_size,output_size) * weight_scale\n self.params['b' + str(i+1)] = np.zeros(output_size)\n if self.normalization:\n self.params['gamma' + str(i+1)] = np.ones(output_size)\n self.params['beta' + str(i+1)] = np.zeros(output_size)\n input_size = output_size # 下一层的输入\n # 输出层,没有BN操作\n self.params['W' + str(self.num_layers)] = np.random.randn(input_size,num_classes) * weight_scale\n self.params['b' + str(self.num_layers)] = np.zeros(num_classes)\n # When using dropout we need to pass a dropout_param dictionary to each\n # dropout layer so that the layer knows the dropout probability and the mode\n # (train / test). You can pass the same dropout_param to each dropout layer.\n self.dropout_param = {}\n if self.use_dropout:\n self.dropout_param = {'mode': 'train', 'p': dropout}\n if seed is not None:\n self.dropout_param['seed'] = seed\n\n # With batch normalization we need to keep track of running means and\n # variances, so we need to pass a special bn_param object to each batch\n # normalization layer. You should pass self.bn_params[0] to the forward pass\n # of the first batch normalization layer, self.bn_params[1] to the forward\n # pass of the second batch normalization layer, etc.\n self.bn_params = []\n if self.normalization=='batchnorm':\n self.bn_params = [{'mode': 'train'} for i in range(self.num_layers - 1)]\n if self.normalization=='layernorm':\n self.bn_params = [{} for i in range(self.num_layers - 1)]\n\n # Cast all parameters to the correct datatype\n for k, v in self.params.items():\n self.params[k] = v.astype(dtype)",
"def init_layer(layer):\n \n if layer.weight.ndimension() == 4:\n (n_out, n_in, height, width) = layer.weight.size()\n n = n_in * height * width\n \n elif layer.weight.ndimension() == 2:\n (n_out, n) = layer.weight.size()\n\n std = math.sqrt(2. / n)\n scale = std * math.sqrt(3.)\n layer.weight.data.uniform_(-scale, scale)\n\n if layer.bias is not None:\n layer.bias.data.fill_(0.)",
"def __call__(self, x: jnp.ndarray, *, train: bool, debug: bool = False\n ) -> jnp.ndarray:\n del train, debug\n hid_sizes = self.hid_sizes\n if isinstance(hid_sizes, int):\n hid_sizes = [hid_sizes]\n x = jnp.reshape(x, (x.shape[0], -1))\n for num_hid in hid_sizes:\n x = nn.Dense(\n num_hid, kernel_init=self.kernel_init, bias_init=self.bias_init)(\n x)\n x = nn.relu(x)\n\n # head\n x = nn_layers.IdentityLayer(name='pre_logits')(x)\n x = nn.Dense(\n self.num_outputs,\n kernel_init=self.kernel_init,\n bias_init=self.bias_init,\n name='output_projection')(\n x)\n return x",
"def MI_Net_with_DS(dataset):\n # load data and convert type\n train_bags = dataset['train']\n test_bags = dataset['test']\n\n # convert bag to batch\n train_set = convertToBatch(train_bags)\n test_set = convertToBatch(test_bags)\n dimension = train_set[0][0].shape[1]\n weight = [1.0, 1.0, 1.0, 0.0]\n\n # data: instance feature, n*d, n = number of training instance\n data_input = Input(shape=(dimension,), dtype='float32', name='input')\n\n # fully-connected\n fc1 = Dense(256, activation='relu', kernel_regularizer=l2(args.weight_decay))(data_input)\n fc2 = Dense(128, activation='relu', kernel_regularizer=l2(args.weight_decay))(fc1)\n fc3 = Dense(64, activation='relu', kernel_regularizer=l2(args.weight_decay))(fc2)\n\n # dropout\n dropout1 = Dropout(rate=0.5)(fc1)\n dropout2 = Dropout(rate=0.5)(fc2)\n dropout3 = Dropout(rate=0.5)(fc3)\n\n # features pooling\n fp1 = Feature_pooling(output_dim=1, kernel_regularizer=l2(args.weight_decay), pooling_mode=args.pooling_mode, name='fp1')(dropout1)\n fp2 = Feature_pooling(output_dim=1, kernel_regularizer=l2(args.weight_decay), pooling_mode=args.pooling_mode, name='fp2')(dropout2)\n fp3 = Feature_pooling(output_dim=1, kernel_regularizer=l2(args.weight_decay), pooling_mode=args.pooling_mode, name='fp3')(dropout3)\n\n # score average\n mg_ave =average([fp1,fp2,fp3], name='ave')\n\n model = Model(inputs=[data_input], outputs=[fp1, fp2, fp3, mg_ave])\n sgd = SGD(lr=args.init_lr, decay=1e-4, momentum=args.momentum, nesterov=True)\n model.compile(loss={'fp1':bag_loss, 'fp2':bag_loss, 'fp3':bag_loss, 'ave':bag_loss}, loss_weights={'fp1':weight[0], 'fp2':weight[1], 'fp3':weight[2], 'ave':weight[3]}, optimizer=sgd, metrics=[bag_accuracy])\n\n # train model\n t1 = time.time()\n num_batch = len(train_set)\n for epoch in range(args.max_epoch):\n train_loss, train_acc = train_eval(model, train_set)\n test_loss, test_acc = test_eval(model, test_set)\n print('epoch=', epoch, ' train_loss= {:.3f}'.format(train_loss), ' train_acc= {:.3f}'.format(train_acc), ' test_loss={:.3f}'.format(test_loss), ' test_acc= {:.3f}'.format(test_acc))\n t2 = time.time()\n print('run time:', (t2-t1) / 60, 'min')\n print('test_acc={:.3f}'.format(test_acc))\n\n return test_acc",
"def weights_init(mod):\n classname = mod.__class__.__name__\n if classname.find('Conv') != -1:\n mod.weight.data.normal_(0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n mod.weight.data.normal_(1.0, 0.02)\n mod.bias.data.fill_(0)",
"def __init__(self, hidden_dims, input_dim=3*32*32, num_classes=10,\n dropout=1, normalization=None, reg=0.0,\n weight_scale=1e-2, dtype=np.float32, seed=None):\n self.normalization = normalization\n self.use_dropout = dropout != 1\n self.reg = reg\n self.num_layers = 1 + len(hidden_dims)\n self.dtype = dtype\n self.params = {}\n\n ############################################################################\n # TODO: Initialize the parameters of the network, storing all values in #\n # the self.params dictionary. Store weights and biases for the first layer #\n # in W1 and b1; for the second layer use W2 and b2, etc. Weights should be #\n # initialized from a normal distribution centered at 0 with standard #\n # deviation equal to weight_scale. Biases should be initialized to zero. #\n # #\n # When using batch normalization, store scale and shift parameters for the #\n # first layer in gamma1 and beta1; for the second layer use gamma2 and #\n # beta2, etc. Scale parameters should be initialized to ones and shift #\n # parameters should be initialized to zeros. #\n ############################################################################\n dimension = [input_dim] + hidden_dims + [num_classes]\n for i in range(1, self.num_layers+1):\n self.params['W{0}'.format(i)] = weight_scale * np.random.randn(dimension[i-1], dimension[i])\n self.params['b{0}'.format(i)] = np.zeros(dimension[i])\n\n if self.normalization in ['batchnorm', 'layernorm']:\n self._batchnormInit()\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n # When using dropout we need to pass a dropout_param dictionary to each\n # dropout layer so that the layer knows the dropout probability and the mode\n # (train / test). You can pass the same dropout_param to each dropout layer.\n self.dropout_param = {}\n if self.use_dropout:\n self.dropout_param = {'mode': 'train', 'p': dropout}\n if seed is not None:\n self.dropout_param['seed'] = seed\n\n # With batch normalization we need to keep track of running means and\n # variances, so we need to pass a special bn_param object to each batch\n # normalization layer. You should pass self.bn_params[0] to the forward pass\n # of the first batch normalization layer, self.bn_params[1] to the forward\n # pass of the second batch normalization layer, etc.\n self.bn_params = []\n if self.normalization=='batchnorm':\n self.bn_params = [{'mode': 'train'} for i in range(self.num_layers - 1)]\n if self.normalization=='layernorm':\n self.bn_params = [{} for i in range(self.num_layers - 1)]\n\n # Cast all parameters to the correct datatype\n for k, v in self.params.items():\n self.params[k] = v.astype(dtype)",
"def dense(name, x, w=None, output_dim=128, initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0,\n bias=0.0, activation=None, batchnorm_enabled=False, dropout_keep_prob=-1, is_training=True):\n with tf.variable_scope(name) as scope:\n dense_o_b = dense_p(scope, x=x, w=w, output_dim=output_dim, initializer=initializer,\n l2_strength=l2_strength, bias=bias)\n\n if batchnorm_enabled:\n dense_o_bn = tf.layers.batch_normalization(dense_o_b, training=is_training)\n else:\n dense_o_bn = dense_o_b\n\n if activation is None:\n dense_a = dense_o_bn\n else:\n dense_a = activation(dense_o_bn)\n\n if dropout_keep_prob != -1:\n dense_o_dr = tf.nn.dropout(dense_a, keep_prob=dropout_keep_prob)\n else:\n dense_o_dr = dense_a\n\n dense_o = dense_o_dr\n\n return dense_o",
"def __call__(self, inputs_shape):\n assert not self._achieve_init\n self.W = 2 * np.random.randn(self._units, inputs_shape) / np.sqrt(inputs_shape)\n self.b = np.zeros((self._units, 1))\n super(Dense, self).__call__()",
"def batch_normal(x, is_train, name, activation_fn=None):\n with tf.name_scope(name), tf.variable_scope(name):\n outputs = tf.contrib.layers.batch_norm(x,\n decay=0.999,\n scale=True,\n activation_fn=activation_fn,\n is_training=is_train)\n return outputs",
"def __init__(self, hidden_size, eps=1e-12):\n super(LayerNorm, self).__init__()\n self.weight = nn.Parameter(torch.ones(hidden_size))\n self.bias = nn.Parameter(torch.zeros(hidden_size))\n self.variance_epsilon = eps",
"def flatten_and_dense(X,out_channels,*args,activation = 'relu', initialise_weights = False):\n \n shape = X.shape\n X = torch.reshape(X,(-1,1)) # Flatten\n if initialise_weights:\n weights = torch.Tensor(np.random.uniform(-0.01,0.01, size = (out_channels,len(X))))\n weights.requires_grad = False\n bias = torch.Tensor(np.random.uniform(-0.01,0.01, size = (out_channels,1)))\n else:\n weights = args[0]\n bias = args[1]\n if activation == 'sigmoid':\n output = sigmoid(weights.mm(X) + bias)\n elif activation == 'relu':\n output = relu(weights.mm(X) + bias)\n else:\n output = weights.mm(X) + bias # No activation applied -> Typically done before the softmax\n if not initialise_weights:\n \n return output\n else:\n output_shape = output.shape\n return output,weights,bias,output_shape",
"def _squeeze( inputs):\n input_channels = int(inputs.shape[-1])\n\n x = GlobalAveragePooling2D()(inputs)\n x = Dense(input_channels, activation='relu')(x)\n x = Dense(input_channels, activation='hard_sigmoid')(x)\n return x",
"def dense(x, \n num_outputs,\n STD=0.01,\n keep_prob=None,\n activation=None,\n bn=False,\n phase=None):\n output = tf.identity(x)\n \n if keep_prob is not None:\n output = tf.layers.dropout(output, rate=keep_prob, training=phase)\n\n output = slim.fully_connected(output, num_outputs, activation_fn=None, \\\n weights_initializer=tf.truncated_normal_initializer(stddev=STD))\n\n if bn:\n output = tf.layers.batch_normalization(output, training=phase)\n \n if activation: \n output = activation(output)\n \n return output"
]
| [
"0.6515949",
"0.6381442",
"0.6347507",
"0.62334216",
"0.6144226",
"0.6126199",
"0.6119312",
"0.6050492",
"0.6039742",
"0.5975316",
"0.5937367",
"0.5920974",
"0.58605415",
"0.5844079",
"0.58208776",
"0.58104813",
"0.57957095",
"0.5795613",
"0.5778775",
"0.576105",
"0.5750413",
"0.57420975",
"0.57305855",
"0.5728653",
"0.5697281",
"0.56960285",
"0.56789017",
"0.56758773",
"0.5654775",
"0.564848"
]
| 0.6406165 | 1 |
Tests aborting a task without using an event. In theory once the future is cancelled, the dask worker shall 'forget' the task. Sadly this does not work in distributed mode where an Event is necessary. | def test_task_is_aborted(dask_client: distributed.Client):
# NOTE: this works because the cluster is in the same machine
future = dask_client.submit(_some_long_running_task)
_wait_for_task_to_start()
future.cancel()
assert future.cancelled()
with pytest.raises(concurrent.futures.CancelledError):
future.result(timeout=DASK_TESTING_TIMEOUT_S) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_cancel(self) -> None:\n context: Dict[str,ArtifactDescriptor] = dict()\n cmd = pycell.python_cell(\n source='import time\\ntime.sleep(5)',\n validate=True\n )\n controller = FakeWorkflowController()\n self.backend.execute_async(\n task=TaskHandle(\n task_id='000',\n project_id=self.PROJECT_ID,\n controller=controller\n ),\n command=cmd,\n artifacts=context\n )\n time.sleep(1)\n self.backend.cancel_task('000')\n time.sleep(5)\n self.assertIsNone(controller.task_id)\n self.assertIsNone(controller.state)",
"def cancel(self, task):\n raise NotImplementedError",
"async def test_trio_as_fut_throws_after_cancelled():\n\n async def trio_task():\n try:\n await trio.sleep_forever()\n finally:\n raise ValueError(\"hi\")\n\n async with trio_asyncio.open_loop() as loop:\n fut = loop.trio_as_future(trio_task)\n await trio.testing.wait_all_tasks_blocked()\n fut.cancel()\n with pytest.raises(asyncio.CancelledError):\n await fut",
"def test_only_relevant_task_is_cancelled(serve_instance):\n signal_actor = SignalActor.remote()\n\n @serve.deployment\n class Ingress:\n async def __call__(self, *args):\n await signal_actor.wait.remote()\n return \"ok\"\n\n h = serve.run(Ingress.bind()).options(use_new_handle_api=True)\n\n r1 = h.remote()\n r2 = h.remote()\n\n # Wait for both requests to be executing.\n wait_for_condition(lambda: ray.get(signal_actor.cur_num_waiters.remote()) == 2)\n\n r1.cancel()\n with pytest.raises(ray.exceptions.TaskCancelledError):\n r1.result()\n\n # Now signal r2 to run to completion and check that it wasn't cancelled.\n ray.get(signal_actor.send.remote())\n assert r2.result() == \"ok\"",
"def test_cancel(self):\n g = TaskDependencyGraph(MockWorkflowContext())\n task = mock.Mock()\n g.add_task(task)\n with mock.patch('cloudify.workflows.api.cancel_request', True):\n self.assertRaises(api.ExecutionCancelled, g.execute)\n\n self.assertFalse(task.apply_async.called)\n self.assertFalse(task.cancel.called)",
"async def test_cancelled_task(self):\n await self.cog._unsilence(self.text_channel)\n self.cog.scheduler.cancel.assert_called_once_with(self.text_channel.id)",
"def test_cancel(self):\n called = []\n clock = task.Clock()\n d = task.deferLater(clock, 1, called.append, None)\n d.cancel()\n def cbCancelled(ignored):\n # Make sure there are no calls outstanding.\n self.assertEqual([], clock.getDelayedCalls())\n # And make sure the call didn't somehow happen already.\n self.assertFalse(called)\n self.assertFailure(d, defer.CancelledError)\n d.addCallback(cbCancelled)\n return d",
"async def cancel_and_delete_task(task_id: TaskId):",
"def stop(self):\n #self.ev_intr.set()\n\n try:\n self.task.stop()\n\n except TaskError as e:\n self.logger.error(\"Error cancelling child task: %s\" % (str(e)))",
"def abort(self):\n self.update(True)\n\n resp = self._connection._post(\n get_url('task abort', uuid=self._uuid))\n\n if resp.status_code == 404:\n raise MissingTaskException(resp.json()['message'])\n raise_on_error(resp)\n\n self.update(True)",
"def test_cancel(self):\n reactor = FakeReactor()\n cancelled = []\n\n def error(f):\n cancelled.append(reactor.in_call_from_thread)\n cancelled.append(f)\n\n d = Deferred().addErrback(error)\n dr = EventualResult(d, _reactor=reactor)\n dr.cancel()\n self.assertTrue(cancelled[0])\n self.assertIsInstance(cancelled[1].value, CancelledError)",
"def cancel(self):\n self._task.cancel()",
"async def test_cancel(\n decoy: Decoy,\n state_store: StateStore,\n command_executor: CommandExecutor,\n subject: QueueWorker,\n) -> None:\n subject.start()\n subject.cancel()\n\n await subject.join()\n\n decoy.verify(\n await command_executor.execute(command_id=matchers.Anything()),\n times=0,\n )",
"async def __aexit__(self, *args) -> None:\n assert self._task\n await cancel(self._task)\n self._task = None",
"async def test_cancelled_task(self) -> None:\n cancelledMessage: str = \"I have been cancelled\"\n\n class CancelHandler(Handler):\n async def getName(self) -> str:\n raise asyncio.CancelledError(\n cancelledMessage\n ) # Pretend that this is some await call that gets cancelled\n\n async with TestServer(handler=CancelHandler(), ip=\"::1\") as sa:\n ip, port = sa.ip, sa.port\n assert ip and port\n async with get_client(TestingService, host=ip, port=port) as client:\n with self.assertRaises(ApplicationError) as ex:\n await client.getName()\n self.assertEqual(\n ex.exception.message,\n f\"Application was cancelled on the server with message: {cancelledMessage}\",\n )",
"def cancel(client, task_id):\n # ES does not throw an error if the task doesn't exist.\n client.tasks.cancel(task_id=task_id)",
"def _on_future_cancelled(self, promise):\n promise.setCanceled()",
"def testCallLaterCancelled(self):\n c = task.Clock()\n call = c.callLater(1, lambda a, b: None, 1, b=2)\n call.cancel()\n self.assertFalse(call.active())",
"def test_out_of_band_task_is_not_cancelled(serve_instance):\n signal_actor = SignalActor.remote()\n\n @serve.deployment\n class Downstream:\n async def hi(self):\n await signal_actor.wait.remote()\n return \"ok\"\n\n @serve.deployment\n class Ingress:\n def __init__(self, handle):\n self._h = handle.options(use_new_handle_api=True)\n self._out_of_band_req = self._h.hi.remote()\n\n async def __call__(self, *args):\n await self._h.hi.remote()\n\n async def get_out_of_band_response(self):\n return await self._out_of_band_req\n\n h = serve.run(Ingress.bind(Downstream.bind())).options(use_new_handle_api=True)\n\n # Send a request, wait for downstream request to start, and cancel it.\n r1 = h.remote()\n wait_for_condition(lambda: ray.get(signal_actor.cur_num_waiters.remote()) == 2)\n\n r1.cancel()\n with pytest.raises(ray.exceptions.TaskCancelledError):\n r1.result()\n\n # Now signal out of band request to run to completion and check that it wasn't\n # cancelled.\n ray.get(signal_actor.send.remote())\n assert h.get_out_of_band_response.remote().result() == \"ok\"",
"def test_asynchronousStop(self):\n def main(reactor):\n reactor.callLater(1, reactor.stop)\n return defer.Deferred()\n r = _FakeReactor()\n exitError = self.assertRaises(\n SystemExit, task.react, main, [], _reactor=r)\n self.assertEqual(0, exitError.code)",
"def testCallLaterCancelled(self):\n c = task.Clock()\n call = c.callLater(1, lambda a, b: None, 1, b=2)\n call.cancel()\n self.failIf(call.active())",
"def cancel(self) -> asyncio.Future:\n pass # pragma: no cover",
"async def async_cancel(self):\n raise NotImplementedError",
"def test_revoke_task1():\n task_id = uuid.uuid4().hex\n\n waiting_task = long_task.apply_async(\n args=[5], task_id=task_id, countdown=10, queue=\"queue1\"\n )\n # waiting_task.revoke()\n app.control.revoke(task_id=task_id, terminal=True)\n time.sleep(2)\n new_task = long_task.apply_async(args=[3], task_id=task_id, queue=\"queue2\")",
"async def _exit_tasks() -> None:\n current_task = asyncio.Task.current_task()\n all_tasks = asyncio.Task.all_tasks()\n not_current_tasks = [task for task in all_tasks if task is not current_task]\n\n for task in not_current_tasks:\n task.cancel()",
"async def test_cancel_noops_if_joined(\n decoy: Decoy,\n state_store: StateStore,\n command_executor: CommandExecutor,\n subject: QueueWorker,\n) -> None:\n subject.start()\n await subject.join()\n subject.cancel()",
"def test_synchronousStop(self):\n def main(reactor):\n d = defer.Deferred()\n def stop():\n reactor.stop()\n d.callback(None)\n reactor.callWhenRunning(stop)\n return d\n r = _FakeReactor()\n exitError = self.assertRaises(\n SystemExit, task.react, main, [], _reactor=r)\n self.assertEqual(0, exitError.code)",
"def _cancel(self):\n self.waiter.set_result_if_pending(None)\n \n timer = self.timer\n if (timer is not None):\n self.timer = None\n timer.cancel()",
"def test_rollbacked_transaction_discard_task(self):\n\n @transaction.commit_on_success\n def do_something():\n my_task.delay()\n raise SpecificException\n try:\n do_something()\n except SpecificException:\n self.assertFalse(my_global)\n else:\n self.fail('Exception not raised')",
"async def cancel_shielded_checkpoint() -> None:\n await get_async_backend().cancel_shielded_checkpoint()"
]
| [
"0.67319435",
"0.6726781",
"0.6667438",
"0.6589503",
"0.6548635",
"0.6491699",
"0.6402465",
"0.6327161",
"0.6293195",
"0.6251757",
"0.6241168",
"0.6227773",
"0.6225861",
"0.62026316",
"0.61707467",
"0.61396015",
"0.6129849",
"0.6107896",
"0.61020535",
"0.60851115",
"0.6073518",
"0.6072337",
"0.60549057",
"0.60216415",
"0.6002276",
"0.59897304",
"0.59778804",
"0.5967175",
"0.5941338",
"0.5911431"
]
| 0.7703924 | 0 |
Returns the successor state after the specified agent takes the action. | def generateSuccessor( self, agentIndex, action):
# Check that successors exist
if self.isWin() or self.isLose(): raise Exception('Can\'t generate a successor of a terminal state.')
# Copy current state
state = GameState(self)
# Let agent's logic deal with its action's effects on the board
if agentIndex == 0: # Pacman is moving
state.data._eaten = [False for i in range(state.getNumAgents())]
PacmanRules.applyAction( state, action )
else: # A ghost is moving
GhostRules.applyAction( state, action, agentIndex )
# Time passes
if agentIndex == 0:
state.data.scoreChange += -TIME_PENALTY # Penalty for waiting around
else:
GhostRules.decrementTimer( state.data.agentStates[agentIndex] )
# Resolve multi-agent effects
GhostRules.checkDeath( state, agentIndex )
# Book keeping
state.data._agentMoved = agentIndex
state.data.score += state.data.scoreChange
return state | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def next(self, state, turn, greedy_strategy):\n return self.agent_action",
"def agent_start(self, state):\n self.sum_rewards = 0\n self.episode_steps = 0\n self.last_state = np.array(state)\n self.last_action = self.policy(self.last_state)\n return self.last_action",
"def step(self, action):\n obs = self.gym.get_observations()\n all_actions = self.gym.act(obs)\n all_actions.insert(self.gym.training_agent, action)\n state, reward, terminal, info = self.gym.step(all_actions)\n agent_state = self.featurize(state[self.gym.training_agent])\n\n # agent_state_history = self.make_observation(agent_state, self.step)\n agent_reward = reward[self.gym.training_agent]\n\n # self.step += 1\n return agent_state, agent_reward, terminal, info",
"def getSuccessor(self, gameState, action):\r\n successor = gameState.generateSuccessor(self.index, action)\r\n return successor",
"def successor(self, state):\n pass # abstract",
"def successor_state(self):\n return self._successor_state",
"def env_step(self, action):\n random_prob = np.random.uniform(0, 1)\n if random_prob <= self.stochasticity: # Ignore agent's action and move to one of the 8 neighbours\n # Determine how the agent moves (from -1 to 1 in each direction, but not both 0)\n random_nn = np.random.randint(0, len(self.nn))\n random_y = self.nn[random_nn, 0]\n random_x = self.nn[random_nn, 1]\n\n # Move to one of the nearest neighbours\n self.current_state[0] += random_y\n self.current_state[1] += random_x\n else: # Perform agent's action\n # Update current stated based on the action the agent took\n curr_x = self.current_state[1]\n self.current_state[0] += self.actions[action][0] + self.wind[curr_x]\n self.current_state[1] += self.actions[action][1]\n\n # Check if the agent fell out of the boundaries of the grid world\n y_coord = self.current_state[0]\n x_coord = self.current_state[1]\n\n if y_coord >= self.num_rows: # Agent went too far up\n self.current_state[0] = self.num_rows - 1\n elif y_coord < 0: # Agent went too far down\n self.current_state[0] = 0\n\n if x_coord >= self.num_cols: # Agent went too far right\n self.current_state[1] = self.num_cols - 1\n elif x_coord < 0: # Agent went too far left\n self.current_state[1] = 0\n\n is_terminal = False\n reward = -1.0\n\n # Check if the agent reached a terminal state\n if self.current_state == self.terminal_state:\n is_terminal = True\n reward = 0.0\n\n return reward, self.current_state, is_terminal",
"def choose_action(self):\n\n # Set the agent state and default action\n action=None\n if len(self.action_sequence) >=1:\n action = self.action_sequence[0] \n if len(self.action_sequence) >=2:\n self.action_sequence=self.action_sequence[1:]\n else:\n self.action_sequence=[]\n return action",
"def _single_agent_step(self, action):\n reward = 0.0\n done = False\n self.timestep += 1\n state, player_id = self.game.step(action)\n while not self.game.is_over() and not player_id == self.active_player:\n self.timestep += 1\n action, _ = self.model.agents[player_id].eval_step(\n self._extract_state(state)\n )\n if not self.model.agents[player_id].use_raw:\n action = self._decode_action(action)\n state, player_id = self.game.step(action)\n\n if self.game.is_over():\n reward = self.get_payoffs()[self.active_player]\n done = True\n state = self.reset()\n return state, reward, done\n\n return self._extract_state(state), reward, done",
"def getSuccessor(self, gameState, action):\n successor = gameState.generateSuccessor(self.index, action)\n pos = successor.getAgentState(self.index).getPosition()\n if pos != util.nearestPoint(pos):\n return successor.generateSuccessor(self.index, action)\n else:\n return successor",
"def nextState(self, state, action):\n return state + action",
"def agent_step(self, reward, state):\n prev_val= self.state[self.prevAction]\n self.state[self.prevAction]=prev_val+self.alpha*(reward-prev_val)\n val=max(self.state)\n index=self.state.index(val)\n self.prevAction=index\n i=random.uniform(0,1)\n if i < 1-self.prob:\n self.prevAction=index\n return index\n else:\n index=random.randint(0,self.num_bandits-1)\n self.prevAction=index",
"def step(self, action, agent_index=0):\n return self.env.step(action)",
"def agent_start(self, state):\n\n # This agent doesn't care what state it's in, it always chooses\n # to move left or right randomly according to self.probLeft\n self.prevAction = self._choose_action()\n\n return self.prevAction",
"def step(action, state):\n observation = state\n reward = 0\n done = False\n if action == \"right\":\n if state == N_STATES - 2:\n observation = \"terminal\"\n reward = 1\n done = True\n else:\n observation = state + 1\n else:\n # move left\n if state != 0:\n observation = state - 1\n return observation, reward, done",
"def move_agent(self, state):\n m = self.m\n n = self.n\n\n cur_env = deepcopy(state.grid)\n cur_env[m, n] = 0\n action = self.choose_action(state)\n\n if action == 'Right':\n if n + 1 >= grid_size or cur_env[m][n+1] != 0:\n Rew = -2 # Reward -5 if we move into wall or another agent\n self.collisions += 1\n else:\n n += 1\n Rew = -0.1 # Reward -1 otherwise\n a = 0 # Action number\n elif action == 'Left':\n if n - 1 < 0 or cur_env[m][n-1] != 0:\n Rew = -2\n self.collisions += 1\n else:\n n -= 1\n Rew = -0.1\n a = 1\n elif action == 'Up':\n if m - 1 < 0 or cur_env[m-1][n] != 0:\n Rew = -2\n self.collisions += 1\n else:\n m -= 1\n Rew = -0.1\n a = 2\n elif action == 'Down':\n if m + 1 >= grid_size or cur_env[m+1][n] != 0:\n Rew = -2\n self.collisions += 1\n else:\n m += 1\n Rew = -0.1\n a = 3\n\n m = m % grid_size\n n = n % grid_size\n self.m = m # Update position of agent\n self.n = n # Update position of agent\n cur_env[m][n] = 1 # Update grid\n new_state = State(cur_env, [m, n]) # Set new state\n terminal = False\n\n if [m, n] == self.end:\n Rew = 10\n terminal = True\n self.carry = True\n\n return new_state, a, Rew, terminal",
"def step(self, agent) -> Tuple[Optional[str], Any]:\n r = self._step(agent)\n self.finished = True\n return r",
"def get_next_state(self, state, action):\n pass",
"def step(self, action):\n res = self.reward_table.get(self.curr_state, action)\n\n self.curr_state = res['result']\n\n return res",
"def step(self, curr_state, curr_action):\n next_state = self.state_transition(curr_state, curr_action)\n terminal_state = self.is_terminal(next_state)\n if terminal_state[0]:\n r = self.reward(\"agent\", terminal_state[1])\n return (next_state, r, terminal_state[0])\n env_action = random.choice(list(self.action_space(next_state)[1]))\n next_state = self.state_transition(next_state, env_action)\n terminal_state = self.is_terminal(next_state)\n if terminal_state[0]:\n r = self.reward(\"env\", terminal_state[1])\n return (next_state, r, terminal_state[0]) \n return (next_state, -1, False)",
"def step(self, action):\n\n # ==\n # Transition, reward and termination\n done = False\n reward = self.get_current_reward(self.state)\n\n # Leaf and absorbing nodes\n if self.state <= 1:\n done = True\n if self.state == 1:\n self.state = 0 # go to absorbing\n else:\n self.state = int(self.state // 2)\n\n # ==\n # Features\n phi = self.state_2_features(self.state)\n\n return phi, reward, done, {}",
"def get_final_action(self, state):\n\t\tstate = Variable(torch.from_numpy(state))\n\t\taction = self.target_actor.forward(state).detach()\n\t\tgenus = torch.argmax(self.genus.forward(state),dim=-1).detach()\n\t\treturn action.data.numpy(), genus.data.numpy()",
"def generate_successor(self, index, action):\n if action not in self.get_legal_actions(index):\n action = STOP\n\n game_state = self._clone()\n agents = game_state.agent_states\n ai = agents[index]\n ai.dir = action\n ai.pos = ai.pos[0] + action[0], ai.pos[1] + action[1]\n\n # check and update\n if isinstance(ai, Pacman):\n if ai.pos in game_state.foods:\n game_state.score += 10\n game_state.foods.remove(ai.pos)\n if ai.pos in game_state.capsules:\n game_state.capsules.remove(ai.pos)\n for agent in agents:\n agent.capsule_timer = CAPSULE_TIMEOUT\n for oi in range(len(agents)):\n if oi != index:\n ao = agents[oi]\n if ao.pos == ai.pos:\n # meet\n pac, gho = None, None\n if isinstance(ai, Pacman) and isinstance(ao, Ghost):\n pac, gho = ai, ao\n elif isinstance(ai, Ghost) and isinstance(ao, Pacman):\n pac, gho = ao, ai\n if pac is not None and gho is not None:\n if gho.capsule_timer > 0:\n # ghost died\n gho.pos = INIT_GHOST\n gho.dir = STOP\n gho.capsule_timer = 0\n gho.speed = 1\n\n pac.num_ghost_eaton += 1\n game_state.score += 200 * pac.num_ghost_eaton\n else:\n # pacman died\n pac.pos = INIT_PACMAN\n pac.dir = STOP\n\n pac.num_died += 1\n if pac.num_died == 3:\n raise Exception('Pacman died 3 times, Points: ' + str(game_state.score))\n if agents[oi].capsule_timer > 0:\n agents[oi].capsule_timer -= 1\n if agents[oi].capsule_timer == 0 and isinstance(agents[oi], Pacman):\n agents[oi].num_ghost_eaton = 0\n return game_state",
"def step(self, agent_action):\n\n done = False\n self.round += 1\n # First check round num, if equal to max then fail\n if self.round == self.max_round:\n done = True\n success = FAIL\n user_response = self._end_response()\n else:\n try:\n success = self.update_state(agent_action)\n if success:\n user_response = self._end_response()\n else:\n agent_intent = agent_action['intent']\n assert agent_intent in self.user_responses, 'Not acceptable agent action'\n user_response = self.user_responses[agent_intent](agent_action)\n except Exception:\n return self._default_response(),-5,False,False\n\n reward = self.reward_function(agent_action, success)\n\n return user_response, reward, done, True if success is 1 else False",
"def agent_step(self, reward, state):\n self.sum_rewards += reward\n self.episode_steps += 1\n\n # Make state an array of shape (1, state_dim) to add a batch dimension and\n # to later match the get_action_values() and get_TD_update() functions\n state = np.array(state)\n\n # Select action\n action = self.policy(state)\n \n # Append new experience to replay buffer\n self.replay_buffer.append(self.last_state, self.last_action, reward, 0, state)\n \n # Perform replay steps:\n if self.replay_buffer.size() > self.replay_buffer.minibatch_size:\n self.network_target.load_state_dict(self.network.state_dict())\n for _ in range(self.num_replay):\n # Get sample experiences from the replay buffer\n experiences = self.replay_buffer.sample() \n self.optimize_network(experiences)\n \n # Update the last state and last action.\n self.last_state = state\n self.last_action = action\n \n return action",
"def step(self, action):\n x, y = self.state_to_coord(self.current_state)\n if action == self.actions['up']:\n possible_next_state = self.coord_to_state(x - 1, y)\n if x - 1 < 0 or possible_next_state in self.block_states:\n result = self.current_state, self.step_reward, False\n elif possible_next_state in self.goal_states:\n result = possible_next_state, self.goal_reward, True\n else:\n result = possible_next_state, self.step_reward, False\n elif action == self.actions['right']:\n possible_next_state = self.coord_to_state(x, y + 1)\n if y + 1 >= self.columns or possible_next_state in self.block_states:\n result = self.current_state, self.step_reward, False\n else:\n result = possible_next_state, self.step_reward, False\n\n elif action == self.actions['left']:\n possible_next_state = self.coord_to_state(x, y - 1)\n if y - 1 < 0 or possible_next_state in self.block_states:\n result = self.current_state, self.step_reward, False\n else:\n result = possible_next_state, self.step_reward, False\n\n elif action == self.actions['down']:\n possible_next_state = self.coord_to_state(x + 1, y)\n if x + 1 >= self.rows or possible_next_state in self.block_states:\n result = self.current_state, self.step_reward, False\n else:\n result = possible_next_state, self.step_reward, False\n\n else:\n raise ValueError('Expected action value in {}, received {} in state {}'.\n format(self.actions, action, self.state_to_coord(self.current_state)))\n\n self.current_state = result[0]\n return result",
"def step(self, action):\r\n\r\n max_time = len(self.episode) - 1 # the number of states available in the episode , -1 for indexing purpose\r\n state = self.episode[self.t] # state contains LT, ST indicator, closing price\r\n closing_price = state['Close']\r\n transaction_fee = self.transaction_fee\r\n agent = self.agent\r\n reward = 0\r\n \r\n if self.t < max_time-1:\r\n if agent.stock == 0: # if there is no stock position the agent can buy or do nothing\r\n if action == 1: # buy sstock at closing price\r\n agent.stock_position = agent.starting_cash / closing_price * (1 - transaction_fee)\r\n agent.cash = 0\r\n agent.stock = 1\r\n reward = 0\r\n elif action == 0: # no action\r\n reward = 0\r\n else:\r\n reward = -10000\r\n \r\n elif agent.stock == 1: # if there is a stock position the agent can sell or do nothing\r\n if action == 2: # sell stock at closing price\r\n agent.cash = agent.stock_position * closing_price * (1 - transaction_fee)\r\n reward = agent.cash - agent.starting_cash\r\n self.done = True\r\n elif action == 0: # no action\r\n reward = 0\r\n elif action == 1:\r\n reward = -10000\r\n \r\n elif self.t == max_time-1:\r\n if agent.stock == 1:\r\n # stock position is forced to liquidate\r\n agent.cash = agent.stock_position * closing_price * (1 - transaction_fee)\r\n reward = agent.cash - agent.starting_cash\r\n self.done = True\r\n \r\n else:\r\n reward = 0\r\n self.done = True \r\n \r\n next_state = (self.episode[self.t+1]['ST Relative Indicator'], \r\n self.episode[self.t+1]['ST Relative Indicator'],\r\n agent.stock,\r\n self.t+1)\r\n self.t += 1\r\n \r\n return next_state, reward, self.done",
"def get_action(self, state):\n self.visited = {}\n utility = -inf\n move = 'STOP'\n\n # We choose the successor with the maximum utility\n for successor in state.generatePacmanSuccessors():\n maxPlayer = True\n score = self.alphabeta(successor[0], -inf, +inf, maxPlayer)\n if utility < score:\n move = successor[1]\n utility = score\n\n # If there's no winning state, we try to to move farther from the ghost\n if utility == -inf:\n dist = -inf\n for successor in state.generatePacmanSuccessors():\n newDist = self.distanceFromGhost(successor[0])\n if not successor[0].isLose() and newDist > dist:\n move = successor[1]\n dist = newDist\n print(utility)\n return move",
"def state_transition(self, curr_state, curr_action):\n #The state transition happens from the current state to the next state based on agent's action\n curr_state[curr_action[0]]=curr_action[1]\n return curr_state",
"def getSuccessor(self, gameState, action):\n\n successor = gameState.generateSuccessor(self.index, action)\n pos = successor.getAgentState(self.index).getPosition()\n\n if (pos != util.nearestPoint(pos)):\n # Only half a grid position was covered.\n return successor.generateSuccessor(self.index, action)\n else:\n return successor"
]
| [
"0.6769947",
"0.66912377",
"0.66013414",
"0.6584788",
"0.65628904",
"0.65581864",
"0.6526788",
"0.6497395",
"0.6473691",
"0.6469876",
"0.6434387",
"0.6432849",
"0.63574517",
"0.63170356",
"0.6308071",
"0.628067",
"0.6278178",
"0.624061",
"0.6201841",
"0.6152078",
"0.6146595",
"0.614504",
"0.61176735",
"0.6116141",
"0.6105236",
"0.6092931",
"0.60768557",
"0.6056913",
"0.6054572",
"0.60349643"
]
| 0.7010531 | 0 |
Returns an AgentState object for pacman (in game.py) state.pos gives the current position state.direction gives the travel vector | def getPacmanState( self ):
return self.data.agentStates[0].copy() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def move_agent(self, state):\n m = self.m\n n = self.n\n\n cur_env = deepcopy(state.grid)\n cur_env[m, n] = 0\n action = self.choose_action(state)\n\n if action == 'Right':\n if n + 1 >= grid_size or cur_env[m][n+1] != 0:\n Rew = -2 # Reward -5 if we move into wall or another agent\n self.collisions += 1\n else:\n n += 1\n Rew = -0.1 # Reward -1 otherwise\n a = 0 # Action number\n elif action == 'Left':\n if n - 1 < 0 or cur_env[m][n-1] != 0:\n Rew = -2\n self.collisions += 1\n else:\n n -= 1\n Rew = -0.1\n a = 1\n elif action == 'Up':\n if m - 1 < 0 or cur_env[m-1][n] != 0:\n Rew = -2\n self.collisions += 1\n else:\n m -= 1\n Rew = -0.1\n a = 2\n elif action == 'Down':\n if m + 1 >= grid_size or cur_env[m+1][n] != 0:\n Rew = -2\n self.collisions += 1\n else:\n m += 1\n Rew = -0.1\n a = 3\n\n m = m % grid_size\n n = n % grid_size\n self.m = m # Update position of agent\n self.n = n # Update position of agent\n cur_env[m][n] = 1 # Update grid\n new_state = State(cur_env, [m, n]) # Set new state\n terminal = False\n\n if [m, n] == self.end:\n Rew = 10\n terminal = True\n self.carry = True\n\n return new_state, a, Rew, terminal",
"def get_agent_state(self):\n return self.world_state",
"def initialize_state(self, agent):\n # select input & output side\n input = agent\n\n if agent % 4 == 1:\n output = 2\n elif agent % 4 == 2:\n output = 1\n elif agent % 4 == 3:\n output = 0\n elif agent % 4 == 0:\n output = 3\n else:\n raise ValueError(\"Invalid number for sides!\")\n\n # defining sides of square\n x, y = self.choose_starting_points(input)\n d_x, d_y = self.choose_starting_points(output)\n\n # get initial velocity in direction of destination\n distance = np.array([d_x, d_y]) - np.array([x, y])\n v = 0.8 * (distance/np.linalg.norm(distance))\n v_x = v[0]\n v_y = v[1]\n\n # Define small offset >= 0 for location on sides such that the agents do not exactly crash into each other\n offset = 0\n\n if agent % 4 == 0:\n y += offset\n d_y += offset\n elif agent % 4 == 1:\n y -= offset\n d_y -= offset\n elif agent % 4 == 2:\n x += offset\n d_x += offset\n elif agent % 4 == 3:\n x -= offset\n d_x -= offset\n else:\n raise ValueError(\"Invalid number for sides!\")\n\n return np.array([[x, y, v_x, v_y, d_x, d_y, self.tau]])",
"def get_agent_position(agent):\n pos = Vector2()\n pos.x = agent.pose.pose.position.x\n pos.y = agent.pose.pose.position.y\n return pos",
"def get_state_arr(self):\n rpos = self.sim.getAgentPosition(self.robot_num)\n rvel = self.sim.getAgentVelocity(self.robot_num)\n rrad = self.sim.getAgentRadius(self.robot_num)\n v_pref = self.sim.getAgentMaxSpeed(self.robot_num)\n theta = math.atan2(rvel[1], rvel[0])\n # Robot's state entry. Note that goal is listed as the robot's current\n # position because we aren't using that goal as such, we are just\n # exploring.\n state = [\n rpos[0], rpos[1], rvel[0], rvel[1], rrad,\n self.headings[self.robot_num], rpos[0], rpos[1],\n v_pref, theta\n ]\n for agent in self.agents:\n if agent != self.robot_num: # We already accounted for the robot\n pos = self.sim.getAgentPosition(agent)\n vel = self.sim.getAgentVelocity(agent)\n rad = self.sim.getAgentRadius(agent)\n state.extend([pos[0], pos[1], vel[0], vel[1], rad,\n self.headings[agent]])\n for obs in self.obstacles:\n if len(obs) > 1:\n # Polygonal obstacle\n o = Polygon(obs)\n p = Point(rpos)\n p1, p2 = nearest_points(o, p)\n # Velocity is always 0 for obstacles\n # Heading is same as robot's\n state.extend([p1.x, p2.y, 0, 0, self.obs_width,\n self.headings[self.robot_num]])\n else:\n # Point obstacle\n state.extend([obs[0][0], obs[0][1], 0, 0, self.obs_width,\n self.headings[self.robot_num]])\n return state",
"def env_step(self, action):\n random_prob = np.random.uniform(0, 1)\n if random_prob <= self.stochasticity: # Ignore agent's action and move to one of the 8 neighbours\n # Determine how the agent moves (from -1 to 1 in each direction, but not both 0)\n random_nn = np.random.randint(0, len(self.nn))\n random_y = self.nn[random_nn, 0]\n random_x = self.nn[random_nn, 1]\n\n # Move to one of the nearest neighbours\n self.current_state[0] += random_y\n self.current_state[1] += random_x\n else: # Perform agent's action\n # Update current stated based on the action the agent took\n curr_x = self.current_state[1]\n self.current_state[0] += self.actions[action][0] + self.wind[curr_x]\n self.current_state[1] += self.actions[action][1]\n\n # Check if the agent fell out of the boundaries of the grid world\n y_coord = self.current_state[0]\n x_coord = self.current_state[1]\n\n if y_coord >= self.num_rows: # Agent went too far up\n self.current_state[0] = self.num_rows - 1\n elif y_coord < 0: # Agent went too far down\n self.current_state[0] = 0\n\n if x_coord >= self.num_cols: # Agent went too far right\n self.current_state[1] = self.num_cols - 1\n elif x_coord < 0: # Agent went too far left\n self.current_state[1] = 0\n\n is_terminal = False\n reward = -1.0\n\n # Check if the agent reached a terminal state\n if self.current_state == self.terminal_state:\n is_terminal = True\n reward = 0.0\n\n return reward, self.current_state, is_terminal",
"def make_alternative_states(self) -> np.ndarray:\n states = []\n for agent in range(self.agents):\n agent_state = []\n\n # Own distance\n r, c = self.game.get_agent_pos(agent)\n agent_state.append(r / 6)\n agent_state.append(c / 6)\n\n # Distances to others\n distances_r = [\n (r - pos[0]) / 12\n for key, pos in self.game.agent_positions.items()\n if key != agent\n ]\n distances_c = [\n (c - pos[1]) / 12\n for key, pos in self.game.agent_positions.items()\n if key != agent\n ]\n agent_state += distances_r\n agent_state += distances_c\n\n # Goal distances\n distances_goal_r = [(r - pos[0]) / 12 for pos in self.payoff_fields]\n distances_goal_c = [(c - pos[1]) / 12 for pos in self.payoff_fields]\n agent_state += distances_goal_r\n agent_state += distances_goal_c\n\n if agent < self.num_informed:\n agent_state.append((r - self.special_payoff_fields[0][0]) / 12)\n agent_state.append((c - self.special_payoff_fields[0][1]) / 12)\n else:\n agent_state += [0, 0]\n agent_state.append(self.max_turns - self.turns_count)\n states.append(np.array(agent_state))\n\n states = np.stack(states, axis=0)\n return states",
"def registerInitialState(self, gameState):\n\n '''\n Make sure you do not delete the following line. If you would like to\n use Manhattan distances instead of maze distances in order to save\n on initialization time, please take a look at\n CaptureAgent.registerInitialState in captureAgents.py.\n '''\n CaptureAgent.registerInitialState(self, gameState)\n self.start = gameState.getAgentPosition(self.index) # starting index of the pacman\n self.numFood = len(self.getFood(gameState).asList()) # the amount of food that has not been returned\n self.hasFood = False\n self.offensiveIndex = self.getTeam(gameState)[0] # agent index of the offensive agent\n\n self.depth = 2\n '''\n Your initialization code goes here, if you need any.\n '''",
"def random_agent(self, state):\n\t\trndint = random.randint\n\t\treturn self.state[state][rndint(0, len(self.state[state]))]",
"def agent_start(self, state):\n\n # This agent doesn't care what state it's in, it always chooses\n # to move left or right randomly according to self.probLeft\n self.prevAction = self._choose_action()\n\n return self.prevAction",
"def forward(self, state):\n\n # this agent is not in the mood today, it is not predicting any force\n force = torch.tensor(0., device=device)\n\n # observe change in system\n du = self.cartpole(state, force)\n\n return du",
"def registerInitialState(self, gameState):\n\n '''\n Make sure you do not delete the following line. If you would like to\n use Manhattan distances instead of maze distances in order to save\n on initialization time, please take a look at\n CaptureAgent.registerInitialState in captureAgents.py.\n '''\n \n CaptureAgent.registerInitialState(self, gameState)\n \n '''\n Your initialization code goes here, if you need any.\n '''\n self.behaviourState = 'Guard'\n self.setCenter(gameState)\n self.eatenFood = 0\n self.prevFoodState = self.getFoodYouAreDefending(gameState)\n self.opponentIndices = self.getOpponents(gameState)\n self.teamIndices = self.getTeam(gameState)\n\n self.teammateIndex = self.getTeam(gameState)[:]\n self.teammateIndex.remove(self.index)\n\n self.defenceDestination = None\n self.attackDestination = None\n self.opponentPositions = {}\n self.opponentPrevPositions = {}\n self.opponentDetected = None\n for opponentIndex in self.opponentIndices:\n self.opponentPositions[opponentIndex] = None\n self.opponentPrevPositions[opponentIndex] = None",
"def propagate_state(self, msg):\r\n # Previous values\r\n x = self.state_vector[0, 0]\r\n y = self.state_vector[1, 0]\r\n theta = self.state_vector[2, 0]\r\n if theta < -pi:\r\n theta += 2 * pi\r\n elif theta > pi:\r\n theta -= 2 * pi\r\n\r\n # Current values\r\n vel = msg.twist.twist.linear.x\r\n ang = msg.twist.twist.angular.z\r\n nvel = 0\r\n nang = 0\r\n dt = msg.header.stamp.secs + msg.header.stamp.nsecs * 10 ** -9 - self.time_stamp\r\n self.time_stamp = msg.header.stamp.secs + msg.header.stamp.nsecs * 10 ** -9\r\n\r\n # Calculate Jacobians F and G\r\n self.motion_jacobian_state_vector(vel, ang, theta, 0, 0, dt)\r\n self.motion_jacobian_noise_components(vel, ang, theta, 0, 0, dt)\r\n\r\n # Choose motion model\r\n if ang == 0:\r\n # Propagate\r\n self.state_vector[0, 0] = x + (vel + nvel) * dt * cos(theta)\r\n self.state_vector[1, 0] = y + (vel + nvel) * dt * sin(theta)\r\n self.state_vector[2, 0] = theta\r\n else:\r\n # Propagate\r\n self.state_vector[0, 0] = x - ((vel + nvel) / (ang + nang)) * sin(theta) + (\r\n (vel + nvel) / (ang + nang)) * sin(theta + (ang + nang) * dt)\r\n self.state_vector[1, 0] = y + ((vel + nvel) / (ang + nang)) * cos(theta) - (\r\n (vel + nvel) / (ang + nang)) * cos(theta + (ang + nang) * dt)\r\n self.state_vector[2, 0] = theta + (ang + nang) * dt",
"def get_state(self):\n return self.get_pose()",
"def actions(self, state):\n \"*** YOUR CODE HERE ***\"\n if state[2] == 0: # When agent is facing North\n state_fw = (state[0], state[1] + 1, 0)\n state_tr = (state[0], state[1], 3)\n state_tl = (state[0], state[1], 1)\n elif state[2] == 1: # When agent is facing West\n state_fw = (state[0] - 1, state[1], 1)\n state_tr = (state[0], state[1], 0)\n state_tl = (state[0], state[1], 2)\n elif state[2] == 2: # When agent is facing South\n state_fw = (state[0], state[1] - 1, 2)\n state_tr = (state[0], state[1], 1)\n state_tl = (state[0], state[1], 3)\n elif state[2] == 3: # When agent is facing East\n state_fw = (state[0] + 1, state[1], 3)\n state_tr = (state[0], state[1], 2)\n state_tl = (state[0], state[1], 0)\n else:\n raise Exception(\"This shouldn't be happening. Can't find heading\")\n \n shoot_loc_arr = [] # Initialize Array\n for allowed_state in self.allowed: # Iterate through all allowed states\n for goal_state in self.goals: # Iterate through all goal states\n if allowed_state[0] == goal_state[0] and allowed_state[1] < goal_state[1]: shoot_loc_arr.append((allowed_state[0], allowed_state[1], 0)) # X Matches, Head North\n if allowed_state[0] > goal_state[0] and allowed_state[1] == goal_state[1]: shoot_loc_arr.append((allowed_state[0], allowed_state[1], 1)) # Y Matches, Head West\n if allowed_state[0] == goal_state[0] and allowed_state[1] > goal_state[1]: shoot_loc_arr.append((allowed_state[0], allowed_state[1], 2)) # X Matches, Head South\n if allowed_state[0] < goal_state[0] and allowed_state[1] == goal_state[1]: shoot_loc_arr.append((allowed_state[0], allowed_state[1], 3)) # Y Matches, Head East \n\n dist_fw_arr, dist_tr_arr, dist_tl_arr = ([9999999] for i in range(3)) # Initialize to large values\n for goal in shoot_loc_arr: # Iterate through arrays\n if (state_fw[0],state_fw[1]) in self.allowed:\n dist_fw_arr.append(manhattan_distance_with_heading(state_fw, goal))\n dist_tr_arr.append(manhattan_distance_with_heading(state_tr, goal))\n dist_tl_arr.append(manhattan_distance_with_heading(state_tl, goal))\n\n if (min(dist_fw_arr) <= min(min(dist_tr_arr),min(dist_tl_arr))) and (state_fw[0],state_fw[1]) in self.allowed: return ['Forward']\n if min(dist_tr_arr) <= min(min(dist_fw_arr),min(dist_tl_arr)): return ['TurnRight']\n if min(dist_tl_arr) <= min(min(dist_tr_arr),min(dist_tr_arr)): return ['TurnLeft']\n raise Exception(\"This shouldn't be happening. Can't determine action\")",
"def generate_state():\n\n\t\tprobs = calc_probs(env)\n\t\tn_options = len(probs)\n\n\t\t# feedback for agent\n\t\tr_mag = np.zeros(n_options) + rmag\n\t\tl_mag = np.zeros(n_options) + lmag\n\n\t\tnew_state = Bogacz(n_trials, n_options, probs, r_mag, l_mag, V0=V0)\n\t\treturn new_state",
"def __init__(self,\n init_pose = np.array([0.0,0.0,10.0,0.0,0.0,0.0]),\n init_velocities = np.array([0.0,0.0,0.1]),\n init_angle_velocities = np.array([0.0,0.0,0.0]),\n runtime=5.,\n target_pos=np.array([0.0,0.0,50.0])):\n # Simulation\n self.sim = PhysicsSim(init_pose, init_velocities, init_angle_velocities, runtime) \n self.action_repeat = 3\n self.state_size = self.action_repeat * 6\n \n self.action_low = 10\n self.action_high = 900\n self.action_size = 4\n\n # Goal\n self.target_pos = target_pos if target_pos is not None else np.array([0., 0., 10.])\n\n # to calc reward\n self.pos_diff_init = None",
"def __init__(self, init_pose=None, init_velocities=None, \n init_angle_velocities=None, runtime=5., target_pos=None):\n \n # initial state\n self.state_scale = 1\n \n self.init_pose = np.concatenate((truncnorm.rvs(-1,1,0,1./3.,3), truncnorm.rvs(-0.021,0.021,0,0.007,3)))\n self.init_pose[2] += 10\n self.init_velocities = np.array([0.,0.,0.])\n self.init_angle_velocities = np.array([0.,0.,0.])\n\n self.runtime = runtime\n \n # Simulation\n self.sim = PhysicsSim(self.init_pose, self.init_velocities, self.init_angle_velocities, self.runtime) \n self.action_repeat = 1\n\n self.init_state = np.concatenate((self.init_pose,self.init_velocities,self.init_angle_velocities),axis=0)\n self.state_size = self.action_repeat * self.init_state.shape[0]\n \n self.action_low = 0 #-1\n self.action_high = 2*450 #1\n self.action_size = 4\n\n self.action_scale = 1 #450 # 1/2 max of the action \n #self.state_scale = 150 # 1/2 size of the state space\n \n # Goal\n self.target_pose = np.array([0.,0.,150.0])\n\n # The previous position\n self.prev_pose = self.init_pose",
"def percept(self, agent):\n return (agent.location, self.status[agent.location])",
"def percept(self, agent):\n return (agent.location, self.status[agent.location])",
"def agent_start(self, state):\n self.sum_rewards = 0\n self.episode_steps = 0\n self.last_state = np.array(state)\n self.last_action = self.policy(self.last_state)\n return self.last_action",
"def getTransitionStatesAndProbs(self, state, action = None):\n # may move ball\n ball = state[0][:2]\n if action == None:\n ballVelocity = state[0][2:]\n else:\n ballVelocity = (0, 0)\n\n keepers = list(self.getKeepers(state))\n takers = list(self.getTakers(state))\n\n chasers = sorted(keepers, key=lambda keeper: util.getPointVectorDistance(keeper, ball, ballVelocity))\n # most closest agent, possess the ball, or go to the ball \n if self.weHaveBall(state):\n # j has the ball, its transition depends on the action\n if action[0] == 'hold':\n pass\n elif action[0] == 'pass':\n # pass the ball to a teammate\n rand = util.randomVector(0.1)\n target = keepers[action[1]]\n diff = util.getDirection(keepers[0], (target[0] + rand[0], target[1] + rand[1]))\n ballVelocity = (self.ballSpeed * diff[0], self.ballSpeed * diff[1])\n else:\n raise Exception('Unknown action')\n else:\n # j should go to the ball\n chasers[0] = self.moveTowards(chasers[0], ball)\n\n # other agents get open for a pass\n for i in xrange(1, len(chasers)):\n # concretely, this agent goes to a least congested place\n chasers[i] = self.moveTowards(chasers[i], self.getLeastCongestedLoc(state, chasers[i]))\n keepers = sorted(chasers, key=lambda keeper: util.getDistance(keeper, ball))\n \n for i in xrange(2):\n takers[i] = self.moveTowards(takers[i], ball)\n for i in xrange(2, len(takers)):\n takers[i] = self.moveTowards(takers[i], keepers[1])\n takers = sorted(takers, key=lambda taker: util.getDistance(taker, keepers[0]))\n \n newBall = (ball[0] + ballVelocity[0], ball[1] + ballVelocity[1],\\\n ballVelocity[0], ballVelocity[1])\n newState = [newBall] + keepers + takers\n return [(tuple(newState), 1)]",
"def get_agent_velocity(agent):\n vel = Vector2()\n vel.x = agent.twist.twist.linear.x\n vel.y = agent.twist.twist.linear.y\n return vel",
"def obs_to_state(obs, info):\n # create state based on obs\n state = np.zeros(obs.shape[0]+2)\n state[:obs.shape[0]] = obs\n # compute angle(theta) between vector of \"robot to goal\" and \"x-axis\" of world\n robot_position = obs[:2]\n goal_position = info[\"goal_position\"]\n vec_x = np.array([1, 0])\n vec_y = np.array([0, 1])\n vec_r2g = goal_position - robot_position\n cos_theta = np.dot(vec_r2g, vec_x) / (np.linalg.norm(vec_r2g)*np.linalg.norm(vec_x))\n sin_theta = np.dot(vec_r2g, vec_y) / (np.linalg.norm(vec_r2g)*np.linalg.norm(vec_y))\n # append new states\n state[:2] = info[\"goal_position\"] - obs[:2] # distance\n state[-2:] = [cos_theta, sin_theta]\n state = state.astype(np.float32)\n\n return state",
"def registerInitialState(self, gameState):\n\n ''' \n Make sure you do not delete the following line. If you would like to\n use Manhattan distances instead of maze distances in order to save\n on initialization time, please take a look at\n CaptureAgent.registerInitialState in captureAgents.py. \n '''\n CaptureAgent.registerInitialState(self, gameState)\n self.opponents = self.getOpponents(gameState)\n self.distributions = []\n self.legalPositions = [p for p in gameState.getWalls().asList(False) if p[1] > 1]\n print self.legalPositions\n\n #initializing beleif distribution of opponents\n for i in range(0, gameState.getNumAgents()):\n if i in self.opponents:\n beliefs = util.Counter()\n for p in self.legalPositions: beliefs[p] = 1.0\n beliefs.normalize()\n self.distributions.append(beliefs)\n else:\n self.distributions.append(None)\n\n\n ''' \n Your initialization code goes here, if you need any.\n '''",
"def make_states(self) -> np.ndarray:\n state = []\n for agent in range(self.agents):\n r, c = self.game.get_agent_pos(agent)\n state.append(r)\n state.append(c)\n for field in constants.PAYOFF_LOCATIONS:\n r, c = field\n state.append(r)\n state.append(c)\n state.append(self.turns_count)\n for field in self.special_payoff_fields:\n r, c = field\n state.append(r)\n state.append(c)\n\n informed_state = np.array(state)\n uninformed_state = copy.deepcopy(informed_state)\n uninformed_state[-2:] = 0\n\n states = [\n informed_state if i < self.num_informed else uninformed_state\n for i in range(self.agents)\n ]\n states = np.stack(states, axis=0)\n\n return states",
"def get_success_state_arr(self):\n rpos = self.sim.getAgentPosition(self.robot_num)\n rvel = self.sim.getAgentVelocity(self.robot_num)\n rrad = self.sim.getAgentRadius(self.robot_num)\n v_pref = self.sim.getAgentMaxSpeed(self.robot_num)\n theta = math.atan2(rvel[1], rvel[0])\n # Robot's state entry.\n state = [\n rpos[0], rpos[1], rvel[0], rvel[1], rrad,\n self.headings[self.robot_num], self.overall_robot_goal[0],\n self.overall_robot_goal[1], v_pref, theta\n ]\n for agent in self.agents:\n if agent != self.robot_num: # We already accounted for the robot\n pos = self.sim.getAgentPosition(agent)\n vel = self.sim.getAgentVelocity(agent)\n rad = self.sim.getAgentRadius(agent)\n state.extend([pos[0], pos[1], vel[0], vel[1], rad,\n self.headings[agent]])\n for obs in self.obstacles:\n if len(obs) > 1:\n # Polygonal obstacle\n o = Polygon(obs)\n p = Point(rpos)\n p1, p2 = nearest_points(o, p)\n # Velocity is always 0 for obstacles\n # Heading is same as robot's\n state.extend([p1.x, p2.y, 0, 0, self.obs_width,\n self.headings[self.robot_num]])\n else:\n # Point obstacle\n state.extend([obs[0][0], obs[0][1], 0, 0, self.obs_width,\n self.headings[self.robot_num]])\n return state",
"def make_state() -> state.GameState:\r\n dung: world.Dungeon = worldgen.EmptyDungeonGenerator(20, 20).spawn_dungeon(0)\r\n p1x, p1y = dung.get_random_unblocked()\r\n p2x, p2y = dung.get_random_unblocked()\r\n while (p2x, p2y) == (p1x, p1y):\r\n p2x, p2y = dung.get_random_unblocked()\r\n ent1 = entities.Entity(1, 0, p1x, p1y, 10, 10, 2, 1, [], dict())\r\n ent2 = entities.Entity(2, 0, p2x, p2y, 10, 10, 2, 1, [], dict())\r\n return state.GameState(True, 1, 1, 2, world.World({0: dung}), [ent1, ent2])",
"def make_step(self, current_state, action):\n\n if current_state == 12:\n current_state = current_state + 50\n elif current_state == 23:\n current_state = current_state + 25\n\n new_state = current_state\n\n if 0 <= current_state < 25:\n dimension = 1\n elif 25 <= current_state < 50:\n dimension = 2\n elif 50 <= current_state < 75:\n dimension = 3\n else:\n print(\"Error in dimension\")\n\n # Update new_position based on the chosen action and check whether agent hits a wall.\n if action == \"n\":\n temp_state = current_state + self.num_cols\n if temp_state < self.num_cells * dimension:\n new_state = temp_state\n elif action == \"e\":\n temp_state = current_state + 1\n if temp_state % self.num_cols > 0:\n new_state = temp_state\n elif action == \"s\":\n temp_state = current_state - self.num_cols\n if temp_state >= 0 + (25 * (dimension - 1)):\n new_state = temp_state\n elif action == \"w\":\n temp_state = current_state - 1\n if temp_state % self.num_cols < self.num_cols - 1:\n new_state = temp_state\n else:\n raise ValueError('Action was mis-specified!')\n\n # Get reward\n reward = self.rewards[new_state]\n\n # Deduct 1 from reward for every attempted move\n reward -= 1\n\n return (new_state, reward)",
"def get_action(self, state):\n # In case of firt call, we initialise the self variables.\n if self.first_call :\n self.initVariable(state)\n\n # Start with Pacman as agent\n agent = PACMAN #0\n\n # Getting legal moves\n legal_moves = getLegalMovingActions(state, agent)\n\n # parents_positions correspond to a \"explored states\" kind of variable\n parents_positions = [(state.getPacmanPosition(),\n state.getGhostPositions(),\n state.getFood().asList())]\n moves_dict = {}\n # We associate with each legal move a score and place the pair in the\n # moves_dict dictionary\n for move in legal_moves:\n # Getting the score\n score = self.minimax(state.generateSuccessor(agent, move),\n (agent+1)%self.nb_agent,\n parents_positions)\n # Placing the pair in the dict\n moves_dict[score] = move\n # We take the move that has the maximum score\n return moves_dict[max(moves_dict)]"
]
| [
"0.67357004",
"0.6304223",
"0.62236255",
"0.6187711",
"0.59295386",
"0.59182173",
"0.58750075",
"0.58393127",
"0.58230054",
"0.57619905",
"0.56301194",
"0.5618911",
"0.5592257",
"0.5557571",
"0.55485815",
"0.55370945",
"0.5535987",
"0.55244625",
"0.5482656",
"0.5482656",
"0.54799855",
"0.547834",
"0.54575354",
"0.5451416",
"0.54184854",
"0.5416468",
"0.5414209",
"0.5400939",
"0.5394035",
"0.5377959"
]
| 0.68078905 | 0 |