query: string (lengths 9 to 9.05k)
document: string (lengths 10 to 222k)
metadata: dict
negatives: list (length 30)
negative_scores: list (length 30)
document_score: string (lengths 4 to 10)
document_rank: string (2 classes)
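The columns above describe one retrieval-training row: a natural-language query, the matching code document, 30 hard-negative snippets with their similarity scores, plus the positive document's score and rank. Below is a minimal sketch of loading and inspecting a row with the Hugging Face datasets library; the dataset id is a placeholder, not the real repository name.

```python
# Sketch only: replace the placeholder dataset id with the actual repository name.
from datasets import load_dataset

ds = load_dataset("user/code-search-triplets", split="train")  # hypothetical id

row = ds[0]
print(row["query"][:80])            # natural-language description of the code
print(row["document"][:80])         # the matching code snippet
print(len(row["negatives"]))        # 30 hard-negative snippets
print(row["negative_scores"][:3])   # similarity scores for those negatives
print(row["document_score"], row["document_rank"])
```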
Get the frequency of PWM being used on the gpio. Returns the frequency (in hertz) used for the gpio if OK, otherwise PI_BAD_USER_GPIO.
def get_PWM_frequency(user_gpio): return _u2i(_pigpio_command(_control, _PI_CMD_PFG, user_gpio, 0))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pwm_freq(self):\r\n return self._pwm_freq", "def set_PWM_frequency(user_gpio, frequency):\n return _u2i(_pigpio_command(_control, _PI_CMD_PFS, user_gpio, frequency))", "def get_ao_manual_control_freq( channel ):\n freq = float64(0)\n CALL('GetPhysicalChanAOManualControlFreq', channel, byref(freq))\n return freq.value", "def get_servo_pct(pi, pin):\n return pulsewidth2pct(pi.get_servo_pulsewidth(pin))", "def get_frequency(self,):\n\n # TODO: Find way to appropriately reconvert the frequency to its initial\n # TODO: Value or alert that the value is APPROXIMATE\n FTW = int (0)\n freq = int(0)\n\n FTW_bytes = self._read('CFTW0')\n FTW = FTW.from_bytes(FTW_bytes,'big')\n freq = FTW*self.clock_freq/2**32\n\n print('Latest frequency set: ', \"{:.2e}\".format(freq), 'Hz')\n print(['%.2e' % elem for elem in self.frequencies])\n\n return self.frequencies", "def tone_to_freq(tone):\n return math.pow(2, (tone - 69.0) / 12.0) * 440.0", "def frequency(self):\n return self.reference_clock_speed / 4096 / self.prescale_reg", "def get_frequency(self):\r\n _debug('simq03b_api.get_frequency')\r\n \r\n x = self.query('SOUR:FREQ:CW?')\r\n if x == None: return None\r\n return float(x)", "def frequency(self):\n return self.reference_clock_speed / 4096 / self.prescale_reg", "def get_frequency(self, detune=0) -> float:\n return np.power(2, (self._cents + detune)/1200) * 440", "def get_frequency(self):\n # res\n if self._cacheExpiration <= YAPI.GetTickCount():\n if self.load(YAPI._yapiContext.GetCacheValidity()) != YAPI.SUCCESS:\n return YPwmOutput.FREQUENCY_INVALID\n res = self._frequency\n return res", "def frequency(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"frequency\")", "def frequency(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"frequency\")", "def get_frequency(self):\r\n x = self.query('FREQ?')\r\n if x == None: return None\r\n return float(x)", "def gpib_get_freq(gpib_dev):\n try:\n s = serial.Serial(gpib_dev)\n except OSError as e:\n if e.errno == errno.ENOENT:\n raise GPIBError(\"serial device doesn't exist: %s\" % gpib_dev)\n raise GPIBError('issue opening serial device: %s: %s' % (gpib_dev, e))\n\n s.bytesize = serial.EIGHTBITS\n s.parity = serial.PARITY_NONE\n s.stopbits = serial.STOPBITS_ONE\n s.timeout = 2\n s.xonxoff = False\n s.rtscts = False\n s.dsrdtr = False\n s.writeTimeout = 0\n\n # encode unicode strings to be written into byte strings\n def _write(string):\n s.write(string.encode())\n\n if not s.isOpen():\n try:\n s.open()\n except Exception as e:\n raise GPIBError(e)\n\n s.flushInput()\n s.flushOutput()\n\n # initialize GPIB adapter, assumes a GPIB address of 16 which has to be set\n # specifically on the device\n _write('++mode 1\\n')\n _write('++auto 1\\n')\n _write('++addr 16\\n')\n _write('++ver\\n')\n gpib_device = s.readline().strip()\n if not gpib_device:\n raise GPIBError('error communicating with GPIB device')\n\n # confirm communication with frequency counter\n _write('*CLS;*RST;*IDN?\\r\\n')\n instrument_id = s.readline().strip()\n if not instrument_id:\n raise GPIBError('incorrect GPIB address setting')\n\n # request the current measured frequency\n _write('*RST;*CLS;*SRE 0;*ESE 0;\\r\\n')\n _write(':STAT:PRES\\r\\n')\n _write(':DISP:CALC:MATH:STAT OFF\\r\\n')\n _write(':TRAC SCALE, 1.000000\\r\\n')\n # we're using channel 1\n _write(\":FUNC 'FREQ 1'\\r\\n\")\n _write(':FORM:DATA ASCII\\r\\n')\n # send expected value and scale so returned value is properly scaled\n _write(':MEASURE:FREQ? 
4e7 HZ, 1 HZ\\r\\n')\n freq = s.readline().strip()\n # re-enable continuous mode for the frequency counter\n _write(':INIT:CONT ON\\r\\n')\n s.close()\n if not freq:\n raise GPIBError('error querying counter for current frequency')\n\n freq = int(float(freq))\n\n # We expect a value close to 40 MHz, error out if it's more than 5 kHz off.\n expected_freq = 40000000\n if abs(freq - expected_freq) > 5000:\n raise GPIBError(\n 'Measured frequency too far away from expected frequency (40 MHz): %d Hz' % (freq,))\n\n return freq", "def frequency(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"frequency\")", "def frequency(self, mass: float) -> float:\n return self.omega(mass) / u.twopi", "def get_cw_freq(self):\n return self.get_frequency(self.synth)", "def frequency_watt(self, p_req = 0, p_prev = 0, ts=datetime.utcnow(), location=0, db_UF = 0.05, db_OF = 0.05): #datetime.\n f = self.grid.get_frequency(ts,location)\n \n if (f < 60 - db_UF).any():\n p_mod = 0\n elif (f > 60 + db_OF).any():\n p_mod = p_req\n else:\n p_mod = p_prev\n \n return p_mod", "def frequency(self) -> int:\n return self._servo_frequency", "def get_frequency(self, c):\n yield self.wm.write(\":READ:POW?\\r\\n\")\n yield self.wm.write(\":READ:FREQ?\\r\\n\")\n freq = yield self.wm.read_very_eager()\n if freq != '':\n\n temp = freq.split()\n temp = map(float,temp)\n temp.sort()\n if temp[len(temp)-1] >40.0:\n freq = temp[len(temp)-1]\n self.freq_changed((freq))\n self.freq = freq\n if temp[0] < 40.0:\n amp = temp[0]\n self.amp_changed((amp))\n self.amp = amp\n returnValue(self.freq)", "def ctrl_to_freq(ctrl, clk=100000000, phase_bits=32):\n return ctrl * clk / 2**phase_bits", "def getFWHM(antenna, freq):\n diameter = getDiameter(antenna)\n lam = 299792458.0 / (freq * 1e9)\n fwhmo = lam / math.pi * 180.0 * 60.0\n fwhm = 1.22 * fwhmo / diameter\n return fwhm", "def frequency(self):\n return self._pca.frequency", "def get_frequency(self):\r\n return self._api.get_frequency()", "def mtof(p):\n return 440.0 * 2 ** ((p - 69) / 12.0)", "def pwm_freq(self, freq: int):\r\n self._pwm_freq = freq\r\n self.pwm_freq_hist.append(freq)\r\n\r\n if self.pwm_freq_hist[-2] != freq and self._daq:\r\n msg = Message(\"pwm_freq\", freq, self.checksum).message_bytes\r\n self._daq.asynch.transmit(msg)", "def get_fan_pwm(self, pwm_val=None):\n self.assertNotEqual(pwm_val, None, \"Expected PWM value needs to be set\")\n\n data = run_shell_cmd(\"/usr/local/bin/get_fan_speed.sh\")\n data = data.split(\"\\n\")\n for line in data:\n if len(line) == 0:\n continue\n line = line.split(\"(\")\n line = line[1].split(\"%\")\n if abs(int(line[0]) - int(pwm_val)) < 2:\n continue\n else:\n return [False, data]\n return [True, None]", "def get_frequency(self):\r\n x = self.query('SOUR:FREQ:CW?')\r\n if x == None: return None\r\n return float(x)", "def get_frequency(self):\r\n x = self.query('SOUR:FREQ:CW?')\r\n if x == None: return None\r\n return float(x)" ]
[ "0.74385595", "0.7028066", "0.63403845", "0.628198", "0.6202231", "0.6136178", "0.60510415", "0.5912081", "0.5904167", "0.5895189", "0.5847747", "0.5837071", "0.5837071", "0.5834659", "0.58250284", "0.5782192", "0.5763384", "0.5761137", "0.57594115", "0.57592446", "0.57466745", "0.57243156", "0.56891996", "0.5682519", "0.5669555", "0.5660487", "0.5659693", "0.5658757", "0.56357425", "0.56357425" ]
0.8437725
0
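Each row's metadata declares a (query, document, negatives) triplet objective. Below is a minimal sketch of feeding such triplets to a contrastive loss; the model and loss shown (sentence-transformers with MultipleNegativesRankingLoss) are one reasonable illustrative choice, not necessarily what was used to build or consume this dataset, and the row dict is abbreviated from the first row above.

```python
from torch.utils.data import DataLoader
from sentence_transformers import SentenceTransformer, InputExample, losses

# Abbreviated copy of the first row above; a real run would iterate the whole dataset.
row = {
    "query": "Get the frequency of PWM being used on the gpio.",
    "document": "def get_PWM_frequency(user_gpio): ...",
    "negatives": [
        "def pwm_freq(self): ...",
        "def set_PWM_frequency(user_gpio, frequency): ...",
    ],
}

model = SentenceTransformer("all-MiniLM-L6-v2")
examples = [
    InputExample(texts=[row["query"], row["document"], neg])  # (anchor, positive, negative)
    for neg in row["negatives"]
]
loader = DataLoader(examples, shuffle=True, batch_size=16)
loss = losses.MultipleNegativesRankingLoss(model)
model.fit(train_objectives=[(loader, loss)], epochs=1)
```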
Start (500-2500) or stop (0) servo pulses on the gpio. Returns 0 if OK, otherwise PI_BAD_USER_GPIO, PI_BAD_PULSEWIDTH or PI_NOT_PERMITTED.
def set_servo_pulsewidth(user_gpio, pulsewidth): return _u2i(_pigpio_command(_control, _PI_CMD_SERVO, user_gpio, pulsewidth))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, servo_gpio, pi=None, pulse_left_ns=2500, pulse_right_ns=1000, pulse_centre_ns=None):\n\n self.gpio = servo_gpio\n\n if pi is None:\n self.pi = pi = pigpio.pi()\n else:\n self.pi = pi\n\n self.pulse_left_ns = pulse_left_ns\n self.pulse_right_ns = pulse_right_ns\n\n if pulse_centre_ns is None:\n self.pulse_centre_ns = ((pulse_left_ns - pulse_right_ns) // 2) + pulse_right_ns", "def setup_motor(self,pin_num):\n pi.set_servo_pulsewidth(pin_num, 2000)\n sleep(2)\n pi.set_servo_pulsewidth(pin_num, 500 )\n sleep(2)", "def idle(self):\n self.pi.set_servo_pulsewidth(self.gpio, 0)", "def pulse_lo(pin, length=0.00001):\n off(pin)\n time.sleep(length)\n on(pin)\n time.sleep(length)", "def pulse_hi(pin, length=0.00001): \n on(pin)\n time.sleep(length)\n off(pin)\n time.sleep(length)", "def set_PWM_range(user_gpio, range_):\n return _u2i(_pigpio_command(_control, _PI_CMD_PRS, user_gpio, range_))", "def set_PWM_dutycycle(user_gpio, dutycycle):\n return _u2i(_pigpio_command(_control, _PI_CMD_PWM, user_gpio, dutycycle))", "def get_PWM_range(user_gpio):\n return _u2i(_pigpio_command(_control, _PI_CMD_PRG, user_gpio, 0))", "def _pin_pulse(pin, initial_state=GPIO.LOW, pulse_width=PULSE_WIDTH_SEC):\n # type: (int, bool, Union[int, float]) -> None\n GPIO.output(pin, not initial_state)\n try:\n time.sleep(pulse_width)\n finally:\n GPIO.output(pin, initial_state)", "def _pulse_enable(self):\n self.set_low(self._en_pin)\n self._usleep(1)\n self.set_high(self._en_pin)\n self._usleep(1)\n self.set_low(self._en_pin)\n # commands need > 37us to settle\n self._usleep(100)", "def main():\n\n # Center positions when joystick is at rest\n center_x_pos = 530\n center_y_pos = 504\n\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup([red_led, green_led, blue_led], GPIO.OUT, initial=GPIO.LOW)\n\n pwm_r = GPIO.PWM(red_led, 300)\n pwm_g = GPIO.PWM(green_led, 300)\n pwm_b = GPIO.PWM(blue_led, 300)\n\n pwm_instances = [pwm_r, pwm_g, pwm_b]\n\n for p in pwm_instances:\n p.start(0)\n\n try:\n while True:\n # If joystick switch is pressed down, turn off LEDs\n switch = read_spi_data_channel(mcp3008_switch_channel)\n if switch == 0:\n for p in pwm_instances:\n p.ChangeDutyCycle(0)\n continue\n\n # Read the joystick position data\n x_pos = read_spi_data_channel(mcp3008_x_voltage_channel)\n y_pos = read_spi_data_channel(mcp3008_y_voltage_channel)\n\n # If joystick is at rest in center, turn on all LEDs at max\n if is_joystick_near_center(x_pos, y_pos, center_x_pos, center_y_pos):\n for p in pwm_instances:\n p.ChangeDutyCycle(100)\n continue\n\n # Adjust duty cycle of LEDs based on joystick position\n angle = convert_coordinates_to_angle(x_pos, y_pos, center_x_pos, center_y_pos)\n pwm_r.ChangeDutyCycle(calculate_next_pwm_duty_cycle_for_led(angle, 'R'))\n pwm_g.ChangeDutyCycle(calculate_next_pwm_duty_cycle_for_led(angle, 'G'))\n pwm_b.ChangeDutyCycle(calculate_next_pwm_duty_cycle_for_led(angle, 'B'))\n\n # print(\"Position : ({},{}) -- Angle : {}\".format(x_pos, y_pos, round(angle, 2)))\n\n except KeyboardInterrupt:\n pass\n\n finally:\n for p in pwm_instances:\n p.stop()\n spi.close()\n GPIO.cleanup()", "def init_servos():\n for i in range(0, 7):\n kit.servo[i].actuation_range = 180\n kit.servo[i].set_pulse_width_range(450, 2550)", "def led_duty_cycle(val):\n set_tmr_ocr(TMR1, OCRxB, val)", "def pulse(vjoy, btn_id):\n global g_is_running\n g_is_running = True\n while g_is_running:\n vjoy[1].button(btn_id).is_pressed = True\n time.sleep(g_hold_time)\n vjoy[1].button(btn_id).is_pressed = False\n time.sleep(g_pause_time)", "def 
enable_pulse_modulation(self):\n self.write(\":SOUR:PULM:STAT ON\")", "def get_PWM_real_range(user_gpio):\n return _u2i(_pigpio_command(_control, _PI_CMD_PRRG, user_gpio, 0))", "def pi_close():\n print(\"\\nClosing lock :(\")\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup(11, GPIO.OUT)\n servo1 = GPIO.PWM(11, 50)\n\n # start(0) pulse off\n print('servo.start(0)')\n servo1.start(0)\n time.sleep(1)\n\n # turns a little at a time using servo\n print('turning...')\n i = 8.5\n while i > 2:\n # pulse next degree \n print('ChangeDutyCycle(%d)' % i)\n servo1.ChangeDutyCycle(i)\n time.sleep(0.2)\n # no pulse, for smoother turn\n servo1.ChangeDutyCycle(0)\n time.sleep(0.1)\n i -= 1\n\n # stop pulse\n print('servo.ChangeDutyCycle(0)')\n servo1.ChangeDutyCycle(0)\n servo1.stop()\n GPIO.cleanup()\n print('done closing')", "def time_pulse_us(pin:Pin, pulse_level:int, timeout_us:int=1000000, /) -> int:", "def go(self, position):\n if self._is_on:\n val = min(180.0, position)\n val = max(0.0, position)\n val = (val / 180.0) * (self._max_duty - self._min_duty) + self._min_duty\n val = val * 100.0\n self._pwms.set_duty(self._pin_index, val)\n else:\n raise Exception(\"You must turn the servo on by calling the `on()` method before you can tell the servo to `go()`!\")", "def set_pulse_width(self):\n\t\t\"\"\"For PWM Register-0\"\"\"\n\t\tbus.write_byte_data(PCA9530_2C_1_DEFAULT_ADDRESS, PCA9530_2C_1_REG_PWM0, PCA9530_2C_1_PWM0_USERDEFINED)\n\t\t\n\t\t\"\"\"For PWM Register-1\"\"\"\n\t\tbus.write_byte_data(PCA9530_2C_1_DEFAULT_ADDRESS, PCA9530_2C_1_REG_PWM1, PCA9530_2C_1_PWM1_USERDEFINED)", "def control_motorized(self, action, pin_num=18):\n pulsewidth = self.avail_actions.get(action, None)\n if not pulsewidth:\n raise ValueError('Action not permitted')\n self.pi.set_servo_pulsewidth(pin_num, pulsewidth)\n return self.pi.get_servo_pulsewidth(pin_num)", "def set_PIenable(self,highlow): \n GPIO.output(self.chanlist[4], highlow)", "def pulseEnable( self, _data ): # uint8_t\n\t\tself.expanderWrite( _data | LCD_EN ) # En high\n\t\tsleep_us(1) # enable pulse must be >450ns\n\n\t\tself.expanderWrite( _data & (0xFF ^ LCD_EN) ) # En low\n\t\tsleep_us(50) # commands need > 37us to settle", "def servo_on(self):\n self.logger.info('Setting servo ON')\n self.electronics.move_servo(1)\n self.config['servo']['status'] = 1", "def stop_motor(self):\n self.output(self.steering_pin, 0)\n self.pi.set_servo_pulsewidth(self.steering_pin, 0)", "def single_pulse_SCPI(pulsewidth, updown, high_voltage, low_voltage, channel = '1', *args, **kwargs):\n\tif pulsewidth[-2:] not in set({'ns', 'us', 'ms',}):\n\t\tif pulsewidth[-1] != 's':\n\t\t\traise ValueError('pulsewidth ' + str(pulsewidth) + ' not supported')\n\tif updown not in set({'up', 'down'}):\n\t\traise ValueError('updown ' + str(updown) + ' not supported')\n\tif high_voltage[-2:].lower() not in set({'mv'}):\n\t\tif high_voltage[-1].lower() != 'v':\n\t\t\traise ValueError('high_voltage ' + str(high_voltage) + ' not supported')\n\tif low_voltage[-2:].lower() not in set({'mv'}):\n\t\tif low_voltage[-1].lower() != 'v':\n\t\t\traise ValueError('low_voltage ' + str(low_voltage) + ' not supported')\n\tif channel not in set({'1', '2'}):\n\t\traise ValueError('channel ' + str(channel) + ' not supported')\n\t\n\tif updown == 'up':\n\t\tout = 'outp'+channel+':puls:mode sin;'\n\t\tout += ':sour'+channel+':inv off;'\n\t\tout += ':sour'+channel+':volt:lev:imm:high '+high_voltage + ';'\n\t\tout += ':sour'+channel+':volt:lev:imm:low '+low_voltage + ';'\n\t\t#puls1 means the first pulse because we are in 
single mode\n\t\tout += ':sour'+channel+':puls1:wid '+pulsewidth + ';'\n\t\treturn out\n\telse:\n\t\tout = 'outp'+channel+':puls:mode sin;'\n\t\tout += ':sour'+channel+':inv on;'\n\t\tout += ':sour'+channel+':volt:lev:imm:low '+low_voltage + ';'\n\t\tout += ':sour'+channel+':volt:lev:imm:high '+high_voltage + ';'\n\t\t#puls1 means the first pulse because we are in single mode\n\t\tout += ':sour'+channel+':puls1:wid '+pulsewidth + ';'\n\t\treturn out", "def __init__(self, pinForward, pinBackward, pinControlStraight,pinLeft, pinRight, pinControlSteering):\n\n self.pinForward = pinForward\n self.pinBackward = pinBackward\n self.pinControlStraight = pinControlStraight\n self.pinLeft = pinLeft\n self.pinRight = pinRight\n self.pinControlSteering = pinControlSteering\n GPIO.setup(self.pinForward, GPIO.OUT)\n GPIO.setup(self.pinBackward, GPIO.OUT)\n GPIO.setup(self.pinControlStraight, GPIO.OUT)\n\n GPIO.setup(self.pinLeft, GPIO.OUT)\n GPIO.setup(self.pinRight, GPIO.OUT)\n GPIO.setup(self.pinControlSteering, GPIO.OUT)\n\n self.pwm_forward = GPIO.PWM(self.pinForward, 100)\n self.pwm_backward = GPIO.PWM(self.pinBackward, 100)\n self.pwm_forward.start(0)\n self.pwm_backward.start(0)\n\n self.pwm_left = GPIO.PWM(self.pinLeft, 100)\n self.pwm_right = GPIO.PWM(self.pinRight, 100)\n self.pwm_left.start(0)\n self.pwm_right.start(0)\n\n GPIO.output(self.pinControlStraight,GPIO.HIGH) \n GPIO.output(self.pinControlSteering,GPIO.HIGH)", "def __init__(self, pinForward1, pinBackward1,pinForward2, pinBackward2):\n\n self.pinForward1 = pinForward1\n self.pinBackward1 = pinBackward1\n self.pinForward2 = pinForward2\n self.pinBackward2 = pinBackward2\n\n GPIO.setup(self.pinForward1, GPIO.OUT)\n GPIO.setup(self.pinBackward1, GPIO.OUT)\n GPIO.setup(self.pinForward2, GPIO.OUT)\n GPIO.setup(self.pinBackward2, GPIO.OUT)\n\n self.pwm_forward1 = GPIO.PWM(self.pinForward1, 100)\n self.pwm_backward1 = GPIO.PWM(self.pinBackward1, 100)\n self.pwm_forward2 = GPIO.PWM(self.pinForward2, 100)\n self.pwm_backward2 = GPIO.PWM(self.pinBackward2, 100)\n \n self.pwm_forward1.start(0)\n self.pwm_backward1.start(0)\n self.pwm_forward2.start(0)\n self.pwm_backward2.start(0)", "def disable_pulse_modulation(self):\n self.write(\":SOUR:PULM:STAT OFF\")", "def stop(self):\n status=self.objdll.USBIO_GPIOWrite(self.id, c_byte(0b000), c_byte(0)) #ENA=0, DIR=0, bit0=0\n print(f\"Set all ports to LOW and stopped the step-motor:{status}\")\n\n return status" ]
[ "0.63637555", "0.60473126", "0.60334086", "0.58553827", "0.5754265", "0.57104355", "0.5707691", "0.5655316", "0.5650569", "0.5588606", "0.5566282", "0.55391467", "0.5466882", "0.54658824", "0.54450136", "0.54419047", "0.537178", "0.53631777", "0.5363072", "0.52936655", "0.529299", "0.5292669", "0.5291556", "0.52873504", "0.52840924", "0.5274901", "0.5204119", "0.5199436", "0.5184511", "0.5184179" ]
0.63240105
1
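The row above documents set_servo_pulsewidth and its 500-2500 microsecond range. A minimal usage sketch follows, assuming the legacy module-level pigpio API these snippets come from (pigpio.start() connects to the daemon); gpio 18 is an arbitrary example pin.

```python
import time
import pigpio

pigpio.start()                          # legacy API: connect to the pigpio daemon
for pw in range(500, 2501, 500):        # sweep across the documented 500-2500 us range
    pigpio.set_servo_pulsewidth(18, pw)
    time.sleep(0.5)
pigpio.set_servo_pulsewidth(18, 0)      # 0 stops servo pulses, as the query states
pigpio.stop()
```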
Get a free notification handle. Returns a handle greater than or equal to zero if OK, otherwise PI_NO_HANDLE. A notification is a method for being notified of gpio state changes via a pipe. Pipes are only accessible from the local machine so this function serves no purpose if you are using Python from a remote machine. The inbuilt (socket) notifications provided by callback() should be used instead. Notifications for handle x will be available at the pipe named /dev/pigpiox (where x is the handle number). E.g. if the function returns 15 then the notifications must be read from /dev/pigpio15. Example ... h = pigpio.notify_open()
def notify_open(): return _u2i(_pigpio_command(_control, _PI_CMD_NO, 0, 0))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def notify_close(handle):\n return _u2i(_pigpio_command(_control, _PI_CMD_NC, handle, 0))", "def notify_begin(handle, bits):\n return _u2i(_pigpio_command(_control, _PI_CMD_NB, handle, bits))", "def get_notification():\n condition.acquire()\n if not notifications:\n ret = condition.wait(2)\n if not ret:\n condition.release()\n raise TimeoutError(\"Timed out while waiting for notification\")\n\n notice = notifications.pop(0)\n condition.release()\n return notice", "def notify_pause(handle):\n return _u2i(_pigpio_command(_control, _PI_CMD_NB, handle, 0))", "def notifier(self, immediately_reset=True, name=None):\n with ops.name_scope(name, \"notify_notification\",\n [self._handle]) as name:\n return gen_resource_variable_ops.notify_notification(\n self._handle, immediately_reset=immediately_reset, name=name)", "def _notify(title, message, icon=\"dialog-error\"):\n try:\n import pynotify\n except ImportError:\n return\n pynotify.init(\"moya-doc\")\n n = pynotify.Notification(title, message, icon)\n n.show()", "def notification():\n # pop-up notification\n notifies = NotifyModel.get_notify(current_user.get_id())\n return jsonify(notifications=notifies)", "def _create_notify(knx_module: XKNX, config: ConfigType) -> XknxNotification:\n return XknxNotification(\n knx_module,\n name=config[CONF_NAME],\n group_address=config[CONF_ADDRESS],\n )", "def notification_preference(self) -> Optional[Sequence['outputs.NotificationPreferenceResponse']]:\n return pulumi.get(self, \"notification_preference\")", "def notification(message: str):\n # initialize the notification\n notify2.init(\"notifywhenLOAD\")\n notifyObj = notify2.Notification(\"Emergency Alert!\", message)\n notifyObj.set_timeout(12000)\n return notifyObj", "def poll(evt=1.e-5, iot=1.0):\n pend_event(evt)\n return pend_io(iot)", "def inotify_init(flags=0, closefd=CLOEXEC_DEFAULT):\n assert isinstance(flags, int), 'Flags must be an integer'\n\n if closefd:\n flags |= IN_CLOEXEC\n\n fd = lib.inotify_init1(flags)\n \n if fd < 0:\n err = ffi.errno\n if err == errno.EINVAL:\n raise ValueError(\"Invalid argument or flag\")\n elif err == errno.EMFILE:\n raise OSError(\"Maximum inotify instances reached\")\n elif err == errno.ENFILE:\n raise OSError(\"File descriptor limit hit\")\n elif err == errno.ENOMEM:\n raise MemoryError(\"Insufficent kernel memory avalible\")\n else:\n # If you are here, its a bug. send us the traceback\n raise UnknownError(err)\n\n return fd", "def GetNotifyEvent(self):\r\n \r\n return self.notify", "def GetNotifyEvent(self):\r\n \r\n return self.notify", "def GetNotifyEvent(self):\r\n \r\n return self.notify", "def __init__(self, pi, gpio_0, gpio_1, callback, bit_timeout=5):\n\n self.pi = pi\n self.gpio_0 = gpio_0\n self.gpio_1 = gpio_1\n\n self.callback = callback\n\n self.bit_timeout = bit_timeout\n\n self.in_code = False", "def notify(self, notification):\n topic = 'notify.' 
+ notification['subject']\n payload = serializer.dumps(notification, use_bin_type=True)\n self.socket.send_string(topic, flags=zmq.SNDMORE)\n self.socket.send(payload)\n return self.socket.recv_string()", "def get_gpio(request):\n lines=[]\n append=False\n with open(tasmotadir + \"/sonoff/sonoff_template.h\", \"r\") as f:\n for line in f:\n if append==True:\n split = line.split('//')[0]\n subbed = sub('[\\\\s+,;}]','', split)\n lines.append(subbed)\n if 'UserSelectablePins' in line:\n append=True\n if '}' in line:\n append=False\n gpios={}\n for num, gpio in enumerate(lines):\n gpios[gpio] = num\n return(gpios[request])", "def sd_notify(state, logger, unset_environment=False):\n\n\n addr = os.environ.get('NOTIFY_SOCKET')\n if addr is None:\n # not run in a service, just a noop\n return\n try:\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM | socket.SOCK_CLOEXEC)\n if addr[0] == '@':\n addr = '\\0' + addr[1:]\n sock.connect(addr)\n sock.sendall(state.encode('utf-8'))\n except:\n logger.debug(\"Exception while invoking sd_notify()\", exc_info=True)\n finally:\n if unset_environment:\n os.environ.pop('NOTIFY_SOCKET')\n sock.close()", "def gpio_input(door: Door):\n input_state = GPIO.input(GPIO_PIN)\n if input_state:\n door.is_closed()\n else:\n door.is_opened()", "def show_notification(notification_title = \"\", notification_message = \"\", gid = None, uid = None):\n child_pid = os.fork()\n if(child_pid != 0):\n return child_pid\n\n #child\n if gid != None:\n os.setgid(gid)\n if uid != None:\n os.setuid(uid)\n\n if 'HOME' in os.environ:\n del os.environ['HOME'] \n #force to use pwd\n file_path = os.path.expanduser(r'~/.dbus/session-bus/')\n tmp = os.listdir(file_path)\n if not tmp:\n os._exit(0)\n\n for filename in tmp:\n bus_addr = get_dbus_session_bus_addr(file_path+filename)\n if bus_addr:\n break\n if not bus_addr:\n os.exit(0)\n\n os.environ['DBUS_SESSION_BUS_ADDRESS'] = bus_addr\n print(os.environ['DBUS_SESSION_BUS_ADDRESS'])\n\n notification=Notify.Notification.new(notification_title, notification_message, \"dialog-information\")\n try:\n notification.show()\n except:\n pass\n finally:\n os._exit(0)\n #probably X is not started yet. The process' dying anyway. 
not a big deal.", "def handle(req):\n\n gpio.output(26, gpio.HIGH)\n time.sleep(0.2)\n gpio.output(26, gpio.LOW)\n\n return req", "def _setup_inotify(self, flag):\n i = None\n\n if flag:\n try:\n import inotify.adapters\n\n except ImportError:\n raise AssertionError(\n 'cannot use inotify, package not installed')\n\n else:\n i = inotify.adapters.Inotify(paths=[self.watch],\n block_duration_s=0)\n\n return (flag, i)", "def svn_client_ctx_t_notify_func_get(svn_client_ctx_t_self): # real signature unknown; restored from __doc__\n pass", "def on_notify(self, name):\r\n pass", "def test_notifications(get_touchmat):\n touchmat = get_touchmat\n touchmat_model = check_device_types.get_device_model(touchmat)\n\n val = touchmat.subscribe(callback)\n assert isinstance(val, int)\n assert val == 1\n\n name = touchmat._object_name\n # Notifications are never sent as '@0' even if we sent the command with @0\n if '@0' in name:\n name = 'touchmat'\n\n # TODO(EB) We'll need a manual test for on_suspend, on_resume,\n # on_device_connected, and on_device_disconnected\n\n touchmat.close()\n notification = get_notification()\n assert notification == ('{}.on_open_count'.format(name), 0)\n notification = get_notification()\n assert notification == ('{}.on_close'.format(name), None)\n touchmat.open()\n notification = get_notification()\n assert notification == ('{}.on_open'.format(name), None)\n notification = get_notification()\n assert notification == ('{}.on_open_count'.format(name), 1)\n\n if touchmat_model == Devices.touchmat_g2:\n area = {\"enabled\": True, \"bottom_right\": {\"x\": 12680, \"y\": 7650},\n \"top_left\": {\"x\": 4000, \"y\": 3000}}\n touchmat.active_area(area)\n notification = get_notification()\n assert notification == ('{}.on_active_area'.format(name), area)\n\n pen_range = TouchMat.ActivePenRange.five_mm\n touchmat.active_pen_range(pen_range)\n notification = get_notification()\n assert notification == ('{}.on_active_pen_range'.format(name),\n pen_range)\n\n touchmat.device_palm_rejection(True)\n notification = get_notification()\n assert notification == ('{}.on_device_palm_rejection'.format(name),\n True)\n\n touchmat.palm_rejection_timeout(242)\n notification = get_notification()\n assert notification == ('{}.on_palm_rejection_timeout'.format(name),\n 242)\n\n touchmat.calibrate()\n notification = get_notification()\n assert notification == ('{}.on_calibrate'.format(name), None)\n\n state = {\"active_pen\": False, \"touch\": True}\n touchmat.state(state)\n notification = get_notification()\n assert notification == ('{}.on_state'.format(name), state)\n\n touchmat.factory_default()\n notification = get_notification()\n assert notification == ('{}.on_factory_default'.format(name), None)\n\n touchmat.reset()\n expected = [('{}.on_reset'.format(name), None),\n ('{}.on_device_disconnected'.format(name), None),\n ('{}.on_device_connected'.format(name), None)]\n for dummy in range(len(expected)):\n notification = get_notification()\n assert notification in expected\n expected.remove(notification)\n\n val = touchmat.unsubscribe()\n assert isinstance(val, int)\n assert val == 0\n\n # Now make sure we aren't getting notification callbacks anymore...\n touchmat.open()\n with pytest.raises(TimeoutError) as execinfo:\n notification = get_notification()\n assert 'Timed out while waiting for notification' in execinfo.value.args[0]\n\n touchmat.factory_default()\n with pytest.raises(TimeoutError) as execinfo:\n notification = get_notification()\n assert 'Timed out while waiting for notification' in 
execinfo.value.args[0]\n\n # Verify hippy raises errors if we call subscribe with invalid parameters\n with pytest.raises(PySproutError) as execinfo:\n touchmat.subscribe('string')\n assert 'Invalid parameter' in execinfo.value.message\n with pytest.raises(PySproutError) as execinfo:\n touchmat.subscribe(touchmat)\n assert 'Invalid parameter' in execinfo.value.message\n with pytest.raises(PySproutError) as execinfo:\n touchmat.subscribe({})\n assert 'Invalid parameter' in execinfo.value.message\n with pytest.raises(PySproutError) as execinfo:\n touchmat.subscribe(3)\n assert 'Invalid parameter' in execinfo.value.message", "async def test_unknown_notification(\n hass: HomeAssistant, hank_binary_switch, integration, client\n) -> None:\n # just pick a random node to fake the notification event\n node = hank_binary_switch\n\n # We emit the event directly so we can skip any validation and event handling\n # by the lib. We will use a class that is guaranteed not to be recognized\n notification_obj = AsyncMock()\n notification_obj.node = node\n with pytest.raises(TypeError):\n node.emit(\"notification\", {\"notification\": notification_obj})\n\n notification_events = async_capture_events(hass, \"zwave_js_notification\")\n\n # Test a valid notification with an unsupported command class\n event = Event(\n type=\"notification\",\n data={\n \"source\": \"node\",\n \"event\": \"notification\",\n \"nodeId\": node.node_id,\n \"ccId\": 0,\n \"args\": {\n \"commandClassName\": \"No Operation\",\n \"commandClass\": 0,\n \"testNodeId\": 1,\n \"status\": 0,\n \"acknowledgedFrames\": 2,\n },\n },\n )\n node.receive_event(event)\n\n assert not notification_events", "def createNotification(self): \n if self.power_plugged == True and self.battery_percent >= 80:\n Notify.init(\"Charger Notifier\")\n notification = Notify.Notification.new(\n \"BATTERY\",\n \"Battery level is over 80%! Please unplug the charger.\",\n \"dialog-information\"\n )\n notification.show()\n elif self.power_plugged == False and self.battery_percent <= 40:\n Notify.init(\"Charger Notifier\")\n notification = Notify.Notification.new(\n \"BATTERY\",\n \"Battery level is less than 40%! Please plug in the charger.\",\n \"dialog-information\"\n )\n notification.show()", "def notification(self):\n return self._notification", "def get_phandle(self, phandle):\n if self.is_root():\n return self.phandles[phandle]\n else:\n self.parent.get_phandle(phandle)" ]
[ "0.6195248", "0.56858057", "0.5441757", "0.50871366", "0.49239257", "0.49207422", "0.47362456", "0.46772674", "0.46456864", "0.4637645", "0.45783353", "0.45498928", "0.45168123", "0.45168123", "0.45168123", "0.45026726", "0.44815427", "0.44622254", "0.44572738", "0.44307372", "0.44162944", "0.44059458", "0.43585783", "0.43149677", "0.43031257", "0.42874038", "0.42775008", "0.42628792", "0.42363986", "0.4235173" ]
0.69046897
0
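The notify_open row above says reports for handle x appear on the pipe /dev/pigpiox. Below is a minimal sketch of consuming them, assuming the same legacy module-level API and the standard pigpio report layout of two 16-bit fields (seqno, flags) followed by two 32-bit fields (tick, level); if that layout assumption is wrong, adjust the struct format.

```python
import struct
import pigpio

pigpio.start()
h = pigpio.notify_open()                 # handle >= 0 on success
pigpio.notify_begin(h, 1 << 4)           # ask for reports about gpio 4

with open("/dev/pigpio%d" % h, "rb") as pipe:
    # Assumed report layout: uint16 seqno, uint16 flags, uint32 tick, uint32 level.
    seqno, flags, tick, level = struct.unpack("HHII", pipe.read(12))
    print("gpio 4 is", (level >> 4) & 1)

pigpio.notify_close(h)
pigpio.stop()
```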
Pause notifications on a previously opened handle. Returns 0 if OK, otherwise PI_BAD_HANDLE.
def notify_pause(handle): return _u2i(_pigpio_command(_control, _PI_CMD_NB, handle, 0))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def processPollByHandle(hProcess):\n try:\n dwWait = win32event.WaitForSingleObject(hProcess, 0); # pylint: disable=no-member\n except:\n reporter.logXcpt('hProcess=%s %#x' % (hProcess, hProcess,));\n return True;\n return dwWait != win32con.WAIT_TIMEOUT; #0x102; #", "def notify_close(handle):\n return _u2i(_pigpio_command(_control, _PI_CMD_NC, handle, 0))", "def pause():\n xd = display.XKCDDisplayService()\n if xd.is_running():\n click.echo(\"pausing the dialogs\")\n xd.send_signal(signal.SIGUSR2)\n else:\n click.echo(\"xkcd service not running\")", "def on_pause(self):\r\n store = get_store()\r\n store.put(\"pause\", value=self.sm.current)\r\n return True", "def _pause(self):\n data_paused = None\n while self.target.is_active and data_paused != '01':\n data_paused = self._mem_read(self.data['paused'][0], 1)\n time.sleep(self.pause_time)\n self.data['paused'][1] = data_paused\n return", "def notify_open():\n return _u2i(_pigpio_command(_control, _PI_CMD_NO, 0, 0))", "def test_pause_already_closed(testchannel):\n with pytest.raises(ChannelClosedError):\n testchannel.pause()", "def pause(): # real signature unknown; restored from __doc__\n pass", "def set_hold():\n hold = request.params.get(\"hold\", 0) == \"true\"\n pid = request.params.get(\"pid\", 1, type=int)\n retval = RP_LIB.rp_PIDSetHold(pid, hold)\n if retval != 0:\n LOG.error(\"Failed to set PID internal state holding. Error code: %s\", ERROR_CODES[retval])", "def pause(self):\n if self.status()['state'] == \"playing\":\n self.toggle_pause()", "def resetInactive(): \n global expectedMsgId\n print(\"reset inactive\") \n expectedMsgId = C.POLL\n receiver()\n noteActivity()", "def checkStuck(stuckCount, pi):\n if stuckCount > 2000:\n # Execute button presses for high count\n pi.send(\"Press A\")\n time.sleep(1)\n pi.send(\"Press A\")\n time.sleep(2)\n pi.send(\"Press A\")\n time.sleep(2)\n\n # Reset the count to 0\n return 0\n else:\n return stuckCount", "def pause(self):\n pass\n # self.condor_object.hold()", "def on_resume(self):\r\n store = get_store()\r\n self.sm.current = str(store.get('pause')[\"value\"])", "def pause(self):\n self._cleanup()\n self._paused = True", "def PAUSED(self):\n self.pause_state = self.get_state() # the state FSM was in before 'op-pause' was called\n self.continue_state = self.pause_state\n self.update_status(self.STATES.PAUSED)", "async def pause_behaviors(self) -> None:", "def open_notifications(self):\n self.android_device_driver.adb.exec_adb_cmd(\n \"shell cmd statusbar expand-notifications\").wait()", "def state_wait_do(cfg, app, win, events):", "def pause(self):\r\n cmd = MsgHelper.createMessage(Messages.CMD_PAUSE)\r\n self.mailbox.push( cmd, high_priority = True )", "def pause(self, state):\n resp = yield from self.command('pause '+str(state))\n return True", "def pause(self):\n while 1:\n if self.is_paused:\n time.sleep(1)\n else:\n break", "def __pause(self, reset=False):\n\n self.entrada.write('pause 0\\n')\n self.entrada.flush()\n self.__new_handle(reset)\n self.estado = \"paused\"\n self.emit(\"estado\", \"paused\")", "def _suspend(event: E) -> None:\n event.app.suspend_to_background()", "def pause(self):\n try:\n if self.process.status() == \"stopped\":\n return \"Process is already stopped\"\n self.process.suspend()\n return \"Process Paused Successfully\"\n except Exception as e:\n logging.exception(e)\n return \"Failed to Pause Process\"", "def test_door_pause_protocol(enable_door_safety_switch):\n pause_mgr = PauseManager(door_state=DoorState.CLOSED)\n assert pause_mgr.queue == []\n\n 
pause_mgr.set_door(door_state=DoorState.OPEN)\n pause_mgr.pause(PauseType.PAUSE)\n assert pause_mgr.queue == [PauseType.PAUSE]\n\n with pytest.raises(PauseResumeError):\n pause_mgr.resume(PauseType.PAUSE)\n assert pause_mgr.queue == [PauseType.PAUSE]\n\n pause_mgr.set_door(door_state=DoorState.CLOSED)\n assert pause_mgr.queue == [PauseType.PAUSE]\n\n pause_mgr.resume(PauseType.PAUSE)\n assert pause_mgr.queue == []", "def close_play_lock(self) : \n self.play_lock = True", "def pause(self):\n pass", "def pause(self):\n pass", "def pause(self):\n\t\tpass" ]
[ "0.55920124", "0.55818653", "0.5238397", "0.5157523", "0.5139992", "0.513215", "0.5100176", "0.49161008", "0.49148747", "0.48973805", "0.48814264", "0.48166063", "0.48133954", "0.4805175", "0.47787488", "0.47648308", "0.47398287", "0.47372073", "0.47302106", "0.4697818", "0.4694735", "0.4693163", "0.4677523", "0.46645182", "0.46542647", "0.46454442", "0.46343726", "0.46321744", "0.46321744", "0.46271348" ]
0.69885635
0
Stop notifications on a previously opened handle and release the handle for reuse. Returns 0 if OK, otherwise PI_BAD_HANDLE.
def notify_close(handle): return _u2i(_pigpio_command(_control, _PI_CMD_NC, handle, 0))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stop():\n global _control, _notify\n\n if _notify is not None:\n _notify.stop()\n _notify = None\n\n if _control is not None:\n _control.close()\n _control = None", "def __del__(self):\n if hasattr(self, 'dev'):\n kernel32.CloseHandle(self.dev)", "def sx_close_sdk_connection(handle): \n rc = sx_api_close(handle) \n sx_check_rc(rc)", "def _cancel_notification_cycle(self) -> None:\n if HANDLE_CLEAN in self.data:\n cancel = self.data.pop(HANDLE_CLEAN)\n cancel()", "def default_close_cb(uv_handle_t, handle):\n alive.remove(handle) # now safe to free resources\n handle._ffi_cb = None\n handle._ffi_close_cb = None", "def _CloseHandle(self):\n ret = win32functions.CloseHandle(self.process)\n #win32api.CloseHandle(self.process)\n\n if ret == 0:\n ActionLogger().log('Warning: cannot close process handle!')\n #raise WinError()", "def close(self):\n if self.primary:\n os.close(self.primary)\n self.primary = None\n if self.secondary:\n os.close(self.secondary)\n self.secondary = None\n if hasattr(self, \"_process\") and self._process:\n if self._process.poll() is None:\n self._process.terminate()\n while self._process.poll() is None:\n time.sleep(0.001)\n self._process = None", "def close(self) -> None:\n if self._handle is None:\n return\n\n close_handle(self.handle)\n self._handle = None", "def Destroy(self):\n self.Disconnected()\n self._io_loop.remove_handler(self._fd)\n os.close(self._fd)\n self._gadget = None\n self._fd = None", "def del_handle(self, handle):\n\n self.pool.remove(handle)", "def CloseProcessHandle(process_handle: int) -> int:\n return kernel32.CloseHandle(process_handle)", "def close_handle(handle: HANDLE):\n if not bool(_close_handle(handle)):\n raise WinError()", "def _close(self):\n # TODO\n self.holding = False", "def release(self):\r\n if self.is_locked:\r\n os.close(self.fd)\r\n os.unlink(self.lockfile)\r\n self.is_locked = False", "def close(self):\n self.handle.close()", "def release(self):\n if self.is_locked:\n os.close(self.fd)\n os.unlink(self.lockfile)\n self.is_locked = False", "def handle_close(self):\n self.active = False\n self.close()", "def release(self):\r\n\r\n if self._lock_fd:\r\n unlock_file(self._lock_fd, close=True)\r\n self._lock_fd = None\r\n return True\r\n else:\r\n return False", "def close(self):\r\n try:\r\n self.proc.terminate()\r\n except (OSError, AttributeError): # pragma: no cover\r\n pass\r\n self.proc = None", "def release(self):\n if not self.is_locked():\n error = NotLocked()\n raise error\n if not self.i_am_locking():\n error = NotMyLock()\n raise error\n remove_existing_pidfile(self.path)", "def _close_staf_handle(self):\n\n try:\n self._staf_handle.unregister()\n except STAFException, e:\n raise FatalError(\"Error unregistering with STAF, RC: {0}, \"\n \"Result: {1}\".format(e.rc, e.result))", "def close(self):\n if self._dev_obj is not None:\n self._dev_obj.release()\n else:\n LOG.warning(\"Device is not opened!\")", "def _close_sd_ref():\n global SD_REF\n if SD_REF:\n SD_REF.close()\n SD_REF = None", "def _plugin_stop(handle):\n GPIO.cleanup()\n _LOGGER.info('MAX31865 (async) Disconnected.')", "def _close_fd(self):\n # close file descriptor for locks\n os.close(self.fd)\n self.fd = None", "def release(self):\r\n try:\r\n EventStream.AllStreams.remove(self)\r\n self.grab(False)\r\n os.close(self.filehandle)\r\n except:\r\n pass", "def close(self):\n if dev[self.id] == FLI_INVALID_DEVICE:\n raise FliError(\"Device already closed or not initialized\")\n dev[self.id] = FLI_INVALID_DEVICE\n with self.lock:\n self.status = CLOSED", 
"def quit(self):\n os.close(self._inotify_fd)", "def __exit__(self, type, value, traceback) :\n if self.spec :\n self.handle.close()\n self.handle = None", "def __del__(self):\n self._proc.kill()" ]
[ "0.61644775", "0.60394543", "0.59788483", "0.5864608", "0.5852858", "0.57879245", "0.57110846", "0.5650038", "0.5644712", "0.56323874", "0.56087345", "0.5561538", "0.55545336", "0.5508019", "0.55068856", "0.5484848", "0.547702", "0.53773355", "0.53760844", "0.5368541", "0.53618383", "0.53066874", "0.5299775", "0.52992857", "0.5288849", "0.5281941", "0.52727216", "0.5245299", "0.52349484", "0.5218389" ]
0.6524099
0
Sets a watchdog for a gpio. Returns 0 if OK, otherwise PI_BAD_USER_GPIO or PI_BAD_WDOG_TIMEOUT.
def set_watchdog(user_gpio, timeout): return _u2i(_pigpio_command(_control, _PI_CMD_WDOG, user_gpio, timeout))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_tgpio_digital(self, ionum, value, delay_sec=None):\r\n return self._arm.set_tgpio_digital(ionum=ionum, value=value, delay_sec=delay_sec)", "def bus_watchdog(self, value):\n self._write(MX_BUS_WATCHDOG, value)", "def gpio(self) -> int:", "def watchGPIOs(self, nodeid, mask):\n r = remote_hardware_pb2.HardwareMessage()\n r.typ = remote_hardware_pb2.HardwareMessage.Type.WATCH_GPIOS\n r.gpio_mask = mask\n return self._sendHardware(nodeid, r)", "def set_cgpio_digital(self, ionum, value, delay_sec=None):\r\n return self._arm.set_cgpio_digital(ionum=ionum, value=value, delay_sec=delay_sec)", "def test_write(self):\n with patch('RPi.GPIO.setmode') as mock_setmode:\n gpio = GPIODevice()\n with patch('RPi.GPIO.setup') as mock_setup:\n with patch('RPi.GPIO.output') as mock_output:\n value = gpio.write(0, False)\n mock_output.called_once_with(0, False)\n with patch('RPi.GPIO.cleanup') as mock_cleanup:\n gpio.close()", "def setup_gpio(self):\n try:\n pin = 4\n gpio = importlib.import_module('RPi.GPIO')\n gpio.setmode(gpio.BCM)\n gpio.setup(pin, gpio.IN, pull_up_down=gpio.PUD_UP)\n gpio.add_event_detect(pin, gpio.FALLING, callback=self.on_snap_pressed, bouncetime=200)\n except ImportError as e:\n self._logger.exception(e)\n print('raspi gpio module not found, continuing...')", "def gpio_setup():\n GPIO.setmode(GPIO.BOARD)\n GPIO.setwarnings(False)\n for led in (RED, AMBER, GREEN):\n GPIO.setup(LED[led],GPIO.OUT)", "def gpio_set(self, pin: str, status: Union[bool, str]) -> None:\n self.__logger.debug('Eva.gpio_set called')\n return self.__http_client.gpio_set(pin, status)", "def watchdog(self):\n pass", "def set_tgpio_digital_with_xyz(self, ionum, value, xyz, fault_tolerance_radius):\r\n return self._arm.set_tgpio_digital_with_xyz(ionum, value, xyz, fault_tolerance_radius)", "def set_timeout(self, timeout: int) -> None:\n raise WatchdogError(\"Setting timeout is not supported on {0}\".format(self.describe()))", "def get_tgpio_digital(self, ionum=None):\r\n return self._arm.get_tgpio_digital(ionum)", "def __self__(self, GPIO_LED):\n # GPIO.setup()\n # if error raise exception \"Device Not Ready\"\n self.status = false\n return self.status", "def _setGPIOs(self, Dpin, direction, value):\n\n try:\n\n with open('/sys/class/gpio/export', 'a') as f_export:\n f_export.write(self.MAPPING[Dpin])\n\n with open('/sys/class/gpio/' + Dpin + '/direction', 'a') as f_dir:\n f_dir.write(direction)\n\n with open('/sys/class/gpio/' + Dpin + '/value', 'a') as f_value:\n f_value.write(value)\n\n with open('/sys/class/gpio/' + Dpin + '/value') as f_value:\n result = \"PIN \" + Dpin + \" value \" + f_value.read()\n\n except Exception as err:\n LOG.error(\"Error setting GPIO value: \" + str(err))\n result = None\n\n return result", "def gpio(self) -> Gpio:\n\n return self._gpio", "def setup(self):\n self.pi.set_pull_up_down(self.gpio, pigpio.PUD_OFF)\n self.pi.set_watchdog(self.gpio, 0)\n self.register_callbacks()", "def write(gpio, level):\n return _u2i(_pigpio_command(_control, _PI_CMD_WRITE, gpio, level))", "def handle_adminfloodcontrol(bot, event):\n try:\n who, threshold = event.args\n threshold = int(threshold) \n except ValueError: event.missing(\"<userhost> <threshold> [period] [wait]\") ; return\n userhost = getwho(bot, who)\n if userhost: user = bot.users.getuser(userhost)\n else: user = bot.users.byname(who)\n if not user: event.reply(\"i don't know a user %s\" % who) ; return\n if \"OPER\" in user.data.perms: event.reply(\"no flood control for OPER\") ; return\n try: period = event.args[2]\n except 
IndexError: period = 60\n try: wait = event.args[3]\n except IndexError: wait = 120\n if threshold < 1: threshold = 1\n user.data.floodtime = period\n user.data.floodthreshold = threshold\n user.data.floodwait = wait\n user.data.floodrate = 1\n user.save()\n from jsb.lib.floodcontrol import floodcontrol\n for u in user.data.userhosts: floodcontrol.reset(u)\n event.reply(\"floodrate for %s set to %s\" % (user.data.name, threshold))", "def config_tgpio_reset_when_stop(self, on_off):\r\n return self._arm.config_io_reset_when_stop(1, on_off)", "def set_cgpio_analog(self, ionum, value):\r\n return self._arm.set_cgpio_analog(ionum=ionum, value=value)", "def set(pin, val=1):\n if val not in [0,1]:\n raise RuntimeError\n GPIO.output(pin,val)", "def kill_switch(disable_after, keys):\n watchdog(disable_after, keys)", "def set_PWM_dutycycle(user_gpio, dutycycle):\n return _u2i(_pigpio_command(_control, _PI_CMD_PWM, user_gpio, dutycycle))", "def set_cgpio_digital_input_function(self, ionum, fun):\r\n return self._arm.set_cgpio_digital_input_function(ionum=ionum, fun=fun)", "def reset_timer(self, *_) -> \"ALL\":\n self.last = time.time()\n delta = time.time() - self.last\n if delta > 180:\n print(\n \"!!! Warning: Watchdog failure detected, spawning a fallback \"\n \"thread.\"\n )\n self.watchdog = FallbackWatchdog(self)\n self.watchdog.start()", "def test_post_setpoint_404(client, auth_header, monkeypatch):\n def mock(*args):\n raise IndexError\n\n monkeypatch.setattr('app.api.gpio.set_pin', mock)\n\n with client as cl:\n res = cl.post('/gpio/29/setpoint',\n headers=auth_header,\n data=json.dumps({'value': 1}))\n\n assert res.status_code == 404", "def set_power_management(value: int) -> None:", "def set_PIenable(self,highlow): \n GPIO.output(self.chanlist[4], highlow)", "def pump_water(pump_pin, delay=1):\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup(pump_pin, GPIO.OUT)\n timeout = time.time() + 1.5*60 # 1.5 minutes\n\n try:\n print \"Watering plant...\"\n GPIO.output(pump_pin, GPIO.HIGH)\n\n while get_percent_wet() < 75:\n time.sleep(delay)\n if time.time() > timeout:\n break\n\n GPIO.output(pump_pin, GPIO.LOW)\n GPIO.cleanup(pump_pin)\n return\n\n except:\n GPIO.cleanup(pump_pin)\n\n return" ]
[ "0.56243426", "0.55728835", "0.53284377", "0.5285979", "0.5246178", "0.5158066", "0.50361794", "0.49879736", "0.49422064", "0.48005083", "0.47999045", "0.47879446", "0.47860768", "0.4766669", "0.47558644", "0.4749372", "0.47397855", "0.47077832", "0.47011495", "0.469448", "0.4692914", "0.4664981", "0.4650291", "0.4648067", "0.4643446", "0.46225014", "0.46139824", "0.4611091", "0.4598046", "0.45923573" ]
0.78611577
0
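A minimal sketch of arming and cancelling a watchdog follows, under the same legacy module-level API assumption; in pigpio the timeout argument is in milliseconds and a timeout of 0 cancels the watchdog.

```python
import pigpio

pigpio.start()
status = pigpio.set_watchdog(4, 1000)    # 1000 ms watchdog on gpio 4
if status == 0:
    print("watchdog armed: a timeout report fires if gpio 4 stays quiet for 1 s")
pigpio.set_watchdog(4, 0)                # cancel the watchdog
pigpio.stop()
```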
Read the levels of the bank 1 gpios (gpios 0-31). The returned 32 bit integer has a bit set if the corresponding gpio is logic 1. Gpio n has bit value (1<<n). Example ... print(bin(pigpio.read_bank_1())) 0b10010100000011100100001001111 ...
def read_bank_1(): return _pigpio_command(_control, _PI_CMD_BR1, 0, 0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_bank_1(levels):\n return _u2i(_pigpio_command(_control, _PI_CMD_BS1, levels, 0))", "def read_bank_2():\n return _pigpio_command(_control, _PI_CMD_BR2, 0, 0)", "def clear_bank_1(levels):\n return _u2i(_pigpio_command(_control, _PI_CMD_BC1, levels, 0))", "def set_bank_2(levels):\n return _u2i(_pigpio_command(_control, _PI_CMD_BS2, levels, 0))", "def choose_bin_base() -> int:\n return npr.choice((2, 8, 16))", "def get_level(raw_data, bits):\n level = 0\n for i in range(13, -1, -1):\n level <<= 1\n b, o = (bits[i] / 8) + 1, bits[i] % 8\n level |= (ord(raw_data[b]) >> o) & 1\n return level", "def _do_get_level(self):\n logging.info(__name__ + ' : Read level of channel 1')\n result = self._execute('R1')\n return float(result.replace(\"R\", \"\")) / 10", "def clear_bank_2(levels):\n return _u2i(_pigpio_command(_control, _PI_CMD_BC2, levels, 0))", "def Get_ActiveConfINT1_Level(self):\r\n return self.__readFromRegisterWithDictionaryMatch(self.__REG_RW_CTRL_REG3, self.__MASK_CTRL_REG3_H_LACTIVE, self.__LevelDict)", "def read_lock_bits(self):\n self.writecmd(self.APP, self.WRITE3_READ1, 4, [0x54, 0x00, 0x00, 0x00])\n return [(ord(self.data[0]) >> x) & 1 for x in range(5)]", "def read_current_ram_bank(self):\n return self.CURRENT_RAM_BANK", "def get_bank(self):\n return self._i2c_read(_BANK_ADDRESS)", "def read(gpio):\n return _u2i(_pigpio_command(_control, _PI_CMD_READ, gpio, 0))", "def read(type='hex', inc=False):\n bitstring = ''\n for i in [D7,D6,D5,D4,D3,D2,D1,D0]:\n bitstring += str(GPIO.input(i))\n if type == 'hex':\n rval = \"Ox%02x\" % (int(bitstring,2))\n elif type == \"int\":\n rval = int(bitstring,2)\n elif type == \"bin\":\n rval = \"{0:b}\".format(int(bitstring,2))\n else:\n rval = chr(int(bitstring,2))\n if inc:\n incr()\n return rval", "def get_bit(reg,n_bit):\n return reg >> n_bit & 1", "def bcd7(num):\n\tGPIO.output(36, GPIO.HIGH if (num & 0x00000001) > 0 else GPIO.LOW )\n\tGPIO.output(38, GPIO.HIGH if (num & 0x00000002) > 0 else GPIO.LOW )\n\tGPIO.output(40, GPIO.HIGH if (num & 0x00000004) > 0 else GPIO.LOW )\n\tGPIO.output(37, GPIO.HIGH if (num & 0x00000008) > 0 else GPIO.LOW )", "def read_pin(self, pin):\n value = 0\n pin = pin - 1\n if pin < 8:\n self.__port_a_value = self.__bus.read_byte_data(\n self.__ioaddress, self.GPIOA)\n value = self.__checkbit(self.__port_a_value, pin)\n else:\n pin = pin - 8\n self.__port_b_value = self.__bus.read_byte_data(\n self.__ioaddress, self.GPIOB)\n value = self.__checkbit(self.__port_b_value, pin)\n return value", "def get_byte():\n GPIO.setup(data_pins, GPIO.IN)\n # read the data pins\n GPIO.output(chip_select, 0)\n GPIO.output(clock_pin, 1)\n GPIO.output(clock_pin, 0)\n value = 0\n for i in range(0, 8):\n value += GPIO.input(data_pins[i]) << i\n return value", "def Read_Reg(self, bank, addr, data=0):\n # data is used for spi write\n cmd = self.board_def.CMD_READ_REG\n # data = 0xFAFAFAFA\n\n #I need to pack bank into 4 bytes and then only use the 3\n packedBank = struct.pack(\"l\", bank)\n unpackedBank = struct.unpack('4b', packedBank)\n\n packet = struct.pack(\"4bLi\", cmd, unpackedBank[0], unpackedBank[1], unpackedBank[2], addr, data)\n #Next I need to send the command\n try:\n self.send_data(packet)\n except socket.timeout:\n print (\"Timeout raised and caught\")\n #next read from the socket\n try:\n self.recv_stat, self.recv_data = self.receive_data()\n except socket.timeout:\n print (\"Timeout raised and caught\")\n\n if self.recv_stat != 0x0:\n print ('Issue with Reading Register stat={}!!!'.format(self.recv_stat) )\n return 
self.board_def.STAT_ERROR\n return self.recv_data", "def get_level(k):\r\n return int(log2(k))", "def level(self):\n return self.__pin.pwm", "def gpio(self) -> int:", "def get_song_bank(song_id):\n\tsong_pointers_address = 0xc77e\n\taddress = song_pointers_address + song_id * 2\n\tsong_index, bank = rom[address], rom[address + 1]\n\treturn song_index, bank", "def START_BANK() -> int:\n return 100", "def Get_Status_Batlevel(raw_data,\n batlevel_startpos,\n batlevel_endpost):\n batlevel = raw_data[batlevel_startpos:batlevel_endpost]\n print(f'| raw_batlevel = {batlevel}')\n return Convert_Hex_To_Decimal(batlevel)", "def get_bit(num, position):\n\treturn (num >> position) & 0b1", "def get_base_2(n):\n return str(bin(int(n))).removeprefix('0b')", "def READ_RELAY_N_AT_BANK(N):\n return chr(115 + N)", "def returnbank(self, number):\n found = False\n for bank in self.__banks:\n if bank.number() == str(number):\n found = True\n if bank.signed_in() and not bank.returned() and bank.out():\n bank.returnbank()\n return True, 0\n elif not bank.signed_in():\n return False, 1\n # print(\"Bank not signed in\")\n elif bank.returned():\n return False, 2\n # print(\"Bank already returned\")\n elif not bank.out():\n return False, 3\n # print(\"Bank is not out\")\n if not found:\n return False, 0", "def returnbank(self, number):\n found = False\n for bank in self.__banks:\n if bank.number() == str(number):\n found = True\n if bank.signed_in() and not bank.returned() and bank.out():\n bank.returnbank()\n return True, 0\n elif not bank.signed_in():\n return False, 1\n # print(\"Bank not signed in\")\n elif bank.returned():\n return False, 2\n # print(\"Bank already returned\")\n elif not bank.out():\n return False, 3\n # print(\"Bank is not out\")\n if not found:\n return False, 0" ]
[ "0.69223297", "0.6327469", "0.61724925", "0.5889782", "0.57640517", "0.5685262", "0.556518", "0.5522693", "0.54314464", "0.5401993", "0.5316239", "0.5215261", "0.5204304", "0.5166998", "0.51447284", "0.5141258", "0.5137961", "0.5115312", "0.510982", "0.5084564", "0.50781447", "0.5072831", "0.50185907", "0.50170255", "0.5005733", "0.5005488", "0.5003611", "0.4995106", "0.49838996", "0.49838996" ]
0.72436553
0
Read the levels of the bank 2 gpios (gpios 32-53). The returned 32 bit integer has a bit set if the corresponding gpio is logic 1. Gpio n has bit value (1<<(n-32)). Example ... print(bin(pigpio.read_bank_2())) 0b1111110000000000000000 ...
def read_bank_2(): return _pigpio_command(_control, _PI_CMD_BR2, 0, 0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_bank_2(levels):\n return _u2i(_pigpio_command(_control, _PI_CMD_BS2, levels, 0))", "def read_bank_1():\n return _pigpio_command(_control, _PI_CMD_BR1, 0, 0)", "def set_bank_1(levels):\n return _u2i(_pigpio_command(_control, _PI_CMD_BS1, levels, 0))", "def clear_bank_2(levels):\n return _u2i(_pigpio_command(_control, _PI_CMD_BC2, levels, 0))", "def choose_bin_base() -> int:\n return npr.choice((2, 8, 16))", "def get_level(raw_data, bits):\n level = 0\n for i in range(13, -1, -1):\n level <<= 1\n b, o = (bits[i] / 8) + 1, bits[i] % 8\n level |= (ord(raw_data[b]) >> o) & 1\n return level", "def get_bank(self):\n return self._i2c_read(_BANK_ADDRESS)", "def clear_bank_1(levels):\n return _u2i(_pigpio_command(_control, _PI_CMD_BC1, levels, 0))", "def read_lock_bits(self):\n self.writecmd(self.APP, self.WRITE3_READ1, 4, [0x54, 0x00, 0x00, 0x00])\n return [(ord(self.data[0]) >> x) & 1 for x in range(5)]", "def bcd7(num):\n\tGPIO.output(36, GPIO.HIGH if (num & 0x00000001) > 0 else GPIO.LOW )\n\tGPIO.output(38, GPIO.HIGH if (num & 0x00000002) > 0 else GPIO.LOW )\n\tGPIO.output(40, GPIO.HIGH if (num & 0x00000004) > 0 else GPIO.LOW )\n\tGPIO.output(37, GPIO.HIGH if (num & 0x00000008) > 0 else GPIO.LOW )", "def get_base_2(n):\n return str(bin(int(n))).removeprefix('0b')", "def _bits(num):\r\n return bin(int(num))[2:]", "def uint82bin(n, count=8):\n return \"\".join([str((n >> y) & 1) for y in range(count - 1, -1, -1)])", "def Read_Reg(self, bank, addr, data=0):\n # data is used for spi write\n cmd = self.board_def.CMD_READ_REG\n # data = 0xFAFAFAFA\n\n #I need to pack bank into 4 bytes and then only use the 3\n packedBank = struct.pack(\"l\", bank)\n unpackedBank = struct.unpack('4b', packedBank)\n\n packet = struct.pack(\"4bLi\", cmd, unpackedBank[0], unpackedBank[1], unpackedBank[2], addr, data)\n #Next I need to send the command\n try:\n self.send_data(packet)\n except socket.timeout:\n print (\"Timeout raised and caught\")\n #next read from the socket\n try:\n self.recv_stat, self.recv_data = self.receive_data()\n except socket.timeout:\n print (\"Timeout raised and caught\")\n\n if self.recv_stat != 0x0:\n print ('Issue with Reading Register stat={}!!!'.format(self.recv_stat) )\n return self.board_def.STAT_ERROR\n return self.recv_data", "def get_byte():\n GPIO.setup(data_pins, GPIO.IN)\n # read the data pins\n GPIO.output(chip_select, 0)\n GPIO.output(clock_pin, 1)\n GPIO.output(clock_pin, 0)\n value = 0\n for i in range(0, 8):\n value += GPIO.input(data_pins[i]) << i\n return value", "def read_current_ram_bank(self):\n return self.CURRENT_RAM_BANK", "def read(gpio):\n return _u2i(_pigpio_command(_control, _PI_CMD_READ, gpio, 0))", "def get_song_bank(song_id):\n\tsong_pointers_address = 0xc77e\n\taddress = song_pointers_address + song_id * 2\n\tsong_index, bank = rom[address], rom[address + 1]\n\treturn song_index, bank", "def uint82bin(n, count=8):\n return ''.join([str((n >> y) & 1) for y in range(count-1, -1, -1)])", "def read_pin(self, pin):\n value = 0\n pin = pin - 1\n if pin < 8:\n self.__port_a_value = self.__bus.read_byte_data(\n self.__ioaddress, self.GPIOA)\n value = self.__checkbit(self.__port_a_value, pin)\n else:\n pin = pin - 8\n self.__port_b_value = self.__bus.read_byte_data(\n self.__ioaddress, self.GPIOB)\n value = self.__checkbit(self.__port_b_value, pin)\n return value", "def _do_get_level(self):\n logging.info(__name__ + ' : Read level of channel 1')\n result = self._execute('R1')\n return float(result.replace(\"R\", \"\")) / 10", "def read(type='hex', 
inc=False):\n bitstring = ''\n for i in [D7,D6,D5,D4,D3,D2,D1,D0]:\n bitstring += str(GPIO.input(i))\n if type == 'hex':\n rval = \"Ox%02x\" % (int(bitstring,2))\n elif type == \"int\":\n rval = int(bitstring,2)\n elif type == \"bin\":\n rval = \"{0:b}\".format(int(bitstring,2))\n else:\n rval = chr(int(bitstring,2))\n if inc:\n incr()\n return rval", "def gpio(self) -> int:", "def _read_byte(self):\n # Setup io pin as input mode\n self.gpio.setup(self._io_pin, GPIO.IN)\n\n byte = 0\n for i in range(8):\n # Read data on the falling edge of clk\n self.gpio.output(self._clk_pin, GPIO.HIGH)\n self._sleep()\n\n self.gpio.output(self._clk_pin, GPIO.LOW)\n self._sleep()\n\n bit = self.gpio.input(self._io_pin)\n byte |= ((2 ** i) * bit)\n\n return byte", "def READ_RELAY_N_AT_BANK(N):\n return chr(115 + N)", "def get_pressurelsb(self):\n byte_list = self.i2c.readfrom_mem(\n self.device_address,\n self.REGISTER_PRESSURELSB,\n 1,\n addrsize=8\n )\n val = 0\n val = val << 8 | byte_list[0]\n return val", "def get_mode(gpio):\n return _u2i(_pigpio_command(_control, _PI_CMD_MODEG, gpio, 0))", "def get_bit(reg,n_bit):\n return reg >> n_bit & 1", "def _get_bin_map_of_number(number, length):\n empty_map = '0' * length\n bin_map_long = empty_map + str(bin(number))[2:]\n return bin_map_long[-length:]", "def hit_bin(self, n):\n # TODO: fix this monkey code!\n\n if n < 4:\n return n\n elif n << 3 == 0:\n return 4\n elif n << 4 == 0:\n return 5\n elif n << 5 == 0:\n return 6\n elif n >= 32 and n <= 127:\n return 7\n else:\n return 8" ]
[ "0.6800698", "0.6579462", "0.6324625", "0.61041206", "0.58211935", "0.5655997", "0.5601498", "0.55630827", "0.5552614", "0.5406119", "0.5350952", "0.5269801", "0.52556896", "0.5254825", "0.525129", "0.522594", "0.5203341", "0.5201802", "0.5199482", "0.5179958", "0.51724535", "0.5121025", "0.5117484", "0.5113055", "0.5101314", "0.5082782", "0.5059285", "0.502894", "0.49917412", "0.4988534" ]
0.7134265
0
Clears gpios 0-31 if the corresponding bit in levels is set. Returns 0 if OK, otherwise PI_SOME_PERMITTED. A status of PI_SOME_PERMITTED indicates that the user is not allowed to write to one or more of the gpios.
def clear_bank_1(levels): return _u2i(_pigpio_command(_control, _PI_CMD_BC1, levels, 0))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clear_bank_2(levels):\n return _u2i(_pigpio_command(_control, _PI_CMD_BC2, levels, 0))", "def off_all(self):\n self._set_status(\"off\", \"11111111\")", "def set_vcom_deselect_level(level):\n send_command(0xDB)\n send_command(level << 4)", "def pin_pullclear(self, pin):\n port_num = self._convert_pin_port(pin)\n if port_num:\n gpio.pullup(port_num, gpio.PULLNONE)", "def check_clear_flags(self):\n self._command(self.commands[\"CLEAR_ERROR_FLAGS\"])\n self._command(self.commands[\"CLEAR_REBOOTED_FLAG\"])", "def clearMyStatus(self):\n self.maxBattery = 0\n self.currentPower = 0\n self.thrust = 0.0\n self.rotation = 0.0\n self.radar = 0\n self.jamming = 0\n self.repair = 0\n self.mass = 0.0\n self.accel = 0.0\n self.maxAssault = 0\n self.assaultStrength = 0", "def off(self):\n for light in self.all:\n GPIO.output(light, 0)", "def octopus_permissions_clear(self, msg, args):\r\n return self.permissions.clear_permissions()", "def clear_bit(num, i):\n return num & ~(1 << i)", "def all_off():\n print(\"Climate is within set parameters; toggling systems off if any are on\")\n GPIO.output(HEATPIN, RELAYOFF)\n GPIO.output(COOLPIN, RELAYOFF)\n GPIO.output(FANPIN, RELAYOFF)\n time.sleep(30)", "def clear_all(self):\n for octet in self.master.children['!registerframe']._octets:\n for button in octet._bits_val:\n button.set(0)\n octet._update_value()\n self.master.children[\"!registerframe\"].update_reg_value()\n return None", "def clear(self):\n self.cmd(0x33) # $33 8-bit mode\n self.cmd(0x32) # $32 8-bit mode\n self.cmd(0x28) # $28 8-bit mode\n self.cmd(0x0C) # $0C 8-bit mode\n self.cmd(0x06) # $06 8-bit mode\n self.cmd(0x01) # $01 8-bit mode", "def reset(ctx):\n\n controller = ctx.obj['controller']\n click.echo('Resetting OATH data...')\n old_id = controller.id\n controller.reset()\n\n settings = ctx.obj['settings']\n keys = settings.setdefault('keys', {})\n if old_id in keys:\n del keys[old_id]\n settings.write()\n\n click.echo(\n 'Success! 
All OATH credentials have been cleared from your YubiKey.')", "def stop(self):\n status=self.objdll.USBIO_GPIOWrite(self.id, c_byte(0b000), c_byte(0)) #ENA=0, DIR=0, bit0=0\n print(f\"Set all ports to LOW and stopped the step-motor:{status}\")\n\n return status", "def clr_bit(self, port, bit):\n hw = self.device.peripherals[port]\n hw.BSRR.wr(1 << ((bit & 15) + 16))", "def unconfigure_aaa_authorization_commands(device, level,level_name=\"\", level_action=\"\", group_name=None):\n\n cmd = f'no aaa authorization commands {level} {level_name} {level_action}'\n if group_name:\n cmd = f'no aaa authorization commands {level} {level_name} group {group_name} {level_action}'\n try:\n device.configure(cmd)\n except SubCommandFailure as e:\n raise SubCommandFailure(\n f'Could not unconfigure aaa authorization commands:\\n{e}'\n )", "def write(gpio, level):\n return _u2i(_pigpio_command(_control, _PI_CMD_WRITE, gpio, level))", "def set(pin, val=1):\n if val not in [0,1]:\n raise RuntimeError\n GPIO.output(pin,val)", "async def _cb(self, gpio, level, tick):\n\n if level < asyncpio.TIMEOUT:\n\n if self.in_code == False:\n self.bits = 1\n self.num = 0\n\n self.in_code = True\n self.code_timeout = 0\n await self.pi.set_watchdog(self.gpio_0, self.bit_timeout)\n await self.pi.set_watchdog(self.gpio_1, self.bit_timeout)\n else:\n self.bits += 1\n self.num = self.num << 1\n\n if gpio == self.gpio_0:\n self.code_timeout = self.code_timeout & 2 # clear gpio 0 timeout\n else:\n self.code_timeout = self.code_timeout & 1 # clear gpio 1 timeout\n self.num = self.num | 1\n\n else:\n\n if self.in_code:\n\n if gpio == self.gpio_0:\n self.code_timeout = self.code_timeout | 1 # timeout gpio 0\n else:\n self.code_timeout = self.code_timeout | 2 # timeout gpio 1\n\n if self.code_timeout == 3: # both gpios timed out\n await self.pi.set_watchdog(self.gpio_0, 0)\n await self.pi.set_watchdog(self.gpio_1, 0)\n self.in_code = False\n self.callback(self.bits, self.num)", "def disable_pulse_modulation(self):\n self.write(\":SOUR:PULM:STAT OFF\")", "def clearAllCanSelectFlags(self):\n for key in self.canSelectFlags.keys():\n self.canSelectFlags[key] = 0", "def undefFlags(self):\n self.setRegister(h8_const.REG_FLAGS, None)", "def clear_flags(self):\n self.flags.clear()", "async def rndactivity_clear(self, ctx: commands.Context):\n amount = len(await self.config.statuses())\n if await confirm(\n ctx,\n content=_(\n \"Are you sure you want to clear {amount} statuses?\\n\\n\"\n \"**This action is irreversible!**\"\n ).format(amount=amount),\n ):\n await self.config.statuses.set([])\n await self.bot.change_presence(activity=None, status=self.bot.guilds[0].me.status)\n await fmt(ctx, tick(_(\"Successfully removed {amount} status strings.\")), amount=amount)\n else:\n await fmt(ctx, _(\"Okay then.\"))", "def turn_all_off(self):\n for led_type in LED:\n self.led_off(led_type)\n logging.info('LED: ALL - Status: 0')", "def set_PIenable(self,highlow): \n GPIO.output(self.chanlist[4], highlow)", "def light_off(self, pin='D13'):\n self.light_set(pin, '0')", "def gpio_set(self, pin: str, status: Union[bool, str]) -> None:\n self.__logger.debug('Eva.gpio_set called')\n return self.__http_client.gpio_set(pin, status)", "def unset_dq_bits(value, okbits=32+64+512, verbose=False):\n bin_bits = np.binary_repr(okbits)\n n = len(bin_bits)\n for i in range(n):\n if bin_bits[-(i+1)] == '1':\n if verbose:\n print(2**i)\n \n value -= (value & 2**i)\n \n return value", "def select_levels(syncmap, levels):\n self.log([u\"Levels: '%s'\", levels])\n if levels 
is None:\n return\n try:\n levels = [int(l) for l in levels if int(l) > 0]\n syncmap.fragments_tree.keep_levels(levels)\n self.log([u\"Selected levels: %s\", levels])\n except ValueError:\n self.log_warn(u\"Cannot convert levels to list of int, returning unchanged\")" ]
[ "0.5283613", "0.5021032", "0.496039", "0.49579084", "0.49537703", "0.49004257", "0.48708", "0.48219374", "0.48002625", "0.47900844", "0.47530323", "0.47522688", "0.4676056", "0.4675586", "0.4653739", "0.4611264", "0.4600976", "0.4599894", "0.4595285", "0.45922235", "0.45884457", "0.45467424", "0.45439306", "0.45341873", "0.45313114", "0.4524791", "0.45205924", "0.45089194", "0.45035243", "0.4471292" ]
0.5718622
0
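A minimal usage sketch for the clear_bank_1 record above: the levels argument is a 32-bit mask whose bit n corresponds to gpio n, so several gpios can be driven low in one call. The gpio numbers and the set_mode/OUTPUT names are illustrative assumptions about the rest of the module, not part of this record.

import pigpio

pigpio.start()
for g in (4, 17):                       # hypothetical gpios, for illustration only
    pigpio.set_mode(g, pigpio.OUTPUT)   # assumed module-level set_mode/OUTPUT
mask = (1 << 4) | (1 << 17)             # bit n of the mask selects gpio n
status = pigpio.clear_bank_1(mask)      # 0 if OK, PI_SOME_PERMITTED otherwise
pigpio.stop()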
Clears gpios 32-53 if the corresponding bit (0-21) in levels is set. Returns 0 if OK, otherwise PI_SOME_PERMITTED. A status of PI_SOME_PERMITTED indicates that the user is not allowed to write to one or more of the gpios.
def clear_bank_2(levels): return _u2i(_pigpio_command(_control, _PI_CMD_BC2, levels, 0))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clear_bank_1(levels):\n return _u2i(_pigpio_command(_control, _PI_CMD_BC1, levels, 0))", "def check_clear_flags(self):\n self._command(self.commands[\"CLEAR_ERROR_FLAGS\"])\n self._command(self.commands[\"CLEAR_REBOOTED_FLAG\"])", "def set_vcom_deselect_level(level):\n send_command(0xDB)\n send_command(level << 4)", "def off_all(self):\n self._set_status(\"off\", \"11111111\")", "def clear(self):\n self.cmd(0x33) # $33 8-bit mode\n self.cmd(0x32) # $32 8-bit mode\n self.cmd(0x28) # $28 8-bit mode\n self.cmd(0x0C) # $0C 8-bit mode\n self.cmd(0x06) # $06 8-bit mode\n self.cmd(0x01) # $01 8-bit mode", "def pin_pullclear(self, pin):\n port_num = self._convert_pin_port(pin)\n if port_num:\n gpio.pullup(port_num, gpio.PULLNONE)", "def write(gpio, level):\n return _u2i(_pigpio_command(_control, _PI_CMD_WRITE, gpio, level))", "def clear_bit(num, i):\n return num & ~(1 << i)", "def octopus_permissions_clear(self, msg, args):\r\n return self.permissions.clear_permissions()", "def unconfigure_aaa_authorization_commands(device, level,level_name=\"\", level_action=\"\", group_name=None):\n\n cmd = f'no aaa authorization commands {level} {level_name} {level_action}'\n if group_name:\n cmd = f'no aaa authorization commands {level} {level_name} group {group_name} {level_action}'\n try:\n device.configure(cmd)\n except SubCommandFailure as e:\n raise SubCommandFailure(\n f'Could not unconfigure aaa authorization commands:\\n{e}'\n )", "def light_off(self, pin='D13'):\n self.light_set(pin, '0')", "def off(self):\n for light in self.all:\n GPIO.output(light, 0)", "def disable_pulse_modulation(self):\n self.write(\":SOUR:PULM:STAT OFF\")", "def clr_bit(self, port, bit):\n hw = self.device.peripherals[port]\n hw.BSRR.wr(1 << ((bit & 15) + 16))", "def reset(ctx):\n\n controller = ctx.obj['controller']\n click.echo('Resetting OATH data...')\n old_id = controller.id\n controller.reset()\n\n settings = ctx.obj['settings']\n keys = settings.setdefault('keys', {})\n if old_id in keys:\n del keys[old_id]\n settings.write()\n\n click.echo(\n 'Success! 
All OATH credentials have been cleared from your YubiKey.')", "def set(pin, val=1):\n if val not in [0,1]:\n raise RuntimeError\n GPIO.output(pin,val)", "def test_02_setlevel(self):\n msg = udocker.Msg(5)\n self._verify_descriptors(msg)\n self.assertEqual(msg.level, 5)\n msg = udocker.Msg(0)\n msg.setlevel(7)\n self._verify_descriptors(msg)\n self.assertEqual(msg.level, 7)", "def clearMyStatus(self):\n self.maxBattery = 0\n self.currentPower = 0\n self.thrust = 0.0\n self.rotation = 0.0\n self.radar = 0\n self.jamming = 0\n self.repair = 0\n self.mass = 0.0\n self.accel = 0.0\n self.maxAssault = 0\n self.assaultStrength = 0", "def clear_all(self):\n for octet in self.master.children['!registerframe']._octets:\n for button in octet._bits_val:\n button.set(0)\n octet._update_value()\n self.master.children[\"!registerframe\"].update_reg_value()\n return None", "def unset_dq_bits(value, okbits=32+64+512, verbose=False):\n bin_bits = np.binary_repr(okbits)\n n = len(bin_bits)\n for i in range(n):\n if bin_bits[-(i+1)] == '1':\n if verbose:\n print(2**i)\n \n value -= (value & 2**i)\n \n return value", "def all_off():\n print(\"Climate is within set parameters; toggling systems off if any are on\")\n GPIO.output(HEATPIN, RELAYOFF)\n GPIO.output(COOLPIN, RELAYOFF)\n GPIO.output(FANPIN, RELAYOFF)\n time.sleep(30)", "def clear_flags(self):\n self.flags.clear()", "def undefFlags(self):\n self.setRegister(h8_const.REG_FLAGS, None)", "def clearAllCanSelectFlags(self):\n for key in self.canSelectFlags.keys():\n self.canSelectFlags[key] = 0", "def gpio_set(self, pin: str, status: Union[bool, str]) -> None:\n self.__logger.debug('Eva.gpio_set called')\n return self.__http_client.gpio_set(pin, status)", "def stop(self):\n status=self.objdll.USBIO_GPIOWrite(self.id, c_byte(0b000), c_byte(0)) #ENA=0, DIR=0, bit0=0\n print(f\"Set all ports to LOW and stopped the step-motor:{status}\")\n\n return status", "def set_PIenable(self,highlow): \n GPIO.output(self.chanlist[4], highlow)", "def Clear(port):\n\tport.write(\"C\")", "async def _cb(self, gpio, level, tick):\n\n if level < asyncpio.TIMEOUT:\n\n if self.in_code == False:\n self.bits = 1\n self.num = 0\n\n self.in_code = True\n self.code_timeout = 0\n await self.pi.set_watchdog(self.gpio_0, self.bit_timeout)\n await self.pi.set_watchdog(self.gpio_1, self.bit_timeout)\n else:\n self.bits += 1\n self.num = self.num << 1\n\n if gpio == self.gpio_0:\n self.code_timeout = self.code_timeout & 2 # clear gpio 0 timeout\n else:\n self.code_timeout = self.code_timeout & 1 # clear gpio 1 timeout\n self.num = self.num | 1\n\n else:\n\n if self.in_code:\n\n if gpio == self.gpio_0:\n self.code_timeout = self.code_timeout | 1 # timeout gpio 0\n else:\n self.code_timeout = self.code_timeout | 2 # timeout gpio 1\n\n if self.code_timeout == 3: # both gpios timed out\n await self.pi.set_watchdog(self.gpio_0, 0)\n await self.pi.set_watchdog(self.gpio_1, 0)\n self.in_code = False\n self.callback(self.bits, self.num)", "def systemOff():\n # Updated 11/19/16\n I2C.write_byte_data(Valve_bus, pinOut_O, 0x00 )\n I2C.write_byte_data(Pump_Mag_bus, pinOut_O, 0x00)" ]
[ "0.5860812", "0.51690996", "0.5044732", "0.4996031", "0.4979713", "0.4973494", "0.48871407", "0.4856664", "0.4855407", "0.48206553", "0.4751833", "0.4747806", "0.47464278", "0.4725271", "0.47152248", "0.47134897", "0.46692872", "0.46668115", "0.4658772", "0.46493933", "0.46483642", "0.46447366", "0.4642449", "0.45866796", "0.45790634", "0.45726427", "0.4565127", "0.45550877", "0.45131084", "0.44781575" ]
0.5415214
1
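For clear_bank_2 the mask covers gpios 32-53, so bit 0 of levels maps to gpio 32. A sketch under the same assumptions as the clear_bank_1 example above:

mask = 1 << (32 - 32)                   # bit 0 -> gpio 32
status = pigpio.clear_bank_2(mask)      # 0 if OK, PI_SOME_PERMITTED otherwise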
Gets the current system tick. Tick is the number of microseconds since system boot. As tick is an unsigned 32 bit quantity it wraps around after 2^32 microseconds, which is approximately 1 hour 12 minutes. Example #!/usr/bin/env python import pigpio import time pigpio.start() t1 = pigpio.get_current_tick() time.sleep(5) t2 = pigpio.get_current_tick() s = "5 seconds is " + str(pigpio.tickDiff(t1, t2)) + " ticks" print(s) pigpio.stop() displays 5 seconds is 5003398 ticks
def get_current_tick(): return _pigpio_command(_control, _PI_CMD_TICK, 0, 0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_tick():\n return _tick", "def realtime_tick(self):\n return self._read(MX_REALTIME_TICK)", "def tick(self):\n prev_last_tick = self.last_tick_\n self.last_tick_ = timeit.default_timer()\n latest_tick_period = self.last_tick_ - prev_last_tick\n return latest_tick_period", "def currentTick(self):\n return self._currentTick", "def get_current_time() -> int:\n float_time = time.time()\n return int(float_time * 1000) # Convert to ms and int", "def tick(x: float) -> float:\n time.sleep(x)\n return x", "def get_ticks(self):\n return pg.time.get_ticks()", "def realtime():\n return timemodule.time()", "def get_tick_value(self):\n return self.tick", "def getTime():\n\n return float(time.perf_counter()*1000)", "def last_tick_time(self):\n return self.last_tick_", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def now():\n\n return rospy.Time.now().to_nsec()", "def get_system_time(self):\n\t\treturn call_sdk_function('PrlStatCpu_GetSystemTime', self.handle)", "def get_time(self):\n return self._ticks", "def get_current_timestamp():\n return int(round(time.time() * 1e3))", "def get_current_time():\n return int(time.time())", "def current_time():\n\n return int(1000 * time())", "def get_cpu_clock(verbose=False):\n\n fn = '/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq'\n output = run(\"cat \" + fn, quiet=True)\n # This file apparently only exists if the kernel's power saving module is\n # configured a certain way. I have so far only seen it on cn10 and cn11.\n # It looks like the units are kHz.\n\n clock_in_GHz = None\n\n try:\n clock_in_kHz = int(output)\n clock_in_GHz = float(clock_in_kHz) / (10**6)\n return clock_in_GHz\n except ValueError:\n if verbose:\n print(\"Error: On host = {Host}, unable to get cpu clock in string\\n{Output}\"\n .format(Host=env.host, Output=output))\n\n # The cpuinfo_max_freq file approach didn't work, so get current clock\n # from /proc/cpuinfo\n output = run(\"cat /proc/cpuinfo | grep MHz | uniq\", quiet=True)\n\n regex = re.compile(\n \"\"\"\n .*cpu\\sMHz # any chars before \"cpu MHz\"\n \\s*:\\s* # any amount of whitespace, colon, any amount of whitespace\n (\\d*.?\\d*) # any digits, <= 1 period, any digits (i.e. 
any positive float)\n \\s* # any amount of whitespace\n \"\"\", re.VERBOSE)\n\n matches = regex.findall(output)\n\n if (len(matches) == 1):\n clock_in_GHz = float(matches[0]) / (10**3) # MHz to GHz\n else:\n print(\"Error: On host = {Host}, unable to determine cpu frequency in string\\n{Output}\"\n .format(Host = env.host, Output = output))\n\n if verbose:\n print(\"{Host:4} | CPU clock: {Clock:4.2f} GHz\".format(Host=env.host, Clock=clock_in_GHz))\n\n return clock_in_GHz", "def lastTick():", "def sys_up_time():\n\n with open('/proc/uptime', 'r') as f:\n uptime_seconds = float(f.readline().split()[0])\n return int(uptime_seconds)", "def now():\n return int(time.time() * 1000)", "def _get_current_epoch_time() -> float:\n return time.time()", "def fetch_current():\n\n data = json.load(urllib2.urlopen(TICKER_URL))\n\n buy = float(data[\"ask\"])\n sell = float(data[\"bid\"])\n\n now = int(time.time()) # Get current unix time\n\n return now, buy, sell", "def get_uptime():\n output = Popen('cat /proc/uptime', shell=True, stdout=PIPE)\n return float(output.communicate()[0].split()[0])" ]
[ "0.70856047", "0.68607926", "0.6551982", "0.64099157", "0.62377304", "0.61763674", "0.61676455", "0.6149985", "0.61287653", "0.6074811", "0.60638833", "0.603675", "0.603675", "0.603675", "0.603675", "0.603675", "0.603675", "0.5901134", "0.5872252", "0.5864109", "0.58607256", "0.58198005", "0.5813197", "0.58031946", "0.5798986", "0.5765924", "0.5759587", "0.5715749", "0.5714817", "0.56950986" ]
0.8358748
0
Get the Pi's hardware revision number. It is unfortunate that Pi boards have been named Revision.1 and Revision.2. That use of the word revision is distinct from the Pi's hardware revision number. The hardware revision is the last 4 characters on the Revision line of /proc/cpuinfo. The revision number can be used to determine the assignment of gpios to pins. There are at least two types of board. Type 1 has gpio 0 on P1-3, gpio 1 on P1-5, and gpio 21 on P1-13. Type 2 has gpio 2 on P1-3, gpio 3 on P1-5, gpio 27 on P1-13, and gpios 28-31 on P5. Type 1 boards have hardware revision numbers of 2 and 3. Type 2 boards have hardware revision numbers of 4, 5, 6, and 15. If the hardware revision can not be found or is not a valid hexadecimal number the function returns 0.
def get_hardware_revision(): return _pigpio_command(_control, _PI_CMD_HWVER, 0, 0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getFirmwareRevision(self): \n return self.firmware_revision", "def hardware_version(self):\n version = self._dll.JLINKARM_GetHardwareVersion()\n major = version / 10000 % 100\n minor = version / 100 % 100\n return '%d.%02d' % (major, minor)", "def get_revision(self):\n vers = self.send(\"?R\", recv=True)\n # Verify its a valid version\n # ? why was this commented out\n float(vers)\n # But return as string to avoid precision issues\n return vers", "def getFirmwareVersion(self, board=0):\n return self.callModule('admin', board, 0, 'getVersion')", "def hardware_version(self):\n return self.data.get('hw_ver')", "def hardware_version(self) -> str:\n return self.camera_info[\"main_hw_version\"]", "def revision(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"revision\")", "def hw_version(self) -> str | None:\n return self.status.get(\"FIRMWARE\")", "def python_revision():\n return _sys_version()[3]", "def get_ilo_firmware_version_as_major_minor(self):\n try:\n manager, reset_uri = self._get_ilo_details()\n ilo_fw_ver_str = (\n manager['Oem']['Hp']['Firmware']['Current']['VersionString']\n )\n return common.get_major_minor(ilo_fw_ver_str)\n except Exception:\n return None", "def get_revision(self) -> str:\n try:\n return self.cmd.rev_parse(verify=True, args=\"HEAD\", check_returncode=True)\n except exc.CommandError:\n return \"initial\"", "def get_revision(length: int = constants.REVISION_LEN) -> str:\n global _REVISION\n if _REVISION is None:\n symbols = string.ascii_lowercase + string.digits\n tokens = [random.choice(symbols) for _ in range(length)]\n _REVISION = ''.join(tokens)\n return _REVISION", "def firmware_version(self):\n return self._get_system_status()[\"firmware\"]", "def get_firmware_version():\r\n return utils.run('crossystem fwid').stdout.strip()", "def getversion(): # 3\n res,resargs = _msk.Env.getversion()\n if res != 0:\n raise Error(rescode(res),\"\")\n _major_return_value,_minor_return_value,_build_return_value,_revision_return_value = resargs\n return _major_return_value,_minor_return_value,_build_return_value,_revision_return_value", "def api_revision(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"api_revision\")", "def fpga_minor():\n return int, None", "def firmware_version(self):\n buf = (ctypes.c_char * self.MAX_BUF_SIZE)()\n self._dll.JLINKARM_GetFirmwareString(buf, self.MAX_BUF_SIZE)\n return ctypes.string_at(buf).decode()", "def get_revision(self) -> str:\n raise NotImplementedError", "def getversion():\n major_ = ctypes.c_int32()\n minor_ = ctypes.c_int32()\n revision_ = ctypes.c_int32()\n res = __library__.MSK_XX_getversion(ctypes.byref(major_),ctypes.byref(minor_),ctypes.byref(revision_))\n if res != 0:\n raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])\n major_ = major_.value\n _major_return_value = major_\n minor_ = minor_.value\n _minor_return_value = minor_\n revision_ = revision_.value\n _revision_return_value = revision_\n return (_major_return_value,_minor_return_value,_revision_return_value)", "def vcs_revision(self):\n filename = os.path.join(self.requirement.source_directory, '.hg_archival.txt')\n if os.path.isfile(filename):\n with open(filename) as handle:\n for line in handle:\n name, _, value = line.partition(':')\n if name.strip() == 'node':\n return value.strip()", "def get_version():\n return '%d.%d.%d' % version_info", "def get_reversion():\n return to_str(backend.get().af_get_revision())", "def get_issue_from_revision(revision):\n\n match = Update.REVISION_PATTERN.match(revision)\n if not 
match:\n raise UpdateException(f'unable to extract issue from \"{revision}\"')\n y, m, d = match.group(3), match.group(1), match.group(2)\n return int(y) * 10000 + int(m) * 100 + int(d)", "def get_machine_version():\n return get_file_content(\"/home/pi/.machineconfig/latest_version\")", "def dll_version(self):\n major = ctypes.c_uint32()\n minor = ctypes.c_uint32()\n revision = ctypes.c_uint8()\n\n result = self._lib.NRFJPROG_dll_version(ctypes.byref(major), ctypes.byref(minor), ctypes.byref(revision))\n if result != NrfjprogdllErr.SUCCESS:\n raise APIError(result)\n\n return major.value, minor.value, chr(revision.value)", "def get_hardware_version(self):\n cmd = protocol.GET_HARDWARE_VERSION\n response = self.__send_and_receive(cmd)\n\n value = self.__gen_response_value(response)\n if value:\n self.hardware_version = value[0][1:]\n else:\n return False", "def __get_arm_cpu_arch_revision(self) -> str:\n\n # CPU features for ARMv8 revisions.\n # From https://en.wikichip.org/wiki/arm/armv8#ARMv8_Extensions_and_Processor_Features\n rev1_features = [\"atomics\", \"asimdrdm\"]\n rev2_features = [\n \"fphp\", \"dcpop\", \"sha3\", \"sm3\", \"sm4\", \"asimddp\", \"sha512\", \"sve\"\n ]\n\n rev = \"ARMv8-A\"\n if any([f in self.cpu_features for f in rev1_features]):\n rev = \"ARMv8.1-A\"\n if any([f in self.cpu_features for f in rev2_features]):\n rev = \"ARMv8.2-A\"\n return rev", "def revision_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"revision_id\")", "def revision_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"revision_id\")" ]
[ "0.6889485", "0.6609553", "0.64131314", "0.6398734", "0.63915384", "0.63616455", "0.62834626", "0.62057483", "0.61732125", "0.612402", "0.608792", "0.60333174", "0.60129875", "0.5994466", "0.59919125", "0.59879756", "0.5885803", "0.58622473", "0.58610564", "0.586081", "0.5844105", "0.5828939", "0.58202195", "0.5820114", "0.58193254", "0.5816458", "0.58105534", "0.5796111", "0.5790938", "0.57891136" ]
0.810522
0
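The board-type mapping stated in the get_hardware_revision record above can be expressed directly in code. The revision numbers (2, 3 for type 1; 4, 5, 6, 15 for type 2) come from that description; everything else in this sketch is an illustrative assumption.

rev = pigpio.get_hardware_revision()
if rev in (2, 3):
    board_type = 1      # gpio 0 on P1-3, gpio 1 on P1-5, gpio 21 on P1-13
elif rev in (4, 5, 6, 15):
    board_type = 2      # gpio 2 on P1-3, gpio 3 on P1-5, gpios 28-31 on P5
else:
    board_type = None   # 0 is returned when the revision cannot be read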
Returns the pigpio software version.
def get_pigpio_version(): return _pigpio_command(_control, _PI_CMD_PIGPV, 0, 0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_version():\n\n version_string = version_from_versioneer()\n\n if not version_string:\n version_string = version_from_pip()\n\n return version_string", "def software_version(self) -> str:\n return self.data.get(Attribute.SOFTWARE_VERSION)", "def get_hardware_revision():\n return _pigpio_command(_control, _PI_CMD_HWVER, 0, 0)", "def get_version():\n return '%d.%d.%d' % version_info", "def get_version():\n return 1", "def software_version(self) -> str:\n return self.camera_info[\"main_sw_version\"]", "def version():\n cmd = \"{} -v\".format(_detect_os())\n out = __salt__[\"cmd.run\"](cmd).splitlines()\n ret = out[0].split(\": \")\n return ret[1]", "def get_version():\n ver = '0.0.0'\n req = restcall(0, 'config', 10.0)\n if req['text'] is not None:\n try: \n tree = ET.fromstring(req['text'])\n ver = tree.findall('app_version')[0].text\n if ver is None:\n ver = '0.0.0'\n _LOGGER.info(\"ISY: firmware version: %s\", ver)\n except ET.ParseError:\n _LOGGER.error(\"No version information found on ISY.\")\n return ver", "def get_version():\n click.echo(get_current_version_number())", "def version():\n return uname().version", "def version():\n return uname().version", "def get_version():\n return \"0.0.1 (prerelease prototype)\"", "def firmware_version(self):\n return self._get_system_status()[\"firmware\"]", "def version(self):\n data = self._ftdi.spi_read(self.VERSION_ADDR, len=1, burst='fixed')\n return data[0] & self.VERSION_MASK", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"version\")", "def version():\n return '%d.%d' % (sys.version_info[0], sys.version_info[1])", "def version(self) -> str:\n return pulumi.get(self, \"version\")", "def version(self) -> str:\n return pulumi.get(self, \"version\")", "def version(self) -> str:\n return pulumi.get(self, \"version\")", "def version(self) -> str:\n return pulumi.get(self, \"version\")", "def version():\n\n print(VERSION_CODE)", "def version(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"version\")", "def get_software_version(self):\n \n try:\n if self.product_info is None:\n self.product_info = self.connection.System.SystemInfo.\\\n get_product_information()\n return self.product_info['product_version']\n except:\n raise", "def hw_version(self) -> str | None:\n return self.status.get(\"FIRMWARE\")", "def os_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"os_version\")" ]
[ "0.73811704", "0.72887504", "0.7246847", "0.71916705", "0.71436334", "0.7046444", "0.70196176", "0.69799864", "0.69646806", "0.6942776", "0.6942776", "0.693798", "0.69212884", "0.68783", "0.68749297", "0.68749297", "0.68749297", "0.68749297", "0.68749297", "0.68642515", "0.6861743", "0.6859652", "0.6859652", "0.6859652", "0.6859652", "0.68529564", "0.6837061", "0.6826566", "0.67964923", "0.67932266" ]
0.8240598
0
Adds a list of pulses to the current waveform. Returns the new total number of pulses in the current waveform if OK, otherwise PI_TOO_MANY_PULSES.
def wave_add_generic(pulses): # pigpio message format # I p1 number of pulses # I p2 0 ## extension ## # III on/off/delay * number of pulses if len(pulses): msg = "" for p in pulses: msg += struct.pack("III", p.gpio_on, p.gpio_off, p.delay) extents = [msg] return _u2i(_pigpio_command_ext( _control, _PI_CMD_WVAG, len(pulses), 0, extents)) else: return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upload_pulsars():\n data_list = []\n import psrqpy\n query = psrqpy.QueryATNF().pandas\n for qid in range(len(query[\"PSRJ\"])):\n RA, Dec = sex2deg(query[\"RAJ\"][qid], query[\"DECJ\"][qid])\n dm = query[\"DM\"][qid]\n # Turn dm and period nans into Nones\n if np.isnan(dm):\n dm = None\n period = query[\"P0\"][qid]\n if np.isnan(period):\n period = None\n data_list.append({\"id\":qid,\n \"name\":query[\"PSRJ\"][qid],\n \"ra\":RA,\n \"dec\":Dec,\n \"period\":period,\n \"dm\":dm,\n \"new\":False,\n })\n upload_wrapper(data_list, 'pulsar')", "def pulses(waveform,template,tmax,thresh,deltat,tbef,tafter):\n print ' find pulses '\n wave = np.copy(waveform)\n ifig = 0\n\n tp = []\n ap = []\n plot = False\n lfront = int(np.argmax(template)) # length of template before the peak \n #lfront = 100\n lback = int(len(template) - lfront) # length of template after the peak\n \n ibeg = tbef + 0\n iend = tafter + 0\n \n while ibeg < iend:\n \n if plot:\n ifig += 1\n plt.figure(ifig)\n plt.plot(wave)\n \n wh = np.where(wave[ibeg:iend] > thresh)\n if len(wh[0])==0:\n break\n ith = int(wh[0][0] + ibeg)\n #print 'threshold crossing ', ith \n valmax = np.max(wave[ith:ith+deltat])\n #print wave[ith:ith+deltat]\n posmax = int(ith + np.argmax(wave[ith:ith+deltat]))\n tp.append(posmax)\n ap.append(valmax)\n #print 'pos ',posmax, ' value ',wave[posmax]\n if plot:\n ifig += 1\n print posmax,lfront,lback,ith,ith+deltat\n plt.figure(ifig)\n plt.plot(wave[posmax-lfront:posmax+lback])\n plt.plot(valmax*template)\n \n for i in range(min(len(template),len(wave)-posmax)):\n wave[posmax-lfront+i] += -valmax*template[i]\n \n ibeg += deltat\n if plot:\n plt.show()\n #print 'done pulses'\n \n return tp,ap", "def wave_get_pulses():\n return _u2i(_pigpio_command(_control, _PI_CMD_WVSP, 0, 0))", "def add_pulse_to_frame(self, frame_num, *pulse_data_args):\n\n for pulse_data in pulse_data_args:\n if pulse_data is not None:\n if frame_num not in self.frame_pulse_data.keys():\n self.frame_pulse_data[frame_num] = []\n self.frame_pulse_data[frame_num].append(pulse_data)", "def pulses_to_mps(pulses, period=10):\n return pulses / (period * 4) # there are 4 pulses per rotation of the annemometer", "def add_pulse(self, start_time, stop_time, dose) -> None:\n self.pulses[start_time, stop_time] = dose\n return", "def addPulse(self, overwrite=False, send=False, **kwargs):\n pulse = Pulse(pulseGenerator=self, **kwargs)\n if overwrite:\n for p in self.pulseList:\n if p.name == pulse.name:\n self.pulseList.remove(p)\n self.pulseList.append(pulse)\n # self.pulseList+=(pulse,)\n self.debugPrint(\"pulseAdded\")\n if send:\n self.preparePulseSequence()\n self.sendPulseSequence()", "def npulses(self):\n return self.header.pulse_count", "def wave_get_max_pulses():\n return _u2i(_pigpio_command(_control, _PI_CMD_WVSP, 2, 0))", "def appelPuls():\n try:\n if os.path.getsize('aspifile/puls.json'):\n subprocess.call('aspifile/aspipuls.py', shell=True)\n label['text'] = (\"Date: \" + textDate.get() +\" -- \"+ \"Nom: \" + textName.get() +\n \"\\nPulsations: \" + textPuls.get())\n except FileNotFoundError as errorgraph2:\n print('+ Sorry the Puls plot doesn\\'t work ! Data missing !', errorgraph2)\n label['text'] = \"Sorry the Puls plot doesn\\'t work ! 
Data missing !\"", "def _sanitize_pulses(self, pulses):\n if pulses[0].is_space is not True:\n raise ValueError(\"Pulse patterns must begin with a space\")\n if len(pulses) != 68:\n raise ValueError(f\"Pulse patterns must be 68 pulses long (1 space \"\n f\"+ 1 9ms burst + 1 4.5ms space + 64 message \"\n f\"pulses + 1 trailing burst). Received: \"\n f\"{len(pulses)}\")\n for idx in range(0, len(pulses), 2):\n if not (pulses[idx].is_space is True\n and pulses[idx+1].is_space is False):\n raise ValueError(f\"Pulse pattern does not alternate between \"\n f\"spaces and bursts beginning at index {idx}\")\n\n # remove all pulses not relevant to encoded message\n pulses = pulses[3:-1]\n\n for idx in range(0, len(pulses), 2): # bursts\n if not pulses[idx].is_small_gap():\n raise ValueError(f\"Burst at index {idx} does not match NEC\"\n f\"specifications ({pulses[idx]})\")\n for idx in range(1, len(pulses), 2): # spaces\n if not (pulses[idx].is_small_gap() or pulses[idx].is_large_gap()):\n raise ValueError(f\"Space at index {idx} does not match NEC \"\n f\"specifications ({pulses[idx]})\")\n\n return pulses", "def x_add_particles():\n particle_count_list = np.zeros(7)", "def add_next_pipulse_step(self):\n qubit = self.qubit\n\n adaptive = self.get_param_value('adaptive')\n transition = self.get_param_value('transition_name')\n settings = self.settings.copy({})\n\n if adaptive:\n # Retrieving T2_star and pi-pulse amplitude\n if transition == \"ge\":\n T2_star = qubit.T2_star() if qubit.T2_star() else 0\n amp180 = qubit.ge_amp180() if qubit.ge_amp180() else 0\n elif transition == \"ef\":\n T2_star = qubit.T2_star_ef() if qubit.T2_star_ef() else 0\n amp180 = qubit.ef_amp180() if qubit.ef_amp180() else 0\n else:\n raise ValueError('transition must either be \"ge\" or \"ef\"')\n\n # This has to be solved differently now\n # Amplitudes for Rabi\n # 1) if passed in init\n # 2) v_high based on current pi-pulse amplitude\n # 3) v_high based on default value\n # if rabi_amps is None:\n if amp180:\n rabi_settings = {\n 'Rabi': {\n 'v_max': amp180\n }\n }\n update_nested_dictionary(settings, rabi_settings)\n\n # Delays and artificial detuning for Ramsey\n # if ramsey_delays is None or artificial_detuning is None:\n # defining delta_t for Ramsey\n # 1) if passed in init\n # 2) based on T2_star\n # 3) based on default\n if self.get_param_value(\"use_T2_star\"):\n ramsey_settings = {\n 'Ramsey': {\n 'delta_t': T2_star\n }\n }\n update_nested_dictionary(settings, ramsey_settings)\n\n transition_name = self.get_param_value(\"transition_name\",\n qubit=self.qubit)\n pipulse_settings = {\n \"PiPulseCalibration\": {\n \"General\": {\n \"transition_name\": transition_name,\n }\n }\n }\n update_nested_dictionary(settings, pipulse_settings)\n\n self.add_step(\n PiPulseCalibration,\n 'pi_pulse_calibration',\n {\n 'settings': settings\n }\n )", "def process_pulse_sequence(\n self,\n qubits: dict,\n instrument_pulses: PulseSequence,\n navgs: int,\n nshots: int,\n repetition_duration: int,\n sweepers=None,\n ):\n if sweepers is None:\n sweepers = []\n sequencer: Sequencer\n sweeper: Sweeper\n\n # calculate the number of bins\n num_bins = nshots\n for sweeper in sweepers:\n num_bins *= len(sweeper.values)\n\n # estimate the execution time\n self._execution_time = navgs * num_bins * ((repetition_duration + 1000 * len(sweepers)) * 1e-9)\n\n port = \"o1\"\n # initialise the list of free sequencer numbers to include the default for each port {'o1': 0}\n self._free_sequencers_numbers = [self.DEFAULT_SEQUENCERS[port]] + [1, 2, 3, 4, 
5]\n\n # split the collection of instruments pulses by ports\n port_pulses: PulseSequence = instrument_pulses.get_channel_pulses(self._port_channel_map[port])\n\n # initialise the list of sequencers required by the port\n self._sequencers[port] = []\n\n if not port_pulses.is_empty:\n # split the collection of port pulses in non overlapping pulses\n non_overlapping_pulses: PulseSequence\n for non_overlapping_pulses in port_pulses.separate_overlapping_pulses():\n # each set of not overlapping pulses will be played by a separate sequencer\n # check sequencer availability\n if len(self._free_sequencers_numbers) == 0:\n raise Exception(\n f\"The number of sequencers requried to play the sequence exceeds the number available {self._device_num_sequencers}.\"\n )\n # get next sequencer\n sequencer = self._get_next_sequencer(\n port=port,\n frequency=self.get_if(non_overlapping_pulses[0]),\n qubits=qubits,\n qubit=non_overlapping_pulses[0].qubit,\n )\n # add the sequencer to the list of sequencers required by the port\n self._sequencers[port].append(sequencer)\n\n # make a temporary copy of the pulses to be processed\n pulses_to_be_processed = non_overlapping_pulses.shallow_copy()\n while not pulses_to_be_processed.is_empty:\n pulse: Pulse = pulses_to_be_processed[0]\n # attempt to save the waveforms to the sequencer waveforms buffer\n try:\n sequencer.waveforms_buffer.add_waveforms(pulse, self.ports[port].hardware_mod_en, sweepers)\n sequencer.pulses.add(pulse)\n pulses_to_be_processed.remove(pulse)\n\n # if there is not enough memory in the current sequencer, use another one\n except WaveformsBuffer.NotEnoughMemory:\n if len(pulse.waveform_i) + len(pulse.waveform_q) > WaveformsBuffer.SIZE:\n raise NotImplementedError(\n f\"Pulses with waveforms longer than the memory of a sequencer ({WaveformsBuffer.SIZE // 2}) are not supported.\"\n )\n if len(self._free_sequencers_numbers) == 0:\n raise Exception(\n f\"The number of sequencers requried to play the sequence exceeds the number available {self._device_num_sequencers}.\"\n )\n # get next sequencer\n sequencer = self._get_next_sequencer(\n port=port,\n frequency=self.get_if(non_overlapping_pulses[0]),\n qubits=qubits,\n qubit=non_overlapping_pulses[0].qubit,\n )\n # add the sequencer to the list of sequencers required by the port\n self._sequencers[port].append(sequencer)\n\n # update the lists of used and unused sequencers that will be needed later on\n self._used_sequencers_numbers = []\n for port in self._output_ports_keys:\n for sequencer in self._sequencers[port]:\n self._used_sequencers_numbers.append(sequencer.number)\n self._unused_sequencers_numbers = []\n for n in range(self._device_num_sequencers):\n if not n in self._used_sequencers_numbers:\n self._unused_sequencers_numbers.append(n)\n\n # generate and store the Waveforms dictionary, the Acquisitions dictionary, the Weights and the Program\n for port in self._output_ports_keys:\n for sequencer in self._sequencers[port]:\n pulses = sequencer.pulses\n program = sequencer.program\n\n ## pre-process sweepers ##\n # TODO: move qibolab sweepers preprocessing to qblox controller\n\n # attach a sweeper attribute to the pulse so that it is easily accesible by the code that generates\n # the pseudo-assembly program\n pulse = None\n for pulse in pulses:\n pulse.sweeper = None\n\n pulse_sweeper_parameters = [\n Parameter.frequency,\n Parameter.amplitude,\n Parameter.duration,\n Parameter.relative_phase,\n Parameter.start,\n ]\n\n for sweeper in sweepers:\n if sweeper.parameter in 
pulse_sweeper_parameters:\n # check if this sequencer takes an active role in the sweep\n if sweeper.pulses and set(sequencer.pulses) & set(sweeper.pulses):\n # plays an active role\n reference_value = None\n if sweeper.parameter == Parameter.frequency:\n if sequencer.pulses:\n reference_value = self.get_if(\n sequencer.pulses[0]\n ) # uses the frequency of the first pulse (assuming all same freq)\n if sweeper.parameter == Parameter.amplitude:\n for pulse in pulses:\n if pulse in sweeper.pulses:\n reference_value = pulse.amplitude # uses the amplitude of the first pulse\n if sweeper.parameter == Parameter.duration and pulse in sweeper.pulses:\n # for duration sweepers bake waveforms\n sweeper.qs = QbloxSweeper(\n program=program, type=QbloxSweeperType.duration, rel_values=pulse.idx_range\n )\n else:\n # create QbloxSweepers and attach them to qibolab sweeper\n if sweeper.type == SweeperType.OFFSET and reference_value:\n sweeper.qs = QbloxSweeper.from_sweeper(\n program=program, sweeper=sweeper, add_to=reference_value\n )\n elif sweeper.type == SweeperType.FACTOR and reference_value:\n sweeper.qs = QbloxSweeper.from_sweeper(\n program=program, sweeper=sweeper, multiply_to=reference_value\n )\n else:\n sweeper.qs = QbloxSweeper.from_sweeper(program=program, sweeper=sweeper)\n\n # finally attach QbloxSweepers to the pulses being swept\n sweeper.qs.update_parameters = True\n pulse.sweeper = sweeper.qs\n else:\n # does not play an active role\n sweeper.qs = QbloxSweeper(\n program=program,\n type=QbloxSweeperType.number,\n rel_values=range(len(sweeper.values)),\n name=sweeper.parameter.name,\n )\n\n # else: # qubit_sweeper_parameters\n # if sweeper.qubits and sequencer.qubit in [_.name for _ in sweeper.qubits]:\n # # plays an active role\n # if sweeper.parameter == Parameter.bias:\n # reference_value = self.ports[port].offset\n # # create QbloxSweepers and attach them to qibolab sweeper\n # if sweeper.type == SweeperType.ABSOLUTE:\n # sweeper.qs = QbloxSweeper.from_sweeper(\n # program=program, sweeper=sweeper, add_to=-reference_value\n # )\n # elif sweeper.type == SweeperType.OFFSET:\n # sweeper.qs = QbloxSweeper.from_sweeper(program=program, sweeper=sweeper)\n # elif sweeper.type == SweeperType.FACTOR:\n # raise Exception(\"SweeperType.FACTOR for Parameter.bias not supported\")\n # sweeper.qs.update_parameters = True\n # else:\n # # does not play an active role\n # sweeper.qs = QbloxSweeper(\n # program=program, type=QbloxSweeperType.number, rel_values=range(len(sweeper.values)),\n # name = sweeper.parameter.name\n # )\n else:\n # does not play an active role\n sweeper.qs = QbloxSweeper(\n program=program,\n type=QbloxSweeperType.number,\n rel_values=range(len(sweeper.values)),\n name=sweeper.parameter.name,\n )\n\n # # FIXME: for qubit sweepers (Parameter.bias, Parameter.attenuation, Parameter.gain), the qubit\n # # information alone is not enough to determine what instrument parameter is to be swept.\n # # For example port gain, both the drive and readout ports have gain parameters.\n # # Until this is resolved, and since bias is only implemented with QCMs offset, this instrument will\n # # never take an active role in those sweeps.\n\n # Waveforms\n for index, waveform in enumerate(sequencer.waveforms_buffer.unique_waveforms):\n sequencer.waveforms[waveform.serial] = {\"data\": waveform.data.tolist(), \"index\": index}\n\n # Acquisitions\n for acquisition_index, pulse in enumerate(sequencer.pulses.ro_pulses):\n sequencer.acquisitions[pulse.serial] = {\"num_bins\": num_bins, \"index\": 
acquisition_index}\n\n # Add scope_acquisition to default sequencer\n if sequencer.number == self.DEFAULT_SEQUENCERS[port]:\n sequencer.acquisitions[\"scope_acquisition\"] = {\"num_bins\": 1, \"index\": acquisition_index + 1}\n\n # Program\n minimum_delay_between_instructions = 4\n\n # Active reset is not fully tested yet\n active_reset = False\n active_reset_address = 1\n active_reset_pulse_idx_I = 1\n active_reset_pulse_idx_Q = 1\n\n sequence_total_duration = pulses.finish # the minimum delay between instructions is 4ns\n time_between_repetitions = repetition_duration - sequence_total_duration\n assert time_between_repetitions > minimum_delay_between_instructions\n # TODO: currently relaxation_time needs to be greater than acquisition_hold_off\n # so that the time_between_repetitions is equal to the sequence_total_duration + relaxation_time\n # to be compatible with th erest of the platforms, change it so that time_between_repetitions\n # is equal to pulsesequence duration + acquisition_hold_off if relaxation_time < acquisition_hold_off\n\n # create registers for key variables\n # nshots is used in the loop that iterates over the number of shots\n nshots_register = Register(program, \"nshots\")\n # during a sweep, each shot is saved in the bin bin_n\n bin_n = Register(program, \"bin_n\")\n # navgs is used in the loop of hardware averages\n navgs_register = Register(program, \"navgs\")\n\n header_block = Block(\"setup\")\n if active_reset:\n header_block.append(\n f\"set_latch_en {active_reset_address}, 4\", f\"monitor triggers on address {active_reset_address}\"\n )\n\n body_block = Block()\n\n body_block.append(f\"wait_sync {minimum_delay_between_instructions}\")\n if self.ports[\"i1\"].hardware_demod_en or self.ports[\"o1\"].hardware_mod_en:\n body_block.append(\"reset_ph\")\n body_block.append_spacer()\n\n pulses_block = Block(\"play_and_acquire\")\n # Add an initial wait instruction for the first pulse of the sequence\n initial_wait_block = wait_block(\n wait_time=pulses[0].start, register=Register(program), force_multiples_of_four=True\n )\n pulses_block += initial_wait_block\n\n for n in range(pulses.count):\n if pulses[n].sweeper and pulses[n].sweeper.type == QbloxSweeperType.start:\n pulses_block.append(f\"wait {pulses[n].sweeper.register}\")\n\n if self.ports[\"o1\"].hardware_mod_en:\n # # Set frequency\n # _if = self.get_if(pulses[n])\n # pulses_block.append(f\"set_freq {convert_frequency(_if)}\", f\"set intermediate frequency to {_if} Hz\")\n\n # Set phase\n if pulses[n].sweeper and pulses[n].sweeper.type == QbloxSweeperType.relative_phase:\n pulses_block.append(f\"set_ph {pulses[n].sweeper.register}\")\n else:\n pulses_block.append(\n f\"set_ph {convert_phase(pulses[n].relative_phase)}\",\n comment=f\"set relative phase {pulses[n].relative_phase} rads\",\n )\n\n if pulses[n].type == PulseType.READOUT:\n delay_after_play = self.ports[\"i1\"].acquisition_hold_off\n\n if len(pulses) > n + 1:\n # If there are more pulses to be played, the delay is the time between the pulse end and the next pulse start\n delay_after_acquire = (\n pulses[n + 1].start - pulses[n].start - self.ports[\"i1\"].acquisition_hold_off\n )\n else:\n delay_after_acquire = sequence_total_duration - pulses[n].start\n time_between_repetitions = (\n repetition_duration - sequence_total_duration - self.ports[\"i1\"].acquisition_hold_off\n )\n assert time_between_repetitions > 0\n\n if delay_after_acquire < minimum_delay_between_instructions:\n raise Exception(\n f\"The minimum delay after starting acquisition is 
{minimum_delay_between_instructions}ns.\"\n )\n\n if pulses[n].sweeper and pulses[n].sweeper.type == QbloxSweeperType.duration:\n RI = pulses[n].sweeper.register\n if pulses[n].type == PulseType.FLUX:\n RQ = pulses[n].sweeper.register\n else:\n RQ = pulses[n].sweeper.aux_register\n\n pulses_block.append(\n f\"play {RI},{RQ},{delay_after_play}\", # FIXME delay_after_play won't work as the duration increases\n comment=f\"play pulse {pulses[n]} sweeping its duration\",\n )\n else:\n # Prepare play instruction: play wave_i_index, wave_q_index, delay_next_instruction\n pulses_block.append(\n f\"play {sequencer.waveforms_buffer.unique_waveforms.index(pulses[n].waveform_i)},{sequencer.waveforms_buffer.unique_waveforms.index(pulses[n].waveform_q)},{delay_after_play}\",\n comment=f\"play waveforms {pulses[n]}\",\n )\n\n # Prepare acquire instruction: acquire acquisition_index, bin_index, delay_next_instruction\n if active_reset:\n pulses_block.append(f\"acquire {pulses.ro_pulses.index(pulses[n])},{bin_n},4\")\n pulses_block.append(f\"latch_rst {delay_after_acquire + 300 - 4}\")\n else:\n pulses_block.append(\n f\"acquire {pulses.ro_pulses.index(pulses[n])},{bin_n},{delay_after_acquire}\"\n )\n\n else:\n # Calculate the delay_after_play that is to be used as an argument to the play instruction\n if len(pulses) > n + 1:\n # If there are more pulses to be played, the delay is the time between the pulse end and the next pulse start\n delay_after_play = pulses[n + 1].start - pulses[n].start\n else:\n delay_after_play = sequence_total_duration - pulses[n].start\n\n if delay_after_play < minimum_delay_between_instructions:\n raise Exception(\n f\"The minimum delay between the start of two pulses in the same channel is {minimum_delay_between_instructions}ns.\"\n )\n\n if pulses[n].sweeper and pulses[n].sweeper.type == QbloxSweeperType.duration:\n RI = pulses[n].sweeper.register\n if pulses[n].type == PulseType.FLUX:\n RQ = pulses[n].sweeper.register\n else:\n RQ = pulses[n].sweeper.aux_register\n\n pulses_block.append(\n f\"play {RI},{RQ},{delay_after_play}\", # FIXME delay_after_play won't work as the duration increases\n comment=f\"play pulse {pulses[n]} sweeping its duration\",\n )\n else:\n # Prepare play instruction: play wave_i_index, wave_q_index, delay_next_instruction\n pulses_block.append(\n f\"play {sequencer.waveforms_buffer.unique_waveforms.index(pulses[n].waveform_i)},{sequencer.waveforms_buffer.unique_waveforms.index(pulses[n].waveform_q)},{delay_after_play}\",\n comment=f\"play waveforms {pulses[n]}\",\n )\n\n body_block += pulses_block\n body_block.append_spacer()\n\n if active_reset:\n final_reset_block = Block()\n final_reset_block.append(f\"set_cond 1, {active_reset_address}, 0, 4\", comment=\"active reset\")\n final_reset_block.append(f\"play {active_reset_pulse_idx_I}, {active_reset_pulse_idx_Q}, 4\", level=1)\n final_reset_block.append(f\"set_cond 0, {active_reset_address}, 0, 4\")\n else:\n final_reset_block = wait_block(\n wait_time=time_between_repetitions, register=Register(program), force_multiples_of_four=False\n )\n final_reset_block.append_spacer()\n final_reset_block.append(f\"add {bin_n}, 1, {bin_n}\", \"increase bin counter\")\n\n body_block += final_reset_block\n\n footer_block = Block(\"cleaup\")\n footer_block.append(f\"stop\")\n\n # wrap pulses block in sweepers loop blocks\n for sweeper in sweepers:\n body_block = sweeper.qs.block(inner_block=body_block)\n\n nshots_block: Block = loop_block(\n start=0, stop=nshots, step=1, register=nshots_register, block=body_block\n 
)\n nshots_block.prepend(f\"move 0, {bin_n}\", \"reset bin counter\")\n nshots_block.append_spacer()\n\n navgs_block = loop_block(start=0, stop=navgs, step=1, register=navgs_register, block=nshots_block)\n program.add_blocks(header_block, navgs_block, footer_block)\n\n sequencer.program = repr(program)", "def test_pulse_counter_loop(self):\n self.l.output(conf_d=0x01, state_d=0x0)\n self.l.count(reset=True)\n t10, t20 = 0.001231, 0.002063\n errmask, t1, t2 = self.l.pulse(t1=t10, t2=t20,\n lines=0x01, num_pulses=100)\n self.assertTrue(abs(t1-t10)/t10 < .1)\n self.assertTrue(abs(t2-t20)/t20 < .1)\n c, t = self.l.count()\n self.assertEqual(c, 100,\n \"counted %g pulses\" % c)", "def addPol(*pol):\n\n sum = np.zeros((1,))\n for p in pol:\n sum = polyadd(sum, p)\n\n return sum", "def pulse(amplitude, onsets, width, t_stop, baseline=0.0):\n times = [0]\n amps = [baseline]\n for onset in onsets:\n times += [onset, onset + width]\n amps += [amplitude, baseline]\n times += [t_stop]\n amps += [baseline]\n return np.array(times), np.array(amps)", "def _get_pulse_shaping_waveform(self):\n self.pulse_shaping_list = []\n # Make the rise time be 3.3333% if the dot time.\n rise_time_in_msec = 0.03333333333333 * self.dot_time_in_msec\n # Limit the rise time to 2 milliseconds.\n if rise_time_in_msec > 0.002:\n rise_time_in_msec = 0.002\n rising_falling_count = int(rise_time_in_msec * self.sample_rate)\n step = math.pi / rising_falling_count\n # The first value is zero, so skip that value.\n # The last value is 1.0, so skip that value too.\n for i in range(1, rising_falling_count - 1):\n gain = 0.5 * (1.0 - math.cos(step * i))\n self.pulse_shaping_list.append(gain)", "def playlist_add(nums, playlist):\n nums = _parse_multi(nums)\n\n if not g.userpl.get(playlist):\n playlist = playlist.replace(\" \", \"-\")\n g.userpl[playlist] = Playlist(playlist)\n\n for songnum in nums:\n g.userpl[playlist].songs.append(g.model.songs[songnum - 1])\n dur = g.userpl[playlist].duration\n f = (len(nums), playlist, g.userpl[playlist].size, dur)\n g.message = F('added to saved pl') % f\n\n if nums:\n save_to_file()\n\n g.content = generate_songlist_display()", "def add_many(self, pair_list):\n\n for pair in pair_list:\n plug = PlugLead(pair)\n self.check_conflicts(plug)\n self.plugleads.append(plug)", "def add(self, signal_list):\n result = []\n for signals in signal_list:\n result.append(\n signals * signal.blackmanharris(\n len(signals),\n sym=False\n )\n )\n return result", "def _concatenate_pulses(\n self, pulse_instructions, scheduled_start_time, num_controls\n ):\n min_step_size = np.inf\n # Concatenate tlist and coeffs for each control pulses\n compiled_tlist = [[] for tmp in range(num_controls)]\n compiled_coeffs = [[] for tmp in range(num_controls)]\n for pulse_ind in range(num_controls):\n last_pulse_time = 0.0\n for start_time, tlist, coeff in pulse_instructions[pulse_ind]:\n # compute the gate time, step size and coeffs\n # according to different pulse mode\n (\n gate_tlist,\n coeffs,\n step_size,\n pulse_mode,\n ) = self._process_gate_pulse(start_time, tlist, coeff)\n min_step_size = min(step_size, min_step_size)\n\n if abs(last_pulse_time) < step_size * 1.0e-6: # if first pulse\n compiled_tlist[pulse_ind].append([0.0])\n if pulse_mode == \"continuous\":\n compiled_coeffs[pulse_ind].append([0.0])\n # for discrete pulse len(coeffs) = len(tlist) - 1\n\n # If there is idling time between the last pulse and\n # the current one, we need to add zeros in between.\n if np.abs(start_time - last_pulse_time) > step_size * 
1.0e-6:\n idling_tlist = self._process_idling_tlist(\n pulse_mode, start_time, last_pulse_time, step_size\n )\n compiled_tlist[pulse_ind].append(idling_tlist)\n compiled_coeffs[pulse_ind].append(\n np.zeros(len(idling_tlist))\n )\n\n # Add the gate time and coeffs to the list.\n execution_time = gate_tlist + start_time\n last_pulse_time = execution_time[-1]\n compiled_tlist[pulse_ind].append(execution_time)\n compiled_coeffs[pulse_ind].append(coeffs)\n\n final_time = np.max([tlist[-1][-1] for tlist in compiled_tlist])\n for pulse_ind in range(num_controls):\n if not compiled_tlist[pulse_ind]:\n continue\n last_pulse_time = compiled_tlist[pulse_ind][-1][-1]\n if np.abs(final_time - last_pulse_time) > min_step_size * 1.0e-6:\n idling_tlist = self._process_idling_tlist(\n pulse_mode, final_time, last_pulse_time, min_step_size\n )\n compiled_tlist[pulse_ind].append(idling_tlist)\n compiled_coeffs[pulse_ind].append(np.zeros(len(idling_tlist)))\n\n for i in range(num_controls):\n if not compiled_coeffs[i]:\n compiled_tlist[i] = None\n compiled_coeffs[i] = None\n else:\n compiled_tlist[i] = np.concatenate(compiled_tlist[i])\n compiled_coeffs[i] = np.concatenate(compiled_coeffs[i])\n return compiled_tlist, compiled_coeffs", "def _concatenate_pulses(\n self, pulse_instructions, scheduled_start_time, num_controls):\n # Concatenate tlist and coeffs for each control pulses\n compiled_tlist = [[] for tmp in range(num_controls)]\n compiled_coeffs = [[] for tmp in range(num_controls)]\n for pulse_ind in range(num_controls):\n last_pulse_time = 0.\n for start_time, tlist, coeff in pulse_instructions[pulse_ind]:\n # compute the gate time, step size and coeffs\n # according to different pulse mode\n gate_tlist, coeffs, step_size, pulse_mode = \\\n self._process_gate_pulse(start_time, tlist, coeff)\n\n if abs(last_pulse_time) < step_size * 1.0e-6: # if first pulse\n compiled_tlist[pulse_ind].append([0.]) \n if pulse_mode == \"continuous\":\n compiled_coeffs[pulse_ind].append([0.])\n # for discrete pulse len(coeffs) = len(tlist) - 1\n\n # If there is idling time between the last pulse and\n # the current one, we need to add zeros in between.\n if np.abs(start_time - last_pulse_time) > step_size * 1.0e-6:\n idling_tlist = self._process_idling_tlist(\n pulse_mode, start_time, last_pulse_time, step_size)\n compiled_tlist[pulse_ind].append(idling_tlist)\n compiled_coeffs[pulse_ind].append(np.zeros(len(idling_tlist)))\n\n # Add the gate time and coeffs to the list.\n execution_time = gate_tlist + start_time\n last_pulse_time = execution_time[-1]\n compiled_tlist[pulse_ind].append(execution_time)\n compiled_coeffs[pulse_ind].append(coeffs)\n\n for i in range(num_controls):\n if not compiled_coeffs[i]:\n compiled_tlist[i] = None\n compiled_coeffs[i] = None\n else:\n compiled_tlist[i] = np.concatenate(compiled_tlist[i])\n compiled_coeffs[i] = np.concatenate(compiled_coeffs[i])\n return compiled_tlist, compiled_coeffs", "def build_pulse_waveform(startper,endper):\r\n mywaveform = numpy.zeros(100, dtype=numpy.int)\r\n if startper > endper:\r\n mywaveform[0:endper]=1\r\n mywaveform[startper:100]=1\r\n else:\r\n mywaveform[startper:endper]=1 \r\n return mywaveform", "def add(self, p):\n self._pumps.add(p)", "def append_procedures(self, procedures):\n assert(isinstance(procedures, list))\n self.__procedures.extend(procedures)", "def update(self, wnd, amps): \n tspan = wnd.timespan\n new_tones = []\n for d in self.tone_data:\n \n tone_amps = [amps[id] for id in d.ids]\n tone_amp_active = [a >= self.min_tone_amp for a in 
tone_amps]\n tone_amp_range = abs(np.max(tone_amps) - np.min(tone_amps))\n\n if np.all(tone_amp_active) and tone_amp_range <= self.max_inter_tone_amp:\n #print(\"{} - {}\".format([amps[id] for id in data['ids']], self.tones[i]['sym']))\n # All required frequencies for this tone are present\n d.on.union(tspan)\n if d.on.duration >= self.min_presence and not d.reported:\n # Even if tone stays active, won't be reported again before at least min_pause time has passed.\n new_tones.append(d.sym)\n d.reported = True\n d.off.reset()\n else:\n # At least one required frequency is not present\n if d.reported:\n d.off.union(tspan)\n if d.off.duration >= self.min_pause:\n d.reported = False\n d.on.reset()\n \n return new_tones", "def hasPulse(self):\n return True if self.pulses is not None else False", "def process_pulse_noise(pulses, noise_list, dims):\n noisy_pulses = deepcopy(pulses)\n for noise in noise_list:\n if isinstance(noise, ControlAmpNoise):\n noisy_pulses = noise.get_noisy_dynamics(noisy_pulses)\n elif isinstance(noise, UserNoise):\n noisy_pulses = noise.get_noisy_dynamics(pulses, dims)\n return noisy_pulses", "def analyze_wfs_no_png(self, n_bsl, pic_name, peak_height=0.001, peak_prominences=0.0001):\n\n print(\"---------------------------------\")\n print(\"Analyzing waveforms to get maxima\")\n print(\"---------------------------------\")\n\n # Creo una progress bar per rendere piu' fruibile visivamente il programma\n bar = progressbar.ProgressBar(maxval=self.number_of_events,\n widgets=[progressbar.Bar(\"=\", \"[\", \"]\"), \" \", progressbar.Percentage()])\n bar.start()\n counter = 0\n peaks_temp = pd.DataFrame()\n num_fig = 0\n print(\"Events: \"+str(len(self.table_sipm_time['ev'])))\n # Ora faccio un loop sugli eventi..\n for event in self.table_sipm_time['ev']:\n\n # Creo un np.array con gli indici della singola waveform..\n wf_idx = [event*self.points_per_wf, event *\n self.points_per_wf+self.points_per_wf]\n # ..i tempi di ciascun punto..\n wf_time = self.table_sipm_time['t'].iloc[event] + \\\n self.table_sipm_wf['TIME'][int(wf_idx[0]):int(wf_idx[1])]\n # ..e i valori del segnale di ciascun ppunto\n wf_ch = - \\\n self.table_sipm_wf['CH1'][int(wf_idx[0]):int(wf_idx[1])]\n\n # Per trovare la baseline, faccio un fit polinomiale di grado 0..\n # ..su un numero finito di punti iniziali, specificato dall'utente..\n # ..poi la salvo internamente alla classe\n self.baseline = np.polyfit(\n wf_time[0:n_bsl], wf_ch[0:n_bsl], 0)[0]\n # Voglio anche disegnarla sui plot, quindi mi creo una lista di x e di y..\n # ..nello spazio della waveform\n bsl_time = wf_time[0:n_bsl]\n bsl_ch = [self.baseline] * n_bsl\n\n # Per trovre i picchi, uso la funzione find_peaks di scipy.signal\n # I valori di height e prominence sono specificati dall'utente..\n # ..e scalti per selezionare tutti i picchi senza prendere rumore\n peaks, _ = sp.find_peaks(\n wf_ch, height=peak_height, prominence=peak_prominences)\n\n peaks_temp = pd.concat([peaks_temp, pd.DataFrame(\n {'t': wf_time.iloc[peaks], 'A': wf_ch.iloc[peaks]-self.baseline})], ignore_index=True)\n bar.update(counter+1)\n counter += 1\n\n # I parametri dei picchi sono quindi salvati nella tabella finale dei risultati\n self.wf_peaks = pd.concat(\n [self.wf_peaks, peaks_temp], ignore_index=True)\n\n bar.finish()\n print(\"---------------------------------\")\n print(\"Waveform analysis completed!\")\n # Devo ora ricavare di nuovo i Dt dai tempi assoluti, utilizzando la funzione diff()..\n self.wf_peaks['dt'] = self.wf_peaks['t'].diff()\n # ..e scartando il 
primo valore (che non ha un Dt)\n self.wf_peaks = self.wf_peaks.iloc[1:]\n print('Found {:d} peaks in waveforms\\n'.format(len(self.wf_peaks)))" ]
[ "0.5619221", "0.5339958", "0.52791715", "0.52376866", "0.5214381", "0.5122932", "0.50972664", "0.5062952", "0.4996337", "0.49510646", "0.49477738", "0.4945465", "0.4872589", "0.48549965", "0.48376894", "0.48145306", "0.47974762", "0.47694004", "0.47648665", "0.47475195", "0.47177935", "0.46860677", "0.46378922", "0.46040308", "0.45819706", "0.45754078", "0.45705423", "0.45575008", "0.4516732", "0.45070022" ]
0.65887535
0
Adds a waveform representing serial data to the existing waveform (if any). The serial data starts offset microseconds from the start of the waveform. Returns the new total number of pulses in the current waveform if OK, otherwise PI_BAD_USER_GPIO, PI_BAD_WAVE_BAUD, PI_TOO_MANY_CHARS, PI_BAD_SER_OFFSET, or PI_TOO_MANY_PULSES.
def wave_add_serial(user_gpio, baud, offset, data):
    # pigpio message format

    # I p1 user_gpio
    # I p2 len(data)
    ## extension ##
    # I baud
    # I offset
    # s data
    if len(data):
        extents = [struct.pack("I", baud), struct.pack("I", offset), data]
        return _u2i(_pigpio_command_ext(
            _control, _PI_CMD_WVAS, user_gpio, len(data), extents))
    else:
        return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wave_add_generic(pulses):\n # pigpio message format\n\n # I p1 number of pulses\n # I p2 0\n ## extension ##\n # III on/off/delay * number of pulses\n if len(pulses):\n msg = \"\"\n for p in pulses:\n msg += struct.pack(\"III\", p.gpio_on, p.gpio_off, p.delay)\n extents = [msg]\n return _u2i(_pigpio_command_ext(\n _control, _PI_CMD_WVAG, len(pulses), 0, extents))\n else:\n return 0", "def wave_add_new():\n return _u2i(_pigpio_command(_control, _PI_CMD_WVNEW, 0, 0))", "def wave(pi, gpio, hz, secs, on=1, offset=0):\n micros_left = int(secs * 1000000)\n transitions = int(2 * hz * secs)\n micros = micros_left / transitions\n\n if (offset < 0) or (offset > micros):\n print(\"Illegal offset {} for hz {}\".format(offset, hz))\n exit()\n\n pi.set_mode(gpio, pigpio.OUTPUT)\n\n wf = [] # Empty waveform.\n\n if offset:\n wf.append(pigpio.pulse(0, 0, offset))\n micros_left -= micros\n last_micros = micros - offset\n transitions -= 1\n\n for t in range(transitions, 0, -1):\n micros = micros_left / t\n if (t & 1) == (on & 1):\n wf.append(pigpio.pulse(0, 1<<gpio, micros))\n else:\n wf.append(pigpio.pulse(1<<gpio, 0, micros))\n micros_left -= micros\n\n if offset:\n if on:\n wf.append(pigpio.pulse(1<<gpio, 0, last_micros))\n else:\n wf.append(pigpio.pulse(0, 1<<gpio, last_micros))\n\n pi.wave_add_generic(wf)\n pi.wave_send_repeat(pi.wave_create())", "def _pulse_width_record(self, pin):\n self._time = time.ticks_us()\n if self._prev_time == 0:\n self._prev_time = self._time\n return\n self.pulse_buffer.append(self._time - self._prev_time)\n self._prev_time = self._time\n self.lenth = self.lenth + 1", "def AddWave(self, wave_data):\n wave = OpBasedWave(wave_data, self)\n self._waves[wave.GetId()] = wave\n return wave", "def record():\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, input_device_index=0, channels=1, rate=RATE, input=True, output=True, frames_per_buffer=CHUNK_SIZE)\n num_silent = 0\n snd_started = False\n\n r = array('h')\n while 1:\n snd_data = array('h', stream.read(CHUNK_SIZE, exception_on_overflow = False))\n if byteorder == 'big':\n snd_data.byteswap()\n r.extend(snd_data)\n\n silent = is_silent(snd_data)\n if silent and snd_started:\n num_silent += 1\n elif not silent and not snd_started:\n print(\"Sound started.\")\n snd_started = True\n\n if snd_started and num_silent> 10:\n break\n\n sample_width = p.get_sample_size(FORMAT)\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n r = normalize(r)\n #r = trim(r)\n #r = add_silence(r, 0.5)\n return sample_width, r", "def spectrum_misc(f):\n\n end = False\n while not end:\n try:\n line = f.readline().split()\n wavnew = [float(w) for w in line]\n wav = np.append(wav, wavnew)\n prevwav = wavnew[-1]\n\n except BaseException:\n end = True\n aflux = f.readlines()\n for line in aflux:\n line = re.sub(r\"-10\\d\", \"e-100\", line)\n flux = np.append(flux, line.rstrip().split())\n\n wav, flux = np.array(wav), np.array(flux)\n return wav, flux", "def add_waveform_analog(self):\r\n # make sure that the square wave tab is active now\r\n channel_keyword = self.current_Analog_channel.currentText()\r\n\r\n #----------------------Square waves-----------------------------------\r\n if self.wavetabs.currentIndex() == 0:\r\n \r\n self.waveform_data_dict[channel_keyword] = self.generate_analog(channel_keyword)\r\n self.generate_graphy(channel_keyword, self.waveform_data_dict[channel_keyword]) \r\n \r\n #-------------------------Ramp waves----------------------------------\r\n if self.wavetabs.currentIndex() == 1:\r\n \r\n 
self.waveform_data_dict[channel_keyword] = self.generate_ramp(channel_keyword)\r\n self.generate_graphy(channel_keyword, self.waveform_data_dict[channel_keyword]) \r\n \r\n #------------------------Photo cycle----------------------------------\r\n if self.wavetabs.currentIndex() == 4:\r\n self.waveform_data_dict[channel_keyword] = self.generate_photocycle(channel_keyword)\r\n self.generate_graphy(channel_keyword, self.waveform_data_dict[channel_keyword]) \r\n \r\n #----------------------------Galvo scanning---------------------------\r\n if self.wavetabs.currentIndex() == 2:\r\n\r\n if self.galvos_tabs.currentIndex() == 0:\r\n\r\n self.waveform_data_dict[channel_keyword] = self.generate_galvos()\r\n self.generate_graphy(channel_keyword, self.waveform_data_dict[channel_keyword][1, :])\r\n \r\n elif self.galvos_tabs.currentIndex() == 1:# For contour\r\n \r\n self.waveform_data_dict['galvos_contour'] = self.generate_contour_for_waveform()\r\n self.generate_graphy('galvos_contour', self.waveform_data_dict['galvos_contour'][1, :])", "def fp_wavelength_sol_new(p, loc):\n func_name = __NAME__ + '.fp_wavelength_sol_new()'\n # get parameters from p\n dopd0 = p['IC_FP_DOPD0']\n fit_deg = p['IC_FP_FIT_DEGREE']\n fp_large_jump = p['IC_FP_LARGE_JUMP']\n n_ord_start_fp = p['IC_FP_N_ORD_START']\n n_ord_final_fp = p['IC_FP_N_ORD_FINAL']\n cm_ind = p['IC_WAVE_FP_CM_IND']\n\n # find FP lines\n loc = find_fp_lines_new(p, loc)\n all_lines_2 = loc['ALL_LINES_2']\n # set up storage\n llpos_all, xxpos_all, ampl_all = [], [], []\n m_fp_all, weight_bl_all, order_rec_all, dopd_all = [], [], [], []\n ll_prev, m_prev = np.array([]), np.array([])\n # loop through the orders from red to blue\n for order_num in range(n_ord_final_fp, n_ord_start_fp - 1, -1):\n # define storage\n floc = dict()\n # select the lines in the order\n gg = loc['ORDPEAK'] == order_num\n # store the initial wavelengths of the lines\n # floc['llpos'] = np.polynomial.chebyshev.chebval(\n # loc['XPEAK'][gg],\n # loc['LITTROW_EXTRAP_PARAM_1'][order_num])\n floc['llpos'] = np.polyval(\n loc['LITTROW_EXTRAP_PARAM_1'][order_num][::-1],\n loc['XPEAK'][gg])\n # store the pixel positions of the lines\n floc['xxpos'] = loc['XPEAK'][gg]\n # get the median pixel difference between successive lines\n # (to check for gaps)\n xxpos_diff_med = np.nanmedian(floc['xxpos'][1:] - floc['xxpos'][:-1])\n # store the amplitudes of the lines\n floc['ampl'] = loc['AMPPEAK'][gg]\n # store the values of the blaze at the pixel positions of the lines\n floc['weight_bl'] = np.zeros_like(floc['llpos'])\n # get and normalize blaze for the order\n nblaze = loc['BLAZE'][order_num] / np.nanmax(loc['BLAZE'][order_num])\n for it in range(1, len(floc['llpos'])):\n floc['weight_bl'][it] = nblaze[int(np.round(floc['xxpos'][it]))]\n # store the order numbers\n floc['order_rec'] = loc['ORDPEAK'][gg]\n # set up storage for line numbers\n mpeak = np.zeros_like(floc['llpos'])\n # line number for the last (reddest) line of the order (by FP equation)\n mpeak[-1] = int(dopd0 / floc['llpos'][-1])\n # calculate successive line numbers\n for it in range(len(floc['llpos']) - 2, -1, -1):\n # check for gap in x positions\n flocdiff = floc['xxpos'][it + 1] - floc['xxpos'][it]\n lowcond = xxpos_diff_med - (0.25 * xxpos_diff_med)\n highcond = xxpos_diff_med + (0.25 * xxpos_diff_med)\n if lowcond < flocdiff < highcond:\n # no gap: add 1 to line number of previous line\n mpeak[it] = mpeak[it + 1] + 1\n # if there is a gap, fix it\n else:\n # get line x positions\n flocx0 = floc['xxpos'][it]\n flocx1 = 
floc['xxpos'][it + 1]\n # get line wavelengths\n floc0 = floc['llpos'][it]\n floc1 = floc['llpos'][it + 1]\n # estimate the number of peaks missed\n m_offset = int(np.round((flocx1 - flocx0) / xxpos_diff_med))\n # add to m of previous peak\n mpeak[it] = mpeak[it + 1] + m_offset\n # verify there's no dopd jump, fix if present\n dopd_1 = (mpeak[it] * floc0 - dopd0) * 1.e-3\n dopd_2 = (mpeak[it + 1] * floc1 - dopd0) * 1.e-3\n # do loops to check jumps\n if dopd_1 - dopd_2 > fp_large_jump:\n while (dopd_1 - dopd_2) > fp_large_jump:\n mpeak[it] = mpeak[it] - 1\n dopd_1 = (mpeak[it] * floc0 - dopd0) * 1.e-3\n dopd_2 = (mpeak[it + 1] * floc1 - dopd0) * 1.e-3\n elif dopd_1 - dopd_2 < -fp_large_jump:\n while (dopd_1 - dopd_2) < -fp_large_jump:\n mpeak[it] = mpeak[it] + 1\n dopd_1 = (mpeak[it] * floc0 - dopd0) * 1.e-3\n dopd_2 = (mpeak[it + 1] * floc1 - dopd0) * 1.e-3\n # determination of observed effective cavity width\n dopd_t = mpeak * floc['llpos']\n # store m and d\n floc['m_fp'] = mpeak\n floc['dopd_t'] = dopd_t\n # for orders other than the reddest, attempt to cross-match\n if order_num != n_ord_final_fp:\n # check for overlap\n if floc['llpos'][cm_ind] > ll_prev[0]:\n # find closest peak in overlap and get its m value\n ind = np.abs(ll_prev - floc['llpos'][cm_ind]).argmin()\n # the peak matching the reddest may not always be found!!\n # define maximum permitted difference\n llpos_diff_med = np.nanmedian(\n floc['llpos'][1:] - floc['llpos'][:-1])\n # print(llpos_diff_med)\n # print(abs(ll_prev[ind] - floc['llpos'][-1]))\n # check if the difference is over the limit\n if abs(ll_prev[ind] - floc['llpos'][-1]) > 1.5 * llpos_diff_med:\n # print('overlap line not matched')\n ll_diff = ll_prev[ind] - floc['llpos'][-1]\n ind2 = -2\n # loop over next reddest peak until they match\n while ll_diff > 1.5 * llpos_diff_med:\n # check there is still overlap\n if floc['llpos'][ind2] > ll_prev[0]:\n ind = np.abs(ll_prev - floc['llpos'][ind2]).argmin()\n ll_diff = ll_prev[ind] - floc['llpos'][ind2]\n ind2 -= 1\n else:\n break\n m_match = m_prev[ind]\n # save previous mpeak calculated\n m_init = mpeak[cm_ind]\n # recalculate m if there's an offset from cross_match\n m_offset_c = m_match - m_init\n if m_offset_c != 0:\n mpeak = mpeak + m_offset_c\n # print note for dev if different\n if p['DRS_DEBUG']:\n wargs = [order_num, m_match - m_init]\n wmsg = 'M difference for order {0}: {1}'\n WLOG(p, '', wmsg.format(*wargs))\n # recalculate observed effective cavity width\n dopd_t = mpeak * floc['llpos']\n # store new m and d\n floc['m_fp'] = mpeak\n floc['dopd_t'] = dopd_t\n else:\n wmsg = 'No overlap for order {0}'\n WLOG(p, 'warning', wmsg.format(order_num))\n # save previous mpeak calculated\n m_init = mpeak[cm_ind]\n m_test = mpeak[cm_ind]\n # get dopd for last line of current & first of last order\n dopd_curr = (m_test * floc['llpos'][cm_ind] - dopd0) * 1.e-3\n dopd_prev = (m_prev[0] * ll_prev[0] - dopd0) * 1.e-3\n # do loops to check jumps\n if dopd_curr - dopd_prev > fp_large_jump:\n while (dopd_curr - dopd_prev) > fp_large_jump:\n m_test = m_test - 1\n dopd_curr = (m_test * floc['llpos'][cm_ind] - dopd0)\n dopd_curr = dopd_curr * 1.e-3\n elif dopd_curr - dopd_prev < -fp_large_jump:\n while (dopd_curr - dopd_prev) < -fp_large_jump:\n m_test = m_test + 1\n dopd_curr = (m_test * floc['llpos'][cm_ind] - dopd0)\n dopd_curr = dopd_curr * 1.e-3\n # recalculate m if there's an offset from cross_match\n m_offset_c = m_test - m_init\n if m_offset_c != 0:\n mpeak = mpeak + m_offset_c\n # print note for dev if 
different\n if p['DRS_DEBUG']:\n wargs = [order_num, mpeak[cm_ind] - m_init]\n wmsg = 'M difference for order {0}: {1}'\n WLOG(p, '', wmsg.format(*wargs))\n # recalculate observed effective cavity width\n dopd_t = mpeak * floc['llpos']\n # store new m and d\n floc['m_fp'] = mpeak\n floc['dopd_t'] = dopd_t\n\n # add to storage\n llpos_all += list(floc['llpos'])\n xxpos_all += list(floc['xxpos'])\n ampl_all += list(floc['ampl'])\n m_fp_all += list(floc['m_fp'])\n weight_bl_all += list(floc['weight_bl'])\n order_rec_all += list(floc['order_rec'])\n # difference in cavity width converted to microns\n dopd_all += list((floc['dopd_t'] - dopd0) * 1.e-3)\n # save numpy arrays of current order to be previous in next loop\n ll_prev = np.array(floc['llpos'])\n m_prev = np.array(floc['m_fp'])\n\n # convert to numpy arrays\n llpos_all = np.array(llpos_all)\n xxpos_all = np.array(xxpos_all)\n ampl_all = np.array(ampl_all)\n m_fp_all = np.array(m_fp_all)\n weight_bl_all = np.array(weight_bl_all)\n order_rec_all = np.array(order_rec_all)\n dopd_all = np.array(dopd_all)\n\n # fit a polynomial to line number v measured difference in cavity\n # width, weighted by blaze\n with warnings.catch_warnings(record=True) as w:\n coeffs = nanpolyfit(m_fp_all, dopd_all, fit_deg, w=weight_bl_all)[::-1]\n spirouCore.WarnLog(p, w, funcname=func_name)\n # get the values of the fitted cavity width difference\n cfit = np.polyval(coeffs[::-1], m_fp_all)\n # update line wavelengths using the new cavity width fit\n newll = (dopd0 + cfit * 1000.) / m_fp_all\n # insert fp lines into all_lines2 (at the correct positions)\n all_lines_2 = insert_fp_lines(p, newll, llpos_all, all_lines_2,\n order_rec_all, xxpos_all, ampl_all)\n\n # add to loc\n loc['FP_LL_POS'] = llpos_all\n loc['FP_XX_POS'] = xxpos_all\n loc['FP_M'] = m_fp_all\n loc['FP_DOPD_OFFSET'] = dopd_all\n loc['FP_AMPL'] = ampl_all\n loc['FP_LL_POS_NEW'] = newll\n loc['ALL_LINES_2'] = all_lines_2\n loc['FP_DOPD_OFFSET_COEFF'] = coeffs\n loc['FP_DOPD_OFFSET_FIT'] = cfit\n loc['FP_ORD_REC'] = order_rec_all\n # set sources\n sources = ['FP_LL_POS', 'FP_XX_POS', 'FP_M', 'FP_DOPD_OFFSET',\n 'FP_AMPL', 'FP_LL_POS_NEW', 'ALL_LINES_2',\n 'FP_DOPD_OFFSET_COEFF', 'FP_DOPD_OFFSET_FIT', 'FP_ORD_REC']\n loc.set_sources(sources, func_name)\n\n return loc", "def transmitWaveformData(self, name, data, stringOnly=0, marker1=[], marker2=[]):\n MARKER1= 0b01000000\n MARKER2= 0b10000000\n if (marker1==[]):\n marker1=np.zeros(len(data),dtype=int)\n else:\n marker1=marker1*MARKER1\n\n if (marker2==[]):\n marker2=np.zeros(len(data),dtype=int)\n else:\n marker2=marker2*MARKER2\n # self.newWaveform(name,len(data))\n block_data=''\n msgStart=('WLISt:WAVeform:DATA \"'+name+'\",0,'+str(len(data))+',#'+str(len(str(5*len(data))))+str(5*len(data)))\n for val,m1,m2 in itertools.izip(data,marker1,marker2):\n converted_data=struct.pack('<fB',float(val),m1+m2) # or should work aswell\n\n block_data = block_data + converted_data\n msg=msgStart+block_data\n\n if stringOnly==0:\n self.sendMessage(msg)\n else:\n return msg", "def add_signal(signal_array, json_file, indent_level, scale):\n\n logger.debug('+ Raw signal:{0}'.format(signal_array))\n\n initial_val = signal_array[1]\n # If no intial condition is defined give it an X, saves headache later. \n # issue a warning.\n if ( not(re.search('^[01xX]', signal_array[1])) ):\n signal_array[1] = str(scale) +'X'\n logger.warning(\n '+ Initial condition not defined for {0}. 
Force invalid \\'x\\''\n .format(signal_array[0])) \n for i,time_step in enumerate(signal_array[1:]):\n\n logger.debug('|---:{0} {1}'.format(i, time_step))\n\n if (re.search('X|x',time_step)):\n signal_array[i+1] = str(scale) + 'X'\n # FIXME: New not in documentation.\n # This is added to represent glitchiness or uncertanity.\n elif (re.search('G',time_step)):\n signal_array[i+1] = str(scale*.03) + 'T' + str(scale*.97) + 'T'\n # FIXME: New not in documentation\n # this is a simple encoding. 0.x will indicate an undef to 1 transition\n # which is not full cycle, and -0.x will show a undef to 0 transition\n # can potenitally be expanded to use x to decide proportion.\n # The combo indication is fixed to 0.25\n elif (re.search(r'0.\\d',time_step)):\n if (re.search(r'-0.\\d',time_step)):\n signal_array[i+1] = str(0.25*scale) + 'U' + str(0.75*scale) + 'L'\n else:\n signal_array[i+1] = str(0.25*scale) + 'U' + str(0.75*scale) + 'H'\n elif (re.search('0',time_step)):\n signal_array[i+1] = str(scale) + 'L'\n elif (re.search('1',time_step)):\n signal_array[i+1] = str(scale)+'H'\n elif (re.search('\\|', time_step)):\n signal_array[i+1] = 'S'\n temp = re.sub(r'\\d+([UDXLHC]).*',r'\\1',signal_array[i])\n signal_array[i+1] = ';[dotted]2' + temp + ';'\n else:\n # allow us to deal with a value change format by searching\n # backwards to find the last change from the current time step. The\n # search is to be performed on the waveform rendered so far.\n signal_array[i+1] = restore_after_spacer(signal_array[i],signal_array[i-1]) \n\n return signal_array", "def _fill_audio_buffer(\n self,\n frames: io.BytesIO,\n in_data,\n frame_count,\n time_info,\n status_flags):\n if self.do_record:\n frames.write(in_data)\n return None, pyaudio.paContinue\n return None, pyaudio.paComplete", "def build_pulse_waveform(startper,endper):\r\n mywaveform = numpy.zeros(100, dtype=numpy.int)\r\n if startper > endper:\r\n mywaveform[0:endper]=1\r\n mywaveform[startper:100]=1\r\n else:\r\n mywaveform[startper:endper]=1 \r\n return mywaveform", "def update_waveforms(self, key, _):\n if key == self.controls.Arrays.WAVEFORMS:\n self.trace_lines[0].set_ydata(self.pv_monitor.arrays[key][0])\n self.trace_lines[1].set_ydata(self.pv_monitor.arrays[key][1])\n self.draw()", "def recive_data(self, data_waveformreceived):\r\n self.adcollector.save_as_binary(self.savedirectory)\r\n self.channel_number = len(data_waveformreceived)\r\n if self.channel_number == 1: \r\n if 'Vp' in self.readinchan:\r\n self.data_collected_0 = data_waveformreceived[0]\r\n \r\n self.PlotDataItem_patch_voltage = PlotDataItem(self.xlabelhere_all, self.data_collected_0)\r\n #use the same color as before, taking advantages of employing same keys in dictionary\r\n self.PlotDataItem_patch_voltage.setPen('w')\r\n self.pw_data.addItem(self.PlotDataItem_patch_voltage)\r\n \r\n self.textitem_patch_voltage = pg.TextItem(('Vp'), color=('w'), anchor=(1, 1))\r\n self.textitem_patch_voltage.setPos(0, 1)\r\n self.pw_data.addItem(self.textitem_patch_voltage)\r\n elif 'Ip' in self.readinchan:\r\n self.data_collected_0 = data_waveformreceived[0]\r\n \r\n self.PlotDataItem_patch_current = PlotDataItem(self.xlabelhere_all, self.data_collected_0)\r\n #use the same color as before, taking advantages of employing same keys in dictionary\r\n self.PlotDataItem_patch_current.setPen('c')\r\n self.pw_data.addItem(self.PlotDataItem_patch_current)\r\n \r\n self.textitem_patch_current = pg.TextItem(('Ip'), color=('w'), anchor=(1, 1))\r\n self.textitem_patch_current.setPos(0, 1)\r\n 
self.pw_data.addItem(self.textitem_patch_current) \r\n elif 'PMT' in self.readinchan: # repeatnum, PMT_data_index_array, averagenum, ScanArrayXnum\r\n self.data_collected_0 = data_waveformreceived[0]*-1\r\n self.data_collected_0 = self.data_collected_0[0:len(self.data_collected_0)-1]\r\n \r\n # pmt data could come from raster scanning mode or from contour scanning mode.\r\n try:\r\n for i in range(self.repeatnum):\r\n self.PMT_image_reconstructed_array = self.data_collected_0[np.where(self.PMT_data_index_array_repeated == i+1)]\r\n Dataholder_average = np.mean(self.PMT_image_reconstructed_array.reshape(self.averagenum, -1), axis=0)\r\n Value_yPixels = int(len(self.samples_1)/self.ScanArrayXnum)\r\n self.PMT_image_reconstructed = np.reshape(Dataholder_average, (Value_yPixels, self.ScanArrayXnum))\r\n \r\n # Stack the arrays into a 3d array\r\n if i == 0:\r\n self.PMT_image_reconstructed_stack = self.PMT_image_reconstructed\r\n else:\r\n self.PMT_image_reconstructed_stack = np.concatenate((self.PMT_image_reconstructed_stack, self.PMT_image_reconstructed), axis=0)\r\n \r\n Localimg = Image.fromarray(self.PMT_image_reconstructed) #generate an image object\r\n Localimg.save(os.path.join(self.savedirectory, datetime.now().strftime('%Y-%m-%d_%H-%M-%S')+'_PMT_'+self.saving_prefix+'_'+str(i)+'.tif')) #save as tif\r\n \r\n plt.figure()\r\n plt.imshow(self.PMT_image_reconstructed, cmap = plt.cm.gray)\r\n plt.show()\r\n except:\r\n np.save(os.path.join(self.savedirectory, datetime.now().strftime('%Y-%m-%d_%H-%M-%S')+'_PMT_'+self.saving_prefix+'_'+'flatten'), self.data_collected_0)\r\n \r\n elif self.channel_number == 2: \r\n if 'PMT' not in self.readinchan:\r\n self.data_collected_0 = data_waveformreceived[0]\r\n \r\n self.PlotDataItem_patch_voltage = PlotDataItem(self.xlabelhere_all, self.data_collected_0)\r\n #use the same color as before, taking advantages of employing same keys in dictionary\r\n self.PlotDataItem_patch_voltage.setPen('w')\r\n self.pw_data.addItem(self.PlotDataItem_patch_voltage)\r\n \r\n self.textitem_patch_voltage = pg.TextItem(('Vp'), color=('w'), anchor=(1, 1))\r\n self.textitem_patch_voltage.setPos(0, 1)\r\n self.pw_data.addItem(self.textitem_patch_voltage) \r\n \r\n self.data_collected_1 = data_waveformreceived[1]\r\n \r\n self.PlotDataItem_patch_current = PlotDataItem(self.xlabelhere_all, self.data_collected_1)\r\n #use the same color as before, taking advantages of employing same keys in dictionary\r\n self.PlotDataItem_patch_current.setPen('c')\r\n self.pw_data.addItem(self.PlotDataItem_patch_current)\r\n \r\n self.textitem_patch_current = pg.TextItem(('Ip'), color=('w'), anchor=(1, 1))\r\n self.textitem_patch_current.setPos(0, 1)\r\n self.pw_data.addItem(self.textitem_patch_current) \r\n elif 'PMT' in self.readinchan:\r\n self.data_collected_0 = data_waveformreceived[0]*-1\r\n self.data_collected_0 = self.data_collected_0[0:len(self.data_collected_0)-1]\r\n \r\n try:\r\n for i in range(self.repeatnum):\r\n self.PMT_image_reconstructed_array = self.data_collected_0[np.where(self.PMT_data_index_array_repeated == i+1)]\r\n Dataholder_average = np.mean(self.PMT_image_reconstructed_array.reshape(self.averagenum, -1), axis=0)\r\n Value_yPixels = int(len(self.samples_1)/self.ScanArrayXnum)\r\n self.PMT_image_reconstructed = np.reshape(Dataholder_average, (Value_yPixels, self.ScanArrayXnum))\r\n \r\n # Stack the arrays into a 3d array\r\n if i == 0:\r\n self.PMT_image_reconstructed_stack = self.PMT_image_reconstructed\r\n else:\r\n self.PMT_image_reconstructed_stack = 
np.concatenate((self.PMT_image_reconstructed_stack, self.PMT_image_reconstructed), axis=0)\r\n \r\n Localimg = Image.fromarray(self.PMT_image_reconstructed) #generate an image object\r\n Localimg.save(os.path.join(self.savedirectory, datetime.now().strftime('%Y-%m-%d_%H-%M-%S')+'_PMT_'+self.saving_prefix+'_'+str(i)+'.tif')) #save as tif\r\n \r\n plt.figure()\r\n plt.imshow(self.PMT_image_reconstructed, cmap = plt.cm.gray)\r\n plt.show()\r\n except:\r\n np.save(os.path.join(self.savedirectory, datetime.now().strftime('%Y-%m-%d_%H-%M-%S')+'_PMT_'+self.saving_prefix+'_'+'contourscanning'), self.data_collected_0)\r\n \r\n if 'Vp' in self.readinchan:\r\n self.data_collected_1 = data_waveformreceived[1]\r\n \r\n self.PlotDataItem_patch_voltage = PlotDataItem(self.xlabelhere_all, self.data_collected_1)\r\n #use the same color as before, taking advantages of employing same keys in dictionary\r\n self.PlotDataItem_patch_voltage.setPen('w')\r\n self.pw_data.addItem(self.PlotDataItem_patch_voltage)\r\n \r\n self.textitem_patch_voltage = pg.TextItem(('Vp'), color=('w'), anchor=(1, 1))\r\n self.textitem_patch_voltage.setPos(0, 1)\r\n self.pw_data.addItem(self.textitem_patch_voltage)\r\n elif 'Ip' in self.readinchan:\r\n self.data_collected_1 = data_waveformreceived[1]\r\n \r\n self.PlotDataItem_patch_current = PlotDataItem(self.xlabelhere_all, self.data_collected_1)\r\n #use the same color as before, taking advantages of employing same keys in dictionary\r\n self.PlotDataItem_patch_current.setPen('c')\r\n self.pw_data.addItem(self.PlotDataItem_patch_current)\r\n \r\n self.textitem_patch_current = pg.TextItem(('Ip'), color=('w'), anchor=(1, 1))\r\n self.textitem_patch_current.setPos(0, 1)\r\n self.pw_data.addItem(self.textitem_patch_current)", "def make_signal(self, waveform):\n\n #print >> sys.stdout, \"generating signal...\"\n\n # --- Set up timing\n\n # index of the absolute maximum peak\n #idx = np.concatenate(np.argwhere(abs(waveform.hplus.data.data)>0))[0]\n idx = np.argmax(abs(waveform.hplus.data))\n\n # Epoch = GPS start of time series. Want the peak time of the waveform\n # to be aligned to the geocenter, so set the epoch to the geocentric\n # peak time minus the time to the waveform peak. 
In other words:\n # (waveform epoch) = (geocentric peak time) - (# of seconds to peak)\n\n hplus_epoch = self.ext_params.geocent_peak_time - idx*waveform.hplus.delta_t\n hcross_epoch = self.ext_params.geocent_peak_time - idx*waveform.hcross.delta_t\n\n # XXX: create regular lal timeseries objects for this bit (may replace\n # with pycbc injection routines later)\n\n hplus = lal.CreateREAL8TimeSeries('hplus', hplus_epoch, 0,\n waveform.hplus.delta_t, lal.StrainUnit,\n int(waveform.hplus.duration / waveform.hplus.delta_t))\n hplus.data.data = np.array(waveform.hplus.data)\n\n hcross = lal.CreateREAL8TimeSeries('hcross', hcross_epoch, 0,\n waveform.hcross.delta_t, lal.StrainUnit,\n int(waveform.hcross.duration / waveform.hcross.delta_t))\n hcross.data.data = np.array(waveform.hcross.data)\n\n\n if self.taper is True:\n\n print >> sys.stderr, \"Warning: tapering out inspiral (not a realistic strategy)\"\n delay = 0.0e-3\n idx = np.argmax(hplus.data.data) + \\\n np.ceil(delay/self.delta_t)\n hplus.data.data[0:idx]=0.0\n hcross.data.data[0:idx]=0.0\n lalsim.SimInspiralREAL8WaveTaper(hplus.data,\n lalsim.SIM_INSPIRAL_TAPER_START)\n lalsim.SimInspiralREAL8WaveTaper(hcross.data,\n lalsim.SIM_INSPIRAL_TAPER_START)\n\n\n # Scale for distance (waveforms extracted at 20 Mpc)\n hplus.data.data *= 20.0 / self.ext_params.distance\n hcross.data.data *= 20.0 / self.ext_params.distance\n\n tmp = lalsim.SimDetectorStrainREAL8TimeSeries(hplus, hcross,\n self.ext_params.ra, self.ext_params.dec,\n self.ext_params.polarization, self.det_site) \n\n # Pad the end so we have the same length signal and noise (useful for\n # snr and psds)\n sigdata = np.zeros(len(self.td_noise))\n sigdata[:len(tmp.data.data)] = np.copy(tmp.data.data)\n\n # Project waveform onto these extrinsic parameters\n self.td_signal = \\\n pycbc.types.timeseries.TimeSeries(initial_array=sigdata,\n delta_t=tmp.deltaT, epoch=tmp.epoch)\n\n del tmp\n\n # Remove extraneous data\n #self.td_signal = self.td_signal.trim_zeros()", "def updateWaveSol(self, tell_sp):\n\t\twfit0 = tell_sp.header['WFIT0NEW']\n\t\twfit1 = tell_sp.header['WFIT1NEW']\n\t\twfit2 = tell_sp.header['WFIT2NEW']\n\t\twfit3 = tell_sp.header['WFIT3NEW']\n\t\twfit4 = tell_sp.header['WFIT4NEW']\n\t\twfit5 = tell_sp.header['WFIT5NEW']\n\t\tc3 = tell_sp.header['c3']\n\t\tc4 = tell_sp.header['c4']\n\n\t\tlength1 = tell_sp.header['NAXIS1']\n\n\t\tself.wave = np.delete(smart.waveSolution(np.arange(length1),\n\t\t\twfit0,wfit1,wfit2,wfit3,wfit4,wfit5,c3,c4, order=self.order), list(self.mask))\n\t\tself.oriWave = smart.waveSolution(np.arange(length1),\n\t\t\twfit0,wfit1,wfit2,wfit3,wfit4,wfit5,c3,c4, order=self.order)\n\n\t\treturn self", "def raw_to_wav(data, path, rate=44100):\n wavfile.write(path, rate, data)", "def prosody_static(self, audio, plots):\n fs, data_audio = read(audio)\n\n if len(data_audio.shape)>1:\n data_audio = data_audio.mean(1)\n data_audio = data_audio-np.mean(data_audio)\n data_audio = data_audio/float(np.max(np.abs(data_audio)))\n size_frameS = self.size_frame*float(fs)\n size_stepS = self.step*float(fs)\n thr_len_pause = self.thr_len*float(fs)\n\n if self.pitch_method == 'praat':\n name_audio = audio.split('/')\n temp_uuid = 'prosody'+name_audio[-1][0:-4]\n if not os.path.exists(PATH+'/../tempfiles/'):\n os.makedirs(PATH+'/../tempfiles/')\n temp_filename_f0 = PATH+'/../tempfiles/tempF0'+temp_uuid+'.txt'\n temp_filename_vuv = PATH+'/../tempfiles/tempVUV'+temp_uuid+'.txt'\n praat_functions.praat_vuv(audio, temp_filename_f0, temp_filename_vuv,\n time_stepF0=self.step, 
minf0=self.minf0, maxf0=self.maxf0)\n\n F0, _ = praat_functions.decodeF0(\n temp_filename_f0, len(data_audio)/float(fs), self.step)\n os.remove(temp_filename_f0)\n os.remove(temp_filename_vuv)\n elif self.pitch_method == 'rapt':\n data_audiof = np.asarray(data_audio*(2**15), dtype=np.float32)\n F0 = pysptk.sptk.rapt(data_audiof, fs, int(\n size_stepS), min=self.minf0, max=self.maxf0, voice_bias=self.voice_bias, otype='f0')\n\n segmentsV = V_UV(F0, data_audio, type_seg=\"Voiced\",\n size_stepS=size_stepS)\n segmentsUP = V_UV(F0, data_audio, type_seg=\"Unvoiced\",\n size_stepS=size_stepS)\n\n segmentsP = []\n segmentsU = []\n for k in range(len(segmentsUP)):\n if (len(segmentsUP[k]) > thr_len_pause):\n segmentsP.append(segmentsUP[k])\n else:\n segmentsU.append(segmentsUP[k])\n\n F0_features = F0feat(F0)\n energy_featuresV = energy_feat(segmentsV, fs, size_frameS, size_stepS)\n energy_featuresU = energy_feat(segmentsU, fs, size_frameS, size_stepS)\n duration_features = duration_feat(\n segmentsV, segmentsU, segmentsP, data_audio, fs)\n\n if plots:\n self.plot_pros(data_audio, fs, F0, segmentsV,\n segmentsU, F0_features)\n\n features = np.hstack(\n (F0_features, energy_featuresV, energy_featuresU, duration_features))\n\n return features", "def test_ulaw(self):\n duration = 1\n num_channels = 1\n sample_rate = 8000\n path = self.get_temp_path(\"data.wav\")\n sox_utils.gen_audio_file(\n path, sample_rate=sample_rate, num_channels=num_channels, bit_depth=8, encoding=\"u-law\", duration=duration\n )\n info = self._info(path)\n assert info.sample_rate == sample_rate\n assert info.num_frames == sample_rate * duration\n assert info.num_channels == num_channels\n assert info.bits_per_sample == 8\n assert info.encoding == \"ULAW\"", "def record():\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, channels=1, rate=RATE,\n input=True, output=True,\n frames_per_buffer=CHUNK_SIZE)\n\n num_silent = 0\n snd_started = False\n\n r = array('h')\n\n while 1:\n # little endian, signed short\n snd_data = array('h', stream.read(CHUNK_SIZE))\n if byteorder == 'big':\n snd_data.byteswap()\n r.extend(snd_data)\n\n silent = is_silent(snd_data)\n\n if silent and snd_started:\n num_silent += 1\n elif not silent and not snd_started:\n snd_started = True\n\n if snd_started and num_silent > 30:\n break\n\n sample_width = p.get_sample_size(FORMAT)\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n r = normalize(r)\n r = trim(r)\n r = add_silence(r, 0.5)\n return sample_width, r", "def spew_fake_data(self, ideal_datapoint=None):\n if not ideal_datapoint:\n ideal_datapoint = 0\n for chan, wsock in self._chan_to_wsocket.items():\n if chan.stream_type == \"Integrated\":\n length = 1\n data = 0.5 + 0.1*(np.random.random(length).astype(chan.dtype) + 1j*np.random.random(length).astype(chan.dtype)) + ideal_datapoint\n elif chan.stream_type == \"Demodulated\":\n length = int(self._lib.record_length/32)\n data = np.zeros(length, dtype=chan.dtype)\n data[int(length/4):int(3*length/4)] = 1.0\n data += 0.1*(np.random.random(length) + 1j*np.random.random(length))\n else: #Raw\n length = int(self._lib.record_length/4)\n signal = np.sin(np.linspace(0,10.0*np.pi,int(length/2)))\n data = np.zeros(length, dtype=chan.dtype)\n data[int(length/4):int(length/4)+len(signal)] = signal\n data += 0.1*np.random.random(length)\n wsock.send(struct.pack('n', length*data.dtype.itemsize) + data.tostring())", "def record():\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, channels=1, rate=RATE,\n input=True, output=True,\n 
frames_per_buffer=CHUNK_SIZE)\n\n num_silent = 0\n snd_started = False\n\n r = array('h')\n\n while 1:\n # little endian, signed short\n snd_data = array('h', stream.read(CHUNK_SIZE))\n if byteorder == 'big':\n snd_data.byteswap()\n r.extend(snd_data)\n\n silent = is_silent(snd_data)\n\n if silent and snd_started:\n num_silent += 1\n elif not silent and not snd_started:\n snd_started = True\n\n if snd_started and num_silent > SILENCE:\n break\n\n sample_width = p.get_sample_size(FORMAT)\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n r = normalize(r)\n r = trim(r)\n r = add_silence(r, 0.5)\n return sample_width, r", "def push_data(self, wave_data, finish_processing=False):\n self._parent_conn.send((wave_data, finish_processing))", "def remove_silence_audio() -> None:\n # Read the wav file and get rate and list of data\n rate, data = scipy.io.wavfile.read('Test.wav')\n\n # Create list for data of amended wav file\n data2 = []\n\n # Loop through data of original file and add data that doesn't meed condition: values >= -10 and <= 10\n for i in range(len(data)):\n if data[i][0] >= -10 and data[i][0] <= 10:\n pass\n else:\n data2.append(data[i])\n\n # Create NumPy array from revised data\n data2 = np.asarray(data2, dtype=np.int16)\n\n # Write new data to wav file\n scipy.io.wavfile.write('Test.wav', rate, data2)\n\n return None", "def prosody_dynamic(self, audio):\n fs, data_audio = read(audio)\n\n if len(data_audio.shape)>1:\n data_audio = data_audio.mean(1)\n data_audio = data_audio-np.mean(data_audio)\n data_audio = data_audio/float(np.max(np.abs(data_audio)))\n size_frameS = self.size_frame*float(fs)\n size_stepS = self.step*float(fs)\n overlap = size_stepS/size_frameS\n\n if self.pitch_method == 'praat':\n name_audio = audio.split('/')\n temp_uuid = 'prosody'+name_audio[-1][0:-4]\n if not os.path.exists(PATH+'/../tempfiles/'):\n os.makedirs(PATH+'/../tempfiles/')\n temp_filename_f0 = PATH+'/../tempfiles/tempF0'+temp_uuid+'.txt'\n temp_filename_vuv = PATH+'/../tempfiles/tempVUV'+temp_uuid+'.txt'\n praat_functions.praat_vuv(audio, temp_filename_f0, temp_filename_vuv,\n time_stepF0=self.step, minf0=self.minf0, maxf0=self.maxf0)\n\n F0, _ = praat_functions.decodeF0(\n temp_filename_f0, len(data_audio)/float(fs), self.step)\n os.remove(temp_filename_f0)\n os.remove(temp_filename_vuv)\n elif self.pitch_method == 'rapt':\n data_audiof = np.asarray(data_audio*(2**15), dtype=np.float32)\n F0 = pysptk.sptk.rapt(data_audiof, fs, int(\n size_stepS), min=self.minf0, max=self.maxf0, voice_bias=self.voice_bias, otype='f0')\n\n pitchON = np.where(F0 != 0)[0]\n dchange = np.diff(pitchON)\n change = np.where(dchange > 1)[0]\n iniV = pitchON[0]\n\n featvec = []\n iniVoiced = (pitchON[0]*size_stepS)+size_stepS\n seg_voiced = []\n f0v = []\n Ev = []\n for indx in change:\n finV = pitchON[indx]+1\n finVoiced = (pitchON[indx]*size_stepS)+size_stepS\n VoicedSeg = data_audio[int(iniVoiced):int(finVoiced)]\n temp = F0[iniV:finV]\n tempvec = []\n if len(VoicedSeg) > int(size_frameS):\n seg_voiced.append(VoicedSeg)\n dur = len(VoicedSeg)/float(fs)\n x = np.arange(0,len(temp))\n z = np.poly1d(np.polyfit(x,temp,self.P))\n f0v.append(temp)\n tempvec.extend(z.coeffs)\n temp=get_energy_segment(size_frameS, size_stepS, VoicedSeg, overlap)\n Ev.append(temp)\n x = np.arange(0, len(temp))\n z = np.poly1d(np.polyfit(x, temp, self.P))\n tempvec.extend(z.coeffs)\n tempvec.append(dur)\n featvec.append(tempvec)\n iniV = pitchON[indx+1]\n iniVoiced = (pitchON[indx+1]*size_stepS)+size_stepS\n\n # Add the last voiced 
segment\n finV = (pitchON[len(pitchON)-1])\n finVoiced = (pitchON[len(pitchON)-1]*size_stepS)+size_stepS\n VoicedSeg = data_audio[int(iniVoiced):int(finVoiced)]\n temp = F0[iniV:finV]\n tempvec = []\n\n if len(VoicedSeg) > int(size_frameS):\n # Compute duration\n dur = len(VoicedSeg)/float(fs)\n \n x = np.arange(0, len(temp))\n z = np.poly1d(np.polyfit(x, temp, self.P))\n tempvec.extend(z.coeffs)\n # Energy coefficients\n temp=get_energy_segment(size_frameS, size_stepS, VoicedSeg, overlap)\n x = np.arange(0, len(temp))\n z = np.poly1d(np.polyfit(x, temp, self.P))\n tempvec.extend(z.coeffs)\n tempvec.append(dur)\n # Compute duration\n featvec.append(tempvec)\n\n return np.asarray(featvec)", "def test_wav(self, dtype, sample_rate, num_channels):\n duration = 1\n path = self.get_temp_path(\"data.wav\")\n data = get_wav_data(dtype, num_channels, normalize=False, num_frames=duration * sample_rate)\n save_wav(path, data, sample_rate)\n info = self._info(path)\n assert info.sample_rate == sample_rate\n assert info.num_frames == sample_rate * duration\n assert info.num_channels == num_channels\n assert info.bits_per_sample == sox_utils.get_bit_depth(dtype)\n assert info.encoding == get_encoding(\"wav\", dtype)", "def towave(filename, rate, data):\n if hasattr(filename, 'write'):\n fid = filename\n else:\n fid = open(filename, 'wb')\n\n try:\n dkind = data.dtype.kind\n if not (dkind == 'i' or dkind == 'f' or (dkind == 'u' and data.dtype.itemsize == 1)):\n raise ValueError(\"Unsupported data type '%s'\" % data.dtype)\n\n fid.write(b'RIFF')\n fid.write(b'\\x00\\x00\\x00\\x00')\n fid.write(b'WAVE')\n # fmt chunk\n fid.write(b'fmt ')\n if dkind == 'f':\n comp = 3\n else:\n comp = 1\n if data.ndim == 1:\n noc = 1\n else:\n noc = data.shape[1]\n bits = data.dtype.itemsize * 8\n sbytes = rate * (bits // 8) * noc\n ba = noc * (bits // 8)\n fid.write(struct.pack('<ihHIIHH', 16, comp, noc, rate, sbytes, ba, bits))\n # data chunk\n fid.write(b'data')\n fid.write(struct.pack('<i', data.nbytes))\n if data.dtype.byteorder == '>' or (data.dtype.byteorder == '=' and sys.byteorder == 'big'):\n data = data.byteswap()\n fid.write(data.ravel().view('b').data)\n\n # Determine file size and place it in correct\n # position at start of the file.\n size = fid.tell()\n fid.seek(4)\n fid.write(struct.pack('<i', size - 8))\n\n finally:\n if not hasattr(filename, 'write'):\n fid.close()\n else:\n fid.seek(0)", "def record_audio(self, time):\n p = pyaudio.PyAudio()\n stream = p.open(format=self.format,\n channels=self.channels,\n rate=self.rate,\n input=True,\n frames_per_buffer=self.chunk)\n\n print(\"* recording\")\n\n frames = []\n for i in range(0, int(self.rate / self.chunk * time)):\n data = stream.read(self.chunk)\n frames.append(data)\n\n print(\"* done recording\")\n\n stream.stop_stream()\n stream.close()\n p.terminate()\n return p.get_sample_size(self.format), b''.join(frames)", "def wave_tx_busy():\n return _u2i(_pigpio_command(_control, _PI_CMD_WVBSY, 0, 0))" ]
[ "0.60401595", "0.55275714", "0.5293037", "0.5122268", "0.5093086", "0.48237413", "0.48048532", "0.4800931", "0.47385803", "0.47370905", "0.47193342", "0.47022563", "0.46959713", "0.46900284", "0.46810058", "0.46779042", "0.46707094", "0.46495053", "0.4648288", "0.46479854", "0.4642017", "0.4638728", "0.46104953", "0.45830598", "0.4571749", "0.4564213", "0.45567018", "0.45548794", "0.4549315", "0.45322078" ]
0.71535426
0
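
(Aside, not part of the dataset: the wave_* functions that appear as documents and negatives in these records compose into a simple transmit sequence. The sketch below is illustrative only and assumes the module-level pigpio API those snippets come from; the GPIO number, baud rate, offset and payload are arbitrary choices.)

    from pigpio import *   # assumption: the module-level functions shown in these records are importable this way

    wave_add_new()                                    # start a fresh, empty waveform
    pulses = wave_add_serial(18, 9600, 0, "hello")    # add serial data starting 0 microseconds into the wave
    if pulses >= 0:                                   # negative values are PI_* error codes
        wid = wave_create()                           # turn the accumulated pulses into a transmittable wave
        wave_send_once(wid)                           # send it exactly once
        while wave_tx_busy():                         # poll until transmission has finished
            pass
        wave_delete(wid)                              # release the wave id when done
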
Deletes all created waveforms with ids greater than or equal to wave_id. Wave ids are allocated in order, 0, 1, 2, etc. Returns 0 if OK, otherwise PI_BAD_WAVE_ID.
def wave_delete(wave_id):
    return _u2i(_pigpio_command(_control, _PI_CMD_WVDEL, wave_id, 0))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def RemoveWave(self, wave_id):\n if wave_id in self._waves:\n del self._waves[wave_id]", "def remove_deleted_dos_records():\n count = 0\n dos = DirectlyObservedSprayingForm.objects.last()\n formid = dos.data.get(\"_xform_id\") if dos else DIRECTLY_OBSERVED_FORM_ID\n if formid:\n data = fetch_form_data(formid, dataids_only=True)\n if not data:\n return count\n\n pks = [i[\"_id\"] for i in data]\n deleted_submissions = DirectlyObservedSprayingForm.objects.exclude(\n submission_id__in=pks\n )\n count = deleted_submissions.count()\n deleted_submissions.delete()\n\n return count", "def RemoveWavelet(self, wavelet_id):\n if wavelet_id in self._wavelets:\n del self._wavelets[wavelet_id]", "def drop_invalid_wavs(ds):\n logger.info(\"Dropping all elements that have a wav file with a corrupted header.\")\n # The current wav check implementation has a lot of latency so we add flags in parallel and filter sequentially using the flags\n\n def _add_valid_header_flag(x):\n return dict(x, _wav_header_is_valid=audio_features.wav_header_is_valid(x[\"path\"]))\n\n def _has_valid_header(x):\n return x[\"_wav_header_is_valid\"]\n\n def _drop_flag(x):\n return {k: v for k, v in x.items() if k != \"_wav_header_is_valid\"}\n\n return (ds.map(_add_valid_header_flag, num_parallel_calls=TF_AUTOTUNE)\n .filter(_has_valid_header)\n .map(_drop_flag, num_parallel_calls=TF_AUTOTUNE))", "def deleteWaveforms(self, Names):\n if isinstance(Names, basestring):\n dlmsg='WLISt:WAVeform:DELete \"'+Names+'\"'\n else:\n try:\n dlmsg=[]\n for name in Names:\n dlmsg.append('WLISt:WAVeform:DELete \"'+name+'\"')\n except TypeError:\n print ('TypeError occourred on Waveform Names in function deleteWaveforms, please ensure that message is a string or a list of strings')\n self.sendMessage(dlmsg)", "def Delete(self):\n self.__context.builder.BlipDelete(self.GetWaveId(),\n self.GetWaveletId(),\n self.GetId())\n return self.__context.RemoveBlip(self.GetId())", "def WaveletRemoveSelf(self, wave_id, wavelet_id):\n raise NotImplementedError()", "def _onASWaveRemoved(self, waveID, *args, **kwargs):\n self.dispatch(AC_EVENTS.BOMBERS_DIED, waveID)", "def delete_savings_entries():\n\n savings_ids = request.json['idArr']\n\n # Find and delete savings entry\n for savings_id in savings_ids:\n savings_entry = SavingsEntry.query.get(savings_id)\n db.session.delete(savings_entry)\n db.session.commit()\n\n # Recalculate all savings_totals after savings_entry deletion and update db\n recalculate_totals(current_user)\n\n flash('Savings entry successfully deleted', 'warning')\n return {\"msg\": \"success\"}", "def BlipDelete(self, wave_id, wavelet_id, blip_id):\n op = Operation(BLIP_DELETE, wave_id, wavelet_id, blip_id=blip_id)\n self.__context.AddOperation(op)", "def delete_record_files(self, record, logStat):\n from corrdb.common.models import FileModel\n final_result = True\n for _file_id in record.resources:\n _file = FileModel.objects.with_id(_file_id)\n result = self.delete_record_file(_file, logStat)\n if not result:\n final_result = result\n return final_result", "def _remove_samples_by_id(\n self, ids: AnyGenericIdentifier, **kwargs\n ) -> Optional[AnyGenericIdentifier]:\n tmp_ids = np.asarray(ids, dtype=self.__internal_samples.index.dtype)\n if len(tmp_ids) > 0:\n self.__internal_samples.drop(tmp_ids, inplace=True)\n return self._ratify_action(\"_remove_samples_by_id\", ids, **kwargs)", "def _handle_wave_clear(self):\n if self._wave == self._level.get_max_wave():\n self._handle_game_over(won=True)", "def remove_silence_audio() -> None:\n # 
Read the wav file and get rate and list of data\n rate, data = scipy.io.wavfile.read('Test.wav')\n\n # Create list for data of amended wav file\n data2 = []\n\n # Loop through data of original file and add data that doesn't meed condition: values >= -10 and <= 10\n for i in range(len(data)):\n if data[i][0] >= -10 and data[i][0] <= 10:\n pass\n else:\n data2.append(data[i])\n\n # Create NumPy array from revised data\n data2 = np.asarray(data2, dtype=np.int16)\n\n # Write new data to wav file\n scipy.io.wavfile.write('Test.wav', rate, data2)\n\n return None", "def unique_wavelengths(wavelengths, flux, wmin=40., wmax=300.):\n\n wsin, indicies = np.unique(wavelengths, return_index=True)\n fsin = flux[indicies]\n indx = (wsin >= wmin) & (wsin <= wmax)\n ws = wsin[indx]\n fs = fsin[indx]\n return ws, fs", "def WaveletCreate(self, wave_id):\n raise NotImplementedError()", "def short():\n countneg = 0\n countpos = 0\n testset_id = 4\n\n testfiles = db.session.query(evaluation.Testfile).filter(evaluation.Testfile.testset_id==testset_id)\n print \"Number testfiles: %s\" % testfiles.count()\n for i, tf in enumerate(testfiles):\n if i % 100 == 0:\n print i\n with audioread.audio_open(tf.file.path.encode(\"utf-8\")) as f:\n duration = f.duration\n if duration < 60.0:\n if tf.file.negative:\n countneg+=1\n else:\n countpos+=1\n print \"Removing short duration file: %s (%s)\" % (tf.file.path.encode(\"utf-8\"), duration)\n cur = db.session.query(evaluation.Result).filter(evaluation.Result.testfile_id==tf.id)\n print \"%d results to remove\" % cur.count()\n cur.delete()\n db.session.query(evaluation.Testfile).filter(evaluation.Testfile.id==tf.id).delete()\n db.session.commit()\n testfiles = db.session.query(evaluation.Testfile).filter(evaluation.Testfile.testset_id==testset_id)\n print \"New number testfiles: %s\" % testfiles.count()\n print \"deleted negative: %s\" % countneg\n print \"deleted positive: %s\" % countpos", "def post_wave(cnct):\n files = []\n\n if request.mimetype == 'multipart/form-data':\n for _, file in request.files.items():\n files.append((file.filename, file))\n else:\n files.append(('%s.wav' % uuid4(), request.stream))\n\n response = []\n for (name, fp) in files:\n parser = WaveParser(fp)\n try:\n audio_file = db.AudioFile.FromWaveParser(name, parser)\n cnct.add(audio_file)\n except WaveException as err:\n raise HttpError(406, str(err)) from None\n except Exception as err:\n print(err)\n raise HttpError(500) from None\n\n response.append(audio_file.info)\n\n cnct.commit()\n return {'files': response}", "def waveforms(self):\n return list(self._waveforms)", "def clearWaves(ham: Dict[str, Any], names: Union[str, List[str]] = None) -> None:\n if names is None:\n for name in ham[\"control\"].keys():\n ham[\"control\"][name][\"waveforms\"] = []\n elif isinstance(names, str):\n ham[\"control\"][names][\"waveforms\"] = []\n elif isinstance(names, list):\n for name in names:\n ham[\"control\"][name][\"waveforms\"] = []\n else:\n assert False, \"Variable names should be a list or int.\"", "def clean_old_data():\n logger.info('Cleaning standalone files on disk...')\n for absolute_path in glob.glob(MEDIA_URL + '*'):\n file_name = os.path.basename(absolute_path)\n try:\n relative_path = os.path.join(AUDIOS_URL, file_name)\n audio = Audio.objects.get(filename=relative_path)\n if audio.get_type() == 'episode':\n try:\n # If there are inactive audios on its being\n for e in audio.podcast.episode_set.exclude(pk=audio.podcast.active_episode.pk):\n if not e.is_active():\n logger.info('Inactive audio 
found in podcast set. Erasing files.')\n e.delete_files()\n except Exception, e:\n logger.exception(e.message)\n except ObjectDoesNotExist, e:\n logger.info('A file with no audio registered in database')\n if os.path.isfile(relative_path):\n logger.info('Erasing: %s' % relative_path)\n os.remove(relative_path)\n logger.info('... Done.')", "def clean_table_records(self, experiment_id):\n # delete join job records from table\n join_job_records = self.join_db_client.get_all_join_job_records_of_experiment(experiment_id)\n\n if join_job_records:\n self.join_db_client.batch_delete_items(\n experiment_id, [record[\"join_job_id\"] for record in join_job_records]\n )\n\n # delete model records from table\n model_records = self.model_db_client.get_all_model_records_of_experiment(experiment_id)\n\n if model_records:\n self.model_db_client.batch_delete_items(\n experiment_id, [record[\"model_id\"] for record in model_records]\n )\n\n # # exit sync thread\n self.sync_thread.thread_running.clear()\n\n # delete exp record from table\n self.exp_db_client.delete_item(experiment_id)\n\n self._close_existing_containers()", "def has_generated_waves(self):\n dirname = self.dirname\n name = self.get_name()\n videocluster = os.path.join(dirname, name)\n try:\n listwaves = os.listdir(videocluster)\n except OSError:\n return False\n listw = [os.path.join(videocluster, fil) for fil in listwaves]\n for wav in listw:\n if os.path.isfile(wav) == True:\n continue\n else:\n return False\n return True", "def clear_bps(self, show_id):\r\n for band in ShowsOtherBands.query.filter_by(ShowID=show_id).all():\r\n db.session.delete(band)\r\n\r\n for person in ShowsPeopleMapping.query.filter_by(ShowID=show_id).all():\r\n db.session.delete(person)\r\n\r\n for setlist in ShowsSongsMapping.query.filter_by(ShowID=show_id).all():\r\n db.session.delete(setlist)\r\n\r\n db.session.commit()", "def delete_banks():\n\n\tres = None\n\trequest_str = \"request url: {0}?{1}\".format(request.url, request.get_data().decode('utf8'))\n\t# print(\"request url: %s\" % request_str)\n\treq_param = request_str.split('?')[-1]\n\tfor id_param in req_param.split('&'):\n\t\tid_value = id_param.split('=')[1]\n\t\tprint(\"id=%s\" % id_value)\n\t\tres = Bank.query.filter(Bank.id == id_value).delete()\n\n\treturn send_result(obj=\"\", status=\"True\", rid=res)", "def clean(self, benchmark_id, max_age):\n\n self.interface.start_transaction()\n\n # gather all benchmarks that are too old\n id_clause = BinaryOperation(_history_table['benchmark_id'], str(benchmark_id), '==')\n max_age_clause = BinaryOperation(_history_table['timestamp'], time.time() - max_age, '<=')\n where = BinaryOperation(id_clause, max_age_clause, 'and')\n old_ids = self.interface.select([_history_table], [_history_table['id']], where_statement=max_age_clause)\n self.interface.delete_rows(_history_table, where_statement=max_age_clause)\n\n # delete each old benchmark\n for old_id in old_ids:\n where = BinaryOperation(_history_stats_table['benchmark'], int(old_id[0]), '==')\n self.interface.delete_rows(_history_stats_table, where_statement=where)\n\n self.interface.commit_transaction()", "def delete_all_populations(self, sample_id: str, remove_gates: bool = False) -> None:\n for f in self.fcs_files:\n if sample_id == 'all' or f.primary_id == sample_id:\n f.populations = []\n if remove_gates:\n f.gates = []\n f.save()", "def deleteTuning(request):\n\t\n\tlogger = logging.getLogger(__name__)\n\t\n\tresponse = []\n\t\n\t# We check to see if there are ruleset IDs given.\n\tif 
request.POST.getlist('id'):\n\t\ttuningIDs = request.POST.getlist('id')\n\telse:\n\t\tresponse.append({'response': 'noIDsGiven', 'text': 'No Tuning ID was given, deletion cancelled.'})\n\t\treturn HttpResponse(json.dumps(response))\n\t\n\t# We iterate over the IDs given and delete them.\n\tfor tuningID in tuningIDs:\n\t\t\n\t\t# The ID is given in a tuningID-tuningType pattern, so we have to match it.\n\t\tmatchPattern = r\"(\\d+)-(\\w+)\"\n\t\tpattern = re.compile(matchPattern)\n\t\tresult = pattern.match(tuningID)\n\t\t\n\t\ttuning = result.group(1)\n\t\ttuningType = result.group(2)\n\t\t\n\t\t# Based on the tuningType, we get the object if it exists and delete it and its comment object. \n\t\tif tuningType == \"EventFilter\":\n\t\t\ttry:\n\t\t\t\teFilter = EventFilter.objects.get(id=tuning)\n\t\t\t\tif eFilter.comment is not None:\n\t\t\t\t\teFilter.comment.delete()\n\t\t\t\teFilter.delete()\n\t\t\texcept EventFilter.DoesNotExist:\n\t\t\t\tlogger.warning(\"Could not find EventFilter with id \"+str(tuningID)+\".\")\n\t\t\t\tresponse.append({'response': 'tuningDoesNotExists', 'text': 'Could not find EventFilter with id '+str(tuningID)+'.'})\n\t\t\t\treturn HttpResponse(json.dumps(response))\n\t\t\t\t\n\t\telif tuningType == \"DetectionFilter\":\n\t\t\ttry:\n\t\t\t\tdFilter = DetectionFilter.objects.get(id=tuning)\n\t\t\t\tif dFilter.comment is not None:\n\t\t\t\t\tdFilter.comment.delete()\n\t\t\t\tdFilter.delete()\n\t\t\texcept DetectionFilter.DoesNotExist:\n\t\t\t\tlogger.warning(\"Could not find DetectionFilter with id \"+str(tuningID)+\".\")\n\t\t\t\tresponse.append({'response': 'tuningDoesNotExists', 'text': 'Could not find DetectionFilter with id '+str(tuningID)+'.'})\n\t\t\t\treturn HttpResponse(json.dumps(response))\n\t\telif tuningType == \"Suppression\":\n\t\t\ttry:\n\t\t\t\tsuppress = Suppress.objects.get(id=tuning)\n\t\t\t\tif suppress.comment is not None:\n\t\t\t\t\tsuppress.comment.delete()\n\t\t\t\tsuppress.delete()\n\t\t\texcept Suppress.DoesNotExist:\n\t\t\t\tlogger.warning(\"Could not find Suppress with id \"+str(tuningID)+\".\")\n\t\t\t\tresponse.append({'response': 'tuningDoesNotExists', 'text': 'Could not find Suppress with id '+str(tuningID)+'.'})\n\t\t\t\treturn HttpResponse(json.dumps(response))\n\t\telse:\n\t\t\tlogger.warning(\"Invalid tuningType: \"+str(tuningType)+\".\")\n\t\t\tresponse.append({'response': 'invalidTuningType', 'text': 'Invalid tuningType: '+str(tuningType)+'.'})\n\t\t\treturn HttpResponse(json.dumps(response))\n\t\t\t\n\tresponse.append({'response': 'tuningSuccessfulDeletion', 'text': 'Tuning was successfully deleted.'})\n\treturn HttpResponse(json.dumps(response))", "def loadWaveform(self, channels, waveform, start_delay, waveId = 1):\n log.info(\"Loading waveform...\")\n if len(waveform) == 0:\n log.info(\"Waveform is empty\")\n return -1\n # plt.plot(waveform)\n wave = key.SD_Wave()\n error = wave.newFromArrayDouble(key.SD_WaveformTypes.WAVE_ANALOG, \n waveform)\n if error < 0:\n log.info(\"Error Creating Wave - {}\".format(error))\n error =self.handle.waveformLoad(wave, waveId)\n if error < 0:\n log.info(\"Error Loading Wave - {}\".format(error))\n start_delay = start_delay / 10E-09 # expressed in 10ns\n start_delay = int(np.round(start_delay))\n log.info(\"Enqueueing waveform {}, StartDelay = {}\".format(waveId, \n start_delay))\n for channel in channels:\n error =self.handle.AWGqueueWaveform(channel, \n waveId, \n key.SD_TriggerModes.SWHVITRIG, \n start_delay, \n 1, \n self.WAVE_PRESCALER)\n if error < 0:\n log.info(\"Queueing waveform 
failed! - {}\".format(error))\n error =self.handle.AWGqueueConfig(channel, \n key.SD_QueueMode.CYCLIC)\n if error < 0:\n log.info(\"Configure cyclic mode failed! - {}\".format(error))\n error =self.handle.AWGstart(channel)\n if error < 0:\n log.info(\"Starting AWG failed! - {}\".format(error))\n log.info(\"Finished Loading waveform\")\n return 1", "def delete_exam_recording(exam_recording_id):\n try:\n user_id = authenticate_token(request)\n examiner = is_examiner(user_id)\n if examiner:\n exam_recording = ExamRecording.query.get(exam_recording_id)\n if exam_recording:\n db.session.delete(exam_recording)\n db.session.commit()\n return jsonify(exam_recording.to_dict()), 200\n return jsonify({'message':'Exam recording with id {} could not be found'.format(exam_recording_id)}), 404\n return jsonify({'user_id': user_id, 'message': ['access denied, not examiner']}), 403\n except exc.SQLAlchemyError as e:\n db.session.rollback()\n return jsonify({ 'message': e.args }), 500\n except Exception as e:\n print(traceback.format_exc())\n return jsonify({ 'message': e.args }), 500" ]
[ "0.5868553", "0.5145849", "0.51431346", "0.50910246", "0.50188667", "0.49904272", "0.4953379", "0.49368852", "0.48529765", "0.47613552", "0.470606", "0.4702558", "0.4698699", "0.46863565", "0.4682282", "0.46481162", "0.46313632", "0.45702335", "0.4560006", "0.45574272", "0.45515308", "0.45478445", "0.45340037", "0.45210376", "0.4515082", "0.44784123", "0.4466346", "0.443371", "0.4427514", "0.4415462" ]
0.65333945
0
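
(A second illustrative aside on the id semantics described in the wave_delete record above; the ids shown are hypothetical examples of the in-order allocation, and pulses are assumed to have been added before each wave_create call.)

    w0 = wave_create()    # e.g. returns id 0
    w1 = wave_create()    # e.g. returns id 1
    w2 = wave_create()    # e.g. returns id 2
    wave_delete(w1)       # deletes every wave with id >= 1, i.e. both w1 and w2; w0 remains
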
Transmits the waveform with id wave_id. The waveform is sent once. Returns the number of cbs in the waveform if OK, otherwise PI_BAD_WAVE_ID, or PI_BAD_WAVE_MODE.
def wave_send_once(wave_id):
    return _u2i(_pigpio_command(_control, _PI_CMD_WVTX, wave_id, 0))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wave_send_repeat(wave_id):\n return _u2i(_pigpio_command(_control, _PI_CMD_WVTXR, wave_id, 0))", "def wave_delete(wave_id):\n return _u2i(_pigpio_command(_control, _PI_CMD_WVDEL, wave_id, 0))", "def wave_tx_busy():\n return _u2i(_pigpio_command(_control, _PI_CMD_WVBSY, 0, 0))", "def set_waveform():\n waveform = request.params.get(\"waveform\", 0, type=int)\n output = request.params.get(\"output\", 1, type=int)\n retval = RP_LIB.rp_GenWaveform(output, ctypes.c_int(waveform))\n if retval != 0:\n LOG.error(\"Failed to set waveform of the signal generator. Error code: %s\",\n ERROR_CODES[retval])", "def wave(methodcnt): # NOTE - INSTANTIATE WITH SPECIAL CASE\n\tprint (\"waving\")\n\t#\treact_with_sound(confirmation_final)\n\treturn 0", "def askwave(self):\n if self.status != \"not connected\":\n m = self.serial\n m.write(\"wave?\" + \"\\r\\n\")\n r = m.read(100)\n r = r[7:]\n result = string.strip(r)\n return result\n else:\n pass", "def post_wave(cnct):\n files = []\n\n if request.mimetype == 'multipart/form-data':\n for _, file in request.files.items():\n files.append((file.filename, file))\n else:\n files.append(('%s.wav' % uuid4(), request.stream))\n\n response = []\n for (name, fp) in files:\n parser = WaveParser(fp)\n try:\n audio_file = db.AudioFile.FromWaveParser(name, parser)\n cnct.add(audio_file)\n except WaveException as err:\n raise HttpError(406, str(err)) from None\n except Exception as err:\n print(err)\n raise HttpError(500) from None\n\n response.append(audio_file.info)\n\n cnct.commit()\n return {'files': response}", "def WaveletCreate(self, wave_id):\n raise NotImplementedError()", "def wave_tx_start():\n return _u2i(_pigpio_command(_control, _PI_CMD_WVGO, 0, 0))", "def do_wave(l, wave_type, r, g, b, duration, repeat):\n command = create_wave_command(\n wave_type, r, g, b, duration, repeat\n )\n l.write(command)", "def wave_tx_stop():\n return _u2i(_pigpio_command(_control, _PI_CMD_WVHLT, 0, 0))", "def transmitWaveformData(self, name, data, stringOnly=0, marker1=[], marker2=[]):\n MARKER1= 0b01000000\n MARKER2= 0b10000000\n if (marker1==[]):\n marker1=np.zeros(len(data),dtype=int)\n else:\n marker1=marker1*MARKER1\n\n if (marker2==[]):\n marker2=np.zeros(len(data),dtype=int)\n else:\n marker2=marker2*MARKER2\n # self.newWaveform(name,len(data))\n block_data=''\n msgStart=('WLISt:WAVeform:DATA \"'+name+'\",0,'+str(len(data))+',#'+str(len(str(5*len(data))))+str(5*len(data)))\n for val,m1,m2 in itertools.izip(data,marker1,marker2):\n converted_data=struct.pack('<fB',float(val),m1+m2) # or should work aswell\n\n block_data = block_data + converted_data\n msg=msgStart+block_data\n\n if stringOnly==0:\n self.sendMessage(msg)\n else:\n return msg", "def wave_create():\n return _u2i(_pigpio_command(_control, _PI_CMD_WVCRE, 0, 0))", "def record():\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, channels=1, rate=RATE,\n input=True, output=True,\n frames_per_buffer=CHUNK_SIZE)\n\n num_silent = 0\n snd_started = False\n\n r = array('h')\n\n while 1:\n # little endian, signed short\n snd_data = array('h', stream.read(CHUNK_SIZE))\n if byteorder == 'big':\n snd_data.byteswap()\n r.extend(snd_data)\n\n silent = is_silent(snd_data)\n\n if silent and snd_started:\n num_silent += 1\n elif not silent and not snd_started:\n snd_started = True\n\n if snd_started and num_silent > 30:\n break\n\n sample_width = p.get_sample_size(FORMAT)\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n r = normalize(r)\n r = trim(r)\n r = add_silence(r, 0.5)\n return sample_width, r", 
"def record():\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, channels=1, rate=RATE,\n input=True, output=True,\n frames_per_buffer=CHUNK_SIZE)\n\n num_silent = 0\n snd_started = False\n\n r = array('h')\n\n while 1:\n # little endian, signed short\n snd_data = array('h', stream.read(CHUNK_SIZE))\n if byteorder == 'big':\n snd_data.byteswap()\n r.extend(snd_data)\n\n silent = is_silent(snd_data)\n\n if silent and snd_started:\n num_silent += 1\n elif not silent and not snd_started:\n snd_started = True\n\n if snd_started and num_silent > SILENCE:\n break\n\n sample_width = p.get_sample_size(FORMAT)\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n r = normalize(r)\n r = trim(r)\n r = add_silence(r, 0.5)\n return sample_width, r", "def gowave(self, wave):\n if self.status != \"not connected\":\n m = self.serial\n m.write(\"filter?\\r\\n\")\n r = m.read(100)\n # m.write(\"filter 1\\r\\n\")\n\n # adjust order blocking filter, if necessary\n if wave < 600:\n if int(r[9:]) != 1:\n m.write(\"filter 1\\r\\n\")\n # print \"out.monochrom: Moving to filter 1 (no filter)\"\n else:\n # print \"out.monochrom: Filter 1 already in place\"\n pass\n elif wave >= 600:\n if int(r[9:]) != 2:\n m.write(\"filter 2\\r\\n\")\n # print \"out.monochrom: Moving to filter 2\"\n else:\n # print \"out.monochrom: Filter 2 already in place\"\n pass\n # elif wave <= 1050:\n # if int(r[9:]) != 2:\n # m.write(\"filter 2\\r\\n\")\n # print \"out.monochrom: Moving to filter 2\"\n # else:\n # print \"out.monochrom: Filter 2 already in place\"\n # elif wave > 1050:\n #\tif int(r[9:]) == 3:\n #\t\tm.write(\"filter 3\\r\\n\")\n #\t\tprint \"out.monochrom: Moving to filter 3\"\n #\telse:\n #\t\tif int(r[9:]) == 0:\n #\t\t\tprint \"out.monochrom: Filter 3 already in place\"\n m.write(\"gowave \" + str(wave) + \"\\r\\n\")\n r = m.read(100)\n result = wave\n return result\n else:\n pass", "def __request_pat_wave(self, r_number):\n packet = bytearray()\n packet.append(0) # 16 bit options\n packet.append(0) # 8 bit options\n packet.append(1) # Request the 1 option\n\n # ---------------------------------------------------------------------\n # Request the variable length options. 
pattern wave.\n packet.append(0x01 << t_var_size_Options.PATTERN_WAVE)\n\n # ---------------------------------------------------------------------\n # Packets to follow\n packet.append(r_number)\n\n # ---------------------------------------------------------------------\n # Length of the bytes to follow\n packet.append(0)\n rval = self.interact_with_shouter(packet)\n if rval != False:\n return rval\n return []", "def wave_tx_repeat():\n return _u2i(_pigpio_command(_control, _PI_CMD_WVGOR, 0, 0))", "def wave_add_new():\n return _u2i(_pigpio_command(_control, _PI_CMD_WVNEW, 0, 0))", "def test_wav(self, dtype, sample_rate, num_channels):\n duration = 1\n path = self.get_temp_path(\"data.wav\")\n data = get_wav_data(dtype, num_channels, normalize=False, num_frames=duration * sample_rate)\n save_wav(path, data, sample_rate)\n info = self._info(path)\n assert info.sample_rate == sample_rate\n assert info.num_frames == sample_rate * duration\n assert info.num_channels == num_channels\n assert info.bits_per_sample == sox_utils.get_bit_depth(dtype)\n assert info.encoding == get_encoding(\"wav\", dtype)", "def _create_wave_file(self):\n is_wave_open = False\n try:\n wv = wave.open(self.audio_file_name, mode='wb')\n is_wave_open = True\n wv.setparams((1, # 1 channel (mono)\n 2, # 2 bytes per sample * 1 channel\n self.sample_rate,\n 0, # Initial number of samples.\n 'NONE',\n 'not compressed'))\n wv.writeframes(self.sample_buffer)\n except:\n print('Error creating audio file')\n if is_wave_open:\n wv.close()", "def wave(self):\n return self._wave", "def loadWaveform(self, channels, waveform, start_delay, waveId = 1):\n log.info(\"Loading waveform...\")\n if len(waveform) == 0:\n log.info(\"Waveform is empty\")\n return -1\n # plt.plot(waveform)\n wave = key.SD_Wave()\n error = wave.newFromArrayDouble(key.SD_WaveformTypes.WAVE_ANALOG, \n waveform)\n if error < 0:\n log.info(\"Error Creating Wave - {}\".format(error))\n error =self.handle.waveformLoad(wave, waveId)\n if error < 0:\n log.info(\"Error Loading Wave - {}\".format(error))\n start_delay = start_delay / 10E-09 # expressed in 10ns\n start_delay = int(np.round(start_delay))\n log.info(\"Enqueueing waveform {}, StartDelay = {}\".format(waveId, \n start_delay))\n for channel in channels:\n error =self.handle.AWGqueueWaveform(channel, \n waveId, \n key.SD_TriggerModes.SWHVITRIG, \n start_delay, \n 1, \n self.WAVE_PRESCALER)\n if error < 0:\n log.info(\"Queueing waveform failed! - {}\".format(error))\n error =self.handle.AWGqueueConfig(channel, \n key.SD_QueueMode.CYCLIC)\n if error < 0:\n log.info(\"Configure cyclic mode failed! - {}\".format(error))\n error =self.handle.AWGstart(channel)\n if error < 0:\n log.info(\"Starting AWG failed! 
- {}\".format(error))\n log.info(\"Finished Loading waveform\")\n return 1", "def record():\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, input_device_index=0, channels=1, rate=RATE, input=True, output=True, frames_per_buffer=CHUNK_SIZE)\n num_silent = 0\n snd_started = False\n\n r = array('h')\n while 1:\n snd_data = array('h', stream.read(CHUNK_SIZE, exception_on_overflow = False))\n if byteorder == 'big':\n snd_data.byteswap()\n r.extend(snd_data)\n\n silent = is_silent(snd_data)\n if silent and snd_started:\n num_silent += 1\n elif not silent and not snd_started:\n print(\"Sound started.\")\n snd_started = True\n\n if snd_started and num_silent> 10:\n break\n\n sample_width = p.get_sample_size(FORMAT)\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n r = normalize(r)\n #r = trim(r)\n #r = add_silence(r, 0.5)\n return sample_width, r", "def send_audio(self):\n print(\"got to send audio\")\n self.send_audio_socket = self.start_socket(IP, SEND_AUDIO_PORT)\n self.send_chunk(self.call_name.encode(), self.send_audio_socket)\n mes = self.receive_mes(self.send_audio_socket)\n print(mes)\n mes = self.receive_mes(self.send_audio_socket)\n print(mes)\n while mes == \"wait\":\n time.sleep(TIME_SLEEP)\n mes = self.receive_mes(self.send_audio_socket)\n print(mes)\n # p_send = pyaudio.PyAudio() # Create an interface to PortAudio\n print('Recording...')\n\n # stream_send = p_send.open(format=FORMAT, channels=CHANNELS, rate=RATE, frames_per_buffer=chunk, input=True,\n # output=False)\n print(\"send stream opened\")\n # Store data in chunks for 3 seconds\n done = False\n num = 1\n while not done:\n try:\n self.lock.acquire()\n data = self.voice_stream.read(chunk) # records chunk\n self.lock.release()\n #print(\"chunk {} recorded\".format(num))\n self.send_audio_socket.send(data) # sends chunk\n #print(\"chunk {} sent\".format(num))\n num += 1\n except socket.error as msg:\n print(\"socket failure send audio: {}\".format(msg))\n done = True\n except Exception as e:\n print(\"sending audio error: {}\".format(e))\n done = True\n self.send_audio_socket.close()\n self.voice_stream.close()\n self.voice_device.terminate()", "def wave_clear():\n return _u2i(_pigpio_command(_control, _PI_CMD_WVCLR, 0, 0))", "def set_wave(self, wave, waves_cnt):\n\n wave_str = \"WAVE {0}/{1}\".format(wave, waves_cnt)\n self.wave_label.element.text = wave_str", "def record_audio_to_file_and_get_wav(self, time, file_name):\n sample_width, frames = self.record_audio(time)\n wf = wave.open(file_name, 'wb')\n wf.setnchannels(self.channels)\n wf.setsampwidth(sample_width)\n wf.setframerate(self.rate)\n wf.writeframes(frames)\n wf.close()\n return WavFile(samples=frames, sample_width=sample_width, time=time, word=file_name)", "def square_wave(self, sqw=0, out=0):\n rs0 = 1 if sqw == 4 or sqw == 32 else 0\n rs1 = 1 if sqw == 8 or sqw == 32 else 0\n out = 1 if out > 0 else 0\n sqw = 1 if sqw > 0 else 0\n reg = rs0 | rs1 << 1 | sqw << 4 | out << 7\n self.i2c.writeto_mem(self.addr, CONTROL_REG, bytearray([reg]))", "def fingerprint_wave(file):\n\n\twav = wave.open(file, 'rb')\n\tif wav.getnchannels() == 1:\n\t\tstereo = 0\n\telif wav.getnchannels() == 2:\n\t\tstereo = 1\n\telse:\n\t\twav.close()\n\t\traise Exception(\"Only 1 or 2 channel WAV files supported\")\n\n\twidth = wav.getsampwidth()\n\tif width != 2:\n\t\twav.close()\n\t\traise Exception(\"Only 16-bit sample widths supported\")\n\n\tsrate = wav.getframerate()\t\n\n\tbuffer = wav.readframes(wav.getnframes())\n\twav.close()\n\n\tms = (len(buffer) / 
2)/(srate/1000)\n\tif stereo == 1:\n\t\tms = ms / 2\n\t\n\tfprint = libofa.create_print(buffer, libofa.BYTE_ORDER_LE, len(buffer) / 2,\n\t\t\t\t\t\t\t\tsrate, stereo);\n\n\treturn (fprint, ms)" ]
[ "0.69623274", "0.5965196", "0.5949356", "0.55702776", "0.550055", "0.5496185", "0.53805286", "0.5364022", "0.5323804", "0.52244794", "0.51428604", "0.5134184", "0.5124433", "0.5105211", "0.5104969", "0.5095982", "0.5079108", "0.5063093", "0.5027118", "0.49701113", "0.49698845", "0.49638483", "0.49637643", "0.4934432", "0.49318618", "0.49224174", "0.4891007", "0.4881866", "0.48773235", "0.485155" ]
0.77059525
0
Transmits the waveform with id wave_id. The waveform repeats until wave_tx_stop is called or another call to wave_send_* is made. Returns the number of cbs in the waveform if OK, otherwise PI_BAD_WAVE_ID or PI_BAD_WAVE_MODE.
def wave_send_repeat(wave_id): return _u2i(_pigpio_command(_control, _PI_CMD_WVTXR, wave_id, 0))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wave_send_once(wave_id):\n return _u2i(_pigpio_command(_control, _PI_CMD_WVTX, wave_id, 0))", "def wave_tx_busy():\n return _u2i(_pigpio_command(_control, _PI_CMD_WVBSY, 0, 0))", "def wave_delete(wave_id):\n return _u2i(_pigpio_command(_control, _PI_CMD_WVDEL, wave_id, 0))", "def wave_tx_stop():\n return _u2i(_pigpio_command(_control, _PI_CMD_WVHLT, 0, 0))", "def wave_tx_start():\n return _u2i(_pigpio_command(_control, _PI_CMD_WVGO, 0, 0))", "def wave(methodcnt): # NOTE - INSTANTIATE WITH SPECIAL CASE\n\tprint (\"waving\")\n\t#\treact_with_sound(confirmation_final)\n\treturn 0", "def wave_tx_repeat():\n return _u2i(_pigpio_command(_control, _PI_CMD_WVGOR, 0, 0))", "def askwave(self):\n if self.status != \"not connected\":\n m = self.serial\n m.write(\"wave?\" + \"\\r\\n\")\n r = m.read(100)\n r = r[7:]\n result = string.strip(r)\n return result\n else:\n pass", "def WaveletCreate(self, wave_id):\n raise NotImplementedError()", "def post_wave(cnct):\n files = []\n\n if request.mimetype == 'multipart/form-data':\n for _, file in request.files.items():\n files.append((file.filename, file))\n else:\n files.append(('%s.wav' % uuid4(), request.stream))\n\n response = []\n for (name, fp) in files:\n parser = WaveParser(fp)\n try:\n audio_file = db.AudioFile.FromWaveParser(name, parser)\n cnct.add(audio_file)\n except WaveException as err:\n raise HttpError(406, str(err)) from None\n except Exception as err:\n print(err)\n raise HttpError(500) from None\n\n response.append(audio_file.info)\n\n cnct.commit()\n return {'files': response}", "def do_wave(l, wave_type, r, g, b, duration, repeat):\n command = create_wave_command(\n wave_type, r, g, b, duration, repeat\n )\n l.write(command)", "def gowave(self, wave):\n if self.status != \"not connected\":\n m = self.serial\n m.write(\"filter?\\r\\n\")\n r = m.read(100)\n # m.write(\"filter 1\\r\\n\")\n\n # adjust order blocking filter, if necessary\n if wave < 600:\n if int(r[9:]) != 1:\n m.write(\"filter 1\\r\\n\")\n # print \"out.monochrom: Moving to filter 1 (no filter)\"\n else:\n # print \"out.monochrom: Filter 1 already in place\"\n pass\n elif wave >= 600:\n if int(r[9:]) != 2:\n m.write(\"filter 2\\r\\n\")\n # print \"out.monochrom: Moving to filter 2\"\n else:\n # print \"out.monochrom: Filter 2 already in place\"\n pass\n # elif wave <= 1050:\n # if int(r[9:]) != 2:\n # m.write(\"filter 2\\r\\n\")\n # print \"out.monochrom: Moving to filter 2\"\n # else:\n # print \"out.monochrom: Filter 2 already in place\"\n # elif wave > 1050:\n #\tif int(r[9:]) == 3:\n #\t\tm.write(\"filter 3\\r\\n\")\n #\t\tprint \"out.monochrom: Moving to filter 3\"\n #\telse:\n #\t\tif int(r[9:]) == 0:\n #\t\t\tprint \"out.monochrom: Filter 3 already in place\"\n m.write(\"gowave \" + str(wave) + \"\\r\\n\")\n r = m.read(100)\n result = wave\n return result\n else:\n pass", "def set_waveform():\n waveform = request.params.get(\"waveform\", 0, type=int)\n output = request.params.get(\"output\", 1, type=int)\n retval = RP_LIB.rp_GenWaveform(output, ctypes.c_int(waveform))\n if retval != 0:\n LOG.error(\"Failed to set waveform of the signal generator. 
Error code: %s\",\n ERROR_CODES[retval])", "def test_wav(self, dtype, sample_rate, num_channels):\n duration = 1\n path = self.get_temp_path(\"data.wav\")\n data = get_wav_data(dtype, num_channels, normalize=False, num_frames=duration * sample_rate)\n save_wav(path, data, sample_rate)\n info = self._info(path)\n assert info.sample_rate == sample_rate\n assert info.num_frames == sample_rate * duration\n assert info.num_channels == num_channels\n assert info.bits_per_sample == sox_utils.get_bit_depth(dtype)\n assert info.encoding == get_encoding(\"wav\", dtype)", "def record():\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, channels=1, rate=RATE,\n input=True, output=True,\n frames_per_buffer=CHUNK_SIZE)\n\n num_silent = 0\n snd_started = False\n\n r = array('h')\n\n while 1:\n # little endian, signed short\n snd_data = array('h', stream.read(CHUNK_SIZE))\n if byteorder == 'big':\n snd_data.byteswap()\n r.extend(snd_data)\n\n silent = is_silent(snd_data)\n\n if silent and snd_started:\n num_silent += 1\n elif not silent and not snd_started:\n snd_started = True\n\n if snd_started and num_silent > 30:\n break\n\n sample_width = p.get_sample_size(FORMAT)\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n r = normalize(r)\n r = trim(r)\n r = add_silence(r, 0.5)\n return sample_width, r", "def record():\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, channels=1, rate=RATE,\n input=True, output=True,\n frames_per_buffer=CHUNK_SIZE)\n\n num_silent = 0\n snd_started = False\n\n r = array('h')\n\n while 1:\n # little endian, signed short\n snd_data = array('h', stream.read(CHUNK_SIZE))\n if byteorder == 'big':\n snd_data.byteswap()\n r.extend(snd_data)\n\n silent = is_silent(snd_data)\n\n if silent and snd_started:\n num_silent += 1\n elif not silent and not snd_started:\n snd_started = True\n\n if snd_started and num_silent > SILENCE:\n break\n\n sample_width = p.get_sample_size(FORMAT)\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n r = normalize(r)\n r = trim(r)\n r = add_silence(r, 0.5)\n return sample_width, r", "def loadWaveform(self, channels, waveform, start_delay, waveId = 1):\n log.info(\"Loading waveform...\")\n if len(waveform) == 0:\n log.info(\"Waveform is empty\")\n return -1\n # plt.plot(waveform)\n wave = key.SD_Wave()\n error = wave.newFromArrayDouble(key.SD_WaveformTypes.WAVE_ANALOG, \n waveform)\n if error < 0:\n log.info(\"Error Creating Wave - {}\".format(error))\n error =self.handle.waveformLoad(wave, waveId)\n if error < 0:\n log.info(\"Error Loading Wave - {}\".format(error))\n start_delay = start_delay / 10E-09 # expressed in 10ns\n start_delay = int(np.round(start_delay))\n log.info(\"Enqueueing waveform {}, StartDelay = {}\".format(waveId, \n start_delay))\n for channel in channels:\n error =self.handle.AWGqueueWaveform(channel, \n waveId, \n key.SD_TriggerModes.SWHVITRIG, \n start_delay, \n 1, \n self.WAVE_PRESCALER)\n if error < 0:\n log.info(\"Queueing waveform failed! - {}\".format(error))\n error =self.handle.AWGqueueConfig(channel, \n key.SD_QueueMode.CYCLIC)\n if error < 0:\n log.info(\"Configure cyclic mode failed! - {}\".format(error))\n error =self.handle.AWGstart(channel)\n if error < 0:\n log.info(\"Starting AWG failed! 
- {}\".format(error))\n log.info(\"Finished Loading waveform\")\n return 1", "def wave_create():\n return _u2i(_pigpio_command(_control, _PI_CMD_WVCRE, 0, 0))", "def send_audio(self):\n print(\"got to send audio\")\n self.send_audio_socket = self.start_socket(IP, SEND_AUDIO_PORT)\n self.send_chunk(self.call_name.encode(), self.send_audio_socket)\n mes = self.receive_mes(self.send_audio_socket)\n print(mes)\n mes = self.receive_mes(self.send_audio_socket)\n print(mes)\n while mes == \"wait\":\n time.sleep(TIME_SLEEP)\n mes = self.receive_mes(self.send_audio_socket)\n print(mes)\n # p_send = pyaudio.PyAudio() # Create an interface to PortAudio\n print('Recording...')\n\n # stream_send = p_send.open(format=FORMAT, channels=CHANNELS, rate=RATE, frames_per_buffer=chunk, input=True,\n # output=False)\n print(\"send stream opened\")\n # Store data in chunks for 3 seconds\n done = False\n num = 1\n while not done:\n try:\n self.lock.acquire()\n data = self.voice_stream.read(chunk) # records chunk\n self.lock.release()\n #print(\"chunk {} recorded\".format(num))\n self.send_audio_socket.send(data) # sends chunk\n #print(\"chunk {} sent\".format(num))\n num += 1\n except socket.error as msg:\n print(\"socket failure send audio: {}\".format(msg))\n done = True\n except Exception as e:\n print(\"sending audio error: {}\".format(e))\n done = True\n self.send_audio_socket.close()\n self.voice_stream.close()\n self.voice_device.terminate()", "def wave(self):\n return self._wave", "def next_wave(self):\n if self._wave == self._level.get_max_wave():\n return\n\n self._wave += 1\n\n #Task 1.3 (Status Bar): Update the current wave display here\n self._status_bar.set_wave(self._wave)\n\n #Task 1.5 (Play Controls): Disable the add wave button here (if this is the last wave)\n if self._wave == 20:\n self._wave_button.config(state=tk.DISABLED)\n\n #Generate wave and enqueue\n wave = self._level.get_wave(self._wave, self._game)\n for step, enemy in wave:\n enemy.set_cell_size(self._game.grid.cell_size)\n\n self._game.queue_wave(wave)", "def wave_clear():\n return _u2i(_pigpio_command(_control, _PI_CMD_WVCLR, 0, 0))", "def record_and_get_wav(self, time):\n sample_width, frames = self.record_audio(time)\n return WavFile(samples=frames, sample_width=sample_width, time=time)", "def set_wave(self, wave, waves_cnt):\n\n wave_str = \"WAVE {0}/{1}\".format(wave, waves_cnt)\n self.wave_label.element.text = wave_str", "def record():\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, input_device_index=0, channels=1, rate=RATE, input=True, output=True, frames_per_buffer=CHUNK_SIZE)\n num_silent = 0\n snd_started = False\n\n r = array('h')\n while 1:\n snd_data = array('h', stream.read(CHUNK_SIZE, exception_on_overflow = False))\n if byteorder == 'big':\n snd_data.byteswap()\n r.extend(snd_data)\n\n silent = is_silent(snd_data)\n if silent and snd_started:\n num_silent += 1\n elif not silent and not snd_started:\n print(\"Sound started.\")\n snd_started = True\n\n if snd_started and num_silent> 10:\n break\n\n sample_width = p.get_sample_size(FORMAT)\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n r = normalize(r)\n #r = trim(r)\n #r = add_silence(r, 0.5)\n return sample_width, r", "def _create_wave_file(self):\n is_wave_open = False\n try:\n wv = wave.open(self.audio_file_name, mode='wb')\n is_wave_open = True\n wv.setparams((1, # 1 channel (mono)\n 2, # 2 bytes per sample * 1 channel\n self.sample_rate,\n 0, # Initial number of samples.\n 'NONE',\n 'not compressed'))\n wv.writeframes(self.sample_buffer)\n 
except:\n print('Error creating audio file')\n if is_wave_open:\n wv.close()", "async def set_wavelength(self, wavelength: int):\n return await self.hw_device.set_wavelength(self.channel, wavelength)", "def transmitWaveformData(self, name, data, stringOnly=0, marker1=[], marker2=[]):\n MARKER1= 0b01000000\n MARKER2= 0b10000000\n if (marker1==[]):\n marker1=np.zeros(len(data),dtype=int)\n else:\n marker1=marker1*MARKER1\n\n if (marker2==[]):\n marker2=np.zeros(len(data),dtype=int)\n else:\n marker2=marker2*MARKER2\n # self.newWaveform(name,len(data))\n block_data=''\n msgStart=('WLISt:WAVeform:DATA \"'+name+'\",0,'+str(len(data))+',#'+str(len(str(5*len(data))))+str(5*len(data)))\n for val,m1,m2 in itertools.izip(data,marker1,marker2):\n converted_data=struct.pack('<fB',float(val),m1+m2) # or should work aswell\n\n block_data = block_data + converted_data\n msg=msgStart+block_data\n\n if stringOnly==0:\n self.sendMessage(msg)\n else:\n return msg", "def play_wav(self):\n chunk = 1024\n wav_f = wave.open(\"temple_bell.wav\", 'rb')\n wav_p = pyaudio.PyAudio()\n stream = wav_p.open(format=wav_p.get_format_from_width(wav_f.getsampwidth()),\n channels=wav_f.getnchannels(),\n rate=wav_f.getframerate(),\n output=True)\n data = wav_f.readframes(chunk)\n\n while data:\n stream.write(data)\n data = wav_f.readframes(chunk)\n stream.stop_stream()\n stream.close()\n wav_p.terminate()\n self.start_button.config(text='Start', state='normal')\n self.start_button.update()", "def __request_pat_wave(self, r_number):\n packet = bytearray()\n packet.append(0) # 16 bit options\n packet.append(0) # 8 bit options\n packet.append(1) # Request the 1 option\n\n # ---------------------------------------------------------------------\n # Request the variable length options. pattern wave.\n packet.append(0x01 << t_var_size_Options.PATTERN_WAVE)\n\n # ---------------------------------------------------------------------\n # Packets to follow\n packet.append(r_number)\n\n # ---------------------------------------------------------------------\n # Length of the bytes to follow\n packet.append(0)\n rval = self.interact_with_shouter(packet)\n if rval != False:\n return rval\n return []" ]
[ "0.7468949", "0.6118355", "0.57054114", "0.54462314", "0.5436499", "0.53422296", "0.5259516", "0.51352197", "0.5088149", "0.5042317", "0.5010825", "0.4981488", "0.49620596", "0.48458806", "0.48164538", "0.48053008", "0.4792684", "0.4789144", "0.47432637", "0.4741151", "0.47379616", "0.4726147", "0.4720705", "0.47049385", "0.47038686", "0.4698026", "0.467301", "0.4660899", "0.46607387", "0.46524987" ]
0.69590354
1
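For context, a minimal usage sketch of the repeat-mode sender from the row above. It assumes the legacy module-level pigpio API shown in these rows (where start()/stop() open and close the daemon connection) and assumes pulses have already been added before wave_create() is called; the 5-second run time is arbitrary.

import time
import pigpio  # legacy module-level API, as in the function definitions above

pigpio.start()                      # connect to the pigpio daemon (assumed legacy call)
# ... pulses would be added here with the wave_add_* calls before creating the wave ...
wid = pigpio.wave_create()          # id of the waveform built from the added pulses
if wid >= 0:
    pigpio.wave_send_repeat(wid)    # repeats until wave_tx_stop() or another wave_send_*
    time.sleep(5.0)                 # let it repeat for a while
    pigpio.wave_tx_stop()           # halt transmission
    pigpio.wave_delete(wid)         # release the waveform's resources
pigpio.stop()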
Stops the transmission of the current waveform. Returns 0 if OK. This function is intended to stop a waveform started with wave_send_repeat.
def wave_tx_stop(): return _u2i(_pigpio_command(_control, _PI_CMD_WVHLT, 0, 0))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def StopRecording( self ): \r\n\r\n self._socket.write( 'E' ) \r\n \r\n return self.GetServerResponse()", "def stopit(self):\n\n self.stop.stop()\n self.stream.close()\n self.p.terminate()\n self.p = None\n\n print(\"Recording terminated!\")", "def stop():\n message = \"STOP:0\" + '\\n'\n sock.sendall(message)\n return", "def stop(self):\n\n frame = _create_bcm_frame(\n opcode=CAN_BCM_TX_DELETE,\n flags=0,\n count=0,\n ival1_seconds=0,\n ival1_usec=0,\n ival2_seconds=0,\n ival2_usec=0,\n can_id=self.can_id,\n nframes=0,\n msg_frame=CAN_FRAME()\n )\n\n bytes_sent = libc.send(self.bcm_socket, ctypes.byref(frame), ctypes.sizeof(frame))\n if bytes_sent == -1:\n logging.debug(\"Error sending frame to stop cyclic message:-/\")", "def recording_stop(self):\n self._post('recording/stop')", "def request_stop(self):\n self._messaged.emit((\"stop\",None,0,None))", "def stop_record_microphone(self):\n return self.microphone_handler.stop_recording()", "def stopwasp():\n\n\trespond = send_command('stopwasp')", "def stop(self) -> None:\n self._stream.stop()", "def stop(self):\n return _TestA_swig.my_qpsk_demod_cb_sptr_stop(self)", "def stop():\n global running\n global reading\n global zeroed\n if zeroed == False:\n time.sleep(1)\n xy_stage.reset_input_buffer();\n changeStatus('Zeroing')\n\n sendSerial(xy_stage, \"0lo0;0or;\\r\\n\");\n time.sleep(com_sleep);\n print(recSerial(xy_stage));\n time.sleep(com_sleep);\n sendSerial(xy_stage,\"0pr\"+str(start_x)+\";1pr\"+str(start_y)+\";\\r\\n\");\n time.sleep(com_sleep);\n print(recSerial(xy_stage));\n\n running = False\n reading = False\n zeroed = True\n changeStatus('Ready')", "def stop_recording_asr2():\n record[0] = False\n\n wf = wave.open('Test.wav', 'wb')\n wf.setnchannels(2)\n wf.setsampwidth(2)\n wf.setframerate(44100)\n wf.writeframes(b''.join(frames))\n wf.close()\n\n utterance = 'You said: ' + rsp1('Test.wav')\n store_utterance(3, utterance)\n\n utterance3 = display_string(3)\n\n frames.clear()\n os.remove('Test.wav')\n\n return render_template('Prototype2.html', utterance3=utterance3)", "def stop(self):\n return self._send_command(\"stop\")", "def stop(self):\n\n command = [0x00, 0x00, 0x00, 0x00]\n self.send_command(command)", "def stop(self):\n return _spacegrant_swig.ax25_udp_pdu_receiver_sptr_stop(self)", "def stop_calibration(self):\n self.socket.send_string('c')\n return self.socket.recv_string()", "def do_stop(self):\n debug(\"CBA4.do_stop()\")\n if (self.__thread and self.__thread.isAlive()):\n self.__thread.stop()\n self.__thread.join(None)\n self.__thread = None\n\n if (self.is_valid()):\n tx = bytearray(16)\n tx[0] = 0x53\n tx[1] = 1\n self.get_status_response(tx)\n #end do_stop()", "def stop(self):\n self._stop_signal = True", "def stop(self) -> int:\n return self._stop", "def stop_recording(self):\n ret = 0\n if self.trace_name is not None:\n logging.debug('Stopping ETW trace')\n command = ['xperf', '-stop', self.trace_name]\n ret = subprocess.call(command, shell=True)\n return ret", "def stop(self):\n self.send_stop()\n self.join()", "async def power_off(self) -> str:\n reply = await self.hw_device.set_wavelength(self.channel, 0)\n return reply", "async def stop(self):\n await self.pause()\n return await self.send_command_and_read_reply(\n Protocol1Command(command=\"\", execution_command=\"V\")\n )", "def stop(self):\n if self._real_send:\n requests.Session.send = self._real_send\n self._real_send = None", "def stop(self):\n if self._running:\n self._running = False\n self._call.stop()", "def stop() -> None:", "def 
stop_recording(self, *args, **kwargs):\n return self.recorder.stop_recording(*args, **kwargs)", "def stop(self) -> None:\n self._stream.stop()", "def stop(self):\n return _spacegrant_swig.hdlc_framer_sptr_stop(self)", "def stop(self) -> bool:\n return self._sendcommand(STOP_KEY, self._retry_count)" ]
[ "0.6869246", "0.6584221", "0.6582802", "0.654871", "0.64110214", "0.6366031", "0.6353361", "0.63304377", "0.6280061", "0.6220325", "0.6213796", "0.61993796", "0.6190571", "0.61875826", "0.61592615", "0.61425036", "0.61169356", "0.6100712", "0.60501325", "0.60472375", "0.60402983", "0.60349524", "0.6029611", "0.60027725", "0.5996449", "0.59937537", "0.59762913", "0.59699386", "0.59679556", "0.59670955" ]
0.67097133
1
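A short sketch pairing the stop call from the row above with the busy check; it assumes the same legacy module-level API and that some waveform (for example one started in repeat mode) may already be transmitting.

import pigpio  # legacy module-level API assumed

pigpio.start()
if pigpio.wave_tx_busy():        # a waveform is currently being transmitted
    rc = pigpio.wave_tx_stop()   # returns 0 on success
    print("wave_tx_stop returned", rc)
pigpio.stop()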
Returns the maximum possible size of a waveform in microseconds.
def wave_get_max_micros(): return _u2i(_pigpio_command(_control, _PI_CMD_WVSM, 2, 0))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculateMaxAmplitude(sampleWidth: int) -> int:\n return 2 ** (sampleWidth * NUM_BITS_IN_A_BYTE - 1) - 1", "def maxwavelen(self):\n return self._maxwavelen", "def timeScale(self) -> int:\n return int(1 / (1 - self.momentum))", "def max_time(self):\n return self.time[np.argmax(self.flux)]", "def getMaxSimTime(self):\n return self.max_simsecs_value", "def checkfrequency(inputgiven):\n data_size = 40000\n wav_file = wave.open(inputgiven, 'r')\n data = wav_file.readframes(data_size)\n wav_file.close()\n data = struct.unpack('{n}h'.format(n=data_size), data)\n print max(data)", "def peak_time(self):\n return np.array([self.wftime[ch][self.waveform[ch].argmax()] for ch in range(self.nchannels)])", "def get_duration_sox_s(audio_file_path: str) -> float:\n global FS_HZ\n assert FS_HZ is not None\n duration_n = get_duration_sox_n(audio_file_path)\n return duration_n / FS_HZ", "def get_wav_duration(wav_bytes: bytes) -> float:\n with io.BytesIO(wav_bytes) as wav_buffer:\n wav_file: wave.Wave_read = wave.open(wav_buffer, \"rb\")\n with wav_file:\n frames = wav_file.getnframes()\n rate = wav_file.getframerate()\n return frames / float(rate)", "def get_recording_size(file_name):\n recording_size = check_output(\n [\"mp3info\", \"-p\", \"%m:%s\\n\", \"{}\".format(file_name)]).decode(\"utf-8\")\n print(\"Recording size:\", str(recording_size))\n\n minutes_seconds = (int(recording_size.split(\":\")[0]) * 60)\n seconds = int(recording_size.split(\":\")[1].replace(\"\\n\", \"\"))\n recording_seconds_size = minutes_seconds + seconds\n print(\"Recording seconds size:\", str(recording_seconds_size))\n\n return recording_seconds_size", "def GetTimeoutScale(self):\n return 1.0", "def get_duration_sox_n(audio_file_path: str) -> float:\n global FS_HZ\n assert FS_HZ is not None\n audiometadata = torchaudio.info(audio_file_path)\n num_frames = audiometadata.num_frames\n original_fs_hz = audiometadata.sample_rate\n duration_n = num_frames\n # TODO(theis): probably not exact value\n duration_n_resampled = round(duration_n * (FS_HZ / original_fs_hz))\n return duration_n_resampled", "def get_length(self):\r\n check_mixer()\r\n frequency, format, channels = (ffi.new('int*'), ffi.new('uint16_t*'),\r\n ffi.new('int*'))\r\n sdl.Mix_QuerySpec(frequency, format, channels)\r\n if format == sdl.AUDIO_S8 or format == sdl.AUDIO_U8:\r\n mixerbytes = 1.0\r\n else:\r\n mixerbytes = 2.0\r\n numsamples = self.chunk.alen / mixerbytes / channels[0]\r\n return numsamples / frequency[0]", "def maximum_element_size_for_length(length):\n\t\n\treturn (2**(7*length)) - 2", "def peak_height(self):\n return np.array([max(self.waveform[ch]) for ch in range(self.nchannels)])", "def get_size_in_mb(file_size):\n return round(file_size / (1024 * 1024), 2)", "def chunk_size(self):\r\n return int(self.frame_length * self.sample_rate)", "def max_speed(self) -> int:\n return self._max_speed", "def max_size_bytes(self) -> Optional[float]:\n return pulumi.get(self, \"max_size_bytes\")", "def _get_buffered_sdram_per_record(self, variable, n_neurons):\n if variable in self.__per_timestep_variables:\n if variable not in self.__per_timestep_recording:\n return 0\n size = self.__per_timestep_datatypes[variable].size\n return self._N_BYTES_FOR_TIMESTAMP + size\n if variable in self.__events_per_core_variables:\n if variable not in self.__events_per_core_recording:\n return 0\n size = self.__events_per_core_datatypes[variable].size\n return self.__events_per_ts[self.MAX_REWIRES] * (\n self._N_BYTES_FOR_TIMESTAMP + size)\n if n_neurons == 0:\n return 0\n if 
variable in self.__bitfield_variables:\n # Overflow can be ignored as it is not save if in an extra word\n out_spike_words = int(math.ceil(n_neurons / BITS_PER_WORD))\n out_spike_bytes = out_spike_words * BYTES_PER_WORD\n return self._N_BYTES_FOR_TIMESTAMP + out_spike_bytes\n else:\n size = self.__data_types[variable].size\n return self._N_BYTES_FOR_TIMESTAMP + (n_neurons * size)", "def width(self):\n return (self.norm / max(self.transmit)) * Unit(self.wavelength_unit)", "def peak_memory_mb() -> float:\n if resource is None or sys.platform not in ('linux', 'darwin'):\n return 0.0\n\n # TODO(joelgrus): For whatever, our pinned version 0.521 of mypy does not like\n # next line, but later versions (e.g. 0.530) are fine with it. Once we get that\n # figured out, remove the type: ignore.\n peak = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss # type: ignore\n\n if sys.platform == 'darwin':\n # On OSX the result is in bytes.\n return peak / 1_000_000\n\n else:\n # On Linux the result is in kilobytes.\n return peak / 1_000", "def size_in_mb(size_in_bytes):\n if size_in_bytes < 10**6:\n return size_in_bytes // 1000\n else:\n return size_in_bytes // 10**6", "def fft_size(self):\n import supriya.ugens\n\n return supriya.ugens.BufFrames.ir(self.buffer_id)", "def get_stepsize(self, DAC):\n if self.span[DAC] == S4g_module.range_max_uni:\n return self.max_current/(2**18)\n if self.span[DAC] == S4g_module.range_max_bi:\n return self.max_current/(2**17)\n if self.span[DAC] == S4g_module.range_min_bi:\n return self.max_current/(2**18)", "def frames_to_ms(num_frames: int) -> int:\n return int(16.67 * num_frames)", "def frames_to_ms(num_frames: int) -> int:\n return int(16.67 * num_frames)", "def maxsize(self) -> int:\n return self._maxsize", "def get_duration(self) -> int:\n return int( (self._frame_count / self._fps) * 1000 )", "def unitSize( cls, value, typeCode=None ):\n return value.shape[-1]" ]
[ "0.67743355", "0.6739954", "0.65313715", "0.6441043", "0.63622504", "0.6326981", "0.6323641", "0.62186784", "0.6213729", "0.6175673", "0.61721396", "0.6133076", "0.61291486", "0.61224705", "0.61124337", "0.6086162", "0.60746914", "0.60595626", "0.6052618", "0.6051396", "0.60494024", "0.6034178", "0.6024513", "0.60237366", "0.60005516", "0.5992792", "0.5992792", "0.5990987", "0.59851515", "0.59719014" ]
0.6895317
0
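A sketch of checking the limit from the row above before assembling a long waveform; the planned_us figure is a hypothetical application value and the API is the same legacy module-level one.

import pigpio  # legacy module-level API assumed

pigpio.start()
max_us = pigpio.wave_get_max_micros()   # longest waveform the library accepts, in microseconds
planned_us = 1500000                    # hypothetical total length of the wave to be built
if planned_us > max_us:
    raise ValueError("planned %d us waveform exceeds the %d us limit" % (planned_us, max_us))
pigpio.stop()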
Send a trigger pulse to a gpio. The gpio is set to level for pulse_len microseconds and then reset to not level. Returns 0 if OK, otherwise PI_BAD_USER_GPIO, PI_BAD_LEVEL, PI_BAD_PULSELEN, or PI_NOT_PERMITTED.
def gpio_trigger(user_gpio, pulse_len=10, level=1): # pigpio message format # I p1 user_gpio # I p2 pulse_len ## extension ## # I level extents = [struct.pack("I", level)] return _u2i(_pigpio_command_ext( _control, _PI_CMD_TRIG, user_gpio, pulse_len, extents))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pulse_hi(pin, length=0.00001): \n on(pin)\n time.sleep(length)\n off(pin)\n time.sleep(length)", "def pulse_lo(pin, length=0.00001):\n off(pin)\n time.sleep(length)\n on(pin)\n time.sleep(length)", "def _pin_pulse(pin, initial_state=GPIO.LOW, pulse_width=PULSE_WIDTH_SEC):\n # type: (int, bool, Union[int, float]) -> None\n GPIO.output(pin, not initial_state)\n try:\n time.sleep(pulse_width)\n finally:\n GPIO.output(pin, initial_state)", "def time_pulse_us(pin:Pin, pulse_level:int, timeout_us:int=1000000, /) -> int:", "def initialize_trig(pg, pulsen, channel = '2', pulsewidth = '10e-9', delay = '10e-9',\n\t\t\t\t\t low_voltage = '0', high_voltage = '1'):\n\terr = ''\n\twhile err != 'Error: 0, No error\\n':#clear errors\n\t\terr = pg.query('syst:err:next?')\n\t\t\n\tif pulsen in set({1,2}):\n\t\tdelay = str((float(pulsewidth) + float(delay))*(pulsen - 1))\n\n\tpgchannel = channel\n\tpw = pulsewidth\n\n\tdelay1 = str(float(pw) + float(delay))\n\tdelay2 = str(float(delay1)*2)\n\tpg.write('outp'+pgchannel+':puls:mode sin')\n\tpg.write('source'+pgchannel+':pulse1:wid '+pw)\n\n\tpg.write('source'+pgchannel+':pulse1:del '+delay)\n\n\tpg.write('source'+pgchannel+':volt:lev:imm:low '+low_voltage)\n\tpg.write('source'+pgchannel+':volt:lev:imm:high '+high_voltage)\n\tpg.write('trig:seq:sour man')\n\tpg.write('outp'+pgchannel+':stat on')\n\tpg.write('PULSEGENControl:START')\n\tif pg.query('syst:err:next?') != 'Error: 0, No error\\n':\n\t\tstop(pg,)\n\t\traise ValueError('error in initialize_2pulse init')\n\treturn", "def trigger(self):\n GPIO.output(self.trigger_pin, 1)\n time.sleep(10/1000000)\n GPIO.output(self.trigger_pin, 0)", "def pulseEnable( self, _data ): # uint8_t\n\t\tself.expanderWrite( _data | LCD_EN ) # En high\n\t\tsleep_us(1) # enable pulse must be >450ns\n\n\t\tself.expanderWrite( _data & (0xFF ^ LCD_EN) ) # En low\n\t\tsleep_us(50) # commands need > 37us to settle", "def fire():\n print(\"FIRING\")\n GPIO.output(PIN1, 0)\n GPIO.output(PIN2, 0)\n GPIO.output(TRIGGER, 1)", "def pulse_config(self, pin, lowOrHigh=1, timeout=1000):\n if pin > len(self.digital) or self.digital[pin].mode == UNAVAILABLE:\n raise IOError(\"Pin %s is not a valid pulse pin\")\n data = itertools.chain([pin], [lowOrHigh], to_two_bytes(timeout))\n self.send_sysex(PULSE_CONFIG, data)\n \n # set pin._mode to PULSE so that it sends messages\n # don't set pin.mode as that calls this method\n self.digital[pin]._mode = PULSE", "def set_servo_pulsewidth(user_gpio, pulsewidth):\n return _u2i(_pigpio_command(_control, _PI_CMD_SERVO, user_gpio, pulsewidth))", "def _pulse_enable(self):\n self.set_low(self._en_pin)\n self._usleep(1)\n self.set_high(self._en_pin)\n self._usleep(1)\n self.set_low(self._en_pin)\n # commands need > 37us to settle\n self._usleep(100)", "def pulse(self, coil, milliseconds):\n self.log.info(\"RASPDriver.Pulse(%s %s, %d ms)\" %\n (coil.config['label'], coil.hw_driver.number, milliseconds))\n self.platform.communicator.driver_pulse(coil.hw_driver.number, milliseconds)\n return milliseconds", "def speed_pulse(x, v0=U_I, drop: float = 1, delay: int = 250, duration: float = 1000):\n return v0 - pulse_sigmoid(x=x, A=drop, d=delay, duration=duration)", "def trigger_level(self, value):\n self.lib.SetTriggerLevel(ct.c_float(value))", "def pulse(vjoy, btn_id):\n global g_is_running\n g_is_running = True\n while g_is_running:\n vjoy[1].button(btn_id).is_pressed = True\n time.sleep(g_hold_time)\n vjoy[1].button(btn_id).is_pressed = False\n time.sleep(g_pause_time)", "def left(self):\n 
self.pi.set_servo_pulsewidth(self.gpio, self.pulse_left_ns)", "def pulse_analysis(\n pulse: int, measurement_statistics: entity.MeasurementStatistics\n) -> entity.StatusEnum:\n result = entity.StatusEnum.MISSING\n if not str(pulse).isnumeric() or isinstance(pulse, str):\n result = entity.StatusEnum.MISSING\n\n elif 0 < pulse < 230:\n result = _pulse_analysis_determine(pulse)\n\n measurement_statistics.increment(result)\n return result", "def set_trigger(self, channel, threshold, delay=0.0, direction='rising', timeout=0.1, enable=True):\n ch = self.convert_to_enum(channel, self.enChannel, to_upper=True)\n if ch.name not in self._channels_dict:\n msg = \"Must call set_channel(channel='{0}', ...) before enabling a trigger with channel {0}\".format(ch.name)\n self.raise_exception(msg)\n\n if self._sampling_interval is None:\n self.raise_exception('Must call set_timebase(...) before setting the trigger')\n\n if ch == self.enChannel.EXT:\n threshold_adu = int(round(self.EXT_MAX_VALUE * threshold/float(self.EXT_MAX_VOLTAGE)))\n else:\n voltage_offset = self._channels_dict[ch.name].voltage_offset\n adu_per_volt = 1.0/self._channels_dict[ch.name].volts_per_adu\n threshold_adu = int(round(adu_per_volt * (threshold + voltage_offset)))\n\n delay_ = int(round(delay / self._sampling_interval))\n if delay < 0:\n msg = 'The trigger delay must be >=0 seconds, requested a delay of {} seconds'.format(delay)\n self.raise_exception(msg)\n elif delay_ > self.MAX_DELAY_COUNT:\n msg = 'The maximum allowed trigger delay is {} seconds, ' \\\n 'requested a delay of {} seconds'.format(self.MAX_DELAY_COUNT*self._sampling_interval, delay)\n self.raise_exception(msg)\n\n trig_dir = self.convert_to_enum(direction, self.enThresholdDirection, to_upper=True)\n auto_trigger_ms = int(round(max(0.0, timeout*1e3)))\n return self.SetSimpleTrigger(self._handle, enable, ch, threshold_adu, trig_dir, delay_, auto_trigger_ms)", "def initialize_2pulse(pg, channel = '1', pulsewidth = '10e-9', delay = '10e-9',\n\t\t\t\t\t low_voltage = '0', high_voltage = '4.8'):\n\terr = ''\n\twhile err != 'Error: 0, No error\\n':#clear errors\n\t\terr = pg.query('syst:err:next?')\n\n\tpgchannel = channel\n\tpw = pulsewidth\n\n\tdelay1 = str(float(pw) + float(delay))\n\tdelay2 = str(float(delay1)*2)\n\tpg.write('outp'+pgchannel+':puls:mode dou')\n\tpg.write('source'+pgchannel+':pulse1:wid '+pw)\n\tpg.write('source'+pgchannel+':pulse2:wid '+pw)\n\n\tpg.write('source'+pgchannel+':pulse1:del '+'0')\n\tpg.write('source'+pgchannel+':pulse2:del '+delay1)\n\n\tpg.write('source'+pgchannel+':volt:lev:imm:low '+low_voltage)\n\tpg.write('source'+pgchannel+':volt:lev:imm:high '+high_voltage)\n\tpg.write('trig:seq:sour man')\n\tpg.write('outp'+pgchannel+':stat on')\n\tpg.write('PULSEGENControl:START')\n\tif pg.query('syst:err:next?') != 'Error: 0, No error\\n':\n\t\tstop(pg,)\n\t\traise ValueError('error in initialize_2pulse init')\n\t\t\n\treturn", "def ultrasound_pulse(self, out_pin: int, in_pin: int) -> float:\n return randint(1, 1500) / 1000", "async def _cb(self, gpio, level, tick):\n\n if level < asyncpio.TIMEOUT:\n\n if self.in_code == False:\n self.bits = 1\n self.num = 0\n\n self.in_code = True\n self.code_timeout = 0\n await self.pi.set_watchdog(self.gpio_0, self.bit_timeout)\n await self.pi.set_watchdog(self.gpio_1, self.bit_timeout)\n else:\n self.bits += 1\n self.num = self.num << 1\n\n if gpio == self.gpio_0:\n self.code_timeout = self.code_timeout & 2 # clear gpio 0 timeout\n else:\n self.code_timeout = self.code_timeout & 1 # clear gpio 1 timeout\n 
self.num = self.num | 1\n\n else:\n\n if self.in_code:\n\n if gpio == self.gpio_0:\n self.code_timeout = self.code_timeout | 1 # timeout gpio 0\n else:\n self.code_timeout = self.code_timeout | 2 # timeout gpio 1\n\n if self.code_timeout == 3: # both gpios timed out\n await self.pi.set_watchdog(self.gpio_0, 0)\n await self.pi.set_watchdog(self.gpio_1, 0)\n self.in_code = False\n self.callback(self.bits, self.num)", "async def Pulse_Light(\n light_id: int = Path(..., title=\"Numeric light identifier\", ge=0)\n) -> Dict[str, Any]:\n busylightapi.manager.apply_effect_to_light(light_id, pulse)\n return {\n \"action\": \"effect\",\n \"name\": \"pulse\",\n \"light_id\": light_id,\n \"color\": \"red\",\n }", "def test_pulse(self):\n report(_(\"Testing PULSE sound output\"))\n return self.audio_try_play(type='pulse')", "def write(gpio, level):\n return _u2i(_pigpio_command(_control, _PI_CMD_WRITE, gpio, level))", "def pump_water(pump_pin, delay=1):\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup(pump_pin, GPIO.OUT)\n timeout = time.time() + 1.5*60 # 1.5 minutes\n\n try:\n print \"Watering plant...\"\n GPIO.output(pump_pin, GPIO.HIGH)\n\n while get_percent_wet() < 75:\n time.sleep(delay)\n if time.time() > timeout:\n break\n\n GPIO.output(pump_pin, GPIO.LOW)\n GPIO.cleanup(pump_pin)\n return\n\n except:\n GPIO.cleanup(pump_pin)\n\n return", "def enable_pulse_modulation(self):\n self.write(\":SOUR:PULM:STAT ON\")", "def config_pulse_modulation(self, frequency=1e3, input='square'):\n self.enable_pulse_modulation()\n self.pulse_source = 'internal'\n self.pulse_input = input\n self.pulse_frequency = frequency", "async def Pulse_Lights():\n busylightapi.manager.apply_effect_to_light(ALL_LIGHTS, pulse)\n return {\n \"action\": \"effect\",\n \"name\": \"pulse\",\n \"light_id\": \"all\",\n \"color\": \"red\",\n }", "def wait_for_trigger(hat):\n try: \n # GPIO.setmode(GPIO.BCM)\n GPIO.setup(PWR_PIN, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n print('\\n <<<READY>>>\\n\\n (0) Waiting for trigger to initiate recording (or press Ctrl+C to abort)\\n')\n \n # Wait until LoStik is properly inserted\n global LoStikInserted\n while(LoStikInserted):\n try:\n ser = serial.Serial(\"/dev/ttyUSB0\", baudrate=57600)\n manager = (ReaderThread(ser, PrintLines))\n enter = type(manager).__enter__\n exit = type(manager).__exit__\n value = enter(manager)\n hit_except = False\n LoStikInserted = 0\n\t\t\t\t\n except:\n for a in range(10):\n GPIO.output(RECORDING_LED,GPIO.HIGH)\n time.sleep(.1)\n GPIO.output(RECORDING_LED,GPIO.LOW) \n time.sleep(.1)\t\t\t\n print(\" LoStik USB not Properly Inserted!\") \n LoStikInserted = 1\n try:\n protocol = value\n while(CMD_RECEIVED):\n if GPIO.input(PWR_PIN) == 1 or CMD_SHUTDOWN:\n print(\" Shutting Down\")\n GPIO.cleanup()\n time.sleep(1)\n hat.a_in_scan_cleanup()\n time.sleep(1)\n quit()\n pass\n except:\n hit_except = True\n if not exit(manager, *sys.exc_info()):\n raise\n finally:\n if not hit_except:\n exit(manager, None, None, None)\n\n except KeyboardInterrupt:\n\t print(CURSOR_BACK_2, ERASE_TO_END_OF_LINE, '\\n')\n\t hat.a_in_scan_cleanup()\n\t GPIO.cleanup()\n\t quit()\n\n # Sends trigger pin on RPi to HIGH which should be connected to MCC118 trigger input pin\n GPIO.setup(TRIGGER_PIN, GPIO.OUT)\t\n GPIO.output(TRIGGER_PIN,GPIO.HIGH)\n \n # Read the status only to determine when the trigger occurs.\n is_running = True\n is_triggered = False\n while is_running and not is_triggered:\n status = hat.a_in_scan_status()\n is_running = status.running\n is_triggered = status.triggered\n if not 
is_triggered:\n time.sleep(0.001)\n GPIO.cleanup()", "def valve_pulse(self, channel, width):\n print('Valve pulse', width)\n step_name = 'Valve Pulse'\n assert channel in [1,2]\n self.relay.settings['pulse_width{}'.format(channel)] = 1e3*width\n getattr(self.relay, 'write_pulse{}'.format(channel))(width)\n self.settings['PV{}'.format(channel)] = 1\n t0 = time.time()\n t_lastlog = t0\n while True:\n if self.interrupt_measurement_called:\n self.settings['PV{}'.format(channel)] = 0 \n break\n if time.time()-t0 > width:\n self.settings['PV{}'.format(channel)] = 0\n break\n time.sleep(0.001)\n if time.time() - t_lastlog > 0.005:\n # do some logging\n self.db_poll(step_name)\n t_lastlog = time.time()\n self.settings['steps_taken'] += 1" ]
[ "0.6322603", "0.6057154", "0.5986317", "0.59158", "0.58596945", "0.55599904", "0.5547386", "0.5538918", "0.5438976", "0.5277301", "0.52660275", "0.5258334", "0.52092797", "0.51829314", "0.51344234", "0.50539565", "0.50418454", "0.5019348", "0.50039", "0.49866304", "0.49618262", "0.49615225", "0.49361807", "0.49352318", "0.49096194", "0.48990607", "0.48583415", "0.4819667", "0.47733158", "0.47213134" ]
0.78696144
0
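An illustrative use of the trigger call from the row above, firing a short high pulse on a hypothetical sensor trigger pin; the pin number, pulse length, and the set_mode()/OUTPUT calls are assumptions from the same API family.

import pigpio  # legacy module-level API assumed

TRIGGER_GPIO = 23   # hypothetical BCM pin wired to a sensor's trigger input

pigpio.start()
pigpio.set_mode(TRIGGER_GPIO, pigpio.OUTPUT)    # drive the pin as an output
rc = pigpio.gpio_trigger(TRIGGER_GPIO, 10, 1)   # 10 microsecond high pulse, then back to low
if rc < 0:
    print("gpio_trigger failed with error code", rc)
pigpio.stop()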
Store a script for later execution. Returns a script id if OK, otherwise PI_BAD_SCRIPT.
def store_script(script): # I p1 script length # I p2 0 ## extension ## # s script return _u2i(_pigpio_command_ext( _control, _PI_CMD_PROC, len(script), 0, script))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addScript(self, script):\r\n\r\n if not self.isClosed:\r\n if len(self.__script) == 0:\r\n if ScriptEngine.validate(script):\r\n self.__script = script\r\n else:\r\n raise HDDOPermissionException('Tried to add an invalid script to a HealthDominoDataObject.')\r\n else:\r\n raise HDDOPermissionException('Tried to add script twice to a HealthDominoDataObject.')\r\n else:\r\n raise HDDOPermissionException('Tried to add script to a closed HealthDominoDataObject.')", "def register_script(self, script):\n return self.conn.register_script(script)", "def assign_script(self, script, location):\n self.can_receive_scripts.wait()\n\n if script is not None:\n self.scripts.append((script, location))\n self.script_received.set()\n else:\n self.script_received.set() \n self.timepoint_done = True \n \n # Updating the dict of semaphores whenever we assign a new location\n\n semaphore = Semaphore()\n\n if location not in self.device_semaphores:\n for device in self.devices:\n device.device_semaphores[location] = semaphore", "def execute_script(self, script, enterpreter='/bin/sh'):\n destination = '/tmp/' + ''.join(\n random.choice(string.lowercase) for i in range(16))\n\n self.upload(script, destination)\n self.execute('%s %s' % (enterpreter, destination))\n self.execute('rm %s' % destination)", "def upload(self):\n\n data = {'name': self.script_name,\n 'content': self.script_data,\n 'type': 'groovy'}\n\n payload = json.dumps(data)\n\n headers = {'Content-Type': 'application/json'}\n resp = False\n if self.get():\n log.debug('Updating script: {0}'.format(self.script_name))\n upload_url = '{0}/{1}'.format(self.url, self.script_name)\n req = requests.put(upload_url, auth=(self.username, self.password), headers=headers, data=payload)\n if req.status_code == 204 or 200:\n resp = True\n return resp\n log.error('Failed updating script: {0} Reason: {1}'.format(self.script_name, req.status_code))\n else:\n log.debug('Uploading script: {0}'.format(self.script_name))\n req = requests.post(self.url, auth=(self.username, self.password), headers=headers, data=payload)\n if req.status_code == 204 or 200:\n resp = True\n return resp\n log.error('Failed uploading script \"{0}.\" Reason: {1}'.format(self.script_name, req.status_code))\n\n return resp", "def writeScript( script, writeDir=None ):\n fd, name = tempfile.mkstemp( suffix = '_pilotWrapper.py', prefix = 'DIRAC_', dir=writeDir )\n pilotWrapper = os.fdopen(fd, 'w')\n pilotWrapper.write( script )\n pilotWrapper.close()\n return name", "def write_script(script, game_title):\n try:\n script_name = '{}{}.sh'.format(roms_directory, game_title.replace(\":\", \"\"))\n print('Writing {} to disk...'.format(script_name))\n f = open(script_name, \"w+\")\n f.write(script)\n f.close()\n\n st = os.stat(script_name)\n os.chmod(script_name, st.st_mode | stat.S_IEXEC)\n except Exception as write_exception:\n print(write_exception)", "def run_script():\n # pylint: disable=unsupported-assignment-operation\n script_source.data['script'] = [inp_script.value]", "def register(self, script, timeout=None):\n\n if isinstance(script, Story):\n script = Script(script, timeout=timeout)\n\n log.access_log.info('Register story: %s', script.story)\n\n script.avalon = self\n self.scripts.add(script)\n return script", "def script(self, script):\n\n self._script = script", "def putscript(self, name, content):\n content = self.__prepare_content(content)\n code, data = (\n self.__send_command(\"PUTSCRIPT\", [name.encode(\"utf-8\"), content]))\n if code == \"OK\":\n return True\n return False", "def 
test_script(self):\n self.script(\"# script.py\\n\"\n \"a = 2\\n\"\n \"# other\")\n self.compile()\n\n script = self.find_code_component(name=\"script.py\")\n self.assertEqual(script.type, \"script\")\n self.assertEqual(script.mode, \"w\")\n self.assertEqual(script.first_char_line, 1)\n self.assertEqual(script.first_char_column, 0)\n self.assertEqual(script.last_char_line, 3)\n self.assertEqual(script.last_char_column, 7)\n self.assertEqual(script.container_id, -1)\n\n script_block = self.metascript.code_blocks_store[script.id]\n self.assertEqual(script_block.code, \"# script.py\\na = 2\\n# other\")\n self.assertEqual(script_block.docstring, \"\")\n self.assertTrue(bool(script_block.code_hash))", "def payToScriptHashScript(scriptHash):\n if len(scriptHash) != crypto.RIPEMD160_SIZE:\n raise DecredError(\n f\"script hash must be {crypto.RIPEMD160_SIZE}\"\n f\" bytes but is {len(scriptHash)}\"\n )\n script = ByteArray(\"\")\n script += opcode.OP_HASH160\n script += addData(scriptHash)\n script += opcode.OP_EQUAL\n return script", "def script_from_string(self, script: str) -> LocalScriptRunner:\n hasher = hashlib.md5(bytes(script, \"utf-8\"))\n script_name = hasher.hexdigest()\n\n path = pathlib.Path(self.tmp_script_dir.name, script_name)\n aligned_script = textwrap.dedent(script)\n path.write_text(aligned_script)\n return LocalScriptRunner(str(path))", "def script(self, code):\r\n LOG(\"Executing script \" + repr(code))\r\n cmd = MsgHelper.createMessage(Messages.CMD_SCRIPT)\r\n cmd[Messages.FIELD_SCRIPT] = code\r\n cmd[Messages.FIELD_FORCE] = True\r\n self.mailbox.push( cmd, high_priority = False )", "def _send_script(self, script):\n script = self._get_cleaned_script(script)\n with open(self.fifo_path, 'w', encoding='UTF-8') as fifo_write:\n fifo_write.write(script)", "def delete_script(script_id):\n return _u2i(_pigpio_command(_control, _PI_CMD_PROCD, script_id, 0))", "def save(self):\n fn=os.environ['VMEWORKDIR'] +\"/WORK/\"+\"phase.ps\"\n rc=self.c1.postscript(file=fn)\n if rc is not '':\n MywError(errmsg=\"File \"+fn+\" cannot be created.\")\n print \"rc=\",rc,len(rc)", "def isScriptHashScript(pkScript):\n return extractScriptHash(pkScript) is not None", "def script(self, object_id, script, args=None, timeout=None):\n # Resolve object data\n data = yield self.service.get_executor(\"db\").submit(self.get_object_data, object_id)\n # Find pool name\n pool = self.service.get_pool_name(data[\"pool_id\"])\n if not pool:\n metrics[\"error\", (\"type\", \"pool_not_found\")] += 1\n raise APIError(\"Pool not found\")\n # Check script is exists\n script_name = \"%s.%s\" % (data[\"profile\"], script)\n if not loader.has_script(script_name):\n metrics[\"error\", (\"type\", \"invalid_scripts_request\")] += 1\n raise APIError(\"Invalid script\")\n #\n url = yield self.get_activator_url(pool)\n if not url:\n raise APIError(\"No active activators for pool '%s'\" % pool)\n self.redirect(\n url,\n \"script\",\n [\n script_name,\n data[\"credentials\"],\n data[\"capabilities\"],\n data[\"version\"],\n args,\n timeout,\n ],\n )", "def with_script(self, script):\n self.script = script\n return self", "def exec_script(self, script):\n filename = os.path.join(self.script_dir, script + \".sh\")\n # http://docs.python.org/library/os.html#os.X_OK\n if os.access(filename, os.X_OK):\n with open(filename):\n subprocess.call(filename)\n self.vibrate(0.1)", "def addOnScriptSave(call, args=(), kwargs={}, nodeClass='Root'):\n pass", "def save( self, save_path=\"/home/rts2obs/.rts2scripts\", save_file=None ):\n\n self.id 
= self.create_target_api()\n\n commer=rts2comm()\n\t # the following line should be handle by rts2.ini and not here. \n #commer.setscript(self.id, script=\"exe /home/rts2obs/.local/bin/targetscript.py\")\n\n if save_file is None:\n save_file = \"{}.json\".format( self.name )\n fpath = os.path.join( save_path, save_file )\n\n with open(fpath, 'w') as fd:\n json.dump( self.dictify(), fd, indent=2 )", "def script_load(self, script):\n return self._execute([b'SCRIPT', b'LOAD', script])", "def runScript(self, script):\n c = self\n game = self.game\n app = self.game.app\n shell = self.shell\n sprite = self.sprite\n s = shell\n self = self.env\n exec(open(\"script/\" + script).read())", "def SetScriptCreatedFrom(self, script):\n return _gmat_py.GmatBase_SetScriptCreatedFrom(self, script)", "def script(script_file, **context):\n with open(script_file) as f:\n return {\n \"class\": \"script\",\n \"script\": f.read(),\n \"context\": context\n }", "def delScript(self):\r\n\r\n if not self.isClosed:\r\n if len(self.__script) > 0:\r\n self.__pha = ''\r\n else:\r\n raise HDDOPermissionException('Tried to remove not added script from a HealthDominoDataObject.')\r\n else:\r\n raise HDDOPermissionException('Tried to remove script from a closed HealthDominoDataObject.')", "def prepare_script(i):\n\n # Check vars\n if 'script_name' not in i: return {'cm_return':1, 'cm_error':'\"script_name\" is not defined in \"code prepare_script\"'}\n if 'target_os_uoa' not in i: return {'cm_return':1, 'cm_error':'\"target_os_uoa\" is not defined in \"code prepare_script\"'}\n\n # Prepare path\n p=''\n if 'cm_path' in i and i['cm_path']!='':\n p=os.path.join(i['cm_path'], i['script_name'])\n else:\n p=i['script_name']\n\n # Load OS\n ii={'cm_run_module_uoa':ini['cfg']['cm_modules']['os'],\n 'cm_action':'load',\n 'cm_data_uoa':i['target_os_uoa']}\n r=cm_kernel.access(ii)\n if r['cm_return']>0: return r\n\n target_os_cfg=r['cm_data_obj']['cfg']\n target_os_path=r['cm_path']\n target_os_uid=r['cm_uid']\n target_os_alias=r['cm_alias']\n\n if i.get('skip_extension','')!='yes':\n p+=target_os_cfg['script_ext']\n\n try:\n f=open(p, 'w')\n if 'batch_prefix' in target_os_cfg and target_os_cfg['batch_prefix']!='': f.write(target_os_cfg['batch_prefix'])\n\n if 'rem' in target_os_cfg: f.write('\\n'+target_os_cfg['rem']+' target_os_uoa: '+i['target_os_uoa']+'\\n')\n\n if 'set_env1' in i and len(i['set_env1'])>0:\n f.write('\\n')\n if 'rem' in target_os_cfg: f.write(target_os_cfg['rem']+' Set global parameters\\n')\n r=prepare_env_vars({'array':i['set_env1'],\n 'prefix':target_os_cfg['env_set'],\n 'separator':target_os_cfg['env_separator'],\n 'quotes':target_os_cfg['env_quotes']})\n if r['cm_return']>0: return r\n for x in r['cm_array']: f.write(x+'\\n')\n\n if 'code_deps' in i and len(i['code_deps'])>0:\n r=prepare_env_for_all_codes({'code_deps':i['code_deps'],\n 'os_uoa':i['target_os_uoa']})\n if r['cm_return']>0: return r\n f.write('\\n')\n if 'rem' in target_os_cfg: f.write(target_os_cfg['rem']+' Prepare code dependencies\\n')\n for x in r['cm_array']: f.write(x+'\\n')\n\n if 'set_env2' in i and len(i['set_env2'])>0:\n f.write('\\n')\n if 'rem' in target_os_cfg: f.write(target_os_cfg['rem']+' Set execution parameters\\n')\n r=prepare_env_vars({'array':i['set_env2'],\n 'prefix':target_os_cfg['env_set'],\n 'separator':target_os_cfg['env_separator'],\n 'quotes':target_os_cfg['env_quotes']})\n if r['cm_return']>0: return r\n for x in r['cm_array']: f.write(x+'\\n')\n\n if 'run_commands_before' in i:\n for x in i['run_commands_before']: 
f.write(x+'\\n')\n\n if 'run_cmd' in i:\n f.write('\\n')\n if 'rem' in target_os_cfg: f.write(target_os_cfg['rem']+' Executable\\n')\n f.write(i['run_cmd'].strip()+'\\n')\n\n if 'run_commands_after' in i:\n for x in i['run_commands_after']: f.write(x+'\\n')\n\n f.close()\n except Exception as e:\n return {'cm_return':1, 'cm_error':'error while preparing script in \"code prepare_script\" ('+format(e)+')'}\n\n return {'cm_return':0, 'cm_path':p}" ]
[ "0.6213111", "0.60115063", "0.5875983", "0.55819553", "0.55543226", "0.54968554", "0.54652226", "0.5443183", "0.5422984", "0.53906035", "0.5321932", "0.5276195", "0.5085724", "0.50554866", "0.5024538", "0.4966637", "0.4954975", "0.49315414", "0.4907256", "0.4866486", "0.48632634", "0.48540252", "0.48507568", "0.4791757", "0.47685963", "0.47669765", "0.47345084", "0.47311637", "0.47082108", "0.4693083" ]
0.73608357
0
This function returns the run status of a stored script as well as the current values of parameters 0 to 9. The function returns greater than or equal to 0 if OK, otherwise PI_BAD_SCRIPT_ID. The run status may be PI_SCRIPT_HALTED PI_SCRIPT_RUNNING PI_SCRIPT_WAITING PI_SCRIPT_FAILED It returns a tuple of run status and a parameter list tuple. If the script does not exist a negative error code will be returned in which case the parameter tuple will be empty.
def script_status(script_id):
   status = _u2i(_pigpio_command(_control, _PI_CMD_PROCP, script_id, 0))
   if status >= 0:
      param = struct.unpack('IIIIIIIIII', _control.recv(40))
      return status, param
   return status, ()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _run(self, script, args):\n proc = subprocess.Popen([script] + args,\n stdout=subprocess.PIPE\n )\n\n stdout = proc.communicate()[0]\n retcode = proc.returncode\n\n return stdout, retcode", "def get_status(self):\n if self._is_running():\n return \"RUNNING\"\n elif self._has_error():\n # The run started but failed\n return \"FAILED\"\n elif self._is_finished():\n # The run was finished\n return \"FINISHED\"\n elif self.current_step() >= 0:\n # The run started at some point but was not completed\n return \"INCOMPLETE\"\n else:\n # The run did not start\n return \"NOT STARTED\"", "def _get_execution_status(self, execution_id, document_name=None):\n execution = self.ssm_client.get_automation_execution(\n AutomationExecutionId=execution_id\n )\n # TODO(semiond): we can remove document name as parameter, can take it by execution id.\n document_name = document_name if document_name else execution['AutomationExecution']['DocumentName']\n step_executions = execution['AutomationExecution']['StepExecutions']\n step = self._get_step_by_status(step_executions, 'InProgress')\n if step:\n step_name = step['StepName']\n self.logger.info(f'Waiting SSM document step [{document_name}>{step_name}] to be completed: '\n f'{self.get_execution_step_url(execution_id, step_name, step_executions)}')\n return execution['AutomationExecution']['AutomationExecutionStatus']", "def process_ResultCheck(self):\n try:\n cmd = self.ExecutionTask.get_param().split(',')\n logging.debug(\"%s-%s-%s-%s-%s\" % ( TestScriptSymbolTable.get_value_from_sym_tab(cmd[0], TestScriptSymbolTable.test_script_sym_tab),cmd[0], cmd[1], cmd[2], cmd[3]))\n\n checkval = cmd[0].split('!') \n \n cval = TestScriptSymbolTable.get_value_from_sym_tab(checkval[1], TestScriptSymbolTable.capi_cmd_ret_sym_tab)\n\n if int(cval) >= int(cmd[1]):\n result = cmd[2]\n else:\n result = cmd[3]\n\n logging.info(\"\\nRESULT CHECK---> %15s\", result) \n self.setTestResult(result)\n \n #if result == 'FAIL':\n if 'FAIL' in result:\n self.setStatus('STOP')\n self.setTestResult('FAIL')\n except OSError:\n logging.info(\"\\nException - ResultCheck\")", "def execute_success(self, *args, **kwargs):\n return 0, self.shell_output, None", "def get_status(self):\n\t\treturn call_sdk_function('PrlJob_GetStatus', self.handle)", "def _get_module_return_code(self, status, module):\n\n # initialize return code array\n arr = []\n check_failed = False\n\n if module not in status.data:\n # assume running\n arr = [1]\n else:\n for job_name in status.data[module].keys():\n if job_name != 'pipeline_index':\n\n # update the job status and get the status string\n status._update_job_status(module, job_name)\n js = status.data[module][job_name]['job_status']\n\n if js == 'successful':\n arr.append(0)\n elif js == 'failed':\n arr.append(2)\n check_failed = True\n elif js is None:\n arr.append(3)\n else:\n arr.append(1)\n\n status._dump()\n\n return_code = self._parse_code_array(arr)\n\n status = self.RETURN_CODES[return_code]\n fail_str = ''\n if check_failed and status != 'failed':\n fail_str = ', but some jobs have failed'\n logger.info('Module \"{}\" for job \"{}\" is {}{}.'\n .format(module, self._config.name, status, fail_str))\n\n return return_code", "def get_status(self, scenario_id):\n table = self.get_execute_table()\n try:\n return table.loc[int(scenario_id), \"status\"]\n except KeyError:\n raise Exception(f\"Scenario not found in execute list, id = {scenario_id}\")", "def run_script (script, *l) :\n if not os.path.exists (script) :\n raise PQHException (\"file %s not found\" 
% script)\n py = get_interpreter_path ()\n cmd = \"%s %s\" % (py, script)\n if len (l) > 0 :\n cmd += \" \" + \" \".join ( [str (x) for x in l])\n out,err = run_cmd (cmd)\n return out,err", "def RetrievePipelineToolStatus( raiseOnExitCode=False ):\n global submissionInfo\n\n scenePath = NodegraphAPI.GetSourceFile()\n\n jobWriterPath = os.path.join(submissionInfo[\"RepoDirs\"][\"submission/Integration/Main\"], \"JobWriter.py\")\n argArray = [\"-ExecuteScript\", jobWriterPath, \"Katana\", \"--status\", \"--scene-path\", scenePath]\n statusMessage = CallDeadlineCommand(argArray, hideWindow=False, raiseOnExitCode=raiseOnExitCode)\n return statusMessage", "def count_scripts_without_module_load():\n repos_scripts = '/apps/leuven/icts/jobscripts/2017-10-10'\n script_files = glob.glob(repos_scripts + '/**/*.SC', recursive=True)\n n_scr = len(script_files)\n\n n_OK, n_fail = 0, 0 # for jobs that failed\n n_has, n_does_not = 0, 0 # for modules that have even a single \"module load ...\" \n for i, script_file in enumerate(script_files):\n try:\n with scripts.script(script_file) as scr:\n if scr.loaded:\n n_has += 1\n else:\n n_does_not += 1\n n_OK += 1\n except: # FileNotFoundError or etc.\n pass\n# n_fail += 1\n\n n_mod = n_has + n_does_not\n p_has = n_has / n_mod * 1e2\n p_does_not = n_does_not / n_mod * 1e2\n\n n_tot = n_scr\n n_fail= n_tot - n_OK\n p_OK = n_OK / n_tot * 1e2\n p_fail= n_fail / n_tot * 1e2\n\n print('\\nn_scr={0}'.format(n_scr))\n print('n_OK:{0}, n_fail:{1}'.format(n_OK, n_fail))\n print('n_has:{0}, n_does_not:{1}'.format(n_has, n_does_not))\n\n print('\\n\"{0}\" scripts have module loads and \"{1}\" do not load modules'.format(n_has, n_does_not))\n print('\"{0:.2f}%\" have module loads, and \"{1:.2f}%\" do not load modules'.format(p_has, p_does_not))\n print('\"{0:.2f}%\" of jobs succeeded, out of \"{1}\" jobs'.format(p_OK, n_tot))\n\n try: \n assert n_OK == n_has + n_does_not\n print('Assert: numbers match as expected\\n')\n return -1\n except AssertionError:\n print('AssertionError: numbers do not match: n_tot != n_has + n_does_not\\n')\n return 0", "def script_execution_get() -> str | None:\n if (data := script_execution_cv.get()) is None:\n return None\n return data.script_execution", "def check_terraform_run_status(**kwargs):\n\n ti: TaskInstance = kwargs[\"ti\"]\n run_id = ti.xcom_pull(key=TerraformTasks.XCOM_TERRAFORM_RUN_ID, task_ids=TerraformTasks.TASK_ID_RUN)\n project_id = Variable.get(AirflowVars.PROJECT_ID)\n\n token = BaseHook.get_connection(AirflowConns.TERRAFORM).password\n terraform_api = TerraformApi(token)\n\n run_status = None\n while run_status not in [\n \"planned_and_finished\",\n \"applied\",\n \"errored\",\n \"discarded\",\n \"canceled\",\n \"force_canceled\",\n ]:\n run_details = terraform_api.get_run_details(run_id)\n run_status = run_details[\"data\"][\"attributes\"][\"status\"]\n\n logging.info(f\"Run status: {run_status}\")\n comments = f\"Terraform run status: {run_status}\"\n logging.info(f'Sending slack notification: \"{comments}\"')\n slack_hook = create_slack_webhook(comments, project_id, **kwargs)\n slack_hook.execute()", "def explain_task_exit_status(wq_task, parsl_id):\n\n status = wq_task.return_status # aka exit code\n wq_result = wq_task.result\n\n if status != 0:\n reason = \"Wrapper Script Failure: \"\n if status == 1:\n reason += \"problem parsing command line options\"\n elif status == 2:\n reason += \"problem loading function data\"\n elif status == 3:\n reason += \"problem remapping file names\"\n elif status == 4:\n reason += 
\"problem writing out function result\"\n else:\n reason += \"unable to process wrapper script failure with status = {}\".format(status)\n reason += \"\\nTrace:\\n\" + str(wq_task.output)\n# WorkQueue system failure\n else:\n reason = \"work queue result: \"\n if wq_result == wq.WORK_QUEUE_RESULT_SUCCESS:\n reason += \"succesful execution\"\n elif wq_result == wq.WORK_QUEUE_RESULT_INPUT_MISSING:\n reason += \"missing input file\"\n elif wq_result == wq.WORK_QUEUE_RESULT_OUTPUT_MISSING:\n reason += \"unable to generate output file\"\n elif wq_result == wq.WORK_QUEUE_RESULT_STDOUT_MISSING:\n reason += \"stdout has been truncated\"\n elif wq_result == wq.WORK_QUEUE_RESULT_SIGNAL:\n reason += \"task terminated with a signal\"\n elif wq_result == wq.WORK_QUEUE_RESULT_RESOURCE_EXHAUSTION:\n reason += \"task used more resources than requested\"\n elif wq_result == wq.WORK_QUEUE_RESULT_TASK_TIMEOUT:\n reason += \"task ran past the specified end time\"\n elif wq_result == wq.WORK_QUEUE_RESULT_UNKNOWN:\n reason += \"result could not be classified\"\n elif wq_result == wq.WORK_QUEUE_RESULT_FORSAKEN:\n reason += \"task failed, but not a task error\"\n elif wq_result == wq.WORK_QUEUE_RESULT_MAX_RETRIES:\n reason += \"unable to complete after specified number of retries\"\n elif wq_result == wq.WORK_QUEUE_RESULT_TASK_MAX_RUN_TIME:\n reason += \"task ran for more than the specified time\"\n elif wq_result == wq.WORK_QUEUE_RESULT_DISK_ALLOC_FULL:\n reason += \"task needed more space to complete task\"\n elif wq_result == wq.WORK_QUEUE_RESULT_RMONITOR_ERROR:\n reason += \"task failed because the monitor did not produce an output\"\n else:\n reason += \"unable to process Work Queue system failure\"\n return reason", "def checkScriptParses(scriptVersion, script):\n tokenizer = ScriptTokenizer(scriptVersion, script)\n while tokenizer.next():\n pass\n return tokenizer.err", "def decode_wait_status(sts):\r\n if os.WIFEXITED(sts):\r\n es = os.WEXITSTATUS(sts) & 0xffff\r\n msg = \"exit status %s\" % es\r\n return es, msg\r\n elif os.WIFSIGNALED(sts):\r\n sig = os.WTERMSIG(sts)\r\n msg = \"terminated by %s\" % signame(sig)\r\n if hasattr(os, \"WCOREDUMP\"):\r\n iscore = os.WCOREDUMP(sts)\r\n else:\r\n iscore = sts & 0x80\r\n if iscore:\r\n msg += \" (core dumped)\"\r\n return -1, msg\r\n else:\r\n msg = \"unknown termination cause 0x%04x\" % sts\r\n return -1, msg", "def get_ret_code(self):\n\t\treturn call_sdk_function('PrlJob_GetRetCode', self.handle)", "def database_script_check(table, bs_id, attempt_num):\n con = lite.connect(DB_FILE, timeout = TIMEOUT)\n con.row_factory = lite.Row\n with con:\n cur = con.cursor()\n #get script data\n cur.execute(\"SELECT * FROM {0} WHERE AttemptNum=? 
AND BSID=?\".format(table),\n (attempt_num, bs_id))\n rows = cur.fetchall()\n\n error_data = []\n for row in rows:\n if row['Started'] == None or row['Ended'] == None or row['Exit'] != 0:\n error_data.append([row['Command'], row['Arguments'], row['ExpProc']])\n return error_data", "def get_status(self, run_id):\n return self.client._perform_json(\n \"GET\", \"/projects/%s/runnables/%s/state/%s\" % (self.project_key, self.runnable_type, run_id))", "def run_script(command):\n process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)\n output = process.communicate()[0]\n success = process.poll() == 0\n return (success, output)", "def get(self):\n get_url = '{0}/{1}'.format(self.url, self.script_name)\n resp = False\n try:\n log.debug('Checking for script: {0}'.format(self.script_name))\n req = requests.get(get_url, auth=(self.username, self.password))\n if req.status_code == 204 or 200:\n resp = req.content\n return resp\n log.error('Failed checking for script: {0} Reason: {1}'.format(self.script_name, req.status_code))\n except Exception as e:\n log.error('Failed checking for script: {0} Reason: {1}'.format(self.script_name, e))\n\n return resp", "def getErrors(script):\n\tp = subprocess.Popen(['./'+script], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\tout, err = p.communicate()\n\treturn err", "def status(self):\n st = ct.c_int()\n self.lib.GetStatus(ct.pointer(st))\n if st.value == 20073:\n return 'Camera is idle, waiting for instructions.'\n elif st.value == 20074:\n return 'Camera is executing the temperature cycle.'\n elif st.value == 20072:\n return 'Acquisition in progress.'\n elif st.value == 20023:\n return 'Unable to meet accumulate cycle time.'\n elif st.value == 20022:\n return 'Unable to meet kinetic cycle time.'\n elif st.value == 20013:\n return 'Unable to communicate with card.'\n elif st.value == 20018:\n return ('Computer unable to read the data via the ISA slot at the '\n 'required rate.')\n elif st.value == 20026:\n return 'Overflow of the spool buffer.'", "def _check_step_completed(self, i):\n\n module, _ = self._get_command_config(i)\n status = self._get_status_obj()\n submitted = self._check_jobs_submitted(status, module)\n if not submitted:\n return_code = 1\n else:\n return_code = self._get_module_return_code(status, module)\n\n return return_code", "def _get_execution_step_status(self, execution_id, step_name):\n execution = self.ssm_client.get_automation_execution(\n AutomationExecutionId=execution_id\n )\n step_executions = execution['AutomationExecution']['StepExecutions']\n step = self._get_step_by_name(step_executions, step_name)\n if step:\n return step['StepStatus']\n return 'Pending'", "def t_status_process(self, *args, **kwargs):\n\n self.dp.qprint(\"In status process...\")\n\n d_state = self.job_state(*args, **kwargs)\n\n d_ret = d_state['d_ret']\n b_status = d_state['status']\n\n l_keys = d_ret.items()\n l_status = []\n for i in range(0, int(len(l_keys)/2)):\n b_startEvent = d_ret['%s.start' % str(i)]['startTrigger'][0]\n try:\n endcode = d_ret['%s.end' % str(i)]['returncode'][0]\n except:\n endcode = None\n\n if endcode == None and b_startEvent:\n l_status.append('started')\n if not endcode and b_startEvent and type(endcode) is int:\n l_status.append('finishedSuccessfully')\n if endcode and b_startEvent:\n l_status.append('finishedWithError')\n\n self.dp.qprint('b_startEvent = %d' % b_startEvent)\n self.dp.qprint(endcode)\n self.dp.qprint('l_status = %s' % l_status)\n\n 
d_ret['l_status'] = l_status\n return {\"d_ret\": d_ret,\n \"status\": b_status}", "def get_run_status(self, run_id):\n postresult = requests.get(\n f\"{self.proto}://{self.host}/ga4gh/wes/v1/runs/{run_id}/status\",\n headers=self.auth,\n )\n return wes_reponse(postresult)", "def get_games_in_progress(self):\n gip_url = 'scores/json/AreAnyGamesInProgress?key=<key>'\n contents = urllib.request.urlopen(self._base_url + gip_url.replace('<key>', self._ak, 1))\n return contents.getcode(), contents.read().decode(\"utf-8\")", "def get_current_status(cls):\n from sauna.plugins.base import Plugin\n from sauna import check_results_lock, check_results\n\n def reduce_status(accumulated, update_value):\n if update_value.status > Plugin.STATUS_CRIT:\n return accumulated\n return accumulated if accumulated > update_value.status else \\\n update_value.status\n\n with check_results_lock:\n code = reduce(reduce_status, check_results.values(), 0)\n\n return Plugin.status_code_to_str(code), code", "def status(self) -> pulumi.Output['outputs.JobStatus']:\n return pulumi.get(self, \"status\")" ]
[ "0.5598393", "0.55753976", "0.54773116", "0.544132", "0.5344359", "0.53350085", "0.52775574", "0.52622855", "0.5262133", "0.5236368", "0.5199259", "0.51738703", "0.51600146", "0.5136024", "0.5073093", "0.50615144", "0.5027628", "0.5024301", "0.50133663", "0.50084287", "0.5005301", "0.5000314", "0.49975637", "0.498941", "0.49850893", "0.497417", "0.49475148", "0.49472246", "0.49361646", "0.49187484" ]
0.7176214
0
Stops a running script. Returns 0 if OK, otherwise PI_BAD_SCRIPT_ID.
def stop_script(script_id): return _u2i(_pigpio_command(_control, _PI_CMD_PROCS, script_id, 0))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def end_script(status):\n if status is not 0:\n print(\"Failure occurred: \" + str(status))\n sys.exit(status)", "def script_kill(self):\n return self._execute([b'SCRIPT', b'KILL'], b'OK')", "def stop(self):\n if self._proc_is_alive():\n\n if os.name == 'nt':\n # os.killpg is not available on Windows\n # See: https://bugs.python.org/issue5115\n self._proc.kill()\n\n # When we kill the child process on Windows the return code\n # gets set to 1, so we want to reset the return code back to 0\n # in order to avoid creating logging output in the download(...)\n # method\n self._proc.returncode = 0\n else:\n os.killpg(self._proc.pid, signal.SIGKILL)\n\n self._set_returncode(self.STOPPED)", "async def stop(self, program_id: int) -> dict[str, Any]:\n return await self.controller.request(\n \"post\", f\"program/{program_id}/stop\", json={\"pid\": program_id}\n )", "def test_request_script_stop(self):\n session = _create_test_session()\n mock_scriptrunner = MagicMock(spec=ScriptRunner)\n session._scriptrunner = mock_scriptrunner\n\n session.request_script_stop()\n mock_scriptrunner.request_stop.assert_called()", "def EndScript(self):\r\n self.__script.close()", "def InterfaceClientStop(self, exitCode=200): \n pass", "def TerminalClientStop(self, exitCode=200):\n pass", "def stop(self):\n\n with open(self.pidfile, 'r') as pidfile:\n pid = int(pidfile.read().strip())\n\n proc = psutil.Process(pid)\n proc.terminate()", "def stop(self):\n return self._send_command(\"stop\")", "def __abort_script(message):\n print(message)\n sys.exit()", "def stop(self) -> str:\n return self.rpc_call(\"stop\")", "def stop_scripts():\n print \"*** WARNING ***: This is about to kill all python processes\"\n run(\"killall python\")", "def delete_script(script_id):\n return _u2i(_pigpio_command(_control, _PI_CMD_PROCD, script_id, 0))", "def stop_execution(self):\n self.send_message(\"control.stop\",None)", "def test_request_script_stop_no_scriptrunner(self):\n session = _create_test_session()\n session._scriptrunner = None\n\n # Nothing else to do here aside from ensuring that no exception is thrown.\n session.request_script_stop()", "def stop(self):\n if not self.process_pid:\n raise Exception('why is this being called? 
%s' % self.server_name)\n\n if self.stop_kill:\n os.kill(self.process_pid, signal.SIGTERM)\n rc = wait_for_fork(self.process_pid, raise_error=False)\n return (rc, '', '')", "def stop(self):\n self.scion_sh('stop')", "def quit(exitcode = EXIT_CODE_OK):\n global _running\n global _exitcode\n _exitcode = exitcode\n _running = False", "def stop(self):\n response = self._shell_client.exec_command('{} stop'.format(self._executable_path), async=False)\n\n if not response.is_success():\n self._logger.error('clusterrunner stop failed on host {} with output: {}, error: {}'.format(\n self.host, response.raw_output, response.raw_error))", "def stop(self):\n try:\n self.process.terminate()\n self.process = None\n except AttributeError:\n return", "def stop_procedure(self):\n pass", "async def stop(self):\n await self.pause()\n return await self.send_command_and_read_reply(\n Protocol1Command(command=\"\", execution_command=\"V\")\n )", "def stop(self):\n if self._process is not None:\n self._process.terminate()", "def stop(self, code=None):\n\n if not self.running:\n return\n\n self._running = False\n\n self.fire(stopped(self))\n\n if self.root._executing_thread is None:\n for _ in range(3):\n self.tick()\n\n if code is not None:\n raise SystemExit(code)", "def stop(self):\n # trying this instead of SIGTERM\n # http://stackoverflow.com/a/6659191/3380530\n # self._process.send_signal(SIGINT)\n # Or not. SIGINT doesn't exist on Windows\n self._process.terminate()", "def stop():\n server = current_server()\n server.stop()", "def stop(self, _id):\n\n try:\n UpstartJob(_id).stop()\n except DBusException as e:\n raise ServiceOperationError(e)", "def stop(self):\n \n\n if os.path.isfile(self.pidfilename):\n\n with open(self.pidfilename) as f:\n data = json.load(f)\n pid = data['pid']\n os.kill(int(pid), signal.SIGTERM)\n\n # Check that the process has been killed\n # Give up after 15 seconds\n for i in range(15):\n if int(pid) not in psutil.pids():\n\n return True\n time.sleep(1)\n return False\n\n # If the daemon is not currently running, do nothing\n else:\n log(\"The daemon is not currently running\")", "def stopwasp():\n\n\trespond = send_command('stopwasp')" ]
[ "0.6652773", "0.63040155", "0.59945965", "0.59253967", "0.58986974", "0.589822", "0.58600795", "0.5851952", "0.5836521", "0.57976264", "0.57832086", "0.5754656", "0.5747786", "0.57075477", "0.5694859", "0.5670879", "0.56373674", "0.5636533", "0.55989164", "0.5597186", "0.5588195", "0.558453", "0.55661494", "0.5550295", "0.5548976", "0.55307484", "0.5511617", "0.5499017", "0.5495502", "0.5493368" ]
0.7541988
0
Deletes a stored script. Returns 0 if OK, otherwise PI_BAD_SCRIPT_ID.
def delete_script(script_id): return _u2i(_pigpio_command(_control, _PI_CMD_PROCD, script_id, 0))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete(self):\n delete_url = '{0}/{1}'.format(self.url, self.script_name)\n resp = False\n if self.get():\n log.debug('Deleting script: {0}'.format(self.script_name).format(self.script_name))\n req = requests.delete(delete_url, auth=(self.username, self.password))\n if req.status_code == 204 or 200:\n resp = req.content\n return resp\n log.error('Failed deleting script: {0} Reason: {1}'.format(self.script_name, req.status_code))\n\n return resp", "def delScript(self):\r\n\r\n if not self.isClosed:\r\n if len(self.__script) > 0:\r\n self.__pha = ''\r\n else:\r\n raise HDDOPermissionException('Tried to remove not added script from a HealthDominoDataObject.')\r\n else:\r\n raise HDDOPermissionException('Tried to remove script from a closed HealthDominoDataObject.')", "def script_delete(ctx: click.Context, name):\n subcommand_script.cmd_delete(ctx.obj, name)", "def delete_site_script(context, _id):\n utility = SiteScriptUtility(context)\n payload = {\n \"id\": _id,\n }\n qry = ServiceOperationQuery(utility, \"DeleteSiteScript\", None, payload, None, None)\n qry.static = True\n context.add_query(qry)\n return utility", "def deletescript(self, name):\n code, data = self.__send_command(\n \"DELETESCRIPT\", [name.encode(\"utf-8\")])\n if code == \"OK\":\n return True\n return False", "def script_kill(self):\n return self._execute([b'SCRIPT', b'KILL'], b'OK')", "def remove_kill_script():\n try:\n os.unlink('kill_script.sh')\n except:\n pass", "def _delete(filename):\n return os.remove(filename)", "def delete_asset(location, filename):\r\n try:\r\n content = Transcript.get_asset(location, filename)\r\n contentstore().delete(content.get_id())\r\n log.info(\"Transcript asset %s was removed from store.\", filename)\r\n except NotFoundError:\r\n pass\r\n return StaticContent.compute_location(location.course_key, filename)", "def delete(self, handle):\n self.LogCommand()\n tclcode = \"stc::delete \" + handle\n\n result = self.Exec(tclcode)\n logging.debug(\" - Python result - \" + str(result))\n return result", "def delete():\n id_num = int(input('Enter the ID number of the item you wish to delete\\n'))\n db_actions.remove(id_num)", "def delete(self):\n self.oxdb.execute(DELETE, self.variable_name, commit=True)\n self._exists = None", "def salir_del_programa():\n import sys\n sys.exit(0)", "def remove_transcripts_from_store(_step, subs_id):\r\n filename = 'subs_{0}.srt.sjson'.format(subs_id.strip())\r\n content_location = StaticContent.compute_location(\r\n world.scenario_dict['COURSE'].id,\r\n filename\r\n )\r\n try:\r\n content = contentstore().find(content_location)\r\n contentstore().delete(content.get_id())\r\n print('Transcript file was removed from store.')\r\n except NotFoundError:\r\n print('Transcript file was NOT found and not removed.')", "def test_run_script(self):\n script = 'var %s = {foo: \"foo\"};' % _global\n\n utils.run_script(script)\n self.assertEqual('foo', utils.run_script('%s.foo' % _global))\n\n # Cleanup\n self.assertEqual('true', utils.run_script('delete %s.foo' % _global))", "def _delete():\n\tquery = myTaskSession.query(WorkToolkitDB.db.Task)\n\n\tIDStr = myOpt.id\n\tIDs = re.split('\\s*,\\s*', IDStr)\n\n\tif len(IDs) == 0:\n\t\tprint('ERR: no deleting id input')\n\t\treturn 1\n\n\tfor ID in IDs:\n\t\tmyTask = query.get(ID)\n\t\tmyTaskSession.delete(myTask)\n\n\t\n\tmyTaskSession.commit()\n\n\treturn 0", "def delete(self, asset_uid):\n Script._validate_type(asset_uid, u'asset_uid', STR_TYPE, True)\n if (self._client.CLOUD_PLATFORM_SPACES or 
self._client.ICP_PLATFORM_SPACES) and \\\n self._if_deployment_exist_for_asset(asset_uid):\n raise WMLClientError(\n u'Cannot delete script that has existing deployments. Please delete all associated deployments and try again')\n\n if not self._ICP:\n response = requests.delete(self._href_definitions.get_asset_href(asset_uid), params=self._client._params(),\n headers=self._client._get_headers())\n else:\n response = requests.delete(self._href_definitions.get_asset_href(asset_uid), params=self._client._params(),\n headers=self._client._get_headers(), verify=False)\n if response.status_code == 200:\n return self._get_required_element_from_response(response.json())\n else:\n return self._handle_response(204, u'delete assets', response)", "def test_do_delete_non_existent_id(test_dao):\r\n DUT = dtmFunction(test_dao, test=True)\r\n DUT.do_select_all(revision_id=1)\r\n\r\n _error_code, _msg = DUT.do_delete(300)\r\n\r\n assert _error_code == 2005\r\n assert _msg == (\"RAMSTK ERROR: Attempted to delete non-existent \"\r\n \"Function ID 300.\")", "def lua_delete_record(self, lua):\n assert isinstance(lua, str)\n\n manager = transaction.TransactionManager.get_instance(self.redis)\n\n lines = (\n self.lua_get_lua_record(lua),\n \"local record_to_delete = cjson.decode(irecords[1])\",\n\n manager.lua_transaction_info(),\n \"if row_is_locked(record_to_delete, xid, xids) then\",\n \" error('Transaction failed. Will ROLLBACK.')\",\n \"end\",\n\n \"if row_is_visible(record_to_delete, xid, xids) then\",\n \" record_to_delete[':xex'] = %d\" % self._xid(),\n \" redis.call('ZREMRANGEBYSCORE', '%s', %s, %s)\" % (\n self.redis_key(),\n lua,\n lua,\n ),\n \" redis.call('ZADD', '%s', tostring(%s), cjson.encode(record_to_delete))\" % (\n self.redis_key(),\n lua,\n ),\n \"end\",\n )\n\n return '\\n'.join(lines)", "def test_6d_delete_file(self):\n if (not GST.logged_in) or (not GST.data_testing_swift_mounted):\n raise unittest.SkipTest(\"Skipped for failed login or failed mounting container.\")\n elif not GST.deleting_data_test_ready:\n raise unittest.SkipTest(\"Skipped for failed to prepare deleting test.\")\n self.dismiss_dialogs()\n function = js_func[\"delete\"] % GST.gs_file_paths[\"file_to_delete_path\"]\n try:\n self.send_request(function, \"delete_data()\")\n except Exception as e:\n raise DeleteException(e.__str__()) \n try:\n response = self.get_response()\n assert \"Success\" in response\n self.refresh_page()\n except AssertionError:\n raise DeleteException(response)", "def dal_delete(key):\n global store\n return store.delete(urllib.quote(key))", "def delete_command(arguments: List[str]) -> None:\n if len(arguments) != 2:\n print('Required 1 argument for create command') # noqa: WPS421\n return\n token = token_load.load()\n logic.delete(token, gist_id=arguments[1])", "async def _delete(self, key):\n return 1 if await self.client.delete(key) else 0", "def delete():", "def delData(self, ide = None):\r\n try:\r\n if ide is not None:\r\n self.cursor.execute(\"DELETE FROM DATAS WHERE ID = %s;\" %(ide))\r\n return True\r\n else:\r\n return False\r\n except:\r\n return False", "def delete_item(id):\n return '', 201", "def delete(self):\n # gid must be specified for deletion\n gid = self.get_query_argument('gid')\n self.write(self._rpc.aria2.remove(self._token, gid))", "def delete(self):\n self.parser.add_argument('lp_id',\n help=\"Language pack id\")\n args = self.parser.parse_args()\n self.client.languagepacks.delete(lp_id=args.lp_id)", "def test_do_delete(test_dao):\r\n DUT = dtmFunction(test_dao, 
test=True)\r\n DUT.do_select_all(revision_id=1)\r\n DUT.do_insert(revision_id=1, parent_id=1)\r\n\r\n _error_code, _msg = DUT.do_delete(DUT.last_id)\r\n\r\n assert _error_code == 0\r\n assert _msg == (\"RAMSTK SUCCESS: Deleting an item from the RAMSTK Program \"\r\n \"database.\")", "def delete(self):\r\n url = \"%s/delete\" % os.path.dirname(self._url)\r\n params = {\r\n 'f' : 'json',\r\n 'versionName' : self.properties.versionName,\r\n 'sessionID' : self._guid\r\n }\r\n res = self._con.post(url, params)\r\n if 'success' in res:\r\n return res['success']\r\n return res" ]
[ "0.7324965", "0.7057548", "0.6257923", "0.6118986", "0.61122984", "0.5730052", "0.5465458", "0.54205316", "0.5403492", "0.5357954", "0.5315152", "0.5308733", "0.5302029", "0.5299273", "0.5273089", "0.5259421", "0.52500176", "0.5235091", "0.5230419", "0.52181107", "0.5214595", "0.52054507", "0.51795757", "0.5171345", "0.51533663", "0.51187545", "0.51142174", "0.5113659", "0.5106521", "0.5098526" ]
0.74092734
0
This function opens a gpio for reading serial data. Returns 0 if OK, otherwise PI_BAD_USER_GPIO, PI_BAD_WAVE_BAUD, or PI_GPIO_IN_USE. The serial data is held in a cyclic buffer and is read using gpioSerialRead(). It is the caller's responsibility to read data from the cyclic buffer in a timely fashion.
def serial_read_open(user_gpio, baud): return _u2i(_pigpio_command(_control, _PI_CMD_SLRO, user_gpio, baud))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def serial_read_close(user_gpio):\n return _u2i(_pigpio_command(_control, _PI_CMD_SLRC, user_gpio, 0))", "def read(gpio):\n return _u2i(_pigpio_command(_control, _PI_CMD_READ, gpio, 0))", "def serial_read(user_gpio):\n bytes = _u2i(_pigpio_command(_control, _PI_CMD_SLR, user_gpio, 10000))\n if bytes > 0:\n buf = \"\"\n while len(buf) < bytes: buf += _control.recv(bytes-len(buf))\n return bytes, buf\n return bytes, \"\"", "def notify_open():\n return _u2i(_pigpio_command(_control, _PI_CMD_NO, 0, 0))", "def wave_add_serial(user_gpio, baud, offset, data):\n # pigpio message format\n\n # I p1 user_gpio\n # I p2 len(data)\n ## extension ##\n # I baud\n # I offset\n # s data\n if len(data):\n extents = [struct.pack(\"I\", baud),struct.pack(\"I\", offset), data]\n return _u2i(_pigpio_command_ext(\n _control, _PI_CMD_WVAS, user_gpio, len(data), extents))\n else:\n return 0", "def connectFPGACL():\n global serialObject\n try:\n print(\"Establishing connection to FPGA ..\")\n serialObject = serial.Serial('COM12', 115200, serial.EIGHTBITS, serial.PARITY_NONE, serial.STOPBITS_ONE)\n print(\"Connection established ..\")\n except:\n print(\"Cant Open Specified Port\")", "def connectFPGA():\n global serialObject\n try:\n print(\"Establishing connection to FPGA ..\")\n serialObject = serial.Serial('COM12', 115200, serial.EIGHTBITS, serial.PARITY_NONE, serial.STOPBITS_ONE)\n print(\"Connection established ..\")\n except:\n print(\"Cant Open Specified Port\")", "def gpio_input(door: Door):\n input_state = GPIO.input(GPIO_PIN)\n if input_state:\n door.is_closed()\n else:\n door.is_opened()", "def test_read(self):\n with patch('RPi.GPIO.setmode') as mock_setmode:\n gpio = GPIODevice()\n with patch('RPi.GPIO.setup') as mock_setup:\n with patch('RPi.GPIO.input') as mock_input:\n mock_input.return_value = True\n value = gpio.read(0)\n with patch('RPi.GPIO.cleanup') as mock_cleanup:\n gpio.close()\n self.assertDictEqual(value, {\"value\": True})", "def open_serial(self):\n msg = (\"\"\"Failed to open serial port, check if \"\"\"\n \"\"\"winch panel is connected and retry\"\"\")\n\n try:\n if not self.serial.opened and self.port and self.brate:\n if not self.serial.open(self.port, self.brate, 1):\n raise Exception\n\n self.serial.opened = True\n except:\n self.dialog_critical(msg)\n return -1\n\n if not self.serial.start():\n self.dialog_critical(\"\"\"Communication not established, check if \"\"\"\n \"\"\"winch panel is connected and retry\"\"\")\n return -1\n\n self.serSeq = 0\n self.serTimer = QTimer(self)\n self.serTimer.timeout.connect(self.read_data)\n self.serTimer.start(300)", "def imu_fth_isr(gpio, level, tick):\n isr_time = time.time()\n\n # Sometimes INT1 can trigger again as the FIFO is being read and filled\n # back up at the same time. 
If the time since the last tick is less than\n # 0.1s then exit the ISR.\n global last_tick\n MIN_TICK_DIFF_US = 10**5 \n tick_diff = pigpio.tickDiff(last_tick, tick)\n print(f\"Time since last tick {tick_diff / 10**6} seconds\")\n if tick_diff < MIN_TICK_DIFF_US:\n return\n\n global fifo_start\n print(f\"Interrupt at {isr_time}\")\n print(f\"FIFO fill time: {isr_time - fifo_start:4.03f} seconds\")\n fifo_start = isr_time\n\n # Read FIFO status\n status1 = imu._fifo_status1\n status2 = imu._fifo_status2\n status3 = imu._fifo_status3\n status4 = imu._fifo_status4\n\n # Number of unread words (16 bits) \n unread_words = ((status2 & 0x0F) << 8) + status1\n print(f\"Words in FIFO: {unread_words}\")\n\n # Pattern index\n # In our case, the accelerometer and gyroscope data rates are equal, so the\n # pattern is in [0:5] where\n # 0 -> Gx\n # 1 -> Gy\n # 2 -> Gz\n # 3 -> Ax\n # 4 -> Ay\n # 5 -> Az\n pattern_index = (status4 << 8) + status3\n print(f\"Index of next reading: {pattern_index}\")\n\n # Read in multiples of 6, the number of readings from Gx to Az\n BYTES_PER_WORD = 2\n WORDS_PER_PATTERN = 6\n words_to_read = unread_words // WORDS_PER_PATTERN * WORDS_PER_PATTERN\n buffer_size = words_to_read * BYTES_PER_WORD\n buffer = bytearray(buffer_size)\n FIFO_DATA_OUT_L = bytearray(b'\\x3E')\n\n # Read FIFO data into buffer\n start_time = time.time()\n imu.i2c_device.write_then_readinto(FIFO_DATA_OUT_L, buffer)\n end_time = time.time()\n total_read_time = end_time - start_time\n print(f\"{buffer_size} bytes read in {total_read_time:.6f} seconds. {buffer_size/total_read_time:.0f} bytes/s\")\n\n # Read FIFO status\n status1 = imu._fifo_status1\n status2 = imu._fifo_status2\n status3 = imu._fifo_status3\n status4 = imu._fifo_status4\n unread_words = ((status2 & 0x0F) << 8) + status1\n print(f\"Words in FIFO: {unread_words}\")\n pattern_index = (status4 << 8) + status3\n print(f\"Index of next reading: {pattern_index}\")\n\n last_tick = tick\n\n # Print data\n PREVIEW_BYTES = 12\n print(f\"buffer = {buffer[:PREVIEW_BYTES].hex()} ... 
{buffer[-PREVIEW_BYTES:].hex()} | Len: {len(buffer)}\")\n data = [parse_fifo_data(buffer[i:i+2]) for i in range(0, len(buffer), 2)]\n print(f\"data = [{', '.join(map(str, data[:PREVIEW_BYTES]))}, ..., {', '.join(map(str, data[-PREVIEW_BYTES:]))}] | Len: {len(data)}\")\n\n print()", "def setup_gpio(self):\n try:\n pin = 4\n gpio = importlib.import_module('RPi.GPIO')\n gpio.setmode(gpio.BCM)\n gpio.setup(pin, gpio.IN, pull_up_down=gpio.PUD_UP)\n gpio.add_event_detect(pin, gpio.FALLING, callback=self.on_snap_pressed, bouncetime=200)\n except ImportError as e:\n self._logger.exception(e)\n print('raspi gpio module not found, continuing...')", "def read(self):\n self.pi.write(self.gpio, pigpio.LOW)\n time.sleep(0.017) # 17 ms\n self.pi.set_mode(self.gpio, pigpio.INPUT)\n self.pi.set_watchdog(self.gpio, 200)\n time.sleep(0.2)", "def read(pin):\n return _read_value(\"{0}/gpio{1}/value\".format(_path_prefix, pin))", "def gpio(self) -> int:", "def read_sensor(sensor: int, gpio_pin: int) -> (int, float, float, datetime):\r\n logging.debug('reading sensor')\r\n hum_rh, temp_c = Adafruit_DHT.read_retry(sensor, gpio_pin)\r\n if hum_rh is None or temp_c is None:\r\n logging.error(\"failed to read from the sensor\")\r\n return 1, 0, 0, datetime.now()\r\n logging.debug('sensor data: RH: {}, Tc: {}'.format(hum_rh, temp_c))\r\n return 0, hum_rh, temp_c, datetime.now()", "def gpio_read_digital(self, pin: int) -> bool:\n return bool(getrandbits(1))", "def open_serial_con(self):\n msg = (\"\"\"Failed to open serial port, check if \"\"\"\n \"\"\"winch panel is connected and retry\"\"\")\n\n try:\n if not self.serial.opened and self.port and self.brate:\n if not self.serial.open(self.port, self.brate, 1):\n raise Exception\n\n self.serial.opened = True\n # While acquiring it's not possible to calibrate\n self.menuCalibrate.setEnabled(False)\n except:\n self.dialog_critical(msg)\n return -1", "def gpioRequest():\n waterHeight,gateStatus = readingJson.serverReadGpioJson(\"192.168.42.3\",\"gpiodata.json\")\n return waterHeight,gateStatus", "def open_serial_port(serial_dev):\n # open serial port\n serial_port = serial.Serial(port=serial_dev, baudrate=9600, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, bytesize=serial.EIGHTBITS, timeout=3)\n # Flush the inputs and outputs\n serial_port.flushInput()\n serial_port.flushOutput()\n\n return serial_port", "def open_serial(self):\n self.port = serial.Serial(\n self.device,\n baudrate=SERIAL_BAUD,\n timeout=5.0,\n bytesize=serial.EIGHTBITS,\n parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE,\n xonxoff=False,\n rtscts=False,\n dsrdtr=False)\n\t\n self.port.flushInput()\n self.port.flushOutput()", "def read_from_gpio(self):\n # if (use_static):\n # if (self.device_id in static_values.keys()):\n # return static_values[self.device_id]\n # else:\n # return 1;\n \n # if (random.random()<0.2):\n # if (random.random()<0.5):\n # self._decrease_position_index()\n # else:\n # self._increase_position_index()\n # self._position_index = (self._position_index+1)%len(FakePositionDevice._values)\n\n # self.position = FakePositionDevice._values[self._position_index]\n mu = self._value\n sig = 1\n self._value = np.random.normal(mu,sig,1)[0]\n if self._value < self._min_value:\n self._value = self._min_value\n if self._value > self._max_value:\n self._value = self._max_value\n \n return self._value\n return 4\n # return FakePositionDevice._values[self._position_index]", "def setup(self):\n if not self._gpio_setup:\n if self._gpio is None:\n try:\n import RPi.GPIO as GPIO\n 
self._gpio = GPIO\n except ImportError:\n raise ImportError('This library requires the RPi.GPIO module\\nInstall with: sudo apt install python-rpi.gpio')\n self._gpio.setmode(self._gpio.BCM)\n self._gpio.setwarnings(False)\n self._gpio.setup(self.cs_pin, self._gpio.OUT)\n self._gpio.setup(self.dc_pin, self._gpio.OUT, initial=self._gpio.LOW, pull_up_down=self._gpio.PUD_OFF)\n self._gpio.setup(self.reset_pin, self._gpio.OUT, initial=self._gpio.HIGH, pull_up_down=self._gpio.PUD_OFF)\n self._gpio.setup(self.busy_pin, self._gpio.IN, pull_up_down=self._gpio.PUD_OFF)\n\n if self._spi_bus is None:\n import spidev\n self._spi_bus = spidev.SpiDev()\n\n self._spi_bus.open(0, self.cs_channel)\n self._spi_bus.no_cs = True\n self._spi_bus.max_speed_hz = 5000000\n\n self._gpio_setup = True\n\n self._gpio.output(self.reset_pin, self._gpio.LOW)\n time.sleep(0.1)\n self._gpio.output(self.reset_pin, self._gpio.HIGH)\n time.sleep(0.1)\n\n self._gpio.output(self.reset_pin, self._gpio.LOW)\n time.sleep(0.1)\n self._gpio.output(self.reset_pin, self._gpio.HIGH)\n\n self._busy_wait(1.0)\n\n # Sending init commands to display\n self._send_command(AC073TC1_CMDH, [0x49, 0x55, 0x20, 0x08, 0x09, 0x18])\n\n self._send_command(AC073TC1_PWR, [0x3F, 0x00, 0x32, 0x2A, 0x0E, 0x2A])\n\n self._send_command(AC073TC1_PSR, [0x5F, 0x69])\n\n self._send_command(AC073TC1_POFS, [0x00, 0x54, 0x00, 0x44])\n\n self._send_command(AC073TC1_BTST1, [0x40, 0x1F, 0x1F, 0x2C])\n\n self._send_command(AC073TC1_BTST2, [0x6F, 0x1F, 0x16, 0x25])\n\n self._send_command(AC073TC1_BTST3, [0x6F, 0x1F, 0x1F, 0x22])\n\n self._send_command(AC073TC1_IPC, [0x00, 0x04])\n\n self._send_command(AC073TC1_PLL, [0x02])\n\n self._send_command(AC073TC1_TSE, [0x00])\n\n self._send_command(AC073TC1_CDI, [0x3F])\n\n self._send_command(AC073TC1_TCON, [0x02, 0x00])\n\n self._send_command(AC073TC1_TRES, [0x03, 0x20, 0x01, 0xE0])\n\n self._send_command(AC073TC1_VDCS, [0x1E])\n\n self._send_command(AC073TC1_T_VDCS, [0x00])\n\n self._send_command(AC073TC1_AGID, [0x00])\n\n self._send_command(AC073TC1_PWS, [0x2F])\n\n self._send_command(AC073TC1_CCSET, [0x00])\n\n self._send_command(AC073TC1_TSSET, [0x00])", "def __init__(self, pi, gpio):\n\n self.pi = pi\n self.gpio = gpio\n\n self._start_tick = None\n self._last_tick = None\n self._low_ticks = 0\n self._high_ticks = 0\n\n pi.set_mode(gpio, pigpio.INPUT)\n\n self._cb = pi.callback(gpio, pigpio.EITHER_EDGE, self._cbf)", "def gpio_can_read_analogue(self, pin: int) -> bool:\n return pin > 5", "def Test_serial_open(self, mock_serial_port,\n mock_serial_port_path, mock_sys_call):\n mock_serial_port_path('ttyUSB0')\n self.portdetect = ['ttyUSB0', 'ttyUSB1']\n self.baudrate = 9600\n mock_serial_port_path('ttyUSB0')\n serial_port_connection(self.portdetect, baudrate=self.baudrate)\n # checking whether sys.exit(0) is called or not\n mock_sys_call.assert_not_called()", "def read_bytes(self, ctrl_pin):\n try:\n '''\n ctrl_pin1.value(0)\n time.sleep_ms(2)\n ctrl_pin1.value(1)\n time.sleep_ms(220)\n ctrl_pin1.value(0)\n temp = hspi.read(2)\n ctrl_pin1.value(1)\n '''\n pin_ = self.ctrl_pins[ctrl_pin]\n pin_.value(0)\n time.sleep_ms(2)\n pin_.value(1)\n time.sleep_ms(220)\n pin_.value(0)\n temp = self.hspi.read(2)\n pin_.value(1)\n except KeyError:\n print('requested pin not defined')\n temp = None\n return temp", "def read_dht11_dat():\n GPIO.setup(DHTPIN, GPIO.OUT)\n GPIO.output(DHTPIN, GPIO.HIGH)\n time.sleep(0.05)\n GPIO.output(DHTPIN, GPIO.LOW)\n time.sleep(0.02)\n GPIO.setup(DHTPIN, GPIO.IN, GPIO.PUD_UP)\n\n unchanged_count = 0\n 
last = -1\n data = []\n while True:\n current = GPIO.input(DHTPIN)\n data.append(current)\n if last != current:\n unchanged_count = 0\n last = current\n else:\n unchanged_count += 1\n if unchanged_count > MAX_UNCHANGE_COUNT:\n break\n\n state = STATE_INIT_PULL_DOWN\n\n lengths = []\n current_length = 0\n\n for current in data:\n current_length += 1\n\n if state == STATE_INIT_PULL_DOWN:\n if current == GPIO.LOW:\n state = STATE_INIT_PULL_UP\n else:\n continue\n if state == STATE_INIT_PULL_UP:\n if current == GPIO.HIGH:\n state = STATE_DATA_FIRST_PULL_DOWN\n else:\n continue\n if state == STATE_DATA_FIRST_PULL_DOWN:\n if current == GPIO.LOW:\n state = STATE_DATA_PULL_UP\n else:\n continue\n if state == STATE_DATA_PULL_UP:\n if current == GPIO.HIGH:\n current_length = 0\n state = STATE_DATA_PULL_DOWN\n else:\n continue\n if state == STATE_DATA_PULL_DOWN:\n if current == GPIO.LOW:\n lengths.append(current_length)\n state = STATE_DATA_PULL_UP\n else:\n continue\n if len(lengths) != 40:\n # print \"Data not good, skip\"\n return False\n\n shortest_pull_up = min(lengths)\n longest_pull_up = max(lengths)\n halfway = (longest_pull_up + shortest_pull_up) / 2\n bits = []\n the_bytes = []\n byte = 0\n\n for length in lengths:\n bit = 0\n if length > halfway:\n bit = 1\n bits.append(bit)\n # print \"bits: %s, length: %d\" % (bits, len(bits))\n for i in range(0, len(bits)):\n byte = byte << 1\n if (bits[i]):\n byte = byte | 1\n else:\n byte = byte | 0\n if ((i + 1) % 8 == 0):\n the_bytes.append(byte)\n byte = 0\n # print the_bytes\n checksum = (the_bytes[0] + the_bytes[1] + the_bytes[2] + the_bytes[3]) & 0xFF\n if the_bytes[4] != checksum:\n # print \"Data not good, skip\"\n return False\n\n return the_bytes[0], the_bytes[2]", "def __init__(self, pi, gpio_0, gpio_1, callback, bit_timeout=5):\n\n self.pi = pi\n self.gpio_0 = gpio_0\n self.gpio_1 = gpio_1\n\n self.callback = callback\n\n self.bit_timeout = bit_timeout\n\n self.in_code = False", "def get_cgpio_digital(self, ionum=None):\r\n return self._arm.get_cgpio_digital(ionum=ionum)" ]
[ "0.6448849", "0.63108885", "0.60779285", "0.58235747", "0.56402314", "0.5577666", "0.5394534", "0.53789324", "0.53400993", "0.52496374", "0.52308685", "0.52211136", "0.5221027", "0.5119176", "0.5118039", "0.5113823", "0.5086014", "0.507884", "0.5065258", "0.5011837", "0.50094324", "0.49887705", "0.49881053", "0.49577612", "0.49484602", "0.49390942", "0.49338746", "0.49249065", "0.4906105", "0.4895183" ]
0.7376853
0
This function closes a gpio for reading serial data. Returns 0 if OK, otherwise PI_BAD_USER_GPIO, or PI_NOT_SERIAL_GPIO.
def serial_read_close(user_gpio): return _u2i(_pigpio_command(_control, _PI_CMD_SLRC, user_gpio, 0))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def serial_read_open(user_gpio, baud):\n return _u2i(_pigpio_command(_control, _PI_CMD_SLRO, user_gpio, baud))", "def closeFPGACL():\n\n try:\n print(\"Closing connection to FPGA ..\")\n serialObject.close()\n print(\"Connection closed ..\")\n except:\n print(\"Cant Open Specified Port\")", "def close(self):\n self.stop() #ENA=0, DIR=0, bit0=0\n status=self.objdll.USBIO_CloseDevice(self.id)\n print(f\"Close USB-GPIO Device:{status}\")", "def close_serial(ser):\r\n\tser.close()\r\n\tif ser.isOpen():\r\n\t print ser.portstr+' could not be closed and is still open.'\r\n\telse:\r\n\t print 'Serial Port '+ser.portstr+' has been closed.' \r\n\treturn ser", "def close(self):\n\t\tlogging.debug(\"Closing serial device\")\n\t\tret = os.close(self.fd)\n\n\t\treturn ret", "def close(self):\n GPIO.cleanup(self.gpio_pin)", "def pi_close():\n print(\"\\nClosing lock :(\")\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup(11, GPIO.OUT)\n servo1 = GPIO.PWM(11, 50)\n\n # start(0) pulse off\n print('servo.start(0)')\n servo1.start(0)\n time.sleep(1)\n\n # turns a little at a time using servo\n print('turning...')\n i = 8.5\n while i > 2:\n # pulse next degree \n print('ChangeDutyCycle(%d)' % i)\n servo1.ChangeDutyCycle(i)\n time.sleep(0.2)\n # no pulse, for smoother turn\n servo1.ChangeDutyCycle(0)\n time.sleep(0.1)\n i -= 1\n\n # stop pulse\n print('servo.ChangeDutyCycle(0)')\n servo1.ChangeDutyCycle(0)\n servo1.stop()\n GPIO.cleanup()\n print('done closing')", "def close(self):\n super(GPIODevice, self).close()\n with _PINS_LOCK:\n pin = self._pin\n self._pin = None\n if pin in _PINS:\n _PINS.remove(pin)\n pin.close()", "def close(self):\n if self._serial_port is not None and self._serial_port.is_open:\n self._serial_port.close()\n\n super(Cambrionix, self).close()", "def notify_close(handle):\n return _u2i(_pigpio_command(_control, _PI_CMD_NC, handle, 0))", "def close(self) -> None:\r\n\r\n # Close the serial port\r\n if self._serial_handle.read():\r\n self._serial_handle.flushInput()\r\n self._serial_handle.flushOutput()\r\n self._serial_handle.close()", "def end_serial(self):\n if self.serial is None:\n ser = self.get_serial(self.port)\n if ser is not None and ser.is_open:\n ser.close()\n elif self.serial.is_open:\n self.serial.close()", "def close_serial(self):\n if(self.serial):\n self.serial.flush()\n self.serial.close()\n self.serial = False", "def notify_open():\n return _u2i(_pigpio_command(_control, _PI_CMD_NO, 0, 0))", "def read(gpio):\n return _u2i(_pigpio_command(_control, _PI_CMD_READ, gpio, 0))", "def _close(self):\n \n # Close device\n logger.debug(\"%s: Serial port closing started...\" % \\\n self.__class__.__name__)\n self._serial_port.close()\n logger.debug(\"%s: ...serial port closing complete.\" % \\\n self.__class__.__name__)", "def close(self):\n # use the lock to let finish writing/reading\n self._lock.lock()\n # first stop reading, so that closing can be done on idle port\n self.stop()\n try:\n self.serial.close()\n except Exception as e:\n logger.exception(\"Failed to close serial port\")\n self._lock.unlock()", "def Close_Serial(self):\n try:\n self.ser.close()\n print \"Successfully closed Stage Serial Bus\\n\"\n return\n except Exception as e:\n print \"Failed to close Stage serial bus. 
Exception of type %s and args = \\n\"%type(e).__name__, e.args \n return", "def serial_read(user_gpio):\n bytes = _u2i(_pigpio_command(_control, _PI_CMD_SLR, user_gpio, 10000))\n if bytes > 0:\n buf = \"\"\n while len(buf) < bytes: buf += _control.recv(bytes-len(buf))\n return bytes, buf\n return bytes, \"\"", "def close(self):\n\n self.serial.close()", "def on_close_sensor_com_btn_clicked(self):\n self.sensor1.sensor.close_port()\n self.sensor1.pause()\n self.open_sensor_com_btn.setEnabled(True)\n self.close_sensor_com_btn.setEnabled(False)\n self.set_status_txt(\"closing sensor board \")", "def close(self):\n self._simple_serial.close()", "def end_read():\n global ov_read\n ov_read = False\n GPIO.cleanup()", "def disconnect(self):\n try:\n self.boardcon.close()\n self.boardcon = False\n self.status = 0\n return True\n except:\n logger.error(\"Cannot close Serial connection with the/dev/ttyUSB \"\n \"{}\".format(self.machine_id))\n return False", "def closeSer(self): \n \n # Send commands to turn off the Arduino\n self.send('F',data=[str(0)], delay=ARD_DELAY_CMD) # Turn off fan\n self.send('P',data=[str(0)], delay=ARD_DELAY_CMD) # Turn off pump\n \n endMsg = 'arduinoSerial:: Signing Off!'\n if DEBUG: print endMsg\n self.dbF.writerow([endMsg]) \n self.ser.close()", "def gpio_input(door: Door):\n input_state = GPIO.input(GPIO_PIN)\n if input_state:\n door.is_closed()\n else:\n door.is_opened()", "def close(self):\n self.stop()\n GPIO.cleanup()", "def close(self):\n debug(\"CBA4.close()\")\n self.do_stop()\n if self.__usb_if:\n self.__usb_if.close()\n self.__usb_if = None\n #end close()", "def comDone(self):\n if DEBUG > 1: sys.stderr.write(\"* comDone()\\n\")\n self.SetRSTpin(1) #disable power\n self.SetTESTpin(0) #disable power\n self.serialport.close()", "def close(self):\n self.arduino.close()" ]
[ "0.6426908", "0.62594116", "0.6107423", "0.60317576", "0.5929799", "0.5830778", "0.5822655", "0.57815504", "0.5764639", "0.5722318", "0.57152706", "0.570631", "0.5651065", "0.56308454", "0.5601326", "0.55753505", "0.5536002", "0.5535432", "0.5458669", "0.5453773", "0.5452374", "0.54509205", "0.5429343", "0.539081", "0.53210545", "0.5268223", "0.5244552", "0.52057403", "0.51417905", "0.5123559" ]
0.78484946
0
Provides a count of how many times the default tally callback has triggered. The count will be zero if the user has supplied their own callback function.
def tally(self): return self.count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count(f):\n def counted(*args):\n counted.call_count += 1\n return f(*args)\n counted.call_count = 0\n return counted", "def counter(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n wrapper.count = wrapper.count + 1\n res = func(*args, **kwargs)\n print(\"{0} has been used: {1}x\".format(func.__name__, wrapper.count))\n return res\n wrapper.count = 0\n return wrapper", "def COUNTER_TOTAL():\n return 3", "def counter(self) -> int:", "def counter(self) -> int:", "def counted ( f ):\n def wrapped ( *args, **kwargs ):\n wrapped.calls += 1\n return f( *args , **kwargs )\n wrapped.calls = 0\n return wrapped", "def count() -> int:\n pass", "def call_counted(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs): # noqa: WPS430\n wrapper.called += 1\n return func(*args, **kwargs)\n\n wrapper.called = 0\n return wrapper", "def count_calls(func):\n\n @functools.wraps(func)\n def wrapper_count_calls(*args, **kwargs):\n wrapper_count_calls.num_calls += 1\n print(f\"Call {wrapper_count_calls.num_calls} of {func.__name__!r}\")\n return func(*args, **kwargs)\n\n wrapper_count_calls.num_calls = 0\n return wrapper_count_calls", "def count_calls(func):\n \n @functools.wraps(func)\n def wrapper_count_calls(*args, **kwargs):\n wrapper_count_calls.num_calls += 1\n print(f\"Call {wrapper_count_calls.num_calls} of {func.__name__!r}\")\n return func(*args, **kwargs)\n\n wrapper_count_calls.num_calls = 0\n return wrapper_count_calls", "def countcalls(f):\n def _f(fn):\n countcalls[fn]", "async def on_count(ctx):\n count = get_count()\n await ctx.send(f'current count {count}')", "def number_of_on_calls(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"number_of_on_calls\")", "def test_count(self):\n self._test_count_func(count)", "def call_count(self):\n start_time = self._get_interval_start_time()\n return self._get_call_count(start_time)", "def countcalls(func):\n def wrapper(*args, **kwargs):\n wrapper.calls += 1\n return func(*args, **kwargs)\n wrapper.calls = 0\n return wrapper", "def register_count_changed_callback(self, callback=None):\r\n return self._arm.register_count_changed_callback(callback=callback)", "def count_me(fnc):\n def increment(self, *args, **kwargs):\n type(self)._count += 1\n return fnc(self, *args, **kwargs)\n return increment", "def count_change(amount):\n \"*** YOUR CODE HERE ***\"\n\n return helper(1, amount)", "def _listen_count(hass: HomeAssistant) -> int:\n return sum(hass.bus.async_listeners().values())", "def make_count_change():\n \"*** YOUR CODE HERE ***\"", "def count():", "def counter(self, value: int, /) -> None:", "def count(self) -> int:\n return pulumi.get(self, \"count\")", "def counter(): # Local function\n nonlocal count\n if count < n:\n count += 1\n return count", "def Counter(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n try:\n ans = func(*args, **kwargs)\n logger.debug(\n f'Running func:{func.__name__} for {wrapper.calls} times.')\n wrapper.calls += 1\n return ans\n except Exception as e:\n logger.critical(e)\n logger.critical(f'{func.__name__} cannot work.')\n wrapper.calls = 1\n return wrapper", "async def count(self, **kw):\n\n pass", "def action_count(self):\n raise NotImplementedError", "def test_own_count(self):\n self._test_count_func(it_count)", "def guests_counter(window, n_guests):\r\n window.write_event_value('-COUNT-', n_guests)" ]
[ "0.6530857", "0.6418102", "0.63938993", "0.6386148", "0.6386148", "0.6377512", "0.6375082", "0.6357372", "0.63504136", "0.63432616", "0.6302167", "0.62513196", "0.6235649", "0.6225064", "0.61561054", "0.61551017", "0.6151384", "0.61359334", "0.60966057", "0.6022655", "0.60067004", "0.6003875", "0.5978163", "0.59675527", "0.5954921", "0.5944573", "0.594199", "0.59347767", "0.5930747", "0.59259224" ]
0.66037875
0
Wait for an edge event on a gpio. The function returns as soon as the edge is detected or after the number of seconds specified by timeout has expired.
def wait_for_edge(user_gpio, edge=RISING_EDGE, timeout=60.0):
   a = _wait_for_edge(user_gpio, edge, timeout)
   return a.trigger
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wait_for_edge(self, channel, edge, bouncetime=None, timeout=None):\n self._check_mode()\n self._check_channel_input(channel)\n # ueberpruefe, ob `edge` gueltig ist und erstelle eventuell aus der Zahl einen lesbaren Text\n if edge == GPIO.RISING:\n edge = \"rising\"\n elif edge == GPIO.FALLING:\n edge = \"falling\"\n elif edge == GPIO.BOTH:\n edge = \"both\"\n else:\n raise ValueError(\"The edge must be set to RISING, FALLING or BOTH\")\n print(f\"wait_for_edge mit channel {channel} edge {edge}, bouncetime {bouncetime} und timeout {timeout}\")\n # generiere und warte fuer eine zufaellige Zeit\n # aber maximal fuer den `timeout`\n wartezeit = max(randint(5, 10), timeout / 1000)\n sleep(wartezeit)", "def wait_for(self, timeout):\n ready = False\n # Dividing sleep time by 300 instead of 30 double CPU load but cuts\n # IMU timestamp variation from about 20% to less than 1%\n sleep_time = (timeout / 1000.0) / 30\n stop_time = time.monotonic_ns() + (timeout * 1000000.0)\n while not ready and time.monotonic_ns() < stop_time:\n ready = GPIO.input(self.gpio_pin)\n time.sleep(sleep_time)\n return ready", "def _waitEvConnected(self, timeout):\r\n timestamp_start = time.time()\r\n cp_state = self.whitebeet.controlPilotGetState()\r\n if cp_state == 1:\r\n print(\"EV already connected\")\r\n return True\r\n elif cp_state > 1:\r\n print(\"CP in wrong state: {}\".format(cp_state))\r\n return False\r\n else:\r\n print(\"Wait until an EV connects\")\r\n while True:\r\n cp_state = self.whitebeet.controlPilotGetState()\r\n if timeout != None and timestamp_start + timeout > time.time():\r\n return False\r\n if cp_state == 0:\r\n time.sleep(0.1)\r\n elif cp_state == 1:\r\n print(\"EV connected\")\r\n return True\r\n else:\r\n print(\"CP in wrong state: {}\".format(cp_state))\r\n return False", "def get_next_event(self, timeout=None):\n ret = self.inq.Wait(timeout)\n return ret", "def wait_for_event_timeout(event):\n received = event.wait(2)\n name = threading.current_thread().getName()\n print \"Waited with timeout, got {}, name {}\".format(received, name)", "def wait_for_connection(timeout):\n global connected \n\n total_time = 0\n while not connected and total_time < timeout:\n time.sleep(1)\n total_time += 1\n\n if not connected:\n raise RuntimeError('Could not connect to MQTT bridge.')", "def _busy_wait(self, timeout=40.0):\n # If the busy_pin is *high* (pulled up by host)\n # then assume we're not getting a signal from inky\n # and wait the timeout period to be safe.\n if self._gpio.input(self.busy_pin):\n warnings.warn(\"Busy Wait: Held high. 
Waiting for {:0.2f}s\".format(timeout))\n time.sleep(timeout)\n return\n\n # If the busy_pin is *low* (pulled down by inky)\n # then wait for it to high.\n t_start = time.time()\n while not self._gpio.input(self.busy_pin):\n time.sleep(0.01)\n if time.time() - t_start >= timeout:\n warnings.warn(\"Busy Wait: Timed out after {:0.2f}s\".format(time.time() - t_start))\n return\n\n # print(\"Busy_waited\", time.time()-t_start, \"out of\", timeout, \"seconds\")", "def _wait_on_condition(self, timeout):\n self.__condition.wait(timeout)", "def wait_for_disconnection(timeout):\n global connected \n\n total_time = 0\n while connected and total_time < timeout:\n time.sleep(1)\n total_time += 1\n\n if connected:\n raise RuntimeError('Could not disconnect to MQTT bridge.')", "def _wait_for_event_file(event_file_path, timeout):\n\n start_time = time.time()\n end_time = start_time + timeout\n\n while True:\n timeout_remaining = end_time - time.time()\n if timeout_remaining <= 0:\n return (False, 0)\n elif os.path.exists(event_file_path):\n return (True, timeout_remaining)\n else:\n time.sleep(0.1)", "def wait_for_acquire(self, timeout=30):\n self._acquire_event.wait(timeout)", "def wait_fluently(condition: Callable, timeout: TimeoutType, err_msg: str):\n if timeout is None:\n timeout = 0\n start_time = time.time()\n while True:\n res = condition()\n if res:\n return res\n if time.time() - start_time >= timeout:\n raise TimeoutException(err_msg)\n time.sleep(0.3)", "def wait_for_result(self, timeout=10, interval=0.1):\n end = time.time() + timeout\n while time.time() <= end:\n ev = self.get_event()\n if ev.id == ID_OK:\n return\n elif ev.id in (ID_ERROR, ID_ALARM):\n raise GrblEventError(ev)\n time.sleep(interval)\n raise GrblHostError(\"Timeout\")", "def wait(self, timeout):\n raise NotImplementedError(\n u\"%s: Method not implemented\", self.__class__.__name__)", "async def _read_event(self, timeout: int=None):\n event = None\n try:\n event = await wait_for(self.get_event(), timeout=timeout)\n except TimeoutError:\n pass\n\n return event", "def wait_until_responsive(self, check, timeout, pause,\n clock=timeit.default_timer):\n\n ref = clock()\n now = ref\n while (now - ref) < timeout:\n if check():\n return\n time.sleep(pause)\n now = clock()\n\n raise Exception(\n 'Timeout reached while waiting on service!'\n )", "def wait(self, timeout=None):\n with self.condition:\n if not self.ready:\n self.condition.wait(timeout)", "async def test_wait_for(self) -> None:\n trigger = auraxium.Trigger(auraxium.event.Death)\n\n def do_nothing(_: auraxium.event.Event) -> None:\n pass\n\n trigger.action = do_nothing\n\n await self.client.wait_for(trigger, timeout=-1.0)\n\n with self.assertRaises(TimeoutError):\n await self.client.wait_for(trigger, timeout=0.00001)", "def wait(self, timeout=None):\n if timeout is None:\n timeout = self.timeout\n started = time.time()\n while True:\n if self.get_ip():\n self.state = State.RUNNING\n return True\n else:\n time.sleep(1)\n if timeout != 0:\n if time.time() - started > timeout:\n return False", "def wait_for(node, namespace=None, timeout=None):\n\n from rospy.names import canonicalize_name\n node = canonicalize_name(node)\n def is_node_up(node):\n try:\n node_up = any([node in upnode for upnode in\n rosnode.get_node_names(namespace)])\n return node_up\n except Exception:\n return False\n\n if not is_node_up(node):\n if timeout is not None:\n timeout_t = time.time() + timeout\n while not is_node_up(node):\n time.sleep(0.1)\n if time.time() >= timeout_t:\n raise 
rospy.exceptions.ROSException(\"timeout exceeded while waiting for node %s\" % node)\n else:\n while not is_node_up(node):\n time.sleep(0.1)", "def wait_for_press(self):\n GPIO.add_event_detect(self.channel, self.polarity)\n while True:\n if GPIO.event_detected(self.channel) and self._debounce():\n GPIO.remove_event_detect(self.channel)\n return\n time.sleep(0.02)", "def wait_until(self, check, timeout=None):\n self._wait_in_process_loop(lambda: (check(),None),timeout=timeout)", "def wait_for_press(self, b):\n \n self.hardware_interfaces[self._gpio].wait_pin_for_edge(self._b_names[b])", "def wait_for_event(event):\r\n return event.accept()", "def wait_until_done(self, timeout=10.0):\r\n cfunc = lib_importer.windll.DAQmxWaitUntilTaskDone\r\n if cfunc.argtypes is None:\r\n with cfunc.arglock:\r\n if cfunc.argtypes is None:\r\n cfunc.argtypes = [lib_importer.task_handle, ctypes.c_double]\r\n\r\n error_code = cfunc(self._handle, timeout)\r\n check_for_error(error_code)", "def _WaitForLinkerTestStatus(adb, timeout):", "def wait_disconnected(self, timeout=-1):\n\n with self.connect_cv:\n util.timed_wait(self.connect_cv,\n lambda: True if not self.switch_socket else None,\n timeout=timeout)\n return self.switch_socket is None", "def wait_for_completion(self, timeout=10):\n cur_status = self.runtime_status()\n while cur_status not in ['FAILED', 'KILLED', 'FINISHED']:\n time.sleep(0.2)\n timeout -= 0.2\n cur_status = self.runtime_status()\n if timeout < 0:\n break\n\n return timeout > 0", "def wait_for_interrupts(self, wait_time = 1):\n raise AssertionError(\"wait_for_interrupts function i not implemented\")", "def block_until_close(self):\r\n return self._eventThread.join()" ]
[ "0.7034726", "0.6780801", "0.6662591", "0.6510038", "0.62618953", "0.6202515", "0.6161633", "0.61384743", "0.6069354", "0.6034987", "0.6032244", "0.6013128", "0.5991581", "0.59828234", "0.59619355", "0.59435755", "0.5928785", "0.59200543", "0.5908114", "0.5907992", "0.58606595", "0.5852983", "0.58259207", "0.5822687", "0.5771467", "0.575496", "0.57262254", "0.5717297", "0.5707706", "0.5697392" ]
0.8251145
0
Start the pigpio module.
def start(host = os.getenv("PIGPIO_ADDR", ''), port = os.getenv("PIGPIO_PORT", 8888)):
    global _control, _notify
    global _host, _port
    _host = host
    _port = int(port)
    _control = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        _control.connect((_host, _port))
        _notify = _callback_thread()
    except socket.error:
        if _control is not None:
            _control = None
        if _host == '':
            h = "localhost"
        else:
            h = _host
        errStr = "Can't connect to pigpio on " + str(h) + "(" + str(_port) + ")"
        print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
        print(errStr)
        print("")
        print("Did you start the pigpio daemon? E.g. sudo pigpiod")
        print("")
        print("Did you specify the correct Pi host/port in the environment")
        print("variables PIGPIO_ADDR/PIGPIO_PORT?")
        print("E.g. export PIGPIO_ADDR=soft, export PIGPIO_PORT=8888")
        print("")
        print("Did you specify the correct Pi host/port in the")
        print("pigpio.start() function? E.g. pigpio.start('soft', 8888))")
        print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
        return False
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _pigpio_command(sock, cmd, p1, p2):\n if sock is not None:\n sock.send(struct.pack('IIII', cmd, p1, p2, 0))\n x, y, z, res = struct.unpack('IIII', sock.recv(16))\n return res\n else:\n raise _pigpioError(\"*** Module not started, call pigpio.start() ***\")", "def start(self):\n self.p.start()", "def start():\n global running\n # os.system('python3 /Users/bowenwaugh/Documents/GA/GA_Puzzles/simple.py')\n global process\n process = Popen(['python3', '/Users/bowenwaugh/Documents/GA/GA_Puzzles/simple.py'])\n running = True", "def start():\n trio.run(_main)", "def start(params) -> None:\n check_root()\n start_microservice(params)\n load_kernel_module(params)\n start_streamer(params)", "def setup():\n GPIO.setmode(GPIO.BCM)\n for pin in [config.gpio_pin_p1_stretch,\n config.gpio_pin_p1_serve,\n config.gpio_pin_p2_stretch,\n config.gpio_pin_p2_serve]:\n GPIO.setup(pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n\n input_reader_thread = threading.Thread(target=input_reader_worker)\n input_reader_thread.setDaemon(True)\n input_reader_thread.start()", "def start_orc8r():\n subprocess.check_call(['./run.py'], shell=True, cwd=orc8_docker_path)", "def startFluidinfo():\n sudo('start fluidinfo-api')\n sudo('/etc/init.d/haproxy start')\n sudo('/etc/init.d/nginx start')", "def setup(self):\n self.pi.set_pull_up_down(self.gpio, pigpio.PUD_OFF)\n self.pi.set_watchdog(self.gpio, 0)\n self.register_callbacks()", "async def start(self, raise_on_fail: bool = False) -> None:\n try:\n await self._rpc.start()\n except InvalidPipe:\n if raise_on_fail:\n raise\n self._rpc = None", "def start():", "def start():", "def start():", "def start():", "def start():\n\n start_server()", "def run():\n entry_point.run()", "def run():\n board = SimpleGoBoard(7)\n con = GtpConnection(Gomoku4(), board)\n con.start_connection()", "def start(context):\n context.run(\"python hellotensorflow/hello.py\")", "def __init__(self, pi, gpio):\n\n self.pi = pi\n self.gpio = gpio\n\n self._start_tick = None\n self._last_tick = None\n self._low_ticks = 0\n self._high_ticks = 0\n\n pi.set_mode(gpio, pigpio.INPUT)\n\n self._cb = pi.callback(gpio, pigpio.EITHER_EDGE, self._cbf)", "def main():\n kernel_params = _parse_kernel_cmdline()\n api_url = kernel_params.get('ironic_api_url')\n deployment_id = kernel_params.get('deployment_id')\n inspect = kernel_params.get('inspect')\n # TODO(aarefiev): change ssh driver\n ironic_driver = kernel_params.get('callback-driver-name', 'ansible_ssh')\n if inspect and api_url is None:\n _process_error('Ironic ansible callback: Mandatory parameter '\n '\"ironic_api_url\" is missing.')\n if api_url is None or deployment_id is None:\n _process_error('Mandatory parameter (\"ironic_api_url\" or '\n '\"deployment_id\") is missing.')\n\n boot_mac = kernel_params.get('BOOTIF')\n if boot_mac is None:\n _process_error('Cannot define boot interface, \"BOOTIF\" parameter is '\n 'missing.')\n\n # There is a difference in syntax in BOOTIF variable between pxe and ipxe\n # boot with Ironic. 
For pxe boot the the leading `01-' denotes the device type\n # (Ethernet) and is not a part of the MAC address\n if boot_mac.startswith('01-'):\n boot_mac = boot_mac[3:].replace('-', ':')\n\n for n in range(_GET_ADDR_MAX_ITERATION):\n boot_ip = _get_interface_ip(boot_mac)\n if boot_ip is not None:\n break\n time.sleep(_RETRY_INTERVAL)\n else:\n _process_error('Cannot find IP address of boot interface.')\n\n data = {\"callback_url\": \"ssh://\" + boot_ip}\n\n if inspect:\n passthru = ('%(api-url)s/v1/drivers/%(driver)s/vendor_passthru'\n '/inspect' % {'api-url': api_url,\n 'driver': ironic_driver}\n else:\n passthru = '%(api-url)s/v1/nodes/%(deployment_id)s/vendor_passthru' \\\n '/heartbeat' % {'api-url': api_url,\n 'deployment_id': deployment_id}\n\n for attempt in range(_POST_CALLBACK_MAX_ITERATION):\n try:\n resp = requests.post(passthru, data=json.dumps(data),\n headers={'Content-Type': 'application/json',\n 'Accept': 'application/json'})\n except Exception as e:\n error = str(e)\n else:\n if resp.status_code != 202:\n error= ('Wrong status code %d returned from Ironic API' %\n resp.status_code)\n else:\n break\n\n if attempt == (_POST_CALLBACK_MAX_ITERATION - 1):\n _process_error(error)\n\n time.sleep(_RETRY_INTERVAL)", "def run():\n board = GoBoard(7)\n con = GtpConnection(Gomoku(), board)\n con.start_connection()", "def main():\n rospy.init_node('image_to_pointcloud')\n rospy.loginfo(\"Starting sonar image to pointcloud node...\")\n converter = ImageConverter()\n\n rate = rospy.Rate(10)\n rospy.sleep(3.0)\n while not rospy.is_shutdown():\n converter.proc_and_pub_pointcloud()\n rate.sleep()", "def setup_pi():\n global pi\n pi = modOrangePi.OrangePiOne()", "def run():\n register_component(\"press\")\n run_app(host=\"0.0.0.0\", port=8080, debug=True, workers=os.cpu_count())", "def start(self):\n self.microblaze.run()\n self.microblaze.write(MAILBOX_OFFSET + MAILBOX_PY2IOP_CMD_OFFSET, 0)\n self.load_switch_config(self.iop_switch_config)", "def setup_module(module):\n print(\"Start rishabhSetupModule of Program\")", "def pibooth_startup(cfg, app):", "def _start(self):\n\n super(PySwitchLibApiDaemonRunner, self)._start()", "def run(self):\n ioloop.IOLoop.current().start()", "def setup(self):\n if not self._gpio_setup:\n if self._gpio is None:\n try:\n import RPi.GPIO as GPIO\n self._gpio = GPIO\n except ImportError:\n raise ImportError('This library requires the RPi.GPIO module\\nInstall with: sudo apt install python-rpi.gpio')\n self._gpio.setmode(self._gpio.BCM)\n self._gpio.setwarnings(False)\n self._gpio.setup(self.cs_pin, self._gpio.OUT)\n self._gpio.setup(self.dc_pin, self._gpio.OUT, initial=self._gpio.LOW, pull_up_down=self._gpio.PUD_OFF)\n self._gpio.setup(self.reset_pin, self._gpio.OUT, initial=self._gpio.HIGH, pull_up_down=self._gpio.PUD_OFF)\n self._gpio.setup(self.busy_pin, self._gpio.IN, pull_up_down=self._gpio.PUD_OFF)\n\n if self._spi_bus is None:\n import spidev\n self._spi_bus = spidev.SpiDev()\n\n self._spi_bus.open(0, self.cs_channel)\n self._spi_bus.no_cs = True\n self._spi_bus.max_speed_hz = 5000000\n\n self._gpio_setup = True\n\n self._gpio.output(self.reset_pin, self._gpio.LOW)\n time.sleep(0.1)\n self._gpio.output(self.reset_pin, self._gpio.HIGH)\n time.sleep(0.1)\n\n self._gpio.output(self.reset_pin, self._gpio.LOW)\n time.sleep(0.1)\n self._gpio.output(self.reset_pin, self._gpio.HIGH)\n\n self._busy_wait(1.0)\n\n # Sending init commands to display\n self._send_command(AC073TC1_CMDH, [0x49, 0x55, 0x20, 0x08, 0x09, 0x18])\n\n self._send_command(AC073TC1_PWR, [0x3F, 
0x00, 0x32, 0x2A, 0x0E, 0x2A])\n\n self._send_command(AC073TC1_PSR, [0x5F, 0x69])\n\n self._send_command(AC073TC1_POFS, [0x00, 0x54, 0x00, 0x44])\n\n self._send_command(AC073TC1_BTST1, [0x40, 0x1F, 0x1F, 0x2C])\n\n self._send_command(AC073TC1_BTST2, [0x6F, 0x1F, 0x16, 0x25])\n\n self._send_command(AC073TC1_BTST3, [0x6F, 0x1F, 0x1F, 0x22])\n\n self._send_command(AC073TC1_IPC, [0x00, 0x04])\n\n self._send_command(AC073TC1_PLL, [0x02])\n\n self._send_command(AC073TC1_TSE, [0x00])\n\n self._send_command(AC073TC1_CDI, [0x3F])\n\n self._send_command(AC073TC1_TCON, [0x02, 0x00])\n\n self._send_command(AC073TC1_TRES, [0x03, 0x20, 0x01, 0xE0])\n\n self._send_command(AC073TC1_VDCS, [0x1E])\n\n self._send_command(AC073TC1_T_VDCS, [0x00])\n\n self._send_command(AC073TC1_AGID, [0x00])\n\n self._send_command(AC073TC1_PWS, [0x2F])\n\n self._send_command(AC073TC1_CCSET, [0x00])\n\n self._send_command(AC073TC1_TSSET, [0x00])" ]
[ "0.5963777", "0.57519615", "0.56450987", "0.56196016", "0.5602489", "0.55918384", "0.55056417", "0.54661393", "0.5464411", "0.54620284", "0.5455737", "0.5455737", "0.5455737", "0.5455737", "0.5386441", "0.53849673", "0.53796583", "0.5359336", "0.53541785", "0.53469396", "0.53434765", "0.532231", "0.53138506", "0.5286618", "0.5271164", "0.5266808", "0.52509886", "0.52381384", "0.52216125", "0.5219537" ]
0.7611507
0
For a given numpy array, plots the histogram for each column
def plot_features(data: np.array) -> None:
    n_rows = np.size(data, 0)
    n_cols = np.size(data, 1)
    for i in range(n_cols):
        plt.hist(data[:,i])
        plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hist_of_numeric(X):\n figsize(10,3)\n for col in get_numeric(X):\n print(col)\n X[col].hist(bins=50)\n show()", "def hist(self):\r\n plt.hist(self.data_array, bins='auto', density=False, facecolor='b')\r\n plt.title(self.column_name)\r\n plt.savefig(self.column_name + \".svg\")\r\n plt.close()", "def plotHistogram(a):\n plt.figure(figsize=(10,5))\n plt.subplot(1,2,1)\n plt.imshow(a)\n plt.axis('off')\n histo = plt.subplot(1,2,2)\n histo.set_ylabel('Count')\n histo.set_xlabel('Pixel Intensity')\n n_bins = 30\n plt.hist(a[:,:,0].flatten(), bins= n_bins, lw = 0, color='r', alpha=0.5);\n plt.hist(a[:,:,1].flatten(), bins= n_bins, lw = 0, color='g', alpha=0.5);\n plt.hist(a[:,:,2].flatten(), bins= n_bins, lw = 0, color='b', alpha=0.5);", "def plotHistogram(a):\n plt.figure(figsize=(10,5))\n plt.subplot(1,2,1)\n plt.imshow(a)\n plt.axis('off')\n histo = plt.subplot(1,2,2)\n histo.set_ylabel('Count')\n histo.set_xlabel('Pixel Intensity')\n n_bins = 30\n plt.hist(a[:,:,0].flatten(), bins= n_bins, lw = 0, color='r', alpha=0.5);\n plt.hist(a[:,:,1].flatten(), bins= n_bins, lw = 0, color='g', alpha=0.5);\n plt.hist(a[:,:,2].flatten(), bins= n_bins, lw = 0, color='b', alpha=0.5);", "def plotHistogram(a):\n plt.figure(figsize=(10,5))\n plt.subplot(1,2,1)\n plt.imshow(a)\n plt.axis('off')\n histo = plt.subplot(1,2,2)\n histo.set_ylabel('Count')\n histo.set_xlabel('Pixel Intensity')\n n_bins = 30\n plt.hist(a[:,:,0].flatten(), bins= n_bins, lw = 0, color='r', alpha=0.5);\n plt.hist(a[:,:,1].flatten(), bins= n_bins, lw = 0, color='g', alpha=0.5);\n plt.hist(a[:,:,2].flatten(), bins= n_bins, lw = 0, color='b', alpha=0.5);", "def histogram(arr, xlbl, xrng=None, nbins=20, alpha=1.):\n if xrng is None:\n xrng = (np.min(arr),np.max(arr))\n p = figure(plot_width=600, plot_height=400)\n # Histogram\n hist, edges = np.histogram(arr, range=xrng, density=True, bins=nbins)\n p.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:], fill_color='blue', alpha=alpha)\n # Label\n p.xaxis.axis_label = xlbl\n # Show\n show(p)", "def hist(data):\n\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n plt.hold(True)\n for x in xrange(len(data[:,0,0])):\n counts, edges = np.histogram(data[x,:,:],bins=100)\n centers = [(edges[i]+edges[i+1])/2.0 for i,v in enumerate(edges[:-1])]\n ax1.plot(centers,counts)\n plt.hold(False)\n\n plt.show(block=False)\n\n # return fig", "def np_histogram(data, title, bins=\"auto\"):\n figure = plt.figure()\n canvas = figure.canvas\n plt.hist(data, bins=bins)\n plt.title(title)\n\n canvas.draw()\n w, h = canvas.get_width_height()\n np_hist = np.fromstring(canvas.get_renderer().tostring_rgb(), dtype=np.uint8).reshape(h, w, 3)\n plt.close(figure)\n util.np_info(np_hist)\n return np_hist", "def show_histogram(im):\n\n if im.ndim == 2:\n # Input image is single channel\n plt.hist(im.flatten(), 256, range=(0, 250), fc='k')\n plt.show()\n\n elif im.ndim == 3:\n # Input image is three channels\n fig = plt.figure()\n fig.add_subplot(311)\n plt.hist(im[..., 0].flatten(), 256, range=(0, 250), fc='b')\n fig.add_subplot(312)\n plt.hist(im[..., 1].flatten(), 256, range=(0, 250), fc='g')\n fig.add_subplot(313)\n plt.hist(im[..., 2].flatten(), 256, range=(0, 250), fc='r')\n plt.show()", "def histogram(self, filename, column_name):\n plt.figure()\n self.data[column_name].hist()\n plt.savefig(filename)", "def plot_hist(df, num_bins=8):\n df.hist(figsize=(24, 20), bins=num_bins)\n plt.axes", "def draw_histogram(data, # type: thelper.typedefs.ArrayType\n bins=50, # type: Optional[int]\n xlabel=\"\", # type: 
Optional[thelper.typedefs.LabelType]\n ylabel=\"Proportion\", # type: Optional[thelper.typedefs.LabelType]\n show=False, # type: Optional[bool]\n block=False, # type: Optional[bool]\n ): # type: (...) -> thelper.typedefs.DrawingType\n fig, ax = plt.subplots()\n ax.hist(data, density=True, bins=bins)\n if len(ylabel) > 0:\n ax.set_ylabel(ylabel)\n if len(xlabel) > 0:\n ax.set_xlabel(xlabel)\n ax.set_xlim(xmin=0)\n if show:\n fig.show()\n if block:\n plt.show(block=block)\n return fig\n plt.pause(0.5)\n return fig, ax", "def makeHistogram(values, numBins, xLabel, yLabel, title=None):", "def plot_histogram(img):\n rgb_hist = rgb_histogram(img)\n plt.figure()\n for color, hist in rgb_hist.items():\n plt.plot(hist, color=color)\n plt.xlim([0, 256])", "def print_hist(a_column, a_category):\n plt.hist(a_column)\n plt.title(\"Histogram for {} variable\".format(a_category))\n plt.xlabel(a_category)\n plt.ylabel(\"frequency\")\n plt.show()", "def histograma(p):\n img = read_img(p)\n show_histograma(img.reshape((-1)))", "def plot_uv_hist(df, colname, nbins='auto', xlogflag=False, colorid=0):\n\n # Set figure size \n fig, ax = plt.subplots(figsize=(8,6))\n\n # set colorid for bar plot\n base_color = sns.color_palette()[colorid]\n \n # plotting... histogram\n if (xlogflag):\n \tsns.histplot(ax = ax, data = df\n \t, x = colname\n \t, bins = nbins\n \t, color = base_color\n \t\t\t\t\t\t, log_scale= True)\n else:\n \tsns.histplot(ax = ax, data = df\n \t, x = colname\n \t, color = base_color\n \t, bins = nbins)\n\n \n #plt.legend(prop={'size': 12})\n plt.title('Distribution of '+colname, fontsize=20)\n plt.xlabel(colname+' (units)', fontsize=16)\n plt.ylabel('Frequency', fontsize=16)\n plt.xticks(fontsize=16)\n plt.yticks(fontsize=16)\n \n return plt.show()", "def hist(self, overlay=False, **vargs):\n n = len(self)\n colors = list(itertools.islice(itertools.cycle(('b', 'g', 'r')), n))\n if overlay:\n plt.figure(figsize=(6, 4))\n plt.hist(self.columns, color=colors, **vargs)\n plt.legend(self.column_labels)\n else:\n _, axes = plt.subplots(n, 1, figsize=(6, 4 * n))\n if n==1 : axes = [axes]\n for axis, label, color in zip(axes, self.column_labels, colors):\n axis.hist(self[label], color=color, **vargs)\n axis.set_xlabel(label, fontsize=16)", "def draw_hist(self, column_name, data):\n\n # Reset plot first\n self.axes.clear()\n\n try:\n # Check data type\n if data.dtype == \"object\":\n # Different drawing method for strings\n value_count = data.value_counts().sort_index()\n value_count.plot(kind=\"bar\", ax=self.axes)\n else:\n self.axes.hist(data.dropna(), bins=100)\n except ValueError as e:\n # log Error\n _log_message = \"\\nHistogram plot failed due to error:\\n--> {}\".format(e)\n pub.sendMessage(\"LOG_MESSAGE\", log_message=_log_message)\n\n # Set plot info\n self.axes.set_title(\"Histogram Plot for %s\" % column_name)\n self.axes.set_ylabel(\"Value Count\")\n self.canvas.draw()", "def plot_channel_histogram(img, tri):\n rgb = ['red', 'green', 'blue']\n\n f = img[tri == 255]\n b = img[tri == 0]\n\n for source in [f, b]:\n for channel in range(source.shape[-1]):\n sns.distplot(source[:, channel], color=rgb[channel])", "def create_histogram(self, i):\n # styling\n sns.set(style=\"whitegrid\")\n font = {'weight': 'normal'}\n plt.rc('font', **font)\n plt.rc('axes', labelsize=25) # fontsize of the x and y labels\n plt.rc('xtick', labelsize=25) # fontsize of the tick labels\n plt.rc('ytick', labelsize=25)\n fig, ax = plt.subplots(1, 1, figsize=(5, 5), dpi=100)\n try:\n if self.dtype_is_object() or 
self.num_of_values() <= 15:\n if self.num_of_values() > 15:\n data = pd.to_numeric(self.data, errors='coerce')\n plot = sns.distplot(data.dropna())\n else:\n plot = sns.countplot(self.remove_nan_values())\n else:\n plot = sns.distplot(self.remove_nan_values())\n plot.set(xlabel='', ylabel='')\n except Exception:\n plt.text(0.5, 0.5, f'Unable to plot', ha='center', va='center', transform=ax.transAxes, fontsize=16)\n if not os.path.isdir('hist_images'):\n os.mkdir('hist_images')\n plt.savefig(f'hist_images/histogram{i}.png', bbox_inches='tight')\n plt.close()\n plt.clf()", "def makeHistogram(values, numBins, xLabel, yLabel, title=None):\n pylab.hist(values, bins = numBins)\n pylab.xlabel(xLabel)\n pylab.ylabel(yLabel)\n if not title == None:\n pylab.title(title)\n pylab.show()", "def makeHistogram(values, numBins, xLabel, yLabel, title=None):\r\n pylab.hist(values, bins = numBins)\r\n pylab.xlabel(xLabel)\r\n pylab.ylabel(yLabel)\r\n if title != None:\r\n pylab.title(title)\r\n pylab.show()", "def plot_histogram(self,**kwargs):\n axes = []\n for i in range(self.score_length):\n fig = plt.figure()\n scores = np.array([s[i] for s in self.scores_list])\n probs,bins,patches = plt.hist(scores,label=\"Sample {}\".format(self.labels[i]), **kwargs)\n plt.vlines(self.xhat,fig.get_axes().get_ylim(),label='Mean',color='r')\n plt.legend()\n axes.append(fig.get_axes())\n return axes", "def get_histogram(self):\n n_rows = self.df.shape[0]\n if n_rows > 250:\n fig, ax = plt.subplots()\n ax.hist(self.df[self.col_name], bins=50)\n else: \n fig, ax = plt.subplots()\n ax.hist(self.df[self.col_name], bins=int(round(n_rows/5,0)))\n return fig", "def plot_histogram(path: str, image: sitk.Image, no_bins: int=255, slice_no: int=-1,\n title: str='', xlabel: str='', ylabel: str='') -> None:\n if slice_no > -1:\n data = sitk.GetArrayFromImage(image[:, :, slice_no])\n else:\n data = sitk.GetArrayFromImage(image)\n\n data = data.flatten()\n\n plt.hist(data, bins=no_bins)\n if title: plt.title(title)\n if xlabel: plt.xlabel(xlabel)\n if ylabel: plt.ylabel(ylabel)\n plt.savefig(path)\n plt.close()", "def plot_histogram(hist, outname, xlabel=\"\", ylabel=\"frequency\"):\n plt.bar(hist[:,0], hist[:,1])\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.savefig(outname)\n plt.close()", "def plot_histogram(self) -> None:\n\n if self.data:\n plt.hist(self.data)\n plt.title(\"Histogram of data\")\n plt.xlabel(\"data\")\n plt.ylabel(\"count\")\n else:\n raise ValueError(\"Histogram cannot be generated as no\\\n data has been provided\")", "def histogram(data, title, path):\n plt.hist(data,\n bins=60)\n plt.xticks(size=22)\n plt.yticks(size=22)\n plt.title(title,\n fontsize=30)\n plt.savefig(path)\n plt.clf()", "def plot_histograms(p_hist, p_hbins, title, figure_path=None):\n\n base_fig_size = 7\n h_fig = base_fig_size\n w_fig = base_fig_size * 4\n\n fig = plt.figure(figsize=(w_fig, h_fig))\n fig.suptitle(title)\n iplot = 0\n\n p_Nx, p_Ny = np.amax(p_hbins, axis=1) + 1\n\n p_hist = np.reshape(p_hist, (4, p_Ny, p_Nx))\n\n iplot += 1\n p_plot_title = 'Spectral BEHistogram Amp (log10 of counts)'\n p_plot = fig.add_subplot(1, 4, iplot, title=p_plot_title)\n p_im = p_plot.imshow(np.rot90(np.log10(p_hist[0])), interpolation='nearest')\n p_plot.axis('tight')\n fig.colorbar(p_im, fraction=0.1)\n\n iplot += 1\n p_plot_title = 'Spectral BEHistogram Phase (log10 of counts)'\n p_plot = fig.add_subplot(1, 4, iplot, title=p_plot_title)\n p_im = p_plot.imshow(np.rot90(np.log10(p_hist[1])), interpolation='nearest')\n p_plot.axis('tight')\n 
fig.colorbar(p_im, fraction=0.1)\n\n iplot += 1\n p_plot_title = 'Spectral BEHistogram Real (log10 of counts)'\n p_plot = fig.add_subplot(1, 4, iplot, title=p_plot_title)\n p_im = p_plot.imshow(np.rot90(np.log10(p_hist[2])), interpolation='nearest')\n p_plot.axis('tight')\n fig.colorbar(p_im, fraction=0.1)\n\n iplot += 1\n p_plot_title = 'Spectral BEHistogram Imag (log10 of counts)'\n p_plot = fig.add_subplot(1, 4, iplot, title=p_plot_title)\n p_im = p_plot.imshow(np.rot90(np.log10(p_hist[3])), interpolation='nearest')\n p_plot.axis('tight')\n fig.colorbar(p_im, fraction=0.1)\n\n if figure_path:\n plt.savefig(figure_path, format='png')\n\n return fig" ]
[ "0.75319946", "0.73958194", "0.7227305", "0.7227305", "0.7227305", "0.7023404", "0.695583", "0.6950402", "0.6821363", "0.6771328", "0.6743031", "0.67058104", "0.66986424", "0.66847414", "0.66547585", "0.65645534", "0.65592194", "0.6551716", "0.6529379", "0.64892125", "0.6478263", "0.6475924", "0.6444956", "0.64328706", "0.63944596", "0.6372541", "0.6362008", "0.6353678", "0.6351483", "0.63490915" ]
0.767311
0
Trains the K-Means algorithm
def train_KMean(data: np.array, labels: np.array, n_clusters: int) -> None:
    n_examples = np.size(data, 0)
    n_features = np.size(data, 1)
    # Scale the data so that Euclidian distance makes sense
    means = np.mean(data, axis = 0)
    stddevs = np.std(data, axis = 0, ddof = 1)
    #print(means)
    #print(stddevs)
    data_scaled = np.zeros((n_examples, n_features))
    for i in range(n_features):
        data_scaled[:, i] = (data[:,i] - means[i]) / stddevs[i]
    study_correlation(data_scaled)
    # Initialize the centroids
    idx = np.random.randint(n_examples, size = n_clusters)
    centroids = data_scaled[idx, :]
    counter = 0
    while True:
        distances = np.array([[np.sqrt(np.sum(np.square(example-centroid))) for centroid in centroids] for example in data_scaled])
        centroid_idx = np.argmin(distances, axis = 1)
        old_centroids = centroids
        centroids = update_centroids(data_scaled, centroid_idx, n_examples)
        #displacement = get_displacement(old_centroids, centroids)
        displacement = np.linalg.norm(np.array([old - new for old, new in zip(old_centroids, centroids)]))
        #assert np.linalg.norm(np.array([old - new for old, new in zip([1, 2, 3, 4], [5, 6, 7, 8])])) == 8
        if counter == 0:
            # print("Initial displacement = {}".format(displacement))
            initial_displacement = displacement
        counter += 1
        if displacement < (initial_displacement / 10000):
            break
    #print("Total number of loops before ending : {}".format(counter))
    converted_predictions = convert_predictions(centroid_idx)
    accuracy = np.mean([p == l for p, l in zip(converted_predictions, labels)])
    print("Accuracy = {}".format(accuracy))
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def kmean(encoder,tsne,true_data,true_label):\n enc_output = encoder.predict(true_data)\n kmean = KMeansClustering()\n kmean.fit(enc_output)\n pred = kmean.predict(enc_output)\n accuracy(true_label,pred)\n confusion_matrix(true_label,pred, save_name = \"confusion_matrix_kmean.png\")\n tsne.tsne_plot(true_data,pred,save_data_dir =\"kmean\",save_name=\"kmean\")", "def _kmean(self, data, k):\n #np.seterr(divide='ignore', invalid='ignore')\n # 1 step: choose random points as initial centroids\n X_centroid = np.random.randint(low = np.min(data[0,:]), high=np.max(data[0,:]), size=k)\n Y_centroid = np.random.randint(low = np.min(data[:,1]), high=np.max(data[:,1]), size=k)\n centroids = np.array([X_centroid, Y_centroid]).T\n #\n while True:\n # calculate distance\n distance = np.array([np.linalg.norm(data-centroids[i,:], axis=1) for i in range(k)])\n # assign each point to closest centroid\n labels = np.argmin(distance, axis=0)\n # copy the centroids coordiantes\n old_centroids = copy.deepcopy(centroids)\n # update centroids coordiates\n centroids = np.array([np.nanmean(data[np.where(labels==i)[0],:], axis=0) \n if np.any(labels==i) else old_centroids[i,:] for i in range(k) ])\n # verify if centroids changed\n if np.allclose(centroids, old_centroids):\n break\n \n return labels, centroids", "def train(self):\n\n print \"==> Running Kmeans on data set of shape: {}\".format(self.data.shape)\n km = KMeans(n_clusters = self.n_clusters)\n km.fit(self.data.values)\n self.labels = km.labels_\n self.inertia = km.inertia_", "def start_algorithm(self):\r\n vectors = self.vectorize_data()\r\n kmeans = KMeans(init='k-means++', n_clusters=self.cluster_amount, n_init=10)\r\n kmeans.fit(vectors)\r\n return self.cluster_tweet(kmeans.labels_)", "def main():\n data = Dummy(n_samples=500, n_dim=3)\n X = data.get_dummy()\n clustering = Kmeans(X, K=5, display=False)\n clustering.run()\n print(f\"Number of iterations: {clustering.num_iterations}\\n\")\n\n \"\"\" Test example of clustering_kmeans with unknown number of clusters K \"\"\"\n clustering = Kmeans(X,)\n clustering.silhouette_find_k()\n print(f\"Number of centroids found: {clustering.num_K}\")", "def run_various_Ks(x, K):\n m = len(x) # length of data points\n min_list = [] # list that will contain minimum costs\n Ks = [i for i in range(1,K+1)] # values of K's\n\n for i in range(1, K+1):\n # runs algorithm with different values of K\n kmeans = KMeans(n_clusters=i, random_state=0).fit(x)\n minval = kmeans.inertia_\n print(minval)\n min_list.append(minval) # appends minimum cost \n\n # Plotting J vs. K to choose best value of K\n plt.plot(Ks, min_list)\n plt.plot(Ks, min_list, '-o')\n plt.xlabel('K (# of clusters)')\n plt.ylabel('Cost function J')\n plt.title('J vs. 
K plot')\n plt.show()", "def improved_initialization(X, k):\n new_values = X.copy()\n best_like = float('-inf')\n MU, SIGMA, PI = None, None, None\n for _ in range(10):\n initial_means = get_initial_means(new_values, k)\n pi = np.full(k, 1 / k)\n while True:\n mu, clusters = k_means_step(new_values, k, initial_means)\n diff = np.sum(mu - initial_means)\n if not diff:\n sigma = compute_sigma(X, mu)\n break\n initial_means = mu\n mu, sigma, pi, res = train_model(X, k, default_convergence, (mu, sigma, pi))\n lk = likelihood(X, pi, mu, sigma, k)\n if lk > best_like:\n best_like = lk\n MU = mu\n SIGMA = sigma\n PI = pi\n return MU, SIGMA, PI", "def k_means_step(X, k, means):\n dists = np.array([np.sum((X - mean) * (X - mean), axis=1) for mean in means]) # k*m\n clusters = np.argmin(dists, axis=0)\n new_means = np.array([np.mean(X[clusters == i, :], axis=0) for i in range(k)])\n return new_means, clusters", "def train_kmeans(encodings, k):\n kmean = KMeans(n_clusters=k)\n\n model = kmean.fit(encodings)\n\n return model", "def mean(self):\n return self.k * self.theta", "def kmeans(img, k):\n # Randomly pick k pixels as initial cluster \"means\"\n # Random indices are picked without replacement; to avoid duplicate means\n n = len(img) \n rand_ind = np.random.choice(n, size=k, replace=False) \n means = img[rand_ind, :].astype(np.float32) \n\n print \"Using Kmeans..\"\n return kmeans_driver(img, means)", "def wca_mean(X, k, df):\n\t\n\n\t# Intializing the clusters\t\n\tC = dict()\n\tfor cluster in range(k):\n\t C[cluster] = pd.DataFrame()\n\n\t# Calculating the mean vector\n\tmean_vector = X.mean()\n\n\t# Choosing the seed points based on the minimum distance from the mean vector\n\tX['dist_mean'] = X.apply(lambda x: np.linalg.norm(np.asarray(x)- np.asarray(mean_vector)), axis=1)\n\tdist_means = X.sort_values(by='dist_mean')\n\t\n\t# Dropping the the datapoints which have already been assigned as seed\n\tidx_to_drop = dist_means.index[:k]\n\tdist_means.reset_index(drop=True,inplace=True)\n\tX.drop('dist_mean',axis=1,inplace=True)\n\tX.drop(idx_to_drop, inplace=True)\n\n\t# Assigning seed points to the clusters\n\tmu = list()\n\tfor cluster in range(k):\n\t C[cluster] = C[cluster].append(dist_means.iloc[cluster].drop('dist_mean'))\n\t mu.append(C[cluster].mean())\n\t\n\t# Running the algorithm\t\n\t\n\t# Initializing the p-value list which would be used for plotting\n\tpval = dict()\n\n\tfor cluster in range(k):\n\t pval[cluster] = dict()\n\t for i in C[0].columns:\n\t pval[cluster][i] = list()\n\n\t# Algorithm\n\tfor i in tqdm(range(int(len(X)/k)), desc='Iterations: '):\n\t for cluster in range(k):\n\n\t # Calculating the distances from the mean vector of eaimportch cluster (in Descending order)\n\t X['dist_mean'] = X.apply(lambda x: np.linalg.norm(np.asarray(x)- np.asarray(mu[cluster])), axis=1)\n\t dist_means = X.sort_values(by='dist_mean', ascending=False)\n\t idx_to_drop = dist_means.index[0]\n\t dist_means.reset_index(drop=True,inplace=True)\n\t X.drop('dist_mean',axis=1,inplace=True)\n\n\t # Assigning the top value to the cluster\n\t C[cluster] = C[cluster].append(dist_means.iloc[0].drop('dist_mean'))\n\t C[cluster] = C[cluster].reset_index(drop=True)\n\t \n\t # Updating means of each cluster\n\t mu[cluster] = C[cluster].mean()\n\n\t # Remove datapoint from X?\n\t X.drop(idx_to_drop,inplace=True)\n\t \n\t for i in C[0].columns:\n\t pval[cluster][i].append(sc.ks_2samp(C[cluster][i],df.drop('target',axis=1)[i])[1])\n\n\treturn(C,pval)", "def _ftl_jump(self, y, K, **kwargs):\n\n 
logger.debug(\"Re-initializing with K-means++ at K = {}\".format(K))\n\n # Initialize new centroids by k-means++\n mixtures = []\n mls = []\n for z in range(30):\n\n mean = kmeans._k_init(y, K, kmeans.row_norms(y, squared=True),\n kmeans.check_random_state(None))\n\n # Calculate weights by L2 distances to closest centers.\n distance = np.sum((y[:, :, None] - mean.T)**2, axis=1).T\n\n N, D = y.shape\n responsibility = np.zeros((K, N))\n responsibility[np.argmin(distance, axis=0), np.arange(N)] = 1.0\n\n weight = responsibility.sum(axis=1)/N\n\n covariance = _estimate_covariance_matrix(y, responsibility, mean,\n self.covariance_type, self.covariance_regularization)\n\n mixture = self.__class__(\n threshold=self.threshold,\n covariance_type=self.covariance_type,\n max_em_iterations=self.max_em_iterations,\n covariance_regularization=self.covariance_regularization)\n\n # Initialize it.\n mixture.set_parameters(mean=mean, weight=weight, covariance=covariance)\n\n # Run E-M on the partial mixture.\n R, meta = mixture._expectation_maximization(\n y, parent_responsibility=responsibility)\n raise UnsureError\n\n mixtures.append(mixture)\n mls.append(meta[\"message_length\"])\n\n print(np.std(mls))\n index = np.argmin(mls)\n mixture = mixtures[index]\n\n #slogdet = np.sum(np.log(np.linalg.det(mixture.covariance)))\n slogdet = np.sum(_slogdet(mixture.covariance, mixture.covariance_type))\n self._proposed_mixtures.append(mixture)\n self._mixture_predictors.append([\n mixture.weight.size,\n np.sum(np.log(mixture.weight)),\n meta[\"log_likelihood\"],\n slogdet,\n -meta[\"log_likelihood\"] + (D+2)/2.0 * slogdet\n ])\n # TODO: Remove predictors that we don't use.\n #self._slogs.append(np.linalg.det(mixture.covariance))\n\n return mixture, R, meta #(len(self._proposed_mixtures) - 1, R, meta)\n\n\n raise a\n\n #self.set_parameters(\n # weight=weight, mean=mean, covariance=covariance)\n\n #return responsibility", "def k_means_multiple(self, K):\r\n self.K = K\r\n table = []\r\n\r\n for numberoftimes in range(self.tries):\r\n self.randomcentroids()\r\n try:\r\n atry = self.run_k_means()\r\n table.append(atry)\r\n except ValueError:\r\n pass\r\n\r\n c = ['centroid position', 'how many for each', 'J']\r\n\r\n self.table = pd.DataFrame(table, columns=c).sort_index(by=['J']).head()", "def run_k_means(self):\r\n centroids = self.centroids\r\n\r\n for i in range(self.max_iters):\r\n self.closestcentroids()\r\n self.newcentroids()\r\n\r\n J = 0\r\n X = self.x\r\n m = len(X)\r\n idx = self.index\r\n K = self.K\r\n dim = X.shape[1]\r\n\r\n for num in range(K):\r\n # find the index of all entries where idx==n\r\n indexentries = np.nonzero(idx == num)[0]\r\n # the values in X that have the index in indesxentries\r\n values = X[indexentries]\r\n # using one of the K centroids to do the calculation. 
K<=2 doesn't\r\n # work here for some reason.\r\n centroid = centroids[num, 0]\r\n J += np.sum((values - centroid) ** 2)\r\n\r\n return [centroids.reshape((1, K, dim)), [X[idx == k].size for k in range(K)], J / m]", "def _MStep(x, z, k):\n dim = x.shape[1]\n centers = np.repeat(np.reshape(x.mean(0), (1, dim)), k, 0)\n for q in range(k):\n if np.sum(z == q) == 0:\n pass\n else:\n centers[q] = np.mean(x[z == q], 0)\n return centers", "def experiment(improved):\n\n N_list = [5 ,10 ,20]\n K_list = [3 , 7 ,9]\n P_list = [0.3 , 0.4 , 0.5 ,0.6 ,0.7]\n\n data = pandas.read_csv('train.csv')\n\n avg_list = []\n for i in range(0,len(N_list) * len(K_list) * len(P_list)):\n avg_list.append([0 , None])\n\n kf = KFold(n_splits=5, shuffle=True, random_state=209418441)\n rotation_index = 1\n for train_index, test_index in kf.split(data):\n\n train = data.iloc[train_index]\n test = data.iloc[test_index]\n index = 0\n for n in N_list:\n for k in K_list:\n for p in P_list:\n\n #print('testing for N= ',n,', K = ',k, 'P = ',p)\n KNN = forest.KNN_forest(N=n, K=k, P=p, data = train , improved=improved)\n success_rate = utls.tests.succ_rate_test.test(test,KNN.Classify)\n avg_list[index][0] += success_rate\n avg_list[index][1] = (n,k,p)\n #print(' rate is: ',avg_list[index][0]/rotation_index)\n index += 1\n rotation_index +=1\n\n\n\n best_option = max(avg_list,key= lambda x:x[0])\n #print(' ****** DONE ******')\n #print('best n,k,p are : ' , best_option[1] , ' with success rate: ' , best_option[0])\n\n return best_option[1]", "def k_means(prev_args, data_set_obj):\n parser = argparse.ArgumentParser(description='kmeans')\n parser.add_argument('--clusters', required=True,\n help='The number of clusters to use for kmeans.', type=int)\n parser.add_argument('--iterations', default=300,\n help='The maximum number of iterations for the algorithm.', type=int)\n parser.add_argument('--metric', default='euclidean',\n help='The distance metric to use.')\n args, unknown = parser.parse_known_args()\n kmeans = KMeans(prev_args.rank, args.clusters, args.iterations, args.metric)\n kmeans.fit_predict(data_set_obj.gallery_idx, data_set_obj)\n return kmeans.ranked_acc", "def kmean(X,initial_centroids,max_iters):\n m = np.size(X,0)\n K = np.size(initial_centroids,0)\n centroids = initial_centroids\n idx = np.zeros((m,1))\n for i in range(1,max_iters):\n idx = nearest_cluster(X,centroids)\n centroids = update_centroids(X,idx,K)\n return centroids,idx", "def forward(self, x, target, N_k):\n elbo = 0\n phi = self.base(x)\n def loglik(sample): #currently hardcoded for binary classification\n return -torch.log(1 + torch.exp(-target * sample))\n mu = self.mu\n cov = self.L @ self.L.T\n means = phi @ mu\n variances = torch.diagonal(phi @ cov @ phi.T, 0)\n elbo += (self.quadr(loglik, means, variances)).sum()\n elbo /= x.shape[0] #mean\n \n kls = 0\n kls -= kl_divergence(self.w_distr, self.w_prior)\n\n for i in range(len(self.prev_tasks_distr)):\n phi_i = self.base(self.prev_tasks_tensors[i])\n cov_i = phi_i @ phi_i.T\n p_u = MultivariateNormal(torch.zeros(cov_i.shape[0]).to(self.device),\n covariance_matrix=cov_i)#cov_i * self.sigma_prior)\n kls -= kl_divergence(self.prev_tasks_distr[i], p_u)\n elbo += kls / N_k\n\n return -elbo", "def kmeans_pp(img, k):\n n = len(img)\n means = np.zeros((k, 3), dtype=np.float32)\n rand_ind = np.random.randint(n)\n means[0,:]= img[rand_ind,:].astype(np.float32)\n\n # Pick means based on a probability distribution\n dist_mat = np.inf * np.ones((n, k))\n pseudo_dist = get_sq_distance(img, means[0,:])\n 
dist_mat[:,0] = np.ravel(np.abs(pseudo_dist))\n\n for ii in xrange(1, k):\n # Calculate probability\n min_dist = np.min(dist_mat[:,0:ii], axis=1)\n prob = np.power(min_dist, 2)\n prob = prob/sum(prob)\n\n # Sample next mean with probability, 'prob'\n new_ind = np.random.choice(n, p=prob)\n means[ii,:] = img[new_ind,:] # new mean\n\n # Update distance matrix with new mean\n pseudo_dist = get_sq_distance(img, means[ii,:])\n dist_mat[:,ii] = np.ravel(np.abs(pseudo_dist))\n\n print \"Using Kmeans++..\"\n return kmeans_driver(img, means)", "def learn(self, Xtrain, ytrain):\n Ktrain = None\n \n ### YOUR CODE HERE\n Xless = Xtrain\n randomize = np.arange(len(ytrain))\n np.random.shuffle(randomize)\n Xless = Xless[randomize]\n self.kcentre = Xless[:self.params['k']]\n #ytrain = ytrain[randomize]\n \n # print (Xtrain.shape)\n if (self.params['kernel'] == 'hamming'):\n print('')\n Ktrain = np.zeros([Xtrain.shape[0], self.params['k']])\n for i in range (0, Xtrain.shape[0]):\n for j in range (0, self.params['k']):\n Ktrain[i][j] = (self.hamming(Xtrain[i], self.kcentre[j]))\n \n else:\n \n Ktrain = np.dot(Xtrain, self.kcentre.T)\n #print (self.kcentre.shape)\n ### END YOUR CODE\n \n self.weights = np.zeros(Ktrain.shape[1])\n\n ### YOUR CODE HERE\n super(KernelLogitReg , self).learn(Ktrain, ytrain)\n ### END YOUR CODE\n\n self.transformed = Ktrain # Don't delete this line. It's for evaluation.", "def test_mean(self):\n m = self.m\n analytical_mean = 1.5*self.T*self.k\n computed_mean = 0 \n for j in self.v:\n computed_mean += self.meankin() \n computed_mean = computed_mean/self.N\n relative_error = abs(analytical_mean - computed_mean)/analytical_mean\n print(\"----------Kinetic energy----------\")\n print(\"{:<20}{:g}\".format(\"Computed mean:\", computed_mean))\n print(\"{:<20}{:g}\".format(\"Analytical mean:\", analytical_mean))\n print(\"{:<20}{:.2f}%\".format(\"Relative error:\", relative_error * 100))\n print(\"-----------------------------\")\n break\n assert relative_error < 0.02, \"the mean kinetic energy is off\"\n\n print(\"----------Velocity----------\")\n\n\n analytical_vel = np.sqrt(8*self.k*self.T/(np.pi*m))\n computed_vel = 0\n for i in self.v: \n computed_vel += self.meanvel()\n computed_vel = computed_vel/self.N \n relative_error = abs(analytical_vel - computed_vel)/analytical_vel\n print(\"{:<20}{:g}\".format(\"Computed velocity:\", computed_vel))\n print(\"{:<20}{:g}\".format(\"Analytical velocity:\", analytical_vel))\n print(\"{:<20}{:.2f}%\".format(\"Relative error:\", relative_error *100))\n print(\"-----------------------------\")\n break\n assert relative_error < 0.02, \"the mean velocity is off\"", "def ex7():\n\n \"\"\"\n ================= Part 1: Find Closest Centroids ====================\n To help you implement K-Means, we have divided the learning algorithm\n into two functions -- find_closest_centroids and computeCentroids. 
In this\n part, you shoudl complete the code in the find_closest_centroids function.\n \"\"\"\n print('Finding closest centroids.\\n\\n')\n\n # Load an example dataset that we will be using\n with open('ex7/data/ex7data2.pkl', 'rb') as fin:\n X = pickle.load(fin)\n\n # Select an initial set of centroids\n K = 3 # 3 Centroids\n initial_centroids = np.array([[3, 3], [6, 2], [8, 5]])\n\n # Find the closest centroids for the examples using the\n # initial_centroids\n idx = find_closest_centroids(X, initial_centroids)\n\n print('Closest centroids for the first 3 examples: \\n')\n print(idx[0:3])\n print('\\n(the closest centroids should be 0, 2, 1 respectively)\\n')\n\n \"\"\"\n ===================== Part 2: Compute Means =========================\n After implementing the closest centroids function, you should now\n complete the computeCentroids function.\n \n \"\"\"\n print('\\nComputing centroids means.\\n\\n')\n\n # Compute means based on the closest centroids found in the previous part.\n centroids = compute_centroids(X, idx, K)\n\n print('Centroids computed after initial finding of closest centroids: \\n')\n print(centroids)\n print('\\n(the centroids should be\\n')\n print(' [ 2.428301 3.157924 ]\\n')\n print(' [ 5.813503 2.633656 ]\\n')\n print(' [ 7.119387 3.616684 ]\\n)\\n')\n\n \"\"\"\n =================== Part 3: K-Means Clustering ======================\n After you have completed the two functions computeCentroids and\n find_closest_centroids, you have all the necessary pieces to run the\n kMeans algorithm. In this part, you will run the K-Means algorithm on\n the example dataset we have provided.\n \"\"\"\n print('\\nRunning K-Means clustering on example dataset.\\n\\n')\n\n # Load an example dataset\n with open('ex7/data/ex7data2.pkl', 'rb') as fin:\n X = pickle.load(fin)\n\n # Settings for running K-Means\n K = 3\n max_iters = 10\n\n \"\"\"\n For consistency, here we set centroids to specific values\n but in practice you want to generate them automatically, such as by\n settings them to be random examples (as can be seen in\n kmeans_init_centroids).\n \"\"\"\n initial_centroids = np.array([[3, 3], [6, 2], [8, 5]])\n\n # Run K-Means algorithm. The 'true' at the end tells our function to plot\n # the progress of K-Means\n centroids, idx = run_kmeans(X, initial_centroids, max_iters, True)\n print('\\nK-Means Done.\\n\\n')\n\n \"\"\"\n ============= Part 4: K-Means Clustering on Pixels ===============\n In this exercise, you will use K-Means to compress an image. 
To do this,\n you will first run K-Means on the colors of the pixels in the image and\n then you will map each pixel on to it's closest centroid.\n \n You should now complete the code in kmeans_init_centroids.py\n \"\"\"\n\n print('\\nRunning K-Means clustering on pixels from an image.\\n\\n')\n\n # Load an image of a bird\n A = plt.imread('ex7/data/bird_small.png')\n # A = A / 255; # Divide by 255 so that all values are in the range 0 - 1\n\n # Size of the image\n img_size = A.shape\n\n # Reshape the image into an Nx3 matrix where N = number of pixels.\n # Each row will contain the Red, Green and Blue pixel values\n # This gives us our dataset matrix X that we will use K-Means on.\n X = np.reshape(A, (img_size[0] * img_size[1], 3))\n\n # Run your K-Means algorithm on this data\n # You should try different values of K and max_iters here\n K = 16\n max_iters = 10\n\n # When using K-Means, it is important the initialize the centroids\n # randomly.\n # You should complete the code in kmeans_init_centroids.py before proceeding\n initial_centroids = kmeans_init_centroids(X, K)\n\n # Run K-Means\n [centroids, idx] = run_kmeans(X, initial_centroids, max_iters)\n\n \"\"\"\n ================= Part 5: Image Compression ======================\n In this part of the exercise, you will use the clusters of K-Means to\n compress an image. To do this, we first find the closest clusters for\n each example. After that, we \n \"\"\"\n print('\\nApplying K-Means to compress an image.\\n\\n')\n\n # Find closest cluster members\n idx = find_closest_centroids(X, centroids)\n\n # Essentially, now we have represented the image X as in terms of the\n # indices in idx.\n\n # We can now recover the image from the indices (idx) by mapping each pixel\n # (specified by it's index in idx) to the centroid value\n X_recovered = centroids[idx, :]\n\n # Reshape the recovered image into proper dimensions\n X_recovered = np.reshape(X_recovered, (img_size[0], img_size[1], 3))\n\n # Display the original image\n plt.close()\n fig, (ax1, ax2) = plt.subplots(1, 2)\n ax1.imshow(A)\n ax1.set_title('Original')\n\n # Display compressed image side by side\n ax2.imshow(X_recovered)\n ax2.set_title('Compressed, with {:d} colors.'.format(K))\n plt.show()", "def __init__(self, training_set, k=5):\n super(Knn, self).__init__(training_set)\n self.k = k", "def run_kohonen_dynamicLearningRate(data,fun,size_k: int=6, eta: float=0.1, tmax: int=5000, convergence=0):\n dim = 28*28\n data_range = 255.0\n dy, dx = data.shape\n \n #initialise the centers randomly\n centers = np.random.rand(size_k**2, dim) * data_range\n \n #build a neighborhood matrix\n neighbor = np.arange(size_k**2).reshape((size_k, size_k))\n \n #set the random order in which the datapoints should be presented\n i_random = np.arange(tmax) % dy\n np.random.shuffle(i_random)\n \n #error for convergence criterion\n error = [np.inf]\n\n for t, i in enumerate(i_random):\n old_centers = copy(centers)\n sigma = fun(t)\n som_step(centers, data[i,:],neighbor,eta,sigma)\n \n if t % 1E4 == 0:\n print('iteration {}'.format(t))\n \n if convergence == 1:\n #convergence: distance between samples and best matching prototypes \n error.append(calculate_error(centers,data))\n# if np.abs((error[-2]-error[-1])/error[1]) < eps :\n# break\n \n elif convergence == 2:\n #convergence: non significant weight update\n err = np.linalg.norm(centers-old_centers)\n error.append(err)\n# if err < eps_2:\n# break\n\n \"\"\" # for visualization, you can use this:\n for i in range(size_k**2):\n 
plb.subplot(size_k,size_k,i)\n \n plb.imshow(np.reshape(centers[i,:], [28, 28]),interpolation='bilinear')\n plb.axis('off')\n \n # leave the window open at the end of the loop\n plb.show()\n plb.draw() \"\"\"\n return centers, error[1:]", "def learn(self, Xtrain, ytrain):\n self.mean = np.mean(ytrain)", "def elbow_kmeans_ch(self, corpus):\n print('Iterating kmeans over range of topics...')\n km = KMeans(init='k-means++')\n visualizer = KElbowVisualizer(km,k=range(self.start, self.stop, self.step),\n metric='calinski_harabaz', timings=False)\n visualizer.fit(corpus.vectors)\n visualizer.poof(outpath= self.folder + 'elbow_c_h.png')\n print('Saved elbow curve.')\n return", "def __init__(self, top_k: int):\n self._topk_acc_dict: Dict[int, Mean] = defaultdict(Mean)\n self.top_k: int = top_k\n\n self.__torchmetrics_requires_task = version.parse(\n torchmetrics.__version__\n ) >= version.parse(\"0.11.0\")", "def __init__(self, top_k):\n super(MinibatchTopkAccuracy, self).__init__(\n reset_at=\"iteration\", emit_at=\"iteration\", mode=\"train\", top_k=top_k\n )\n self.top_k = top_k" ]
[ "0.6554288", "0.6256333", "0.62400466", "0.620281", "0.61864436", "0.6135534", "0.61229354", "0.5987888", "0.59740555", "0.5946454", "0.5930415", "0.59069073", "0.59066105", "0.5885856", "0.5878588", "0.58668435", "0.5842038", "0.5839786", "0.5814828", "0.57988185", "0.57925886", "0.579221", "0.57856524", "0.5777399", "0.57760805", "0.57628834", "0.5751332", "0.57504195", "0.5746358", "0.57348895" ]
0.6680943
0
Trains and estimates the performance of a logistic regression algorithm
def train_logisticRegression(data: np.array, labels: np.array) -> None:
    n_examples = np.size(data, 0)
    n_features = np.size(data, 1)
    n_categories = np.size(labels, 1)
    data = np.hstack((np.ones((n_examples, 1)), data))
    print(data[0:5, :])
    X_train, X_test, y_train, y_test, idx_test = split_data(data, labels, 0.7)
    convergence_goal = 1e-3
    learning_rate = 0.01
    theta = np.random.uniform(size=((n_features+1, n_categories)))
    for i in range(n_categories):
        cost_var = 1
        previous_cost = 1e6
        iterations = 0
        cost_to_plot = []
        while cost_var > convergence_goal:
            iterations += 1
            cost, grad = costFunction(X_train, y_train[:, i], theta[:, i])
            theta[:, i] = update_theta(theta[:, i], grad, learning_rate)
            cost_var = previous_cost - cost
            previous_cost = cost
            if iterations == 1:
                cost_var = 1
            cost_to_plot.append(cost)
            # print(cost)
        plt.plot(range(iterations), cost_to_plot, 'g-', label = 'cost')
        plt.xlabel('iterations')
        plt.ylabel('cost')
        # plt.show()
    predictions = lrPredict(theta, X_test)
    print(predictions[0:5, :])
    print(y_test[0:5, :])
    accuracy = np.mean([p == l for p, l in zip(predictions, y_test)])
    print("Accuracy = {}".format(accuracy))
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fit_logistic_regression():\n\n logger.debug(\"Running the fit_logistic_regression function now\")\n\n #Loading the configuration\n with open(os.path.join(\"config\",\"config.yml\"), \"r\") as f:\n config = yaml.safe_load(f)\n\n #Loading and pre processing the data\n logger.debug(\"Loading and pre processing the data\")\n train_df = load_data(config[\"load_data\"][\"train_file\"])\n train_df = pre_process_data(train_df, resample = True, resample_count = 500000)\n\n #Defining Pipeline\n pipeline = Pipeline([\n ('tfidf', TfidfVectorizer(analyzer='word', token_pattern=r'[A-Za-z0-9@-]+')),\n ('model', LogisticRegression(random_state=12345, verbose = 1, solver = 'saga')),\n ])\n\n #Defining parameters to vary\n parameters = {\n 'tfidf__max_df': (0.25, 0.5, 0.75),\n 'tfidf__max_features': (None, 5000, 10000, 50000),\n 'tfidf__ngram_range': ((1, 1), (1, 2)),\n 'model__C': (0.01, 1, 100)\n }\n\n scoring_list = [\"accuracy\", \"f1\", \"precision\", \"recall\", \"roc_auc\"]\n \n #Performing 5fold CV to determine best hyperparameters\n model = GridSearchCV(pipeline, parameters, cv=5,\n n_jobs=-1, verbose=1, scoring=scoring_list, refit='f1',)\n\n t0 = datetime.datetime.now()\n\n model.fit(train_df[\"Review\"].tolist(), train_df[\"Ratings\"].to_numpy())\n \n logger.info(\"Grid Search performed in {}\".format(str(datetime.datetime.now()-t0)))\n\n #Saving results\n res_df = pd.DataFrame(model.cv_results_)\n res_df.to_csv(os.path.join(config[\"summary_stats\"][\"save_location\"], \"LogisticRegressionResults.csv\"))\n \n #Saving the model\n pickle.dump(model, open(os.path.join(config[\"models\"][\"save_location\"], \"LogisticRegression.pkl\"),'wb'))\n\n return", "def run_logistic(X_train, X_test, y_train, y_test, C=1, penalty = 'l2', solver = 'lbfgs'):\n \n logreg = LogisticRegression(fit_intercept=True, C=C, penalty = penalty, solver = solver)\n logreg.fit(X_train, y_train)\n get_scores(logreg, X_train, X_test, y_train, y_test)", "def log_reg(x_train, y_train):\n\n log_reg_classifier = LogisticRegression(max_iter=1000, solver='lbfgs')\n log_reg_classifier.fit(x_train, y_train)\n return log_reg_classifier\n\n # log_reg_classifier.fit(x_train, y_train)", "def train_logistic_regression(x_train, y_train, learning_rate, fit_intercept=False, max_iter=500):\r\n if fit_intercept:\r\n intercept = np.ones(x_train.shape[0], 1)\r\n x_train = np.hstack((intercept, x_train)) # hstacks merges 2 arrays column wise\r\n weights = np.zeros(x_train.shape[1])\r\n for iteration in range(max_iter):\r\n weights = update_weights(x_train, y_train, weights, learning_rate)\r\n # printing cost for every 100 iterations\r\n if iteration % 100 == 0:\r\n print(calculate_cost(x_train, y_train, weights))\r\n return weights", "def train_logistic_regression(train_x, train_y):\n\n logistic_regression_model = LogisticRegression(penalty='l2', C=1.0)\n logistic_regression_model.fit(train_x, train_y)\n return logistic_regression_model", "def logistic_regression(X, Y):\n m, n = X.shape\n theta = np.zeros(n)\n learning_rate = 10\n\n i = 0\n while True:\n i += 1\n prev_theta = theta\n grad = calc_grad(X, Y, theta)\n theta = theta - learning_rate * grad\n if i % 10000 == 0:\n print('Finished %d iterations' % i)\n # plot decision boundary for the ith iteration listed in i_lst\n i_lst = [1, 2, 3, 10, 100, 200, 500, 1000, 10000, 30370, 40000, 50000]\n if i in i_lst:\n save_path = \"output/p01_b_a\" + str(i) + \".png\"\n plot(X, Y, theta, save_path)\n if np.linalg.norm(prev_theta - theta) < 1e-15:\n print('Converged in %d iterations' % i)\n break\n 
return", "def create_logistic_regression():\n\n pause_data = shuffle(pd.read_csv(sys.argv[1]))\n pause_data = pause_data.replace([np.inf, -np.inf], np.nan).dropna()\n # X = pause_data.drop([HAS_DEMENTIA, TRANSCRIPT_ID], axis=1)\n X = pause_data[MEMORY_FEATURES]\n y = pause_data[HAS_DEMENTIA]\n split_tracker = []\n rskf = RepeatedStratifiedKFold(n_splits=10, n_repeats=2, random_state=36851234)\n # n_repeats 10 too\n for train_index, test_index in rskf.split(X, y):\n X_train, X_test = X.iloc[list(train_index)], X.iloc[list(test_index)]\n y_train, y_test = y.iloc[list(train_index)], y.iloc[list(test_index)]\n logmodel = LogisticRegression()\n logmodel.fit(X_train, y_train)\n predictions = logmodel.predict(X_test)\n split_tracker.append({\n TRAIN: train_index,\n TEST: test_index,\n PREDICTIONS: predictions,\n Y_TEST: y_test\n })\n accuracy = []\n f1 = []\n auc = []\n print(\"Predictions\", split_tracker[0])\n for predictions in split_tracker:\n # print(classification_report(predictions[Y_TEST], predictions[PREDICTIONS]))\n accuracy.append(accuracy_score(predictions[Y_TEST], predictions[PREDICTIONS]))\n f1.append(f1_score(predictions[Y_TEST], predictions[PREDICTIONS]))\n auc.append(roc_auc_score(predictions[Y_TEST], predictions[PREDICTIONS]))\n print(accuracy)\n accuracy = np.array(accuracy)\n f1 = np.array(f1)\n auc = np.array(auc)\n print(len(accuracy))\n print('mean accuracy: ', accuracy.mean())\n print('mean f1 score: ', f1.mean())\n print('mean auc: ', auc.mean())", "def train_model(self):\r\n alpha, accuracy_rate = self.select_model()\r\n # Initialize logistic regression with alpha(learning rate)\r\n lg = logisticregression(C=alpha)\r\n # Train the model.\r\n lg.fit(self.training_data, self.target_data)\r\n # Save the trained model as .pkl file.\r\n joblib.dump(value=lg, filename=self.intention_id+'.pkl', compress=1)\r\n print \"Estimated Parameters of Logistic Regression\"\r\n # Estimated parameters of logistic regression.\r\n print lg.get_params()", "def train_logistic_regression(train_exs: List[SentimentExample], feat_extractor: FeatureExtractor) -> LogisticRegressionClassifier:\n lr = LogisticRegressionClassifier(feat_extractor.corpus_length, feat_extractor)\n alpha = 1e0\n # beta = 1e-4\n for epoch in range(8):\n loss = 0.\n acc = 0\n indices = np.arange(len(train_exs))\n np.random.shuffle(indices)\n for i in indices:\n feat = feat_extractor.feats[i]\n sentimentExample = train_exs[i]\n y = sentimentExample.label\n z = 1 / (1 + np.exp(-feat.dot(np.expand_dims(lr.w, axis=1))))[0, 0]\n loss += -y * np.log(z) - (1 - y) * np.log(1 - z) \\\n # + beta * np.expand_dims(lr.w, axis=0).dot(np.expand_dims(lr.w, axis=1))[0, 0]\n predict = int(feat.dot(np.expand_dims(lr.w, axis=1))[0, 0] > 0)\n acc += (predict == y)\n grad = (z - y) * feat.toarray()[0] # + 2 * beta * lr.w\n lr.w = lr.w - alpha * grad\n print(\"epoch {:d}, loss: {:f}, accuracy: {:f}\".format(epoch, loss / len(train_exs), acc / len(train_exs)))\n\n for i in indices:\n feat = feat_extractor.feats[i]\n sentimentExample = train_exs[i]\n y = sentimentExample.label\n z = 1 / (1 + np.exp(-feat.dot(np.expand_dims(lr.w, axis=1))))[0, 0]\n loss += -y * np.log(z) - (1 - y) * np.log(1 - z)\n print(\"training loss: {:f}\".format(loss / len(train_exs)))\n\n return lr", "def run_logistic_regression(training, testing, feature_cols, outcome_col):\n if 'intercept' not in training.columns:\n training['intercept'] = 1\n if 'intercept' not in testing.columns:\n testing['intercept'] = 1\n intercept_feature_cols = feature_cols + ['intercept']\n logit = 
sm.Logit(training[outcome_col], training[intercept_feature_cols])\n fitted_logit_model = logit.fit()\n logit_diagnostics = get_diagnostics(testing[outcome_col], testing[intercept_feature_cols], fitted_logit_model, model_type = 'logit')\n predicted_logit_probs = fitted_logit_model.predict(testing[intercept_feature_cols])\n\n return fitted_logit_model, logit_diagnostics, predicted_logit_probs", "def logistic(weights, data, targets, hyperparameters):\n\n # TODO: Finish this function\n\n return f, df, y", "def test_logistic_regression_c_parameter(params, X_train, X_test, y_train, y_test):", "def train_logistic_regression(X_train_input, y_train_input, C=1):\r\n from sklearn.linear_model import LogisticRegression\r\n logr_clf = LogisticRegression(C=C)\r\n logr_clf.fit(X_train_input, y_train_input)\r\n return logr_clf", "def logistic(weights, data, targets, hyperparameters):\n y = logistic_predict(weights, data)\n\n #####################################################################\n # TODO: #\n # Given weights and data, return the averaged loss over all data #\n # points, gradient of parameters, and the probabilities given by #\n # logistic regression. #\n #####################################################################\n f = None\n df = None\n\n f = evaluate(targets, y)[0]\n\n N = len(data)\n M = len(weights) - 1 \n temp = np.ones([N, M + 1])\n temp[: N, : M] = np.array(data)\n\n\n df = np.zeros([M+1, 1])\n\n df[:, 0] = np.array([[np.mean([(y.flatten()[i] - targets.flatten()[i]) * temp[i][j] for i in range(0, N)]) for j in range(0, M + 1)],])\n\n # df = np.matrix([[np.mean([(y[i] - targets[i]) * temp[i][j] for i in range(0, N)]) for j in range(0, M + 1)],])\n\n #####################################################################\n # END OF YOUR CODE #\n #####################################################################\n return f, df, y", "def __init__(self, reg_penalty='l2', reg_inv=1.0, k_fold=5, random_state=0):\n print(\"Initialize model Logistic Regression\")\n self.reg_penalty = reg_penalty\n self.reg_inv = reg_inv\n self.k_fold = k_fold\n self.random_state = random_state\n self.model = sklearn.linear_model.LogisticRegression(penalty=self.reg_penalty,\n C=self.reg_inv,\n max_iter=1000, \n random_state=self.random_state)", "def _simple_logistic_regression(x,y,beta_start=None,verbose=False,\n CONV_THRESH=1.e-3,MAXIT=500):\n if len(x) != len(y):\n raise ValueError, \"x and y should be the same length!\"\n if beta_start is None:\n beta_start = NA.zeros(2,x.dtype.char)\n iter = 0; diff = 1.; beta = beta_start # initial values\n if verbose:\n print 'iteration beta log-likliehood |beta-beta_old|' \n while iter < MAXIT:\n beta_old = beta \n p = NA.exp(beta[0]+beta[1]*x)/(1.+NA.exp(beta[0]+beta[1]*x))\n l = NA.sum(y*NA.log(p) + (1.-y)*NA.log(1.-p)) # log-likliehood\n s = NA.array([NA.sum(y-p), NA.sum((y-p)*x)]) # scoring function\n # information matrix\n J_bar = NA.array([[NA.sum(p*(1-p)),NA.sum(p*(1-p)*x)],\n [NA.sum(p*(1-p)*x),NA.sum(p*(1-p)*x*x)]])\n beta = beta_old + NA.dot(LA.inverse(J_bar),s) # new value of beta\n diff = NA.sum(NA.fabs(beta-beta_old)) # sum of absolute differences\n if verbose:\n print iter+1, beta, l, diff\n if diff <= CONV_THRESH: break\n iter = iter + 1\n return beta, J_bar, l", "def logistic_regression(x,y,beta_start=None,verbose=False,CONV_THRESH=1.e-3,\n MAXIT=500):\n if x.shape[-1] != len(y):\n raise ValueError, \"x.shape[-1] and y should be the same length!\"\n try:\n N, npreds = x.shape[1], x.shape[0]\n except: # single predictor, use simple logistic 
regression routine.\n return _simple_logistic_regression(x,y,beta_start=beta_start,\n CONV_THRESH=CONV_THRESH,MAXIT=MAXIT,verbose=verbose)\n if beta_start is None:\n beta_start = NA.zeros(npreds+1,x.dtype.char)\n X = NA.ones((npreds+1,N), x.dtype.char)\n X[1:, :] = x\n Xt = NA.transpose(X)\n iter = 0; diff = 1.; beta = beta_start # initial values\n if verbose:\n print 'iteration beta log-likliehood |beta-beta_old|' \n while iter < MAXIT:\n beta_old = beta \n ebx = NA.exp(NA.dot(beta, X))\n p = ebx/(1.+ebx)\n l = NA.sum(y*NA.log(p) + (1.-y)*NA.log(1.-p)) # log-likeliehood\n s = NA.dot(X, y-p) # scoring function\n J_bar = NA.dot(X*p,Xt) # information matrix\n beta = beta_old + NA.dot(LA.inverse(J_bar),s) # new value of beta\n diff = NA.sum(NA.fabs(beta-beta_old)) # sum of absolute differences\n if verbose:\n print iter+1, beta, l, diff\n if diff <= CONV_THRESH: break\n iter = iter + 1\n if iter == MAXIT and diff > CONV_THRESH: \n print 'warning: convergence not achieved with threshold of %s in %s iterations' % (CONV_THRESH,MAXIT)\n return beta, J_bar, l", "def trainRegressionModel(X,y):\n # # instantiate a logistic regression model, and fit with X and y\n # model = LogisticRegression()\n # model = model.fit(X, y)\n # # check the accuracy on the training set\n # print(model.score(X, y))\n #X['intercept'] = 1.0\n #del X['isCapitalized']\n #del X['isNN']\n #del X['isNNP']\n #del X['isJJ']\n #del X['isUpper']\n #del X['isPrecedingIN']\n logit = sm.Logit(y, X)\n result = logit.fit()\n print(result.summary())\n print(result.conf_int())\n model = LogisticRegression()\n model = model.fit(X, y)\n print(model.score(X, y))\n print(y.mean())\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)\n model2 = LogisticRegression()\n model2.fit(X_train, y_train)\n # predict class labels for the test set\n predicted = model.predict(X_test)\n print(predicted)\n for i in predicted:\n if i==1:\n print(\"Test:\"+str(i))\n print(max(predicted))\n #generate class probabilities\n probs = model2.predict_proba(X_test)\n print(probs)\n # generate evaluation metrics\n print(\"Accuracy: \"+str(metrics.accuracy_score(y_test, predicted)))\n print(\"AUC: \"+str(metrics.roc_auc_score(y_test, probs[:, 1])))\n print(metrics.confusion_matrix(y_test, predicted))\n print(metrics.classification_report(y_test, predicted))\n\n from sklearn.cross_validation import cross_val_score\n # evaluate the model using 10-fold cross-validation\n scores = cross_val_score(LogisticRegression(), X, y, scoring='accuracy', cv=10)\n print(scores)\n print(scores.mean())", "def do_stats_model(x, y):\n Xx = sm.add_constant(x)\n sm_logit = sm.Logit(y, Xx)\n result = sm_logit.fit()\n print result.summary()\n result.pred_table()\n # linear model\n print \"linear regression model:\\n\"\n sm_linear = sm.OLS(y, Xx)\n result = sm_linear.fit()\n print result.summary()", "def LogisticRegression_self_test(X_train, X_test, y_train, y_test, learning_rates, epochs, iteration):\n\n\t# scoping number of training samples\n\n\tn_inputs = X_train.shape[0]\n\tn_features = X_train.shape[1]\n\n\t\n\n\teta_ = 1e-12\n\tbeta_opt = np.random.randn(X_train.shape[1], 2)\n\tcalc_beta_GD, norm = GradientDescent(X_train, beta_opt, y_train, iteration, eta_)\n\tprob_GD, predict_GD= Probability_GD(X_test, calc_beta_GD) #defining values to be between 0 and 1\n\t#yPred_GD = (predict_GD >= 0.5).astype(int) # converting to just 0 or 1\n\n\t#Define Logistic regression\n\tclf = LogisticRegression(solver='lbfgs', max_iter=1e5)\n\tclf = clf.fit(X_train, 
np.ravel(y_train))\n\tpred_sklearn = clf.predict(X_test)\n\tprob_sklearn = clf.predict_proba(X_test)\n\t#print(prob_sklearn)\n\n\t#for eta in np.logspace(np.log10(1e-6), np.log10(1e0), 7):\n\taccuracy = np.zeros(len(learning_rates))\n\tauc_score = np.zeros(len(learning_rates))\n\n\tfor i, eta in enumerate(learning_rates):\n\t\tbeta_SGD = stochastic_gradient_descent(X_train, beta_opt, y_train, eta, epochs, iteration)\n\t\tprob_SGD, predict_SGD= Probability(X_test, beta_SGD) #defining values to be between 0 and 1\n\t\t\n\t\t\n\t\taccuracy[i] = metrics.accuracy_score(y_test, predict_SGD)\n\t\tauc_score[i] = metrics.roc_auc_score(y_test, predict_SGD)\n\t\tdifference = y_test - predict_SGD\n\n\t\t\n\n\t\tif i> 0 and auc_score[i] > auc_score[i-1]:\n\t\t\tbest_pred_SGD= predict_SGD\n\t\t\tbest_prob_SGD = prob_SGD\n\t\n\n\t\tprint('Accuracy {}, learning rate= {}, iterations = {}'.format(accuracy[i], eta, iteration))\n\t\n\t\tprint('Auc score: {}'.format(auc_score[i]))\n\n\n\t\t\"\"\"\n\t\tplt.plot(yPred, label='predict')\n\t\tplt.plot(optimal_beta, label ='optimal beta')\n\t\tplt.plot(y_test, label='test')\n\t\tplt.show()\n\t\t\"\"\"\n\n\tsns.set()\n\tsns.heatmap(pd.DataFrame(accuracy), annot= True, fmt='.4g')\n\tplt.title('Grid-search for logistic regression')\n\tplt.ylabel('Learning rate: $\\\\eta$')\n\tplt.xlabel('Regularization Term: $\\\\lambda$')\n\t#plt.xticks(ticks=np.arange(len(learning_rates)) + 0.5, labels=learning_rates)\n\t#plt.yticks(ticks=np.arange(len(lambda_values)) + 0.5, labels=lambda_values)\n\tb, t = plt.ylim() # discover the values for bottom and top\n\tb += 0.5 # Add 0.5 to the bottom\n\tt -= 0.5 # Subtract 0.5 from the top\n\tplt.ylim(b, t) # update the ylim(bottom, top) values\n\t#plt.savefig('accuracy_logreg.png')\n\tplt.show()\n\n\tsns.heatmap(pd.DataFrame(auc_score), annot= True, fmt='.4g')\n\tplt.title('Grid-search for logistic regression')\n\tplt.ylabel('Learning rate: $\\\\eta$')\n\tplt.xlabel('Regularization Term: $\\\\lambda$')\n\t#plt.xticks(ticks=np.arange(len(learning_rates)) + 0.5, labels=learning_rates)\n\t#plt.yticks(ticks=np.arange(len(lambda_values)) + 0.5, labels=lambda_values)\n\tb, t = plt.ylim() # discover the values for bottom and top\n\tb += 0.5 # Add 0.5 to the bottom\n\tt -= 0.5 # Subtract 0.5 from the top\n\tplt.ylim(b, t) # update the ylim(bottom, top) values\n\t#plt.savefig('auc_score_logreg.png')\n\tplt.show()\n\n\t#plot confusion matrix\n\tConfusion_Matrix(y_test, predict_GD)\n\t#Confusion_Matrix(y_test, best_pred_SGD)\n\t#Confusion_Matrix(y_test, pred_sklearn)\n\n\t#diff = np.concatenate((1- predict, predict), axis=1)\n\n\tdiff_sklearn = np.concatenate((1- prob_sklearn, prob_sklearn), axis=1)\n\tdiff_GD = np.concatenate((1- prob_GD, prob_GD), axis=1)\n\tdiff_SGD = np.concatenate((1- best_prob_SGD, best_prob_SGD), axis=1)\n\n\t#plot roc curves\n\tplot_roc(y_test, prob_sklearn)\n\tplot_roc(y_test, diff_SGD)\n\tplot_roc(y_test, prob_GD)\n\tplt.show()\n\n\t#plot cumulative gain curves\n\tplot_cumulative_gain(y_test, prob_sklearn)\n\tax = plot_cumulative_gain(y_test, diff_SGD)\n\tplot_cumulative_gain(y_test, prob_GD)\n\t#plt.show()\n\n\n\n\t\"\"\"\n\t#plot roc curves\n\tplot_roc(y_test, diff_sklearn, plot_micro=False, plot_macro= False)\n\tplot_roc(y_test, diff_GD, plot_micro=False, plot_macro= False)\n\tplot_roc(y_test, diff_SGD, plot_micro=False, plot_macro= False)\n\tplt.show()\n\n\t#plot cumulative gain curves\n\tplot_cumulative_gain(y_test, diff_sklearn)\n\tplot_cumulative_gain(y_test, diff_GD)\n\tplot_cumulative_gain(y_test, 
diff_SGD)\n\tplt.show()\t\n\n\t\"\"\"\n\n\tmodel_curve = auc_score\n\tarea_baseline = 0.5\n\tarea_ratio = (model_curve - area_baseline)/(area_baseline)\n\tprint('Area Ratio:',area_ratio)\n\n\n\treturn accuracy, learning_rates", "def logistic_regression(y, tx, initial_w, max_iters, gamma):\n return least_squares_SGD(y, tx, initial_w, max_iters, gamma, loss_function=logistic_loss, gradient=logistic_grad)", "def stability_logistic(x, y, **kwargs):\n rlr = RandomizedLogisticRegression(n_jobs=kwargs.get('n_jobs', 4))\n if 'param' in kwargs:\n rlr.set_params(**kwargs['param'])\n rlr.fit(x, y)\n return rlr.get_support()", "def analysis(houses:pd.DataFrame) -> None:\n \n \"\"\"\n #Me just trying to fit the data without any outside influences\n f= f'SELLER_HOUSE ~ SQFT_PER + PRICE + C(LOCATION)' \n result= smf.logit(formula= str(f), data= houses).fit()\n print(result.summary2())\n y= ['SELLER_HOUSE']\n x= ['SQFT_PER', 'PRICE', 'LOC_699 - Not Defined', 'LOC_AA - Airport Area', 'LOC_CG - Columbus Grove',\n 'LOC_CV - Cypress Village', 'LOC_EASTW - Eastwood', 'LOC_EC - El Camino Real', 'LOC_GP - Great Park',\n 'LOC_IRSP - Irvine Spectrum', 'LOC_LGA - Laguna Altura', 'LOC_NK - Northpark', 'LOC_NW - Northwood', \n 'LOC_OC - Oak Creek', 'LOC_OH - Orchard Hills', 'LOC_OT - Orangetree', 'LOC_PS - Portola Springs', \n 'LOC_QH - Quail Hill', 'LOC_SH - Shady Canyon', 'LOC_SJ - Rancho San Joaquin', 'LOC_STG - Stonegate', \n 'LOC_Stonegate', 'LOC_TR - Turtle Rock', 'LOC_TRG - Turtle Ridge', 'LOC_UP - University Park',\n 'LOC_UT - University Town Center', 'LOC_WB - Woodbridge', 'LOC_WD - Woodbury', \n 'LOC_WI - West Irvine', 'LOC_WN - Walnut (Irvine)', 'LOC_WP - Westpark']\n x_train, x_test, y_train, y_test= train_test_split(houses[x], houses[y], test_size= 0.3, random_state= 500)\n logreg = LogisticRegression()\n logreg.fit(x_train, y_train.values.ravel())\n y_pred= logreg.predict(x_test)\n print('Accuracy of logistic regression classifier on test set:', round(logreg.score(x_test, y_test), 3))\n # This model is really bad\n \n \"\"\"\n \n \"\"\n houses= houses.drop(['DAYS_ON_MARKET', 'ADDRESS', 'LOCATION',\n 'STATUS', 'PROPERTY_TYPE', 'ZIP_CODE'], axis= 1)\n columns= houses.columns.values.tolist()\n y= ['SELLER_HOUSE']\n x= [i for i in columns if i not in y]\n \n # Over Sampling Using SMOTE \n x_train, _, y_train, _= train_test_split(houses[x], houses[y], test_size= 0.3, random_state= 500)\n x_columns= x_train.columns\n \n os= SMOTE(random_state= 0)\n os_x, os_y= os.fit_sample(x_train, y_train)\n os_x= pd.DataFrame(data= os_x, columns= x_columns)\n os_y= pd.DataFrame(data= os_y, columns= y)\n \n \n #Recursive Feature Elimination\n logreg= LogisticRegression(max_iter= 600)\n rfe= RFE(logreg, 20)\n rfe= rfe.fit(os_x, os_y.values.ravel())\n \n lst= [i for count, i in enumerate(x) if rfe.support_[count] == True]\n X= os_x[lst]\n Y= os_y['SELLER_HOUSE']\n \n \n #logit_model= sm.Logit(Y, X)\n #result= logit_model.fit()\n #print(result.summary2()) # Model choosen by RCE\n \n #These are features have a p-value less than 0.05\n final_x= ['BATHS', 'ZIP_92602.0', 'ZIP_92618.0', 'LOC_699 - Not Defined', 'LOC_TR - Turtle Rock', 'LOC_WD - Woodbury']\n #final_x= ['ZIP_92602.0', 'LOC_699 - Not Defined', 'LOC_TR - Turtle Rock', 'LOC_WD - Woodbury']\n X2= os_x[final_x]\n \n logit_model2= sm.Logit(Y, X2)\n result2= logit_model2.fit()\n print(result2.summary2()) # Final Model\n \n x_train2, x_test2, y_train2, y_test2= train_test_split(X2, Y, test_size= 0.3, random_state= 500)\n logreg = LogisticRegression()\n logreg.fit(x_train2, 
y_train2)\n \n y_pred= logreg.predict(x_test2)\n print('Accuracy of logistic regression classifier on test set:', round(logreg.score(x_test2, y_test2), 2))\n \n conf_matrix= confusion_matrix(y_test2, y_pred)\n print(conf_matrix)\n # So 22+61 correct predictions and 13+44 wrong predictions\n \n logit_roc_auc = roc_auc_score(y_test2, logreg.predict(x_test2))\n fpr, tpr, _ = roc_curve(y_test2, logreg.predict_proba(x_test2)[:,1])\n plt.figure()\n plt.plot(fpr, tpr, label='Logistic Regression (area = %0.2f)' % logit_roc_auc)\n plt.plot([0, 1], [0, 1],'r--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver operating characteristic')\n plt.legend(loc=\"lower right\")\n plt.show()\n \"\"", "def test_train_logist(x_train_variable, y_train_dep):\n # Ensure the function works\n try:\n lrc = cls.train_logistic(x_train_variable, y_train_dep)\n logging.info(\"Successful Logistic Model\")\n except Exception as err:\n logging.error(\"Errors in Fitting the Logistic Regression\")\n raise err\n return lrc", "def train_logistic_regression(train_exs: List[SentimentExample],\n feat_extractor: FeatureExtractor) -> LogisticRegressionClassifier:\n indexer = feat_extractor.get_indexer()\n weights = np.transpose(np.zeros(indexer.__len__(), dtype=int))\n learning_rate = 0.1\n for i in range(15):\n for ex in train_exs:\n features_of_str = feat_extractor.extract_features(ex.words, False)\n expo = math.exp(np.dot(weights, features_of_str))\n possibility = expo / (1 + expo)\n gradient_of_w = np.dot(ex.label - possibility, features_of_str)\n weights = np.add(weights, np.dot(learning_rate, gradient_of_w))\n return LogisticRegressionClassifier(weights, feat_extractor)\n\n # Methods for plotting average training loss\n\n # x = np.arange(0, 14)\n # # learning_rate = 1\n # indexer = feat_extractor.get_indexer()\n # weights = np.transpose(np.zeros(indexer.__len__(), dtype=int))\n # avrg_losses = np.zeros(14)\n # for i in range(15):\n # for ex in train_exs:\n # features_of_str = feat_extractor.extract_features(ex.words, False)\n # expo = math.exp(np.dot(weights, features_of_str))\n # possibility = expo / (1 + expo)\n # gradient_of_w = np.dot(ex.label - possibility, features_of_str)\n # weights = np.add(weights, gradient_of_w)\n # loss = 0\n # for ex in train_exs:\n # features_of_str = feat_extractor.extract_features(ex.words, False)\n # expo = math.exp(np.dot(weights, features_of_str))\n # possibility = expo / (1 + expo)\n # loss += -(ex.label * math.log(possibility) + (1 - ex.label) * math.log(1 - possibility))\n # avrg_losses[i - 1] = loss / train_exs.__len__()\n # plt.plot(x, avrg_losses)\n #\n # # learning_rate = 0.01\n # weights = np.transpose(np.zeros(indexer.__len__(), dtype=int))\n # learning_rate = 0.01\n # avrg_losses = np.zeros(14)\n # for i in range(15):\n # for ex in train_exs:\n # features_of_str = feat_extractor.extract_features(ex.words, False)\n # expo = math.exp(np.dot(weights, features_of_str))\n # possibility = expo / (1 + expo)\n # gradient_of_w = np.dot(ex.label - possibility, features_of_str)\n # weights = np.add(weights, np.dot(learning_rate, gradient_of_w))\n # loss = 0\n # for ex in train_exs:\n # features_of_str = feat_extractor.extract_features(ex.words, False)\n # expo = math.exp(np.dot(weights, features_of_str))\n # possibility = expo / (1 + expo)\n # loss += -(ex.label * math.log(possibility) + (1 - ex.label) * math.log(1 - possibility))\n # avrg_losses[i - 1] = loss / train_exs.__len__()\n # plt.plot(x, 
avrg_losses)\n #\n # # learning_rate = 0.1\n # weights = np.transpose(np.zeros(indexer.__len__(), dtype=int))\n # learning_rate = 0.1\n # avrg_losses = np.zeros(14)\n # for i in range(15):\n # for ex in train_exs:\n # features_of_str = feat_extractor.extract_features(ex.words, False)\n # expo = math.exp(np.dot(weights, features_of_str))\n # possibility = expo / (1 + expo)\n # gradient_of_w = np.dot(ex.label - possibility, features_of_str)\n # weights = np.add(weights, np.dot(learning_rate, gradient_of_w))\n # loss = 0\n # for ex in train_exs:\n # features_of_str = feat_extractor.extract_features(ex.words, False)\n # expo = math.exp(np.dot(weights, features_of_str))\n # possibility = expo / (1 + expo)\n # loss += -(ex.label * math.log(possibility) + (1 - ex.label) * math.log(1 - possibility))\n # avrg_losses[i - 1] = loss / train_exs.__len__()\n # plt.plot(x, avrg_losses)\n # plt.xlabel('Epochs')\n # plt.ylabel('Average Training Loss')\n # plt.legend(['step size 1', 'step size 0.01', 'step size 0.1'], loc='upper left')\n # plt.show()\n # return LogisticRegressionClassifier(weights, feat_extractor)", "def LogisticRegression_sklearn(X_train, X_test, y_train, y_test):\n\n\tlog_reg = LogisticRegression()\n\tlog_reg.fit(X_train, y_train.ravel())\n\tyPred =log_reg.predict(X_test)\n\n\t#Printing metrics of the logistic regression model\n\tprint('Accuracy:', metrics.accuracy_score(y_test, yPred))\n\tprint('Precision:', metrics.precision_score(y_test, yPred))\n\tprint('Recall', metrics.recall_score(y_test, yPred))\n\n\t#confusion matrix\n\n\tconfusionMatrix = matrix.confusion_matrix(y_test, yPred)\n\tsb.heatmap(pd.DataFrame(confusionMatrix), annot= True, fmt='g')\n\tplt.title('Confustion matrix with default value 1')\n\tplt.ylabel('True values')\n\tplt.xlabel('Predicted values')\n\tplt.show()", "def test_logistic_regression(x, y, tune):\n # Perform classification without tuning\n lrc = LogisticRegression()\n pipeline = create_pipeline(lrc)\n return accuracy(pipeline, x, y)", "def main():\r\n x = [\r\n [ 1,1 ], [ 0,0 ], [ 1,0 ], [ 0,0 ], [ 0,0 ], [ 0,0 ],\r\n [ 0,0 ], [ 0,0 ], [ 1,1 ], [ 0,0 ], [ 0,0 ], [ 1,0 ],\r\n [ 1,0 ], [ 0,0 ], [ 1,1 ], [ 0,0 ], [ 1,0 ], [ 0,0 ]\r\n ]\r\n\r\n # Encoding of the correct classes for the training material\r\n y = [1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0]\r\n b = BinaryLogisticRegression(x, y)\r\n b.fit()\r\n b.print_result()", "def spark_LogisticRegression(*args, **kwargs): \n return LogisticRegression(*args, **kwargs)", "def logistic_regression(y, tx, initial_w, max_iters, gamma, verbose=False):\n return gradient_descent(y, tx, initial_w, max_iters, gamma, \n compute_logistic_loss, compute_logistic_gradient, verbose=verbose)" ]
[ "0.74904466", "0.74298316", "0.73537105", "0.72307545", "0.71512586", "0.71214926", "0.7050272", "0.70428157", "0.70214045", "0.7007584", "0.6954771", "0.69525546", "0.69275224", "0.68443215", "0.6840125", "0.6814699", "0.6810458", "0.67569095", "0.67376316", "0.6731999", "0.6710322", "0.67062676", "0.66899604", "0.6681409", "0.66680825", "0.665638", "0.66154057", "0.65594095", "0.6548258", "0.65246755" ]
0.7459909
1
Describe dataframe (checking transformations)
def desc_df(df):
    print(df.shape)
    print(df.columns)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tran_describe(df):\n return df.describe()", "def _describe_input_data(self):\n print(self.dataframe.describe()) # only describes numeric columns", "def describe_frame(df):\n buf = io.StringIO()\n df.info(buf=buf)\n return buf.getvalue()", "def transform(self, data: pd.DataFrame, columns: list, verbose: int=1) -> pd.DataFrame:", "def df_info(df: pd.DataFrame, return_info=False, shape=True, cols=True, info_prefix=''):\n info = info_prefix\n if shape:\n info = f'{info}Shape = {df.shape}'\n if cols:\n info = f'{info} , Cols = {df.columns.tolist()}'\n print(info)\n if return_info:\n return info", "def describe_qual(df):\n\n categorical = df.dtypes[df.dtypes == \"object\"].index\n df[categorical].describe()", "def describe(self):\n print('Schema Changes\\n==============')\n self.schema().describe()\n print('\\nData Changes\\n============')\n self.rows().describe()", "def test_transform_interface_repr(example_tsds: TSDataset) -> None:\n trend_transform = TrendTransform(in_column=\"target\", detrend_model=LinearRegression(), model=\"rbf\")\n out_column = f\"regressor_{trend_transform.__repr__()}\"\n result = trend_transform.fit_transform(example_tsds.df)\n for seg in result.columns.get_level_values(0).unique():\n assert out_column in result[seg].columns", "def testExampleDataFrameGeneration(ref):\n df = generate_dataframe()\n columns = ref.all_fields_except(['random'])\n ref.assertDataFrameCorrect(df, 'dataframe_result.csv',\n check_data=columns, check_types=columns)", "def general_analysis(df):\n pass", "def test_analyze_columns(self):\n\t\t\n\n\t\tdetails = self.watcher.analyze()\n\t\tself.assertEqual(isinstance(details, pd.DataFrame), True, \"details is a pandas DataFrame\")\n\n\t\tcolumns = \"layer_id,name,D,M,N,alpha,alpha_weighted,has_esd,lambda_max,layer_type,log_alpha_norm,log_norm,log_spectral_norm,norm,num_evals,rank_loss,rf,sigma,spectral_norm,stable_rank,sv_max,sv_min,xmax,xmin,num_pl_spikes,weak_rank_loss\".split(',')\n\t\tprint(details.columns)\n\t\tfor key in columns:\n\t\t\tself.assertTrue(key in details.columns, \"{} in details. Columns are {}\".format(key, details.columns))", "def test_basic_columns_no_model(self):\n\t\t\t\t\n\t\tdetails = self.watcher.describe(model=self.model)\n\t\tself.assertEqual(isinstance(details, pd.DataFrame), True, \"details is a pandas DataFrame\")\n\t\t\n\t\tprint(details)\n\n\t\tfor key in ['layer_id', 'name', 'M', 'N', 'Q', 'longname']:\n\t\t\tself.assertTrue(key in details.columns, \"{} in details. Columns are {}\".format(key, details.columns))\n\n\t\tN = details.N.to_numpy()[0]\n\t\tM = details.M.to_numpy()[0]\n\t\tQ = details.Q.to_numpy()[0]\n\n\t\tself.assertAlmostEqual(Q, N/M, places=2)", "def test_log_not_names_and_dtypes(capsys, test_df):\n\n @log_step(names=True, dtypes=True)\n def do_nothing(df, *args, **kwargs):\n return df\n\n test_df.pipe(do_nothing)\n\n captured = capsys.readouterr()\n\n assert \"names=\" not in captured.out", "def test_basic_columns(self):\n\t\t\n\t\tdetails = self.watcher.describe()\n\t\tself.assertEqual(isinstance(details, pd.DataFrame), True, \"details is a pandas DataFrame\")\n\n\t\tfor key in ['layer_id', 'name', 'M', 'N', 'Q', 'longname']:\n\t\t\tself.assertTrue(key in details.columns, \"{} in details. 
Columns are {}\".format(key, details.columns))\n\n\t\tN = details.N.to_numpy()[0]\n\t\tM = details.M.to_numpy()[0]\n\t\tQ = details.Q.to_numpy()[0]\n\n\t\tself.assertAlmostEqual(Q, N/M, places=2)", "def test_basic_columns(self):\n\t\t\n\t\tdetails = self.watcher.describe()\n\t\tself.assertEqual(isinstance(details, pd.DataFrame), True, \"details is a pandas DataFrame\")\n\n\t\tfor key in ['layer_id', 'name', 'M', 'N', 'Q', 'longname']:\n\t\t\tself.assertTrue(key in details.columns, \"{} in details. Columns are {}\".format(key, details.columns))\n\n\t\tN = details.N.to_numpy()[0]\n\t\tM = details.M.to_numpy()[0]\n\t\tQ = details.Q.to_numpy()[0]\n\n\t\tself.assertAlmostEqual(Q, N/M, places=2)", "def data_frame_info(self):\r\n print(self.dataframe_name)\r\n print(self.data_frame.info())", "def log_dataset_data(df):\n logging.info(f'shape of common data is {df.shape}')\n logging.debug(f'more info: \\n{df.info}')\n logging.debug(f'types \\n{df.dtypes}')\n logging.debug(f'is nunvaluse in the dataset \\n{df.isnull().sum()}')", "def summarize_dataframe(df,columns_to_check='all',show_progress=False,arity_thresh=20):\n if columns_to_check != 'all':\n df = df[columns_to_check]\n\n nrow = len(df)\n summary_df = pd.DataFrame(columns = ['feature','datatype','nmissing','arity','accepted values'])\n len_df = len(summary_df)\n for col in df.columns:\n nmiss = nrow - df[col].value_counts().sum()\n narity = len(df[col].unique())\n if show_progress:\n #print(col, df[col].dtype,nmiss, \"\\t\", narity,\":\\t\", df[col].ix[8320])\n #else:\n print(col, df[col].dtype,nmiss, \"\\t\", narity)\n accept_val = None\n if narity < arity_thresh:\n accept_val = df[col].unique()\n else:\n accept_val = 'Too many to show'\n summary_df.loc[len_df] = [col,df[col].dtype,nmiss,narity,accept_val]\n len_df+=1\n # assing fraction of missing\n summary_df['x_missing'] = summary_df['nmissing']/float(nrow)\n\n return summary_df", "def describe(self, index=None, columns=None, query=None, split_view_names=False):\r\n stack_tree = []\r\n for dk in self.keys():\r\n path_dk = [dk]\r\n filters = self[dk]\r\n\r\n for fk in filters.keys():\r\n path_fk = path_dk + [fk]\r\n xs = self[dk][fk]\r\n\r\n for sk in xs.keys():\r\n path_sk = path_fk + [sk]\r\n ys = self[dk][fk][sk]\r\n\r\n for tk in ys.keys():\r\n path_tk = path_sk + [tk]\r\n views = self[dk][fk][sk][tk]\r\n\r\n if views.keys():\r\n for vk in views.keys():\r\n path_vk = path_tk + [vk, 1]\r\n stack_tree.append(tuple(path_vk))\r\n else:\r\n path_vk = path_tk + ['|||||', 1]\r\n stack_tree.append(tuple(path_vk))\r\n \r\n column_names = ['data', 'filter', 'x', 'y', 'view', '#']\r\n description = pd.DataFrame.from_records(stack_tree, columns=column_names)\r\n if split_view_names:\r\n views_as_series = pd.DataFrame(\r\n description.pivot_table(values='#', columns='view', aggfunc='count')\r\n ).reset_index()['view']\r\n parts = ['xpos', 'agg', 'relation', 'rel_to', 'weights', \r\n 'shortname']\r\n description = pd.concat(\r\n (views_as_series,\r\n pd.DataFrame(views_as_series.str.split('|').tolist(),\r\n columns=parts)), axis=1)\r\n \r\n description.replace('|||||', np.NaN, inplace=True)\r\n if query is not None:\r\n description = description.query(query)\r\n if not index is None or not columns is None:\r\n description = description.pivot_table(values='#', index=index, columns=columns,\r\n aggfunc='count')\r\n return description", "def metadata(self, df):\n raise NotImplementedError(\"missing metadata() method\")", "def describe():", "def describe_quant(df):\n\n df.describe()", "def 
dataset_statistics(dataset):\n print (dataset.describe())", "def show_feature_summary(df, colname, display_uniques=False):\n\tprint('Details of feature:',colname)\n\tprint(' - datatype:',df[colname].dtypes)\n\tprint(' - col.size:',df[colname].shape)\n\tprint(' - NaN.vals:',df[colname].isnull().sum())\n\tif (display_uniques): print(' - uniqvals:',get_unique_values(df, colname))\n\tif (display_uniques): print(' - cnt.vals:',get_unique_counts(df, colname))\n\tprint(\"\\n\")", "def ExamineData(x):\r\n print(\"Data shape:\", x.shape)\r\n print(\"\\nColumns:\", x.columns)\r\n print(\"\\nData types\\n\", x.dtypes)\r\n print(\"\\nDescribe data\\n\", x.describe())\r\n print(\"\\nData\\n\", x.head(2))\r\n print (\"\\nSize of data:\", np.sum(x.memory_usage())) # Get size of dataframes\r\n print(\"\\nAre there any NULLS\\n\", np.sum(x.isnull()))", "def dataset_statistics(dataset):\n print(dataset.describe())", "def dataset_statistics(dataset):\n print(dataset.describe())", "def transform(self, dataframe: DataFrame) -> DataFrame:", "def test_analyze_columns_with_model(self):\n\t\t\n\n\t\tdetails = self.watcher.analyze(model=self.model)\n\t\tself.assertEqual(isinstance(details, pd.DataFrame), True, \"details is a pandas DataFrame\")\n\n\t\tcolumns = \"layer_id,name,D,M,N,alpha,alpha_weighted,has_esd,lambda_max,layer_type,log_alpha_norm,log_norm,log_spectral_norm,norm,num_evals,rank_loss,rf,sigma,spectral_norm,stable_rank,sv_max,sv_min,xmax,xmin,num_pl_spikes,weak_rank_loss\".split(',')\n\t\tprint(details.columns)\n\t\tfor key in columns:\n\t\t\tself.assertTrue(key in details.columns, \"{} in details. Columns are {}\".format(key, details.columns))", "def test_analyze_columns_no_model(self):\n\t\t\n\t\tdetails = self.watcher.analyze(model=self.model, layers=[self.fc2_layer])\n\t\tself.assertEqual(isinstance(details, pd.DataFrame), True, \"details is a pandas DataFrame\")\n\n\t\tcolumns = \"layer_id,name,D,M,N,alpha,alpha_weighted,has_esd,lambda_max,layer_type,log_alpha_norm,log_norm,log_spectral_norm,norm,num_evals,rank_loss,rf,sigma,spectral_norm,stable_rank,sv_max,sv_min,xmax,xmin,num_pl_spikes,weak_rank_loss\".split(',')\n\t\tprint(details.columns)\n\t\tfor key in columns:\n\t\t\tself.assertTrue(key in details.columns, \"{} in details. Columns are {}\".format(key, details.columns))" ]
[ "0.7717136", "0.7169844", "0.6842157", "0.6736042", "0.6655487", "0.6576214", "0.64284104", "0.63757235", "0.6360584", "0.634914", "0.6317946", "0.62910897", "0.62811184", "0.6259133", "0.6259133", "0.62278116", "0.62078273", "0.61974996", "0.6168956", "0.6122926", "0.6115524", "0.61055475", "0.6079771", "0.606388", "0.60572064", "0.6043235", "0.6043235", "0.6039709", "0.6023252", "0.5997288" ]
0.7865462
0
To preprocess the data with polynomial features and fit the data
def fit_polynomial_regression(self, x_train, y_train):
    x_poly = self.poly_reg.fit_transform(x_train)
    self.lin_reg.fit(x_poly, y_train)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_polynomial_features(self, X) :\n\n n,d = X.shape\n\n ### ========== TODO : START ========== ###\n # part b: modify to create matrix for simple linear model\n # part g: modify to create matrix for polynomial model\n Phi = X\n m = self.m_\n\n if m == 1:\n Phi = np.zeros((n,2))\n for i in range(n):\n Phi[i,0] = 1\n Phi[i, 1] = X[i]\n\n else:\n Phi = np.ones((n,m+1))#n*m+1 dimmension\n power_arr = np.arange(0, m+1)\n for index, row in enumerate(Phi):# get every row\n row = np.repeat(X[index],m+1)\n row = np.power(row,power_arr)\n Phi [index,] = row\n #also could use the following\n \"\"\"\n import sklearn.preprocessing as sk\n #X is a N*1 vector\n poly_mat = sk.PolynomialFeatures(3)\n poly.fit_transform(a)\n \"\"\"\n\n\n\n\n\n ### ========== TODO : END ========== ###\n\n return Phi", "def fit(self, X):", "def poly_regression(self,precision=8):\n # return empty lists if input is empty\n if self.training == []:\n return [],[]\n\n latitudes = []\n longitudes = []\n for point in self.training[:-1]:\n latitudes.append(point[0])\n longitudes.append(point[1]) \n # store everything in a dataframe\n latDf = pd.DataFrame(numpy.array(latitudes), columns=['latitudes'])\n longDf = pd.DataFrame(numpy.array(longitudes), columns=['longitudes'])\n\n # learn how to do regression\n reg = linear_model.LinearRegression()\n\n # pass the order of your polynomial here \n poly = PolynomialFeatures(precision)\n\n \n # regression with latitude as domain\n vertical_predicted_path = []\n transform = poly.fit_transform(longDf)\n\n reg.fit(transform,latDf)\n predictions = reg.predict(transform)\n\n for i in range(len(predictions)):\n vertical_predicted_path.append([predictions[i][0],longDf[\"longitudes\"][i]])\n\n \n # regression with longitude domain\n horizontal_predicted_path = []\n transform = poly.fit_transform(latDf)\n\n reg.fit(transform,longDf)\n predictions = reg.predict(transform)\n\n for i in range(len(predictions)):\n horizontal_predicted_path.append([latDf[\"latitudes\"][i], predictions[i][0]])\n\n self.horizontal = sorted(horizontal_predicted_path, key=lambda k: [k[1], k[0]])\n self.vertical = sorted(vertical_predicted_path, key=lambda k: [k[0], k[1]])\n \n # return sorted horizontal and vertical prediction\n return self.horizontal, self.vertical", "def polyFeat(X, p):\r\n # You need to return the following variables correctly.\r\n X_poly = np.zeros((X.shape[0], p))\r\n\r\n # ====================== YOUR CODE HERE ======================\r\n\r\n for i in range(p):\r\n X_poly[:, i] = X[:, 0] ** (i + 1)\r\n\r\n # ============================================================\r\n return X_poly", "def polyfeatures(self, X, degree):\n #TODO\n \n for d in range(2,degree+1):\n X = np.append(X,X[:,[0]]**d,1)\n \n return X", "def load_poly_features(df_train, df_test):\n logger = logging.getLogger(__name__)\n logger.debug('Loading polynomial features..')\n # Make a new dataframe for polynomial features\n poly_features = df_train[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3', 'DAYS_BIRTH']]\n poly_features_test = df_test[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3', 'DAYS_BIRTH']]\n\n # imputer for handling missing values\n imputer = Imputer(strategy='median')\n\n # Need to impute missing values\n poly_features = imputer.fit_transform(poly_features)\n poly_features_test = imputer.transform(poly_features_test)\n\n # Create the polynomial object with specified degree\n poly_transformer = PolynomialFeatures(degree=3)\n # Train the polynomial features\n poly_transformer.fit(poly_features)\n\n # Transform the 
features\n poly_features = poly_transformer.transform(poly_features)\n poly_features_test = poly_transformer.transform(poly_features_test)\n logger.debug('Polynomial Features shape: %s' % str(poly_features.shape))\n\n df_poly_features = pd.DataFrame(poly_features,\n columns=poly_transformer.get_feature_names(['EXT_SOURCE_1', 'EXT_SOURCE_2',\n 'EXT_SOURCE_3', 'DAYS_BIRTH']))\n df_poly_features_test = pd.DataFrame(poly_features_test,\n columns=poly_transformer.get_feature_names(['EXT_SOURCE_1', 'EXT_SOURCE_2',\n 'EXT_SOURCE_3', 'DAYS_BIRTH']))\n df_poly_features['SK_ID_CURR'] = df_train['SK_ID_CURR']\n df_poly_features_test['SK_ID_CURR'] = df_test['SK_ID_CURR']\n logger.info('Loaded polynomial features')\n return df_poly_features, df_poly_features_test", "def fit(self, X, y):", "def fit(self, X, y):", "def fit(self, X, y):", "def make_training_xy(self, data):\n pass", "def polytrans(features,features_test,features_oos,poly): \n \n features['FEMA_21'] = poly.fit_transform(np.nan_to_num(features.FEMA_21.astype(np.float32)).reshape(-1, 1))\n features['FEMA_8'] = poly.fit_transform(np.nan_to_num(features.FEMA_8.astype(np.float32)).reshape(-1, 1))\n features['FADRLo'] = poly.fit_transform(np.nan_to_num(features.FADRLo.astype(np.float32)).reshape(-1, 1))\n features['FADRHi'] = poly.fit_transform(np.nan_to_num(features.FADRHi.astype(np.float32)).reshape(-1, 1))\n features['FRVI40'] = poly.fit_transform(np.nan_to_num(features.FRVI40.astype(np.float32)).reshape(-1, 1))\n features['FRVI60'] = poly.fit_transform(np.nan_to_num(features.FRVI60.astype(np.float32)).reshape(-1, 1))\n features['FONLOSMA5'] = poly.fit_transform(np.nan_to_num(features.FONLOSMA5.astype(np.float32)).reshape(-1, 1))\n features['FONHISMA5'] = poly.fit_transform(np.nan_to_num(features.FONHISMA5.astype(np.float32)).reshape(-1, 1))\n features['FONLOSMA21'] = poly.fit_transform(np.nan_to_num(features.FONLOSMA21.astype(np.float32)).reshape(-1, 1))\n features['FONHISMA21'] = poly.fit_transform(np.nan_to_num(features.FONHISMA21.astype(np.float32)).reshape(-1, 1))\n features['FONLOSMA34'] = poly.fit_transform(np.nan_to_num(features.FONLOSMA34.astype(np.float32)).reshape(-1, 1))\n features['FSBGAMMA'] = poly.fit_transform(np.nan_to_num(features.FSBGAMMA.astype(np.float32)).reshape(-1, 1))\n features['FOPENWEEKLY'] = poly.fit_transform(np.nan_to_num(features.FOPENWEEKLY.astype(np.float32)).reshape(-1, 1))\n features['FHIGHWEEKLY'] = poly.fit_transform(np.nan_to_num(features.FHIGHWEEKLY.astype(np.float32)).reshape(-1, 1))\n features['FLOWWEEKLY'] = poly.fit_transform(np.nan_to_num(features.FLOWWEEKLY.astype(np.float32)).reshape(-1, 1))\n features['FCLOSEWEEKLY'] = poly.fit_transform(np.nan_to_num(features.FCLOSEWEEKLY.astype(np.float32)).reshape(-1, 1))\n features['FOPENDAILY'] = poly.fit_transform(np.nan_to_num(features.FOPENDAILY.astype(np.float32)).reshape(-1, 1))\n features['FHIGHDAILY'] = poly.fit_transform(np.nan_to_num(features.FHIGHDAILY.astype(np.float32)).reshape(-1, 1))\n features['FLOWDAILY'] = poly.fit_transform(np.nan_to_num(features.FLOWDAILY.astype(np.float32)).reshape(-1, 1))\n features['FCLOSEDAILY'] = poly.fit_transform(np.nan_to_num(features.FCLOSEDAILY.astype(np.float32)).reshape(-1, 1))\n features['FOPENHOURLY'] = poly.fit_transform(np.nan_to_num(features.FOPENHOURLY.astype(np.float32)).reshape(-1, 1))\n features['FHIGHHOURLY'] = poly.fit_transform(np.nan_to_num(features.FHIGHHOURLY.astype(np.float32)).reshape(-1, 1))\n features['FLOWHOURLY'] = 
poly.fit_transform(np.nan_to_num(features.FLOWHOURLY.astype(np.float32)).reshape(-1, 1))\n features['FCLOSEHOURLY'] = poly.fit_transform(np.nan_to_num(features.FCLOSEHOURLY.astype(np.float32)).reshape(-1, 1))\n features['FSMA200'] = poly.fit_transform(np.nan_to_num(features.FSMA200.astype(np.float32)).reshape(-1, 1))\n features['FBOLUP20'] = poly.fit_transform(np.nan_to_num(features.FBOLUP20.astype(np.float32)).reshape(-1, 1))\n features['FPP'] = poly.fit_transform(np.nan_to_num(features.FPP.astype(np.float32)).reshape(-1, 1))\n features['FS38'] = poly.fit_transform(np.nan_to_num(features.FS38.astype(np.float32)).reshape(-1, 1))\n features['FS62'] = poly.fit_transform(np.nan_to_num(features.FS62.astype(np.float32)).reshape(-1, 1))\n features['FS100'] = poly.fit_transform(np.nan_to_num(features.FS100.astype(np.float32)).reshape(-1, 1))\n features['FS138'] = poly.fit_transform(np.nan_to_num(features.FS138.astype(np.float32)).reshape(-1, 1))\n features['FR162'] = poly.fit_transform(np.nan_to_num(features.FS162.astype(np.float32)).reshape(-1, 1))\n features['FS200'] = poly.fit_transform(np.nan_to_num(features.FS200.astype(np.float32)).reshape(-1, 1))\n features['FR38'] = poly.fit_transform(np.nan_to_num(features.FR38.astype(np.float32)).reshape(-1, 1))\n features['FR62'] = poly.fit_transform(np.nan_to_num(features.FR62.astype(np.float32)).reshape(-1, 1))\n features['FR100'] = poly.fit_transform(np.nan_to_num(features.FR100.astype(np.float32)).reshape(-1, 1))\n features['FR138'] = poly.fit_transform(np.nan_to_num(features.FR138.astype(np.float32)).reshape(-1, 1))\n features['FR162'] = poly.fit_transform(np.nan_to_num(features.FR162.astype(np.float32)).reshape(-1, 1))\n features['FR200'] = poly.fit_transform(np.nan_to_num(features.FR200.astype(np.float32)).reshape(-1, 1))\n features['SBATR'] = poly.fit_transform(np.nan_to_num(features.SBATR.astype(np.float32)).reshape(-1, 1))\n \n features_test['FEMA_21'] = poly.fit_transform(np.nan_to_num(features_test.FEMA_21.astype(np.float32)).reshape(-1, 1))\n features_test['FEMA_8'] = poly.fit_transform(np.nan_to_num(features_test.FEMA_8.astype(np.float32)).reshape(-1, 1))\n features_test['FADRLo'] = poly.fit_transform(np.nan_to_num(features_test.FADRLo.astype(np.float32)).reshape(-1, 1))\n features_test['FADRHi'] = poly.fit_transform(np.nan_to_num(features_test.FADRHi.astype(np.float32)).reshape(-1, 1))\n features_test['FRVI40'] = poly.fit_transform(np.nan_to_num(features_test.FRVI40.astype(np.float32)).reshape(-1, 1))\n features_test['FRVI60'] = poly.fit_transform(np.nan_to_num(features_test.FRVI60.astype(np.float32)).reshape(-1, 1))\n features_test['FONLOSMA5'] = poly.fit_transform(np.nan_to_num(features_test.FONLOSMA5.astype(np.float32)).reshape(-1, 1))\n features_test['FONHISMA5'] = poly.fit_transform(np.nan_to_num(features_test.FONHISMA5.astype(np.float32)).reshape(-1, 1))\n features_test['FONLOSMA21'] = poly.fit_transform(np.nan_to_num(features_test.FONLOSMA21.astype(np.float32)).reshape(-1, 1))\n features_test['FONHISMA21'] = poly.fit_transform(np.nan_to_num(features_test.FONHISMA21.astype(np.float32)).reshape(-1, 1))\n features_test['FONLOSMA34'] = poly.fit_transform(np.nan_to_num(features_test.FONLOSMA34.astype(np.float32)).reshape(-1, 1))\n features_test['FSBGAMMA'] = poly.fit_transform(np.nan_to_num(features_test.FSBGAMMA.astype(np.float32)).reshape(-1, 1))\n features_test['FOPENWEEKLY'] = poly.fit_transform(np.nan_to_num(features_test.FOPENWEEKLY.astype(np.float32)).reshape(-1, 1))\n features_test['FHIGHWEEKLY'] = 
poly.fit_transform(np.nan_to_num(features_test.FHIGHWEEKLY.astype(np.float32)).reshape(-1, 1))\n features_test['FLOWWEEKLY'] = poly.fit_transform(np.nan_to_num(features_test.FLOWWEEKLY.astype(np.float32)).reshape(-1, 1))\n features_test['FCLOSEWEEKLY'] = poly.fit_transform(np.nan_to_num(features_test.FCLOSEWEEKLY.astype(np.float32)).reshape(-1, 1))\n features_test['FOPENDAILY'] = poly.fit_transform(np.nan_to_num(features_test.FOPENDAILY.astype(np.float32)).reshape(-1, 1))\n features_test['FHIGHDAILY'] = poly.fit_transform(np.nan_to_num(features_test.FHIGHDAILY.astype(np.float32)).reshape(-1, 1))\n features_test['FLOWDAILY'] = poly.fit_transform(np.nan_to_num(features_test.FLOWDAILY.astype(np.float32)).reshape(-1, 1))\n features_test['FCLOSEDAILY'] = poly.fit_transform(np.nan_to_num(features_test.FCLOSEDAILY.astype(np.float32)).reshape(-1, 1))\n features_test['FOPENHOURLY'] = poly.fit_transform(np.nan_to_num(features_test.FOPENHOURLY.astype(np.float32)).reshape(-1, 1))\n features_test['FHIGHHOURLY'] = poly.fit_transform(np.nan_to_num(features_test.FHIGHHOURLY.astype(np.float32)).reshape(-1, 1))\n features_test['FLOWHOURLY'] = poly.fit_transform(np.nan_to_num(features_test.FLOWHOURLY.astype(np.float32)).reshape(-1, 1))\n features_test['FCLOSEHOURLY'] = poly.fit_transform(np.nan_to_num(features_test.FCLOSEHOURLY.astype(np.float32)).reshape(-1, 1))\n features_test['FSMA200'] = poly.fit_transform(np.nan_to_num(features_test.FSMA200.astype(np.float32)).reshape(-1, 1))\n features_test['FBOLUP20'] = poly.fit_transform(np.nan_to_num(features_test.FBOLUP20.astype(np.float32)).reshape(-1, 1))\n features_test['FPP'] = poly.fit_transform(np.nan_to_num(features_test.FPP.astype(np.float32)).reshape(-1, 1))\n features_test['FS38'] = poly.fit_transform(np.nan_to_num(features_test.FS38.astype(np.float32)).reshape(-1, 1))\n features_test['FS62'] = poly.fit_transform(np.nan_to_num(features_test.FS62.astype(np.float32)).reshape(-1, 1))\n features_test['FS100'] = poly.fit_transform(np.nan_to_num(features_test.FS100.astype(np.float32)).reshape(-1, 1))\n features_test['FS138'] = poly.fit_transform(np.nan_to_num(features_test.FS138.astype(np.float32)).reshape(-1, 1))\n features_test['FR162'] = poly.fit_transform(np.nan_to_num(features_test.FS162.astype(np.float32)).reshape(-1, 1))\n features_test['FS200'] = poly.fit_transform(np.nan_to_num(features_test.FS200.astype(np.float32)).reshape(-1, 1))\n features_test['FR38'] = poly.fit_transform(np.nan_to_num(features_test.FR38.astype(np.float32)).reshape(-1, 1))\n features_test['FR62'] = poly.fit_transform(np.nan_to_num(features_test.FR62.astype(np.float32)).reshape(-1, 1))\n features_test['FR100'] = poly.fit_transform(np.nan_to_num(features_test.FR100.astype(np.float32)).reshape(-1, 1))\n features_test['FR138'] = poly.fit_transform(np.nan_to_num(features_test.FR138.astype(np.float32)).reshape(-1, 1))\n features_test['FR162'] = poly.fit_transform(np.nan_to_num(features_test.FR162.astype(np.float32)).reshape(-1, 1))\n features_test['FR200'] = poly.fit_transform(np.nan_to_num(features_test.FR200.astype(np.float32)).reshape(-1, 1))\n features_test['SBATR'] = poly.fit_transform(np.nan_to_num(features_test.SBATR.astype(np.float32)).reshape(-1, 1))\n\n features_oos['FEMA_21'] = poly.fit_transform(np.nan_to_num(features_oos.FEMA_21.astype(np.float32)).reshape(-1, 1))\n features_oos['FEMA_8'] = poly.fit_transform(np.nan_to_num(features_oos.FEMA_8.astype(np.float32)).reshape(-1, 1))\n features_oos['FADRLo'] = 
poly.fit_transform(np.nan_to_num(features_oos.FADRLo.astype(np.float32)).reshape(-1, 1))\n features_oos['FADRHi'] = poly.fit_transform(np.nan_to_num(features_oos.FADRHi.astype(np.float32)).reshape(-1, 1))\n features_oos['FRVI40'] = poly.fit_transform(np.nan_to_num(features_oos.FRVI40.astype(np.float32)).reshape(-1, 1))\n features_oos['FRVI60'] = poly.fit_transform(np.nan_to_num(features_oos.FRVI60.astype(np.float32)).reshape(-1, 1))\n features_oos['FONLOSMA5'] = poly.fit_transform(np.nan_to_num(features_oos.FONLOSMA5.astype(np.float32)).reshape(-1, 1))\n features_oos['FONHISMA5'] = poly.fit_transform(np.nan_to_num(features_oos.FONHISMA5.astype(np.float32)).reshape(-1, 1))\n features_oos['FONLOSMA21'] = poly.fit_transform(np.nan_to_num(features_oos.FONLOSMA21.astype(np.float32)).reshape(-1, 1))\n features_oos['FONHISMA21'] = poly.fit_transform(np.nan_to_num(features_oos.FONHISMA21.astype(np.float32)).reshape(-1, 1))\n features_oos['FONLOSMA34'] = poly.fit_transform(np.nan_to_num(features_oos.FONLOSMA34.astype(np.float32)).reshape(-1, 1))\n features_oos['FSBGAMMA'] = poly.fit_transform(np.nan_to_num(features_oos.FSBGAMMA.astype(np.float32)).reshape(-1, 1))\n features_oos['FOPENWEEKLY'] = poly.fit_transform(np.nan_to_num(features_oos.FOPENWEEKLY.astype(np.float32)).reshape(-1, 1))\n features_oos['FHIGHWEEKLY'] = poly.fit_transform(np.nan_to_num(features_oos.FHIGHWEEKLY.astype(np.float32)).reshape(-1, 1))\n features_oos['FLOWWEEKLY'] = poly.fit_transform(np.nan_to_num(features_oos.FLOWWEEKLY.astype(np.float32)).reshape(-1, 1))\n features_oos['FCLOSEWEEKLY'] = poly.fit_transform(np.nan_to_num(features_oos.FCLOSEWEEKLY.astype(np.float32)).reshape(-1, 1))\n features_oos['FOPENDAILY'] = poly.fit_transform(np.nan_to_num(features_oos.FOPENDAILY.astype(np.float32)).reshape(-1, 1))\n features_oos['FHIGHDAILY'] = poly.fit_transform(np.nan_to_num(features_oos.FHIGHDAILY.astype(np.float32)).reshape(-1, 1))\n features_oos['FLOWDAILY'] = poly.fit_transform(np.nan_to_num(features_oos.FLOWDAILY.astype(np.float32)).reshape(-1, 1))\n features_oos['FCLOSEDAILY'] = poly.fit_transform(np.nan_to_num(features_oos.FCLOSEDAILY.astype(np.float32)).reshape(-1, 1))\n features_oos['FOPENHOURLY'] = poly.fit_transform(np.nan_to_num(features_oos.FOPENHOURLY.astype(np.float32)).reshape(-1, 1))\n features_oos['FHIGHHOURLY'] = poly.fit_transform(np.nan_to_num(features_oos.FHIGHHOURLY.astype(np.float32)).reshape(-1, 1))\n features_oos['FLOWHOURLY'] = poly.fit_transform(np.nan_to_num(features_oos.FLOWHOURLY.astype(np.float32)).reshape(-1, 1))\n features_oos['FCLOSEHOURLY'] = poly.fit_transform(np.nan_to_num(features_oos.FCLOSEHOURLY.astype(np.float32)).reshape(-1, 1))\n features_oos['FSMA200'] = poly.fit_transform(np.nan_to_num(features_oos.FSMA200.astype(np.float32)).reshape(-1, 1))\n features_oos['FBOLUP20'] = poly.fit_transform(np.nan_to_num(features_oos.FBOLUP20.astype(np.float32)).reshape(-1, 1))\n features_oos['FPP'] = poly.fit_transform(np.nan_to_num(features_oos.FPP.astype(np.float32)).reshape(-1, 1))\n features_oos['FS38'] = poly.fit_transform(np.nan_to_num(features_oos.FS38.astype(np.float32)).reshape(-1, 1))\n features_oos['FS62'] = poly.fit_transform(np.nan_to_num(features_oos.FS62.astype(np.float32)).reshape(-1, 1))\n features_oos['FS100'] = poly.fit_transform(np.nan_to_num(features_oos.FS100.astype(np.float32)).reshape(-1, 1))\n features_oos['FS138'] = poly.fit_transform(np.nan_to_num(features_oos.FS138.astype(np.float32)).reshape(-1, 1))\n features_oos['FR162'] = 
poly.fit_transform(np.nan_to_num(features_oos.FS162.astype(np.float32)).reshape(-1, 1))\n features_oos['FS200'] = poly.fit_transform(np.nan_to_num(features_oos.FS200.astype(np.float32)).reshape(-1, 1))\n features_oos['FR38'] = poly.fit_transform(np.nan_to_num(features_oos.FR38.astype(np.float32)).reshape(-1, 1))\n features_oos['FR62'] = poly.fit_transform(np.nan_to_num(features_oos.FR62.astype(np.float32)).reshape(-1, 1))\n features_oos['FR100'] = poly.fit_transform(np.nan_to_num(features_oos.FR100.astype(np.float32)).reshape(-1, 1))\n features_oos['FR138'] = poly.fit_transform(np.nan_to_num(features_oos.FR138.astype(np.float32)).reshape(-1, 1))\n features_oos['FR162'] = poly.fit_transform(np.nan_to_num(features_oos.FR162.astype(np.float32)).reshape(-1, 1))\n features_oos['FR200'] = poly.fit_transform(np.nan_to_num(features_oos.FR200.astype(np.float32)).reshape(-1, 1))\n features_oos['SBATR'] = poly.fit_transform(np.nan_to_num(features_oos.SBATR.astype(np.float32)).reshape(-1, 1))\n\n return(features,features_test,features_oos)", "def fit_poly(data, error_func, degree=4): \n\n # generate initial guss for polynomial model (all coeffs = 1)\n guess = np.poly1d(np.ones(degree + 1, dtype=np.float32))\n\n # plot intial guess\n x = np.linspace(-5, 5, 21)\n plt.plot(x, np.polyval(guess, x), 'm--', linewidth=2.0, label=\"Initial guess\")\n\n # call optimizer to minimize error function\n result = spo.minimize(error_poly, guess, args=(data,), method='SLSQP', options={'disp':True})\n \n # convert optimal result into a poly1d object\n return np.poly1d(result.x)", "def plotBestFitOfAllData(x_samples, y_samples, x_poly, y_poly, order, plotFlag= True):\n train(x_samples, y_samples, x_poly, y_poly, order, plotFlag= True) \n plt.title(\"Polynomial function regression\")\n plt.grid()\n plt.plot(x_poly, y_poly, c='black', label='true function')\n plt.scatter(x_samples, y_samples, s=20, c='green', label='sample')\n plt.legend()\n plt.show()", "def build_poly(x, degree):\n \"\"\"\n Assemble the 3 label vectors with the original ordering \n Inputs:\n - x (ndarray) : binary prediction for set 1\n - degree (int) : binary prediction for set 2 \n Outputs: \n - p (ndarray) : predicted labels for test set ( with the original ordering)\n \"\"\"\n # forming a matrix containing the data points\n terms = np.hstack([np.ones([x.shape[0],1]),np.tile(x,(1,degree))])\n index = np.arange(degree)+1\n \n # forming a matrix contnaining the exponents\n exponents = np.multiply(np.ones((1, x.shape[1])), index[:, np.newaxis])\n exponents = exponents.reshape([1, x.shape[1]*degree])\n exponents = np.multiply(exponents, np.ones([x.shape[0], 1]))\n exponents = np.hstack([np.ones( (x.shape[0], 1) ),exponents])\n \n # using the exponent matrix as the element-wise exponents of the terms in the terms matrix\n p=np.power(terms,exponents)\n return p", "def get_poly(kwargs):\n from sklearn.preprocessing import PolynomialFeatures\n return PolynomialFeatures(**kwargs)", "def fitRegressor(self, data):\r\n if data.SETS == 2:\r\n self.regressor.fit(data.trainX, data.trainy)", "def toyData(w,sigma,N): \n #Degree of polynomial \n degree=w.size; \n \n #generate x values \n x=np.linspace(0, 1,N);\n \n poly=preprocessing.PolynomialFeatures(degree-1,include_bias=True)\n \n PHI=poly.fit_transform(x.reshape(N,1)) \n \n y=np.dot(PHI,w);\n \n target=y+np.random.normal(0, sigma, N);\n \n Out=[x,y,PHI, target]\n\n return Out", "def feature_extraction(self) -> None:\n # Add the hour, minute, and x column to the data\n self.df_poly[\"hour\"] = 
self.df_poly[\"time\"].apply(lambda y: y.hour)\n self.df_poly[\"minute\"] = self.df_poly[\"time\"].apply(lambda y: y.minute)\n self.df_poly[\"x\"] = self.df_poly[\"hour\"] * 60 + self.df_poly[\"minute\"]\n\n # Empty list to hold the feature names\n poly_feature_names = []\n\n # Add the poly columns to the df_poly\n for degree in [0, 1, 2, 3, 4, 5]:\n self.df_poly = poly(self.df_poly, degree)\n poly_feature_names.append(\"poly_\" + str(degree))\n\n # filterout + - inf, nan\n self.df_poly = self.df_poly[\n ~self.df_poly.isin([np.nan, np.inf, -np.inf]).any(1)\n ]\n\n # Save the poly feature name\n self.poly_feature_names = poly_feature_names\n feature_names = []\n\n #########################################################################################\n train_index_poly = self.df_poly[\n ~self.df_poly.isin([np.nan, np.inf, -np.inf]).any(1)\n ].index\n X_train_poly, y_train_poly = (\n self.df_poly[self.poly_feature_names].loc[train_index_poly],\n self.df_poly[\"y\"].loc[train_index_poly],\n )\n\n # Build the Polynomial Regression Model\n lin_reg = LinearRegression()\n lin_reg.fit(X_train_poly, y_train_poly)\n self.poly_model = lin_reg\n y_train_season = lin_reg.predict(X_train_poly)\n self.y_train_season_obj = y_train_season\n #########################################################################################\n\n for n in [10, 15, 20, 25, 30]:\n self.df = MOM(self.df, n)\n feature_names.append(\"MOM_\" + str(n))\n for n in [10, 15, 20, 25, 30]:\n self.df = ROC(self.df, n)\n feature_names.append(\"ROC_\" + str(n))\n for n in [1, 2, 3, 4, 5]:\n self.df = LAG(self.df, n)\n feature_names.append(\"LAG_\" + str(n))\n for n in [10, 20, 30]:\n self.df = MA(self.df, n)\n feature_names.append(\"MA_\" + str(n))\n\n self.df = self.df[\n ~self.df.isin([np.nan, np.inf, -np.inf]).any(1)\n ] # filterout + - inf, nan\n self.feature_names = feature_names", "def fit_poly_model(order, train_data, feature: str, valid_data=None, output: str = 'price',\n l2_penalty=1e-9,\n normalization: bool = True, model_plot: bool = False, color_scheme: List[str] = None,\n pause_plotting_time=5):\n # an 'order' degree polynomial :\n poly_data = polynomial_dataframe(train_data[feature], order)\n poly_data[output] = train_data[output]\n\n # compute the regression weights for predicting sales[‘price’]\n # based on the 1 degree polynomial feature ‘sqft_living’:\n from sklearn.linear_model import Ridge\n # make a new instance of the object:\n model = Ridge(alpha=l2_penalty, normalize=normalization)\n # convert data frame to numpy array to prevent shape error with sikit-learn:\n x = np.array(poly_data.iloc[:, :-1])\n y = np.array(poly_data[output]).reshape(-1, 1)\n\n model.fit(x, y)\n\n # store all coefficient in poly1_weights array:\n poly_weights = model.intercept_\n for i in range(0, len(model.coef_)):\n poly_weights = np.append(poly_weights, model.coef_[i])\n\n # Plotting the model, features Xs vs observation Y:\n if model_plot:\n # produce a scatter plot of the training data (just square feet vs price) with fitted model:\n if color_scheme is not None:\n # plot without default color:\n plt.scatter(poly_data['power_1'], poly_data[output], c=color_scheme[0])\n plt.plot(x[:, 0], model.predict(x), c=color_scheme[1])\n else:\n # plot with default color but in different figures:\n import random\n num_figure = random.randint(0, 1000)\n plt.figure(num_figure)\n plt.scatter(poly_data['power_1'], poly_data[output])\n plt.plot(x[:, 0], model.predict(x), c='red')\n plt.figure(num_figure).show()\n plt.pause(pause_plotting_time)\n\n # 
compute rss:\n train_rss = get_residual_sum_squares(y, model.predict(x))\n # compute rss on validation set:\n if valid_data is None:\n # Then we don't need validation_rss:\n validation_rss = None\n else:\n poly_data_valid = polynomial_dataframe(valid_data[feature], order)\n poly_data_valid[output] = valid_data[output]\n\n x_valid = np.array(poly_data_valid.iloc[:, :-1])\n y_valid = np.array(poly_data_valid[output]).reshape(-1, 1)\n # get ready validation rss to return:\n validation_rss = get_residual_sum_squares(y_valid, model.predict(x_valid))\n\n return poly_weights, train_rss, validation_rss", "def fit(self, X, y, l2regularize = None ) :\n\n X = self.generate_polynomial_features(X) # map features\n\n ### ========== TODO : START ========== ###\n # part e: implement closed-form solution\n # hint: use np.dot(...) and np.linalg.pinv(...)\n # be sure to update self.coef_ with your solution\n X_X_T = np.linalg.pinv(np.dot(X.transpose(),X) + l2regularize*np.identity(np.shape(X.transpose())[0]))\n self.coef_ = np.dot(X_X_T,np.dot(X.transpose(),y))\n\n\n ### ========== TODO : END ========== ###\n\n return self", "def nnRegression(data):", "def fit(self, X,y):\n pass", "def add_poly_features(data, columns, degree=2):\n\n if degree != 2:\n print('Only works w/2 degrees right now...')\n return\n\n for col in columns:\n new_col = col + '_poly' + str(degree)\n data[new_col] = np.nan\n data[[col, new_col]] = poly(data[col], degree=degree)\n\n return data", "def _fit_poly(y_data, deg=5):\n x = np.arange(1, len(y_data) + 1)\n coeffs = np.polynomial.polynomial.polyfit(\n x, y_data, deg=deg)\n y_pred = poly(x, coeffs)\n return coeffs, np.mean((y_data - y_pred) ** 2)", "def fit_transform(self, data: np.ndarray) -> np.ndarray:\n for i in range(self.n_layers):\n if self.verbose_training:\n print(\"Fitting layer %d with output width %d\" % (i+1, self.layers[i]))\n new_data = np.nan_to_num(data)\n new_data = self.pca_list[i].fit_transform(X=new_data)\n if i != self.n_layers - 1:\n self.power_list[i].fit(new_data)\n new_data = self.power_list[i].inverse_transform(new_data)\n data = new_data\n return data", "def fit(self, x):\n pass", "def train(self,path,mode):\n if mode == \"porto\":\n self.prepare_data(path)\n else:\n self.prepare_sumo_data(path)\n self.poly_regression()", "def fit_training_data(self):\n self.model.fit(self.X_train)", "def generate_coefficients_data(poly_degree: int, performance_data: pd.DataFrame, param_columns: typing.List) -> pd.DataFrame:\n if poly_degree != 2:\n logging.warning('Not Implemented: polynomial degree of > 2. Will use degree 2 for meta-model')\n coef_names = get_coefficient_names()\n results = []\n for idx, task_id in enumerate(performance_data['task_id'].unique()):\n frame_task = performance_data.loc[performance_data['task_id'] == task_id]\n model = sklearn.linear_model.LinearRegression(fit_intercept=False)\n poly_feat = sklearn.preprocessing.PolynomialFeatures(2)\n X = poly_feat.fit_transform(frame_task[param_columns])\n y = frame_task['predictive_accuracy']\n model.fit(X, y)\n result = {\n 'task_id': task_id,\n coef_names[0]: model.coef_[0],\n coef_names[1]: model.coef_[1],\n coef_names[2]: model.coef_[2],\n coef_names[3]: model.coef_[3],\n coef_names[4]: model.coef_[4],\n coef_names[5]: model.coef_[5],\n }\n results.append(result)\n return pd.DataFrame(results).set_index('task_id')", "def fit(self, data):\n raise NotImplementedError(\"To be implemented in sub classes\")" ]
[ "0.659779", "0.657412", "0.6561241", "0.65110964", "0.64200664", "0.6334541", "0.63075", "0.63075", "0.63075", "0.6307181", "0.62965524", "0.62757695", "0.62735176", "0.6264986", "0.62461776", "0.62459224", "0.62243205", "0.62237537", "0.62153757", "0.6206525", "0.61727697", "0.61563337", "0.61418223", "0.6132719", "0.6120057", "0.60990983", "0.6088997", "0.60854924", "0.6061239", "0.60497546" ]
0.738424
0
creates the type widget
def create_type_widget(self):
    self._chb_bool = QtWidgets.QCheckBox()
    return self._chb_bool
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_widget(self):\n pass", "def create_widgets( self ):", "def create_widgets(self):", "def new_varTypeWidget():\n newWidget = QtGui.QComboBox()\n newWidget.addItems(['=', '+', 'num'])\n return newWidget", "def createWidget(self, parent):\n raise NotImplementedError()", "def __init__(self, type_):\n\n self.type = type_", "def create_widget(self):\n self.widget = UILabel()", "def _create_widget(self,name,master,widget_options={},on_set=None,on_modify=None):\n # select the appropriate widget-creation method;\n # default is self._create_string_widget...\n widget_creation_fn = self._create_string_widget\n\n param_obj,source_po = self.get_parameter_object(name,with_source=True)\n\n if not (param_is_dynamically_generated(param_obj,source_po) or name in self.allow_dynamic):\n # ...but overwrite that with a more specific one, if possible\n for c in classlist(type(param_obj))[::-1]:\n if self.widget_creators.has_key(c):\n widget_creation_fn = self.widget_creators[c]\n break\n elif name not in self.allow_dynamic:\n self.allow_dynamic.append(name)\n\n if on_set is not None:\n self._tkvars[name]._on_set=on_set\n\n if on_modify is not None:\n self._tkvars[name]._on_modify=on_modify\n\n widget=widget_creation_fn(master,name,widget_options)\n\n # Is widget a button (but not a checkbutton)? If so, no label wanted.\n # CEBALERT 'notNonelabel': change to have a label with no text\n if is_button(widget):\n label = None\n else:\n label = T.Label(master,text=self._pretty_print(name))\n\n # disable widgets for constant params\n if param_obj.constant and isinstance(source_po,Parameterized):\n # (need to be able to set on class, hence check it's PO not POMetaclass\n widget.config(state='disabled')\n\n widget.bind('<<right-click>>',lambda event: self._right_click(event, widget))\n\n return widget,label", "def createWidgets(self):\n raise NotImplementedError", "def _form_for_type(request, C, defn, add_id_and_rev=False):\n form = build(defn, C, add_id_and_rev=add_id_and_rev,\n widget_registry=_widget_registry(request))\n form.renderer = request.environ['restish.templating'].renderer\n return form", "def create_widget(self):\n item = QNodeItem(self)\n self.widget = item", "def _type(self) -> str:\n ...", "def type(name):", "def init_widget(self):", "def getType(self): #$NON-NLS-1$\r", "def _create_type_select_button(self, options, items):\n\t\t# FIXME: items are ignored for some unknown reason\n\t\topts = copy.deepcopy(options)\n\t\topts['action'] = 'type-selected'\n\t\taction = umcd.Action(umcp.SimpleCommand(self.command, options=opts), items)\n\t\tchoices = (\n\t\t\t\t('RAW', _('Simple format (raw)')),\n\t\t\t\t)\n\t\ttry:\n\t\t\tpool_name = options['pool-name']\n\t\t\tud.debug(ud.ADMIN, ud.ALL, 'UVMM.DW.ts(pool-name=%s)' % pool_name)\n\t\t\tif self._is_file_pool(pool_name):\n\t\t\t\tif self.node_uri.startswith('qemu'):\n\t\t\t\t\tchoices = (\n\t\t\t\t\t\t\t#('qcow', _('Extended format (qcow)')),\n\t\t\t\t\t\t\t('qcow2', _('Extended format (qcow2)')),\n\t\t\t\t\t\t\t#('vmdk', _('VMWare Disk')),\n\t\t\t\t\t\t\t('raw', _('Simple format (raw)')),\n\t\t\t\t\t\t\t)\n\t\t\t\telif self.node_uri.startswith('xen'):\n\t\t\t\t\tchoices = (\n\t\t\t\t\t\t\t('raw', _('Simple format (raw)')),\n\t\t\t\t\t\t\t#('qcow2', _('Qemu copy-on-write 2')),\n\t\t\t\t\t\t\t#('vhd', _('Virtual Hard Disk')),\n\t\t\t\t\t\t\t#('vmdk', _('VMWare Disk')),\n\t\t\t\t\t\t\t)\n\t\texcept LookupError, e:\n\t\t\tud.debug(ud.ADMIN, ud.ALL, 'UVMM.DW.ts exception=%s' % e)\n\t\ttry: # validate current setting\n\t\t\tdefault = 
options['driver-type']\n\t\t\tud.debug(ud.ADMIN, ud.ALL, 'UVMM.DW.ts default=%s' % default)\n\t\t\tdict(choices)[default]\n\t\texcept LookupError, e:\n\t\t\tud.debug(ud.ADMIN, ud.ALL, 'UVMM.DW.ts default exception=%s' % e)\n\t\t\tdefault = choices[0][0]\n\t\toptions['driver-type'] = default\n\t\treturn umcd.SimpleSelectButton(_('Image format'), option='driver-type', choices=choices, actions=[action], attributes={'width': '300px'}, default=default)", "def createWidget(self, QWidget): # real signature unknown; restored from __doc__\n pass", "def mainTypeName(type_name):\n return mainRender(type_name)", "def _create_value_widgets(self):\n \n # sort values\n self.values = sorted(self.values)\n self.selection = self.default\n \n for value in self.values:\n widget = self.panel.createWidgetT(\"Button\", \"Button\", \n mygui.IntCoord(15, (len(self.widgets)* 20 + 10), self.width - 20, 20),\n mygui.Align())\n widget.setUserString(\"value\", value)\n widget.setCaption(value)\n self.widgets.append(widget)\n \n if value == self.default:\n widget.setStateCheck(True)\n \n widget.subscribeEventMouseButtonClick(self, '_onTypeClick')\n \n self.panel.setSize(self.width, len(self.widgets) * 20 + 20)", "def set_type(self, type: int):\r\n self.type = type\r\n self.canvas.itemconfig(self.item, image=self._get_image())", "def updateType(self):\n # building type\n _type = \"\"\n for ctrl in self.controls:\n _type = _type + ctrl.selection + \"/\"\n _type = _type[:-1]\n \n if scg_alphabet.elementsDescMap.has_key(_type):\n scg_alphabet.changeObjectType(self.object, _type)\n return True\n \n return False", "def type(self, type):\n\n self.container['type'] = type", "def type(self, type):\n\n self.container['type'] = type", "def create_widget(self, parent, tree):\n widget = wx.Panel(parent)\n sizer = wxSingleWidgetSizer()\n widget.SetSizer(sizer)\n return widget", "def __init__(self): \n self.types = {}", "def create_widgets(self):\n # only ever shown card in player's hand, so create widgets when dealt\n self.name_label = tk.Label(self, text=self.name)\n self.ability_label = tk.Label(self, text=self.ability)\n self.food_label = tk.Label(self, text=\"Food: \" + str(self.food))\n self.use_button = TraitUseButton(self, text=\"USE\", command=self.use)", "def getWidget(self):", "def create_widget(self, parent, tree):\n return QTimeEdit(parent)", "def __init__(self, type_=\"text\", name=\"\"):\n super().__init__(\"input\")\n self.type = type_\n self.name = name", "def create_widget(self):\n self.widget = ListView(self.get_context())" ]
[ "0.7240545", "0.6969439", "0.69398946", "0.65376705", "0.63738364", "0.6344139", "0.6330141", "0.6196271", "0.6076996", "0.6016271", "0.5994568", "0.59882146", "0.5943483", "0.5936037", "0.59325784", "0.5930422", "0.59246093", "0.5914714", "0.5906333", "0.5892526", "0.58777946", "0.5869882", "0.5869882", "0.58606255", "0.58438504", "0.582674", "0.5763773", "0.5761941", "0.5750177", "0.5741528" ]
0.74082613
0
Take robopoker's description of the state in XML and parse it into objects
def _parse_xml_state(self, xml): if not xml: return None LOG.info(xml) state = xmltodict.parse(xml) self.community = parse_community_cards(state) if self.community: if len(self.community) > 5: raise StateParseException("More than 5 community cards: %s" % self.community) LOG.info("community: %s", self.community) self.players = parse_players(state) self.pot, self.max_bet, self.my_bet = self._get_bets() return state
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_state_xml():\n url = \"https://www.rit.edu/ready/rit-covid-19-alert-levels\"\n headers = {\n 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36'\n }\n span_xpath = '//*[@id=\"block-rit-bootstrap-subtheme-content\"]/div[4]/div[1]/div/div/div/div/div/div/div/div/div/p[1]/span[2]'\n req = requests.get(url, headers)\n\n tree = html.fromstring(req.content)\n span = tree.xpath(span_xpath)\n return str(span[0].text_content())", "def createElementFromState(state):", "def _get_state(self):", "def read_xml(self):\n connection = urlopen(self.url)\n in_xml = connection.read()\n state = ElementTree.fromstring(in_xml)\n records = []\n record = []\n\n # Specific to CHP\n # TODO(David) Nested for loops are bad. Change this to be more\n # efficient, possibly use generators.\n for center in state:\n rec_center = center.attrib['ID']\n\n for dispatch in center:\n rec_dispatch = dispatch.attrib['ID']\n\n for log in dispatch:\n record = [rec_center, rec_dispatch]\n\n record.append(log.attrib['ID'])\n\n log_time = log.find('LogTime').text.strip('\"')\n log_type = log.find('LogType').text.strip('\"')\n location = log.find('Location').text.strip('\"')\n loc_desc = log.find('LocationDesc').text.strip('\"')\n area = log.find('Area').text.strip('\"')\n\n record.append(log_time)\n record.append(log_type)\n record.append(location)\n record.append(loc_desc)\n record.append(area)\n\n latlon = log.find('LATLON').text.strip('\"')\n\n (lat, lon) = latlon.split(':')\n lat = str(lat[:2]) + '.' + str(lat[2:])\n lon = '-' + str(lon[:3]) + '.' + str(lon[3:])\n\n record.append(lat)\n record.append(lon)\n\n records.append(record)\n\n self.records = records", "def toState(attrs=ALL):", "def saveState(self):\n e = xml.Element(self.type)\n e.attrib['lastUpdate'] = str(clock.now())\n e.attrib['name'] = self.name\n #e.attrib['status'] = ('true' if self.status else 'false')\n return e", "def loadState(fileid):\n dinf = {}\n root = etree.Element(\"state\")\n text = None\n statename = \"\"\n statefile = \"\"\n # TODO: put this in a global variable, and make a function to populate it from the DTD.\n tags = [\"name\",\"start\",\"scue\",\"end\",\"ecue\",\"aspects\",\"update\"]\n for tag in tags:\n dinf[tag] = [\"\",False]\n dinf['cities'] = {}\n dinf['m'] = {}\n dinf['m']['events'] = {}\n dinf['aspects'] = {}\n if not idExists(fileid):\n status.push(0,\"new state created... '%s'\" % fileid)\n return dinf\n statefile = fileid\n fn = os.path.join(config['realmdir'],\"%s.xml\" % fileid)\n status.push(0,\"loading state from XML... 
'%s'\" % fn)\n try:\n with codecs.open(fn,'rU','utf-8') as f:\n tree = etree.parse(f)\n f.close()\n root = tree.getroot()\n except IOError as e:\n print \"c: Could not open configuration file: %s\" % e\n\n ir = 0\n for i in range(len(root)):\n if root[i].tag is not None:\n if root[i].tag == \"city\":\n if len(root[i]) > 0:\n node = \"\"\n node = root[i].find(\"file\")\n if node.text:\n node = node.text.strip()\n node = common.validateFileid(node)\n dinf['cities'][node] = {}\n for j in root[i]:\n if j.tag and j.text and j.tag != \"file\":\n dinf['cities'][node][j.tag] = [j.text.strip(),False]\n if config['debug'] > 3: printPretty(dinf['cities'][node])\n else:\n if config['debug'] > 0:\n print \"Invalid city tag:\"\n for c in root[i]:\n print c.tag + ': ' + c.text,\n else: # no relat length\n if config['debug'] > 0: print \"Empty city tag.\"\n elif root[i].tag == \"events\":\n if len(root[i]) > 0:\n nodes = root[i]\n for node in nodes:\n k = str(len(dinf['m']['events']))\n dinf['m']['events'][k] = {}\n for j in node:\n if j.tag and j.text:\n dinf['m']['events'][k][j.tag] = [j.text.strip(),False]\n else:\n if config['debug'] > 0:\n print \"Invalid milestone tag:\"\n for c in node:\n print c.tag + ': ' + c.text,\n if config['debug'] > 3: printPretty(dinf['m']['events'])\n else: # no relat length\n if config['debug'] > 0: print \"Empty milestone tag.\"\n\n elif root[i].tag == \"aspects\":\n if len(root[i]) > 0:\n nodes = root[i]\n for node in nodes:\n k = str(len(dinf['aspects']))\n dinf['aspects'][k] = {}\n if node.tag and node.text:\n dinf['aspects'][k] = [node.text.strip(),False]\n else:\n if config['debug'] > 0:\n print \"Invalid aspects tag:\"\n print node.tag + ': ' + node.text,\n else: # no aspects length\n if config['debug'] > 0: print \"Empty aspects tag.\"\n\n elif root[i].text is not None:\n dinf[root[i].tag] = [root[i].text.strip(), False]\n if config['debug'] > 2: print str(i) + \" \",\n statename = dinf.get(\"name\",\"\")\n if len(statename) > 1: pushLoc(statefile,statename)\n return dinf", "def state(self) -> str:", "def fromState(state):", "def _extract_state(self, state):\n raise NotImplementedError", "def _get_xml_view_state(r):\n tree = get_xml_parsed_text(r.content)\n xpath = \"//update[@id='j_id1:javax.faces.ViewState:0']/text()\"\n return tree.xpath(xpath)[0]", "def parse(self):", "def read_xml(self):\n pass", "def createxmlmall():\r\n\r\n root = ET.Element(\"state\")\r\n model = ET.SubElement(root, \"model\")\r\n model.text = r\"\"\r\n\r\n dataid = ET.SubElement(root, \"dataids\")\r\n application = ET.SubElement(root, \"application\")\r\n\r\n application.text = \"SIBS Configurator\"\r\n safecookie = ET.SubElement(root, \"safecookie\")\r\n steps = ET.SubElement(root, \"steps\")\r\n prev = ET.SubElement(steps, \"prev\")\r\n\r\n lastproxy = ET.SubElement(root, \"last-proxy\").text = \"tcserver0\"\r\n\r\n tree = ET.ElementTree(root) # saves tree in variable \"tree\"\r\n return tree, safecookie, steps, prev", "def from_xml(self, root, source: str, state: str):\n if source not in MAPPINGS:\n raise ValueError(\"No mappings for this source: {}\".format(source))\n\n # Property searches can be nation-wide, so we need to be able to discern the state\n # by looking at the data.\n attribution = root.findall(\"./source\")[0].text # Expecting, e.g. 
\"Garland County (Arkansas) - blah blah\"\n state_regex = r\"\\(([A-Za-z\\s]*)\"\n matches = re.findall(state_regex, attribution)\n state = StateNameToAbbreviation[matches[0].upper()]\n\n county_regex = r\"^([A-Za-z\\s]*)\"\n matches = re.findall(county_regex, attribution)\n county = matches[0].replace(\" County\", \"\").strip()\n\n if state not in MAPPINGS[source]:\n raise ValueError(\"No {} mappings for this state: {}\".format(source, state))\n\n # The mappings can vary county-by-county within a state. Kill me.\n mappings = MAPPINGS[source][state]\n if county.upper() in mappings:\n mappings = mappings[county.upper()]\n else:\n mappings = mappings[\"*\"]\n\n self.source = source\n self.state = state\n self.county = county\n\n for mapping in mappings:\n elem = root.findall(mapping[\"path\"])\n if elem:\n if \"prop\" in mapping:\n value = elem[0].get(mapping[\"prop\"])\n else:\n value = elem[0].text\n\n if value:\n if mapping[\"transform\"]:\n value = mapping[\"transform\"](value)\n setattr(self, mapping[\"attr\"], value)\n\n hash_input = \"{}{}\".format(self.owner_name, self.property_address)\n self.hash = hashlib.md5(hash_input.encode())", "def saveState(fileid,data):\n global states\n info = data.get('info')\n fn = fileid + \".xml\"\n state = etree.Element(\"state\")\n # TODO: put this in a global variable, and make a function to populate it from the DTD.\n tags = [\"name\",\"start\",\"scue\",\"end\",\"ecue\",\"vital\",\"polit\",\"culture\",\"history\", \"geography\",\"econ\",\"demo\",\"events\",\"cities\",\"aspects\",\"update\"]\n for tag in tags:\n if tag == \"cities\":\n nodes = info.get(\"cities\")\n if nodes is not None:\n for node in nodes.keys():\n if nodes[node].get(\"name\"):\n connected = etree.Element(\"city\")\n value = info['cities'][node].get(\"name\")\n if value is None: value = ['',False]\n etree.SubElement(connected,\"name\").text = value[0]\n value = node\n if value is None: value = ''\n etree.SubElement(connected,\"file\").text = value\n value = info['cities'][node].get(\"note\")\n if value is not None and len(value[0]) > 0: etree.SubElement(connected,\"note\").text = value[0]\n state.append(connected)\n else:\n print \"A required tag is missing from city %s.\" % node\n else:\n print \"no cities found\"\n elif tag == \"events\":\n nodes = info.get(\"m\")\n nodes = nodes.get(\"events\")\n if nodes is not None:\n events = etree.Element(\"events\")\n for node in nodes.keys():\n if nodes[node].get(\"event\"):\n connected = etree.Element(\"mstone\")\n value = info['m']['events'][node].get(\"event\")\n if value is None: value = ['',False]\n etree.SubElement(connected,\"event\").text = value[0]\n value = info['m']['events'][node].get(\"date\")\n if value is None: value = ['',False]\n etree.SubElement(connected,\"date\").text = value[0]\n events.append(connected)\n else:\n print \"A required tag is missing from event %s.\" % node\n state.append(events)\n else:\n print \"no events found\"\n# 820 #\n elif tag == \"aspects\":\n nodes = info.get(\"aspects\")\n if nodes is not None:\n aspects = etree.Element(\"aspects\")\n for node in sorted(nodes.keys()):\n value = nodes[node]\n if value is None: value = ['',False]\n etree.SubElement(aspects,\"text\").text = value[0]\n state.append( aspects )\n else:\n print \"no aspects found\"\n\n elif tag == \"update\":\n etree.SubElement(state,tag).text = common.skrTimeStamp(config['datestyle'])\n else:\n value = info.get(tag)\n if value is None: value = ['',False]\n etree.SubElement(state,tag).text = value[0]\n r = 
saveXMLtree(state,\"state\",fileid)\n if r:\n try:\n states[fileid]['changed'] = False\n except KeyError:\n printPretty(states)\n return r", "def __init__(self, gameStateString):\n self.__armies=[]\n self.__camps=[]\n self.__parseGameState(gameStateString)", "def state(self, state: str) -> None:", "def __getstate__(self):\n return [self.dataset, self.parser]", "def __init__(self):\n\t\tself.state = None\n\t\tself.info = None\n\t\tself.next = None", "def __repr__(self, state):\n print ' ',\n for w in range(len(state)+2):\n print \"___\",\n print '\\n'\n for x in state:\n print \"| \", x, \" |\"\n print ' ',\n for y in range(len(state)+2):\n print \"___\",\n print '\\n'\n return state", "def getState():\n # TODO: this isn't nearly as meaningful as it used to be", "def __getstate__(self) -> Dict[str, Any]:\n return {\"name\": self.name}", "def xml(self):\n return parse_xml(self, tab=\"\\t\", id=self.id or \"\")", "def _parse(self):\n pass", "def __init__(self, elem, state):\n self.elem = elem\n self.state = state", "def __parse(self):\n\t\tparser=xml.sax.make_parser()\n\t\tparser.setContentHandler(OSMXMLFileParser(self))\n\t\tparser.parse(self.filename)\n\t\n\t\t# convert them back to lists\n\t\tself.nodes = self.nodes.values()\n\t\tself.ways = self.ways.values()\n\t\tself.relations = self.relations.values()", "def test_state_repr(self):\n state = State('test-state')\n self.assertEqual(f'{state}', 'State<test-state>')", "def get_state_tostring(rapid_data):\n try:\n if rapid_data.RapidType == 'bool':\n res = 'State = %s' % rapid_data.Value\n return res\n else:\n err = 'DataType is ' + rapid_data.RapidType + ' and not bool.'\n return err\n except Exception, err:\n return err", "def parse_data(self):\n\n msg = self.xml['dom'].childNodes[0]\n self.data = xml_to_dicts(msg, False)\n\n # Get some metadata together\n self.id = \"%s:%s\" % (self.data['src']['name']['#cdata'], self.data['src']['id']['#cdata'])\n self.temp = self.data['tmpr']['#cdata']\n self.watts = self.data['ch1']['watts']['#cdata']\n\n # Time - CurrentCost and local\n self.date = {}\n self.date['cc'] = [ int(self.data['date'][k]['#cdata']) for k in ('dsb','hr','min','sec') ]\n self.date['now'] = localtime()" ]
[ "0.61656994", "0.6113035", "0.60756284", "0.5973485", "0.5950438", "0.5884306", "0.5785711", "0.5763665", "0.5754598", "0.5736893", "0.57301617", "0.5705252", "0.5677678", "0.5626149", "0.561045", "0.5576446", "0.55511475", "0.5524495", "0.54958904", "0.5473836", "0.54701257", "0.5448255", "0.54362136", "0.5423796", "0.5418081", "0.5405461", "0.540161", "0.54011095", "0.5381096", "0.53740144" ]
0.6481559
0
Return a list of the community cards
def parse_community_cards(state): community_cards = list() community = state['game']['community'] if community and 'card' in community: for card in community['card']: new_card = robopoker.entities.Card(card['@rank'], card['@suit']) community_cards.append(new_card) return community_cards
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_card_list(self):\n return self.cards", "def show(self):\r\n for card in self.cards_list:\r\n print(card)", "def cards(self):\n return self._cards", "def test_cards_get_list(self):\n pass", "def list_repo_cards(self, repo):\n # check for permission\n DataHubManager.has_repo_file_privilege(\n self.username, self.repo_base, repo, 'read')\n\n # get the relevant cards\n cards = Card.objects.all().filter(\n repo_base=self.repo_base, repo_name=repo)\n cards = sorted([c.card_name for c in cards])\n return cards", "def get_game_cards(gameId):\n pass", "def fetch_cards(owner_account_id):\n resp = oauth.tapkey.get(f\"Owners/{owner_account_id}/BoundCards?$select=id,title\")\n bound_cards = resp.json()\n return bound_cards", "def get_all_open_cards(self):\n print('Searching Trello cards..\\n')\n done_sources = []\n for list in self.my_lists:\n for card in list.list_cards(card_filter='open'):\n name = card.name.split()[0]\n done_sources.append(card)\n return done_sources", "def get_all_cards(self, filter='open'):\n print('Searching Trello cards..\\n')\n done_sources = []\n for list in self.my_lists:\n for card in list.list_cards(card_filter=filter):\n name = card.name.split()[0]\n done_sources.append(card)\n return done_sources", "async def list_cards(self, ctx):\r\n description = \"\"\"\r\nUsually found in a box or pouch, this deck contains a number of cards made of ivory or vellum. Most (75 percent) of these decks have only thirteen cards, but the rest have twenty-two.\r\n\r\nBefore you draw a card, you must declare how many cards you intend to draw and then draw them randomly (you can use an altered deck of playing cards to simulate the deck). Any cards drawn in excess of this number have no effect. Otherwise, as soon as you draw a card from the deck, its magic takes effect. You must draw each card no more than 1 hour after the previous draw. If you fail to draw the chosen number, the remaining number of cards fly from the deck on their own and take effect all at once.\r\n\r\nOnce a card is drawn, it fades from existence. 
Unless the card is the Fool or the Jester, the card reappears in the deck, making it possible to draw the same card twice.\r\n\r\n\"\"\"\r\n \r\n description += \"Cards: \"\r\n for card in self.cards:\r\n description += f\"**{card['name']}**, \"\r\n\r\n emb = discord.Embed(\r\n title='Deck of Many Things',\r\n colour=discord.Colour.dark_purple(),\r\n url='https://roll20.net/compendium/dnd5e/Deck%20of%20Many%20Things#content',\r\n description=description\r\n )\r\n emb.set_thumbnail(url='https://i.imgur.com/741T6Lk.png')\r\n await ctx.send(embed=emb)", "def cards():\n if user_loggined():\n user = models.User.query.get(session['user_id'])\n u_cards = user.cards.all()\n prep_cards = []\n for card in u_cards:\n prep_cards.append(card.type + ' **** '+card.cnb[-9:])\n else:\n return redirect(url_for('index'))\n return redirect(url_for('index'))", "def getAllCards(self):\n return self._cards", "def get_cards():\n with open(\"mashape_key.txt\", \"r\") as mashape_key:\n api_key = mashape_key.read()\n print(api_key)\n url = \"https://omgvamp-hearthstone-v1.p.mashape.com/cards?collectible=1\"\n headers = {\"X-Mashape-Key\": api_key}\n response = requests.get(url, headers=headers)\n cards = json.loads(response.text)\n return cards", "async def get_all_cards():\n card_tuple = await ex.conn.fetch(\"SELECT id FROM blackjack.cards\")\n all_cards = []\n for card in card_tuple:\n all_cards.append(card[0])\n return all_cards", "def get_next_community_card(self):\r\n return self.community_cards.pop(0)", "def get_cards(query_param):\n return _query_scryfall(query_param)", "def get_cards(self, name):\n cards = []\n\n for card in self.cards:\n if card.name == name:\n cards.append(card)\n\n return cards", "def receive_community_card(self, card):\r\n self.community_cards.append(card)", "def print_cards(self, all_cards=True):\n # print(\"Cards:\")\n result = \"\"\n cards = self.cards\n if all_cards:\n cards += self.cards_on_table\n for c in cards:\n result += str(c)\n return result", "def get_card(name_str, page=1):\r\n payload = {'name': name_str, 'page': page}\r\n response = query('https://api.magicthegathering.io/v1/cards', payload)\r\n return response.json()", "def cards_to_str(cards):\n cards_list = []\n for c in cards:\n cards_list.append(c.get_str())\n return cards_list", "def print_cards(list_var):\n\tfor i in range(len(list_var)):\n\t\tprint(\"player %d cards are\" %i,list_var[i])", "def get_card_info_list(self):\n self._get_card_info_list = pa_card_info_cb_t(self._card_info_cb)\n pa_context_get_card_info_list(self._context,\n self._get_card_info_list,\n None)", "def get_cards(self):\n card = self._starting_card\n return card", "def dealCommunity(self, num):\n\t\tfor _ in range(num):\n\t\t\tself.communityCards.append(self.deck.pop())", "def get_card(key, token, board_id, member_name):\n\n # Make GET request\n params = {'key': key, 'token': token}\n url = 'https://api.trello.com/1/boards/%s/members/%s/cards' % (board_id, member_name)\n response = requests.get(url, params=params)\n data = response.json()\n\n # Show user cards listing.\n count = len(data)\n max_len = len(str(count))\n print 'Your cards:'\n for i in range(count):\n card = data[i]\n print ' [%s] %s' % (string.rjust(str(i + 1), max_len, ' '), card['name'])\n print\n\n # Prompt user to choose a card.\n while True:\n choice = raw_input('Pick a card, any card [Q/%d-%d]: ' % (1, count))\n if choice == 'q' or choice == 'Q':\n return\n\n try:\n i = int(choice)\n except ValueError:\n continue\n\n if 1 <= i <= count:\n return data[i - 1]", "def 
ListCreditCards(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "async def cards_per_hand(ctx):\n message = NNB.cards_per_hand()\n await ctx.send(message)", "def get_cards(self):\n return [Flashcard.from_word(word) for word in self.get_words()]", "def Send_newCards(self, cards): \n serialized = [c.serialize() for c in cards]\n self.Send({\"action\": \"newCards\", \"cards\": serialized})" ]
[ "0.7051691", "0.6782476", "0.6458687", "0.63811606", "0.6371993", "0.6363945", "0.63036114", "0.6284406", "0.628308", "0.6273262", "0.62642545", "0.6249213", "0.6209381", "0.6188677", "0.6165295", "0.6075092", "0.6051471", "0.6034691", "0.5932884", "0.59235317", "0.59216475", "0.5895446", "0.5869743", "0.58149993", "0.5796558", "0.5794156", "0.57885873", "0.5782084", "0.57779294", "0.57717896" ]
0.7830326
0
Parse robopoker's string representation of cards into Card objects
def parse_card_representation(cards_string): cards_string = cards_string.split() cards = list() for card in cards_string: if len(card) != 2: raise Exception("The card should be represented by 2 characters," " got '%s'" % cards_string) new_card = robopoker.entities.Card(card[0], card[1]) cards.append(new_card) return cards
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_card_list_from_string(string, ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, player2):\n deck_list = []\n while len(string) >= 14:\n x = 'card_' + string[7:12]\n card = eval (x)\n deck_list.append(card)\n\n string = string[14:]\n\n\n return deck_list", "def from_string(string):\n\n if len(string) == 2:\n value = string[0]\n elif len(string) == 3:\n value = string[:2]\n else:\n raise ValueError(f\"cannot parse card from {repr(string)} invalid length\")\n\n suit = string[-1]\n\n if (value not in STRING_TO_VALUE_MAPPING) or (\n suit not in STRING_TO_SUIT_MAPPING\n ):\n raise ValueError(\n f\"cannot parse card from string {repr(string)} invalid characters\"\n )\n\n return Card(\n value=STRING_TO_VALUE_MAPPING[value],\n suit=STRING_TO_SUIT_MAPPING[suit],\n )", "def str2cards(s):\n assert len(s) % 2 == 0\n str_list = []\n cards = []\n for cardnum in range(len(s) / 2):\n str_list.append(s[cardnum * 2 : cardnum * 2 + 2])\n for i, si in enumerate(str_list):\n cstring = si[0].upper() + si[1].lower()\n cards.append(Card.new(cstring))\n return cards", "def make_deck_from_string(string, ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, player2):\n deck_list = []\n while len(string) >= 14:\n x = 'card_' + string[7:9] + '_' + string[10:12]\n card = eval (x)\n if card.card_type == 'monster':\n deck_list.append(Monster(name = card.name, set_number= card.set_number,card_number= card.card_number,card_type= card.card_type,job= card.job,level= card.level,\n attack= card.attack, health= card.health,lv_type= card.lv_type,lv_active_level= card.lv_active_level, special_effect= card.special_effect))\n elif card.card_type == 'tactic':\n deck_list.append(Tactic(name = card.name, set_number= card.set_number,card_number= card.card_number,card_type= card.card_type,job= card.job,level= card.level,\n lv_type= card.lv_type,lv_active_level= card.lv_active_level, special_effect= card.special_effect))\n elif card.card_type == 'item':\n deck_list.append(Item(name = card.name, set_number= card.set_number,card_number= card.card_number,card_type= card.card_type,job= card.job,level= card.level,\n lv_type= card.lv_type,lv_active_level= card.lv_active_level, special_effect= card.special_effect))\n elif card.card_type == 'character':\n deck_list.append(Character(name = card.name,set_number= card.set_number,card_number= card.card_number,card_type= card.card_type,job= card.job,level= card.level,\n health= card.health,skill_1_lv = card.skill_1_lv, skill_1_type = card.skill_1_type,skill_2_lv = card.skill_2_lv, skill_2_type = card.skill_2_type,skill_3_lv = card.skill_3_lv, skill_3_type = card.skill_3_type))\n\n string = string[14:]\n\n\n return deck_list", "def getCardFromStr(self, cardStr):\n cardList = cardStr.split()\n cardRank = self.strToCardValue(cardList[0])\n cardSuit = self.strToCardValue(cardList[2])\n return Card(cardRank, cardSuit)", "def ordered_deck(cls, s: str):\n deck = cls()\n deck.cards = eval(s)\n return deck", "def parseCreditCard(s):\n theNum=normalize(s)\n ccClass=None\n\n prefixKeys=PREFIXES.keys()\n prefixKeys.sort(lambda a, b: cmp(len(a), len(b)))\n\n for prefix in prefixKeys:\n if theNum.find(prefix) == 0:\n ccClass=PREFIXES[prefix]\n\n if ccClass is None:\n raise InvalidCard(\"Unknown card prefix\")\n\n return ccClass(theNum)", "def _string_to_usable_deck(deck_string):\n try:\n result = json.loads(deck_string)\n except Exception:\n raise InvalidDeckDataException(\"Deck data does not appear to be valid JSON\")\n\n try:\n 
jsonschema.validate(result, consts.DECK_SCHEMA)\n except Exception as e:\n # For now, reraise the original error, because it is useful and has important context\n raise\n # raise InvalidDeckDataException(e.message)\n\n return result", "def parseCards(self, raw_cards: dict, cardsList: list):\r\n cards = {}\r\n toParse = list(cardsList)\r\n for catagory in raw_cards:\r\n for (name, subcat) in {name:catagory[name] for name in catagory if name != \"name\"}.items():\r\n for card in subcat:\r\n if(card[\"name\"]) in cardsList:\r\n toParse.remove(card[\"name\"])\r\n cards[card[\"name\"]] = card\r\n if(len(toParse) > 0):\r\n raise Exception(f\"Could not find cards {toParse}. List was {cardsList}\")\r\n return cards", "def encode_cards(cards_str):\n plane = np.zeros(54, dtype=int)\n joker_counter = 0\n for card_str in cards_str:\n if card_str == '0' and joker_counter == 0:\n # handle the first joker situation\n joker_counter = 1\n index = card_encoding_dict['01']\n plane[index] = 1\n elif card_str == '0' and joker_counter == 1:\n # handle the second joker situation\n index = card_encoding_dict['02']\n plane[index] = 1\n else:\n index = card_encoding_dict[card_str]\n plane[index] = 1\n return plane", "def get_cards_encoded_from_str(cards: List[str]) -> np.ndarray:\n cards_int = convert_str_encoded_cards_to_int_encoded(cards)\n result = np.zeros(36, np.int32)\n result[cards_int] = 1\n return result", "def convert_to_card(self):\r\n url = '{0}/convertToCard'.format(self.get_url())\r\n request = http.Request('POST', url)\r\n return request, parsers.parse_json", "def parse_community_cards(state):\n community_cards = list()\n community = state['game']['community']\n if community and 'card' in community:\n for card in community['card']:\n new_card = robopoker.entities.Card(card['@rank'], card['@suit'])\n community_cards.append(new_card)\n return community_cards", "def test_parse_hand(self):\n\n hand_lines = [\n '\"-- starting hand #6 (No Limit Texas Hold\\'em) (dealer: \"\"Eddy KGB @ _7OU6FzFZP\"\")'\n ' --\",2020-12-17T00:44:19.590Z,160816585959100',\n '\"Player stacks: #1 \"\"MOP @ jwf61y3XJg\"\" (1060) | #4 \"\"rus @ PjBYO_8gbf\"\" (971) |'\n ' #6 \"\"Eddy KGB @ _7OU6FzFZP\"\" (1025) | #7 \"\"Ben @ eSbnubU-KP\"\" (1057) | #8'\n ' \"\"Max @ izsy1Zibpi\"\" (887)\",2020-12-17T00:44:19.590Z,160816585959101',\n '\"Your hand is Q♠, 3♠\",2020-12-17T00:44:19.590Z,160816585959105',\n '\"\"\"Ben @ eSbnubU-KP\"\" posts a small blind of 5\",'\n \"2020-12-17T00:44:19.590Z,160816585959107\",\n '\"\"\"Max @ izsy1Zibpi\"\" posts a big blind of 10\",'\n \"2020-12-17T00:44:19.590Z,160816585959108\",\n '\"\"\"MOP @ jwf61y3XJg\"\" folds\",2020-12-17T00:44:22.437Z,160816586243800',\n '\"\"\"rus @ PjBYO_8gbf\"\" calls 10\",2020-12-17T00:44:25.141Z,160816586514100',\n '\"\"\"Eddy KGB @ _7OU6FzFZP\"\" calls 10\",2020-12-17T00:44:28.601Z,160816586860200',\n '\"\"\"Ben @ eSbnubU-KP\"\" calls 10\",2020-12-17T00:44:31.296Z,160816587129700',\n '\"\"\"Max @ izsy1Zibpi\"\" checks\",2020-12-17T00:44:32.791Z,160816587279100',\n '\"flop: [J♠, 10♥, 6♥]\",2020-12-17T00:44:33.595Z,160816587359600',\n '\"\"\"Ben @ eSbnubU-KP\"\" checks\",2020-12-17T00:44:40.619Z,160816588062000',\n '\"\"\"Max @ izsy1Zibpi\"\" checks\",2020-12-17T00:44:41.477Z,160816588147800',\n '\"\"\"rus @ PjBYO_8gbf\"\" checks\",2020-12-17T00:44:44.131Z,160816588413200',\n '\"\"\"Eddy KGB @ _7OU6FzFZP\"\" checks\",2020-12-17T00:44:46.017Z,160816588601700',\n '\"turn: J♠, 10♥, 6♥ [Q♦]\",2020-12-17T00:44:46.823Z,160816588682400',\n '\"\"\"Ben @ eSbnubU-KP\"\" 
checks\",2020-12-17T00:44:50.123Z,160816589012400',\n '\"\"\"Max @ izsy1Zibpi\"\" checks\",2020-12-17T00:44:57.859Z,160816589786000',\n '\"\"\"rus @ PjBYO_8gbf\"\" checks\",2020-12-17T00:44:59.202Z,160816589920300',\n '\"\"\"Eddy KGB @ _7OU6FzFZP\"\" checks\",2020-12-17T00:45:01.677Z,160816590167800',\n '\"river: J♠, 10♥, 6♥, Q♦ [3♣]\",2020-12-17T00:45:02.499Z,160816590250400',\n '\"\"\"Ben @ eSbnubU-KP\"\" bets 30\",2020-12-17T00:45:08.970Z,160816590897100',\n '\"\"\"Max @ izsy1Zibpi\"\" calls 30\",2020-12-17T00:45:10.705Z,160816591070600',\n '\"\"\"rus @ PjBYO_8gbf\"\" calls 30\",2020-12-17T00:45:25.416Z,160816592541700',\n '\"\"\"Eddy KGB @ _7OU6FzFZP\"\" folds\",2020-12-17T00:45:26.287Z,160816592628700',\n '\"\"\"Ben @ eSbnubU-KP\"\" shows a Q♠, 3♠.\",2020-12-17T00:45:27.095Z,160816592709700',\n '\"\"\"Ben @ eSbnubU-KP\"\" collected 130 from pot with Two Pair, Q\\'s & 3\\'s'\n ' (combination: Q♠, Q♦, 3♠, 3♣, J♠)\",2020-12-17T00:45:27.095Z,160816592709701',\n '\"-- ending hand #6 --\",2020-12-17T00:45:27.095Z,160816592709702',\n ]\n\n expected_hand = Hand(\n id_=6,\n players={\n Player(name=\"Ben\", id_=\"eSbnubU-KP\"),\n Player(name=\"Eddy KGB\", id_=\"_7OU6FzFZP\"),\n Player(name=\"MOP\", id_=\"jwf61y3XJg\"),\n Player(name=\"Max\", id_=\"izsy1Zibpi\"),\n Player(name=\"rus\", id_=\"PjBYO_8gbf\"),\n },\n stacks={\n Player(name=\"MOP\", id_=\"jwf61y3XJg\"): 1060,\n Player(name=\"rus\", id_=\"PjBYO_8gbf\"): 971,\n Player(name=\"Eddy KGB\", id_=\"_7OU6FzFZP\"): 1025,\n Player(name=\"Ben\", id_=\"eSbnubU-KP\"): 1057,\n Player(name=\"Max\", id_=\"izsy1Zibpi\"): 887,\n },\n our_cards=(Card.from_string(\"Q♠\"), Card.from_string(\"3♠\")),\n preflop=Street(\n actions=[\n Post(player=Player(name=\"Ben\", id_=\"eSbnubU-KP\"), amount=5),\n Post(player=Player(name=\"Max\", id_=\"izsy1Zibpi\"), amount=10),\n Fold(player=Player(name=\"MOP\", id_=\"jwf61y3XJg\")),\n Call(player=Player(name=\"rus\", id_=\"PjBYO_8gbf\"), amount=10),\n Call(player=Player(name=\"Eddy KGB\", id_=\"_7OU6FzFZP\"), amount=10),\n Call(player=Player(name=\"Ben\", id_=\"eSbnubU-KP\"), amount=10),\n Check(player=Player(name=\"Max\", id_=\"izsy1Zibpi\")),\n ]\n ),\n flop=[\n Card.from_string(\"J♠\"),\n Card.from_string(\"10♥\"),\n Card.from_string(\"6♥\"),\n ],\n first=Street(\n actions=[\n Check(player=Player(name=\"Ben\", id_=\"eSbnubU-KP\")),\n Check(player=Player(name=\"Max\", id_=\"izsy1Zibpi\")),\n Check(player=Player(name=\"rus\", id_=\"PjBYO_8gbf\")),\n Check(player=Player(name=\"Eddy KGB\", id_=\"_7OU6FzFZP\")),\n ]\n ),\n turn=[Card.from_string(\"Q♦\")],\n second=Street(\n actions=[\n Check(player=Player(name=\"Ben\", id_=\"eSbnubU-KP\")),\n Check(player=Player(name=\"Max\", id_=\"izsy1Zibpi\")),\n Check(player=Player(name=\"rus\", id_=\"PjBYO_8gbf\")),\n Check(player=Player(name=\"Eddy KGB\", id_=\"_7OU6FzFZP\")),\n ]\n ),\n river=[Card.from_string(\"3♣\")],\n third=Street(\n actions=[\n Bet(player=Player(name=\"Ben\", id_=\"eSbnubU-KP\"), amount=30),\n Call(player=Player(name=\"Max\", id_=\"izsy1Zibpi\"), amount=30),\n Call(player=Player(name=\"rus\", id_=\"PjBYO_8gbf\"), amount=30),\n Fold(player=Player(name=\"Eddy KGB\", id_=\"_7OU6FzFZP\")),\n Show(\n player=Player(\"Ben\", id_=\"eSbnubU-KP\"),\n cards=(Card.from_string(\"Q♠\"), Card.from_string(\"3♠\")),\n ),\n Collect(player=Player(name=\"Ben\", id_=\"eSbnubU-KP\"), amount=130),\n ]\n ),\n )\n\n actual_hand = parser.parse_hand(hand_lines=hand_lines)\n\n self.assertCountEqual(actual_hand.players, expected_hand.players)\n self.assertEqual(actual_hand.stacks, 
expected_hand.stacks)\n self.assertEqual(actual_hand.preflop, expected_hand.preflop)\n self.assertEqual(actual_hand.flop, expected_hand.flop)\n self.assertEqual(actual_hand.first, expected_hand.first)\n self.assertEqual(actual_hand.turn, expected_hand.turn)\n self.assertEqual(actual_hand.second, expected_hand.second)\n self.assertEqual(actual_hand.river, expected_hand.river)\n self.assertEqual(actual_hand.third, expected_hand.third)", "def parse_mtgjson_cards(json_data):\n output = []\n for data in json_data.values():\n cards = []\n for raw in data[\"cards\"]:\n c = Card(raw)\n c.image_url = MTGConstants.card_image_url_base.format(c.multiverse_id)\n c.set = data[\"code\"]\n c.set_name = data[\"name\"]\n cards.append(c)\n output = output + cards\n return output", "def parse(card_info):\n\n card = {\n u\"types\" : set(), u\"supertypes\" : set(), u\"subtypes\" : set(),\n u\"appearances\" : set(), u\"abilities\" : [], u\"mana_cost\" : None,\n u\"loyalty\" : None, u\"power\" : None, u\"toughness\" : None,\n }\n\n lines = iter(card_info)\n card[u\"name\"] = next(lines)\n return _parse_mana_cost(next(lines), lines, card)", "def parse_rate_card_line(line):\n try:\n return grammar.rate_card.parseString(line)\n except pyparsing.ParseException as parse_err:\n raise RateCardParsingException(parse_err)", "def parse_hand(self):\n self.parse_part()\n self.parse_header()\n self.parse_setup()\n self.parse_preflop()\n self.parse_flop()\n self.parse_turn()\n self.parse_river()\n self.parse_showdown()\n self.conclude_hand()", "def cards_to_str(cards):\n cards_list = []\n for c in cards:\n cards_list.append(c.get_str())\n return cards_list", "def read_card():\n suit_is_valid = False\n while not suit_is_valid:\n suit_input = input('Suit: ').upper()\n for suit in Suit:\n if suit_input == suit.name:\n card_suit = suit\n suit_is_valid = True\n\n rank_is_valid = False\n while not rank_is_valid:\n rank_input = input('Rank: ').upper()\n for rank in Rank:\n if rank_input == rank.name:\n card_rank = rank\n rank_is_valid = True\n return Card(card_suit, card_rank)", "def test_parse_shows_with_cards(self):\n\n line = '\"\"\"Gargs @ izsy1Zibpi\"\" shows a A♠, A♣.\",2021-02-11T02:41:46.355Z,161301130635712'\n actual = parser.parse_action(line)\n expected = Show(\n player=Player(name=\"Gargs\", id_=\"izsy1Zibpi\"),\n cards=(Card.from_string(\"A♠\"), Card.from_string(\"A♣\")),\n )\n self.assertEqual(actual, expected)", "def construct_cards(tokens: List[Tuple[Token, str]]) -> List[str]:\n md = Render()\n headers: Tuple[str, str, str] = (\"\", \"\", \"\")\n cards: List[str] = []\n\n for token in tokens:\n if token[0] == Token.HEADER_1:\n headers = (token[1], \"\", \"\")\n elif token[0] == Token.HEADER_2:\n headers = (headers[0], token[1], \"\")\n elif token[0] == Token.HEADER_3:\n headers = (headers[0], headers[1], token[1])\n elif token[0] == Token.CARD:\n constructed_headers = Template.assemble_headers(headers)\n body = add_cloze(md.render(token[1]))\n\n if constructed_headers is None:\n cards.append(\n f'{Template.OPEN_CARD}' \n f'{body}'\n f'{Template.CLOSE_CARD}'\n )\n else:\n cards.append(\n f'{Template.OPEN_CARD}' \n f'{constructed_headers}'\n f'<hr>'\n f'{body}'\n f'{Template.CLOSE_CARD}'\n )\n\n return cards", "def to_hand( strings ):\n assert len(strings) == 5\n hand= [to_card(c) for c in strings]\n hand.sort() # Ascending order by rank. 
hand[4] is high card.\n assert order(hand[4]) >= order(hand[0])\n return tuple(hand)", "def read_bc_string_cards(line_split, temp_data):\n bc_string_list = temp_data.setdefault('bc_string_list', [])\n record = [float('NaN') for i in range(4)]\n record[0] = line_split[0] # card NDS, EGS, MDS, MTS\n try:\n record[1] = int(line_split[1]) # string id\n record[2] = int(line_split[2]) # node id, cell id, material id\n if line_split[0] == 'EGS' or line_split[0] == 'MDS':\n record[3] = int(line_split[3]) # node id, cell id\n bc_string_list.append(record)\n except:\n raise IOError(\"Error reading boundary string from *.bc file.\")", "def __init__(self, cards):\n self.cards = cards", "def ascii_version_of_card(*cards):\n\n # we will use this to prints the appropriate icons for each card\n name_to_symbol = {\n 'Spades': '♠',\n 'Diamonds': '♦',\n 'Hearts': '♥',\n 'Clubs': '♣',\n }\n\n def card_to_string(card):\n # 10 is the only card with a 2-char rank abbreviation\n rank = card.rank if card.rank == '10' else card.rank[0]\n\n # add the individual card on a line by line basis\n return CARD.format(rank=rank, suit=name_to_symbol[card.suit])\n\n\n return join_lines(map(card_to_string, cards))", "def tocard(dict_card):\n info = models.CardInfo(**dict_card['info']) if 'info' in dict_card and dict_card['info'] else None\n prices = models.CardPrices(**dict_card['prices']) if 'prices' in dict_card and dict_card['prices'] else None\n shops = dict([(k, models.Shop(**v)) for k, v in dict_card['shops'].items()])\n return models.Card(dict_card['name'], dict_card['redaction'], dict_card['type'],\n info=info, prices=prices, shops=shops)", "def __parseGameState(self, s):\n self.__camps=[]\n self.__armies=[]\n idCamp = 0\n lines = s.split(\"\\n\")[:-1] # letzte leeres ding nicht liefern.\n for line in lines:\n tokens = line.split(\" \")\n if( (len(tokens) == 6) or (len(tokens) == 7) ):\n if( \"C\" in tokens[0] ):\n if( len(tokens) == 6 ):\n x = int(tokens[1])\n y = int(tokens[2])\n owner = int(tokens[3])\n mancount = int(tokens[4])\n size = int(tokens[5])\n self.__camps.append(Camp(idCamp, owner, mancount, size, x, y))\n idCamp=idCamp+1\n elif( \"A\" in tokens[0] ):\n if( len(tokens) == 7):\n owner = int(tokens[1])\n mancount = int(tokens[2])\n source = int(tokens[3])\n destination = int(tokens[4])\n totalTripLength = int(tokens[5])\n turnsRemaining = int(tokens[6])\n self.__armies.append(Army(owner, mancount, source, destination, totalTripLength, turnsRemaining))", "def read_mp_cards(line_split, bc_class):\n try:\n card = line_split[1]\n if card == 'MU':\n bc_class.model_constants.kinematic_viscosity = float(line_split[2])\n elif card == 'G':\n bc_class.model_constants.gravity = float(line_split[2])\n elif card == 'MUC':\n bc_class.model_constants.mannings_unit_constant = float(line_split[2])\n elif card == 'RHO':\n bc_class.model_constants.density = float(line_split[2])\n elif card == 'DTL':\n bc_class.model_constants.enable_wetting_drying = True\n bc_class.model_constants.wet_dry_limit = float(line_split[2])\n else:\n # get material ID\n if card != 'WND':\n material_id = int(line_split[2])\n else:\n material_id = int(line_split[3])\n\n if material_id in bc_class.material_properties:\n material_property = bc_class.material_properties[material_id]\n else:\n # get material property class\n material_property = MaterialProperties()\n # set not required to deactivated\n material_property.set_not_required(False)\n bc_class.material_properties[material_id] = material_property\n if card == 'EVS':\n 
material_property.eddy_viscosity_method = 'Constant (EVS)'\n material_property.vxx_eddy_viscosity = float(line_split[3])\n material_property.vyy_eddy_viscosity = float(line_split[4])\n material_property.vxy_eddy_viscosity = float(line_split[5])\n elif card == 'EEV':\n material_property.eddy_viscosity_method = 'Estimated (EEV)'\n material_property.estimated_eddy_viscosity_weighting_factor = float(line_split[3])\n material_property.estimated_eddy_viscosity_method = int(line_split[4])\n elif card == 'COR':\n material_property.coriolis = True\n material_property.coriolis_latitude = float(line_split[3])\n elif card == 'ML':\n material_property.max_refinement_level = int(line_split[3])\n elif card == 'SRT':\n material_property.refinement_tolerance = float(line_split[3])\n elif card == 'DF' or card == 'TRT':\n constituent_id = int(line_split[3])\n transport_property = material_property.transport_properties.setdefault(constituent_id,\n MaterialTransportProperties())\n if card == 'TRT':\n transport_property.refinement_tolerance = float(line_split[4])\n else:\n transport_property.turbulent_diffusion_rate = float(line_split[4])\n elif card == 'WND':\n if line_split[2] == 'STR':\n material_property.wind_properties.stress_formulation = int(line_split[4])\n elif line_split[2] == 'ATT':\n material_property.wind_properties.attenuation = float(line_split[4])\n except:\n raise IOError(\"Error reading MP card from *.bc file.\")", "def _string2matrix(self,pokerhand_string):\n assert len(pokerhand_string)==14\n\n hand_matrix=pd.DataFrame(np.zeros([13,4],dtype=int),columns=['H','S','C','D'],index=list(range(2,15)))\n\n cards_list_string=pokerhand_string.split()\n\n for i in cards_list_string:\n hand_matrix.loc[self._converter(i[0]),i[1]]=1\n\n return hand_matrix" ]
[ "0.6624971", "0.6607469", "0.6566958", "0.62419844", "0.62304085", "0.6056891", "0.6044788", "0.5988233", "0.59559876", "0.58448565", "0.5821235", "0.581481", "0.5701939", "0.5653816", "0.5642834", "0.56326014", "0.56197953", "0.5609998", "0.55149424", "0.55072945", "0.54873663", "0.54724073", "0.54638064", "0.54059315", "0.5395709", "0.53701204", "0.53400487", "0.533606", "0.5321416", "0.5310121" ]
0.83194864
0
Seek current file to next byte after a delimiter bytestring. This seeks the file to the next byte following the delimiter. It does not return anything. Use ``file.tell()`` to see location afterwards.
def seek_delimiter(file, delimiter, blocksize): if file.tell() == 0: return last = b'' while True: current = file.read(blocksize) if not current: return full = last + current try: i = full.index(delimiter) file.seek(file.tell() - (len(full) - i) + len(delimiter)) return except ValueError: pass last = full[-len(delimiter):]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_delimiter_position(self, byte_data):\n\n # Find line delimiter.\n for delimiter, delimiter_str in ((b\"\\r\\n\", \"\\r\\n\"),\n (b\"\\n\", \"\\n\"),\n (b\"\\r\", \"\\r\")):\n pos = byte_data.find(delimiter)\n if pos >= 0:\n self._linesep = delimiter_str\n return pos + len(delimiter)\n return -1", "def _next_char(self):\n self.current_position += 1\n if self.current_position >= len(self.stream):\n self.current_char = \"\\0\"\n self.EOF = True\n else:\n self.current_char = self.stream[self.current_position]\n if self.current_char == \"\\n\":\n self.line_number += 1\n self.line_start_position = self.current_position", "def find_delimiter_position(self, byte_data): # pylint: disable=no-self-use\n return len(byte_data)", "def _read_until(self, c, chunk_size=96):\n s = io.BytesIO()\n fp = self._fp\n eof = False\n\n while True:\n chunk = fp.read(chunk_size)\n\n if not chunk:\n # The end of the file was reached. We'll bail out of the loop\n # and return everything we've read so far.\n eof = True\n break\n\n i = chunk.find(c)\n\n if i == -1:\n # We didn't find the character. Store the entire chunk.\n s.write(chunk)\n else:\n # We found the character. Store everything up to and including\n # it, and then go back in the stream for the next read.\n s.write(chunk[:i + 1])\n fp.seek(i + 1 - len(chunk), os.SEEK_CUR)\n break\n\n result = s.getvalue()\n s.close()\n\n return result, eof", "def advance_in_file(self, file_pos):\n if self.is_open:\n try:\n self.fd.seek(file_pos)\n except (AttributeError, ValueError): # pragma: debug\n if self.is_open:\n raise", "def read_until(self, delimiter, timeout=None):\n timeout_cnt = 0\n if timeout is not None:\n self._conn.settimeout(timeout)\n while True:\n delimiter_pos = self._buffer.find(delimiter)\n if delimiter_pos == -1:\n try:\n received = self._conn.recv(4096)\n timeout_cnt = 0\n except socket.timeout as exc:\n timeout_cnt += 1\n if timeout_cnt >= self.timeout_limit:\n raise TS3ConnectionClosedException(\n \"Socket connection timeout limit received!\"\n ) from exc\n continue\n if len(received) == 0:\n raise TS3ConnectionClosedException(\"Socket connection was closed!\")\n self._buffer += received\n else:\n break\n data = self._buffer[: delimiter_pos + len(delimiter)]\n self._buffer = self._buffer[delimiter_pos + len(delimiter) :]\n if timeout is not None:\n self._conn.settimeout(self.timeout)\n return data", "def get_next_marker(jpeg: Jpeg, file_: T.BinaryIO):\n seek_position = file_.tell()\n # ignore byte-stuffed FFs (0xFF, 0x00)\n def find_next_ff():\n byte = file_.read(1)\n while byte != b\"\\xFF\":\n byte = file_.read(1)\n if byte == b\"\":\n return None # EOF\n return file_.read(1) # read marker identifier (or 0x00)\n\n while True:\n marker_identifier = find_next_ff()\n if marker_identifier is None:\n return None # EOF\n elif marker_identifier != b\"\\x00\":\n break # not a byte stuffed thing!\n\n int_marker_id = struct.unpack(\"B\", marker_identifier)[0]\n\n if int_marker_id in MARKER_LOOKUP:\n found_marker = MARKER_LOOKUP[int_marker_id]\n print(\n \"Found marker {}, {}, {}\".format(\n hex(int_marker_id), found_marker.short, found_marker.name\n )\n )\n\n if found_marker.decoder is not None:\n found_marker.decoder(jpeg, file_)\n else:\n print(\"Unknown marker {}\".format(hex(int_marker_id)))\n\n return file_.tell() - 2 # right before the marker byte", "def lookahead(offset=1):\r\n\tindex = sourceIndex + offset\r\n\r\n\tif index > lastIndex:\r\n\t\t# We've read past the end of textFile.\r\n\t\t# Return the ENDMARK character.\r\n\t\treturn 
ENDMARK\r\n\telse:\r\n\t\treturn textFile[index]", "def read_until(steg_bytes: bytes, offset: int, ending: str):\r\n # Create a variable to hold the bytes read\r\n bytes_read = b\"\"\r\n\r\n # Loop through the steg_bytes\r\n while offset < len(steg_bytes):\r\n # Check if the current byte is the ending byte sequence\r\n if steg_bytes[offset:offset + len(ending)] == ending.encode():\r\n # Return the bytes read and the offset of the ending byte sequence\r\n return bytes_read, offset\r\n # Read the next byte\r\n bytes_read += steg_bytes[offset:offset + 1]\r\n offset += 1", "def read_block(f, offset, length, delimiter=None):\n if delimiter:\n f.seek(offset)\n seek_delimiter(f, delimiter, 2**16)\n start = f.tell()\n length -= start - offset\n\n f.seek(start + length)\n seek_delimiter(f, delimiter, 2**16)\n end = f.tell()\n\n offset = start\n length = end - start\n\n f.seek(offset)\n b = f.read(length)\n return b", "def process_next_char(self): \n self.current_position += 1\n if self.current_position >= len(self.code_input):\n '''End of file since the position is equal to or greater than the input's position'''\n self.current_char = '\\0' #EOF\n print('end of line')\n self.current_char = self.code_input[self.current_position]", "def seek_line(self, pattern):\n line = next(self.f)\n while not line.startswith(pattern):\n line = next(self.f)\n return line", "def next_character(self) -> str:\n return self.seek(self.index + 1)", "def _advance_line(self):\n self.current_index += 1\n if self.current_index >= len(self.file):\n self.current_line = 'EOF'\n return\n self.current_line = self.file[self.current_index].strip()\n while self.current_line.startswith('#') or self.current_line == '':\n self.current_index += 1\n if self.current_index >= len(self.file):\n self.current_line = 'EOF'\n return\n self.current_line = self.file[self.current_index].strip()\n self._gobble_comments()", "def peek_byte(self):\n try:\n return self._buffer[self.pos]\n except IndexError:\n raise self._eof", "def find_file_end(chunks, pos):\n\n\tpos = pos + 1\n\twhile pos < len(chunks)-1:\n\n\t\tif chunks[pos][0] != 0x100 and chunks[pos][0] != 0x102:\n\n\t\t\t# This is not a block\n\t\t\treturn pos\n\n\t\telse:\n\t\t\tpos = pos + 1\n\n\treturn pos", "def splitFileContents(f, delimiter, BLOCKSIZE=8192):\n remainder = StringIO()\n while True:\n block = f.read(BLOCKSIZE)\n if not block:\n break\n parts = block.split(delimiter)\n remainder.write(parts[0])\n for part in parts[1:]:\n yield remainder.getvalue()\n remainder = StringIO()\n remainder.write(part)\n yield remainder.getvalue()", "def _slurp_till(self, end_delimiters):\n cc = None\n if type(end_delimiters) in [str, unicode]:\n end_delimiters = [end_delimiters]\n\n while True:\n try:\n self._next()\n except StopIteration:\n raise NemoException('Expected delimiter %s but EOL found instead' % end_delimiters)\n\n self.buffer.write(self._c)\n\n for end in end_delimiters:\n if self._c == end and self._last_c != '\\\\':\n return", "def _seek(self, offset):\n assert offset % self.recordsize == 0\n file_number, file_offset = divmod(offset,\n self.filesize - self.header_size)\n self.open(file_number)\n self.fh_raw.seek(file_offset + self.header_size)\n self.offset = offset", "def __read_to(self, terminator: bytes) -> bytes:\r\n try:\r\n # noinspection PyTypeChecker\r\n i = self.data.index(terminator, self.idx)\r\n b = self.data[self.idx:i]\r\n self.idx = i + 1\r\n return b\r\n except ValueError:\r\n raise DecodingError(\r\n 'Unable to locate terminator character \"{0}\" after index 
{1}.'.format(str(terminator), str(self.idx)))", "def _next_char(self):\r\n\r\n if self._index >= len(self._input_string):\r\n return None\r\n\r\n ret = self._input_string[self._index]\r\n self._index += 1\r\n return ret", "def AdvanceToByteBoundary(self):\n bits_to_advance = (8 - self.idx_boff) % 8\n if bits_to_advance:\n self.idx_boff += bits_to_advance\n self.idx_boff %= 8\n self.idx_byte += 1", "def readuntil(self, separator=b'\\n'):\n seplen = len(separator)\n if seplen == 0:\n raise ValueError('Separator should be at least one-byte string')\n\n if self._exception is not None:\n raise self._exception\n\n # Consume whole buffer except last bytes, which length is\n # one less than seplen. Let's check corner cases with\n # separator='SEPARATOR':\n # * we have received almost complete separator (without last\n # byte). i.e buffer='some textSEPARATO'. In this case we\n # can safely consume len(separator) - 1 bytes.\n # * last byte of buffer is first byte of separator, i.e.\n # buffer='abcdefghijklmnopqrS'. We may safely consume\n # everything except that last byte, but this require to\n # analyze bytes of buffer that match partial separator.\n # This is slow and/or require FSM. For this case our\n # implementation is not optimal, since require rescanning\n # of data that is known to not belong to separator. In\n # real world, separator will not be so long to notice\n # performance problems. Even when reading MIME-encoded\n # messages :)\n\n # `offset` is the number of bytes from the beginning of the buffer\n # where there is no occurrence of `separator`.\n offset = 0\n\n # Loop until we find `separator` in the buffer, exceed the buffer size,\n # or an EOF has happened.\n while True:\n buflen = len(self._buffer)\n\n # Check if we now have enough data in the buffer for `separator` to\n # fit.\n if buflen - offset >= seplen:\n isep = self._buffer.find(separator, offset)\n\n if isep != -1:\n # `separator` is in the buffer. `isep` will be used later\n # to retrieve the data.\n break\n\n # see upper comment for explanation.\n offset = buflen + 1 - seplen\n if offset > self._limit:\n raise LimitOverrunError(\n 'Separator is not found, and chunk exceed the limit',\n offset)\n\n # Complete message (with full separator) may be present in buffer\n # even when EOF flag is set. This may happen when the last chunk\n # adds data which makes separator be found. 
That's why we check for\n # EOF *ater* inspecting the buffer.\n if self._eof:\n chunk = bytes(self._buffer)\n self._buffer.clear()\n raise IncompleteReadError(chunk, None)\n\n # _wait_for_data() will resume reading if stream was paused.\n yield from self._wait_for_data('readuntil')\n\n if isep > self._limit:\n raise LimitOverrunError(\n 'Separator is found, but chunk is longer than limit', isep)\n\n chunk = self._buffer[:isep + seplen]\n del self._buffer[:isep + seplen]\n self._maybe_resume_transport()\n return bytes(chunk)", "def skip(self) -> None:\n n = self.chunksize - self.size_read\n # maybe fix alignment\n if self.chunksize & 1:\n n = n + 1\n try:\n self.file.seek(n, 1)\n except (AttributeError, OSError): # Cannot seek, manually read.\n while self.size_read < self.chunksize:\n n = min(8192, self.chunksize - self.size_read)\n skipped = self.read(n)\n if not skipped:\n raise EOFError from None\n else:\n self.size_read = self.size_read + n", "def find_file_start(chunks, pos):\n\n\tpos = pos - 1\n\twhile pos > 0:\n\n\t\tif chunks[pos][0] != 0x100 and chunks[pos][0] != 0x102:\n\n\t\t\t# This is not a block\n\t\t\treturn pos\n\n\t\telse:\n\t\t\tpos = pos - 1\n\n\treturn pos", "def open_and_seek_past_bom(infilename):\r\n\r\n infile = open(infilename, \"r+b\")\r\n chunk = infile.read(BOMLEN * 2)\r\n if chunk.startswith(codecs.BOM_UTF8):\r\n infile.seek(BOMLEN)\r\n else:\r\n infile.seek(0)\r\n return infile", "def skipString(self, startingChar):\r\n\t\tif not startingChar in VALID_STRING_DELIMITERS:\r\n\t\t\traise ValueError(\"{} is not a valid string delimiter\".format(startingChar))\r\n\r\n\t\t# If the immediate next character ends the string (empty string), just move to the\r\n\t\t# next character\r\n\t\tif (self.peekChar() == startingChar):\r\n\t\t\tself.nextChar()\r\n\t\telse:\r\n\t\t\t# Find first occurrence of required string delimeter\r\n\t\t\t# from the remaining string, wheere it DOES NOT precede\r\n\t\t\t# with an escape character (\\)\r\n\t\t\tremainingString = self.source[(self.index + 1):]\r\n\t\t\tmatch = END_OF_STRING_REGEXES[startingChar].search(remainingString)\r\n\t\t\tif match:\r\n\t\t\t\tself.index += match.end()\r\n\t\t\t# If an occurrence was not found, we've reached the end of the string\t\r\n\t\t\telse:\r\n\t\t\t\tself.index = len(self.source)", "def next_byte(self):\r\n return self.next_bytes(1)", "def read_block(file, block_size):\n block = b\"\"\n for i in range(block_size):\n this_byte = file.read(1)\n # If the last block consumed the last char in file:\n if this_byte == b\"\" and i == 0:\n return (-1, False)\n # If we reach EOF prematurely:\n elif this_byte == b\"\":\n block += chr(0).encode()*(block_size - i)\n return (block, False)\n else:\n block += this_byte\n return (block, True)", "def getnextbyte(data, index):\n v = data[index]\n index += 1\n if v == 0x10:\n v = data[index]\n index += 1\n\n return v,index" ]
[ "0.5966974", "0.58046436", "0.55704606", "0.54566735", "0.54178035", "0.54062945", "0.53535044", "0.5340305", "0.53359973", "0.531412", "0.5304874", "0.528317", "0.52717334", "0.5265855", "0.52260786", "0.5221168", "0.52211565", "0.51817477", "0.51549596", "0.5100815", "0.4989173", "0.4975541", "0.4958949", "0.49495924", "0.49403316", "0.4929173", "0.488102", "0.48645288", "0.48442453", "0.48267257" ]
0.7784415
0
Test case for get_token_supply_all_using_get
def test_get_token_supply_all_using_get(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_token_supply_using_get(self):\n pass", "def test_get_tokens():\n pass", "def test_listtem_using_get(self):\n pass", "def test_gettem_using_get(self):\n pass", "def test_list_o_auth_authorize_token(self):\n pass", "def test_get_device_token(self):\n pass", "def test_get_initial_token():\n pass", "def test_get_list(self):\n pass", "def test_get_all_tokens_authenticated_user(self):\r\n\r\n user = UserFactory.create_batch(2)[1]\r\n user.info = create_tokens_for(user)\r\n\r\n res = self.app.get('api/token?api_key=' + user.api_key)\r\n data = json.loads(res.data)\r\n\r\n for provider in TokenAPI.oauth_providers:\r\n token_name = '%s_token' % provider\r\n assert data.get(token_name) is not None, data", "def test_list_o_auth_access_token(self):\n pass", "def test_TreebankTokenReader():", "def test_get_tokens():\n tokens = get_tokens()\n assert tokens[\"token_type\"] == \"Bearer\"\n assert tokens[\"access_token\"] is not None\n assert tokens[\"expires_at\"] is not None\n assert tokens[\"expires_in\"] is not None\n assert tokens[\"refresh_token\"] is not None\n\n assert \"token_type\" in tokens\n assert \"access_token\" in tokens\n assert \"expires_at\" in tokens\n assert \"expires_in\" in tokens\n assert \"refresh_token\" in tokens\n\n assert tokens[\"expires_at\"] > int(time.time())", "def test_get_token(\n self,\n mock_init_credentials_function,\n mock_generate_token_function\n ): # pylint: disable=unused-argument\n\n with self.app.app_context():\n url = '/donation/braintree/get-token'\n\n # Ensure a GET with no saved agents returns 0.\n\n response = self.test_client.get( url, headers=self.headers )\n self.assertEqual( json.loads( response.data.decode( 'utf-8' ) ), BRAINTREE_TOKEN )", "def test_discover_tokens(self):\n self.assertEqual(CloudCredentials.objects.count(), 0)\n with HTTMock(spark_cloud_mock):\n found = CloudCredentials.objects._discover_tokens(self.cloud)\n self.assertEqual(CloudCredentials.objects.count(), 1)\n self.assertEqual(CloudCredentials.objects._access_token(), ACCESS_TOKEN)", "def test_kyc_get_legal_list(self):\n pass", "def test_get_tokens(self):\n tu = get_tu('int foo(int i);')\n foo = get_cursor(tu, 'foo')\n\n tokens = list(foo.get_tokens())\n self.assertEqual(len(tokens), 6)\n self.assertEqual(tokens[0].spelling, 'int')\n self.assertEqual(tokens[1].spelling, 'foo')", "def test_get_transaction_list_request(self):\n self.trans_details.get_transaction_list(\n batch_id = 123456,\n )", "def test_get2(self):\n pass", "def test_get(self):\n pass", "def _candidates(self, token):", "def test_quote_guest_payment_method_management_v1_get_list_get(self):\n pass", "def _single_fetch(self, to_skip: int) -> set:\r\n all_tokens = []\r\n query = gql_queries.generate_query_all_tokens(to_skip)\r\n \r\n r = requests.post(\r\n self.graph_node_url,\r\n json = {\"query\": query},\r\n headers = {\"Content-Type\": \"application/json\"}\r\n )\r\n \r\n if r.status_code == 200:\r\n for t in r.json()[\"data\"][\"tokens\"]:\r\n all_tokens.append(\r\n Token(\r\n t[\"name\"],\r\n t[\"symbol\"],\r\n self.w3.toChecksumAddress(t[\"id\"]),\r\n t[\"decimals\"]\r\n ).__dict__() # Hacky way to bypass the use of Token for now\r\n )\r\n return all_tokens\r\n else:\r\n return []", "def test_wallets_get_list(self):\n pass", "def test_tenants_cardtoken(self):\n pass", "def test_get_currency_all_using_get(self):\n pass", "def test_get_scan(self):\n pass", "def test_get1(self):\n pass", "def test_get_token(self):\n token = self.client.get_token()\n assert_equals(token, self.client.token)", "def 
test_csc_authorization_request_list_normal_user(self):\n # Arrange:\n self.client.credentials(\n HTTP_AUTHORIZATION=\"Token \" + self.token_user_normal.key\n )\n\n # Act:\n url = reverse(\"authlistrequest-list\")\n response = self.client.get(url, format=\"json\")\n\n # Assert\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.data), 2)", "def test_get_all_existing_tokens_authenticated_user(self):\r\n\r\n user = UserFactory.create_batch(2)[1]\r\n user.info = create_tokens_for(user)\r\n del user.info['google_token']\r\n\r\n res = self.app.get('api/token?api_key=' + user.api_key)\r\n data = json.loads(res.data)\r\n\r\n assert data.get('twitter_token') is not None, data\r\n assert data.get('facebook_token') is not None, data\r\n assert data.get('google_token') is None, data" ]
[ "0.8238931", "0.6645593", "0.6224392", "0.6151389", "0.5890228", "0.58499545", "0.58307004", "0.5781016", "0.5767589", "0.57456815", "0.5711572", "0.5708087", "0.5686729", "0.5580533", "0.55687904", "0.5568298", "0.5537261", "0.552446", "0.5519415", "0.5503504", "0.54897326", "0.54714483", "0.54566723", "0.5449632", "0.54445237", "0.5434388", "0.543031", "0.5423366", "0.5406277", "0.5404687" ]
0.904204
0
Test case for get_token_supply_using_get
def test_get_token_supply_using_get(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_token_supply_all_using_get(self):\n pass", "def test_get_tokens():\n pass", "def test_get_initial_token():\n pass", "def test_get_device_token(self):\n pass", "def test_get_token(self):\n token = self.client.get_token()\n assert_equals(token, self.client.token)", "def test_get_token(\n self,\n mock_init_credentials_function,\n mock_generate_token_function\n ): # pylint: disable=unused-argument\n\n with self.app.app_context():\n url = '/donation/braintree/get-token'\n\n # Ensure a GET with no saved agents returns 0.\n\n response = self.test_client.get( url, headers=self.headers )\n self.assertEqual( json.loads( response.data.decode( 'utf-8' ) ), BRAINTREE_TOKEN )", "def test_gettem_using_get(self):\n pass", "def test_tracking_context(self):\n self.assert_expected_token_value({'foo': 'bar'})", "def test_TreebankTokenReader():", "def test_get_token(tmpdir):\n test_config = MetagenscopeConfiguration('.metagenscope.ini', str(tmpdir))\n assert test_config.get_token(default=None) == None", "def getToken(self):\n \n raise NotImplementedError", "def test_read_o_auth_authorize_token(self):\n pass", "def test_read_o_auth_access_token(self):\n pass", "def test_generate_token(self):\n door_pass = DoorPassFactory.build()\n token = door_pass.generate_token()\n self.assertIsInstance(token, str)\n self.assertEqual(len(token), 40)", "def test_get_token(client, request):\n res = client.get('/token?uid={}'.format(settings.FIREBASE_UID))\n\n assert res.status_code == 200\n assert len(res.data) > 0", "def test_get_tokens():\n tokens = get_tokens()\n assert tokens[\"token_type\"] == \"Bearer\"\n assert tokens[\"access_token\"] is not None\n assert tokens[\"expires_at\"] is not None\n assert tokens[\"expires_in\"] is not None\n assert tokens[\"refresh_token\"] is not None\n\n assert \"token_type\" in tokens\n assert \"access_token\" in tokens\n assert \"expires_at\" in tokens\n assert \"expires_in\" in tokens\n assert \"refresh_token\" in tokens\n\n assert tokens[\"expires_at\"] > int(time.time())", "def test_access_token_get(self):\n client = oauth.Client(self.consumer, None)\n resp, content = client.request(self._uri('request_token'), \"GET\")\n\n self.assertEqual(int(resp['status']), 200)", "def get_token(self, *args, **kwargs):\n if \"SHIB-ECP\" == self._auth_mode:\n return self._shib_get_token(*args, **kwargs)\n elif \"MAST-AUTH\" == self._auth_mode:\n return self._get_token(*args, **kwargs)\n else:\n raise Exception(\"Unknown MAST Auth mode %s\" % self._auth_mode)", "def test_tenants_cardtoken(self):\n pass", "def test_0001(self):\n assert self.vca.token", "def test_0001(self):\n assert self.vca.token", "def test_0001(self):\n assert self.vca.token", "def test_listtem_using_get(self):\n pass", "async def solicit_token(url, scope):\n rc = RestClient(url, \"\")\n result = await rc.request(\"GET\", f\"/token?scope={scope}\")\n print(result[\"access\"])", "def get_token():\n params = {'get_token': 'get_token'}\n return load_page(API, params=params, headers={'content-type': 'application/json'})['token']", "def test_get_tokens(self):\n tu = get_tu('int foo(int i);')\n foo = get_cursor(tu, 'foo')\n\n tokens = list(foo.get_tokens())\n self.assertEqual(len(tokens), 6)\n self.assertEqual(tokens[0].spelling, 'int')\n self.assertEqual(tokens[1].spelling, 'foo')", "def testGetToken(self):\n # Token is base64 for a json object so always starts with '{\"'\n self.assertTrue(self.dl_object._access_token.startswith('eyJ'))\n self.assertTrue(len(self.dl_object._access_token) > 100)", "def test_get(self):\n 
pass", "def test_token_interface(mysterium_token, team_multisig, token_new_name, token_new_symbol):\n\n assert mysterium_token.call().totalSupply() == 0\n assert mysterium_token.call().symbol() == \"MYST\"\n assert mysterium_token.call().name() == \"Mysterium\"\n assert mysterium_token.call().decimals() == 8\n assert mysterium_token.call().owner() == team_multisig\n assert mysterium_token.call().upgradeMaster() == team_multisig\n\n mysterium_token.transact({\"from\": team_multisig}).setTokenInformation(token_new_name, token_new_symbol)\n assert mysterium_token.call().name() == token_new_name\n assert mysterium_token.call().symbol() == token_new_symbol", "def test_get2(self):\n pass" ]
[ "0.8064323", "0.67642564", "0.6594378", "0.6495325", "0.6450932", "0.6386458", "0.6166247", "0.6035702", "0.602632", "0.6020494", "0.5912643", "0.5754508", "0.5745325", "0.5740001", "0.57172304", "0.5709296", "0.5706441", "0.5696055", "0.5685541", "0.56594896", "0.56594896", "0.56594896", "0.5646562", "0.5636619", "0.5609005", "0.55859643", "0.55855536", "0.556819", "0.555795", "0.55278707" ]
0.89617294
0
Returns the list of fields in the given model instance.
def get_fields_in_model(instance: Any) -> List:
    from auditlog.registry import auditlog

    attrs = object_mapper(instance).iterate_properties
    model_attrs = auditlog.get_model_fields(instance.__class__)
    if model_attrs['include_fields']:
        attrs = (attr for attr in attrs if attr.key in model_attrs['include_fields'])
    if model_attrs['exclude_fields']:
        attrs = (attr for attr in attrs if attr.key not in model_attrs['exclude_fields'])
    return attrs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_fields_in_model(instance):\n assert isinstance(instance, Document)\n return instance._fields", "def get_model_fields(self):\n fields = []\n\n for field in self.model._meta.get_fields():\n fields.append(field.name)\n\n return fields", "def get_fields(cls):\n return cls.fields.values()", "def model_fields(cls):\n members = inspect.getmembers(cls, lambda a: not (inspect.isroutine(a)))\n return [m for m in members if issubclass(m[1].__class__, fields.Field)]", "def get_fieldlist(cls):\n return cls.fieldlist", "def get_fields(self):\n return self.fields", "def get_fields(self):\n return self.fields", "def get_fields(model, fields=None):\n include = [f.strip() for f in fields.split(',')] if fields else None\n return utils.get_fields(\n model,\n include\n )", "def get_fields(self):\n\n\t\treturn self.__fields", "def __fields(self):\n return [self.__class__.__dict__[f] for f in self.__class__._fields]", "def get_fields(cls):\n return map(lambda x: getattr(cls, x), cls.get_field_names())", "def get_fields(self):\r\n return self.fields", "def _get_fields(self):\n return self._fields", "def all_fields(cls):\n return cls.__by_name.values()", "def fields(cls):\n if not hasattr(cls, '_fields'):\n cls.parse_attributes()\n return cls._fields", "def listFields(self):\n return self.get_json('/field')", "def get_fields(self):\n field_list = []\n for field in self._meta.local_fields:\n if not field.primary_key:\n field_list.append([field.verbose_name.title(),\n self.__getattribute__(field.name),\n field.get_internal_type()])\n return field_list", "def fields(self) -> List[Field]: # pragma: no cover\n pass", "def fields(self) -> List[SingleField]:\n return self._fields", "def get_field_names(cls):\n return cls._meta.get_field_names()", "def fields(self):\n if self._fields is None:\n self._init_fields()\n return self._fields", "def get_fields(self):\n\t\treturn self.__fields.copy()", "def get_fields(self, pager=None):\n return Field.deserialize_list(self._get_multiple('fields', {}, pager))", "def extract(self):\n self.field_list = []\n \n try:\n self.mfields = self.getModel()._meta.fields\n if(self.mfields):\n try:\n for model_fields in self.mfields:\n if(model_fields.name == \"id\"):\n pass \n \n elif(model_fields.name == \"pci\"):\n pass \n elif(model_fields.name == \"sci\"):\n pass \n elif(model_fields.name == \"validated\"):\n pass \n else:\n self.field_list.append(model_fields.name)\n return self.field_list\n except:\n raise \n else:\n return None \n except:\n raise", "def get_all_fields(self):\n fields = []\n for f in self._meta.fields:\n\n fname = f.name \n # resolve picklists/choices, with get_xyz_display() function\n get_choice = 'get_'+fname+'_display'\n if hasattr( self, get_choice):\n value = getattr( self, get_choice)()\n else:\n try :\n value = getattr(self, fname)\n except User.DoesNotExist:\n value = None\n\n # only display fields with values and skip some fields entirely\n if f.editable and value and f.name not in ('id', 'status', 'workshop', 'user', 'complete') :\n\n fields.append(\n {\n 'label':f.verbose_name, \n 'name':f.name, \n 'value':value,\n }\n )\n return fields", "def get_fields_list(self):\n return self.description[\"fields\"][\"values\"].keys()", "def fields(cls):\n return cls._nameToValue", "def get_fields(self, key=None):\n return self._get_query('fields', self._build_params(key=key), Field)", "def getFields(self):\n return sorted(self.schema.fields, key=lambda f: f.name)", "def model_fields(self):\n converter = connections[self.db].introspection.identifier_converter\n 
model_fields = {}\n for field in self.model._meta.fields:\n name, column = field.get_attname_column()\n model_fields[converter(column)] = field\n return model_fields" ]
[ "0.8146898", "0.7883229", "0.75082386", "0.74458134", "0.73897547", "0.72630495", "0.72630495", "0.7256735", "0.7246862", "0.7218282", "0.71928096", "0.7170339", "0.7128551", "0.7006814", "0.6986545", "0.69208544", "0.69141835", "0.6906648", "0.68992066", "0.6889762", "0.6853559", "0.6819043", "0.68110454", "0.6748386", "0.6719595", "0.66780245", "0.66691905", "0.6624211", "0.66195977", "0.6607744" ]
0.8100586
1
Initiate rebuild on source file changes. build_cb: building function, to be called on source changes. interval: optional keyword argument, defines the period (in seconds) between checks. Any other subsequent positional and/or keyword arguments will be passed to build_cb. Every found dependency (to be watched for changes) build_cb must append to deps_list, which is passed to it as an additional keyword argument.
def watch(build_cb, *args, interval=1, **kwargs):
    from time import ctime, sleep, time
    from traceback import print_exc

    basetime = None
    last_stamp = None
    watch_list = None
    while True:
        if watch_list is not None and last_stamp == timestamp(watch_list, basetime):
            try:
                sleep(interval)
            except KeyboardInterrupt:
                print('Keyboard interrupt received, exiting.')
                break
            else:
                continue
        basetime = time()
        print('\nrebuild started [{}]'.format(ctime(basetime)))
        watch_list = []
        try:
            build_cb(*args, deps_list=watch_list, **kwargs)
        except Exception:
            print_exc()
            on_event(settings.on_fail)
        else:
            on_event(settings.on_success)
        if not watch_list:
            raise Exception('watch list is empty')
        last_stamp = timestamp(watch_list, basetime)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def autoBuildTick (self, event = None):\r\n for pathname, oldmtime in self.autobuildfiles.iteritems():\r\n newmtime = os.stat(pathname).st_mtime\r\n if newmtime != oldmtime:\r\n #print \"Auto rebuild triggered by: \", pathname\r\n self.autobuildfiles[pathname] = newmtime\r\n self.rebuild()\r\n break", "def build(cfg, jobs, watch):\n libjobs.buildJobs(cfg, jobs, watch)", "def start_updater(self, interval, clbk):\n self._scheduler = BlockingScheduler(executors={\n 'default': {'type': 'threadpool', 'max_workers': 1}\n })\n\n def job():\n clbk(self.check_feeds())\n\n self._scheduler.add_job(job, trigger='interval', minutes=interval)\n self._scheduler.start()", "def run_every(self, callback, start, interval, auto_constraints=False, **kwargs):\n if not auto_constraints:\n return super().run_every(callback, start, interval, **kwargs)\n\n return self._attach_constraints(\n super().run_every, callback, start, interval, **kwargs\n )", "def rebuild_cache(build_name=None,sources=None,target=None,force_build=False):\n if build_name:\n sources = mongo.get_source_fullnames(build_manager.list_sources(build_name))\n target = mongo.get_latest_build(build_name)\n elif sources:\n sources = mongo.get_source_fullnames(sources)\n if not sources and not target:\n raise Exception(\"No valid sources found\")\n\n def rebuild(col):\n cur = mongo.id_feeder(col,batch_size=10000,logger=config.logger,force_build=force_build)\n [i for i in cur] # just iterate\n\n @asyncio.coroutine\n def do(srcs,tgt):\n pinfo = {\"category\" : \"cache\",\n \"source\" : None,\n \"step\" : \"rebuild\",\n \"description\" : \"\"}\n config.logger.info(\"Rebuild cache for sources: %s, target: %s\" % (srcs,tgt))\n for src in srcs:\n # src can be a full name (eg. clinvar.clinvar_hg38) but id_feeder knows only name (clinvar_hg38)\n if \".\" in src:\n src = src.split(\".\")[1]\n config.logger.info(\"Rebuilding cache for source '%s'\" % src)\n col = mongo.get_src_db()[src]\n pinfo[\"source\"] = src\n job = yield from job_manager.defer_to_thread(pinfo, partial(rebuild,col))\n yield from job\n config.logger.info(\"Done rebuilding cache for source '%s'\" % src)\n if tgt:\n config.logger.info(\"Rebuilding cache for target '%s'\" % tgt)\n col = mongo.get_target_db()[tgt]\n pinfo[\"source\"] = tgt\n job = job_manager.defer_to_thread(pinfo, partial(rebuild,col))\n yield from job\n\n task = asyncio.ensure_future(do(sources,target))\n return task", "def watch_build_config_list(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_build_config_list\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/oapi/v1/watch/buildconfigs'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n 
local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='JsonWatchEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def setup_schedule():\n for project in Project.select():\n if (project.schedule_interval is not None) and (project.schedule_interval > 0):\n schedule.add_job(pull_build_project, \"interval\", id=\"building_\" + str(project.id),\n hours=project.schedule_interval,\n args=[project, \"master\"])", "def watch_build_list(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_build_list\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/oapi/v1/watch/builds'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='JsonWatchEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def run():\n ftpd_thread = mini_buildd.misc.run_as_thread(\n mini_buildd.ftpd.run,\n name=\"ftpd\",\n bind=get().model.ftpd_bind,\n queue=get().incoming_queue)\n\n builder_thread = mini_buildd.misc.run_as_thread(\n mini_buildd.builder.run,\n name=\"builder\",\n daemon_=get())\n\n while True:\n event = get().incoming_queue.get()\n if event == \"SHUTDOWN\":\n break\n\n try:\n LOG.info(\"Status: {0} active packages, {1} changes waiting in incoming.\".\n format(len(get().packages), get().incoming_queue.qsize()))\n\n changes = None\n changes = mini_buildd.changes.Changes(event)\n\n if changes.type == changes.TYPE_BREQ:\n # Build request: builder\n\n def queue_buildrequest(event):\n \"\"\"Queue in extra thread so we don't block here in case 
builder is busy.\"\"\"\n get().build_queue.put(event)\n mini_buildd.misc.run_as_thread(queue_buildrequest, name=\"build queuer\", daemon=True, event=event)\n\n else:\n # User upload or build result: packager\n mini_buildd.packager.run(\n daemon=get(),\n changes=changes)\n\n except BaseException as e:\n mini_buildd.config.log_exception(LOG, \"Invalid changes file\", e)\n\n # Try to notify\n try:\n with mini_buildd.misc.open_utf8(event, \"r\") as body:\n subject = \"INVALID CHANGES: {c}: {e}\".format(c=event, e=e)\n get().model.mbd_notify(subject, body.read())\n except BaseException as e:\n mini_buildd.config.log_exception(LOG, \"Invalid changes notify failed\", e)\n\n # Try to clean up\n try:\n if changes:\n changes.remove()\n else:\n os.remove(event)\n except BaseException as e:\n mini_buildd.config.log_exception(LOG, \"Invalid changes cleanup failed\", e)\n\n finally:\n get().incoming_queue.task_done()\n\n get().build_queue.put(\"SHUTDOWN\")\n mini_buildd.ftpd.shutdown()\n builder_thread.join()\n ftpd_thread.join()\n\n # keyrings.close() is not called implicitly; this leaves tmp files around.\n # There should be a nicer way, really...\n try:\n get().keyrings.close()\n except BaseException:\n pass", "def build(ctx, max_revisions, targets, operators, archiver):\n config = ctx.obj[\"CONFIG\"]\n\n from wily.commands.build import build\n\n if max_revisions:\n logger.debug(f\"Fixing revisions to {max_revisions}\")\n config.max_revisions = max_revisions\n\n if operators:\n logger.debug(f\"Fixing operators to {operators}\")\n config.operators = operators.strip().split(\",\")\n\n if archiver:\n logger.debug(f\"Fixing archiver to {archiver}\")\n config.archiver = archiver\n\n if targets:\n logger.debug(f\"Fixing targets to {targets}\")\n config.targets = targets\n\n build(\n config=config,\n archiver=resolve_archiver(config.archiver),\n operators=resolve_operators(config.operators),\n )\n logger.info(\n _(\n \"Completed building wily history, run `wily report <file>` or `wily index` to see more.\"\n )\n )", "def run(self, args=[]):\n # Assert correct configuration.\n assert self.c.cbb.config, 'An empty configuration was specified.'\n assert self.c.cbb.builddir, 'A build directory name must be specified.'\n\n # Load properties from the commit being processed. This requires both a\n # repository and revision to proceed.\n repository = self.m.properties.get('repository')\n revision = self.m.properties.get('revision')\n if repository and revision:\n # Pull more information from the commit if it came from certain known\n # repositories.\n if (self.c.use_chrome_version and\n self.check_repository('chromium', repository)):\n # If our change comes from a Chromium repository, add the\n # '--chrome_version' flag.\n self.c.cbb.chrome_version = self.m.properties['revision']\n if (self.c.read_cros_manifest and\n self.check_repository('cros_manifest', repository)):\n # This change comes from a manifest repository. 
Load configuration\n # parameters from the manifest command.\n self.load_manifest_config(repository, revision)\n\n buildroot = self.m.path['root'].join('cbuild', self.c.cbb.builddir)\n cbb_args = [\n '--buildroot', buildroot,\n ]\n if not args:\n cbb_args.append('--buildbot')\n if self.c.chromite_branch and not self.c.cbb.disable_bootstrap:\n cbb_args.extend(['--branch', self.c.chromite_branch])\n if self.c.cbb.build_number is not None:\n cbb_args.extend(['--buildnumber', self.c.cbb.build_number])\n if self.c.cbb.chrome_rev:\n cbb_args.extend(['--chrome_rev', self.c.cbb.chrome_rev])\n if self.c.cbb.debug:\n cbb_args.extend(['--debug'])\n if self.c.cbb.clobber:\n cbb_args.extend(['--clobber'])\n if self.c.cbb.chrome_version:\n cbb_args.extend(['--chrome_version', self.c.cbb.chrome_version])\n if self.c.cbb.config_repo:\n cbb_args.extend(['--config_repo', self.c.cbb.config_repo])\n if self.c.repo_cache_dir and self.c.cbb.supports_repo_cache:\n cbb_args.extend(['--repo-cache', self.c.repo_cache_dir])\n\n # Set the build ID, if specified.\n if self.c.cbb.build_id:\n cbb_args.extend(['--master-build-id', self.c.cbb.build_id])\n\n # Add custom args, if there are any.\n cbb_args.extend(args)\n\n # Run cbuildbot.\n return self.cbuildbot(str('cbuildbot [%s]' % (self.c.cbb.config,)),\n self.c.cbb.config,\n args=cbb_args,\n cwd=self.m.path['slave_build'])", "def rebuild(working_directory=None, args=None):\n\n from .rebuildme import main\n if args is None:\n args = []\n return main(working_directory, args)", "def monitor_project_build(self, project_name):\n pass", "def watch_namespaced_build_config_list(self, namespace, **kwargs):\n\n all_params = ['namespace', 'pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_namespaced_build_config_list\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `watch_namespaced_build_config_list`\")\n\n resource_path = '/oapi/v1/watch/namespaces/{namespace}/buildconfigs'.replace('{format}', 'json')\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n 
path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='JsonWatchEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def rebuild(options, project_directory=None):\n if options.help:\n print rebuild.__doc__\n sys.exit(1)\n\n if not project_directory:\n project_directory = os.getcwd()\n action_rebuild(project_directory)", "def trigger_job(revision, buildername, times=1, files=None, dry_run=False,\n extra_properties=None):\n repo_name = query_repo_name_from_buildername(buildername)\n builder_to_trigger = None\n list_of_requests = []\n LOG.info(\"We want to trigger '%s' on revision '%s' a total of %d time(s).\" %\n (buildername, revision, times))\n\n if not buildapi.valid_revision(repo_name, revision):\n return list_of_requests\n\n if not valid_builder(buildername):\n LOG.error(\"The builder %s requested is invalid\" % buildername)\n # XXX How should we exit cleanly?\n exit(-1)\n\n if files:\n builder_to_trigger = buildername\n _all_urls_reachable(files)\n else:\n builder_to_trigger, files = _determine_trigger_objective(\n revision,\n buildername,\n )\n\n if builder_to_trigger != buildername and times != 1:\n # The user wants to trigger a downstream job,\n # however, we need a build job instead.\n # We should trigger the downstream job multiple times, however,\n # we only trigger the upstream jobs once.\n LOG.debug(\"Since we need to trigger a build job we don't need to \"\n \"trigger it %s times but only once.\" % times)\n LOG.info(\"In order to trigger %s %i times, please run the script again after %s ends.\"\n % (buildername, times, builder_to_trigger))\n times = 1\n\n if builder_to_trigger:\n if dry_run:\n LOG.info(\"Dry-run: We were going to request '%s' %s times.\" %\n (builder_to_trigger, times))\n # Running with dry_run being True will only output information\n trigger(builder_to_trigger, revision, files, dry_run, extra_properties)\n else:\n for _ in range(times):\n req = trigger(builder_to_trigger, revision, files, dry_run, extra_properties)\n if req is not None:\n list_of_requests.append(req)\n else:\n LOG.debug(\"Nothing needs to be triggered\")\n\n return list_of_requests", "def watch_namespaced_build_config(self, namespace, name, **kwargs):\n\n all_params = ['namespace', 'name', 'pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_namespaced_build_config\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `watch_namespaced_build_config`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `watch_namespaced_build_config`\")\n\n resource_path = '/oapi/v1/watch/namespaces/{namespace}/buildconfigs/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 
'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='JsonWatchEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def trigger_range(buildername, revisions, times=1, dry_run=False, files=None):\n repo_name = query_repo_name_from_buildername(buildername)\n LOG.info(\"We want to have %s job(s) of %s on revisions %s\" %\n (times, buildername, str(revisions)))\n for rev in revisions:\n LOG.info(\"\")\n LOG.info(\"=== %s ===\" % rev)\n if not buildapi.valid_revision(repo_name, rev):\n LOG.info(\"We can't trigger anything on pushes that the revision is not valid for \"\n \"buildapi.\")\n continue\n\n LOG.info(\"We want to have %s job(s) of %s on revision %s\" %\n (times, buildername, rev))\n\n # 1) How many potentially completed jobs can we get for this buildername?\n jobs = query_jobs(repo_name, rev)\n matching_jobs = _matching_jobs(buildername, jobs)\n successful_jobs, pending_jobs, running_jobs = _status_summary(matching_jobs)[0:3]\n\n potential_jobs = pending_jobs + running_jobs + successful_jobs\n LOG.debug(\"We found %d pending jobs, %d running jobs and %d successful_jobs.\" %\n (pending_jobs, running_jobs, successful_jobs))\n\n if potential_jobs >= times:\n LOG.info(\"We have %d job(s) for '%s' which is enough for the %d job(s) we want.\" %\n (potential_jobs, buildername, times))\n\n else:\n # 2) If we have less potential jobs than 'times' instances then\n # we need to fill it in.\n LOG.info(\"We have found %d potential job(s) matching '%s' on %s. 
\"\n \"We need to trigger more.\" % (potential_jobs, buildername, rev))\n\n # If a job matching what we want already exists, we can\n # use the retrigger API in self-serve to retrigger that\n # instead of creating a new arbitrary job\n if len(matching_jobs) > 0:\n request_id = matching_jobs[0][\"requests\"][0][\"request_id\"]\n buildapi.make_retrigger_request(\n repo_name,\n request_id,\n count=(times - potential_jobs),\n dry_run=dry_run)\n\n # If no matching job exists, we have to trigger a new arbitrary job\n else:\n list_of_requests = trigger_job(\n revision=rev,\n buildername=buildername,\n times=(times - potential_jobs),\n dry_run=dry_run,\n files=files)\n\n if list_of_requests and any(req.status_code != 202 for req in list_of_requests):\n LOG.warning(\"Not all requests succeeded.\")\n\n # TODO:\n # 3) Once we trigger a build job, we have to monitor it to make sure that it finishes;\n # at that point we have to trigger as many test jobs as we originally intended\n # If a build job does not finish, we have to notify the user... what should it then\n # happen?", "def parse_deps():\n Files = []\n Dependencies = []\n TimeBins = ['recover_parameters', 'startup', 'wragh', 'paramcheck',\n 'preregridinitial', 'postregridinitial', 'basegrid', \n 'initial', 'postinitial', 'postrestrictinitial', \n 'postpostinitial', 'recover_variables', \n 'post_recover_variables', 'cpinitial', 'checkpoint', \n 'preregrid', 'postregrid', 'prestep', 'evol', 'postrestrict', \n 'poststep', 'analysis', 'terminate', 'shutdown']\n\n implement_re = re.compile('implements:\\s*(\\w+)', re.I)\n inherit_re = re.compile('inherits:\\s*(.+)', re.I)\n provides_function_re = re.compile('PROVIDES\\s+FUNCTION\\s+(\\w+)', re.I)\n uses_function_re = re.compile('USES\\s+FUNCTION\\s+(\\w+)', re.I)\n requires_function_re = re.compile('REQUIRES\\s+FUNCTION\\s+(\\w+)', re.I)\n shares_re = re.compile('shares:\\s*(\\w+)', re.I)\n requires_thorn_re = re.compile('REQUIRES\\s+(?!FUNCTION\\s*)(\\w+)', re.I)\n schedules_function_re = re.compile('schedule\\s+(?:group\\s+)?(\\w+)\\s+(?:in|at)\\s+(\\w+)', re.I)\n\n # find all interface.ccl and param.ccl files in cwd\n Cactus_Path = os.path.expanduser('~/Cactus/')\n for dirpath, dirnames, filenames in os.walk(Cactus_Path + 'arrangements', followlinks=True):\n for file in filenames:\n if file == 'interface.ccl':\n Files.append(os.path.join(dirpath, file))\n\n for file in Files:\n # first parse interface.ccl\n try:\n fptr = open(file, 'r')\n except IOError:\n print(\"Could not open %s\" % file) \n\n lines = fptr.readlines()\n\n try:\n fptr.close()\n except IOError:\n print(\"Could not close %s\" % file) \n\n # then parse param.ccl\n file = re.sub('interface.ccl', 'param.ccl', file)\n\n try:\n fptr = open(file, 'r')\n except IOError:\n print(\"Could not open %s\" % file) \n\n lines += fptr.readlines()\n\n try:\n fptr.close()\n except IOError:\n print(\"Could not close %s\" % file) \n\n # then configuration.ccl\n file = re.sub('param.ccl', 'configuration.ccl', file)\n\n try:\n fptr = open(file, 'r')\n lines += fptr.readlines()\n fptr.close()\n except IOError:\n pass\n\n # then schedule.ccl\n file = re.sub('configuration.ccl', 'schedule.ccl', file)\n\n try:\n fptr = open(file, 'r')\n lines += fptr.readlines()\n fptr.close()\n except IOError:\n pass\n\n # get the thorn dir and its parent\n thornname = os.path.basename(os.path.dirname(file))\n parentdir = os.path.basename(os.path.dirname(os.path.dirname(file)))\n thornname = os.path.join(parentdir, thornname)\n file_dict = {'name' : thornname.lower()}\n 
for line in lines:\n line = line.strip()\n m = re.match(implement_re, line)\n if m:\n file_dict['implements'] = m.group(1).lower()\n\n m = re.match(inherit_re, line)\n if m:\n inheritance = re.split('\\W+', m.group(1).lower())\n file_dict['inherits'] = inheritance\n\n m = re.match(provides_function_re, line)\n if m:\n try:\n file_dict['provides_function'].append(m.group(1).lower())\n except KeyError:\n file_dict['provides_function'] = [m.group(1).lower()]\n\n m = re.match(uses_function_re, line)\n if m:\n try:\n file_dict['uses_function'].append(m.group(1).lower())\n except KeyError:\n file_dict['uses_function'] = [m.group(1).lower()]\n\n m = re.match(requires_function_re, line)\n if m:\n try:\n file_dict['requires_function'].append(m.group(1).lower())\n except KeyError:\n file_dict['requires_function'] = [m.group(1).lower()]\n\n m = re.match(requires_thorn_re, line)\n if m:\n requires = re.split('\\W+', m.group(1).lower())\n # sometimes we have 'REQUIRES THORNS' instead of 'REQUIRES'\n if requires[0].lower() == 'thorns':\n del requires[0]\n file_dict['requires_thorn'] = requires\n\n m = re.match(shares_re, line)\n if m:\n try:\n file_dict['shares'].append(m.group(1).lower())\n except KeyError:\n file_dict['shares'] = [m.group(1).lower()]\n\n m = re.match(schedules_function_re, line)\n if m:\n bin, func = m.group(2).lower(), m.group(1).lower()\n if bin in TimeBins:\n bin = 'cctk_' + bin\n func_dict = {bin : func}\n try:\n file_dict['schedules_function'].append(func_dict)\n except KeyError:\n file_dict['schedules_function'] = [func_dict]\n\n\n Dependencies.append(file_dict)\n\n return Dependencies", "def build(self):\n logging.info('Build %s of %s (%s)', self._build, self.name,\n self.working_dir)\n self._build += 1\n self._event = None\n status = self._builder.execute_script(self.working_dir, self.script)\n self._show_notification(status)", "def _build_file_list_from_dependency_list(self, build_rule_name: str, build_dir_abs: str) -> List[str]:\n file_list = []\n visited = set()\n queue = Queue()\n queue.put((build_rule_name, build_dir_abs))\n visited.add((build_rule_name, build_dir_abs))\n\n # BFS\n while not queue.empty():\n rule_name, rule_dir_abs = queue.get()\n rule_files_rel_path = \\\n BuildConfig.load_from_build_directory(rule_dir_abs).get_build_rule(rule_name).get_files()\n # Get absolute paths of files and add to file_list\n rules_files_abs_path = [os.path.join(rule_dir_abs, path) for path in rule_files_rel_path]\n file_list.extend(rules_files_abs_path)\n\n if (rule_name, rule_dir_abs) in self._rule_to_dependency_list:\n for dep in self._rule_to_dependency_list[(rule_name, rule_dir_abs)]:\n if dep not in visited:\n visited.add(dep)\n queue.put(dep)\n else:\n self._last_build_passed = False\n raise ParallelBuilder.CircularDependencyException()\n return file_list", "def build(c, force=None):\n for sp_ns in ns_foreach_task_subdir(c):\n print(\"-- running build in \", os.getcwd())\n\n # sp_ns.tasks.build(c, force)\n c.run('invoke build')", "def buildStarted(builderName, build):", "def trigger_cb(file_name: str, commit_hash: str, project_name: str) -> None:\n\n cb_client = boto3.client(\"codebuild\")\n build = {\n \"projectName\": project_name,\n \"sourceVersion\": commit_hash,\n \"environmentVariablesOverride\": [\n {\"name\": \"REQ_FILENAME\", \"value\": file_name, \"type\": \"PLAINTEXT\"}\n ],\n }\n cb_client.start_build(**build)", "def build_job_configs(self, args):\n job_configs = {}\n\n components = Component.build_from_yamlfile(args['comp'])\n\n datafile = args['data']\n if 
datafile is None or datafile == 'None':\n return job_configs\n NAME_FACTORY.update_base_dict(args['data'])\n outdir_base = os.path.join(NAME_FACTORY.base_dict['basedir'], 'counts_cubes')\n\n inputfiles = create_inputlist(args['ft1file'])\n num_files = len(inputfiles)\n\n for comp in components:\n zcut = \"zmax%i\" % comp.zmax\n\n mktimelist = copy.copy(comp.mktimefilters)\n if not mktimelist:\n mktimelist.append('none')\n evtclasslist_keys = copy.copy(comp.evtclasses)\n if not evtclasslist_keys:\n evtclasslist_vals = [NAME_FACTORY.base_dict['evclass']]\n else:\n evtclasslist_vals = copy.copy(evtclasslist_keys)\n\n for mktimekey in mktimelist:\n for evtclassval in evtclasslist_vals:\n fullkey = comp.make_key(\n '%s_%s_{ebin_name}_%s_{evtype_name}' %\n (evtclassval, zcut, mktimekey))\n\n name_keys = dict(zcut=zcut,\n ebin=comp.ebin_name,\n psftype=comp.evtype_name,\n coordsys=comp.coordsys,\n irf_ver=NAME_FACTORY.irf_ver(),\n mktime=mktimekey,\n evclass=evtclassval,\n fullpath=True)\n\n ccube_name = os.path.basename(NAME_FACTORY.ccube(**name_keys))\n outfile = os.path.join(outdir_base, ccube_name)\n infiles = _make_input_file_list(outfile, num_files)\n logfile = make_nfs_path(outfile.replace('.fits', '.log'))\n job_configs[fullkey] = dict(args=infiles,\n output=outfile,\n logfile=logfile)\n\n return job_configs", "async def trigger_build(self, *, branch=None, message=None):", "def runScheduler(self):\n\n for source in self.sources:\n intervals = [\n int(self.sources[source]['metrics'][x]['interval']) for x\n in range(0, len(self.sources[source]['metrics']))]\n sourceInterval = self.gcd(intervals)\n self.sources[source]['sourceInterval'] = sourceInterval\n self.logger.debug(self.sources[source]['metrics'])\n\n self.scheduler.add_job(\n self.getDriverData, 'interval', args=[\n self.sources[source]['metrics']],\n seconds=sourceInterval)", "def partial_build(src, lang, bsp_c3, opt_level, march, reporter):\n if lang == 'c3':\n srcs = [\n relpath('..', 'librt', 'io.c3'),\n bsp_c3,\n io.StringIO(src)]\n o2 = c3c(\n srcs, [], march, opt_level=opt_level,\n reporter=reporter, debug=True)\n objs = [o2]\n elif lang == 'bf':\n o3 = bfcompile(src, march, reporter=reporter)\n o2 = c3c(\n [bsp_c3], [], march, reporter=reporter)\n objs = [o2, o3]\n elif lang == 'c':\n o2 = c3c(\n [bsp_c3], [], march, reporter=reporter)\n coptions = COptions()\n include_path1 = relpath('..', 'librt', 'libc')\n coptions.add_include_path(include_path1)\n with open(relpath('..', 'librt', 'libc', 'lib.c'), 'r') as f:\n o3 = cc(\n f, march, coptions=coptions, debug=True,\n reporter=reporter)\n o4 = cc(\n io.StringIO(src), march, coptions=coptions, debug=True,\n reporter=reporter)\n objs = [o2, o3, o4]\n else:\n raise NotImplementedError('language not implemented')\n obj = link(\n objs, partial_link=True,\n use_runtime=True, reporter=reporter, debug=True)\n return obj", "def RunOnDeps(self, command, args):\n if not command in self.supported_commands:\n raise gclient_utils.Error(\"'%s' is an unsupported command\" % command)\n\n # Check for revision overrides.\n revision_overrides = {}\n for revision in self._options.revisions:\n if revision.find(\"@\") == -1:\n raise gclient_utils.Error(\n \"Specify the full dependency when specifying a revision number.\")\n revision_elem = revision.split(\"@\")\n # Disallow conflicting revs\n if revision_overrides.has_key(revision_elem[0]) and \\\n revision_overrides[revision_elem[0]] != revision_elem[1]:\n raise gclient_utils.Error(\n \"Conflicting revision numbers specified.\")\n 
revision_overrides[revision_elem[0]] = revision_elem[1]\n\n solutions = self.GetVar(\"solutions\")\n if not solutions:\n raise gclient_utils.Error(\"No solution specified\")\n\n # When running runhooks --force, there's no need to consult the SCM.\n # All known hooks are expected to run unconditionally regardless of working\n # copy state, so skip the SCM status check.\n run_scm = not (command == 'runhooks' and self._options.force)\n\n entries = {}\n entries_deps_content = {}\n file_list = []\n # Run on the base solutions first.\n for solution in solutions:\n name = solution[\"name\"]\n deps_file = solution.get(\"deps_file\", self._options.deps_file)\n if '/' in deps_file or '\\\\' in deps_file:\n raise gclient_utils.Error('deps_file name must not be a path, just a '\n 'filename.')\n if name in entries:\n raise gclient_utils.Error(\"solution %s specified more than once\" % name)\n url = solution[\"url\"]\n entries[name] = url\n if run_scm and url:\n self._options.revision = revision_overrides.get(name)\n scm = gclient_scm.CreateSCM(url, self._root_dir, name)\n scm.RunCommand(command, self._options, args, file_list)\n file_list = [os.path.join(name, f.strip()) for f in file_list]\n self._options.revision = None\n try:\n deps_content = gclient_utils.FileRead(\n os.path.join(self._root_dir, name, deps_file))\n except IOError, e:\n if e.errno != errno.ENOENT:\n raise\n deps_content = \"\"\n entries_deps_content[name] = deps_content\n\n # Process the dependencies next (sort alphanumerically to ensure that\n # containing directories get populated first and for readability)\n deps = self._ParseAllDeps(entries, entries_deps_content)\n deps_to_process = deps.keys()\n deps_to_process.sort()\n\n # First pass for direct dependencies.\n for d in deps_to_process:\n if type(deps[d]) == str:\n url = deps[d]\n entries[d] = url\n if run_scm:\n self._options.revision = revision_overrides.get(d)\n scm = gclient_scm.CreateSCM(url, self._root_dir, d)\n scm.RunCommand(command, self._options, args, file_list)\n self._options.revision = None\n\n # Second pass for inherited deps (via the From keyword)\n for d in deps_to_process:\n if type(deps[d]) != str:\n filename = os.path.join(self._root_dir,\n deps[d].module_name,\n self._options.deps_file)\n content = gclient_utils.FileRead(filename)\n sub_deps = self._ParseSolutionDeps(deps[d].module_name, content, {})\n url = sub_deps[d]\n entries[d] = url\n if run_scm:\n self._options.revision = revision_overrides.get(d)\n scm = gclient_scm.CreateSCM(url, self._root_dir, d)\n scm.RunCommand(command, self._options, args, file_list)\n self._options.revision = None\n\n # Convert all absolute paths to relative.\n for i in range(len(file_list)):\n # TODO(phajdan.jr): We should know exactly when the paths are absolute.\n # It depends on the command being executed (like runhooks vs sync).\n if not os.path.isabs(file_list[i]):\n continue\n\n prefix = os.path.commonprefix([self._root_dir.lower(),\n file_list[i].lower()])\n file_list[i] = file_list[i][len(prefix):]\n\n # Strip any leading path separators.\n while file_list[i].startswith('\\\\') or file_list[i].startswith('/'):\n file_list[i] = file_list[i][1:]\n\n is_using_git = gclient_utils.IsUsingGit(self._root_dir, entries.keys())\n self._RunHooks(command, file_list, is_using_git)\n\n if command == 'update':\n # Notify the user if there is an orphaned entry in their working copy.\n # Only delete the directory if there are no changes in it, and\n # delete_unversioned_trees is set to true.\n prev_entries = self._ReadEntries()\n for 
entry in prev_entries:\n # Fix path separator on Windows.\n entry_fixed = entry.replace('/', os.path.sep)\n e_dir = os.path.join(self._root_dir, entry_fixed)\n # Use entry and not entry_fixed there.\n if entry not in entries and os.path.exists(e_dir):\n modified_files = False\n if isinstance(prev_entries, list):\n # old .gclient_entries format was list, now dict\n modified_files = gclient_scm.scm.SVN.CaptureStatus(e_dir)\n else:\n file_list = []\n scm = gclient_scm.CreateSCM(prev_entries[entry], self._root_dir,\n entry_fixed)\n scm.status(self._options, [], file_list)\n modified_files = file_list != []\n if not self._options.delete_unversioned_trees or modified_files:\n # There are modified files in this entry. Keep warning until\n # removed.\n print((\"\\nWARNING: \\\"%s\\\" is no longer part of this client. \"\n \"It is recommended that you manually remove it.\\n\") %\n entry_fixed)\n else:\n # Delete the entry\n print(\"\\n________ deleting \\'%s\\' \" +\n \"in \\'%s\\'\") % (entry_fixed, self._root_dir)\n gclient_utils.RemoveDirectory(e_dir)\n # record the current list of entries for next time\n self._SaveEntries(entries)", "def watch_namespaced_build_list(self, namespace, **kwargs):\n\n all_params = ['namespace', 'pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_namespaced_build_list\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `watch_namespaced_build_list`\")\n\n resource_path = '/oapi/v1/watch/namespaces/{namespace}/builds'.replace('{format}', 'json')\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='JsonWatchEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response" ]
[ "0.5340288", "0.5337348", "0.503201", "0.49329406", "0.48699933", "0.48493886", "0.47616592", "0.46867886", "0.467615", "0.46380544", "0.46182352", "0.4580375", "0.45653725", "0.45618638", "0.45410687", "0.4527639", "0.45264634", "0.4522288", "0.45181048", "0.4508268", "0.45075577", "0.4505349", "0.4485089", "0.44701198", "0.44465408", "0.44125593", "0.44050545", "0.4383525", "0.43781632", "0.43642694" ]
0.78031754
0
Returns a list of unique resource names to create.
def _resource_names(resource_types):
    return [six.text_type(i) for i, t in enumerate(resource_types)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetResourceNames(self):\r\n return [x.name for x in self.resources]", "def generate_resource_names(num):\n assert num > 0\n return [generate_resource_name(i) for i in range(num)]", "def resource_names(self):\n return self._resource_names", "def resource_names(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"resource_names\")", "def resource_names(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"resource_names\")", "def names(self):\r\n return resource.Name(self)", "def create_resources(self) -> List[ResourceDescription]:\r\n return self.resources", "def resource_id_patterns(self) -> Sequence[str]:\n return pulumi.get(self, \"resource_id_patterns\")", "def resource_names(self) -> pulumi.Output[Optional[Mapping[str, Sequence[str]]]]:\n return pulumi.get(self, \"resource_names\")", "def get_init_all_names(self) -> list[str]:\n names = {self.client.name, self.client.alias_name}\n if self.service_resource:\n names.add(self.service_resource.name)\n names.add(self.service_resource.alias_name)\n for waiter in self.waiters:\n names.add(waiter.name)\n for paginator in self.paginators:\n names.add(paginator.name)\n\n result = list(names)\n result.sort()\n return result", "def resource_names(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Sequence[pulumi.Input[str]]]]]]:\n return pulumi.get(self, \"resource_names\")", "def _get_resource_test_names(self):\n\t\ttests = []\n\t\tfor resource in self.resources:\n\t\t\tpath = os.path.join(self.history_path, '*', '*', resource)\n\t\t\tself.logger.info(\"Looking for Inca tests in %s\" % path)\n\t\t\ttests.extend(glob.glob(path))\n\t\treturn tests", "def required_resource_keys(self) -> AbstractSet[str]:\n return self._required_resource_keys", "def namelist(self):\n return []", "def resource_types(self) -> Sequence[str]:\n return pulumi.get(self, \"resource_types\")", "def _generateRoleName(self, obj, **args):\n # Subclasses must override this.\n return []", "def get_resources(self):\n return []", "def _create_resources(self, create_list):\n LOGGER.debug(\"Creating %d resources...\", len(create_list))\n retry_list = list()\n for resource in create_list:\n try:\n start_time = time()\n resource.create(self._bigip.mgmt_root())\n LOGGER.debug(\"Created %s in %.5f seconds.\",\n resource.name, (time() - start_time))\n except exc.F5CcclResourceConflictError:\n LOGGER.warning(\n \"Resource /%s/%s already exists, skipping task...\",\n resource.partition, resource.name)\n except (exc.F5CcclResourceCreateError,\n exc.F5CcclError) as e:\n LOGGER.error(str(e))\n LOGGER.error(\n \"Resource /%s/%s creation error, requeuing task...\",\n resource.partition, resource.name)\n retry_list.append(resource)\n\n return retry_list", "def names(self) -> list[str]:", "def list_unique_names(self):\n return [os.path.splitext(x)[0] for x in os.listdir(self._event_dir)]", "def name_get(self):\n result = []\n for r in self:\n result.append((r.id, u\"%s %s\" % ('PO', r.name)))\n return result", "def getNames(self) -> List[unicode]:\n ...", "def associated_resource_ids(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"associated_resource_ids\")", "def associated_resource_ids(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"associated_resource_ids\")", "def list(self):\n resources = self._os_resource_manager.list()\n resource_list = []\n for resource in resources:\n resource_list.append(self._resource_class(id=resource.id,\n name=resource.name))\n return resource_list", "def names(cls) -> List[str]:", "def 
make_unique(name, reserved_names):\n while name in reserved_names:\n name += '_'\n\n return name", "def get_unique_label_list(self) -> List[str]:\n return self.tasks.get_label_list()", "def name_get(self):\n res = [(r.id, r.name) for r in self]\n return res", "def get_unique_project_list(self) -> List[str]:\n return self.tasks.get_project_list()" ]
[ "0.7136954", "0.69740754", "0.67689633", "0.6493735", "0.6493735", "0.6410779", "0.63506067", "0.62877566", "0.6221355", "0.6051435", "0.6032399", "0.5939095", "0.58702785", "0.5866576", "0.5823574", "0.5683516", "0.5680578", "0.5621436", "0.56203556", "0.56067157", "0.5603627", "0.55792767", "0.5570131", "0.5570131", "0.55659854", "0.5563185", "0.55115706", "0.54935205", "0.54897434", "0.5484109" ]
0.7015451
1
Get the address of a region for a placement
def locate_memory_region_for_placement(placement, region, transceiver):
    regions_base_address = transceiver.get_cpu_information_from_core(
        placement.x, placement.y, placement.p).user[0]

    # Get the position of the region in the pointer table
    region_offset_in_pointer_table = \
        utility_calls.get_region_base_address_offset(
            regions_base_address, region)

    region_address = buffer(transceiver.read_memory(
        placement.x, placement.y, region_offset_in_pointer_table, 4))
    region_address_decoded = struct.unpack_from("<I", region_address)[0]
    return region_address_decoded
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def placement(self):\n return self.container['placement']", "def get_address(self, mode, offset):\n\t\taddress = None\n\t\tif mode == 0:\n\t\t\taddress = self.data[ self.pos + offset ]\n\t\telif mode == 1:\n\t\t\taddress = self.pos + offset\n\t\telif mode == 2:\n\t\t\taddress = self.rel_pos + self.data[ self.pos + offset ]\n\t\telse:\n\t\t\tprint(\"FAIL - wrong mode parameter\")\n\t\treturn address", "def position_region(reg):\n x,y = reg.coord_list[:2]\n posn = coords.Position([x,y],system=coords_format(reg.coord_format))\n return posn", "def get_address(project, zone, instance):\n return gcloud(\n project,\n 'addresses',\n 'describe',\n '%s-ip' % instance,\n '--region=%s' % get_region(zone),\n '--format=value(address)',\n )", "def get_region(ip_str, topology_map): \n ip = ipaddress.ip_address(ip_str)\n\n for net in topology_map:\n if ip in net:\n return topology_map[net]\n\n return None", "def get_region(zone):\n return '-'.join(zone.split('-')[:2])", "def get_region(latitude, longitude, region_type=\"country\"):\n if region_type not in REGION_TYPE_TO_ADMIN_AREA.keys():\n raise Exception(\"Not a valid region type:{}\".format(region_type))\n\n admin_area = REGION_TYPE_TO_ADMIN_AREA.get(region_type)\n response = requests.get(\"http://www.mapquestapi.com/geocoding/v1/reverse?key={}&\"\n \"location={},{}\".format(MAPQUEST_API_KEY, latitude, longitude))\n try:\n address = response.json().get(\"results\")[0].get(\"locations\")[0].get(admin_area)\n if address is not None:\n return address\n except Exception:\n print response.content\n return None", "def region(self):\n # type: () -> string_types\n return self._region", "def geo_region(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"geo_region\")", "def geo_region(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"geo_region\")", "def toAddr(self, offset: int) -> ghidra.program.model.address.Address:\n ...", "def region(self):\n return self._get(\"region\")", "def Address(self) -> _n_5_t_0:", "def get_address(self, ):\n return self.get_parameter('address')", "def get_offset_address():\n return command(\"O\")", "def get_region(self, variable):\n return self.__region_ids[variable]", "def toAddr(self, offset: long) -> ghidra.program.model.address.Address:\n ...", "def placement(self) -> pulumi.Output['outputs.JobPlacement']:\n return pulumi.get(self, \"placement\")", "def getregion(self, *args, **kwargs):\n return _image.image_getregion(self, *args, **kwargs)", "def getAddress(self):\r\n return self._container.getAddress()", "def region(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"region\")", "def region(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"region\")", "def region(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"region\")", "def region(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"region\")", "def region(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"region\")", "def region(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"region\")", "def region(self):\n return regions.lookup(self.state)", "def region(self):\n return self._region", "def region(self):\n return self._region", "def get_address(self, index, mode):\n if mode == IMMEDIATE_MODE:\n return index\n elif mode == POSITION_MODE:\n return self.program[index]\n elif mode == RELATIVE_MODE:\n return self.program[index] + self.relative_base\n raise Exception(f\"unknown mode: {mode}\")" ]
[ "0.635856", "0.6345484", "0.62339664", "0.6139868", "0.60450715", "0.59317094", "0.59305286", "0.58674234", "0.5849697", "0.5849697", "0.5804182", "0.57843435", "0.57785076", "0.5759799", "0.57451206", "0.57362026", "0.56928176", "0.5655404", "0.56482154", "0.5631404", "0.5608113", "0.5608113", "0.5608113", "0.5608113", "0.5608113", "0.5608113", "0.56044596", "0.5604386", "0.5604386", "0.5598673" ]
0.70431525
0
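The only decoding step in the record above is the struct.unpack_from call: the four bytes read from the pointer-table slot are interpreted as a little-endian unsigned 32-bit address. A self-contained illustration of just that step (the bytes are hard-coded here instead of being read from a SpiNNaker board):

import struct

# Pretend these four bytes were read back from SDRAM at the region's slot in
# the pointer table; they encode the region base address, little-endian.
raw = b"\x10\x20\x60\x70"
region_address = struct.unpack_from("<I", raw)[0]
print(hex(region_address))  # 0x70602010
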
Translate a string list of cores into a core subset
def convert_string_into_chip_and_core_subset(cores): ignored_cores = CoreSubsets() if cores is not None and cores != "None": for downed_core in cores.split(":"): x, y, processor_id = downed_core.split(",") ignored_cores.add_processor(int(x), int(y), int(processor_id)) return ignored_cores
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_cores(core_str):\n num_cores = os.cpu_count()\n cores = []\n\n # remove spaces\n core_str.replace(\" \", \"\")\n\n # check if not a range\n if '-' not in core_str:\n cores = list(map(int, core_str.strip().split(',')))\n else:\n # parse range e.g. 2-8\n core_str = core_str.strip().split('-')\n for i in range(int(core_str[0]), int(core_str[1]) + 1):\n cores.append(i)\n\n # ensure valid cores specified\n for core in cores:\n if core < 0 or core >= num_cores:\n print(\"Core {} out of range!\".format(core), file=sys.stderr)\n raise Exception()\n\n return cores", "def set_cpus(core_str, **kwargs):\n online_cores = osutil.parse_cpus(core_str)\n offline_cores = POSSIBLE_CPUS - online_cores\n for online in online_cores:\n set_cpu(online, True, **kwargs)\n for offline in offline_cores:\n set_cpu(offline, False, **kwargs)", "def shorten_cores(cores):\n cores = sorted(list(cores))\n if len(cores) == 0:\n return ''\n core_buffer = ''\n start = 0\n while start < len(cores):\n cont_seq = find_max_continous_sequence(cores, start)\n start += len(cont_seq)\n if len(cont_seq) > 1:\n core_buffer += ',%d-%d' % (cont_seq[0], cont_seq[-1])\n else:\n core_buffer += ',%d' % cont_seq[0]\n return core_buffer[1:]", "def available_cpu_list():\n \n def expand(s):\n if s.count(\"-\") == 1:\n numbers = re.findall(r'(\\d+)', s)\n start = int(numbers[0])\n end = int(numbers[1])\n return list(range(start, end+1))\n \n elif s.count(\"-\") == 0:\n return [int(s)]\n else:\n print(\"The string cannot have more than one dash mark (-).\")\n\n # cpuset\n # cpuset may restrict the number of *available* processors\n try:\n m = re.search(r'(?m)^Cpus_allowed_list:\\s*(.*)$',\n open('/proc/self/status').read())\n if m:\n group = m.group(1)\n # group=\"0-7,9-10, 14\"\n \n m = re.findall(r'(\\d+(-\\d+)?)', group)\n items = [item[0] for item in m]\n \n cpus = []\n for item in items:\n cpus += expand(item)\n \n return cpus\n except IOError:\n raise IOError(\"Could not read /proc/self/status\")", "def _mask_to_cores(mask):\n cores = set()\n i = 0\n while mask:\n if mask % 2:\n cores.add(i)\n mask >>= 1\n i += 1\n return cores", "def set_core(self, core):\n self.core = core\n mask = 1 << core\n self.cmd += ' --cores {}'.format(str(hex(mask)))", "def _cpus(cls):\n # The real processor map is found at different paths based on cgroups version:\n # - cgroupsv1: /cpuset.cpus\n # - cgroupsv2: /cpuset.cpus.effective\n # For more details, see https://docs.kernel.org/admin-guide/cgroup-v2.html#cpuset-interface-files\n cpulist = None\n for path in [\n Path(\"/sys/fs/cgroup/cpuset/cpuset.cpus\"),\n Path(\"/sys/fs/cgroup/cpuset.cpus.effective\"),\n ]:\n if path.exists():\n cpulist = path.read_text(\"ascii\").strip()\n break\n else:\n raise RuntimeError(\"Could not find cgroups cpuset\")\n return ListFormatParser(cpulist).parse()", "def get_cores(sockets):\n cores = []\n for skt in sockets:\n cores.extend(SOCKET_DICT[skt])\n\n return cores", "def _sort_by_coreid(cpu):\n return (int(cpu.core), int(cpu.thread))", "def get_coreids(self):\n return range(0, self.get_ncores()) # default behaviour for x86", "def _apply_vpp_cpu(node):\n\n # Get main core\n cpu = \"\\n\"\n if \"vpp_main_core\" in node[\"cpu\"]:\n vpp_main_core = node[\"cpu\"][\"vpp_main_core\"]\n else:\n vpp_main_core = 0\n if vpp_main_core != 0:\n cpu += \" main-core {}\\n\".format(vpp_main_core)\n\n # Get workers\n vpp_workers = node[\"cpu\"][\"vpp_workers\"]\n vpp_worker_len = len(vpp_workers)\n if vpp_worker_len > 0:\n vpp_worker_str = \"\"\n for i, worker in 
enumerate(vpp_workers):\n if i > 0:\n vpp_worker_str += \",\"\n if worker[0] == worker[1]:\n vpp_worker_str += \"{}\".format(worker[0])\n else:\n vpp_worker_str += \"{}-{}\".format(worker[0], worker[1])\n\n cpu += \" corelist-workers {}\\n\".format(vpp_worker_str)\n\n return cpu", "def eval_cpuset():\n\tnum_cpu = run('grep -c ^processor /proc/cpuinfo',quiet=True,warn_only=True)\n\tprint(red('Number of cpus : \\t'+num_cpu))", "def check_cores(self):\n\n cores = []\n # Execute command to check for cores\n header = [\"VDC\", \"Module\", \"Instance\",\n \"Process-name\", \"PID\", \"Date\\(Year-Month-Day Time\\)\"]\n\n if self.device.alias == 'uut':\n # In case of restarting process on a the main VDC\n output = oper_fill_tabular(device=self.device,\n show_command='show cores vdc-all',\n header_fields=header, index=[5])\n else:\n # In case of restarting process on a sub-VDC\n self.device.disconnect()\n output = oper_fill_tabular(device=self.device,\n show_command='show cores',\n header_fields=header, index=[5])\n\n if not output.entries:\n log.info('No core found')\n return []\n\n # Parse through output to collect core information (if any)\n for k in sorted(output.entries.keys(), reverse=True):\n row = output.entries[k]\n date = row.get(\"Date\\(Year-Month-Day Time\\)\", None)\n if not date:\n continue\n date_ = datetime.strptime(date, '%Y-%m-%d %H:%M:%S')\n\n # Save core info\n core_info = dict(module=row['Module'],\n pid=row['PID'],\n instance=row['Instance'],\n process=row['Process-name'],\n date=date.replace(\" \", \"_\"))\n cores.append(core_info)\n\n return cores", "def tokenize(doc_list, language_code, core_num=multiprocessing.cpu_count()):\n param = [[d, language_code] for d in doc_list]\n pool = multiprocessing.Pool(core_num)\n return pool.map(_tokenize4map, param)", "def set_cpu_affinity(pid: int, cpulist: list) -> list:\n real_cpulist = list(map(CpuMap, cpulist))\n return psutil.Process(pid).cpu_affinity(real_cpulist)", "def _get_client_cores(self, cores):\n\n if self.gpus:\n limit = self.processes_per_gpu * self.gpus\n if limit < cores:\n msg = 'Requested core count exceeds maximum gpu allowance.\\n' + \\\n 'Setting to core limit: ' + str(limit)\n logging.warning(msg)\n cores = limit\n \n return cores", "def _get_core_membind_info():\n args = [\"lscpu\", \"--parse=CPU,Core,Socket,Node\"]\n process_lscpu = subprocess.check_output(args, universal_newlines=True).split(\"\\n\")\n\n # Get information about core, node, socket and cpu. 
On a machine with no NUMA nodes, the last column is empty\n # so regex also check for empty string on the last column\n bind_info = []\n for line in process_lscpu:\n pattern = r\"^([\\d]+,[\\d]+,[\\d]+,([\\d]+|$))\"\n regex_out = re.search(pattern, line)\n if regex_out:\n bind_info.append(regex_out.group(1).strip().split(\",\"))\n\n return bind_info", "def compute_cores(config):\n cores = config.getint('General','cores')\n if cores > mp.cpu_count():\n cores = mp.cpu_count()\n return cores", "def set_cores(self, cores):\n self.cores = cores\n return", "def training(string):\n print(\"Training...\")\n vec = create_vector(string)\n print(\"Selecting features...\")\n feature_list = select_features(vec)\n print(\"Done!\")\n return feature_list", "def minimize_core(s, core):\n for i in range(len(core)):\n new_core = core[:i] + core[i+1:]\n print \"trying \", new_core\n is_sat = s.check(new_core)\n print is_sat\n if is_sat == unsat:\n return minimize_core(s, list(s.unsat_core()))\n return core", "def cores(self):\n return int(self.get('cores'))", "def check_solr_cores_version(cores):\n map(solr_version_check, cores)", "def match_all_cui(s,max_len = 10, Eterm_cui = Eterm_cui):\n if len(s) == 0: \n return []\n sub_label = np.zeros(len(s),dtype = 'int')\n location_term = {}\n i = 0\n while i < len(s):\n for j in range(max_len+1,0,-1):\n temp = ' '.join(s[i:i+j])\n if temp in Eterm_cui:\n sub_label[i:i+j] = 1\n location_term[i] = [Eterm_cui[temp]]\n break#matched maximum string, so break\n i += j\n output = []\n for i in range(len(s)):\n if sub_label[i] == 0:#no match\n output += [s[i]]\n elif i in location_term:\n for cui in location_term[i][: :-1]:\n output += [cui]\n return output", "def get_cpu_core():\n processor_info = subprocess.getoutput('dmidecode -t processor')\n cpu_core_value = re.findall(r'(?i)Core Count:\\s+(.*?)\\n', processor_info, re.S)[0]\n log.info('cpu_core value:{}'.format(cpu_core_value))\n if cpu_core_value:\n cpu_core = cpu_core_value\n else:\n cpu_core = ''\n return cpu_core", "def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, mode): #check later if we can merge this function with the SQuAD preprocessing \n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n features = []\n for (ex_index, example) in enumerate(examples):\n if mode!=\"ae\":\n tokens_a = tokenizer.tokenize(example.text_a)\n else: #only do subword tokenization.\n tokens_a, labels_a, example.idx_map= tokenizer.subword_tokenize([token.lower() for token in example.text_a], example.label )\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n\n if tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[0:(max_seq_length - 2)]\n\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n if mode!=\"ae\":\n label_id = label_map[example.label]\n else:\n label_id = [-1] * len(input_ids) #-1 is the index to ignore\n #truncate the label length if it exceeds the limit.\n lb=[label_map[label] for label in labels_a]\n if len(lb) > max_seq_length - 2:\n lb = lb[0:(max_seq_length - 2)]\n label_id[1:len(lb)+1] = lb\n \n features.append(\n InputFeatures(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id))\n return features", "def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, mode): #check later if we can merge this function with the SQuAD preprocessing\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n features = []\n for (ex_index, example) in enumerate(examples):\n if mode!=\"ae\":\n tokens_a = tokenizer.tokenize(example.text_a)\n else: #only do subword tokenization.\n tokens_a, labels_a, example.idx_map= tokenizer.subword_tokenize([token.lower() for token in example.text_a], example.label )\n\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n\n if tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[0:(max_seq_length - 2)]\n\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n target_indices = find_target_indices(tokens_a, tokens)\n if target_indices is None:\n target_indices = (1, 1 + len(tokens_a))\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n if mode!=\"ae\":\n label_id = label_map[example.label]\n else:\n label_id = [-1] * len(input_ids) #-1 is the index to ignore\n #truncate the label length if it exceeds the limit.\n lb=[label_map[label] for label in labels_a]\n if len(lb) > max_seq_length - 2:\n lb = lb[0:(max_seq_length - 2)]\n label_id[1:len(lb)+1] = lb\n\n features.append(\n InputFeatures(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id,\n target_indices=target_indices))\n return features", "def add_ip_cores(self, scfg, ip_dir):\r\n\r\n return []", "def parallel_tokenizer(df):\n pool = mp.Pool(processes=4)\n df['tokenized_abs'] = pool.map(_tokenize_abstract, df['Abstract'])\n pool.terminate()\n return df", "def list_cpus():\n online_cpus = osutil.get_online_cpus()\n offline_cpus = POSSIBLE_CPUS - online_cpus\n print(\"Online: CPU \", shorten_cores(online_cpus))\n print(\"Offline: CPU \", shorten_cores(offline_cpus))" ]
[ "0.6973038", "0.6796162", "0.6252324", "0.5768839", "0.5585052", "0.5559871", "0.5526212", "0.5463372", "0.53741086", "0.53535175", "0.52621424", "0.5231283", "0.52162653", "0.5187501", "0.5104439", "0.5065146", "0.49959305", "0.49841636", "0.4949571", "0.4945511", "0.48954442", "0.48935986", "0.48781383", "0.4854575", "0.48499975", "0.48437852", "0.48436764", "0.48381904", "0.48275366", "0.48043054" ]
0.7976633
0
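The same parsing logic as the record above, stripped of the SpiNNMachine CoreSubsets container so it can be run stand-alone (plain tuples stand in for the real add_processor calls; this is an illustration, not the library API):

def parse_core_spec(cores):
    # Each colon-separated entry names one core as "x,y,processor_id".
    ignored = set()
    if cores is not None and cores != "None":
        for entry in cores.split(":"):
            x, y, p = entry.split(",")
            ignored.add((int(x), int(y), int(p)))
    return ignored

print(sorted(parse_core_spec("0,0,1:0,0,2:1,1,3")))
# [(0, 0, 1), (0, 0, 2), (1, 1, 3)]
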
Get the string value of a config item, returning None if the value is "None"
def read_config(config, section, item): value = config.get(section, item) if value == "None": return None return value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_config(item: str) -> Union[str, int]:\n file = load_config_file(\"config.json\")\n\n value = file.get(item)\n\n if value is None:\n raise Exception(f\"Your config is out of date! Missing a value for {item}\")\n return value", "def get_item(self, key):\n return self.config[key] if key in self.config.keys() else None", "def __getitem__(self, item):\n return self._config.get(item, '') or os.environ.get(item, '')", "def config_value(name):\n def get():\n try:\n return config.get('yourls', name)\n except (NoOptionError, NoSectionError):\n return None\n return get", "def get_config_value(keyword):\n if g_configs and keyword in g_configs:\n return g_configs[keyword]\n return \"\"", "def get_value(key: str) -> str:\n Config.__get()\n assert Config.__config is not None\n return Config.__config.get(\"wsgi\", key)", "def getStrNo(self, key):\n value = self.getConf(key);\n if value == \"no\":\n return None\n else:\n return value", "def _get_field(self, section, field):\n if not self._configparser.has_option(section, field):\n return None\n return self._configparser.get(section, field).strip()", "def get(self, item, default=''):\n value = self.getSection(CFG_GENERAL, item)\n return default if not value else value", "def get(self,section,option):\n \n value = ConfigParser.SafeConfigParser.get(self,section,option)\n value=value.strip('\"')\n return value", "def _GetConfigValue(self, config_parser, section_name, value_name):\n try:\n return config_parser.get(section_name, value_name).decode('utf-8')\n except configparser.NoOptionError:\n return", "def get_value(self, key: str) -> Optional[str]:\n raise NotImplementedError", "def parse_value(self, value_name, default=None):\n\t\treturn self.cfg_root.find(value_name).text", "def string_value(self) -> typing.Optional[str]:\n return self._values.get('string_value')", "def _GetConfigValue(self, config_parser, section_name, value_name):\n try:\n return config_parser.get(section_name, value_name).decode('utf-8')\n except (configparser.NoOptionError, configparser.NoSectionError):\n return", "def _get_config_value(self, section, key):\n return config.get(section, key)", "def get(self, key):\n self._check(key)\n return unicode(self.__config.value(key).toString())", "def string_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"string_value\")", "def ini_get(interp, vname):\n w_value = interp.config.get_ini_w(vname)\n if w_value is None:\n return interp.space.w_False\n return interp.space.as_string(w_value)", "def get_config_value(self, name):\r\n if name in self.config_values:\r\n return self.config_values[name]", "def value(self) -> str:\n return self._config.get('value')", "def test_get_with_None_value(self):\n self.assertEqual(self.config.get('none_types','some_value'),None)\n self.assertEqual(self.config.get('none_types','some_value','something'),'something')", "def get_config_item(config, section, item):\n try:\n return config.get(section, item)\n except (configparser.NoOptionError, configparser.NoSectionError, configparser.InterpolationSyntaxError):\n error_msg = ('The item \"{0}\" is not set correctly in the \"{1}\" section '\n 'in your config file.'.format(item, section))\n raise click.ClickException(error_msg)", "def test_get_with_empty_value(self):\n self.assertEqual(self.config.get('none_types','other_value'),None)\n self.assertEqual(self.config.get('none_types','other_value','something'),'something')", "def get_value(section, option):\n try:\n value = rcp.get(section, option)\n return value\n except:\n 
logging.error(\"Tried to retrieve nonexistant value from config (%s:%s).\",\n section, option)\n return False", "def get_value(value):\n if value:\n return value.split('\\n')[0]\n else:\n return None", "def get_parameter_value(self, parameter_name):\n if parameter_name in self.description[\"config\"][\"values\"].keys():\n return self.description[\"config\"][\"values\"][parameter_name][\"value\"]\n else:\n return \"No such parameter\"", "def get_configval(self, keyname, defaultval=None):\n return self.cfghelper.get_value(keyname,defaultval)", "def getitem(value, key):\n try:\n return value[key]\n except Exception:\n return \"\"", "def getDbStrNone(self, db, key):\n \n value = self.get(db + \".\" + key)\n if value == None:\n value = self.get(\"default.\" + key)\n return value" ]
[ "0.67985845", "0.6747702", "0.6738554", "0.6655566", "0.6612734", "0.6555997", "0.6535131", "0.6505249", "0.6497613", "0.6485368", "0.6459843", "0.64332193", "0.64330655", "0.6391925", "0.63310325", "0.6320904", "0.6319391", "0.628734", "0.6286437", "0.6280959", "0.62767285", "0.6262986", "0.6227073", "0.61923236", "0.6121871", "0.6102076", "0.6084225", "0.60838956", "0.60705376", "0.60695976" ]
0.73921996
0
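A runnable sketch of the behaviour described by the record above, using the standard-library configparser; the [Machine] section and option names are invented for the example:

from configparser import RawConfigParser

config = RawConfigParser()
config.read_string("[Machine]\ndown_cores = None\nversion = 5\n")

def read_config(config, section, item):
    value = config.get(section, item)
    if value == "None":
        return None
    return value

print(read_config(config, "Machine", "down_cores"))  # None
print(read_config(config, "Machine", "version"))     # prints 5 (still the string "5")
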
Get the integer value of a config item, returning None if the value is "None"
def read_config_int(config, section, item): value = read_config(config, section, item) if value is None: return value return int(value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _getint(\n parser: configparser.ConfigParser,\n key: str,\n section: str = \"wpwatcher\",\n ) -> int:\n try:\n return parser.getint(section, key)\n except ValueError as err:\n raise ValueError(\n f\"Could not read int value in config file for key '{key}' and string '{parser.get(section, key)}'. Must be an integer\"\n ) from err", "def getInt(self, item, default=0):\n value = self.getSection(CFG_GENERAL, item)\n return default if not value else int(value)", "def get_config(item: str) -> Union[str, int]:\n file = load_config_file(\"config.json\")\n\n value = file.get(item)\n\n if value is None:\n raise Exception(f\"Your config is out of date! Missing a value for {item}\")\n return value", "def config_get_int(section, option):\n return __CONFIG.getint(section, option)", "def getInt(self, key):\n self._check(key)\n value, flag = self.__config.value(key).toInt()\n if flag:\n return value\n else:\n raise ValueError(\"ConfigManager can't get key '%s' as int\" % key)", "def get_by_name_as_int(cls, name, default=None):\n try:\n return int(Configuration.get_by_name(name))\n except:\n return default", "def getint(self, option, default = None, section = None):\n return int(self.get(option, default, section))", "def read_config(config, section, item):\n value = config.get(section, item)\n if value == \"None\":\n return None\n return value", "def getSetInt(self, key: str, default: int | None = None) -> int:\n value = self.parsedConfig.getint(key, default)\n self.parsedConfig[key] = str(value)\n return value", "def _parse_int(node, key):\n element = node.get(key)\n if element is not None:\n return int(element)\n else:\n return None", "def getint(self, key):\n try:\n return self.parser.getint(\"wpwatcher\", key)\n except ValueError as err:\n raise ValueError(\n \"Could not read int value in config file for key '{}' and string '{}'. 
Must be an integer\".format(\n key, self.parser.get(\"wpwatcher\", key)\n )\n ) from err", "def test_getint_with_default(self):\n self.assertEqual(self.config.getint('advanced','p'),None)\n self.assertEqual(self.config.getint('advanced','p',11),11)", "def getint(self, section, option, default=None):\r\n return self.get(section, option, type=int, default=default)", "def getint(self, option, argument=None):\n value = self.get(option, argument)\n if value: return int(value)\n else: return 0", "def get_int(self, item: str) -> int:\n return int(self[item])", "def getint(self, section, option):\n return int(self.get(section, option))", "def _parse_int(node, key):\r\n element = node.find(key)\r\n if element is not None:\r\n return int(element.text)\r\n else:\r\n return None", "def safe_get_int(self, section, option, default=None):\n try:\n return int(self.safe_get(section, option, default))\n except ValueError:\n if default is None:\n raise\n else:\n #gvlogger.info(\"Can't convert value from section '%s' option '%s' in configuration file, reverting to defaults\", section, option)\n return default", "def getInt(self, section, option, default=0):\n return self.get(section, option, default, int)", "def getDbIntNone(self, db, key):\n val = self.getDbStrNone(db, key)\n if val != None:\n return int(val)\n else:\n return None", "def value(self, value: Optional[int] = None) -> Optional[int]:\n ...", "def _get_value(item_id, infobox_data):\n\n sub_id = _get_subitem_id(item_id, infobox_data)\n value = infobox_data.get('value', None)\n if value:\n return int(value)\n else:\n return int(infobox_data.get('value%s' % sub_id, None))", "def int(self, item, default=None):\n try:\n item = self.__getattr__(item)\n except AttributeError as err:\n if default is not None:\n return default\n raise err\n return int(item)", "def getint(self, option, default=None):\n\t\treturn self._get_raw(option, 'int', default)", "def get_item(self, key):\n return self.config[key] if key in self.config.keys() else None", "def getInteger(self):\n return self.value if self.isInteger() else None", "def getint(self, sec, name, default=None, badtypeok=False, morevars=None,\n taskvars=None):\n if sec in self.OLD_SECTIONS:\n sec = 'config'\n\n try:\n # call ProdConfig function with no default set so\n # we can log and set the default\n return super().getint(sec, name, default=None,\n badtypeok=badtypeok, morevars=morevars,\n taskvars=taskvars)\n\n # if config variable is not set\n except NoOptionError:\n if default is None:\n default = util.MISSING_DATA_VALUE\n\n self.check_default(sec, name, default)\n return default\n\n # if invalid value\n except ValueError:\n # check if it was an empty string and return MISSING_DATA_VALUE\n if super().getstr(sec, name) == '':\n return util.MISSING_DATA_VALUE\n\n # if value is not correct type, log error and return None\n self.logger.error(f\"[{sec}] {name} must be an integer.\")\n return None", "def getValue(self) -> Optional[int]:\n return self.__value", "def test_safeGetInt(self):\n self.assertEqual(\n BMConfigParser().safeGetInt('nonexistent', 'nonexistent'), 0)\n self.assertEqual(\n BMConfigParser().safeGetInt('nonexistent', 'nonexistent', 42), 42)", "def get_attr_int(self, name, default=0):\n v = self.get_attr(name)\n if v is None:\n return default\n try:\n return int(v)\n except: # noqa\n return default" ]
[ "0.7431964", "0.7313776", "0.7288933", "0.71721596", "0.7074228", "0.690416", "0.68237036", "0.680126", "0.67072004", "0.66927516", "0.662101", "0.6606317", "0.6581621", "0.65049773", "0.64954185", "0.6485508", "0.6388351", "0.6381325", "0.6373475", "0.6350135", "0.6325861", "0.62814593", "0.62599474", "0.6245833", "0.6245249", "0.6224701", "0.6223292", "0.61986274", "0.60982984", "0.60624385" ]
0.79471374
0
Get the boolean value of a config item, returning None if the value is "None"
def read_config_boolean(config, section, item): value = read_config(config, section, item) if value is None: return value if value.lower() in RawConfigParser._boolean_states: return RawConfigParser._boolean_states[value.lower()] raise ValueError("Unknown boolean value {} in configuration {}:{}".format( value, section, item))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_getboolean_with_default(self):\n self.assertEqual(self.config.getboolean('advanced','p'),None)\n self.assertEqual(self.config.getboolean('advanced','p',True),True)", "def _read_bool_from_config(key, default):\n if config.has_option('docker', key):\n return config.getboolean('docker', key)\n else:\n return default", "def _getbool(\n parser: configparser.ConfigParser,\n key: str,\n section: str = \"wpwatcher\",\n ) -> bool:\n try:\n return parser.getboolean(section, key)\n except ValueError as err:\n raise ValueError(\n f\"Could not read boolean value in config file for key '{key}' and string '{parser.get(section, key)}'. Must be Yes/No\"\n ) from err", "def getbool(option, default = None):\n\treturn _cfg.getboolean('rosshm', option, fallback = default)", "def config_get_bool(section, option):\n return __CONFIG.getboolean(section, option)", "def getBoolean(self, key):\n self._check(key)\n return self.__config.value(key).toBool()", "def getbool(self, sec, name, default=None, badtypeok=False, morevars=None,\n taskvars=None):\n if sec in self.OLD_SECTIONS:\n sec = 'config'\n\n try:\n return super().getbool(sec, name, default=None,\n badtypeok=badtypeok, morevars=morevars,\n taskvars=taskvars)\n except NoOptionError:\n # config item was not set\n self.check_default(sec, name, default)\n return default\n except ValueError:\n # check if it was an empty string and return default or False if so\n value_string = super().getstr(sec, name)\n if not value_string:\n if default:\n return default\n\n return False\n\n # check if value is y/Y/n/N and return True/False if so\n value_string = remove_quotes(value_string)\n if value_string.lower() == 'y':\n return True\n if value_string.lower() == 'n':\n return False\n\n # if value is not correct type, log error and return None\n self.logger.error(f\"[{sec}] {name} must be an boolean.\")\n return None", "def getbool(self, key):\n try:\n return self.parser.getboolean(\"wpwatcher\", key)\n except ValueError as err:\n raise ValueError(\n \"Could not read boolean value in config file for key '{}' and string '{}'. 
Must be Yes/No\".format(\n key, self.parser.get(\"wpwatcher\", key)\n )\n ) from err", "def getbool(self, section, option, default=None):\r\n return self.get(section, option, type=bool, default=default)", "def _parse_boolean(node, key):\n element = node.get(key)\n if element is not None:\n return bool(element)\n else:\n return None", "def get_bool(self, key, default):\n value = self.get(key, default)\n if isinstance(value, bool):\n return value\n return value.lower() in (\"true\", \"t\", \"yes\", \"y\")", "def bool_value(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"bool_value\")", "def get_bool(section, option, default=False):\n\tres = get(section, option, default)\n\n\tif res == default:\n\t\treturn default\n\n\tif res.lower() == \"true\" or res == \"1\":\n\t\treturn True\n\n\treturn default", "def get_bool(self, item: str) -> bool:\n return as_bool(self[item])", "def _get_bool(element, name, context, default=None):\n\n value = element.get(name)\n try:\n value = int(value)\n except:\n value = default\n\n _assert(value is not None,\n \"Missing or invalid boolean value of '{0}.{1}'.\".format(context,\n name))\n\n return bool(value)", "def test_getboolean(self):\n self.assertEqual(self.config.getboolean('advanced','bool'),True)", "def force_bool(value):\n if isinstance(value, (bool, int)):\n return bool(value)\n\n boolean_states = ConfigParser._boolean_states\n if not value.lower() in boolean_states:\n return None\n\n return boolean_states[value.lower()]", "def bool(self, item, default=None):\n try:\n item = self.__getattr__(item)\n except AttributeError as err:\n if default is not None:\n return default\n raise err\n\n if isinstance(item, (bool, int)):\n return bool(item)\n\n if (isinstance(item, str) and\n item.lower() in ('n', 'no', 'false', 'f', '0')):\n return False\n\n return True if item else False", "def get_bool(self, option, argument=None):\n return bool(self.get(option, argument))", "def getBoolean(self, section, option, default=False):\n return self.get(section, option, default, _bool)", "def get_bool(self, name, default=False):\n return self.get_as(self.parse_bool, name, default, value_type=bool)", "def getSetBoolean(self, key: str, default: bool | None = None) -> bool:\n value = self.parsedConfig.getboolean(key, default)\n self.parsedConfig[key] = str(value)\n return value", "def getboolean(self, option, default=None):\n\t\treturn self._get_raw(option, 'boolean', default)", "def getDbBoolNone(self, db, key):\n \n val = self.get(db + \".\" + key)\n if val == None:\n val = self.get(\"default.\" + key)\n if val == None:\n return None\n elif val == \"yes\":\n return True\n elif val == \"no\":\n return False\n else:\n raise Exception(\"invalid value for \" + db + \".\" + key + \": \\\"\"\n + val + \"\\\", expected \\\"yes\\\" or \\\"no\\\"\")", "def _parse_boolean(node, key):\r\n element = node.find(key)\r\n if element is not None:\r\n if element.text == 'true':\r\n return True\r\n else:\r\n return False\r\n else:\r\n return None", "def get_bool(options, name, default=False):\n value = options.get(name)\n if not value:\n return default\n if value.lower() == 'true':\n return True\n elif value.lower() == 'false':\n return False\n else:\n raise zc.buildout.UserError(\n \"Invalid value for %s option: %s\" % (name, value))", "def get_boolean_attribute_value(attrs, attr_name):\n return 1 if attrs.get(attr_name, 0) in [\"True\", \"1\"] else 0", "def get_bool(self, key, default=RequiredAttr()):\n if key in self.attrs:\n val = self.attrs[key]\n return 
val.strip().lower() in ['true', '1', 't', 'y', 'yes']\n if isinstance(default, RequiredAttr):\n raise AttributeError(\"Required attribute {} not found.\".format(key))\n return default", "def getboolean(self, option, default = None, section = None):\n v = self.get(option, default, section)\n if isinstance(v, str):\n v = v.lower()\n if v not in self.cfg._boolean_states:\n raise ValueError, \"Not a boolean: %s\" % v\n v = self.cfg._boolean_states[v]\n return v", "def getBooleanOption(aConfig, aSection, aOption):\n if aConfig.has_option(aSection, aOption):\n return aConfig.getboolean(aSection, aOption)\n else:\n # Default value. This should match the initialization done in\n # __init__ of class task in taskHandler.py\n if (aOption == \"fullScreenMode\" or\n aOption == \"formatOutput\" or\n aOption == \"compressOutput\"):\n return True\n else:\n # \"useWebDriver\"\n # \"runSlowTests\"\n # \"runSkipTests\"\n # \"useGrid\"\n return False" ]
[ "0.7446034", "0.7419813", "0.7418912", "0.73510367", "0.7257413", "0.72289276", "0.7186529", "0.7175947", "0.7143172", "0.71275026", "0.70742744", "0.7056427", "0.7030364", "0.70099545", "0.69862735", "0.69766414", "0.69182664", "0.68011564", "0.6796177", "0.67657363", "0.6759014", "0.6745357", "0.67371273", "0.67302126", "0.6705719", "0.6697973", "0.6695535", "0.669025", "0.66871625", "0.6668596" ]
0.7598526
0
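The boolean reader above consults RawConfigParser's table of truth strings via the private _boolean_states attribute; in current Python 3 the public spelling is BOOLEAN_STATES. A quick check of what that table accepts:

from configparser import RawConfigParser

# "1", "yes", "true", "on" map to True; "0", "no", "false", "off" map to False;
# anything else is rejected, which is what triggers the ValueError above.
states = RawConfigParser.BOOLEAN_STATES
print(states["yes"], states["off"])   # True False
print("maybe" in states)              # False
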
Generate a unique file name with a given extension in a given folder
def generate_unique_folder_name(folder, filename, extension): new_file_path = os.path.join(folder, "{}{}".format(filename, extension)) count = 2 while os.path.exists(new_file_path): new_file_path = os.path.join( folder, "{}_{}{}".format(filename, count, extension)) count += 1 return new_file_path
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generateFilename(folder, prefix, ext):\n filename = os.path.basename(os.path.normpath(folder))\n if prefix:\n filename = \"{0}-{1}\".format(prefix, filename)\n path = getIncrementedFilename(os.path.join(folder, filename), ext)\n return path", "def generate_filename(filename: str) -> str:\n return f\"{str(uuid.uuid4())}.{get_extension(filename)}\"", "def generate_filename(extension, with_path=True, base_folder=None):\n name = get_md5(str(uuid4()))\n # if not extension:\n # extension = get_file_extension()\n if base_folder is not None:\n base_folder = \"%s/\" % base_folder.rstrip(\"/\")\n else:\n base_folder = \"\"\n\n if with_path:\n return \"%s%s/%s/%s/%s.%s\" % (base_folder, name[0], name[1], name[2], name, extension)\n else:\n return \"%s%s.%s\" % (base_folder, name, extension)", "def unique_filename(data):\n file = data\n get_ext = file.filename.split(\".\")[-1]\n new_name = \"%s.%s\" % (uuid.uuid4().hex, get_ext)\n return new_name", "def gen_file_name(filename, path=UPLOAD_FOLDER):\n\n i = 1\n while os.path.exists(os.path.join(path, filename)):\n name, extension = os.path.splitext(filename)\n filename = '%s_%s%s' % (name, str(i), extension)\n i += 1\n\n return filename", "def get_unique_filename(fname, fdir):\n ext = path.splitext(fname)[1]\n\n while True:\n uid = uuid4().hex\n name = uid + ext\n if not path.exists(path.join(fdir, name)):\n return name", "def _generate_filename(instance, filename, prefix):\n md5 = hashlib.md5()\n md5.update(struct.pack('f', time.time()))\n for chunk in instance.file.chunks():\n md5.update(chunk)\n extension = os.path.splitext(filename)[1]\n return os.path.join(prefix, md5.hexdigest() + extension)", "def _create_unique_filename_with_integer_suffix(fullpath):\n # create an unique filename\n suffix = None\n suffix_cnt=1\n while os.path.exists(fullpath):\n if suffix: fullpath = fullpath[0:-len(suffix)]\n suffix = \".%s\" % suffix_cnt\n suffix_cnt+=1\n fullpath = fullpath + suffix\n return fullpath", "def gen_save_name(basename = os.getcwd()):\n fname, suffix = basename.split('.') # just assume this is true.\n qualifier = 1\n unique_fname = fname\n while (os.path.exists(unique_fname + '.' + suffix)):\n unique_fname = fname + '_{}'.format(qualifier)\n qualifier += 1\n return unique_fname + '.' + suffix", "def get_random_filename(dirname, ext):\n import random\n\n # assure a first go in the while loop\n found = 1\n\n # do until you find a unique name\n while found:\n\n # get a random int number\n str_num = str(random.randint(10000, 99999))\n\n # compose a random name\n fname = dirname + 'tmp' + str_num + ext\n\n # check whether the file exists\n if not os.path.isfile(fname):\n found = 0\n\n return fname", "def generate_filename(ext,sha512base16_hash=None):\n## # Timestamp filename\n## timestamp = str(get_current_unix_time())\n## filename = timestamp+\".\"+ext\n # Base16 hash filename\n filename = sha512base16_hash+\".\"+ext\n return filename", "def make_filename(key, extension):\n key = unicode(key.strip())\n return '{}.{}'.format(slugify(key), extension)", "def get_file_name() -> str:\n import uuid\n uniq_append_string = uuid.uuid4().hex\n return \"LOCAL_STORAGE_{}\".format(uniq_append_string)", "def GenerateOutputFilename(extension):\n\n # used for resolv.conf\n if '.' 
in extension:\n filename = extension\n else:\n output_base = 'namebench_%s' % datetime.datetime.strftime(datetime.datetime.now(),\n '%Y-%m-%d %H%M')\n output_base = output_base.replace(':', '').replace(' ', '_')\n filename = '.'.join((output_base, extension))\n\n output_dir = tempfile.gettempdir()\n return os.path.join(output_dir, filename)", "def directory_path(directory, file):\n try:\n file_name, file_extension = os.path.splitext(file)\n except Exception:\n file_extension = ''\n new_file_name = str(uuid.uuid4()) + file_extension\n return '{}/{}'.format(directory, new_file_name)", "def create_random_file_name():\n\n def random_file_name_factory():\n length = random.randint(10, 15)\n chars = string.ascii_letters + string.digits + \"-_\"\n return f\"{''.join(random.choice(chars) for _ in range(length))}.jpg\"\n\n return random_file_name_factory", "def filename_generate(image_class, size=12, chars=string.ascii_uppercase + string.ascii_lowercase + string.digits):\n\tnew_filename = time.strftime(\"%d-%m-%Y_\")\n\tnew_filename = new_filename + ''.join(random.choice(chars) for _ in range(size))\n\tnew_filename = new_filename + \"_P\" + str(image_class)\n\treturn new_filename", "def generate_random_name(filename):\n ext = filename.split('.')[-1]\n rns = [random.randint(0, len(LETTER_SET) - 1) for _ in range(3)]\n name = ''.join([LETTER_SET[rn] for rn in rns])\n return \"{new_fn}.{ext}\".format(new_fn=name, ext=ext)", "def generate_random_media_filepath(extension: str):\n\tfilename = f'{_generate_random_string(30)}{extension}'\n\treturn os.path.join(get_media_directory(), filename)", "def new_filename(fname=None,ndigits=3):\n if fname is None:\n ext = (\"%%.%ii\" % ndigits) % 1\n fname = \"%s.%s\" % (random_string(6), ext)\n \n if os.path.exists(fname): \n fname = increment_filename(fname,ndigits=ndigits)\n\n return fname", "def _generate_output_name(extension):\n output_name = 'TDG_{:%Y-%m-%d_%H-%M-%S}.{}'.format(datetime.now(), extension)\n return output_name", "def save_file_with_id_name(self, filename):\n file_ = filename.split(os.sep)[-1]\n extension = \".\".join(file_.split(\".\")[-1:])\n filename = str(uuid.uuid4()) + \".\" + extension\n return filename", "def make_img_name(file_ext='.png'):\r\n fn = []\r\n # format seqs and write out to temp file\r\n for i in range(0, 30):\r\n fn.append(choice(ALPHABET))\r\n return ''.join(fn) + file_ext", "def generate_namefile(pathfolder, methodvalues):\n datestr = datetime.datetime.now().date().strftime('%F')\n paramsstr = str(hash(str(methodvalues)))\n namefile = datestr + '-' + methodvalues['codename'] + '_' + paramsstr\n namefile = os.path.join(pathfolder, namefile)\n return namefile", "def _create_file_name(file_path):\r\n file_base, file_ext = os.path.splitext(file_path)\r\n if os.path.isfile(file_path):\r\n nfile = 1\r\n check = True\r\n while check:\r\n name_add = '0000' + str(nfile)\r\n file_path = file_base + \"_\" + name_add[-4:] + file_ext\r\n if os.path.isfile(file_path):\r\n nfile = nfile + 1\r\n else:\r\n check = False\r\n return file_path", "def random_filename_upload_to(path):\n\n def f(instance, filename):\n ext = filename.split('.')[-1]\n filename = '{0}.{1}'.format(uuid.uuid4().hex, ext)\n return os.path.join(path, filename)\n\n return f", "def generateFilename(self, name):\n return self.context.generateUniqueId(type_name='Module')", "def generate(self, extension=None):\n filename = self._template.format(\n index=len(self._filenames),\n uuid=uuid4().hex,\n extension=extension or self._default_extension,\n )\n 
self._filenames.append(filename)\n return filename", "def _unique_path(prefix):\n suffix = ''.join([\n random.choice(string.ascii_letters) for i in range(8)\n ])\n return '%s/%r.%s' % (prefix, time.time(), suffix)", "def get_alternative_name(self, file_root, file_ext):\n return \"%s_%s%s\" % (file_root, get_random_string(7), file_ext)" ]
[ "0.82401055", "0.8172587", "0.8163002", "0.78911734", "0.776703", "0.76928383", "0.7683016", "0.76110697", "0.748056", "0.7458569", "0.7343265", "0.7240057", "0.7235643", "0.7173044", "0.7144841", "0.71235543", "0.71194607", "0.70785904", "0.7068501", "0.706414", "0.70498544", "0.7046028", "0.70344716", "0.7029521", "0.70231444", "0.7005655", "0.70051795", "0.69791996", "0.6975", "0.6969561" ]
0.83766705
0
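A short, runnable demonstration of the collision handling in the record above, run against a throw-away temporary directory:

import os
import tempfile

def generate_unique_folder_name(folder, filename, extension):
    new_file_path = os.path.join(folder, "{}{}".format(filename, extension))
    count = 2
    while os.path.exists(new_file_path):
        new_file_path = os.path.join(
            folder, "{}_{}{}".format(filename, count, extension))
        count += 1
    return new_file_path

with tempfile.TemporaryDirectory() as folder:
    first = generate_unique_folder_name(folder, "report", ".txt")
    open(first, "w").close()                      # claim the first name
    second = generate_unique_folder_name(folder, "report", ".txt")
    print(os.path.basename(first), os.path.basename(second))
    # report.txt report_2.txt
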
Locate the chip with the given board IP address
def get_ethernet_chip(machine, board_address): for chip in machine.ethernet_connected_chips: if chip.ip_address == board_address: return chip raise ConfigurationException( "cannot find the Ethernet connected chip with the board address {}" .format(board_address))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ip_lookup(self, ip_address):\r\n obj = self.client['Network_Subnet_IpAddress']\r\n return obj.getByIpAddress(ip_address, mask='hardware, virtualGuest')", "def find_my_IP_and_MAC():\n mac = ':'.join(re.findall('..', '%012x' % getnode()))\n # I write IP and not domain cause i want to save time.\n p = sr1(IP(dst=\"google.com\", ttl=0) / ICMP() / \"XXXXXXXXXXX\",verbose=0,timeout=5) #verbose = withuot output\n return mac,p.dst", "def lookup_ip(ikey, skey, host, ip):\n response = client.call_json_api(\n ikey, skey, host, 'GET', '/verify/v1/lookup/ip.json',\n ip=[ip])\n return response", "def searchCountry(host):\n process = subprocess.Popen(\"geoiplookup \"+host,stdout=subprocess.PIPE, shell=True)\n (output, err) = process.communicate()\n secondPart = output.split(\"GeoIP Country Edition: \", 1)[1]\n country = secondPart.split(\"\\nGeoIP City Edition\", 1)[0]\n return country", "def get_address(machine: Machine) -> str:\n default_route, _ = machine.run(\"ip route get 8.8.8.8\")\n return re.search(\" src ([0-9.]+) \", default_route).group(1)", "def get_ip(self):", "def machine_lookup(session, hostname, public_ip = True):\n\n try:\n idx, target = hostname.split('.', 1)\n idx = int(idx) # if it is not a valid number, then it is a hostname\n hostname = target\n except:\n idx = 0\n\n client = session.client('ec2')\n response = client.describe_instances(Filters=[{\"Name\":\"tag:Name\", \"Values\":[hostname]},\n {\"Name\":\"instance-state-name\", \"Values\":[\"running\"]}])\n\n item = response['Reservations']\n if len(item) == 0:\n print(\"Could not find IP address for '{}'\".format(hostname))\n return None\n else:\n item.sort(key = lambda i: i['Instances'][0][\"InstanceId\"])\n\n if len(item) <= idx:\n print(\"Could not find IP address for '{}' index '{}'\".format(hostname, idx))\n return None\n else:\n item = item[idx]['Instances'][0]\n if 'PublicIpAddress' in item and public_ip:\n return item['PublicIpAddress']\n elif 'PrivateIpAddress' in item and not public_ip:\n return item['PrivateIpAddress']\n else:\n print(\"Could not find IP address for '{}'\".format(hostname))\n return None", "def get_device_by_ip(self, device_ip):\n\n found_device = None\n\n devices = self.get_all_devices()\n for device in devices:\n if getattr(device, 'ip', None) == device_ip:\n found_device = device\n\n if found_device is None:\n raise LogglyException(\"No device found with ip: %s\" % device_ip)\n\n return found_device", "def get_region(ip_str, topology_map): \n ip = ipaddress.ip_address(ip_str)\n\n for net in topology_map:\n if ip in net:\n return topology_map[net]\n\n return None", "def known_ip(ip=DEFAULT_IP):\r\n tunnel(ip)", "def locator(pcap_obj,kml_file):\r\n ip_list = []\r\n for ts, buf in pcap_obj:\r\n eth = dpkt.ethernet.Ethernet(buf)\r\n ip = eth.data\r\n try: # extract all unique IPs\r\n src_ip = str(socket.inet_ntoa(ip.src))\r\n dst_ip = str(socket.inet_ntoa(ip.dst))\r\n if src_ip in ip_list:\r\n pass\r\n else:\r\n ip_list.append(src_ip)\r\n if dst_ip in ip_list:\r\n pass\r\n else:\r\n ip_list.append(dst_ip)\r\n except AttributeError:\r\n pass\r\n\r\n try:\r\n reader = geoip2.database.Reader('GeoLite2-City_20190129.mmdb') # reading from db(can be redacted)\r\n except FileNotFoundError:\r\n print(f'[!]DB file not in current directory or with a different file name')\r\n sys.exit(1)\r\n area = []\r\n longitude = []\r\n latitude = []\r\n ips = []\r\n for ip_addr in ip_list:\r\n try:\r\n rec = reader.city(ip_addr) # reading IP\r\n country = rec.country.iso_code # assigning country and city\r\n city = 
rec.city.name\r\n if city is None and country is None:\r\n area.append('Unknown')\r\n elif city is None:\r\n area.append(f'Unknown city:{country}') # looking for unknown country\r\n elif country is None:\r\n area.append(f'Unknown country:{city}') # looking for unknown city\r\n else:\r\n area.append(f'{city} {country}')\r\n\r\n longitude.append(rec.location.longitude)\r\n latitude.append(rec.location.latitude)\r\n ips.append(ip_addr)\r\n except geoip2.errors.AddressNotFoundError:\r\n pass\r\n\r\n try:\r\n kml = simplekml.Kml()\r\n final_path = str(os.getcwd() + os.sep + kml_file) # defining full canonical path\r\n for i in range(0, len(ips)):\r\n kml.newpoint(name=(area[i]),\r\n coords=[(longitude[i], latitude[i])],\r\n description=f'[+] Location = {area[i]}\\n IP: {ips[i]}')\r\n kml.save(final_path)\r\n print(f\"[+] Writing IP locations to {kml_file}\") # writing data to a KML file\r\n print(f\"[+] Opening Google Earth with:{kml_file}\\n\") # reading file with google earth\r\n try:\r\n os.startfile(final_path)\r\n except OSError:\r\n print(f'[!] Warning: Google Earth must be installed to open the kml')\r\n except FileNotFoundError:\r\n pass", "def find_piece(self, piece_to_find):\n for row in range(BOARD_SIZE):\n for col in range(BOARD_SIZE):\n if self.board[row][col] is piece_to_find:\n return Square.at(row, col)\n raise Exception('The supplied piece is not on the board')", "def find_piece(self, piece_to_find):\n for row in range(BOARD_SIZE):\n for col in range(BOARD_SIZE):\n if self.board[row][col] is piece_to_find:\n return Square.at(row, col)\n raise Exception('The supplied piece is not on the board')", "def get_ip(pc_name):\n pc_ip = '' \n try: \n pc_ip = socket.gethostbyname(pc_name) \n except Exception, e:\n initlog('failed to get PC ip; %s' % str(e)) \n return pc_ip", "async def get_cell_type(ip: str):\n try:\n async with asyncssh.connect(ip,username=USER_IPACCESS,password=PASS_IPACCESS,known_hosts=None) as conn:\n result = await conn.run('ls /opt/ipaccess')\n if 'DMI' in result.stdout:\n return '3g'\n else:\n return '4g'\n except:\n LOGGER.warning(f\"unable to get cell type from {ip}\")", "def checkIP(self):\n\t\tself.get(\"https://ifconfig.me/\")\n\t\treturn self.findId(\"ip_address\").text", "def host_ip(host):\n return host.cmd('ip addr show {}-eth1 | awk \\'/inet / {{ print $2 }}\\' | cut -d\\'/\\' -f1'.format(host.name, host.name), stdout=sp.PIPE).strip()", "def __lookup_public_ip(self):\n\n response = requests.get('https://api.ipify.org?format=json', timeout=self.timeout)\n\n if response.status_code == 200:\n ip_data = response.json()\n if 'ip' not in ip_data.keys():\n return 'Unable to determine IP'\n else:\n return ip_data['ip']\n else:\n return 'Unable to determine IP'", "def lookup_socket(self, address): # TODO: optimize me\n\n net_tuple = self.read_nodestate(0)\n for item in net_tuple:\n discovered_address = item[1]\n if address == discovered_address:\n return item[0]", "def find(self, start: ghidra.program.model.address.Address, value: int) -> ghidra.program.model.address.Address:\n ...", "def ip_lookup(ip):\n # Create the required data dictionary for Host/Reputation\n api_data = {\n 'host': ip\n }\n response = http_request(endpoint=HOST_REPUTE_API, data=api_data)\n\n if response.get('errorNo') != 0:\n return_error('API Returned, {}:{}'.format(response.get('errorNo'), response.get('errorMsg')))\n\n return response", "def scan(ip):\n arp_request = scapy.ARP(pdst=ip)\n broadcast = scapy.Ether(dst=\"ff:ff:ff:ff:ff:ff\")\n arp_request_broadcast = broadcast/arp_request\n 
answered_list = scapy.srp(arp_request_broadcast , timeout = 1, verbose=False)[0]\n target_list=[]\n for element in answered_list:\n target_dict = {\"ip\":element[1].psrc, \"mac\":element[1].hwsrc}\n target_list.append(target_dict)\n return target_list", "def ip(self, mess, args):\n return '%s\\n' % urlgrabber.urlread('http://whatismyip.org')", "def identify_remote_router(remote_address):\n global DATA\n port = remote_address[1]\n for every_router in DATA[\"neighbor\"]:\n if every_router[2] is port:\n return every_router[0]", "def ip_command():\n # 1. Get input host from Demisto\n ip = demisto.args().get('ip')\n if not is_ip_valid(ip):\n return_error('Invalid IP address, Please retry with a valid IP address')\n # 2. Get the host reputation from SlashNext API\n response = ip_lookup(ip=ip)\n if response.get('errorNo') != 0:\n return\n # 3. Parse and format the response\n dbot_score_cont, ip_cont = get_dbot_std_context(\n ip, 'IP', response.get('threatData').get('verdict'), response.get('threatData').get('threatType'))\n\n snx_ioc_cont = get_snx_host_ioc_context(ip, 'IP', response.get('threatData'))\n\n ec = {\n 'SlashNext.IP(val.Value === obj.Value)': snx_ioc_cont,\n 'DBotScore': dbot_score_cont,\n 'IP': ip_cont\n }\n\n title = 'SlashNext Phishing Incident Response - IP Lookup\\n' \\\n '##### ip = {}'.format(ip)\n\n md = tableToMarkdown(\n title,\n snx_ioc_cont,\n ['Value',\n 'Type',\n 'Verdict',\n 'ThreatStatus',\n 'ThreatName',\n 'ThreatType',\n 'FirstSeen',\n 'LastSeen']\n )\n\n return_outputs(md, ec, snx_ioc_cont)", "def findinput(self,inpnum,board):\n for i in self.inputs:\n #if i.inpnum == inpnum and i.board == board:\n if i.board == '1':\n ctpnum=i.ctpnum\n else:\n ctpnum=i.ctpnum\n #print ctpnum,' ctpnum,inpnum ' ,inpnum\n if ctpnum == inpnum and i.board == board:\n return self.inputs.index(i)\n return None", "def geoip(self, irc, msg, args, ip):\n \n record = self._record_by_addr(ip)\n if record:\n reply = u'%s (%s)' % (ip, self._geoip_city_check(record))\n else:\n reply = u'geoIP Fehler!'\n irc.reply(reply.encode('utf-8'))", "def get_primary_ip(options, index):\n\n second_octet = 160 + index\n return \"192.%s.1.1\" % second_octet", "def getnetwork(ipaddr):\n return '192.168.1.0/24'", "def geolocate(ip):\n API_METHOD = [\"GET\", \"POST\"]\n IPINFO_URL = \"http://ip-api.com/json/\" + ip\n hdr = {\"content-type\": \"application/json\", \"user-agent\": \"SimplePythonFoo()\"}\n\n try:\n r = requests.request(\n API_METHOD[0],\n IPINFO_URL,\n headers=hdr\n )\n\n if r.status_code == 200:\n resp = r.json()\n logger.info(resp)\n else:\n logger.info(\"Geolocate API called returned HTTP Status Code: {}\".format(str(r.status_code)))\n\n except requests.HTTPError as http_err:\n logger.info(\"Geolocate API call returned HTTP Error: {}\".format(str(http_err)))\n\n return ip" ]
[ "0.60897994", "0.5946037", "0.578702", "0.5761881", "0.56221294", "0.5579791", "0.5571035", "0.5568465", "0.55532104", "0.55228585", "0.55188143", "0.5451177", "0.5451177", "0.5436671", "0.5430195", "0.54182607", "0.53620946", "0.5334949", "0.53317755", "0.5324168", "0.5319523", "0.5309259", "0.53072846", "0.5300295", "0.52892125", "0.52873665", "0.52710193", "0.52672106", "0.52620786", "0.5250416" ]
0.64811116
0
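The lookup in the record above is a plain scan of machine.ethernet_connected_chips. A stand-alone illustration with a namedtuple in place of the real Machine/Chip classes (the real code raises a ConfigurationException rather than ValueError):

from collections import namedtuple

# Stand-in for SpiNNMachine Chip objects; only ip_address matters here.
Chip = namedtuple("Chip", ["x", "y", "ip_address"])
ethernet_chips = [Chip(0, 0, "192.168.240.1"), Chip(8, 4, "192.168.240.9")]

def find_ethernet_chip(chips, board_address):
    for chip in chips:
        if chip.ip_address == board_address:
            return chip
    raise ValueError("no Ethernet chip with board address " + board_address)

print(find_ethernet_chip(ethernet_chips, "192.168.240.9"))
# Chip(x=8, y=4, ip_address='192.168.240.9')
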
Convert a time diff to total milliseconds
def convert_time_diff_to_total_milliseconds(sample): return (sample.total_seconds() * 1000.0) + (sample.microseconds / 1000.0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ms_from_timedelta(td):\n return (td.seconds * 1000) + (td.microseconds / 1000.0)", "def _ms_to_time(self, milliseconds):\n \n ms = milliseconds\n \n # Get the last 3 digits of the milliseconds\n trunc_ms = ms % 1000\n seconds = (ms / 1000)\n minutes = (seconds / 60)\n hours = minutes / 60\n \n # hours can go above 24, so don't modulus\n return '%02d:%02d:%02d,%03d' % (hours, minutes % 60, seconds % 60, trunc_ms)", "def _time_ms(dt):\n epoch = datetime.datetime.utcfromtimestamp(0)\n diff = dt - epoch\n return diff.total_seconds() * 1000", "def ms_to_time(ms):\n milliseconds = int(ms % 1000)\n seconds = int((ms / 1000) % 60)\n minutes = int(((ms / 1000 - seconds) / 60) % 60)\n\n return (minutes, seconds, milliseconds)", "def _to_milliseconds(self, time):\n if isinstance(time, dt.datetime):\n return int(time.timestamp() * 1e3)\n elif isinstance(time, int):\n return time\n else:\n raise NotImplementedError(\"Time format not supported. Use epochs, Datetime or Pandas Datetime\")", "def __timedelta_millis(td):\n return int(round(td.total_seconds(), 3) * 1000)", "def pts2ms(pts, dt):\n return pts*dt", "def convert_time(time_passed):\n\n minutes = time_passed.seconds // 60\n\n return minutes", "def _convert_to_timedelta(time_diff):\n return timedelta(microseconds=time_diff / _NANO_TO_MICRO)", "def reduce_time_bytes(time_in_ms):\n time_in_s = time_in_ms / 1000\n time_adjust = time_in_s - glob.base_time\n return time_adjust", "def get_time_ms():\n return int(round(time.time() * 1000))", "def _convert_time(self, duration):\n in_sec = int(int(duration) / 1000)\n in_time = int(in_sec / 60) + (0.01 * (in_sec % 60))\n return in_time", "def _STEPS2TIME(step):\n return step/1000.", "def MillisToSec(self):\n self.Millis = [item / 1000 for item in self.Millis]\n return self.Millis", "def time_diff(start_time, end_time=None, choice='millis'):\n if end_time is None:\n end_time = datetime.now()\n timediff = (end_time - start_time)\n if choice == 'millis':\n return timediff.seconds * 1000 + int(timediff.microseconds) / 1000\n elif choice == 'micros':\n return timediff.seconds * 1000 + float(timediff.microseconds) / 1000", "def get_millis(seconds):\n return seconds * 10 ** 3", "def units_to_msec(units, resolution):\n time_ms = units * float(resolution) / 1000\n return time_ms", "def millis(start_time):\n dt = datetime.now() - start_time\n ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0\n return ms", "def _time_ms(self, dt):\n if dt.tzinfo is None:\n dt = dt.replace(tzinfo=pytz.utc)\n return int((dt - self._EPOCH).total_seconds() * 1000)", "def _elapsed_milliseconds_since(self, start_time: datetime) -> int:\n end_time = datetime.now()\n time_diff = end_time - start_time\n elapsed_time = int(time_diff.total_seconds() * 1000)\n return elapsed_time", "def get_time_diff(start_time_ms: int) -> float:\n end_time_ms = RemoteProvisionerBase.get_current_time()\n time_diff = float((end_time_ms - start_time_ms)/1000)\n return time_diff", "def time_ms():\n return int(1000 * time.time())", "def timedeltaToFloat(self,time_d):\n time_d_min = time_d / timedelta(minutes=1)\n time_d_s = time_d / timedelta(seconds=1)\n time_d_ms = time_d / timedelta(milliseconds=1)\n\n return (time_d_min * 60 + time_d_s + time_d_ms * 0.001)", "def kmh_to_ms(speed_in_kmh):\n meters_per_second = speed_in_kmh * 1000 / 3600\n return meters_per_second", "def _TIME2STEPS(time):\n return int(time*1000)", "def clean_time_in_milliseconds(cls, seconds=None, milliseconds=None, default_seconds=1, minimum_milliseconds=200):\n 
#Sanitise inputs:\n try:\n seconds = float(seconds)\n except (TypeError, ValueError):\n seconds = None\n try:\n milliseconds = float(milliseconds)\n except (TypeError, ValueError):\n milliseconds = None\n \n #Resolve total time\n if seconds is None and milliseconds is None:\n out_milliseconds = default_seconds * 1000 #1s\n else:\n seconds = seconds or 0\n milliseconds = milliseconds or 0\n out_milliseconds = seconds*1000 + milliseconds\n \n #Check this isn't stupidly short\n if out_milliseconds < minimum_milliseconds:\n out_milliseconds = minimum_milliseconds\n \n return out_milliseconds", "def _unit_ms(self):\n return (self.time_base / 1000.0) / 60.0", "def test_milliseconds():\n assert_equal(datetime.timedelta(seconds=0.01), convert_delta(\"10ms\"))", "def _convert_to_timedelta(time_diff):\n return timedelta(seconds=time_diff)", "def _time_delta_from_info(info):\n now = datetime.datetime.now()\n then = info.start_time\n return str(now.replace(microsecond=0) - then.replace(microsecond=0))" ]
[ "0.752509", "0.6861813", "0.6710044", "0.6659891", "0.6555605", "0.64684844", "0.64403945", "0.6425974", "0.6407556", "0.63663876", "0.6331549", "0.6327947", "0.6302602", "0.628973", "0.62519073", "0.6215968", "0.6204256", "0.6162119", "0.61504215", "0.6111835", "0.60779774", "0.606825", "0.60620594", "0.60424376", "0.60406226", "0.60310584", "0.6029301", "0.59731627", "0.59728456", "0.5932125" ]
0.7738418
0
Return the tabulated dielectric constant of water at a given temperature and a pressure of 0.1 MPa
def dielectric_constant_water(temperature=298.15):
    tabulated_data = np.array([[263.15, 92.10],
                               [268.15, 89.96],
                               [273.15, 87.90],
                               [278.15, 85.90],
                               [283.15, 83.96],
                               [288.15, 82.06],
                               [293.15, 80.20],
                               [298.15, 78.38],
                               [303.15, 76.60],
                               [308.15, 74.86],
                               [313.15, 73.17],
                               [318.15, 71.50],
                               [323.15, 69.88],
                               [328.15, 68.29],
                               [333.15, 66.74],
                               [338.15, 65.22],
                               [343.15, 63.73],
                               [348.15, 62.28],
                               [353.15, 60.87],
                               [358.15, 59.48],
                               [363.15, 58.13],
                               [368.15, 56.81],
                               [373.15, 55.51]])
    polynomial_degree = 5
    fitdata = np.polyfit(tabulated_data[:, 0], tabulated_data[:, 1], polynomial_degree)
    fitfunction = np.poly1d(fitdata)
    return fitfunction(temperature)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def thermal_conductivity(temperature):\n a0 = -4.1236\n a1 = 13.788\n a2 = -26.068\n a3 = 26.272\n a4 = -14.663\n a5 = 4.4954\n a6 = -0.6905\n a7 = 0.0397\n log_t = math.log10(temperature)\n f_exp = a0 + a1*log_t + a2*log_t**2.0 + a3*log_t**3.0 + a4*log_t**4.0 + \\\n a5*log_t**5.0 + a6*log_t**6.0 + a7*log_t**7\n g10_thermal_conductivity = 10.0**f_exp\n return g10_thermal_conductivity", "def temperature() -> float:", "def pressure(z: tf.Tensor) -> tf.Tensor:\n return self._p_thermal * tf.math.exp(\n -(z + self._height * delta_t_frac *\n (tf.math.log(1.0 - delta_t_frac * tf.math.tanh(z / self._height)) -\n tf.math.log(1.0 + tf.math.tanh(z / self._height)) +\n z / self._height)) / h_sfc / (1.0 - delta_t_frac**2))", "async def delta_t(self, temp, humidity, pressure):\n\n wb = await self.wetbulb(temp, humidity, pressure)\n deltat = temp - wb\n\n return await self.temperature(deltat)", "def volumetric_heat_capacity(temperature):\n a = -2.4083\n b = 7.6006\n c = -8.2982\n d = 7.3301\n e = -4.2386\n f = 1.4294\n g = -0.24396\n h = 0.015236\n i = 0.0\n log_t = math.log10(temperature)\n f_exp = a + b*log_t + c*log_t**2.0 + d*log_t**3.0 + e*log_t**4.0 + f*log_t**5.0 + g*log_t**6.0 + \\\n h*log_t**7.0 + i*log_t**8.0\n g10_cp = 10.0**f_exp\n return g10_cp * G10NISTMaterialProperties.density", "def density_from_pressure(temperature, pressure, RH):\n # R = specific gas constant , J/(kg*degK) = 287.05 for dry air\n Rd = 287.05\n # http://www.baranidesign.com/air-density/air-density.htm\n # http://wahiduddin.net/calc/density_altitude.htm\n # Evaporation into the Atmosphere, Wilfried Brutsaert, p37\n # saturation vapor pressure is a polynomial developed by Herman Wobus\n e_so = 6.1078\n c0 = 0.99999683\n c1 = -0.90826951e-2\n c2 = 0.78736169e-4\n c3 = -0.61117958e-6\n c4 = 0.43884187e-8\n c5 = -0.29883885e-10\n c6 = 0.21874425e-12\n c7 = -0.17892321e-14\n c8 = 0.11112018e-16\n c9 = -0.30994571e-19\n \n p = (c0 + temperature*(\n c1 + temperature*(\n c2 + temperature*(\n c3 + temperature*(\n c4 + temperature*(\n c5 + temperature*(\n c6 + temperature*(\n c7 + temperature*(\n c8 + temperature*(\n c9)))))))))) \n \n sat_vp = e_so / p**8\n Pv = sat_vp * RH\n density = (pressure / (Rd * temperature)) * (1 - (0.378 * Pv / pressure))\n return density", "def heat(self, delta_temp):\n return self.heat_capacity * self.mass * delta_temp", "def thermal_velocity(charge, temperature, mass):\n return np.sqrt(2*abs(charge)*temperature/mass)", "def _calculate_temperature(c, h):\n\n return (c - 331.4 - 0.0124 * h) / 0.6", "def get_compensated_temperature() -> float:\n comp_factor = 2.25\n cpu_temp = get_cpu_temperature()\n raw_temp = bme280.get_temperature()\n comp_temp = raw_temp - ((cpu_temp - raw_temp) / comp_factor)\n # print(\"\"\"\n # Compensated_Temperature: {:05.2f} *C\n # Pressure: {:05.2f} hPa\n # Relative humidity: {:05.2f} %\n # \"\"\".format(temperature, pressure, humidity))\n return comp_temp", "def temperature() -> FlowFieldVal:\n return [\n self._t_s - self._delta_t * tf.math.tanh(z / self._height) for z in zz\n ]", "def tempAir(sample):\n sample *= 1.0\n sample /= 1000\n celsius = (sample - 0.5) * 100\n return round(celsius,2)", "def soil_temp_factor(self, project_day):\n tsoil = self.met_data['tsoil'][project_day]\n\n if float_gt(tsoil, 0.0):\n tfac = (0.0326 + 0.00351 * tsoil**1.652 - (tsoil / 41.748)**7.19)\n if float_lt(tfac, 0.0):\n tfac = 0.0\n else:\n # negative number cannot be raised to a fractional power\n # number would need to be complex\n tfac = 0.0\n\n return tfac", "def 
compute_dewpoint(temperature, humidity):\n\n temp_C = (temperature - 32) * 5 / 9 # Convert temperature from deg F to deg C\n rh = humidity / 100\n\n b = 18.678\n c = 257.14 # deg C\n\n gamma = math.log(rh) + (b * temp_C) / (c + temp_C)\n tdp = c * gamma / (b -gamma)\n\n tdp_F = 9 / 5 * tdp + 32 # Convert temperature from deg C to deg F\n return tdp_F;", "def get_temperature(self): # This function implements the equations needed to convert the digital data to degrees celsius\n C_1, C_2, C_3, C_4, C_5, C_6=self.calibration_constants()\n self.digital_temp_data() \n dT = self.tempadc-(C_5*(2**8))\n temperature=(2000+(dT*(C_6/(2**23))))/100\n return temperature, dT", "def tempWater(sample):\n sample *= .0009\n sample *= 1000\n celsius = (sample - 20.5128) * 0.0512\n return round(celsius,2)", "def temperature(self):\n return self.read_short(65) / 340.0 + 36.53", "def thermal_conductivity_of_air(self) -> float:\n\n # This more accurate equation is not used by the paper.\n # return (0.02646 * self.ambient_temperature ** 1.5) / (\n # self.ambient_temperature + 254.4 * (10 ** (-12 / self.ambient_temperature))\n # )\n\n # The reference suggests this equation is accurate to 1%.\n return 0.02646 * (self.ambient_temperature / 300) ** 0.8646", "def temperature(self):\r\n self._read_temperature()\r\n return self._t_fine / 5120.0", "def thermalConductivity(self, Tk=None, Tc=None):\n Tk = getTk(Tc, Tk)\n self.checkPropertyTempRange(\"thermal conductivity\", Tk)\n thermalConductivity = (\n 2.13014e-08 * Tk**3\n - 6.31916e-05 * Tk**2\n + 1.11629e-01 * Tk\n - 2.00043e00\n )\n return thermalConductivity * 1e-3", "def thermal_state(self, beta: float = .1) -> numpy.ndarray:\n rho = numpy.exp(-beta * self.cost)\n return rho / numpy.sum(rho)", "def temperature(k, kmax):\n return 1.0 / 500 * (1.0 / k - 1.0 / kmax)", "def temperature(self):\n value = float(self._parent.query('R{}'.format(self._idx))[1:])\n return pq.Quantity(value, pq.Kelvin)", "def get_specific_heat() -> float:\n return 1006.0", "def Temp(t):\n return 20 # Need to link to data", "def chemical_potential(T, pressure):\n import numpy as np\n\n kb = 1.38064852e-23 # Boltzmann constant (J K^{-1})\n planck = 6.62607004e-34 # Planck constant\n mass = 2.0 * 1.00784 * 1.6737236e-27 # Mass of H2 in Kg\n beta = 1.0 / (kb * T) # Thermodynamic beta\n\n # Thermal wavelength\n thermal_wav = planck * planck * beta / (2 * np.pi * mass)\n thermal_wav = thermal_wav ** (1/2.)\n\n # Calculate chemical potential \n chemical_pot = np.log(pressure * thermal_wav ** 3 * beta) * T\n \n return chemical_pot", "def temperature(self):\n return float(self._current_observation['temp_c'])", "def plc_temp(coil_df):", "def sea_still_water_pressure(z, t1, rho=1.025, g=9.81):\r\n\r\n if z <= t1:\r\n return rho * g * (t1 - z)\r\n else:\r\n return 0", "def calculateTemperature(self):\n \n # CIE XYZ space\n self.X = (1/0.17697)*((0.49)*self.R + (0.31)*self.G + (0.2)*self.B)\n self.Y = (1/0.17697)*((0.17697)*self.R + (0.81240)*self.G + (0.01063)*self.B)\n self.Z = (1/0.17697)*((0)*self.R + (0.010)*self.G + (0.99)*self.B)\n\n # CIE Chromaticities xy\n self.x = self.X/(self.X + self.Y + self.Z)\n self.y = self.Y/(self.X + self.Y + self.Z)\n \n # CIE Chromaticities uv\n #self.u = (0.4661*self.x + 0.1593*self.y)/(self.y - 0.15735*self.x + 0.2424)\n #self.v = (0.6581*self.y)/(self.y - 0.15735*self.x + 0.2424)\n \n # constant for McCamy's/Hernandez-Andrés formula\n n = (self.x - self.x_e)/(self.y - self.y_e)\n \n # Correlated color temperature according to Hernández-Andrés (1999)\n 
self.color_temp = ( self.A_0 + \n self.A_1*np.exp(-n/self.t_1) + \n self.A_2*np.exp(-n/self.t_2) + \n self.A_3*np.exp(-n/self.t_3) )\n \n # Delete too high values\n self.color_temp[self.color_temp > 30000] = 0\n \n # Affichage de la CCT\n self.mean_temp = int(round(self.color_temp.mean()))\n self.mean_temp_label.setText(\"Temperature moyenne = \"+str(self.mean_temp))\n self.mean_temp_label.adjustSize()\n \t\n # Affichage de l'illuminance (Y)\n self.mean_illu = int(round((self.Y.mean())))\n self.illuminance_label.setText(\"Illuminance moyenne = \"+str(self.mean_illu))\n self.illuminance_label.adjustSize()" ]
[ "0.6624659", "0.65552676", "0.63140374", "0.62205195", "0.6188801", "0.6177481", "0.61567646", "0.61184454", "0.6116112", "0.60913014", "0.60870373", "0.6081141", "0.6051456", "0.598776", "0.5978984", "0.59748316", "0.5939052", "0.593122", "0.590306", "0.5893938", "0.5886913", "0.5873047", "0.58579063", "0.5843485", "0.5808153", "0.57820725", "0.57789624", "0.57765585", "0.5772357", "0.57688427" ]
0.67020136
0
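A quick usage sketch for the dielectric_constant_water entry above (the function is assumed to be importable exactly as defined there): the degree-5 polynomial fit should reproduce the tabulated values almost exactly.

# dielectric_constant_water is assumed to be in scope as defined above.
print(dielectric_constant_water(298.15))  # tabulated value is 78.38
print(dielectric_constant_water(353.15))  # tabulated value is 60.87
print(dielectric_constant_water(310.0))   # interpolated, roughly between 74.86 and 73.17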
Return the Bjerrum length of water at a given temperature. The Bjerrum length is defined as the separation at which the electrostatic interaction energy between two elementary charges equals the thermal energy k_B*T.
def bjerrum_length_water(temperature=298.15):
    bjerrum = np.power(ELECTRON_CHARGE, 2.0) / \
        (4.0 * np.pi * ELECTRIC_CONSTANT * dielectric_constant_water(temperature) *
         BOLTZMANN_CONSTANT * temperature)
    return bjerrum
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def planckwavelen(wavel,Temp):\n wavel=wavel*1.e-6 #convert to meters\n c1=2.*h*c**2.\n c2=h*c/kb\n Blambda=1.e-6*c1/(wavel**5.*(np.exp(c2/(wavel*Temp)) -1))\n return Blambda", "def get_bend_length(self):\n # The length of a parametric curve x(t) y(t) is Integral[ sqrt( (dx/dt)^2 + (dy/dt)^2 ), {t,0,t0}], which for a Fresnel curve, simplifies to just t0\n return 4 * self.t * self.scale_factor", "def debye_length_m(electron_density, electron_temperature):\n return 0.069 * np.sqrt(electron_temperature / electron_density)", "def wavelength(energy):\r\n return 2 * np.pi * PLANCK_CONSTANT * SPEED_OF_LIGHT / energy", "def wavelength(energy):\n return 2 * PI * PLANCK_CONSTANT * SPEED_OF_LIGHT / energy", "def temperature() -> float:", "def branchLength(self,branch):\n\t\t\n\t\tlength = 0\n\t\tfor p,point in enumerate(branch[:-1]):\n\t\t\tlength+=self.eucdist3d(branch[p],branch[p+1])\n\t\t\n\t\treturn(length)", "def bottle_duration(consumption, pressure, empty_pressure=0):\n available = pressure - empty_pressure\n return math.floor(available / consumption)", "def get_wavelength(self):\n E = -self.E0*(1.0/self.n_low**2 - 1.0/self.n_high**2)\n return SI['hc']*1e12/(E*SI['keV'])", "def getBatteryTemperature(self) -> int:\n if not self.debug:\n self.myFieldFox.write(\"\tSYST:BATT:TEMP?\")\n ret = self.myFieldFox.read()\n else:\n ret = 60\n return ret", "async def wetbulb(self, temp, humidity, pressure):\n t = float(temp)\n rh = float(humidity)\n p = float(pressure)\n\n # Variables\n edifference = 1\n twguess = 0\n previoussign = 1\n incr = 10\n es = 6.112 * math.exp(17.67 * t / (t + 243.5))\n e2 = es * (rh / 100)\n\n while (abs(edifference) > 0.005):\n ewguess = 6.112 * math.exp((17.67 * twguess) / (twguess + 243.5))\n eguess = ewguess - p * (t - twguess) * 0.00066 * (1 + (0.00115 * twguess))\n edifference = e2 - eguess\n if edifference == 0:\n break\n\n if edifference < 0:\n cursign = -1\n if (cursign != previoussign):\n previoussign = cursign\n incr = incr / 10\n else:\n incr = incr\n else:\n cursign = 1\n if (cursign != previoussign):\n previoussign = cursign\n incr = incr / 10\n else:\n incr = incr\n\n twguess = twguess + incr * previoussign\n\n return await self.temperature(twguess)", "def length(self):\n return self.n * self.t.length()", "def get_bend_length(self):\n # The length of a parametric curve x(t) y(t) is Integral[ sqrt( (dx/dt)^2 + (dy/dt)^2 ), {t,0,t0}], which for a Fresnel curve, simplifies to just t0\n if abs(self.turnby) <= np.pi / 2.0:\n return 2 * self.t * self.scale_factor\n else:\n return 2 * self.t * self.scale_factor + (\n 2 * np.pi * self.wgt.bend_radius\n ) * (self.circle_angle / (2 * np.pi))", "def test_wet_bulb_temperature(temp_units):\n temp = 25 * units.degC\n dewp = 15 * units.degC\n val = wet_bulb_temperature(1000 * units.hPa, temp.to(temp_units), dewp.to(temp_units))\n truth = 18.3432116 * units.degC # 18.59 from NWS calculator\n assert_almost_equal(val, truth, 5)", "def getLength(self):\n flength = 0\n for quad in self._quadrilaterals:\n flength = flength + get_quad_length(quad)\n return flength", "def max_temp(self):\n return 30", "def getLength(self) -> float:\n return self.length", "def Length(self):\n xyza = self.ga_ref.get_position() + self.wa\n xyzb = self.gb_ref.get_position() + self.wb\n if self.gc is not None:\n xyzc = self.gc_ref.get_position() + self.wc\n xa, ya, za = xyza\n length = self._integrate(\n xyza - xa,\n xyzb - ya,\n xyzc - za,\n )\n else:\n length = np.linalg.norm(xyzb - xyza)\n return length", "def length(self):\n return self.length2 ** 
0.5", "def length(self) -> float:\n n = self.geodesic.extrinsicDimension()\n third = 1.0/3.0\n def distance(x,y):\n cp0 = x[:n]\n cp1 = self.geodesic.integrate(cp0,vectorops.mul(x[n:],third))\n cp3 = y[:n]\n cp2 = self.geodesic.integrate(cp3,vectorops.mul(y[n:],-third))\n return self.geodesic.distance(cp0,cp1) + self.geodesic.distance(cp1,cp2) + self.geodesic.distance(cp2,cp3)\n return Trajectory.length(self,distance)", "def getlenbarvarj(self,j_):\n lenbarvarj_ = ctypes.c_int64()\n res = __library__.MSK_XX_getlenbarvarj(self.__nativep,j_,ctypes.byref(lenbarvarj_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n lenbarvarj_ = lenbarvarj_.value\n _lenbarvarj_return_value = lenbarvarj_\n return (_lenbarvarj_return_value)", "def _get_length(self):\n from math import sqrt\n\n if self._length is None:\n sum1 = 0\n for a in self.diff:\n sum1 += a * a\n self._length = sqrt(sum1)\n return self._length", "def temperature(self):\n return self.read_short(65) / 340.0 + 36.53", "def photon_energy_to_wavelength(photon_energy):\n return 1.23984197386209e-06 / photon_energy", "def length(self):\n return self.get_delta_value(self.Z_INDEX)", "def fMaxGasWaterRatio(Salinity, Temperature, Pressure):\n\tTemp = Temperature # Deg C\n\tPress = Pressure / 145.038 # MPa\n\tSal = Salinity\n\tA = log(0.712 * Press * ((abs(Temp - 76.71)) ** 1.5) + 3676 * (Press ** 0.64)) / log(10)\n\tB = -4 - 7.786 * Sal * (Temp + 17.78) ** -0.306\n\tC = A + B\n\treturn 10**C", "def r_height(self) -> int:\n return math.ceil(self.t_height / REGION_DIM)", "def get_length(self, ak_tpl: BKT) -> Optional[float]:\n ...", "def max_temp(self):\n return 99", "def calc_log_length(self, backlog, frequency):\n return int(numpy.ceil(backlog * frequency))" ]
[ "0.59892374", "0.59223914", "0.5915271", "0.584593", "0.58436084", "0.57993007", "0.57970524", "0.5745658", "0.57156944", "0.56933063", "0.5692632", "0.5690713", "0.56635946", "0.5638114", "0.56328857", "0.56106806", "0.55993575", "0.5581614", "0.5565413", "0.5565121", "0.55624926", "0.55491203", "0.54957175", "0.5494889", "0.5479859", "0.5477119", "0.5474927", "0.54706204", "0.5469975", "0.5461011" ]
0.87628084
0
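For orientation, a self-contained numerical check of the formula used in bjerrum_length_water above, with CODATA SI values assumed for the module-level constants that the entry does not show (ELECTRON_CHARGE, ELECTRIC_CONSTANT, BOLTZMANN_CONSTANT) and the tabulated dielectric constant at 298.15 K plugged in directly:

import numpy as np

ELECTRON_CHARGE = 1.602176634e-19     # C (assumed SI value)
ELECTRIC_CONSTANT = 8.8541878128e-12  # F/m, vacuum permittivity (assumed SI value)
BOLTZMANN_CONSTANT = 1.380649e-23     # J/K (assumed SI value)

T = 298.15
eps_r = 78.38  # dielectric constant of water at 298.15 K, from the table above

bjerrum = ELECTRON_CHARGE**2 / (
    4.0 * np.pi * ELECTRIC_CONSTANT * eps_r * BOLTZMANN_CONSTANT * T)
print(bjerrum)  # ~7.15e-10 m, i.e. the familiar ~0.71 nm for water at room temperature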
Calculate the ionization constant (Kw) of water at the given temperature and 0.1 MPa pressure. If you pass a density, it will use this density instead of the 0.1 MPa pressure.
def ionization_constant_water(temperature=298.15, density=None):
    import numpy as np

    # using Model II from Bandura et al.
    # model parameters
    n = 6
    alpha_0 = -0.864671
    alpha_1 = 8659.19
    alpha_2 = -22786.2
    beta_0 = 0.642044
    beta_1 = -56.8534
    beta_2 = -0.375754

    # Water parameters
    Mw = 18.01528

    # temperature
    T = temperature

    # density
    if density:
        D = density
    else:
        D = density_water(T)

    pKWG = 0.61415 \
        + 48251.33 / T \
        - 67707.93 / T**2.0 \
        + 10102100.0 / T**3.0

    Z = D * np.exp(alpha_0
                   + alpha_1 / T
                   + alpha_2 / T**2 * np.power(D, 2.0 / 3.0))

    pKw = -2 * n * (np.log10(1 + Z)
                    - (Z / (Z + 1)) * D * (beta_0 + beta_1 / T + beta_2 * D)) \
        + pKWG + 2 * np.log10(Mw / 1000.0)

    return np.power(10, -pKw)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def k_Ni00(wind_ms, temp_C):\n\n U = wind_ms\n\n Sc = schmidt_number(temp_C)\n k = (0.333 * U + 0.222 * U ** 2) * (600 / Sc) ** 0.5\n\n return k", "def Wk(z, zp, k, c_M=0, c_B=0):\n c = 299792458/1000 # km/s\n A = np.zeros((len(k), len(z), len(zp)))\n chiz = np.copy(A); np.transpose(chiz, (0,2,1))[:] = chi(z)\n chifraction = (chiz - chi(zp))*chi(zp)/chiz\n A[:] = omega_matter(zp)*H(zp)/(1 + zp)**2*G_light(zp, c_M, c_B)\n W2 = 3/2*A*chifraction\n Wtransp = np.transpose(W2)#/k**2 # If k is included, multiply by h\n W = np.transpose(Wtransp)\n W /= c # Unit correction for Wk to be unitless\n return W", "def calc_ked_WFI(self):\n\n #Initialize kinetic energy density\n self.ked_WFI = np.zeros( (self.grid.Nelem, 1))\n\n #Figure out the number of occupied orbitals\n if self.m == 0:\n if self.pol == 1:\n Nocc = np.floor(self.N/2)\n nu = self.N / 2 - Nocc\n else:\n Nocc = np.floor(self.N)\n nu = self.N - Nocc\n\n else:\n #m>0 orbitals hold twice as many electrons due to +-m symmetry\n if self.pol == 1:\n Nocc = np.floor(self.N / 4)\n nu = self.N / 4 - Nocc\n else:\n Nocc = np.floor(self.N/2)\n nu = self.N / 2 - Nocc\n\n #Construct density\n for i in range(int(Nocc)):\n # print(\"phi from pssolver\", self.phi)\n # print(\"phi subset\", self.phi[:,i])\n # print(\"integrate returns\", self.grid.integrate( self.phi[:,i]**2 )**0.5)\n\n #Normalized orbital\n phi_norm = self.phi[:,i] / self.grid.integrate( self.phi[:,i]**2 )**0.5\n phi_norm = phi_norm[:, None]\n self.ked_WFI += (phi_norm * (self.H0 @ phi_norm)) / self.grid.w[:, None]\n\n #If we are doing fractional robitals and are non-integer\n if self.FRACTIONAL is True and nu != 0:\n #Normalized orbital\n phi_norm = self.phi[:,i] / self.grid.integrate( self.phi[:, Nocc+1]**2)**0.5\n phi_norm = phi_norm[:, None]\n self.ked_WFI += nu * ( phi_norm * (self.H0 @ phi_norm) ) / self.grid.w[:, None]\n\n #Scale densities appropriately\n if self.m == 0:\n if self.pol == 1: #Unpolarized electrons\n self.ked_WFI = 2 * self.ked_WFI\n\n else: # m>0 orbitals hold twice as many electrons due to +-m symmetry\n if self.pol == 1:\n self.ked_WFI = 4 * self.ked_WFI\n else:\n self.ked_WFI = 2 * self.ked_WFI", "def temperature(k, kmax):\n return 1.0 / 500 * (1.0 / k - 1.0 / kmax)", "def compute_energy_density(kT):\n h=u.planck\n c=u.speed_of_light\n pi=np.pi\n return (8*pi/(h*c)**3)*((pi*kT)**4/15)", "def calc_h_sen(dry_bulb_C):\n\n h_kJ_kg = dry_bulb_C * CPA_kJ_kgC\n\n return h_kJ_kg", "def kth_func(Th, ThS, lbd, ksat):\n if Th < 0.0:\n # rwarn(\"water content < 0 IN kth_func\")\n Th = 0.0\n kth = ksat * (Th / ThS) ** (3 + (2 / lbd))\n\n return kth", "def kelvin_effect(pres, surft, temp, mw_ba, dcell):\n volm = mw_ba/1e3 # approximation: using density 1000 kg/m3\n return pres*exp(-4*surft*volm/(dcell*gas_constant*temp))", "def specificVolumeLiquid(self, Tk=None, Tc=None):\n return 1 / (1000.0 * self.pseudoDensity(Tk, Tc))", "def calc_k(self):\n\t\n\tself.k = -np.array([self.sth*self.cphi, self.sth*self.sphi, self.cth])\n\n\treturn", "def half_space_cooling_waermefluss(k, T0, T1, kappa, t):\n return k * (T1 - T0) / (numpy.sqrt(math.pi * kappa * t))", "def AlfvenW(density,mu,B,q,R):\n freq = Alfvenv(density,mu,B)/(2.*q*R)/2./np.pi;\n print( \"TAE frequency is %10.3f kHz\"%(freq/1.e3))\n print( \"EAE frequency is %10.3f kHz\"%(2.*freq/1.e3))\n print( \"NAE frequency is %10.3f kHz\"%(3.*freq/1.e3))", "def Wkappa(z, zp, c_M=0, c_B=0):\n c = 299792458/1000 # km/s\n A = np.zeros([len(z)] + list(np.shape(zp)))\n chiz = np.copy(A); np.transpose(chiz)[:] = chi(z)\n chifraction = 
(chiz - chi(zp))*chi(zp)/chiz\n A[:] = omega_matter(zp)*H(zp)/(1 + zp)**2*G_light(zp, c_M, c_B)\n W = 3/2*A*chifraction\n W /= c # Unit correction for Wk to be unitless\n return W", "def getK1(inp):\n\td0 = getD0(inp)\n\treturn 0.32745 + 1/(2 * d0) - 8/(81 * d0)", "def k_Wa92(wind_second_moment, temp_C):\n\n U2 = wind_second_moment\n\n Sc = schmidt_number(temp_C)\n k = (0.31 * U2) * (660 / Sc) ** 0.5\n\n return k", "def ky(self, k: int) -> float:\n result = self._read_inline(f\"ky({k})\")\n return result", "def thermalConductivity(self, Tk=None, Tc=None):\n Tk = getTk(Tc, Tk)\n self.checkPropertyTempRange(\"thermal conductivity\", Tk)\n thermalConductivity = (\n 2.13014e-08 * Tk**3\n - 6.31916e-05 * Tk**2\n + 1.11629e-01 * Tk\n - 2.00043e00\n )\n return thermalConductivity * 1e-3", "def to_kwh(m):\n cp = 4243.5 # specific heat of water [J/ kg K]\n dT = 179 # change in steam temperature [deg C]\n h_in = 196 # inlet enthalpy [BTU/lb]\n h_out = 1368 # outlet enthalpy [BTU/lb]\n\n # times 0.29307107 to convert from BTU/hr to kilowatts\n kwh = (m * (h_out - h_in)) * 0.29307107\n return kwh", "def abbott_elec():\n per_kwh = 0.08 # [$/kWh]\n return per_kwh", "def Wk2(z, zp, k, c_M=0, c_B=0):\n c = 299792458/1000 # km/s\n A = np.zeros((len(z), len(zp)))\n chiz = np.copy(A); np.transpose(chiz)[:] = chi(z)\n chifraction = (chiz - chi(zp))*chi(zp)/chiz\n A[:] = omega_matter(zp)*H(zp)/(1 + zp)**2*G_light(zp, c_M, c_B)\n W = 3/2*A*chifraction\n W /= c # Unit correction for Wk to be unitless\n return W", "def fWaterDensity(Salinity, GasWaterRatio, Temperature, Pressure):\n\tTemp = Temperature\n\tPress = Pressure / 145.038\n\tSal = Salinity / 1000\n\tA = (-80 * Temp) + (-3.3 * (Temp**2)) + (0.00175 * (Temp**3))\n\tB = (489 * Press) + (-2 * Temp * Press) + (0.016 * (Temp**2) * Press)\n\tC = (-0.000013 * (Temp**3) * Press) + (-0.333 * (Press**2)) + (0.002 * Temp * (Press ** 2))\n\tPureWaterDensity = 1 + ((A + B + C) * 1e-6)\n\tA = 80 + (3 * Temp) + (-3300 * Sal) + (-13 * Press) + (47 * Press * Sal)\n\tB = (300 * Press) + (-2400 * Press * Sal)\n\tC = 0.000001 * (B + (Temp * A))\n\tD = 0.668 + (0.44 * Sal)\n\treturn PureWaterDensity + (Sal * (D + C))", "def conductivity(self, T):\n m = self.mass\n mu = self.viscosity(T)\n K = (15/4) * kB * mu / m\n return K", "def thermal_i(mu,Ti):\n return 9.79*1.e5/np.sqrt(mu/Ti)/1.e2", "def density(self):\n return (1e-3*self.molar_mass) * self.pressure / (gas_constant * self.temperature) # kg/m^3", "def u(self, k, m, z):\n result = self.ProfNFW.nfw(k, m, z) * self.Ngal(m) / self.nBarGal(1./(1.+z))\n return result", "def specificHeatCapacity(d, d_iso, density, cp):\n d_t = min(0.5 * np.sum(d), d_iso , 0.1)\n sum_d_i = d[0]\n i = 0 \n kappa = 0 \n while sum_d_i <= d_t:\n kappa += d[i] * density[i] * cp[i]\n i += 1\n sum_d_i += d[i]\n else:\n sum_d_i -= d[i]\n d_part = d_t - sum_d_i \n kappa += d_part * density[i] * cp[i]\n\n return kappa", "def k_Sw07(wind_second_moment, temp_C):\n\n U2 = wind_second_moment\n\n Sc = schmidt_number(temp_C)\n k = (0.27 * U2) * (660 / Sc) ** 0.5\n\n return k", "def k_Li86(wind_ms, temp_C):\n from numpy import zeros_like\n\n U = wind_ms\n T = temp_C\n\n Sc = schmidt_number(T)\n k = zeros_like(temp_C)\n\n i1 = U <= 3.6\n i2 = (U > 3.6) & (U < 13.0)\n i3 = U >= 13.0\n\n k[i1] = (0.17 * U[i1]) * (Sc[i1] / 600) ** (-2.0 / 3.0)\n k[i2] = ((U[i2] - 3.4) * 2.8) * (600 / Sc[i2]) ** 0.5\n k[i3] = ((U[i3] - 8.4) * 5.9) * (600 / Sc[i3]) ** 0.5\n\n return k", "def Wv2(z, k, c_M=0):\n k *= 0.6763 # 1/Mpc\n c = 299792458/1000 # km/s\n W = -(1 - 
c/(Hcal(z)*chi(z)) + alpha_M(z, c_M)/2)*f(z)*H(z)/(1+z)*1/k**2\n W *= 1/c\n return W", "def displacement_wein(temperature=1, units=SI):\n\n var = sy.var('h c k t')\n par = units['h'], units['c'], units['k'], temperature\n\n y = h * c / 4.9663 / k / t\n\n return dic_result(var,par,y)" ]
[ "0.60595423", "0.5722518", "0.5691604", "0.56759197", "0.56751513", "0.5663193", "0.5628854", "0.5599959", "0.55978936", "0.55777365", "0.5565677", "0.5564976", "0.55592567", "0.550081", "0.545685", "0.5451524", "0.5451415", "0.54489225", "0.5431779", "0.53877413", "0.53745234", "0.5364627", "0.5357973", "0.53409636", "0.53323764", "0.53281075", "0.5327456", "0.5321049", "0.529739", "0.5290265" ]
0.6927209
0
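A small sanity check for the ionization_constant_water entry above. It bypasses the density_water helper (which is not shown here) by passing the density explicitly; the Bandura–Lvov Model II takes the density in g/cm^3, so 0.997 is assumed for liquid water at 298.15 K.

import numpy as np

# ionization_constant_water is assumed to be in scope as defined above.
Kw = ionization_constant_water(temperature=298.15, density=0.997)
print(Kw)             # expected to be close to 1.0e-14
print(-np.log10(Kw))  # pKw should come out near 14 for water at 25 degC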
Takes a user's friends by the id of the user, given as a string or an int, and returns the friends as a set. I really have no idea how to write a full test for it because we don't have immutable users.
def get_friends(self, user_id):
    # if user_id is alias, replace it with id
    if not self._is_positive_number(user_id):
        user_id = get_names_of_users(set([user_id]))[0].id

    api = pyvkontakte.VkontakteApi()
    return set(api.call('friends.get', user_id=user_id, v='5.8')['items'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_friends(user_id):\n return list(set(get_following(user_id)) &\n set(get_followers(user_id)))", "def get_friends(user, data):\n setA = list(\n data.loc[data.user == user].user_friend_list.values)\n setB = list(\n data.loc[data.user_friend_list == user].user\n .values)\n friends = list(set(set(setA).union(setB)))\n return friends", "def friends_of_friends(self, node, ids):\n fof = set()\n for id in ids:\n for f in self.users[id]:\n if f != node:\n fof.add(f)\n return fof", "def get_user_friends(user_id):\n\n friends = db.session.query(User_Friend).filter(User_Friend.user_id==user_id).all() \n\n return friends", "def test_friends_of_friend_ids(self):\n expected = {0: 2, 5: 1}\n self.assertEqual(expected, self.users.friends_of_friend_ids(3))", "def get_friends_ids(api, user_id):\r\n # Getting user object:\r\n user = get_user(api, user_id)\r\n\r\n # Getting list of friends of the user:\r\n friends = get_friends(user)\r\n\r\n # Returning ids of friends of the user:\r\n return [friend.id for friend in friends]", "def friends(user_id):\n user = user_grab(user_id)\n if user is None:\n return \"user not found\", 404\n friends = user.get(\"friends\")\n if friends is None:\n friends = []\n data_json = json.dumps({'friends': [str(friend) for friend in friends]})\n return data_json", "def flatten_friends_ids(users):\n friends_ids = []\n for user_id in users:\n friends_ids.extend(users[user_id][\"friends_ids\"]) \n return list(set(friends_ids))", "def get_potential_friends(user_id):\n\n if not g.user:\n return _get_json_message(\n INVALID_CREDENTIALS_MSG,\n INVALID_CREDENTIALS_STATUS_CODE)\n\n current_user = User.query.get_or_404(user_id)\n\n if current_user.username != g.user.username:\n return _get_json_message(\n INVALID_CREDENTIALS_MSG,\n INVALID_CREDENTIALS_STATUS_CODE)\n\n user_options = User.get_list_of_potential_friends(current_user)\n user_options_serialized = [user.serialize() for user in user_options]\n\n return jsonify(user_options=user_options_serialized)", "def list_friends(self, user_id):\n if self.database is None:\n raise Exception(\"No database.\")\n if user_id is None or len(user_id) == 0:\n raise Exception(\"Bad parameter.\")\n return self.database.retrieve_friends(user_id)", "def get_friends(graph, location_id=\"\", is_user=\"\"):\n user = graph.get_object(\"me\")\n fql = \"SELECT uid, name, profile_url, pic_small, current_location, mutual_friend_count FROM user WHERE uid IN (SELECT uid1 FROM friend WHERE uid2 = \" + user[\"id\"] + \")\"\n if location_id:\n fql += \" AND current_location.id=\" + location_id\n if is_user:\n fql += \" AND is_app_user=\" + is_user\n fql += \" ORDER BY mutual_friend_count DESC\"\n logging.info(fql)\n try:\n fql_friends = graph.fql(fql)\n return fql_friends['data']\n except:\n logging.error(\"There was an error retrieving friends of UID %s\", user[\"id\"])\n return list()", "def make_friend(user_id, friend_id):\n # Find out if the user exists\n user_a = user_grab(user_id)\n if user_a is None:\n return \"user not found\", 404\n\n # Find the other user\n user_b = user_grab(friend_id)\n if user_b is None:\n return \"user not found\", 404\n\n # Get their friend list\n friends_current = user_a.get(\"friends\")\n friends_updated = []\n if friends_current is not None:\n for friend in friends_current:\n if friend == friend_id:\n return user_b\n friends_updated = friends_current\n friends_updated.append(str(user_b['_id']))\n api_vars.users.update({'_id': ObjectId(user_id)},\n {'$set': {'friends': friends_updated}})\n return json.dumps(user_b)", "def 
get_friends(self):\n edges = DirectedUserToUserEdge.all().filter(\n 'owner_user_id =', self.key().id()).run()\n return db.get([db.Key.from_path('User', edge.friend_user_id) for edge in\n edges])", "def get_friends_ids(api, user_id):\n url = \"https://api.twitter.com/1.1/friends/ids.json\"\n rate_status = check_rate_limit(api, url)\n remaining_requests = rate_status[\"remaining\"]\n if not remaining_requests:\n delay = rate_status['reset'] - time.time()\n if delay > 0:\n print \"Sleeping {0}...\".format(delay)\n time.sleep(delay) \n rate_status = check_rate_limit(api, url)\n remaining_requests = rate_status[\"remaining\"]\n\n friends_ids = []\n params = {\"user_id\": user_id, \"counter\": 0, \n \"count\": 5000, \"stringify_ids\": True}\n response = api.get(url, params=params)\n friends_ids.extend(response.json().get(\"ids\", []))\n response.close()\n remaining_requests -= 1\n\n while response.json().get('next_cursor'):\n if not remaining_requests:\n delay = rate_status['reset'] - time.time()\n if delay > 0:\n print \"Sleeping {0:,.4} s...\".format(delay)\n time.sleep(delay) \n rate_status = check_rate_limit(api, url)\n remaining_requests = rate_status[\"remaining\"]\n params[\"cursor\"] = response.json().get('next_cursor_str')\n response = api.get(url, params=params)\n friends_ids.extend(response.json().get(\"ids\", []))\n response.close()\n remaining_requests -= 1\n return friends_ids", "def getFriends(id):\n u = models.User.query.get(id)\n if not u:\n return jsonify({'error': 'No account found'}), 200\n\n if not u.isFb:\n if int(u.fbid) is not 0:\n u = models.User.query.get(u.fbid)\n if not u.isFb and int(u.fbid) is not 0:\n u = models.User.query.get(u.fbid)\n else:\n return jsonify({'error': 'No account found'}), 200\n\n session['oauth_token'] = (u.token, '')\n resp = facebook.get('/' + u.fbid + '/friends')\n friends = []\n for f in resp.data['data']:\n friends.append(f['id'])\n\n friends_json = []\n for f in friends:\n u = models.User.query.filter_by(fbid=f).first()\n user = {\n 'id': u.id,\n 'name': u.name,\n 'email': u.email,\n 'regID': u.regid,\n 'photo': u.photo\n }\n friends_json.append(user)\n return jsonify({'friends': friends_json}), 200", "async def edit(\n self, user_id: int, list_ids: Optional[List[int]] = None, **kwargs\n ) -> base.OkResponseModel:\n\n params = self.get_set_params(locals())\n response = await self.api.request(\"friends.edit\", params)\n model = base.OkResponse\n return model(**response).response", "def common_friends(self, user):\n\n self_friend_ids = set(self.friends.keys()) if self.friends else set()\n other_friend_ids = set(user.fb_profile.friends.keys()) if user.fb_profile.friends else set()\n\n common_friend_ids = self_friend_ids.intersection(other_friend_ids)\n\n return common_friend_ids", "def get_friends(user_id, fields):\n assert isinstance(user_id, int), \"user_id must be positive integer\"\n assert isinstance(fields, str), \"fields must be string\"\n assert user_id > 0, \"user_id must be positive integer\"\n import requests\n domain = \"https://api.vk.com/method\"\n access_token = '1efb9991613d1e0c7597cae85db190f37bbda497579e92b05af4352bc694c66fd3883d0ff1b875b53a98d'\n user_id = user_id\n\n query_params = {\n 'domain': domain,\n 'access_token': access_token,\n 'user_id': user_id,\n 'fields': fields\n }\n\n query = \"{domain}/friends.get?access_token={access_token}&user_id={user_id}&fields={fields}&v=5.53\".format(\n **query_params)\n response = requests.get(query)\n friends_list = response.json()['response']['items']\n return friends_list", "def 
foaf_ids_bad(user):\n return [foaf_id \n for friend_id in friendships[user[\"id\"]]\n for foaf_id in friendships[friend_id]]", "def get_friends(user):\r\n try:\r\n friends = user.friends()\r\n return friends[:]\r\n except tweepy.error.RateLimitError:\r\n print(\"Rate limit reached! Waiting...\")\r\n wait_15_minutes()\r\n return get_friends(user)\r\n except tweepy.error.TweepError:\r\n print(\"Skipping user whose information is protected.\")\r\n return list()", "def fetch_friend_ids(self, user, **kwargs):\n friends = self.fetch_friends(user, **kwargs)\n friend_ids = []\n for friend in friends['data']:\n friend_ids.append(friend['id'])\n return friend_ids", "def add_friendship(self, user_id, friend_id):\n if user_id == friend_id:\n # print(\"WARNING: You cannot be friends with yourself\")\n return False\n elif friend_id in self.friendships[user_id] or user_id in self.friendships[friend_id]:\n # print(\"WARNING: Friendship already exists\")\n return False\n else:\n self.friendships[user_id].add(friend_id)\n self.friendships[friend_id].add(user_id)\n\n return True", "def get_possible_friends():\n user_list = []\n for user_unprocessed in api_vars.users.find({'public': True}):\n user = user_unprocessed\n user['_id'] = str(user['_id'])\n user_list.append(user)\n # For now, let's break the list at one hundred. This is just for the\n # sake of simplicity.\n if len(user_list) >= 100:\n break\n user_data = {'users': user_list}\n json_data = json.dumps(user_data)\n return json_data", "def get_friends(self, user_id=None, fields='sex,bdate'):\n if user_id is None:\n friends_info = self.vk.friends.get(fields=fields)\n else:\n friends_info = self.vk.friends.get(fields=fields, user_id=user_id)\n return friends_info['items']", "def add_friendship(self, user_id, friend_id):\n if user_id == friend_id:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friend_id in self.friendships[user_id] or user_id in self.friendships[friend_id]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[user_id].add(friend_id)\n self.friendships[friend_id].add(user_id)", "def add_friendship(self, user_id, friend_id):\n if user_id == friend_id:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friend_id in self.friendships[user_id] or user_id in self.friendships[friend_id]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[user_id].add(friend_id)\n self.friendships[friend_id].add(user_id)", "def add_friendship(self, user_id, friend_id):\n if user_id == friend_id:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friend_id in self.friendships[user_id] or user_id in self.friendships[friend_id]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[user_id].add(friend_id)\n self.friendships[friend_id].add(user_id)", "def list_pending_friends(self, user_id):\n if self.database is None:\n raise Exception(\"No database.\")\n if user_id is None or len(user_id) == 0:\n raise Exception(\"Bad parameter.\")\n return self.database.retrieve_pending_friends(user_id)", "def test_requested_friends_asymmetrical(self):\n u = AppUser(id = 1)\n u.django_user = User.objects.create(username='Testuser')\n u.save()\n f = AppUser(id = 2)\n f.django_user = User.objects.create(username='Testuser2')\n f.save()\n \n f.requested_friends.add(u)\n self.assertIs(u in f.requested_friends.all(), True)\n self.assertIs(f in u.requested_friends.all(), False)", "def get_pending_friends(cu_id):\n users = db.session.execute(\n \"\"\"select fr.user_1_id, 
u.username, u.firstname, u.lastname\n from friend_request as fr inner join userm as u on fr.user_1_id = u.id \n where fr.user_2_id = :cu_id\n and fr.approved is NULL\"\"\",\n {\"cu_id\": cu_id}\n )\n return users" ]
[ "0.74689406", "0.7258868", "0.70823413", "0.69087654", "0.6725609", "0.67187035", "0.6641582", "0.6527603", "0.65198696", "0.6516927", "0.6492187", "0.6437091", "0.6250909", "0.61955714", "0.6153517", "0.6014912", "0.6013826", "0.5980733", "0.5966134", "0.5921841", "0.5905617", "0.5899534", "0.58968395", "0.5877344", "0.58731294", "0.58731294", "0.58731294", "0.58574265", "0.57823855", "0.5779303" ]
0.78986126
0
Substructure similarity based on subpaths
def subpath_sim(subpaths_1, subpaths_2):
    u = subpaths_1.union(subpaths_2)
    f1 = np.zeros(len(u))
    f2 = np.zeros(len(u))
    u = list(u)

    # convert graph into one-hot-vector (based on the presence of subpaths)
    for i in range(len(u)):
        if u[i] in subpaths_1:
            f1[i] = 1
        if u[i] in subpaths_2:
            f2[i] = 1

    score = np.dot(f1, f2) * (np.count_nonzero(f1) + np.count_nonzero(f2)) / \
        (2 * (np.count_nonzero(f1) * np.count_nonzero(f2)))

    if math.isnan(score):  # in case of empty set
        return 0.0
    else:
        return score
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def substructure_sim_exact(subtrees_1, subtrees_2):\n assert(len(subtrees_1) == len(subtrees_2))\n n = len(subtrees_1)\n f1 = np.zeros(n)\n for i in range(n):\n f1[i] = subtrees_1[i] == subtrees_2[i] # calculate the number of matching pairs\n\n return float(np.count_nonzero(f1)) / float(len(f1))", "def substructure_sim_partial(subtrees_1, subtrees_2):\n assert(len(subtrees_1) == len(subtrees_2))\n n = len(subtrees_1)\n f1 = np.zeros(n)\n f2 = np.zeros(n)\n for i in range(n):\n if (subtrees_1[i] == subtrees_2[i]): # exact match, or both are dropped\n f1[i] = 1.0\n f2[i] = 1.0\n else: # partial match\n f1[i] = SubstructureAgreement.save_div( len(subtrees_1[i].intersection(subtrees_2[i])), float(len(subtrees_2[i])))\n f2[i] = SubstructureAgreement.save_div( len(subtrees_2[i].intersection(subtrees_1[i])), float(len(subtrees_1[i])))\n\n return (np.sum(f1) + np.sum(f2)) / (2.0 * float(n)) # average of average recall", "def compare_trees(first_soup: HTMLStrip, second_soup: HTMLStrip) -> float:\n first_tree = Tree.from_soup_object(first_soup.file_name, first_soup.original_soup)\n second_tree = Tree.from_soup_object(second_soup.file_name, second_soup.original_soup)\n\n common_paths_size: int = first_tree.num_of_common_paths(second_tree)\n target_size: int = second_tree.total_num_of_paths()\n similarity = float(common_paths_size)*100/target_size\n print(f'{similarity:.2f}')\n return similarity", "def test4(self):\n tgts = [ ('CCC(O)C(=O)O',\n ('CCC','OCC','OCC=O','OCCO','CCCC','OC=O','CC(O)C')),\n ]\n for smi,matches in tgts:\n m = Chem.MolFromSmiles(smi)\n fp1 = Chem.RDKFingerprint(m,2,7,9192,4,0)\n obs = fp1.GetOnBits()\n for match in matches:\n m2 = Chem.MolFromSmiles(match)\n fp2 = Chem.RDKFingerprint(m2,2,7,9192,4,0)\n v1,v2 = DataStructs.OnBitProjSimilarity(fp2,fp1)\n assert feq(v1,1.0000),'substruct %s not properly contained in %s'%(match,smi)", "def _calc_path_similarity(self, sense_key_a, sense_key_b):\n\t\tdef lemma_from_key(key):\n\t\t\ttry:\n\t\t\t\treturn wn.lemma_from_key(key)\n\t\t\texcept:\n\t\t\t\t# there is some strange bug in the glosstag file that somehow does always refer to adjective satellites as adjectives\n\t\t\t\t# as these keys then cant be found in WN they are tried to be resolved here\n\t\t\t\tif re.search(\"%([0-9]):\", key).group(1) == \"3\":\n\t\t\t\t\treturn lemma_from_key(re.sub(\"(%)[0-9](:)\", \"\\g<1>5\\g<2>\", key))\n\t\t\t\tself._log_message(sense_key_a)\n\t\t\t\treturn None\n\n\t\tlemma_a = lemma_from_key(sense_key_a)\n\t\tlemma_b = lemma_from_key(sense_key_b)\n\n\t\tif lemma_a and lemma_b:\n\t\t\treturn lemma_a.synset().path_similarity(lemma_b.synset())\n\t\telse:\n\t\t\treturn 0", "def Path_Similarity(word1, word2):\n\n # Initial test to confirm unique nouns, otherwise passes back 1\n\n if word1 == word2:\n return 1, word1\n\n # Sets up Initial Variables & Dictionaries\n\n stepup1 = [word1]\n stepup2 = [word2]\n dict1= {}\n dict2= {}\n currentstep1 = []\n currentstep2 = []\n\n # Iterates through a loop an arbitrary # of times, adding new hypernyms\n # for each word to individual dictionaries with the number of iterations\n # as the key to the dictionary. 
The two dictionaries are saved perpetually\n\n for la in range(50):\n for i in range(len(stepup1)):\n currentstep1 += (stepup1[i].hypernyms())\n for syn in stepup1:\n dict1[syn] = la\n for i in range(len(stepup2)):\n currentstep2 = (stepup2[i].hypernyms())\n for syn in stepup2:\n dict2[syn] = la\n\n # Variables are reset and converted to continue the next stage of the loop\n\n stepup1 = currentstep1\n stepup2 = currentstep2\n currentstep1 = []\n currentstep2 = []\n\n\n # Each loop the dictionaries are checked for matches. I have confirmed that\n # checking each cycle is faster than checking at the end of an arbitrary\n # number of cycles.\n\n # Removes applied words as Possible Subsumers Per Fridays Clas\n dict1.pop(word1)\n dict2.pop(word2)\n\n #Gets possible Least Common Subsumers\n dict1Set = set(dict1)\n dict2Set = set(dict2)\n d = {}\n for name in dict1Set.intersection(dict2Set):\n d[name] = dict1[name]\n pos_lcs = [key for min_value in (min(d.values()),) for key in d if d[key] == min_value]\n\n #Returns Actual LCS\n key_lcs = []\n for i in pos_lcs:\n key_lcs.append(shortestPath(wn.synset('entity.n.01'),i))\n lcs = (pos_lcs[key_lcs.index(max(key_lcs))])\n\n #Returns path Similarity Value and Synset of LCS; Must Error Proof\n\n return 1/(dict1[lcs] + dict2[lcs]), lcs", "def test5(self):\n tgts = [ ('CCC(O)C(=O)O',\n ('O[CH-][CH2-]','O[CH-][C-]=O')),\n ]\n for smi,matches in tgts:\n m = Chem.MolFromSmiles(smi)\n fp1 = Chem.RDKFingerprint(m,2,7,9192,4,1)\n obs = fp1.GetOnBits()\n for match in matches:\n m2 = Chem.MolFromSmiles(match)\n fp2 = Chem.RDKFingerprint(m2,2,7,9192,4,1)\n v1,v2 = DataStructs.OnBitProjSimilarity(fp2,fp1)\n assert feq(v1,1.0000),'substruct %s not properly contained in %s'%(match,smi)", "def find_similarities(data):\n\n good_one = copy.deepcopy(data[\"no_vpn\"])\n del data[\"no_vpn\"]\n\n all_minhashes = []\n\n # put everything easy to work with\n for extension in data:\n for webpage in data[extension]:\n for f in data[extension][webpage]:\n path = (extension, webpage, f)\n all_minhashes.append((path, data[extension][webpage][f]))\n aux = copy.deepcopy(all_minhashes)\n path_data, data = aux.pop(0)\n all_minhashes.pop(0)\n\n result = {}\n while path_data and data:\n for file in good_one[path_data[1]]:\n # print(\"Jaccard among %s and %s\" % (path_data[2], file))\n path_to_file = \"/\".join((\"no_vpn\", path_data[1], file))\n similarity = data[\"hash\"].jaccard(good_one[path_data[1]][file][\"hash\"])\n try:\n result[\"/\".join(path_data)].append((path_to_file, similarity))\n except KeyError:\n result[\"/\".join(path_data)] = [\n (path_to_file, similarity),\n ]\n\n all_minhashes.append((path_data, data))\n try:\n path_data, data = aux.pop(0)\n except IndexError:\n path_data = data = None\n\n return result", "def testFindAllSimilarityFromNodeOnPathSimExampleThree(self):\n\n graph, authorMap, conferenceMap = SampleGraphUtility.constructPathSimExampleThree()\n metaPath = [Author, Paper, Conference, Paper, Author]\n strategy = PathSimStrategy(graph, metaPath)\n\n mike = authorMap['Mike']\n mostSimilarNodes = strategy.findMostSimilarNodes(mike, 5)\n\n self.assertEquals([authorMap['Bob'], authorMap['Mary'], authorMap['Jim']], mostSimilarNodes)", "def siblinghood_sim_exact(siblinghood_1, siblinghood_2):\n return SubstructureAgreement.substructure_sim_exact(siblinghood_1, siblinghood_2)", "def dependency_similarity(s1, s2):\n # pass\n parsed_sentence_1 = parser.raw_parse(s1)\n parsed_sentence_2 = parser.raw_parse(s2)\n \n tree1 = next(parsed_sentence_1)\n tree2 = 
next(parsed_sentence_2)\n \n triples1 = [t for t in tree1.triples()]\n triples2 = [t for t in tree2.triples()] \n\n # Compute similarity\n if len(triples1) != 0 and len(triples2) != 0:\n similarity = 1 - jaccard_distance(set(triples1), set(triples2))\n return similarity\n else:\n return 0", "def test_pathlength_verbs(id1, id2, pathlength):\n synset1 = germanet_data.get_synset_by_id(id1)\n synset2 = germanet_data.get_synset_by_id(id2)\n dist = synset1.shortest_path_distance(synset2)\n np.testing.assert_equal(dist, pathlength)", "def test_paths_verbs(id1, id2, expected_path_ids):\n path = get_shortest_paths(id1, id2)\n np.testing.assert_equal([synset.id for synset in path], expected_path_ids)", "def test_similarity(self):\n self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'dog.n.01'), 1))\n self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'mammal.n.01'), 0.180901358))", "def compute_similarity(self, seq_node, **kwargs):\n pass", "def test_ontology_similarity_jaccard(ontology):\n\tassert ontology.similarity_jaccard([\"TO:0000001\"],[\"TO:0000002\"], inherited=False) == 1/2\n\tassert ontology.similarity_jaccard([\"TO:0000001\"],[\"TO:0000003\"], inherited=False) == 1/2\n\tassert ontology.similarity_jaccard([\"TO:0000002\"],[\"TO:0000003\"], inherited=False) == 1/3\n\tassert ontology.similarity_jaccard([\"TO:0000003\"],[\"TO:0000005\"], inherited=False) == 2/5\n\tassert ontology.similarity_jaccard([\"TO:0000007\"],[\"TO:0000008\"], inherited=False) == 2/3\n\tassert ontology.similarity_jaccard([\"TO:0000005\"],[\"TO:0000009\"], inherited=False) == 1/8\n\n\tassert ontology.similarity_jaccard([\"TO:0000001\"],[\"TO:0000002\",\"TO:0000001\"], inherited=False) == 1/2\n\tassert ontology.similarity_jaccard([\"TO:0000003\"],[\"TO:0000001\",\"TO:0000009\"], inherited=False) == 1/5\n\tassert ontology.similarity_jaccard([\"TO:0000002\"],[\"TO:0000003\",\"TO:0000002\"], inherited=False) == 2/3 \n\tassert ontology.similarity_jaccard([\"TO:0000003\"],[\"TO:0000005\",\"TO:0000002\"], inherited=False) == 2/5\n\tassert ontology.similarity_jaccard([\"TO:0000008\"],[\"TO:0000008\",\"TO:0000007\"], inherited=False) == 3/3\n\tassert ontology.similarity_jaccard([\"TO:0000005\"],[\"TO:0000009\",\"TO:0000002\"], inherited=False) == 2/8", "def struct_sim(image1: np.ndarray, image2: np.ndarray, **kwargs) -> np.ndarray:\n n, h, w = image1.shape\n assert (n, h, w) == image2.shape\n ssim = np.zeros(n)\n for ii in range(n):\n ssim[ii] = structural_similarity(image1[ii], image2[ii], **kwargs)\n return ssim", "def sentence_similarity(sentence1, sentence2):\n sentence1 = sentence1.tags\n sentence2 = sentence2.tags\n \n synsets1 = [tagged_to_synset(*tagged_word) for tagged_word in sentence1]\n synsets2 = [tagged_to_synset(*tagged_word) for tagged_word in sentence2]\n \n synsets1 = [ss for ss in synsets1 if ss]\n synsets2 = [ss for ss in synsets2 if ss]\n \n score, count = 0.0, 0\n \n for synset in synsets1:\n \n li=[synset.path_similarity(ss) for ss in synsets2]\n m=0\n for i in range(len(li)):\n if li[i] is not None and m<li[i]:\n m=li[i]\n if m != 0:\n score += m\n count += 1\n\n if count is 0:\n score = 0\n else:\n score /= count\n return score", "def test_ontology_ic_similarity(ontology):\n\tassert ontology.similarity_ic([\"TO:0000001\"],[\"TO:0000002\"], inherited=False, as_weight=False) == 0\n\tassert ontology.similarity_ic([\"TO:0000001\"],[\"TO:0000003\"], inherited=False, as_weight=False) == 0\n\tassert ontology.similarity_ic([\"TO:0000002\"],[\"TO:0000003\"], inherited=False, 
as_weight=False) == 0\n\tassert ontology.similarity_ic([\"TO:0000003\"],[\"TO:0000005\"], inherited=False, as_weight=False) == 0.3690702464285426\n\tassert ontology.similarity_ic([\"TO:0000007\"],[\"TO:0000008\"], inherited=False, as_weight=False) == 0.5\n\tassert ontology.similarity_ic([\"TO:0000005\"],[\"TO:0000009\"], inherited=False, as_weight=False) == 0\n\n\tassert ontology.similarity_ic([\"TO:0000001\"],[\"TO:0000002\",\"TO:0000001\"], inherited=False, as_weight=False) == 0\n\tassert ontology.similarity_ic([\"TO:0000003\"],[\"TO:0000001\",\"TO:0000009\"], inherited=False, as_weight=False) == 0\n\tassert ontology.similarity_ic([\"TO:0000002\"],[\"TO:0000003\",\"TO:0000002\"], inherited=False, as_weight=False) == 0.3690702464285426\n\tassert ontology.similarity_ic([\"TO:0000003\"],[\"TO:0000005\",\"TO:0000002\"], inherited=False, as_weight=False) == 0.3690702464285426\n\tassert ontology.similarity_ic([\"TO:0000008\"],[\"TO:0000008\",\"TO:0000007\"], inherited=False, as_weight=False) == 1.3690702464285427\n\tassert ontology.similarity_ic([\"TO:0000005\"],[\"TO:0000009\",\"TO:0000002\"], inherited=False, as_weight=False) == 0.3690702464285426", "def test_isomorphism_match(data):\n\n reference = data.draw(ISO_BUILDER)\n nodes = data.draw(st.sets(st.sampled_from(list(reference.nodes)),\n max_size=len(reference)))\n graph = reference.subgraph(nodes)\n\n note((\"Reference nodes\", reference.nodes(data=True)))\n note((\"Reference edges\", reference.edges))\n note((\"Graph nodes\", graph.nodes(data=True)))\n note((\"Graph edges\", graph.edges))\n\n node_match = nx.isomorphism.categorical_node_match('element', None)\n matcher = nx.isomorphism.GraphMatcher(reference, graph, node_match=node_match)\n expected = make_into_set(matcher.subgraph_isomorphisms_iter())\n found = make_into_set(vermouth.graph_utils.isomorphism(reference, graph))\n\n note((\"Found\", found))\n note((\"Expected\", expected))\n\n if not expected:\n event(\"Not subgraphs\")\n if found == expected:\n event(\"Exact match\")\n\n assert found <= expected", "def siblinghood_sim_partial(siblinghood_1, siblinghood_2):\n return SubstructureAgreement.substructure_sim_partial(siblinghood_1, siblinghood_2)", "def equiv(subdiagram1, subdiagram2):\n # TODO: Make sure arguments are the right type\n # TODO: Make this work for subdiagrams of length >= 1\n # subdiagrams are not equivalent if they have different numbers of crossings\n # print \"sub1\\t\", subdiagram1, len(subdiagram1[0])\n # print \"sub2\\t\", subdiagram2, len(subdiagram2[0])\n if len(subdiagram1[0]) != len(subdiagram2[0]):\n return False\n # look for a match\n for i in range(len(subdiagram1[0])-1):\n crossing1 = subdiagram1[0][i]\n typeMatch = False\n for j in range(len(subdiagram2[0])-1):\n crossing2 = subdiagram2[0][j]\n print \"\\tc1 \",crossing1\n print \"\\tc2 \",crossing2\n # check for same crossing type\n # TODO: check for empty crossing\n if len(crossing1) == 5 and len(crossing2) == 5:\n if crossing1[0] == crossing2[0]:\n print \" :)\"\n typeMatch = True\n \n\n return True", "def testFindSingleSimilarityPathSimExampleThree(self):\n\n graph, authorMap, conferenceMap = SampleGraphUtility.constructPathSimExampleThree()\n metaPath = [Author, Paper, Conference, Paper, Author]\n strategy = PathSimStrategy(graph, metaPath)\n\n mike = authorMap['Mike']\n jimScore, maryScore, bobScore, annScore = strategy.findSimilarityScores(\n mike, [authorMap['Jim'], authorMap['Mary'], authorMap['Bob'], authorMap['Ann']]\n )\n\n self.assertEquals(bobScore, max([jimScore, maryScore, 
bobScore, annScore]))\n self.assertEquals(annScore, 0)", "def _compare_structure(sample, reference):\n paths = MappingValidator._find_all_paths(reference)\n result = True\n for path in paths:\n result = result and MappingValidator._validate_key(sample, path)\n if not result:\n break\n return result", "def test_splits_similarity(self):\n a_train = torch.as_tensor(\n [\n [1, 1, 2],\n [2, 1, 3],\n [1, 2, 3],\n [4, 1, 5],\n [5, 1, 6],\n ]\n )\n a_test = torch.as_tensor(\n [\n [4, 2, 6],\n ]\n )\n b_train = torch.as_tensor(\n [\n [1, 1, 2],\n [2, 1, 3],\n [1, 2, 3],\n [4, 1, 5],\n [4, 2, 6],\n ]\n )\n b_test = torch.as_tensor(\n [\n [5, 1, 6],\n ]\n )\n\n a_train_tf = CoreTriplesFactory.create(a_train)\n a_test_tf = CoreTriplesFactory.create(a_test)\n b_train_tf = CoreTriplesFactory.create(b_train)\n b_test_tf = CoreTriplesFactory.create(b_test)\n\n steps = splits_steps([a_train_tf, a_test_tf], [b_train_tf, b_test_tf])\n self.assertEqual(2, steps)\n\n similarity = splits_similarity([a_train_tf, a_test_tf], [b_train_tf, b_test_tf])\n self.assertEqual(1 - steps / 6, similarity)", "def test_difference_in_hierarchy(self):\n self.assertTrue(np.allclose(self.vectors.difference_in_hierarchy('dog.n.01', 'dog.n.01'), 0))\n self.assertTrue(np.allclose(self.vectors.difference_in_hierarchy('mammal.n.01', 'dog.n.01'), 0.9384287))\n self.assertTrue(np.allclose(self.vectors.difference_in_hierarchy('dog.n.01', 'mammal.n.01'), -0.9384287))", "def test_several_paths(id1, id2, expected_path_ids):\n syn1 = germanet_data.get_synset_by_id(id1)\n syn2 = germanet_data.get_synset_by_id(id2)\n paths = syn1.shortest_path(syn2)\n assert len(paths) == len(expected_path_ids), \"the number of found paths doesn't macht the true number of paths\"\n for path in paths:\n path = [synset.id for synset in path]\n np.testing.assert_equal(path in expected_path_ids, True)", "def similarity(self, other):\n part = self.__part_converter(self.part)\n if part != self.__part_converter(other.part):\n return 0\n tresh = 0.2\n sss = wn.synsets(self.string, part)\n sso = wn.synsets(other.string, part)\n best_sim = 0\n for ss in sss:\n # if not match('^' + self.string + '\\..+', ss.name()):\n # continue\n for so in sso:\n # if not match('^' + other.string + '\\..+', so.name()):\n # continue\n sim = ss.wup_similarity(so)\n if (tresh < sim) and (best_sim < sim):\n best_sim = sim\n return best_sim", "def test_similarity_measure_size_compatibility():\n\n patch1 = torch.randn(size=(4, 6, 2))\n patch2 = torch.randn(size=(4, 6, 2))\n\n ssd_similarity_measure(patch1, patch2)\n sad_similarity_measure(patch1, patch2)\n assert True # just check if the ssd calculation was successfull\n\n patch1 = torch.randn(size=(4, 3))\n patch2 = torch.randn(size=(4, 3))\n\n ssd_similarity_measure(patch1, patch2)\n sad_similarity_measure(patch1, patch2)\n assert True # just check if the ssd calculation was successfull\n\n patch1 = torch.randn(size=(5,))\n patch2 = torch.randn(size=(5,))\n\n ssd_similarity_measure(patch1, patch2)\n sad_similarity_measure(patch1, patch2)\n assert True # just check if the ssd calculation was successfull\n\n patch1 = torch.randn(size=(3, 7, 2, 4))\n patch2 = torch.randn(size=(3, 7, 2, 4))\n\n ssd_similarity_measure(patch1, patch2)\n sad_similarity_measure(patch1, patch2)\n assert True # just check if the ssd calculation was successful", "def test_tree_intersection_on_challenge_example(first_example, second_example):\n expected = [500, 350, 200, 175, 160, 125, 100]\n actual = tree_intersection(first_example, second_example)\n assert expected 
== actual" ]
[ "0.6935329", "0.64592564", "0.6062781", "0.60344684", "0.59385085", "0.5921896", "0.58923703", "0.5810353", "0.57144535", "0.5630349", "0.5630217", "0.5611155", "0.5608266", "0.5580421", "0.55268395", "0.5517529", "0.5477586", "0.54294527", "0.54036385", "0.5346049", "0.533801", "0.5304806", "0.5281558", "0.5268061", "0.526723", "0.5265672", "0.5251841", "0.5246512", "0.52444077", "0.52324474" ]
0.6949879
0
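A worked example for the subpath_sim entry above (the function and its numpy/math imports are assumed to be in scope exactly as defined there): two subpath sets that share two of their three elements score 2*(3+3)/(2*3*3) = 2/3, which is simply the mean of the overlap recall with respect to each set.

# subpath_sim is assumed to be importable exactly as defined in the entry above.
paths_a = {("NP", "NN"), ("VP", "VBZ"), ("VP", "NP", "NN")}
paths_b = {("NP", "NN"), ("VP", "VBZ"), ("VP", "PP", "IN")}

print(subpath_sim(paths_a, paths_b))    # 2*(3+3)/(2*3*3) = 0.666...
print(subpath_sim({("X",)}, {("Y",)}))  # no shared subpaths -> 0.0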
Substructure similarity based on descendant nodes (measured on groupings, where a grouping is a node plus its descendants), PARTIAL match
def substructure_sim_partial(subtrees_1, subtrees_2):
    assert(len(subtrees_1) == len(subtrees_2))
    n = len(subtrees_1)
    f1 = np.zeros(n)
    f2 = np.zeros(n)
    for i in range(n):
        if (subtrees_1[i] == subtrees_2[i]):  # exact match, or both are dropped
            f1[i] = 1.0
            f2[i] = 1.0
        else:  # partial match
            f1[i] = SubstructureAgreement.save_div(
                len(subtrees_1[i].intersection(subtrees_2[i])), float(len(subtrees_2[i])))
            f2[i] = SubstructureAgreement.save_div(
                len(subtrees_2[i].intersection(subtrees_1[i])), float(len(subtrees_1[i])))

    return (np.sum(f1) + np.sum(f2)) / (2.0 * float(n))  # average of average recall
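To make the partial-match averaging concrete, a small sketch (assuming the SubstructureAgreement class from this entry is importable and that its save_div helper returns a/b while guarding against a zero denominator): one position matches exactly, the other overlaps in one of two descendants, so each side averages (1.0 + 0.5)/2 and the score is 0.75.

# SubstructureAgreement.substructure_sim_partial and save_div are assumed to be
# available as in the entry above; the dependency-label sets are made up for illustration.
trees_a = [{"amod", "det"}, {"nsubj", "dobj"}]
trees_b = [{"amod", "det"}, {"nsubj", "advmod"}]

print(SubstructureAgreement.substructure_sim_partial(trees_a, trees_b))
# position 0: exact match -> 1.0 on both sides
# position 1: overlap of 1 out of 2 -> 0.5 on both sides
# score = (1.5 + 1.5) / (2 * 2) = 0.75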
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def substructure_sim_exact(subtrees_1, subtrees_2):\n assert(len(subtrees_1) == len(subtrees_2))\n n = len(subtrees_1)\n f1 = np.zeros(n)\n for i in range(n):\n f1[i] = subtrees_1[i] == subtrees_2[i] # calculate the number of matching pairs\n\n return float(np.count_nonzero(f1)) / float(len(f1))", "def compute_similarity(self, seq_node, **kwargs):\n pass", "def subpath_sim(subpaths_1, subpaths_2):\n u = subpaths_1.union(subpaths_2)\n f1 = np.zeros(len(u))\n f2 = np.zeros(len(u))\n u = list(u)\n\n # convert graph into one-hot-vector (based on the precense of subpaths)\n for i in range(len(u)):\n if u[i] in subpaths_1:\n f1[i] = 1\n if u[i] in subpaths_2:\n f2[i] = 1\n\n score = np.dot(f1, f2) * (np.count_nonzero(f1) + np.count_nonzero(f2)) / (2 * (np.count_nonzero(f1) * np.count_nonzero(f2)))\n\n if math.isnan(score): # in case of empty set\n return 0.0\n else:\n return score", "def test4(self):\n tgts = [ ('CCC(O)C(=O)O',\n ('CCC','OCC','OCC=O','OCCO','CCCC','OC=O','CC(O)C')),\n ]\n for smi,matches in tgts:\n m = Chem.MolFromSmiles(smi)\n fp1 = Chem.RDKFingerprint(m,2,7,9192,4,0)\n obs = fp1.GetOnBits()\n for match in matches:\n m2 = Chem.MolFromSmiles(match)\n fp2 = Chem.RDKFingerprint(m2,2,7,9192,4,0)\n v1,v2 = DataStructs.OnBitProjSimilarity(fp2,fp1)\n assert feq(v1,1.0000),'substruct %s not properly contained in %s'%(match,smi)", "def test5(self):\n tgts = [ ('CCC(O)C(=O)O',\n ('O[CH-][CH2-]','O[CH-][C-]=O')),\n ]\n for smi,matches in tgts:\n m = Chem.MolFromSmiles(smi)\n fp1 = Chem.RDKFingerprint(m,2,7,9192,4,1)\n obs = fp1.GetOnBits()\n for match in matches:\n m2 = Chem.MolFromSmiles(match)\n fp2 = Chem.RDKFingerprint(m2,2,7,9192,4,1)\n v1,v2 = DataStructs.OnBitProjSimilarity(fp2,fp1)\n assert feq(v1,1.0000),'substruct %s not properly contained in %s'%(match,smi)", "def test_ontology_ic_similarity(ontology):\n\tassert ontology.similarity_ic([\"TO:0000001\"],[\"TO:0000002\"], inherited=False, as_weight=False) == 0\n\tassert ontology.similarity_ic([\"TO:0000001\"],[\"TO:0000003\"], inherited=False, as_weight=False) == 0\n\tassert ontology.similarity_ic([\"TO:0000002\"],[\"TO:0000003\"], inherited=False, as_weight=False) == 0\n\tassert ontology.similarity_ic([\"TO:0000003\"],[\"TO:0000005\"], inherited=False, as_weight=False) == 0.3690702464285426\n\tassert ontology.similarity_ic([\"TO:0000007\"],[\"TO:0000008\"], inherited=False, as_weight=False) == 0.5\n\tassert ontology.similarity_ic([\"TO:0000005\"],[\"TO:0000009\"], inherited=False, as_weight=False) == 0\n\n\tassert ontology.similarity_ic([\"TO:0000001\"],[\"TO:0000002\",\"TO:0000001\"], inherited=False, as_weight=False) == 0\n\tassert ontology.similarity_ic([\"TO:0000003\"],[\"TO:0000001\",\"TO:0000009\"], inherited=False, as_weight=False) == 0\n\tassert ontology.similarity_ic([\"TO:0000002\"],[\"TO:0000003\",\"TO:0000002\"], inherited=False, as_weight=False) == 0.3690702464285426\n\tassert ontology.similarity_ic([\"TO:0000003\"],[\"TO:0000005\",\"TO:0000002\"], inherited=False, as_weight=False) == 0.3690702464285426\n\tassert ontology.similarity_ic([\"TO:0000008\"],[\"TO:0000008\",\"TO:0000007\"], inherited=False, as_weight=False) == 1.3690702464285427\n\tassert ontology.similarity_ic([\"TO:0000005\"],[\"TO:0000009\",\"TO:0000002\"], inherited=False, as_weight=False) == 0.3690702464285426", "def compare_trees(first_soup: HTMLStrip, second_soup: HTMLStrip) -> float:\n first_tree = Tree.from_soup_object(first_soup.file_name, first_soup.original_soup)\n second_tree = Tree.from_soup_object(second_soup.file_name, 
second_soup.original_soup)\n\n common_paths_size: int = first_tree.num_of_common_paths(second_tree)\n target_size: int = second_tree.total_num_of_paths()\n similarity = float(common_paths_size)*100/target_size\n print(f'{similarity:.2f}')\n return similarity", "def ratio(n1,n2, explain=0, optimize=False):\n weight_normal_form = 5.0 #distance between soundexes of normal form\n weight_normal_form_soundex = 8.0 #average distance between soundexes of normal form\n weight_geslachtsnaam1 = 10.0 #distance between soundexes of geslachtsnamen\n weight_geslachtsnaam2 = 10.0 #distance between geslachtsnaam\n weight_initials = 2 #distance between initials\n\n nf1 = n1.guess_normal_form()\n nf2 = n2.guess_normal_form()\n\n if not nf1 or not nf2:\n return 0.0\n elif nf1 == nf2:\n return 1.0\n ratio_normal_form = Similarity.average_distance(split(nf1), split(nf2))\n \n #create a simkplified soundex set for this name\n #remove stopwords\n# nf1 = remove_stopwords( nf1)\n# nf2 = remove_stopwords( nf2)\n \n se1 = n1.get_normal_form_soundex()\n se2 = n2.get_normal_form_soundex()\n ratio_normal_form_soundex = Similarity.average_distance( se1, se2)\n \n #gelachtsnaam wordt op twee manieren met elkaar vergeleken\n g1 = n1.geslachtsnaam() #or n1.get_volledige_naam()\n g2 = n2.geslachtsnaam() #or n2.get_volledige_naam()\n g1 = to_ascii(g1)\n g2 = to_ascii(g2)\n if not optimize:\n #de soundexes van de achternaam worden meegewoen\n #g1_soundex = n1.soundex_nl(g1, group=2, length=-1)\n g1_soundex = n1.geslachtsnaam_soundex()\n #g2_soundex = n2.soundex_nl(g2, group=2, length=-1)\n g2_soundex = n2.geslachtsnaam_soundex()\n ratio_geslachtsnaam1 = Similarity.average_distance(g1_soundex, g2_soundex)\n else:\n ratio_geslachtsnaam1 = 1 \n weight_geslachtsnaam1 = 0\n \n #n de afstand van de woorden in de achtenraam zelf\n ratio_geslachtsnaam2 = Similarity.average_distance(\n re.split('[ \\.\\,\\-]', g1.lower()),\n re.split('[ \\.\\,\\-]', g2.lower()),\n levenshtein_ratio)\n n1_initials = n1.initials()\n n1_initials_lower = n1_initials.lower()\n n2_initials = n2.initials()\n n2_initials_lower = n2_initials.lower()\n n1_contains_initials = n1.contains_initials()\n n2_contains_initials = n2.contains_initials()\n #count initials only if we have more than one\n #(or perhaps make this: if we know the first name)\n if len(n1_initials) == 1 or len(n2_initials) == 1:\n #initials count much less if there is only one\n weight_initials = weight_initials_if_one_name_consists_of_one_word_only\n# ratio_initials = .5\n ratio_initials = levenshtein_ratio(n1_initials_lower, n2_initials_lower)\n elif n1_contains_initials or n2_contains_initials:\n ratio_initials = levenshtein_ratio(n1_initials_lower, n2_initials_lower)\n weight_initials = weight_initials_if_one_name_is_in_initials\n elif len(n1_initials) > 1 and len(n2_initials) > 1:\n ratio_initials = levenshtein_ratio(n1_initials_lower, n2_initials_lower)\n else:\n ratio_initials = 0.7\n \n if n1_contains_initials or n2_contains_initials:\n weight_normal_form = weight_normal_form_if_one_name_is_in_initials \n weight_normal_form_soundex = weight_normal_form_soundex_if_one_name_is_in_initials\n\n counter = (ratio_normal_form * weight_normal_form +\n ratio_normal_form_soundex * weight_normal_form_soundex +\n ratio_geslachtsnaam1 * weight_geslachtsnaam1 +\n ratio_geslachtsnaam2 * weight_geslachtsnaam2 +\n ratio_initials * weight_initials)\n numerator = (weight_normal_form + weight_normal_form_soundex +\n weight_initials + weight_geslachtsnaam1 + weight_geslachtsnaam2)\n if numerator == 0:\n 
return 0.0\n final_ratio = counter/numerator\n\n if explain:\n s = '-' * 100 + '\\n'\n s += 'Naam1: %s [%s] [%s] %s\\n' % (n1, n1_initials, n1.guess_normal_form(), se1)\n s += 'Naam2: %s [%s] [%s] %s\\n' % (n2, n2_initials, n2.guess_normal_form(), se2)\n s += 'Similarity ratio: %s\\n' % final_ratio\n s += '--- REASONS' + '-' * 30 + '\\n'\n format_s = '%-30s | %-10s | %-10s | %-10s | %-10s | %s-10s\\n'\n s += format_s % ('\\t property', ' ratio', ' weight','relative_weight', ' r*w', 'r * relative_w')\n s += '\\t' + '-' * 100 + '\\n'\n format_s = '\\t%-30s | %-10f | %-10f | %-10f | %-10f | %-10f\\n'\n s += format_s % (' normal_form', ratio_normal_form, weight_normal_form,weight_normal_form/counter, ratio_normal_form * weight_normal_form, ratio_normal_form * weight_normal_form/counter)\n s += format_s % ('soundex van normal_form', ratio_normal_form_soundex, weight_normal_form_soundex,weight_normal_form_soundex/counter, ratio_normal_form_soundex* weight_normal_form_soundex, ratio_normal_form_soundex * weight_normal_form_soundex/counter)\n s += format_s % ('soundex van geslachtsnaam1', ratio_geslachtsnaam1, weight_geslachtsnaam1,weight_geslachtsnaam1/counter, ratio_geslachtsnaam1 * weight_geslachtsnaam1, ratio_geslachtsnaam1 * weight_geslachtsnaam1/counter)\n s += format_s % ('geslachtsnaam', ratio_geslachtsnaam2, weight_geslachtsnaam2,weight_geslachtsnaam2/counter, ratio_geslachtsnaam2 *weight_geslachtsnaam2 , ratio_geslachtsnaam2 * weight_geslachtsnaam2/counter)\n s += format_s % ('initials', ratio_initials, weight_initials, weight_initials/counter, ratio_initials *weight_initials, ratio_initials * weight_initials/counter)\n s += '\\tTOTAL (numerator) | %s (counter = %s)\\n' % (counter, numerator)\n \n return s\n return final_ratio", "def test_ontology_similarity_jaccard(ontology):\n\tassert ontology.similarity_jaccard([\"TO:0000001\"],[\"TO:0000002\"], inherited=False) == 1/2\n\tassert ontology.similarity_jaccard([\"TO:0000001\"],[\"TO:0000003\"], inherited=False) == 1/2\n\tassert ontology.similarity_jaccard([\"TO:0000002\"],[\"TO:0000003\"], inherited=False) == 1/3\n\tassert ontology.similarity_jaccard([\"TO:0000003\"],[\"TO:0000005\"], inherited=False) == 2/5\n\tassert ontology.similarity_jaccard([\"TO:0000007\"],[\"TO:0000008\"], inherited=False) == 2/3\n\tassert ontology.similarity_jaccard([\"TO:0000005\"],[\"TO:0000009\"], inherited=False) == 1/8\n\n\tassert ontology.similarity_jaccard([\"TO:0000001\"],[\"TO:0000002\",\"TO:0000001\"], inherited=False) == 1/2\n\tassert ontology.similarity_jaccard([\"TO:0000003\"],[\"TO:0000001\",\"TO:0000009\"], inherited=False) == 1/5\n\tassert ontology.similarity_jaccard([\"TO:0000002\"],[\"TO:0000003\",\"TO:0000002\"], inherited=False) == 2/3 \n\tassert ontology.similarity_jaccard([\"TO:0000003\"],[\"TO:0000005\",\"TO:0000002\"], inherited=False) == 2/5\n\tassert ontology.similarity_jaccard([\"TO:0000008\"],[\"TO:0000008\",\"TO:0000007\"], inherited=False) == 3/3\n\tassert ontology.similarity_jaccard([\"TO:0000005\"],[\"TO:0000009\",\"TO:0000002\"], inherited=False) == 2/8", "def wordSimilarityRatio(sent_1,sent_2):", "def dependency_similarity(s1, s2):\n # pass\n parsed_sentence_1 = parser.raw_parse(s1)\n parsed_sentence_2 = parser.raw_parse(s2)\n \n tree1 = next(parsed_sentence_1)\n tree2 = next(parsed_sentence_2)\n \n triples1 = [t for t in tree1.triples()]\n triples2 = [t for t in tree2.triples()] \n\n # Compute similarity\n if len(triples1) != 0 and len(triples2) != 0:\n similarity = 1 - jaccard_distance(set(triples1), set(triples2))\n return 
similarity\n else:\n return 0", "def similarity(self, e1, e2):\n\t\tpass", "def similarity_euclid(matrix, business1, business2):\n selected_features = matrix.loc[business1].notna() & matrix.loc[business2].notna()\n\n if not selected_features.any():\n return 0\n\n features1 = matrix.loc[business1][selected_features]\n features2 = matrix.loc[business2][selected_features]\n distance = math.sqrt(((features1 - features2) ** 2).sum())\n\n if distance is np.nan:\n return 0\n\n return 1 / (1 + distance)", "def siblinghood_sim_partial(siblinghood_1, siblinghood_2):\n return SubstructureAgreement.substructure_sim_partial(siblinghood_1, siblinghood_2)", "def test_frac_similar(self):\n transitions = dict.fromkeys(\n [\n (\"A\", \"A\"),\n (\"A\", \"G\"),\n (\"G\", \"A\"),\n (\"G\", \"G\"),\n (\"U\", \"U\"),\n (\"U\", \"C\"),\n (\"C\", \"U\"),\n (\"C\", \"C\"),\n ]\n )\n\n s1 = self.RNA(\"UCAGGCAA\")\n s2 = self.RNA(\"CCAAAUGC\")\n s3 = self.RNA(\"GGGGGGGG\")\n e = self.RNA(\"\")\n\n def test(x, y, z):\n return self.assertFloatEqual(x.frac_similar(y, transitions), z)\n\n test(e, e, 0)\n test(s1, e, 0)\n test(s1, s1, 1)\n test(s1, s2, 7.0 / 8)\n test(s1, s3, 5.0 / 8)\n test(s2, s3, 4.0 / 8)", "def nodes_ratio(data: Data, position_against_soma=None, node_types=None, filter_layers=None):\n soma = data.morphology.get_soma()\n if position_against_soma=='above':\n criterion=lambda nod:((nod['type'] in node_types) if node_types is not None else True) and nod['y']<soma['y']\n elif position_against_soma=='below':\n criterion=lambda nod:((nod['type'] in node_types) if node_types is not None else True) and nod['y']>soma['y']\n else:\n criterion=lambda nod:nod['type'] in node_types if node_types is not None else True\n num_type_nodes = data.morphology.get_node_by_types(node_types)\n if filter_layers and isinstance(filter_layers, list):\n result = {}\n for layer in filter_layers:\n filter_layer_depth = data.reference_layer_depths.get(layer)\n y_min, y_max, is_scale = filter_layer_depth.pia_side, filter_layer_depth.wm_side, filter_layer_depth.scale\n if is_scale:\n selected_nodes = data.morphology.filter_nodes(lambda nod: criterion(nod) and y_min<nod['y']<y_max)\n else:\n selected_nodes = data.morphology.filter_nodes(lambda nod: criterion(nod) and nod['y']>y_min)\n result[layer]=len(selected_nodes)/len(num_type_nodes)\n return result\n else:\n selected_nodes = data.morphology.filter_nodes(criterion)\n return len(selected_nodes)/len(num_type_nodes)", "def wordNet_similarity(sentence1, sentence2):\r\n # Tokenize and tag\r\n \r\n # sentence1 = pos_tag(word_tokenize(sentence1))\r\n sentence1=st_tagger.tag(word_tokenize(sentence1))\r\n \r\n # sentence2 = pos_tag(word_tokenize(sentence2))\r\n sentence2=st_tagger.tag(word_tokenize(sentence2))\r\n\r\n \r\n # Get the synsets for the tagged words\r\n #################################################\r\n\r\n # synsets1=[]\r\n # synsets2=[]\r\n # for tagged_word in sentence1:\r\n # print(tagged_word)\r\n # tagged_word = list(tagged_word)\r\n # synsets1.append(tagged_to_synset(tagged_word[0],tagged_word[1]))\r\n # for tagged_word in sentence2:\r\n # print(tagged_word)\r\n # tagged_word = list(tagged_word)\r\n # print(tagged_word)\r\n # synsets2.append(tagged_to_synset(tagged_word[0],tagged_word[1]))\r\n\r\n # The code above is the elaboration of code below\r\n synsets1 = [tagged_to_synset(*tagged_word) for tagged_word in sentence1]\r\n synsets2 = [tagged_to_synset(*tagged_word) for tagged_word in sentence2]\r\n \r\n # Filter out the Nones in the synonym set\r\n synsets1 = [ss for 
ss in synsets1 if ss]\r\n synsets2 = [ss for ss in synsets2 if ss]\r\n \r\n score, count = 0.0, 0\r\n \r\n###########################################################################\r\n # for syn1 in synsets1:\r\n # arr_simi_score = []\r\n # print('=========================================')\r\n # print(syn1)\r\n # print('----------------')\r\n # for syn2 in synsets2:\r\n # print(syn2)\r\n # simi_score = syn1.path_similarity(syn2)\r\n # print(simi_score)\r\n # if simi_score is not None:\r\n # arr_simi_score.append(simi_score)\r\n # print('----------------')\r\n # print(arr_simi_score)\r\n # if(len(arr_simi_score) > 0):\r\n # best = max(arr_simi_score)\r\n # print(best)\r\n # score += best\r\n # count += 1\r\n # # Average the values\r\n # print('score: ', score)\r\n # print('count: ', count)\r\n # score /= count\r\n\r\n###########################################################################\r\n\r\n for syn1 in synsets1:\r\n arr_simi_score = []\r\n # print('=========================================')\r\n print(\"Each word from Synonym se1\",syn1)\r\n # print('----------------')\r\n for syn2 in synsets2:\r\n print(\"Each word from Synonym se2\",syn2)\r\n # simi_score = syn1.path_similarity(syn2)\r\n simi_score = syn1.wup_similarity(syn2)\r\n print(\"word to word path_similarity score\",simi_score)\r\n if simi_score is not None:\r\n arr_simi_score.append(simi_score)\r\n print('----------------')\r\n print(arr_simi_score)\r\n if(len(arr_simi_score) > 0):\r\n best = max(arr_simi_score)\r\n print(\"best score so far\", best)\r\n score += best\r\n count += 1\r\n # Average the values\r\n print('score: ', score)\r\n print('count: ', count)\r\n if count!=0:\r\n score /= count\r\n else:\r\n score=0.0\r\n return score", "def siblinghood_sim_exact(siblinghood_1, siblinghood_2):\n return SubstructureAgreement.substructure_sim_exact(siblinghood_1, siblinghood_2)", "def compare_nodes(G,all_match_pairs,match_pair,traversed,node1,node2, ports_weight):\n logger.debug(f\"comparing {node1},{node2}, traversed {traversed}\")\n nbrs1 = sorted(set(G.neighbors(node1)) - set(traversed))\n #remove dummies\n nbrs1 = sorted(set([nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=7]))\n nbrs2 = sorted(set(G.neighbors(node2)) - set(traversed))\n #remove dummies\n nbrs2 = sorted(set([nbr for nbr in nbrs2 if G.get_edge_data(node2, nbr)['weight'] !=7]))\n logger.debug(f\"node1:{node1},property: {G.nodes[node1]},neigbors1: {nbrs1}\")\n logger.debug(f\"node2:{node2},property: {G.nodes[node2]},neigbors2: {nbrs2}\")\n if not nbrs1 or not nbrs2:\n if compare_two_nodes(G, node1, node2, ports_weight):\n match_pair[node1] = node2\n logger.debug(f\"no new neihbours, returning recursion {match_pair}\")\n return\n elif len(nbrs1)> 10:\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n logger.debug(f\"skipping high fanout nets due to large computation, {node1} {nbrs1}\")\n traversed.append(node1)\n return\n elif len(nbrs2)> 10:\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n traversed.append(node2)\n logger.debug(f\"skipping high fanout nets due to large computation, {node2} {nbrs2}\")\n return\n\n if node1 == node2:\n if node1 in match_pair.keys() or node1 in match_pair.values():\n logger.debug(\"avoid existing pair wise symmetry\")\n return\n logger.debug(f\"single node {node1}, nbrs {nbrs1}, nbr_weight {[G.get_edge_data(node1,nbr) for 
nbr in nbrs1]}\")\n SD_nbrs= [nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=2]\n ## TBD: filter based on primitive constraints\n ## Right now will try to figure out S/D paths\n if len(SD_nbrs) ==0:\n logger.debug(f\"No SD paths found to traverse\")\n match_pair[node1]=node1\n elif len(SD_nbrs) ==1:\n logger.debug(f\"traversing single S/D path {SD_nbrs}\")\n match_pair[node1]=node1\n traversed.append(node1)\n compare_nodes(G,all_match_pairs,match_pair,traversed,SD_nbrs[0],SD_nbrs[0],ports_weight)\n else:\n logger.debug(f\" multiple nodes diverging {SD_nbrs}\")\n logger.debug(f\"nbr weights: {SD_nbrs} {[G.get_edge_data(node1, nbr)['weight'] for nbr in SD_nbrs ]}\")\n match_pair[node1]=node1\n traversed.append(node1)\n new_sp=sorted(set(SD_nbrs)-set(traversed))\n all_match_pairs_local={}\n for nbr1,nbr2 in combinations(new_sp, 2):\n logger.debug(f\"recursive pair call from single branch {nbr1} {nbr2}\")\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n if new_pair:\n #new_pair[nbr1]=nbr2\n all_match_pairs_local[nbr1+'_'+nbr2] = new_pair\n all_match_pairs_local={k: v for k, v in all_match_pairs_local.items() if len(v)>0}\n if len(all_match_pairs_local)==1:\n match_pair.update( all_match_pairs_local[list(all_match_pairs_local.keys())[0]])\n logger.debug(f\"found inline pair: {pprint.pformat(match_pair, indent=4)}\")\n else:\n for nbr1 in new_sp:\n if (nbr1+'_'+nbr1 not in all_match_pairs.keys()):\n logger.debug(f\"recursive single branch call from single branch {nbr1} {nbr1}\")\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr1,ports_weight)\n #filtering multiple axis of symmetries with same block, ideally they should be handled by array generation\n if new_pair:\n all_match_pairs[nbr1+'_'+nbr1] = new_pair\n logger.debug(f\"updating match pairs: {pprint.pformat(all_match_pairs, indent=4)}\")\n\n\n elif nbrs1 == nbrs2:\n logger.debug(f\"traversing converging branch\")\n match_pair[node1]=node2\n traversed+=[node1,node2]\n nbrs1=sorted(set(nbrs1)-set([node1,node2]))\n logger.debug(f\"all non traversed neighbours: {nbrs1}\")\n if len(nbrs1)==1:\n nbr1=nbr2=nbrs1[0]\n logger.debug(f\"keeping single converged branch inline {nbr1} {nbr2}\")\n compare_nodes(G,all_match_pairs,match_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n else:\n for nbr1,nbr2 in combinations_with_replacement(nbrs1,2):\n logger.debug(f\"recursive call from converged branch {nbr1} {nbr2}\")\n if nbr1+'_'+nbr2 not in all_match_pairs.keys():\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n #filtering multiple axis of symmetries with same block, ideally they should be handled by array generation\n if new_pair:\n all_match_pairs[nbr1+'_'+nbr2] = new_pair\n logger.debug(f\"updating match pairs: {pprint.pformat(all_match_pairs, indent=4)}\")\n\n\n elif compare_two_nodes(G,node1,node2,ports_weight):\n nbrs1 = sorted(set([nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=2]))\n nbrs2 = sorted(set([nbr for nbr in nbrs2 if G.get_edge_data(node2, nbr)['weight'] !=2]))\n match_pair[node1]=node2\n traversed+=[node1,node2]\n logger.debug(f\"Traversing parallel branches from {node1},{node2} {nbrs1}, {nbrs2}\")\n nbrs1_wt = [G.get_edge_data(node1, nbr)['weight'] for nbr in nbrs1]\n nbrs2_wt = [G.get_edge_data(node2, nbr)['weight'] for nbr in nbrs2]\n unique_match=find_unique_matching_branches(G,nbrs1,nbrs2,ports_weight)\n if len(nbrs1)==0 or len(nbrs2)==0:\n logger.debug(f\"no new 
SD neihbours, returning recursion {match_pair}\")\n elif len(nbrs1) ==1 and len(nbrs2)==1:\n logger.debug(f\"traversing binary branch\")\n compare_nodes(G,all_match_pairs,match_pair,traversed,nbrs1.pop(),nbrs2.pop(),ports_weight)\n elif unique_match:\n logger.debug(f'traversing unique matches {unique_match}')\n match_pair[node1]=node2\n traversed+=[node1,node2]\n for nbr1,nbr2 in unique_match.items():\n logger.debug(f\"recursive call from binary {node1}:{node2} to {nbr1}:{nbr2}\")\n compare_nodes(G,all_match_pairs,match_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n elif len(nbrs1_wt)>len(set(nbrs1_wt))>1 and len(nbrs2_wt)>len(set(nbrs2_wt))>1:\n logger.debug(f\"setting new start points {node1} {node2}\")\n match_pair[node1]=node2\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n else:\n match_pair = {}\n logger.debug(f\"end all traversal from binary branch {node1} {node2}\")\n\n else:\n match_pair = {}\n logger.debug(f\"end of recursion branch, matches {match_pair}\")", "def reviewer_similarity_score(self, other: _Vertex) -> float:\n if self.degree() == 0 or other.degree == 0:\n return 0.0\n else:\n neighbours = self.neighbours\n other_neighbours = other.neighbours\n same_neighbours = neighbours.keys() & other_neighbours.keys()\n union = len(self.neighbours) + len(other.neighbours)\n sim_score_so_far = 0\n\n for vertex in same_neighbours:\n # 'bothered reviewing' bonus:\n sim_score_so_far += 1\n # 'love' bonus\n if self.neighbours[vertex] >= 9 and other.neighbours[vertex] >= 9:\n sim_score_so_far += 2\n # 'like' bonus\n elif self.neighbours[vertex] >= 7 and other.neighbours[vertex] >= 7:\n sim_score_so_far += 1\n\n return sim_score_so_far / union", "def reduce_rec(node):\n if node.is_leaf():\n return\n for edge in node.child_nodes:\n # replacing the subdiagram with a singular isomorphic one\n node.child_nodes[edge] = hashtable[node.child_nodes[edge].__hash__()]\n # and going down recursively along that subdiagram\n reduce_rec(node.child_nodes[edge])", "def test_isomorphism_match(data):\n\n reference = data.draw(ISO_BUILDER)\n nodes = data.draw(st.sets(st.sampled_from(list(reference.nodes)),\n max_size=len(reference)))\n graph = reference.subgraph(nodes)\n\n note((\"Reference nodes\", reference.nodes(data=True)))\n note((\"Reference edges\", reference.edges))\n note((\"Graph nodes\", graph.nodes(data=True)))\n note((\"Graph edges\", graph.edges))\n\n node_match = nx.isomorphism.categorical_node_match('element', None)\n matcher = nx.isomorphism.GraphMatcher(reference, graph, node_match=node_match)\n expected = make_into_set(matcher.subgraph_isomorphisms_iter())\n found = make_into_set(vermouth.graph_utils.isomorphism(reference, graph))\n\n note((\"Found\", found))\n note((\"Expected\", expected))\n\n if not expected:\n event(\"Not subgraphs\")\n if found == expected:\n event(\"Exact match\")\n\n assert found <= expected", "def needleMatching(self):\r\n # productive\r\n profprint()\r\n modelNodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLModelNode')\r\n nbNode = modelNodes.GetNumberOfItems()\r\n result = []\r\n found = []\r\n # print nbNode\r\n for nthNode in range(nbNode):\r\n node = slicer.mrmlScene.GetNthNodeByClass(nthNode, 'vtkMRMLModelNode')\r\n if node.GetID() not in found and node.GetAttribute('type') != 'Validation':\r\n dist = []\r\n polydata = node.GetPolyData()\r\n if polydata != None:\r\n bounds = polydata.GetBounds()\r\n for nthNode2 in range(nbNode):\r\n node2 = 
slicer.mrmlScene.GetNthNodeByClass(nthNode2, 'vtkMRMLModelNode')\r\n if node2.GetID() not in found and node2.GetAttribute('type') == 'Validation':\r\n polydata2 = node2.GetPolyData()\r\n if polydata2 != None and polydata2.GetNumberOfPoints() > 100 and polydata.GetNumberOfPoints() > 100:\r\n tipDistance = self.distTip(int(node.GetID().strip('vtkMRMLModelNode')) , int(node2.GetID().strip('vtkMRMLModelNode')))\r\n baseDistance = self.distBase(int(node.GetID().strip('vtkMRMLModelNode')) , int(node2.GetID().strip('vtkMRMLModelNode')))\r\n name = node.GetName()\r\n manualName = name.lstrip('auto-seg_').lstrip('manual-seg_').lstrip('obturator-seg_').lstrip('0123456789').lstrip('-ID-vtkMRMLModelNode').lstrip('0123456789-')\r\n if manualName==node2.GetName(): dist.append([tipDistance, node2.GetID(), node2.GetName()])\r\n # print tipDistance\r\n if dist != []:\r\n match = [min(dist)[0], min(dist)[1], node.GetID(), min(dist)[2]]\r\n result.append(match)\r\n found.append(min(dist)[1])\r\n found.append(node.GetID())\r\n node.GetDisplayNode().SetSliceIntersectionVisibility(1)\r\n # print result\r\n return result", "def testFindAllSimilarityFromNodeOnPathSimExampleThree(self):\n\n graph, authorMap, conferenceMap = SampleGraphUtility.constructPathSimExampleThree()\n metaPath = [Author, Paper, Conference, Paper, Author]\n strategy = PathSimStrategy(graph, metaPath)\n\n mike = authorMap['Mike']\n mostSimilarNodes = strategy.findMostSimilarNodes(mike, 5)\n\n self.assertEquals([authorMap['Bob'], authorMap['Mary'], authorMap['Jim']], mostSimilarNodes)", "def match(*args, current=None):\n # current is the original edge and clone is the change\n # this function should only be getting nodes with the same edges\n # if I change this to assume nodes of the same edge attr then I can\n # send this function \"equivalent edges\"\n scores = []\n for clone in args:\n if current.edge_attribute == clone.edge_attribute:\n source_condit = (\n clone.source.original_id == current.source.id\n or clone.source.id == current.source.id\n )\n target_condit = (\n clone.target.original_id == current.target.id\n or clone.target.id == current.target.id\n )\n if source_condit and target_condit:\n scores.append(2)\n return scores\n elif source_condit or target_condit:\n\n scores.append(1)\n else:\n # TODO: check subgraph/call is_similar\n # if subgraph is isomorphic then return 2\n scores.append(0)\n elif len(current.edge_attribute) > len(clone.edge_attribute):\n scores.append(-1)\n else: # edge attribute of current is shorter than of clone\n scores.append(-2)\n return scores", "def Path_Similarity(word1, word2):\n\n # Initial test to confirm unique nouns, otherwise passes back 1\n\n if word1 == word2:\n return 1, word1\n\n # Sets up Initial Variables & Dictionaries\n\n stepup1 = [word1]\n stepup2 = [word2]\n dict1= {}\n dict2= {}\n currentstep1 = []\n currentstep2 = []\n\n # Iterates through a loop an arbitrary # of times, adding new hypernyms\n # for each word to individual dictionaries with the number of iterations\n # as the key to the dictionary. 
The two dictionaries are saved perpetually\n\n for la in range(50):\n for i in range(len(stepup1)):\n currentstep1 += (stepup1[i].hypernyms())\n for syn in stepup1:\n dict1[syn] = la\n for i in range(len(stepup2)):\n currentstep2 = (stepup2[i].hypernyms())\n for syn in stepup2:\n dict2[syn] = la\n\n # Variables are reset and converted to continue the next stage of the loop\n\n stepup1 = currentstep1\n stepup2 = currentstep2\n currentstep1 = []\n currentstep2 = []\n\n\n # Each loop the dictionaries are checked for matches. I have confirmed that\n # checking each cycle is faster than checking at the end of an arbitrary\n # number of cycles.\n\n # Removes applied words as Possible Subsumers Per Fridays Clas\n dict1.pop(word1)\n dict2.pop(word2)\n\n #Gets possible Least Common Subsumers\n dict1Set = set(dict1)\n dict2Set = set(dict2)\n d = {}\n for name in dict1Set.intersection(dict2Set):\n d[name] = dict1[name]\n pos_lcs = [key for min_value in (min(d.values()),) for key in d if d[key] == min_value]\n\n #Returns Actual LCS\n key_lcs = []\n for i in pos_lcs:\n key_lcs.append(shortestPath(wn.synset('entity.n.01'),i))\n lcs = (pos_lcs[key_lcs.index(max(key_lcs))])\n\n #Returns path Similarity Value and Synset of LCS; Must Error Proof\n\n return 1/(dict1[lcs] + dict2[lcs]), lcs", "def similarity(self, other):\n part = self.__part_converter(self.part)\n if part != self.__part_converter(other.part):\n return 0\n tresh = 0.2\n sss = wn.synsets(self.string, part)\n sso = wn.synsets(other.string, part)\n best_sim = 0\n for ss in sss:\n # if not match('^' + self.string + '\\..+', ss.name()):\n # continue\n for so in sso:\n # if not match('^' + other.string + '\\..+', so.name()):\n # continue\n sim = ss.wup_similarity(so)\n if (tresh < sim) and (best_sim < sim):\n best_sim = sim\n return best_sim", "def sample_cond_on_subtree_nodes(new, tree, subtree_nodes, subtree_edges, subtree_adjlist):\n new_separators = {}\n new_cliques = set()\n old_cliques = set()\n subtree_order = len(subtree_nodes)\n #print(\"subtree nodes:\" + str(subtree_nodes))\n\n if subtree_order == 0:\n # If the tree, tree is empty (n isolated node),\n # add random neighbor.\n c = frozenset([new])\n new_cliques.add(c)\n #c2 = tree.nodes()[0] # nx 1.9\n c2 = list(tree.nodes())[0] # GraphTool\n #c2 = list(tree.nodes)[0] # nx 2.1\n tree.add_node(c, label=tuple([new]), color=\"red\")\n tree.add_edge(c, c2, label=tuple([]))\n\n sep = frozenset()\n #tree.fix_graph()\n trilearn.graph.junction_tree.randomize_at_sep(tree, sep)\n\n new_separators[sep] = [(c, c2)]\n # tree TODO: the actual value for the key is not needed.\n P = {c: np.exp(-tree.log_nu(sep))}\n return (old_cliques, new_cliques, new_separators, P, {c: 1.0})\n\n S = {c: set() for c in subtree_nodes}\n M = {c: set() for c in subtree_nodes}\n for c in S:\n for neig in subtree_adjlist[c]:\n #S[c] = S[c] | (c & neig)\n S[c] |= (c & neig)\n RM = {c: c - S[c] for c in S}\n C = {c: set() for c in subtree_nodes}\n P = {}\n N_S = {c: set() for c in subtree_nodes}\n\n sepCondition = {}\n for c in RM:\n sepCondition[c] = len({neig for neig in subtree_adjlist[c] if\n S[c] == neig & c}) > 0 or len(subtree_adjlist) == 1\n\n if sepCondition[c] is True:\n tmp = np.array(list(RM[c]))\n first_node = []\n if len(tmp) > 0:\n # Connect to one node\n first_ind = np.random.randint(len(tmp))\n first_node = tmp[[first_ind]]\n tmp = np.delete(tmp, first_ind)\n\n rest = set()\n if len(tmp) > 0:\n # Connect to the rest of the nodes if there are any left\n rest = aux.random_subset(tmp)\n M[c] = frozenset(rest | 
set(first_node))\n else:\n M[c] = frozenset(aux.random_subset(RM[c]))\n\n # Create the new cliques\n for clique in M:\n C[clique] = frozenset(M[clique] | S[clique] | {new})\n new_cliques.add(C[clique])\n\n # Get the neighbor set of each c which can be moved to C[c]\n for clique in subtree_nodes:\n N_S[clique] = {neig for neig in tree.neighbors(clique)\n if neig & clique <= C[clique] and neig not in subtree_nodes}\n\n # Add the new cliques\n #for c in subtree_nodes:\n # tree.add_node(C[c], label=str(tuple(C[c])), color=\"red\")\n tree.add_nodes_from([C[c] for c in subtree_nodes])\n\n # Construct and add the new edges between the new cliques,\n # replicating the subtree\n new_subtree_edges = []\n for e in subtree_edges:\n sep = C[e[0]] & C[e[1]]\n if not sep in new_separators:\n new_separators[sep] = []\n new_separators[sep].append((C[e[0]], C[e[1]]))\n #new_subtree_edges.append((C[e[0]], C[e[1]]))\n tree.add_edge(C[e[0]], C[e[1]])\n # tree.add_edges_from(new_subtree_edges)\n\n # Connect cliques in the subtree to the new cliques\n for c in subtree_nodes:\n # Move the neighbors of a swallowed node to the swallowing node\n # Remove the swallowed node\n\n if C[c] - {new} == c:\n # If connecting to all nodes in a clique (the node should be replaces instead)\n for neig in tree.neighbors(c):\n if neig not in subtree_nodes:\n tree.add_edge(C[c], neig)#, label=lab)\n\n tree.remove_node(c)\n old_cliques.add(c)\n else: # If not connecting to every node in a clique\n sep = C[c] & c\n if not sep in new_separators:\n new_separators[sep] = []\n new_separators[sep].append((C[c], c))\n\n #print \"adding edge: \" + str((C[c], c))\n tree.add_edge(C[c], c)\n # Pick random subset of neighbors intersecting with subset of S U M\n\n N = aux.random_subset(N_S[c])\n #for neig in N:\n # tree.add_edge(C[c], neig)\n\n tree.add_edges_from([(C[c], neig) for neig in N])\n tree.remove_edges_from([(c, neig) for neig in N])\n\n # Compute probabilities\n N = {}\n for c in subtree_nodes:\n if sepCondition[c] is False:\n # Every internal node in c belongs to a separator\n P[c] = np.power(2.0, - len(RM[c]))\n #P[c] = Fraction(1, 2 ** len(RM[c]))\n if not len(c) + 1 == len(C[c]):\n N[c] = np.power(2.0, - len(N_S[c]))\n #N[c] = Fraction(1, 2 ** len(N_S[c]))\n else:\n N[c] = 1.0\n #N[c] = Fraction(1,1)\n else:\n P[c] = 1.0\n #P[c] = Fraction(1, 1)\n N[c] = 1.0\n #N[c] = Fraction(1, 1)\n if len(RM[c]) > 1:\n P[c] = 1.0 / len(RM[c])\n #P[c] = Fraction(1, len(RM[c]))\n P[c] *= np.power(2.0, - (len(RM[c]) - 1.0)) * len(M[c])\n #P[c] *= Fraction(len(M[c]), 2 ** (len(RM[c]) - 1.0)) \n if not len(c) + 1 == len(C[c]): # c not swallowed by C[c]\n #N[c] = np.power(2.0, - len(N_S[c]))\n N[c] = Fraction(1, 2 ** len(N_S[c]))\n\n # Remove the edges in tree\n tree.remove_edges_from(subtree_edges)\n # Todo: This will introduce a bug if we instead replace a node.\n return (old_cliques, new_cliques, new_separators, P, N)", "def all_match():\n S1=Spectrum.Spectrum()\n S1.add_peak(50.7,234)\n S1.add_peak(54.6,585)\n S1.add_peak(60.7,773)\n S1.add_peak(65.6,387)\n S1.add_peak(87.7,546)\n S1.add_peak(104.6,598)\n S1.pep_mass=100\n S1.euclidean_scale()\n\n S2=Spectrum.Spectrum()\n S2.add_peak(50.5,234/2)\n S2.add_peak(54.8,585/2)\n S2.add_peak(61.0,773/2)\n S2.add_peak(65.4,387/2)\n S2.add_peak(88.0,546/2)\n S2.add_peak(104.3,598/2)\n S2.pep_mass=100\n S2.euclidean_scale()\n\n score,peaks=similarity.cosine_score_max(S1,S2)\n assert peaks==6, \"Incorrect number of peaks matched with greedy method\"\n assert math.isclose(score,1.0), \"Incorrect score with 
greedy method\"\n\n score,peaks=similarity.cosine_score_greedy(S1,S2)\n assert peaks==6, \"Incorrect number of peaks matched with maximum weighted method\"\n assert math.isclose(score,1.0), \"Incorrect score with maximum weighted method\"", "def reduce(self):\n # initializing a hashtable for all the nodes in the tree\n hashtable = {}\n for node_it in self.nodes:\n # storing each node only once in the table\n if not node_it.__hash__() in hashtable:\n hashtable[node_it.__hash__()] = node_it\n\n def reduce_rec(node):\n \"\"\"\n The recursive method for the reduction.\n \"\"\"\n if node.is_leaf():\n return\n for edge in node.child_nodes:\n # replacing the subdiagram with a singular isomorphic one\n node.child_nodes[edge] = hashtable[node.child_nodes[edge].__hash__()]\n # and going down recursively along that subdiagram\n reduce_rec(node.child_nodes[edge])\n\n # calling the reduction method\n reduce_rec(self)\n # reinitializing the diagram\n self.reinitialize()\n return self" ]
[ "0.6691611", "0.65014845", "0.6066007", "0.6025958", "0.5882169", "0.5869788", "0.5852473", "0.5813873", "0.57827383", "0.5768942", "0.56807446", "0.5659188", "0.56524664", "0.55881804", "0.5570378", "0.5556384", "0.5547672", "0.5529254", "0.5523213", "0.549499", "0.54923385", "0.5487428", "0.5476976", "0.5468209", "0.5468117", "0.5456178", "0.54509383", "0.5431251", "0.54116684", "0.5404288" ]
0.6880419
0
Substructure similarity based on descendant nodes (measured on groupings, each defined as a node plus its descendants), EXACT match
def substructure_sim_exact(subtrees_1, subtrees_2):
    assert(len(subtrees_1) == len(subtrees_2))
    n = len(subtrees_1)
    f1 = np.zeros(n)
    for i in range(n):
        f1[i] = subtrees_1[i] == subtrees_2[i] # calculate the number of matching pairs

    return float(np.count_nonzero(f1)) / float(len(f1))
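A minimal usage sketch (hypothetical inputs; assumes the function above is in scope and that numpy is imported as np): each list element is the grouping for one node, i.e. a set containing the node and its descendants, and the score is the fraction of positions whose groupings match exactly.

import numpy as np

# Hypothetical groupings (node + its descendants) for two annotations of the same tree.
subtrees_a = [{"ROOT", "NP", "VP"}, {"NP"}, {"VP"}]
subtrees_b = [{"ROOT", "NP", "VP"}, {"NP", "PP"}, {"VP"}]

# Positions 0 and 2 match exactly, position 1 does not, so the score is 2/3.
print(substructure_sim_exact(subtrees_a, subtrees_b))  # -> 0.666...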
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_similarity(self, seq_node, **kwargs):\n pass", "def substructure_sim_partial(subtrees_1, subtrees_2):\n assert(len(subtrees_1) == len(subtrees_2))\n n = len(subtrees_1)\n f1 = np.zeros(n)\n f2 = np.zeros(n)\n for i in range(n):\n if (subtrees_1[i] == subtrees_2[i]): # exact match, or both are dropped\n f1[i] = 1.0\n f2[i] = 1.0\n else: # partial match\n f1[i] = SubstructureAgreement.save_div( len(subtrees_1[i].intersection(subtrees_2[i])), float(len(subtrees_2[i])))\n f2[i] = SubstructureAgreement.save_div( len(subtrees_2[i].intersection(subtrees_1[i])), float(len(subtrees_1[i])))\n\n return (np.sum(f1) + np.sum(f2)) / (2.0 * float(n)) # average of average recall", "def subpath_sim(subpaths_1, subpaths_2):\n u = subpaths_1.union(subpaths_2)\n f1 = np.zeros(len(u))\n f2 = np.zeros(len(u))\n u = list(u)\n\n # convert graph into one-hot-vector (based on the precense of subpaths)\n for i in range(len(u)):\n if u[i] in subpaths_1:\n f1[i] = 1\n if u[i] in subpaths_2:\n f2[i] = 1\n\n score = np.dot(f1, f2) * (np.count_nonzero(f1) + np.count_nonzero(f2)) / (2 * (np.count_nonzero(f1) * np.count_nonzero(f2)))\n\n if math.isnan(score): # in case of empty set\n return 0.0\n else:\n return score", "def similarity(self, e1, e2):\n\t\tpass", "def test_ontology_ic_similarity(ontology):\n\tassert ontology.similarity_ic([\"TO:0000001\"],[\"TO:0000002\"], inherited=False, as_weight=False) == 0\n\tassert ontology.similarity_ic([\"TO:0000001\"],[\"TO:0000003\"], inherited=False, as_weight=False) == 0\n\tassert ontology.similarity_ic([\"TO:0000002\"],[\"TO:0000003\"], inherited=False, as_weight=False) == 0\n\tassert ontology.similarity_ic([\"TO:0000003\"],[\"TO:0000005\"], inherited=False, as_weight=False) == 0.3690702464285426\n\tassert ontology.similarity_ic([\"TO:0000007\"],[\"TO:0000008\"], inherited=False, as_weight=False) == 0.5\n\tassert ontology.similarity_ic([\"TO:0000005\"],[\"TO:0000009\"], inherited=False, as_weight=False) == 0\n\n\tassert ontology.similarity_ic([\"TO:0000001\"],[\"TO:0000002\",\"TO:0000001\"], inherited=False, as_weight=False) == 0\n\tassert ontology.similarity_ic([\"TO:0000003\"],[\"TO:0000001\",\"TO:0000009\"], inherited=False, as_weight=False) == 0\n\tassert ontology.similarity_ic([\"TO:0000002\"],[\"TO:0000003\",\"TO:0000002\"], inherited=False, as_weight=False) == 0.3690702464285426\n\tassert ontology.similarity_ic([\"TO:0000003\"],[\"TO:0000005\",\"TO:0000002\"], inherited=False, as_weight=False) == 0.3690702464285426\n\tassert ontology.similarity_ic([\"TO:0000008\"],[\"TO:0000008\",\"TO:0000007\"], inherited=False, as_weight=False) == 1.3690702464285427\n\tassert ontology.similarity_ic([\"TO:0000005\"],[\"TO:0000009\",\"TO:0000002\"], inherited=False, as_weight=False) == 0.3690702464285426", "def compare_trees(first_soup: HTMLStrip, second_soup: HTMLStrip) -> float:\n first_tree = Tree.from_soup_object(first_soup.file_name, first_soup.original_soup)\n second_tree = Tree.from_soup_object(second_soup.file_name, second_soup.original_soup)\n\n common_paths_size: int = first_tree.num_of_common_paths(second_tree)\n target_size: int = second_tree.total_num_of_paths()\n similarity = float(common_paths_size)*100/target_size\n print(f'{similarity:.2f}')\n return similarity", "def test_ontology_similarity_jaccard(ontology):\n\tassert ontology.similarity_jaccard([\"TO:0000001\"],[\"TO:0000002\"], inherited=False) == 1/2\n\tassert ontology.similarity_jaccard([\"TO:0000001\"],[\"TO:0000003\"], inherited=False) == 1/2\n\tassert 
ontology.similarity_jaccard([\"TO:0000002\"],[\"TO:0000003\"], inherited=False) == 1/3\n\tassert ontology.similarity_jaccard([\"TO:0000003\"],[\"TO:0000005\"], inherited=False) == 2/5\n\tassert ontology.similarity_jaccard([\"TO:0000007\"],[\"TO:0000008\"], inherited=False) == 2/3\n\tassert ontology.similarity_jaccard([\"TO:0000005\"],[\"TO:0000009\"], inherited=False) == 1/8\n\n\tassert ontology.similarity_jaccard([\"TO:0000001\"],[\"TO:0000002\",\"TO:0000001\"], inherited=False) == 1/2\n\tassert ontology.similarity_jaccard([\"TO:0000003\"],[\"TO:0000001\",\"TO:0000009\"], inherited=False) == 1/5\n\tassert ontology.similarity_jaccard([\"TO:0000002\"],[\"TO:0000003\",\"TO:0000002\"], inherited=False) == 2/3 \n\tassert ontology.similarity_jaccard([\"TO:0000003\"],[\"TO:0000005\",\"TO:0000002\"], inherited=False) == 2/5\n\tassert ontology.similarity_jaccard([\"TO:0000008\"],[\"TO:0000008\",\"TO:0000007\"], inherited=False) == 3/3\n\tassert ontology.similarity_jaccard([\"TO:0000005\"],[\"TO:0000009\",\"TO:0000002\"], inherited=False) == 2/8", "def similarity_euclid(matrix, business1, business2):\n selected_features = matrix.loc[business1].notna() & matrix.loc[business2].notna()\n\n if not selected_features.any():\n return 0\n\n features1 = matrix.loc[business1][selected_features]\n features2 = matrix.loc[business2][selected_features]\n distance = math.sqrt(((features1 - features2) ** 2).sum())\n\n if distance is np.nan:\n return 0\n\n return 1 / (1 + distance)", "def match(*args, current=None):\n # current is the original edge and clone is the change\n # this function should only be getting nodes with the same edges\n # if I change this to assume nodes of the same edge attr then I can\n # send this function \"equivalent edges\"\n scores = []\n for clone in args:\n if current.edge_attribute == clone.edge_attribute:\n source_condit = (\n clone.source.original_id == current.source.id\n or clone.source.id == current.source.id\n )\n target_condit = (\n clone.target.original_id == current.target.id\n or clone.target.id == current.target.id\n )\n if source_condit and target_condit:\n scores.append(2)\n return scores\n elif source_condit or target_condit:\n\n scores.append(1)\n else:\n # TODO: check subgraph/call is_similar\n # if subgraph is isomorphic then return 2\n scores.append(0)\n elif len(current.edge_attribute) > len(clone.edge_attribute):\n scores.append(-1)\n else: # edge attribute of current is shorter than of clone\n scores.append(-2)\n return scores", "def wordSimilarityRatio(sent_1,sent_2):", "def test4(self):\n tgts = [ ('CCC(O)C(=O)O',\n ('CCC','OCC','OCC=O','OCCO','CCCC','OC=O','CC(O)C')),\n ]\n for smi,matches in tgts:\n m = Chem.MolFromSmiles(smi)\n fp1 = Chem.RDKFingerprint(m,2,7,9192,4,0)\n obs = fp1.GetOnBits()\n for match in matches:\n m2 = Chem.MolFromSmiles(match)\n fp2 = Chem.RDKFingerprint(m2,2,7,9192,4,0)\n v1,v2 = DataStructs.OnBitProjSimilarity(fp2,fp1)\n assert feq(v1,1.0000),'substruct %s not properly contained in %s'%(match,smi)", "def test_isomorphism_match(data):\n\n reference = data.draw(ISO_BUILDER)\n nodes = data.draw(st.sets(st.sampled_from(list(reference.nodes)),\n max_size=len(reference)))\n graph = reference.subgraph(nodes)\n\n note((\"Reference nodes\", reference.nodes(data=True)))\n note((\"Reference edges\", reference.edges))\n note((\"Graph nodes\", graph.nodes(data=True)))\n note((\"Graph edges\", graph.edges))\n\n node_match = nx.isomorphism.categorical_node_match('element', None)\n matcher = nx.isomorphism.GraphMatcher(reference, graph, 
node_match=node_match)\n expected = make_into_set(matcher.subgraph_isomorphisms_iter())\n found = make_into_set(vermouth.graph_utils.isomorphism(reference, graph))\n\n note((\"Found\", found))\n note((\"Expected\", expected))\n\n if not expected:\n event(\"Not subgraphs\")\n if found == expected:\n event(\"Exact match\")\n\n assert found <= expected", "def test5(self):\n tgts = [ ('CCC(O)C(=O)O',\n ('O[CH-][CH2-]','O[CH-][C-]=O')),\n ]\n for smi,matches in tgts:\n m = Chem.MolFromSmiles(smi)\n fp1 = Chem.RDKFingerprint(m,2,7,9192,4,1)\n obs = fp1.GetOnBits()\n for match in matches:\n m2 = Chem.MolFromSmiles(match)\n fp2 = Chem.RDKFingerprint(m2,2,7,9192,4,1)\n v1,v2 = DataStructs.OnBitProjSimilarity(fp2,fp1)\n assert feq(v1,1.0000),'substruct %s not properly contained in %s'%(match,smi)", "def dependency_similarity(s1, s2):\n # pass\n parsed_sentence_1 = parser.raw_parse(s1)\n parsed_sentence_2 = parser.raw_parse(s2)\n \n tree1 = next(parsed_sentence_1)\n tree2 = next(parsed_sentence_2)\n \n triples1 = [t for t in tree1.triples()]\n triples2 = [t for t in tree2.triples()] \n\n # Compute similarity\n if len(triples1) != 0 and len(triples2) != 0:\n similarity = 1 - jaccard_distance(set(triples1), set(triples2))\n return similarity\n else:\n return 0", "def reviewer_similarity_score(self, other: _Vertex) -> float:\n if self.degree() == 0 or other.degree == 0:\n return 0.0\n else:\n neighbours = self.neighbours\n other_neighbours = other.neighbours\n same_neighbours = neighbours.keys() & other_neighbours.keys()\n union = len(self.neighbours) + len(other.neighbours)\n sim_score_so_far = 0\n\n for vertex in same_neighbours:\n # 'bothered reviewing' bonus:\n sim_score_so_far += 1\n # 'love' bonus\n if self.neighbours[vertex] >= 9 and other.neighbours[vertex] >= 9:\n sim_score_so_far += 2\n # 'like' bonus\n elif self.neighbours[vertex] >= 7 and other.neighbours[vertex] >= 7:\n sim_score_so_far += 1\n\n return sim_score_so_far / union", "def test_grouping(self):\n n1 = self.create(NodeItem, UML.Node)\n n2 = self.create(NodeItem, UML.Node)\n\n self.group(n1, n2)\n\n assert n2.subject in n1.subject.nestedNode\n assert n1.subject not in n2.subject.nestedNode", "def compare_nodes(G,all_match_pairs,match_pair,traversed,node1,node2, ports_weight):\n logger.debug(f\"comparing {node1},{node2}, traversed {traversed}\")\n nbrs1 = sorted(set(G.neighbors(node1)) - set(traversed))\n #remove dummies\n nbrs1 = sorted(set([nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=7]))\n nbrs2 = sorted(set(G.neighbors(node2)) - set(traversed))\n #remove dummies\n nbrs2 = sorted(set([nbr for nbr in nbrs2 if G.get_edge_data(node2, nbr)['weight'] !=7]))\n logger.debug(f\"node1:{node1},property: {G.nodes[node1]},neigbors1: {nbrs1}\")\n logger.debug(f\"node2:{node2},property: {G.nodes[node2]},neigbors2: {nbrs2}\")\n if not nbrs1 or not nbrs2:\n if compare_two_nodes(G, node1, node2, ports_weight):\n match_pair[node1] = node2\n logger.debug(f\"no new neihbours, returning recursion {match_pair}\")\n return\n elif len(nbrs1)> 10:\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n logger.debug(f\"skipping high fanout nets due to large computation, {node1} {nbrs1}\")\n traversed.append(node1)\n return\n elif len(nbrs2)> 10:\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n traversed.append(node2)\n logger.debug(f\"skipping high fanout nets 
due to large computation, {node2} {nbrs2}\")\n return\n\n if node1 == node2:\n if node1 in match_pair.keys() or node1 in match_pair.values():\n logger.debug(\"avoid existing pair wise symmetry\")\n return\n logger.debug(f\"single node {node1}, nbrs {nbrs1}, nbr_weight {[G.get_edge_data(node1,nbr) for nbr in nbrs1]}\")\n SD_nbrs= [nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=2]\n ## TBD: filter based on primitive constraints\n ## Right now will try to figure out S/D paths\n if len(SD_nbrs) ==0:\n logger.debug(f\"No SD paths found to traverse\")\n match_pair[node1]=node1\n elif len(SD_nbrs) ==1:\n logger.debug(f\"traversing single S/D path {SD_nbrs}\")\n match_pair[node1]=node1\n traversed.append(node1)\n compare_nodes(G,all_match_pairs,match_pair,traversed,SD_nbrs[0],SD_nbrs[0],ports_weight)\n else:\n logger.debug(f\" multiple nodes diverging {SD_nbrs}\")\n logger.debug(f\"nbr weights: {SD_nbrs} {[G.get_edge_data(node1, nbr)['weight'] for nbr in SD_nbrs ]}\")\n match_pair[node1]=node1\n traversed.append(node1)\n new_sp=sorted(set(SD_nbrs)-set(traversed))\n all_match_pairs_local={}\n for nbr1,nbr2 in combinations(new_sp, 2):\n logger.debug(f\"recursive pair call from single branch {nbr1} {nbr2}\")\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n if new_pair:\n #new_pair[nbr1]=nbr2\n all_match_pairs_local[nbr1+'_'+nbr2] = new_pair\n all_match_pairs_local={k: v for k, v in all_match_pairs_local.items() if len(v)>0}\n if len(all_match_pairs_local)==1:\n match_pair.update( all_match_pairs_local[list(all_match_pairs_local.keys())[0]])\n logger.debug(f\"found inline pair: {pprint.pformat(match_pair, indent=4)}\")\n else:\n for nbr1 in new_sp:\n if (nbr1+'_'+nbr1 not in all_match_pairs.keys()):\n logger.debug(f\"recursive single branch call from single branch {nbr1} {nbr1}\")\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr1,ports_weight)\n #filtering multiple axis of symmetries with same block, ideally they should be handled by array generation\n if new_pair:\n all_match_pairs[nbr1+'_'+nbr1] = new_pair\n logger.debug(f\"updating match pairs: {pprint.pformat(all_match_pairs, indent=4)}\")\n\n\n elif nbrs1 == nbrs2:\n logger.debug(f\"traversing converging branch\")\n match_pair[node1]=node2\n traversed+=[node1,node2]\n nbrs1=sorted(set(nbrs1)-set([node1,node2]))\n logger.debug(f\"all non traversed neighbours: {nbrs1}\")\n if len(nbrs1)==1:\n nbr1=nbr2=nbrs1[0]\n logger.debug(f\"keeping single converged branch inline {nbr1} {nbr2}\")\n compare_nodes(G,all_match_pairs,match_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n else:\n for nbr1,nbr2 in combinations_with_replacement(nbrs1,2):\n logger.debug(f\"recursive call from converged branch {nbr1} {nbr2}\")\n if nbr1+'_'+nbr2 not in all_match_pairs.keys():\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n #filtering multiple axis of symmetries with same block, ideally they should be handled by array generation\n if new_pair:\n all_match_pairs[nbr1+'_'+nbr2] = new_pair\n logger.debug(f\"updating match pairs: {pprint.pformat(all_match_pairs, indent=4)}\")\n\n\n elif compare_two_nodes(G,node1,node2,ports_weight):\n nbrs1 = sorted(set([nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=2]))\n nbrs2 = sorted(set([nbr for nbr in nbrs2 if G.get_edge_data(node2, nbr)['weight'] !=2]))\n match_pair[node1]=node2\n traversed+=[node1,node2]\n logger.debug(f\"Traversing parallel branches from 
{node1},{node2} {nbrs1}, {nbrs2}\")\n nbrs1_wt = [G.get_edge_data(node1, nbr)['weight'] for nbr in nbrs1]\n nbrs2_wt = [G.get_edge_data(node2, nbr)['weight'] for nbr in nbrs2]\n unique_match=find_unique_matching_branches(G,nbrs1,nbrs2,ports_weight)\n if len(nbrs1)==0 or len(nbrs2)==0:\n logger.debug(f\"no new SD neihbours, returning recursion {match_pair}\")\n elif len(nbrs1) ==1 and len(nbrs2)==1:\n logger.debug(f\"traversing binary branch\")\n compare_nodes(G,all_match_pairs,match_pair,traversed,nbrs1.pop(),nbrs2.pop(),ports_weight)\n elif unique_match:\n logger.debug(f'traversing unique matches {unique_match}')\n match_pair[node1]=node2\n traversed+=[node1,node2]\n for nbr1,nbr2 in unique_match.items():\n logger.debug(f\"recursive call from binary {node1}:{node2} to {nbr1}:{nbr2}\")\n compare_nodes(G,all_match_pairs,match_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n elif len(nbrs1_wt)>len(set(nbrs1_wt))>1 and len(nbrs2_wt)>len(set(nbrs2_wt))>1:\n logger.debug(f\"setting new start points {node1} {node2}\")\n match_pair[node1]=node2\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n else:\n match_pair = {}\n logger.debug(f\"end all traversal from binary branch {node1} {node2}\")\n\n else:\n match_pair = {}\n logger.debug(f\"end of recursion branch, matches {match_pair}\")", "def wordNet_similarity(sentence1, sentence2):\r\n # Tokenize and tag\r\n \r\n # sentence1 = pos_tag(word_tokenize(sentence1))\r\n sentence1=st_tagger.tag(word_tokenize(sentence1))\r\n \r\n # sentence2 = pos_tag(word_tokenize(sentence2))\r\n sentence2=st_tagger.tag(word_tokenize(sentence2))\r\n\r\n \r\n # Get the synsets for the tagged words\r\n #################################################\r\n\r\n # synsets1=[]\r\n # synsets2=[]\r\n # for tagged_word in sentence1:\r\n # print(tagged_word)\r\n # tagged_word = list(tagged_word)\r\n # synsets1.append(tagged_to_synset(tagged_word[0],tagged_word[1]))\r\n # for tagged_word in sentence2:\r\n # print(tagged_word)\r\n # tagged_word = list(tagged_word)\r\n # print(tagged_word)\r\n # synsets2.append(tagged_to_synset(tagged_word[0],tagged_word[1]))\r\n\r\n # The code above is the elaboration of code below\r\n synsets1 = [tagged_to_synset(*tagged_word) for tagged_word in sentence1]\r\n synsets2 = [tagged_to_synset(*tagged_word) for tagged_word in sentence2]\r\n \r\n # Filter out the Nones in the synonym set\r\n synsets1 = [ss for ss in synsets1 if ss]\r\n synsets2 = [ss for ss in synsets2 if ss]\r\n \r\n score, count = 0.0, 0\r\n \r\n###########################################################################\r\n # for syn1 in synsets1:\r\n # arr_simi_score = []\r\n # print('=========================================')\r\n # print(syn1)\r\n # print('----------------')\r\n # for syn2 in synsets2:\r\n # print(syn2)\r\n # simi_score = syn1.path_similarity(syn2)\r\n # print(simi_score)\r\n # if simi_score is not None:\r\n # arr_simi_score.append(simi_score)\r\n # print('----------------')\r\n # print(arr_simi_score)\r\n # if(len(arr_simi_score) > 0):\r\n # best = max(arr_simi_score)\r\n # print(best)\r\n # score += best\r\n # count += 1\r\n # # Average the values\r\n # print('score: ', score)\r\n # print('count: ', count)\r\n # score /= count\r\n\r\n###########################################################################\r\n\r\n for syn1 in synsets1:\r\n arr_simi_score = []\r\n # print('=========================================')\r\n print(\"Each word from Synonym se1\",syn1)\r\n # 
print('----------------')\r\n for syn2 in synsets2:\r\n print(\"Each word from Synonym se2\",syn2)\r\n # simi_score = syn1.path_similarity(syn2)\r\n simi_score = syn1.wup_similarity(syn2)\r\n print(\"word to word path_similarity score\",simi_score)\r\n if simi_score is not None:\r\n arr_simi_score.append(simi_score)\r\n print('----------------')\r\n print(arr_simi_score)\r\n if(len(arr_simi_score) > 0):\r\n best = max(arr_simi_score)\r\n print(\"best score so far\", best)\r\n score += best\r\n count += 1\r\n # Average the values\r\n print('score: ', score)\r\n print('count: ', count)\r\n if count!=0:\r\n score /= count\r\n else:\r\n score=0.0\r\n return score", "def similarity_score(self, lhs, rhs):\n pass", "def testFindAllSimilarityFromNodeOnPathSimExampleThree(self):\n\n graph, authorMap, conferenceMap = SampleGraphUtility.constructPathSimExampleThree()\n metaPath = [Author, Paper, Conference, Paper, Author]\n strategy = PathSimStrategy(graph, metaPath)\n\n mike = authorMap['Mike']\n mostSimilarNodes = strategy.findMostSimilarNodes(mike, 5)\n\n self.assertEquals([authorMap['Bob'], authorMap['Mary'], authorMap['Jim']], mostSimilarNodes)", "def similarity(self, other):\n part = self.__part_converter(self.part)\n if part != self.__part_converter(other.part):\n return 0\n tresh = 0.2\n sss = wn.synsets(self.string, part)\n sso = wn.synsets(other.string, part)\n best_sim = 0\n for ss in sss:\n # if not match('^' + self.string + '\\..+', ss.name()):\n # continue\n for so in sso:\n # if not match('^' + other.string + '\\..+', so.name()):\n # continue\n sim = ss.wup_similarity(so)\n if (tresh < sim) and (best_sim < sim):\n best_sim = sim\n return best_sim", "def nodes_ratio(data: Data, position_against_soma=None, node_types=None, filter_layers=None):\n soma = data.morphology.get_soma()\n if position_against_soma=='above':\n criterion=lambda nod:((nod['type'] in node_types) if node_types is not None else True) and nod['y']<soma['y']\n elif position_against_soma=='below':\n criterion=lambda nod:((nod['type'] in node_types) if node_types is not None else True) and nod['y']>soma['y']\n else:\n criterion=lambda nod:nod['type'] in node_types if node_types is not None else True\n num_type_nodes = data.morphology.get_node_by_types(node_types)\n if filter_layers and isinstance(filter_layers, list):\n result = {}\n for layer in filter_layers:\n filter_layer_depth = data.reference_layer_depths.get(layer)\n y_min, y_max, is_scale = filter_layer_depth.pia_side, filter_layer_depth.wm_side, filter_layer_depth.scale\n if is_scale:\n selected_nodes = data.morphology.filter_nodes(lambda nod: criterion(nod) and y_min<nod['y']<y_max)\n else:\n selected_nodes = data.morphology.filter_nodes(lambda nod: criterion(nod) and nod['y']>y_min)\n result[layer]=len(selected_nodes)/len(num_type_nodes)\n return result\n else:\n selected_nodes = data.morphology.filter_nodes(criterion)\n return len(selected_nodes)/len(num_type_nodes)", "def test_similarity(self):\n self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'dog.n.01'), 1))\n self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'mammal.n.01'), 0.180901358))", "def hierarchy_dist(self,synset_1, synset_2):\n\t h_dist = sys.maxsize\n\t if synset_1 is None or synset_2 is None: \n\t return h_dist\n\t if synset_1 == synset_2:\n\t # return the depth of one of synset_1 or synset_2\n\t h_dist = max([x[1] for x in synset_1.hypernym_distances()])\n\t else:\n\t # find the max depth of least common subsumer\n\t hypernyms_1 = {x[0]:x[1] for x in 
synset_1.hypernym_distances()}\n\t hypernyms_2 = {x[0]:x[1] for x in synset_2.hypernym_distances()}\n\t lcs_candidates = set(hypernyms_1.keys()).intersection(\n\t set(hypernyms_2.keys()))\n\t if len(lcs_candidates) > 0:\n\t lcs_dists = []\n\t for lcs_candidate in lcs_candidates:\n\t lcs_d1 = 0\n\t if lcs_candidate in hypernyms_1.keys():\n\t lcs_d1 = hypernyms_1[lcs_candidate]\n\t lcs_d2 = 0\n\t if lcs_candidate in hypernyms_2.keys():\n\t lcs_d2 = hypernyms_2[lcs_candidate]\n\t lcs_dists.append(max([lcs_d1, lcs_d2]))\n\t h_dist = max(lcs_dists)\n\t else:\n\t h_dist = 0\n\t return ((math.exp(self.BETA * h_dist) - math.exp(-self.BETA * h_dist)) / \n\t (math.exp(self.BETA * h_dist) + math.exp(-self.BETA * h_dist)))", "def sentence_similarity(sentence1, sentence2):\n sentence1 = sentence1.tags\n sentence2 = sentence2.tags\n \n synsets1 = [tagged_to_synset(*tagged_word) for tagged_word in sentence1]\n synsets2 = [tagged_to_synset(*tagged_word) for tagged_word in sentence2]\n \n synsets1 = [ss for ss in synsets1 if ss]\n synsets2 = [ss for ss in synsets2 if ss]\n \n score, count = 0.0, 0\n \n for synset in synsets1:\n \n li=[synset.path_similarity(ss) for ss in synsets2]\n m=0\n for i in range(len(li)):\n if li[i] is not None and m<li[i]:\n m=li[i]\n if m != 0:\n score += m\n count += 1\n\n if count is 0:\n score = 0\n else:\n score /= count\n return score", "def test_check_tree_exact_match(self):\r\n\r\n fasta_labels = ['seq1_1', 'seq1_2', 'seq2_3', 'seq3_4']\r\n\r\n actual_subset_results = check_tree_exact_match(fasta_labels,\r\n self.sample_tree_3tips_fp)\r\n\r\n # Should find all and give True, True result\r\n\r\n self.assertEqual(actual_subset_results, [True, True])\r\n\r\n # Should get tips not found in fasta labels with 5 tip tree\r\n\r\n fasta_labels = ['seq1_1', 'seq1_2', 'seq2_3', 'seq3_4']\r\n\r\n actual_subset_results = check_tree_exact_match(fasta_labels,\r\n self.sample_tree_5tips_fp)\r\n\r\n # Should find all and give True result\r\n\r\n self.assertEqual(actual_subset_results, [True, ['seq5', 'seq4']])\r\n\r\n # Change two of the fasta labels to not match tree tips\r\n\r\n fasta_labels = ['seq1_1', 'seqX_2', 'seq2_3', 'seqY_4']\r\n\r\n actual_subset_results = check_tree_exact_match(fasta_labels,\r\n self.sample_tree_5tips_fp)\r\n\r\n # Should find seqX and seqY as not being a subset\r\n\r\n self.assertEqual(actual_subset_results, [['seqX', 'seqY'],\r\n ['seq3', 'seq5', 'seq4']])", "def ratio(n1,n2, explain=0, optimize=False):\n weight_normal_form = 5.0 #distance between soundexes of normal form\n weight_normal_form_soundex = 8.0 #average distance between soundexes of normal form\n weight_geslachtsnaam1 = 10.0 #distance between soundexes of geslachtsnamen\n weight_geslachtsnaam2 = 10.0 #distance between geslachtsnaam\n weight_initials = 2 #distance between initials\n\n nf1 = n1.guess_normal_form()\n nf2 = n2.guess_normal_form()\n\n if not nf1 or not nf2:\n return 0.0\n elif nf1 == nf2:\n return 1.0\n ratio_normal_form = Similarity.average_distance(split(nf1), split(nf2))\n \n #create a simkplified soundex set for this name\n #remove stopwords\n# nf1 = remove_stopwords( nf1)\n# nf2 = remove_stopwords( nf2)\n \n se1 = n1.get_normal_form_soundex()\n se2 = n2.get_normal_form_soundex()\n ratio_normal_form_soundex = Similarity.average_distance( se1, se2)\n \n #gelachtsnaam wordt op twee manieren met elkaar vergeleken\n g1 = n1.geslachtsnaam() #or n1.get_volledige_naam()\n g2 = n2.geslachtsnaam() #or n2.get_volledige_naam()\n g1 = to_ascii(g1)\n g2 = to_ascii(g2)\n if not optimize:\n #de 
soundexes van de achternaam worden meegewoen\n #g1_soundex = n1.soundex_nl(g1, group=2, length=-1)\n g1_soundex = n1.geslachtsnaam_soundex()\n #g2_soundex = n2.soundex_nl(g2, group=2, length=-1)\n g2_soundex = n2.geslachtsnaam_soundex()\n ratio_geslachtsnaam1 = Similarity.average_distance(g1_soundex, g2_soundex)\n else:\n ratio_geslachtsnaam1 = 1 \n weight_geslachtsnaam1 = 0\n \n #n de afstand van de woorden in de achtenraam zelf\n ratio_geslachtsnaam2 = Similarity.average_distance(\n re.split('[ \\.\\,\\-]', g1.lower()),\n re.split('[ \\.\\,\\-]', g2.lower()),\n levenshtein_ratio)\n n1_initials = n1.initials()\n n1_initials_lower = n1_initials.lower()\n n2_initials = n2.initials()\n n2_initials_lower = n2_initials.lower()\n n1_contains_initials = n1.contains_initials()\n n2_contains_initials = n2.contains_initials()\n #count initials only if we have more than one\n #(or perhaps make this: if we know the first name)\n if len(n1_initials) == 1 or len(n2_initials) == 1:\n #initials count much less if there is only one\n weight_initials = weight_initials_if_one_name_consists_of_one_word_only\n# ratio_initials = .5\n ratio_initials = levenshtein_ratio(n1_initials_lower, n2_initials_lower)\n elif n1_contains_initials or n2_contains_initials:\n ratio_initials = levenshtein_ratio(n1_initials_lower, n2_initials_lower)\n weight_initials = weight_initials_if_one_name_is_in_initials\n elif len(n1_initials) > 1 and len(n2_initials) > 1:\n ratio_initials = levenshtein_ratio(n1_initials_lower, n2_initials_lower)\n else:\n ratio_initials = 0.7\n \n if n1_contains_initials or n2_contains_initials:\n weight_normal_form = weight_normal_form_if_one_name_is_in_initials \n weight_normal_form_soundex = weight_normal_form_soundex_if_one_name_is_in_initials\n\n counter = (ratio_normal_form * weight_normal_form +\n ratio_normal_form_soundex * weight_normal_form_soundex +\n ratio_geslachtsnaam1 * weight_geslachtsnaam1 +\n ratio_geslachtsnaam2 * weight_geslachtsnaam2 +\n ratio_initials * weight_initials)\n numerator = (weight_normal_form + weight_normal_form_soundex +\n weight_initials + weight_geslachtsnaam1 + weight_geslachtsnaam2)\n if numerator == 0:\n return 0.0\n final_ratio = counter/numerator\n\n if explain:\n s = '-' * 100 + '\\n'\n s += 'Naam1: %s [%s] [%s] %s\\n' % (n1, n1_initials, n1.guess_normal_form(), se1)\n s += 'Naam2: %s [%s] [%s] %s\\n' % (n2, n2_initials, n2.guess_normal_form(), se2)\n s += 'Similarity ratio: %s\\n' % final_ratio\n s += '--- REASONS' + '-' * 30 + '\\n'\n format_s = '%-30s | %-10s | %-10s | %-10s | %-10s | %s-10s\\n'\n s += format_s % ('\\t property', ' ratio', ' weight','relative_weight', ' r*w', 'r * relative_w')\n s += '\\t' + '-' * 100 + '\\n'\n format_s = '\\t%-30s | %-10f | %-10f | %-10f | %-10f | %-10f\\n'\n s += format_s % (' normal_form', ratio_normal_form, weight_normal_form,weight_normal_form/counter, ratio_normal_form * weight_normal_form, ratio_normal_form * weight_normal_form/counter)\n s += format_s % ('soundex van normal_form', ratio_normal_form_soundex, weight_normal_form_soundex,weight_normal_form_soundex/counter, ratio_normal_form_soundex* weight_normal_form_soundex, ratio_normal_form_soundex * weight_normal_form_soundex/counter)\n s += format_s % ('soundex van geslachtsnaam1', ratio_geslachtsnaam1, weight_geslachtsnaam1,weight_geslachtsnaam1/counter, ratio_geslachtsnaam1 * weight_geslachtsnaam1, ratio_geslachtsnaam1 * weight_geslachtsnaam1/counter)\n s += format_s % ('geslachtsnaam', ratio_geslachtsnaam2, weight_geslachtsnaam2,weight_geslachtsnaam2/counter, 
ratio_geslachtsnaam2 *weight_geslachtsnaam2 , ratio_geslachtsnaam2 * weight_geslachtsnaam2/counter)\n s += format_s % ('initials', ratio_initials, weight_initials, weight_initials/counter, ratio_initials *weight_initials, ratio_initials * weight_initials/counter)\n s += '\\tTOTAL (numerator) | %s (counter = %s)\\n' % (counter, numerator)\n \n return s\n return final_ratio", "def sample_cond_on_subtree_nodes(new, tree, subtree_nodes, subtree_edges, subtree_adjlist):\n new_separators = {}\n new_cliques = set()\n old_cliques = set()\n subtree_order = len(subtree_nodes)\n #print(\"subtree nodes:\" + str(subtree_nodes))\n\n if subtree_order == 0:\n # If the tree, tree is empty (n isolated node),\n # add random neighbor.\n c = frozenset([new])\n new_cliques.add(c)\n #c2 = tree.nodes()[0] # nx 1.9\n c2 = list(tree.nodes())[0] # GraphTool\n #c2 = list(tree.nodes)[0] # nx 2.1\n tree.add_node(c, label=tuple([new]), color=\"red\")\n tree.add_edge(c, c2, label=tuple([]))\n\n sep = frozenset()\n #tree.fix_graph()\n trilearn.graph.junction_tree.randomize_at_sep(tree, sep)\n\n new_separators[sep] = [(c, c2)]\n # tree TODO: the actual value for the key is not needed.\n P = {c: np.exp(-tree.log_nu(sep))}\n return (old_cliques, new_cliques, new_separators, P, {c: 1.0})\n\n S = {c: set() for c in subtree_nodes}\n M = {c: set() for c in subtree_nodes}\n for c in S:\n for neig in subtree_adjlist[c]:\n #S[c] = S[c] | (c & neig)\n S[c] |= (c & neig)\n RM = {c: c - S[c] for c in S}\n C = {c: set() for c in subtree_nodes}\n P = {}\n N_S = {c: set() for c in subtree_nodes}\n\n sepCondition = {}\n for c in RM:\n sepCondition[c] = len({neig for neig in subtree_adjlist[c] if\n S[c] == neig & c}) > 0 or len(subtree_adjlist) == 1\n\n if sepCondition[c] is True:\n tmp = np.array(list(RM[c]))\n first_node = []\n if len(tmp) > 0:\n # Connect to one node\n first_ind = np.random.randint(len(tmp))\n first_node = tmp[[first_ind]]\n tmp = np.delete(tmp, first_ind)\n\n rest = set()\n if len(tmp) > 0:\n # Connect to the rest of the nodes if there are any left\n rest = aux.random_subset(tmp)\n M[c] = frozenset(rest | set(first_node))\n else:\n M[c] = frozenset(aux.random_subset(RM[c]))\n\n # Create the new cliques\n for clique in M:\n C[clique] = frozenset(M[clique] | S[clique] | {new})\n new_cliques.add(C[clique])\n\n # Get the neighbor set of each c which can be moved to C[c]\n for clique in subtree_nodes:\n N_S[clique] = {neig for neig in tree.neighbors(clique)\n if neig & clique <= C[clique] and neig not in subtree_nodes}\n\n # Add the new cliques\n #for c in subtree_nodes:\n # tree.add_node(C[c], label=str(tuple(C[c])), color=\"red\")\n tree.add_nodes_from([C[c] for c in subtree_nodes])\n\n # Construct and add the new edges between the new cliques,\n # replicating the subtree\n new_subtree_edges = []\n for e in subtree_edges:\n sep = C[e[0]] & C[e[1]]\n if not sep in new_separators:\n new_separators[sep] = []\n new_separators[sep].append((C[e[0]], C[e[1]]))\n #new_subtree_edges.append((C[e[0]], C[e[1]]))\n tree.add_edge(C[e[0]], C[e[1]])\n # tree.add_edges_from(new_subtree_edges)\n\n # Connect cliques in the subtree to the new cliques\n for c in subtree_nodes:\n # Move the neighbors of a swallowed node to the swallowing node\n # Remove the swallowed node\n\n if C[c] - {new} == c:\n # If connecting to all nodes in a clique (the node should be replaces instead)\n for neig in tree.neighbors(c):\n if neig not in subtree_nodes:\n tree.add_edge(C[c], neig)#, label=lab)\n\n tree.remove_node(c)\n old_cliques.add(c)\n else: # If not 
connecting to every node in a clique\n sep = C[c] & c\n if not sep in new_separators:\n new_separators[sep] = []\n new_separators[sep].append((C[c], c))\n\n #print \"adding edge: \" + str((C[c], c))\n tree.add_edge(C[c], c)\n # Pick random subset of neighbors intersecting with subset of S U M\n\n N = aux.random_subset(N_S[c])\n #for neig in N:\n # tree.add_edge(C[c], neig)\n\n tree.add_edges_from([(C[c], neig) for neig in N])\n tree.remove_edges_from([(c, neig) for neig in N])\n\n # Compute probabilities\n N = {}\n for c in subtree_nodes:\n if sepCondition[c] is False:\n # Every internal node in c belongs to a separator\n P[c] = np.power(2.0, - len(RM[c]))\n #P[c] = Fraction(1, 2 ** len(RM[c]))\n if not len(c) + 1 == len(C[c]):\n N[c] = np.power(2.0, - len(N_S[c]))\n #N[c] = Fraction(1, 2 ** len(N_S[c]))\n else:\n N[c] = 1.0\n #N[c] = Fraction(1,1)\n else:\n P[c] = 1.0\n #P[c] = Fraction(1, 1)\n N[c] = 1.0\n #N[c] = Fraction(1, 1)\n if len(RM[c]) > 1:\n P[c] = 1.0 / len(RM[c])\n #P[c] = Fraction(1, len(RM[c]))\n P[c] *= np.power(2.0, - (len(RM[c]) - 1.0)) * len(M[c])\n #P[c] *= Fraction(len(M[c]), 2 ** (len(RM[c]) - 1.0)) \n if not len(c) + 1 == len(C[c]): # c not swallowed by C[c]\n #N[c] = np.power(2.0, - len(N_S[c]))\n N[c] = Fraction(1, 2 ** len(N_S[c]))\n\n # Remove the edges in tree\n tree.remove_edges_from(subtree_edges)\n # Todo: This will introduce a bug if we instead replace a node.\n return (old_cliques, new_cliques, new_separators, P, N)", "def object_similarity(obj_1, obj_2):\n w_list = []\n obj_1_bag_size = sum(obj_1['bag_of_words'].values())\n obj_2_bag_size = sum(obj_2['bag_of_words'].values())\n obj_1_set = obj_1['set_of_words']\n obj_2_set = obj_2['set_of_words']\n obj_1_diff_2_set = obj_1_set - obj_2_set\n obj_2_diff_1_set = obj_2_set - obj_1_set\n w_list.append(weight_calculator(obj_1_bag_size, obj_2_bag_size))\n w_list.append(weight_calculator(len(obj_1_set), len(obj_2_set)))\n w_list.append(weight_calculator(len(obj_1_diff_2_set),\n len(obj_2_diff_1_set)))\n if 'total_lines' in obj_1.keys() and 'total_lines' in obj_2.keys():\n w_list.append(weight_calculator(obj_1['total_lines'],\n obj_2['total_lines']))\n if 'total_conversations' in obj_1.keys() and 'total_conversations' in obj_2.keys():\n w_list.append(weight_calculator(obj_1['total_conversations'],\n obj_2['total_conversations']))\n # Added as observations of genre -> rating relations\n if 'metadata' in obj_1.keys() and 'metadata' in obj_2.keys():\n w_list.append(weight_calculator(eval(obj_1['metadata']['genres']),\n eval(obj_2['metadata']['genres'])))\n return mean(w_list)", "def closest_matching_child(self,instance):\n\t\tbest = 0\n\t\tsmallest_diff = float('inf')\n\t\tinst_attributes = instance.getAttributes()\n\t\tfor i in range(len(self.tree.children)):\n\t\t\tchild = self.tree.children[i]\n\t\t\tsum_diff = 0.0\n\t\t\tcount = 0.0\n\t\t\tfor attribute in child.utility.av_counts:\n\t\t\t\tfor value in self.utility.av_counts[attribute]:\n\t\t\t\t\tcount += 1\n\t\t\t\t\tif value == 'numerically_valued_attribute':\n\t\t\t\t\t\tsum_diff += inst_attributes[attribute] - (self.utility.av_counts[attribute][value][0] / self.utility.count)\n\t\t\t\t\telse:\n\t\t\t\t\t\tif attribute in instance and inst_attributes[attribute] == value:\n\t\t\t\t\t\t\tsum_diff += 1.0 - (self.utility.av_counts[attribute][value][0] / self.utility.count)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tsum_diff += 1.0\n\n\t\t\tif count > 0:\n\t\t\t\tsum_diff /= count\n\t\t\telse:\n\t\t\t\tsum_diff = float('inf')\n\n\t\t\tif sum_diff < 
smallest_diff:\n\t\t\t\tbest = i\n\t\t\t\tsmallest_diff = sum_diff\n\t\t\n\t\treturn best" ]
[ "0.65414286", "0.6379984", "0.6302926", "0.6180403", "0.6123897", "0.61019725", "0.60856956", "0.5958234", "0.5918465", "0.588291", "0.58822876", "0.58705306", "0.58403915", "0.58131826", "0.5741274", "0.5721955", "0.57152736", "0.5703978", "0.5695917", "0.56623244", "0.56454694", "0.5637975", "0.56073815", "0.5595677", "0.55901134", "0.55836326", "0.5572844", "0.55645776", "0.5560169", "0.554051" ]
0.6841713
0
Substructure similarity based on siblinghood, PARTIAL match May use the subtree calculation as the metric is basically similar (just change the input)
def siblinghood_sim_partial(siblinghood_1, siblinghood_2): return SubstructureAgreement.substructure_sim_partial(siblinghood_1, siblinghood_2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def substructure_sim_partial(subtrees_1, subtrees_2):\n assert(len(subtrees_1) == len(subtrees_2))\n n = len(subtrees_1)\n f1 = np.zeros(n)\n f2 = np.zeros(n)\n for i in range(n):\n if (subtrees_1[i] == subtrees_2[i]): # exact match, or both are dropped\n f1[i] = 1.0\n f2[i] = 1.0\n else: # partial match\n f1[i] = SubstructureAgreement.save_div( len(subtrees_1[i].intersection(subtrees_2[i])), float(len(subtrees_2[i])))\n f2[i] = SubstructureAgreement.save_div( len(subtrees_2[i].intersection(subtrees_1[i])), float(len(subtrees_1[i])))\n\n return (np.sum(f1) + np.sum(f2)) / (2.0 * float(n)) # average of average recall", "def substructure_sim_exact(subtrees_1, subtrees_2):\n assert(len(subtrees_1) == len(subtrees_2))\n n = len(subtrees_1)\n f1 = np.zeros(n)\n for i in range(n):\n f1[i] = subtrees_1[i] == subtrees_2[i] # calculate the number of matching pairs\n\n return float(np.count_nonzero(f1)) / float(len(f1))", "def siblinghood_sim_exact(siblinghood_1, siblinghood_2):\n return SubstructureAgreement.substructure_sim_exact(siblinghood_1, siblinghood_2)", "def test4(self):\n tgts = [ ('CCC(O)C(=O)O',\n ('CCC','OCC','OCC=O','OCCO','CCCC','OC=O','CC(O)C')),\n ]\n for smi,matches in tgts:\n m = Chem.MolFromSmiles(smi)\n fp1 = Chem.RDKFingerprint(m,2,7,9192,4,0)\n obs = fp1.GetOnBits()\n for match in matches:\n m2 = Chem.MolFromSmiles(match)\n fp2 = Chem.RDKFingerprint(m2,2,7,9192,4,0)\n v1,v2 = DataStructs.OnBitProjSimilarity(fp2,fp1)\n assert feq(v1,1.0000),'substruct %s not properly contained in %s'%(match,smi)", "def compare_trees(first_soup: HTMLStrip, second_soup: HTMLStrip) -> float:\n first_tree = Tree.from_soup_object(first_soup.file_name, first_soup.original_soup)\n second_tree = Tree.from_soup_object(second_soup.file_name, second_soup.original_soup)\n\n common_paths_size: int = first_tree.num_of_common_paths(second_tree)\n target_size: int = second_tree.total_num_of_paths()\n similarity = float(common_paths_size)*100/target_size\n print(f'{similarity:.2f}')\n return similarity", "def test5(self):\n tgts = [ ('CCC(O)C(=O)O',\n ('O[CH-][CH2-]','O[CH-][C-]=O')),\n ]\n for smi,matches in tgts:\n m = Chem.MolFromSmiles(smi)\n fp1 = Chem.RDKFingerprint(m,2,7,9192,4,1)\n obs = fp1.GetOnBits()\n for match in matches:\n m2 = Chem.MolFromSmiles(match)\n fp2 = Chem.RDKFingerprint(m2,2,7,9192,4,1)\n v1,v2 = DataStructs.OnBitProjSimilarity(fp2,fp1)\n assert feq(v1,1.0000),'substruct %s not properly contained in %s'%(match,smi)", "def find_similarities(data):\n\n good_one = copy.deepcopy(data[\"no_vpn\"])\n del data[\"no_vpn\"]\n\n all_minhashes = []\n\n # put everything easy to work with\n for extension in data:\n for webpage in data[extension]:\n for f in data[extension][webpage]:\n path = (extension, webpage, f)\n all_minhashes.append((path, data[extension][webpage][f]))\n aux = copy.deepcopy(all_minhashes)\n path_data, data = aux.pop(0)\n all_minhashes.pop(0)\n\n result = {}\n while path_data and data:\n for file in good_one[path_data[1]]:\n # print(\"Jaccard among %s and %s\" % (path_data[2], file))\n path_to_file = \"/\".join((\"no_vpn\", path_data[1], file))\n similarity = data[\"hash\"].jaccard(good_one[path_data[1]][file][\"hash\"])\n try:\n result[\"/\".join(path_data)].append((path_to_file, similarity))\n except KeyError:\n result[\"/\".join(path_data)] = [\n (path_to_file, similarity),\n ]\n\n all_minhashes.append((path_data, data))\n try:\n path_data, data = aux.pop(0)\n except IndexError:\n path_data = data = None\n\n return result", "def compute_similarity(self, seq_node, 
**kwargs):\n pass", "def subpath_sim(subpaths_1, subpaths_2):\n u = subpaths_1.union(subpaths_2)\n f1 = np.zeros(len(u))\n f2 = np.zeros(len(u))\n u = list(u)\n\n # convert graph into one-hot-vector (based on the precense of subpaths)\n for i in range(len(u)):\n if u[i] in subpaths_1:\n f1[i] = 1\n if u[i] in subpaths_2:\n f2[i] = 1\n\n score = np.dot(f1, f2) * (np.count_nonzero(f1) + np.count_nonzero(f2)) / (2 * (np.count_nonzero(f1) * np.count_nonzero(f2)))\n\n if math.isnan(score): # in case of empty set\n return 0.0\n else:\n return score", "def test_tree_intersection_on_challenge_example(first_example, second_example):\n expected = [500, 350, 200, 175, 160, 125, 100]\n actual = tree_intersection(first_example, second_example)\n assert expected == actual", "def sim(w1, w2, tree):\n w1_node = tree.search(w1)\n w2_node = tree.search(w2)\n\n if w1_node is None or w2_node is None:\n return\n\n sum_of_w1 = 0\n sum_of_w2 = 0\n dot_product = 0\n for i in range(len(w1_node.data)):\n sum_of_vector = float(w1_node.data[i]) * float(w2_node.data[i])\n\n sum_of_w1 += (float(w1_node.data[i]) * float(w1_node.data[i]))\n sum_of_w2 += (float(w2_node.data[i]) * float(w2_node.data[i]))\n\n dot_product += sum_of_vector\n return dot_product / (sqrt(sum_of_w1) * sqrt(sum_of_w2))", "def test_tree_support(self):\r\n master_tree = parse_newick('((a:2,b:3)ab:2,(c:1,d:2)cd:7)rt;')\r\n \"\"\"\r\n /-------.5 /-a\r\n ---1| \\-b\r\n \\------.5 /-c\r\n \\-d\r\n \"\"\"\r\n t2 = parse_newick('((a:2,b:3,c:33)ho:2,d:7);') # abc are siblings\r\n\r\n tc.tree_support(master_tree, t2)\r\n assert_almost_equal(\r\n master_tree.getNodeMatchingName('rt').bootstrap_support, 1.0)", "def nodes_ratio(data: Data, position_against_soma=None, node_types=None, filter_layers=None):\n soma = data.morphology.get_soma()\n if position_against_soma=='above':\n criterion=lambda nod:((nod['type'] in node_types) if node_types is not None else True) and nod['y']<soma['y']\n elif position_against_soma=='below':\n criterion=lambda nod:((nod['type'] in node_types) if node_types is not None else True) and nod['y']>soma['y']\n else:\n criterion=lambda nod:nod['type'] in node_types if node_types is not None else True\n num_type_nodes = data.morphology.get_node_by_types(node_types)\n if filter_layers and isinstance(filter_layers, list):\n result = {}\n for layer in filter_layers:\n filter_layer_depth = data.reference_layer_depths.get(layer)\n y_min, y_max, is_scale = filter_layer_depth.pia_side, filter_layer_depth.wm_side, filter_layer_depth.scale\n if is_scale:\n selected_nodes = data.morphology.filter_nodes(lambda nod: criterion(nod) and y_min<nod['y']<y_max)\n else:\n selected_nodes = data.morphology.filter_nodes(lambda nod: criterion(nod) and nod['y']>y_min)\n result[layer]=len(selected_nodes)/len(num_type_nodes)\n return result\n else:\n selected_nodes = data.morphology.filter_nodes(criterion)\n return len(selected_nodes)/len(num_type_nodes)", "def reduce_rec(node):\n if node.is_leaf():\n return\n for edge in node.child_nodes:\n # replacing the subdiagram with a singular isomorphic one\n node.child_nodes[edge] = hashtable[node.child_nodes[edge].__hash__()]\n # and going down recursively along that subdiagram\n reduce_rec(node.child_nodes[edge])", "def remove_duplicates_by_matching():\n # 1) and 2)\n all_blobs = vision.pqr_r\n all_angles = np.zeros(0)\n right = np.transpose(vision.pqr_r)\n left = np.transpose(vision.pqr_l)\n\n if not right.size and not left.size:\n return (0, 0)\n\n if not right.size:\n for l in left:\n angle = np.arctan2(l[1], 
l[0]) * 180 / pi\n all_angles = np.append(all_angles, angle)\n return (vision.pqr_l, all_angles)\n\n if not left.size:\n for r in right:\n angle = np.arctan2(r[1], r[0]) * 180 / pi\n all_angles = np.append(all_angles, angle)\n return (vision.pqr_r, all_angles)\n\n\n for r in right:\n angle = np.arctan2(r[1], r[0]) * 180 / pi\n all_angles = np.append(all_angles, angle)\n cand_r = np.zeros((3,1))\n if angle < 15:\n cand_r = np.append(cand_r, [[r[0]], [r[1]], [r[2]]], axis=1)\n cand_r = np.delete(cand_r, 0, axis=1)\n cand_r = np.transpose(cand_r)\n\n for l in left:\n angle = np.arctan2(l[1], l[0]) * 180 / pi\n dot = 0\n if angle > -15:\n dl = max(0.001, np.linalg.norm(l))\n for r in cand_r:\n dr = max(0.001, np.linalg.norm(r))\n dot = np.dot(r, l) / (dr * dl)\n print(dot)\n if dot > 0.9:\n continue\n \n if dot <= 0.9:\n all_blobs = np.append(all_blobs, [[l[0]], [l[1]], [l[2]]], axis=1)\n all_angles = np.append(all_angles, angle)\n\n # make even number of blobs if necessary\n #if all_blobs.shape[1] % 2:\n # all_blobs = np.delete(all_blobs, 0, axis=1)\n # all_angles = np.delete(all_angles, 0)\n\n\n\n return (all_blobs, all_angles)", "def volume_similarity_pd(pd1,pd2):\n\tvolume_similarity = {}\n\n\t# print(\"aaaaa\")\n\n\t# union = vtk.vtkBooleanOperationPolyDataFilter()\n\t# union.SetOperationToDifference()\n\t# union.SetInputData(0,pd1)\n\t# union.SetInputData(1,pd2)\n\t# union.Update()\n\t# u = union.GetOutput()\n\n\t# massUnion = vtk.vtkMassProperties()\n\t# massUnion.SetInputData(u)\n\n\t# intersection = vtk.vtkBooleanOperationPolyDataFilter()\n\t# intersection.SetOperationToIntersection()\n\t# intersection.SetInputData(0,pd1)\n\t# intersection.SetInputData(1,pd2)\n\t# intersection.Update()\n\t# i = intersection.GetOutput()\n\t# massIntersection = vtk.vtkMassProperties()\n\t# massIntersection.SetInputData(i)\n\n\t# # metrics\n\t# tqdm.write(\"intersection vol: {:.2f}\".format(massIntersection.GetVolume()))\n\t# tqdm.write(\"union vol: {:.2f}\".format(massUnion.GetVolume()))\n\n\t# volume_similarity[\"jaccard\"] = 1 - massIntersection.GetVolume()/massUnion.GetVolume()\n\n\t# tqdm.write(\"Jaccard distance: {:.2f}\".format(volume_similarity[\"jaccard\"]))\n\n\thausdorffDistFilter = vtk.vtkHausdorffDistancePointSetFilter()\n\thausdorffDistFilter.SetInputData(0, pd1)\n\thausdorffDistFilter.SetInputData(1, pd2)\n\thausdorffDistFilter.Update()\n\n\tvolume_similarity[\"hausdorff\"] = hausdorffDistFilter.GetHausdorffDistance()\n\tvolume_similarity[\"relative0\"] = hausdorffDistFilter.GetRelativeDistance()[0]\n\tvolume_similarity[\"relative1\"] = hausdorffDistFilter.GetRelativeDistance()[1]\n\ttqdm.write(\"Hausdorff distance: {:.2f} mm\".format(volume_similarity[\"hausdorff\"]))\n\ttqdm.write(\"Relative distance from pd1 to pd2: {:.2f} mm\".format(volume_similarity[\"relative0\"]))\n\ttqdm.write(\"Relative distance from pd2 to pd1: {:.2f} mm\".format(volume_similarity[\"relative1\"]))\n\n\treturn volume_similarity, hausdorffDistFilter.GetOutput(0), hausdorffDistFilter.GetOutput(1)", "def compare_nodes(G,all_match_pairs,match_pair,traversed,node1,node2, ports_weight):\n logger.debug(f\"comparing {node1},{node2}, traversed {traversed}\")\n nbrs1 = sorted(set(G.neighbors(node1)) - set(traversed))\n #remove dummies\n nbrs1 = sorted(set([nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=7]))\n nbrs2 = sorted(set(G.neighbors(node2)) - set(traversed))\n #remove dummies\n nbrs2 = sorted(set([nbr for nbr in nbrs2 if G.get_edge_data(node2, nbr)['weight'] !=7]))\n 
logger.debug(f\"node1:{node1},property: {G.nodes[node1]},neigbors1: {nbrs1}\")\n logger.debug(f\"node2:{node2},property: {G.nodes[node2]},neigbors2: {nbrs2}\")\n if not nbrs1 or not nbrs2:\n if compare_two_nodes(G, node1, node2, ports_weight):\n match_pair[node1] = node2\n logger.debug(f\"no new neihbours, returning recursion {match_pair}\")\n return\n elif len(nbrs1)> 10:\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n logger.debug(f\"skipping high fanout nets due to large computation, {node1} {nbrs1}\")\n traversed.append(node1)\n return\n elif len(nbrs2)> 10:\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n traversed.append(node2)\n logger.debug(f\"skipping high fanout nets due to large computation, {node2} {nbrs2}\")\n return\n\n if node1 == node2:\n if node1 in match_pair.keys() or node1 in match_pair.values():\n logger.debug(\"avoid existing pair wise symmetry\")\n return\n logger.debug(f\"single node {node1}, nbrs {nbrs1}, nbr_weight {[G.get_edge_data(node1,nbr) for nbr in nbrs1]}\")\n SD_nbrs= [nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=2]\n ## TBD: filter based on primitive constraints\n ## Right now will try to figure out S/D paths\n if len(SD_nbrs) ==0:\n logger.debug(f\"No SD paths found to traverse\")\n match_pair[node1]=node1\n elif len(SD_nbrs) ==1:\n logger.debug(f\"traversing single S/D path {SD_nbrs}\")\n match_pair[node1]=node1\n traversed.append(node1)\n compare_nodes(G,all_match_pairs,match_pair,traversed,SD_nbrs[0],SD_nbrs[0],ports_weight)\n else:\n logger.debug(f\" multiple nodes diverging {SD_nbrs}\")\n logger.debug(f\"nbr weights: {SD_nbrs} {[G.get_edge_data(node1, nbr)['weight'] for nbr in SD_nbrs ]}\")\n match_pair[node1]=node1\n traversed.append(node1)\n new_sp=sorted(set(SD_nbrs)-set(traversed))\n all_match_pairs_local={}\n for nbr1,nbr2 in combinations(new_sp, 2):\n logger.debug(f\"recursive pair call from single branch {nbr1} {nbr2}\")\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n if new_pair:\n #new_pair[nbr1]=nbr2\n all_match_pairs_local[nbr1+'_'+nbr2] = new_pair\n all_match_pairs_local={k: v for k, v in all_match_pairs_local.items() if len(v)>0}\n if len(all_match_pairs_local)==1:\n match_pair.update( all_match_pairs_local[list(all_match_pairs_local.keys())[0]])\n logger.debug(f\"found inline pair: {pprint.pformat(match_pair, indent=4)}\")\n else:\n for nbr1 in new_sp:\n if (nbr1+'_'+nbr1 not in all_match_pairs.keys()):\n logger.debug(f\"recursive single branch call from single branch {nbr1} {nbr1}\")\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr1,ports_weight)\n #filtering multiple axis of symmetries with same block, ideally they should be handled by array generation\n if new_pair:\n all_match_pairs[nbr1+'_'+nbr1] = new_pair\n logger.debug(f\"updating match pairs: {pprint.pformat(all_match_pairs, indent=4)}\")\n\n\n elif nbrs1 == nbrs2:\n logger.debug(f\"traversing converging branch\")\n match_pair[node1]=node2\n traversed+=[node1,node2]\n nbrs1=sorted(set(nbrs1)-set([node1,node2]))\n logger.debug(f\"all non traversed neighbours: {nbrs1}\")\n if len(nbrs1)==1:\n nbr1=nbr2=nbrs1[0]\n logger.debug(f\"keeping single converged branch inline {nbr1} {nbr2}\")\n compare_nodes(G,all_match_pairs,match_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n else:\n for nbr1,nbr2 
in combinations_with_replacement(nbrs1,2):\n logger.debug(f\"recursive call from converged branch {nbr1} {nbr2}\")\n if nbr1+'_'+nbr2 not in all_match_pairs.keys():\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n #filtering multiple axis of symmetries with same block, ideally they should be handled by array generation\n if new_pair:\n all_match_pairs[nbr1+'_'+nbr2] = new_pair\n logger.debug(f\"updating match pairs: {pprint.pformat(all_match_pairs, indent=4)}\")\n\n\n elif compare_two_nodes(G,node1,node2,ports_weight):\n nbrs1 = sorted(set([nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=2]))\n nbrs2 = sorted(set([nbr for nbr in nbrs2 if G.get_edge_data(node2, nbr)['weight'] !=2]))\n match_pair[node1]=node2\n traversed+=[node1,node2]\n logger.debug(f\"Traversing parallel branches from {node1},{node2} {nbrs1}, {nbrs2}\")\n nbrs1_wt = [G.get_edge_data(node1, nbr)['weight'] for nbr in nbrs1]\n nbrs2_wt = [G.get_edge_data(node2, nbr)['weight'] for nbr in nbrs2]\n unique_match=find_unique_matching_branches(G,nbrs1,nbrs2,ports_weight)\n if len(nbrs1)==0 or len(nbrs2)==0:\n logger.debug(f\"no new SD neihbours, returning recursion {match_pair}\")\n elif len(nbrs1) ==1 and len(nbrs2)==1:\n logger.debug(f\"traversing binary branch\")\n compare_nodes(G,all_match_pairs,match_pair,traversed,nbrs1.pop(),nbrs2.pop(),ports_weight)\n elif unique_match:\n logger.debug(f'traversing unique matches {unique_match}')\n match_pair[node1]=node2\n traversed+=[node1,node2]\n for nbr1,nbr2 in unique_match.items():\n logger.debug(f\"recursive call from binary {node1}:{node2} to {nbr1}:{nbr2}\")\n compare_nodes(G,all_match_pairs,match_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n elif len(nbrs1_wt)>len(set(nbrs1_wt))>1 and len(nbrs2_wt)>len(set(nbrs2_wt))>1:\n logger.debug(f\"setting new start points {node1} {node2}\")\n match_pair[node1]=node2\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n else:\n match_pair = {}\n logger.debug(f\"end all traversal from binary branch {node1} {node2}\")\n\n else:\n match_pair = {}\n logger.debug(f\"end of recursion branch, matches {match_pair}\")", "def test_calc_shared_phylotypes_pairwise(self):\r\n\r\n #self.assertEqual(_calc_shared_phylotypes_pairwise(self.otu_table, 0, 0), 5)\r\n #self.assertEqual(_calc_shared_phylotypes_pairwise(self.otu_table, 0, 1), 2)\r\n #self.assertEqual(_calc_shared_phylotypes_pairwise(self.otu_table, 0, 2), 3)\r\n #self.assertEqual(_calc_shared_phylotypes_pairwise(self.otu_table, 2, 2), 3)\r\n self.assertEqual(\r\n _calc_shared_phylotypes_pairwise(\r\n self.otu_table,\r\n 'S1',\r\n 'S1'),\r\n 5)\r\n self.assertEqual(\r\n _calc_shared_phylotypes_pairwise(\r\n self.otu_table,\r\n 'S1',\r\n 'S2'),\r\n 2)\r\n self.assertEqual(\r\n _calc_shared_phylotypes_pairwise(\r\n self.otu_table,\r\n 'S1',\r\n 'S3'),\r\n 3)\r\n self.assertEqual(\r\n _calc_shared_phylotypes_pairwise(\r\n self.otu_table,\r\n 'S3',\r\n 'S3'),\r\n 3)", "def all_match():\n S1=Spectrum.Spectrum()\n S1.add_peak(50.7,234)\n S1.add_peak(54.6,585)\n S1.add_peak(60.7,773)\n S1.add_peak(65.6,387)\n S1.add_peak(87.7,546)\n S1.add_peak(104.6,598)\n S1.pep_mass=100\n S1.euclidean_scale()\n\n S2=Spectrum.Spectrum()\n S2.add_peak(50.5,234/2)\n S2.add_peak(54.8,585/2)\n S2.add_peak(61.0,773/2)\n S2.add_peak(65.4,387/2)\n S2.add_peak(88.0,546/2)\n S2.add_peak(104.3,598/2)\n S2.pep_mass=100\n S2.euclidean_scale()\n\n 
score,peaks=similarity.cosine_score_max(S1,S2)\n assert peaks==6, \"Incorrect number of peaks matched with greedy method\"\n assert math.isclose(score,1.0), \"Incorrect score with greedy method\"\n\n score,peaks=similarity.cosine_score_greedy(S1,S2)\n assert peaks==6, \"Incorrect number of peaks matched with maximum weighted method\"\n assert math.isclose(score,1.0), \"Incorrect score with maximum weighted method\"", "def test_frac_similar(self):\n transitions = dict.fromkeys(\n [\n (\"A\", \"A\"),\n (\"A\", \"G\"),\n (\"G\", \"A\"),\n (\"G\", \"G\"),\n (\"U\", \"U\"),\n (\"U\", \"C\"),\n (\"C\", \"U\"),\n (\"C\", \"C\"),\n ]\n )\n\n s1 = self.RNA(\"UCAGGCAA\")\n s2 = self.RNA(\"CCAAAUGC\")\n s3 = self.RNA(\"GGGGGGGG\")\n e = self.RNA(\"\")\n\n def test(x, y, z):\n return self.assertFloatEqual(x.frac_similar(y, transitions), z)\n\n test(e, e, 0)\n test(s1, e, 0)\n test(s1, s1, 1)\n test(s1, s2, 7.0 / 8)\n test(s1, s3, 5.0 / 8)\n test(s2, s3, 4.0 / 8)", "def struct_sim(image1: np.ndarray, image2: np.ndarray, **kwargs) -> np.ndarray:\n n, h, w = image1.shape\n assert (n, h, w) == image2.shape\n ssim = np.zeros(n)\n for ii in range(n):\n ssim[ii] = structural_similarity(image1[ii], image2[ii], **kwargs)\n return ssim", "def parent_children_match(parent_snap_index, child_parent_snap_index):\n # too slow\n #parent_child_start_time = time.time()\n #parent_index_dict = dict( \n # (k, i) \n # for i, k in enumerate(parent_snap_index) \n # )\n #child_index_dict = dict( \n # (k, i) \n # for i, k in enumerate(child_parent_snap_index) \n # )\n #parent_child_meet = np.intersect1d( parent_snap_index, child_parent_snap_index ) \n\n #parents = np.array([\n # parent_index_dict[k] for k in parent_child_meet\n # ])\n #children = np.array([\n # child_index_dict[k] for k in parent_child_meet\n # ])\n #print 'Parent Child match takes ', time.time() - parent_child_start_time, ' seconds'\n \n #parent_child_start_time = time.time()\n parent_sort_index = np.argsort(parent_snap_index)\n child_sort_index = np.argsort(child_parent_snap_index)\n sorted_parent_snap_index = parent_snap_index[parent_sort_index]\n sorted_child_parent_snap_index = child_parent_snap_index[child_sort_index]\n\n parent_child_in1d = np.in1d(sorted_parent_snap_index, sorted_child_parent_snap_index)\n child_parent_in1d = np.in1d(sorted_child_parent_snap_index, sorted_parent_snap_index)\n \n parents = parent_sort_index[parent_child_in1d]\n children = child_sort_index[child_parent_in1d]\n #print 'Parent Child match takes ', time.time() - parent_child_start_time, ' seconds'\n\n return parents, children", "def sample_cond_on_subtree_nodes(new, tree, subtree_nodes, subtree_edges, subtree_adjlist):\n new_separators = {}\n new_cliques = set()\n old_cliques = set()\n subtree_order = len(subtree_nodes)\n #print(\"subtree nodes:\" + str(subtree_nodes))\n\n if subtree_order == 0:\n # If the tree, tree is empty (n isolated node),\n # add random neighbor.\n c = frozenset([new])\n new_cliques.add(c)\n #c2 = tree.nodes()[0] # nx 1.9\n c2 = list(tree.nodes())[0] # GraphTool\n #c2 = list(tree.nodes)[0] # nx 2.1\n tree.add_node(c, label=tuple([new]), color=\"red\")\n tree.add_edge(c, c2, label=tuple([]))\n\n sep = frozenset()\n #tree.fix_graph()\n trilearn.graph.junction_tree.randomize_at_sep(tree, sep)\n\n new_separators[sep] = [(c, c2)]\n # tree TODO: the actual value for the key is not needed.\n P = {c: np.exp(-tree.log_nu(sep))}\n return (old_cliques, new_cliques, new_separators, P, {c: 1.0})\n\n S = {c: set() for c in subtree_nodes}\n M = {c: set() for c in 
subtree_nodes}\n for c in S:\n for neig in subtree_adjlist[c]:\n #S[c] = S[c] | (c & neig)\n S[c] |= (c & neig)\n RM = {c: c - S[c] for c in S}\n C = {c: set() for c in subtree_nodes}\n P = {}\n N_S = {c: set() for c in subtree_nodes}\n\n sepCondition = {}\n for c in RM:\n sepCondition[c] = len({neig for neig in subtree_adjlist[c] if\n S[c] == neig & c}) > 0 or len(subtree_adjlist) == 1\n\n if sepCondition[c] is True:\n tmp = np.array(list(RM[c]))\n first_node = []\n if len(tmp) > 0:\n # Connect to one node\n first_ind = np.random.randint(len(tmp))\n first_node = tmp[[first_ind]]\n tmp = np.delete(tmp, first_ind)\n\n rest = set()\n if len(tmp) > 0:\n # Connect to the rest of the nodes if there are any left\n rest = aux.random_subset(tmp)\n M[c] = frozenset(rest | set(first_node))\n else:\n M[c] = frozenset(aux.random_subset(RM[c]))\n\n # Create the new cliques\n for clique in M:\n C[clique] = frozenset(M[clique] | S[clique] | {new})\n new_cliques.add(C[clique])\n\n # Get the neighbor set of each c which can be moved to C[c]\n for clique in subtree_nodes:\n N_S[clique] = {neig for neig in tree.neighbors(clique)\n if neig & clique <= C[clique] and neig not in subtree_nodes}\n\n # Add the new cliques\n #for c in subtree_nodes:\n # tree.add_node(C[c], label=str(tuple(C[c])), color=\"red\")\n tree.add_nodes_from([C[c] for c in subtree_nodes])\n\n # Construct and add the new edges between the new cliques,\n # replicating the subtree\n new_subtree_edges = []\n for e in subtree_edges:\n sep = C[e[0]] & C[e[1]]\n if not sep in new_separators:\n new_separators[sep] = []\n new_separators[sep].append((C[e[0]], C[e[1]]))\n #new_subtree_edges.append((C[e[0]], C[e[1]]))\n tree.add_edge(C[e[0]], C[e[1]])\n # tree.add_edges_from(new_subtree_edges)\n\n # Connect cliques in the subtree to the new cliques\n for c in subtree_nodes:\n # Move the neighbors of a swallowed node to the swallowing node\n # Remove the swallowed node\n\n if C[c] - {new} == c:\n # If connecting to all nodes in a clique (the node should be replaces instead)\n for neig in tree.neighbors(c):\n if neig not in subtree_nodes:\n tree.add_edge(C[c], neig)#, label=lab)\n\n tree.remove_node(c)\n old_cliques.add(c)\n else: # If not connecting to every node in a clique\n sep = C[c] & c\n if not sep in new_separators:\n new_separators[sep] = []\n new_separators[sep].append((C[c], c))\n\n #print \"adding edge: \" + str((C[c], c))\n tree.add_edge(C[c], c)\n # Pick random subset of neighbors intersecting with subset of S U M\n\n N = aux.random_subset(N_S[c])\n #for neig in N:\n # tree.add_edge(C[c], neig)\n\n tree.add_edges_from([(C[c], neig) for neig in N])\n tree.remove_edges_from([(c, neig) for neig in N])\n\n # Compute probabilities\n N = {}\n for c in subtree_nodes:\n if sepCondition[c] is False:\n # Every internal node in c belongs to a separator\n P[c] = np.power(2.0, - len(RM[c]))\n #P[c] = Fraction(1, 2 ** len(RM[c]))\n if not len(c) + 1 == len(C[c]):\n N[c] = np.power(2.0, - len(N_S[c]))\n #N[c] = Fraction(1, 2 ** len(N_S[c]))\n else:\n N[c] = 1.0\n #N[c] = Fraction(1,1)\n else:\n P[c] = 1.0\n #P[c] = Fraction(1, 1)\n N[c] = 1.0\n #N[c] = Fraction(1, 1)\n if len(RM[c]) > 1:\n P[c] = 1.0 / len(RM[c])\n #P[c] = Fraction(1, len(RM[c]))\n P[c] *= np.power(2.0, - (len(RM[c]) - 1.0)) * len(M[c])\n #P[c] *= Fraction(len(M[c]), 2 ** (len(RM[c]) - 1.0)) \n if not len(c) + 1 == len(C[c]): # c not swallowed by C[c]\n #N[c] = np.power(2.0, - len(N_S[c]))\n N[c] = Fraction(1, 2 ** len(N_S[c]))\n\n # Remove the edges in tree\n 
tree.remove_edges_from(subtree_edges)\n # Todo: This will introduce a bug if we instead replace a node.\n return (old_cliques, new_cliques, new_separators, P, N)", "def _molge(x, y):\n if x is None or y is None:\n return False\n if hasattr(x, '_substructfp'):\n if not hasattr(y, '_substructfp'):\n y._substructfp = _fingerprinter(y, True)\n if not DataStructs.AllProbeBitsMatch(y._substructfp, x._substructfp):\n return False\n match = x.GetSubstructMatch(y)\n x.__sssAtoms = []\n if match:\n if highlightSubstructures:\n x.__sssAtoms = list(match)\n return True\n else:\n return False", "def test_similarity_measure_size_compatibility():\n\n patch1 = torch.randn(size=(4, 6, 2))\n patch2 = torch.randn(size=(4, 6, 2))\n\n ssd_similarity_measure(patch1, patch2)\n sad_similarity_measure(patch1, patch2)\n assert True # just check if the ssd calculation was successfull\n\n patch1 = torch.randn(size=(4, 3))\n patch2 = torch.randn(size=(4, 3))\n\n ssd_similarity_measure(patch1, patch2)\n sad_similarity_measure(patch1, patch2)\n assert True # just check if the ssd calculation was successfull\n\n patch1 = torch.randn(size=(5,))\n patch2 = torch.randn(size=(5,))\n\n ssd_similarity_measure(patch1, patch2)\n sad_similarity_measure(patch1, patch2)\n assert True # just check if the ssd calculation was successfull\n\n patch1 = torch.randn(size=(3, 7, 2, 4))\n patch2 = torch.randn(size=(3, 7, 2, 4))\n\n ssd_similarity_measure(patch1, patch2)\n sad_similarity_measure(patch1, patch2)\n assert True # just check if the ssd calculation was successful", "def Path_Similarity(word1, word2):\n\n # Initial test to confirm unique nouns, otherwise passes back 1\n\n if word1 == word2:\n return 1, word1\n\n # Sets up Initial Variables & Dictionaries\n\n stepup1 = [word1]\n stepup2 = [word2]\n dict1= {}\n dict2= {}\n currentstep1 = []\n currentstep2 = []\n\n # Iterates through a loop an arbitrary # of times, adding new hypernyms\n # for each word to individual dictionaries with the number of iterations\n # as the key to the dictionary. The two dictionaries are saved perpetually\n\n for la in range(50):\n for i in range(len(stepup1)):\n currentstep1 += (stepup1[i].hypernyms())\n for syn in stepup1:\n dict1[syn] = la\n for i in range(len(stepup2)):\n currentstep2 = (stepup2[i].hypernyms())\n for syn in stepup2:\n dict2[syn] = la\n\n # Variables are reset and converted to continue the next stage of the loop\n\n stepup1 = currentstep1\n stepup2 = currentstep2\n currentstep1 = []\n currentstep2 = []\n\n\n # Each loop the dictionaries are checked for matches. 
I have confirmed that\n # checking each cycle is faster than checking at the end of an arbitrary\n # number of cycles.\n\n # Removes applied words as Possible Subsumers Per Fridays Clas\n dict1.pop(word1)\n dict2.pop(word2)\n\n #Gets possible Least Common Subsumers\n dict1Set = set(dict1)\n dict2Set = set(dict2)\n d = {}\n for name in dict1Set.intersection(dict2Set):\n d[name] = dict1[name]\n pos_lcs = [key for min_value in (min(d.values()),) for key in d if d[key] == min_value]\n\n #Returns Actual LCS\n key_lcs = []\n for i in pos_lcs:\n key_lcs.append(shortestPath(wn.synset('entity.n.01'),i))\n lcs = (pos_lcs[key_lcs.index(max(key_lcs))])\n\n #Returns path Similarity Value and Synset of LCS; Must Error Proof\n\n return 1/(dict1[lcs] + dict2[lcs]), lcs", "def test_isomorphism_match(data):\n\n reference = data.draw(ISO_BUILDER)\n nodes = data.draw(st.sets(st.sampled_from(list(reference.nodes)),\n max_size=len(reference)))\n graph = reference.subgraph(nodes)\n\n note((\"Reference nodes\", reference.nodes(data=True)))\n note((\"Reference edges\", reference.edges))\n note((\"Graph nodes\", graph.nodes(data=True)))\n note((\"Graph edges\", graph.edges))\n\n node_match = nx.isomorphism.categorical_node_match('element', None)\n matcher = nx.isomorphism.GraphMatcher(reference, graph, node_match=node_match)\n expected = make_into_set(matcher.subgraph_isomorphisms_iter())\n found = make_into_set(vermouth.graph_utils.isomorphism(reference, graph))\n\n note((\"Found\", found))\n note((\"Expected\", expected))\n\n if not expected:\n event(\"Not subgraphs\")\n if found == expected:\n event(\"Exact match\")\n\n assert found <= expected", "def reduce(self):\n # initializing a hashtable for all the nodes in the tree\n hashtable = {}\n for node_it in self.nodes:\n # storing each node only once in the table\n if not node_it.__hash__() in hashtable:\n hashtable[node_it.__hash__()] = node_it\n\n def reduce_rec(node):\n \"\"\"\n The recursive method for the reduction.\n \"\"\"\n if node.is_leaf():\n return\n for edge in node.child_nodes:\n # replacing the subdiagram with a singular isomorphic one\n node.child_nodes[edge] = hashtable[node.child_nodes[edge].__hash__()]\n # and going down recursively along that subdiagram\n reduce_rec(node.child_nodes[edge])\n\n # calling the reduction method\n reduce_rec(self)\n # reinitializing the diagram\n self.reinitialize()\n return self", "def ratio(n1,n2, explain=0, optimize=False):\n weight_normal_form = 5.0 #distance between soundexes of normal form\n weight_normal_form_soundex = 8.0 #average distance between soundexes of normal form\n weight_geslachtsnaam1 = 10.0 #distance between soundexes of geslachtsnamen\n weight_geslachtsnaam2 = 10.0 #distance between geslachtsnaam\n weight_initials = 2 #distance between initials\n\n nf1 = n1.guess_normal_form()\n nf2 = n2.guess_normal_form()\n\n if not nf1 or not nf2:\n return 0.0\n elif nf1 == nf2:\n return 1.0\n ratio_normal_form = Similarity.average_distance(split(nf1), split(nf2))\n \n #create a simkplified soundex set for this name\n #remove stopwords\n# nf1 = remove_stopwords( nf1)\n# nf2 = remove_stopwords( nf2)\n \n se1 = n1.get_normal_form_soundex()\n se2 = n2.get_normal_form_soundex()\n ratio_normal_form_soundex = Similarity.average_distance( se1, se2)\n \n #gelachtsnaam wordt op twee manieren met elkaar vergeleken\n g1 = n1.geslachtsnaam() #or n1.get_volledige_naam()\n g2 = n2.geslachtsnaam() #or n2.get_volledige_naam()\n g1 = to_ascii(g1)\n g2 = to_ascii(g2)\n if not optimize:\n #de soundexes van de achternaam worden 
meegewoen\n #g1_soundex = n1.soundex_nl(g1, group=2, length=-1)\n g1_soundex = n1.geslachtsnaam_soundex()\n #g2_soundex = n2.soundex_nl(g2, group=2, length=-1)\n g2_soundex = n2.geslachtsnaam_soundex()\n ratio_geslachtsnaam1 = Similarity.average_distance(g1_soundex, g2_soundex)\n else:\n ratio_geslachtsnaam1 = 1 \n weight_geslachtsnaam1 = 0\n \n #n de afstand van de woorden in de achtenraam zelf\n ratio_geslachtsnaam2 = Similarity.average_distance(\n re.split('[ \\.\\,\\-]', g1.lower()),\n re.split('[ \\.\\,\\-]', g2.lower()),\n levenshtein_ratio)\n n1_initials = n1.initials()\n n1_initials_lower = n1_initials.lower()\n n2_initials = n2.initials()\n n2_initials_lower = n2_initials.lower()\n n1_contains_initials = n1.contains_initials()\n n2_contains_initials = n2.contains_initials()\n #count initials only if we have more than one\n #(or perhaps make this: if we know the first name)\n if len(n1_initials) == 1 or len(n2_initials) == 1:\n #initials count much less if there is only one\n weight_initials = weight_initials_if_one_name_consists_of_one_word_only\n# ratio_initials = .5\n ratio_initials = levenshtein_ratio(n1_initials_lower, n2_initials_lower)\n elif n1_contains_initials or n2_contains_initials:\n ratio_initials = levenshtein_ratio(n1_initials_lower, n2_initials_lower)\n weight_initials = weight_initials_if_one_name_is_in_initials\n elif len(n1_initials) > 1 and len(n2_initials) > 1:\n ratio_initials = levenshtein_ratio(n1_initials_lower, n2_initials_lower)\n else:\n ratio_initials = 0.7\n \n if n1_contains_initials or n2_contains_initials:\n weight_normal_form = weight_normal_form_if_one_name_is_in_initials \n weight_normal_form_soundex = weight_normal_form_soundex_if_one_name_is_in_initials\n\n counter = (ratio_normal_form * weight_normal_form +\n ratio_normal_form_soundex * weight_normal_form_soundex +\n ratio_geslachtsnaam1 * weight_geslachtsnaam1 +\n ratio_geslachtsnaam2 * weight_geslachtsnaam2 +\n ratio_initials * weight_initials)\n numerator = (weight_normal_form + weight_normal_form_soundex +\n weight_initials + weight_geslachtsnaam1 + weight_geslachtsnaam2)\n if numerator == 0:\n return 0.0\n final_ratio = counter/numerator\n\n if explain:\n s = '-' * 100 + '\\n'\n s += 'Naam1: %s [%s] [%s] %s\\n' % (n1, n1_initials, n1.guess_normal_form(), se1)\n s += 'Naam2: %s [%s] [%s] %s\\n' % (n2, n2_initials, n2.guess_normal_form(), se2)\n s += 'Similarity ratio: %s\\n' % final_ratio\n s += '--- REASONS' + '-' * 30 + '\\n'\n format_s = '%-30s | %-10s | %-10s | %-10s | %-10s | %s-10s\\n'\n s += format_s % ('\\t property', ' ratio', ' weight','relative_weight', ' r*w', 'r * relative_w')\n s += '\\t' + '-' * 100 + '\\n'\n format_s = '\\t%-30s | %-10f | %-10f | %-10f | %-10f | %-10f\\n'\n s += format_s % (' normal_form', ratio_normal_form, weight_normal_form,weight_normal_form/counter, ratio_normal_form * weight_normal_form, ratio_normal_form * weight_normal_form/counter)\n s += format_s % ('soundex van normal_form', ratio_normal_form_soundex, weight_normal_form_soundex,weight_normal_form_soundex/counter, ratio_normal_form_soundex* weight_normal_form_soundex, ratio_normal_form_soundex * weight_normal_form_soundex/counter)\n s += format_s % ('soundex van geslachtsnaam1', ratio_geslachtsnaam1, weight_geslachtsnaam1,weight_geslachtsnaam1/counter, ratio_geslachtsnaam1 * weight_geslachtsnaam1, ratio_geslachtsnaam1 * weight_geslachtsnaam1/counter)\n s += format_s % ('geslachtsnaam', ratio_geslachtsnaam2, weight_geslachtsnaam2,weight_geslachtsnaam2/counter, ratio_geslachtsnaam2 
*weight_geslachtsnaam2 , ratio_geslachtsnaam2 * weight_geslachtsnaam2/counter)\n s += format_s % ('initials', ratio_initials, weight_initials, weight_initials/counter, ratio_initials *weight_initials, ratio_initials * weight_initials/counter)\n s += '\\tTOTAL (numerator) | %s (counter = %s)\\n' % (counter, numerator)\n \n return s\n return final_ratio", "def needleMatching(self):\r\n # productive\r\n profprint()\r\n modelNodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLModelNode')\r\n nbNode = modelNodes.GetNumberOfItems()\r\n result = []\r\n found = []\r\n # print nbNode\r\n for nthNode in range(nbNode):\r\n node = slicer.mrmlScene.GetNthNodeByClass(nthNode, 'vtkMRMLModelNode')\r\n if node.GetID() not in found and node.GetAttribute('type') != 'Validation':\r\n dist = []\r\n polydata = node.GetPolyData()\r\n if polydata != None:\r\n bounds = polydata.GetBounds()\r\n for nthNode2 in range(nbNode):\r\n node2 = slicer.mrmlScene.GetNthNodeByClass(nthNode2, 'vtkMRMLModelNode')\r\n if node2.GetID() not in found and node2.GetAttribute('type') == 'Validation':\r\n polydata2 = node2.GetPolyData()\r\n if polydata2 != None and polydata2.GetNumberOfPoints() > 100 and polydata.GetNumberOfPoints() > 100:\r\n tipDistance = self.distTip(int(node.GetID().strip('vtkMRMLModelNode')) , int(node2.GetID().strip('vtkMRMLModelNode')))\r\n baseDistance = self.distBase(int(node.GetID().strip('vtkMRMLModelNode')) , int(node2.GetID().strip('vtkMRMLModelNode')))\r\n name = node.GetName()\r\n manualName = name.lstrip('auto-seg_').lstrip('manual-seg_').lstrip('obturator-seg_').lstrip('0123456789').lstrip('-ID-vtkMRMLModelNode').lstrip('0123456789-')\r\n if manualName==node2.GetName(): dist.append([tipDistance, node2.GetID(), node2.GetName()])\r\n # print tipDistance\r\n if dist != []:\r\n match = [min(dist)[0], min(dist)[1], node.GetID(), min(dist)[2]]\r\n result.append(match)\r\n found.append(min(dist)[1])\r\n found.append(node.GetID())\r\n node.GetDisplayNode().SetSliceIntersectionVisibility(1)\r\n # print result\r\n return result" ]
[ "0.6594799", "0.6575489", "0.64707655", "0.58146197", "0.5612342", "0.5606178", "0.5466558", "0.5404911", "0.5382928", "0.5352824", "0.5314969", "0.5312861", "0.5297801", "0.52746844", "0.52396566", "0.52378243", "0.52237767", "0.5201112", "0.5174336", "0.51694596", "0.51549923", "0.51526266", "0.5125546", "0.5114943", "0.5108138", "0.51040137", "0.50976187", "0.5075049", "0.5060798", "0.50351006" ]
0.66305006
0
Substructure similarity based on siblinghood, EXACT match May use the subtree calculation as the metric is basically similar (just change the input)
def siblinghood_sim_exact(siblinghood_1, siblinghood_2): return SubstructureAgreement.substructure_sim_exact(siblinghood_1, siblinghood_2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def substructure_sim_exact(subtrees_1, subtrees_2):\n assert(len(subtrees_1) == len(subtrees_2))\n n = len(subtrees_1)\n f1 = np.zeros(n)\n for i in range(n):\n f1[i] = subtrees_1[i] == subtrees_2[i] # calculate the number of matching pairs\n\n return float(np.count_nonzero(f1)) / float(len(f1))", "def substructure_sim_partial(subtrees_1, subtrees_2):\n assert(len(subtrees_1) == len(subtrees_2))\n n = len(subtrees_1)\n f1 = np.zeros(n)\n f2 = np.zeros(n)\n for i in range(n):\n if (subtrees_1[i] == subtrees_2[i]): # exact match, or both are dropped\n f1[i] = 1.0\n f2[i] = 1.0\n else: # partial match\n f1[i] = SubstructureAgreement.save_div( len(subtrees_1[i].intersection(subtrees_2[i])), float(len(subtrees_2[i])))\n f2[i] = SubstructureAgreement.save_div( len(subtrees_2[i].intersection(subtrees_1[i])), float(len(subtrees_1[i])))\n\n return (np.sum(f1) + np.sum(f2)) / (2.0 * float(n)) # average of average recall", "def siblinghood_sim_partial(siblinghood_1, siblinghood_2):\n return SubstructureAgreement.substructure_sim_partial(siblinghood_1, siblinghood_2)", "def compare_trees(first_soup: HTMLStrip, second_soup: HTMLStrip) -> float:\n first_tree = Tree.from_soup_object(first_soup.file_name, first_soup.original_soup)\n second_tree = Tree.from_soup_object(second_soup.file_name, second_soup.original_soup)\n\n common_paths_size: int = first_tree.num_of_common_paths(second_tree)\n target_size: int = second_tree.total_num_of_paths()\n similarity = float(common_paths_size)*100/target_size\n print(f'{similarity:.2f}')\n return similarity", "def subpath_sim(subpaths_1, subpaths_2):\n u = subpaths_1.union(subpaths_2)\n f1 = np.zeros(len(u))\n f2 = np.zeros(len(u))\n u = list(u)\n\n # convert graph into one-hot-vector (based on the precense of subpaths)\n for i in range(len(u)):\n if u[i] in subpaths_1:\n f1[i] = 1\n if u[i] in subpaths_2:\n f2[i] = 1\n\n score = np.dot(f1, f2) * (np.count_nonzero(f1) + np.count_nonzero(f2)) / (2 * (np.count_nonzero(f1) * np.count_nonzero(f2)))\n\n if math.isnan(score): # in case of empty set\n return 0.0\n else:\n return score", "def test4(self):\n tgts = [ ('CCC(O)C(=O)O',\n ('CCC','OCC','OCC=O','OCCO','CCCC','OC=O','CC(O)C')),\n ]\n for smi,matches in tgts:\n m = Chem.MolFromSmiles(smi)\n fp1 = Chem.RDKFingerprint(m,2,7,9192,4,0)\n obs = fp1.GetOnBits()\n for match in matches:\n m2 = Chem.MolFromSmiles(match)\n fp2 = Chem.RDKFingerprint(m2,2,7,9192,4,0)\n v1,v2 = DataStructs.OnBitProjSimilarity(fp2,fp1)\n assert feq(v1,1.0000),'substruct %s not properly contained in %s'%(match,smi)", "def compare_nodes(G,all_match_pairs,match_pair,traversed,node1,node2, ports_weight):\n logger.debug(f\"comparing {node1},{node2}, traversed {traversed}\")\n nbrs1 = sorted(set(G.neighbors(node1)) - set(traversed))\n #remove dummies\n nbrs1 = sorted(set([nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=7]))\n nbrs2 = sorted(set(G.neighbors(node2)) - set(traversed))\n #remove dummies\n nbrs2 = sorted(set([nbr for nbr in nbrs2 if G.get_edge_data(node2, nbr)['weight'] !=7]))\n logger.debug(f\"node1:{node1},property: {G.nodes[node1]},neigbors1: {nbrs1}\")\n logger.debug(f\"node2:{node2},property: {G.nodes[node2]},neigbors2: {nbrs2}\")\n if not nbrs1 or not nbrs2:\n if compare_two_nodes(G, node1, node2, ports_weight):\n match_pair[node1] = node2\n logger.debug(f\"no new neihbours, returning recursion {match_pair}\")\n return\n elif len(nbrs1)> 10:\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n 
match_pair[\"start_point\"]=[node1,node2]\n logger.debug(f\"skipping high fanout nets due to large computation, {node1} {nbrs1}\")\n traversed.append(node1)\n return\n elif len(nbrs2)> 10:\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n traversed.append(node2)\n logger.debug(f\"skipping high fanout nets due to large computation, {node2} {nbrs2}\")\n return\n\n if node1 == node2:\n if node1 in match_pair.keys() or node1 in match_pair.values():\n logger.debug(\"avoid existing pair wise symmetry\")\n return\n logger.debug(f\"single node {node1}, nbrs {nbrs1}, nbr_weight {[G.get_edge_data(node1,nbr) for nbr in nbrs1]}\")\n SD_nbrs= [nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=2]\n ## TBD: filter based on primitive constraints\n ## Right now will try to figure out S/D paths\n if len(SD_nbrs) ==0:\n logger.debug(f\"No SD paths found to traverse\")\n match_pair[node1]=node1\n elif len(SD_nbrs) ==1:\n logger.debug(f\"traversing single S/D path {SD_nbrs}\")\n match_pair[node1]=node1\n traversed.append(node1)\n compare_nodes(G,all_match_pairs,match_pair,traversed,SD_nbrs[0],SD_nbrs[0],ports_weight)\n else:\n logger.debug(f\" multiple nodes diverging {SD_nbrs}\")\n logger.debug(f\"nbr weights: {SD_nbrs} {[G.get_edge_data(node1, nbr)['weight'] for nbr in SD_nbrs ]}\")\n match_pair[node1]=node1\n traversed.append(node1)\n new_sp=sorted(set(SD_nbrs)-set(traversed))\n all_match_pairs_local={}\n for nbr1,nbr2 in combinations(new_sp, 2):\n logger.debug(f\"recursive pair call from single branch {nbr1} {nbr2}\")\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n if new_pair:\n #new_pair[nbr1]=nbr2\n all_match_pairs_local[nbr1+'_'+nbr2] = new_pair\n all_match_pairs_local={k: v for k, v in all_match_pairs_local.items() if len(v)>0}\n if len(all_match_pairs_local)==1:\n match_pair.update( all_match_pairs_local[list(all_match_pairs_local.keys())[0]])\n logger.debug(f\"found inline pair: {pprint.pformat(match_pair, indent=4)}\")\n else:\n for nbr1 in new_sp:\n if (nbr1+'_'+nbr1 not in all_match_pairs.keys()):\n logger.debug(f\"recursive single branch call from single branch {nbr1} {nbr1}\")\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr1,ports_weight)\n #filtering multiple axis of symmetries with same block, ideally they should be handled by array generation\n if new_pair:\n all_match_pairs[nbr1+'_'+nbr1] = new_pair\n logger.debug(f\"updating match pairs: {pprint.pformat(all_match_pairs, indent=4)}\")\n\n\n elif nbrs1 == nbrs2:\n logger.debug(f\"traversing converging branch\")\n match_pair[node1]=node2\n traversed+=[node1,node2]\n nbrs1=sorted(set(nbrs1)-set([node1,node2]))\n logger.debug(f\"all non traversed neighbours: {nbrs1}\")\n if len(nbrs1)==1:\n nbr1=nbr2=nbrs1[0]\n logger.debug(f\"keeping single converged branch inline {nbr1} {nbr2}\")\n compare_nodes(G,all_match_pairs,match_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n else:\n for nbr1,nbr2 in combinations_with_replacement(nbrs1,2):\n logger.debug(f\"recursive call from converged branch {nbr1} {nbr2}\")\n if nbr1+'_'+nbr2 not in all_match_pairs.keys():\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n #filtering multiple axis of symmetries with same block, ideally they should be handled by array generation\n if new_pair:\n all_match_pairs[nbr1+'_'+nbr2] = new_pair\n logger.debug(f\"updating match pairs: 
{pprint.pformat(all_match_pairs, indent=4)}\")\n\n\n elif compare_two_nodes(G,node1,node2,ports_weight):\n nbrs1 = sorted(set([nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=2]))\n nbrs2 = sorted(set([nbr for nbr in nbrs2 if G.get_edge_data(node2, nbr)['weight'] !=2]))\n match_pair[node1]=node2\n traversed+=[node1,node2]\n logger.debug(f\"Traversing parallel branches from {node1},{node2} {nbrs1}, {nbrs2}\")\n nbrs1_wt = [G.get_edge_data(node1, nbr)['weight'] for nbr in nbrs1]\n nbrs2_wt = [G.get_edge_data(node2, nbr)['weight'] for nbr in nbrs2]\n unique_match=find_unique_matching_branches(G,nbrs1,nbrs2,ports_weight)\n if len(nbrs1)==0 or len(nbrs2)==0:\n logger.debug(f\"no new SD neihbours, returning recursion {match_pair}\")\n elif len(nbrs1) ==1 and len(nbrs2)==1:\n logger.debug(f\"traversing binary branch\")\n compare_nodes(G,all_match_pairs,match_pair,traversed,nbrs1.pop(),nbrs2.pop(),ports_weight)\n elif unique_match:\n logger.debug(f'traversing unique matches {unique_match}')\n match_pair[node1]=node2\n traversed+=[node1,node2]\n for nbr1,nbr2 in unique_match.items():\n logger.debug(f\"recursive call from binary {node1}:{node2} to {nbr1}:{nbr2}\")\n compare_nodes(G,all_match_pairs,match_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n elif len(nbrs1_wt)>len(set(nbrs1_wt))>1 and len(nbrs2_wt)>len(set(nbrs2_wt))>1:\n logger.debug(f\"setting new start points {node1} {node2}\")\n match_pair[node1]=node2\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n else:\n match_pair = {}\n logger.debug(f\"end all traversal from binary branch {node1} {node2}\")\n\n else:\n match_pair = {}\n logger.debug(f\"end of recursion branch, matches {match_pair}\")", "def test5(self):\n tgts = [ ('CCC(O)C(=O)O',\n ('O[CH-][CH2-]','O[CH-][C-]=O')),\n ]\n for smi,matches in tgts:\n m = Chem.MolFromSmiles(smi)\n fp1 = Chem.RDKFingerprint(m,2,7,9192,4,1)\n obs = fp1.GetOnBits()\n for match in matches:\n m2 = Chem.MolFromSmiles(match)\n fp2 = Chem.RDKFingerprint(m2,2,7,9192,4,1)\n v1,v2 = DataStructs.OnBitProjSimilarity(fp2,fp1)\n assert feq(v1,1.0000),'substruct %s not properly contained in %s'%(match,smi)", "def sim(w1, w2, tree):\n w1_node = tree.search(w1)\n w2_node = tree.search(w2)\n\n if w1_node is None or w2_node is None:\n return\n\n sum_of_w1 = 0\n sum_of_w2 = 0\n dot_product = 0\n for i in range(len(w1_node.data)):\n sum_of_vector = float(w1_node.data[i]) * float(w2_node.data[i])\n\n sum_of_w1 += (float(w1_node.data[i]) * float(w1_node.data[i]))\n sum_of_w2 += (float(w2_node.data[i]) * float(w2_node.data[i]))\n\n dot_product += sum_of_vector\n return dot_product / (sqrt(sum_of_w1) * sqrt(sum_of_w2))", "def test_tree_intersection_on_challenge_example(first_example, second_example):\n expected = [500, 350, 200, 175, 160, 125, 100]\n actual = tree_intersection(first_example, second_example)\n assert expected == actual", "def compute_similarity(self, seq_node, **kwargs):\n pass", "def match(*args, current=None):\n # current is the original edge and clone is the change\n # this function should only be getting nodes with the same edges\n # if I change this to assume nodes of the same edge attr then I can\n # send this function \"equivalent edges\"\n scores = []\n for clone in args:\n if current.edge_attribute == clone.edge_attribute:\n source_condit = (\n clone.source.original_id == current.source.id\n or clone.source.id == current.source.id\n )\n target_condit = (\n clone.target.original_id == 
current.target.id\n or clone.target.id == current.target.id\n )\n if source_condit and target_condit:\n scores.append(2)\n return scores\n elif source_condit or target_condit:\n\n scores.append(1)\n else:\n # TODO: check subgraph/call is_similar\n # if subgraph is isomorphic then return 2\n scores.append(0)\n elif len(current.edge_attribute) > len(clone.edge_attribute):\n scores.append(-1)\n else: # edge attribute of current is shorter than of clone\n scores.append(-2)\n return scores", "def test_isomorphism_match(data):\n\n reference = data.draw(ISO_BUILDER)\n nodes = data.draw(st.sets(st.sampled_from(list(reference.nodes)),\n max_size=len(reference)))\n graph = reference.subgraph(nodes)\n\n note((\"Reference nodes\", reference.nodes(data=True)))\n note((\"Reference edges\", reference.edges))\n note((\"Graph nodes\", graph.nodes(data=True)))\n note((\"Graph edges\", graph.edges))\n\n node_match = nx.isomorphism.categorical_node_match('element', None)\n matcher = nx.isomorphism.GraphMatcher(reference, graph, node_match=node_match)\n expected = make_into_set(matcher.subgraph_isomorphisms_iter())\n found = make_into_set(vermouth.graph_utils.isomorphism(reference, graph))\n\n note((\"Found\", found))\n note((\"Expected\", expected))\n\n if not expected:\n event(\"Not subgraphs\")\n if found == expected:\n event(\"Exact match\")\n\n assert found <= expected", "def test_tree_support(self):\r\n master_tree = parse_newick('((a:2,b:3)ab:2,(c:1,d:2)cd:7)rt;')\r\n \"\"\"\r\n /-------.5 /-a\r\n ---1| \\-b\r\n \\------.5 /-c\r\n \\-d\r\n \"\"\"\r\n t2 = parse_newick('((a:2,b:3,c:33)ho:2,d:7);') # abc are siblings\r\n\r\n tc.tree_support(master_tree, t2)\r\n assert_almost_equal(\r\n master_tree.getNodeMatchingName('rt').bootstrap_support, 1.0)", "def similarity_euclid(matrix, business1, business2):\n selected_features = matrix.loc[business1].notna() & matrix.loc[business2].notna()\n\n if not selected_features.any():\n return 0\n\n features1 = matrix.loc[business1][selected_features]\n features2 = matrix.loc[business2][selected_features]\n distance = math.sqrt(((features1 - features2) ** 2).sum())\n\n if distance is np.nan:\n return 0\n\n return 1 / (1 + distance)", "def nodes_ratio(data: Data, position_against_soma=None, node_types=None, filter_layers=None):\n soma = data.morphology.get_soma()\n if position_against_soma=='above':\n criterion=lambda nod:((nod['type'] in node_types) if node_types is not None else True) and nod['y']<soma['y']\n elif position_against_soma=='below':\n criterion=lambda nod:((nod['type'] in node_types) if node_types is not None else True) and nod['y']>soma['y']\n else:\n criterion=lambda nod:nod['type'] in node_types if node_types is not None else True\n num_type_nodes = data.morphology.get_node_by_types(node_types)\n if filter_layers and isinstance(filter_layers, list):\n result = {}\n for layer in filter_layers:\n filter_layer_depth = data.reference_layer_depths.get(layer)\n y_min, y_max, is_scale = filter_layer_depth.pia_side, filter_layer_depth.wm_side, filter_layer_depth.scale\n if is_scale:\n selected_nodes = data.morphology.filter_nodes(lambda nod: criterion(nod) and y_min<nod['y']<y_max)\n else:\n selected_nodes = data.morphology.filter_nodes(lambda nod: criterion(nod) and nod['y']>y_min)\n result[layer]=len(selected_nodes)/len(num_type_nodes)\n return result\n else:\n selected_nodes = data.morphology.filter_nodes(criterion)\n return len(selected_nodes)/len(num_type_nodes)", "def find_similarities(data):\n\n good_one = copy.deepcopy(data[\"no_vpn\"])\n del 
data[\"no_vpn\"]\n\n all_minhashes = []\n\n # put everything easy to work with\n for extension in data:\n for webpage in data[extension]:\n for f in data[extension][webpage]:\n path = (extension, webpage, f)\n all_minhashes.append((path, data[extension][webpage][f]))\n aux = copy.deepcopy(all_minhashes)\n path_data, data = aux.pop(0)\n all_minhashes.pop(0)\n\n result = {}\n while path_data and data:\n for file in good_one[path_data[1]]:\n # print(\"Jaccard among %s and %s\" % (path_data[2], file))\n path_to_file = \"/\".join((\"no_vpn\", path_data[1], file))\n similarity = data[\"hash\"].jaccard(good_one[path_data[1]][file][\"hash\"])\n try:\n result[\"/\".join(path_data)].append((path_to_file, similarity))\n except KeyError:\n result[\"/\".join(path_data)] = [\n (path_to_file, similarity),\n ]\n\n all_minhashes.append((path_data, data))\n try:\n path_data, data = aux.pop(0)\n except IndexError:\n path_data = data = None\n\n return result", "def test_calc_shared_phylotypes_pairwise(self):\r\n\r\n #self.assertEqual(_calc_shared_phylotypes_pairwise(self.otu_table, 0, 0), 5)\r\n #self.assertEqual(_calc_shared_phylotypes_pairwise(self.otu_table, 0, 1), 2)\r\n #self.assertEqual(_calc_shared_phylotypes_pairwise(self.otu_table, 0, 2), 3)\r\n #self.assertEqual(_calc_shared_phylotypes_pairwise(self.otu_table, 2, 2), 3)\r\n self.assertEqual(\r\n _calc_shared_phylotypes_pairwise(\r\n self.otu_table,\r\n 'S1',\r\n 'S1'),\r\n 5)\r\n self.assertEqual(\r\n _calc_shared_phylotypes_pairwise(\r\n self.otu_table,\r\n 'S1',\r\n 'S2'),\r\n 2)\r\n self.assertEqual(\r\n _calc_shared_phylotypes_pairwise(\r\n self.otu_table,\r\n 'S1',\r\n 'S3'),\r\n 3)\r\n self.assertEqual(\r\n _calc_shared_phylotypes_pairwise(\r\n self.otu_table,\r\n 'S3',\r\n 'S3'),\r\n 3)", "def object_similarity(obj_1, obj_2):\n w_list = []\n obj_1_bag_size = sum(obj_1['bag_of_words'].values())\n obj_2_bag_size = sum(obj_2['bag_of_words'].values())\n obj_1_set = obj_1['set_of_words']\n obj_2_set = obj_2['set_of_words']\n obj_1_diff_2_set = obj_1_set - obj_2_set\n obj_2_diff_1_set = obj_2_set - obj_1_set\n w_list.append(weight_calculator(obj_1_bag_size, obj_2_bag_size))\n w_list.append(weight_calculator(len(obj_1_set), len(obj_2_set)))\n w_list.append(weight_calculator(len(obj_1_diff_2_set),\n len(obj_2_diff_1_set)))\n if 'total_lines' in obj_1.keys() and 'total_lines' in obj_2.keys():\n w_list.append(weight_calculator(obj_1['total_lines'],\n obj_2['total_lines']))\n if 'total_conversations' in obj_1.keys() and 'total_conversations' in obj_2.keys():\n w_list.append(weight_calculator(obj_1['total_conversations'],\n obj_2['total_conversations']))\n # Added as observations of genre -> rating relations\n if 'metadata' in obj_1.keys() and 'metadata' in obj_2.keys():\n w_list.append(weight_calculator(eval(obj_1['metadata']['genres']),\n eval(obj_2['metadata']['genres'])))\n return mean(w_list)", "def _molge(x, y):\n if x is None or y is None:\n return False\n if hasattr(x, '_substructfp'):\n if not hasattr(y, '_substructfp'):\n y._substructfp = _fingerprinter(y, True)\n if not DataStructs.AllProbeBitsMatch(y._substructfp, x._substructfp):\n return False\n match = x.GetSubstructMatch(y)\n x.__sssAtoms = []\n if match:\n if highlightSubstructures:\n x.__sssAtoms = list(match)\n return True\n else:\n return False", "def sample_cond_on_subtree_nodes(new, tree, subtree_nodes, subtree_edges, subtree_adjlist):\n new_separators = {}\n new_cliques = set()\n old_cliques = set()\n subtree_order = len(subtree_nodes)\n #print(\"subtree nodes:\" + 
str(subtree_nodes))\n\n if subtree_order == 0:\n # If the tree, tree is empty (n isolated node),\n # add random neighbor.\n c = frozenset([new])\n new_cliques.add(c)\n #c2 = tree.nodes()[0] # nx 1.9\n c2 = list(tree.nodes())[0] # GraphTool\n #c2 = list(tree.nodes)[0] # nx 2.1\n tree.add_node(c, label=tuple([new]), color=\"red\")\n tree.add_edge(c, c2, label=tuple([]))\n\n sep = frozenset()\n #tree.fix_graph()\n trilearn.graph.junction_tree.randomize_at_sep(tree, sep)\n\n new_separators[sep] = [(c, c2)]\n # tree TODO: the actual value for the key is not needed.\n P = {c: np.exp(-tree.log_nu(sep))}\n return (old_cliques, new_cliques, new_separators, P, {c: 1.0})\n\n S = {c: set() for c in subtree_nodes}\n M = {c: set() for c in subtree_nodes}\n for c in S:\n for neig in subtree_adjlist[c]:\n #S[c] = S[c] | (c & neig)\n S[c] |= (c & neig)\n RM = {c: c - S[c] for c in S}\n C = {c: set() for c in subtree_nodes}\n P = {}\n N_S = {c: set() for c in subtree_nodes}\n\n sepCondition = {}\n for c in RM:\n sepCondition[c] = len({neig for neig in subtree_adjlist[c] if\n S[c] == neig & c}) > 0 or len(subtree_adjlist) == 1\n\n if sepCondition[c] is True:\n tmp = np.array(list(RM[c]))\n first_node = []\n if len(tmp) > 0:\n # Connect to one node\n first_ind = np.random.randint(len(tmp))\n first_node = tmp[[first_ind]]\n tmp = np.delete(tmp, first_ind)\n\n rest = set()\n if len(tmp) > 0:\n # Connect to the rest of the nodes if there are any left\n rest = aux.random_subset(tmp)\n M[c] = frozenset(rest | set(first_node))\n else:\n M[c] = frozenset(aux.random_subset(RM[c]))\n\n # Create the new cliques\n for clique in M:\n C[clique] = frozenset(M[clique] | S[clique] | {new})\n new_cliques.add(C[clique])\n\n # Get the neighbor set of each c which can be moved to C[c]\n for clique in subtree_nodes:\n N_S[clique] = {neig for neig in tree.neighbors(clique)\n if neig & clique <= C[clique] and neig not in subtree_nodes}\n\n # Add the new cliques\n #for c in subtree_nodes:\n # tree.add_node(C[c], label=str(tuple(C[c])), color=\"red\")\n tree.add_nodes_from([C[c] for c in subtree_nodes])\n\n # Construct and add the new edges between the new cliques,\n # replicating the subtree\n new_subtree_edges = []\n for e in subtree_edges:\n sep = C[e[0]] & C[e[1]]\n if not sep in new_separators:\n new_separators[sep] = []\n new_separators[sep].append((C[e[0]], C[e[1]]))\n #new_subtree_edges.append((C[e[0]], C[e[1]]))\n tree.add_edge(C[e[0]], C[e[1]])\n # tree.add_edges_from(new_subtree_edges)\n\n # Connect cliques in the subtree to the new cliques\n for c in subtree_nodes:\n # Move the neighbors of a swallowed node to the swallowing node\n # Remove the swallowed node\n\n if C[c] - {new} == c:\n # If connecting to all nodes in a clique (the node should be replaces instead)\n for neig in tree.neighbors(c):\n if neig not in subtree_nodes:\n tree.add_edge(C[c], neig)#, label=lab)\n\n tree.remove_node(c)\n old_cliques.add(c)\n else: # If not connecting to every node in a clique\n sep = C[c] & c\n if not sep in new_separators:\n new_separators[sep] = []\n new_separators[sep].append((C[c], c))\n\n #print \"adding edge: \" + str((C[c], c))\n tree.add_edge(C[c], c)\n # Pick random subset of neighbors intersecting with subset of S U M\n\n N = aux.random_subset(N_S[c])\n #for neig in N:\n # tree.add_edge(C[c], neig)\n\n tree.add_edges_from([(C[c], neig) for neig in N])\n tree.remove_edges_from([(c, neig) for neig in N])\n\n # Compute probabilities\n N = {}\n for c in subtree_nodes:\n if sepCondition[c] is False:\n # Every internal node in c 
belongs to a separator\n P[c] = np.power(2.0, - len(RM[c]))\n #P[c] = Fraction(1, 2 ** len(RM[c]))\n if not len(c) + 1 == len(C[c]):\n N[c] = np.power(2.0, - len(N_S[c]))\n #N[c] = Fraction(1, 2 ** len(N_S[c]))\n else:\n N[c] = 1.0\n #N[c] = Fraction(1,1)\n else:\n P[c] = 1.0\n #P[c] = Fraction(1, 1)\n N[c] = 1.0\n #N[c] = Fraction(1, 1)\n if len(RM[c]) > 1:\n P[c] = 1.0 / len(RM[c])\n #P[c] = Fraction(1, len(RM[c]))\n P[c] *= np.power(2.0, - (len(RM[c]) - 1.0)) * len(M[c])\n #P[c] *= Fraction(len(M[c]), 2 ** (len(RM[c]) - 1.0)) \n if not len(c) + 1 == len(C[c]): # c not swallowed by C[c]\n #N[c] = np.power(2.0, - len(N_S[c]))\n N[c] = Fraction(1, 2 ** len(N_S[c]))\n\n # Remove the edges in tree\n tree.remove_edges_from(subtree_edges)\n # Todo: This will introduce a bug if we instead replace a node.\n return (old_cliques, new_cliques, new_separators, P, N)", "def similarity(self, e1, e2):\n\t\tpass", "def test_calc_shared_phylotypes(self):\r\n\r\n observed = calc_shared_phylotypes(self.biom_as_string)\r\n expected = \"\"\"\\tS1\\tS2\\tS3\r\nS1\\t5\\t2\\t3\r\nS2\\t2\\t2\\t1\r\nS3\\t3\\t1\\t3\\n\"\"\"\r\n self.assertEqual(observed, expected)", "def test_ontology_similarity_jaccard(ontology):\n\tassert ontology.similarity_jaccard([\"TO:0000001\"],[\"TO:0000002\"], inherited=False) == 1/2\n\tassert ontology.similarity_jaccard([\"TO:0000001\"],[\"TO:0000003\"], inherited=False) == 1/2\n\tassert ontology.similarity_jaccard([\"TO:0000002\"],[\"TO:0000003\"], inherited=False) == 1/3\n\tassert ontology.similarity_jaccard([\"TO:0000003\"],[\"TO:0000005\"], inherited=False) == 2/5\n\tassert ontology.similarity_jaccard([\"TO:0000007\"],[\"TO:0000008\"], inherited=False) == 2/3\n\tassert ontology.similarity_jaccard([\"TO:0000005\"],[\"TO:0000009\"], inherited=False) == 1/8\n\n\tassert ontology.similarity_jaccard([\"TO:0000001\"],[\"TO:0000002\",\"TO:0000001\"], inherited=False) == 1/2\n\tassert ontology.similarity_jaccard([\"TO:0000003\"],[\"TO:0000001\",\"TO:0000009\"], inherited=False) == 1/5\n\tassert ontology.similarity_jaccard([\"TO:0000002\"],[\"TO:0000003\",\"TO:0000002\"], inherited=False) == 2/3 \n\tassert ontology.similarity_jaccard([\"TO:0000003\"],[\"TO:0000005\",\"TO:0000002\"], inherited=False) == 2/5\n\tassert ontology.similarity_jaccard([\"TO:0000008\"],[\"TO:0000008\",\"TO:0000007\"], inherited=False) == 3/3\n\tassert ontology.similarity_jaccard([\"TO:0000005\"],[\"TO:0000009\",\"TO:0000002\"], inherited=False) == 2/8", "def struct_sim(image1: np.ndarray, image2: np.ndarray, **kwargs) -> np.ndarray:\n n, h, w = image1.shape\n assert (n, h, w) == image2.shape\n ssim = np.zeros(n)\n for ii in range(n):\n ssim[ii] = structural_similarity(image1[ii], image2[ii], **kwargs)\n return ssim", "def parent_children_match(parent_snap_index, child_parent_snap_index):\n # too slow\n #parent_child_start_time = time.time()\n #parent_index_dict = dict( \n # (k, i) \n # for i, k in enumerate(parent_snap_index) \n # )\n #child_index_dict = dict( \n # (k, i) \n # for i, k in enumerate(child_parent_snap_index) \n # )\n #parent_child_meet = np.intersect1d( parent_snap_index, child_parent_snap_index ) \n\n #parents = np.array([\n # parent_index_dict[k] for k in parent_child_meet\n # ])\n #children = np.array([\n # child_index_dict[k] for k in parent_child_meet\n # ])\n #print 'Parent Child match takes ', time.time() - parent_child_start_time, ' seconds'\n \n #parent_child_start_time = time.time()\n parent_sort_index = np.argsort(parent_snap_index)\n child_sort_index = np.argsort(child_parent_snap_index)\n 
sorted_parent_snap_index = parent_snap_index[parent_sort_index]\n sorted_child_parent_snap_index = child_parent_snap_index[child_sort_index]\n\n parent_child_in1d = np.in1d(sorted_parent_snap_index, sorted_child_parent_snap_index)\n child_parent_in1d = np.in1d(sorted_child_parent_snap_index, sorted_parent_snap_index)\n \n parents = parent_sort_index[parent_child_in1d]\n children = child_sort_index[child_parent_in1d]\n #print 'Parent Child match takes ', time.time() - parent_child_start_time, ' seconds'\n\n return parents, children", "def test_similarity_measure_size_compatibility():\n\n patch1 = torch.randn(size=(4, 6, 2))\n patch2 = torch.randn(size=(4, 6, 2))\n\n ssd_similarity_measure(patch1, patch2)\n sad_similarity_measure(patch1, patch2)\n assert True # just check if the ssd calculation was successfull\n\n patch1 = torch.randn(size=(4, 3))\n patch2 = torch.randn(size=(4, 3))\n\n ssd_similarity_measure(patch1, patch2)\n sad_similarity_measure(patch1, patch2)\n assert True # just check if the ssd calculation was successfull\n\n patch1 = torch.randn(size=(5,))\n patch2 = torch.randn(size=(5,))\n\n ssd_similarity_measure(patch1, patch2)\n sad_similarity_measure(patch1, patch2)\n assert True # just check if the ssd calculation was successfull\n\n patch1 = torch.randn(size=(3, 7, 2, 4))\n patch2 = torch.randn(size=(3, 7, 2, 4))\n\n ssd_similarity_measure(patch1, patch2)\n sad_similarity_measure(patch1, patch2)\n assert True # just check if the ssd calculation was successful", "def similarity(self, other, ignore_abundance=False, downsample=False):\n return self._methodcall(lib.kmerminhash_similarity,\n other._get_objptr(),\n ignore_abundance, downsample)", "def compare_trees(tree1, tree2):\n \tresponse = {}\n \tstart_time = time.time()\n \ttry:\t\n \t\ttns = dendropy.TaxonNamespace() \t\n \t\n \t\ttree_obj1 = dendropy.Tree.get(data=tree1, schema=\"newick\",taxon_namespace=tns)\n \t\ttree_obj2 = dendropy.Tree.get(data=tree2, schema=\"newick\",taxon_namespace=tns)\n\n \t\ttree_obj1.encode_bipartitions()\n \t\ttree_obj2.encode_bipartitions()\n\n \t\t#-----------------------------------------------------------\n \t\t#This method returns the symmetric distance between two trees. \n \t\t#The symmetric distance between two trees is the sum of the number of splits found in one of the trees but not the other. \n \t\t#It is common to see this statistic called the Robinson-Foulds distance\n\n \t\tareSame = True if treecompare.symmetric_difference(tree_obj1, tree_obj2) == 0 else False\n \t\tstatus = 200\n \t\tmessage = \"Success\"\n \t\tresponse['are_same_tree'] = areSame\n \n \texcept Exception, e:\n \t\tif \"Incomplete or improperly-terminated tree statement\" in str(e): #invalid: \"((A,B),C,D));\" valid: ((A,B),(C,D)); \n \t\t\tmessage = \"NewickReaderIncompleteTreeStatementError: \" + str(e)\n \t \t\tstatus = 400\n \t\telif \"Unbalanced parentheses at tree statement\" in str(e): #invalid: \"((A,B),(C,D);\" valid: ((A,B),(C,D)); \n \t\t\tmessage = \"NewickReaderMalformedStatementError: \"+str(e) \n \t \t\tstatus = 400\n \t\telif \"Multiple occurrences of the same taxa\" in str(e): #invalid: \"((A,B),(C,C));\" valid: ((A,B),(C,D));\n \t\t\tmessage = \"NewickReaderDuplicateTaxonError: \"+str(e)\n \t \t\tstatus = 400\n \t\telif \"Unexpected end of stream\" in str(e): # invalid: \"((A,B),(C,D))\" valid: ((A,B),(C,D));\n \t\t\tmessage = \"UnexpectedEndOfStreamError: \"+str(e)\n \t \t\tstatus = 400\n \t\telse:\n \t\t\tmessage = \"Error: Failed to compare trees. 
\"+str(e)\n \t \t\tstatus = 500\n \t \t\n \tresponse['status_code'] = status\n \tresponse['message'] = message\n\n \tend_time = time.time()\n \texecution_time = end_time-start_time\n #service result creation time\n \tcreation_time = datetime.datetime.now().isoformat()\n \tmeta_data = {'creation_time': creation_time, 'execution_time': float('{:4.2f}'.format(execution_time)), 'source_urls':[\"http://dendropy.org/library/treecompare.html#module-dendropy.calculate.treecompare\"] }\n\n \tresponse['meta_data'] = meta_data\n \tprint response\n \treturn response", "def test_check_tree_exact_match(self):\r\n\r\n fasta_labels = ['seq1_1', 'seq1_2', 'seq2_3', 'seq3_4']\r\n\r\n actual_subset_results = check_tree_exact_match(fasta_labels,\r\n self.sample_tree_3tips_fp)\r\n\r\n # Should find all and give True, True result\r\n\r\n self.assertEqual(actual_subset_results, [True, True])\r\n\r\n # Should get tips not found in fasta labels with 5 tip tree\r\n\r\n fasta_labels = ['seq1_1', 'seq1_2', 'seq2_3', 'seq3_4']\r\n\r\n actual_subset_results = check_tree_exact_match(fasta_labels,\r\n self.sample_tree_5tips_fp)\r\n\r\n # Should find all and give True result\r\n\r\n self.assertEqual(actual_subset_results, [True, ['seq5', 'seq4']])\r\n\r\n # Change two of the fasta labels to not match tree tips\r\n\r\n fasta_labels = ['seq1_1', 'seqX_2', 'seq2_3', 'seqY_4']\r\n\r\n actual_subset_results = check_tree_exact_match(fasta_labels,\r\n self.sample_tree_5tips_fp)\r\n\r\n # Should find seqX and seqY as not being a subset\r\n\r\n self.assertEqual(actual_subset_results, [['seqX', 'seqY'],\r\n ['seq3', 'seq5', 'seq4']])" ]
[ "0.68818945", "0.6238286", "0.62245893", "0.60398227", "0.5757258", "0.5752473", "0.5651702", "0.56473386", "0.5640161", "0.5596965", "0.5589498", "0.5548749", "0.5539112", "0.55268145", "0.5466854", "0.54479176", "0.54473263", "0.5358903", "0.5344205", "0.53412336", "0.53407085", "0.5338473", "0.53222066", "0.53098476", "0.53064114", "0.5305555", "0.5295903", "0.5279927", "0.5272699", "0.52537775" ]
0.66463166
1
Calculate graph-based IAA measure
def graph_iaa(adj_matrix1, shortest_path_dist1, adj_matrix2, shortest_path_dist2, mode):
    n_nodes = len(adj_matrix1)
    n_edges_1 = np.count_nonzero(adj_matrix1)
    n_edges_2 = np.count_nonzero(adj_matrix2)
    sum_of_inverse_1 = 0.0
    sum_of_inverse_2 = 0.0
    for i in range(n_nodes):
        for j in range(n_nodes):
            if adj_matrix1[i][j] != NO_REL_SYMBOL:
                sum_of_inverse_1 += 1.0 / shortest_path_dist2[i][j]
            if adj_matrix2[i][j] != NO_REL_SYMBOL:
                sum_of_inverse_2 += 1.0 / shortest_path_dist1[i][j]
    sum_of_inverse_1 /= float(n_edges_1)
    sum_of_inverse_2 /= float(n_edges_2)
    if mode == "avg":
        return (sum_of_inverse_1 + sum_of_inverse_2) / 2.0
    elif mode == "f1":
        return (2.0 * sum_of_inverse_1 * sum_of_inverse_2) / (sum_of_inverse_1 + sum_of_inverse_2)
    else:
        return None
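A minimal usage sketch for the function above, assuming numpy is imported as np, NO_REL_SYMBOL is 0 (no relation), and that each shortest-path matrix holds positive hop distances for every pair the other annotator marked; the toy values are illustrative only, not taken from the dataset.

import numpy as np

NO_REL_SYMBOL = 0  # assumed "no relation" marker

# annotator 1 marked edges 0->1 and 1->2; annotator 2 marked 0->1 and 0->2
adj1 = np.array([[0, 1, 0], [0, 0, 1], [0, 0, 0]])
adj2 = np.array([[0, 1, 1], [0, 0, 0], [0, 0, 0]])

# illustrative shortest-path hop distances in each annotator's graph
# (unreachable pairs are given a large finite distance to avoid division errors)
dist1 = np.array([[1, 1, 2], [9, 1, 1], [9, 9, 1]])
dist2 = np.array([[1, 1, 1], [9, 1, 9], [9, 9, 1]])

print(graph_iaa(adj1, dist1, adj2, dist2, mode="avg"))  # mean of the two directed agreement scores
print(graph_iaa(adj1, dist1, adj2, dist2, mode="f1"))   # harmonic mean of the two scores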
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_iam(a_1, a_2, a_3, a_4, a_5, a_6, aoi, loss_method):\n if loss_method == 'Janotte':\n iam = 1 - a_1 * abs(aoi) - a_2 * aoi**2\n\n if loss_method == 'Andasol':\n iam = (1 - a_1 * abs(aoi) - a_2 * aoi**2 - a_3 * aoi**3 - a_4 * aoi**4\n - a_5 * aoi**5 - a_6 * aoi**6)\n return iam", "def test_am_ia(Simulator, plt, seed, rng):\n\n d = 64\n vocab = Vocabulary(d, pointer_gen=rng)\n vocab.populate('A; B; C; D')\n\n def input_func(t):\n if t < 0.2:\n return 'A + 0.8 * B'\n else:\n return '0.6 * A + B'\n\n with spa.Network('model', seed=seed) as m:\n m.am = IAAssocMem(input_vocab=vocab, mapping=vocab.keys())\n m.stimulus = spa.Transcode(input_func, output_vocab=vocab)\n m.reset = nengo.Node(lambda t: 0.2 < t < 0.4)\n\n m.stimulus >> m.am\n nengo.Connection(m.reset, m.am.input_reset, synapse=0.1)\n\n in_p = nengo.Probe(m.am.input)\n reset_p = nengo.Probe(m.reset)\n out_p = nengo.Probe(m.am.output, synapse=0.03)\n\n with nengo.Simulator(m) as sim:\n sim.run(0.7)\n t = sim.trange()\n more_a = (t > 0.15) & (t < 0.2)\n more_b = t > 0.65\n\n plt.subplot(2, 1, 1)\n plt.plot(t, similarity(sim.data[in_p], vocab))\n plt.plot(t, sim.data[reset_p], c='k', linestyle='--')\n plt.ylabel(\"Input\")\n plt.ylim(top=1.1)\n plt.subplot(2, 1, 2)\n plt.plot(t, similarity(sim.data[out_p], vocab))\n plt.plot(t[more_a], np.ones(t.shape)[more_a] * 0.9, c='tab:blue', lw=2)\n plt.plot(t[more_b], np.ones(t.shape)[more_b] * 0.9, c='tab:orange', lw=2)\n plt.ylabel(\"Output\")\n\n assert_sp_close(t, sim.data[out_p], vocab['A'], skip=0.15, duration=0.05)\n assert_sp_close(t, sim.data[out_p], vocab['B'], skip=0.65, duration=0.05)", "def DA_AUC(data):\r\n xs = []\r\n ys =[]\r\n for row in data:\r\n xs.append(row[0])\r\n ys.append(row[2]) #DA0 for now idrk\r\n print(auc(xs,ys))", "def get_metrics(cfg, model, X_anchor, y_anchor, X_gal, y_gal, annoy_index, vec_dim):\n rank10_acc = 0\n rank5_acc = 0\n rank1_acc = 0\n avg_acc = 0\n vote_res = 0\n\n l2 = []\n for anchor in range(0, len(X_anchor)):\n res = get_result(get_image_features(cfg, model, X_anchor[anchor]), annoy_index)\n vote = defaultdict(int)\n # Accuracy\n correct = 0\n for i in res[:10]:\n vote[y_gal[i]] += 1\n\n max_key = max(vote, key=vote.get)\n if max_key == y_anchor[anchor]:\n vote_res += 1\n \n\n for recomm in res[:10]:\n if y_gal[recomm] == y_anchor[anchor]:\n correct += 1 \n\n avg_acc += correct/len(res)\n\n # Mean Average Precision\n l1 = []\n for recomm in res[:10]:\n if y_gal[recomm] == y_anchor[anchor]:\n correct += 1\n l1.append(1)\n else:\n l1.append(0)\n l2.append(l1) \n\n # Rank10 Accuracy\n for each_val in res[:10]:\n if y_gal[each_val] == y_anchor[anchor]:\n rank10_acc += 1\n break\n \n # Rank5 Accuracy\n for each_val in res[:5]:\n if y_gal[each_val] == y_anchor[anchor]:\n rank5_acc += 1\n break\n\n # Rank1 Accuracy\n for each_val in res[:1]:\n if y_gal[each_val] == y_anchor[anchor]:\n rank1_acc += 1\n break\n\n print(\"Avg acc is :: {avg_acc}\".format(avg_acc = avg_acc/len(X_anchor)))\n print(\"Rank 10 acc is :: {rank10_acc}\".format(rank10_acc = rank10_acc/len(X_anchor)))\n print(\"Rank 5 acc is :: {rank5_acc}\".format(rank5_acc = rank5_acc/len(X_anchor)))\n print(\"Rank 1 acc is :: {rank1_acc}\".format(rank1_acc = rank1_acc/len(X_anchor)))\n print(\"Mean Avg Precision is :: {mAP}\".format(mAP=mean_average_precision(l2)))\n print(\"Vote res :: \", vote_res/len(X_anchor))\n\n return rank1_acc/len(X_anchor), mean_average_precision(l2)", "def metric_iaf(self, x):\n data = np.asarray(x['data'])\n iaf = [10.0] * data.shape[0]\n for ch, ch_data in 
enumerate(data):\n pxx, freqs = mlab.psd(ch_data, Fs=128.0, NFFT=256)\n alpha_mask = np.abs(freqs - 10) <= 2.0\n alpha_pxx = 10*np.log10(pxx[alpha_mask])\n alpha_pxx = scipy.signal.detrend(alpha_pxx)\n # iaf[ch] = alpha_pxx.shape\n iaf[ch] = freqs[alpha_mask][np.argmax(alpha_pxx)]\n return iaf", "def spectrumify(scattering_data, instr_broadening=0.1):\n graph_angles = n.linspace(0,180,10000)\n graph_intensities = n.zeros(graph_angles.shape)\n \n for angle, intensity in sorted(scattering_data.items()):\n graph_intensities += intensity * \\\n n.exp(-(graph_angles - angle)**2 / \\\n (2*(instr_broadening)**2))\n \n return graph_angles, graph_intensities", "def getMeasures():", "def get_aa (tRNA):\n\tpass", "def rwgraph_analyze2(input=(None)):\r\n\r\n\r\n #set up graph and degree distribution arrays\r\n n=2000\r\n m=4\r\n G=nx.barabasi_albert_graph(n, m, seed=5)\r\n Nt=100\r\n M=20000\r\n maxdeg=0\r\n degree_dist=[]\r\n for i in range(0,n):\r\n degree_dist.append(G.degree[i])\r\n if G.degree[i]>maxdeg:\r\n maxdeg=G.degree[i]\r\n j=i\r\n\r\n #set inital conditions and D\r\n y0=np.zeros(n,dtype=int)\r\n y0[j]=200\r\n D=1\r\n #define time for odi Int\r\n t=np.arange(Nt+1,dtype=int)\r\n #set up operators\r\n A = nx.adjacency_matrix(G)\r\n Q = A.toarray().sum(axis=1)\r\n L=np.diag(Q)-A.toarray()\r\n Q_inv=1/Q\r\n Ls=np.diag(np.ones(n))-np.matmul(np.diag(Q_inv),A.toarray())\r\n Ls_tran=np.transpose(Ls)\r\n\r\n #convert to sparse operators and include diffusion\r\n L_spar = scipy.sparse.csr_matrix(-D*L)\r\n Ls_spar = scipy.sparse.csr_matrix(-D*Ls)\r\n Ls_tran_spar = scipy.sparse.csr_matrix(-D*Ls_tran)\r\n A=nx.adjacency_matrix(G)\r\n L=-D*(scipy.sparse.diags(degree_arr)-A)\r\n Ls=-D*(scipy.sparse.diags(np.ones(N))-scipy.sparse.diags(1/degree_arr).dot(A))\r\n\r\n #define operators\r\n def Lap(y,t):\r\n return scipy.sparse.csr_matrix.__mul__(L_spar,y)\r\n def Lap_Ls(y,t):\r\n return scipy.sparse.csr_matrix.__mul__(Ls_spar,y)\r\n def Lap_Ls_tran(y,t):\r\n return scipy.sparse.csr_matrix.__mul__(Ls_tran_spar,y)\r\n\r\n #solutions of different operators\r\n solL=scipy.integrate.odeint(Lap,y0,t)\r\n solLs=scipy.integrate.odeint(Lap_Ls,y0,t)\r\n solLs_tran=scipy.integrate.odeint(Lap_Ls_tran,y0,t)\r\n\r\n\r\n #finds eigen values and vectors and puts them into order\r\n def eigen(L):\r\n eigen_values,eigen_vectors=scipy.linalg.eig(-L)\r\n idx = eigen_values.argsort()[::-1]\r\n eigen_values = eigen_values[idx]\r\n eigen_vectors = eigen_vectors[:,idx]\r\n return eigen_values,eigen_vectors\r\n\r\n #finds all eigen values and eigen vectors of the different operators. 
can use sparse matrics\r\n eigen_values_LS,eigen_vectors_LS=eigen(Ls)\r\n eigen_values_LS_tran,eigen_vectors_LS_tran=eigen(Ls_tran)\r\n eigen_values_L,eigen_vectors_L=eigen(L)\r\n eigen_values_L2,eigen_vectors_L2=eigen(L*0.36)\r\n\r\n ### could have eigs here as didn't end up using all eigenvalues ####\r\n #eigen values graph\r\n n0=len(eigen_values_L)\r\n eig_nums=np.arange(n0)\r\n plt.figure(figsize=(12, 6))\r\n plt.scatter(eig_nums[0:10],eigen_values_L2[0:10],s=50,marker=\"x\" ,label='L , D=0.36')\r\n plt.scatter(eig_nums[0:10],eigen_values_LS[0:10],s=50, marker=\"|\",label='LS , D=1')\r\n plt.scatter(eig_nums[0:10],eigen_values_LS_tran[0:10],s=50,marker='_',label='LS_tran , D=1')\r\n plt.scatter(eig_nums[0:10],eigen_values_L[0:10],s=50,marker=\"+\" ,label='L , D=1')\r\n plt.legend(loc=\"lower left\", fontsize=12,fancybox=True, framealpha=1, shadow=True, borderpad=1)\r\n plt.xlabel('eigen value number')\r\n plt.ylabel('eigenvalue')\r\n plt.title(\"Eigenvlaues of Laplacian Matrixs\")\r\n plt.show()\r\n\r\n print(\"4 biggest eigenvalues for each operater\")\r\n print('L=',eigen_values_L[0:4])\r\n print('Ls=',eigen_values_LS[0:4])\r\n print('Ls_tran=',eigen_values_LS_tran[0:4])\r\n #prints 4 biggest eigen values\r\n #counts node distrubtion by creating dictionary\r\n def result_count(sol,Nt,G):\r\n \"\"\" returns cumlative frequency/probailties for nodes of same degree and returns dictionary\"\"\"\r\n n = G.number_of_nodes()\r\n dict_freq={}\r\n for i in range(n):\r\n k=G.degree(i)\r\n if k not in dict_freq:\r\n dict_freq[k]=sol[Nt,i]\r\n else:\r\n dict_freq[k]+=sol[Nt,i]\r\n return dict_freq\r\n\r\n #frequency count of solutions\r\n dict_freq=result_count(solL,Nt,G)\r\n dict_freq2=result_count(solLs,Nt,G)\r\n dict_freq3=result_count(solLs_tran,Nt,G)\r\n\r\n #random walk data\r\n X=rwgraph(G,j,20000,100)\r\n Listnodes7=[]\r\n for i in range(20000):\r\n Listnodes7.append(G.degree(X[i,100]))\r\n X=rwgraph(G,j,200,100)\r\n Listnodes8=[]\r\n for i in range(200):\r\n Listnodes8.append(G.degree(X[i,100]))\r\n X=rwgraph(G,j,50000,5000)\r\n Listnodes9=[]\r\n for i in range(50000):\r\n Listnodes9.append(G.degree(X[i,5000]))\r\n listfreq7=CountFrequency(Listnodes7)\r\n listfreq8=CountFrequency(Listnodes8)\r\n listfreq9=CountFrequency(Listnodes9)\r\n listfreq_deg=CountFrequency(degree_dist)\r\n z2=[]\r\n z3=[]\r\n z1=[]\r\n z_deg2=[]\r\n z_deg3=[]\r\n z_deg1=[]\r\n for i in listfreq7:\r\n z2.append(listfreq7[i]/(listfreq_deg[i]*20000))\r\n z_deg2.append(i)\r\n for i in listfreq8:\r\n z3.append(listfreq8[i]/(listfreq_deg[i]*200))\r\n z_deg3.append(i)\r\n for i in listfreq8:\r\n z1.append(listfreq9[i]/(listfreq_deg[i]*50000))\r\n z_deg1.append(i)\r\n #operator solutions compared to node degree frequency\r\n z4,z5,z6=[],[],[]\r\n z_deg4,z_deg5,z_deg6=[],[],[]\r\n for i in dict_freq:\r\n z4.append(dict_freq[i]/(listfreq_deg[i]*200))\r\n z_deg4.append(i)\r\n for i in dict_freq2:\r\n z5.append(dict_freq2[i]/(listfreq_deg[i]*200))\r\n z_deg5.append(i)\r\n for i in dict_freq3:\r\n z6.append(dict_freq3[i]/(listfreq_deg[i]*200))\r\n z_deg6.append(i)\r\n\r\n plt.figure(figsize=(15, 10))\r\n plt.scatter(z_deg1, z1,label='Nt=5000, M=50000')\r\n plt.scatter(z_deg2, z2,label='Nt=100, M=20000')\r\n plt.scatter(z_deg3, z3,label='Nt=100, M=200')\r\n plt.scatter(z_deg4, z4,label='L, Nt=100')\r\n plt.scatter(z_deg5, z5,label='Ls, Nt=100')\r\n plt.scatter(z_deg6, z6,label='Ls_tran, Nt=100')\r\n plt.ylim((-0.005,0.020))\r\n plt.xlabel('degree of node')\r\n plt.ylabel('frequency of final position / M*frequency of 
degree')\r\n plt.legend(loc=\"upper left\", fontsize=12,fancybox=True, framealpha=1, shadow=True, borderpad=1)\r\n plt.title(\"Frequency of final positions relative to number of nodes of that degree, for changing times Nt and M.\")\r\n plt.show()\r\n\r\n #code to produce final graph\r\n iarray1=LinearModel(G,x=j,i0=1,L1='L',D=1,tf=20,Nt=Nt)\r\n iarray2=LinearModel(G,x=j,i0=1,L1='Ls',D=1,tf=20,Nt=Nt)\r\n iarray3=LinearModel(G,x=j,i0=1,L1='Lst',D=1,tf=20,Nt=Nt)\r\n tarray = np.linspace(0,5,Nt+1)\r\n plt.figure(figsize=(12, 6))\r\n plt.plot(tarray, iarray1[:,7] ,label='rand node L,deg=46',color='b',alpha=0.5)\r\n plt.plot(tarray, iarray2[:,7] ,label='rand node Ls,deg=46',marker='|',color='r')\r\n plt.scatter(tarray, iarray3[:,7] ,label='rand node LST,deg=46',marker='_',color='y')\r\n plt.scatter(tarray, iarray1[:,1801] ,label='rand node L, deg=5',color='m',alpha=0.5,marker='+')\r\n plt.plot(tarray, iarray2[:,1801] ,label='rand node Ls,deg=5',marker='|',color='c')\r\n plt.scatter(tarray, iarray3[:,1801] ,label='rand node LST,deg=5',marker='_',color='g')\r\n plt.xlabel('time')\r\n plt.ylabel('representive frequency')\r\n plt.legend()\r\n plt.title(\"Comparing repestive frequency of a random nodes, for the different linear models,time step=50,D=0.1\")\r\n plt.show()\r\n return None #modify as needed\r", "def kA_func(self):\n i1 = self.inl[0].to_flow()\n i2 = self.inl[1].to_flow()\n o1 = self.outl[0].to_flow()\n o2 = self.outl[1].to_flow()\n\n T_i1 = T_mix_ph(i1, T0=self.inl[0].T.val_SI)\n T_i2 = T_mix_ph(i2, T0=self.inl[1].T.val_SI)\n T_o1 = T_mix_ph(o1, T0=self.outl[0].T.val_SI)\n T_o2 = T_mix_ph(o2, T0=self.outl[1].T.val_SI)\n\n if T_i1 <= T_o2:\n T_i1 = T_o2 + 0.01\n if T_i1 <= T_o2:\n T_o2 = T_i1 - 0.01\n if T_i1 <= T_o2:\n T_o1 = T_i2 + 0.02\n if T_o1 <= T_i2:\n T_i2 = T_o1 - 0.02\n\n td_log = ((T_o1 - T_i2 - T_i1 + T_o2) /\n np.log((T_o1 - T_i2) / (T_i1 - T_o2)))\n\n return i1[0] * (o1[2] - i1[2]) + self.kA.val * td_log", "def compute_statistics(self):", "def kA_func(self):\n\n i1 = self.inl[0].to_flow()\n i2 = self.inl[1].to_flow()\n o1 = self.outl[0].to_flow()\n o2 = self.outl[1].to_flow()\n\n T_i1 = T_bp_p(i1)\n T_i2 = T_mix_ph(i2, T0=self.inl[1].T.val_SI)\n T_o1 = T_mix_ph(o1, T0=self.outl[0].T.val_SI)\n T_o2 = T_mix_ph(o2, T0=self.outl[1].T.val_SI)\n\n if T_i1 <= T_o2 and not self.inl[0].T.val_set:\n T_i1 = T_o2 + 0.5\n if T_i1 <= T_o2 and not self.outl[1].T.val_set:\n T_o2 = T_i1 - 0.5\n\n if T_o1 <= T_i2 and not self.outl[0].T.val_set:\n T_o1 = T_i2 + 1\n if T_o1 <= T_i2 and not self.inl[1].T.val_set:\n T_i2 = T_o1 - 1\n\n td_log = ((T_o1 - T_i2 - T_i1 + T_o2) /\n np.log((T_o1 - T_i2) / (T_i1 - T_o2)))\n\n return i1[0] * (o1[2] - i1[2]) + self.kA.val * td_log", "def metric(self, i1, i2):\n xx = self._try_cache(self.i2e[i1[0]])\n yy = self._try_cache(self.i2e[i2[0]])\n\n \"\"\"Don't need to cache the xy similarity because it doesn't have other uses\"\"\"\n xy = parasail.nw_stats(self.i2e[i1[0]], self.i2e[i2[0]], **self.paraParams).score\n\n D = xx + yy - 2 * xy\n return D", "def advancedStats():", "def calculate_attitude_metric(moment, mass, knowledge, axis):\n # print(moment, mass)\n moment_met = np.asarray(moment / mass).clip(min=0, max=1)\n # print(moment_met)\n know_met = np.asarray(10 - knowledge).clip(min=0, max=10) / 10\n # print(know_met)\n axis_met = np.asarray(axis / 3).clip(min=0, max=1)\n # print(axis_met)\n # print('Att Met:' + str(((2 * moment_met + 2 * know_met + axis_met) / 5).clip(min=0, max=1)))\n return ((2 * moment_met + 2 * know_met + axis_met) / 5).clip(min=0, 
max=1)", "def calc_av_aoi( d_c_mat, start_calc_window, end_calc_window,input_type=\"datetime\",output_units='hours'):\n\n # Now sum up trapezoidal sections of AoI curve (integration)\n\n conv_func = MetricsCalcs.t_conv_func\n\n aoi_summation = 0\n for t in range(1,len(d_c_mat)):\n trap_addition = (conv_func(d_c_mat[t][0],d_c_mat[t-1][1],input_type,output_units)**2 - conv_func(d_c_mat[t-1][0],d_c_mat[t-1][1],input_type,output_units)**2)/2\n aoi_summation += trap_addition\n\n av_aoi = aoi_summation / conv_func(end_calc_window,start_calc_window,input_type,output_units)\n return av_aoi", "def create_iou_metric(anchor_boxes):\n y, x, h, w = np.transpose(anchor_boxes)\n ab_areas = w * h\n y0 = y - h // 2\n x0 = x - w // 2\n y1 = y + h // 2\n x1 = x + w // 2\n\n def iou(gt_boxes):\n ious = []\n for gt_box in gt_boxes:\n gt_y0, gt_x0, gt_y1, gt_x1 = gt_box\n gt_area = (gt_x1 - gt_x0) * (gt_y1 - gt_y0)\n int_y0 = np.maximum(gt_y0, y0)\n int_x0 = np.maximum(gt_x0, x0)\n int_y1 = np.minimum(gt_y1, y1)\n int_x1 = np.minimum(gt_x1, x1)\n int_area = np.maximum(0, int_x1 - int_x0) * np.maximum(0, int_y1 - int_y0)\n ious.append(int_area / (ab_areas + gt_area - int_area))\n ious = np.transpose(ious)\n gt_indices = np.argmax(ious, axis=1)\n ious = np.squeeze(np.take_along_axis(ious, gt_indices[:, np.newaxis], axis=1))\n gt_boxes = np.take(gt_boxes, gt_indices, axis=0)\n return ious, gt_boxes\n return iou", "def _calc_ap(self, mol):\n matches = mol.GetSubstructMatches(self.aromatic_query)\n return len(matches) / mol.GetNumAtoms()", "def ADP (self):", "def az (a, score):\r\n z = (score-amean(a)) / asamplestdev(a)\r\n return z", "def calcAWAIforOptim(slopeaz, latitude):\n\tdf = calcTotalInsolation(latitude, slopeaz[0], slopeaz[1])\n\treturn np.dot(\n\t\tnp.array([31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]),\n\t\tdf['insolation_tilted']\n\t\t) / -365.0", "def ecg_diagnose(apn):\n # Total minutes of apnea\n apnea_total = sum(apn)\n\n # Maximum hourly Apnea Index\n total_hour = int(len(apn) / 60)\n y_pred_hourly = np.reshape(apn[: total_hour * 60], (total_hour, 60))\n AI_hourly = y_pred_hourly.sum(axis=1)\n # If data in the last hour exceed 30 minutes, then convert to hourly result\n y_pred_left = apn[total_hour * 60 :]\n # if len(y_pred_left) >= 30:\n # AI_hourly = np.append(AI_hourly, sum(y_pred_left) * 60 / len(y_pred_left))\n # total_hour += 1\n AI_max = AI_hourly.max()\n\n if AI_max >= 10 and apnea_total >= 100:\n return \"A\"\n elif AI_max >= 5 and apnea_total >= 5:\n return \"B\"\n else:\n return \"C\"", "def calculate_a(self):\n self.a = float(len(self.neighbors)) / total_connections", "def adjacency_matrix():\n file_path = PROJECT_PATH + \"/geographycal_data/adjacency_matrix/Howgrp.txt\"\n router = Router(adjacency_metrix=file_path)\n # router.write2vtk(router.graph, \"adjacency_matrix\")\n # nx.draw(router.graph)\n # plt.show()\n # adjacency matrix\n A = nx.adjacency_matrix(router.graph, weight=None).toarray()\n # ... and its spectrum\n nx.adjacency_spectrum(router.graph, weight=None)\n # weighted adjacency\n W = nx.adjacency_matrix(router.graph)\n # D\n I = np.reshape(np.ones(12), (-1, 1))\n D = np.matmul(A, I)\n # combinatorial graph Laplacian L = D - A\n L = nx.laplacian_matrix(router.graph, weight=None)\n # ... 
and his spectrum\n nx.laplacian_spectrum(router.graph, weight=None)\n # weighted Laplacian\n Y = nx.laplacian_matrix(router.graph)\n\n # Note\n sumD = np.matmul(I.transpose(), D)\n sumD = sumD[0][0]\n sumA = 0\n for row in np.nditer(A):\n for e in np.nditer(row):\n sumA += e\n\n # Fielder vector\n fiedler_vector = nx.fiedler_vector(router.graph, weight=None)\n\n # Matrix Double index Sum\n\n def D_app(F):\n return D * F\n\n def A_app(F):\n AF = np.zeros(len(F))\n for i, e_i in enumerate(F):\n for j, e_j in enumerate(F):\n if (A[i][j] != 0):\n AF[i] += F[j]\n return AF", "def amine(listAmine, count):\n \n for type in listAmine.keys():\n for nitrogen in listAmine[type]:\n nbNeighbor = numberNeigthbor(nitrogen[\"neighbors\"])\n for neighbor in nitrogen[\"neighbors\"]:\n if not nbNeighbor in count[type].keys():\n count[type][nbNeighbor] = structure.countElements()\n if not nbNeighbor in count[\"GlobalAmine\"].keys():\n count[\"GlobalAmine\"][nbNeighbor] = structure.countElements()\n\n\n if neighbor[\"element\"] in count[type][nbNeighbor].keys():\n count[type][nbNeighbor][neighbor[\"element\"]] = count[type][nbNeighbor][neighbor[\"element\"]] + 1\n count[\"GlobalAmine\"][nbNeighbor][neighbor[\"element\"]] = count[\"GlobalAmine\"][nbNeighbor][neighbor[\"element\"]] + 1\n\n else:\n count[type][nbNeighbor][\"others\"] = count[type][nbNeighbor][\"others\"] + 1\n count[\"GlobalAmine\"][nbNeighbor][\"others\"] = count[\"GlobalAmine\"][nbNeighbor][\"others\"] + 1", "def measure(self, X2, X3, start_index): \n #linearization of data X2 and X3\n X2 = np.array(X2)\n X3 = np.array(X3)\n e = self.e\n fit = self.Linearization(X2)\n fit2 = self.Linearization(X3)\n \n #line alinement \n Index = []\n test_list = fit['index'] + fit2['index']\n [Index.append(x) for x in test_list if x not in Index]\n Y = 0\n \n #Similarity Computation\n for i in Index:\n if i in fit['index'] and i in fit2['index']:\n Y += abs(fit['Y'+str(i)]-fit2['Y'+str(i)])\n\n elif i in fit['index']:\n J = np.max(np.where(np.array(fit2['index']) < i ))\n index = fit2['index'][J]\n k = fit2['reg'+str(index)][0]\n b = fit2['reg'+str(index)][1]\n value = abs(k * i + b - fit['Y'+str(i)])\n Y += value\n elif i in fit2['index']:\n J = np.max(np.where(np.array(fit['index']) < i ))\n index = fit['index'][J]\n k = fit['reg'+str(index)][0]\n b = fit['reg'+str(index)][1]\n value = abs(k * i + b - fit2['Y'+str(i)])\n Y += value\n if len(Index) != 0: \n score = Y/len(Index)\n else:\n score = 0\n self.decision_scores_.append((start_index, score))\n if len(X2) == 1:\n print('Error! SSA measure doesn\\'t apply to singleton' )\n else:\n return score", "def adstatistic(self, X):\n n = len(X)\n Y = X[:]\n ybar = mean(Y)\n yvar = var(Y)\n ysd = sqrt(yvar)\n Y = [(y- ybar)/ysd for y in Y]\n A2 = -n\n S =0.0 \n Y.sort() # don't forget this!!!\n for i, y in enumerate(Y):\n j = i+1\n p = self.pnorm(y)\n q = 1- p\n S += (j+j - 1)*log(p)+ (2 *(n-j)+1)* log(q)\n A2 -= S/n\n \n \n A2 *= (1.0 + 4.0/n - 25.0/n**2)\n return A2", "def optimize_anchors(ds, num_anchors):\n y = np.array(flatten_list(ds.y))\n out = kmeans(y[:, 2:4], num_anchors)\n percentage = avg_iou(y[:, 2:4], out)\n print('IoU = ', percentage)\n print(str(out).replace(']\\n ', '],\\n').replace(' ', ', ').replace('\\n[', '\\n ['))", "def baseline(data):\n weights = weighting(data)\n return np.inner(weights,data['clicks'])/weights.sum()", "def calc_stat_values(self):" ]
[ "0.58205086", "0.571059", "0.5541013", "0.5521272", "0.5515177", "0.5497382", "0.54781765", "0.54200995", "0.5405992", "0.53759086", "0.5374236", "0.5366215", "0.53630555", "0.535613", "0.5339427", "0.53139883", "0.53016055", "0.52901673", "0.52805847", "0.5274683", "0.5271787", "0.52660805", "0.5261652", "0.52390575", "0.5234933", "0.5210889", "0.51916337", "0.5168215", "0.51529074", "0.5136171" ]
0.63533676
0
Get query, key and value tensors.
def to_qkv(
    self, features: torch.Tensor, context: torch.Tensor = None
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    query = self.to_q(features)
    context = context if context is not None else features
    key = self.to_k(context)
    value = self.to_v(context)
    query = self._heads2batch(query)
    key = self._heads2batch(key)
    value = self._heads2batch(value)
    return query, key, value
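A minimal sketch of a module such a method could belong to; the layer names, head count, and the _heads2batch reshape below are assumptions inferred from the signature above, not the original implementation.

import torch
import torch.nn as nn

class TinyCrossAttention(nn.Module):
    def __init__(self, dim: int = 64, heads: int = 4) -> None:
        super().__init__()
        self.heads = heads
        # per-projection linear maps assumed by to_qkv above
        self.to_q = nn.Linear(dim, dim)
        self.to_k = nn.Linear(dim, dim)
        self.to_v = nn.Linear(dim, dim)

    def _heads2batch(self, x: torch.Tensor) -> torch.Tensor:
        # fold the head dimension into the batch: (B, N, H*D) -> (B*H, N, D)
        b, n, _ = x.shape
        return x.view(b, n, self.heads, -1).transpose(1, 2).reshape(b * self.heads, n, -1)

# With to_qkv defined on this class, self-attention and cross-attention inputs
# would yield per-head tensors, e.g.:
#   q, k, v = module.to_qkv(torch.randn(2, 16, 64))                          # self-attention
#   q, k, v = module.to_qkv(torch.randn(2, 16, 64), torch.randn(2, 32, 64))  # cross-attention
#   q.shape == (2 * 4, 16, 16); k.shape == v.shape == (2 * 4, 32, 16) in the second case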
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __compute_qkv(queries, keys, values, n_head, d_key, d_value):\n q = layers.fc(input=queries, size=d_key * n_head,\n bias_attr=False, num_flatten_dims=2)\n fc_layer = wrap_layer_with_block(\n layers.fc, fluid.default_main_program().current_block().parent_idx\n ) if cache is not None and static_kv else layers.fc\n k = fc_layer(input=keys, size=d_key * n_head,\n bias_attr=False, num_flatten_dims=2)\n v = fc_layer(input=values, size=d_value * n_head,\n bias_attr=False, num_flatten_dims=2)\n return q, k, v", "def get(self):\n data = dict(obs=self.obs_buf[:self.ptr, :], act=self.act_buf[:self.ptr], ret=self.ret_buf[:self.ptr],\n adv=self.adv_buf[:self.ptr], logp=self.logp_buf[:self.ptr])\n\n self.ptr, self.path_start_idx = 0, 0\n\n return {k: torch.as_tensor(v, dtype=torch.float32) for k, v in data.items()}", "def call(self, querys, keys, values, mask = None, training = False):\n\n sequence_len = shape_list(keys)[1]\n\n # q_i = Q * W^Q_i\n #print('query.shape=', querys.shape)\n querys = self.w_q(querys)\n querys = tf.reshape(querys, shape = (-1, sequence_len, self.num_attention_heads, self.d_k))\n # At this point, the shapes of querys, keys, and values are (batches, sequence_len, num_attention_heads, d_k)\n # we need to change the shapes to (batches, num_attention_heads, sequence_len, d_k)\n querys = tf.transpose(querys, perm = [0, 2, 1, 3])\n\n # the same operations on keys and values\n # k_i = K * W^K_i\n keys = self.w_k(keys)\n keys = tf.reshape(keys, shape = (-1, sequence_len, self.num_attention_heads, self.d_k))\n keys = tf.transpose(keys, perm = [0, 2, 1, 3])\n \n # v_i = V * W^V_i\n values = self.w_v(values)\n values = tf.reshape(values, shape = (-1, sequence_len, self.num_attention_heads, self.d_k))\n values = tf.transpose(values, perm = [0, 2, 1, 3])\n\n v, self.attn = attention(querys, keys, values, mask = mask, dropout = self.dropout)\n\n # the shape of v is (batches, h, sequence_len, d_k)\n # we need to change it to (batches, sequence_len, num_attention_heads, d_k)\n # and then concatenate last two dims (batches, sequence_len, num_attention_heads * d_k)\n return tf.reshape(tf.transpose(v, perm = [0, 2, 1, 3]), (-1, sequence_len, self.num_attention_heads * self.d_k))", "def result(self) -> Dict[str, tf.Tensor]:\n return super().result()", "def result(self) -> Dict[str, tf.Tensor]:\n return super().result()", "def __getitem__(self, key):\n return self.query(key)", "def compute_qkv(query_antecedent,\n memory_antecedent,\n total_key_depth,\n total_vale_depth,\n q_filter_width=1,\n kv_filter_width=1,\n q_padding=\"VALID\",\n kv_padding=\"VALID\"):\n if memory_antecedent is None:\n memory_antecedent = query_antecedent\n q = compute_attention_component(\n query_antecedent, total_key_depth, q_filter_width, q_padding, \"q\")\n k = compute_attention_component(\n memory_antecedent, total_key_depth, kv_filter_width, kv_padding, \"k\")\n v = compute_attention_component(\n memory_antecedent, total_vale_depth, kv_filter_width, kv_padding, \"v\")\n return q, k, v", "def __getitem__(self, query: BoundingBox) -> Dict[str, Any]:\n data = self._get_tensor(query)\n key = \"image\"\n sample = {key: data, \"crs\": self.crs, \"bbox\": query}\n\n return sample", "def forward(self, query_images: Tensor, key_images: Tensor) -> Tuple[Tensor, Tensor]:\n q = self.encoder_q(query_images)\n if self.head_q is not None:\n q = self.head_q(q)\n q = nn.functional.normalize(q, dim=1)\n\n with torch.no_grad():\n # The keys are shuffled between the GPUs before encoding them, to avoid batch normalization leaking\n # 
information between the samples. This works only when using the DDP strategy.\n if isinstance(self.trainer.strategy, DDPStrategy):\n key_images, original_order = shuffle_batch(key_images)\n\n k = self.encoder_k(key_images)\n if self.head_k is not None:\n k = self.head_k(k)\n k = nn.functional.normalize(k, dim=1)\n\n if isinstance(self.trainer.strategy, DDPStrategy):\n k = sort_batch(k, original_order)\n\n return q, k", "def forward(self, key, value, query, mask=None, return_key=False, all_attn=False):\n\n\t\t# CHECKS\n\t\t# batch, k_len, d = key.size()\n\t\t# batch_, k_len_, d_ = value.size()\n\t\t# aeq(batch, batch_)\n\t\t# aeq(k_len, k_len_)\n\t\t# aeq(d, d_)\n\t\t# batch_, q_len, d_ = query.size()\n\t\t# aeq(batch, batch_)\n\t\t# aeq(d, d_)\n\t\t# aeq(self.model_dim % 8, 0)\n\t\t# if mask is not None:\n\t\t# \tbatch_, q_len_, k_len_ = mask.size()\n\t\t# \taeq(batch_, batch)\n\t\t# \taeq(k_len_, k_len)\n\t\t# \taeq(q_len_ == q_len)\n\t\t# END CHECKS\n\n\t\tbatch_size = key.size(0) #2\n\t\tdim_per_head = self.dim_per_head #768/n_head\n\t\thead_count = self.head_count\n\t\tkey_len = key.size(1)\n\t\tquery_len = query.size(1)\n\n\t\tdef shape(x):\n\t\t\treturn x.view(batch_size, key_len, key_len, head_count, dim_per_head) \\\n\t\t\t\t.transpose(1, 2)\n\n\t\tdef shape_q(x):\n\t\t\treturn x.view(batch_size, query_len, head_count, dim_per_head)\n\n\t\tdef unshape(x):\n\t\t\treturn x.transpose(1, 2).contiguous() \\\n\t\t\t\t\t.view(batch_size, key_len, head_count * dim_per_head)\n\n\t\t# 1) Project key, value, and query.\n\t\tkey_up = shape(self.linear_keys(key))\n\t\tvalue_up = shape(self.linear_values(value))\n\n\t\tquery_up = shape_q(self.linear_query(query))\n\t\tquery_up = query_up.unsqueeze(1)\n\n\t\tkey_up = key_up.transpose(1, 3).transpose(3, 4)\n\n\t\t# 2) Calculate and scale scores.\n\t\tquery_up = query_up / math.sqrt(dim_per_head)\n\t\tscores = torch.matmul(query_up.transpose(1,3), key_up) #(2, 1, 14, 1, 768 )(2, 1, 14, 768, 14)->(2, 1, 14, 1, 14)\n\n\t\tif mask is not None:\n\t\t\t# mask = mask.unsqueeze(1).expand_as(scores)\n\t\t\t# scores = scores.masked_fill(Variable(mask), -1e18)\n\t\t\tmask = mask.unsqueeze(1)\n\t\t\tmask = mask.unsqueeze(3)\n\t\t\tscores = scores * mask\n\n\t\t# 3) Apply attention dropout and compute context vectors.\n\t\tattn = self.sm(scores)\n\n\t\tdrop_attn = self.dropout(attn)\n\t\t#reconstruct shape\n\t\tcontext = unshape(torch.matmul(drop_attn, value_up.transpose(1, 3)))\n\t\t#(2, max_len, 1, 1, 768)\n\n\t\t# context = unshape(torch.matmul(drop_attn, value_up))\n\n\t\toutput = self.final_linear(context).squeeze(1)\n\n\t\t#(2, max_len, 1, 768)\n\n\t\t# batch_, q_len_, d_ = output.size()\n\n\t\t# aeq(q_len, q_len_)\n\t\t# aeq(batch, batch_)\n\t\t# aeq(d, d_)\n\n\t\t# END CHECK\n\t\treturn output", "def __getitem__(self, key):\n if self.expr_list: return self.expr_list[key]\n else: return dy.pick(self.expr_tensor, key)", "def __getitem__(self, key):\n if self.expr_list: return self.expr_list[key]\n else:\n if key < 0: key += len(self)\n if self.expr_tensor is not None:\n return torch.index_select(self.expr_tensor, dim=1, index=torch.LongTensor([key]).to(xnmt.device)).squeeze(1)\n else:\n return torch.index_select(self.expr_transposed_tensor, dim=-1, index=torch.LongTensor([key]).to(xnmt.device)).squeeze(-1)", "def get(self):\n if now()-self.last_query < 1./self.query_rate:\n return None,None\n self.last_query = now()\n\n # query from saver (an old strategy that may be desired at points): \n #self.saver.query_flag.value = True\n #fr = 
mp2np(self.saver.query_queue)\n #frts = self.saver.query_queue_ts.value\n \n # query from _PSEye (a newer strategy that is preferable for most uses):\n self.pseye.query_flag.value = True\n while self.pseye.query_flag.value == True:\n pass\n fr = self.pseye.query_queue[0]\n frts = self.pseye.query_queue_ts.value\n\n x,y = self.resolution[self.query_idx]\n return frts,fr.reshape([y,x])", "def multi_head_attention(queries, keys, values, attn_bias, d_key, d_value, d_model, pos_enc,\n n_head=1, dropout_rate=0., cache=None, static_kv=False):\n keys = queries if keys is None else keys\n values = keys if values is None else values\n if not (len(queries.shape) == len(keys.shape) == len(values.shape) == 3):\n raise ValueError(\n \"Inputs: quries, keys and values should all be 3-D tensors.\"\n )\n\n def __compute_qkv(queries, keys, values, n_head, d_key, d_value):\n \"\"\"\n Add linear projection to queries, keys, and values.\n \"\"\"\n q = layers.fc(input=queries, size=d_key * n_head,\n bias_attr=False, num_flatten_dims=2)\n fc_layer = wrap_layer_with_block(\n layers.fc, fluid.default_main_program().current_block().parent_idx\n ) if cache is not None and static_kv else layers.fc\n k = fc_layer(input=keys, size=d_key * n_head,\n bias_attr=False, num_flatten_dims=2)\n v = fc_layer(input=values, size=d_value * n_head,\n bias_attr=False, num_flatten_dims=2)\n return q, k, v\n\n def __split_heads_qkv(queries, keys, values, n_head, d_key, d_value)\n \"\"\"\n Reshape input tensors at the last dimension to split multi-heads\n and then transpose. Specifically, transform the input tensor with shape\n [bs, max_sequence_length, n_head * hidden_dim] to the output tensor\n with shape [bs, n_head, max_sequence_length, hidden_dim].\n \"\"\"\n # The value 0 in shape attr means copying the corresponding dimension\n # size of the input as the output dimension size.\n reshaped_q = layers.reshape(\n x=queries, shape=[0, 0, n_head, d_key], inplace=True)\n # permute the dimensions into:\n # [batch_size, n_head, max_sequence_len, hidden_size_per_head]\n q = layers.transpose(x=reshaped_q, perm=[0, 2, 1, 3])\n # For encoder-decoder attention in inference, insert the ops and vars\n # into global block to use as cache among beam search.\n reshape_layer = wrap_layer_with_block(\n layers.reshape,\n fluid.default_main_program().current_block(\n ).parent_idx) if cache is not None and static_kv else layers.reshape\n transpose_layer = wrap_layer_with_block(\n layers.transpose,\n fluid.default_main_program().current_block().\n parent_idx) if cache is not None and static_kv else layers.transpose\n reshaped_k = reshape_layer(\n x=keys, shape=[0, 0, n_head, d_key], inplace=True)\n k = transpose_layer(x=reshaped_k, perm=[0, 2, 1, 3])\n reshaped_v = reshape_layer(\n x=values, shape=[0, 0, n_head, d_value], inplace=True)\n v = transpose_layer(x=reshaped_v, perm=[0, 2, 1, 3])\n\n if cache is not None: # only for faster inference\n cache_, i = cache\n if static_kv: # For encoder-decoder attention in inference\n cache_k, cache_v = cache_[\"static_k\"], cache_[\"static_v\"]\n # To init the static_k and static_v in global block.\n static_cache_init = wrap_layer_with_block(\n layers.assign,\n fluid.default_main_program().current_block().parent_idx)\n static_cache_init(\n k,\n fluid.default_main_program().global_block().var(\n \"static_k_%d\" % i))\n static_cache_init(\n v,\n fluid.default_main_program().global_block().var(\n \"static_v_%d\" % i))\n k, v = cache_k, cache_v\n else: # For decoder self-attention in inference\n # use cache and concat 
time steps.\n cache_k, cache_v = cache_[\"k\"], cache_[\"v\"]\n k = layers.concat([cache_k, k], axis=2)\n v = layers.concat([cache_v, v], axis=2)\n cache_[\"k\"], cache_[\"v\"] = (k, v)\n return q, k, v\n\n def __combine_heads(x):\n \"\"\"\n Transpose and then reshape the last two dimensions of inpunt tensor x\n so that it becomes one dimension, which is reverse to __split_heads.\n \"\"\"\n if len(x.shape) != 4:\n raise ValueError(\"Input(x) should be a 4-D Tensor.\")\n\n trans_x = layers.transpose(x, perm=[0, 2, 1, 3])\n # The value 0 in shape attr means copying the corresponding dimension\n # size of the input as the output dimension size.\n return layers.reshape(\n x=trans_x,\n shape=[0, 0, trans_x.shape[2] * trans_x.shape[3]],\n inplace=True)\n \n def _shift(BD):\n \"\"\"\n -3 -2 -1 0 1 2\n -3 -2 -1 0 1 2\n -3 -2 -1 0 1 2\n\n to\n 0 1 2\n -1 0 1\n -2 -1 0\n\n :param BD: batch_size x n_head x max_len x 2max_len\n :return: batch_size x n_head x max_len x max_len\n \"\"\"\n bsz, n_head, max_len, _ = BD.size()\n zero_pad = layers.zeros(shape=(bsz, n_head, max_len, 1))\n BD = layers.reshape(x=layers.concat([BD, zero_pad], axis=-1),\n shape=(bsz, n_head, -1, max_len))\n BD = layers.reshape(x=BD[:, :, :-1], shape=(bsz, n_head, max_len, -1))\n BD = BD[:, :, :, max_len:]\n return BD\n\n def _transpose_shift(E):\n \"\"\"\n -3 -2 -1 0 1 2\n -30 -20 -10 00 10 20\n -300 -200 -100 000 100 200\n\n to\n 0 -10 -200\n 1 00 -100\n 2 10 000\n\n\n :param E: batch_size x n_head x max_len x 2max_len\n :return: batch_size x n_head x max_len x max_len\n \"\"\"\n bsz, n_head, max_len, _ = E.size()\n zero_pad = layers.zeros(shape=(bsz, n_head, max_len, 1))\n E = layers.reshape(x=layers.concat([E, zero_pad], axis=-1),\n shape=(bsz, n_head, -1, max_len))\n indice = layers.arange(start=0, end=max_len, dtype=int)\n E = layers.index_select(input=E, index=indice, dim=-2)\n E = layers.transpose(E, perm=[0, 1, 3, 2])\n return E\n\n def scaled_dot_product_attention(q, k, v, pos_enc, attn_bias, d_key, dropout_rate):\n \"\"\"\n Scaled Dot-Product Attention\n\n Change:\n - Different from the original one.\n We will remove the scale factor math: \\sqrt{d_k} according to the paper.\n - Bias for attention and position encoding are added.\n \n \"\"\"\n # product = layers.matmul(x=q, y=k, transpose_y=True, alpha=d_key**-0.5)\n\n # now q, k should be shaped like\n # [batch_size, n_head, max_sequence_len, hidden_size_per_head]\n # pos_enc should be shaped like [2 X l, head_dim], and head_dim = d_key\n max_sequence_len = q.shape[2]\n \n r_r_bias = layers.create_parameter(shape=(n_head, d_key)) # [n_head, head_dim]\n r_w_bias = layers.create_parameter(shape=(n_head, d_key)) # [n_head, head_dim]\n rw_head_q = q + r_r_bias[:, None] # [batch, n_head, max_sequence_len, head_dim]\n AC = layers.matmul(x=rw_head_q, y=k, transpose_y=True) # [batch, n_head, max_sequence_len, max_seqence_len]\n \n # position bias for each head, shaped like [n_head, 2 X max_sequence_len].\n # Then add two dimensions at `batch` and `maxlen`.\n D_ = layers.matmul(x=r_w_bias, y=pos_enc, transpose_y=True)[None, :, None]\n # position bias for each query, shaped like [batch, n_head, max_len, 2 X max_len]\n B_ = layers.matmul(x=q, y=pos_enc, transpose_y=True)\n # bias for each key, shaped like [batch, n_head, max_len, 2 X max_len]\n E_ = layers.matmul(x=k, y=pos_enc, transpose_y=True)\n \n # shaped like [batch, n_head, max_len, 2 X max_len]\n # change it to [batch, n_head, max_len, max_len]\n BD = B_ + D_\n BDE = _shift(BD) + _transpose_shift(E_)\n product = AC + 
BDE\n\n # product = layers.matmul(x=q, y=k, transposed_y=True, alpha=1.0) + \\\n # layers.matmul(x=q, y=pos_enc, transposed_y=True) +\\\n # layers.transpose(x=last_two, perm=[0, 1, 3, 2])\n if attn_bias:\n product += attn_bias\n weights = layers.softmax(product)\n if dropout_rate:\n weights = layers.dropout(\n weights,\n dropout_prob=dropout_rate,\n seed=dropout_seed,\n is_test=False)\n out = layers.matmul(weights, v)\n return out\n\n q, k, v = __compute_qkv(queries, keys, values, n_head, d_key, d_value)\n q, k, v = __split_heads_qkv(q, k, v, n_head, d_key, d_value)\n\n ctx_multiheads = scaled_dot_product_attention(q, k, v, pos_enc, attn_bias, d_key,\n dropout_rate)\n\n out = __combine_heads(ctx_multiheads)\n\n # Project back to the model size.\n proj_out = layers.fc(input=out,\n size=d_model,\n bias_attr=False,\n num_flatten_dims=2)\n return proj_out", "def __getitem__(self, key):\n if self.expr_list: return self.expr_list[key]\n else:\n if key < 0: key += len(self)\n if self.expr_tensor:\n return dy.pick(self.expr_tensor, key, dim=len(self.expr_tensor.dim()[0])-1)\n else:\n return dy.pick(self.expr_transposed_tensor, key, dim=0)", "def get(self):\n data = dict(state= np.concatenate(self.state_buf), act=np.concatenate(self.act_buf),\n rew=np.concatenate(self.rew_buf), next_state = np.concatenate(self.next_state_buf))\n\n return {k: torch.as_tensor(v, dtype=torch.float32) for k,v in data.items()}", "def forward(self, key, value, query, mask=None, return_key=False, all_attn=False):\n\n\t\tbatch_size = key.size(0) # 2\n\t\tdim_per_head = self.dim_per_head # 768/n_head\n\t\thead_count = self.head_count\n\t\tkey_len_1 = key.size(1)\n\t\tken_len_2 = key.size(3)\n\t\tquery_len = query.size(1)\n\n\t\tdef shape(x):\n\t\t\treturn x.view(batch_size, key_len_1, key_len_1, ken_len_2, head_count, dim_per_head) \\\n\t\t\t\t.transpose(1, 2)\n\n\t\tdef shape_q(x):\n\t\t\treturn x.view(batch_size, query_len, head_count, dim_per_head)\n\n\t\tdef unshape(x):\n\t\t\treturn x.transpose(1, 2).contiguous() \\\n\t\t\t\t.view(batch_size, key_len_1, key_len_1, head_count * dim_per_head)\n\n\t\t# 1) Project key, value, and query.\n\t\tkey_up = shape(self.linear_keys(key))\n\t\tvalue_up = shape(self.linear_values(value))\n\n\t\tquery_up = shape_q(self.linear_query(query))\n\t\tquery_up = query_up.unsqueeze(1).expand(batch_size, query_len, query_len, head_count, dim_per_head).unsqueeze(3)\n\n\n\n\t\tkey_up = key_up.transpose(3, 5).transpose(3, 4)\n\n\t\t# 2) Calculate and scale scores.\n\t\tquery_up = query_up / math.sqrt(dim_per_head)\n\t\tscores = torch.matmul(query_up,\n\t\t\t\t\t\t\t key_up) # (2, 1, 14, 1, 768 )(2, 1, 14, 768, 14)->(2, 1, 14, 1, 14)\n\t\t# score (2, 17, 17, 1, 23)\n\t\t# value (2, 17, 17, 23, 768)\n\n\t\tif mask is not None:\n\t\t\t# mask = mask.unsqueeze(1).expand_as(scores)\n\t\t\t# scores = scores.masked_fill(Variable(mask), -1e18)\n\t\t\tmask = mask.unsqueeze(1)\n\t\t\tmask = mask.unsqueeze(3)\n\t\t\tscores = scores * mask\n\n\t\t# 3) Apply attention dropout and compute context vectors.\n\t\tattn = self.sm(scores)\n\n\t\tdrop_attn = self.dropout(attn)\n\t\t# reconstruct shape\n\t\t# context = unshape(torch.matmul(drop_attn, value_up.transpose(1, 3)))\n\t\tcontext = unshape(torch.matmul(drop_attn, value_up.transpose(3,4))) #(2, 17, 17, 1, 768)(2, 17, 17, 768)\n\n\t\t# (2, max_len, 1, 1, 768)\n\n\t\t# context = unshape(torch.matmul(drop_attn, value_up))\n\n\t\toutput = self.final_linear(context).squeeze(1)\n\n\t\t# (2, max_len, 1, 768)\n\n\t\treturn output", "def forward(self, query: Tensor, 
key: Tensor, value: Tensor,\n query_pos: Tensor, key_pos: Tensor, key_padding_mask: Tensor,\n **kwargs) -> Tensor:\n intermediate = []\n for layer in self.layers:\n query = layer(\n query,\n key=key,\n value=value,\n query_pos=query_pos,\n key_pos=key_pos,\n key_padding_mask=key_padding_mask,\n **kwargs)\n if self.return_intermediate:\n intermediate.append(self.post_norm(query))\n query = self.post_norm(query)\n\n if self.return_intermediate:\n return torch.stack(intermediate)\n\n return query.unsqueeze(0)", "def forward(self, query, key, value, mask=None):\n\n # Gathers the batch size\n batch_size = query.shape[0]\n\n # Performs the linear projections to calculate Q, K and V\n Q = self.q(query)\n K = self.k(key)\n V = self.v(value)\n\n # Reshapes Q, K and V\n Q = Q.reshape(batch_size, -1, self.n_heads, self.head_size).permute(0, 2, 1, 3)\n K = K.reshape(batch_size, -1, self.n_heads, self.head_size).permute(0, 2, 1, 3)\n V = V.reshape(batch_size, -1, self.n_heads, self.head_size).permute(0, 2, 1, 3)\n\n # Calculates the energy\n energy = torch.matmul(Q, K.permute(0, 1, 3, 2)) / self.scale\n\n # Checks if a mask is supplied\n if mask is not None:\n # Fills the energy with a low value where mask equals to zero\n energy = energy.masked_fill(mask == 0, -c.EPSILON)\n\n # Calculates the attention\n attention = torch.softmax(energy, dim=-1)\n\n # Performs the energy-value projection\n x = (torch.matmul(self.drop(attention), V)).permute(0, 2, 1, 3)\n\n # Reshapes back to hidden units\n x = x.reshape(batch_size, -1, self.n_hidden)\n\n # Passes down through output layer\n x = self.out(x)\n\n return x, attention", "def __getitem__(self, index): # data for one dialogue file\n input_tensor, target_tensor, bs_tensor, db_tensor = \\\n self.input_tensor[index], self.target_tensor[index], self.bs_tensor[index], self.db_tensor[index]\n mask_tensor = self.mask_tensor[index] if self.mask_tensor else None\n return input_tensor, target_tensor, bs_tensor, db_tensor, mask_tensor", "def __getitem__(self, item):\n if \":\" in item:\n split = item.split(\":\")\n tensor_name = split[0]\n device = split[1]\n else:\n tensor_name = item\n device = \"0\"\n\n try:\n result = self.graph.get_tensor_by_name(\"{0}/{1}:{2}\".format(self.variable_scope, tensor_name, device))\n except KeyError:\n result = None\n\n if result is None:\n try:\n result = self.graph.get_operation_by_name(\"{0}/{1}\".format(self.variable_scope, tensor_name))\n except KeyError:\n result = tf.get_variable(tensor_name, [])\n\n return result", "def forward(\n self,\n query: torch.Tensor,\n key: torch.Tensor,\n value: torch.Tensor,\n mask: torch.Tensor = None,\n ):\n batch_size = query.size(0)\n q_proj = (\n self.Q_linear(query)\n .view(batch_size, -1, self.nheads, self.d_k)\n .permute(0, 2, 1, 3)\n )\n k_proj = (\n self.K_linear(key)\n .view(batch_size, -1, self.nheads, self.d_k)\n .permute(0, 2, 3, 1)\n )\n v_proj = (\n self.V_linear(value)\n .view(batch_size, -1, self.nheads, self.d_k)\n .permute(0, 2, 1, 3)\n )\n weights = torch.matmul(q_proj, k_proj) # batch, nheads, seq_len1, seq_len2\n weights = weights / (self.d_k ** 0.5)\n if mask is not None:\n weights = weights.masked_fill(mask == 0, -1e12)\n weights = torch.softmax(weights, dim=3)\n # weights - batch, nheads, seq_len1, seq_len2\n # V_proj - batch, nhead, seq_len2, d_k\n output = torch.matmul(weights, v_proj) # batch, nheads, seq_len1, d_k\n output = output.transpose(1, 2)\n output = output.contiguous()\n output = output.view(batch_size, -1, self.d_model)\n output = self.out(output)\n output = 
self.dropout(output)\n return output", "def forward(self,\r\n query,\r\n key=None,\r\n value=None,\r\n identity=None,\r\n query_pos=None,\r\n key_padding_mask=None,\r\n reference_points=None,\r\n spatial_shapes=None,\r\n level_start_index=None,\r\n **kwargs):\r\n\r\n if value is None:\r\n value = query\r\n if identity is None:\r\n identity = query\r\n if query_pos is not None:\r\n query = query + query_pos\r\n\r\n if not self.batch_first:\r\n # change to (bs, num_query ,embed_dims)\r\n query = query.permute(1, 0, 2)\r\n value = value.permute(1, 0, 2)\r\n\r\n bs, num_query, _ = query.shape\r\n bs, num_value, _ = value.shape\r\n assert (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() == num_value\r\n\r\n value = self.value_proj(value)\r\n if key_padding_mask is not None:\r\n value = value.masked_fill(key_padding_mask[..., None], 0.0)\r\n value = value.view(bs, num_value, self.num_heads, -1)\r\n sampling_offsets = self.sampling_offsets(query).view(\r\n bs, num_query, self.num_heads, self.num_levels, self.num_points, 2)\r\n attention_weights = self.attention_weights(query).view(\r\n bs, num_query, self.num_heads, self.num_levels * self.num_points)\r\n\r\n attention_weights = attention_weights.softmax(-1)\r\n\r\n attention_weights = attention_weights.view(bs, num_query,\r\n self.num_heads,\r\n self.num_levels,\r\n self.num_points)\r\n\r\n if reference_points.shape[-1] == 2:\r\n \"\"\"\r\n For each BEV query, it owns `num_Z_anchors` in 3D space that having different heights.\r\n After proejcting, each BEV query has `num_Z_anchors` reference points in each 2D image.\r\n For each referent point, we sample `num_points` sampling points.\r\n For `num_Z_anchors` reference points, it has overall `num_points * num_Z_anchors` sampling points.\r\n \"\"\"\r\n offset_normalizer = torch.stack(\r\n [spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)\r\n\r\n bs, num_query, num_Z_anchors, xy = reference_points.shape\r\n reference_points = reference_points[:, :, None, None, None, :, :]\r\n sampling_offsets = sampling_offsets / \\\r\n offset_normalizer[None, None, None, :, None, :]\r\n bs, num_query, num_heads, num_levels, num_all_points, xy = sampling_offsets.shape\r\n sampling_offsets = sampling_offsets.view(\r\n bs, num_query, num_heads, num_levels, num_all_points // num_Z_anchors, num_Z_anchors, xy)\r\n sampling_locations = reference_points + sampling_offsets\r\n bs, num_query, num_heads, num_levels, num_points, num_Z_anchors, xy = sampling_locations.shape\r\n assert num_all_points == num_points * num_Z_anchors\r\n\r\n sampling_locations = sampling_locations.view(\r\n bs, num_query, num_heads, num_levels, num_all_points, xy)\r\n\r\n elif reference_points.shape[-1] == 4:\r\n assert False\r\n else:\r\n raise ValueError(\r\n f'Last dim of reference_points must be'\r\n f' 2 or 4, but get {reference_points.shape[-1]} instead.')\r\n\r\n # sampling_locations.shape: bs, num_query, num_heads, num_levels, num_all_points, 2\r\n # attention_weights.shape: bs, num_query, num_heads, num_levels, num_all_points\r\n #\r\n\r\n if torch.cuda.is_available() and value.is_cuda:\r\n if value.dtype == torch.float16:\r\n MultiScaleDeformableAttnFunction = MultiScaleDeformableAttnFunction_fp32\r\n else:\r\n MultiScaleDeformableAttnFunction = MultiScaleDeformableAttnFunction_fp32\r\n output = MultiScaleDeformableAttnFunction.apply(\r\n value, spatial_shapes, level_start_index, sampling_locations,\r\n attention_weights, self.im2col_step)\r\n else:\r\n output = multi_scale_deformable_attn_pytorch(\r\n value, spatial_shapes, 
sampling_locations, attention_weights)\r\n if not self.batch_first:\r\n output = output.permute(1, 0, 2)\r\n\r\n return output", "def get_value(self, query):\n indices = self.get_dimension_indices(query)\n index = self.get_value_index(indices)\n value = self.get_value_by_index(index)\n return value", "def get_qvalues(self, prev_state, obs_t):\r\n obs_t = torch.tensor(obs_t, device=self.device, dtype=torch.float)\r\n (h, c), qvalues = self.forward(prev_state, obs_t)\r\n return (h.detach(), c.detach()), qvalues.data.cpu().numpy()", "def query(self, key):\n arr_types = (\"forest\", \"prog\", \"tree\")\n if isinstance(key, tuple):\n if len(key) != 2:\n raise SyntaxError(\n \"Must be either 1 or 2 arguments.\")\n ftype, field = key\n if ftype not in arr_types:\n raise SyntaxError(\n f\"First argument must be one of {str(arr_types)}.\")\n if not isinstance(field, str):\n raise SyntaxError(\"Second argument must be a string.\")\n\n self.arbor._node_io.get_fields(self, fields=[field], root_only=False)\n indices = getattr(self, f\"_{ftype}_field_indices\")\n\n data_object = self.find_root()\n return data_object.field_data[field][indices]\n\n else:\n if not isinstance(key, str):\n raise SyntaxError(\"Single argument must be a string.\")\n\n # return the progenitor list or tree nodes in a list\n if key in arr_types:\n self.arbor._setup_tree(self)\n return getattr(self, f\"_{key}_nodes\")\n\n # return field value for this node\n self.arbor._node_io.get_fields(self, fields=[key],\n root_only=self.is_root)\n data_object = self.find_root()\n return data_object.field_data[key][self.tree_id]", "def get(self, action: Action) -> Qval:\n b_q_values = self.with_batch_dim(self.q_values)\n return b_q_values.gather(1, action.long().view(-1, 1)).squeeze(1)", "def get_data(self, query):\n result = input(\"{}: \".format(query))\n return result", "def query(self, q):\n for key in self.metadb.query(q):\n yield key, self.datadb[key]", "def get_value(self, indices):\r\n assert len(indices) == 3, indices\r\n if self.model_tensor is None:\r\n raise ValueError(\"Please set the tensor\")\r\n return self.model_tensor[indices[0], indices[1], indices[2]]" ]
[ "0.6457257", "0.60970044", "0.5843072", "0.58027285", "0.58027285", "0.5798024", "0.57467616", "0.56939214", "0.5666517", "0.5653372", "0.56192636", "0.55921423", "0.558059", "0.55803174", "0.55428225", "0.5536122", "0.5497946", "0.54878473", "0.5464319", "0.54265434", "0.5420361", "0.54179716", "0.54162455", "0.5414854", "0.53956807", "0.5349487", "0.5337928", "0.5336565", "0.5334821", "0.53291035" ]
0.6237002
1
Compute the embedding of g as the average of its word embeddings
def _get_emb_avg(g, lang):
    emb = np.zeros(emb_dims[lang])
    known_words_count = 0
    words = g.split()
    for w in words:
        if w in models[lang]:
            emb += models[lang][w]
            known_words_count += 1
    emb /= len(words)
    return emb, known_words_count > 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_emb_wavg(g, lang, a=0.001):\n emb = np.zeros(emb_dims[lang])\n known_words_count = 0\n words = g.split()\n for w in words:\n if w in models[lang]:\n emb += a / (a + word_freqs[lang][w]) * models[lang][w]\n known_words_count += 1\n emb /= len(words)\n return emb, known_words_count > 0", "def _get_mean_embedding(self, words):\n\n # ensure the size still matches if it's loaded from pretrained word vectors\n size = self.size\n if self.w2v is not None:\n size = next(iter(self.w2v_.values())).size\n\n zero = np.zeros(size)\n if self.tfidf:\n embedding = np.mean([self.w2v_[w] * self.w2idf_[w]\n if w in self.w2v_ else zero for w in words], axis = 0)\n else:\n embedding = np.mean([self.w2v_.get(w, zero) for w in words], axis = 0)\n\n return embedding", "def get_mean_emb(self, text):\n return np.mean([self.emb.get(w.lower(), self.emb.get(\"_UNK\")) for w in text.split()], axis=0)", "def get_sentence_average_w2v(sent, word_to_vec, embedding_dim):\n sum_vec = np.zeros((embedding_dim,))\n known_tokens = 0\n for token in sent.text:\n if (token in word_to_vec.dict):\n known_tokens += 1\n sum_vec += word_to_vec[token]\n if (known_tokens != 0):\n return sum_vec / known_tokens\n else:\n return sum_vec", "def get_review_embedding(review):\n review_sentences = nltk_tokenize.sent_tokenize(review)\n sentence_embeddings = list(map(get_sentence_embedding, review_sentences))\n if len(sentence_embeddings) == 0:\n print(\"Sentence_embeddings are empty!\")\n print(review)\n return torch.zeros(1,128)\n if review_embedding_type == \"avg\":\n # avg over all pairs [pairs, 1, 128] => [1, 128]\n mean = torch.mean(torch.stack(sentence_embeddings), axis=0)\n return mean", "def create_embedding_matrix(self):\n self.id2word = dict([(self.vocab[word]['id'], word) for word in self.vocab])\n vocab_size = len(self.vocab)\n result = np.zeros((vocab_size, self.embed_dim))\n unknown_token_set = set()\n\n found_words = 0\n avg = np.zeros(self.embed_dim)\n for _ in range(1, vocab_size): # skip PAD embedding (initialize as zero embedding)\n try:\n result[_] = self.pretrained_embedding[self.id2word[_]]\n avg += result[_]\n found_words += 1\n except:\n unknown_token_set.add(_)\n\n avg /= found_words\n for _ in unknown_token_set:\n result[_] = avg\n self.embedding = result\n return found_words, len(self.id2word)", "def compute_user_input_embedding(txt, model):\r\n embeddings = []\r\n tokens = txt.split(\" \")\r\n for word in tokens:\r\n embeddings.append(model.wv[word])\r\n sentence_embedding = compute_average(embeddings)\r\n return sentence_embedding", "def word_average(self, sent):\n\n mean = []\n for word in sent:\n if word in self.word_model.wv.vocab:\n mean.append(self.word_model.wv.get_vector(word) *\n self.word_idf_weight[word]) # idf weighted\n\n if not mean: # empty words\n # If a text is empty, return a vector of zeros.\n logging.warning(\n \"cannot compute average owing to no vector for {}\".format(sent))\n return np.zeros(self.vector_size)\n else:\n mean = np.array(mean).mean(axis=0)\n return mean", "def get_embeddings(vectors, text, generate_missing=False, k=300):\r\n embeddings = text.apply(lambda x: get_average_vec(x, vectors, generate_missing=generate_missing, k=k))\r\n return list(embeddings)", "def word_average(self, sent):\n mean = []\n for word in sent:\n if word in self.word_model.wv.vocab:\n mean.append(self.word_model.wv.get_vector(word))\n\n if not mean: # empty words\n # If a text is empty, return a vector of zeros.\n logging.warning(\n \"cannot compute average owing to no vector for 
{}\".format(sent))\n return np.zeros(self.vector_size)\n else:\n mean = np.array(mean).mean(axis=0)\n return mean", "def compute_mean(self):\n # load_in_all_parameters(self.save_directory, self.auto_encoder)\n for i, data_row in enumerate(self.X_train_naive):\n input_nn = data_row\n if torch.cuda.is_available():\n input_nn = Variable(torch.Tensor(np.asarray(input_nn).reshape(1, 242)).cuda())\n else:\n input_nn = Variable(torch.Tensor(np.asarray(input_nn).reshape(1, 242)))\n\n prediction_embedding = self.model.forward_only_encoding(input_nn)\n print(prediction_embedding)\n if i == 0:\n self.embedding_np = prediction_embedding.data.clone().cpu().numpy()[0]\n else:\n self.embedding_np = np.vstack((self.embedding_np, prediction_embedding.data.clone().cpu().numpy()[0]))\n self.mean_embedding = np.average(self.embedding_np, axis=0)\n print('mean embedding is ', self.mean_embedding)", "def aggregate_embeddings(list_of_embeddings):\n\n return np.mean(list_of_embeddings, axis=0)", "def word_averaging(wv, words):\n all_words, mean = set(), []\n \n for word in words:\n if isinstance(word, np.ndarray):\n mean.append(word)\n elif word in wv.vocab:\n mean.append(wv.vectors_norm[wv.vocab[word].index])\n all_words.add(wv.vocab[word].index)\n\n if not mean:\n logging.warning(\"Cannot compute similarity with no input: %s\", words)\n # Remove these examples in pre-processing...\n return np.zeros(50,)\n\n mean = gensim.matutils.unitvec(np.array(mean).mean(axis=0)).astype(np.float32)\n \n return mean", "def compute_avg_w2v_vector(w2v_dict, text_nlp_proc):\n SIZE = 50 # size of the w2v dimension\n list_of_word_vectors = [w2v_dict[w] for w in text_nlp_proc if w in w2v_dict.vocab.keys()]\n if len(list_of_word_vectors) == 0:\n result = [0.0]*SIZE\n else:\n result = np.sum(list_of_word_vectors, axis=0) / len(list_of_word_vectors)\n return result", "def kernels_bayes_average(g_y, w):\n return np.dot(g_y, w)", "def avg_sentence_vector(words, model, num_features, index2word_set):\n featureVec = np.zeros((num_features,), dtype=\"float32\")\n nwords = 0\n\n for word in words:\n if word in index2word_set:\n nwords = nwords+1\n featureVec = np.add(featureVec, model.wv[word])\n # featureVec = np.add(featureVec, model.wv.__getitem__(word))\n\n if nwords>0:\n featureVec = np.divide(featureVec, nwords)\n return featureVec", "def mean_encoder(description):\n global wv, table\n d = description.translate(table).lower().split()\n r = np.array(list(map(lambda x: wv.get_vector(x), d)),\n dtype=np.float32)\n return r.mean(axis=0)", "def generate_avg_vector(self, data):\r\n doc=nlp(data)\r\n data_vector = [token.vector for token in doc]\r\n mean_vector = np.mean(data_vector, axis=0)\r\n return mean_vector", "def embedd_data(training_data_text, e_arr, e_dict):\n num_samples = len(training_data_text)\n embedded = np.zeros([num_samples, MAX_WORDS_IN_REVIEW, EMBEDDING_SIZE])\n for i in range(num_samples):\n review_mat = np.zeros([MAX_WORDS_IN_REVIEW, EMBEDDING_SIZE])\n # Iterate to either the end of the sentence of the max num of words, whichever is less\n for w in range(min(len(training_data_text[i]), MAX_WORDS_IN_REVIEW)):\n # assign embedding of that word or to the UNK token if that word isn't in the dict\n review_mat[w] = e_arr[e_dict.get(training_data_text[i][w], 0)]\n embedded[i] = review_mat\n return embedded", "def _merge_embeddings(self, hidden_states:List[List[float]], indices_subwords:List[List[int]]):\n embed_output = []\n # ignore the first and the last tokens which are respectively the [CLS] and [SEP] tokens\n hidden_states 
= hidden_states[1:-1 ,:]\n sentence_output = []\n for indices_to_merge in indices_subwords:\n # average the embeddings of the subwords of a word \n sentence_output.append(torch.mean(hidden_states[indices_to_merge], axis=0))\n embed_output.append(torch.stack(sentence_output).to(self.device))\n return embed_output", "def generate_initial_embs(emb_type):\n def _get_emb_avg(g, lang):\n \"\"\"Compute the embedding of g as the average of its word embeddings\n :param g: the input genre\n :param lang: language\n :return: the embedding and if all words of this genre are known\n \"\"\"\n emb = np.zeros(emb_dims[lang])\n known_words_count = 0\n words = g.split()\n for w in words:\n if w in models[lang]:\n emb += models[lang][w]\n known_words_count += 1\n emb /= len(words)\n return emb, known_words_count > 0\n\n def _get_emb_wavg(g, lang, a=0.001):\n \"\"\"Compute the embeddings of g with a sentence embedding algorithm (average weighted by the word estimated frequencies)\n :param g: the input genre\n :param lang: language\n :param a: a model hyper-parameter (see Arora et al. in the paper)\n :return: the embedding and if all words of this genre are known\n \"\"\"\n emb = np.zeros(emb_dims[lang])\n known_words_count = 0\n words = g.split()\n for w in words:\n if w in models[lang]:\n emb += a / (a + word_freqs[lang][w]) * models[lang][w]\n known_words_count += 1\n emb /= len(words)\n return emb, known_words_count > 0\n\n def _remove_pc(df_embs, npc=1):\n \"\"\"Remove the pc (see Arora at el. in the paper)\n :param df_embs: the input embeddings\n :return: the normalized embeddings\n \"\"\"\n pc = _compute_pc(df_embs, npc)\n if npc == 1:\n df_embs_out = df_embs - df_embs.dot(pc.transpose()) * pc\n else:\n df_embs_out = df_embs - df_embs.dot(pc.transpose()).dot(pc)\n return df_embs_out\n\n def _compute_pc(df_embs, npc=1):\n \"\"\"Compute the pc (see Arora at el. 
in the paper)\n :param df_embs: the input embeddings\n :return: the principal component\n \"\"\"\n svd = TruncatedSVD(n_components=npc, n_iter=7, random_state=0)\n svd.fit(df_embs)\n return svd.components_\n\n embs = {}\n known = {}\n for g in G.nodes:\n lang = g[:2]\n norm_g = TagManager.normalize_tag_wtokenization(g, tries[lang], prefixed=True)\n if emb_type == 'avg':\n embs[g], known[g] = _get_emb_avg(norm_g, lang)\n else:\n embs[g], known[g] = _get_emb_wavg(norm_g, lang)\n\n embs = pd.DataFrame(embs).T # the embeddings are columns\n if emb_type == 'sif': # the algorithm imposes a normalization\n norm_embs = _remove_pc(embs.to_numpy())\n embs = pd.DataFrame(norm_embs, columns=embs.columns, index=embs.index)\n return embs, known", "def encode(input_dict: dict):\n # Calculation\n n_embeddings: int = 0\n # Course View:\n if len(input_dict['course_view']) > 0:\n course_view_embedding = np.mean(\n [MODELS[\"user_course_view_embedding\"].wv[_word] for _word in input_dict['course_view']], axis=0)\n n_embeddings += 1\n else:\n course_view_embedding = np.zeros(100,)\n # User Interests:\n if len(input_dict['user_interests']) > 0:\n user_interests_embedding = np.mean(\n [MODELS[\"user_interests_embedding\"].wv[_word] for _word in input_dict['user_interests']], axis=0)\n n_embeddings += 1\n else:\n user_interests_embedding = np.zeros(100,)\n\n # Assessment:\n if len(input_dict['assessment']) > 0:\n # weighted average:\n assessment_embedding = np.array(\n [MODELS[\"user_assessment_scores_embedding\"].wv[_word[0]] for _word in input_dict['assessment']])\n weights = np.array([_word[1]\n for _word in input_dict['assessment']]).reshape(1, -1)\n assessment_embedding = weights.dot(assessment_embedding)\n n_embeddings += 1\n else:\n user_interests_embedding = np.zeros(100,)\n\n all_avg_embedding = (course_view_embedding +\n user_interests_embedding + user_interests_embedding)/n_embeddings\n return all_avg_embedding", "def calculate_embeddings_with_graph(sess, images): \n \n # Get input and output tensors\n images_placeholder = tf.get_default_graph().get_tensor_by_name(INPUT_LAYER)\n embeddings = tf.get_default_graph().get_tensor_by_name(EMBEDDINGS_LAYER)\n\n # Run forward pass to calculate embeddings\n feed_dict = { images_placeholder: images }\n emb = sess.run(embeddings, feed_dict=feed_dict)\n \n return emb", "def sentence_to_avg(sentence, word_to_vec_map):\n # Get a valid word contained in the word_to_vec_map. \n any_word = list(word_to_vec_map.keys())[0]\n \n ### START CODE HERE ###\n # Step 1: Split sentence into list of lower case words (≈ 1 line)\n words = sentence.lower().split()\n\n # Initialize the average word vector, should have the same shape as your word vectors.\n avg = np.zeros(word_to_vec_map[any_word].shape)\n \n # Initialize count to 0\n count = 0\n \n # Step 2: average the word vectors. You can loop over the words in the list \"words\".\n for w in words:\n # Check that word exists in word_to_vec_map\n if w in word_to_vec_map:\n avg += word_to_vec_map[w]\n # Increment count\n count +=1\n \n if count > 0:\n # Get the average. 
But only if count > 0\n avg = avg / count\n \n ### END CODE HERE ###\n \n return avg", "def average_weights(w):\n w_avg = copy.deepcopy(w[0])\n for key in w_avg.keys():\n for i in range(1, len(w)):\n w_avg[key] += w[i][key]\n w_avg[key] = torch.div(w_avg[key], len(w))\n return w_avg", "def average_weights(w):\n w_avg = copy.deepcopy(w[0])\n for key in w_avg.keys():\n for i in range(1, len(w)):\n w_avg[key] += w[i][key]\n w_avg[key] = torch.div(w_avg[key], len(w))\n return w_avg", "def average_weights(w):\n w_avg = copy.deepcopy(w[0])\n for key in w_avg.keys():\n for i in range(1, len(w)):\n w_avg[key] += w[i][key]\n w_avg[key] = torch.div(w_avg[key], len(w))\n return w_avg", "def average_word_length(self, text):\n return np.mean([len(word) for word in text])", "def average_one_hots(sent, word_to_ind):\n known_words = 0\n size = len(word_to_ind.keys())\n sum_vec = np.zeros((size,))\n for token in sent.text: #going over all tokens and summing their embeddings\n if (token in word_to_ind):\n sum_vec += get_one_hot(size, word_to_ind[token])\n known_words += 1\n if (known_words != 0):\n return sum_vec / known_words\n else:\n return sum_vec", "def get_word_embeddings(self):\n embedding_index = {}\n with open('./glove/glove.6B.100d.txt', encoding=\"utf8\") as f:\n for line in f:\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embedding_index[word] = coefs\n return embedding_index" ]
[ "0.7578313", "0.69586146", "0.6763189", "0.6715363", "0.66505134", "0.66335744", "0.662246", "0.65837944", "0.6570178", "0.6482712", "0.6480774", "0.6461312", "0.6448638", "0.623814", "0.61564434", "0.61225295", "0.6091059", "0.6087193", "0.60220784", "0.6008058", "0.6005115", "0.5994095", "0.5984646", "0.5978634", "0.5970233", "0.5970233", "0.5970233", "0.5950524", "0.5917339", "0.59121" ]
0.7281524
1
Compute the embeddings of g with a sentence embedding algorithm (average weighted by the word estimated frequencies)
def _get_emb_wavg(g, lang, a=0.001):
    emb = np.zeros(emb_dims[lang])
    known_words_count = 0
    words = g.split()
    for w in words:
        if w in models[lang]:
            emb += a / (a + word_freqs[lang][w]) * models[lang][w]
            known_words_count += 1
    emb /= len(words)
    return emb, known_words_count > 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_embeddings(vectors, text, generate_missing=False, k=300):\r\n embeddings = text.apply(lambda x: get_average_vec(x, vectors, generate_missing=generate_missing, k=k))\r\n return list(embeddings)", "def compute_user_input_embedding(txt, model):\r\n embeddings = []\r\n tokens = txt.split(\" \")\r\n for word in tokens:\r\n embeddings.append(model.wv[word])\r\n sentence_embedding = compute_average(embeddings)\r\n return sentence_embedding", "def create_embedding_matrix(self):\n self.id2word = dict([(self.vocab[word]['id'], word) for word in self.vocab])\n vocab_size = len(self.vocab)\n result = np.zeros((vocab_size, self.embed_dim))\n unknown_token_set = set()\n\n found_words = 0\n avg = np.zeros(self.embed_dim)\n for _ in range(1, vocab_size): # skip PAD embedding (initialize as zero embedding)\n try:\n result[_] = self.pretrained_embedding[self.id2word[_]]\n avg += result[_]\n found_words += 1\n except:\n unknown_token_set.add(_)\n\n avg /= found_words\n for _ in unknown_token_set:\n result[_] = avg\n self.embedding = result\n return found_words, len(self.id2word)", "def get_sentence_average_w2v(sent, word_to_vec, embedding_dim):\n sum_vec = np.zeros((embedding_dim,))\n known_tokens = 0\n for token in sent.text:\n if (token in word_to_vec.dict):\n known_tokens += 1\n sum_vec += word_to_vec[token]\n if (known_tokens != 0):\n return sum_vec / known_tokens\n else:\n return sum_vec", "def generate_initial_embs(emb_type):\n def _get_emb_avg(g, lang):\n \"\"\"Compute the embedding of g as the average of its word embeddings\n :param g: the input genre\n :param lang: language\n :return: the embedding and if all words of this genre are known\n \"\"\"\n emb = np.zeros(emb_dims[lang])\n known_words_count = 0\n words = g.split()\n for w in words:\n if w in models[lang]:\n emb += models[lang][w]\n known_words_count += 1\n emb /= len(words)\n return emb, known_words_count > 0\n\n def _get_emb_wavg(g, lang, a=0.001):\n \"\"\"Compute the embeddings of g with a sentence embedding algorithm (average weighted by the word estimated frequencies)\n :param g: the input genre\n :param lang: language\n :param a: a model hyper-parameter (see Arora et al. in the paper)\n :return: the embedding and if all words of this genre are known\n \"\"\"\n emb = np.zeros(emb_dims[lang])\n known_words_count = 0\n words = g.split()\n for w in words:\n if w in models[lang]:\n emb += a / (a + word_freqs[lang][w]) * models[lang][w]\n known_words_count += 1\n emb /= len(words)\n return emb, known_words_count > 0\n\n def _remove_pc(df_embs, npc=1):\n \"\"\"Remove the pc (see Arora at el. in the paper)\n :param df_embs: the input embeddings\n :return: the normalized embeddings\n \"\"\"\n pc = _compute_pc(df_embs, npc)\n if npc == 1:\n df_embs_out = df_embs - df_embs.dot(pc.transpose()) * pc\n else:\n df_embs_out = df_embs - df_embs.dot(pc.transpose()).dot(pc)\n return df_embs_out\n\n def _compute_pc(df_embs, npc=1):\n \"\"\"Compute the pc (see Arora at el. 
in the paper)\n :param df_embs: the input embeddings\n :return: the principal component\n \"\"\"\n svd = TruncatedSVD(n_components=npc, n_iter=7, random_state=0)\n svd.fit(df_embs)\n return svd.components_\n\n embs = {}\n known = {}\n for g in G.nodes:\n lang = g[:2]\n norm_g = TagManager.normalize_tag_wtokenization(g, tries[lang], prefixed=True)\n if emb_type == 'avg':\n embs[g], known[g] = _get_emb_avg(norm_g, lang)\n else:\n embs[g], known[g] = _get_emb_wavg(norm_g, lang)\n\n embs = pd.DataFrame(embs).T # the embeddings are columns\n if emb_type == 'sif': # the algorithm imposes a normalization\n norm_embs = _remove_pc(embs.to_numpy())\n embs = pd.DataFrame(norm_embs, columns=embs.columns, index=embs.index)\n return embs, known", "def analyze_embeddings(emb):\n dic = {\"Hi\": 0, \"En\": 1, \"Ot\": 2}\n count = [0, 0, 0, 0]\n count_zero = [0, 0, 0, 0]\n for i, j in zip(emb, corpus_trans):\n for k, l in zip(i, j):\n count[dic[l[1]]] += 1\n if sum(k) == 0:\n count_zero[dic[l[1]]] += 1\n count[-1] = sum(count)\n count_zero[-1] - sum(count_zero)\n print(\"hi, en, ot, total\")\n print(\"count: \", count)\n print(\"zero count: \", count_zero)", "def _get_emb_avg(g, lang):\n emb = np.zeros(emb_dims[lang])\n known_words_count = 0\n words = g.split()\n for w in words:\n if w in models[lang]:\n emb += models[lang][w]\n known_words_count += 1\n emb /= len(words)\n return emb, known_words_count > 0", "def word_analogy(self):\n data = open(\"data/word_analogy_subset.en.ar.txt\").read().split('\\n')\n data = [x for x in data if len(x.split()) == 4]\n cnt = 0\n keys = list(self.embeddings_index.keys())\n vectors = np.array(list(self.embeddings_index.values()))\n norms = np.linalg.norm(vectors, axis=1)\n for i in data:\n i = self.preprocessor(i).split()\n try:\n v = self.embeddings_index[i[0]] - self.embeddings_index[i[1]] + self.embeddings_index[i[2]]\n except:\n continue\n unit = v / np.linalg.norm(v)\n dists = np.dot(vectors, unit) / norms\n best = np.argpartition(-dists, 10)[:10 + 1]\n best = best.take(np.argsort((-dists).take(best)))\n result = [(keys[sim], float(dists[sim]))\n for sim in best]\n sbv = result[:10]\n for j in sbv:\n if j[0] == i[3]:\n cnt += 1\n return cnt/ len(data)", "def calc_weighted_frequency(words,ps,lem,stopWords,text_string):\r\n \r\n\r\n word_frequencies = dict()\r\n for word in words:\r\n word = ps.stem(word)\r\n word = lem.lemmatize(word)\r\n print(word)\r\n if word not in stopWords:\r\n if word not in word_frequencies:\r\n word_frequencies[word] = 1\r\n else:\r\n word_frequencies[word] += 1\r\n \r\n maximum_frequncy = max(word_frequencies.values())\r\n for word in word_frequencies.keys():\r\n word_frequencies[word] = (word_frequencies[word]/maximum_frequncy) \r\n print(word_frequencies)\r\n return word_frequencies", "def gen_embedding(text, model, tokenizer):\n ### Tokenize the texts\n encoded_input = tokenizer(text, padding=True, truncation=True, max_length=512, return_tensors='pt')\n \n ### Encode the tokenized data with model\n with torch.no_grad():\n model_output = model(**encoded_input)\n \n ### Pool the outputs into a single vector\n sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])\n return sentence_embeddings", "def get_word_embeddings(self):\n embedding_index = {}\n with open('./glove/glove.6B.100d.txt', encoding=\"utf8\") as f:\n for line in f:\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embedding_index[word] = coefs\n return embedding_index", "def embedd_data(training_data_text, 
e_arr, e_dict):\n num_samples = len(training_data_text)\n embedded = np.zeros([num_samples, MAX_WORDS_IN_REVIEW, EMBEDDING_SIZE])\n for i in range(num_samples):\n review_mat = np.zeros([MAX_WORDS_IN_REVIEW, EMBEDDING_SIZE])\n # Iterate to either the end of the sentence of the max num of words, whichever is less\n for w in range(min(len(training_data_text[i]), MAX_WORDS_IN_REVIEW)):\n # assign embedding of that word or to the UNK token if that word isn't in the dict\n review_mat[w] = e_arr[e_dict.get(training_data_text[i][w], 0)]\n embedded[i] = review_mat\n return embedded", "def _get_mean_embedding(self, words):\n\n # ensure the size still matches if it's loaded from pretrained word vectors\n size = self.size\n if self.w2v is not None:\n size = next(iter(self.w2v_.values())).size\n\n zero = np.zeros(size)\n if self.tfidf:\n embedding = np.mean([self.w2v_[w] * self.w2idf_[w]\n if w in self.w2v_ else zero for w in words], axis = 0)\n else:\n embedding = np.mean([self.w2v_.get(w, zero) for w in words], axis = 0)\n\n return embedding", "def forward(self, doc):\n out = torch.tensor([]).float().to(self.device)\n\n for i in range(len(doc)):\n sentences_raw = sentencesplit(cleantxt(doc[i]))\n sentences_ready = torch.tensor([]).float().to(self.device)\n for sentence in sentences_raw:\n sentence = sentence.split()\n if sentence == []:\n continue\n lookup_tensor = torch.tensor([]).long().to(self.device)\n for word in sentence:\n if word in self.embedd_dict:\n lookup_tensor = torch.cat((lookup_tensor,\n torch.LongTensor([self.embedd_dict[word]])), 0)\n else:\n lookup_tensor = torch.cat((lookup_tensor, torch.LongTensor([0])), 0)\n # Word embedding\n xw = self.word_embedding(lookup_tensor).view(1, -1, self.embedding_dim).to(self.device)\n # Word GRU\n self.hidden_gru_words = self.init_hidden_words()\n hw, self.hidden_gru_words = self.gru_word(xw, self.hidden_gru_words)\n # Word MLP\n uw = nn.Tanh()(self.MLP_word(hw)).to(self.device)\n # Word attention\n attention_score = torch.matmul(uw, self.attention_word).squeeze().to(self.device)\n attention_score = F.softmax(attention_score, dim=0).view(uw.size(0), uw.size(1), 1).to(self.device)\n scored_x = (hw * attention_score).to(self.device)\n s = torch.sum(scored_x, dim=1).to(self.device)\n #collecting sentences\n sentences_ready = torch.cat((sentences_ready, s), 0)\n # Sentence GRU\n if len(sentences_ready) == 0:\n out = torch.cat((out,\n torch.randn(1, self.number_cat).to(self.device)), 0).to(self.device)\n continue\n sentences_ready_gru = sentences_ready.view(1, -1, self.embedding_dim).to(self.device)\n self.hidden_gru_sentences = self.init_hidden_sentences()\n hs, self.hidden_gru_sentences = self.gru_sentence(torch.tensor(sentences_ready_gru), self.hidden_gru_sentences)\n # SENTENCE MLP\n us = nn.Tanh()(self.MLP_sentence(hs)).to(self.device)\n # Sentence attention\n attention_score = torch.matmul(us, self.attention_sentence).squeeze().to(self.device)\n attention_score = F.softmax(attention_score, dim=0).view(us.size(0), us.size(1), 1).to(self.device)\n scored_x = (hs * attention_score).to(self.device)\n v = torch.sum(scored_x, dim=1).to(self.device)\n # classification\n p = self.MLP_classification(v).to(self.device)\n out = torch.cat((out, p.float()), 0).float().to(self.device)\n return out", "def compute_avg_w2v_vector(w2v_dict, text_nlp_proc):\n SIZE = 50 # size of the w2v dimension\n list_of_word_vectors = [w2v_dict[w] for w in text_nlp_proc if w in w2v_dict.vocab.keys()]\n if len(list_of_word_vectors) == 0:\n result = [0.0]*SIZE\n else:\n result = 
np.sum(list_of_word_vectors, axis=0) / len(list_of_word_vectors)\n return result", "def glove_embedding(self, texts, file):\n self.embedding_dict = dict()\n glove_file = open(file, encoding='utf-8')\n for line in glove_file:\n word_vector = line.split()\n word = word_vector[0]\n word_vector_arr = np.asarray(word_vector[1:], dtype='float32')\n self.embedding_dict[word] = word_vector_arr\n glove_file.close()\n \n i = 0\n with pgb.ProgressBar(max_value=len(texts)) as bar:\n for text in texts:\n vec = []\n text = text.split()\n for t in text:\n try:\n vec.append(self.embedding_dict[t.lower()])\n except KeyError:\n pass\n ## There are no matched words\n if len(vec) == 0:\n print(\"len 0 vec\")\n self.word_vec.append(np.zeros((100)))\n else:\n #print(np.array(vec))\n #print(np.array(vec).shape)\n sentence = self.sentence_vec(np.array(vec))\n #print(sentence)\n #print(sentence.shape)\n self.word_vec.append(sentence)\n i += 1\n bar.update(i)\n self.word_vec = np.array(self.word_vec)\n print(self.word_vec.shape)", "def _calc_final_dist(_enc_batch_extend_vocab, vocab_dists, attn_dists, p_gens, batch_oov_len, vocab_size, batch_size):\n\n # Multiply vocab dists by p_gen and attention dists by (1-p_gen)\n vocab_dists = [p_gen * dist for (p_gen, dist) in zip(p_gens, vocab_dists)]\n attn_dists = [(1 - p_gen) * dist for (p_gen, dist) in zip(p_gens, attn_dists)]\n\n # Concatenate some zeros to each vocabulary dist, to hold the probabilities for in-article OOV words\n extended_vsize = vocab_size + batch_oov_len # the maximum (over the batch) size of the extended vocabulary\n extra_zeros = tf.zeros((batch_size, batch_oov_len))\n # list length max_dec_steps of shape (batch_size, extended_vsize)\n vocab_dists_extended = [tf.concat(axis=1, values=[dist, extra_zeros]) for dist in vocab_dists]\n\n # Project the values in the attention distributions onto the appropriate entries in the final distributions\n # This means that if a_i = 0.1 and the ith encoder word is w, and w has index 500 in the vocabulary,\n # then we add 0.1 onto the 500th entry of the final distribution\n # This is done for each decoder timestep.\n # This is fiddly; we use tf.scatter_nd to do the projection\n batch_nums = tf.range(0, limit=batch_size) # shape (batch_size)\n batch_nums = tf.expand_dims(batch_nums, 1) # shape (batch_size, 1)\n attn_len = tf.shape(_enc_batch_extend_vocab)[1] # number of states we attend over\n batch_nums = tf.tile(batch_nums, [1, attn_len]) # shape (batch_size, attn_len)\n indices = tf.stack((batch_nums, _enc_batch_extend_vocab), axis=2) # shape (batch_size, enc_t, 2)\n shape = [batch_size, extended_vsize]\n\n # list length max_dec_steps (batch_size, extended_vsize)\n attn_dists_projected = [tf.scatter_nd(indices, copy_dist, shape) for copy_dist in attn_dists]\n\n # Add the vocab distributions and the copy distributions together to get the final distributions\n # final_dists is a list length max_dec_steps; each entry is a tensor shape (batch_size, extended_vsize) giving\n # the final distribution for that decoder timestep\n # Note that for decoder timesteps and examples corresponding to a [PAD] token, this is junk - ignore.\n final_dists = [vocab_dist + copy_dist for (vocab_dist, copy_dist) in\n zip(vocab_dists_extended, attn_dists_projected)]\n\n return final_dists", "def avg_sentence_vector(words, model, num_features, index2word_set):\n featureVec = np.zeros((num_features,), dtype=\"float32\")\n nwords = 0\n\n for word in words:\n if word in index2word_set:\n nwords = nwords+1\n featureVec = np.add(featureVec, 
model.wv[word])\n # featureVec = np.add(featureVec, model.wv.__getitem__(word))\n\n if nwords>0:\n featureVec = np.divide(featureVec, nwords)\n return featureVec", "def encode(input_dict: dict):\n # Calculation\n n_embeddings: int = 0\n # Course View:\n if len(input_dict['course_view']) > 0:\n course_view_embedding = np.mean(\n [MODELS[\"user_course_view_embedding\"].wv[_word] for _word in input_dict['course_view']], axis=0)\n n_embeddings += 1\n else:\n course_view_embedding = np.zeros(100,)\n # User Interests:\n if len(input_dict['user_interests']) > 0:\n user_interests_embedding = np.mean(\n [MODELS[\"user_interests_embedding\"].wv[_word] for _word in input_dict['user_interests']], axis=0)\n n_embeddings += 1\n else:\n user_interests_embedding = np.zeros(100,)\n\n # Assessment:\n if len(input_dict['assessment']) > 0:\n # weighted average:\n assessment_embedding = np.array(\n [MODELS[\"user_assessment_scores_embedding\"].wv[_word[0]] for _word in input_dict['assessment']])\n weights = np.array([_word[1]\n for _word in input_dict['assessment']]).reshape(1, -1)\n assessment_embedding = weights.dot(assessment_embedding)\n n_embeddings += 1\n else:\n user_interests_embedding = np.zeros(100,)\n\n all_avg_embedding = (course_view_embedding +\n user_interests_embedding + user_interests_embedding)/n_embeddings\n return all_avg_embedding", "def prepare_embeddings_dict(ixtow, source, output):\n dim = source[list(source)[0]].shape[0]\n print('Embedding dimension : %d' % dim)\n G = np.zeros((len(ixtow) + 1, dim), dtype=\"float32\")\n print(\"EOS norm:\", np.sum(G[0] ** 2))\n for i in range(1, len(ixtow) + 1):\n word = ixtow[str(i)]\n if word.lower() in source:\n G[i] = source[word.lower()]\n if not np.sum(G[i] ** 2):\n raise ValueError(\"Norm of the embedding null > token %d | word %s\" % (i, word))\n else:\n try:\n if CORRECT[word.lower()] in source:\n print(\"Correcting %s into %s\" % (word.lower(), CORRECT[word.lower()]))\n word = CORRECT[word.lower()]\n G[i] = source[word]\n if not np.sum(G[i] ** 2):\n raise ValueError(\"Norm of the embedding null > token %d | word %s\" % (i, word))\n except:\n print(\"Missing word %s in the given embeddings\" % word)\n pd(G, output)\n return G", "def build_sense_embedding(target_sense_to_id, word_freq, EMBEDDING_DIM):\r\n res = {}\r\n wordvecs = load_glove(EMBEDDING_DIM)\r\n \r\n for target_sense_list in target_sense_to_id:\r\n for key, _ in target_sense_list.items():\r\n sense_vector = np.zeros(EMBEDDING_DIM)\r\n senses = key.split(',')\r\n n = 0\r\n for sensekey in senses:\r\n #print(sensekey) \r\n if '/' in sensekey:\r\n continue\r\n sense_synset = sc2ss(sensekey)\r\n if sense_synset:\r\n sense_vector += build_sense_vector(sense_synset, word_freq, wordvecs)\r\n n += 1\r\n if n != 0:\r\n res[key] = sense_vector/n\r\n return res", "def generate_sentence_embeddings():\n generate_embeddings_sentence(\"Data/en-train.json\", \"Data_Sent_Embds/en_sent.pkl\")\n generate_embeddings_sentence(\"Data/es-train.json\", \"Data_Sent_Embds/es_sent.pkl\")\n generate_embeddings_sentence(\"Data/pr-train.json\", \"Data_Sent_Embds/pr_sent.pkl\")", "def w2v_aggregation_letters(X, length_vector=100):\n global w2v_model_3gram\n if w2v_model_3gram == None:\n w2v_model_3gram = gensim.models.KeyedVectors.load_word2vec_format(os.path.join(os.environ['NOBULL_PATH'], 'w2v_char.vec'))\n X_raw = []\n for x in X:\n x_letter = cleanText_letters(x)\n X_raw.append(x_letter)\n\n\n num_row = len(X_raw)\n\n max_matrix = np.zeros(shape=(num_row, length_vector))\n\n average_matrix = 
np.zeros(shape=(num_row, length_vector))\n\n for row in range(num_row):\n \n temp_text = X_raw[row] \n temp_vector = temp_text.split()\n \n unique_vector = list(set(temp_vector))\n num_index = len(unique_vector)\n \n temp_matrix = np.zeros(shape=(num_index, length_vector))\n \n j = 0\n for word in unique_vector:\n \n temp_matrix[j] = get_vector(word, w2v_model_3gram, 100)\n j += 1\n\n max_matrix[row] = np.maximum.reduce(temp_matrix)\n average_matrix[row] = np.mean(temp_matrix, axis=0)\n \n result = np.concatenate((average_matrix, max_matrix), axis=1)\n result = sparse.csr_matrix(result)\n \n header = []\n \n for i in range(length_vector):\n temp_string = \"neww2v_average_\" + str(i) + \"-th\"\n header.append(temp_string)\n \n for i in range(length_vector):\n temp_string = \"neww2v_maximum_\" + str(i) + \"-th\"\n header.append(temp_string)\n\n return result, header", "def text2vec(doc_tok, model, dim=300):\n doc_embedding = np.zeros(dim)\n valid_words = 0\n for word in doc_tok:\n if word in model:\n valid_words += 1\n doc_embedding += model.query(word)\n else:\n continue\n if valid_words > 0:\n return doc_embedding / valid_words\n else:\n return doc_embedding", "def calculate_vocab_distribution(text, steam=False):\n fdist = calculate_fdist(text, steam)\n fdist = {k: v for k, v in fdist.items()}\n len_vocab = sum(fdist.values())\n pairs = [(key, value / len_vocab) for key, value in fdist.items()]\n pairs.sort(key=lambda x: int(x[1]), reverse=True)\n return [x[0] for x in pairs], [x[1] for x in pairs], pairs", "def generate_conll2003_embeddings():\n glove_embedding = get_glove_embedding()\n\n word2index = {}\n idx2word = {}\n embed_array = []\n\n word2index[\"<pad>\"] = 1\n embed_array.append(init_embedding())\n\n word2index[\"<unk>\"] = 0\n embed_array.append(init_embedding())\n\n data = []\n with open(TRAIN_DATA_PATH, \"r\") as f:\n for line in f:\n data.append(json.loads(line))\n\n idx = 2\n\n for sample in tqdm(data, total=len(data)):\n words = sample[\"tokens\"]\n\n for w in words:\n w = w.lower()\n\n # if word is not present in dictionary, add to dictionary and append embedding vector\n if w not in word2index.keys():\n word2index[w] = idx\n idx += 1\n if w not in glove_embedding.keys():\n ev = init_embedding()\n else:\n ev = glove_embedding[w]\n\n embed_array.append(ev)\n\n else:\n continue\n\n # save embeddings\n embed_array = np.vstack(embed_array)\n np.save(EMBD_OUTPUT_PATH, embed_array)\n\n # save dictionary\n print(\"Dicitionary Size: \", len(word2index))\n with open(DICTIONARY_OUTPUT_PATH, \"w\") as f:\n json.dump(word2index, f)", "def word_average(self, sent):\n\n mean = []\n for word in sent:\n if word in self.word_model.wv.vocab:\n mean.append(self.word_model.wv.get_vector(word) *\n self.word_idf_weight[word]) # idf weighted\n\n if not mean: # empty words\n # If a text is empty, return a vector of zeros.\n logging.warning(\n \"cannot compute average owing to no vector for {}\".format(sent))\n return np.zeros(self.vector_size)\n else:\n mean = np.array(mean).mean(axis=0)\n return mean", "def score(self, sentence):\n # TODO your code here\n\n # initialize count with trained data\n unigram_count = self.count.copy()\n N = self.total\n\n # make a new key for UNK, add-one later\n for token in sentence:\n if token not in unigram_count:\n unigram_count[token] = 0\n\n # calcutate lopP(<s>) + logP(w1) + logP(w2) + ...\n score = 0.0 # P(<s>) = 1\n V = len(unigram_count) # the number of vocab including UNK\n for word in sentence:\n prob = float((unigram_count[word] + 1) / (N + V)) # c(w) + 
1 / N + V\n score += math.log(prob)\n\n return score", "def get_word_embeddings(t, folder, lang=\"en\"):\n vecs_url = f\"https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.{lang}.300.vec.gz\"\n vecs_gz_filename = vecs_url.rpartition(\"/\")[2]\n os.makedirs(folder, exist_ok=True)\n vecs_gz_filepath = os.path.join(folder, vecs_gz_filename)\n\n tokenizer_vocab_size = len(t.vocab)\n\n if wait_for_file_stable(vecs_gz_filepath):\n print(\"Using existing embeddings file\")\n else:\n print(\"Downloading word vectors...\")\n subprocess.run([\" \".join([\"wget\", \"-NP\", folder, vecs_url])], check=True, shell=True)\n\n print(\"Loading into memory...\")\n embeddings_index = dict()\n with gzip.open(vecs_gz_filepath, \"rt\") as zipf:\n firstline = zipf.readline()\n emb_vocab_size, emb_d = firstline.split(\" \")\n emb_vocab_size = int(emb_vocab_size)\n emb_d = int(emb_d)\n for line in zipf:\n values = line.split()\n word = values[0]\n # Only load subset of the embeddings recognised by the tokenizer:\n if word in t.vocab.stoi:\n coefs = np.asarray(values[1:], dtype=\"float32\")\n embeddings_index[word] = coefs\n print(\"Loaded {} of {} word vectors for tokenizer vocabulary length {}\".format(\n len(embeddings_index),\n emb_vocab_size,\n tokenizer_vocab_size,\n ))\n\n # create a weight matrix for words in training docs\n embedding_matrix = np.zeros((tokenizer_vocab_size, emb_d))\n for word, i in t.vocab.stoi.items():\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\n\n return embedding_matrix", "def get_embedding(num_embeddings, embedding_dim, padding_idx=None):\n half_dim = embedding_dim // 2\n emb = math.log(10000) / (half_dim - 1)\n emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)\n emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0)\n emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)\n if embedding_dim % 2 == 1:\n emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)\n if padding_idx is not None:\n emb[padding_idx, :] = 0\n return emb" ]
[ "0.7166899", "0.68677586", "0.66370195", "0.6629657", "0.6576097", "0.64722365", "0.6452334", "0.6338862", "0.63319784", "0.63172877", "0.6285265", "0.6279344", "0.6274509", "0.624932", "0.6236681", "0.62097955", "0.6200014", "0.61913866", "0.61293215", "0.6097888", "0.6097105", "0.6090064", "0.60839903", "0.60812", "0.6078627", "0.6064572", "0.60307443", "0.60128266", "0.60031486", "0.5994985" ]
0.69361633
1
Dashboard for {minister_term} with important links
async def ministry(self, ctx):
    # """Dashboard for {minister_term} with important links and updates on new bills"""
    embed = text.SafeEmbed()
    embed.set_author(
        icon_url=self.bot.mk.NATION_ICON_URL,
        name=f"The {self.bot.mk.MINISTRY_NAME} of {self.bot.mk.NATION_FULL_NAME}",
    )

    # pretty_bills = await self.get_pretty_vetoes()
    # if not pretty_bills:
    #     pretty_bills = "There are no new bills to vote on."
    # else:
    #     pretty_bills = (
    #         f"You can vote on new bills, check `{config.BOT_PREFIX}{mk.MarkConfig.MINISTRY_COMMAND} bills`."
    #     )

    minister_value = []

    emperor = self._safe_get_member(mk.DemocracivRole.EMPEROR)

    if isinstance(emperor, discord.Member):
        minister_value.append(
            f"Emperor of Japan: {emperor.mention} {escape_markdown(str(emperor))}"
        )
    else:
        minister_value.append(f"Emperor of Japan: -")

    if isinstance(self.prime_minister, discord.Member):
        minister_value.append(
            f"{self.bot.mk.pm_term}: {self.prime_minister.mention} {escape_markdown(str(self.prime_minister))}"
        )
    else:
        minister_value.append(f"{self.bot.mk.pm_term}: -")

    # if isinstance(self.lt_prime_minister, discord.Member):
    #     minister_value.append(f"{self.bot.mk.lt_pm_term}: {self.lt_prime_minister.mention}")
    # else:
    #     minister_value.append(f"{self.bot.mk.lt_pm_term}: -")

    embed.add_field(
        name=self.bot.mk.MINISTRY_LEADERSHIP_NAME,
        value="\n".join(minister_value),
        inline=False,
    )

    try:
        ministers = self.bot.get_democraciv_role(mk.DemocracivRole.MINISTER)
        ministers = [
            f"{m.mention} {escape_markdown(str(m))}" for m in ministers.members
        ] or ["-"]
    except exceptions.RoleNotFoundError:
        ministers = ["-"]

    embed.add_field(
        name=f"{self.bot.mk.minister_term}s",
        value="\n".join(ministers),
        inline=False,
    )

    embed.add_field(
        name="Links",
        value=f"[Constitution]({self.bot.mk.CONSTITUTION})\n[Legal Code]({self.bot.mk.LEGAL_CODE})\n"
        f"[Docket/Worksheet]({self.bot.mk.LEGISLATURE_DOCKET})",
        inline=False,
    )

    # embed.add_field(name="Veto-able Bills", value=pretty_bills, inline=False)
    await ctx.send(embed=embed)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dashboard():", "def admin_index():\n return 'Super-seekrit admin page.'", "def dashboard():\n return render_template('home/dashboard.html',title='SycliQ Dashboard')", "def dashboard(self):\r\n return {}", "def terms(request):\n return render(request, 'terms.html')", "def terms_and_conditions(request):\n\treturn render(request, 'tailored/terms_and_conditions.html', {})", "def index(request):\n import datetime\n values = default_values(request)\n values['recent_links'] = Link.objects.all().order_by('-date_submitted')[0:10] \n values['most_popular_links'] = Link.objects.filter(date_submitted__gte=(datetime.datetime.today() - datetime.timedelta(days=1)) ).annotate(num_clicks_views=Count('stat')).order_by('-num_clicks_views')[0:10]\n return render_to_response(\n 'shortener/index.html',\n values,\n context_instance=RequestContext(request))", "def explainerdashboard_cli(ctx):", "def microtask_similarity_score(request):\n data = {}\n return render_to_response('my_admin_tools/menu/microtask_similarity_score.html',data,context_instance=RequestContext(request))", "def home():\n return(\n f\"Available Routes:<br/>\"\n f\"Precipitation: /api/v1.0/precipitation<br/>\"\n f\"List of Stations: /api/v1.0/stations<br/>\"\n f\"Temperature for one year: /api/v1.0/tobs<br/>\"\n f\"Temperature stat from the start date(yyyy-mm-dd): /api/v1.0/min_max_avg/<start><br/>\"\n f\"Temperature stat from start to end dates(yyyy-mm-dd): /api/v1.0/min_max_avg/<start><br/>\"\n )", "def dashboard():\n return render_template(\"admin/dashboard.html\", title=\"Dashboard\")", "def admin_only():\n return 'Super-seekrit admin page.'", "def home_page():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/start<br/>\"\n f\"/api/v1.0/start/end<br/>\"\n )", "def action_atom_site_l(obj: AtomSiteL, thread: QtCore.QThread):\n w_actions = []\n if obj.is_attribute(\"type_symbol\"):\n qtb_1 = QtWidgets.QToolButton()\n qtb_1.setText(\"Show b_scat\")\n qtb_1.clicked.connect(lambda: run_function(\n obj.report, (), thread))\n w_actions.append(qtb_1)\n return w_actions", "def barbican_url(self):", "def displayMl(self, *args):\n\t\tself.callMethod(('ManialinkManager', 'displayManialinkToLogin'), *args)", "def make_navbar_for_homepage(self):\n links = [\n \"home\", [\"Result Pages\", self._result_page_links()], \"Version\"\n ]\n if len(self.samples) > 1:\n links[1][1] += [\"Comparison\"]\n if self.publication:\n links.insert(2, \"Publication\")\n if self.gwdata is not None:\n links.append([\"Detchar\", [i for i in self.gwdata.keys()]])\n if self.notes is not None:\n links.append(\"Notes\")\n return links", "def homepage():\n return {'sample': 'ADAL'}", "def index(self):\n return render(\"/derived/rock/index.mako\")", "def index():\n title = \"Application process \"\n links = {'mentors': 'mentors',\n 'schools': 'all-school',\n 'mentors_by_country': 'mentors-by-country',\n 'contacts': 'contacts',\n 'applicants': 'applicants',\n 'applicants_and_mentors': 'applicants-and-mentors'}\n menu = ['Show mentors and schools',\n 'Show mentors and all schools',\n 'Show mentors by country',\n 'Show contacts',\n 'Show applicants',\n 'Show applicants and mentors']\n return render_template('index.html', links=links, menu=menu, title=title)", "def various(self):\n # Changer le default d'un tb, ici ne rien mettre au niveau le plus haut\n context = self\n from imio.dashboard.utils import getCollectionLinkCriterion\n criterion = getCollectionLinkCriterion(context)\n 
criterion.default = u''\n from eea.facetednavigation.criteria.interfaces import ICriteria\n ICriteria(context).criteria._p_changed = True\n\n # Réparer la vue de la page pst\n context.setLayout('view')\n from imio.project.pst.setuphandlers import configure_faceted_folder\n configure_faceted_folder(context, xml='default_dashboard_widgets.xml', default_UID=None)", "def logosmall(self):\n try:\n asset = self.app.module_map.uploader.get(self.barcamp.logo)\n except AssetNotFound:\n asset = None\n if not asset:\n return u\"\"\n v = asset.variants['medium_user']\n url = self.app.url_for(\"asset\", asset_id = v._id)\n return \"\"\"<a href=\"%s\"><img src=\"%s\" width=\"%s\" height=\"%s\"></a>\"\"\" %(\n self.handler.url_for(\"barcamps.index\", slug = self.barcamp.slug),\n url,\n v.metadata['width'],\n v.metadata['height'])", "def short(request):\n\n assert isinstance(request, HttpRequest)\n template = 'short.html'\n context = {'title': 'Short overview',\n 'is_longdale_user': user_is_ingroup(request, 'longdale_user'),\n 'message': 'Radboud University CESAR short intro',\n 'year': datetime.now().year}\n return render(request, template, context)", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"<a href='/api/v1.0/precipitation'>Precipitation</a><br/>\"\n f\"<a href='/api/v1.0/stations'>Stations</a><br/>\"\n f\"<a href='/api/v1.0/tobs'>Temperature</a><br/>\"\n f\"<a href='/api/v1.0/start'>Start Date</a><br/>\"\n f\"<a href='/api/v1.0/start/end'>End Date</a><br/>\"\n )", "def home():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs\"\n )", "def before_map(self, map):\n map.connect('/about/terms-and-conditions',\n controller='ckanext.syngenta.controller:CustomPageController',\n action='toc')\n map.connect('/about/accessibility',\n controller='ckanext.syngenta.controller:CustomPageController',\n action='accessibility')\n map.connect('/about/code-of-conduct',\n controller='ckanext.syngenta.controller:CustomPageController',\n action='coc')\n map.connect('/about/moderation-policy',\n controller='ckanext.syngenta.controller:CustomPageController',\n action='moderation')\n map.connect('/about/faq',\n controller='ckanext.syngenta.controller:CustomPageController',\n action='faq')\n map.connect('/about/privacy',\n controller='ckanext.syngenta.controller:CustomPageController',\n action='privacy')\n map.connect('/about/contact-us',\n controller='ckanext.syngenta.controller:CustomPageController',\n action='contact')\n map.connect('/about/suggest-a-dataset',\n controller='ckanext.syngenta.controller:CustomPageController',\n action='suggest_a_dataset')\n map.connect('/atlas-for-africa',\n controller='ckanext.syngenta.controller:CustomPageController',\n action='atlas')\n return map", "def index_site(site, text):\n # YOUR CODE HERE #\n pass # delete this when you write your code", "def dashboard(request, template_name=\"admin/dashboard.html\"):\n return render_to_response(template_name, RequestContext(request, {\n 'user_count': User.objects.count(),\n 'reviewgroup_count': Group.objects.count(),\n 'defaultreviewer_count': DefaultReviewer.objects.count(),\n 'repository_count': Repository.objects.accessible(request.user).count(),\n 'has_cache_stats': get_has_cache_stats(),\n 'title': _(\"Dashboard\"),\n 'root_path': settings.SITE_ROOT + \"admin/db/\"\n }))", "def regression_page():\n return render_template(\"regr-matmortality.html\")", "def home():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/preciptation<br/>\"\n 
f\"/api/v1.0/Stations\"\n )" ]
[ "0.5908503", "0.56341463", "0.51922596", "0.5165035", "0.5127278", "0.51028496", "0.50016934", "0.49779627", "0.49574888", "0.49415234", "0.49412113", "0.49330497", "0.4883521", "0.48593384", "0.48565412", "0.48513862", "0.4821009", "0.48205975", "0.4820169", "0.48173407", "0.4809524", "0.47962663", "0.47741517", "0.47732422", "0.47661132", "0.47632477", "0.47586676", "0.47439346", "0.47435656", "0.47277603" ]
0.6425963
0
This method configures the job's parameters. It adds some parameters to the usual configuration when the snappy argument is enabled.
def jobconf(self):
    conf = super().jobconf()
    if self.options.snappy:
        enable_compression_options = {'mapred.output.compress': 'true',
                                      'mapred.output.compression.codec': 'org.apache.hadoop.io.compress.SnappyCodec',
                                      'mapred.output.compression.type': 'BLOCK'}
        conf.update(enable_compression_options)
    return conf
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def configure_options(self):\n super(MRJob, self).configure_options()\n self.add_passthrough_option('--snappy', action='store_true')", "def setup_config(self, args=None):\n self.config_parse(args=args)", "def configure_queued_job(self, job):\n\n config = ExecutionConfiguration()\n data = job.get_job_data()\n\n # Add input file meta-data\n input_files_dict = self._create_input_file_dict(data)\n config.set_input_files(input_files_dict)\n\n # Set up env vars for job's input data\n input_values = data.get_injected_input_values(input_files_dict)\n interface = job.job_type_rev.get_input_interface()\n\n env_vars = {}\n if isinstance(data, JobData):\n # call job.data.job_data.JobData.get_injected_env_vars\n env_vars = data.get_injected_env_vars(input_files_dict, interface)\n else:\n # call old job.configuration.data.job_data.get_injected_env_vars\n # TODO: remove once old JobData class is no longer used\n env_vars = data.get_injected_env_vars(input_files_dict)\n\n task_workspaces = {}\n if job.job_type.is_system:\n # Add any workspaces needed for this system job\n task_workspaces = QueuedExecutionConfigurator._system_job_workspaces(job)\n else:\n # Set any output workspaces needed\n output_workspaces = {}\n if job.input and 'version' in job.input and job.input['version'] == '1.0':\n # Set output workspaces using legacy job data\n self._cache_workspace_names(data.get_output_workspace_ids())\n output_workspaces = {}\n for output, workspace_id in data.get_output_workspaces().items():\n output_workspaces[output] = self._cached_workspace_names[workspace_id]\n config.set_output_workspaces(output_workspaces)\n if not output_workspaces:\n # Set output workspaces from job configuration\n output_workspaces = {}\n job_config = job.get_job_configuration()\n interface = SeedManifest(job.job_type_rev.manifest, do_validate=False)\n for output_name in interface.get_file_output_names():\n output_workspace = job_config.get_output_workspace(output_name)\n if output_workspace:\n output_workspaces[output_name] = output_workspace\n config.set_output_workspaces(output_workspaces)\n\n # Create main task with fields populated from input data\n args = job.get_job_interface().get_injected_command_args(input_values, env_vars)\n config.create_tasks(['main'])\n config.add_to_task('main', args=args, env_vars=env_vars, workspaces=task_workspaces)\n return config", "def apply_startup_params(self):\n config = self._protocol.get_startup_config()\n \n if not isinstance(config, dict):\n raise InstrumentParameterException(\"Incompatible initialization parameters\")\n \n log.trace(\"BARS driver applying config: %s\", config)\n self._protocol.set_readonly_values()\n self.set_resource(config)", "def _load_backtesting_config(self, config: Dict[str, Any]) -> Dict[str, Any]:\n\n # If -i/--ticker-interval is used we override the configuration parameter\n # (that will override the strategy configuration)\n if 'ticker_interval' in self.args and self.args.ticker_interval:\n config.update({'ticker_interval': self.args.ticker_interval})\n logger.info('Parameter -i/--ticker-interval detected ...')\n logger.info('Using ticker_interval: %s ...', config.get('ticker_interval'))\n\n # If -l/--live is used we add it to the configuration\n if 'live' in self.args and self.args.live:\n config.update({'live': True})\n logger.info('Parameter -l/--live detected ...')\n\n # If --enable-position-stacking is used we add it to the configuration\n if 'position_stacking' in self.args and self.args.position_stacking:\n config.update({'position_stacking': True})\n 
logger.info('Parameter --enable-position-stacking detected ...')\n\n # If --disable-max-market-positions is used we add it to the configuration\n if 'use_max_market_positions' in self.args and not self.args.use_max_market_positions:\n config.update({'use_max_market_positions': False})\n logger.info('Parameter --disable-max-market-positions detected ...')\n logger.info('max_open_trades set to unlimited ...')\n else:\n logger.info('Using max_open_trades: %s ...', config.get('max_open_trades'))\n\n # If --timerange is used we add it to the configuration\n if 'timerange' in self.args and self.args.timerange:\n config.update({'timerange': self.args.timerange})\n logger.info('Parameter --timerange detected: %s ...', self.args.timerange)\n\n # If --datadir is used we add it to the configuration\n if 'datadir' in self.args and self.args.datadir:\n config.update({'datadir': self._create_datadir(config, self.args.datadir)})\n else:\n config.update({'datadir': self._create_datadir(config, None)})\n logger.info('Using data folder: %s ...', config.get('datadir'))\n\n # If -r/--refresh-pairs-cached is used we add it to the configuration\n if 'refresh_pairs' in self.args and self.args.refresh_pairs:\n config.update({'refresh_pairs': True})\n logger.info('Parameter -r/--refresh-pairs-cached detected ...')\n\n if 'strategy_list' in self.args and self.args.strategy_list:\n config.update({'strategy_list': self.args.strategy_list})\n logger.info('Using strategy list of %s Strategies', len(self.args.strategy_list))\n\n if 'ticker_interval' in self.args and self.args.ticker_interval:\n config.update({'ticker_interval': self.args.ticker_interval})\n logger.info('Overriding ticker interval with Command line argument')\n\n # If --export is used we add it to the configuration\n if 'export' in self.args and self.args.export:\n config.update({'export': self.args.export})\n logger.info('Parameter --export detected: %s ...', self.args.export)\n\n # If --export-filename is used we add it to the configuration\n if 'export' in config and 'exportfilename' in self.args and self.args.exportfilename:\n config.update({'exportfilename': self.args.exportfilename})\n logger.info('Storing backtest results to %s ...', self.args.exportfilename)\n\n return config", "def configure(self, args):\n pass", "def configure(self):\n self.data_batch_file = self.get_value_from_config('data_batch_file')\n self.batch_meta_file = self.get_value_from_config('batch_meta_file')\n self.has_background = self.get_value_from_config('has_background')\n self.num_classes = self.get_value_from_config('num_classes')\n self.converted_images_dir = self.get_value_from_config('converted_images_dir')\n if not self.converted_images_dir:\n self.converted_images_dir = self.data_batch_file.parent / 'converted_images'\n self.convert_images = self.get_value_from_config('convert_images')\n # create directory for storing images if it is necessary\n if self.convert_images and not self.converted_images_dir.exists():\n self.converted_images_dir.mkdir(parents=True)\n self.dataset_meta = self.get_value_from_config('dataset_meta_file')", "def setOptions(self):\n self.parser.add_option(\"--jobid\",\n dest=\"jobid\",\n default=None,\n type=\"int\",\n help=\"Optional id of the job you want to execute locally\")\n\n self.parser.add_option(\"--enableStageout\",\n dest=\"enableStageout\",\n default=False,\n action=\"store_true\",\n help=\"After the job runs copy the output file on the storage destination\")\n\n self.parser.add_option(\"--destdir\",\n dest=\"destdir\",\n 
default=None)", "def configure(self, parser: argparse.ArgumentParser) -> None:\n pass", "def configure_args(self):\n super(MRTextClassifier, self).configure_args()\n\n self.add_passthru_arg(\n '--min-df', dest='min_df', default=2, type=int,\n help=('min number of documents an n-gram must appear in for us to'\n ' count it. Default: %(default)s'))\n self.add_passthru_arg(\n '--max-df', dest='max_df', default=10000000, type=int,\n help=('max number of documents an n-gram may appear in for us to'\n ' count it (this keeps reducers from running out of memory).'\n ' Default: %(default)s'))\n self.add_passthru_arg(\n '--max-ngram-size', dest='max_ngram_size',\n default=DEFAULT_MAX_NGRAM_SIZE, type=int,\n help='maximum phrase length to consider')\n self.add_passthru_arg(\n '--stop-words', dest='stop_words',\n default=', '.join(DEFAULT_STOP_WORDS),\n help=(\"comma-separated list of words to ignore. For example, \"\n \"--stop-words 'in, the' would cause 'hole in the wall' to be\"\n \" parsed as ['hole', 'wall']. Default: %(default)s\"))\n self.add_passthru_arg(\n '--short-doc-threshold', dest='short_doc_threshold',\n type=int, default=None,\n help=('Normally, for each n-gram size, we take the average score'\n ' over all n-grams that appear. This allows us to penalize'\n ' short documents by using this threshold as the denominator'\n ' rather than the actual number of n-grams.'))\n self.add_passthru_arg(\n '--no-test-set', dest='no_test_set',\n action='store_true', default=False,\n help=(\"Choose about half of the documents to be the testing set\"\n \" (don't use them to train the classifier) based on a SHA1\"\n \" hash of their text\"))", "def __init__(\n self,\n processing_job,\n ):\n super(ClarifyBaseliningJob, self).__init__(\n sagemaker_session=processing_job.sagemaker_session,\n job_name=processing_job.job_name,\n inputs=processing_job.inputs,\n outputs=processing_job.outputs,\n output_kms_key=processing_job.output_kms_key,\n )", "def configure(self, config: ConfigParams):\n parameters = config.get_section(\"parameters\")\n if len(parameters) > 0:\n self.__parameters = parameters", "def configure(self, *args, **kwargs):\n raise NotImplementedError()", "def configure(self, *args, **kwargs):\n raise NotImplementedError()", "def configure(self, *args, **kwargs):\n raise NotImplementedError()", "def configure(self):\n\n pass", "def configure(self, options, conf):", "def configure(self) -> None:", "def _configure(self):\n pass", "def configure(self):\n pass", "def configure(self):\n pass", "def _create_job_config(\n self,\n experiment_id: str,\n params: Optional[dict],\n pipeline_package_path: Optional[str],\n pipeline_id: Optional[str],\n version_id: Optional[str],\n enable_caching: Optional[bool],\n ):\n\n class JobConfig:\n\n def __init__(self, spec, resource_references):\n self.spec = spec\n self.resource_references = resource_references\n\n params = params or {}\n pipeline_json_string = None\n if pipeline_package_path:\n pipeline_obj = self._extract_pipeline_yaml(pipeline_package_path)\n\n # Caching option set at submission time overrides the compile time settings.\n if enable_caching is not None:\n self._override_caching_options(pipeline_obj, enable_caching)\n\n pipeline_json_string = json.dumps(pipeline_obj)\n api_params = [\n kfp_server_api.V1Parameter(\n name=sanitize_k8s_name(name=k, allow_capital_underscore=True),\n value=str(v) if type(v) not in (list, dict) else json.dumps(v))\n for k, v in params.items()\n ]\n resource_references = []\n key = kfp_server_api.models.V1ResourceKey(\n 
id=experiment_id,\n type=kfp_server_api.models.V1ResourceType.EXPERIMENT)\n reference = kfp_server_api.models.V1ResourceReference(\n key=key, relationship=kfp_server_api.models.V1Relationship.OWNER)\n resource_references.append(reference)\n\n if version_id:\n key = kfp_server_api.models.V1ResourceKey(\n id=version_id,\n type=kfp_server_api.models.V1ResourceType.PIPELINE_VERSION)\n reference = kfp_server_api.models.V1ResourceReference(\n key=key,\n relationship=kfp_server_api.models.V1Relationship.CREATOR)\n resource_references.append(reference)\n\n spec = kfp_server_api.models.V1PipelineSpec(\n pipeline_id=pipeline_id,\n workflow_manifest=pipeline_json_string,\n parameters=api_params)\n return JobConfig(spec=spec, resource_references=resource_references)", "def define_parameters(self):\n self.add_argument('--prefix', dest='prefix', type=str, optional=False,\n help='prefix for file names')\n self.add_argument('--sleepLength',\n dest = 'sleepLength',\n type = str,\n optional = True,\n help ='time to sleep before performing plugin action',\n default = '0')", "def prepare(self, config, **kwargs):\n pass", "def configure(self):\r\n pass", "def __configure(self):\n\n # CUDA\n if self.__cuda:\n if isinstance(self.__cuda, string_types):\n # Use specified path\n self.__configure_opts.append(\n '--with-cuda={}'.format(self.__cuda))\n elif self.__toolchain.CUDA_HOME:\n self.__configure_opts.append(\n '--with-cuda={}'.format(self.__toolchain.CUDA_HOME))\n else:\n # Default location\n self.__configure_opts.append('--with-cuda=/usr/local/cuda')\n else:\n self.__configure_opts.append('--without-cuda')\n\n # GDRCOPY\n if self.__gdrcopy:\n if isinstance(self.__gdrcopy, string_types):\n # Use specified path\n self.__configure_opts.append(\n '--with-gdrcopy={}'.format(self.__gdrcopy))\n else:\n # Boolean, let UCX try to figure out where to find it\n self.__configure_opts.append('--with-gdrcopy')\n elif self.__gdrcopy == False:\n self.__configure_opts.append('--without-gdrcopy')\n\n # KNEM\n if self.__knem:\n if isinstance(self.__knem, string_types):\n # Use specified path\n self.__configure_opts.append(\n '--with-knem={}'.format(self.__knem))\n else:\n # Boolean, let UCX try to figure out where to find it\n self.__configure_opts.append('--with-knem')\n elif self.__knem == False:\n self.__configure_opts.append('--without-knem')\n\n # OFED\n if self.__ofed:\n if isinstance(self.__ofed, string_types):\n # Use specified path\n self.__configure_opts.extend(\n ['--with-verbs={}'.format(self.__ofed),\n '--with-rdmacm={}'.format(self.__ofed)])\n else:\n # Boolean, let UCX try to figure out where to find it\n self.__configure_opts.extend(['--with-verbs', '--with-rdmacm'])\n elif self.__ofed == False:\n self.__configure_opts.extend(['--without-verbs',\n '--without-rdmacm'])\n\n # XPMEM\n if self.__xpmem:\n if isinstance(self.__xpmem, string_types):\n # Use specified path\n self.__configure_opts.append(\n '--with-xpmem={}'.format(self.__xpmem))\n else:\n # Boolean, let UCX try to figure out where to find it\n self.__configure_opts.append('--with-xpmem')\n elif self.__xpmem == False:\n self.__configure_opts.append('--without-xpmem')\n\n # Workaround for format warning considered an error on Power\n if hpccm.config.g_cpu_arch == cpu_arch.PPC64LE:\n if not self.__toolchain.CFLAGS:\n self.__toolchain.CFLAGS = '-Wno-error=format'", "def _load_config(self):\n\n options = dict()\n\n job_stores = self.app.config.get('SCHEDULER_JOBSTORES')\n if job_stores:\n options['jobstores'] = job_stores\n\n executors = 
self.app.config.get('SCHEDULER_EXECUTORS')\n if executors:\n options['executors'] = executors\n\n job_defaults = self.app.config.get('SCHEDULER_JOB_DEFAULTS')\n if job_defaults:\n options['job_defaults'] = job_defaults\n\n timezone = self.app.config.get('SCHEDULER_TIMEZONE')\n if timezone:\n options['timezone'] = timezone\n\n self._scheduler.configure(**options)\n\n\n self.jobconfig = self.app.config.get('SCHEDULER_JOBCONFIG', None) # Textual reference to the jobs dictionary.\n self.auth = self.app.config.get('SCHEDULER_AUTH', self.auth)\n self.api_enabled = self.app.config.get('SCHEDULER_VIEWS_ENABLED', self.api_enabled) # for compatibility reason\n self.api_enabled = self.app.config.get('SCHEDULER_API_ENABLED', self.api_enabled)\n self.allowed_hosts = self.app.config.get('SCHEDULER_ALLOWED_HOSTS', self.allowed_hosts)", "def configure(self, *args):\n raise NotImplementedError(self, \"configure\")", "def setup(args):\n cfg = get_cfg()\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n # customize reszied parameters\n # cfg['INPUT']['MIN_SIZE_TRAIN'] = (20,)\n # cfg['INPUT']['MAX_SIZE_TRAIN'] = 50\n cfg.freeze()\n default_setup(\n cfg, args\n ) # if you don't like any of the default setup, write your own setup code\n return cfg", "def configure_step(self, *args, **kwargs):\n # compiler and MPI suite should always be specified -- MUCH quicker and SAVER than autodetect\n # --with-nocross-compiler-suite=(gcc|ibm|intel|pgi|studio)\n # --with-mpi=(bullxmpi|hp|ibmpoe|intel|intel2|intelpoe|lam|mpibull2|mpich|mpich2|mpich3|openmpi|\n # platform|scali|sgimpt|sun)\n comp_opts = {\n toolchain.GCC: 'gcc',\n toolchain.INTELCOMP: 'intel',\n }\n comp_fam = self.toolchain.comp_family()\n if comp_fam in comp_opts:\n self.cfg.update('configopts', \"--with-nocross-compiler-suite=%s\" % comp_opts[comp_fam])\n else:\n self.log.error(\"Compiler family %s not supported yet (only: %s)\" % (comp_fam, ', '.join(comp_opts.keys())))\n\n mpi_opts = {\n toolchain.INTELMPI: 'intel2', # intel: Intel MPI v1.x (ancient); intelpoe: IBM POE MPI for Intel platforms\n toolchain.OPENMPI: 'openmpi',\n toolchain.MPICH: 'mpich',\n toolchain.MPICH2: 'mpich2',\n }\n mpi_fam = self.toolchain.mpi_family()\n if mpi_fam in mpi_opts:\n self.cfg.update('configopts', \"--with-mpi=%s\" % mpi_opts[mpi_fam])\n else:\n self.log.error(\"MPI family %s not supported yet (only: %s)\" % (mpi_fam, ', '.join(mpi_opts.keys())))\n\n # auto-detection for dependencies mostly works fine, but hard specify paths anyway to have full control\n deps = {\n 'binutils': ['--with-libbfd=%%s/%s' % get_software_libdir('binutils', fs=['libbfd.a'])],\n 'Cube': ['--with-cube=%s/bin'],\n 'CUDA': ['--with-libcudart=%s'],\n 'OTF2': ['--with-otf2=%s/bin'],\n 'OPARI2': ['--with-opari2=%s/bin'],\n 'PAPI': ['--with-papi-header=%s/include', '--with-papi-lib=%%s/%s' % get_software_libdir('PAPI')],\n 'PDT': ['--with-pdt=%s/bin'],\n }\n for (dep_name, dep_opts) in deps.items():\n dep_root = get_software_root(dep_name)\n if dep_root:\n for dep_opt in dep_opts:\n self.cfg.update('configopts', dep_opt % dep_root)\n\n super(EB_Score_minus_P, self).configure_step(*args, **kwargs)" ]
[ "0.76620996", "0.5828961", "0.58238906", "0.5815086", "0.5812793", "0.5800588", "0.5775317", "0.57366854", "0.5691596", "0.5638217", "0.563505", "0.56271166", "0.56101215", "0.56101215", "0.56101215", "0.5608142", "0.5561883", "0.55375654", "0.55280805", "0.5515315", "0.5515315", "0.54819256", "0.5480759", "0.54776925", "0.5461189", "0.5429615", "0.5425664", "0.5422657", "0.5408895", "0.5391527" ]
0.63829356
1
This method configures the options of the job. It adds the snappy argument, which is responsible for data compression.
def configure_options(self):
    super(MRJob, self).configure_options()
    self.add_passthrough_option('--snappy', action='store_true')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def jobconf(self):\n conf = super().jobconf()\n if self.options.snappy:\n enable_compression_options = {'mapred.output.compress': 'true',\n 'mapred.output.compression.codec': 'org.apache.hadoop.io.compress.SnappyCodec',\n 'mapred.output.compression.type': 'BLOCK'}\n conf.update(enable_compression_options)\n return conf", "def setOptions(self):\n self.parser.add_option(\"--jobid\",\n dest=\"jobid\",\n default=None,\n type=\"int\",\n help=\"Optional id of the job you want to execute locally\")\n\n self.parser.add_option(\"--enableStageout\",\n dest=\"enableStageout\",\n default=False,\n action=\"store_true\",\n help=\"After the job runs copy the output file on the storage destination\")\n\n self.parser.add_option(\"--destdir\",\n dest=\"destdir\",\n default=None)", "def setOptions(self):\n self.parser.add_option( \"--outputdir\",\n dest = \"outdir\",\n default = None,\n help = \"Directory to write JSON summary to.\" )\n\n self.parser.add_option( \"--dbs\",\n dest = \"usedbs\",\n default = 'no',\n help = \"Use information in DBS to build the input lumi lists and the output lumi lists.\"+\\\n \" Allowed values are yes/no. Default is no.\" )", "def options(self, parser, env):\n super(ReporterPlugin, self).options(parser, env)\n parser.add_option('--file-name', \n dest='file_name', default='result.txt',\n help=\"save output file to this directory\")\n parser.add_option('--directory', action='store_true',\n dest='directory', default=self.__getDefault(),\n help=\"save output file to this directory. default is current nose worspace\")\n parser.add_option('--icycle', action='store', type='string', metavar=\"STRING\",\n dest='icycle', default=None, help=\"total cycle flag\")\n parser.add_option('--livereport', action='store_true',\n dest='livereport', default=False,\n help=\"switcher of uploading result to live report server. default is enable the feature\")\n parser.add_option('--server-config', action='store', metavar=\"FILE\",\n dest='livereport_config', default='server.config',\n help=\"specify the live report server configuration file path\")\n parser.add_option('--client-config', action='store', metavar=\"FILE\",\n dest='device_config', default='client.config',\n help=\"specify the device config file path\")\n parser.add_option('--duration', dest='duration', type='string', metavar=\"STRING\",\n action='callback', callback=self.__validate_duration, \n help='The minumum test duration before ending the test.\\\n Here format must follow next format: xxDxxHxxMxxS.\\\n e.g. --duration=2D09H30M12S, which means 2 days, 09 hours, 30 minutes and 12 seconds')", "def setup(self, optparser):\n\t\tpass", "def configure(self, options, conf):", "def prepare_optparser ():\n usage = \"usage: %prog -c mysample.cfg -s A01A -1 A01_1.fq -2 A02_2.fq\"\n description = \"Please set the sample name. e.g. 
L04A, L04C, L04T.\"\n optparser = OptionParser(version = \"0.0.1\", description = description, usage = usage, add_help_option = False)\n optparser.add_option(\"-h\", \"--help\", action = \"help\", help = \"Show this help message and exit.\")\n optparser.add_option(\"-c\", \"--config\", dest = \"config\", default = \"config.cfg\" ,type = \"string\",\n help = \"Set the config File.\")\n optparser.add_option(\"-s\", \"--samplename\", dest = \"samplename\" ,type = \"string\",\n help = \"Set the samplename.\")\n optparser.add_option(\"-1\", \"--fastq1\", dest = \"fastq1\", type = \"string\",\n help = \"input fastq file paired 1\")\n optparser.add_option(\"-2\", \"--fastq2\", dest = \"fastq2\", type = \"string\",\n help = \"input fastq file paired 2\")\n optparser.add_option(\"-d\", \"--dataprocess\", dest = \"dataprocess\", default = \"1111111111\",type = \"string\",\n help = \"Need point 6 digit number, eg. 111111: Conduct Genome Process, fastq_mapping, Add Read Group, Reorder Contig, Mark Duplicates, split_ntrim step one by one;100000 only conduct Genome Process; 000001:Only conduct split_ntrim step\")\n optparser.add_option(\"-i\", \"--in_bam\", dest = \"in_bam\" ,type = \"string\",\n help = \"You can set this to your bam file path.(If fastq1 is empty, required!)\")\n optparser.add_option(\"-o\", \"--out_dir\", dest = \"out_dir\" ,type = \"string\", default = \"vcf\",\n help = \"Set the vcf file out_dir.[vcf]\")\n return(optparser)", "def add_options(_parser):\n\n _parser.add_option(\"-y\", \"--year\",\n dest=\"year\", action=\"store\",\n help=\"Year for the merge\")\n\n _parser.add_option(\"-m\", \"--month\",\n dest=\"month\", action=\"store\",\n help=\"Month for the merge\")\n\n _parser.add_option(\"-d\", \"--day\",\n dest=\"day\", action=\"store\",\n help=\"Day for the merge\")\n\n _parser.add_option(\"-D\", \"--directory\",\n dest=\"directory\", action=\"store\",\n help=\"Directory containing files to merge\")\n\n _parser.add_option(\"-f\", \"--file\",\n dest=\"file\", action=\"store\",\n help=\"File containing list of input directories\")\n\n _parser.add_option(\"-w\", \"--window\",\n dest=\"window\", action=\"store\",\n help=\"Window in days (merge for the past *n* days\")\n\n _parser.add_option(\"-l\", \"--lookback\",\n dest=\"lookback\", action=\"store\",\n help=\"Lookback period (merge for 1 day *n* days prior)\")\n\n _parser.add_option(\"-t\", \"--topic\",\n dest=\"topic\", action=\"store\",\n help=\"Topic for the merge\")\n\n _parser.add_option(\"-i\", \"--input-prefix\",\n dest=\"input_prefix\", action=\"store\",\n help=\"Input directory prefix\")\n\n _parser.add_option(\"-o\", \"--output-prefix\",\n dest=\"output_prefix\", action=\"store\",\n help=\"Output directory prefix\")\n\n _parser.add_option(\"-n\", \"--num-reducers\",\n dest=\"num_reducers\", action=\"store\",\n help=\"Number of reducers\")\n\n _parser.add_option(\"-c\", \"--codec\",\n dest=\"codec\", action=\"store\",\n help=\"Compression codec to use\")\n\n _parser.add_option(\"-q\", \"--queue\",\n dest=\"queue\", action=\"store\",\n help=\"Mapreduce job queue\")\n\n _parser.add_option(\"-r\", \"--dry-run\",\n dest=\"dry_run\", action=\"store_true\", default=False,\n help=\"Dry run; create, but dont execute the Pig script\")", "def setup_argparse(parser):\n parser.add_argument(\"--debug\", default=False, action=\"store_true\", help=\"Enable debug mode\")\n parser.add_argument(\n \"--host\", help=\"Server host\", default=os.environ.get(\"SCELVIS_HOST\", \"0.0.0.0\")\n )\n parser.add_argument(\n \"--port\", type=int, 
help=\"Server port\", default=int(os.environ.get(\"SCELVIS_PORT\", 8050))\n )\n parser.add_argument(\n \"--fake-data\",\n default=False,\n action=\"store_true\",\n help=\"Enable display of fake data set (for demo purposes).\",\n )\n parser.add_argument(\n \"--data-source\",\n dest=\"data_sources\",\n default=[],\n action=\"append\",\n help=\"Path to data source(s)\",\n )\n\n parser.add_argument(\n \"--public-url-prefix\",\n default=os.environ.get(\"SCELVIS_URL_PREFIX\", \"\"),\n help=\"The prefix that this app will be served under (e.g., if behind a reverse proxy.)\",\n )\n\n parser.add_argument(\n \"--cache-dir\",\n default=os.environ.get(\"SCELVIS_CACHE_DIR\"),\n help=\"Path to cache directory, default is to autocreate one.\",\n )\n parser.add_argument(\n \"--cache-redis-url\",\n default=os.environ.get(\"SCELVIS_CACHE_REDIS_URL\"),\n help=\"Redis URL to use for caching, enables Redis cache\",\n )\n parser.add_argument(\n \"--cache-default-timeout\",\n default=os.environ.get(\"SCELVIS_CACHE_DEFAULT_TIMEOUT\", 7 * 24 * 60 * 60),\n type=int,\n help=\"Default timeout for cache\",\n )\n parser.add_argument(\n \"--cache-preload-data\",\n dest=\"cache_preload_data\",\n default=os.environ.get(\"SCELVIS_CACHE_PRELOAD_DATA\", \"0\") not in (\"\", \"0\", \"N\", \"n\"),\n action=\"store_true\",\n help=\"whether to preload data at startup\",\n )\n\n parser.add_argument(\n \"--upload-dir\",\n default=os.environ.get(\"SCELVIS_UPLOAD_DIR\"),\n help=\"Directory for visualization uploads, default is to create temporary directory\",\n )\n parser.add_argument(\n \"--max-upload-data-size\",\n default=os.environ.get(\"SCELVIS_MAX_UPLOAD_DATA_SIZE\", \"1000000000\"),\n type=int,\n help=\"Maximal size for data upload in bytes\",\n )\n parser.add_argument(\n \"--disable-upload\",\n default=os.environ.get(\"SCELVIS_UPLOAD_DISABLED\", \"0\") not in (\"\", \"0\", \"N\", \"n\"),\n dest=\"upload_disabled\",\n action=\"store_true\",\n help=\"Whether or not to disable visualization uploads\",\n )\n\n parser.add_argument(\n \"--disable-conversion\",\n default=os.environ.get(\"SCELVIS_CONVERSION_DISABLED\", \"0\") not in (\"\", \"0\", \"N\", \"n\"),\n dest=\"conversion_disabled\",\n action=\"store_true\",\n help=\"Directory for visualization uploads, default is to create temporary directory\",\n )\n\n parser.add_argument(\n \"--custom-home-md\",\n default=os.environ.get(\"SCELVIS_CUSTOM_HOME_MD\", None),\n help=\"Use custom markdown file for home screen\",\n )\n parser.add_argument(\n \"--custom-static-folder\",\n default=os.environ.get(\"SCELVIS_CUSTOM_STATIC_FOLDER\", None),\n help=\"Use custom static folder for files included in home screen markdown file\",\n )\n\n parser.add_argument(\n \"--irods-client-server-negotiation\",\n default=os.environ.get(\"IRODS_CLIENT_SERVER_NEGOTIATION\", \"request_server_negotiation\"),\n help=\"IRODS setting\",\n )\n parser.add_argument(\n \"--irods-client-server-policy\",\n default=os.environ.get(\"IRODS_CLIENT_SERVER_POLICY\", \"CS_NEG_REQUIRE\"),\n help=\"IRODS setting\",\n )\n parser.add_argument(\n \"--irods-ssl-verify-server\",\n default=os.environ.get(\"IRODS_SSL_VERIFY_SERVER\", \"none\"),\n help=\"IRODS setting\",\n )\n parser.add_argument(\n \"--irods-encryption-algorithm\",\n default=os.environ.get(\"IRODS_ENCRYPTION_ALGORITHM\", \"AES-256-CBC\"),\n help=\"IRODS setting\",\n )\n parser.add_argument(\n \"--irods-encryption-key-size\",\n default=os.environ.get(\"IRODS_ENCRYPTION_KEY_SIZE\", 32),\n type=int,\n help=\"IRODS setting\",\n )\n parser.add_argument(\n 
\"--irods-encryption-num-hash-rounds\",\n default=os.environ.get(\"IRODS_ENCRYPTION_NUM_HASH_ROUNDS\", 16),\n type=int,\n help=\"IRODS setting\",\n )\n parser.add_argument(\n \"--irods-encryption-salt-size\",\n default=os.environ.get(\"IRODS_ENCRYPTION_SALT_SIZE\", 8),\n type=int,\n help=\"IRODS setting\",\n )", "def main():\n import argparse\n\n parser = argparse.ArgumentParser(\n description='Use the machine learning meta library shrynk to compress'\n )\n subparsers = parser.add_subparsers(dest=\"command\")\n compress = subparsers.add_parser('compress')\n compress.add_argument('file', help='file you want to compress')\n compress.add_argument('--size', '-s', default=3, type=int, help='Size weight for model')\n compress.add_argument('--write', '-w', default=1, type=int, help='Write-time weight for model')\n compress.add_argument('--read', '-r', default=1, type=int, help='Read-time weight for model')\n decompress = subparsers.add_parser('decompress')\n decompress.add_argument('file', help='file you want to decompress')\n benchmark = subparsers.add_parser('benchmark')\n benchmark.add_argument('file', help='file you want to benchmark')\n benchmark.add_argument('--size', '-s', default=3, type=int, help='Size weight for model')\n benchmark.add_argument('--write', '-w', default=1, type=int, help='Write-time weight for model')\n benchmark.add_argument('--read', '-r', default=1, type=int, help='Read-time weight for model')\n benchmark.add_argument('--predict', help='Read-time weight for model', action=\"store_true\")\n benchmark.add_argument('--save', help='Read-time weight for model', action=\"store_true\")\n args = parser.parse_args()\n if args.command == \"compress\":\n data = load(args.file)\n print(save(data, args.file, size=args.size, write=args.write, read=args.read))\n if args.command == \"decompress\":\n data = load(args.file)\n if \"json\" in args.file:\n ext = \"json\"\n kwargs = {\"compression\": None}\n end = args.file.index(\".\" + ext)\n destination = args.file[:end] + \".\" + ext\n elif \"csv\" in args.file or \"parquet\" in args.file:\n ext = \"csv\"\n kwargs = {\"engine\": \"csv\", \"compression\": None}\n end = args.file.index(\".\" + ext)\n destination = args.file[:end] + \".\" + ext\n else:\n kwargs = {\"compression\": None}\n destination = \".\".join(args.file.split(\".\")[:-1])\n save(data, destination, kwargs)\n elif args.command == \"benchmark\":\n if args.predict:\n data = load(args.file)\n print(\"Predicted:\", infer(data, size=args.size, write=args.write, read=args.read))\n if args.save:\n bench = run_benchmarks(args.file)\n bench = pd.DataFrame(bench, columns=[\"kwargs\", \"size\", \"write_time\", \"read_time\"])\n return print(add_z_to_bench(bench, args.size, args.write, args.read))\n else:\n print(show_benchmark(args.file, size=args.size, write=args.write, read=args.read))", "def optparse_init(self):\n\n from optparse import OptionParser, OptionGroup\n usage = \"Usage: %prog [options] input_file(s) [output]\"\n p = OptionParser(usage, version=\"%prog \"+ __version__)\n p.add_option(\"-p\", \"--profile\", dest='profile', type='choice', choices=profile_list,\n help=\"Tile cutting profile (%s) - default 'mercator' (Google Maps compatible)\" % \",\".join(profile_list))\n p.add_option(\"-r\", \"--resampling\", dest=\"resampling\", type='choice', choices=resampling_list,\n help=\"Resampling method (%s) - default 'average'\" % \",\".join(resampling_list))\n p.add_option(\"-f\", \"--tile-format\", dest=\"tile_format\", type='choice', choices=tile_formats_list,\n help=\"Image 
format of generated tiles (%s) - default 'png'\" % \",\".join(tile_formats_list))\n p.add_option('-s', '--s_srs', dest=\"s_srs\", metavar=\"SRS\",\n help=\"The spatial reference system used for the source input data\")\n p.add_option('-z', '--zoom', dest=\"zoom\",\n help=\"Zoom levels to render (format:'2-5' or '10').\")\n p.add_option('-e', '--resume', dest=\"resume\", action=\"store_true\",\n help=\"Resume mode. Generate only missing files.\")\n p.add_option('-a', '--srcnodata', dest=\"srcnodata\", metavar=\"NODATA\",\n help=\"NODATA transparency value to assign to the input data\")\n p.add_option('-i', '--init-dest', dest=\"init_dest\",\n help=\"Colour used to initialize output, only for 'jpeg' tile format\")\n p.add_option('', '--tilesize', dest=\"tilesize\",\n help=\"Size of the tiles - default 256\")\n p.add_option('', '--osm', dest=\"tms_osm\", action=\"store_true\",\n help=\"tms or osm numbering - default tms\")\n p.add_option('', '--mbtiles', dest=\"mbtiles\", action=\"store_true\",\n help=\"mbtiles - tiles creation to mbtiles file\")\n p.add_option('', '--mbtiles_to_disk', dest=\"mbtiles_todisk\", action=\"store_true\",\n help=\"mbtiles tiles- write mbtiles tiles to a directory\")\n p.add_option('', '--mbtiles_from_disk', dest=\"mbtiles_fromdisk\", action=\"store_true\",\n help=\"mbtiles tiles- create mbtiles file from tile directory\")\n p.add_option('', \"--te\", dest=\"te_bounds\",\n help=\"bounds to extract (coordinates in the output SRS): xmin ymin xmax ymax OR xmin,ymin,xmax,ymax\")\n p.add_option(\"-v\", \"--verbose\", dest=\"verbose\",action=\"store_true\",\n help=\"Print status messages to stdout\")\n # KML options\n g = OptionGroup(p, \"KML (Google Earth) options\", \"Options for generated Google Earth SuperOverlay metadata\")\n g.add_option(\"-k\", \"--force-kml\", dest='kml', action=\"store_true\",\n help=\"Generate KML for Google Earth - default for 'geodetic' profile and 'raster' in EPSG:4326. 
For a dataset with different projection use with caution!\")\n g.add_option(\"-n\", \"--no-kml\", dest='kml', action=\"store_false\",\n help=\"Avoid automatic generation of KML files for EPSG:4326\")\n g.add_option(\"-u\", \"--url\", dest='url',\n help=\"URL address where the generated tiles are going to be published\")\n g.add_option('-d', '--kml-depth', dest=\"kml_depth\",\n help=\"How many levels to store before linking, default 1\")\n g.add_option('--kmz', dest=\"kmz\", action=\"store_true\",\n help=\"Compress KML files into KMZ format, default for 'gearth' profile\")\n g.add_option('--no-kmz', dest=\"kmz\", action=\"store_false\",\n help=\"Do not compress KML files into KMZ format, default for 'mercator', 'geodetic' and 'raster' profiles\")\n p.add_option_group(g)\n\n # HTML options\n g = OptionGroup(p, \"Web viewer options\", \"Options for generated HTML viewers a la Google Maps\")\n g.add_option(\"-w\", \"--webviewer\", dest='webviewer', type='choice', choices=webviewer_list,\n help=\"Web viewer to generate (%s) - default 'all'\" % \",\".join(webviewer_list))\n g.add_option(\"-t\", \"--title\", dest='title',\n help=\"Title of the map\")\n g.add_option(\"-c\", \"--copyright\", dest='copyright',\n help=\"Copyright for the map\")\n g.add_option(\"-g\", \"--googlekey\", dest='googlekey',\n help=\"Google Maps API key from http://code.google.com/apis/maps/signup.html\")\n g.add_option(\"-y\", \"--yahookey\", dest='yahookey',\n help=\"Yahoo Application ID from http://developer.yahoo.com/wsregapp/\")\n p.add_option_group(g)\n\n # TODO: MapFile + TileIndexes per zoom level for efficient MapServer WMS\n #g = OptionGroup(p, \"WMS MapServer metadata\", \"Options for generated mapfile and tileindexes for MapServer\")\n #g.add_option(\"-i\", \"--tileindex\", dest='wms', action=\"store_true\"\n # help=\"Generate tileindex and mapfile for MapServer (WMS)\")\n # p.add_option_group(g)\n\n p.set_defaults(verbose=False, profile=\"mercator\", kml=False, url=None,\n copyright='', resampling='average', resume=False, tilesize=None,mbtiles=False,tms_osm=False,\n mbtiles_todisk=False,mbtiles_fromdisk=False,te_bounds='',\n googlekey='INSERT_YOUR_KEY_HERE', yahookey='INSERT_YOUR_YAHOO_APP_ID_HERE')\n\n self.parser = p", "def set_options():\n parser = argparse.ArgumentParser(description=\"imageseries builder\")\n\n parser.add_argument(\"-i\", \"--info\", help=\"describe the input files and quit\",\n action=\"store_true\")\n\n # file options\n parser.add_argument(\"-o\", \"--outfile\",\n help=\"name of HDF5 output file\",\n default=\"imageseries.h5\")\n help_d = \"path to HDF5 data group\"\n parser.add_argument(\"-d\", \"--dset\",\n help=help_d,\n metavar=\"PATH\", default=\"/imageseries\")\n\n # image options\n parser.add_argument(\"imagefiles\", nargs=\"+\", help=\"image files\")\n\n parser.add_argument(\"--empty\", \"--blank\",\n help=\"number of blank frames in beginning of file\",\n metavar=\"N\", type=int, action=\"store\", default=0)\n parser.add_argument(\"--max-frames\",\n help=\"maximum number of frames to write\",\n metavar=\"M\", type=int, action=\"store\", default=0)\n\n # compression/chunking\n help_d = \"compression level for gzip (1-9); 0 or less for no compression; \"\\\n \"above 9 sets level to 9\"\n parser.add_argument(\"-c\", \"--compression-level\",\n help=help_d,\n metavar=\"LEVEL\", type=int, action=\"store\", default=4)\n help_d = \"target chunk size in KB (0 means single image size)\"\n parser.add_argument(\"--chunk-KB\",\n help=help_d,\n metavar=\"K\", type=int, action=\"store\", 
default=0)\n\n return parser", "def spark_config():\n sqlContext.sql(\"SET spark.sql.parquet.binaryAsString=true\")\n sqlContext.sql(\"SET spark.sql.parquet.compression.codec=snappy\")\n sqlContext.setConf('spark.sql.hive.convertMetastoreParquet', 'False')\n sqlContext.clearCache()\n # print(sc._conf.get(\"spark.yarn.queue\"))", "def add_parse_options(cls, parser):\n # Decoder params\n parser.add_argument(\"-beam_size\", default=1, type=int, help=\"Beam size\")\n parser.add_argument(\"-lm_weight\", default=0.0, type=float, help=\"LM weight in decoding\")\n parser.add_argument(\"-lm_path\", default=\"/share/data/speech/shtoshni/research/asr_multi/\"\n \"code/lm/models/best_models/run_id_301/lm.ckpt-250000\", type=str,\n help=\"LM ckpt path\")\n parser.add_argument(\"-cov_penalty\", default=0.0, type=float,\n help=\"Coverage penalty\")", "def setupOptions():\n define(\"conf\", default=\"\",help=\"path to configuration file\")\n define(\"DB_CACHE\", default=\"False\", help=\"Flag\")\n define(\"CELL_NAME\", default=\"beolink.org\", help=\"Default Cell\")\n\n afs.orm.DbMapper.setupOptions() \n return", "def __init__(self, filename = None, dbalias = None, smkey = None ):\n super(HLTJobOptionsAccess,self).__init__( ConfigType.HLTJO, mainkey = \"properties\",\n filename = filename, dbalias = dbalias, dbkey = smkey )\n self.loader.setQuery([\n \"SELECT JO.HJO_DATA FROM {schema}.SUPER_MASTER_TABLE SMT, {schema}.HLT_JOBOPTIONS JO WHERE JO.HJO_ID=SMT.SMT_HLT_JOBOPTIONS_ID AND SMT.SMT_ID={dbkey}\", # for new db schema\n \"SELECT JO.JO_CONTENT FROM {schema}.SUPER_MASTER_TABLE SMT, {schema}.JO_MASTER_TABLE JO WHERE JO.JO_ID=SMT.SMT_JO_MASTER_TABLE_ID AND SMT.SMT_ID={dbkey}\" # for current db schema\n ])\n self.load()", "def optparse_init():\n\n from optparse import OptionParser, OptionGroup\n usage = \"Usage: %prog [options] input_file [output]\"\n p = OptionParser(usage, version=\"%prog \" + __version__)\n p.add_option(\"-p\", \"--profile\", dest='profile',\n type='choice', choices=profile_list,\n help=(\"Tile cutting profile (%s) - default 'mercator' \"\n \"(Google Maps compatible)\" % \",\".join(profile_list)))\n p.add_option(\"-r\", \"--resampling\", dest=\"resampling\",\n type='choice', choices=resampling_list,\n help=\"Resampling method (%s) - default 'average'\" % \",\".join(resampling_list))\n p.add_option(\"-s\", \"--s_srs\", dest=\"s_srs\", metavar=\"SRS\",\n help=\"The spatial reference system used for the source input data\")\n p.add_option(\"-z\", \"--zoom\", dest=\"zoom\",\n help=\"Zoom levels to render (format:'2-5' or '10').\")\n p.add_option(\"-e\", \"--resume\", dest=\"resume\", action=\"store_true\",\n help=\"Resume mode. 
Generate only missing files.\")\n p.add_option(\"-a\", \"--srcnodata\", dest=\"srcnodata\", metavar=\"NODATA\",\n help=\"NODATA transparency value to assign to the input data\")\n p.add_option(\"-d\", \"--tmscompatible\", dest=\"tmscompatible\", action=\"store_true\",\n help=(\"When using the geodetic profile, specifies the base resolution \"\n \"as 0.703125 or 2 tiles at zoom level 0.\"))\n p.add_option(\"-x\", \"--xyz\", \n action='store_true', dest='xyz',\n help=\"Use XYZ tile numbering instead of TMS\")\n p.add_option(\"-v\", \"--verbose\",\n action=\"store_true\", dest=\"verbose\",\n help=\"Print status messages to stdout\")\n p.add_option(\"-q\", \"--quiet\",\n action=\"store_true\", dest=\"quiet\",\n help=\"Disable messages and status to stdout\")\n p.add_option(\"--processes\",\n dest=\"nb_processes\",\n type='int',\n help=\"Number of processes to use for tiling\")\n\n # KML options\n g = OptionGroup(p, \"KML (Google Earth) options\",\n \"Options for generated Google Earth SuperOverlay metadata\")\n g.add_option(\"-k\", \"--force-kml\", dest='kml', action=\"store_true\",\n help=(\"Generate KML for Google Earth - default for 'geodetic' profile and \"\n \"'raster' in EPSG:4326. For a dataset with different projection use \"\n \"with caution!\"))\n g.add_option(\"-n\", \"--no-kml\", dest='kml', action=\"store_false\",\n help=\"Avoid automatic generation of KML files for EPSG:4326\")\n g.add_option(\"-u\", \"--url\", dest='url',\n help=\"URL address where the generated tiles are going to be published\")\n p.add_option_group(g)\n\n # HTML options\n g = OptionGroup(p, \"Web viewer options\",\n \"Options for generated HTML viewers a la Google Maps\")\n g.add_option(\"-w\", \"--webviewer\", dest='webviewer', type='choice', choices=webviewer_list,\n help=\"Web viewer to generate (%s) - default 'all'\" % \",\".join(webviewer_list))\n g.add_option(\"-t\", \"--title\", dest='title',\n help=\"Title of the map\")\n g.add_option(\"-c\", \"--copyright\", dest='copyright',\n help=\"Copyright for the map\")\n g.add_option(\"-g\", \"--googlekey\", dest='googlekey',\n help=\"Google Maps API key from http://code.google.com/apis/maps/signup.html\")\n g.add_option(\"-b\", \"--bingkey\", dest='bingkey',\n help=\"Bing Maps API key from https://www.bingmapsportal.com/\")\n p.add_option_group(g)\n\n p.set_defaults(verbose=False, profile=\"mercator\", kml=False, url='', zoom='0-2', xyz=True,\n webviewer='none', copyright='', resampling='near', resume=False,\n googlekey='INSERT_YOUR_KEY_HERE', bingkey='INSERT_YOUR_KEY_HERE',\n processes=1)\n\n return p", "def configure(self, options, conf):\n pass", "def configure(self, config):\n super(MemoryPersistence, self).configure(config.with_default_tuples(\"options.path\", \"\"))", "def configure_options(self):\n super(MRExodus2Seq, self).configure_options()\n \n self.add_passthrough_option(\n '-t', '--timesteps', dest='timesteps',\n type='int',\n help='-t NUM or --timesteps NUM, Groups the output into batches of NUM timesteps' \n )\n self.add_passthrough_option(\n '-d', '--outdir', dest='outdir',\n help='-d DIR or --outdir DIR, Write the output to the directory DIR'\n )\n self.add_passthrough_option(\n '--variables', dest='variables',\n help='--variables VARS, Only output the variables in the comma delimited list' \n )\n self.add_passthrough_option(\n '--timestepfile', dest='timestepfile',\n help='--timestepfile FILE, Get the normalized timesteps' \n )", "def prepare(self, **options):\r\n raise NotImplementedError", "def configure_args(self):\n 
super(MRTextClassifier, self).configure_args()\n\n self.add_passthru_arg(\n '--min-df', dest='min_df', default=2, type=int,\n help=('min number of documents an n-gram must appear in for us to'\n ' count it. Default: %(default)s'))\n self.add_passthru_arg(\n '--max-df', dest='max_df', default=10000000, type=int,\n help=('max number of documents an n-gram may appear in for us to'\n ' count it (this keeps reducers from running out of memory).'\n ' Default: %(default)s'))\n self.add_passthru_arg(\n '--max-ngram-size', dest='max_ngram_size',\n default=DEFAULT_MAX_NGRAM_SIZE, type=int,\n help='maximum phrase length to consider')\n self.add_passthru_arg(\n '--stop-words', dest='stop_words',\n default=', '.join(DEFAULT_STOP_WORDS),\n help=(\"comma-separated list of words to ignore. For example, \"\n \"--stop-words 'in, the' would cause 'hole in the wall' to be\"\n \" parsed as ['hole', 'wall']. Default: %(default)s\"))\n self.add_passthru_arg(\n '--short-doc-threshold', dest='short_doc_threshold',\n type=int, default=None,\n help=('Normally, for each n-gram size, we take the average score'\n ' over all n-grams that appear. This allows us to penalize'\n ' short documents by using this threshold as the denominator'\n ' rather than the actual number of n-grams.'))\n self.add_passthru_arg(\n '--no-test-set', dest='no_test_set',\n action='store_true', default=False,\n help=(\"Choose about half of the documents to be the testing set\"\n \" (don't use them to train the classifier) based on a SHA1\"\n \" hash of their text\"))", "def setup(parser):\n global debug\n global config\n global file_list\n global job_sets\n global from_saved_state\n\n args = parser.parse_args()\n\n if args.debug:\n debug = True\n print_message('Running in debug mode', 'ok')\n\n # read through the config file and setup the config dict\n config = {}\n if not args.config:\n parser.print_help()\n sys.exit()\n else:\n try:\n confParse = ConfigParser.ConfigParser()\n confParse.read(args.config)\n for section in confParse.sections():\n config[section] = {}\n for option in confParse.options(section):\n opt = confParse.get(section, option)\n if not opt:\n if 'pass' in option and not args.no_monitor:\n opt = getpass('>> ' + option + ': ')\n else:\n opt = raw_input('>> ' + option + ': ')\n if opt.startswith('[') or opt.startswith('{'):\n opt = json.loads(opt)\n config[section][option] = opt\n except Exception as e:\n msg = 'Unable to read config file, is it properly formatted json?'\n print_message(msg)\n print_debug(e)\n return -1\n\n if args.no_ui:\n config['global']['ui'] = False\n else:\n debug = False\n config['global']['ui'] = True\n\n if args.dry_run:\n config['global']['dry_run'] = True\n else:\n config['global']['dry_run'] = False\n\n if args.no_cleanup:\n config['global']['no_cleanup'] = True\n else:\n config['global']['no_cleanup'] = False\n\n if args.no_monitor:\n config['global']['no_monitor'] = True\n print \"Turning off remote monitoring\"\n else:\n config['global']['no_monitor'] = False\n \n if args.size:\n config['transfer']['size'] = args.size\n else:\n config['transfer']['size'] = 100\n \n if args.viewer:\n print 'Turning on output_viewer mode'\n config['global']['viewer'] = True\n else:\n config['global']['viewer'] = False\n\n # setup config for file type directories\n for key, val in config.get('global').get('output_patterns').items():\n new_dir = os.path.join(\n config['global']['data_cache_path'],\n key)\n if not os.path.exists(new_dir):\n os.makedirs(new_dir)\n if val == 'mpaso.hist.am.timeSeriesStatsMonthly':\n 
config['global']['mpas_dir'] = new_dir\n elif val == 'mpascice.hist.am.timeSeriesStatsMonthly':\n config['global']['mpas_cice_dir'] = new_dir\n elif val == 'cam.h0':\n config['global']['atm_dir'] = new_dir\n elif val == 'mpaso.rst.0':\n config['global']['mpas_rst_dir'] = new_dir\n elif val == 'rpointer':\n config['global']['rpt_dir'] = new_dir\n elif val == 'mpas-o_in':\n config['global']['mpas_o-in_dir'] = new_dir\n elif val == 'mpas-cice_in':\n config['global']['mpas_cice-in_dir'] = new_dir\n elif 'stream' in val:\n config['global']['streams_dir'] = new_dir\n\n if not os.path.exists(config['global']['output_path']):\n os.makedirs(config['global']['output_path'])\n if not os.path.exists(config['global']['data_cache_path']):\n os.makedirs(config['global']['data_cache_path'])\n\n # setup run_scipts_path\n config['global']['run_scripts_path'] = os.path.join(\n config['global']['output_path'],\n 'run_scripts')\n # setup tmp_path\n config['global']['tmp_path'] = os.path.join(\n config['global']['output_path'],\n 'tmp')\n\n # setup logging\n if args.log:\n log_path = args.log\n else:\n log_path = os.path.join(\n config.get('global').get('output_path'),\n 'workflow.log')\n logging.basicConfig(\n format='%(asctime)s:%(levelname)s: %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S %p',\n filename=log_path,\n filemode='w',\n level=logging.DEBUG)\n\n endpoints = [config['transfer']['source_endpoint'], config['transfer']['destination_endpoint']]\n if not setup_globus(endpoints):\n return -1\n print 'Globus setup complete'\n return config", "def configure(self, options, conf):\n super(ReporterPlugin, self).configure(options, conf)\n if not self.enabled: return\n\n self.write_hashes = conf.verbosity == 2\n self.conf = conf\n self.opt = options\n if self.opt.icycle and not self.__counter:\n self.__counter = TestCounter(cycles=self.opt.icycle)\n elif not self.__counter:\n self.__counter = TestCounter()\n\n if self.opt.duration and not self.__timer:\n self.__timer = Timer(self.opt.duration)\n\n if not self.__configuration:\n if self.opt.livereport:\n if not exists(options.livereport_config):\n raise Exception(\"couldn't find the report server local setting file: '%s'\" % options.livereport_config)\n self.__configuration.update(_getServerConfiguration(options.livereport_config))\n if not exists(options.device_config):\n raise Exception(\"couldn't find device configuration file: '%s'\" % options.device_config)\n #self.__configuration.update(_getServerConfiguration(options.livereport_config))\n self.__configuration.update(_getDeviceConfiguration(options.device_config))\n self.__configuration.update({'planname': os.path.basename(self.conf.options.plan_file)})\n self.result_properties = {'payload': None, 'extras': None}\n if self.opt.livereport and not self.__report_client:\n self.__report_client = ReportClient(**self.__configuration)\n self.token = self.__report_client.regist()\n if not self.token:\n raise Exception(\"couldn't get token from report server. 
check report server settings\")", "def add_opts(self, optparser):\n optlist = [\n optparse.make_option(\n '-d', '--jnc-output',\n dest='directory',\n help='Generate output to DIRECTORY.'),\n optparse.make_option(\n '--jnc-package',\n dest='package',\n help='Root package name of generated sources'),\n optparse.make_option(\n '--jnc-help',\n dest='jnc_help',\n action='store_true',\n help='Print help on usage of the JNC plugin and exit'),\n optparse.make_option(\n '--jnc-serial',\n dest='serial',\n action='store_true',\n help='Turn off usage of multiple threads.'),\n optparse.make_option(\n '--jnc-verbose',\n dest='verbose',\n action='store_true',\n help='Verbose mode: Print detailed debug messages.'),\n optparse.make_option(\n '--jnc-debug',\n dest='debug',\n action='store_true',\n help='Print debug messages. Redundant if verbose mode is on.'),\n optparse.make_option(\n '--jnc-no-classes',\n dest='no_classes',\n action='store_true',\n help='Do not generate classes.'),\n optparse.make_option(\n '--jnc-no-schema',\n dest='no_schema',\n action='store_true',\n help='Do not generate schema.'),\n optparse.make_option(\n '--jnc-no-pkginfo',\n dest='no_pkginfo',\n action='store_true',\n help='Do not generate package-info files.'),\n optparse.make_option(\n '--jnc-ignore-errors',\n dest='ignore',\n action='store_true',\n help='Ignore errors from validation.'),\n optparse.make_option(\n '--jnc-import-on-demand',\n dest='import_on_demand',\n action='store_true',\n help='Use non explicit imports where possible.'),\n optparse.make_option(\n '--jnc-classpath-schema-loading',\n dest='classpath_schema_loading',\n help='Load schema files using classpath rather than location.')\n ]\n g = optparser.add_option_group('JNC output specific options')\n g.add_options(optlist)", "def initialize_options(self):\n pass", "def initialize_options(self):\n pass", "def parser_config(p):\n p.add_argument('--duration',\n type=float,\n help='The capture duration.')\n p.add_argument('--endpoint_id',\n type=int,\n default=2,\n help='The endpoint identifier.')\n p.add_argument('--threaded', '-t',\n default=0,\n action='count',\n help='Use the threaded wrapper')\n p.add_argument('filename',\n help='The filename for output data.')\n return on_cmd", "def definearguments(self, customparser):\n\n customparser.add_option(\n '--disable',\n action=\"store_false\",\n dest=\"enableFeature\",\n help=\"Disable the Scalable Persistent Memory feature. 
Warning: \"\\\n \"any pending configuration changes will be lost.\"\n )", "def __configure(self):\n\n # CUDA\n if self.__cuda:\n if isinstance(self.__cuda, string_types):\n # Use specified path\n self.__configure_opts.append(\n '--with-cuda={}'.format(self.__cuda))\n elif self.__toolchain.CUDA_HOME:\n self.__configure_opts.append(\n '--with-cuda={}'.format(self.__toolchain.CUDA_HOME))\n else:\n # Default location\n self.__configure_opts.append('--with-cuda=/usr/local/cuda')\n else:\n self.__configure_opts.append('--without-cuda')\n\n # GDRCOPY\n if self.__gdrcopy:\n if isinstance(self.__gdrcopy, string_types):\n # Use specified path\n self.__configure_opts.append(\n '--with-gdrcopy={}'.format(self.__gdrcopy))\n else:\n # Boolean, let UCX try to figure out where to find it\n self.__configure_opts.append('--with-gdrcopy')\n elif self.__gdrcopy == False:\n self.__configure_opts.append('--without-gdrcopy')\n\n # KNEM\n if self.__knem:\n if isinstance(self.__knem, string_types):\n # Use specified path\n self.__configure_opts.append(\n '--with-knem={}'.format(self.__knem))\n else:\n # Boolean, let UCX try to figure out where to find it\n self.__configure_opts.append('--with-knem')\n elif self.__knem == False:\n self.__configure_opts.append('--without-knem')\n\n # OFED\n if self.__ofed:\n if isinstance(self.__ofed, string_types):\n # Use specified path\n self.__configure_opts.extend(\n ['--with-verbs={}'.format(self.__ofed),\n '--with-rdmacm={}'.format(self.__ofed)])\n else:\n # Boolean, let UCX try to figure out where to find it\n self.__configure_opts.extend(['--with-verbs', '--with-rdmacm'])\n elif self.__ofed == False:\n self.__configure_opts.extend(['--without-verbs',\n '--without-rdmacm'])\n\n # XPMEM\n if self.__xpmem:\n if isinstance(self.__xpmem, string_types):\n # Use specified path\n self.__configure_opts.append(\n '--with-xpmem={}'.format(self.__xpmem))\n else:\n # Boolean, let UCX try to figure out where to find it\n self.__configure_opts.append('--with-xpmem')\n elif self.__xpmem == False:\n self.__configure_opts.append('--without-xpmem')\n\n # Workaround for format warning considered an error on Power\n if hpccm.config.g_cpu_arch == cpu_arch.PPC64LE:\n if not self.__toolchain.CFLAGS:\n self.__toolchain.CFLAGS = '-Wno-error=format'" ]
[ "0.73688", "0.6528374", "0.6009201", "0.57185924", "0.55749536", "0.5518058", "0.5389948", "0.53854734", "0.5381025", "0.53500783", "0.5334079", "0.5309261", "0.5307604", "0.52912265", "0.52566737", "0.5228797", "0.5199708", "0.5196785", "0.5193552", "0.5192408", "0.51900756", "0.5184797", "0.51730645", "0.51105833", "0.5093218", "0.50893986", "0.50893986", "0.5066461", "0.5065465", "0.5053786" ]
0.82809466
0
This is the mapper function of the job. It parses a given line of the input file using a special regular expression to extract the IP address, the number of bytes, and the user agent used in the request (it also increments the corresponding counter).
def mapper(self, _, line):
    if line:
        try:
            line_values = re.match(self.REGEXP, line).groupdict()
            user_agent = user_agents.parse(line_values['user_agent']).browser.family
            self.increment_counter('Browsers', user_agent, 1)
            ip = line_values['ip']
            try:
                byte_count = int(line_values['byte_count'])
            except ValueError:
                byte_count = 0
            yield ip, ValueFormat(byte_count, 1)
        except AttributeError:
            self.increment_counter('ERRORS', 'ERRORS', 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_line(self, line):\n find_result = re.findall(LINE_REGEX, line)\n line_data = {r[0]: r[1] for r in find_result}\n self.process_url(line_data.get('request_to'))\n self.process_status_code(line_data.get('response_status'))", "def process_idx_file_line(self, line):\n pass", "def parse_line(self, line: str) -> None:\n self._count += 1", "def process_line(line: str) -> Instruction:\n register, op, value, _, base, check, limit = line.split()\n return Instruction(register, op, int(value), base, check, int(limit))", "def __init__(self, line):\n (self.timestamp, self.status_code, self.content_length, self.url, self.hop_path, self.via,\n self.mime, self.thread, self.start_time_plus_duration, self.hash, self.source,\n self.annotation_string) = re.split(\" +\", line.strip(), maxsplit=11)\n # Account for any JSON 'extra info' ending, strip or split:\n if self.annotation_string.endswith(' {}'):\n self.annotation_string = self.annotation_string[:-3]\n elif ' {\"' in self.annotation_string and self.annotation_string.endswith('}'):\n self.annotation_string, self.extra_json = re.split(re.escape(' {\"'), self.annotation_string, maxsplit=1)\n self.extra_json = '{\"%s' % self.extra_json\n # And split out the annotations:\n self.annotations = self.annotation_string.split(',')\n\n # Some regexes:\n self.re_ip = re.compile('^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}$')\n self.re_tries = re.compile('^\\d+t$')\n self.re_dol = re.compile('^dol:\\d+') # Discarded out-links - make a total?", "def process_log(f):\n data = f.readlines()\n\n for line in data:\n m = re.match(reg, line)\n if m:\n route = m.groups()[2].strip()\n ret_val = m.groups()[3]\n if route not in route_map.keys():\n route_map[route] = 0\n route_map[route] = route_map[route] + 1\n code = check_range(int(ret_val))\n if code not in return_code_map.keys():\n return_code_map[code] = 0\n return_code_map[code] = return_code_map[code] + 1\n\n set_offset(f)\n f.close()", "def _parse_line(line: Match[str]) -> dict:\n request = line.group(\"request\")\n request = request.split()\n req_method = request[0] # GET, POST, PUT, etc.\n url = request[1]\n x = url.split(\"/\")[3:]\n uri = f'/{\"/\".join(x)}'\n\n timestamp = line.group(\"timestamp\") # timestamp in ISO format\n timestamp = MyTime._try_isoformat(timestamp, tzinfo=\"UTC\").dt\n\n res = {\n \"url\": url,\n \"uri\": uri,\n \"req_method\": req_method,\n \"timestamp\": timestamp,\n \"user_agent\": line.group(\"user_agent\"),\n }\n return res", "def task1_mapper():\n line_count = 0\n for line in sys.stdin:\n # Clean input and split it\n lines = line.strip().split(\",\")\n line_count += 1\n # Check that the line is of the correct format and filtering the HEADER record \n # If line is malformed, we ignore the line and continue to the next line\n if line_count == 1:\n continue\n else:\n if len(lines) != 12:\n continue\n \n category = lines[3].strip()\n videoid = lines[0].strip()\n country = lines[11].strip()\n k_key = category+','+videoid\n\n print(\"{}\\t{}\".format(k_key, country))", "def parse(cls, line):\r\n raise NotImplementedError", "def parse_line(self, line):\n raise NotImplementedError", "def parseIP(line):\n match = re.search(r'hostname=\\\"(.*)\\\" port=\\\"(\\d+)\\\" username=\\\"yt4xb\\\"/', line)\n if match:\n name = match.group(1)\n port = match.group(2)\n\tprint name\n\tprint '\\n'\n\tprint port\n return name+\":\"+port\n else:\n return -1", "def ParseLine(line):\n fields = line.split()\n ip = fields[0]\n datestr = ' '.join(fields[3:5])[1:-1]\n timestamp = datetime.strptime(\n 
datestr, '%d/%b/%Y:%H:%M:%S %z'\n ).timestamp()\n command = fields[5][1:]\n uri = fields[6]\n protocol = fields[7][:-1]\n status = int(fields[8])\n size = int(fields[9])\n meta = [var.strip('\"') for var in fields[11:-1]]\n return {\n 'timestamp': timestamp,\n 'ip': ip,\n 'command': command,\n 'uri': uri,\n 'protocol': protocol,\n 'status': status,\n 'size': size,\n 'meta': meta\n }", "def process_from_file():\r\n global default_input_path\r\n print \"JoomFind v 1.0\"\r\n print \"\\n\\nTrying to read URL(s) form \" + default_input_path + \" file...\\n\"\r\n try:\r\n if not default_input_path:\r\n f = open(\"urls.txt\")\r\n else:\r\n f=open(default_input_path)\r\n cwd=os.getcwd()\r\n file_path = cwd + path_slash + f.name\r\n\t# extracting url's to list from file\r\n start_urls = [url.strip() for url in f.readlines() if url[0] not in ['#',' ',\"\\n\"]]\r\n if not start_urls:\r\n print \"File is empty. Add some URL(s) first.\\n\"\r\n f.close()\r\n return 0\r\n except:\r\n print \"File not found. Make sure it exists.\\n\"\r\n return 0\r\n #print start_urls\r\n \r\n num=str(len(start_urls))\r\n print \"Found \" + num + \" URL(s) on \" + time.asctime(time.localtime(time.time())) + \"\\n\"\r\n \r\n of=open(default_output_path,'a+')\r\n of.write(\"\\n\\n\\tScanning \" + num + \" URL(s) \")\r\n of.write(\"\\n\\n\\tDate\\Time : \" + time.asctime(time.localtime(time.time())) )\r\n of.write(\"\\n\\n\\tInput file path : \" + default_input_path + \"\\n\\n\")\r\n of.close()\r\n \r\n for url in start_urls:\r\n global provided_url\r\n provided_url=url\r\n print \"\\nWorking on URL \" + str(start_urls.index(url)+1) + \": \" + provided_url\r\n processing()\r\n print \"\\nAll done! Check '\" + default_output_path +\"' file for results.\\n\"", "def _parse(self):\n \n # HUA determine the host ip address\n # read 20 packages and set the most frequent one\n ips_dict = {}\n count = 0\n for raw_packet in self.raw_packets:\n if count > 100: break\n ethernet = Ethernet(raw_packet[0:14])\n if(ethernet.type != 'IP'):\n continue\n ip = Ip(raw_packet[14:])\n if(ip.protocol != 'TCP') :\n continue\n if(ip.src not in ips_dict):\n ips_dict[ip.src] = 0\n ips_dict[ip.src] += 1\n if(ip.dst not in ips_dict):\n ips_dict[ip.dst] = 0\n ips_dict[ip.dst] += 1\n # get the most frequent one\n max_appear = 0\n ip = None\n for key, value in ips_dict.items():\n if value > max_appear:\n ip = key\n max_appear = value\n\n global _device_ip\n if not self.enableFilter or not _device_ip:\n _device_ip = ip\n\n global _tcp_buf\n _tcp_buf = {}\n number = 0\n self.begin_ts = self.packet_headers[-1]['ts']\n rcount = 0\n for raw_packet in self.raw_packets:\n pcap_packet = Pcap_packet()\n pcap_packet.pcap_num = rcount#number # add one to be consistent with wireshark\n pcap_packet.top_layer = 1\n pcap_packet.ethernet = Ethernet(raw_packet[0:14])\n \n #skip the packet that is not ip packet\n \n rcount += 1\n\n if (pcap_packet.ethernet.type != 'IP'):\n continue\n\n pcap_packet.top_layer = 2\n pcap_packet.ip = Ip(raw_packet[14:])\n\n\n\n\n # just collect the packets between \n \n if self.enableFilter and not (pcap_packet.ip.src == _device_ip and pcap_packet.ip.dst == SERVER_IP) \\\n and not (pcap_packet.ip.dst == _device_ip and pcap_packet.ip.src == SERVER_IP):\n #print \"Ignore ip not ok\"\n continue\n '''\n if rcount < 10 or rcount > 2600:\n print 'rcount %d, time %d ---: %f' % (rcount, number, self.packet_headers[rcount - 1]['ts'] - self._ts_base)\n '''\n \n self.pcap_packets.append(pcap_packet)\n \n\n #skip the packet that is not tcp message\n if 
(pcap_packet.ip.protocol != 'TCP'):\n continue\n \n\n\n pcap_packet.top_layer = 3\n pcap_packet.tcp = Tcp(pcap_packet.ip, number)\n\n if pcap_packet.ip.src == _device_ip:\n pcap_packet.tcp.direction = \"out\"\n else:\n pcap_packet.tcp.direction = \"in\"\n\n\n #dispatch the tcp into tcp streams\n self._add_pkt_into_tcp_stream(pcap_packet, number)\n \n #reassemble tcp packet\n self._tcp_reassemble(pcap_packet.pcap_num, pcap_packet.ip.src, pcap_packet.ip.dst, pcap_packet.tcp)\n number += 1\n #endof for\n #flush the tcp_buf, other wise it will lose some http response\n for sockets in _tcp_buf.keys():\n self._tcp_flush(sockets)\n del _tcp_buf[sockets]", "def parse(lines): \n replied = len(lines)\n avg_delay, lost = 0, 0\n qos = 1.0\n \n if replied != 0:\n for line in lines:\n line.strip() #remove leading and trailing spaces\n \"\"\"\n Each line has the following fields:\n [status code] [reply time (seconds since epoch)] [source IP] [source url] [source query] [serving delay]\n \n e.g.:\n 200 1296756182 192.168.10.2 /home.php ?N=192 11045\n 200 1296756183 192.168.10.2 /home.php ?N=192 230036\n 200 1296756183 192.168.10.2 /home.php ?N=192 230684\n \"\"\"\n status, time, sourceIP, url, query, delay = line.split()\n \n time = int(time)\n delay = int(delay)\n \n if delay > DEADLINE:\n lost += 1\n avg_delay += delay\n avg_delay /= replied\n qos = (replied - lost) / replied\n\n return {'replied': replied, 'delay' : avg_delay, 'qos' : qos, 'lost': lost}", "def do_Promo_line_parse (Promo_line, line_number, filehash) :\n result = [filehash,\n line_number,\n Promo_line[0:8].strip(),\n Promo_line[9:13].strip(),\n Promo_line[14:19].strip(),\n Promo_line[20:26].strip().lstrip('0'), # Spec indicates numerical field\n Promo_line[27:30].strip().lstrip('0'), # Spec indicates numerical field\n Promo_line[31:40].strip(),\n Promo_line[41:49].strip(),\n Promo_line[50:].strip()\n ]\n return result\n # Having the line number passed in is ugly, but kind of works :/\n # Having all the field extraction explicit is kind of ugly too...\n # We're using the hash here to link? 
Yeah, that's because Python\n # doesn't know what the autonumbered load table is up to in the\n # DB when it starts to coalesce the raw files together.", "def _parseLine(self, line, delimiter = \":\"):\r\n\t\tsplt = line.split(delimiter)\r\n\t\tinVec = self._parseVec(splt[0])\r\n\t\toutVec = self._parseVec(splt[1])\r\n\t\tif (len(splt) == 2):\r\n\t\t\tlabel = \"\"\r\n\t\telse:\r\n\t\t\tlabel = splt[2]\r\n\t\tself.data.append({'in':inVec, 'out':outVec, 'label':label})", "def buscarEnLineas(ipSrc, maskSrc, ipDst, maskDst, fileLines):\n\tipsAnteriores = {}\n\tipsActuales = {}\n\tfor actual in fileLines:\n\t\tmatches = re.search(\n\t\t\t# nro de hop ((nombre) IP del server RTT) o un * \n\t\t\t'\\s*([0-9]{,2})\\s+(?:(?:.+?\\s*\\(([0-9.]+)\\))\\s+((?:\\s*[0-9.]+\\s+ms)+)\\s+|\\*)$'\n\t\t\t, actual\n\t\t) #pueden ir a http://www.regexper.com/ y ver que significa\n\t\tif matches.group(1): #es un nuevo hop\n\t\t\thop = matches.group(1)\n\t\t\tipsAnteriores = ipsActuales\n\t\t\tipsActuales = {}\n\t\tip = matches.group(2)\n\t\tif ip:\t#me respondieron\n\t\t\tif masked(ip, maskDst) == masked(ipDst, maskDst) and ipsAnteriores.__contains__(masked(ipSrc, maskSrc)):\n\t\t\t\tparsearTiempos = lambda string: map(float, re.findall('(?:\\s*([0-9.]+)+\\s+ms)', string))\n\t\t\t\tpromedio = lambda l: sum(l)/len(l)\n\t\t\t\ttiempoAnterior = promedio(parsearTiempos(ipsAnteriores[masked(ipSrc, maskSrc)]))\n\t\t\t\ttiempoActual = promedio(parsearTiempos(matches.group(3)))\n\t\t\t\trtt = tiempoActual - tiempoAnterior\n\t\t\t\treturn hop, rtt\n\t\t\telse:\n\t\t\t\tif ipsActuales.__contains__(masked(ip, maskDst)):\n\t\t\t\t\tipsActuales[masked(ip, maskSrc)] += ' ' + matches.group(3)\t#te quiero python\n\t\t\t\telse:\n\t\t\t\t\tipsActuales[masked(ip, maskSrc)] = matches.group(3)\n\treturn -1, 0", "def test_parsing_webserver_logs(file, host_ip):\n\n ip_regexp = r\"\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\" # regexp for searching ip in logfile\n client_error_regexp = r'\" 4\\d{2} ' # 4xx number\n server_error_regexp = r'\" 5\\d{2} ' # 5xx number\n request_time_regexp = r'] \\d+ \"' # any number between ] and \" (configured apache's /D (request time by\n # microseconds logging parameter to be between these symbols)\n timestamp_regexp = r'\\d{2}/\\D+/\\d{4}:\\d{2}:\\d{2}:\\d{2}'\n\n values = {}\n top_count = \"10\"\n\n def file_last_timestamp(file):\n with open(file) as f:\n last_line = list(f)[-1]\n print(last_line)\n timestamp = re.search(timestamp_regexp, last_line)\n if timestamp:\n return str(timestamp[0]).replace('/', '.').replace(':', '.')\n else:\n return path.basename(file)\n\n\n def copy_unique_elements(list_of_all):\n \"\"\"\n Copies unique elements from list to unique_list\n :param list_of_all:\n :return:\n \"\"\"\n unique_list = []\n for i in list_of_all:\n if i not in unique_list:\n unique_list.append(i)\n if len(unique_list) == int(top_count):\n break\n return unique_list\n\n def findall_unique_exps(regexp, data):\n \"\"\"\n Returns list of unique expressions in data (may be limited by length\n if copy_unique_elements function's used)\n :param regexp:\n :param data:\n :return:\n \"\"\"\n list_of_all = re.findall(regexp, data)\n count = Counter(list_of_all)\n sorted_list = sorted(list_of_all, key=lambda x: (count[x], x), reverse=True)\n return copy_unique_elements(sorted_list)\n\n def findall_unique_exps_lines(regexp, logfile):\n \"\"\"\n Returns a list of lines with unique expressions in logfile (may be limited by length\n if copy_unique_elements function's used)\n :param regexp:\n :param logfile:\n 
:return:\n \"\"\"\n list_of_all = []\n with open(logfile) as logfile:\n for line in logfile:\n exp = re.compile(regexp)\n if exp.search(line):\n list_of_all.append(line)\n return copy_unique_elements(list_of_all)\n\n def requests_count(regexp, data):\n \"\"\"\n Returns requests count containing expression like 'GET' or 'POST'\n :param regexp:\n :param data:\n :return:\n \"\"\"\n requests_list = re.findall(regexp, data)\n return int(list(Counter(requests_list).values())[0])\n\n def requests_exec_time_list(regexp, data, logfile):\n \"\"\"\n Returns lines with longest requests (number of lines is set in top_count variable)\n :param regexp:\n :param data:\n :param logfile:\n :return:\n \"\"\"\n # find every request time and add to collection\n request_time_list = re.findall(regexp, data)\n request_time_count = Counter(request_time_list)\n sorted_request_time_list = sorted(request_time_list, key=lambda x: (request_time_count[x], x), reverse=True)\n # convert strings in requests time integer values\n for index, item in enumerate(sorted_request_time_list):\n sorted_request_time_list[index] = int(''.join(c for c in item if c.isdigit()))\n # collect top <top_count> longest requests\n top_request_time_list = []\n for i in range(int(top_count)):\n max_request = max(sorted_request_time_list)\n top_request_time_list.append(max_request)\n sorted_request_time_list.remove(max_request)\n # search top <top_count> longest requests in a file and collect lines with them to a list\n top_request_lines = []\n with open(logfile) as f:\n for line in f:\n for i in top_request_time_list:\n if str(i) in line:\n top_request_lines.append(line)\n return top_request_lines\n\n def parse(log):\n\n timestamp = file_last_timestamp(log)\n json_file_name = 'log_parse_{}.json'.format(timestamp)\n open(json_file_name, 'w').close() # clears json-file for every test run\n\n print('\\n{}'.format(log)) # to see what log was parsed\n\n unique_client_error_lines_list = findall_unique_exps_lines(client_error_regexp, log)\n unique_server_error_lines_list = findall_unique_exps_lines(server_error_regexp, log)\n\n with open(log) as f:\n data = f.read()\n\n unique_ips_list = findall_unique_exps(ip_regexp, data)\n\n get_count = requests_count('GET', data)\n post_count = requests_count('POST', data)\n\n total_requests_count = get_count + post_count\n\n top_request_lines = requests_exec_time_list(request_time_regexp, data, log)\n\n with open(json_file_name, 'a') as json_file:\n values[\"{}\".format(log)] = {'TOP {} IPs'.format(top_count): unique_ips_list,\n 'Total requests count': total_requests_count,\n 'GET requests count': get_count,\n 'POST requests count': post_count,\n 'TOP {} client errors'.format(top_count): unique_client_error_lines_list,\n 'TOP {} server errors'.format(top_count): unique_server_error_lines_list,\n 'TOP {} longest requests'.format(top_count): top_request_lines}\n json.dump(values, json_file)\n\n if isinstance(file, list):\n for i in file:\n try:\n parse(i)\n except UnicodeDecodeError:\n print(\"File wasn't parsed (check format) ({})\".format(file))\n pass\n else:\n try:\n parse(file)\n except UnicodeDecodeError:\n print(\"File wasn't parsed (check format) ({})\".format(file))", "def parse_line(cls, line):\n regex = re.compile(cls.pattern)\n m = regex.search(line)\n if m:\n data = m.groupdict()\n data = cls.post_process(data)\n if cls.date_format:\n data['time'] = cls.convert_time(data['time'])\n else:\n data['time'] = datetime.now()\n return data\n else:\n return {}", "def process_line(line):\n\n name_comp_list = []\n 
givenname_comp_list = []\n surname_comp_list = []\n geocode_comp_list = []\n locality_comp_list = []\n date1_comp_list = []\n date2_comp_list = []\n\n # Split the line into the basic fields - - - - - - - - - - - - - - - - - - -\n #\n if (config.in_file_type in ['CSV','CSVQ','TAB','TABQ']):\n # Comma or tabulator separated\n try:\n line_list = config.line_parser.parse(line)\n except:\n log_message('CSV line parsing failed with inout: '+line,'err')\n\n if (len(line_list) < config.input_len):\n log_message('Input line does not contain enough fields,' +\\\n 'fill up with empty fields','warn')\n while (len(line_list) < config.input_len):\n line_list.append('')\n\n config.curr_line_list = line_list # Save current line list\n\n # Extract fields into different component lists - - - - - - - - - - - - - -\n #\n if (config.input_component['name'] != []): # Extract name fields\n for i in config.input_component['name']:\n name_comp_list.append(line_list[i])\n\n else: # Extract givenname and surname into separate components - - - - - -\n if (config.input_component['givenname'] != []): # Extract g-name fields\n for i in config.input_component['givenname']:\n givenname_comp_list.append(line_list[i])\n\n if (config.input_component['surname'] != []): # Extract surname fields\n for i in config.input_component['surname']:\n surname_comp_list.append(line_list[i])\n\n if (config.input_component['geocode'] != []): # Extract geocode fields\n for i in config.input_component['geocode']:\n geocode_comp_list.append(line_list[i])\n\n if (config.input_component['locality'] != []): # Extract locality fields\n for i in config.input_component['locality']:\n locality_comp_list.append(line_list[i])\n\n if (config.input_component['date1'] != []): # Extract date1 fields\n for i in config.input_component['date1']:\n date1_comp_list.append(line_list[i])\n\n if (config.input_component['date2'] != []): # Extract date2 fields\n for i in config.input_component['date2']:\n date2_comp_list.append(line_list[i])\n\n elif (config.in_file_type == 'COL'): # Column based input file - - - - - - -\n\n if (len(line) < config.input_len):\n log_message('Input line is not long enough, fill up with spaces','warn')\n line += ' '*(config.input_len-len(line))\n\n if (config.input_component['name'] != []): # Extract name fields\n for (col_start,length) in config.input_component['name']:\n name_comp_list.append(line[col_start,col_start+length])\n\n else: # Extract givenname and surname into separate components - - - - - -\n if (config.input_component['givenname'] != []): # Extract g-name fields\n for (col_start,length) in config.input_component['givenname']:\n givenname_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['surname'] != []): # Extract surname fields\n for (col_start,length) in config.input_component['surname']:\n surname_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['geocode'] != []): # Extract geocode fields\n for (col_start,length) in config.input_component['geocode']:\n geocode_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['locality'] != []): # Extract locality fields\n for (col_start,length) in config.input_component['locality']:\n locality_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['date1'] != []): # Extract date1 fields\n for (col_start,length) in config.input_component['date1']:\n date1_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['date2'] != []): # 
Extract date2 fields\n for (col_start,length) in config.input_component['date2']:\n date2_comp_list.append(line[col_start,col_start+length])\n\n # elif (config.in_file_type == 'SQL'): # - - - - - - - - - - - - - - - - - -\n\n ################################\n # Add later: SQL database access\n ################################\n\n msg = [' Component basic field lists:', \\\n ' Name: '+str(name_comp_list), \\\n ' Given name: '+str(givenname_comp_list), \\\n ' Surname: '+str(surname_comp_list), \\\n ' Geocode: '+str(geocode_comp_list), \\\n ' Locality: '+str(locality_comp_list), \\\n ' Date1: '+str(date1_comp_list), \\\n ' Date2: '+str(date2_comp_list)]\n log_message(msg,'v2')\n\n name_comp = ''\n givenname_comp = ''\n surname_comp = ''\n geocode_comp = ''\n locality_comp = ''\n date1_comp = ''\n date2_comp = ''\n\n # Now clean and then concatenate component lists into strings - - - - - - - -\n #\n if (name_comp_list != []): # Name component\n name_comp = name_comp_list[0] # Start with first field in list\n\n for f in name_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['name'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['name'] == 1):\n sep = check_field_spill(name_comp, f)\n\n name_comp = name_comp+sep+f # Append separator and field\n\n if (givenname_comp_list != []): # Givenname component - - - - - - - - - - -\n givenname_comp = givenname_comp_list[0] # Start with first field in list\n\n for f in givenname_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['givenname'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['givenname'] == 1):\n sep = check_field_spill(givenname_comp, f)\n\n givenname_comp = givenname_comp+sep+f # Append separator and field\n\n if (surname_comp_list != []): # Surname component - - - - - - - - - - - - -\n surname_comp = surname_comp_list[0] # Start with first field in list\n\n for f in surname_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['surname'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['surname'] == 1):\n sep = check_field_spill(surname_comp, f)\n\n surname_comp = surname_comp+sep+f # Append separator and field\n\n if (geocode_comp_list != []): # Geocode component - - - - - - - - - - - - -\n geocode_comp = geocode_comp_list[0] # Start with first field in list\n\n for f in geocode_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['geocode'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['geocode'] == 1):\n sep = check_field_spill(geocode_comp, f)\n\n geocode_comp = geocode_comp+sep+f # Append separator and field\n\n if (locality_comp_list != []): # Locality component - - - - - - - - - - - -\n locality_comp = locality_comp_list[0] # Start with first field in 
list\n\n for f in locality_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['locality'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['locality'] == 1):\n sep = check_field_spill(locality_comp, f)\n\n locality_comp = locality_comp+sep+f # Append separator and field\n\n if (date1_comp_list != []): # Date1 component - - - - - - - - - - - - - - -\n date1_comp = date1_comp_list[0] # Start with first field in list\n\n for f in date1_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['date1'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['date1'] == 1):\n if (date1_comp[-1] != ' ') and (f[0] != ' '):\n tmp_list0 = date1_comp.split()\n tmp_list1 = f.split()\n check_word = tmp_list0[-1]+tmp_list1[0]\n\n if (check_word in ['jan','feb','mar','apr','may','jun','jul','aug', \\\n 'sep','oct','nov','dec','january','february','march','april', \\\n 'may','june','july','august','september','october','november', \\\n 'december']):\n\n sep = '' # Set separator to no space\n msg = ' Correct date1 word spilling: \"'+date1_comp+'\",\"'+f+'\"'\n log_message(msg,'v1')\n\n date1_comp = date1_comp+sep+f # Append separator and field\n\n if (date2_comp_list != []): # Date2 component - - - - - - - - - - - - - - -\n date2_comp = date2_comp_list[0] # Start with first field in list\n\n for f in date2_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['date2'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['date2'] == 1):\n if (date2_comp[-1] != ' ') and (f[0] != ' '):\n tmp_list0 = date1_comp.split()\n tmp_list1 = f.split()\n check_word = tmp_list0[-1]+tmp_list1[0]\n\n if (check_word in ['jan','feb','mar','apr','may','jun','jul','aug', \\\n 'sep','oct','nov','dec','january','february','march','april', \\\n 'may','june','july','august','september','october','november', \\\n 'december']):\n\n sep = '' # Set separator to no space\n msg = ' Correct date1 word spilling: \"'+date1_comp+'\",\"'+f+'\"'\n log_message(msg,'v1')\n\n date2_comp = date2_comp+sep+f # Append separator and field\n\n # Check if name component is given or givenname and surname separately - - -\n #\n if (config.input_component['givenname'] != []) or \\\n (config.input_component['surname'] != []):\n name_comp = [givenname_comp, surname_comp]\n\n msg = [' Components:', \\\n ' Name: \"'+str(name_comp)+'\"', \\\n ' Geocode: \"'+geocode_comp+'\"', \\\n ' Locality: \"'+locality_comp+'\"', \\\n ' Date1: \"'+date1_comp+'\"', \\\n ' Date2: \"'+date2_comp+'\"']\n log_message(msg,'v1')\n\n return [name_comp, geocode_comp, locality_comp, date1_comp, date2_comp]", "def parse(\n ignore_files=False,\n ignore_urls=None,\n start_at=None,\n stop_at=None,\n request_type=None,\n ignore_www=False,\n slow_queries=False\n):\n params = {'ignore_files': ignore_files, 'ignore_urls': ignore_urls,\n 'start_at': start_at, 'stop_at': stop_at,\n 'request_type': request_type, 'ignore_www': ignore_www}\n\n with 
open(\"log.log\", 'r') as log_file:\n regular_exp = r'\\[.*\\] \\\".*\\\" \\d+ \\d+'\n # Find correct requests in log file\n request_dict = defaultdict(lambda: [0, 0])\n\n for line in log_file:\n if re.match(regular_exp, line):\n request = parse_request(line)\n add_to_request_dict(request, request_dict, params)\n top5 = find_top_five(request_dict, slow_queries)\n return top5", "def __init__(self, url):\n\n lines = [l.strip().split() for l in open(url)]\n\n # Split the lines and the headers\n i = 0\n self.refs = {} # name: [len, offset-to-first-record, offset-to-last-record + 1]\n\n while(lines[i][0][0] == '@'):\n line = lines[i]\n if line[0] == '@SQ':\n sn = line[1].split(':')[1]\n ln = int(line[2].split(':')[1])\n self.refs[sn] = [ln, None, None]\n i += 1\n\n # Process the mapped reads\n # - create offset pointers to the start of each chromosome\n # - convert the position to an int\n cur_chr = lines[i][2]\n self.refs[cur_chr][1] = i\n \n while(i < len(lines)):\n if not (int(lines[i][1]) & 0x4): \n lines[i][3] = int(lines[i][3])\n\n if lines[i][2] != cur_chr:\n self.refs[cur_chr][2] = i # mark the end\n cur_chr = lines[i][2] \n self.refs[cur_chr][1] = i # mark the start\n i += 1\n\n self.lines = lines\n \n return", "def parse(self,line,mult,notifies):\n return 0", "def process_line(line, recent_sessions, open_sessions):\n HOST_PREFIX = 'Host: '\n # Ignore lines that don't start with a number. Health checks show up in the\n # log and break things if processed.\n if not re.match('^ *[0-9]', line):\n return\n session_id, op, talking_to, info = line.split(None, 3)\n if talking_to != 'b': # b = backend\n # This won't happen if you give -b to varnishlog.\n return\n if op == 'BackendOpen':\n session = Session(session_id, time(), None, info.split()[0], [])\n recent_sessions.append(session)\n open_sessions[session_id] = session\n return\n try:\n session = open_sessions[session_id]\n except KeyError:\n# print (\"A log entry came in for a session that this program doesn't \"\n# \"know about. This is probably because it started before you \"\n# \"ran the program.\")\n return\n if op == 'BackendClose':\n session.end = time()\n del open_sessions[session_id]\n return\n # Ignoring BackendReuse. Assuming anything not closed is reused.\n \n if op == 'TxRequest':\n session.requests.append(Request(time(), None, None, None, None))\n return\n try:\n request = session.requests[-1]\n except IndexError:\n# print (\"A log entry came in for a request that this program doesn't \"\n# \"know about. 
This is probably because it started before you \"\n# \"ran the program.\")\n return\n if op == 'TxURL':\n request.url = info\n elif op == 'TxHeader' and info.startswith(HOST_PREFIX):\n request.host = info[len(HOST_PREFIX):]\n elif op == 'RxStatus':\n request.end = time()\n request.status = info", "def parse(self, src, line):\n r = line.split('\\t')\n p = {}\n if src == 'sf':\n p['businessID'] = r[0]\n p['name'] = r[1]\n p['address'] = r[2]\n p['city'] = r[3]\n p['state'] = r[4]\n p['zip'] = r[5]\n p['latitude'] = r[6]\n p['longitude'] = r[7]\n p['phone'] = r[8]\n elif src == 'nyc':\n p['businessID'] = r[0]\n p['name'] = r[1]\n # nyc separates the building number from the street name\n p['address'] = ' '.join([r[3].strip(), r[4].strip()])\n p['city'] = 'NYC'\n p['state'] = 'NY'\n p['zip'] = r[5]\n p['latitude'] = None\n p['longitude'] = None\n p['phone'] = r[6]\n return p", "def parse_line(self, line):\n\t\tif line[0] == \"#\":\n\t\t\treturn False\n\t\tparts = [x.strip() for x in line.strip().split(\",\")]\n\t\tself.unix_time = int(parts[0])\n\t\tself.cycles_done = int(parts[1])\n\t\tself.cur_path = int(parts[2])\n\t\tself.paths_total = int(parts[3])\n\t\tself.pending_total = int(parts[4])\n\t\tself.pending_favs = int(parts[5])\n\t\tself.map_size = float(parts[6].replace(\"%\",\"\"))\n\t\tself.unique_crashes = int(parts[7])\n\t\tself.unique_hangs = int(parts[8])\n\t\tself.max_depth = int(parts[9])\n\t\tself.execs_per_sec = float(parts[10])\n\t\treturn True", "def decodeline(self, line):\n result = ApacheLogLine()\n result.full_line = line\n linepatternmatch = self._linepattern.match(line)\n if linepatternmatch:\n result.hostname = linepatternmatch.group(1)\n result.user = linepatternmatch.group(2)\n if result.user == '-':\n result.user = ''\n (result.accesstime_seconds, result.serveroffset) = self.parsedate(linepatternmatch.group(3))\n result.accesstime_string = stringdate(result.accesstime_seconds, offset=result.serveroffset)\n result.file = linepatternmatch.group(4)\n result.code = linepatternmatch.group(5)\n result.code_description = self._codetranslator.get_description(result.code)\n result.size = linepatternmatch.group(6)\n if result.size == '-':\n result.size = 0\n result.referer = linepatternmatch.group(7)\n if result.referer == '-':\n result.referer = ''\n result.browser = linepatternmatch.group(8)\n else:\n self._notparsable += 1\n warn(\"The line '%s' could not be parsed\" % line)\n return None\n if self._line_fits_pattern(result):\n self._acceptedlines += 1\n return result\n else:\n self._rejectedlines += 1\n return None", "def parse(filename):\n data = {}\n for line in reversed(list(open(filename))):\n date, time, ip, source = line.strip().split()\n log_time = datetime.datetime.strptime(date +\" \"+time, '%Y-%m-%d %H:%M:%S')\n diff = datetime.datetime.now() - log_time\n if diff.seconds > 600:\n break\n if ip not in data:\n data[ip] = set()\n data[ip].add(source)\n return data", "def mapper(self, line_no, line):\n cell = csv_readline(line)\n if cell[0] == 'V':\n yield cell[4],1" ]
[ "0.6739221", "0.62861466", "0.6171111", "0.61156", "0.60773605", "0.58872736", "0.57866424", "0.578513", "0.5761597", "0.5749003", "0.5631643", "0.56234014", "0.55981034", "0.5581087", "0.55439436", "0.5520048", "0.5506902", "0.5490785", "0.54784197", "0.54761577", "0.5452478", "0.5422047", "0.54040843", "0.53870934", "0.53794444", "0.53785896", "0.5367424", "0.53553396", "0.5349218", "0.5341271" ]
0.704579
0
Updates the mod database.
def update_mod_database(): mydb = database() cursor = mydb.cursor() mod_path = "data/stats.json" info = dict_from_json_file(mod_path) cursor.execute("DELETE FROM poe.mod") # Clear table for mod_type in info["result"]: for mod in mod_type["entries"]: mod_id = mod["id"] mod_text = mod["text"] mod_type = mod["type"] # If the mod has options we need to add these to the options table if "option" in mod: query = "INSERT INTO poe.mod (id, text, type, options) VALUES (%s, %s, %s, %s);" val = (mod_id, mod_text, mod_type, 1) cursor.execute(query, val) for option_mod in mod["option"]["options"]: option_mod_id = option_mod["id"] option_mod_text = option_mod["text"] mod_query = "INSERT INTO poe.options (mod_id, id, text) VALUES (%s, %s, %s)" mod_val = (mod_id, option_mod_id, option_mod_text) cursor.execute(mod_query, mod_val) # If there are no mods, simply add the mod to the table else: query = "INSERT INTO poe.mod (id, text, type) VALUES (%s, %s, %s);" val = (mod_id, mod_text, mod_type) cursor.execute(query, val) mydb.commit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update():\r\n\t#print(\"Updating poeninja database...\")\r\n\t#poeninja.update_database()\r\n\t#print(\"Updateing poeninja name-dict...\")\r\n\t#poeninja.update_name_dict()\r\n\t#print(\"Updating stats...\")\r\n\t#update_stats()\r\n\t#print(\"Updating items...\")\r\n\t#update_items()\r\n\t#print(\"Updating mod MySQL database...\")\r\n\t#update_mod_database()\r\n\tmydb = database()\r\n\twhile True:\r\n\t\trandom_mod(mydb)", "def update(self):\n self.getDbRecord().update()", "def update_database(self) -> None:\n \n # Simulate that we update a database\n time.sleep(10)", "def model_update(self, db):\n db.session.commit()", "def update(self):\n db.session.commit()", "def update(self):\n db.session.commit()", "def update(self):\n\n if not self.db: self.validate()\n\n self.logging.debug( \"update(%s)\" % (self.db) )\n\n for name in self.tables:\n self.dbs_tables[name]['md5'] = get_md5( self.dbs_tables[name]['path'] )\n\n self._get_magnitudes()\n self._get_events()", "def update(self):\n if not os.path.exists(self._db_file):\n return\n with open(self._db_file, 'r') as fp:\n self.from_dict(json.load(fp), \"JSON\")", "def __update_module(item):\n\n conn = sqlite3.connect(DTF_DB)\n cur = conn.cursor()\n\n # Remove the line first.\n sql = ('DELETE FROM modules '\n \"WHERE name='%s'\" % item.name)\n\n cur.execute(sql)\n\n entry = [(item.name, item.about, item.version,\n item.author, item.install_name)]\n\n # Update a Module Entry\n sql = ('INSERT INTO modules (name, about, version, '\n 'author, install_name)'\n 'VALUES (?, ?, ?, ?, ?)')\n\n cur.executemany(sql, entry)\n conn.commit()\n\n return cur.rowcount", "def update(self):\n current = LazyRegister(self.db)\n current.render()\n cur = self.db.cursor()\n for table in self.tables:\n if table in current.tables:\n additions, removals = current.tables[table].migrate(self.tables[table])\n for addition in additions:\n cur.execute(\"\"\"ALTER TABLE %s ADD COLUMN %s\"\"\" % (\n table, addition[1].get_sql()\n ))\n print('Added column: ', addition[0])\n for removal in removals:\n #cur.execute(\"\"\"ALTER TABLE %s DROP COLUMN %s\"\"\" % (\n # table, removal[0]\n #))\n #print('Removed column: ', removal[0])\n print('Did not removed column: ', removal[0])\n else:\n schema = self.tables[table].get_create_table_sql()\n cur.execute(schema)\n print('Added table %s' % table)", "def sync_db():\n pass", "def update(self) -> None:\n self.app.notifier.set_value(\"Database update started on {}\".format(datetime.datetime.now().strftime(\"%c\")))\n self.app.notifier.clear()\n self.app.logger.info(\"Starting database update\")\n card_parser = JSonCardParser(self.app)\n card_parser.check_update()\n try:\n self.__update(card_parser)\n except mysql.connector.errors.ProgrammingError as exc:\n if exc.errno == mysql.connector.errorcode.ER_BAD_DB_ERROR:\n self.setup_db()\n self.__update(card_parser)\n else:\n raise\n else:\n self.app.logger.info(\"Finished database update\")\n self.app.notifier.set_value(\"Database update finished on {}\".format(datetime.datetime.now().strftime(\"%c\")))\n self.app.notifier.clear()", "def update_database():\n\n # We obtain the data from the official database\n df = getData.extractData()\n\n # We save the dataframe for later use in the API\n auxiliary.saveToCsv(df, 'app/resources')", "def update_db(self):\n songs = self.db.get_all_songs()\n for song in songs:\n if choose_song(song) == ERROR:\n self.db.delete_song(song)\n files = []\n for song in glob.glob(\"songs\\*.wav\"):\n to_append = song.split('\\\\')[ONE][:-4]\n 
files.append(to_append)\n for song in files:\n if song not in songs:\n self.db.add_new_song(song)", "def update(self):\n self.__execute(self.pkgin_bin, \"update\")", "def update():", "def update():", "def upgrade_db(self):\n repo = Repository(meta.metadata, meta.Session)\n\n if len(self.args) > 1:\n repo.upgrade(self.args[1])\n else:\n repo.upgrade()", "def update_model(self):\n pass", "def updateModel(self):\n pass", "def save_db(self) -> None:", "def update(id, data):\n db = core.connect()\n theShift = db[id]\n theShift.update(data)\n theShift[\"modified\"] = utils.utctime()\n db[id] = theShift\n return db[id]", "def db_changed(self):\n self.dbstate.db.connect('person-add', self.update)\n self.dbstate.db.connect('person-delete', self.update)\n self.dbstate.db.connect('person-update', self.update)\n self.dbstate.db.connect('family-add', self.update)\n self.dbstate.db.connect('family-delete', self.update)\n self.dbstate.db.connect('family-update', self.update)", "def update( ):\r\n pass", "def update_database(days=1):\n if not os.path.isfile('data.db'):\n create_db_from_scratch()\n challenger_ids = riot_api.get_challenger_summoner_ids()\n match_ids = riot_api.get_match_ids_from_challenger(\n challenger_ids, days=days)\n match_ids = get_match_ids_not_in_db(match_ids)\n matches = riot_api.get_matches(match_ids)\n match_loader(matches)", "def _update(self):\n with sqlite3.connect(self.dbpath) as connection:\n cursor = connection.cursor()\n UPDATESQL = \"\"\"UPDATE accounts\n SET first_name=:first_name, last_name=:last_name, \n username=:username, email_address=:email_address, \n password_hash=:password_hash, balance=:balance, \n account_number=:account_number, admin=:admin\n WHERE id=:id;\"\"\"\n values = {\n \"first_name\": self.first_name,\n \"last_name\": self.last_name,\n \"username\": self.username,\n \"email_address\": self.email_address,\n \"password_hash\": self.password_hash, \n \"balance\": self.balance, \n \"account_number\": self.account_number,\n \"admin\": self.admin,\n \"id\": self.id\n }\n try:\n cursor.execute(UPDATESQL, values)\n except sqlite3.IntegrityError:\n raise ValueError(\"ID (id) does not set in datebase.\")", "def update(self):\n data = self.serialize()\n\n self.validate(data)\n\n saved_data = DATABASE_CONNECTION.update(self.__class__.__name__, data['id'], data)\n\n self.__dict__.update(saved_data)", "def edit_db(self, query, args=()):\n conn = self.get_db()\n try:\n cur = conn.execute(query, args)\n conn.commit()\n cur.close()\n except Exception as e:\n print(e)\n return False\n return True", "def save(self):\n self.db.commit()", "def db_update_entry():\n db = sh.open(the_phone_book_name, flag='c', writeback=True)\n name = get_name()\n if name in db:\n phone_number = get_phone_number(db[name.capitalize()])\n print(\"Updating existing entry ..... {name}\\n\".format(name=name))\n db[name.capitalize()] = phone_number\n db.sync()\n else:\n print_error()\n db.close()\n db_show_all()" ]
[ "0.76122123", "0.71540016", "0.70316404", "0.70300096", "0.68975616", "0.68975616", "0.6710741", "0.6642286", "0.66239715", "0.6530174", "0.6488016", "0.64495766", "0.63282275", "0.63222265", "0.6291294", "0.61177063", "0.61177063", "0.6116924", "0.6089856", "0.6086441", "0.60731155", "0.6068593", "0.60482156", "0.59850967", "0.5965445", "0.5957597", "0.59432", "0.59378886", "0.5931331", "0.5925371" ]
0.7822041
0
Gets a random mod.
def random_mod(mydb): cursor = mydb.cursor() cursor.execute("SELECT * FROM poe.mod ORDER BY RAND() LIMIT 1") result = cursor.fetchone() if result[3] == 1: print(result) option_query = "SELECT * FROM poe.options WHERE mod_id='%s' ORDER BY RAND() LIMIT 1" % result[0] print(option_query) cursor.execute(option_query) option_result = cursor.fetchone() print(result[1].replace("#", str(option_result[2]))) else: pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _random_int(self, modulo):\n r_seed = int(os.urandom(32).encode('hex'), 16)\n r_state = gmp.random_state(r_seed)\n raw = gmp.mpz_urandomb(r_state, self.RANDOM_BITS)\n\n return gmp.t_mod(raw, modulo)", "def getRandom(self) -> int:", "def getRandom(self) -> int:", "def random():\r\n return R.NextDouble()", "def apply_mod(num):\n return num % MODULO", "def get_random(self):\n return self._get_random()", "def random():\n return constant(1)", "def randomperc(self):\n\n self.jrand += 1\n if (self.jrand >= 55):\n self.jrand = 1\n self.oldrand = advance_random(self.oldrand)\n return self.oldrand[self.jrand]", "def getRandom(self):\n return random.choice(self.ls)", "def random(self):\r\n return random.randint(1, 4)", "def randomMethod(self):\n return random.random()", "def i_rand_a():\n return i_random() % 95 + 32", "def mersenne(p):\n return 2 ** p -1", "def rs():\n return random.choice([-1,1])", "def rs():\n return random.choice([-1,1])", "def getRandom(self):\n return self.nums[randint(0, len(self.nums)-1)]", "def pickSecretNumber(): \n return random.randrange(1, 11)", "def _get_random_value(self):\r\n return random.randint(1, 10)", "def generate_prime_module() -> int:\n p = generate_random_prime()\n print('[CLIENT LOG] generate prime module (p) with the value equal {}'.format(p))\n return p", "def getRandom(self) -> int:\n return random.choice(self.l)", "def get_random(self,num):\n return ''.join(sample('abcdefghijklmnopqrstuvwxyz1234567890!',8))", "def random(self):\n return self._random", "def _random(self, key):\n\n if hasattr(key, \"encode\"):\n key = key.encode('ascii')\n\n value = (zlib.crc32(key, self.seed) & MAX_VALUE)\n\n return value * INV_MAX_VALUE", "def random(self):\n return self._randomize()", "def getRandom(self):\n return random.choice(self.data)", "def getRandom(self) -> int:\n return random.choice(tuple(self.l))", "def rand(self):\n raise NotImplementedError", "def random() -> float:\n ...", "def _random_function(self, random_state):\n return random_state.rand", "def get_random_2(number):\n return ''.join(random.sample(field, number))" ]
[ "0.6971659", "0.6460533", "0.6460533", "0.63854176", "0.62373364", "0.61621237", "0.6155205", "0.61268276", "0.6096018", "0.60734254", "0.6058225", "0.6026166", "0.59996486", "0.5966405", "0.5966405", "0.5941724", "0.5940138", "0.5916687", "0.5910973", "0.5875053", "0.58578557", "0.584783", "0.58231527", "0.57908076", "0.5784267", "0.57728106", "0.57661754", "0.5755262", "0.5739771", "0.5737405" ]
0.6616409
1
Split docker image by image name and tag
def split_tag(image_name): image = image_name.split(":", maxsplit=1) if len(image) > 1: image_repo = image[0] image_tag = image[1] else: image_repo = image[0] image_tag = None return image_repo, image_tag
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_docker_image(image):\n # add defaults\n if '/' not in image:\n image = 'library/' + image\n if ':' not in image:\n image = image + ':latest'\n\n # parse\n tokens1 = image.split('/')\n namespace = tokens1[0]\n\n tokens2 = tokens1[1].split(':')\n name = tokens2[0]\n tag = tokens2[1]\n\n return namespace, name, tag", "def cli(ctx, image_file):\n if not image_file:\n return\n for pull_image in image_file.readline():\n pull_image = pull_image.rstrip('\\n')\n if len(pull_image) == 0:\n continue\n docker.pull(pull_image)\n push_image = '%s/%s/%s' % (DEFAULT_REGISTRY,\n DEFAULR_NAMESPACE,\n pull_image.split('/')[-1])\n docker.tag(pull_image, push_image)\n docker.push(push_image)", "def split_image(image_name):\n #pil_image = Image.fromarray(image_name)\n red, green, blue = img.split()\n\n return red, green, blue", "def get_images(name):\n url = \"/\".join([REGISTRY_BASE, name, \"/tags/list\"])\n response = req(url)\n image_list = []\n if response is not None:\n headers = {\"Accept\": \"application/vnd.docker.distribution.manifest.v2+json\"}\n tags = response[\"tags\"]\n for tag in tags:\n url = \"/\".join([REGISTRY_BASE, name, \"/manifests\", tag])\n response = req(url, headers)\n if response is not None:\n image = {}\n image[\"size\"] = response[\"config\"][\"size\"]\n for i in response[\"layers\"]:\n image[\"size\"] += i[\"size\"]\n image[\"size\"] = round(float(image[\"size\"]) / 1024 / 1024, 2)\n image[\"id\"] = response[\"config\"][\"digest\"][7:19]\n image[\"tag\"] = tag\n image[\"cmd\"] = \"docker pull uk8s.com/\" + name + \":\" + tag\n image_list.append(image)\n return sorted(image_list, reverse=True)", "def _split_imagespec(self, imagerepo):\n if not imagerepo:\n return (\"\", \"\", \"\", \"\")\n transport = \"\"\n hostname = \"\"\n image = imagerepo\n tag = \"\"\n try:\n if \"://\" in imagerepo:\n (transport, dummy, hostname, image) = imagerepo.split('/', 4)\n elif '/' in imagerepo:\n (hostname, image) = imagerepo.split('/', 1)\n except (ValueError, IndexError, TypeError):\n pass\n if hostname and '.' 
not in hostname:\n image = hostname + '/' + image\n hostname = \"\"\n if ':' in image:\n (image, tag) = image.split(':', 1)\n return (transport, hostname, image, tag)", "def _format_image(image, tags):\n text = \", \".join(md.pre(tag) for tag in tags)\n dest = _to_dockerfile_url(image)\n return md.item(md.link(text, dest))", "def _get_image_basename_and_tag(full_name):\n # the tag is either after the last (and only) colon, or not given at all,\n # in which case \"latest\" is implied\n tag_splits = full_name.rsplit(\":\", 1)\n if len(tag_splits) == 2:\n image_name = tag_splits[0]\n tag = tag_splits[1]\n else:\n image_name = full_name\n tag = \"latest\"\n\n if re.fullmatch(\"[a-z0-9]{4,40}/[a-z0-9._-]{2,255}\", image_name):\n # if it looks like a Docker Hub image name, we're done\n return image_name, tag\n # if the image isn't implied to origin at Docker Hub,\n # the first part has to be a registry\n image_basename = \"/\".join(image_name.split(\"/\")[1:])\n return image_basename, tag", "def tag_images(self, image_files, model=None):\n return self._multi_image_op(image_files, ['tag'], model=model)", "def do_stage(self, images):\n\n for i, image in enumerate(images):\n pass\n # logging_tags = logs.image_config_to_tags(image, self.group_by_keywords)", "def test_parse_image_meta_docker(image_docker):\n ret = {\n \"description\": (\n \"Docker image imported from \"\n \"busybox42/zimbra-docker-centos:latest on \"\n \"2019-03-23T01:32:25.320Z.\"\n ),\n \"name\": \"busybox42/zimbra-docker-centos:latest\",\n \"os\": \"linux\",\n \"published\": \"2019-03-23T01:32:25.320Z\",\n \"source\": \"https://docker.io\",\n \"version\": \"62487cf6a7f6\",\n }\n assert _parse_image_meta(image_docker, True) == ret", "def make(tag_masks: str = \"*\", poetry_version: str = \"master\"):\n tags = requests.get(\n \"https://registry.hub.docker.com/v1/repositories/python/tags\"\n ).json()\n\n def match_tag(tag) -> bool:\n tag_name = tag[\"name\"]\n return [\n tag_mask\n for tag_mask in tag_masks\n if tag_mask == \"*\" or fnmatch.fnmatch(tag_name, tag_mask)\n ]\n\n tags = list(filter(match_tag, tags))\n\n click.echo(f\"Found {len(tags)} tags.\")\n click.echo(\"Generating \", nl=False)\n\n docker_3_template = Path(\"./Dockerfile-3.template\").read_text(\"utf8\")\n docker_2_template = Path(\"./Dockerfile-2.template\").read_text(\"utf8\")\n\n for tag in tags:\n tag_name = tag[\"name\"]\n\n docker_template = docker_3_template\n\n try:\n tag_major_version = int(tag_name[0])\n tag_major_path = Path(str(tag_major_version))\n try:\n tag_major_path.mkdir()\n except FileExistsError:\n pass\n tag_path = tag_major_path / Path(tag_name)\n if tag_major_version == 2:\n docker_template = docker_2_template\n except ValueError:\n tag_path = Path(tag_name)\n\n try:\n tag_path.mkdir()\n except FileExistsError:\n pass\n\n (tag_path / \"Dockerfile\").write_text(\n docker_template.format(python_tag=tag_name, poetry_version=poetry_version)\n )\n click.echo(\".\", nl=False)\n click.echo(\" Done.\")", "def containers_from_image(image_name):\n jobs = Queue(maxsize=0)\n containers = list(\n filter(\n lambda x: image_name in x.attrs['Config']['Image'],\n doxy.containers.list()\n )\n )\n for container in containers:\n jobs.put(container)\n return jobs", "def build_images(prefix, images, tag=None, commit_range=None, push=False, chart_version=None, skip_build=False):\n value_modifications = {}\n for name, options in images.items():\n image_path = options.get('contextPath', os.path.join('images', name))\n image_tag = tag\n # include chartpress.yaml itself 
as it can contain build args and\n # similar that influence the image that would be built\n paths = list(options.get('paths', [])) + [image_path, 'chartpress.yaml']\n last_commit = last_modified_commit(*paths)\n if tag is None:\n if chart_version:\n image_tag = \"{}-{}\".format(chart_version, last_commit)\n else:\n image_tag = last_commit\n image_name = prefix + name\n image_spec = '{}:{}'.format(image_name, image_tag)\n\n value_modifications[options['valuesPath']] = {\n 'repository': image_name,\n 'tag': SingleQuotedScalarString(image_tag),\n }\n\n if skip_build:\n continue\n\n template_namespace = {\n 'LAST_COMMIT': last_commit,\n 'TAG': image_tag,\n }\n\n if tag or image_needs_building(image_spec):\n build_args = render_build_args(options, template_namespace)\n build_image(image_path, image_spec, build_args, options.get('dockerfilePath'))\n else:\n print(f\"Skipping build for {image_spec}, it already exists\")\n\n if push:\n if tag or image_needs_pushing(image_spec):\n check_call([\n 'docker', 'push', image_spec\n ])\n else:\n print(f\"Skipping push for {image_spec}, already on registry\")\n return value_modifications", "def build_container(client):\n client.images.build(path=os.path.join(os.path.abspath(\"\"), \"docker\"), tag=\"scrape_light\")", "def split(image, shape=(64,64)):\n \n if isinstance(image, basestring):\n image = imread(image)\n \n bin_image = to_binary(image)\n clear_image = clear_border(bin_image)\n \n # We need the +1 to properly offset the labels for regionprops\n label_image = label(clear_image, background=0) + 1\n props = [\n 'Image', 'BoundingBox', 'Centroid', 'Area',\n ]\n regions = regionprops(label_image, properties=props)\n regions = [NormalizedRegion(region['Image'], shape=shape) \\\n for region in regions]\n return regions", "def getGrouppedRawImages():\n imagesGlob = ['**/*_timestamped.jpg', '**/*_timestamped.JPG']\n images = func.reduce(operator.add, [[path for path in path.Path(\n '.').glob(glob)] for glob in imagesGlob], [])\n labelled = sorted([{\n 'label': image.parent.parent.name,\n 'time': image.parent.name,\n 'path': image\n } for image in images], key=lambda label: label['label'])\n return iter.groupby(labelled, key=lambda label: label['label'])", "def add_files_to_image(image, with_files, label=None):\n tag_idx = image.find(\":\")\n if tag_idx == -1:\n jobset.message(\n \"FAILED\", \"invalid docker image %s\" % image, do_newline=True\n )\n sys.exit(1)\n orig_tag = \"%s_\" % image\n subprocess.check_output([\"docker\", \"tag\", image, orig_tag])\n\n lines = [\"FROM \" + orig_tag]\n if label:\n lines.append(\"LABEL %s\" % label)\n\n temp_dir = tempfile.mkdtemp()\n atexit.register(lambda: subprocess.call([\"rm\", \"-rf\", temp_dir]))\n\n # Copy with_files inside the tmp directory, which will be the docker build\n # context.\n for f in with_files:\n shutil.copy(f, temp_dir)\n lines.append(\"COPY %s %s/\" % (os.path.basename(f), _BUILD_INFO))\n\n # Create a Dockerfile.\n with open(os.path.join(temp_dir, \"Dockerfile\"), \"w\") as f:\n f.write(\"\\n\".join(lines))\n\n jobset.message(\"START\", \"Repackaging %s\" % image, do_newline=True)\n build_cmd = [\"docker\", \"build\", \"--rm\", \"--tag\", image, temp_dir]\n subprocess.check_output(build_cmd)\n dockerjob.remove_image(orig_tag, skip_nonexistent=True)", "def data_splits(im_dir='/media/ignacio/Datos/plant_net/images_ori', tag=False):\n homedir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n splits_dir = os.path.join(homedir, 'data', 'data_splits')\n print(\"Loading data...\")\n 
file_list = os.listdir(splits_dir)\n\n # Metadata labels\n metadata = np.genfromtxt(os.path.join(splits_dir, 'synsets.txt'), dtype='str', delimiter='/n')\n\n # Training splits\n train = np.genfromtxt(os.path.join(splits_dir, 'train.txt'), dtype='str', delimiter=' ')\n y_train = train[:, -1].astype(np.int32)\n if tag:\n X_train = train[:, 0:2].astype(object)\n X_train[:, 0] = np.array([os.path.join(im_dir, i) for i in X_train[:, 0]])\n else:\n X_train = np.array([os.path.join(im_dir, i) for i in train[:, 0]])\n\n # Validation splits\n if 'val.txt' in file_list:\n val = np.genfromtxt(os.path.join(splits_dir, 'val.txt'), dtype='str', delimiter=' ')\n y_val = val[:, -1].astype(np.int32)\n if tag:\n X_val = val[:, 0:2].astype(object)\n X_val[:, 0] = np.array([os.path.join(im_dir, i) for i in X_val[:, 0]])\n else:\n X_val = np.array([os.path.join(im_dir, i) for i in val[:, 0]])\n else:\n print 'Training with no validation data.'\n X_val, y_val = None, None\n\n return X_train, y_train, X_val, y_val, metadata", "def cmd_gallery_tag_image(client, args):\n gallery_tag_image = client.gallery_tag_image(args.tag, args.image_id)\n data = gallery_tag_image.__dict__\n generate_output({'gallery_tag_image': data})", "def splitTransform(self):\n\t\t#path_merge = \"transform\"\n\t\t#path_train = \"transform/data/\"\n\t\t#path_label = \"transform/label/\"\n\t\tpath_merge = \"train/merge\"\n\t\tpath_train = \"train/image\"\n\t\tpath_label = \"train/label\"\n\t\ttrain_imgs = glob.glob(path_merge+\"/*.\"+self.img_type)\n\t\tfor imgname in train_imgs:\n\t\t\tmidname = imgname[imgname.rindex(\"/\")+1:imgname.rindex(\".\"+self.img_type)]\n\t\t\timg = cv2.imread(imgname)\n\t\t\timg_train = img[:,:,2]#cv2 read image rgb->bgr\n\t\t\timg_label = img[:,:,0]\n\t\t\tcv2.imwrite(path_train+midname+\".\"+self.img_type,img_train)\n\t\t\tcv2.imwrite(path_label+midname+\".\"+self.img_type,img_label)", "def testSplitImage(root):\n\n s1, s2 = splitImage(\"vck.tif\")\n v = OR(s1, s2).view(root)\n return v", "def parse(image_path):\n if image_path[-1] != '/': image_path += '/'\n images = sorted(os.listdir(image_path))\n if images[0] == '.directory':\n images = images[1:]\n if images[0] == '.DS_Store':\n images = images[1:]\n return images", "def test_retag_valid_image(self):\n alpine = self.docker.images.get(constant.ALPINE)\n self.assertTrue(alpine.tag(\"demo\", \"rename\"))\n\n alpine = self.docker.images.get(constant.ALPINE)\n self.assertNotIn(\"demo:test\", alpine.tags)", "def tag_image(image):\n\n headers = {\n \"Content-Type\": \"application/octet-stream\",\n \"Ocp-Apim-Subscription-Key\": COMPUTER_VISION_KEY\n }\n\n endpoint = COMPUTER_VISION_URL + \"/vision/v3.1/tag\"\n if DEBUG:\n print(\"Calling endpoint %s\" % endpoint)\n\n response = requests.post(endpoint, data=image, headers=headers)\n if response.status_code == 200:\n tags = json.loads(response.content)\n return tags['tags']\n else:\n if DEBUG:\n print(\"Call to endpoint '%s' returned status code %s. 
Reason: %s\" % (endpoint, str(response.status_code), response.content))\n return None", "def get_tags(self):\n tags = []\n for image in self.client.images.list():\n for tag in image.tags:\n if tag.startswith(self.repository_name):\n tokens = tag.split(':')\n tags.append(tokens[1])\n return tags", "def splitMerge(self):\n\t\tpath_merge = self.aug_merge_path\n\t\tpath_train = self.aug_train_path\n\t\tpath_label = self.aug_label_path\n\t\tfor i in range(self.slices):\n\t\t\tpath = path_merge + \"/\" + str(i)\n\t\t\t# print(path)\n\t\t\ttrain_imgs = glob.glob(path+\"/*.\"+self.img_type)\n\t\t\t# print(len(train_imgs))\n\t\t\t# break\n\t\t\tfor imgname in train_imgs:\n\t\t\t\tmidname = imgname[imgname.rindex(\"/\")+1:imgname.rindex(\".\"+self.img_type)]\n\t\t\t\timg = cv2.imread(imgname)\n\t\t\t\timg_train = img[:,:,2]#cv2 read image rgb->bgr\n\t\t\t\timg_label = img[:,:,0]\n\t\t\t\tcv2.imwrite(path_train+\"/\"+midname+\".\"+self.img_type,img_train)\n\t\t\t\tcv2.imwrite(path_label+\"/\"+midname+\".\"+self.img_type,img_label)", "def image_version(self, image_name, image_tag, ignore_not_found=False):\n if image_tag == \"local\":\n image_tag = \"latest\"\n try:\n docker_info = self.host.client.inspect_image(\"{}:{}\".format(image_name, image_tag))\n return docker_info['Id']\n except NotFound:\n # TODO: Maybe auto-build if we can?\n if ignore_not_found:\n return None\n else:\n raise ImageNotFoundException(\n \"Cannot find image {}:{}\".format(image_name, image_tag),\n image=image_name,\n image_tag=image_tag,\n )", "def img_split(img):\n\tbands = img.shape[2]\n\tif bands is 1:\n\t\treturn \"Image already is 1D. Why would you split it?\"\n\n\tband1 = img[:, :, 0]\n\tband2 = img[:, :, 1]\n\tband3 = img[:, :, 2]\n\tif bands is 4:\n\t\tband4 = img[:, :, 4]\n\t\treturn(band1, band2, band3, band4)\n\treturn(band1, band2, band3)", "def image_tagger_representations(dataset, text_encoder, image_encoder):\n\n vectorizer = get_text_vectorizer(text_encoder)\n\n if image_encoder == 'inceptionresnetv2':\n stream_encoder, imread = get_inceptionresnetv2_tagger()\n else:\n raise NotImplementedError('{} not recognized image_encoder'.format(image_encoder))\n\n captions = load_captions(dataset)\n images = load_images(dataset)\n\n encoding_path = BASE_PATH / dataset / ('{}_{}'.format(text_encoder, image_encoder))\n encoding_path.mkdir(exist_ok=True)\n\n count = 0\n for split in images:\n split_images = [i for i in images[split]]\n split_images_stream = get_image_stream(images, imread, stream_encoder, split)\n caption_stream = [list(captions[imid].values()) for imid in split_images]\n\n encoded_filename = encoding_path / '{}-tagencoded-captions-and-images.json'.format(split)\n\n with encoded_filename.open('w') as fout:\n for image_id, capts, image_tags in zip(split_images, caption_stream, split_images_stream):\n print(count)\n count += 1\n for c in capts:\n vec = vectorizer.transform([c]).tolist()[0]\n image_vec = vectorizer.transform(image_tags).mean(axis=0).tolist()\n\n print(json.dumps(\n {'id': str(image_id),\n 'text': c,\n 'x_text': vec,\n 'x_image': image_vec}), file=fout)", "def split_image(image_path: str, image_paths_iter: List[str]) -> List[str]:\n try:\n im = Image.open(image_path)\n except PIL.UnidentifiedImageError:\n return []\n\n width, height = im.size\n\n result = []\n\n splits = [[0, 0], [width // 2, 0], [0, height // 2], [width // 2, height // 2]]\n for (x, y), file_path in zip(splits, image_paths_iter):\n left = x\n top = y\n right = x + width // 2\n bottom = y + height // 2\n\n im.crop((left, top, 
right, bottom)).save(file_path)\n result.append(os.path.basename(file_path))\n os.remove(image_path)\n return result" ]
[ "0.6818008", "0.6265655", "0.6186208", "0.57555246", "0.5738188", "0.57254416", "0.57249683", "0.56937796", "0.5607085", "0.55492526", "0.5532096", "0.5484324", "0.54626226", "0.5412449", "0.5354997", "0.5315514", "0.53103733", "0.5297963", "0.52868354", "0.52652454", "0.5243758", "0.52366346", "0.52357495", "0.52258724", "0.52248", "0.5199021", "0.5185322", "0.5183596", "0.51824105", "0.5170035" ]
0.7175431
0
Fills each list by the longest one, thereby aligning them in length Last element is used as the fill value
def fill_lists_by_longest(lists): max_len = len(max(lists, key=len)) for current_list in lists: current_list.extend(repeat(current_list[-1], max_len - len(current_list)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def zip_longest_ffill(*args):\n lists = [x if isinstance(x, list) else list(x) for x in args]\n max_total_len = max([len(l) for l in lists])\n\n ffill_lists = []\n\n for sub_list in lists:\n max_list_len = len(sub_list)\n max_diff = max_total_len - max_list_len\n\n # empty lists with no -1 index\n try:\n last_val = sub_list[-1]\n complete_list = sub_list + [last_val] * max_diff\n except:\n complete_list = [[] for _ in range(max_diff)]\n ffill_lists.append(complete_list)\n\n return zip(*ffill_lists)", "def by_longest_length():\n# fill it out\n return sorted(STRING_LIST, key = lambda s: len(s), reverse=True)", "def longest_grouper(iterable: Iterable, group_size: int, fillvalue=None):\n\t#\n\targs = [iter(iterable)] * group_size\n\treturn zip_longest(*args, fillvalue=fillvalue)", "def pad(lst, pad_size, filler):\n assert(isinstance(lst, list))\n assert(pad_size - len(lst) >= 0)\n lst.extend([filler] * (pad_size - len(lst)))", "def uniform_list_length(labels):\n max_num = max([len(i) for i in labels])\n for label in labels:\n for num in range(1, max_num):\n if len(label) == num:\n label.extend([\" \" for i in range(max_num - num)])\n return labels", "def zip_longest(list1, list2):\n zipped = zip(list1, list2)\n if len(list1) < len(list2):\n zipped += [(None, item) for item in list2[len(list1):]]\n elif len(list1) > len(list2):\n zipped += [(item, None) for item in list1[len(list2):]]\n return zipped", "def pad_list(xs, pad_value=0.0, pad_left=False):\n bs = len(xs)\n max_time = max(x.size(0) for x in xs)\n xs_pad = xs[0].new_zeros(bs, max_time, * xs[0].size()[1:]).fill_(pad_value)\n for b in range(bs):\n if len(xs[b]) == 0:\n continue\n if pad_left:\n xs_pad[b, -xs[b].size(0):] = xs[b]\n else:\n xs_pad[b, :xs[b].size(0)] = xs[b]\n return xs_pad", "def pad_list(xs, pad_value=0.0, pad_left=False):\n bs = len(xs)\n max_time = max(x.size(0) for x in xs)\n xs_pad = xs[0].new_zeros(bs, max_time, *xs[0].size()[1:]).fill_(pad_value)\n for b in range(bs):\n if len(xs[b]) == 0:\n continue\n if pad_left:\n xs_pad[b, -xs[b].size(0):] = xs[b]\n else:\n xs_pad[b, :xs[b].size(0)] = xs[b]\n return xs_pad", "def pad_data(d):\n max_len = set((len(i) for i in d))\n if len(max_len) == 1:\n return d\n else:\n max_len = max(max_len)\n return [i + [\"\"] * (max_len - len(i)) for i in d]", "def longest(my_list):\r\n\treturn sorted(my_list, key=len)[-1]", "def __pad__(sequence, max_l):\n if max_l - len(sequence) < 0:\n sequence = sequence[:max_l]\n else: \n sequence = np.pad(sequence, (0, max_l - (len(sequence))), 'constant', constant_values=(0))\n return sequence", "def make_same_length(l1, l2):\n ln = max(len(l1), len(l2))\n l1.extend([None] * (ln - len(l1)))\n l2.extend([None] * (ln - len(l2)))", "def pad_seq_list(array, sentinel):\n # Compute max length.\n maxlen = 0\n for seq in array:\n maxlen = max(maxlen, len(seq))\n\n # Pad.\n padded = []\n lens = []\n for seq in array:\n padding = maxlen - len(seq)\n padded.append(seq + [sentinel] * padding)\n lens.append(len(seq))\n\n return padded, lens", "def lfill(self, v=0, s=None):\n s = _successor(v - 1) if s is None else s\n try:\n return [x.lfill(s=s) for x in self]\n except Exception:\n return [s.s() for _ in range(len(self))]", "def bu(lengths: List[int], L: int) -> int:\n N = len(lengths)\n dp = [0] + [-1]*L\n for l in lengths:\n for j in range(l, L+1):\n dp[j] = max(dp[j], dp[j-l]+1 if dp[j-l] != -1 else -1)\n return dp[-1]", "def _pad_large(self, arrays, sentinel):\n # Compute max length.\n maxlen_ctx = 0\n maxlen_sent = 0\n for array in arrays:\n maxlen_ctx 
= max(maxlen_ctx, len(array))\n for seq in array:\n maxlen_sent = max(maxlen_sent, len(seq))\n\n # Pad contexts\n ctx_lens = []\n ctx_sent_lens = []\n padded_ctxs = []\n for array in arrays:\n ctx_lens.append(len(array))\n padding = maxlen_ctx - len(array)\n padded_ctx = array + [[sentinel]] * padding\n # Pad sents\n padded = []\n lens = []\n for i, seq in enumerate(padded_ctx):\n padding = maxlen_sent - len(seq)\n padded.append(seq + [sentinel] * padding)\n lens.append(len(seq) if i < ctx_lens[-1] else 0)\n\n padded_ctxs.append(padded)\n ctx_sent_lens.append(lens)\n\n return padded_ctxs, ctx_lens, ctx_sent_lens", "def pad_with_zero(list, max_length, pad_type):\n padded_list = pad_sequences(list, maxlen=max_length, padding=pad_type, truncating='post')\n return padded_list", "def pad_left(x, block_size=3, fill=0):\n if len(x) > block_size:\n return x\n else:\n right = np.array(list(str(x)))\n left = np.repeat(str(fill), block_size - right.size )\n return \"\".join(np.concatenate([left, right]))", "def _grow(self): \n limit = 0\n #Iterating through the list to find the number of elements\n for i in xrange(len(self)):\n if self._items[i] != self._fillValue:\n #There's an element at index i, so update the limit\n limit = i\n \n #Only grow the array if the limit+1 and the physical size is the same.\n if limit+1 == len(self):\n temp = Array(len(self)*2)\n \n #Copy existing elements to the new Array\n for i in xrange(len(self)):\n temp[i] = self._items[i]\n \n #Initialize the new elements to the fillValue\n for j in xrange(len(self), len(self)*2):\n temp[j] = self._fillValue\n self._items = temp", "def zip_longest_strings(arr):\n # Pad lengths with whitespace strings\n max_row_len = max(len(row) for row in arr)\n arr = [row + [rep_str(' ', len(row[0]))] * (max_row_len - len(row))\n for row in arr]\n # Transpose\n return map(list, zip(*arr))", "def grouper(size, iterable, fillvalue=None):\n args = [iter(iterable)] * size\n return zip_longest(fillvalue=fillvalue, *args)", "def bottom_clearing(fieldlist):\n fieldlist[int((width+1)/2)][length] = plain\n fieldlist[int((width+1)/2)][length-1] = plain\n fieldlist[int((width+1)/2)][length-2] = plain\n fieldlist[int(((width+1)/2)+1)][length] = plain\n fieldlist[int(((width+1)/2)+1)][length-1] = plain\n fieldlist[int(((width+1)/2)+1)][length-2] = plain\n fieldlist[int((width-1)/2)][length] = plain\n fieldlist[int((width-1)/2)][length-1] = plain\n fieldlist[int((width-1)/2)][length-2] = plain\n fieldlist[int(((width-1)/2)-1)][length] = plain\n fieldlist[int(((width-1)/2)-1)][length-1] = plain\n fieldlist[int(((width-1)/2)-1)][length-2] = plain", "def test_fill():\r\n CONSTANTE_DEBUT = 0\r\n CONSTANTE_FIN= 20\r\n CONSTANTE_DEBUT_2 = 0\r\n CONSTANTE_FIN_2 = 1000\r\n TAILLE_MAX= 10\r\n print(\"test 1\")\r\n # on teste avec que des valeurs comprises entre 0 et 20\r\n print(fill_list(CONSTANTE_DEBUT,CONSTANTE_FIN,TAILLE_MAX))\r\n print(\"test 2\")\r\n # on teste avec des valeurs non comprses dans l'intervalle(sauf la première)\r\n print(fill_list(CONSTANTE_DEBUT, CONSTANTE_FIN, TAILLE_MAX))\r\n print(\"test 3\")\r\n #on saisie de suite une valeur incorrecte\r\n print(fill_list(CONSTANTE_DEBUT, CONSTANTE_FIN, TAILLE_MAX))\r\n\r\n #on teste avec des intervalles plus grandes\r\n print(\"on teste avec des intervalles plus grandes\")\r\n print(\"test 1\")\r\n # on teste avec que des valeurs comprises entre 0 et 1000\r\n print(fill_list(CONSTANTE_DEBUT_2, CONSTANTE_FIN_2, TAILLE_MAX))\r\n print(\"test 2\")\r\n # on teste avec des valeurs non comprses dans 
l'intervalle(sauf la première)\r\n print(fill_list(CONSTANTE_DEBUT_2, CONSTANTE_FIN_2, TAILLE_MAX))\r\n print(\"test 3\")\r\n # on saisie de suite une valeur incorrecte\r\n print(fill_list(CONSTANTE_DEBUT_2, CONSTANTE_FIN_2, TAILLE_MAX))", "def _pad(self, array, sentinel, max_len=None):\n # Compute max length.\n maxlen = 0\n for seq in array:\n maxlen = max(maxlen, len(seq))\n\n if max_len is not None:\n maxlen = max(maxlen, max_len)\n\n # Pad.\n padded = []\n lens = []\n for seq in array:\n padding = maxlen - len(seq)\n padded.append(seq + [sentinel] * padding)\n lens.append(len(seq))\n\n return padded, lens", "def pad_from_beginning_fast(vals, maxlen):\r\n length = len(vals)\r\n matrix = np.zeros((length, maxlen))\r\n lens = [len(v) for v in vals] # only iteration\r\n mask = np.arange(maxlen)[::-1] < np.array(lens)[:, None] # key line\r\n matrix[mask] = np.concatenate(vals)\r\n return matrix", "def filler(your_list, length):\n for i in range(int(length/2)):\n your_list.append('None')", "def zfill(a, width):\n a_arr = numpy.asarray(a)\n width_arr = numpy.asarray(width)\n size = int(numpy.max(width_arr.flat))\n return _vec_string(\n a_arr, type(a_arr.dtype)(size), 'zfill', (width_arr,))", "def _batchify(self, data, align_right=False, include_lengths=False):\n lengths = [x.size(0) for x in data]\n max_length = max(lengths)\n out = data[0].new(len(data), max_length).fill_(neusum.Constants.PAD)\n for i in range(len(data)):\n data_length = data[i].size(0)\n offset = max_length - data_length if align_right else 0\n out[i].narrow(0, offset, data_length).copy_(data[i])\n\n if include_lengths:\n return out, lengths\n else:\n return out", "def left_zero_pad(l: List[int], n: int) -> List[int]:\n return [0] * (n - len(l)) + l", "def item4():\n names = ['Cecilia', 'Lisa', 'Marie']\n letters = [len(n) for n in names]\n print(letters)\n\n longest_name = None\n max_letters = 0\n for i in range(len(names)):\n count = letters[i]\n if count > max_letters:\n longest_name = names[i]\n max_letters = count\n print(longest_name)\n\n print(list(zip(names, letters))) # zip returns generator\n\n for name, count in zip(names, letters):\n if count > max_letters:\n longest_name = name\n max_letters = count\n print(longest_name)\n # Python 2, zip is not a generator - use izip from itertools to get generator\n\n names.append('Rosalind')\n for name, count in zip(names, letters):\n print('%s has %d letters' % (name, count)) # Rosalind isn't printed since letters is exhausted\n\n from itertools import zip_longest\n for name, count in zip_longest(names, letters):\n if count is None:\n print('%s is of unknown length' % name)\n else:\n print('%s has %d letters' % (name, count))" ]
[ "0.6954341", "0.6811927", "0.67876506", "0.623776", "0.61913204", "0.6180386", "0.6159936", "0.61568874", "0.6154108", "0.6140816", "0.60373944", "0.60165656", "0.6011669", "0.5957337", "0.58364516", "0.5810363", "0.57752806", "0.5755078", "0.5731733", "0.5730576", "0.5728703", "0.57209283", "0.56912035", "0.5670461", "0.5648832", "0.5640861", "0.5618508", "0.5608277", "0.56026", "0.55979913" ]
0.7967835
0
Create nested dicts by keys chain >>> create_dicts_by_chain(['some', 'keys'])
def create_dicts_by_chain(keys_chain: list): result = {} current_dict = result for key in keys_chain: current_dict[key] = {} current_dict = current_dict[key] return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _expand_keys(entities):\n keys = list(entities.keys())\n values = list(product(*[entities[k] for k in keys]))\n return [{k: v for k, v in zip(keys, combs)} for combs in values]", "def make_tree(dot_separated_keys):\n tree = {}\n for item in dot_separated_keys:\n inside_tree = tree\n for part in item.split('.'):\n inside_tree = inside_tree.setdefault(part, {})\n return tree", "def _flatten_keys(obj: Any, paths: List[str], existing_path: str):\n # Store path when we reach end, which is either non-Dict or empty Dict\n if isinstance(obj, list) and len(obj) > 0:\n for i, elm in enumerate(obj):\n _flatten_keys(elm, paths, f'{existing_path}/{i}')\n elif isinstance(obj, dict) and len(obj) > 0:\n for k, v in obj.items():\n _flatten_keys(v, paths, f'{existing_path}/{k}')\n # Remove leading /\n paths.append(existing_path.lstrip('/'))", "def nest_dict(dct, keys):\n nested_dict = dct\n for key in reversed(keys):\n nested_dict = RecursiveDict({key: nested_dict})\n return nested_dict", "def get_by_list_of_keys(dictionary: Dict, key_path: List[Any]) -> Dict:\n if len(key_path) == 1:\n return dictionary[key_path[0]]\n else:\n return get_by_list_of_keys(dictionary[key_path[0]], key_path[1:])", "def _group_by_keys(self, dicts, *keys):\n\n current = self._node\n queue = deque(keys)\n if not queue:\n yield _Node(value=dicts, prev=current)\n else:\n k = queue.popleft()\n for value, group in groupby(dicts, key=lambda d: d.pop(k, None)):\n if value:\n self._node = _Node(value, prev=current)\n yield self._node\n yield from self._group_by_keys(list(group), *queue)", "def add_by_list_of_keys(dictionary: Dict, key_path: List[Any], value: Any) -> Dict:\n key = key_path[0]\n dictionary[key] = (\n value\n if len(key_path) == 1\n else add_by_list_of_keys(\n dictionary[key] if key in dictionary else dict(),\n key_path[1:],\n value,\n )\n )\n return dictionary", "def recursive_key_lookup(keys, obj):\n if not isinstance(keys, list):\n return obj.get(keys)\n\n for key in keys:\n if obj is not None:\n obj = obj.get(key)\n\n return obj", "def dict(self, keys) -> dict:\n return {k: self(k) for k in keys}", "def chain_maps(*args):\n def merge(d1, d2):\n d1.update(d2)\n return d1\n\n return reduce(merge, reversed(args), {})", "def flatten_dict(in_obj: Dict[Any, Any], *, sep: str = '_', key_maker: Callable = None) -> Dict[str, Any]:\n\n if key_maker is None:\n key_maker = sep.join\n out_dict = {}\n for key, obj in in_obj.items():\n\n try:\n\n for inner_key, value in obj.items():\n try:\n\n out_dict.update(\n flatten_dict(\n {(key, inner_key): value},\n sep=sep,\n key_maker=key_maker\n )\n )\n\n except AttributeError:\n out_dict[key_maker(flatten_keys([key, inner_key]))] = value\n\n except AttributeError:\n out_dict[key_maker(flatten_keys(key))] = obj\n\n return out_dict", "def nested_set(d: t.Dict, keys: t.Sequence[str], value: t.Any) -> t.Dict:\n if not keys:\n return d\n\n if len(keys) == 1:\n d[keys[0]] = value\n return d\n\n subd = d\n for key in keys[:-1]:\n if key not in subd:\n subd = subd.setdefault(key, {})\n else:\n subd = subd[key]\n\n subd[keys[-1]] = value\n return d", "def iter_dict(iterator, keys, file_obj, defaults):\n result = []\n if len(keys):\n name = keys[0]\n rest = keys[1:]\n for item in resolve(iterator[name], **defaults):\n defaults['iter'][name] = item\n result += iter_dict(iterator, rest, file_obj, deepcopy(defaults))\n else:\n final_defaults = defaults\n final_defaults.update(resolve(file_obj.get('defaults', {}), **defaults))\n return [(file_obj, final_defaults)]\n return result", "def 
keys_breadth_first(self, include_dicts=False):\n namespaces = []\n for key in self._key_order:\n if isinstance(getattr(self, key), DotDict):\n namespaces.append(key)\n if include_dicts:\n yield key\n else:\n yield key\n for a_namespace in namespaces:\n for key in self[a_namespace].keys_breadth_first(include_dicts):\n yield '%s.%s' % (a_namespace, key)", "def FlattenDictionary(value, keys=[]):\n result = {}\n if type(value) is dict:\n for key in value:\n result.update(FlattenDictionary(value[key], keys + [key]))\n return result\n else:\n key = '.'.join(keys)\n return {key: value}", "def nested_set(data, keys, value):\n for key in keys[:-1]:\n data = data.setdefault(key, {})\n data[keys[-1]] = value", "def _set_by_path(dic, keys, value, create_missing=True):\n d = dic\n i = 0\n n_key = len(keys) - 1\n while i < n_key:\n k = keys[i]\n if isinstance(k, int):\n assert isinstance(d, list), \"Internal Error: %s is Expected as a list for %s.\" % (d, k)\n\n while len(d) <= k:\n d.insert(k, {})\n d = d[k]\n elif k in d:\n d = d[k]\n elif create_missing:\n next_key = keys[i + 1]\n if isinstance(next_key, int):\n if isinstance(d, list):\n d.insert(k, [])\n else:\n d[k] = []\n else:\n d[k] = {}\n d = d[k]\n else:\n return dic\n i += 1\n\n if isinstance(d, list) and keys[-1] >= len(d):\n d.insert(keys[-1], value)\n else:\n d[keys[-1]] = value\n return dic", "def extract_from_dict(*keys):\n def extractor(dictionary):\n return [dictionary(key) for key in keys]\n return extractor", "def build_dependency_tree(nodes):\n\n return { k: deps_for(nodes, k) for k in list(nodes.keys()) }", "def pull_key(key_fun):\n def pull_key_fun(objs):\n return dict((key_fun(value), value) for value in objs)\n return pull_key_fun", "def from_iterable(iterables):\n for it in iterables:\n for element in it:\n if isinstance(element, dict):\n for key in element:\n yield key\n else:\n yield element", "def add_keys(destdict, srclist, value=None):\n if len(srclist) > 1:\n destdict[srclist[0]] = {}\n destdict[srclist[0]] = destdict.get(srclist[0], {})\n add_keys(destdict[srclist[0]], srclist[1:], value)\n else:\n destdict[srclist[0]] = value\n return destdict", "def make_chains(word_list):\n\n chains = {}\n for index in range(0, len(word_list) - 2):\n # only making small chains because I like maximum absurdity\n key = tuple(word_list[index:index + 2])\n if key not in chains:\n chains[key] = [word_list[index + 2]]\n else:\n chains[key].append(word_list[index + 2])\n return chains", "def get_tree(self, list_of_keys):\n cur_obj = self\n for key in list_of_keys:\n cur_obj = cur_obj.get(key)\n if not cur_obj: break\n return cur_obj", "def dicts_product(**kwargs):\n keys = kwargs.keys()\n vals = kwargs.values()\n for instance in itertools.product(*vals):\n yield dict(zip(keys, instance))", "def group_by_keys(param_list, keys):\n\tkeys = list(keys)\n\tnames = {}\n\tfor p in param_list:\n\t\t\n\t\tif len(keys) > 0:\n\t\t\tkey = join_params(**{k: p.get(k, None) for k in keys})\n\t\t\t#vals = {k: p.get(k, None) for k in keys}\n\t\t\t#name = join_params(**vals)\n\t\t\t#names[name]=vals\n\t\telse:\n\t\t\tkey = ''\n\t\tif key in names:\n\t\t\tnames[key].append(p)\n\t\telse:\n\t\t\tnames[key]=[p]\n\treturn names", "def build(keys: List[str]):\n api = API()\n api.build(*keys)", "def make_recursive(obj):\n if isinstance(obj, list):\n for i, l in enumerate(obj):\n obj[i] = AttrDict.make_recursive(l)\n elif isinstance(obj, dict):\n for k, v in obj.items():\n obj[k] = AttrDict.make_recursive(v)\n return AttrDict(obj)\n return obj", "def make_kvps(lumpy, 
iterator):\n seq = [Binding(lumpy, make_thing(lumpy, k), make_thing(lumpy, v))\n for k, v in iterator]\n return seq", "def createDict(given_dict, words, value):\n\tresult_dict = given_dict\n\t# base case: if list is empty, add the value to the dict\n\tif not words:\n\t\tif '$value' in result_dict:\n\t\t\tresult_dict['$value'].append(value)\n\t\telse:\n\t\t\tresult_dict['$value'] = [value]\n\telse:\n\t\t# if the first word is already in dict, traverse through treemap with that word\n\t\t# call createDict with the tail of the words list\n\t\tif words[0] in result_dict:\n\t\t\tresult_dict[words[0]] = createDict(result_dict[words[0]], words[1:], value)\n\t\telse:\n\t\t\t# if the first word is not in the dict, create a new path\n\t\t\t# call createDict with the tail of the words list\n\t\t\tresult_dict[words[0]] = createDict({}, words[1:], value)\n\n\treturn result_dict" ]
[ "0.5748406", "0.56493646", "0.55906945", "0.5574522", "0.5420664", "0.53361344", "0.52231", "0.52206796", "0.52200127", "0.5192167", "0.5190618", "0.516658", "0.514745", "0.5144215", "0.50835", "0.505698", "0.5041884", "0.5041876", "0.50195855", "0.4984651", "0.49757776", "0.49746108", "0.49413374", "0.4920082", "0.4909398", "0.48830757", "0.48794788", "0.48713323", "0.4860765", "0.48562568" ]
0.85650545
0
Attach full HTTP request dump to Allure report
def attach_request_log(response): allure.attach( dump.dump_all(response).decode("utf-8"), name="Full request log", extension="txt", )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dumpHTTP(self, req, resp, content):\n fmt = '[%s] %s %s %s| %s %s\\n'\n dt_now = datetime.datetime.now()\n str_now = dt_now.strftime('%Y-%m-%d %H:%M:%S')\n\n #\n url = req['URL']\n method = req['method']\n body = req['request-body']\n post_data = ''\n if body and isinstance(body, StringType) and len(body):\n post_data = body\n pass\n #print(type(resp))\n if resp and isinstance(resp, DictionaryType) and len(resp):\n resp = 'response_status : %s' % (resp.get('status', 'None'))\n content = ''\n pass\n\n ss = fmt % (str_now, method, url, post_data, resp, content)\n print(ss)\n #\n\n\n logfile1 = os.path.join(os.getenv('G_LOG', '/root/automation/logs'), 'current/post_file_requests.log')\n logfile2 = os.path.join(\n os.getenv('U_DEBUG_HTTP_PLAY', os.getenv('G_CUREENTLOG', '/root/automation/logs/current')),\n 'post_file_requests.log')\n\n logfiles = []\n logfiles.append(logfile1)\n if logfile1 != logfile2:\n logfiles.append(logfile2)\n pass\n\n for logfile in logfiles:\n try:\n fd = open(logfile, 'a+')\n if fd:\n fd.write(ss)\n fd.close()\n pass\n pass\n except Exception, e:\n print('write debug log file(%s) failed : %s' % (logfile, e))\n pass\n pass\n\n pass", "def dump_to_log(self):\n # self._send_request(\"/dumpToLog\")\n pass", "def copy_report(cls, req):\n try:\n if req.report:\n report_url = cls.dvs_api_v1 + '/download/' + req.report\n upload_path = os.path.join(app.config['DRS_UPLOADS'], '{0}'.format(req.tracking_id))\n response = requests.post(url=report_url)\n file = open(os.path.join(upload_path, req.report), \"w+\")\n file.write(response.text)\n file.close()\n except Exception as e:\n app.logger.exception(e)\n raise e", "def report(self, url, query=\"\", depth=0):\n return self.request(url, \"REPORT\", query,\n {'Depth': str(depth), \"Content-Type\":\n \"application/xml; charset=\\\"utf-8\\\"\"})", "def report(self, url):\n\n print(self.get(url))", "def fusion_api_generate_li_forwarding_information_base_dump_file(self, uri, api=None, headers=None):\n param = '/forwarding-information-base'\n return self.li.post(uri=uri, api=api, headers=headers, param=param)", "def trace():\n logger.debug('[FLASKWEB /trace] Trace debug request')\n output = {}\n output['args'] = request.args\n output['form'] = request.form\n output['method'] = request.method\n output['url'] = request.url\n output['client_ip'] = request.remote_addr\n output['headers'] = {k: str(v) for k,v in request.headers.items()}\n return jsonify(output), 200", "def _log_request(self):\n log = self.server.log\n if log:\n if hasattr(log, \"info\"):\n log.info(self.format_request() + '\\n')\n else:\n log.write(self.format_request() + '\\n')", "def debug_requests_on():\n HTTPConnection.debuglevel = 2\n\n logging.basicConfig(filename='example1.log', filemode='w', level=logging.INFO, format='%(asctime)s %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S %p')\n logging.getLogger().setLevel(logging.DEBUG)\n\n requests_log = logging.getLogger(\"requests.packages.urllib3\")\n requests_log.setLevel(logging.DEBUG)\n requests_log.propagate = True", "def __call__(self, request):\n request.start_time = time.time()\n\n response = self.get_response(request)\n\n log_data = self.extract_log_info(request=request, response=response)\n logger.info(log_data)\n\n return response", "def report(self) -> Any:", "def log(self):\n\n\t\theader_dict = dict(request.headers)\n\n\t\ttry:\n\t\t\ttracker_id = header_dict[\"tracker_id\"]\n\t\texcept Exception:\n\t\t\ttracker_id = None\n\t\t\n\t\ttry:\n\t\t\tuser_agent = header_dict[\"User-Agent\"]\n\t\texcept 
Exception:\n\t\t\tuser_agent = None\n\n\t\ttry:\n\t\t\tlanguage = header_dict[\"Accept-Language\"]\n\t\texcept Exception:\n\t\t\tlanguage = None\n\n\t\ttry:\n\t\t\treferer = header_dict[\"Referer\"]\n\t\texcept Exception:\n\t\t\treferer = None\n\n\t\ttry:\n\t\t\torigin = header_dict[\"Origin\"]\n\t\texcept Exception:\n\t\t\torigin = None\n\n\t\ttry:\n\t\t\tjson_data = request.json\n\t\texcept Exception:\n\t\t\tjson_data = None\n\n\t\ttry:\n\t\t\tplatform = request.user_agent.platform.title()\n\t\texcept Exception:\n\t\t\tplatform = None\n\n\t\ttry:\n\t\t\tbrowser = request.user_agent.browser.title()\n\t\texcept Exception:\n\t\t\tbrowser = None\n\n\t\ttry:\n\t\t\tauth_header_token = header_dict[\"Authorization\"].split(\" \")[1]\n\t\texcept Exception:\n\t\t\tauth_header_token = None\n\t\t\n\t\t## If set to run before a request: This is the default setting\n\t\tif self.pre_request:\n\t\t\[email protected]_request()\n\t\t\tdef run():\n\t\t\t\t## If the path accessed is in the do_not_log list, it is skipped\n\t\t\t\tif request.path in self.do_not_log:\n\t\t\t\t\treturn\n\t\t\t\t## If the path accessed is not in the do_not_log list, it is posted\n\t\t\t\telse:\n\t\t\t\t\tpost_data = {\n\t\t\t\t\t\t\"error\": None,\n\t\t\t\t\t\t\"stack_trace\": None,\n\t\t\t\t\t\t\"method\": request.method,\n\t\t\t\t\t\t\"source_ip\": request.remote_addr,\n\t\t\t\t\t\t\"url\": request.url,\n\t\t\t\t\t\t\"status_code\": 200, ## Assumed to be 200 due to the nature of the function\n\t\t\t\t\t\t\"headers\": str(header_dict),\n\t\t\t\t\t\t\"user_agent\": user_agent,\n\t\t\t\t\t\t\"language\": language,\n\t\t\t\t\t\t\"platform\": platform,\n\t\t\t\t\t\t\"browser\": browser,\n\t\t\t\t\t\t\"referer\": referer,\n\t\t\t\t\t\t\"origin\": origin,\n\t\t\t\t\t\t\"auth_header\": auth_header_token,\n\t\t\t\t\t\t\"access_time\": datetime.now().strftime(\"%A, %d %B %Y %H:%M:%S\"),\n\t\t\t\t\t\t\"logging_access_key\": self.accessKey,\n\t\t\t\t\t\t\"json\": json_data,\n\t\t\t\t\t\t\"request_params\": str(dict(request.args))\n\t\t\t\t\t}\n\n\t\t\t\t\tself.startPost(post_data)\n\n\t\t\t\t\treturn\n\n\t\t\treturn run\n\t\t\n\t\t## If set to as a wrapper to a function\n\t\telse:\n\t\t\tdef log_decorator(func):\n\n\t\t\t\t@wraps(func)\n\t\t\t\tdef execute(*args, **kwargs):\n\t\t\t\t\ttry:\n\t\t\t\t\t\tresult = func(*args, **kwargs)\n\n\t\t\t\t\t\tresult_response = make_response(result)\n\n\t\t\t\t\t\tpost_data = {\n\t\t\t\t\t\t\t\"error\": None,\n\t\t\t\t\t\t\t\"stack_trace\": None,\n\t\t\t\t\t\t\t\"method\": request.method,\n\t\t\t\t\t\t\t\"source_ip\": request.remote_addr,\n\t\t\t\t\t\t\t\"url\": request.url,\n\t\t\t\t\t\t\t\"status_code\": result_response.status_code,\n\t\t\t\t\t\t\t\"headers\": str(header_dict),\n\t\t\t\t\t\t\t\"user_agent\": user_agent,\n\t\t\t\t\t\t\t\"language\": language,\n\t\t\t\t\t\t\t\"platform\": platform,\n\t\t\t\t\t\t\t\"browser\": browser,\n\t\t\t\t\t\t\t\"referer\": referer,\n\t\t\t\t\t\t\t\"origin\": origin,\n\t\t\t\t\t\t\t\"auth_header\": auth_header_token,\n\t\t\t\t\t\t\t\"access_time\": datetime.now().strftime(\"%A, %d %B %Y %H:%M:%S\"),\n\t\t\t\t\t\t\t\"logging_access_key\": self.accessKey,\n\t\t\t\t\t\t\t\"json\": json_data,\n\t\t\t\t\t\t\t\"request_params\": str(dict(request.args))\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tself.startPost(post_data)\n\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\tresult = func(*args, **kwargs)\n\t\t\t\t\t\t\n\t\t\t\t\t\ttrace = traceback.format_exc()\n\n\t\t\t\t\t\tkwargs = {\n\t\t\t\t\t\t\t\"trace\": trace,\n\t\t\t\t\t\t\t\"exception\": 
str(e)\n\t\t\t\t\t\t}\n\t\t\t\t\t\t\n\t\t\t\t\t\tpost_data = {\n\t\t\t\t\t\t\t\"error\": str(e),\n\t\t\t\t\t\t\t\"stack_trace\": trace,\n\t\t\t\t\t\t\t\"method\": request.method,\n\t\t\t\t\t\t\t\"source_ip\": request.remote_addr,\n\t\t\t\t\t\t\t\"url\": request.url,\n\t\t\t\t\t\t\t\"status_code\": 500,\n\t\t\t\t\t\t\t\"headers\": str(header_dict),\n\t\t\t\t\t\t\t\"user_agent\": user_agent,\n\t\t\t\t\t\t\t\"language\": language,\n\t\t\t\t\t\t\t\"platform\": platform,\n\t\t\t\t\t\t\t\"browser\": browser,\n\t\t\t\t\t\t\t\"referer\": referer,\n\t\t\t\t\t\t\t\"origin\": origin,\n\t\t\t\t\t\t\t\"auth_header\": auth_header_token,\n\t\t\t\t\t\t\t\"access_time\": datetime.now().strftime(\"%A, %d %B %Y %H:%M:%S\"),\n\t\t\t\t\t\t\t\"logging_access_key\": self.accessKey,\n\t\t\t\t\t\t\t\"json\": json_data,\n\t\t\t\t\t\t\t\"request_params\": str(dict(request.args))\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tself.startPost(post_data)\n\t\t\t\t\t\n\t\t\t\t\treturn result\n\t\t\t\t\n\t\t\t\treturn execute\n\t\t\t\n\t\t\treturn log_decorator", "def log_request(self, code='-', size='-'):\n print self._heading(\"HTTP Request\")\n #First, print the resource identifier and desired operation.\n print self.raw_requestline,\n #Second, print the request metadata\n for header, value in self.headers.items(): \n print header + \":\", value", "def report():\n pass", "def _trace(self):\n self.__aceQLHttpApi.trace()", "def log_request(self, code='-', size='-'):\n pass", "def _log_request(res: SpamResult) -> None:\n _log.info(f\"requestId=[{request.id}] result=[{res.label}] reason=[{res.reason}]\")", "def log_request(task_request, request):\n msg = \"{0.method} {0.url}: {0.body}\".format(request)\n log_info(task_request, msg)", "def write_debug_info(self):\n #path = self.request.uri.split('?')[0]\n #method = path.split('/')[-1]\n \n self.write(\"Handler: \" + str(self.__class__.__name__)+\"<br>\")\n self.write(\"<hr>\")\n self.write(str(dir(self.request)))\n self.write(\"<br><hr>\")\n self.write(\"query_arguments:\" + str(self.request.query_arguments))\n self.write(\"<br>\")\n self.write(\"uri:\" + self.uri)\n self.write(\"<br>\")\n self.write(\"path:\" + self.path)\n self.write(\"<br>\")\n self.write(\"method to call: \" + self.request.method.lower() + \"_\" + self.method)\n self.write(\"<hr>\")\n self.write(\"request method: \" + self.request.method)\n self.write(\"<hr>\")\n self.write(\"request headers: \" + str(self.request.headers))\n self.write(\"<hr>\")\n self.flush()", "def post_traceback(self, req):\n debug_info = req.debug_info\n long_xml_er = formatter.format_xml(debug_info.exc_data, \n show_hidden_frames=True, show_extra_data=False, \n libraries=self.libraries)[0]\n host = req.GET['host']\n headers = req.headers\n conn = httplib.HTTPConnection(host)\n headers = {'Content-Length':len(long_xml_er), \n 'Content-Type':'application/xml'}\n conn.request(\"POST\", req.GET['path'], long_xml_er, headers=headers)\n resp = conn.getresponse()\n res = Response()\n for header, value in resp.getheaders():\n if header.lower() in ['server', 'date']: continue\n res.headers[header] = value\n res.body = resp.read()\n return res", "def setup_requests_debugging(self):\n\n # These two lines enable debugging at httplib level (requests->urllib3->http.client)\n # You will see the REQUEST, including HEADERS and DATA, and RESPONSE with HEADERS but without DATA.\n # The only thing missing will be the response.body which is not logged.\n try:\n import http.client as http_client\n except ImportError:\n # Python 2\n import httplib as http_client\n 
http_client.HTTPConnection.debuglevel = 1\n\n # You must initialize logging, otherwise you'll not see debug output.\n self.logger.setLevel(logging.DEBUG)\n requests_log = logging.getLogger(\"requests.packages.urllib3\")\n requests_log.setLevel(logging.DEBUG)\n requests_log.propagate = True", "def DebugInfo( self, request_data ):\n pass", "def dump_requests(wpr_archive):\n requests = '['\n requests_added = False\n for r in wpr_archive.get_requests():\n requests_added = True\n req = JsonObject()\n scheme = 'https' if r.is_ssl else 'http'\n req.url = '%s://%s%s' % (scheme, r.host, r.full_path)\n req.method = r.command\n req.headers = []\n for k in r.headers:\n h = JsonObject()\n h.key = k\n h.val = r.headers[k]\n req.headers.append(h)\n if r.request_body:\n req.body = base64.encodestring(r.request_body)\n requests += req.toJSON() + ',\\n'\n if requests_added:\n requests = requests[:-2]\n requests += ']'\n return requests", "def report(self, **options):\n pass", "def log_request(self, code='-', size='-'):\n if self.server.log_requests:\n BaseHTTPServer.BaseHTTPRequestHandler.log_request(self, code, size)", "def rest_api_log(self):\n with self.resource_lock:\n pass", "def _assemble_and_send_request(self):\r\n client = self.client\r\n # Fire off the query.\r\n response = client.service.track(WebAuthenticationDetail=self.WebAuthenticationDetail,\r\n ClientDetail=self.ClientDetail,\r\n TransactionDetail=self.TransactionDetail,\r\n Version=self.VersionId,\r\n IncludeDetailedScans=self.IncludeDetailedScans,\r\n PackageIdentifier=self.TrackPackageIdentifier,\r\n TrackingNumberUniqueIdentifier = self.TrackingNumberUniqueIdentifier)\r\n\r\n return response", "def before():\n app.logger.info(\"Local Timestamp: {}\".format(str(datetime.now())))\n app.logger.info(\"Request Method: {}\".format(request.method))\n app.logger.info(\"Request URL: {}\".format(request.url))\n app.logger.info(\"Request Access Route: {}\".format(request.access_route[0]))\n headers = \"\"\n for (key, value) in request.headers:\n # hide authorization header from logs\n if key == \"Authorization\":\n value = \"[provided]\" \n headers += \"{}: {}\\n\".format(key, value)\n app.logger.info(\"Request Headers:{}\\n{}\\n{}\".format(\"-\"*45,str(headers)[:-1], \"-\"*60))\n body = copy.deepcopy(request.json)\n if type(body) is dict and \"password\" in body:\n body['password'] = \"[provided]\"\n app.logger.info(\"Request Body: {}\".format(body))", "def log_request(self, r):\n\n token = r.headers.get(self.header, None)\n r.token = token\n self.requests.append(r)\n if r.token:\n self.log.debug('[%s] %s', token or '/', r.url)", "def report(self, output_dir):" ]
[ "0.6134068", "0.59537244", "0.58560354", "0.57951957", "0.5685743", "0.56847835", "0.5683323", "0.56782234", "0.56680465", "0.56327325", "0.5608646", "0.55771434", "0.55731076", "0.5465097", "0.54577047", "0.5447542", "0.54445636", "0.5435014", "0.53476423", "0.5324516", "0.52998835", "0.5292733", "0.5280096", "0.5241056", "0.52406377", "0.51874685", "0.5184952", "0.5176642", "0.51687175", "0.5153448" ]
0.70564216
0
Gets the next page of products on the QVC.com search results page.
def go_product_search_next(self, driver): try: pagination = driver.find_element_by_class_name("divPageLinks") pagination.find_element_by_class_name("next").click() except NoSuchElementException: raise NoSuchElementException
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _scrape_next_results_page_link(self, response):\n next_pages = response.xpath('//*[@id=\"pagnNextLink\"]/@href |'\n '//ul[contains(@class, \"a-pagination\")]'\n '/a[contains(text(), \"eiter\")]/@href').extract()\n next_page_url = None\n\n if len(next_pages) == 1:\n next_page_url = next_pages[0]\n elif len(next_pages) > 1:\n self.log(\"Found more than one 'next page' link.\", ERROR)\n\n return next_page_url", "def next_item(self):\n if self.page + 1 > len(self.result) - 1:\n self.page = 0\n else:\n self.page += 1\n return self.present_view()", "def go_product_reviews_next(self, driver, website):\n paginator = driver.find_element_by_class_name(\"BVRRPager\")\n next_link = paginator.find_element_by_class_name(\"BVRRNextPage\")\n next_link.find_element_by_name(\"BV_TrackingTag_Review_Display_NextPage\").click()\n time.sleep(1)", "def next_results_page(driver, delay):\n try:\n # wait for the next page button to load\n print(\" Moving to the next page of search results... \\n\" \\\n \" If search results are exhausted, will wait {} seconds \" \\\n \"then either execute new search or quit\".format(delay))\n wait_for_clickable_element_css(driver, delay, \"a.next-btn\")\n # navigate to next page\n driver.find_element_by_css_selector(\"a.next-btn\").click()\n except Exception as e:\n print (\"\\nFailed to click next page link; Search results \" \\\n \"may have been exhausted\\n{}\".format(e))\n raise ValueError(\"Next page link not detected; search results exhausted\")\n else:\n # wait until the first job post button has loaded\n first_job_button = \"a.job-title-link\"\n # wait for the first job post button to load\n wait_for_clickable_element_css(driver, delay, first_job_button)", "def all_products_view(request):\n products = Product.objects.all()\n paginator = Paginator(products, 6)\n page_number = request.GET.get('page', 1)\n page = paginator.page(page_number)\n\n \"\"\"render a products html page and within that page we will have access to products, so all_products\"\"\"\n\n if page.has_next():\n\n next_url = f'?page={page.next_page_number()}'\n\n else:\n\n next_url = ''\n\n if page.has_previous():\n\n prev_url = f'?page={page.previous_page_number()}'\n\n else:\n\n prev_url = ''\n\n \n return render(request, 'products.html', {'page': page, 'next_page_url': next_url, 'prev_page_url': prev_url})", "def get_next_page(self, next_url):\n response, err_msg = self.api_call(\"GET\", next_url, None, refresh_authentication=True)\n\n return response.json()", "def parse(self, response):\n product_urls = response.css(\n '.product > a.woocommerce-loop-product__link::attr(href)'\n ).getall()\n for product_url in product_urls:\n yield scrapy.Request(response.urljoin(product_url), self.parse_product)\n\n next_page_url = response.css('.next::attr(href)').get()\n if next_page_url is not None:\n yield scrapy.Request(response.urljoin(next_page_url))", "def parse_list_page(self, response):\n url=response.url\n parsed_data = json.loads(response.text)\n next_link =url[:url.find('page=')] + \"page=\"+str(parsed_data['query']['page']+1) +url[url.find('&size=10'):]\n request = requests.get(next_link)\n\n \n if request.status_code == 200 and len(parsed_data['items'])!=0:\n request = scrapy.Request(url=next_link,callback = self.parse_list_page)\n request.meta['url'] = response.meta['url']\n yield request\n\n #Find product link and yield request back\n for req in self.extract_product(response):\n yield req", "def get_next_page(self, data):\n\n next_page = None\n\n if \"d\" in data:\n logger.debug(f\"'d' found (OData 
v2).\")\n if \"__next\" in data[\"d\"]:\n logger.debug(f\"'d.__next' found\")\n next_page = data[\"d\"].get(\"__next\")\n elif \"value\" in data:\n logger.debug(f\"'value' found (OData v3 or v4).\")\n if \"odata.nextLink\" in data:\n logger.debug(f\"'odata.nextLink' found (Odata v3).\")\n next_page = data.get(\"odata.nextLink\")\n elif \"@odata.nextLink\" in data:\n logger.debug(f\"'@odata.nextLink' found (Odata v4).\")\n next_page = data.get(\"@odata.nextLink\")\n else:\n logger.debug(f\"No more pages.\")\n\n return next_page", "def next_page(self):\r\n if self.page.has_next():\r\n self.query_dict['page'] = self.page.next_page_number()\r\n return \"%s?%s\" % (self.path, urlencode(self.query_dict))\r\n return \"\"", "def get_next_page(self, raw=False):\n return self.account.get_orders(page=self.current_page + 1, raw=raw)", "def parse(self, response):\n product_urls = response.css(\n '.woocommerce-loop-product__link::attr(href)'\n ).getall()\n for product_url in product_urls:\n yield scrapy.Request(response.urljoin(product_url), self.parse_product)\n\n next_page_url = response.css('.next::attr(href)').get()\n if next_page_url is not None:\n yield scrapy.Request(response.urljoin(next_page_url))", "def nextprev(np, page=None):\n glsq = g.last_search_query\n content = g.model.songs\n max_results = getxy().max_results\n\n if \"user\" in g.last_search_query:\n function, query = usersearch_id, glsq['user']\n\n elif \"related\" in g.last_search_query:\n function, query = related_search, glsq['related']\n\n elif \"term\" in g.last_search_query:\n function, query = search, glsq['term']\n\n elif \"playlists\" in g.last_search_query:\n function, query = pl_search, glsq['playlists']\n content = g.ytpls\n\n elif \"playlist\" in g.last_search_query:\n function, query = plist, glsq['playlist']\n\n good = False\n\n if np == \"n\":\n if len(content) == max_results and glsq:\n if (g.current_page + 1) * max_results < 500:\n if g.more_pages:\n g.current_page += 1\n good = True\n\n elif np == \"p\":\n\n if g.last_search_query:\n if page and int(page) in range(1,20):\n g.current_page = int(page)-1\n good = True\n\n elif g.current_page > 0:\n g.current_page -= 1\n good = True\n\n if good:\n function(query, page=g.current_page, splash=True)\n\n else:\n norp = \"next\" if np == \"n\" else \"previous\"\n g.message = \"No %s items to display\" % norp\n\n g.content = generate_songlist_display(frmat=\"search\")\n return good", "def parse(self, response):\n product_urls = response.css('.product-details > a::attr(href)').getall()\n for product_url in product_urls:\n yield scrapy.Request(response.urljoin(product_url), self.parse_product)\n next_page_url = response.css('.next::attr(href)').get()\n if next_page_url is not None:\n yield scrapy.Request(response.urljoin(next_page_url))", "def get_next_page_response(self):\n if not self.has_next_page():\n raise RuntimeError('No more pages. 
Try resetting the iterator.')\n\n response = self.client.connection.api_request(\n method='GET', path=self.path, query_params=self.get_query_params())\n\n self.page_number += 1\n self.next_page_token = response.get('nextPageToken')\n\n return response", "def _has_next_page(self):\n if self._search_data is None:\n return True\n begin_index = int(self._params['beginIndex'])\n product_count = int(self._search_data['totalCount'])\n page_size = int(self._search_data['pageSize'])\n # return True if there are more products to parse\n return begin_index < product_count", "def nextPage(self):\n raise NotImplementedError()", "def list_get_next_page(self, soup):\n # Get the 'next page' element at the bottom of the page\n next_page_tag = soup.find('a', class_='pager pager-next')\n\n # Extract the link from this element\n if next_page_tag:\n page_url = self.base_url_apartments + next_page_tag['href']\n return generate_soup(page_url)\n else:\n return None", "def parse(self, response):\n product_urls = response.css(\n '.product-li .product-image a::attr(href)'\n ).getall()\n for product_url in product_urls:\n yield scrapy.Request(response.urljoin(product_url), self.parse_product)\n\n\n next_page_number = 2\n if '?' in response.url:\n return\n while next_page_number < 37:\n # import logging\n # logging.log(logging.WARNING, f\"This is a warning {len(product_urls)} : {product_urls[0]}\")\n next_page_url = f'{response.url}?p={next_page_number}'\n yield scrapy.Request(response.urljoin(next_page_url))\n next_page_number += 1", "def page13(self):\n result = request1301.GET('/Cars_Sample_App/sell.do', None,\n ( NVPair('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'),\n NVPair('Referer', 'http://supercars-tomcat:8080/Cars_Sample_App/search.do?query=search'), ))\n\n return result", "def do_search(request):\n products = Product.objects.filter(title__icontains=request.GET['q'])\n paginator = Paginator(products, 4) # Show 4 products per page\n \n page = request.GET.get('page')\n try:\n products = paginator.page(page)\n except PageNotAnInteger:\n products = paginator.page(1)\n except EmptyPage:\n products = paginator.page(paginator.num_pages)\n return render(request, \"products.html\", {\"products\": products})", "def navigate_search_results(self):\n driver = self.driver\n search_results_exhausted = False\n results_page = self.results_page\n delay = 60\n date = get_date_time()\n # css elements to view job pages\n list_element_tag = '/descendant::a[@class=\"job-title-link\"]['\n print_num_search_results(driver, self.keyword, self.location)\n # go to a specific results page number if one is specified\n go_to_specific_results_page(driver, delay, results_page)\n results_page = results_page if results_page > 1 else 1\n\n while not search_results_exhausted:\n for i in range(1,26): # 25 results per page\n # define the css selector for the blue 'View' button for job i\n job_selector = list_element_tag + str(i) + ']'\n if search_suggestion_box_is_present(driver, \n job_selector, i, results_page):\n continue\n # wait for the selector for the next job posting to load.\n # if on last results page, then throw exception as job_selector \n # will not be detected on the page\n if not link_is_present(driver, delay, \n job_selector, i, results_page):\n continue\n robust_wait_for_clickable_element(driver, delay, job_selector)\n extract_transform_load(driver,\n delay,\n job_selector,\n date,\n self.keyword,\n self.location,\n self.filename)\n # attempt to navigate to the next page of search results\n # if the 
link is not present, then the search results have been \n # exhausted\n try:\n next_results_page(driver, delay)\n print(\"\\n**************************************************\")\n print(\"\\n\\n\\nNavigating to results page {}\" \\\n \"\\n\\n\\n\".format(results_page + 1))\n except ValueError:\n search_results_exhausted = True\n print(\"**************************************************\")\n print(\"\\n\\n\\n\\n\\nSearch results exhausted\\n\\n\\n\\n\\n\")\n else:\n results_page += 1", "async def go_to_next_page(self, payload):\n await self.show_checked_page(self.current_page + 1)", "def parse(self, response):\n product_urls = response.css('.product__title > a::attr(href)').getall()\n for product_url in product_urls:\n yield scrapy.Request(response.urljoin(product_url), self.parse_product)\n next_page_url = response.css('.pagination__item--next a::attr(href)').get()\n if next_page_url is not None:\n yield scrapy.Request(response.urljoin(next_page_url))", "def parse(self, r):\n blocs = r.xpath(\"//article[@itemtype='https://schema.org/Product']//a/@href\").extract()\n if blocs:\n for product_sheet_link in blocs:\n next_page = r.urljoin(f\"http://qazaimmobilier.la-boite-immo.com{product_sheet_link}\")\n yield scrapy.Request(next_page, callback=self.parse_product)\n\n # paginate\n self.page += 1\n yield scrapy.Request(self.base_url + f\"{self.page}\")", "async def next_page(self):\n if self.page_num == len(self.pages):\n self.page_num = 1 # Loop around to the first item\n else:\n self.page_num += 1\n return await self.update()", "def get_next_page(self):\n return min((self.get_page() + 1), self.get_last_page())", "def next_page():\n\tprint('-> \\nClicking next page')\n\told_html = driver.find_element_by_tag_name('html').text\n\tlink = driver.find_element_by_xpath(XPATHS['next_page']) \n\tlink.click()\n\treturn wait_for(old_html)", "def __next_page(self):\n self.current_page = self.current_page + 1\n tree = ET.parse(urlopen(self.url + '&start=' + str(self.current_page)))\n self.iterator = tree.iterfind(self.GLOBAL_NP + 'entry')", "def next_page(self):\n return self._next_page" ]
[ "0.67518705", "0.6525951", "0.63847774", "0.6296486", "0.6235156", "0.61986136", "0.6148619", "0.61433864", "0.6139499", "0.6089231", "0.607919", "0.6075388", "0.6061374", "0.6030385", "0.6018592", "0.5967784", "0.59492147", "0.5898055", "0.5887975", "0.5857885", "0.5856847", "0.5837946", "0.5827157", "0.58022493", "0.57851774", "0.5783765", "0.57650214", "0.5729862", "0.5704817", "0.5703531" ]
0.7560375
0
Goes to a QVC.com product page.
def go_product_page(self, driver, product_id, website): link = self.product_url(website, product_id) self.go_and_assert(driver, link, website)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def open_products_page(catalog_menu):\n catalog_menu.open_products_page()", "def get_product_web_page(product = None):\n \n products_list = get_product_list()\n modis_url_dict = {prod: '{}v006'.format(prod.lower()) for prod in \n products_list if prod[0] == 'M'}\n viirs_url_dict = {prod: '{}v001'.format(prod.lower()) \n for prod in products_list if prod[:3] == 'VNP'}\n modis_url_dict.update(viirs_url_dict)\n base_addr = ('https://lpdaac.usgs.gov/products/{0}')\n if product is None or not product in modis_url_dict.keys():\n print 'Product not found... redirecting to data discovery page'\n addr = ('https://lpdaac.usgs.gov')\n else:\n addr = base_addr.format(modis_url_dict[product])\n webbrowser.open(addr)", "def show_homepage():\n return flask.redirect(\"products.show_product_manager\")", "def products_page(driver, open_login_page):\n return ProductsPage(driver)", "def goto_cart(self):\n self.driver.find_element(*BasePageLocators.GO_CART).click()\n return CartPage(self.driver)", "def go_product_reviews_page(self, driver, product_id, website):\n try:\n tab_list = driver.find_element_by_id(\"divProductDetailsCustomerReviewOptions\")\n review_tab = tab_list.find_element_by_id(\"tabProductDetailCustomerReviewNav1\")\n review_tab.click()\n except (NoSuchElementException, ElementNotVisibleException):\n pass\n time.sleep(1)", "def _go_to_page(self):\n self.salesforce.go_to_setup_home()\n self.eda.wait_for_new_window(\"Home | Salesforce\")\n self.selenium.switch_window(\"Home | Salesforce\")\n self.salesforce.wait_until_loading_is_complete()", "def product_card(driver, open_login_page):\n return ProductPage(driver)", "def onProductLinkClicked(self, linkId=None):\n self.OpenProductWeb.emit()", "def go_product_reviews_next(self, driver, website):\n paginator = driver.find_element_by_class_name(\"BVRRPager\")\n next_link = paginator.find_element_by_class_name(\"BVRRNextPage\")\n next_link.find_element_by_name(\"BV_TrackingTag_Review_Display_NextPage\").click()\n time.sleep(1)", "def product_detail(request, product_id):\n product = get_object_or_404(Product, pk=product_id)\n print(request.path)\n template = './product_detail.html'\n context = {\n 'product': product,\n }\n\n # products = Product.objects.all()\n\n return render(request, template, context)", "def product_view(request, product):\n product = Products.objects.get(product=product)\n\n context = {\n \"product\": product,\n }\n\n return render(request, \"products/product_detail.html\", context)", "def go_product_search_next(self, driver):\n try:\n pagination = driver.find_element_by_class_name(\"divPageLinks\")\n pagination.find_element_by_class_name(\"next\").click()\n except NoSuchElementException:\n raise NoSuchElementException", "def all_products_view(request):\n products = Product.objects.all()\n paginator = Paginator(products, 6)\n page_number = request.GET.get('page', 1)\n page = paginator.page(page_number)\n\n \"\"\"render a products html page and within that page we will have access to products, so all_products\"\"\"\n\n if page.has_next():\n\n next_url = f'?page={page.next_page_number()}'\n\n else:\n\n next_url = ''\n\n if page.has_previous():\n\n prev_url = f'?page={page.previous_page_number()}'\n\n else:\n\n prev_url = ''\n\n \n return render(request, 'products.html', {'page': page, 'next_page_url': next_url, 'prev_page_url': prev_url})", "def buySingleProduct(url):\n #parsed_url = urlparse(url)\n assert \"http\" and \"://\" in url, \"Bitte die URL komplett kopieren, inklusive \\\"http://\\\" bzw. 
\\\"https://\\\" am Anfang.\"\n assert \"amazon\" in url, \"Die aufzurufende Seite ist nicht die Amazon-Seite oder konnte nicht erkannt werden.\"\n print(\"Open page '\"+url+\"'\")\n driver.get(url)\n print(\"Find add-to-cart element\")\n try:\n print(\"actually find element\")\n #add_to_cart_button = driver.find_element_by_css_selector(amazon_add_to_cart)\n\n print(\"scroll element into view using native js\")\n driver.execute_script(\"window.scrollTo(0, document.GetElementById(\"+amazon_add_to_cart+\"));\")\n print(\"Send 'click' to element\")\n add_to_cart_button.click()\n print(\"Success.\")\n except Exception, e:\n print(\"Element could not be found. General exception: \"+str(e))\n #driver.close()", "def detail_url(product_id):\n return reverse('product:product-detail', args=[product_id])", "def __print_products_navigation(self, category):\n\n def input_validation(string: str):\n if string.startswith('p'):\n try:\n int(string[1:])\n except ValueError:\n return False\n else:\n return True\n return False\n\n page = 1\n while True:\n clean_terminal()\n data = self.api_operator.get_products_from_category(\n category, page)\n number_page = int(ceil(data['count'] / 20))\n cprint(str(number_page) + \" page(s) pour \" + str(\n data['count']) + \" résultat(s).\")\n print('Choisir un produit :')\n\n self.print_products_line(data['products'])\n\n print('page ' + str(page) + ' sur ' + str(number_page))\n reply_3 = self.ask_with_input('Choisir un numéro (tapez \"quit\" pour'\n ' quitter, \"pp\" pour pagge précedente'\n ', \"ps\" pour page suivante, p<numéro>'\n ' pour aller à la page numéro) : ',\n 20,\n ('quit', 'pp', 'ps'),\n input_validation)\n if reply_3 == 'quit':\n break\n elif reply_3.startswith('p'):\n if reply_3 == \"ps\":\n if page <= number_page - 1:\n page += 1\n elif reply_3 == \"pp\":\n if page >= 2:\n page -= 1\n else:\n reply_3 = int(reply_3[1:])\n if 1 <= reply_3 <= number_page:\n page = reply_3\n else:\n product_number = int(reply_3) - 1\n self.render(data['products'][product_number])", "def product_detail(request, product_id):\n # Search for product in Product Model using pk identifier obtained from project_id\n product = get_object_or_404(Product, pk=product_id)\n context = {\n 'product': product,\n }\n return render(request, 'products/product_detail.html', context)", "def product_detail(request, product_id):\n product = get_object_or_404(Product, pk=product_id)\n\n context = {\n 'product': product,\n }\n\n return render(request, 'products/product_detail.html', context)", "def test_buy_now(self):\n catalog_page = CatalogPage(self.driver)\n product_page = ProductPage(self.driver)\n payment_page = PaymentPage(self.driver)\n payment_review_page = PaymentReviewPage(self.driver)\n payment_info_page = PaymentInfoPage(self.driver)\n success_page = SuccessPage(self.driver)\n # buy the new product\n navigate_to(self.driver, ProductPage.URL(self.new_product['product']['title']))\n product_page.add_to_cart.click()\n # by an old product\n catalog_page.catalog.click()\n # Sort products to move the newly created to last page\n catalog_page.sorting_order.select_by_visible_text(\"Date, old to new\")\n catalog_page.image.random_click()\n product = product_page.product.get_text()\n product_page.add_to_cart.click()\n catalog_page.catalog.click()\n catalog_page.cart.click()\n payment_dic = {\n 'address' : f'{randint(1, 99999)} {random_name(5, 8)}',\n 'city' : \"San Francisco\",\n 'email_or_mobile_phone_number_input' : random_name(8) + \"@gmail.com\",\n 'last_name' : random_name(3, 12),\n 
'zip_code' : '94107',\n }\n if randint(0, 1):\n payment_dic['first_name'] = random_name(4, 16)\n if randint(0, 1):\n payment_dic['address2'] = random_name(5)\n for _ in payment_dic:\n exec(f\"payment_page.{_}.enter(payment_dic['{_}'])\")\n payment_page.continue_to_shipping.click()\n payment_review_page.continue_to_payment.click()\n payment_info_page.full_address.get_text()\n # validate address\n for _ in ['address', 'city', 'zip_code']:\n assert_and_log(payment_dic[_] in payment_info_page.full_address.get_text(),\n f\"{_} in full address\")\n payment_info_page.enter_bogus_payment(1)\n assert_and_log(success_page.thank_you.find_visible_element(),\n \"'Thank you' appeared as a sign of successful transaction\",\n continue_on_error=False)\n validate(success_page.basic_validation_list)", "def test_product_landing(self, flag_is_active):\n flag_is_active.return_value = True\n\n # Create a product\n p = product(save=True)\n\n # Create some topics\n topics = []\n for i in range(11):\n topics.append(topic(save=True))\n\n # Create a document and assign the product and 10 topics.\n doc = revision(is_approved=True, save=True).document\n doc.products.add(p)\n for i in range(10):\n doc.topics.add(topics[i])\n\n self.refresh()\n\n # GET the topic page and verify the content\n url = reverse('products.product', args=[p.slug])\n r = self.client.get(url, follow=True)\n eq_(200, r.status_code)\n doc = pq(r.content)\n eq_(10, len(doc('#help-topics li')))", "def get(self, request, product_id=None, page_no=None):\n if product_id is None:\n return Response(\"product_id cannot be null\", status=status.HTTP_400_BAD_REQUEST)\n \n if page_no is None:\n page_no = 1\n if page_no <= 0:\n return Response(\"Page Number must be >= 1\", status=status.HTTP_400_BAD_REQUEST)\n\n queryset = Qanda.objects.using('scraped').filter(product_id=product_id)\n if queryset.count() == 0:\n return Response(f\"No QandA exists for this product - {product_id}\", status=status.HTTP_404_NOT_FOUND)\n \n ITEMS_PER_PAGE = 10\n queryset = queryset[(page_no - 1) * ITEMS_PER_PAGE : (page_no) * ITEMS_PER_PAGE]\n \n serializer = QandASerializer(queryset, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)", "def click_nav_components_webcamera(self):\n self.click_nav_components()\n self.driver.find_element(*BasePageLocators.WEBCAMERAS).click()\n return ProductsPage(self.driver)", "def test_is_product_show(self):\n\n self.selenium.get(\"http://localhost:8000/\")\n response = self.selenium.find_element(By.ID, \"id_product_name\")\n response.send_keys(\"frosties\")\n response.send_keys(Keys.ENTER)\n self.assertTemplateUsed('selected_product.html')", "def page13(self):\n result = request1301.GET('/Cars_Sample_App/sell.do', None,\n ( NVPair('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'),\n NVPair('Referer', 'http://supercars-tomcat:8080/Cars_Sample_App/search.do?query=search'), ))\n\n return result", "def click_nav_desktops_pc(self):\n self.click_nav_desktops()\n self.driver.find_element(*BasePageLocators.DESKTOPS).click()\n return ProductsPage(self.driver)", "def display(auth_context):\n\n products = product_catalog.list_products()\n # Get promoted products recommended by the AutoML model.\n promos = product_catalog.get_promos()\n return render_template('product_catalog.html',\n products=products,\n promos=promos,\n auth_context=auth_context,\n bucket=product_catalog.BUCKET)", "def go_to_basket(self):\n link = self.browser.find_element(*BasePageLocators.BASKET_LINK)\n link.click()", "def 
step_impl(context):\r\n context.browser.get('https://opensource-demo.orangehrmlive.com/')\r\n time.sleep(10)", "def get(self, request, **kwargs):\n item = self.get_object()\n correct_path = item.get_absolute_url() \n if correct_path != request.path:\n return HttpResponsePermanentRedirect(correct_path)\n \n response = super(ItemDetailView, self).get(request, **kwargs)\n \n # Send signal to record the view of this product\n product_viewed.send(sender=self, product=item, user=request.user, request=request, response=response)\n return response;" ]
[ "0.68711495", "0.6527769", "0.6513008", "0.6255602", "0.6254523", "0.6189642", "0.615368", "0.6008142", "0.59645754", "0.5947107", "0.59296095", "0.5882329", "0.5873604", "0.5810477", "0.57542366", "0.5748937", "0.5740724", "0.5711322", "0.5692858", "0.56913525", "0.56570077", "0.5627749", "0.5616224", "0.56101155", "0.55772907", "0.5554441", "0.55504245", "0.5526349", "0.5496299", "0.54909176" ]
0.7466344
0
Gets the name and size of a product from a product page. Product name, product size (number), product size (units)
def get_product_name_and_size(self, driver): detailsframe = driver.find_element_by_id("divProductDetailDescriptionAreaDisplay1") li_tags = detailsframe.find_elements_by_xpath('.//li') for i in li_tags: if "weight" in i.text: size_info = i.text.split()[2:] size = size_info[0] units = " ".join(size_info[1:]) else: size, units = "", "" product_name = driver.find_element_by_class_name("fn").text return product_name, size, units
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_product_info(self, product):\n\n product_link = self.url + product.a['href']\n product_page = self.get_response(product_link)\n product_soup = BeautifulSoup(product_page.content, 'html.parser')\n\n # get product details\n product_brand = product_soup.find('h2').text.strip()\n product_name = product_soup.find('h1').text.strip()\n\n product_details = product_soup.find('div', id='z-pdp-detailsSection')\n\n product_attributes = []\n for detail_section in product_details.find_all('div', class_='h-container h-flex-no-shrink h-tabs__panel h-align-left'):\n for tag in detail_section.find_all('p'):\n product_attributes.append(tag.text.strip())\n\n # get product image\n product_img_thumbs = product_soup.find('div', id='z-pdp-topSection')\n product_img_thumbs = product_img_thumbs.find(\n 'div', class_='h-container h-carousel h-carousel-thumbnail vertical h-align-left')\n\n img_links = []\n product_img_link = ''\n for img_thumb in product_img_thumbs.find_all('picture'):\n img_link = img_thumb.find('img')['src'].replace('thumb', 'zoom')\n if 'packshot' in img_link:\n product_img_link = img_link\n else:\n img_links.append(img_link)\n\n # product_img_link = 'https:' + product_img.split('\"')[1].split('?')[0]\n product_img_id = product_img_link.split('/')[-1].split('@')[0]\n\n return {'name': product_name,\n 'brand': product_brand,\n 'id': product_img_id,\n 'img_url': product_img_link,\n 'model_img_urls': ', '.join(img_links),\n 'attributes': ', '.join(product_attributes)}", "def parse_product(self, response):\n item = ProductItem()\n item['url'] = response.url\n item['vendor'] = parse_url(response.url).netloc\n\n item['product_name'] = response.css('h1::text').get()\n item['price'] = response.css('.current-price span::attr(content)').get()\n item['available'] = not not response.css('span.font-extra .product-available').get()\n item['promotion'] = not not response.css('.product-discount').get()\n\n\n # weight e.g. : « 0,25 g. 
NT »\n item['raw_string'] = response.css('div[itemprop~=description]::text').get()\n\n return item", "def get_product(self, page_size=10, pages_number=1):\n products = []\n params = self.params.copy()\n params[\"page_size\"] = page_size\n\n try:\n response = requests.get(self.url, params=params, timeout=3)\n response.json()\n except requests.ConnectionError:\n print(\"Error when fetching the API\")\n for i in range(pages_number):\n params[\"page\"] = i + 1\n response = requests.get(self.url, params=params)\n if response.status_code == 200:\n products.extend(response.json()[\"products\"])\n return products", "def view_product(cls, product_id):\n product = Product.get_by_id(product_id)\n print(f'Product ID: {product.product_id}')\n print(f'Product Name: {product.product_name}')\n print(f'Quantity: {product.product_quantity}')\n print(f'Price: ${product.product_price / 100:.2f}\\n')", "def parse_product(self, response):\n item = ProductItem()\n item['url'] = response.url\n item['vendor'] = parse_url(response.url).netloc\n\n # Réécriture de l'url\n item['url'] = response.css('meta[property~=\"og:url\"]::attr(content)').get()\n\n item['product_name'] = response.css('h1::text').get()\n item['price'] = response.css('span[itemprop~=price]::attr(content)').get()\n\n # quantity: « 100 graines * » OR « 5 grammes * »\n item['raw_string'] = response.css('#group_5 option[selected]::text').get()\n\n\n return item", "def parse_product(self, response):\n item = ProductItem()\n item['url'] = response.url\n item['vendor'] = parse_url(response.url).netloc\n\n item['product_name'] = response.css('h1::text').get()\n \n item['promotion'] = not not response.css('.summary .price del')\n if item['promotion']:\n item['old_price'] = response.css('.summary .price del .amount::text').get()\n item['price'] = response.css('.summary .price ins .amount::text').get()\n else:\n item['price'] = response.css('.summary .price .amount::text').get()\n\n item['available'] = not not response.css('.in-stock')\n # e.g. 
« 10 en stock »\n item['stock'] = response.css('.in-stock::text').get()\n\n # weight\n item['raw_string'] = response.css(\n '.woocommerce-product-details__short-description p::text'\n ).get()\n \n return item", "def getPrice(self):\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.113 Safari/537.36\"}\n response = requests.get(self.__product_URL, headers=headers)\n #print(response.status_code)\n soup = BeautifulSoup(response.content, \"html.parser\")\n file = open(\"testproduct.html\", \"wb\")\n file.write(soup.prettify(\"utf-8\"))\n file.close()\n title = soup.find(\"span\", attrs={\"id\": \"productTitle\", \"class\": \"a-size-large\"}).string.strip()\n self.__product_title = title\n temp = soup.find_all(\"a\", attrs={\"class\": \"a-accordion-row a-declarative accordion-header\"})[1]\n price = temp.find(\"span\", attrs={\"class\": \"a-color-price\"}).text.strip()\n lst = list(price)\n lst.remove(\",\")\n price = int(float(\"\".join(lst)))\n self.__product_price = price\n #print(self.__product_price)", "def parse_product(self, response):\n item = ProductItem()\n item['url'] = response.url\n item['vendor'] = parse_url(response.url).netloc\n\n item['product_name'] = response.css('.pageTitle span::text').get()\n item['available'] = not response.css('.dispo')\n item['promotion'] = not not response.css('.old')\n if item['promotion']:\n item['old_price'] = response.css('.old .amount::text').get()\n\n item['price'] = response.css('.new .amount::text').get()\n # Un tableau\n item['raw_string'] = response.css('.featureTable tr td::text').getall()\n \n return item", "def parse_product(self, response):\n item = ProductItem()\n item['url'] = response.url\n item['vendor'] = parse_url(response.url).netloc\n\n # item['product_name'] = response.css('h1::text').get()\n item['promotion'] = any(x == \"PROMOTION\" for x in response.css('h3::text').getall())\n item['description'] = response.css('.product__description p::text').getall()\n item['raw_string'] = response.css('.product__informations li::text').getall() # Array\n\n data = eval(response.css('script[type~=\"application/ld+json\"]::text').get())\n item['product_name'] = data['name']\n item['product_id'] = data['productID']\n item['price'] = data['offers']['price']\n item['currency'] = data['offers']['priceCurrency']\n item['available'] = data['offers']['availability'].endswith('InStock')\n item['latin_name'] = data['alternateName']\n return item", "def get_product_info(self, product_url, location = {}, save = False):\n\t\t# Setting location \n\t\tis_LAD, code = self.set_location(location)\n\n\t\tif is_LAD and code == 200:\n\t\t\tstore = self.databaseHelper.get_store(location)\n\t\telse:\n\t\t\tstore = None\n\n\n\t\t# Retrieve html page\n\t\thtml, code = self.crawler.get(product_url)\n\n\t\tif code == 200:\n\t\t\tself.parser.set_html(html)\n\t\t\tproduct = self.parser.parse_product_full()\n\n\t\t\t# Clean urls\n\t\t\tproduct['reference'] = product_url.split('-')[-1]\n\t\t\tproduct['url'] = product_url\n\t\telse:\n\t\t\tprint 'Error while retrieving product page : error %d'%(code)\n\t\t\tproduct = {\n\t\t\t\t'is_available': False,\n\t\t\t\t'exists': False,\n\t\t\t\t'url': product_url,\n\t\t\t\t'reference': product_url.split('-')[-1]\n\t\t\t\t}\n\n\t\t[product] = self.clean_urls_in_products([product])\n\n\t\t# save product in database\n\t\tif save:\n\t\t\tself.databaseHelper.save_products([product], None, store)\n\t\telse:\n\t\t\treturn product", "def 
get_product_details(self):\n\n db.execute(\"SELECT * FROM Product WHERE id = %s\", (self.id,))\n product = db.fetch()\n\n self.name = product[1]\n self.brand = product[2]\n self.nutriscore_id = product[3]\n self.store = product[4]\n self.description = product[5]\n self.url = product[6]", "def parse_product(self, response):\n # messy data\n item = ProductItem()\n item['url'] = response.url\n item['vendor'] = parse_url(response.url).netloc\n\n item['product_name'] = response.css('h1::text').get()\n item['price'] = response.css('.current-price span::attr(content)').get()\n \n # sometimes quantity and/or seed number\n item['raw_string'] = ' '.join(response.css('.product-information span::text').getall())\n\n return item", "def parse_product(self, response):\n item = ProductItem()\n item['url'] = response.url\n item['vendor'] = parse_url(response.url).netloc\n\n item['product_name'] = response.css('h1::text').get()\n item['price'] = response.css('.summary .price bdi::text').get()\n\n item['available'] = not not response.css('.in-stock')\n # e.g. « 10 en stock »\n item['stock'] = response.css('.stock::text').get()\n\n # quantity, e.g. « 15 graines »\n raw_string = response.css(\n '.woocommerce-product-details__short-description p::text'\n ).getall()\n raw_string += response.css('#tab-description td::text').getall()\n item['raw_string'] = ' '.join(raw_string)\n\n return item", "def get_available_sizes(postID, sizeStr, product_mainID):\n api_url = 'https://store.lining.com/ajax/goods_details.html'\n data = {\n 'postID': postID,\n 'sizeStr': sizeStr,\n 'product_mainID': product_mainID\n }\n r = get_json(api_url, data=data)\n onsale_sizes = r['data']['onSale']\n logging.debug('Onsale Sizes: ' + repr(onsale_sizes))\n return onsale_sizes", "def parse_product(self, response):\n item = ProductItem()\n item['url'] = response.url\n item['vendor'] = parse_url(response.url).netloc\n\n item['product_name'] = response.css('h1::text').get()\n item['density'] = response.css('#description strong::text').get()\n item['available'] = not response.css('.msgSoldOut')\n\n\n for selector in response.css('.fpBktParam'):\n item['raw_string'] = selector.css('span::text').get()\n item['price'] = selector.css('div::text').getall()[1]\n yield item", "def retrieve_product_infos(self):\n\n # PRODUCT NAME\n try:\n product_name = self.product['product_name'].capitalize()\n except KeyError:\n product_name = None\n\n # PRODUCT CODE\n try:\n product_code = self.product['code'].capitalize()\n except KeyError:\n product_code = None\n\n # URL\n try:\n product_url = self.product['url'].lower()\n except KeyError:\n product_url = None\n\n # IMAGE URL\n try:\n image_url = self.product['image_url'].lower()\n except KeyError:\n image_url = None\n\n # QUANTITY\n try:\n quantity = self.product['quantity'].capitalize()\n except KeyError:\n quantity = None\n\n # INGREDIENTS\n try:\n ingredients = self.product['ingredients_text_fr'].capitalize()\n except KeyError:\n ingredients = None\n\n # BRAND\n brands = []\n try:\n for brand in self.product['brands'].split(','):\n brand = brand.strip().capitalize()\n if (\n brand != ''\n and brand not in brands\n ):\n brands.append(brand)\n except KeyError:\n pass\n\n # STORES\n stores = []\n try:\n for store in self.product['stores'].split(','):\n store = store.strip().capitalize()\n if (\n store != ''\n and store not in stores\n ):\n stores.append(store)\n except KeyError:\n pass\n\n # COUNTRY\n try:\n countries = self.product['countries'].capitalize()\n except KeyError:\n countries = None\n if 'France' in 
countries:\n countries = 'France'\n else:\n countries = None\n\n # COMPARE TO CATEGORY\n try:\n compare_to = self.product['compared_to_category'].capitalize().split(':')[1]\n except KeyError:\n compare_to = None\n try:\n Categories.objects.get(\n name=compare_to\n )\n except Categories.DoesNotExist:\n compare_to = None\n except:\n importable = False\n\n # CATEGORIES HIERARCHY\n try:\n categories_hierarchy = [\n category.split(':')[1] for category in self.product['categories_hierarchy']\n ]\n except KeyError:\n categories_hierarchy = None\n\n # NUTRISCORE GRADE\n nutriscore_labels = [\n 'nutrition_grade_fr',\n 'nutriscore_grade'\n ]\n nutriscore = 'F'\n i = 0\n while (\n i < len(nutriscore_labels)\n and nutriscore == 'F'\n ):\n try:\n nutriscore = self.product[nutriscore_labels[i]].upper()\n except KeyError:\n i += 1\n\n product_infos = {\n 'product_name': product_name,\n 'product_code': product_code,\n 'product_url': product_url,\n 'image_url': image_url,\n 'quantity': quantity,\n 'ingredients': ingredients,\n 'brands': brands,\n 'stores': stores,\n 'countries': countries,\n 'compare_to': compare_to,\n 'categories_hierarchy': categories_hierarchy,\n 'nutriscore': nutriscore\n }\n\n nutriments = self.product['nutriments']\n for nutriment in self.list_nutriments:\n try:\n product_infos[nutriment] = float(nutriments[nutriment])\n except KeyError:\n product_infos[nutriment] = 0\n\n return product_infos", "def get_information_about_products(store_name: str):\n\n return store_handler.get_information_about_products(store_name)", "def test_get_product(self):\n # get the id of a product\n test_product = self._create_products(1)[0]\n resp = self.app.get(\n \"/products/{}\".format(test_product.id), content_type=\"application/json\"\n )\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = resp.get_json()\n self.assertEqual(data[\"name\"], test_product.name)\n \n # print the repr of a product\n rep = \"%s\" % test_product", "def parse_product(self, response):\n item = ProductItem()\n item['url'] = response.url\n item['vendor'] = parse_url(response.url).netloc\n\n item['product_name'] = response.css('h1::text').get()\n item['description'] = response.css('.partiesouligne p::text').get()\n # densité possible\n\n # sous la forme `0.8 g - 3.40 €`\n item['raw_string'] = response.css('.prixsachet::text').get()\n item['price'] = -1\n\n return item", "def parse_product(self, response):\n item = ProductItem()\n item['url'] = response.url\n item['vendor'] = parse_url(response.url).netloc\n\n item['product_name'] = response.css('h1::text').get()\n item['price'] = response.css('.price > span::text').get()\n\n\n return item", "def product(self) -> str:\n return pulumi.get(self, \"product\")", "def scrapeInfoForItem(self, subpage, item):\n\t\thtmlcontent = self.HttpHandler.getHtmlContentFromLink(item.link)\n\t\tsoupPage = BeautifulSoup(htmlcontent, \"html.parser\")\n\n\t\t# brand\n\t\tresult = soupPage.findAll(\"p\", { \"class\" : \"product-brand--details\" })\n\t\tif len(result) > 0:\n\t\t\tres1 = result[0].find(\"a\")\n\t\t\tif res1 == None:\n\t\t\t\titem.Brandname = str(result[0].contents[0])\n\t\t\telif len(res1) > 0:\n\t\t\t\titem.Brandname = str(res1.contents[0])\n\n\t\t# Name\n\t\tresult = soupPage.findAll(\"h1\", { \"class\" : \"product-title--details\" })\n\t\tif len(result) > 0:\n\t\t\tres1 = result[0].find(\"span\", { \"itemprop\" : \"name\" })\n\t\t\tif len(res1) > 0:\n\t\t\t\titem.Productname = str(res1.contents[0])\n\n\t\t# Color\n\t\tresults = soupPage.findAll(\"a\", { \"class\" : 
\"js-switch-colourVariant\" })\n\t\tif len(results) == 0:\n\t\t\tresult2 = soupPage.findAll(\"h1\", { \"class\" : \"product-title--details\" })\n\t\t\tif len(result) > 0:\n\t\t\t\tres2 = result2[0].find(\"span\", { \"itemprop\" : \"color\" })\n\t\t\t\tif len(res2) > 0:\n\t\t\t\t\titem.Colors = str(res2.contents[0])\n\t\telse:\n\t\t\titem.Colors = \"|\".join([res[\"title\"] for res in results])\n\n\t\t# size\n\t\tresults = soupPage.findAll(\"span\", { \"class\" : \"product-sizeLabel\" })\n\t\titem.Sizes = \"|\".join([res.contents[0] for res in results])\n\n\t\t# beschreibung\n\t\tresult = soupPage.find(\"ul\", { \"class\" : \"product-infoList--twoCol\" })\n\t\tif result:\n\t\t\tresults = result.findAll(\"span\")\n\t\t\titem.Description = \"|\".join([res.contents[0] for res in results])\n\n\t\t# material \n\t\tresults = soupPage.find(\"ul\", { \"class\" : \"product-infoList\" })\n\t\tif results:\n\t\t\tresults = results.findAll(\"span\")\n\t\t\titem.Materials = \"|\".join([res.contents[0] for res in results])\n\n\t\t# pflege\n\t\tresults = soupPage.find(\"ul\", { \"class\" : \"product-care\" })\n\t\tif results:\n\t\t\tresults = results.findAll(\"li\")\n\t\t\titem.Maintenance = \"|\".join([res.get_text() for res in results])\n\n\t\t# current, regular price (current can be reduced)\n\t\tresult = soupPage.find(\"meta\", { \"itemprop\" : \"price\" })\n\t\tif result:\n\t\t\tresult = result[\"content\"]\n\t\t\tif \",\" in result:\n\t\t\t\tresult = str(result).replace(',','.')\n\t\t\tif u'\\xa0' in result:\n\t\t\t\tresult = result.replace(u'\\xa0', u' ')[:-1] # there is a € sign at the end\n\t\t\tif \"ab\" in result:\n\t\t\t\titem.CurrentPrice = result\n\t\t\telse:\n\t\t\t\titem.CurrentPrice = float(result)\n\t\tresult = soupPage.find(\"span\", { \"class\" : \"is-regular\" })\n\t\tif result:\n\t\t\tif \",\" in result.contents[0]:\n\t\t\t\tresult = str(result.contents[0]).replace(',','.')\n\t\t\tif u'\\xa0' in result:\n\t\t\t\tresult = result.replace(u'\\xa0', u' ')[:-1] # there is a € sign at the end\n\t\t\tif \"ab\" in result:\n\t\t\t\titem.RegularPrice = result\n\t\t\telse:\n\t\t\t\titem.RegularPrice = float(result)\n\t\telse:\n\t\t\titem.RegularPrice = item.CurrentPrice", "def product(self):\n return self.__values['product_name']", "def products_for_influencers(parameters, page=0, page_size=50):\n product_ids, _, total_hits = elastic_search_helpers.es_product_query_runner_v2(\n parameters, page, page_size)\n\n num_pages = int(math.ceil(float(total_hits) / page_size))\n\n return product_ids, total_hits, num_pages", "def get_product_details(product_url: str) -> dict:\n def get_available_sizes(postID, sizeStr, product_mainID):\n \"\"\"\n List size haye mojood va tamoom shode ro ba API mide\n POST: https://store.lining.com/ajax/goods_details.htm\n \"\"\"\n api_url = 'https://store.lining.com/ajax/goods_details.html'\n data = {\n 'postID': postID,\n 'sizeStr': sizeStr,\n 'product_mainID': product_mainID\n }\n r = get_json(api_url, data=data)\n onsale_sizes = r['data']['onSale']\n logging.debug('Onsale Sizes: ' + repr(onsale_sizes))\n return onsale_sizes\n\n def get_pid_from_url(url):\n \"\"\" ID mahsool ro az URL darmiare \"\"\"\n return re.findall(r'store.lining.com/shop/goods-(\\w+).html\\w*', url)[0]\n\n def translate_keyword(keyword):\n \"\"\" tarjome key marboot be desctioption \"\"\"\n define = {\n '运动类型': 'Sport Type',\n '性别': 'Sex',\n '颜色': 'Color',\n '鞋透气指数': 'Shoes breathability index',\n '鞋软硬指数': 'Shoe soft and hard index',\n }\n if keyword in define:\n return define[keyword]\n else:\n 
return keyword\n ###########################################################\n\n details = dict()\n soup = get_soup(product_url)\n\n # product ID\n pid = get_pid_from_url(product_url)\n logging.debug('PID: ' + pid)\n details['pid'] = pid\n\n # product name\n name = soup.find('h1', {'id': 'product_name'}).text.strip()\n logging.debug('Name: ' + name)\n details['name'] = name\n\n # part number\n sku = soup.find('span', {'id': 'partNumber'}).find('span', {'class': 'v'}).text.strip()\n part_number = sku[0:sku.find('-')]\n logging.debug('Part Number: ' + part_number)\n details['sku'] = sku\n details['part_number'] = part_number\n\n # price\n price = soup.find('span', {'id': 'listPrice'}).find('span', {'class': 'v'}).text.strip().replace('¥', '')\n price_offer = soup.find('span', {'id': 'offerPrice'}).find('span', {'class': 'v'}).text.strip().replace('¥', '')\n logging.debug('Price: %s [offer]-> %s' % (price, price_offer))\n details['price'] = price\n details['price_offer'] = price_offer\n\n # all sizes\n all_sizes = list()\n for tag in soup.find('div', {'id': 'sizelist'}).find_all('div', 'size-layer'):\n tag = tag.find('input')\n # all_size -> [(id, size, status), ...]\n all_sizes.append(\n (\n tag.get('id').replace('size_list_', ''),\n tag.get('value'),\n None,\n )\n )\n available_sizes = get_available_sizes(\n postID=pid,\n product_mainID=part_number,\n # first element of all_sizes list\n # all_size -> [(id, size, status), ...]\n sizeStr=','.join([s[0] for s in all_sizes]),\n )\n # update all sizes status\n for i in range(len(all_sizes)):\n if all_sizes[i][1] in available_sizes:\n all_sizes[i] = (\n all_sizes[i][0],\n all_sizes[i][1],\n 'onsale',\n )\n else:\n all_sizes[i] = (\n all_sizes[i][0],\n all_sizes[i][1],\n 'stockout',\n )\n logging.debug('All Sizes: %s' % repr(all_sizes))\n details['all_sizes'] = all_sizes\n\n # description images\n description_images = list()\n desc = soup.find('div', {'id': 'PD_desc_picture'})\n for img in desc.find_all('img'):\n img = img.get('orginalsrc')\n logging.debug('description_images[]: ' + img)\n description_images.append(img)\n details['description_images'] = description_images\n\n # description key/value\n description = dict()\n for li in soup.find('ul', {'id': 'p_spec'}).find_all('li'):\n key = li.find('span', {'class': 't'}).text.replace(':', '').strip()\n key = translate_keyword(key)\n value = li.find('span', {'class': 'v'}).text.strip()\n description[key] = value\n logging.debug('%s -> %s' % (key, value))\n details['description'] = description\n\n # slider images\n slider_images = list()\n for li in soup.find('div', {'class': 'box'}).find_all('li'):\n img = li.find('img').get('big')\n logging.debug('slider_images[]: ' + img)\n slider_images.append(img)\n details['slider_images'] = slider_images\n\n # related products\n related_products_id = list()\n for li in soup.find('div', {'id': 'f_litimg'}).find_all('li'):\n url = li.find('a').get('href')\n url = 'store.lining.com' + url\n pid = get_pid_from_url(url)\n logging.debug('related_products_id[]: %s -> %s' % (pid, url))\n related_products_id.append(pid)\n details['related_products_id'] = related_products_id\n\n return details", "def get_product_info(self, product_id: str) -> Dict:\n product_info_request = \"SELECT * FROM product WHERE id = %s\"\n return self.query(product_info_request, (product_id,))[0]", "def price_size(request, product_id):\n\n # redirect if user not superuser\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, incorrect url')\n return 
redirect(reverse('shop'))\n\n product = get_object_or_404(Product, pk=product_id)\n size = get_object_or_404(Size, product=product)\n if request.method == 'POST':\n form = SizeForm(request.POST, instance=size)\n if form.is_valid():\n form.save()\n messages.info(request, 'Size Prices Added Successfully.\\\n Process complete')\n return redirect(reverse('view_item', args=[product.id]))\n # return redirect(reverse('add_product'))\n else:\n messages.error(request, 'Failed to add prices.\\\n Please check that the form is valid.')\n else:\n form = SizeForm(instance=size)\n template = 'products/price_size.html'\n context = {\n 'form': form,\n 'product_id': product_id,\n 'product': product\n }\n\n return render(request, template, context)", "def parse_product(self, response):\n item = ProductItem()\n item['url'] = response.url\n item['vendor'] = parse_url(response.url).netloc\n\n item['product_name'] = response.css('.product_title::text').get()\n item['price'] = response.css('.product-information .price bdi::text').get()\n item['currency'] = 'EUR'\n item['description'] = response.css('.woocommerce-product-details__short-description p::text').get()\n\n item['published'] = response.css('meta[property~=\"article:published_time\"]::attr(content)').get() # '2016-03-25T11:15:56Z'\n item['last_modified'] = response.css('meta[property~=\"article:modified_time\"]::attr(content)').get()\n return item", "def get(self, page=1, sku=None):\n products = []\n if sku:\n product = Product.query.filter(Product.sku == sku).first_or_404()\n products.append(product)\n else:\n paginated_prods = Product.query.order_by(Product.id).paginate(page, 20, False)\n products = paginated_prods.items\n\n return marshal(products, product_fields), 200", "def product_sizes(self):\n return self._product_sizes" ]
[ "0.65338063", "0.6186062", "0.61699903", "0.6053883", "0.6036544", "0.6022222", "0.5981827", "0.5969535", "0.5941986", "0.59390604", "0.5915636", "0.59134763", "0.5905474", "0.5897939", "0.58669215", "0.58654606", "0.5860194", "0.5844665", "0.5825536", "0.57842505", "0.5776984", "0.57716185", "0.5742656", "0.573967", "0.57314336", "0.5719964", "0.571883", "0.5713451", "0.5697175", "0.569528" ]
0.78016984
0
Changes a page to the QVC.com reviews panel.
def go_product_reviews_page(self, driver, product_id, website): try: tab_list = driver.find_element_by_id("divProductDetailsCustomerReviewOptions") review_tab = tab_list.find_element_by_id("tabProductDetailCustomerReviewNav1") review_tab.click() except (NoSuchElementException, ElementNotVisibleException): pass time.sleep(1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def review(site, token, page):\n revid = page.latest_revision_id\n request = Request(site=site,\n action=\"review\",\n token=token,\n revid=revid)\n request.submit()", "def go_product_reviews_next(self, driver, website):\n paginator = driver.find_element_by_class_name(\"BVRRPager\")\n next_link = paginator.find_element_by_class_name(\"BVRRNextPage\")\n next_link.find_element_by_name(\"BV_TrackingTag_Review_Display_NextPage\").click()\n time.sleep(1)", "def review(self, review):\n self._review = review", "def go_to_movie_reviews_page_V2(movie):\n #/film/fichefilm-249877/critiques/spectateurs/\n driver.get(search_url_base+movie)\n # driver.get(\"https://www.allocine.fr/rechercher/?q=yourname\")\n sleep(5)\n movie_link = driver.find_element_by_link_text(movie)\n movie_link.click()\n # sleep(5)\n # close_popup = driver.find_element_by_xpath(\"//button[@class='modal-close icon icon-cross light js-trigger-overlay-close']\")\n # close_popup.click()\n sleep(5)\n movie_reviews_link = driver.find_element_by_link_text(reviews_link_text)\n movie_reviews_link.click()", "def test_add_review_and_display(self):\n # identification\n self.login_user('test_login', '123test')\n\n # add review\n self.add_review()\n\n # see review\n self.browser.implicitly_wait(3)\n self.browser.get(\"%s%s\" %\n (str(self.live_server_url), '/review/list'))\n time.sleep(0.5)\n link = self.browser.find_element_by_partial_link_text('Voir la critique')\n link.click()\n html = self.browser.page_source\n self.assertInHTML(\"\"\"\n <h4 class=\"rouge-fonce\">Critique de test_login</h4>\n \"\"\",\n html)\n self.browser.quit()", "def review(self, review: object):\n\n self._review = review", "def changePage(self, entries, login, name, pageNumber):\n\t\ttry:\n\t\t\tdisplay = self.displays[login]\n\t\t\ttry:\n\t\t\t\twindow = display[name]\n\t\t\t\ttry:\n\t\t\t\t\twindow.setCurrentPage(pageNumber)\n\t\t\t\t\tself.displayMl(window.getManialink(), name, login)\n\t\t\t\texcept AttributeError:\n\t\t\t\t\tself.log('error: ' + str(name) + ' does not seem to be a paged window')\n\t\t\texcept KeyError:\n\t\t\t\tself.log('error: ' + str(login) + ' has no window \"' + str(name) + '\" to change page on')\n\t\texcept KeyError:\n\t\t\tself.log('error: ' + str(login) + ' has no windows to change page')", "def add_review(self):\n url = \"/review/create/%s\" % self.picture.id\n self.browser.get(\"%s%s\" %\n (str(self.live_server_url), url))\n\n select = Select(self.browser.find_element_by_id(\n \"id_score_intention\"))\n select.select_by_index(4)\n select = Select(self.browser.find_element_by_id(\n \"id_score_technical\"))\n select.select_by_index(4)\n select = Select(self.browser.find_element_by_id(\n \"id_score_picture\"))\n select.select_by_index(4)\n select = Select(self.browser.find_element_by_id(\n \"id_score_global\"))\n select.select_by_index(4)\n\n self.browser.find_element_by_id(\n \"id_comment_intention\").send_keys(\"Commentaire intention\")\n\n submission_button = self.browser.find_element_by_class_name(\n 'btn-secondary')\n submission_button.click()\n time.sleep(2)\n html = self.browser.page_source\n self.assertInHTML(\"\"\"\n <h4 class=\"rouge-fonce\">Critique de test_login</h4>\n \"\"\",\n html)\n self.assertInHTML(\"\"\"\n <strong>Note moyenne de la revue : 4,0</strong>\n \"\"\",\n html)", "def go_to_movie_reviews_page_v1(movie):\n #get search bar input and send the movie name as key\n search_bar = driver.find_element_by_xpath(\"//input[@id='header-search-input']\")\n search_bar.send_keys(movie)\n sleep(5)\n search_button = 
driver.find_element_by_xpath(\"//button[@class='header-search-submit icon icon-search']\")\n search_button.click()", "def reviews(request):\n reviews = Review.objects.all()\n\n context = {\n 'reviews': reviews,\n }\n return render(request, 'reviews/reviews.html', context)", "def page_changed(self):\n if self.current >= 0:\n if self.not_again:\n self.not_again = False\n return\n ok = self.check_oldpage(self.current)\n if not ok:\n self.not_again = True\n self.nb.setCurrentIndex(self.current)\n return\n self.current = self.nb.currentIndex()\n go = self.nb.currentWidget()\n if go.first_time:\n go.first_time = False\n go.create_widgets()\n go.create_actions()\n msg = go.refresh_screen(self.current_data)\n if msg:\n qtw.QMessageBox.information(self, self.title, msg)\n self.current = 0\n self.nb.setCurrentIndex(self.current)\n go.refresh_screen(self.current_data)", "def reviews(request):\n review = Review.objects.all()\n return render(request, 'reviews.html', {\"review\": review})", "def reviews(self, reviews: object):\n\n self._reviews = reviews", "def setCurrentPage(self):\n pass", "def increase_page_view(sender, **kwargs):\n pass", "def fetch(self):\n try:\n self.genre = 'Review'\n log.debug(self.log_msg(\"Fetching the prouct page url %s\"%self.currenturi))\n res=self._getHTML(self.currenturi) # Assuming self.currenturi is at the product page\n self.rawpage=res['result']\n self._setCurrentPage()\n try:\n self.parent_page_title = stripHtml(self.soup.find('h1',{'id':'pgTitleDetail'}).renderContents())\n except:\n self.parent_page_title =''\n try:\n self.__product_price = self.soup.find('tbody',{'class':'prices'}).td.renderContents().replace('$','')\n except:\n log.exception(\"Error in fetching product_price\")\n self.__product_price = None\n\n parent_page_url = self.task.instance_data['uri']\n review_first_page_url = self.soup.find('a',text=\"Show All Customer Reviews &#187; \").parent['href']\n review_url_order = \"&sortReviewsBy=DateDescending\"\n self.currenturi = self.base_url + review_first_page_url + review_url_order\n log.info(self.log_msg('current_uri :: %s'%(self.currenturi)))\n self._getParentPage()\n self.next_url_links=[]\n self.fetch_next_link = True\n while self.fetch_next_link:\n self._iterateReviewPages(parent_page_url)\n return True\n except Exception,e:\n log.exception(self.log_msg(\"Exception occured in fetch()\"))\n return False", "def edit_review(review_id):\n form = EditReviewForm()\n try:\n review = Review.from_mongo(**mongo.db.reviews.find_one({\"_id\": ObjectId(review_id)}))\n except Exception as e:\n raise Exception(e)\n else:\n game = Game.from_mongo(**mongo.db.games.find_one({\"_id\": ObjectId(str(review.game_id))}))\n user_name = session.get('username')\n if user_name == review.author_ref['author_name']:\n user = User.from_mongo(**mongo.db.users.find_one({\"name\": user_name}))\n\n if form.validate_on_submit():\n review.name = form.title.data\n review.text = form.review_text.data\n review_ref = review.create_review_ref()\n review.update_review()\n for game_review in game.reviews:\n if game_review.get('review_pub_date') == review.pub_date:\n game.reviews.remove(game_review)\n game.reviews.append(review_ref)\n game.update_game()\n for user_review in user.reviews:\n if user_review.get('review_pub_date') == review.pub_date:\n user.reviews.remove(user_review)\n user.reviews.append(review_ref)\n user.update_user()\n return redirect(url_for('review', review_id=review_id))\n\n elif request.method == \"GET\":\n form.title.data = review.name\n form.review_text.data = 
review.text\n\n return render_template('edit_review.html.jinja',\n title='Edit Review',\n review_id=review_id,\n form=form\n )", "def question_comments(request, lot_id, question_pk):\n question = get_object_or_404(Question, lot__slug=lot_id, pk=question_pk)\n question.views += 1\n question.save()\n return render(request, 'comments_on_question.html', {'question': question})", "def review():\r\n\r\n # Ensure isbn_number is submitted\r\n if not request.form.get(\"isbn_number\"):\r\n return apology(\"Invalid book\", 403)\r\n\r\n # Ensure review is submitted\r\n if not request.form.get(\"review\"):\r\n return apology(\"Text is not submitted\", 403)\r\n\r\n # Check if book exist, if not error out\r\n\r\n # add review to db\r\n\r\n return redirect(url_for(details, isbn_number=request.form.get(\"isbn_number\")))", "def set_new_page(self):\n self.num += 1\n c = self.canvas\n c.showPage()\n self.decorate()\n self.x = self.marginsides\n self.lastx = self.marginsides\n self.y = self.height - self.margintop\n #self.print_text([\"Page %s\" % unicode(self.num)], fontsize=8,\n # style=\"right\")\n self.put_page_num()\n #self.x = self.marginsides\n #self.lastx = self.x\n #self.y = self.y - 32\n self.pagebegin = 1", "def edit_review(request, review_id):\n user_profile = get_object_or_404(UserProfile, user=request.user)\n review = get_object_or_404(UserReview, id=review_id)\n review_form = ReviewForm(instance=review)\n\n if request.user == user_profile.user:\n if request.method == 'POST':\n review_form = ReviewForm(request.POST, instance=review)\n if review_form.is_valid():\n if len(request.POST[\"product\" or \"review_content\"]) <= 0:\n messages.error(\n request, \"You have not completed the review form. \\\n Please add content and try again.\")\n return redirect(reverse(\"gallery\"))\n else:\n review = review_form.save(commit=False)\n user_profile = user_profile\n review_form.save()\n messages.success(request, 'Your review has \\\n been updated.')\n return redirect(reverse(\"gallery\"))\n else:\n review_form = ReviewForm(instance=review)\n\n template = 'gallery/edit_review.html'\n context = {\n 'review_form': review_form,\n 'user_profile': user_profile,\n 'review': review,\n }\n\n return render(request, template, context)", "def _update_page(self):\n\n project = self._project\n\n self._pyqt5_page.project = project\n self._pyqt4_page.project = project\n\n if project.application_is_pyqt5:\n self._pyqt5_page.configure()\n self._pyqt4_page.clear()\n\n self.setCurrentWidget(self._pyqt5_page)\n else:\n self._pyqt5_page.clear()\n self._pyqt4_page.configure()\n\n self.setCurrentWidget(self._pyqt4_page)", "def test_add_reviews(self):\n metadata = Metadata(DataSource.CONTENT_CAFE)\n content = self.data_file(\"reviews.html\")\n self.http.queue_requests_response(200, 'text/html', content=content)\n self.api.add_reviews(metadata, self.identifier, self.args)\n\n # We extracted six reviews from the sample file.\n reviews = metadata.links\n eq_(6, len(reviews))\n assert all([x.rel==Hyperlink.REVIEW for x in reviews])\n assert \"isn't a myth!\" in reviews[0].content\n\n # We incidentally figured out the book's title.\n eq_(\"Shadow Thieves\", metadata.title)", "def onUpdatePage1(self, params: dict) -> None:\n if params is not None:\n self._parse_params(1, params)\n self.view.show_page1(**self.page1_config)", "def review_link(self, review_link):\n\n self._review_link = review_link", "def test_review_dashboard(self):\n self.setup_course(True, True)\n response = self.client.get(self.url)\n # the default backend does not 
support the review dashboard\n self.assertNotContains(response, 'Review Dashboard')\n\n backend = TestBackendProvider()\n config = apps.get_app_config('edx_proctoring')\n with patch.object(config, 'backends', {'test': backend}):\n create_exam(\n course_id=self.course.id,\n content_id='test_content',\n exam_name='Final Test Exam',\n time_limit_mins=10,\n backend='test',\n )\n response = self.client.get(self.url)\n self.assertContains(response, 'Review Dashboard')", "def newreview():\n objectid = request.values.get('objectid', 0, type=int)\n if not objectid:\n abort(400)\n workflow_object = workflow_object_class.get(objectid)\n\n form = AuthorUpdateForm(\n data=workflow_object.extra_data[\"formdata\"], is_review=True)\n ctx = {\n \"action\": url_for('.reviewhandler', objectid=objectid),\n \"name\": \"authorUpdateForm\",\n \"id\": \"authorUpdateForm\",\n \"objectid\": objectid\n }\n\n return render_template('authors/forms/review_form.html', form=form, **ctx)", "def wiki_page_changed(self, page, version, t, comment, author, ipnr):\n if 'wiki' not in self.sources:\n return\n gnp = GrowlNotificationPacket(notification='wiki',\n title='Page updated',\n description=self._wiki_repr(page,\n comment))\n gs = GrowlSender(self.env)\n gs.notify(self._get_hosts('wiki'), gnp)", "def review_pages(catalog):\n review_pages_list = list()\n errors = 0\n for ix, movie in enumerate(catalog.iloc[:, 0], 1):\n try:\n soup_2 = fetch(movie, \"/reviews/?page=1\").find_all(\"span\", {\"class\", \"pageInfo\"})\n if len(soup_2) >= 1:\n for n in range(1, int(soup_2[0].text[-2:]) + 1):\n review_pages_list.append(movie + \"/reviews/?page=\" + str(n))\n except:\n errors += 1\n print('\\r3/4 — {:.2%} of review page URLs scraped. Error rate: {:.2%}'.format(\n ix/len(catalog), errors/ix), end=' ')\n print('\\r{} review page URLs successfully scraped. Error rate: {:.2%}'.format(\n len(review_pages_list)-errors, errors/ix), end='\\n')\n return review_pages_list", "def update():\n print(\"current page is \", wikiPageStackTrace[-1].getTitle())\n if wikiPageStackTrace[-1].getUrl() != goalPage.getUrl(): # no victory\n eel.addRoundNumber()\n eel.printInPageList(wikiPageStackTrace[-1].getOnlyLinksListJS())\n eel.updateCurrentPage(\n [wikiPageStackTrace[-1].getTitle(), wikiPageStackTrace[-1].getUrl()])\n eel.updateCurrentPageDescription(\n wikiPageStackTrace[-1].getFirstSentence())\n eel.updateRoundNumber()\n eel.updateHistory(getHistoryTitles())\n eel.hideLoader()\n elif wikiPageStackTrace[-1].getUrl() == goalPage.getUrl(): # victory\n eel.hideLoader()\n eel.addRoundNumber()\n eel.updateRoundNumber()\n eel.updateHistory(getHistoryTitles())\n eel.showVictory()\n # we need to do this because overwise the JS is not fat egoth to respond so we get an infinit loading\n time.sleep(0.5)\n eel.hideLoader()" ]
[ "0.66095716", "0.6170462", "0.6116648", "0.59345096", "0.58093804", "0.57775396", "0.5776218", "0.57440305", "0.5719591", "0.5656646", "0.56079626", "0.5539385", "0.54990786", "0.5485342", "0.5400703", "0.5329266", "0.5316898", "0.5287261", "0.5271097", "0.5268099", "0.522934", "0.52185494", "0.5215829", "0.5204743", "0.51579654", "0.51566476", "0.51513344", "0.512947", "0.5110404", "0.5098702" ]
0.6413856
1
Gets the total number of reviews from a QVC.com product page.
def get_product_total_reviews(self, driver): try: frame = driver.find_element_by_id("BVRRRatingSummaryLinkReadID") total_reviews = frame.find_element_by_class_name("BVRRNumber").text return total_reviews except NoSuchElementException: return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _number_of_review_pages(self, star_num):\n url = self._star_reviews_url(star_num)\n content = urlopen(url).read()\n soup = bs(content, ['fast', 'lxml'])\n span = soup.find('span', 'paging')\n number_of_pages = span.findChildren('a')[1].contents[0]\n return int(number_of_pages)", "def review_count(self):\n\t\tcount = self.reviews.aggregate(\n\t\t\tcount=models.Count('rating'))['count']\n\t\tif count is None:\n\t\t\tcount = 0\n\t\treturn count", "def num_reviews(self):\n return self.review_set.count()", "def get_size(self):\n return len(self.reviews)", "def getNumberOfReviews(self):\n try:\n count = 0\n with open(self.metadata_path, \"r\", newline='') as metadata:\n mdata = csv.reader(metadata, delimiter=' ', quotechar='|')\n for review_data in mdata:\n count += 1\n return count\n except Exception:\n print(\"Cant load metadata file\")\n traceback.print_exc()", "def get_toprated_with_count(self):\n\t\tconnection = self.connect_to_db()\n\t\tcursor = connection.cursor()\n\t\tcursor.execute('''select count(*) from movies;''')\n\t\tpage_count = cursor.fetchone()[0]\n\t\tconnection.close()\n\t\tpage_count = int(ceil(page_count))\n\t\treturn page_count", "def extract_reviews(url, review_count):\n\n api_url = url + \"%3Fstart%3D40\"\n\n html_obj = retrieve_html(url)\n\n review_list = parse_page(html_obj)\n\n result = review_list\n\n num_pages = review_count // 20 + 1\n\n for i in range(1, num_pages):\n curr_offset = i * 20\n curr_url = api_url + \"&start=%d\" % curr_offset\n\n curr_page_reviews = parse_page(retrieve_html(curr_url)[1])\n\n result += curr_page_reviews\n\n return result", "def item_view_reviews(request):\n\n result = {}\n u = request.user\n\n p = Product.objects.get_by_sku(request.POST['sku'])\n if p is not None:\n # product details are not needed\n #result = p.details(u)\n\n reviews = Review.objects.filter(product=p).exclude(reviewer=u)\n result['count'] = str(reviews.count())\n result['reviews'] = [r.get_json(me=u) for r in reviews]\n else:\n result['result'] = '0'\n\n return JSONHttpResponse(result)", "def get_reviews(item_id, shop_id, review_num=10) -> list:\n get_url = f\"{_shopee_base_url}/api/v2/item/get_ratings?filter=0&flag=1&itemid={item_id}&limit={review_num}&offset=0&shopid={shop_id}\"\n r = requests.get(get_url, headers=_user_agent_header, proxies=proxy_dict)\n ratings = r.json()['data']['ratings']\n reviews = []\n for rating in ratings:\n reviews.append({\n 'origin': 'Shopee',\n 'author': rating['author_username'],\n 'rating': rating['rating_star'],\n 'review': rating['comment'], \n 'review_likes': rating['like_count'],\n 'summary': 'Summary is very nice. Amazing!'\n })\n return reviews", "def rating_review(catalog):\n reviews = list()\n errors = 0\n for ix, page in enumerate(catalog.iloc[:, 0], 1):\n try:\n soup_2 = fetch(page, \"\").find_all(\"div\", {\"class\": \"col-xs-16 review_container\"})\n for comment in soup_2:\n comment_text = comment.find_all(\"div\", {\"class\": \"the_review\"})[0].text.strip()\n icon = str(comment.find_all(\"div\")[0])\n if \"fresh\" in icon:\n reviews.append('1 - ' + comment_text)\n elif \"rotten\" in icon:\n reviews.append('0 - ' + comment_text)\n except:\n errors += 1\n print('\\r4/4 — {:.2%} of reviews scraped. Error rate: {:.2%}'.format(ix/len(catalog),\n errors/ix), end=' ')\n print('\\r{} reviews successfully scraped. 
Error rate: {:.2%}'.format(\n len(reviews)-errors, errors/ix), end='\\n')\n return reviews", "def get_product_count(self):\n return self.products.count()", "def _scrape_total_matches(self, response):\n total_matches = is_empty(\n response.xpath(\n '//*[@id=\"resultsTabs\"]/.//a[@data-tabname=\"products\"]'\n '/span/text()'\n ).extract(), '0'\n )\n\n return int(total_matches)", "def fetch(self):\n try:\n self.genre = 'Review'\n log.debug(self.log_msg(\"Fetching the prouct page url %s\"%self.currenturi))\n res=self._getHTML(self.currenturi) # Assuming self.currenturi is at the product page\n self.rawpage=res['result']\n self._setCurrentPage()\n try:\n self.parent_page_title = stripHtml(self.soup.find('h1',{'id':'pgTitleDetail'}).renderContents())\n except:\n self.parent_page_title =''\n try:\n self.__product_price = self.soup.find('tbody',{'class':'prices'}).td.renderContents().replace('$','')\n except:\n log.exception(\"Error in fetching product_price\")\n self.__product_price = None\n\n parent_page_url = self.task.instance_data['uri']\n review_first_page_url = self.soup.find('a',text=\"Show All Customer Reviews &#187; \").parent['href']\n review_url_order = \"&sortReviewsBy=DateDescending\"\n self.currenturi = self.base_url + review_first_page_url + review_url_order\n log.info(self.log_msg('current_uri :: %s'%(self.currenturi)))\n self._getParentPage()\n self.next_url_links=[]\n self.fetch_next_link = True\n while self.fetch_next_link:\n self._iterateReviewPages(parent_page_url)\n return True\n except Exception,e:\n log.exception(self.log_msg(\"Exception occured in fetch()\"))\n return False", "def getReviewNumbers(singleStoryFooter):\n\twords = singleStoryFooter.get_text()\n\treview = re.compile(r\"Reviews: \\d+\").search(words)\n\tif review:\n\t\treviewNum = review.group()[9:]\n\t\treturn int(reviewNum)\n\telse: return 0", "def product_count(self) -> int:\n return self._product_count", "def reviews(self, **kwargs):\n\n path = self._get_movie_id_path('reviews')\n resp = self._get_method(path, kwargs)\n return resp", "def _scrape_results_per_page(self, response):\n return self.items_per_page", "def number_of_reviews(user: User, only_today: bool) -> int:\n since = arrow.utcnow() if only_today else arrow.get(0)\n since_timestamp = since.format('YYYY-MM-DD')\n\n num_edits = DBDiscussionSession.query(ReviewEdit).filter(\n ReviewEdit.detector_uid == user.uid,\n ReviewEdit.timestamp >= since_timestamp).count()\n num_deletes = DBDiscussionSession.query(ReviewDelete).filter(\n ReviewDelete.detector_uid == user.uid,\n ReviewDelete.timestamp >= since_timestamp).count()\n num_optimizations = DBDiscussionSession.query(ReviewOptimization).filter(\n ReviewOptimization.detector_uid == user.uid,\n ReviewOptimization.timestamp >= since_timestamp).count()\n num_duplicates = DBDiscussionSession.query(ReviewDuplicate).filter(\n ReviewDuplicate.detector_uid == user.uid,\n ReviewOptimization.timestamp >= since_timestamp).count()\n\n return num_edits + num_deletes + num_optimizations + num_duplicates", "def reviews(self) -> object:\n return self._reviews", "def fetch_reviews(self, rb_id, start=0, max_results=25):\r\n return self.api_call('/api/review-requests/%s/reviews/?start=%s&max-results=%s'\r\n % (rb_id, start, max_results))['reviews']", "def calculate_num_scrape_pages(h, r):\n p = (int(search_hits)/res_per_page) + 1\n return p", "def pagecount(self):\r\n \r\n return len(self.results) // self.perpage + 1", "def _scrape_results_per_page(self, response):\n num = is_empty(\n response.xpath(\n 
'//*[@id=\"pgSize\"]/option[@selected=\"selected\"]'\n '/@value'\n ).extract(), '0'\n )\n\n return int(num)", "def get_total_number_of_documents(self):\n return self.total_number_of_documents", "def review_details(product_id):\n\n # Gets the product's specifications from the database\n product = mongo.db.products.find_one({\"_id\": ObjectId(product_id)})\n\n # Sets the page title\n page_title = product[\"name\"] + \" Review\"\n\n # Sets the current user\n\n if session.get('user'):\n current_user = \"{} {}\".format(session['user']['first_name'],\n session['user']['last_name'])\n\n else:\n current_user = None\n\n \"\"\"\n Gets the product's reviews from the database and sorts them. Sort method is\n from https://docs.mongodb.com/manual/reference/method/cursor.sort\n /index.html\n \"\"\"\n reviews = list((mongo.db.reviews.find(\n {\"product\": product[\"name\"]})).sort(\"date_added\", -1))\n\n \"\"\"\n Updates the date_added value in the review dictionary to be\n in the correct format. Code is from https://www.w3schools.com/python/\n python_datetime.asp\n \"\"\"\n for review in reviews:\n review['date_added'] = review['date_added'].strftime(\"%d %B %Y\")\n\n \"\"\"\n Calculates the ratings as percentages and returns a dictionary containing\n the ratings values\n \"\"\"\n\n ratings = ratings_percentages(product, len(reviews))\n\n return render_template(\"review_details.html\",\n page_title=page_title,\n product=product,\n ratings=ratings,\n reviews=reviews,\n current_user=current_user)", "def get_cart_counter(request):\n return len(get_cart_items(request))", "def get_item_reviews(self, soup: BeautifulSoup) -> None:\n try:\n reviews = soup.find(\"span\", class_=\"_a7a5sx\").get_text()\n reviews = re.findall(\"[0-9]+\", reviews)[0]\n except AttributeError:\n reviews = None\n self.__collected_dic[\"reviews\"].append(reviews)", "def get_reviews(review_url):\n print review_url\n html = urllib.urlopen(review_url).read()\n soup = bs4.BeautifulSoup(html, 'html.parser')\n\n rating_scores = soup.findAll(\"span\", \"ratingScore\")\n num_ratings = len(rating_scores) - 1\n\n current_reviews = soup.findAll(\"div\", \"currentVintageProfessinalReviews\")\n num_cur_reviews = str(current_reviews).count('ratingProvider')\n past_reviews = soup.findAll(\"ul\", \"pastVintagesProfessionalReviews\")\n num_past_reviews = str(past_reviews).count('ratingProvider')\n\n print 'There are {0} reviews for prior vintages of this wine.'.format(num_past_reviews)\n print 'There are {0} current reviews for this vintage.\\n'.format(num_cur_reviews)\n\n rating_provider = soup.findAll(\"span\", \"ratingProvider\")\n rating_score = soup.findAll(\"span\", \"ratingScore\")\n reviewers = re.findall('(?<![A-Z])[>]([A-Z]+(?![A-Z]))', str(rating_provider))\n ratings = re.findall('(?<![A-Z])[0-9]{2}(?![A-Z])', str(rating_score))\n\n print \"Ratings List:\", ratings\n print \"Current Reviews: \", num_cur_reviews\n\n currentreviews = []\n for j in range(num_cur_reviews):\n print \"Current Review #\"+str(j+1)+\":\", reviewers[j], ratings[j]\n currentreviews.append((reviewers[j], ratings[j]))\n print currentreviews\n\n print \"\\nPast Reviews: \", num_past_reviews\n past_review_ratings = []\n for k in range(num_cur_reviews, num_past_reviews+num_cur_reviews):\n #print \"Past Review #\"+str(k-num_cur_reviews+1)+\":\", reviewers[k], int(ratings[k])\n past_review_ratings.append(float(ratings[k]))\n if k > 30:\n break\n if num_past_reviews != 0:\n avg_past_reviews = sum(past_review_ratings)/len(past_review_ratings)\n round(avg_past_reviews, 2)\n 
else:\n avg_past_reviews = 0\n\n print \"Average of Past Reviews: \", avg_past_reviews\n\n return currentreviews, avg_past_reviews", "def getTokenSizeOfReviews(self):\n res = 0\n with open(self.word_to_docs_path, 'rb') as bin:\n while bin.tell() != os.fstat(bin.fileno()).st_size:\n # get wordid:\n int.from_bytes(bin.read(4), 'big')\n # get frequency:\n frequency = int.from_bytes(bin.read(4), 'big')\n res += frequency\n # skip documents:\n int.from_bytes(bin.read(4 * frequency), 'big')\n return res", "def get_review_page(review_link):\n\n session = r.Session()\n response = session.get(BASE_URL + '/music/albumreviews/' + review_link,\n headers=HEADERS)\n return response" ]
[ "0.6790198", "0.65498984", "0.65494907", "0.6381984", "0.59667504", "0.5930726", "0.59128344", "0.5838525", "0.5756143", "0.5753653", "0.574366", "0.57091105", "0.56921226", "0.56867635", "0.5667898", "0.5663936", "0.5637686", "0.56215745", "0.5612181", "0.5611977", "0.55984026", "0.55900466", "0.5585591", "0.5544834", "0.5522269", "0.55111164", "0.5507508", "0.5505616", "0.5496021", "0.5491357" ]
0.7906734
0
Gets the next page of QVC.com product reviews.
def go_product_reviews_next(self, driver, website): paginator = driver.find_element_by_class_name("BVRRPager") next_link = paginator.find_element_by_class_name("BVRRNextPage") next_link.find_element_by_name("BV_TrackingTag_Review_Display_NextPage").click() time.sleep(1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fetch(self):\n try:\n self.genre = 'Review'\n log.debug(self.log_msg(\"Fetching the prouct page url %s\"%self.currenturi))\n res=self._getHTML(self.currenturi) # Assuming self.currenturi is at the product page\n self.rawpage=res['result']\n self._setCurrentPage()\n try:\n self.parent_page_title = stripHtml(self.soup.find('h1',{'id':'pgTitleDetail'}).renderContents())\n except:\n self.parent_page_title =''\n try:\n self.__product_price = self.soup.find('tbody',{'class':'prices'}).td.renderContents().replace('$','')\n except:\n log.exception(\"Error in fetching product_price\")\n self.__product_price = None\n\n parent_page_url = self.task.instance_data['uri']\n review_first_page_url = self.soup.find('a',text=\"Show All Customer Reviews &#187; \").parent['href']\n review_url_order = \"&sortReviewsBy=DateDescending\"\n self.currenturi = self.base_url + review_first_page_url + review_url_order\n log.info(self.log_msg('current_uri :: %s'%(self.currenturi)))\n self._getParentPage()\n self.next_url_links=[]\n self.fetch_next_link = True\n while self.fetch_next_link:\n self._iterateReviewPages(parent_page_url)\n return True\n except Exception,e:\n log.exception(self.log_msg(\"Exception occured in fetch()\"))\n return False", "def go_product_search_next(self, driver):\n try:\n pagination = driver.find_element_by_class_name(\"divPageLinks\")\n pagination.find_element_by_class_name(\"next\").click()\n except NoSuchElementException:\n raise NoSuchElementException", "def _scrape_next_results_page_link(self, response):\n next_pages = response.xpath('//*[@id=\"pagnNextLink\"]/@href |'\n '//ul[contains(@class, \"a-pagination\")]'\n '/a[contains(text(), \"eiter\")]/@href').extract()\n next_page_url = None\n\n if len(next_pages) == 1:\n next_page_url = next_pages[0]\n elif len(next_pages) > 1:\n self.log(\"Found more than one 'next page' link.\", ERROR)\n\n return next_page_url", "def go_product_reviews_page(self, driver, product_id, website):\n try:\n tab_list = driver.find_element_by_id(\"divProductDetailsCustomerReviewOptions\")\n review_tab = tab_list.find_element_by_id(\"tabProductDetailCustomerReviewNav1\")\n review_tab.click()\n except (NoSuchElementException, ElementNotVisibleException):\n pass\n time.sleep(1)", "def next_item(self):\n if self.page + 1 > len(self.result) - 1:\n self.page = 0\n else:\n self.page += 1\n return self.present_view()", "def nextPage(self):\n raise NotImplementedError()", "def get_next_page(self, data):\n\n next_page = None\n\n if \"d\" in data:\n logger.debug(f\"'d' found (OData v2).\")\n if \"__next\" in data[\"d\"]:\n logger.debug(f\"'d.__next' found\")\n next_page = data[\"d\"].get(\"__next\")\n elif \"value\" in data:\n logger.debug(f\"'value' found (OData v3 or v4).\")\n if \"odata.nextLink\" in data:\n logger.debug(f\"'odata.nextLink' found (Odata v3).\")\n next_page = data.get(\"odata.nextLink\")\n elif \"@odata.nextLink\" in data:\n logger.debug(f\"'@odata.nextLink' found (Odata v4).\")\n next_page = data.get(\"@odata.nextLink\")\n else:\n logger.debug(f\"No more pages.\")\n\n return next_page", "async def go_to_next_page(self, payload):\n await self.show_checked_page(self.current_page + 1)", "def get_next_page_url(self, response):\n return None", "def parse(self, response):\n product_urls = response.css('.product-details > a::attr(href)').getall()\n for product_url in product_urls:\n yield scrapy.Request(response.urljoin(product_url), self.parse_product)\n next_page_url = response.css('.next::attr(href)').get()\n if next_page_url is not None:\n yield 
scrapy.Request(response.urljoin(next_page_url))", "def parse(self, response):\n product_urls = response.css(\n '.product > a.woocommerce-loop-product__link::attr(href)'\n ).getall()\n for product_url in product_urls:\n yield scrapy.Request(response.urljoin(product_url), self.parse_product)\n\n next_page_url = response.css('.next::attr(href)').get()\n if next_page_url is not None:\n yield scrapy.Request(response.urljoin(next_page_url))", "def next_page(self):\r\n if self.page.has_next():\r\n self.query_dict['page'] = self.page.next_page_number()\r\n return \"%s?%s\" % (self.path, urlencode(self.query_dict))\r\n return \"\"", "def get_next_page(self, next_url):\n response, err_msg = self.api_call(\"GET\", next_url, None, refresh_authentication=True)\n\n return response.json()", "def _get_next_page(questionnaire, page):\n pages = PageDB(questionnaire).get_pages()\n i = 0\n nr_of_pages = PageDB(questionnaire).get_nr_of_pages()\n while i < nr_of_pages - 1 and pages[i].id <= page.id:\n i += 1\n next_page = pages[i]\n return next_page", "def parse(self, response):\n product_urls = response.css(\n '.woocommerce-loop-product__link::attr(href)'\n ).getall()\n for product_url in product_urls:\n yield scrapy.Request(response.urljoin(product_url), self.parse_product)\n\n next_page_url = response.css('.next::attr(href)').get()\n if next_page_url is not None:\n yield scrapy.Request(response.urljoin(next_page_url))", "def get_next_page(self):\n return min((self.get_page() + 1), self.get_last_page())", "def _response_next_url(self, response):\n return response.css(\n \"ul.pagination .pager__item--next a::attr(href)\"\n ).extract_first()", "def next_page():\n\tprint('-> \\nClicking next page')\n\told_html = driver.find_element_by_tag_name('html').text\n\tlink = driver.find_element_by_xpath(XPATHS['next_page']) \n\tlink.click()\n\treturn wait_for(old_html)", "def all_products_view(request):\n products = Product.objects.all()\n paginator = Paginator(products, 6)\n page_number = request.GET.get('page', 1)\n page = paginator.page(page_number)\n\n \"\"\"render a products html page and within that page we will have access to products, so all_products\"\"\"\n\n if page.has_next():\n\n next_url = f'?page={page.next_page_number()}'\n\n else:\n\n next_url = ''\n\n if page.has_previous():\n\n prev_url = f'?page={page.previous_page_number()}'\n\n else:\n\n prev_url = ''\n\n \n return render(request, 'products.html', {'page': page, 'next_page_url': next_url, 'prev_page_url': prev_url})", "def next_page(self):\n return self._next_page", "def next_page(self):\n return self._next_page", "def go_to_movie_reviews_page_V2(movie):\n #/film/fichefilm-249877/critiques/spectateurs/\n driver.get(search_url_base+movie)\n # driver.get(\"https://www.allocine.fr/rechercher/?q=yourname\")\n sleep(5)\n movie_link = driver.find_element_by_link_text(movie)\n movie_link.click()\n # sleep(5)\n # close_popup = driver.find_element_by_xpath(\"//button[@class='modal-close icon icon-cross light js-trigger-overlay-close']\")\n # close_popup.click()\n sleep(5)\n movie_reviews_link = driver.find_element_by_link_text(reviews_link_text)\n movie_reviews_link.click()", "def get_comments(self,\n product,\n max_page=1000):\n page = 1\n while True:\n print(\"______________page:{page}______________\".format(page=page))\n self._get_page_comments(product,page)\n page += 1\n if page > max_page:\n break", "def fetch(self):\n self.genre = \"Review\"\n try:\n if not self.__setSoup():\n log.info(self.log_msg(\"Soup not set,returning false\"))\n return False\n #if 
not self._getParentPage():\n # log.info(self.log_msg(\"Parent page not found\"))\n while True:\n parent_page_soup = copy.copy(self.soup)\n # log.info(self.log_msg('current uri%s'%parent_page_soup))\n if not self.__addReviews():\n log.info(self.log_msg('fetched all reviews for the url %s'\\\n %self.task.instance_data['uri']))\n \n log.info(self.log_msg('Next page%s'%self.currenturi))\n try:\n \n # self.currenturi = self.task.instance_data['uri'].rsplit\\\n # ('/', 1)[0] + '/' + self.soup.find('a', \\\n # title='Go to the next page')['href']\n self.currenturi = 'http://www.phonedog.com' + parent_page_soup.find('a',title='Go to the next page')['href']\n \n if not self.__setSoup():\n log.info(self.log_msg('soup not set for the uri %s'%\\\n self.currenturi))\n break\n except:\n log.info(self.log_msg('Next page not found for the uri %s'%\\\n self.currenturi))\n break\n return True\n except:\n log.exception(self.log_msg(\"Exception in fetch\"))\n return False", "def get_next_page(prompt_response, current_page):\n if prompt_response.lower() in [\"n\", \"next\", \"f\", \"forward\"]:\n return current_page + 1\n elif prompt_response.lower() in [\"p\", \"previous\", 'b', \"back\"]:\n return max(current_page - 1, 0)\n elif prompt_response.lower() in [\"q\", \"cancel\", \"c\"]:\n raise click.exceptions.Abort()\n else:\n return -1", "def next_page_token(self) -> global___Snippet.PaginatedResponseHandling.NextPageToken:", "def parse(self, response):\n product_urls = response.css(\n '.product-li .product-image a::attr(href)'\n ).getall()\n for product_url in product_urls:\n yield scrapy.Request(response.urljoin(product_url), self.parse_product)\n\n\n next_page_number = 2\n if '?' in response.url:\n return\n while next_page_number < 37:\n # import logging\n # logging.log(logging.WARNING, f\"This is a warning {len(product_urls)} : {product_urls[0]}\")\n next_page_url = f'{response.url}?p={next_page_number}'\n yield scrapy.Request(response.urljoin(next_page_url))\n next_page_number += 1", "def get_review_page(review_link):\n\n session = r.Session()\n response = session.get(BASE_URL + '/music/albumreviews/' + review_link,\n headers=HEADERS)\n return response", "async def next_page(self):\n await self.checked_show_page(self.current_page + 1)", "def list_get_next_page(self, soup):\n # Get the 'next page' element at the bottom of the page\n next_page_tag = soup.find('a', class_='pager pager-next')\n\n # Extract the link from this element\n if next_page_tag:\n page_url = self.base_url_apartments + next_page_tag['href']\n return generate_soup(page_url)\n else:\n return None" ]
[ "0.6693614", "0.6458025", "0.6130959", "0.60346013", "0.5836712", "0.5675163", "0.5640021", "0.5605672", "0.5547256", "0.55228585", "0.55079174", "0.5505908", "0.54967844", "0.54782814", "0.5464111", "0.5452872", "0.5443018", "0.54381824", "0.5421229", "0.5410125", "0.5410125", "0.5406889", "0.5403304", "0.54029965", "0.5354579", "0.5347811", "0.53345656", "0.53028804", "0.5272502", "0.5263614" ]
0.75420415
0
Given an XForm instance, try to grab the app id, returning None if not available. This is just a shortcut since the app_id might not always be set.
def get_app_id(form): return getattr(form, "app_id", None)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def app_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"app_id\")", "def app_id(self):\n return self._app_id or self._modules['default'].data.get('application')", "def app_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"app_id\")", "def server_app_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"server_app_id\")", "def application_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"application_id\")", "def application_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"application_id\")", "def application_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"application_id\")", "def get_app(app_id, app_set):\n if app_id in app_set:\n return app_set[app_id]\n if 'default' in app_set:\n return app_set['default']\n return None", "def application_id(self) -> Optional[str]:\n return pulumi.get(self, \"application_id\")", "def client_app_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_app_id\")", "def single_tenant_app_id(self):\n if \"singleTenantAppId\" in self._prop_dict:\n return self._prop_dict[\"singleTenantAppId\"]\n else:\n return None", "def app_id(self):\n return self._app_id", "def app_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"app_id\")", "def application_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_id\")", "def application_object_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"application_object_id\")", "def app_installation_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"app_installation_id\")", "def app_id(self):\n return self._chromecast.app_id if self._chromecast else None", "def appid(self):\n return self._item[\"appid\"]", "def app_id(self) -> str:\n return self._app_id", "def getAppRefByPidofapp(processid):\n try:\n _pidreference = atomac.getAppRefByPid(processid)\n logging.info(\"Application RefferenceID : %s\" % _pidreference)\n except Exception as er:\n logging.info('Not able to get Application ReferenceID')\n return False\n return _pidreference", "def _app_or_default(app=None):\n if app is None:\n return getattr(_tls, \"current_app\", None) or default_app\n return app", "def get_unique_id(self, app):\n if self.module_unique_id:\n return self.module_unique_id\n\n if self.form_module_id:\n return f\"{self.form_module_id}.{self.form_id}\"\n\n # legacy data does not have 'form_module_id'\n form = app.get_form(self.form_id)\n return f\"{form.get_module().unique_id}.{self.form_id}\"", "def get_id(self, app_name):\n _id = []\n apps = [app for app in self.applications.response if app.name == app_name]\n if len(apps) > 0:\n return apps[0].id", "def msa_app_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"msa_app_id\")", "def secondary_app_id(self) -> Optional[str]:\n return pulumi.get(self, \"secondary_app_id\")", "def _app_id(self):\n return '{}-{}'.format(self.config['app']['name'],\n self.config['app']['version'])", "def app_resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"app_resource_id\")", "def app_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"app_name\")", "def validate_app(logger, base_url, group_id, token, app_id):\n app_response = apps.get_app(base_url, group_id, token, app_id)\n if not app_response:\n return None\n\n app_dict = common.convert_response(app_response)\n if not app_dict:\n return None\n\n logger.info(\"App: {}\".format(app_dict))\n return app_dict", "def application_id(self) -> 
pulumi.Output[str]:\n return pulumi.get(self, \"application_id\")" ]
[ "0.74062455", "0.7242979", "0.69978297", "0.69140005", "0.6881562", "0.6881562", "0.6881562", "0.6868891", "0.6766424", "0.6757681", "0.64767206", "0.64748746", "0.64700586", "0.6431081", "0.63553536", "0.6343776", "0.6317141", "0.6298396", "0.6253233", "0.624976", "0.62409395", "0.6172502", "0.61171687", "0.60530347", "0.60193497", "0.6010677", "0.59932953", "0.59811676", "0.59470105", "0.5943655" ]
0.8446758
0
>>> is_valid_case_type('foo') True >>> is_valid_case_type('foobar') True >>> is_valid_case_type('foo bar') False >>> is_valid_case_type('') False >>> is_valid_case_type(None) False
def is_valid_case_type(case_type): return bool(_case_type_regex.match(case_type or ''))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _str_validator(arg):\n if arg is None or arg is '' or type(arg) != str:\n raise ValueError('Incorrect value: input should be a string')", "def typeValidator(self, food_type):\n if type(food_type) != str:\n API.abort(400, error_messages[16]['int_type'])\n\n # check if the contents of title have characters between a-z and A-Z\n elif not re.match(r\"(^[a-zA-Z_ ]+$)\", food_type) or food_type.isspace():\n API.abort(\n 400, error_messages[17]['wrong_format_ty'])\n\n return True", "def check_str(val, name, allow_none=False, allow_empty=False):\n\n if val is None:\n if not allow_none:\n raise ValueError(name + ' of value ' + str(val) + ' should not be None.')\n else:\n\n if not isinstance(val, str) and not isinstance(val, unicode):\n raise TypeError(name + ' of value ' + str(val) + ' should be a string.' + ' but is of type ' + type(val).__name__)\n\n elif len(val.strip()) == 0 and not allow_empty:\n raise ValueError(name + ' of value ' + str(val) + ' should not empty string.')", "def strtype(x):\n if type(x) == str:\n return True\n if type(x) == unicode:\n return True\n return False", "def check_input_type(var, type_name):\n\n type_options = [\"int\", \"float\", \"Date\", \"Region\"]\n if type_name == type_options[0]:\n if int(var):\n return True\n else:\n return False\n elif type_name == type_options[1]:\n if float(var):\n return True\n else:\n return False\n elif type_name == type_options[2]:\n if datetime.date.fromisoformat(var):\n return True\n else:\n return False\n elif type_name == type_options[3]:\n valid_regions = [\"NW\", \"SW\", \"MN\", \"MS\", \"NE\", \"SE\"]\n is_valid = False\n for region in valid_regions:\n if var == region:\n is_valid = True\n return is_valid\n else:\n Exception(\"This type doesn't exist in the checker!\")", "def validate_str(val, allow_none=False, allow_empty=False):\n\n if val is None:\n if not allow_none:\n return False\n else:\n\n if not isinstance(val, str) and not isinstance(val, unicode):\n return False\n\n elif len(val.strip()) == 0 and not allow_empty:\n return False\n\n return True", "def _is_string(arg):\n return isinstance(arg, types.StringTypes)", "def test_string_or_number():\n assert is_string_or_number(None) is None\n assert is_string_or_number(1) is None\n assert is_string_or_number(1.1) is None\n assert is_string_or_number('1.1') is None\n assert is_string_or_number([])", "def isString(x):\n if type(x) == str:\n return True\n else:\n return False", "def is_valid(self, value) -> 'True|str':\n if self.base_type is not None and not isinstance(value, self.base_type):\n return f'Value {value} is not type of {self.base_type}.'\n return True", "def is_string(val):\n return (\n isinstance(val, unicode) or \\\n isinstance(val, str) \n )", "def validate_type(type):\n\n types_upper = [i.upper() for i in officeTypes]\n if type.upper() in types_upper:\n return True\n return False", "def check_type(character: str):\n if character.isupper():\n return 'upper'\n elif character.islower():\n return 'lower'\n elif character.isspace():\n return 'space'\n elif character in string.punctuation:\n return 'punc'\n else:\n return 'digit'", "def check_type(character: str):\n if character.isupper():\n return 'upper'\n elif character.islower():\n return 'lower'\n elif character.isspace():\n return 'space'\n elif character in string.punctuation:\n return 'punc'\n else:\n return 'digit'", "def __CheckType(self, t):\n t = string.upper(t)\n \"\"\" convert lower letters to upper letters \"\"\"\n if not t in ['MX', 'CNAME', 'A', 'NS', 'PTR']:\n return None\n else:\n return 
t", "def _validate_str(val):\n if not isinstance(val, str):\n raise ValueError(\"Passed value {} is not a string\".format(val))\n return val", "def _check_types(variables):\n for var in variables:\n if not isinstance(var, (str, type(None))):\n raise ValueError(\"You supplied a value of type %s, where a \"\n \"string or None was expected.\" % type(var))", "def is_of_type(cls, value) -> bool:\n # UTF8 = 'utf-8'\n # UTF16 = 'utf-16'\n # UTF32 = 'utf-32'\n # ASCII = 'ascii'\n # BINARY = 'binary'\n # OCTAL = 'octal'\n # HEXADECIMAL = 'hexadecimal'\n # CP1252 = 'cp1252'\n # WINDOWS1252 = 'windows-1252'\n # UNICODEESCAPE = 'unicode-escape'\n\n v = None\n if cls == cls.UTF8 or cls == cls.UTF16 or cls == cls.UTF32 or cls == cls.UNICODEESCAPE:\n try:\n v = bytes(value)\n except:\n return False\n\n if cls == cls.ASCII:\n try:\n v = ascii(value)\n except:\n return False\n\n if cls == cls.BINARY:\n try:\n v = bin(value)\n except:\n return False\n\n if cls == cls.OCTAL:\n try:\n v = oct(value)\n except:\n return False\n\n if cls == cls.HEXADECIMAL:\n try:\n v = hex(value)\n except:\n return False\n\n if cls == cls.WINDOWS1252 or cls == cls.CP1252:\n try:\n v = str(value)\n except:\n return False\n return True", "def is_str(value):\n if not type(value) is str:\n return False\n else:\n return True", "def is_valid_type(type):\n return type in type_to_adapter", "def _valid_input_type(self, input_type):\n # pylint: disable=W0613, R0201\n return True", "def validate_typeID(self, type_ID):\n if type(type_ID) == str:\n for letter in type_ID:\n if not letter.isalpha() and not letter.isdigit():\n return False\n return True\n return False", "def check_argtype(val, type_, name, or_none=False):\n if not (isinstance(val, type_) or (or_none and val is None)):\n raise TypeError('{} should be of type {}, got {}'.format(\n name, type_, type(val)))", "def __type_okay(value, argtype, allow_none=False):\n if value is None:\n return allow_none\n if isinstance(argtype, str):\n if argtype in __macros:\n return __type_okay(value, __macros[argtype], allow_none=allow_none)\n elif argtype is 'int':\n return __is_int(value)\n elif argtype is 'float':\n return __is_float(value)\n return argtype in [cls.__name__ for cls in value.__class__.__mro__]\n elif isinstance(argtype, type):\n if argtype == six.text_type:\n return isinstance(value, six.text_type) or isinstance(value, six.string_types)\n elif argtype == str:\n return isinstance(value, six.string_types)\n elif argtype is int:\n return __is_int(value)\n elif argtype is float:\n return __is_float(value)\n return isinstance(value, argtype)\n elif isinstance(argtype, tuple) or isinstance(argtype, list):\n return any(__type_okay(value, i) for i in argtype)\n else: # argtype is None\n return True", "def validate(self, value):\n if super().validate(value):\n return (value is None) or (isinstance(value, str) and self._validate_length(value))\n else:\n return False", "def test_typeerror_in_case_of_string(self):\n eq_(None,grepit(\"\",\"\",\"\"))", "def IsEmptyString (s) :\n if s is None : return True\n elif isinstance (s, str) :\n return len (s) == 0 \n else :\n raise PQHException (\"the type is unexpected %s\" % str (type (s)))", "def _validate_type(self, key, type_):\n if type_ is None:\n type_ = \"\"\n \n if not isinstance(type_, (str, unicode)):\n raise TypeError(\"FileLink.type should be a str or unicode, \"\n \"not %s\" % type_.__class__.__name__)\n \n return type_", "def is_string(atype):\n if atype == str:\n return True\n elif PY2:\n if atype == unicode:\n return True\n return 
False", "def typeIsString(obj):\n return type(obj) is str or _haveTypeUnicode and type(obj) is unicode" ]
[ "0.6593394", "0.65861076", "0.651669", "0.64556617", "0.6438165", "0.6436408", "0.6325066", "0.63242894", "0.63235027", "0.6314474", "0.62710524", "0.62245846", "0.61829215", "0.616322", "0.6158998", "0.61532086", "0.61133486", "0.6104421", "0.6072779", "0.60565364", "0.60433173", "0.6029648", "0.60220265", "0.60010797", "0.598927", "0.59857494", "0.59826845", "0.59792966", "0.5964317", "0.5956955" ]
0.8424941
0
Gets an MD5 checksum of the file or directory at the given location.
def get_md5(location: str, ignore_hidden_files: bool=True) -> Optional[str]: if not os.path.exists(location): return None if os.path.isfile(location): with open(location, "rb") as file: content = file.read() return hashlib.md5(content).hexdigest() else: return dirhash(location, "md5", ignore_hidden=ignore_hidden_files)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def svn_fs_file_md5_checksum(*args):\r\n return _fs.svn_fs_file_md5_checksum(*args)", "def checksum(path):\n with open(path, 'r') as f:\n return md5(f.read()).digest()", "def find_md5checksum(device=None, file_name=None):\n if device is None or file_name is None:\n raise Exception(\"device and file_name are mandatory arguments\")\n\n resp = device.shell(command=\"md5sum \" + file_name).response()\n match = re.search(\"([a-fA-F\\d]{32})\\\\s+\" + file_name +\".*\", resp, re.DOTALL)\n\n if not match:\n device.log(level='ERROR', message=\"Not able to find the checksum\")\n raise Exception(\"Not able to find the checksum\")\n\n md5sum = match.group(1)\n return md5sum", "def md5checksum(file_name):\n from hashlib import md5\n hash_md5 = md5()\n with open(file_name, \"rb\") as f:\n for chunk in iter(lambda: f.read(32768), b\"\"):\n hash_md5.update(chunk)\n return hash_md5.hexdigest()", "def md5_checksum(file_path):\n with open(file_path, 'rb') as fh:\n m = hashlib.md5()\n while True:\n data = fh.read(8192)\n if not data:\n break\n m.update(data)\n return m.hexdigest()", "def get_checksum(filename):\n # You could use popen here. I read about it, and subprocess is meant\n # to replace os.popen, so I used it instead.\n\n # First, run the command md5 sum with filename as input.\n # It's stored as a subprocess.CompletedProcess\n process = subprocess.run(['md5sum',filename], capture_output=True)\n \n # Use the method stdout from subprocess.CompletedProcess (seen in\n # the Python docs) to get the output. As seen in the book, md5sum will\n # output the checksum follwed by the filename. split() will put\n # those two elements into a list, and [0] will take the first element,\n # which will be the checksum.\n checksum = process.stdout.split()[0]\n return checksum", "def get_checksum(input_fname):\n with open(input_fname, \"rb\") as infile:\n file_contents = infile.read()\n\n checksum = hashlib.md5(file_contents).hexdigest()\n return checksum", "def CalcMD5(filepath):\n with open(filepath,'rb') as f:\n md5obj = hashlib.md5()\n md5obj.update(f.read())\n return md5obj.hexdigest()", "def checksumFile(filename):\n return md5File(filename)", "def md5sum(file_name):\n f = open(file_name, mode='rb')\n h = hashlib.md5()\n h.update(f.read())\n return h.hexdigest()", "def md5sum_file(filepath):\n hasher = hashlib.md5()\n with open(filepath, 'rb') as infile:\n for chunk in util.chunk_reader(infile):\n hasher.update(chunk)\n return hasher.hexdigest()", "def calc_file_md5(file_path):\n hash_md5 = str()\n method = hashlib.md5()\n if not os.path.exists(file_path):\n logger.error(\"File(%s) don not exist, can not calculation file hash\" % file_path)\n return hash_md5\n\n with open(file_path, 'rb') as f:\n for chunk in read_chunks(f, 1024 * 1024):\n method.update(chunk)\n return method.hexdigest()", "def get_md5sum_file(fullname, blksize=2**15):\n\n md5 = hashlib.md5()\n with open(fullname, 'rb') as fhandle:\n for chunk in iter(lambda: fhandle.read(blksize), b''):\n md5.update(chunk)\n return md5.hexdigest()", "def md5_sum_file(path):\n with open(path, 'rb') as f:\n m = hashlib.md5()\n while True:\n data = f.read(8192)\n if not data:\n break\n m.update(data)\n return m.hexdigest()", "def md5(self):\n return md5file(self.abspath)", "def calculate_md5(filename, block_size=2**20):\n import hashlib\n\n md5 = hashlib.md5()\n try:\n with open(filename, 'rb') as f:\n while True:\n data = f.read(block_size)\n if not data:\n break\n md5.update(data)\n except IOError:\n print('File \\'' + filename + '\\' not found!')\n return 
None\n except:\n return None\n return md5.hexdigest()", "def checksum(self, filepath) -> str:\n if os.path.exists(filepath):\n hash_md5 = md5()\n with open(filepath, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_md5.update(chunk)\n return urlsafe_b64encode(hash_md5.digest()).decode('utf-8')\n\n return \"\"", "def compute_checksum(filename):\n cmd = 'md5sum ' + filename\n return pipe(cmd)", "def apkdownloadmirror_get_md5_sum(soup, **_):\n return soup.find(text=re.compile(r'File APK Md5:')).next.text.strip()", "def get_checksum(path: Union[Path, str]) -> str:\n path = Path(path)\n if not (path.is_file() or path.is_dir()):\n msg.fail(f\"Can't get checksum for {path}: not a file or directory\", exits=1)\n if path.is_file():\n return hashlib.md5(Path(path).read_bytes()).hexdigest()\n else:\n # TODO: this is currently pretty slow\n dir_checksum = hashlib.md5()\n for sub_file in sorted(fp for fp in path.rglob(\"*\") if fp.is_file()):\n dir_checksum.update(sub_file.read_bytes())\n return dir_checksum.hexdigest()", "def checksum(self):\n hasher = md5()\n with self.open('rb') as fd:\n buf = fd.read(_BLOCKSIZE)\n while len(buf) > 0:\n # TODO Could cancel work here.\n hasher.update(buf)\n buf = fd.read(_BLOCKSIZE)\n digest = safetype(hasher.hexdigest())\n return digest", "def calculate_md5sum_of_a_file(context, file_name, file_path):\n command = \"md5sum \" + file_path + \"/\" + file_name + \" | awk {'print $1'}\"\n return context.cme_session.send_ssh_command(command=command)", "def calculate_md5_of_dir(self, verbose=0):\n directory = self.cfg['sharing_path']\n if verbose:\n start = time.time()\n md5Hash = hashlib.md5()\n if not os.path.exists(directory):\n self.stop(1, 'Error during calculate md5! Impossible to find \"{}\" in user folder'.format(directory))\n\n for root, dirs, files in os.walk(directory, followlinks=False):\n for names in files:\n filepath = os.path.join(root, names)\n rel_path = self.relativize_path(filepath)\n if rel_path in self.client_snapshot:\n md5Hash.update(self.client_snapshot[rel_path][1])\n md5Hash.update(hashlib.md5(filepath).hexdigest())\n else:\n hashed_file = self.hash_file(filepath)\n if hashed_file:\n md5Hash.update(hashed_file)\n md5Hash.update(hashlib.md5(filepath).hexdigest())\n else:\n print \"can't hash file: \", filepath\n\n if verbose:\n stop = time.time()\n print stop - start\n return md5Hash.hexdigest()", "def md5_sum(content):\r\n md5_hash = hashlib.md5(content).hexdigest()\r\n return md5_hash", "def calc_md5(infile, block_size=256*128):\n logger = logging.getLogger(__name__)\n logger.info(f'Calculating md5 of {infile}')\n md5 = hashlib.md5()\n with open(infile, 'rb') as f:\n for chunk in iter(lambda: f.read(block_size), b''):\n md5.update(chunk)\n return md5.hexdigest()", "def get_md5sum(host, fqpath):\n command = \"md5sum %s\" % fqpath\n rcode, rout, rerr = g.run(host, command)\n\n if rcode == 0:\n return rout.strip()\n\n g.log.error('md5sum failed: %s' % rerr)\n return None", "def local_md5(filepath, blocksize=65536):\n hasher = hashlib.md5()\n with open(filepath, 'rb') as source:\n buf = source.read(blocksize)\n while len(buf) > 0:\n hasher.update(buf)\n buf = source.read(blocksize)\n return hasher.hexdigest()", "def _get_md5(name, path):\n output = run_stdout(\n name, f'md5sum \"{path}\"', chroot_fallback=True, ignore_retcode=True\n )\n try:\n return output.split()[0]\n except IndexError:\n # Destination file does not exist or could not be accessed\n return None", "def md5sum(filename):\n with open(filename, mode='rb') as f:\n 
d = hashlib.md5()\n for buf in iter(functools.partial(f.read, 1024*100), b''):\n d.update(buf)\n return d.hexdigest()", "def md5sum(fileSrc):\n md5 = hashlib.md5()\n try:\n with open(fileSrc, \"rb\") as fd:\n while True:\n content = fd.read(2**20)\n if not content:\n break\n md5.update(content)\n except IOError:\n print(fileSrc + \" Not found\")\n exit(1)\n return md5.hexdigest()" ]
[ "0.73885596", "0.7376658", "0.73244494", "0.7319759", "0.72340095", "0.72339094", "0.7208211", "0.7188168", "0.71864325", "0.71248466", "0.7078891", "0.7071127", "0.69936997", "0.6981961", "0.69805104", "0.6954879", "0.69371307", "0.6923276", "0.69167644", "0.6897861", "0.688246", "0.6863148", "0.68330306", "0.68239886", "0.6823337", "0.68150413", "0.6806275", "0.68054265", "0.6804917", "0.67995656" ]
0.7807358
0
Checks if the given URL is accessible. This function attempts to get the content at the location; avoid pointing to the location of a huge file!
def is_accessible(url: str) -> bool: try: return requests.get(url).status_code == requests.codes.ok except Exception: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_check_url(self,url):\n r = requests.get(url).status_code\n if r==requests.codes.ok:\n return(True)\n else:\n print \"something wrong! status_code: \" + r\n return(False)", "def url_exists(url):\n # Check for URLs we can't validate\n if url.startswith(\"https://kiwiirc.com\"):\n return True\n if url.startswith(\"https://www.projectcalico.org\"):\n return True\n\n try:\n urllib2.urlopen(url)\n return True\n except urllib2.HTTPError, e:\n print_bullet(\"Hit error reading %s: %s\" % (url, e))\n return False\n except urllib2.URLError, e:\n print_bullet(\"Hit error reading %s: %s\" % (url, e))\n return False", "def is_downloadable(url):\n h = requests.head(url, allow_redirects=True)\n header = h.headers\n content_type = header.get('content-type')\n if 'text' in content_type.lower():\n return False\n if 'html' in content_type.lower():\n return False\n return True", "def download_allowed(self, url, scheme, netloc):\n robot = urllib.robotparser.RobotFileParser('%s://%s/%s' % (scheme, netloc, config.ROBOTS))\n try:\n robot.read()\n except ValueError:\n raise urllib.error.URLError('<urlopen error no protocol given>')\n\n return robot.can_fetch(config.USER_AGENT, url)", "def check_url_availability(url):\n\n response = website_alive.get_response_object(url)\n return response.status_code == requests.codes['ok']", "def validate_url(url):\n response, content = get_response_from_file(url)\n\n if response == None and content == None:\n response, content = get_response_and_content(url)\n\n if response == None:\n return url, url, 0, \"\", \"N\", \"N\", \"N\", hit(\"No Response\"), \"false\"\n else:\n #print(url, get_visible_text(content))\n return evaluate_content_for_200s(response, url, content)", "def get_content_from_url(link):\n # sleep time before making web request\n sleep(SCRAPING_REQUEST_STAGGER)\n response = requests.get(link)\n if response.status_code != 200:\n return False\n return response.content", "def check_url(url, read_lines=False):\n lines = None\n try:\n # Access URL\n url_stream = urllib2.urlopen(url, timeout=2)\n\n # Read lines\n if read_lines is True:\n lines = url_stream.readlines()\n except urllib2.URLError as url_error:\n url_stream = url_error\n except socket.timeout:\n return False, 'Time out. Try again!'\n\n # Return result\n if url_stream.code in (200, 401):\n url_good = True\n else:\n url_good = False\n\n # Close connect\n url_stream.close()\n\n # Return\n if read_lines is True:\n return url_good, lines\n if url_good is False:\n error_message = 'Unable to access %s. Check internet access. 
Code %d' % (url, url_stream.code)\n else:\n error_message = ''\n\n return url_good, error_message", "def check_link(url):\n try:\n\n r = requests.get(url)\n r.raise_for_status()\n r.encoding = r.apparent_encoding\n return r.text\n except:\n print('Connection Failed!!!')", "def is_downloadable(url) -> bool:\n content_type = requests.head(url, allow_redirects=True).headers.get('content-type')\n if 'text' in content_type.lower() or 'html' in content_type.lower():\n return False\n return True", "def url_exist(url:str) -> bool:\r\n with closing(requests.head(url, allow_redirects=True)) as r:\r\n return r.ok", "def url_checker(url_str):\n file_msg = fd.Program_Msg(__file__)\n ## Checking input parameters\n if not (isinstance(url_str, str)):\n msg = '{0} `url_str` ({1}) is not a STRING!'.format(file_msg,\n type(url_str))\n raise LSSUtils_Error(msg)\n ##\n ## Checking Website\n request_url = requests.get(url_str)\n if (request_url.status_code != 200):\n msg = '{0} `url_str` ({1}) does not exist!'.format(file_msg, url_str)\n raise LSSUtils_Error(msg)", "def file_exist(file_url):\n try:\n response = requests.head(file_url)\n if 200 <= response.status_code < 300:\n return True\n return False\n except ConnectionError:\n return False", "def url_check(url):\n try:\n request = urllib.request.Request(url)\n request.get_method = lambda: 'HEAD'\n urllib.request.urlopen(request)\n return True\n \n except ValueError:\n return False\n\n except urllib.request.HTTPError:\n return False\n \n except URLError:\n return False", "def get_url_content(url):\n try:\n print(\"HTTP request to the URL {}\".format(url))\n page = requests.get(url, headers=http_headers, timeout=10)\n except requests.exceptions.Timeout:\n print(\"Timeout exceeded for URL {}\".format(url))\n except requests.exceptions.RequestException:\n print(\"Broken connection for URL {}\".format(url))\n finally:\n return page", "def url_exists(url):\n\n try:\n connection = urlopen(url)\n return connection.getcode() < 400\n except Exception as e:\n return False", "def check_url(url_link):\n res = requests.get(url_link, allow_redirects =True)\n if res.status_code == 200:\n print('valid URL \\n')\n return url_link\n else:\n print('Oupps there is something wrong with your URL. Run the program again!! ')\n return res.status_code", "def url_was_found(url=\"localhost:5000/health\"):\n res = requests.get(url).json()\n\n if res['status_code'] == 200:\n return True\n elif res['status_code'] == 404:\n return False\n else:\n raise UnexpectedResponseError(\"Expected 200 OK or 404, got {}.\\n\".format(res['status']), \"Full response : {}\".format(res))", "def test_unfetchable_url(self):\r\n url = u'file://test.html'\r\n read = readable.ReadUrl.parse(url)\r\n self.assertEqual(read.status, 901)", "def checkStatus(url):\n def checkForIndexPage(r):\n \"\"\"Checks whether it a given url is actually an Index Of page. 
Takes in a Request object\"\"\"\n soup = BeautifulSoup(r.text, 'lxml')\n head = soup.find('h1')\n if head != None and head.string != None and (\"Index of \" in head.string):\n return \"Shows 'Index Of' page ✘\" \n else:\n return \"Displays properly ✓\"\n\n returnString = \"\"\n try:\n r = requests.get(url)\n returnString += str(r.status_code) \n if r.status_code == 200: # if the page is accessible, then check whether it displays properly\n returnString += \"\\n\\t\" + checkForIndexPage(r)\n return returnString\n except Exception as e:\n return(e)", "def check_url(url: str) -> bool:\n try:\n potential_error = driver.find_element_by_xpath(\"/html/body/div[5]/div/div/div[1]/div/div/div/section/div[2]/div\").text\n if '403' in potential_error:\n return True\n except:\n return False", "def is_valid_url(url: str) -> bool:\n try:\n requests.get(url)\n except requests.exceptions.RequestException:\n return False\n return True", "def test_url(quartus, part, url):\n print(\"\\rChecking %s/%s \" % (quartus, part), end='')\n try:\n response = urllib.request.urlopen(url)\n headers = response.getheaders()\n return True\n except KeyboardInterrupt:\n sys.exit(1)\n except:\n return False", "def _url_exists(url):\n h = httplib2.Http()\n try:\n resp = h.request(url, 'HEAD')\n if resp[0].status == 200:\n return True\n except (httplib2.RelativeURIError, httplib2.ServerNotFoundError):\n return False", "def get_page(self, url):\n\n lynx = True\n\n if lynx:\n try:\n lynxcmd = \"lynx -dump -source -useragent='Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)' %s\" % url\n content = os.popen(lynxcmd).read()\n except IOError, (errno, strerror):\n return False\n else:\n try:\n location = urllib2.urlopen(url)\n except IOError, (errno, strerror):\n return False\n content = location.read()\n\n # Clear out all troublesome whitespace\n content = content.replace(\"\\n\", \"\")\n content = content.replace(\"\\r\", \"\")\n content = content.replace(\"\\t\", \"\")\n content = content.replace(\"> \", \">\")\n content = content.replace(\" \", \" \")\n content = self.latin1_to_ascii(content)\n\n if not lynx:\n location.close()\n return content", "def load_url_content(url):\n try:\n r = requests.get(url)\n if r.ok:\n return r.text\n else:\n return None\n except Exception:\n return None", "def url_is_alive(url: str) -> object:\n from ssl import _create_unverified_context\n from urllib.error import HTTPError, URLError\n from urllib.request import urlopen\n\n try:\n return urlopen(url, context=_create_unverified_context())\n except HTTPError:\n return False\n except URLError:\n return False", "def check_url(url):\n return get_svninfo(url) != {}", "def is_ok(url: str) -> bool:\n try:\n resp = requests.get(url)\n except:\n return False\n return True if math.floor(resp.status_code / 100) == 2 else False", "def check_if_downloaded( url, debug_print = True ):\n\t# Get pdf filename\n\tfilename = basename( url )\n\tfileno, ext_pdf = splitext( filename )\n\tfor file in listdir( getcwd() ):\n\t\tif fileno in file:\n\t\t\tif debug_print:\n\t\t\t\tprint 'Skipping %s' % ( filename )\n\t\t\treturn True\n\treturn False" ]
[ "0.69252676", "0.67921555", "0.67351407", "0.67114097", "0.6709197", "0.6677159", "0.66536385", "0.6605789", "0.65885746", "0.65593976", "0.6556228", "0.65205455", "0.651457", "0.6473422", "0.64675033", "0.6445377", "0.64273685", "0.64245945", "0.6419589", "0.64098907", "0.640868", "0.64059794", "0.63480127", "0.63265234", "0.6298375", "0.6289862", "0.6272423", "0.62687564", "0.6239459", "0.62371707" ]
0.75161856
0
Toggles polling on or off. Connected to Poll button.
def toggle_polling(self): self.polling = not self.polling if not self.polling: # print('In toggle polling') self._stop_loop_feedback() self._start_loop_poll() if self.polling else self._stop_loop_poll()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def toggle(self):\n self._state.is_on = not self._state.is_on\n self.send_command(Command.TOGGLE, [])", "def toggle(self):\n s = self.status()\n if s == self.POWER_OFF:\n self.on()\n else:\n self.off()\n return self.status()", "def change_status():\n if self.on:\n connect.SOCKET.sendall(bytes(\"OFF\\n\", \"utf-8\"))\n self.on = False\n else:\n connect.SOCKET.sendall(bytes(\"ON\\n\", \"utf-8\"))\n self.on = True", "def led_toggle(self):\n if self.state == GPIO.LOW:\n self.state = GPIO.HIGH\n else:\n self.state = GPIO.LOW\n return self.update_status()", "def toggle(self, **kwargs):\n self.on = False if self.on else True", "def toggle(self):\n if self._state in [STATE_OFF, STATE_IDLE, STATE_STANDBY]:\n self._state = STATE_ON\n else:\n self._state = STATE_OFF", "def switch_on(self):\n if threading.current_thread() != self._blinking_thread:\n self._blinking_thread.unregister(self)\n GPIO.output(self.pin, GPIO.HIGH)", "def poll(self):\n\tself.met = self.button.poll()", "def polling_call(self) -> global___Snippet.ClientCall:", "def affection_status_switch_on(self):\n self._affection_status_switch = False", "def toggle(self):\n self._interrupt_flash()\n GPIO.output(self.pin, GPIO.LOW if self.on else GPIO.HIGH)\n self.on = not self.on", "def poll(self, poll_input):", "def toggle_call(self) -> None:", "async def async_turn_on(self, **kwargs):\n if self.is_on == False:\n await self.async_call_service(self._cfg.get('turn_on_service')) \n self._state = True", "def affection_status_switch_on(self):\n self._affection_status_switch = True", "def toggle(self) -> None:\n ...", "def toggle(self):\n if self.is_enabled:\n self.disable()\n else:\n self.enable()", "def toggle_refresh(self, event):\n self._continue = not self._continue\n if self._continue:\n self.canvas.itemconfig(\"toggle-text\", text=\"Stop\")\n self.refresh(self._refresh_rate)\n else:\n self.canvas.itemconfig(\"toggle-text\", text=\"Start\")", "def _on_toggle_and_run(self, kwargs: dict) -> None:\n self.toggle(state=kwargs[CONF_STATE])\n\n if kwargs[CONF_STATE] == \"on\":\n state = \"off\"\n else:\n state = \"on\"\n\n self.handles[HANDLE_VACATION_MODE] = self.run_in(\n self._on_toggle_and_run, randint(5 * 60, 60 * 60), state=state\n )", "async def async_turn_on(self, **kwargs: Any) -> None:\n await self.async_publish(\n self._config[CONF_COMMAND_TOPIC],\n self._config[CONF_PAYLOAD_ON],\n self._config[CONF_QOS],\n self._config[CONF_RETAIN],\n self._config[CONF_ENCODING],\n )\n if self._optimistic:\n # Optimistically assume that switch has changed state.\n self._attr_is_on = True\n self.async_write_ha_state()", "def pollswitch(ip_addr, req_obj='mac', oper=\"SCAN\"):\n with database.session():\n poll_switch.poll_switch(ip_addr, req_obj='mac', oper=\"SCAN\")", "def bulb_toggle():\n tx = zb_explicit_command\n tx[\"dest_addr_long\"] = GE_LINK_BULB_MAC\n tx[\"cluster\"] = CLUSTER_A\n tx[\"data\"] = DATA_TOGGLE\n response = zb.Send(tx)", "def toggled_comunication(self):\n if self.actionPC_Monitor.isChecked() and self.actionPC_Monitor.isEnabled():\n self.actionPC_Monitor.setEnabled(0)\n self.actionPC_Sensor_Actuador.setChecked(0)\n self.actionPC_Sensor_Actuador.setEnabled(1)\n self.monitor_environment()\n \n elif self.actionPC_Sensor_Actuador.isChecked() and self.actionPC_Sensor_Actuador.isEnabled():\n self.actionPC_Sensor_Actuador.setEnabled(0)\n self.actionPC_Monitor.setChecked(0)\n self.actionPC_Monitor.setEnabled(1)\n self.actuator_environment()", "def toggle_valve():\n new_status = not tank_valve_open\n print(\"- Toggling valve status to 
'{}'.\".format(\"Open\" if new_status\n else \"Closed\"))\n set_valve_open(new_status)", "def toggle(self):\n self.checked = not self.checked\n if self.command:\n self.command(self.name)", "async def async_turn_on(self, **kwargs: Any) -> None:\n await self.call_state_change(\"open\")", "def toggle(self):\n self.open = not self.open", "def toggle(self) -> None:", "def toggle(self) -> None:", "def toggled_reload(self):\n if not self.connect_serial():\n self.pushButton_reload.setChecked(0)\n return\n \n if self.pushButton_reload.isChecked():\n self.listWidget_link.clear()\n word = struct.pack(\"BBBBBBBB\", ID_DEVICES,0,0,0,0,0,0,0)\n self.timer_data.start(DATA_TIME)\n \n else:\n word = struct.pack(\"BBBBBBBB\", ID_DEVICES + ID_STOP,0,0,0,0,0,0,0)\n if (not self.pushButton_monitor.isChecked()):\n self.timer_data.stop()\n \n self.textBrowser.append(QtGui.QApplication.translate(\"MainWindow\", \"Sent : \", None, QtGui.QApplication.UnicodeUTF8)+binascii.hexlify(word)+\"\\n\")\n \n self.ser.write(word)" ]
[ "0.67975694", "0.6720252", "0.65820575", "0.64206886", "0.6349977", "0.61981577", "0.6129643", "0.61022615", "0.60992473", "0.6076896", "0.60741454", "0.60673225", "0.60669583", "0.6020311", "0.6000121", "0.5994595", "0.5972944", "0.59448975", "0.59006256", "0.5896034", "0.5889205", "0.58841187", "0.5877878", "0.5877455", "0.5867218", "0.586162", "0.5856785", "0.5854521", "0.5854521", "0.5849882" ]
0.8313653
0
Main qbpm update loop. Reads new QBPM / ring current values and updates plots. Generator for Qt timer method.
def _read_qbpm_loop(self): while True: self.qbpm.read_qbpm() self._plot_update() pitch_position = self.dcm_pitch_tserver.Position self.set_x2pitchlabel() if self.lbutton.isChecked(): fname = 'qbpm_log.csv' if not os.path.isfile(fname): with open(fname, 'a') as f: f.write('timestamp qbpm_avgcurr qbpm_x qbpm_z pitch_position petra_curr\n') with open(fname, 'a') as f: t = self.qbpm.log_time[-1] a = self.qbpm.log_arrays['avgcurr_log'][-1] x = self.qbpm.log_arrays['posx_log'][-1] z = self.qbpm.log_arrays['posz_log'][-1] pp = pitch_position p = self.qbpm.log_arrays['petracurrent_log'][-1] l = '{} {} {} {} {}\n'.format(t, a, x ,z , pp, p) f.write(l) yield
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot(self):\n\n # initialize outside the loop to avoid memory leak\n\n plot_a = None\n\n # initial plotting scales\n vmin = 0\n vmax = 0\n pmin = 0\n pmax = 0\n\n sr = self.dio.get_properties(self.channel)['samples_per_second']\n\n if self.control.verbose:\n print 'sample rate: ', sr\n\n # initial time info\n display_lag = 60\n b = self.dio.get_bounds(self.channel)\n\n if self.control.verbose:\n print 'data bounds: ', b\n\n if self.control.start:\n dtst0 = dateutil.parser.parse(self.control.start)\n st0 = (dtst0 - datetime.datetime(1970, 1,\n 1, tzinfo=pytz.utc)).total_seconds()\n st0 = int(st0 * sr)\n else:\n st0 = int(b[0])\n\n if self.control.end:\n dtst0 = dateutil.parser.parse(self.control.end)\n et0 = (dtst0 - datetime.datetime(1970, 1,\n 1, tzinfo=pytz.utc)).total_seconds()\n et0 = int(et0 * sr)\n else:\n et0 = int(b[1])\n\n if self.control.verbose:\n\n print 'start sample st0: ', st0\n print 'end sample et0: ', et0\n\n blocks = self.control.bins * self.control.frames\n\n samples_per_stripe = self.control.num_fft * \\\n self.control.integration * self.control.decimation\n total_samples = blocks * samples_per_stripe\n\n if total_samples > (et0 - st0):\n print 'Insufficient samples for %d samples per stripe and %d blocks between %ld and %ld' % (samples_per_stripe, blocks, st0, et0)\n return\n\n stripe_stride = (et0 - st0) / blocks\n\n bin_stride = stripe_stride / self.control.bins\n\n start_sample = st0\n\n print 'first ', start_sample\n\n # get metadata\n # this could be done better to ensure we catch frequency or sample rate\n # changes\n mdt = self.dio.read_metadata(st0, et0, self.channel)\n try:\n md = mdt[mdt.keys()[0]]\n cfreq = md['center_frequencies'].ravel()[self.sub_channel]\n except (IndexError, KeyError):\n cfreq = 0.0\n\n if self.control.verbose:\n print 'processing info : ', self.control.frames, self.control.bins, samples_per_stripe, bin_stride\n\n for p in numpy.arange(self.control.frames):\n sti_psd_data = numpy.zeros(\n [self.control.num_fft, self.control.bins], numpy.float)\n sti_times = numpy.zeros([self.control.bins], numpy.complex128)\n\n for b in numpy.arange(self.control.bins):\n\n if self.control.verbose:\n print 'read vector :', self.channel, start_sample, samples_per_stripe\n\n d_vec = self.dio.read_vector(\n start_sample, samples_per_stripe, self.channel)\n data = d_vec[:, self.sub_channel]\n\n if self.control.decimation > 1:\n data = scipy.signal.decimate(data, self.control.decimation)\n sample_freq = sr / self.control.decimation\n else:\n sample_freq = sr\n\n if self.control.mean:\n detrend_fn = matplotlib.mlab.detrend_mean\n else:\n detrend_fn = matplotlib.mlab.detrend_none\n\n try:\n psd_data, freq_axis = matplotlib.mlab.psd(\n data, NFFT=self.control.num_fft, Fs=float(sample_freq), detrend=detrend_fn, scale_by_freq=False)\n except:\n traceback.print_exc(file=sys.stdout)\n\n sti_psd_data[:, b] = numpy.real(\n 10.0 * numpy.log10(numpy.abs(psd_data) + 1E-12))\n\n sti_times[b] = start_sample / sr\n\n start_sample += stripe_stride\n\n # Now Plot the Data\n ax = self.subplots[p]\n\n # determine image x-y extent\n extent = (\n 0,\n self.control.bins,\n numpy.min(freq_axis) / 1e3,\n numpy.max(freq_axis) / 1e3,\n )\n\n # determine image color extent in log scale units\n Pss = sti_psd_data\n vmin = numpy.real(numpy.median(Pss) - 6.0)\n vmax = numpy.real(numpy.median(\n Pss) + (numpy.max(Pss) - numpy.median(Pss)) * 0.61803398875 + 50.0)\n\n if self.control.zaxis:\n vmin = int(string.split(self.control.zaxis, ':')[0])\n vmax = 
int(string.split(self.control.zaxis, ':')[1])\n else:\n vmin = numpy.real(numpy.median(Pss) - 6.0)\n vmax = numpy.real(numpy.median(\n Pss) + (numpy.max(Pss) - numpy.median(Pss)) * 0.61803398875 + 50.0)\n\n im = ax.imshow(sti_psd_data, cmap='jet', origin='lower', extent=extent,\n interpolation='nearest', vmin=vmin, vmax=vmax, aspect='auto')\n\n ax.set_ylabel('f (kHz)', fontsize=8)\n\n # plot dates\n\n tick_spacing = numpy.arange(\n self.control.bins / 8, self.control.bins, self.control.bins / 8)\n ax.set_xticks(tick_spacing)\n tick_labels = []\n\n for s in tick_spacing:\n tick_time = sti_times[s]\n\n if tick_time == 0:\n tick_string = ''\n else:\n gm_tick_time = time.gmtime(numpy.real(tick_time))\n tick_string = '%02d:%02d:%02d' % (\n gm_tick_time[3], gm_tick_time[4], gm_tick_time[5])\n tick_labels.append(tick_string)\n\n ax.set_xticklabels(tick_labels)\n\n # set the font sizes\n tl = ax.get_xticklabels()\n\n for tk in tl:\n tk.set_size(8)\n del tl\n\n tl = ax.get_yticklabels()\n\n for tk in tl:\n tk.set_size(8)\n del tl\n\n print 'last ', start_sample\n\n # create a time stamp\n start_time = st0 / sr\n srt_time = time.gmtime(start_time)\n sub_second = int(round((start_time - int(start_time)) * 100))\n\n timestamp = \"%d-%02d-%02d %02d:%02d:%02d.%02d UT\" % (srt_time[0], srt_time[\n 1], srt_time[2], srt_time[3], srt_time[4], srt_time[5], sub_second)\n\n self.f.suptitle('%s %s %4.2f MHz (%s)' % (\n self.control.title, timestamp, cfreq / 1E6, self.control.path), fontsize=10)\n\n # ax.legend(fontsize=8)\n ax.set_xlabel('time (UTC)', fontsize=8)\n\n # fixup ticks\n\n tl = ax.get_xticklabels()\n for tk in tl:\n tk.set_size(8)\n del tl\n tl = ax.get_yticklabels()\n for tk in tl:\n tk.set_size(8)\n del tl\n\n self.gridspec.update()\n\n self.f.tight_layout()\n\n self.f.subplots_adjust(top=0.95, right=0.88)\n cax = self.f.add_axes([0.9, 0.12, 0.02, 0.80])\n self.f.colorbar(im, cax=cax)\n if self.control.outname:\n fname, ext = os.path.splitext(self.control.outname)\n if ext == '':\n ext = '.png'\n print \"Save plot as {}\".format(fname+ext)\n matplotlib.pyplot.savefig(fname+ext)\n if self.control.appear or not self.control.outname:\n print \"Show plot\"\n matplotlib.pyplot.show()", "def init_plot_force(nb_mus):\n # --- Curve graph --- #\n # app = pg.mkQApp(\"force\")\n # remote = []\n # layout = pg.LayoutWidget()\n # layout.resize(800, 800)\n # label = QtGui.QLabel()\n # box = []\n # rplt = []\n # row_count = 0\n # col_span = 4 if nb_mus > 8 else 8\n # for mus in range(nb_mus):\n # remote.append(rgv.RemoteGraphicsView())\n # remote[mus].pg.setConfigOptions(antialias=True)\n # app.aboutToQuit.connect(remote[mus].close)\n # box.append(QtGui.QCheckBox(f\"muscle_{mus}\"))\n # if mus >= 8:\n # layout.addWidget(box[mus], row=1, col=mus-8)\n # layout.addWidget(remote[mus], row=mus - 8 + 2, col=4, colspan=col_span)\n # else:\n # layout.addWidget(box[mus], row=0, col=mus)\n # layout.addWidget(remote[mus], row=mus + 2, col=0, colspan=col_span)\n # rplt.append(remote[mus].pg.PlotItem())\n # rplt[mus]._setProxyOptions(deferGetattr=True) ## speeds up access to rplt.plot\n # remote[mus].setCentralItem(rplt[mus])\n # layout.addWidget(label)\n # layout.show()\n # row_count += 1\n # return rplt, layout, app , box\n\n # --- Progress bar graph --- #\n # app = pg.mkQApp(\"force\")\n # layout = pg.LayoutWidget()\n # layout.resize(400, 800)\n # layout.move(0, 0)\n # box = []\n # rplt = []\n # row_count = 0\n # for mus in range(nb_mus):\n # rplt.append(QProgressBar())\n # rplt[mus].setMaximum(1000)\n # 
layout.addWidget(rplt[mus], row=mus, col=0)\n # layout.show()\n # row_count += 1\n # return rplt, layout, app\n\n # --- Bar graph --- #\n app = pg.mkQApp()\n layout = pg.plot()\n layout.resize(800, 800)\n rplt = pg.BarGraphItem(x=range(nb_mus), height=np.zeros((nb_mus)), width=0.3, brush=\"r\")\n layout.addItem(rplt)\n return rplt, layout, app", "def run(self):\n while not self.done:\n time_delta = self.clock.tick(self.fps)\n self.event_loop()\n self.update(time_delta)\n pg.display.update()\n if self.show_fps:\n fps = self.clock.get_fps()\n with_fps = \"{} - {:.2f} FPS\".format(self.caption, fps)\n pg.display.set_caption(with_fps)", "def __init__(self, simulate_feedback=False):\n super(QbpmMonitor, self).__init__()\n\n self.sources = {\n \"QBPM1 OH\" : Qbpm('hzgpp05vme0:10000/p05/i404/exp.01', 2),\n \"QBPM2 OH\" : Qbpm('hzgpp05vme0:10000/p05/i404/exp.02', 7),\n \"QBPM EH2\" : Qbpm('hzgpp05vme2:10000/p05/i404/eh2.01', 30)\n }\n default_source = \"QBPM2 OH\"\n self.set_source(default_source)\n self.title = self.qbpm.address\n self.posx_target = 0\n self.posz_target = 0\n self.avgcurr_target = 0\n self.qbpm.frequency = 5.0 # in Hz\n self.qbpm.backlog = 120 # in s\n self.polling = False\n self._generator_poll = None\n self._timerId_poll = None\n self.feedback = False\n self.feedback_threshold = 5E-9\n self._generator_feedback = None\n self._timerId_feedback = None\n self.last_corr_angle = 0\n self.feedback_time = datetime.datetime.now()\n self.dcm_bragg_tserver = tango.DeviceProxy('hzgpp05vme0:10000/dcm_bragg')\n self.dcm_bragg_angle = self.dcm_bragg_tserver.Position\n self.dcm_pitch_tserver = tango.DeviceProxy('hzgpp05vme0:10000/dcm_xtal2_pitch')\n self.dcm_energy_tserver = tango.DeviceProxy('hzgpp05vme0:10000/dcm_energy')\n self.dmm_x1rot_tserver = tango.DeviceProxy('hzgpp05vme0:10000/dmm_x1rot')\n self.dmm_bragg_angle = self.dcm_bragg_tserver.Position\n self.dmm_x2rot_tserver = tango.DeviceProxy('hzgpp05vme0:10000/dmm_x2rot')\n self.dmm_x1z_tserver = tango.DeviceProxy('hzgpp05vme0:10000/dmm_x1z')\n self.dmm_x1z_position = self.dmm_x1z_tserver.Position\n self.dmm_x2z_tserver = tango.DeviceProxy('hzgpp05vme0:10000/dmm_x2z')\n self.dmm_x2y_tserver = tango.DeviceProxy('hzgpp05vme0:10000/dmm_x2y') \n self.beamstop = tango.DeviceProxy('hzgpp05vme0:10000/HASYLAB/Petra3_P05vil.CDI.SRV/BST')\n self.undulator = tango.DeviceProxy('hzgpp05vme0:10000/p05/undulator/1')\n self.get_mono()\n\n self.heartbeat = time.time()\n self.feedback_file = '/tmp/qbpmfeedback.run'\n if os.path.isfile(self.feedback_file):\n os.remove(self.feedback_file)\n self.cycle = 0\n self.feedback_triggered = False\n self.simulate_feedback = simulate_feedback\n self.dcm_step_backlash = self.dcm_pitch_tserver.read_attribute('StepBacklash').value\n\n ################################################################################################################\n # initUI\n\n # labels\n self.source_label = QtGui.QLabel(\"source\")\n self.poll_label = QtGui.QLabel(\"poll\")\n self.feedback_label = QtGui.QLabel(\"feedback\")\n self.ll_label = QtGui.QLabel(\"backlog (s)\")\n self.freq_label = QtGui.QLabel(\"frequency\")\n self.sensitivity_label = QtGui.QLabel(\"sensitivity\")\n self.filter_label = QtGui.QLabel(\"lowpass filter\")\n self.log_label = QtGui.QLabel(\"log to file\")\n self.pitch_label = QtGui.QLabel(\"0\")\n self.set_x2pitchlabel()\n # QBOM source Combobox\n self.scbox = QtGui.QComboBox(self)\n self.scbox.addItem(\"QBPM1 OH\") # Index 0\n self.scbox.addItem(\"QBPM2 OH\") # Index 1\n self.scbox.addItem(\"QBPM EH2\") # index 
2\n self.scbox.setCurrentIndex(1) # Check if this value is consistent with default source above!\n self.scbox.activated[str].connect(self.set_source)\n # poll button\n self.rbtn = QtGui.QPushButton(self)\n self.rbtn.clicked.connect(self.toggle_polling)\n self.rbtn.resize(self.rbtn.sizeHint())\n self.rbtn.setIcon(self.style().standardIcon(QtWidgets.QStyle.SP_MediaPlay))\n # feedback button\n self.fbtn = QtGui.QPushButton(self)\n self.fbtn.clicked.connect(self.toggle_feedback)\n self.fbtn.resize(self.fbtn.sizeHint())\n self.fbtn.setIcon(self.style().standardIcon(QtWidgets.QStyle.SP_MediaPlay))\n # reset button\n reset_btn = QtGui.QPushButton('Reset', self)\n reset_btn.clicked.connect(self.qbpm.reset_logs)\n reset_btn.resize(reset_btn.sizeHint())\n # backlog text field\n self.lltext = QtGui.QLineEdit(str(self.qbpm.backlog))\n self.lltext.setValidator(QtGui.QIntValidator())\n self.lltext.setMaxLength(6)\n self.lltext.returnPressed.connect(self.change_backlog)\n # frequency text field\n self.ftext = QtGui.QLineEdit(str(self.qbpm.frequency))\n self.ftext.setValidator(QtGui.QDoubleValidator())\n self.ftext.setMaxLength(6)\n self.ftext.returnPressed.connect(self.change_frequency)\n # sensititvity slider\n self.sslider = QtGui.QSlider(self)\n self.sslider.setOrientation(QtCore.Qt.Horizontal)\n self.sslider.setMinimum(1)\n self.sslider.setMaximum(100)\n self.sslider.setTickPosition(QtGui.QSlider.TicksBothSides)\n self.sslider.setTickInterval(10)\n self.sslider.setSingleStep(1)\n self.sslider.setValue(self.qbpm.sensitivity)\n self.sslider.valueChanged.connect(self._set_sensitivity)\n # filter slider\n self.fslider = QtGui.QSlider(self)\n self.fslider.setOrientation(QtCore.Qt.Horizontal)\n self.fslider.setMinimum(1)\n self.fslider.setMaximum(1000)\n self.fslider.setTickPosition(QtGui.QSlider.TicksBothSides)\n self.fslider.setTickInterval(100)\n self.sslider.setSingleStep(1)\n self.fslider.setValue(self.qbpm.filter)\n self.fslider.valueChanged.connect(self._set_filter)\n # log button\n self.lbutton = QtGui.QRadioButton(self)\n self.lbutton.setChecked(False)\n # quit button\n qbtn = QtGui.QPushButton('Quit', self)\n qbtn.clicked.connect(QtCore.QCoreApplication.instance().quit)\n qbtn.resize(qbtn.sizeHint())\n \n r, g, b, w = [255, 0, 0], [0, 255, 0], [0, 0, 255], [150, 150, 150]\n fill_color = pg.mkColor([0, 255, 0, 100])\n self.curves = {}\n log_pen = pg.mkPen(w, width=1, style=QtCore.Qt.SolidLine)\n avg_pen = pg.mkPen(r, width=3, style=QtCore.Qt.SolidLine)\n target_pen = pg.mkPen(g, width=1, style=QtCore.Qt.SolidLine)\n sensitivity_pen = pg.mkPen(fill_color, width=1, style=QtCore.Qt.SolidLine)\n fill_pen = pg.mkPen(fill_color, width=100, style=QtCore.Qt.SolidLine)\n petra_pen = pg.mkPen(w, width=3, style=QtCore.Qt.SolidLine)\n # define plot font\n font = QtGui.QFont()\n font.setPixelSize(16)\n # make PlotWidgets\n self.plot_main = pg.GraphicsLayoutWidget()\n self.avgcurr_timeaxis = TimeAxisItem(orientation='bottom')\n self.plot_avgcurr = self.plot_main.addPlot(title='avg. 
current', row=0, col=0,\n axisItems={'bottom': self.avgcurr_timeaxis})\n self.petracurr_timeaxis = TimeAxisItem(orientation='bottom')\n self.plot_petracurrent = self.plot_main.addPlot(title='PETRA beam current', row=0, col=1,\n axisItems={'bottom': self.petracurr_timeaxis})\n self.plot_main.nextRow()\n self.posx_timeaxis = TimeAxisItem(orientation='bottom')\n self.plot_posx = self.plot_main.addPlot(title='x-position', row=1, col=0,\n axisItems={'bottom': self.posx_timeaxis})\n self.posy_timeaxis = TimeAxisItem(orientation='bottom')\n self.plot_posz = self.plot_main.addPlot(title='z-position', row=1, col=1,\n axisItems={'bottom': self.posy_timeaxis})\n # assign qbpm data to styles and PlotWidgets\n styles = {'avgcurr_log': (self.plot_avgcurr, log_pen),\n 'avgcurr_filter_log': (self.plot_avgcurr, avg_pen),\n 'avgcurr_target_log': (self.plot_avgcurr, target_pen),\n 'posx_log': (self.plot_posx, log_pen),\n 'posx_filter_log': (self.plot_posx, avg_pen),\n 'posx_target_log': (self.plot_posx, target_pen),\n 'posz_log': (self.plot_posz, log_pen),\n 'posz_filter_log': (self.plot_posz, avg_pen),\n 'posz_target_log': (self.plot_posz, target_pen),\n 'posz_sens_low_log': (self.plot_posz, sensitivity_pen),\n 'posz_sens_high_log': (self.plot_posz, sensitivity_pen),\n 'petracurrent_log': (self.plot_petracurrent, petra_pen)\n }\n # plot curves\n for log_array, style in styles.items():\n # self.curves[key] = style[0].plot(self.qbpm.log_arrays[key], pen=style[1], symbol='o')\n self.curves[log_array] = style[0].plot(self.qbpm.log_time, self.qbpm.log_arrays[log_array], pen=style[1])\n # self.fill = pg.FillBetweenItem(curve1=self.curves['posz_sens_low_log'],\n # curve2=self.curves['posz_sens_high_log'], pen=fill_pen)\n # self.plot_posz.addItem(self.fill)\n # set axis properties\n for log_plot in [self.plot_avgcurr, self. 
plot_posx, self.plot_posz, self.plot_petracurrent]:\n log_plot.getAxis(\"bottom\").tickFont = font\n log_plot.getAxis(\"bottom\").setStyle(tickTextOffset=20)\n log_plot.getAxis(\"left\").tickFont = font\n log_plot.getAxis(\"left\").setStyle(tickTextOffset=20)\n log_plot.getAxis(\"left\").setWidth(100)\n log_plot.getAxis(\"bottom\").setGrid(100)\n log_plot.getAxis(\"left\").setGrid(100)\n\n # Create a grid layout to manage the widgets size and position\n layout = QtGui.QGridLayout()\n self.setLayout(layout)\n\n # Add widgets to the layout in their proper positions\n layout.addWidget(self.source_label, 0, 0)\n layout.addWidget(self.poll_label, 1, 0)\n layout.addWidget(self.feedback_label, 2, 0)\n layout.addWidget(self.ll_label, 4, 0)\n layout.addWidget(self.freq_label, 5, 0)\n layout.addWidget(self.sensitivity_label, 6, 0)\n layout.addWidget(self.filter_label, 7, 0)\n layout.addWidget(self.log_label, 8, 0)\n layout.addWidget(self.scbox, 0, 1)\n layout.addWidget(self.rbtn, 1, 1) # button goes in lower-left\n layout.addWidget(self.fbtn, 2, 1) # button goes in lower-left\n layout.addWidget(reset_btn, 3, 1) # button goes in lower-left\n layout.addWidget(self.lltext, 4, 1) # text edit goes in middle-left\n layout.addWidget(self.ftext, 5, 1) # text edit goes in middle-left\n layout.addWidget(self.sslider, 6, 1)\n layout.addWidget(self.fslider, 7, 1)\n layout.addWidget(self.lbutton, 8, 1)\n layout.addWidget(self.pitch_label, 9, 0, 1, 2) # button goes in lower-left\n# layout.addWidget(self.fb_step_label, 10, 0, 1, 2)\n# layout.addWidget(self.fb_time_label, 11, 0, 1, 2)\n layout.addWidget(qbtn, 10, 0, 1, 2) # button goes in lower-left\n layout.addWidget(self.plot_main, 0, 2, 11, 1)\n\n layout.setColumnStretch(0, 0.1)\n layout.setColumnStretch(1, 0.1)\n layout.setColumnStretch(2, 1)\n\n # Display the widget as a new window\n self.setWindowTitle(self.title)\n self.show()", "def run(self):\n while self.running:\n self.__update_battery()\n self.__update_signal()\n time.sleep(5)", "def run(self):\n\n try:\n while not self._exit:\n # Update the time varying value of the output.\n\n # The current time offset\n cur_time_offset = time.time() % self._time_period\n\n self._updating = True\n\n # If modulation is selected as FM\n if (self._mod_type == 2):\n # Getting the modulating input\n m_t = self.mod_ip[0].voltage - self.mod_ip[1].voltage\n\n freq = self._frequency + m_t\n if freq != 0:\n time_p = 1 / freq\n\n else:\n time_p = float(\"inf\")\n\n else:\n freq = self._frequency\n time_p = self._time_period\n\n # If sine wave\n if (self.type == 0):\n self._last_updated_time = cur_time_offset\n voltage = 0.5 * math.sin(\n 2 * 3.145926 * freq * cur_time_offset) + 0.5\n\n # If square wave\n elif (self.type == 1 or self.type == 4):\n self._last_updated_time = cur_time_offset\n voltage = 1 if (\n (cur_time_offset) < time_p /\n float(2)) else 0\n\n # If Ramp\n elif (self.type == 2):\n self._last_updated_time = cur_time_offset\n voltage = cur_time_offset / time_p\n\n # If triangular\n else:\n self._last_updated_time = cur_time_offset\n voltage = 2 * cur_time_offset / time_p if (\n (cur_time_offset) < time_p /\n float(2)) else (2 * (time_p - cur_time_offset) / time_p)\n\n if (self._mod_type == 1):\n m_t = self.mod_ip[0].voltage - self.mod_ip[1].voltage\n c_t = voltage * self._amplitude + self.offset\n voltage = (1 + m_t) * c_t\n voltage /= self._amplitude\n\n if (self.type != 4):\n voltage *= self._amplitude\n\n else:\n voltage *= 5.0 # TTL amplitude is constant at 5v\n\n self.outputs[0].voltage = voltage\n 
self.outputs[1].voltage = -self.offset\n\n self._updating = False\n time.sleep(self._sampling_time_interval)\n\n except Exception as e:\n return", "def work(self):\n while(True):\n debug_print = False\n if debug_print == True:\n start = time.time()\n\n flow = self.gauge.read_flow_from_dp()\n self.flw_q.put([time.time(), flow])\n\n if debug_print == True:\n flow_time = time.time()\n print(f\"Runtime - calc_flow: {1000 * (flow_time - start):.0f} ms\")\n\n pressure = self.gauge.read_pressure()\n self.prs_q.put([time.time(), pressure])\n\n if debug_print == True:\n pressure_time = time.time()\n print(f\"Runtime - read_pressure: {1000 * (pressure_time - flow_time):.0f} ms\")\n \n if debug_print == True:\n runtime = time.time() - start\n print(f\"Runtime - total: {1000 * runtime:.1f} ms\")\n print(f\"Frequency: {1 / runtime:.1f} Hz\")", "def __plot__(self, refresh=False, *args):\n # Check for a closed window:\n if 'Brem' in self.plots.keys() and not matplotlib.pyplot.fignum_exists(self.plots['Brem'].number):\n del self.plots['Brem']\n refresh = False\n # Update the existing plot, if it exists\n refresh = refresh or 'Brem' in self.plots.keys()\n if refresh:\n if 'Brem' in self.plots.keys():\n fig = self.plots['Brem']\n fig = matplotlib.pyplot.figure(fig.number)\n fig.clear()\n else:\n return\n # Make a new window:\n else:\n fig = matplotlib.pyplot.figure(figsize=(4,3))\n fig.canvas.set_window_title('Brem Power')\n ax = fig.add_subplot(111)\n\n # Plot\n if self.fuelVar.get():\n ax.plot(1e9*self.time, self.Pbrem[1,:], 'r-', label='Fuel')\n if self.shellVar.get():\n ax.plot(1e9*self.time, self.Pbrem[2,:], 'b-', label='Shell')\n if self.totalVar.get():\n ax.plot(1e9*self.time, self.Pbrem[0,:], 'k-', label='Total')\n\n ax.set_xlabel('Time (ns)', fontsize=12)\n ax.set_ylabel('Brem Power (W)', fontsize=12)\n ax.legend(loc=2)\n if self.logVar.get():\n ax.set_yscale('log')\n\n matplotlib.pyplot.tight_layout()\n\n if not refresh:\n if self.wm is not None:\n self.wm.addWindow(matplotlib.pyplot.get_current_fig_manager().window)\n fig.show()\n fig.canvas.draw()\n self.plots['Brem'] = fig", "def run(self):\n\t\twhile 1:\n\t\t\tif self._driveSystem.port_open == True and self._parent.aborted==False:\n\n\t\t\t\tself._driveSystem.check_encoder_pos()\n\t\t\t\tpos=self._driveSystem.positions\n\t\t\t\tif self._parent.printRequest==True: #Print positions when print Button was pressed\n\t\t\t\t\toutput=\"Axis 1: \"+str(pos[0])+\"\\nAxis 2: \"+str(pos[1])+\"\\nAxis 3: \"+str(pos[2])+\"\\nAxis 4: \"+str(pos[3])\n\t\t\t\t\tprint(output)\n\t\t\t\t\tself._parent.printRequest=False\n\t\t\t\tevent = PosUpdateEvent(myEVT_POSUPDATE, -1, pos)\n\t\t\t\twx.PostEvent(self._parent.matplotpanel, event)\n\n\t\t\t\tt=0\n\t\t\t\twhile t<UPDATE_TIME:\n\t\t\t\t\tself.checkQ()\n\t\t\t\t\ttime.sleep(REAC_TIME)\n\t\t\t\t\tt=t+REAC_TIME\n\t\t\telse:\n\t\t\t\ttime.sleep(1)", "def run(self):\n while self._update_func():\n self.update_signal.emit(None)", "def run(self):\n while True:\n # Status message from state machine\n self.updateStatusMessage.emit(self.sm.status_message)\n # Serial errors from rexarm\n self.updateJointErrors.emit(self.rexarm.get_errors())\n # Only get rexarm feedback if initialized\n if self.rexarm.initialized:\n self.updateJointReadout.emit(self.rexarm.position_fb)\n self.updateEndEffectorReadout.emit(self.rexarm.get_wrist_pose())\n time.sleep(0.1)", "def run( self ):\r\n \r\n # Execute the per-cycle work specifed by the user\r\n for f in self.updateFuncList:\r\n f() # Please make these lightweight and pertain to UI 
drawing!\r\n \r\n # Update window\r\n self.rootWin.update_idletasks() # idk , draw or something!\r\n \r\n # Wait remainder of period\r\n elapsed = time.time() * 1000 - self.last\r\n if elapsed < self.stepTime:\r\n sleepTime = int( self.stepTime - elapsed ) \r\n else:\r\n sleepTime = 0\r\n # 4.e. Mark beginning of next loop\r\n self.last = time.time() * 1000 \r\n self.rootWin.after( sleepTime , self.run )", "def run(self):\n\n self._check_hardware_control()\n\n if self._is_stabilizing:\n #If we are locking the power, then need to update teh feedback loop and change the output label\n self._update_feedback()\n self._update_output_voltage_label()\n\n #We always need to update the plots as well and power label\n\n self._update_plots()\n self._update_power_label()\n\n self.gui.force_update()", "def update_graphs(self):\n profile_time = False\n if profile_time:\n start_time = time.time()\n\n # Update the graph data with data only within the chosen time_range\n now = time.time()\n i_tr_prs = np.where(now - self.prs_data[0, :] < \n self.time_range[1] - self.time_range[0])[0]\n self.prs_graph.setData(self.prs_data[0, i_tr_prs] - now, self.prs_data[1, i_tr_prs])\n # Updates the graph title\n self.prs_pw.setTitle(f\"Pressão: {self.prs_data[1, 0]:.1f} cmH2O\", **self.ttl_style)\n\n if profile_time == True:\n time_at_pressure = time.time()\n print(f\"Until pressure graph: {time_at_pressure - start_time:.4f} s\")\n \n # Update the graph data with data only within the chosen time_range\n now = time.time()\n i_tr_flw = np.where(now - self.flw_data[0, :] < \n self.time_range[1] - self.time_range[0])[0]\n self.flw_pw.setTitle(f\"Fluxo: {self.flw_data[1, 0]:.1f} l/min\", **self.ttl_style)\n self.flw_graph.setData(self.flw_data[0, i_tr_flw] - now, self.flw_data[1, i_tr_flw])\n\n if profile_time == True:\n time_at_flow = time.time()\n print(f\"Until flow graph: {time_at_flow - start_time:.4f} s\")\n\n i_tr_vol = np.where(now - self.vol_data[0, :] < \n self.time_range[1] - self.time_range[0])[0]\n self.vol_pw.setTitle(f\"Volume: {self.vol_data[1, 0]:.0f} ml\", **self.ttl_style)\n self.vol_graph.setData(self.vol_data[0, i_tr_vol] - now, self.vol_data[1, i_tr_vol])\n\n if profile_time == True:\n time_at_volume = time.time()\n print(f\"After the volume graph: {time_at_volume - time_at_flow:.4f} s\")\n\n # Adjust the Y range every N measurements\n # Manually adjusting by calculating the max and min with numpy is faster than autoscale on \n # the graph. 
Also calculates FPS\n N = 20\n if self.run_counter % N == 0:\n # definition of the minimum acceptable range for the volume\n min_range_vol = [-5, 50]\n # Tries to get the max and min from each data set \n try:\n range_vol = [np.min(self.vol_data[1, i_tr_vol]), np.max(self.vol_data[1, i_tr_vol])]\n # Adjusts the minimum and maximum, if the measured values are outside the minimum range\n self.vol_pw.setYRange(np.min([range_vol[0], min_range_vol[0]]), \n np.max([range_vol[1], min_range_vol[1]]))\n except:\n pass\n min_range_prs = [-0.2, 5]\n try:\n range_prs = [np.min(self.prs_data[1, i_tr_prs]), np.max(self.prs_data[1, i_tr_prs])]\n self.prs_pw.setYRange(np.min([range_prs[0], min_range_prs[0]]), \n np.max([range_prs[1], min_range_prs[1]]))\n except:\n pass\n\n min_range_flw = [-0.1, 1]\n try:\n range_flw = [np.min(self.flw_data[1, i_tr_flw]), np.max(self.flw_data[1, i_tr_flw])]\n self.flw_pw.setYRange(np.min([range_flw[0], min_range_flw[0]]), \n np.max([range_flw[1], min_range_flw[1]]))\n except:\n pass\n mean_pts = 50\n try:\n FPS = np.nan_to_num(1.0 / np.mean(self.vol_data[0, 0:mean_pts] - \n self.vol_data[0, 1:1+mean_pts]))\n except:\n FPS = 0\n self.fps_lbl.setText(f\"FPS: {FPS:.2f}\")\n self.run_counter = 0\n self.run_counter += 1", "def __work__(self):\n while not self.is_done:\n self.refreshSignal.emit()\n time.sleep(0.05)", "def run(self):\n\t\t\n\t\twhile self.update():\n\t\t\tpass", "def _start_loop_poll(self):\n self._stop_loop_poll() # Stop any existing timer\n self._generator_poll = self._read_qbpm_loop() # Start the loop\n self._timerId_poll = self.startTimer(0) # This is the idle timer\n self.rbtn.setIcon(self.style().standardIcon(QtWidgets.QStyle.SP_MediaPause))", "def _update_gui(self):\r\n \r\n # Update the RF button.\r\n rf_on = self.api.get_output()\r\n if rf_on == None: rf_on = True\r\n self.button_rf.set_checked(rf_on, block_events=True).enable()\r\n \r\n # Update the combo; we block first just in case the value doesn't \"change\"\r\n if self.api == None: self.label_instrument_name.set_text('Simulation')\r\n else:\r\n if self.api.get_mode() == 'Fixed': self.combo_mode.set_value(0, block_events=True).enable()\r\n else: self.combo_mode.set_value(1, block_events=True).enable()\r\n self._combo_mode_changed()\r\n \r\n # Update the list plot\r\n self.query_list()", "def loop(self):\r\n while self.__running:\r\n self.__check_events()\r\n self.__render()\r\n self.__reset_variables()", "def loop(self) -> None:\n while True:\n # Sleep before running code to ensure that the sensor is\n # initialized on first run, as per the specifications.\n sleep(config.SLEEP)\n\n self.setup_quiet_hours()\n\n if self.quiet_setup:\n if self.in_quiet_hours():\n if config.DC_QH:\n self.sensor.close()\n self.sleep_quiet_hours()\n continue\n\n # In the case that quiet hours were established during first run\n # and removed from the endpoint afterwards, the sensor may not\n # be in the open state. Because serial.Serial.open() may raise\n # an exception if the sensor is already open, just check prior.\n if not self.sensor.isOpen():\n self.sensor.open()\n\n config.LOGGER.info('Woke up after sleeping. 
Running loop()')\n self.data = []\n for _ in range(10):\n datum = self.sensor.read()\n self.data.append(datum)\n\n for pm, start in self._SLICES.items():\n # Might be necessary to give the endpoint some time\n # between responses\n sleep(10)\n reading = self.read_data_from_bytes(start)\n aq_dict = air_quality.AQS[pm].get_range(reading)\n\n sections = [\n {\n 'type': 'text',\n 'color': aq_dict['color'],\n 'value': f'Quality: {aq_dict[\"label\"]}'\n },\n {\n 'type': 'gauge',\n 'color': [aq_dict['color']],\n 'range': [aq_dict['lower'], aq_dict['upper']],\n 'value': reading,\n },\n {\n 'type': 'gauge',\n 'color': air_quality.COLORS,\n 'range': air_quality.AQS[pm].get_all_ranges(),\n 'value': reading,\n }\n ]\n\n data = {\n 'module': f'photo-dash-sds011-pm{pm}',\n 'title': f'Air Quality - PM{pm}',\n 'sections': sections,\n }\n\n try:\n r = requests.put(config.ENDPOINT, json=data)\n except Exception as e: # Catching broad Exceptions for now\n config.LOGGER.error(e)\n config.LOGGER.info(r.status_code)", "def _set_feedback_loop(self):\n while True:\n mono = self.get_mono()\n interval = int(self.qbpm.filter / 20)\n if self.qbpm.log_arrays['avgcurr_log'][-1] < self.feedback_threshold:\n print('intensity too low.')\n self._stop_loop_feedback()\n# current_pos = self.qbpm.log_arrays['posz_filter_log'][-1]\n current_pos = self.qbpm.log_arrays['posx_filter_log'][-1]\n# target = self.qbpm.posz_target\n target = self.qbpm.posx_target\n if mono == \"dcm\":\n corr_factor = 0.2\n if mono == \"dmm\":\n corr_factor = 0.2\n bandwidth = 0.003 * float(self.qbpm.sensitivity/100)\n if not ((target - bandwidth) < current_pos < (target + bandwidth)):\n corr_angle = -((current_pos - target) * corr_factor)/self.qbpm.distance\n if self.cycle == interval:\n print('Moving pitch: {}'.format(corr_angle))\n dcm_curr_pitchpos = self.dcm_pitch_tserver.Position\n target_pitchpos = dcm_curr_pitchpos + corr_angle\n if not self.simulate_feedback:\n if mono == \"dcm\":\n self.dcm_pitch_tserver.write_attribute('Position', target_pitchpos)\n if mono == \"dmm\":\n self.dmm_x2rot_tserver.write_attribute('Position', target_pitchpos)\n self.last_corr_angle = corr_angle\n self.feedback_time = datetime.datetime.now()\n self.cycle = 0\n self.cycle = 0 if self.cycle >= interval else self.cycle + 1\n self.set_x2pitchlabel()\n yield", "def update_plot():\n pass", "def play(self):\n # set the type of plot\n self.plot.plotkind = self.plotname_box.currentText()\n \n #connect the mtp interface to the data source\n self.plot.importdata( self.data_source )\n\n # start the serial communication only the first time\n if self.select_serial_box.isEnabled():\n self.start_serial()\n \n # get and set the time interval for updating the plot\n self.timestep = self.spinbox_timestep.value()\n \n #start the timer\n self.timer.start(self.timestep)\n \n #disable the various buttons\n self.play_button.setDown(True)\n self.play_button.setEnabled(False)\n self.stop_button.setEnabled(True)\n self.select_serial_box.setEnabled(False)\n self.select_speed_box.setEnabled(False)\n self.plotname_box.setEnabled(False)\n self.spinbox_timestep.setEnabled(False)", "def set_layout( self ):\n self.main_frame = QtGui.QWidget()\n #self.main_frame.setFixedHeight(500)\n \n self.plot = mtp.mtp( self.main_frame )\n \n \n #actual layout \n #******************\n \n # layout left:\n #-------------\n vboxleft = QtGui.QVBoxLayout()\n \n # the matplotlib canvas\n vboxleft.addWidget(self.plot.canvas) \n \n # the play and stop button\n hbox = QtGui.QHBoxLayout()\n self.play_button = 
QtGui.QPushButton(\"&Play\")\n hbox.addWidget(self.play_button)\n self.stop_button = QtGui.QPushButton(\"&Stop\")\n hbox.addWidget(self.stop_button)\n vboxleft.addLayout(hbox)\n\n #layout right:\n #------------\n\n # prepare the buttons, the horizontal lines and the spacing\n \n # Serial port configuration\n self.select_serial_box = QtGui.QComboBox()\n self.select_serial_box.addItems( self.data_source.si.getListOfSerialPorts() )\n \n self.select_speed_box = QtGui.QComboBox()\n self.select_speed_box.addItems( self.data_source.si.bitrates )\n self.select_speed_box.setCurrentIndex( 1 )\n \n # Configuration of the output plot\n self.plotname_box = QtGui.QComboBox()\n self.plotname_box.addItems( self.plot.plotnames )\n \n # select the time interval to update the plot\n labelspinbox = QtGui.QLabel(\"Update (ms):\")\n self.spinbox_timestep = QtGui.QSpinBox()\n self.spinbox_timestep.setRange(100,2000) #from 0.1 to 5 seconds\n self.spinbox_timestep.setSingleStep(50)\n self.spinbox_timestep.setValue( self.timestep)\n labelspinbox.setBuddy(self.spinbox_timestep )\n \n # show last N data points\n self.showlastsCheckBox = QtGui.QCheckBox(\"Show last:\")\n self.spinbox_showlast= QtGui.QSpinBox()\n self.spinbox_showlast.setRange(10,1000) #from 0.1 to 5 seconds\n self.spinbox_showlast.setValue( 50)\n \n \n # Save plot button\n self.save_button = QtGui.QPushButton(\"S&ave plot\")\n\n # Reset data button\n self.resetdata_button = QtGui.QPushButton(\"&Reset data\")\n \n # Close button\n self.close_button = QtGui.QPushButton(\"&Close\")\n \n # in order to fix the width of the right layout\n # one needs to put the boxlayout in a widget\n vboxrightWidget = QtGui.QWidget()\n vboxright = QtGui.QVBoxLayout(vboxrightWidget)\n \n # inserting the widgets in the layout\n \n # serial configuration\n label = QtGui.QLabel(\"Serial configuration\")\n vboxright.addWidget(label)\n \n hbox = QtGui.QHBoxLayout()\n label = QtGui.QLabel(\"Port:\")\n hbox.addWidget( label )\n hbox.addWidget( self.select_serial_box)\n vboxright.addLayout( hbox )\n \n hbox = QtGui.QHBoxLayout()\n label = QtGui.QLabel(\"Bitrate:\")\n hbox.addWidget( label )\n hbox.addWidget( self.select_speed_box)\n vboxright.addLayout( hbox )\n \n # horizontal line\n line = QtGui.QFrame(self)\n line.setFrameShape(QtGui.QFrame.HLine)\n line.setFrameShadow(QtGui.QFrame.Sunken)\n vboxright.addWidget(line)\n \n # plot type\n label = QtGui.QLabel(\"Plot type\")\n vboxright.addWidget(label)\n vboxright.addWidget(self.plotname_box)\n \n # plot interval\n hbox_spinbox = QtGui.QHBoxLayout()\n hbox_spinbox.addWidget(labelspinbox)\n hbox_spinbox.addWidget(self.spinbox_timestep)\n vboxright.addLayout(hbox_spinbox)\n \n # horizontal line\n line = QtGui.QFrame(self)\n line.setFrameShape(QtGui.QFrame.HLine)\n line.setFrameShadow(QtGui.QFrame.Sunken)\n vboxright.addWidget(line)\n \n # Plot confifuration\n hboxshowlast = QtGui.QHBoxLayout()\n hboxshowlast.addWidget( self.showlastsCheckBox )\n hboxshowlast.addWidget( self.spinbox_showlast )\n vboxright.addLayout( hboxshowlast )\n \n # vertical space\n vboxright.addItem(QtGui.QSpacerItem(20,40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding))\n \n # buttons\n vboxright.addWidget(self.save_button)\n vboxright.addWidget(self.resetdata_button)\n vboxright.addWidget(self.close_button)\n \n #fix the width of the right layout through its enclosing widget\n vboxright.setContentsMargins(0,0,0,0)\n vboxrightWidget.setFixedWidth(180)\n \n \n # Global horizontal layout: takes the two vertical box layouts\n 
#-------------------------------------------------------------\n hboxmain = QtGui.QHBoxLayout()\n hboxmain.addLayout(vboxleft)\n hboxmain.addWidget(vboxrightWidget)\n \n # setting the global horizontal box as the main_frame layout\n #-----------------------------------------------------------\n self.main_frame.setLayout( hboxmain )\n self.setCentralWidget( self.main_frame )", "def run(self):\n\n if self.transport == 'any':\n devs = kromek.discover()\n else:\n devs = kromek.discover(self.transport)\n\n print('Discovered %s' % devs)\n\n if len(devs) <= 0:\n return\n\n filtered = []\n\n for dev in devs:\n if self.device == 'all' or dev[0] in self.device:\n filtered.append(dev)\n\n devs = filtered\n if len(devs) <= 0:\n return\n\n done_devices = set()\n try:\n while self.running:\n print(\"Plot_manager.run: getting data\")\n with kromek.Controller(devs, self.interval) as controller:\n for reading in controller.read():\n if self.create_structures:\n self.total = np.array(reading[4])\n self.lst = np.array([reading[4]])\n self.create_structures = False\n else:\n self.total += np.array(reading[4])\n self.lst = np.concatenate(\n (self.lst, [np.array(reading[4])]))\n serial = reading[0]\n dev_count = reading[1]\n if serial not in done_devices:\n this_start, this_end = self.get_interval(\n time.time() - self.interval)\n\n self.handle_spectra(\n this_start, this_end, reading[4])\n if dev_count >= self.count > 0:\n done_devices.add(serial)\n controller.stop_collector(serial)\n if len(done_devices) >= len(devs):\n break\n except KeyboardInterrupt:\n self.vprint(1, '\\nKeyboardInterrupt: stopping Manager run')\n self.takedown()\n except SystemExit:\n self.vprint(1, '\\nSystemExit: taking down Manager')\n self.takedown()", "def run(self):\n last_time = time.time()\n while self.running:\n now_time = time.time()\n interval = now_time - last_time\n last_time = now_time\n self.update(interval)\n time.sleep(Options['update interval'])", "def run(self): # pragma: no cover\n while True:\n self.update()", "def on_redraw_timer(self, event):\n \n if self.sampling_timer.IsRunning():\n self.daq.get_data()\n self.draw_plot()\n else:\n self.control_box.txt_info_box.SetLabel('Measurement complete')\n self.calculate()\n return", "def _UpdatePlot( self ):\n self._BusyDoOp( self._UpdatePlotImpl )", "def run(self):\n while self.running:\n QtCore.QCoreApplication.processEvents()" ]
[ "0.6118877", "0.6087371", "0.60151035", "0.6013379", "0.59444356", "0.5929536", "0.59115523", "0.58924115", "0.5889598", "0.58719826", "0.5863707", "0.5845268", "0.58439153", "0.5833174", "0.5832204", "0.5808844", "0.5802605", "0.5795682", "0.5755714", "0.5680615", "0.5673164", "0.56713194", "0.56622916", "0.5655377", "0.56447315", "0.56412166", "0.56368876", "0.56220686", "0.5619471", "0.5604536" ]
0.6859841
0
Initializes Qt timer method for polling routine and switches Play button icon.
def _start_loop_poll(self):
    self._stop_loop_poll()  # Stop any existing timer
    self._generator_poll = self._read_qbpm_loop()  # Start the loop
    self._timerId_poll = self.startTimer(0)  # This is the idle timer
    self.rbtn.setIcon(self.style().standardIcon(QtWidgets.QStyle.SP_MediaPause))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def timer_setup(self):\n pass", "def autonomousInit(self):\n #self.timer.reset()\n #self.timer.start()\n pass", "def _stop_loop_poll(self): # Connect to Stop-button clicked()\n if self._timerId_poll is not None:\n self.killTimer(self._timerId_poll)\n self._generator_poll = None\n self._timerId_poll = None\n self.rbtn.setIcon(self.style().standardIcon(QtWidgets.QStyle.SP_MediaPlay))\n self.heartbeat = time.time()", "def start_timer(self): # Bug timer gets not called due to a lock somewhere else\n self.log.info(\"Framework initialized...\")\n timer = QtCore.QTimer()\n timer.timeout.connect(self.update_)\n timer.start(self.update_interval)\n self.timer = timer\n return timer", "def __init__(self, sec=None, callFunc=None, *args, **kwargs):\n self._tkWdg = _getTkWdg()\n self._timerID = None\n if sec is not None:\n self.start(sec, callFunc, *args, **kwargs)", "def _startbuttontimers(self):\n changetimes = {}\n for b in self._buttons:\n if not b._times:\n continue\n t0, t1 = b._times\n changetimes[t0] = changetimes[t1] = 1\n for t in changetimes.keys():\n mw_globals.toplevel.settimer(t, (self._window._buttonschanged, ()))", "def StartTimer(self):\n self._start_time = time.time()", "def start_timer(self):\n self.start_time = datetime.now()", "def on_Start_CWA_button_clicked(self):\n self.timer2.start(500)", "def start_timer(self):\n self.start_time = time.time()", "def init_player():\n global active_track_idx\n global track_last_slided_pos\n global track_last_paused_pos\n global track_total_play_time \n\n # INITIALIZE Player\n active_track_idx = -1\n cancel_update_play_time_loop()\n cancel_track_end_event_loop()\n track_status.set(\"---\")\n track_title.set(\"--- : \")\n play_pause_btn.configure(image=play_img)\n track_last_slided_pos = 0\n track_last_paused_pos = 0\n track_total_play_time = 0\n track_pos_label.configure(text=\"00:00\")\n track_length_label.configure(text=\"00:00\")\n track_pos_slider.configure(state=\"disabled\")\n track_pos.set(0)", "def start_clock(self):\n pass", "def med_timer(self):\n self.start_button.config(text='Sit', state='disabled')\n self.start_button.update()\n if self.mins.get() == \"\":\n num_mins = 0\n else:\n num_mins = float(self.mins.get())\n time_in_seconds = num_mins * 60\n self.t = Timer(time_in_seconds, self.play_wav)\n self.t.start()", "def start(self):\n self.timer.start(500)", "def start_handler():\r\n timer.start()", "def build_and_play_alarm(self):\n # Stop any playing radio stream\n if self.radio_button.isChecked():\n self.radio_button.click()\n\n self.alarm_play_button.setEnabled(False)\n self.main_window.waiting_spinner.start()\n self.build_and_play_thread.start()", "def start(self) -> None:\n self.timer = wpilib.Timer()\n self.timer.start()\n\n self._on_autonomous_enable()", "def OnButton2(self):\n if self.timer.IsRunning() & self.timerNotZero:\n self.timer.Stop()\n self.button2.SetBitmap(self.button2play)\n elif self.timerNotZero:\n self.timer.Start()\n self.button2.SetBitmap(self.button2pause)", "def __init__(self, parent=None):\n super().__init__(parent)\n self.camera = None\n\n self.timer = QtCore.QBasicTimer()", "def start_aco(self, event):\n if not self.running:\n self.start_btn_text.set(\"Start\")\n self.pause_btn_text.set(\"Pause\")\n self.running = True\n self.main_window.after(int(self.speed), self.update_aco)", "def init_update_timer(self):\r\n\r\n self.update_timer = wx.Timer(self)\r\n self.Bind(wx.EVT_TIMER, self.check_updates, self.update_timer)", "def Start():\n timer.start()", "def play(self):\n # set the type of plot\n 
self.plot.plotkind = self.plotname_box.currentText()\n \n #connect the mtp interface to the data source\n self.plot.importdata( self.data_source )\n\n # start the serial communication only the first time\n if self.select_serial_box.isEnabled():\n self.start_serial()\n \n # get and set the time interval for updating the plot\n self.timestep = self.spinbox_timestep.value()\n \n #start the timer\n self.timer.start(self.timestep)\n \n #disable the various buttons\n self.play_button.setDown(True)\n self.play_button.setEnabled(False)\n self.stop_button.setEnabled(True)\n self.select_serial_box.setEnabled(False)\n self.select_speed_box.setEnabled(False)\n self.plotname_box.setEnabled(False)\n self.spinbox_timestep.setEnabled(False)", "def _default__band_timer(self):\n timer = QTimer()\n timer.setSingleShot(True)\n timer.timeout.connect(self._on_band_timer)\n return timer", "def initUI(self):\n startbtn = QPushButton(\"Start Recroding\", self)\n startbtn.move(30, 50)\n\n stopbtn = QPushButton(\"Stop Recording\", self)\n stopbtn.move(150, 50)\n\n initbtn = QPushButton(\"Initilize\", self)\n initbtn.move(30, 100)\n\n plotbtn = QPushButton(\"Plot\", self)\n plotbtn.move(150, 100)\n\n startbtn.clicked.connect(self.start_recording)\n stopbtn.clicked.connect(self.stop_recording)\n initbtn.clicked.connect(self.init_recording)\n plotbtn.clicked.connect(self.plot_signals)\n\n self.statusBar()\n self.statusBar().showMessage('Click Init')\n\n self.setGeometry(300, 300, 290, 150)\n self.setWindowTitle('Recorder 1.0')\n self.setWindowIcon(QIcon(\"./Static/Images/icon.jpg\"))\n self.show()", "def _set_mode_running(cls):\n\n cls._disconnect_buttons()\n\n cls.btn_startpause.setText(\"Pause\")\n cls.btn_startpause.setIcon(QIcon.fromTheme(\"media-playback-pause\"))\n cls.btn_startpause.setWhatsThis(\"Pause timer.\")\n cls.btn_startpause.clicked.connect(cls.pause)\n\n cls.btn_stopsave.setText(\"Stop\")\n cls.btn_stopsave.setIcon(QIcon.fromTheme(\"media-playback-stop\"))\n cls.btn_stopsave.setWhatsThis(\n \"Stop timer. Timer must be stopped \" \"before you can save.\"\n )\n cls.btn_stopsave.clicked.connect(cls.prompt_stop)\n cls.btn_stopsave.setEnabled(True)", "def init(self, parent):\n super(_VideoComponentEditor, self).init(parent)\n\n self.playTimer = QTimer(self.control)\n self.playTimer.timeout.connect(self.update)\n # self.control.connect(self.playTimer, SIGNAL('timeout()'), self.update)\n print(\"fffffffffff\", self.value.fps)\n if self.value.fps:\n self.playTimer.setInterval(1000 / float(self.value.fps))\n self.playTimer.start()\n\n self.value.on_trait_change(self.stop, \"closed_event\")\n\n self.value.on_trait_change(self._update_fps, \"fps\")\n self.sync_value(\"stop_timer\", \"stop_timer\", mode=\"from\")\n # self._prev_time = time.time()\n self._alive = True\n # QTimer.singleShot(self._get_interval(), lambda: self.update(-1))", "def start_timer(self):\n print \"Timer Object Started. 
Will update ADC Information every %s seconds\" % self.refreshTime\n self.timer=Timer(float(self.refreshTime)*1000, self._refresh_Visible_channels)", "def OnButton1(self):\n self.start_time = self.start_time.Minutes(DEFAULT_TIMER)\n self.timertext.SetLabel(self.start_time.Format(\"%M:%S\"))\n self.timerNotZero = True\n self.blinkPhase = 0\n self.timertext.SetForegroundColour('black')\n self.button1.SetBackgroundColour('white')", "def __init__( self ):\r\n\r\n # Create the main window in which our gui will display.\r\n self.main_window = QtGui.QWidget() # Or QMainWindow().\r\n\r\n # Create an instance of our gui and set it up in the main window.\r\n self.gui = Ui_StopwatchGui()\r\n self.gui.setupUi( self.main_window )\r\n\r\n # Set the connections between the gui components and this application.\r\n self.gui.start_stop_button.clicked.connect( self.start_stop )\r\n\r\n # Use an Event to communicate with the thread.\r\n self.stop_event = Event() # Set to False by default; set to True\r\n self.stop_event.set() # so stopwatch is not running at launch.\r\n\r\n # Show the main window containing our gui.\r\n self.main_window.show()" ]
[ "0.6865208", "0.6701982", "0.66158944", "0.6391647", "0.62690413", "0.6246653", "0.62351704", "0.62272644", "0.6169445", "0.6142069", "0.612976", "0.61192137", "0.6077936", "0.6072218", "0.60494316", "0.6029154", "0.6022442", "0.5995246", "0.5978963", "0.59544915", "0.59477735", "0.59450835", "0.5917475", "0.5870905", "0.58688164", "0.5844239", "0.58354807", "0.58175206", "0.5799513", "0.57980067" ]
0.712923
0
Stops Qt timer method for polling routine and switches Play button icon.
def _stop_loop_poll(self):  # Connect to Stop-button clicked()
    if self._timerId_poll is not None:
        self.killTimer(self._timerId_poll)
    self._generator_poll = None
    self._timerId_poll = None
    self.rbtn.setIcon(self.style().standardIcon(QtWidgets.QStyle.SP_MediaPlay))
    self.heartbeat = time.time()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stop( self ):\n self.data_source.si.daqStop()\n self.timer.stop()\n \n #re-enable the play button\n self.play_button.setEnabled(True)\n self.stop_button.setEnabled(False)\n self.spinbox_timestep.setEnabled(True)", "def stop(self):\n self.setWindowTitle(self.name + ': stopped')\n self._timer.stop()", "def stop_timer(self):\r\n self.countdownTimer.stop()", "def _stop_loop_feedback(self): # Connect to Stop-button clicked()\n if self._timerId_feedback is not None:\n self.killTimer(self._timerId_feedback)\n self._generator_feedback = None\n self._timerId_feedback = None\n self.fbtn.setIcon(self.style().standardIcon(QtWidgets.QStyle.SP_MediaPlay))\n self.qbpm.feedback_on = False", "def slot_stop(self):\n\n self.thread.working = False", "def force_stop(self):\n self.timer.stop()\n QMetaObject.invokeMethod(self.video_player, \"stop\", Qt.QueuedConnection)\n self.video_playing = False\n self.stopped = True", "def loop_stop(self):\n super(TimerLoop, self).loop_stop()\n self.timer.cancel()\n self.loop_confirm_stopped()", "def _set_mode_stopped(cls):\n cls._disconnect_buttons()\n\n cls.btn_startpause.setText(\"Start\")\n cls.btn_startpause.setIcon(QIcon.fromTheme(\"media-playback-start\"))\n cls.btn_startpause.setWhatsThis(\"Start a new timer.\")\n cls.btn_startpause.clicked.connect(cls.start)\n\n cls.btn_stopsave.setIcon(QIcon.fromTheme(None))\n cls.btn_stopsave.setText(\"Stopped\")\n cls.btn_stopsave.setWhatsThis(\"Timer is already stopped.\")\n cls.btn_stopsave.setEnabled(False)", "def stop(self):\n self._stopped.set()\n if self._timer:\n self._timer.cancel()\n self._timer = None", "def stop_timer(self):\n self.end_time = datetime.now()", "def stop(self):\r\n self.running = False", "def stop(self):\r\n self.running = False", "def _control_stop(self):\n self.player.stop()", "def stop(self):\n self._schedule(0, 0)\n self._started = False", "def _stop(self):\n self._pi.stop()", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def stop(self):\n with self._lock:\n self._running.clear()\n if self._timer:\n self._timer.cancel()\n self._timer = None", "def stop_alarm_ringtone(self):\n mixer.stop()\n mixer.quit()", "def stop(self):\n if self.is_playing:\n self.is_playing = False\n self.tstamp_play = None\n self.thread.stop()", "def stop(self):\r\n self.stopped = True\r\n time.sleep(1)", "def stop(self):\n self._stop_event.set()", "def stop(self):\n if self.monitor_lc:\n self.monitor_lc.cancel()\n self.monitor_lc = None", "def stop(self):", "def stop(self):", "def stop(self):\r\n self.stopped = True\r\n time.sleep(3)", "def stop(self):\n self.active = False" ]
[ "0.7859332", "0.72420764", "0.7157977", "0.69275886", "0.68424386", "0.67483723", "0.6747969", "0.67310244", "0.6729655", "0.6729282", "0.67194355", "0.67194355", "0.6666322", "0.6643688", "0.66143656", "0.6603734", "0.6603734", "0.6603734", "0.6603734", "0.6603734", "0.6593516", "0.65682817", "0.65516776", "0.6537533", "0.6532692", "0.6526859", "0.65188825", "0.65188825", "0.6494276", "0.64915806" ]
0.7740067
1
Toggles DCM feedback. Connected to feedback button.
def toggle_feedback(self):
    self.feedback = not self.feedback
    if self.feedback:
        self.dcm_pitch_tserver.write_attribute('StepBacklash', 0)
        self._start_loop_feedback()
    else:
        # print('In toggle feedback')
        self.dcm_pitch_tserver.write_attribute('StepBacklash', self.dcm_step_backlash)
        self._stop_loop_feedback()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def toggle_audio_feedback(self, enabled):\r\n self.config.audio_feedback = enabled", "def feedback_enable(self):\n return self._feedback_enable", "def set_as_not_feedback(self):\n self.feedback = False", "def set_as_feedback(self):\n if self.type == MessageTypes.AGENT:\n raise InvalidMessageTypeError(\n 'Cannot set feedback as True when msg is of type Agent')\n self.feedback = True", "async def feedback(self, ctx, *, feedback):\n url = os.environ.get(\"FEEDBACK_WEBHOOK\", None)\n if url:\n webhook = Webhook.from_url(url, adapter=RequestsWebhookAdapter())\n embed = discord.Embed(description=feedback, colour=discord.Colour.teal())\n embed.set_author(name=f\"{ctx.author.name}#{ctx.author.discriminator}\", icon_url=ctx.author.avatar_url)\n embed.set_footer(text=f\"User id: {ctx.author.id}\")\n webhook.send(embed=embed)\n await ctx.send(embed=embeds.success(\"Sent the feedback!\"))\n else:\n await ctx.send(embed=embeds.error(\"This command is disabled.\"))", "async def feedback(self, ctx, *, message):\n channel = self.bot.get_channel(config.feedback_channel) # feedback chanel in support server\n\n embed = discord.Embed(title='New Feedback!',\n description=message,\n color=self.bot.color)\n embed.add_field(name='Author',\n value=ctx.author.mention)\n embed.add_field(name='Server',\n value=ctx.guild.name)\n if ctx.message.attachments:\n embed.add_field(name='Attachments',\n value='\\n'.join(f'[{file.filename}]({file.url})' for file in ctx.message.attachments),\n inline=False)\n embed.set_footer(text='Vote on this submissions using the reactions so I can determine what to focus on!')\n\n message = await channel.send(embed=embed)\n await message.add_reaction('<:upvote:651325140663140362>')\n await message.add_reaction('<:downvote:651325233105600544>')\n await ctx.send('Thank you for your submission! '\n 'If you haven\\'t already, consider joining the support server with `support`.')", "def toggled_comunication(self):\n if self.actionPC_Monitor.isChecked() and self.actionPC_Monitor.isEnabled():\n self.actionPC_Monitor.setEnabled(0)\n self.actionPC_Sensor_Actuador.setChecked(0)\n self.actionPC_Sensor_Actuador.setEnabled(1)\n self.monitor_environment()\n \n elif self.actionPC_Sensor_Actuador.isChecked() and self.actionPC_Sensor_Actuador.isEnabled():\n self.actionPC_Sensor_Actuador.setEnabled(0)\n self.actionPC_Monitor.setChecked(0)\n self.actionPC_Monitor.setEnabled(1)\n self.actuator_environment()", "def ext_fb_trigger(self):\n if os.path.isfile(self.feedback_file):\n self.toggle_feedback()\n os.remove(self.feedback_file)", "def feedbackButton(name, value=None, enabled=True, **kwparams):\n if value is None:\n value = _(u'Feedback')\n kwparams.setdefault('class', 'feedbackbutton')\n return button(name, value, enabled, **kwparams)", "async def toggle(self, ctx: BBContext):\n\n self.code_enabled = not self.code_enabled\n e = 'enabled.' if self.code_enabled else 'disabled.'\n await ctx.send(f\"Bunker code auto reaction has been : **{e}**\")\n self.bot.logger.info('Bunker code listener %s by %s', e, str(ctx.author))", "def toggle(self):\n self._state.is_on = not self._state.is_on\n self.send_command(Command.TOGGLE, [])", "def feedback(ctx, message):\n client = ctx.obj[\"client\"]\n\n if len(message) > 0:\n message = \" \".join(message)\n else:\n message = click.edit(\n text=\"Type your message here. 
\" + \"Save and exit to send, or just exit to abort.\",\n require_save=True,\n )\n if not message:\n click.echo(\"Aborted.\")\n else:\n click.echo(\"Posting feedback to the Spell team\" + ellipses(ctx.obj[\"utf8\"]))\n with api_client_exception_handler():\n logger.info(\"Sending feedback\")\n client.post_feedback(message)\n click.echo(\n \"Post complete. Thanks so much for your feedback. We'll look into it right away!\"\n )", "def grabFeedback(self, message): #$NON-NLS-1$\r", "def toggle(self) -> None:\n ...", "def start_stop_feedback(self):\n\n if self.pb_start_statistics.text() == \"Statistics Accum On\":\n return 0\n if self.mi_standard_fb is not None and self.mi_standard_fb.is_running():\n self.error_box(\"Standard FeedBack is running!\")\n logger.info(\"start_stop_feedback: St.FB is running\")\n return 0\n\n delay = self.sb_feedback_rep.value()*1000\n if self.pb_start_feedback.text() == \"Stop Feedback\":\n self.stop_feedback()\n elif self.pb_start_feedback.text() == \"Start Feedback\":\n self.feedback_timer.start(delay)\n logger.info(\"Start Feedback\")\n self.pb_start_feedback.setText(\"Stop Feedback\")\n self.pb_start_feedback.setStyleSheet(\"color: red\")\n else:\n logger.warning(\"start_stop_feedback: To early\")", "async def _toggle(self, ctx, id: int = None):\n if id is None:\n id = ctx.channel.id\n if id in self.etrigs['channels']:\n self.etrigs['channels'].remove(id)\n else:\n self.etrigs['channels'].append(id)\n self.write_config()\n await ctx.message.add_reaction('\\u2705')", "def pfeedback(self, msg):\n if not self.quiet:\n if self.feedback_to_output:\n self.poutput(msg)\n else:\n print(msg)", "def toggle(self):", "def toggle(self) -> None:", "def toggle(self) -> None:", "def __editToggleComment(self):\n self.activeWindow().toggleCommentBlock()", "def toggle(self):\n if self.is_enabled:\n self.disable()\n else:\n self.enable()", "async def toggle(self, ctx):\r\n serverid = ctx.message.server.id\r\n if self.adkillr[serverid]['toggle'] is True:\r\n self.adkillr[serverid]['toggle'] = False\r\n e = discord.Embed(description='**AntiAdv is now disabled.**')\r\n await self.bot.say(embed=e)\r\n elif self.adkillr[serverid]['toggle'] is False:\r\n self.adkillr[serverid]['toggle'] = True\r\n e = discord.Embed(description='**AntiAdv is now enabled.**')\r\n await self.bot.say(embed=e)\r\n dataIO.save_json(\"data/adkillr/adkillr.json\", self.adkillr)", "def _stop_loop_feedback(self): # Connect to Stop-button clicked()\n if self._timerId_feedback is not None:\n self.killTimer(self._timerId_feedback)\n self._generator_feedback = None\n self._timerId_feedback = None\n self.fbtn.setIcon(self.style().standardIcon(QtWidgets.QStyle.SP_MediaPlay))\n self.qbpm.feedback_on = False", "def receive_feedback(self, winner):\r\n pass", "def receive_feedback(self, winner):\r\n pass", "def pfeedback(self, msg):\n if not self.quiet:\n if self.feedback_to_output:\n self.poutput(msg)\n else:\n sys.stderr.write(\"{}\\n\".format(msg))", "def bulb_toggle():\n tx = zb_explicit_command\n tx[\"dest_addr_long\"] = GE_LINK_BULB_MAC\n tx[\"cluster\"] = CLUSTER_A\n tx[\"data\"] = DATA_TOGGLE\n response = zb.Send(tx)", "def getFeedback(self):\n return self.feedback", "def send_feedback(piece_id):\n form = FeedbackForm()\n piece = mongo.db.project_pieces.find_one_or_404({'_id': ObjectId(piece_id)})\n \n return render_template('pages/feedback.html', form=form, piece=piece, title='Feedback', legend='Send feedback')" ]
[ "0.653941", "0.6109054", "0.60173434", "0.58298653", "0.58292776", "0.57932585", "0.57298297", "0.563823", "0.5619917", "0.56008965", "0.55841565", "0.5564902", "0.5460401", "0.5391467", "0.5353717", "0.533838", "0.53296936", "0.53205925", "0.5314621", "0.5314621", "0.52678615", "0.52345157", "0.5227932", "0.51939225", "0.51831764", "0.51831764", "0.51822954", "0.518224", "0.5158106", "0.5137194" ]
0.7789733
0
Main feedback loop. Calculates deviation of the vertical QBPM position from target value and moves DCM pitch accordingly. Generator for Qt timer method.
def _set_feedback_loop(self):
    while True:
        mono = self.get_mono()
        interval = int(self.qbpm.filter / 20)
        if self.qbpm.log_arrays['avgcurr_log'][-1] < self.feedback_threshold:
            print('intensity too low.')
            self._stop_loop_feedback()
        # current_pos = self.qbpm.log_arrays['posz_filter_log'][-1]
        current_pos = self.qbpm.log_arrays['posx_filter_log'][-1]
        # target = self.qbpm.posz_target
        target = self.qbpm.posx_target
        if mono == "dcm":
            corr_factor = 0.2
        if mono == "dmm":
            corr_factor = 0.2
        bandwidth = 0.003 * float(self.qbpm.sensitivity/100)
        if not ((target - bandwidth) < current_pos < (target + bandwidth)):
            corr_angle = -((current_pos - target) * corr_factor)/self.qbpm.distance
            if self.cycle == interval:
                print('Moving pitch: {}'.format(corr_angle))
                dcm_curr_pitchpos = self.dcm_pitch_tserver.Position
                target_pitchpos = dcm_curr_pitchpos + corr_angle
                if not self.simulate_feedback:
                    if mono == "dcm":
                        self.dcm_pitch_tserver.write_attribute('Position', target_pitchpos)
                    if mono == "dmm":
                        self.dmm_x2rot_tserver.write_attribute('Position', target_pitchpos)
                self.last_corr_angle = corr_angle
                self.feedback_time = datetime.datetime.now()
                self.cycle = 0
        self.cycle = 0 if self.cycle >= interval else self.cycle + 1
        self.set_x2pitchlabel()
        yield
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _start_loop_feedback(self):\n self._stop_loop_feedback() # Stop any existing timer\n self._generator_feedback = self._set_feedback_loop() # Start the loop\n self._timerId_feedback = self.startTimer(0) # This is the idle timer\n self.fbtn.setIcon(self.style().standardIcon(QtWidgets.QStyle.SP_MediaPause))\n # tell qbpm class that feedback is on\n self.qbpm.feedback_on = True\n self.posx_target = self.qbpm.log_arrays['posx_target_log'][-1]\n self.posz_target = self.qbpm.log_arrays['posz_target_log'][-1]\n self.avgcurr_target = self.qbpm.log_arrays['avgcurr_target_log'][-1]\n self.qbpm.posx_target = self.posx_target\n self.qbpm.posz_target = self.posz_target\n self.qbpm.avgcurr_target = self.avgcurr_target\n self.dcm_bragg_angle = self.dcm_bragg_tserver.Position\n self.dmm_x1z_position = self.dmm_x1z_tserver.Position", "def update(self, feedback_value):\r\n error = self.SetPoint - feedback_value\r\n if error < 180 :\r\n error = error\r\n else :\r\n error = error - 360\r\n\r\n self.current_time = time.time()\r\n delta_time = self.current_time - self.last_time\r\n delta_error = error - self.last_error\r\n\r\n if (delta_time >= self.sample_time):\r\n self.PTerm = self.Kp * error\r\n self.ITerm += error * delta_time\r\n\r\n if (self.ITerm < -self.windup_guard):\r\n self.ITerm = -self.windup_guard\r\n elif (self.ITerm > self.windup_guard):\r\n self.ITerm = self.windup_guard\r\n\r\n self.DTerm = 0.0\r\n if delta_time > 0:\r\n self.DTerm = delta_error / delta_time\r\n\r\n # Remember last time and last error for next calculation\r\n self.last_time = self.current_time\r\n self.last_error = error\r\n\r\n self.output = self.PTerm + (self.Ki * self.ITerm) + (self.Kd * self.DTerm)\r\n return self.output", "def measure(self):\n if(self.c1):\n self.c1.destroy()\n self.c1=None\n if(self.checkbcclock(0)):\n output=self.vb.io.execute(self.cmd,log=\"out\",applout=\"<>\")\n print 'output=',output\n if output[len(output)-1] != '0':\n self.vb.io.write('Error in measurephase.c')\n xy=self.xy(output)\n max=self.finddelay(xy)\n self.c1=Graph(self.f1,x0=0.,y0=0.,xgraph=32.,nxtick=8,\n ygraph=260.,nytick=13)\n self.c1.plot(xy,'red')\n self.c1.xlabel(text='Delay [ns]')\n self.c1.ylabel(text=self.ytitle)\n self.c1.pack()\n self.en.setEntry(str(max))\n self.c1.update_idletasks()\n self.saveauto()", "def __init__(self, simulate_feedback=False):\n super(QbpmMonitor, self).__init__()\n\n self.sources = {\n \"QBPM1 OH\" : Qbpm('hzgpp05vme0:10000/p05/i404/exp.01', 2),\n \"QBPM2 OH\" : Qbpm('hzgpp05vme0:10000/p05/i404/exp.02', 7),\n \"QBPM EH2\" : Qbpm('hzgpp05vme2:10000/p05/i404/eh2.01', 30)\n }\n default_source = \"QBPM2 OH\"\n self.set_source(default_source)\n self.title = self.qbpm.address\n self.posx_target = 0\n self.posz_target = 0\n self.avgcurr_target = 0\n self.qbpm.frequency = 5.0 # in Hz\n self.qbpm.backlog = 120 # in s\n self.polling = False\n self._generator_poll = None\n self._timerId_poll = None\n self.feedback = False\n self.feedback_threshold = 5E-9\n self._generator_feedback = None\n self._timerId_feedback = None\n self.last_corr_angle = 0\n self.feedback_time = datetime.datetime.now()\n self.dcm_bragg_tserver = tango.DeviceProxy('hzgpp05vme0:10000/dcm_bragg')\n self.dcm_bragg_angle = self.dcm_bragg_tserver.Position\n self.dcm_pitch_tserver = tango.DeviceProxy('hzgpp05vme0:10000/dcm_xtal2_pitch')\n self.dcm_energy_tserver = tango.DeviceProxy('hzgpp05vme0:10000/dcm_energy')\n self.dmm_x1rot_tserver = tango.DeviceProxy('hzgpp05vme0:10000/dmm_x1rot')\n self.dmm_bragg_angle = 
self.dcm_bragg_tserver.Position\n self.dmm_x2rot_tserver = tango.DeviceProxy('hzgpp05vme0:10000/dmm_x2rot')\n self.dmm_x1z_tserver = tango.DeviceProxy('hzgpp05vme0:10000/dmm_x1z')\n self.dmm_x1z_position = self.dmm_x1z_tserver.Position\n self.dmm_x2z_tserver = tango.DeviceProxy('hzgpp05vme0:10000/dmm_x2z')\n self.dmm_x2y_tserver = tango.DeviceProxy('hzgpp05vme0:10000/dmm_x2y') \n self.beamstop = tango.DeviceProxy('hzgpp05vme0:10000/HASYLAB/Petra3_P05vil.CDI.SRV/BST')\n self.undulator = tango.DeviceProxy('hzgpp05vme0:10000/p05/undulator/1')\n self.get_mono()\n\n self.heartbeat = time.time()\n self.feedback_file = '/tmp/qbpmfeedback.run'\n if os.path.isfile(self.feedback_file):\n os.remove(self.feedback_file)\n self.cycle = 0\n self.feedback_triggered = False\n self.simulate_feedback = simulate_feedback\n self.dcm_step_backlash = self.dcm_pitch_tserver.read_attribute('StepBacklash').value\n\n ################################################################################################################\n # initUI\n\n # labels\n self.source_label = QtGui.QLabel(\"source\")\n self.poll_label = QtGui.QLabel(\"poll\")\n self.feedback_label = QtGui.QLabel(\"feedback\")\n self.ll_label = QtGui.QLabel(\"backlog (s)\")\n self.freq_label = QtGui.QLabel(\"frequency\")\n self.sensitivity_label = QtGui.QLabel(\"sensitivity\")\n self.filter_label = QtGui.QLabel(\"lowpass filter\")\n self.log_label = QtGui.QLabel(\"log to file\")\n self.pitch_label = QtGui.QLabel(\"0\")\n self.set_x2pitchlabel()\n # QBOM source Combobox\n self.scbox = QtGui.QComboBox(self)\n self.scbox.addItem(\"QBPM1 OH\") # Index 0\n self.scbox.addItem(\"QBPM2 OH\") # Index 1\n self.scbox.addItem(\"QBPM EH2\") # index 2\n self.scbox.setCurrentIndex(1) # Check if this value is consistent with default source above!\n self.scbox.activated[str].connect(self.set_source)\n # poll button\n self.rbtn = QtGui.QPushButton(self)\n self.rbtn.clicked.connect(self.toggle_polling)\n self.rbtn.resize(self.rbtn.sizeHint())\n self.rbtn.setIcon(self.style().standardIcon(QtWidgets.QStyle.SP_MediaPlay))\n # feedback button\n self.fbtn = QtGui.QPushButton(self)\n self.fbtn.clicked.connect(self.toggle_feedback)\n self.fbtn.resize(self.fbtn.sizeHint())\n self.fbtn.setIcon(self.style().standardIcon(QtWidgets.QStyle.SP_MediaPlay))\n # reset button\n reset_btn = QtGui.QPushButton('Reset', self)\n reset_btn.clicked.connect(self.qbpm.reset_logs)\n reset_btn.resize(reset_btn.sizeHint())\n # backlog text field\n self.lltext = QtGui.QLineEdit(str(self.qbpm.backlog))\n self.lltext.setValidator(QtGui.QIntValidator())\n self.lltext.setMaxLength(6)\n self.lltext.returnPressed.connect(self.change_backlog)\n # frequency text field\n self.ftext = QtGui.QLineEdit(str(self.qbpm.frequency))\n self.ftext.setValidator(QtGui.QDoubleValidator())\n self.ftext.setMaxLength(6)\n self.ftext.returnPressed.connect(self.change_frequency)\n # sensititvity slider\n self.sslider = QtGui.QSlider(self)\n self.sslider.setOrientation(QtCore.Qt.Horizontal)\n self.sslider.setMinimum(1)\n self.sslider.setMaximum(100)\n self.sslider.setTickPosition(QtGui.QSlider.TicksBothSides)\n self.sslider.setTickInterval(10)\n self.sslider.setSingleStep(1)\n self.sslider.setValue(self.qbpm.sensitivity)\n self.sslider.valueChanged.connect(self._set_sensitivity)\n # filter slider\n self.fslider = QtGui.QSlider(self)\n self.fslider.setOrientation(QtCore.Qt.Horizontal)\n self.fslider.setMinimum(1)\n self.fslider.setMaximum(1000)\n self.fslider.setTickPosition(QtGui.QSlider.TicksBothSides)\n 
self.fslider.setTickInterval(100)\n self.sslider.setSingleStep(1)\n self.fslider.setValue(self.qbpm.filter)\n self.fslider.valueChanged.connect(self._set_filter)\n # log button\n self.lbutton = QtGui.QRadioButton(self)\n self.lbutton.setChecked(False)\n # quit button\n qbtn = QtGui.QPushButton('Quit', self)\n qbtn.clicked.connect(QtCore.QCoreApplication.instance().quit)\n qbtn.resize(qbtn.sizeHint())\n \n r, g, b, w = [255, 0, 0], [0, 255, 0], [0, 0, 255], [150, 150, 150]\n fill_color = pg.mkColor([0, 255, 0, 100])\n self.curves = {}\n log_pen = pg.mkPen(w, width=1, style=QtCore.Qt.SolidLine)\n avg_pen = pg.mkPen(r, width=3, style=QtCore.Qt.SolidLine)\n target_pen = pg.mkPen(g, width=1, style=QtCore.Qt.SolidLine)\n sensitivity_pen = pg.mkPen(fill_color, width=1, style=QtCore.Qt.SolidLine)\n fill_pen = pg.mkPen(fill_color, width=100, style=QtCore.Qt.SolidLine)\n petra_pen = pg.mkPen(w, width=3, style=QtCore.Qt.SolidLine)\n # define plot font\n font = QtGui.QFont()\n font.setPixelSize(16)\n # make PlotWidgets\n self.plot_main = pg.GraphicsLayoutWidget()\n self.avgcurr_timeaxis = TimeAxisItem(orientation='bottom')\n self.plot_avgcurr = self.plot_main.addPlot(title='avg. current', row=0, col=0,\n axisItems={'bottom': self.avgcurr_timeaxis})\n self.petracurr_timeaxis = TimeAxisItem(orientation='bottom')\n self.plot_petracurrent = self.plot_main.addPlot(title='PETRA beam current', row=0, col=1,\n axisItems={'bottom': self.petracurr_timeaxis})\n self.plot_main.nextRow()\n self.posx_timeaxis = TimeAxisItem(orientation='bottom')\n self.plot_posx = self.plot_main.addPlot(title='x-position', row=1, col=0,\n axisItems={'bottom': self.posx_timeaxis})\n self.posy_timeaxis = TimeAxisItem(orientation='bottom')\n self.plot_posz = self.plot_main.addPlot(title='z-position', row=1, col=1,\n axisItems={'bottom': self.posy_timeaxis})\n # assign qbpm data to styles and PlotWidgets\n styles = {'avgcurr_log': (self.plot_avgcurr, log_pen),\n 'avgcurr_filter_log': (self.plot_avgcurr, avg_pen),\n 'avgcurr_target_log': (self.plot_avgcurr, target_pen),\n 'posx_log': (self.plot_posx, log_pen),\n 'posx_filter_log': (self.plot_posx, avg_pen),\n 'posx_target_log': (self.plot_posx, target_pen),\n 'posz_log': (self.plot_posz, log_pen),\n 'posz_filter_log': (self.plot_posz, avg_pen),\n 'posz_target_log': (self.plot_posz, target_pen),\n 'posz_sens_low_log': (self.plot_posz, sensitivity_pen),\n 'posz_sens_high_log': (self.plot_posz, sensitivity_pen),\n 'petracurrent_log': (self.plot_petracurrent, petra_pen)\n }\n # plot curves\n for log_array, style in styles.items():\n # self.curves[key] = style[0].plot(self.qbpm.log_arrays[key], pen=style[1], symbol='o')\n self.curves[log_array] = style[0].plot(self.qbpm.log_time, self.qbpm.log_arrays[log_array], pen=style[1])\n # self.fill = pg.FillBetweenItem(curve1=self.curves['posz_sens_low_log'],\n # curve2=self.curves['posz_sens_high_log'], pen=fill_pen)\n # self.plot_posz.addItem(self.fill)\n # set axis properties\n for log_plot in [self.plot_avgcurr, self. 
plot_posx, self.plot_posz, self.plot_petracurrent]:\n log_plot.getAxis(\"bottom\").tickFont = font\n log_plot.getAxis(\"bottom\").setStyle(tickTextOffset=20)\n log_plot.getAxis(\"left\").tickFont = font\n log_plot.getAxis(\"left\").setStyle(tickTextOffset=20)\n log_plot.getAxis(\"left\").setWidth(100)\n log_plot.getAxis(\"bottom\").setGrid(100)\n log_plot.getAxis(\"left\").setGrid(100)\n\n # Create a grid layout to manage the widgets size and position\n layout = QtGui.QGridLayout()\n self.setLayout(layout)\n\n # Add widgets to the layout in their proper positions\n layout.addWidget(self.source_label, 0, 0)\n layout.addWidget(self.poll_label, 1, 0)\n layout.addWidget(self.feedback_label, 2, 0)\n layout.addWidget(self.ll_label, 4, 0)\n layout.addWidget(self.freq_label, 5, 0)\n layout.addWidget(self.sensitivity_label, 6, 0)\n layout.addWidget(self.filter_label, 7, 0)\n layout.addWidget(self.log_label, 8, 0)\n layout.addWidget(self.scbox, 0, 1)\n layout.addWidget(self.rbtn, 1, 1) # button goes in lower-left\n layout.addWidget(self.fbtn, 2, 1) # button goes in lower-left\n layout.addWidget(reset_btn, 3, 1) # button goes in lower-left\n layout.addWidget(self.lltext, 4, 1) # text edit goes in middle-left\n layout.addWidget(self.ftext, 5, 1) # text edit goes in middle-left\n layout.addWidget(self.sslider, 6, 1)\n layout.addWidget(self.fslider, 7, 1)\n layout.addWidget(self.lbutton, 8, 1)\n layout.addWidget(self.pitch_label, 9, 0, 1, 2) # button goes in lower-left\n# layout.addWidget(self.fb_step_label, 10, 0, 1, 2)\n# layout.addWidget(self.fb_time_label, 11, 0, 1, 2)\n layout.addWidget(qbtn, 10, 0, 1, 2) # button goes in lower-left\n layout.addWidget(self.plot_main, 0, 2, 11, 1)\n\n layout.setColumnStretch(0, 0.1)\n layout.setColumnStretch(1, 0.1)\n layout.setColumnStretch(2, 1)\n\n # Display the widget as a new window\n self.setWindowTitle(self.title)\n self.show()", "def update(self, feedback_value):\n\n self.current_time = time.time()\n delta_time = self.current_time - self.last_time\n\n if (delta_time >= self.sample_time):\n error = self.SetPoint - feedback_value\n delta_error = error - self.last_error\n self.PTerm = self.Kp * error # Set P to P coefficient * current error\n self.ITerm += error * delta_time # Set I to error * time since last set\n\n if (self.ITerm < -self.windup_guard): # Clamp I to the windup guard limits\n self.ITerm = -self.windup_guard\n elif (self.ITerm > self.windup_guard):\n self.ITerm = self.windup_guard\n\n self.DTerm = 0.0 # Reset D\n if delta_time > 0: # If time has elapsed\n self.DTerm = delta_error / delta_time # Difference in the error / difference in the time\n\n # Remember last time and last error for next calculation\n self.last_time = self.current_time\n self.last_error = error\n\n self.output = self.PTerm + (self.Ki * self.ITerm) + (self.Kd * self.DTerm)", "def run(self):\n\n try:\n while not self._exit:\n # Update the time varying value of the output.\n\n # The current time offset\n cur_time_offset = time.time() % self._time_period\n\n self._updating = True\n\n # If modulation is selected as FM\n if (self._mod_type == 2):\n # Getting the modulating input\n m_t = self.mod_ip[0].voltage - self.mod_ip[1].voltage\n\n freq = self._frequency + m_t\n if freq != 0:\n time_p = 1 / freq\n\n else:\n time_p = float(\"inf\")\n\n else:\n freq = self._frequency\n time_p = self._time_period\n\n # If sine wave\n if (self.type == 0):\n self._last_updated_time = cur_time_offset\n voltage = 0.5 * math.sin(\n 2 * 3.145926 * freq * cur_time_offset) + 0.5\n\n # If square wave\n 
elif (self.type == 1 or self.type == 4):\n self._last_updated_time = cur_time_offset\n voltage = 1 if (\n (cur_time_offset) < time_p /\n float(2)) else 0\n\n # If Ramp\n elif (self.type == 2):\n self._last_updated_time = cur_time_offset\n voltage = cur_time_offset / time_p\n\n # If triangular\n else:\n self._last_updated_time = cur_time_offset\n voltage = 2 * cur_time_offset / time_p if (\n (cur_time_offset) < time_p /\n float(2)) else (2 * (time_p - cur_time_offset) / time_p)\n\n if (self._mod_type == 1):\n m_t = self.mod_ip[0].voltage - self.mod_ip[1].voltage\n c_t = voltage * self._amplitude + self.offset\n voltage = (1 + m_t) * c_t\n voltage /= self._amplitude\n\n if (self.type != 4):\n voltage *= self._amplitude\n\n else:\n voltage *= 5.0 # TTL amplitude is constant at 5v\n\n self.outputs[0].voltage = voltage\n self.outputs[1].voltage = -self.offset\n\n self._updating = False\n time.sleep(self._sampling_time_interval)\n\n except Exception as e:\n return", "def update(self, current_time, feedback_value):\n error = self.SetPoint - feedback_value\n\n delta_time = (current_time - self.last_time) / current_time\n delta_error = (error - self.last_error) / error\n\n if (delta_time >= self.sample_time):\n self.PTerm = self.Kp * error\n self.ITerm += error * delta_time\n\n if (self.ITerm < -self.windup_guard):\n self.ITerm = -self.windup_guard\n elif (self.ITerm > self.windup_guard):\n self.ITerm = self.windup_guard\n\n self.DTerm = 0.0\n if delta_time > 0:\n self.DTerm = delta_error / delta_time\n\n # Remember last time and last error for next calculation\n self.last_time = current_time\n self.last_error = error\n\n # print(\"P=\", self.PTerm, \" I=\", self.ITerm, \" D=\", self.DTerm)\n self.output = self.PTerm + (self.Ki * self.ITerm) + (self.Kd * self.DTerm)", "def update(self, feedback_value, t=None):\n\t\tif t is None: t = datetime.now()\n\n\t\tself.last_time = self.curr_time\n\t\tself.last_input = self.curr_input\n\t\tself.last_error = self.curr_error\n\t\tself.curr_time = t\n\t\tself.curr_input = feedback_value\n\t\tself.curr_error = self.SetPoint - feedback_value\n\n\t\tif self.invert_error: self.curr_error = -self.curr_error\n\t\tif self.error_in_degrees:\n\t\t\tself.curr_error %= 360\n\t\t\twhile self.curr_error < -180: self.curr_error += 360\n\t\t\twhile self.curr_error > 180: self.curr_error -= 360 # Back to (-180, 180] range\n\t\tself.curr_error = min(self.error_max, max(-self.error_max, self.curr_error))\n\n\t\tif self.clearing: # If the method clear() was just called, force delta_error=0 and delta_time=0 -> ITerm = DTerm = 0\n\t\t\tdelta_time = 0\n\t\t\tdelta_error = 0\n\t\t\tself.clearing = False\n\t\telse:\n\t\t\tif self.is_interval_const:\n\t\t\t\tself.curr_time = self.last_time + self.sample_time + timedelta(milliseconds=25) # Add 25ms so delta_time is never 0 (not even if sample_time=0), so that D component is not always 0\n\t\t\tdelta_time = (self.curr_time - self.last_time).total_seconds()\n\t\t\tdelta_error = self.curr_error - self.last_error\n\t\tif delta_time < self.sample_time: return self.output\n\n\t\tself.PTerm = self.Kp * self.curr_error\n\t\tself.ITerm = min(self.I_max, max(-self.I_max, self.ITerm + self.curr_error*delta_time))\n\t\tself.DTerm = 0.0 if delta_time<=0 else delta_error/delta_time\n\n\t\tself.output = self.out_offs + min(self.out_max, max(-self.out_max, self.PTerm + (self.Ki*self.ITerm) + (self.Kd*self.DTerm)))\n\t\treturn self.output", "def update(self, current):\n\n self.times[self.index] = time.time()\n\n self.current = current\n\n # for circular 
PID, keep values within [-pi, pi]\n if self.circular:\n while self.current > 2*math.pi:\n self.current = self.current - 2*math.pi\n while self.current < 0:\n self.current = self.current + 2*math.pi\n\n # COMPUTE PROPORTIONAL\n\n self.error_p = self.__target - self.current\n\n # for circular PID, keep error values within [-pi, pi]\n if self.circular: \n while self.error_p > math.pi:\n self.error_p = self.error_p - 2*math.pi\n while self.error_p < -math.pi:\n self.error_p = self.error_p + 2*math.pi\n\n self.errors[self.index] = self.error_p\n if callable(self.debug_callback):\n self.debug_callback(self.errors)\n\n # COMPUTE INTEGRAL\n\n # time step here is only the diff between current and past sample\n time_step = self.times[self.index] - self.times[(self.index - 1) % self.samples]\n # impose upper bound on time step (to avoid jump from 0 to unix time)\n time_step = min(time_step, 0.1)\n self.error_i += self.error_p * time_step\n self.error_i = max(min(self.error_i, self.I_LIMIT), -self.I_LIMIT)\n\n # COMPUTE DIFFERENTIAL\n\n # time_step here is over all self.samples=5 samples\n time_step = self.times[self.index] - self.times[(self.index + 1) % self.samples]\n # impose lower bound on time step (to avoid divide by zero error)\n time_step = max(time_step, 0.001)\n self.error_d = (self.errors[self.index] \\\n - self.errors[(self.index + 1) % self.samples]) \\\n / (time_step)\n\n # increment index for next irritation\n self.index = (self.index + 1) % self.samples\n\n # COMPUTE CORRECTION\n\n correction = self.KP * self.error_p \\\n + self.KI * self.error_i \\\n + self.KD * self.error_d\n\n # safety feature in case update() is not called frequently enough\n if time_step > 0.2:\n if callable(self.debug_callback):\n self.debug_callback(\"infrequent updates, returning 0\")\n return 0\n\n if callable(self.debug_callback):\n self.debug_callback(\"target = {:2.4f} current = {:2.4f}\".format( \\\n self.__target, self.current))\n self.debug_callback(\"errors = \" + str(self.errors))\n self.debug_callback(\"e = {:2.4f} e_i = {:2.4f} e_d = {:2.4f} corr = {:2.4f}\".format( \\\n self.error_p, self.error_i, self.error_d, correction))\n\n return correction", "def run_loop_measurement(t=0.5, name='test', loops=4, pump_t=180, total_t=600, jump_x=10):\n\n incident_angles = [0.1, 0.4]\n waxs_arc = [20, 0]\n user = \"TP\"\n\n condition = (\n ( -1 < waxs.arc.position )\n and ( waxs.arc.position < 1 )\n and (waxs_arc[0] == 20)\n )\n\n if condition:\n waxs_arc = waxs_arc[::-1]\n \n ranges = { 0.1 : [-16, 16, 33],\n 0.4 : [-25, 25, 51],\n }\n\n try:\n ai0 = RE.md['ai_0']\n except:\n yield from bp.count([])\n ai0 = db[-1].start['ai_0']\n print('Failed to acces RE.md')\n print(f'\\n\\nSample flat at theta = {ai0}')\n \n proposal_id('2023_2', '311564_Pettersson')\n #det_exposure_time(t, t)\n \n t_initial = time.time()\n\n for i in range(loops):\n t_start = time.time()\n print('Cycle number',i+1,'started at', (t_start - t_initial)/60)\n\n # Wait initial time for pumping to finish\n print(f'Start pumping now, going to wait for {pump_t} s\\n')\n while (time.time() - t_start) < pump_t:\n print(f'Pumping time: {(time.time() - t_start):.1f} s')\n yield from bps.sleep(10)\n\n # Go over SAXS and WAXS\n t_measurement = ( time.time() - t_initial ) / 60\n for wa in waxs_arc:\n yield from bps.mv(waxs, wa)\n dets = [pil900KW] if waxs.arc.position < 15 else [pil1M, pil900KW]\n\n for ai in incident_angles:\n yield from bps.mv(piezo.th, ai0 + ai)\n yield from bps.mvr(piezo.x, - jump_x)\n\n t2 = 2 * t if ai == 0.4 else t\n 
det_exposure_time(t2, t2)\n\n try:\n y_range = ranges[ai]\n except:\n y_range = [-10, 10, 11]\n \n sample_name = f'{name}{get_scan_md()}_time{t_measurement:.1f}_ai{ai}'\n sample_id(user_name=user, sample_name=sample_name)\n print(f\"\\n\\n\\n\\t=== Sample: {sample_name} ===\")\n yield from bp.rel_scan(dets, piezo.y, *y_range, md=dict(ai=ai))\n \n yield from bps.mv(waxs, waxs_arc[0],\n piezo.th, ai0)\n\n # Wait until the total loop time passes\n if i + 1 < loops:\n print(f'Waiting for the loop to last {total_t} s in total\\n')\n sleep_count = 0\n while (time.time() - t_start) < total_t:\n sleep_count += 1\n if (sleep_count % 10 == 0):\n print(f'Total time: {(time.time() - t_start):.1f} s')\n yield from bps.sleep(1)\n\n sample_id(user_name=\"test\", sample_name=\"test\")\n det_exposure_time(0.5, 0.5)", "def update(self, feedback_value, current_time=None):\n error = self.SetPoint - feedback_value\n\n self.current_time = current_time if current_time is not None else time.time()\n delta_time = self.current_time - self.last_time\n delta_error = error - self.last_error\n\n if (delta_time >= self.sample_time):\n self.PTerm = self.Kp * error\n self.ITerm += error * delta_time\n\n if (self.ITerm < -self.windup_guard):\n self.ITerm = -self.windup_guard\n elif (self.ITerm > self.windup_guard):\n self.ITerm = self.windup_guard\n\n self.DTerm = 0.0\n if delta_time > 0:\n self.DTerm = delta_error / delta_time\n\n # Remember last time and last error for next calculation\n self.last_time = self.current_time\n self.last_error = error\n\n self.output = self.PTerm + (self.Ki * self.ITerm) + (self.Kd * self.DTerm)", "def dd_plan(centrefreq, bandwidth, nfreqchan, timeres, lowDM, highDM, min_DM_step=0.02):\n\n DD_plan_array = []\n freqres = bandwidth / float(nfreqchan)\n previous_DM = lowDM\n\n #number of time samples smeared over before moving to next D_dm\n smear_fact = 3.\n\n #Loop until you've made a hit your range max\n D_DM = 0.\n downsample = 1\n while D_DM < round(highDM, 2):\n #calculate the DM where the current time resolution equals the\n #dispersion in a frequency channel (a bit of an overkill)\n\n #Dm smear over a frequency channel\n dm_smear = previous_DM * freqres * 8.3 * 10.**6 / centrefreq**3\n total_smear = math.sqrt(timeres**2 +\n dm_smear**2)\n\n\n D_DM = smear_fact * timeres * centrefreq**3 /\\\n (8.3 * 10.**6 * freqres)\n\n #difference in DM that will double the effective width (eq 6.4 of pulsar handbook)\n #TODO make this more robust\n #DM_step = math.sqrt( (2.*timeres)**2 - timeres**2 )/\\\n # (8.3 * 10**6 * bandwidth / centrefreq**3)\n DM_step = smear_fact * total_smear * centrefreq**3 /\\\n (8.3 * 10.**6 * 0.5 * bandwidth)\n\n\n #round to nearest 0.01\n DM_step = round(DM_step, 2)\n if DM_step < min_DM_step:\n #set DM to 0.01 as a zero DM doesn't make sense\n DM_step = min_DM_step\n\n\n if D_DM > highDM:\n #last one so range from to max\n D_DM = highDM\n #range from last to new\n D_DM = round(D_DM, 2)\n nDM_step = int((D_DM - previous_DM) / DM_step)\n if D_DM > lowDM:\n DD_plan_array.append([ previous_DM, D_DM, DM_step, nDM_step, timeres, downsample ])\n previous_DM = D_DM\n\n #Double time res to account for incoherent dedispersion\n timeres *= 2.\n downsample *= 2\n\n return DD_plan_array", "def update(self, state_value, current_time):\r\n\r\n\t\t# Calculate Error - if SetPoint > 0.0, then normalize error with respect to setpoint\r\n\t\tif self.SetPoint==0.0:\r\n\t\t\terror = state_value - self.SetPoint\r\n\t\telse:\r\n\t\t\terror = (state_value - self.SetPoint)/self.SetPoint 
\r\n\t\t\r\n\t\tself.current_time = current_time/1000.0 \t\t# Converting from msec to sec\r\n\t\tdelta_time = self.Ts\r\n\t\tdelta_error = error - self.last_error\r\n\r\n\t\tself.ITerm += error * delta_time\r\n\t\t\r\n\t\tself.DTerm = 0.0\r\n\t\tif delta_time > 0:\r\n\t\t\tself.DTerm = delta_error / delta_time\r\n\r\n\t\t# Remember last time and last error for next calculation\r\n\t\tself.last_time = self.current_time\r\n\t\tself.last_error = error\r\n\t\t\r\n\t\t# Calculate u(t) - catch potential division by zero error\r\n\t\ttry:\r\n\t\t\tu = self.Kp * (error + ((1.0/self.Ti) * self.ITerm) + (self.Td * self.DTerm))\r\n\t\texcept ZeroDivisionError:\r\n\t\t\tu = self.Kp * (error + (0.0 * self.ITerm) + (self.Td * self.DTerm))\r\n\t\t\t\t\r\n\t\t# Bound the controller output if necessary (between MinValue - MaxValue) \r\n\t\tif u > self.MaxValue:\r\n\t\t\tself.OutputValue = self.MaxValue\r\n\t\t\tself.ITerm -= error * delta_time \t# Back-calculate the integral error\r\n\t\telif u < self.MinValue:\r\n\t\t\tself.OutputValue = self.MinValue\r\n\t\t\tself.ITerm -= error * delta_time \t# Back-calculate the integral error\r\n\t\telse:\r\n\t\t\tself.OutputValue = u\r\n\t\t\r\n\t\t# Update the last output value\r\n\t\tself.last_OutputValue = self.OutputValue\r\n\t\t\r\n\t\t# Record state, error, y(t), and sample time values\r\n\t\tself.state_history.append(state_value)\r\n\t\tself.error_history.append(error)\r\n\t\tself.output_history.append(self.OutputValue)\r\n\t\tself.sample_times.append(current_time/1000)\t\t# Convert from msec to sec\r\n\t\t\r\n\t\t# Return controller output\r\n\t\treturn self.OutputValue", "def plot(self):\n\n # initialize outside the loop to avoid memory leak\n\n plot_a = None\n\n # initial plotting scales\n vmin = 0\n vmax = 0\n pmin = 0\n pmax = 0\n\n sr = self.dio.get_properties(self.channel)['samples_per_second']\n\n if self.control.verbose:\n print 'sample rate: ', sr\n\n # initial time info\n display_lag = 60\n b = self.dio.get_bounds(self.channel)\n\n if self.control.verbose:\n print 'data bounds: ', b\n\n if self.control.start:\n dtst0 = dateutil.parser.parse(self.control.start)\n st0 = (dtst0 - datetime.datetime(1970, 1,\n 1, tzinfo=pytz.utc)).total_seconds()\n st0 = int(st0 * sr)\n else:\n st0 = int(b[0])\n\n if self.control.end:\n dtst0 = dateutil.parser.parse(self.control.end)\n et0 = (dtst0 - datetime.datetime(1970, 1,\n 1, tzinfo=pytz.utc)).total_seconds()\n et0 = int(et0 * sr)\n else:\n et0 = int(b[1])\n\n if self.control.verbose:\n\n print 'start sample st0: ', st0\n print 'end sample et0: ', et0\n\n blocks = self.control.bins * self.control.frames\n\n samples_per_stripe = self.control.num_fft * \\\n self.control.integration * self.control.decimation\n total_samples = blocks * samples_per_stripe\n\n if total_samples > (et0 - st0):\n print 'Insufficient samples for %d samples per stripe and %d blocks between %ld and %ld' % (samples_per_stripe, blocks, st0, et0)\n return\n\n stripe_stride = (et0 - st0) / blocks\n\n bin_stride = stripe_stride / self.control.bins\n\n start_sample = st0\n\n print 'first ', start_sample\n\n # get metadata\n # this could be done better to ensure we catch frequency or sample rate\n # changes\n mdt = self.dio.read_metadata(st0, et0, self.channel)\n try:\n md = mdt[mdt.keys()[0]]\n cfreq = md['center_frequencies'].ravel()[self.sub_channel]\n except (IndexError, KeyError):\n cfreq = 0.0\n\n if self.control.verbose:\n print 'processing info : ', self.control.frames, self.control.bins, samples_per_stripe, bin_stride\n\n for p in 
numpy.arange(self.control.frames):\n sti_psd_data = numpy.zeros(\n [self.control.num_fft, self.control.bins], numpy.float)\n sti_times = numpy.zeros([self.control.bins], numpy.complex128)\n\n for b in numpy.arange(self.control.bins):\n\n if self.control.verbose:\n print 'read vector :', self.channel, start_sample, samples_per_stripe\n\n d_vec = self.dio.read_vector(\n start_sample, samples_per_stripe, self.channel)\n data = d_vec[:, self.sub_channel]\n\n if self.control.decimation > 1:\n data = scipy.signal.decimate(data, self.control.decimation)\n sample_freq = sr / self.control.decimation\n else:\n sample_freq = sr\n\n if self.control.mean:\n detrend_fn = matplotlib.mlab.detrend_mean\n else:\n detrend_fn = matplotlib.mlab.detrend_none\n\n try:\n psd_data, freq_axis = matplotlib.mlab.psd(\n data, NFFT=self.control.num_fft, Fs=float(sample_freq), detrend=detrend_fn, scale_by_freq=False)\n except:\n traceback.print_exc(file=sys.stdout)\n\n sti_psd_data[:, b] = numpy.real(\n 10.0 * numpy.log10(numpy.abs(psd_data) + 1E-12))\n\n sti_times[b] = start_sample / sr\n\n start_sample += stripe_stride\n\n # Now Plot the Data\n ax = self.subplots[p]\n\n # determine image x-y extent\n extent = (\n 0,\n self.control.bins,\n numpy.min(freq_axis) / 1e3,\n numpy.max(freq_axis) / 1e3,\n )\n\n # determine image color extent in log scale units\n Pss = sti_psd_data\n vmin = numpy.real(numpy.median(Pss) - 6.0)\n vmax = numpy.real(numpy.median(\n Pss) + (numpy.max(Pss) - numpy.median(Pss)) * 0.61803398875 + 50.0)\n\n if self.control.zaxis:\n vmin = int(string.split(self.control.zaxis, ':')[0])\n vmax = int(string.split(self.control.zaxis, ':')[1])\n else:\n vmin = numpy.real(numpy.median(Pss) - 6.0)\n vmax = numpy.real(numpy.median(\n Pss) + (numpy.max(Pss) - numpy.median(Pss)) * 0.61803398875 + 50.0)\n\n im = ax.imshow(sti_psd_data, cmap='jet', origin='lower', extent=extent,\n interpolation='nearest', vmin=vmin, vmax=vmax, aspect='auto')\n\n ax.set_ylabel('f (kHz)', fontsize=8)\n\n # plot dates\n\n tick_spacing = numpy.arange(\n self.control.bins / 8, self.control.bins, self.control.bins / 8)\n ax.set_xticks(tick_spacing)\n tick_labels = []\n\n for s in tick_spacing:\n tick_time = sti_times[s]\n\n if tick_time == 0:\n tick_string = ''\n else:\n gm_tick_time = time.gmtime(numpy.real(tick_time))\n tick_string = '%02d:%02d:%02d' % (\n gm_tick_time[3], gm_tick_time[4], gm_tick_time[5])\n tick_labels.append(tick_string)\n\n ax.set_xticklabels(tick_labels)\n\n # set the font sizes\n tl = ax.get_xticklabels()\n\n for tk in tl:\n tk.set_size(8)\n del tl\n\n tl = ax.get_yticklabels()\n\n for tk in tl:\n tk.set_size(8)\n del tl\n\n print 'last ', start_sample\n\n # create a time stamp\n start_time = st0 / sr\n srt_time = time.gmtime(start_time)\n sub_second = int(round((start_time - int(start_time)) * 100))\n\n timestamp = \"%d-%02d-%02d %02d:%02d:%02d.%02d UT\" % (srt_time[0], srt_time[\n 1], srt_time[2], srt_time[3], srt_time[4], srt_time[5], sub_second)\n\n self.f.suptitle('%s %s %4.2f MHz (%s)' % (\n self.control.title, timestamp, cfreq / 1E6, self.control.path), fontsize=10)\n\n # ax.legend(fontsize=8)\n ax.set_xlabel('time (UTC)', fontsize=8)\n\n # fixup ticks\n\n tl = ax.get_xticklabels()\n for tk in tl:\n tk.set_size(8)\n del tl\n tl = ax.get_yticklabels()\n for tk in tl:\n tk.set_size(8)\n del tl\n\n self.gridspec.update()\n\n self.f.tight_layout()\n\n self.f.subplots_adjust(top=0.95, right=0.88)\n cax = self.f.add_axes([0.9, 0.12, 0.02, 0.80])\n self.f.colorbar(im, cax=cax)\n if self.control.outname:\n fname, 
ext = os.path.splitext(self.control.outname)\n if ext == '':\n ext = '.png'\n print \"Save plot as {}\".format(fname+ext)\n matplotlib.pyplot.savefig(fname+ext)\n if self.control.appear or not self.control.outname:\n print \"Show plot\"\n matplotlib.pyplot.show()", "def output():\n if(reset == 1):\n dataOut_decimated.next = 0\n maxPeriod.next = False\n maxValue.next = MIN_VALUE\n minValue.next = MAX_VALUE\n decimationSum.next = 0\n dataClkEdge.next = True\n else:\n if(dataClk == 1 and dataClkEdge == 1):\n dataClkEdge.next = False\n decimationSum[16:8].next = 0\n decimationSum[8:].next = dataIn\n maxValue.next = MIN_VALUE\n minValue.next = MAX_VALUE\n if(decimationRatio > 0):\n if(decimationStyle == 0):\n dataOut_decimated.next = dataIn\n elif(decimationStyle == 1):\n if(decimationRatio == 2):\n dataOut_decimated.next = buff[lfsr[1:]]\n elif(decimationRatio == 4):\n dataOut_decimated.next = buff[lfsr[2:]]\n elif(decimationRatio == 8):\n dataOut_decimated.next = buff[lfsr[3:]]\n elif(decimationRatio == 16):\n dataOut_decimated.next = buff[lfsr[4:]]\n elif(decimationRatio == 32):\n dataOut_decimated.next = buff[lfsr[5:]]\n elif(decimationRatio == 64):\n dataOut_decimated.next = buff[lfsr[6:]]\n elif(decimationRatio == 128):\n dataOut_decimated.next = buff[lfsr[7:]]\n elif(decimationRatio == 256):\n dataOut_decimated.next = buff[lfsr[8:]]\n elif(decimationStyle == 2):\n maxPeriod.next = not maxPeriod\n if(maxPeriod):\n dataOut_decimated.next = maxValue\n else:\n dataOut_decimated.next = minValue\n elif(decimationStyle == 3):\n if(decimationRatioBase == 1):\n dataOut_decimated.next = decimationSum[9:1]\n elif(decimationRatioBase == 2):\n dataOut_decimated.next = decimationSum[10:2]\n elif(decimationRatioBase == 3):\n dataOut_decimated.next = decimationSum[11:3]\n elif(decimationRatioBase == 4):\n dataOut_decimated.next = decimationSum[12:4]\n elif(decimationRatioBase == 5):\n dataOut_decimated.next = decimationSum[13:5]\n elif(decimationRatioBase == 6):\n dataOut_decimated.next = decimationSum[14:6]\n elif(decimationRatioBase == 7):\n dataOut_decimated.next = decimationSum[15:7]\n elif(decimationRatioBase == 8):\n dataOut_decimated.next = decimationSum[16:8]\n else:\n if(dataClk == 0):\n dataClkEdge.next = True\n decimationSum.next = decimationSum + concat(\"00000000\", dataIn)\n if(dataIn > maxValue):\n maxValue.next = dataIn\n if(dataIn < minValue):\n minValue.next = dataIn", "def _dsb_updater(self):\n self.dsb_sensor = W1ThermSensor()\n self.humi = '---'\n self.humi_change = 0\n while True:\n # Set previous values\n self.temp_previous = self.temp\n self.humi_previous = self.humi\n self.timelast = self.measurement_time\n\n self.temp = self.dsb_sensor.get_temperature()\n # Calculate change values\n self.temp_change = self.temp - self.temp_previous\n\n # Flag change\n if self.temp_change != 0:\n self.has_changed = True\n else:\n self.has_changed = False\n\n time.sleep(2)", "def _read_qbpm_loop(self):\n while True:\n self.qbpm.read_qbpm()\n self._plot_update()\n pitch_position = self.dcm_pitch_tserver.Position\n self.set_x2pitchlabel()\n if self.lbutton.isChecked():\n fname = 'qbpm_log.csv'\n if not os.path.isfile(fname):\n with open(fname, 'a') as f:\n f.write('timestamp qbpm_avgcurr qbpm_x qbpm_z pitch_position petra_curr\\n')\n with open(fname, 'a') as f:\n t = self.qbpm.log_time[-1]\n a = self.qbpm.log_arrays['avgcurr_log'][-1]\n x = self.qbpm.log_arrays['posx_log'][-1]\n z = self.qbpm.log_arrays['posz_log'][-1]\n pp = pitch_position\n p = self.qbpm.log_arrays['petracurrent_log'][-1]\n l = 
'{} {} {} {} {}\\n'.format(t, a, x ,z , pp, p)\n f.write(l)\n yield", "def run(self):\n # print(\"111\"+\"--- %s seconds ---\" % (time.time() ))\n err = self.params.tolerance + 1\n for iter_num in range(self.params.max_iter):\n if err <= self.params.tolerance:\n break\n # print(\"11\"+str(iter_num)+\"--- %s seconds ---\" % (time.time() - start_time))\n qprev = self.sigma2\n\n self._expectation_iter(iter_num)\n self._maximization_iter(iter_num)\n\n if self.sigma2 <= 0:\n self.sigma2 = self.params.tolerance / 10\n err = np.abs(self.sigma2 - qprev)\n\n if callable(self.callback):\n kwargs = {\n 'iteration': iter_num,\n 'error': err,\n 'X': self.X,\n 'Y': self.TY,\n 'W': self.W,\n 'P': self.P\n }\n self.callback(**kwargs)\n return self.TY", "def run(self):\n if self.camera.connected.value():\n self.settings.movie_on.update_value(True)\n \n \n num_of_chan=self.daq_ai.settings.num_of_chan.value()\n self.buffer = np.zeros((10000,num_of_chan+2), dtype=float)\n self.buffer[0:self.settings.tdelay.value(),3]=100;\n '''\n initialize position\n '''\n position = 0\n '''\n initialize number of water drops given\n '''\n total_drops=0\n self.settings.total_drops.update_value(total_drops)\n \n \n '''\n Decide whether to create HDF5 file or not\n '''\n # first, create a data file\n if self.settings['save_h5']:\n # if enabled will create an HDF5 file with the plotted data\n # first we create an H5 file (by default autosaved to app.settings['save_dir']\n # This stores all the hardware and app meta-data in the H5 file\n file_name_index=0\n file_name=os.path.join(self.app.settings.save_dir.value(),self.app.settings.sample.value())+'_'+str(file_name_index)+'.h5'\n while os.path.exists(file_name):\n file_name_index+=1\n file_name=os.path.join(self.app.settings.save_dir.value(),self.app.settings.sample.value())+'_'+str(file_name_index)+'.h5'\n self.h5file = h5_io.h5_base_file(app=self.app, measurement=self,fname = file_name)\n \n # create a measurement H5 group (folder) within self.h5file\n # This stores all the measurement meta-data in this group\n self.h5_group = h5_io.h5_create_measurement_group(measurement=self, h5group=self.h5file)\n \n # create an h5 dataset to store the data\n self.buffer_h5 = self.h5_group.create_dataset(name = 'buffer', \n shape = self.buffer.shape,\n dtype = self.buffer.dtype,\n maxshape=(None,self.buffer.shape[1]))\n \n # We use a try/finally block, so that if anything goes wrong during a measurement,\n # the finally block can clean things up, e.g. 
close the data file object.\n '''\n start actual protocol\n '''\n try:\n '''\n initialize counter ticks\n '''\n i = 0 #counter tick for loading buffer\n j = 0 #counter tick for saving hdf5 file\n self.k=0 #number of seconds saved\n water_tick=0 #\n step_size=self.daq_ai.settings.buffer_size.value()\n \n '''\n Start DAQ, Default at 1kHz\n '''\n self.daq_ai.start()\n \n # Will run forever until interrupt is called.\n '''\n Expand HDF5 buffer when necessary\n '''\n while not self.interrupt_measurement_called:\n i %= self.buffer.shape[0]\n if self.settings['save_h5']:\n if j>(self.buffer_h5.shape[0]-step_size):\n self.buffer_h5.resize((self.buffer_h5.shape[0]+self.buffer.shape[0],self.buffer.shape[1]))\n self.k +=10\n \n\n '''\n Update Progress Bar\n '''\n self.settings['progress'] = i * 100./self.buffer.shape[0]\n \n \n \n '''\n update water status\n '''\n if (water_tick<(self.settings.lick_interval.value()*1000)):\n water_tick+=1\n else:\n self.settings.water_reward.update_value(True)\n water_tick=0\n \n\n \n '''\n Generate a random odor\n '''\n #no addition\n \n \n \n '''\n Read DAQ sensor data(0:lick_left, 1:lick_right, 2:flowmeter)\n '''\n # Fills the buffer with sine wave readings from func_gen Hardware\n self.buffer[i:(i+step_size),0:num_of_chan] = self.daq_ai.read_data()\n\n lick_0 = (self.buffer[i,1]<4)\n lick_1 = (self.buffer[i,2]<4)\n self.buffer[i,1]=lick_0 #convert lick sensor into 0(no lick) and 1(lick)\n self.buffer[i,2]=lick_1\n# ask if the animal licked in this interval\n\n# print(self.buffer[i,0:1])\n lick = (lick_0 or lick_1)\n \n '''\n Decide whether water will be given, based on the status of reward and lick\n '''\n if self.settings.water_reward.value():\n if lick:\n if lick_0:\n side = 0\n else:\n side = 1\n self.water.give_water(side)\n self.settings.water_reward.update_value(False)\n \n '''\n save water given (5:If water given 6:water opened time)\n '''\n self.buffer[i,num_of_chan+side]=1\n #self.buffer[i,num_of_chan+2]=self.water.open_time[side].value()\n total_drops+=1\n self.settings.total_drops.update_value(total_drops)\n \n else:\n '''\n The mouse gets a timeout if it licks repetitively or hold the water port (when it is not suppose to lick)\n '''\n if lick:\n water_tick = 0\n '''\n Read and save Position and Speed at 100Hz(default) (3:position 4:speed)\n '''\n # to be implemented\n '''\n Read odor value from the odor generator, otherwise fill with clean air(default)\n '''\n \n '''\n write odor value to valve\n '''\n self.arduino_sol.write()\n '''\n write odor value to display (7:clean air 8:odor1 9:odor2 10:odor3)\n '''\n #to be implemented\n '''\n Save hdf5 file\n '''\n if self.settings['save_h5']:\n # if we are saving data to disk, copy data to H5 dataset\n self.buffer_h5[j:(j+step_size),:] = self.buffer[i:(i+step_size),:]\n # flush H5\n self.h5file.flush()\n \n \n # wait between readings.\n # We will use our sampling_period settings to define time\n #time.sleep(self.settings['sampling_period'])\n \n i += step_size\n j += step_size\n \n \n if self.interrupt_measurement_called:\n # Listen for interrupt_measurement_called flag.\n # This is critical to do, if you don't the measurement will\n # never stop.\n # The interrupt button is a polite request to the \n # Measurement thread. 
We must periodically check for\n # an interrupt request\n self.daq_ai.stop()\n break\n\n finally: \n if self.settings['save_h5']:\n # make sure to close the data file\n self.h5file.close()\n \n if self.camera.connected.value():\n self.settings.movie_on.update_value(False)", "def _PIDController__calculate(self): \n \n\t\tenabled = self.m_enabled\n\t\tpidInput = self.m_pidInput\n\t\tpidInput2 = self.source2\n\n\t\tif enabled:\n\n\t\t input = pidInput.PIDGet() - pidInput2.PIDGet()\n\n\t\t self.m_error = self.m_setpoint - input\n\t\t if self.m_continuous:\n\t\t \n\t\t if math.fabs(self.m_error) > (self.m_maximumInput - self.m_minimumInput) / 2:\n\t\t if self.m_error > 0:\n\t\t self.m_error = self.m_error - self.m_maximumInput + self.m_minimumInput\n\t\t else:\n\t\t self.m_error = self.m_error + self.m_maximumInput - self.m_minimumInput\n\n\t\t potentialIGain = (self.m_totalError + self.m_error) * self.m_I\n\t\t \n\t\t if potentialIGain < self.m_maximumOutput:\n\t\t if potentialIGain > self.m_minimumOutput:\n\t\t self.m_totalError += self.m_error\n\t\t else:\n\t\t self.m_totalError = self.m_minimumOutput / self.m_I\n\t\t else:\n\t\t self.m_totalError = self.m_maximumOutput / self.m_I\n\n\t\t self.m_result = self.m_P * self.m_error + self.m_I * self.m_totalError + self.m_D * (self.m_error - self.m_prevError)\n\t\t self.m_prevError = self.m_error\n\n\t\t if self.m_result > self.m_maximumOutput:\n\t\t self.m_result = self.m_maximumOutput\n\t\t elif self.m_result < self.m_minimumOutput:\n\t\t self.m_result = self.m_minimumOutput\n\n\t\t pidOutput = self.m_pidOutput\n\t\t result = self.m_result", "def Step(self, settings):\r\n\t\tsuper(Empty, self).Step(settings)\r\n\t\tself.__period_divider = 0\t\r\n\t\tfor i in self.q:\r\n\t\t\ti.Update()\r\n\t\t\tif i.IsTargetComplete():\r\n\t\t\t\ti.NextTarget()\r\n\t\t\ti.DrawTargets()\t\r\n\t\t\ti.DrawTarget()\r\n\t\t\t\t\r\n\t\t#~ self.renderer.DrawCircle(self.renderer.to_screen(self.q[0].GetTarget()),3,b2Color(0.25,0.5,0.99),1)\r\n\t\tself.Print(\"X: \"+str(round(self.q[0].GetTarget().x,1))+\" \"+\"Y: \"+str(round(self.q[0].GetTarget().y,1)))\r\n\t\tself.Print(\"dist: \"+str(round( sqrt(self.q[0].GetErr().x**2+self.q[0].GetErr().y**2),1)))\r\n\t\t#~ self.Print(\"co: \"+str(round( -self.q[0].x_pid.co/3.14*180,1)))\r\n\t\tif self.q[0].x_pid:\r\n\t\t\tself.Print(\"co: \"+str(round( -self.q[0].x_pid.value/3.14*180,1)))\r\n\t\t\r\n\t\tself.Print(\"vel: \"+str(round(self.q[0].GetVel(),1)))\r\n\t\tself.Print(\"kp: \"+str(self.kp)+\" ki: \"+str(self.ki)+\" kd: \"+str(self.kd))\r\n\t\tif self.center==1:\r\n\t\t\tself.viewCenter = ( (self.q[0].GetTarget().x+self.q[0].GetPos().x)/2, (self.q[0].GetTarget().y+self.q[0].GetPos().y)/2)\r\n\t\telif self.center==2:\r\n\t\t\tself.viewCenter = self.q[0].GetPos()\r\n\t\t# do stuff\r\n\t\t# Placed after the physics step, it will draw on top of physics objects\r", "def _update_feedback(self):\n #First read in the current voltage (power)\n #Read in numReadsPerCycle signals (arb) to average\n #TODO: allow user to select reads per signal\n currSignal = self._ai_client.get_ai_voltage(self._ai_channel, self.numReadsPerCycle, max_range=self.max_input_voltage)\n\n #Add new data to the pid\n self.pid.set_pv(np.atleast_1d(np.mean(currSignal)))\n\n #Now compute the new control value and update the AO\n self.pid.set_cv()\n self._curr_output_voltage = self._curr_output_voltage + self.pid.cv\n if self._curr_output_voltage < self.min_voltage:\n self._curr_output_voltage = self.min_voltage\n elif self._curr_output_voltage > self.max_voltage:\n 
self._curr_output_voltage = self.max_voltage\n\n\n #Finally updating the analog output\n\n #Do a final check to make sure that if you are in hardware control mode that the voltage control is still HIGH\n #This is to avoid the potential error if the voltage control is toggled low between the last call of _check_hardware_control\n #and update_feedback, whcih would mean that currSignal would be 0 (assuming a pulsed experiment), and causing a garbage\n #feedback which could be an issue in the next pulse.\n if (~self._under_hardware_control or self.ai_client.get_ai_voltage(self._hwc_ai_channel)[-1] > self._hwc_thresh):\n self._ao_client.set_ao_voltage(self._ao_channel, self._curr_output_voltage)", "def calculate_delay(self, wav_start, wav_finish, thr_start, thr_finish):\n\n w_s=self.find_nearest_wav(wav_start)\n w_f=self.find_nearest_wav(wav_finish)\n temp=self.pre_proc_data.loc[:,w_s]\n t_start = self.times[(temp.values>thr_start).argmax()]\n print(t_start)\n\n temp2=self.pre_proc_data.loc[:,w_f]\n dx=temp2.diff()\n dx_clean=dx.ewm(span = 50).mean()\n t_finish=self.times[np.min(np.where(dx_clean<thr_finish))]\n print(t_finish)\n\n plt.subplot(211)\n plt.plot(temp,label='{}nm'.format(wav_start))\n plt.axvline(t_finish,color='grey')\n plt.axvline(t_start,color='grey')\n plt.xlim(t_start-30,t_finish+30)\n plt.legend()\n\n plt.subplot(212)\n plt.plot(temp2,label='{}nm'.format(wav_finish))\n plt.axvline(t_finish,color='grey')\n plt.axvline(t_start,color='grey')\n plt.xlim(t_start-30,t_finish+30)\n plt.legend()\n\n plt.show()\n\n self.t_delay=np.round(t_finish-t_start,2)\n return np.round(t_finish-t_start,2)", "def v(self):\n\n # TODO This translation formula works, but needs simplified.\n\n # PWM duration can go from 0 to 4095 with 4095 representing max rpm\n# print(\"MuleBot.v MuleBot.dcMotorPWMDurationLeft:\", MuleBot.dcMotorPWMDurationLeft)\n speed_percentage = float(MuleBot.dcMotorPWMDurationLeft) / 4095.0\n# print(\"speed_percentage: \", speed_percentage)\n\n rpm = speed_percentage * self.motorMaxRPM\n# print(\"rpm: \", rpm)\n\n secondsPerMinute = 60\n revs_per_second = rpm / secondsPerMinute\n# print(\"--revs_per_second\", revs_per_second)\n\n inches_per_rev = 2.0 * math.pi * MuleBot.WHEEL_RADIUS\n INCHES_PER_METER = 39.3701\n meters_per_rev = inches_per_rev / INCHES_PER_METER\n# print(\"--meters_per_rev\", meters_per_rev)\n\n meters_per_second = meters_per_rev * revs_per_second\n\n# print(\"--meters_per_second: \", meters_per_second)\n return meters_per_second", "def main_loop(csd_profile, csd_seed, total_ele):\n csd_name = csd_profile.func_name\n print 'Using sources %s - Seed: %d ' % (csd_name, csd_seed)\n h = 10.\n\n #TrueCSD\n start_x, end_x, csd_res = [0.,1.,100] \n t_csd_x, true_csd = generate_csd_1D(csd_profile, csd_seed, \n start_x=start_x, \n end_x=end_x, \n res_x=csd_res)\n \n #Electrodes \n ele_res = int(total_ele) \n ele_lims = [0.10, 0.9]\n ele_pos, pots = electrode_config(ele_lims, ele_res, true_csd, t_csd_x, h)\n num_ele = ele_pos.shape[0]\n print 'Number of electrodes:', num_ele\n x_array_pots, true_pots = electrode_config(ele_lims, 100, true_csd, t_csd_x, h)\n\n #kCSD estimation\n gdX = 0.01\n x_lims = [0.,1.] 
#CSD estimation place\n tic = time.time() #time it\n k, est_csd, est_pot = do_kcsd(ele_pos, pots, h=h, gdx=gdX,\n xmin=x_lims[0], xmax=x_lims[1], n_src_init=300)\n toc = time.time() - tic\n\n #RMS of estimation - gives estimate of how good the reconstruction was\n chr_x, test_csd = generate_csd_1D(csd_profile, csd_seed,\n start_x=x_lims[0], end_x=x_lims[1], \n res_x=int((x_lims[1]-x_lims[0])/gdX))\n rms = np.linalg.norm(abs(test_csd - est_csd[:,0]))\n rms /= np.linalg.norm(test_csd)\n\n #Plots\n title =\"Lambda: %0.2E; R: %0.2f; CV_Error: %0.2E; RMS_Error: %0.2E; Time: %0.2f\" %(k.lambd, k.R, k.cv_error, rms, toc)\n make_plots(title, t_csd_x, true_csd, ele_pos, pots, k.estm_x, est_csd, est_pot, true_pots)\n return", "def _reward(self):\n # Clock reward -----------------------------------------------------------------\n A, B = self.get_von_mises(0.0, self.ratio, self.kappa)\n phi = self.phase / self.cycle_len\n #print('Cycles completed = ', self.cycle_complete)\n\n #print('A, B = ', (A,B))\n\n phi_FL = self.wrap(phi + self.theta_FL)\n phi_FR = self.wrap(phi + self.theta_FR)\n phi_RL = self.wrap(phi + self.theta_RL)\n phi_RR = self.wrap(phi + self.theta_RR)\n\n #print(phi_FL)\n #print(phi_FR)\n #print(phi_RL)\n #print(phi_RR)\n\n FL_swing = self.in_swing(A, B, phi_FL)\n FR_swing = self.in_swing(A, B, phi_FR)\n RL_swing = self.in_swing(A, B, phi_RL)\n RR_swing = self.in_swing(A, B, phi_RR)\n\n #print('Time since reset = ', self.rex.GetTimeSinceReset())\n #print('phase phi = ', phi)\n #print('FL swing = ', FL_swing)\n #print('FR swing = ', FR_swing)\n #print('RL swing = ', RL_swing)\n #print('RR swing = ', RR_swing)\n\n if FL_swing:\n c_swing_frc_FL = 1\n c_swing_spd_FL = 0\n else:\n c_swing_frc_FL = 0\n c_swing_spd_FL = 1\n\n if FR_swing:\n c_swing_frc_FR = 1\n c_swing_spd_FR = 0\n else:\n c_swing_frc_FR = 0\n c_swing_spd_FR = 1\n\n if RL_swing:\n c_swing_frc_RL = 1\n c_swing_spd_RL = 0\n else:\n c_swing_frc_RL = 0\n c_swing_spd_RL = 1\n\n if RR_swing:\n c_swing_frc_RR = 1\n c_swing_spd_RR = 0\n else:\n c_swing_frc_RR = 0\n c_swing_spd_RR = 1\n\n FL_foot_force, FR_foot_force, RL_foot_force, RR_foot_force = self.get_contact_forces()\n FL_vel, FR_vel, RL_vel, RR_vel = self.get_foot_velocities()\n\n FL_penalty = c_swing_frc_FL*FL_foot_force + c_swing_spd_FL*FL_vel\n FR_penalty = c_swing_frc_FR*FR_foot_force + c_swing_spd_FR*FR_vel\n RL_penalty = c_swing_frc_RL*RL_foot_force + c_swing_spd_RL*RL_vel\n RR_penalty = c_swing_frc_RR*RR_foot_force + c_swing_spd_RR*RR_vel\n\n foot_penalties = FL_penalty + FR_penalty + RL_penalty + RR_penalty\n \n # Deviation Penalties ----------------------------------------------------------\n # Base height\n base_height = self.rex.GetBasePosition()[-1]\n height_err = np.abs(base_height - self.height_des)\n \n if height_err < 0.02:\n height_err = 0\n\n # Speed \n vx, vy, _ = p.getBaseVelocity(bodyUniqueId=self.rex.quadruped)[0]\n vx = -vx # in rex, forward is the negative x direction\n x_vel_err = 4*np.abs(vx - self.speed) # higher emphasis on x velocity error\n y_vel_err = np.abs(vy - self.side_speed)\n\n # Orientation\n orient_curr = self.rex.GetBaseOrientation()\n orient_des = [0, 0, 0, 1] # not exact, but shouldn't be too far from this\n orient_err = 6 * (1 - np.inner(orient_curr, orient_des)**2 )\n\n shoulder_orient_des = [0, 0, 0, 1]\n FL_sh, FR_sh, RL_sh, RR_sh = self.get_shoulder_orientation()\n\n # quaternion similarity: 1 - <q1, q2>**2 == 0 when 100% similar\n # good when error < 0.01 (individually)\n # put HUGE penalty on this\n shoulder_err = 20 * ((1 - 
np.inner(shoulder_orient_des, FL_sh)**2) + \n (1 - np.inner(shoulder_orient_des, FR_sh)**2) +\n (1 - np.inner(shoulder_orient_des, RL_sh)**2) + \n (1 - np.inner(shoulder_orient_des, RR_sh)**2))\n\n # Energy Penalties --------------------------------------------------------------\n energy_penalty = np.abs(np.dot(self.rex.GetMotorTorques(),\n self.rex.GetMotorVelocities())) * self._time_step\n\n # Acceleration\n a_trans, a_rot = self.get_base_accelerations()\n accel_penalty = 0.15 * np.abs(a_trans.sum() + a_rot.sum())\n\n # need to encourage exploration: current issue --> Rex is stuck at origin\n # because positive rewards all the time\n # need lim error --> 0, reward > 0 \n\n beta = -0.75\n\n reward = beta + \\\n 0.200 * np.exp(-orient_err - shoulder_err) + \\\n 0.275 * np.exp(-foot_penalties) + \\\n 0.075 * np.exp(-height_err) + \\\n 0.250 * np.exp(-x_vel_err) + \\\n 0.100 * np.exp(-y_vel_err) + \\\n 0.075 * np.exp(-accel_penalty) + \\\n 0.025 * np.exp(-energy_penalty)\n\n\n return reward", "def work(self):\n while(True):\n debug_print = False\n if debug_print == True:\n start = time.time()\n\n flow = self.gauge.read_flow_from_dp()\n self.flw_q.put([time.time(), flow])\n\n if debug_print == True:\n flow_time = time.time()\n print(f\"Runtime - calc_flow: {1000 * (flow_time - start):.0f} ms\")\n\n pressure = self.gauge.read_pressure()\n self.prs_q.put([time.time(), pressure])\n\n if debug_print == True:\n pressure_time = time.time()\n print(f\"Runtime - read_pressure: {1000 * (pressure_time - flow_time):.0f} ms\")\n \n if debug_print == True:\n runtime = time.time() - start\n print(f\"Runtime - total: {1000 * runtime:.1f} ms\")\n print(f\"Frequency: {1 / runtime:.1f} Hz\")", "async def loop():\n # ArmDevice.storage.joints_pos = await get_positions() # Use this if encoders are wired up.\n # ArmDevice.storage.joints_pos = simulate_positions() # Use this for testing without position feedback.\n log.debug(\"command: {}\".format(ArmDevice.storage.command))\n ArmDevice.storage.controller.user_command(ArmDevice.storage.mode, *ArmDevice.storage.command)\n ArmDevice.storage.speeds = ArmDevice.storage.controller.update_duties(ArmDevice.storage.joints_pos)\n\n # publish speeds/duty cycles here\n log.debug(\"joints_pos: {}\".format(ArmDevice.storage.joints_pos))\n log.debug(\"speeds: {}\".format(ArmDevice.storage.speeds))\n await send_duties()", "def example(self):\n while self.check_end() == False:\n plt.pause(0.25)\n end = self.update_board(random.choice(self.get_actions()), True)", "def draw_samples(self):\n if self._integrator == 'HMC': \n self.momentum = torch.distributions.Normal(torch.zeros_like(self.parameters), torch.ones_like(self.parameters)).sample()\n start = time.time()\n if (self._integrator == 'RMHMC'): #torch has trouble differentiating through repeated eigenvalues\n self.jitters = self.jitter * torch.rand(self.parameters.shape[0])\n self.jitters[0] = 0.\n self.jitters[1] = 0.\n self.potential_ = self.get_potential()\n self.hamiltonian_ = self.get_hamiltonian()\n self.momentum = self.resample_momenta(init=True)\n self.momenta.append(self.momentum)\n if self.shadow:\n self.shadow_ = self.get_shadow()\n finished = 0\n counter = 0\n if self.verbose:\n for sample in range(self.n_samples):\n self.step()\n if self.degenerate:\n break\n finished += 1\n else:\n# for _ in tqdm(range(self.n_samples)):\n for sample in range(self.n_samples):\n self.step()\n if self.degenerate:\n break\n finished += 1\n counter += 1\n if counter > self.n_samples * 0.05:\n counter = 0\n 
print('('+str(int((sample+1)/self.n_samples*100))+'% complete)', int(self.accepted),'of', int(self.accepted + self.rejected), 'accepted', '('+str(int((self.accepted)/(self.accepted+self.rejected)*100))+'%)')\n total = float(self.accepted + self.rejected)\n end = time.time()\n if total >= self.n_samples:\n self.completed = True\n self.elapsed += end-start\n print('\\n', int(self.accepted), ' of ', int(self.accepted + self.rejected), ' samples accepted in', self.elapsed, ' seconds (', 100 * self.accepted/total,'%).')\n return None\n else:\n self.degenerates +=1\n self.find_mode()\n self.parameters = params_init + torch.randn(self.parameters.shape[0])/100\n self.reinitiate_samples()\n self.resample_momenta(init = True)\n return None" ]
[ "0.6415026", "0.6414976", "0.63855183", "0.6365378", "0.6352975", "0.63279885", "0.597451", "0.59414434", "0.59375376", "0.58521336", "0.57605845", "0.57082075", "0.56142", "0.55861205", "0.55427516", "0.55359817", "0.5520663", "0.5505469", "0.5461306", "0.5420455", "0.5379677", "0.5375805", "0.5368224", "0.535464", "0.53481436", "0.5344863", "0.53369766", "0.5321266", "0.53119797", "0.5296862" ]
0.67142516
0
Stops Qt timer method for feedback routine and switches Play button icon.
def _stop_loop_feedback(self): # Connect to Stop-button clicked() if self._timerId_feedback is not None: self.killTimer(self._timerId_feedback) self._generator_feedback = None self._timerId_feedback = None self.fbtn.setIcon(self.style().standardIcon(QtWidgets.QStyle.SP_MediaPlay)) self.qbpm.feedback_on = False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stop( self ):\n self.data_source.si.daqStop()\n self.timer.stop()\n \n #re-enable the play button\n self.play_button.setEnabled(True)\n self.stop_button.setEnabled(False)\n self.spinbox_timestep.setEnabled(True)", "def stop(self):\n self.setWindowTitle(self.name + ': stopped')\n self._timer.stop()", "def _stop_loop_poll(self): # Connect to Stop-button clicked()\n if self._timerId_poll is not None:\n self.killTimer(self._timerId_poll)\n self._generator_poll = None\n self._timerId_poll = None\n self.rbtn.setIcon(self.style().standardIcon(QtWidgets.QStyle.SP_MediaPlay))\n self.heartbeat = time.time()", "def stop_timer(self):\r\n self.countdownTimer.stop()", "def _set_mode_stopped(cls):\n cls._disconnect_buttons()\n\n cls.btn_startpause.setText(\"Start\")\n cls.btn_startpause.setIcon(QIcon.fromTheme(\"media-playback-start\"))\n cls.btn_startpause.setWhatsThis(\"Start a new timer.\")\n cls.btn_startpause.clicked.connect(cls.start)\n\n cls.btn_stopsave.setIcon(QIcon.fromTheme(None))\n cls.btn_stopsave.setText(\"Stopped\")\n cls.btn_stopsave.setWhatsThis(\"Timer is already stopped.\")\n cls.btn_stopsave.setEnabled(False)", "def stopButtonPressed(self):\n\n self.booleanStartButtonPressed = False # For RTLE's updateLabel function to check\n\n self.save_file = open(os.path.join(args.parent_img_path, self.save_file_name), \"a\")\n self.save_file.write(\"\\nStop Button Pressed\\n\")\n self.save_file.close()\n\n print(\"Stop button has been pressed!\")\n\n self.stopButton.setEnabled(False)\n self.startButton.setEnabled(True)\n self.reset.setEnabled(True)\n if self.showCTWM:\n self.setPointsCTWM.stopit() # stops the algorithm\n if self.showWHM:\n time.sleep(0.1)\n self.setPointsWHM.stopit()\n\n # self.save_file.close()", "def _set_mode_prompt_stop(cls):\n\n cls._disconnect_buttons()\n\n cls.btn_startpause.setText(\"Resume\")\n cls.btn_startpause.setIcon(QIcon.fromTheme(\"media-playback-start\"))\n cls.btn_startpause.setWhatsThis(\"Resume timer from current time.\")\n cls.btn_startpause.clicked.connect(cls.resume)\n\n cls.btn_stopsave.setText(\"Confirm Stop\")\n cls.btn_stopsave.setIcon(QIcon.fromTheme(\"media-playback-stop\"))\n cls.btn_stopsave.setWhatsThis(\n \"Stop timer. 
Timer must be stopped \" \"before you can save.\"\n )\n cls.btn_stopsave.clicked.connect(cls.stop)\n cls.btn_stopsave.setEnabled(True)", "def stopMeasurement_pmt(self):\r\n self.pmtTest.aboutToQuitHandler()", "def stop_timer(self):\n self.end_time = datetime.now()", "def slot_stop(self):\n\n self.thread.working = False", "def StopPreviewTimer(self):\r\n\r\n self_name = self.IsPaneMinimized()\r\n if not self_name:\r\n return\r\n\r\n manager = self.GetAuiManager() \r\n manager.StopPreviewTimer()", "def _control_stop(self):\n self.player.stop()", "def loop_stop(self):\n super(TimerLoop, self).loop_stop()\n self.timer.cancel()\n self.loop_confirm_stopped()", "def stop(self):\n self._stopped.set()\n if self._timer:\n self._timer.cancel()\n self._timer = None", "def _stop(self):\n self.display_end_message()", "def force_stop(self):\n self.timer.stop()\n QMetaObject.invokeMethod(self.video_player, \"stop\", Qt.QueuedConnection)\n self.video_playing = False\n self.stopped = True", "def stop(self):\r\n self.running = False", "def stop(self):\r\n self.running = False", "def stop(self):\n self.active = False", "def stop(self):\n\n self.active = False", "def stop(self):", "def stop(self):", "def stop_recording_video(self):\n self.view.stop_recording_video()\n self.recording_action.setText(\"Start recording\")\n self.recording_action.setIcon(QIcon.fromTheme(\"media-record\"))\n # self.recording_action.clicked.connect(self.start_recording_video)\n self.recording_action.triggered.connect(self.start_recording_video)\n self.recording_action.triggered.disconnect(self.stop_recording_video)", "def stop(self):\r\n pass", "def stop_button(self):\r\n self.set_val(\"display_move\") # Force action\r\n self.update_settings()\r\n self.is_action = True\r\n self.is_pause = True\r\n if self.paths_gen is None:\r\n raise SelectError(\"paths_gen connection has NOT been setup\")\r\n self.paths_gen.stop_gen()", "def stop():", "def stop():", "def stop():", "def stop():", "def Stop(self) :\n\t\t..." ]
[ "0.8010354", "0.74053127", "0.71998173", "0.7143514", "0.6953505", "0.6824744", "0.67185163", "0.67162687", "0.67016304", "0.66806024", "0.6617986", "0.6617031", "0.66150254", "0.6545374", "0.65144545", "0.6493466", "0.6477352", "0.6477352", "0.6474437", "0.646385", "0.6463124", "0.6463124", "0.64538383", "0.6446152", "0.6442201", "0.64260125", "0.64260125", "0.64260125", "0.64260125", "0.6420369" ]
0.7672223
1
Sets the QBPM source
def set_source(self, source): self.qbpm = self.sources[source] self.title = self.qbpm.address self.setWindowTitle(self.title)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_source(self, source):\n self.data['source'] = source", "def _set_source(self, source):\n if source != self._source:\n self._source = source\n self._channel = \"\"\n self._channel_name = \"\"\n self._is_forced_val = True\n self._forced_count = 0", "def set_source(self, source_name):\n self.source = source_name", "def set_flow_source(self, source):\n self._source = source", "def source(self, source):\n\n self._source = source", "def source(self, source):\n\n self._source = source", "def source(self, source):\n\n self._source = source", "def source(self, source):\n\n self._source = source", "def source(self, source):\n\n self._source = source", "def source(self, source):\n\n self._source = source", "def source(self, source):\n\n self._source = source", "def source(self, source: Source):\n self._source = source", "async def async_set_source(self, source):\n self._source = source\n #self.async_schedule_update_ha_state(True)", "def setSource(self, *args):\n return _libsbml.ExternalModelDefinition_setSource(self, *args)", "def set_data_source(self, source_id):\n self.data_source = source_id", "def setPIDSourceType(self, pidSource: PIDSourceType) -> None:\n self.pidSource = ...", "def price_source(self, price_source):\n\n self._price_source = price_source", "def setSourcePath(self, sourcePath):\n self.__sourcePath = sourcePath", "def set_source(source):\n if source not in calculators.keys():\n raise SourceNotImplementedException(source)\n global current_calculator\n current_calculator = calculators[source]", "def _set_source(source, context):\n if isinstance(source, (str, list, dict, Dataset)):\n return Source(source, context)\n elif isinstance(source, Source):\n return source\n else:\n raise ValueError('Wrong source')", "def source_instance(self, source_instance):\n self._source_instance = source_instance", "def src(self, src):\n\n self._src = src", "def source_of_published(self, source_of_published):\n\n self._source_of_published = source_of_published", "def source_id(self, source_id):\n\n self._source_id = source_id", "def source_id(self, source_id):\n\n self._source_id = source_id", "def data_source(self, data_source):\n\n self._data_source = data_source", "def source_domain(self, source_domain):\n\n self._source_domain = source_domain", "def set_source_file(self, source_file):\n self.set_attribute(\"source_file\", source_file)", "def _set_p_source(self, p_source, p_rec_div=1):\n self.p_source = p_source\n self.p_S_fin = (1-p_source)*self.p_sus\n self.p_rec_div = p_rec_div\n self.p_infect_fin = (1-p_source)* (1-self.p_sus)\n\n return True", "def source(self, source: str):\n if source is None:\n raise ValueError(\"Invalid value for `source`, must not be `None`\") # noqa: E501\n\n self._source = source" ]
[ "0.71029407", "0.69254607", "0.67641836", "0.6762444", "0.67098093", "0.67098093", "0.67098093", "0.67098093", "0.67098093", "0.67098093", "0.67098093", "0.6520315", "0.64509386", "0.63291866", "0.6263617", "0.61766976", "0.5899179", "0.5895596", "0.5881952", "0.5879499", "0.58672327", "0.58626693", "0.5803617", "0.5799356", "0.5799356", "0.57734865", "0.5758727", "0.5756458", "0.5743771", "0.5664528" ]
0.7650714
0
Checks if feedback trigger file exists. If the file exists, feedback will be toggled and the file will be deleted.
def ext_fb_trigger(self): if os.path.isfile(self.feedback_file): self.toggle_feedback() os.remove(self.feedback_file)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _ensure_attachment_exists(target):\n if target is not None:\n target = Path(target)\n if not target.exists():\n msg = f'COMMUNICATOR WARNING: The file specified for attachment to email does not exist'\n fancy_print(msg, fg=COMMUNICATOR_WARN_COLOR)\n return False\n return True", "def check_file_exist(self):\n return False", "def check_file_existence(self, filename):\n try:\n for sample in TimeoutingSampler(\n config.GAHOOKS_TIMEOUT, 1, self.machine.fs.exists,\n \"/tmp/%s\" % filename\n ):\n if sample:\n return True\n except APITimeout:\n return False", "def test_no_delete(self):\n analyze_text(self.filename)\n self.assertTrue(os.path.exists(self.filename))", "def is_file_exists(self):\n pass", "def file_exists(self):\r\n if os.path.exists(self.file_path):\r\n return True\r\n else:\r\n return False", "def confirm_file(self,fn):\n\t\t\n\t\tif self.autoreload: \n\t\t\tprint '[STATUS] autoreloading %s'%fn\n\t\t\treturn True\n\t\telse:\n\t\t\tcaller = sys._getframe().f_back.f_code.co_name\n\t\t\tprint \"[STATUS] function %s found %s\"%(caller,fn)\n\t\t\tans = raw_input('[QUESTION] is this file valid else quit (y/N)? ')\n\t\t\tif re.match('^(y|Y)',ans): return True\n\t\t\telse: raise Exception('\\n[ERROR] file was invalid and must be deleted manually:\\n%s'%fn)\n\t\t\t#---! may want to later allow a file deletion if the user says the file is invalid\t\t", "def is_new_file(self):\n return self.filename is None", "def toggle_test():\n path = path_test\n if (os.path.isfile(path)):\n os.remove(path)\n button_test.configure(text=\"Appuyer sur le bouton de test\")\n print(\"Bouton test relâché\")\n\n else:\n open(path, 'a').close()\n button_test.configure(text=\"Relâcher le bouton de test\")\n print(\"Bouton test enfoncé\")", "def file_exist() -> bool:\n pass", "def clear(self):\n if os.path.isfile(self._trigger_file):\n os.remove(self._trigger_file)\n logger.debug(\"Removed preview update trigger: %s\", self._trigger_file)", "def file_exists(self):\n if os.path.isfile(self.file_name):\n return True\n else:\n return False", "def test_no_deletion(self):\n analyze_text(self.filename)\n self.assertTrue(os.path.exists(self.filename))", "def test_no_deletion(self):\n analyze_text(self.filename)\n self.assertTrue(os.path.exists(self.filename))", "def on_file(self) -> bool:\n\n return (\n self.env_var_helper.set_name(\"PYFUNCEBLE_DEBUG\").exists()\n or self.env_var_helper.set_name(\"DEBUG_PYFUNCEBLE\").exists()\n )", "def check_topic(t):\n empty_string_check(t['@id'])\n filesystem_exists_check(t['@filename'])\n filesystem_exists_check(t['@qrelsFilename'])\n \n if '@backgroundFilename' in t: # A background file was specified.\n filesystem_exists_check(t['@backgroundFilename'])\n else:\n t['@backgroundFilename'] = None # No background file was specified.", "def in_maintenance_mode():\n return os.path.exists(\"maintenance.txt\")", "def test_no_deletion(self):\n\t\tanalyse_text(self.filename)\n\t\tself.assertTrue(os.path.exists(self.filename))", "def check_for_file(self):\n if self.task.file_name in os.listdir(self.task.file_storage):\n return True\n return False", "def fileCheck(file):\n if not os.path.isfile(file):\n print('File : ',file)\n print('E... 
'+'no file')\n sys.exit()", "def has_filename(self):\n if self.filename == \"untitled\":\n return False\n else:\n return True", "def check_filename(*args):\n subject = subject_var.get()\n category = cat_var.get()\n private = private_var.get()\n\n extension = 'txt' if not private else 'secret'\n filename = f'{category} - {subject}.{extension}'\n\n if Path(filename).exists():\n status_var.set(f'WARNING: {filename} already exists!')\n else:\n status_var.set('')", "def has_file(self) -> bool:\n return self._file is not None", "def check_both_tmp_files(self, positive, action):\n for filename in (\n \"{0}_{1}_{2}\".format(PREFIX, time, action)\n for time in [\"before\", \"after\"]\n ):\n testflow.step(\"Check if file %s exists in /tmp\", filename)\n assert self.check_file_existence(filename) is positive, (\n \"File {0} {1} exist\".format(\n filename, \"should\" if positive else \"shouldn't\"\n )\n )", "def _file_needs_to_be_created(self, file_path, quiet=False):\n if not self._args.check_for_existing_files:\n return True\n if os.path.exists(file_path):\n if not quiet:\n sys.stderr.write(\n \"File %s exists. Skipping its generation.\\n\" % file_path\n )\n return False\n return True", "def checkFile(self,selected_file):\n path_holder = pathlib.Path(selected_file)\n if path_holder.exists():\n if path_holder.is_file():\n if path_holder.stat().st_size == 0 or path_holder.stat().st_size is None:\n raise CoreException.FileEmptyError(\"File should not be empty!\")\n return False\n\n if path_holder.is_symlink():\n raise CoreException.FileNotSupportedError(\"Symbolic link not supported\")\n return False\n \n # File Clean if they pass the required identity of file.\n return True", "def remove_status_file(self):\r\n if os.path.exists(settings.STATUS_MESSAGE_PATH):\r\n os.remove(settings.STATUS_MESSAGE_PATH)", "def test_GFD_export_existing_file(self):\n filepath = '3.txt'\n existing_file = open(filepath, 'x')\n existing_file.write(\"This file is existing.\")\n existing_file.close()\n gfd = flow_processing_input.GroundFlowData()\n gfd.detector_flow_data = createGFDDataset(5).dataset\n # Check if warning was raised for existing file\n with warnings.catch_warnings(record=True) as w:\n gfd.export_to_file(filepath)\n self.assertTrue(len(w) == 1)\n os.remove(filepath)", "def file_exists(self,\n\t filename,\n\t shutit_pexpect_child=None,\n\t directory=False,\n\t note=None,\n\t loglevel=logging.DEBUG):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tshutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child\n\t\tshutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)\n\t\treturn shutit_pexpect_session.file_exists(filename=filename,directory=directory,note=note,loglevel=loglevel)", "def does_file_exist(self, fn):\n if True:\n print(f\"-=- {fn} found.\")\n return True\n else:\n print(f\"-!- {fn} not found. Try again\")\n return False" ]
[ "0.6108252", "0.5992881", "0.58484524", "0.5839872", "0.57959425", "0.56633365", "0.5644353", "0.5628685", "0.5609282", "0.5547753", "0.553532", "0.5524169", "0.5495868", "0.5495868", "0.5476462", "0.54479486", "0.5429927", "0.542428", "0.54199207", "0.54125386", "0.54085326", "0.5398947", "0.53862226", "0.5378811", "0.5377107", "0.5374057", "0.5371888", "0.53693324", "0.5338221", "0.5320319" ]
0.7604029
0
Connected to backlog input field of the GUI. Triggers change of the number of backlog values in the Qbpm() class instance.
def change_backlog(self): if not self.lltext.text(): return backlog = int(self.lltext.text()) self.qbpm.change_backlog(backlog) self.lltext.setText(str(self.qbpm.backlog))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def change_backlog(self, backlog):\n min_backlog = int(numpy.ceil(self.box_length / self.frequency))\n if backlog < min_backlog:\n backlog = min_backlog\n log_length = self.calc_log_length(backlog, self.frequency)\n self.backlog = backlog\n self.change_log_length(log_length)", "def listen(self, backlog: int) -> None:\n ...", "def _number_dbm_changed(self, *a):\r\n self.api.set_power(self.number_dbm.get_value())", "def numBinsChanged(self, val):\n self.numBins = val", "def update_Q(self):", "def input_changed(self, input_data):\n boundary_props = {\n 'foam:0/T boundaryField':{\n \"type\": \"fixedValue\",\n \"value\": Field(0)\n }\n }\n self.load_boundary(boundary_props, input_data)", "def changeValue(self):\r\n # productive #onUpDnArrow\r\n profprint()\r\n widget = slicer.modules.NeedleFinderWidget\r\n # widget.scrollPointButton.setText('Scroll Point for Needle ' + str(widget.editNeedleTxtBox.value) + ' (pt: ' + str(self.ptNumber) + ')')\r\n self.lockControlPoints(widget.editNeedleTxtBox.value)\r\n self.unlockControlPoints(widget.editNeedleTxtBox.value)\r\n widget.drawValidationNeedlesButton.text = \"Render Manual Needle \" + str(widget.editNeedleTxtBox.value)", "def __nextChange(self):\n self.activeWindow().nextChange()", "def motorLimitsChanged(self):\n pass", "def setGammaValueFromSpinBox(self):\n self.gammaSlider.setValue( self.gammaSpinBox.value * 1000)", "def on_change(self, value):", "def change():", "def validationNeedle(self):\n #productive #onButton\n profprint()\n widget = slicer.modules.NeedleFinderWidget\n widget.validationNeedleNumber += 1\n widget.validationNeedleButton.text= \"New Validation Needle: (\"+str(widget.validationNeedleNumber)+\")->(\"+str(widget.validationNeedleNumber+1)+\")\"\n # self.tableValueCtrPt.append([])\n widget.stepNeedle = 0", "def run_backlog_commands(self):\n if not hasattr(self, 'commands') or self.commands == '':\n print('{BLUE}No commands for {f_name}, skipping.{NOCOLOR}'.format(**colors, **self))\n else:\n self.mqtt.connect(self.mqtt_host)\n backlog_topic = '{c_topic}/backlog'.format(**self)\n # Join all command/payload pairs together with semicolons. If the\n # payload is a tasmota GPIO, use the value of the enumeration.\n backlog_payload = '; '.join(['{c} {p}'.format(c=i['command'], p=get_gpio(i['payload']) if 'GPIO' in i['payload'] else i['payload']) for i in self.commands]) + '; restart 1'\n print('{BLUE}Sending {topic} {payload}{NOCOLOR}'.format(topic=backlog_topic, payload=backlog_payload, **colors))\n self.mqtt.publish(backlog_topic, backlog_payload)\n self.mqtt.disconnect()", "def on_change(self, pvname=None, **kws):\n\n current_ctr = kws['value']\n # init on first read\n if self.current_counter is None:\n self.current_counter = current_ctr - 1 # the self.current_counter holds previous\n self.eventq.put(current_ctr)", "def on_b(self):\r\n self.log()", "def updateBar(self):\n pass", "def setGammaValueFromSlider(self):\n self.gammaSpinBox.setValue( self.gammaSlider.value / 1000 )", "def receive_input(self, value):\n self.companies.append(value)\n self.get_stock_data(True)\n self.update_options()", "def tabChangeEvent(self, tabIndex):\n axisList = self.widget(tabIndex)\n self.myParent.updateVarInfo(axisList)", "def change(self, event):\n if(self.lbl2[\"text\"]):\n self.lbl2[\"text\"] = \"\" #Clears any text (e.g. 
instructions or check) if there is any.\n\n #Updates the buttons to the current number\n row, column = self.kenken.changer(event)\n self.movelist[row][column] +=1 #Increase the value by one.\n \n if self.movelist[row][column] > 5: #If value in movelist is greater than 5, reset back to zero.\n self.movelist[row][column] = 0\n\n #Displays the new number to the board for user to see changes \n self.w.itemconfigure(self.numbers[row][column], text = self.choice[self.movelist[row][column]])\n #As the user plays, it will continually check if the user wins or not\n self.check()", "def validationNeedle(self):\r\n # productive #onButton\r\n profprint()\r\n widget = slicer.modules.NeedleFinderWidget\r\n # widget.editNeedleTxtBox.value += 1\r\n widget.validationNeedleButton.text = \"New Validation Needle: (\" + str(widget.editNeedleTxtBox.value) + \")->(\" + str(widget.editNeedleTxtBox.value + 1) + \")\"\r\n widget.editNeedleTxtBox.value += 1\r\n # self.tableValueCtrPt.append([])\r\n widget.stepNeedle = 0\r\n self.lockControlPoints(widget.editNeedleTxtBox.value)", "def addKnobChanged(call, args=(), kwargs={}, nodeClass='*', node=None):\n pass", "def change_max_running(self, _):\n current = self.execution_manager.max_running\n self.details.original_widget = IntInputWidget('Maximum jobs running/pending: ', current, self.__change_max_running)", "def XPLMDataChanged_f(inRefcon):", "def render_backlog(results, ax_backlog):\n dts = results.dts\n backlog = results.backlog\n ax_backlog.yaxis.tick_right()\n cur = backlog[1][-1]\n color = backlog_color(cur)\n p_day, = ax_backlog.plot(dts, backlog[1], '%s-' % color)\n p_week, = ax_backlog.plot(dts, backlog[14], 'k:')\n if max(backlog[1]) > 100 or max(backlog[14]) > 100:\n ax_backlog.set_ylim([0, 100])\n ax_backlog.set_ylabel('Backlog')\n ax_backlog.legend(\n [p_day, p_week],\n ['1d avg: %.1f hr wait' % cur, '14d avg: %.1f hr wait' % backlog[14][-1]],\n 'lower left',\n fontsize='x-small',\n )", "def set_Count(self, value):\n super(MoneyReceivedInputSet, self)._set_input('Count', value)", "def test_setInputHistory(self):\n self.widget.setInputHistory(History([\"a\", \"b\", \"c\"]))\n self.assertEqual(self.widget.getInputHistory(), [\"a\", \"b\", \"c\"])", "def _number_list_index_changed(self, *a):\r\n self.api.set_list_index(self.number_list_index.get_value())\r\n \r\n # Make sure.\r\n n = self.api.get_list_index()\r\n self.number_list_index.set_value(n, block_events=True)\r\n \r\n # Update the frequency and power in the safest possible way\r\n# fs = self.api.get_list_frequencies()\r\n# ps = self.api.get_list_powers()\r\n# self.number_dbm.set_value(ps[n])\r\n# self.number_frequency.set_value(fs[n])\r\n \r\n # Update the frequency and power using the graph if we have it.\r\n \r\n # If enabled, things are out of sync, get the list.\r\n if self.button_send_list._widget.isEnabled(): self.query_list()\r\n \r\n # Get the power and frequency from the plot\r\n self.number_dbm .set_value(self.plot_list['P_dBm'][n])\r\n self.number_frequency.set_value(self.plot_list['f_Hz'][n])", "def queue_input(self, value):\n self.input_queue.append(value)" ]
[ "0.6767586", "0.5500406", "0.5418215", "0.5408162", "0.52866536", "0.5248968", "0.5198904", "0.5192426", "0.5188179", "0.5161533", "0.5145156", "0.5121663", "0.51012754", "0.5070181", "0.5054365", "0.50377434", "0.50249094", "0.4998926", "0.49962744", "0.49755526", "0.49751335", "0.49681544", "0.49662414", "0.4955479", "0.49515122", "0.49453107", "0.4944779", "0.49381277", "0.49333557", "0.49273095" ]
0.72343934
0
Connected to the frequency input field of the GUI. Triggers a change of the polling frequency in the Qbpm() class instance.
def change_frequency(self):
    if not self.ftext.text():
        return
    frequency = float(self.ftext.text())
    if frequency > 6.0:
        frequency = 6.0
    self.qbpm.change_frequency(frequency)
    self.ftext.setText(str(self.qbpm.frequency))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _number_frequency_changed(self, *a):\r\n self.api.set_frequency(self.number_frequency.get_value())", "def set_frequency(self):\n def f():\n freq = float(self.freq_edit.get())\n duty = float(self.duty_edit.get())\n if duty == 0:\n duty = 1\n if duty > 1:\n duty = duty / 100\n self.parent.update_frequency(freq, duty, self.model.upper())\n return f", "def poll(self, poll_input):", "def slider_changed(self):\n freq_index = self.ui.frequencySlider.value()\n freq = self.psd.freqs[freq_index]\n self.ui.fmin.setText(str(freq))\n self.ui.fmax.setText(str(freq))\n self.value_changed()", "def tick(self):\n self.connect()", "def _number_dbm_changed(self, *a):\r\n self.api.set_power(self.number_dbm.get_value())", "def config_pulse_modulation(self, frequency=1e3, input='square'):\n self.enable_pulse_modulation()\n self.pulse_source = 'internal'\n self.pulse_input = input\n self.pulse_frequency = frequency", "def set_freqmult(self, freqmult, ioupdate=False):\n\n assert (freqmult == 1 or freqmult in range(4,21)), 'Multiplier must be 1 (off) or between 4 and 20'\n assert (99.999e6 < freqmult * self.refclock_freq < 500.001e6), \\\n 'self.clock_frequency must lie between Min: 100MHz and Max 500MHz'\n\n # Read current state of FR1 register\n initial_state = self._read('FR1')\n\n if freqmult == 1:\n # toggles VCO off deletes multiplier and copies charge pump control\n BYTE0 = 0x03 & initial_state[0]\n\n else:\n # Copy charge pump control setting and set VCO control on or off depending on clock_freq threshold\n if freqmult*self.refclock_freq > 225e6:\n BYTE0 = 0x03 & initial_state [0] | (freqmult << 2) | self.FR1_VCO_BYTE\n elif freqmult*self.refclock_freq < 160e6:\n BYTE0 = 0x03 & initial_state [0] | (freqmult << 2)\n else:\n warn('Clock frequency set between 160MHz and 255MHz. 
No guarantee of operation')\n BYTE0 = 0x03 & initial_state [0] | (freqmult << 2)\n\n # Copy unchanged bytes from FR1 register\n BYTE1 = initial_state[1]\n BYTE2 = initial_state[2]\n\n # data for FR1 register\n new_state = [BYTE0, BYTE1, BYTE2]\n # write new state into register\n self._write('FR1', new_state)\n\n # Set new freqmult value and print clock information\n self.freqmult = freqmult\n self.clock_freq = self.refclock_freq*self.freqmult\n print ('Refclock =', \"{:.2e}\".format(self.refclock_freq), 'Hz \\nFreqmult =', self.freqmult,\n '\\nClock Frequency =', \"{:.2e}\".format(self.clock_freq), 'Hz')\n\n if ioupdate:\n self._io_update()", "def run(self):\n while self.__running:\n enum = self.__gui_app.pollButtonEvent()\n if enum != '':\n print enum\n if int(enum, 16) == 4:\n self.__qf.tick()\n else:\n self._publish(enum)\n\n print \"Exit: %s\\n\" % self", "def __init__(self, simulate_feedback=False):\n super(QbpmMonitor, self).__init__()\n\n self.sources = {\n \"QBPM1 OH\" : Qbpm('hzgpp05vme0:10000/p05/i404/exp.01', 2),\n \"QBPM2 OH\" : Qbpm('hzgpp05vme0:10000/p05/i404/exp.02', 7),\n \"QBPM EH2\" : Qbpm('hzgpp05vme2:10000/p05/i404/eh2.01', 30)\n }\n default_source = \"QBPM2 OH\"\n self.set_source(default_source)\n self.title = self.qbpm.address\n self.posx_target = 0\n self.posz_target = 0\n self.avgcurr_target = 0\n self.qbpm.frequency = 5.0 # in Hz\n self.qbpm.backlog = 120 # in s\n self.polling = False\n self._generator_poll = None\n self._timerId_poll = None\n self.feedback = False\n self.feedback_threshold = 5E-9\n self._generator_feedback = None\n self._timerId_feedback = None\n self.last_corr_angle = 0\n self.feedback_time = datetime.datetime.now()\n self.dcm_bragg_tserver = tango.DeviceProxy('hzgpp05vme0:10000/dcm_bragg')\n self.dcm_bragg_angle = self.dcm_bragg_tserver.Position\n self.dcm_pitch_tserver = tango.DeviceProxy('hzgpp05vme0:10000/dcm_xtal2_pitch')\n self.dcm_energy_tserver = tango.DeviceProxy('hzgpp05vme0:10000/dcm_energy')\n self.dmm_x1rot_tserver = tango.DeviceProxy('hzgpp05vme0:10000/dmm_x1rot')\n self.dmm_bragg_angle = self.dcm_bragg_tserver.Position\n self.dmm_x2rot_tserver = tango.DeviceProxy('hzgpp05vme0:10000/dmm_x2rot')\n self.dmm_x1z_tserver = tango.DeviceProxy('hzgpp05vme0:10000/dmm_x1z')\n self.dmm_x1z_position = self.dmm_x1z_tserver.Position\n self.dmm_x2z_tserver = tango.DeviceProxy('hzgpp05vme0:10000/dmm_x2z')\n self.dmm_x2y_tserver = tango.DeviceProxy('hzgpp05vme0:10000/dmm_x2y') \n self.beamstop = tango.DeviceProxy('hzgpp05vme0:10000/HASYLAB/Petra3_P05vil.CDI.SRV/BST')\n self.undulator = tango.DeviceProxy('hzgpp05vme0:10000/p05/undulator/1')\n self.get_mono()\n\n self.heartbeat = time.time()\n self.feedback_file = '/tmp/qbpmfeedback.run'\n if os.path.isfile(self.feedback_file):\n os.remove(self.feedback_file)\n self.cycle = 0\n self.feedback_triggered = False\n self.simulate_feedback = simulate_feedback\n self.dcm_step_backlash = self.dcm_pitch_tserver.read_attribute('StepBacklash').value\n\n ################################################################################################################\n # initUI\n\n # labels\n self.source_label = QtGui.QLabel(\"source\")\n self.poll_label = QtGui.QLabel(\"poll\")\n self.feedback_label = QtGui.QLabel(\"feedback\")\n self.ll_label = QtGui.QLabel(\"backlog (s)\")\n self.freq_label = QtGui.QLabel(\"frequency\")\n self.sensitivity_label = QtGui.QLabel(\"sensitivity\")\n self.filter_label = QtGui.QLabel(\"lowpass filter\")\n self.log_label = QtGui.QLabel(\"log to file\")\n self.pitch_label = 
QtGui.QLabel(\"0\")\n self.set_x2pitchlabel()\n # QBOM source Combobox\n self.scbox = QtGui.QComboBox(self)\n self.scbox.addItem(\"QBPM1 OH\") # Index 0\n self.scbox.addItem(\"QBPM2 OH\") # Index 1\n self.scbox.addItem(\"QBPM EH2\") # index 2\n self.scbox.setCurrentIndex(1) # Check if this value is consistent with default source above!\n self.scbox.activated[str].connect(self.set_source)\n # poll button\n self.rbtn = QtGui.QPushButton(self)\n self.rbtn.clicked.connect(self.toggle_polling)\n self.rbtn.resize(self.rbtn.sizeHint())\n self.rbtn.setIcon(self.style().standardIcon(QtWidgets.QStyle.SP_MediaPlay))\n # feedback button\n self.fbtn = QtGui.QPushButton(self)\n self.fbtn.clicked.connect(self.toggle_feedback)\n self.fbtn.resize(self.fbtn.sizeHint())\n self.fbtn.setIcon(self.style().standardIcon(QtWidgets.QStyle.SP_MediaPlay))\n # reset button\n reset_btn = QtGui.QPushButton('Reset', self)\n reset_btn.clicked.connect(self.qbpm.reset_logs)\n reset_btn.resize(reset_btn.sizeHint())\n # backlog text field\n self.lltext = QtGui.QLineEdit(str(self.qbpm.backlog))\n self.lltext.setValidator(QtGui.QIntValidator())\n self.lltext.setMaxLength(6)\n self.lltext.returnPressed.connect(self.change_backlog)\n # frequency text field\n self.ftext = QtGui.QLineEdit(str(self.qbpm.frequency))\n self.ftext.setValidator(QtGui.QDoubleValidator())\n self.ftext.setMaxLength(6)\n self.ftext.returnPressed.connect(self.change_frequency)\n # sensititvity slider\n self.sslider = QtGui.QSlider(self)\n self.sslider.setOrientation(QtCore.Qt.Horizontal)\n self.sslider.setMinimum(1)\n self.sslider.setMaximum(100)\n self.sslider.setTickPosition(QtGui.QSlider.TicksBothSides)\n self.sslider.setTickInterval(10)\n self.sslider.setSingleStep(1)\n self.sslider.setValue(self.qbpm.sensitivity)\n self.sslider.valueChanged.connect(self._set_sensitivity)\n # filter slider\n self.fslider = QtGui.QSlider(self)\n self.fslider.setOrientation(QtCore.Qt.Horizontal)\n self.fslider.setMinimum(1)\n self.fslider.setMaximum(1000)\n self.fslider.setTickPosition(QtGui.QSlider.TicksBothSides)\n self.fslider.setTickInterval(100)\n self.sslider.setSingleStep(1)\n self.fslider.setValue(self.qbpm.filter)\n self.fslider.valueChanged.connect(self._set_filter)\n # log button\n self.lbutton = QtGui.QRadioButton(self)\n self.lbutton.setChecked(False)\n # quit button\n qbtn = QtGui.QPushButton('Quit', self)\n qbtn.clicked.connect(QtCore.QCoreApplication.instance().quit)\n qbtn.resize(qbtn.sizeHint())\n \n r, g, b, w = [255, 0, 0], [0, 255, 0], [0, 0, 255], [150, 150, 150]\n fill_color = pg.mkColor([0, 255, 0, 100])\n self.curves = {}\n log_pen = pg.mkPen(w, width=1, style=QtCore.Qt.SolidLine)\n avg_pen = pg.mkPen(r, width=3, style=QtCore.Qt.SolidLine)\n target_pen = pg.mkPen(g, width=1, style=QtCore.Qt.SolidLine)\n sensitivity_pen = pg.mkPen(fill_color, width=1, style=QtCore.Qt.SolidLine)\n fill_pen = pg.mkPen(fill_color, width=100, style=QtCore.Qt.SolidLine)\n petra_pen = pg.mkPen(w, width=3, style=QtCore.Qt.SolidLine)\n # define plot font\n font = QtGui.QFont()\n font.setPixelSize(16)\n # make PlotWidgets\n self.plot_main = pg.GraphicsLayoutWidget()\n self.avgcurr_timeaxis = TimeAxisItem(orientation='bottom')\n self.plot_avgcurr = self.plot_main.addPlot(title='avg. 
current', row=0, col=0,\n axisItems={'bottom': self.avgcurr_timeaxis})\n self.petracurr_timeaxis = TimeAxisItem(orientation='bottom')\n self.plot_petracurrent = self.plot_main.addPlot(title='PETRA beam current', row=0, col=1,\n axisItems={'bottom': self.petracurr_timeaxis})\n self.plot_main.nextRow()\n self.posx_timeaxis = TimeAxisItem(orientation='bottom')\n self.plot_posx = self.plot_main.addPlot(title='x-position', row=1, col=0,\n axisItems={'bottom': self.posx_timeaxis})\n self.posy_timeaxis = TimeAxisItem(orientation='bottom')\n self.plot_posz = self.plot_main.addPlot(title='z-position', row=1, col=1,\n axisItems={'bottom': self.posy_timeaxis})\n # assign qbpm data to styles and PlotWidgets\n styles = {'avgcurr_log': (self.plot_avgcurr, log_pen),\n 'avgcurr_filter_log': (self.plot_avgcurr, avg_pen),\n 'avgcurr_target_log': (self.plot_avgcurr, target_pen),\n 'posx_log': (self.plot_posx, log_pen),\n 'posx_filter_log': (self.plot_posx, avg_pen),\n 'posx_target_log': (self.plot_posx, target_pen),\n 'posz_log': (self.plot_posz, log_pen),\n 'posz_filter_log': (self.plot_posz, avg_pen),\n 'posz_target_log': (self.plot_posz, target_pen),\n 'posz_sens_low_log': (self.plot_posz, sensitivity_pen),\n 'posz_sens_high_log': (self.plot_posz, sensitivity_pen),\n 'petracurrent_log': (self.plot_petracurrent, petra_pen)\n }\n # plot curves\n for log_array, style in styles.items():\n # self.curves[key] = style[0].plot(self.qbpm.log_arrays[key], pen=style[1], symbol='o')\n self.curves[log_array] = style[0].plot(self.qbpm.log_time, self.qbpm.log_arrays[log_array], pen=style[1])\n # self.fill = pg.FillBetweenItem(curve1=self.curves['posz_sens_low_log'],\n # curve2=self.curves['posz_sens_high_log'], pen=fill_pen)\n # self.plot_posz.addItem(self.fill)\n # set axis properties\n for log_plot in [self.plot_avgcurr, self. 
plot_posx, self.plot_posz, self.plot_petracurrent]:\n log_plot.getAxis(\"bottom\").tickFont = font\n log_plot.getAxis(\"bottom\").setStyle(tickTextOffset=20)\n log_plot.getAxis(\"left\").tickFont = font\n log_plot.getAxis(\"left\").setStyle(tickTextOffset=20)\n log_plot.getAxis(\"left\").setWidth(100)\n log_plot.getAxis(\"bottom\").setGrid(100)\n log_plot.getAxis(\"left\").setGrid(100)\n\n # Create a grid layout to manage the widgets size and position\n layout = QtGui.QGridLayout()\n self.setLayout(layout)\n\n # Add widgets to the layout in their proper positions\n layout.addWidget(self.source_label, 0, 0)\n layout.addWidget(self.poll_label, 1, 0)\n layout.addWidget(self.feedback_label, 2, 0)\n layout.addWidget(self.ll_label, 4, 0)\n layout.addWidget(self.freq_label, 5, 0)\n layout.addWidget(self.sensitivity_label, 6, 0)\n layout.addWidget(self.filter_label, 7, 0)\n layout.addWidget(self.log_label, 8, 0)\n layout.addWidget(self.scbox, 0, 1)\n layout.addWidget(self.rbtn, 1, 1) # button goes in lower-left\n layout.addWidget(self.fbtn, 2, 1) # button goes in lower-left\n layout.addWidget(reset_btn, 3, 1) # button goes in lower-left\n layout.addWidget(self.lltext, 4, 1) # text edit goes in middle-left\n layout.addWidget(self.ftext, 5, 1) # text edit goes in middle-left\n layout.addWidget(self.sslider, 6, 1)\n layout.addWidget(self.fslider, 7, 1)\n layout.addWidget(self.lbutton, 8, 1)\n layout.addWidget(self.pitch_label, 9, 0, 1, 2) # button goes in lower-left\n# layout.addWidget(self.fb_step_label, 10, 0, 1, 2)\n# layout.addWidget(self.fb_time_label, 11, 0, 1, 2)\n layout.addWidget(qbtn, 10, 0, 1, 2) # button goes in lower-left\n layout.addWidget(self.plot_main, 0, 2, 11, 1)\n\n layout.setColumnStretch(0, 0.1)\n layout.setColumnStretch(1, 0.1)\n layout.setColumnStretch(2, 1)\n\n # Display the widget as a new window\n self.setWindowTitle(self.title)\n self.show()", "def poll(self):\n\tself.met = self.button.poll()", "def updateport(self,event=None):\n self.messages.log('Looking for focuser on '+str(self.port.get())+'...')\n if self.rf is not None and self.rf.ready:\n self.rf.closeconn()\n self.rf = rfserial.RFSerial(str(self.port.get()))\n if self.rf is not None and self.rf.ready:\n self.messages.log('Connected to focuser on '+str(self.port.get()))\n self.positiontext.set(str(self.rf.querypos()))\n pow = self.rf.queryrempow()\n self.pow[0].set(int(pow[0]))\n self.pow[1].set(int(pow[1]))\n self.pow[2].set(int(pow[2]))\n self.pow[3].set(int(pow[3]))\n else:\n self.messages.log(\"Can't connect to focuser on \"+str(self.port.get()))\n self.positiontext.set('Not Connected')", "def frequency_trigger(self, frequency_trigger):\n\n self._frequency_trigger = frequency_trigger", "def ff_callback(self):\n self.rokucontrol.ff_callback()", "def set_frequency(miner: Miner, login, frequency):\n #default for S9 is 550\n #\"bitmain-freq\" : \"550\",\n commands = get_changeconfigcommands(getminerfilename(miner), 'bitmain-freq', frequency)\n sendcommands_and_restart(miner, login, commands)", "def set_frequency(self, f=1e9):\r\n _debug('simq03b_api.set_frequency')\r\n \r\n self.write('SOUR:FREQ:CW '+str(f))", "def _update_gui(self):\r\n \r\n # Update the RF button.\r\n rf_on = self.api.get_output()\r\n if rf_on == None: rf_on = True\r\n self.button_rf.set_checked(rf_on, block_events=True).enable()\r\n \r\n # Update the combo; we block first just in case the value doesn't \"change\"\r\n if self.api == None: self.label_instrument_name.set_text('Simulation')\r\n else:\r\n if self.api.get_mode() == 'Fixed': 
self.combo_mode.set_value(0, block_events=True).enable()\r\n else: self.combo_mode.set_value(1, block_events=True).enable()\r\n self._combo_mode_changed()\r\n \r\n # Update the list plot\r\n self.query_list()", "def set_frequency(self):\r\n def move_synth(delta_f_synth):\r\n sign_delta_f_synth = int(delta_f_synth/abs(delta_f_synth))\r\n stepsize_Hz = int(10)\r\n num_steps = int(abs(delta_f_synth)/stepsize_Hz)\r\n remainder_Hz = round(abs(delta_f_synth)%stepsize_Hz,1)\r\n self.synth.set_incr(stepsize_Hz, 'Hz')\r\n for nn in range(num_steps): # slowly move the synth by delta_f_synth in stepsize steps\r\n self.synth.walk(sign_delta_f_synth)\r\n time.sleep(0.1)\r\n self.synth.set_incr(remainder_Hz, 'Hz')\r\n self.synth.walk(sign_delta_f_synth)\r\n time.sleep(0.1)\r\n \r\n def get_delta_f_synth():\r\n #get latest f_rep,f_0\r\n self.get_frequency() \r\n #calculate required f_rep to get desired PA_freq. switches n and frep in above eq.\r\n f_rep_goal = (self.setfrequency - self.sign_lock * self.f_lock - self.sign_0 * self.f_0) / self.n\r\n # print 'f_rep_goal = %.0f Hz'%f_rep_goal\r\n # lock uses 3rd harmonic so synth must be set to *3\r\n delta_f_synth = (f_rep_goal - self.f_rep)*3 \r\n delta_f_synth = round(delta_f_synth,1)\r\n # print 'delta_f_synth = %.1f Hz'%delta_f_synth\r\n return delta_f_synth\r\n \r\n iteration = 0\r\n delta_f_synth = get_delta_f_synth()\r\n while abs(delta_f_synth) > self.synth_tol:\r\n move_synth(delta_f_synth)\r\n delta_f_synth = get_delta_f_synth()\r\n iteration += 1\r\n if iteration > self.max_iteration:\r\n # print 'REACHED MAX ITERATION: delta_f_synth = %.1f'%delta_f_synth\r\n break", "def frequency_trigger(self):\n return self._frequency_trigger", "def set_frequency(value):\n # user input, minimum, maximum\n checklist = [int(value), 300000000, 928000000]\n\n if SendSignal.__verify_range(*checklist):\n SendSignal.__SIGNAL_SETTINGS['frequency'] = int(value)\n SendSignal.__SIGNAL_OBJ.setFreq(int(value))\n else:\n sys.stdout.write(\"Error {} not between {} and {}\".format(*checklist))\n sys.exit(2)", "def realtime(self):", "def trigger_update(self):\n update_thread = Thread(target=self.process_queued_msg)\n update_thread.setDaemon(True)\n update_thread.start()", "def value_changed(self):\n from ..backend.util import get_index_freq\n\n try:\n fmin = float(self.ui.fmin.text())\n except ValueError:\n fmin = self.psd.freqs[0]\n\n try:\n fmax = float(self.ui.fmax.text())\n except ValueError:\n fmax = self.psd.freqs[-1]\n self.f_index_min, self.f_index_max = get_index_freq(\n self.psd.freqs, fmin, fmax)\n try:\n self.vmax = float(self.ui.vmax.text())\n except ValueError:\n self.vmax = None\n try:\n self.vmin = float(self.ui.vmin.text())\n except ValueError:\n self.vmin = None\n\n self.log = self.ui.displayLog.checkState()\n self.plot_psd()", "def set_frequency(self):\n\t\t\"\"\"For Frequency Prescalar-0\"\"\"\n\t\tbus.write_byte_data(PCA9530_2C_1_DEFAULT_ADDRESS, PCA9530_2C_1_REG_PSC0, PCA9530_2C_1_PSC0_USERDEFINED)\n\t\t\n\t\t\"\"\"For Frequency Prescalar-1\"\"\"\n\t\tbus.write_byte_data(PCA9530_2C_1_DEFAULT_ADDRESS, PCA9530_2C_1_REG_PSC1, PCA9530_2C_1_PSC1_USERDEFINED)", "def change_frequency(self, frequency):\n self.frequency = frequency\n self.change_backlog(self.backlog)", "def set_update_rate(self, delay_ms):\n self._log_msg_start(\"Setting NMEA message update rate\")\n self._ubx.send(\"CFG-RATE\", measRate=delay_ms, navRate=1, timeRef=1)", "def run(self):\n tick_duration = 1 / self.config.tick_rate\n last_tick_time = time.time()\n\n while True:\n input_ = 
self.input_source.get_input()\n self.__update(input_)\n\n if self.state.exit:\n break\n\n current_time = time.time()\n sleep_time = tick_duration - (current_time - last_tick_time)\n if sleep_time > 0:\n time.sleep(sleep_time)\n last_tick_time = current_time", "def Update(self, ticks=0):", "def rfactor_event_loop():\n if RfactorLiveEvent.event.is_set():\n is_live = RfactorLiveEvent.get_nowait()\n # -- Update rFactor live state to front end\n if is_live is not None:\n eel.rfactor_live(is_live)\n\n if RfactorStatusEvent.event.is_set():\n status = RfactorStatusEvent.get_nowait()\n # -- Update rFactor status message in front end\n if status is not None:\n logging.debug('Updating rf2 status message: %s', status)\n eel.rfactor_status(status)\n\n RfactorStatusEvent.reset()", "def onFlowUpdate(self, event):" ]
[ "0.6113409", "0.607607", "0.60068643", "0.58455265", "0.581959", "0.57760304", "0.5760519", "0.5603944", "0.557244", "0.55622834", "0.5557524", "0.5518216", "0.5510019", "0.5501127", "0.5498613", "0.54436463", "0.5409313", "0.5407031", "0.53994757", "0.53965205", "0.5388988", "0.5385514", "0.53638804", "0.53470993", "0.53325355", "0.5328718", "0.5300048", "0.52918303", "0.5279308", "0.52779007" ]
0.61401844
0
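The two line-edit handlers in the records above (change_backlog and change_frequency) share one pattern: parse the widget text, clamp the value to an allowed range, hand it to the Qbpm model, then write whatever the model actually accepted back into the widget. Below is a minimal, framework-free sketch of that pattern; apart from the change_frequency/frequency names taken from the record, every identifier is a hypothetical stand-in, not the original class.

class _FakeQbpm:
    # Hypothetical stand-in for the Qbpm model, for illustration only.
    frequency = 5.0
    def change_frequency(self, f):
        self.frequency = f

def on_frequency_entered(text, qbpm, max_hz=6.0):
    # Read-clamp-delegate-echo pattern used by the GUI handlers above (sketch).
    if not text:                              # empty field: do nothing
        return None
    frequency = min(float(text), max_hz)      # clamp to the allowed maximum
    qbpm.change_frequency(frequency)          # delegate to the model object
    return str(qbpm.frequency)                # value to echo back into the widget

print(on_frequency_entered("8.5", _FakeQbpm()))  # -> "6.0"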
Changes the pitch label according to the monochromator currently in use (DCM or DMM).
def set_x2pitchlabel(self):
    labelstr_dcm = "DCM\nenergy:\t\t{:.9f}\nexit offset:\t{:.9f}\npitch:\t\t{:.9f}\nfb stepsize:\t{:.9f}\n\nbeamstop:\t\t{:.1f}°\n\nundulator:\t{}\ngap:\t\t{:.9f}\n\n{}"
    labelstr_dmm = "DMM\nbragg:\t\t{:.9f}\npitch:\t\t{:.9f}\nx1z:\t\t{:.9f}\nx2z:\t\t{:.9f}\nx2y:\t\t{:.9f}\nfb stepsize:\t{:.9f}\n\nbeamstop:\t\t{:.1f}°\n\nundulator:\t{}\ngap:\t\t{:.9f}\n\n{}"
    mono = self.get_mono()
    if mono == "dcm":
        self.pitch_label.setText(labelstr_dcm.format(self.dcm_energy_tserver.Position,
                                                      self.dcm_energy_tserver.ExitOffset,
                                                      self.dcm_pitch_tserver.Position,
                                                      self.last_corr_angle,
                                                      self.beamstop.TEMP_OUT[0],
                                                      self.undulator.State(),
                                                      self.undulator.Gap,
                                                      self.feedback_time))
    if mono == "dmm":
        self.pitch_label.setText(labelstr_dmm.format(self.dmm_x1rot_tserver.Position,
                                                      self.dmm_x2rot_tserver.Position,
                                                      self.dmm_x1z_tserver.Position,
                                                      self.dmm_x2z_tserver.Position,
                                                      self.dmm_x2y_tserver.Position,
                                                      self.last_corr_angle,
                                                      self.beamstop.TEMP_OUT[0],
                                                      self.undulator.State(),
                                                      self.undulator.Gap,
                                                      self.feedback_time))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pitch(self, pitch):\n pass", "def newPitch(token, pitch):\n changes.replaceToken(token,\n token.step + ly.pitch.octaveToString(pitch.octave) + token.cautionary)", "def newPitch(token, pitch, lastPitch):\n pitch.absolute(lastPitch)\n changes.replaceToken(token,\n token.step + ly.pitch.octaveToString(pitch.octave) + token.cautionary)", "def updateInfo(self):\n\t\tif ( self.errorCount == 2 ):\n\t\t\tself.pitchText.text = \"Unclear microphone input...\"\n\n\t\tcurNote = self.listener.pitch.note\n\t\tcurFreq = self.listener.pitch.freq\n\t\tself.tuneDelta, self.tuneNeighbor = self.listener.pitch.inTune()\n\t\ttuneText = \"%0.2f Hz off from %s (%0.1f Hz)\" % (abs(self.tuneDelta), \n\t\t\t\t\t\t\t\t\t\t\t\tself.tuneNeighbor.note, \n\t\t\t\t\t\t\t\t\t\t\t\tcurFreq)\n\t\tself.pitchText.text = tuneText", "def on_cam_base_pitch_hSlider_valueChanged(self, value):\n self.cam_base_pitch_ledit.setText(str(100 + value))", "def setLevelPitchPID(P,I,D):\n return RoboCaller().call(\"setLevelPitchPID\", \"void\", P*1000,I*1000,D*1000)", "def pitch(self, evt=None):\n self.dbgprint(\"pitch(%r)\"%evt)", "def setVoicePitch(self, pitch):\n\n try:\n assert (pitch >= 1.0 and pitch <= 4) or pitch == 0\n self.tts.setParameter(\"pitchShift\", pitch)\n\n except AssertionError:\n self.logger.warning(\"Incorrect pitch value, the pitch won't be modified\")", "def UpdateLabel(self) -> _n_6_t_0:", "def set_slot_pitch(self):\n Nmag_txt = self.tr(\"Number of magnets = 2p = \")\n if self.machine.rotor.slot.Zs is not None:\n Zs = self.machine.rotor.slot.Zs\n out = Nmag_txt + str(Zs) + \" => \"\n Slot_pitch = 360.0 / Zs\n Slot_pitch_rad = Slot_pitch * pi / 180\n\n pitch_txt = self.tr(\"Slot pitch = \")\n out += (\n pitch_txt\n + \"%.4g\" % (Slot_pitch)\n + u\" ° (\"\n + \"%.4g\" % (Slot_pitch_rad)\n + \" rad)\"\n )\n self.out_Nmag.setText(out)\n else:\n self.out_Nmag.setText(Nmag_txt + \"?\")", "def pitch(self):\n return self['pitch']", "def setPitchUniform(self, pitchInCm):\n for b in self.getBlocks():\n b.setPitch(pitchInCm)\n\n # have to update the 2-D reactor mesh too.\n self.spatialGrid.changePitch(pitchInCm)", "def _update_output_voltage_label(self):\n self.widgets['p_outputVoltage'].setValue((self._curr_output_voltage))", "def _set_output_voltage_from_label(self):\n if (~self._is_stabilizing): #Only updates value if we are not stabilizing, otherwise the PID loop will be driving the output voltage\n #as opposed to the user.\n self._curr_output_voltage = self.widgets['p_outputVoltage'].value()\n self._ao_client.set_ao_voltage(self._ao_channel, self._curr_output_voltage)", "def set_pitch_trig(self, trig):\n\t\tself.pitch_trig = trig", "def pitch(self):\n return self._pitch", "def updateBar(self):\n self.mixer = alsaaudio.Mixer()\n volumes = self.mixer.getvolume()\n mutes = self.mixer.getmute()\n\n # update on changes and prolong living time of self.\n if self.masterVol != volumes[0]:\n self.masterVol = volumes[0]\n self.prolongLiving()\n\n if self.masterMute != mutes[0]:\n self.masterMute = mutes[0]\n self.prolongLiving()\n\n if(self.masterMute == 1):\n self.volumeBar.set_fraction(0)\n self.label.set_markup(\n \"<span foreground='white' size='small'>0</span>\")\n else:\n self.volumeBar.set_fraction(self.masterVol / 100)\n if(self.masterVol == 100):\n self.label.set_markup(\n \"<span foreground='white' size='xx-small'>\" + str(self.masterVol) + \"</span>\")\n else:\n self.label.set_markup(\n \"<span foreground='white' size='small'>\" + str(self.masterVol) + \"</span>\")\n\n return True", "def adjust_pitchset(self):\n 
register_pitch_set = np.array(list(set([i%12 for i in self.register])))\n is_in = np.isin(self.pitch_set, register_pitch_set)\n if not np.all(is_in):\n self.pitch_set = self.pitch_set[is_in]\n self.weights_ = self.weights_[is_in]\n self.weights_ = self.weights_ / np.sum(self.weights_)", "def note(note_str):\n return pitchhz[note_str.lower()]", "def set_voice_pitch(sim_info: SimInfo, voice_pitch: float) -> None:\n sim_info.voice_pitch = voice_pitch", "def setKp(self, proportional_gain):\r\n\t\tself.Kp = proportional_gain\r\n\t\tself.label = \"standard_PID_Controller/Kp=%f, Ti=%f, Td=%f\" % (self.Kp, self.Ti, self.Td)", "def from_midi(midi_pitch_id:int, detune=0) -> 'Pitch':\n return Pitch((midi_pitch_id - 69)*100 + detune)", "def pitch_dia(self, value):\n Gear.pitch_dia.fset(self, value)\n self._calcs()", "def _update_power_label(self):\n\n #Checks if > 0.5s has elapsed since the last change to the power reading label\n #I do this since otherwise the text label updates too quickly and it's annoying\n #to read.\n currTime = time.time()\n if currTime - self._last_power_text_update > 0.5:\n #If it updates, reads in the power and updates\n #TODO: Read the power in one function only and then all of the places that use it (updating feedback, updating power label, and plotting)\n #access that member variable. Not a huge deal will slightly speed it up I guess and is a bit cleaner.\n power = self.gain*np.array(self._ai_client.get_ai_voltage(self._ai_channel, max_range=self.max_input_voltage))\n self.widgets['label_power'].setText(str(power[-1]))\n self._last_power = power[-1]/self.gain\n self._last_power_text_update = currTime", "def get_midi_pitch(self) -> (int, float):\n pitch_id = 69 + int(self._cents/100)\n detune = self._cents - 100*(pitch_id - 69)\n while detune >= 50:\n pitch_id += 1\n detune -= 100\n while detune < -50:\n pitch_id -= 1\n detune += 100\n return (pitch_id, detune)", "def set_pwm(self, fan, pct):\n Logger.debug(\"Set pwm %d to %d\" % (int(fan.source.name), pct))\n fan.source.write(pct)\n self.last_fan_speed = pct", "def rec_default(self):\n self.phase_triggers.setText('(0,1,320)')\n self.phase_min.setText('-1.57')\n self.phase_max.setText('1.57')", "def getPitch(self):\n step = self._elem.find('pitch/step')\n octave = self._elem.find('pitch/octave')\n if step is None or octave is None:\n raise MusicXMLParseError(\"this note does not have pitch\")\n\n note_name = step.text\n octave = int(octave.text)\n notated_accidental = self._get_text('accidental')\n\n notated_sharp = notated_accidental == 'sharp'\n notated_flat = notated_accidental == 'flat'\n notated_natural = notated_accidental == 'natural'\n\n key = self._attributes.getKeySignature()\n key_accidental_char, key_accidental_list = ACCIDENTAL_TABLE[key]\n \n if notated_natural:\n note_name += '='\n return (note_name, octave)\n if not notated_natural and note_name in key_accidental_list: # see what the \n note_name += key_accidental_char\n if notated_sharp:\n note_name += '#'\n if notated_flat:\n note_name += 'b'\n\n return (note_name, octave)", "def main(_):\n\tlabel_wav()", "def to_pwm(self, precision=4, extra_str=\"\"):\n motif_id = self.id\n \n if extra_str:\n motif_id += \"_%s\" % extra_str\n\n if not self.pwm:\n self.pwm = [self.iupac_pwm[char]for char in self.consensus.upper()]\n\n return \">%s\\n%s\" % (\n motif_id, \n self._pwm_to_str(precision)\n )" ]
[ "0.68191767", "0.6375695", "0.62295777", "0.6179002", "0.60965174", "0.59890026", "0.5953664", "0.58515495", "0.5733584", "0.5709636", "0.56910586", "0.5606569", "0.5561708", "0.55490696", "0.55043685", "0.5472178", "0.5465675", "0.5421198", "0.54128", "0.54006743", "0.5388796", "0.5343975", "0.5308584", "0.53033197", "0.5270504", "0.52696705", "0.5262543", "0.52570903", "0.525308", "0.5247824" ]
0.7150294
0
Changes the log length of all arrays, trying to keep already-measured values in place.
def change_log_length(self, log_length):
    len_diff = abs(self.log_length - log_length)
    if log_length > self.log_length:
        for log_group in self.log_names.values():
            for log_array in log_group:
                tmparr = numpy.full(log_length, self.log_arrays[log_array][0])  # generate tmparr with first value from array
                tmparr[-self.log_arrays[log_array].size:] = self.log_arrays[log_array]  # fill end with current array
                self.log_arrays[log_array] = tmparr
        tmparr = numpy.zeros(log_length)
        tmparr[:len_diff] = numpy.linspace(self.log_time[0] - len_diff/self.frequency,
                                           self.log_time[0], len_diff)
        tmparr[-self.log_time.size:] = self.log_time
        self.log_time = tmparr
    else:
        for log_group in self.log_names.values():
            for log_array in log_group:
                tmparr = numpy.zeros(log_length)
                tmparr[:] = self.log_arrays[log_array][-log_length:]
                self.log_arrays[log_array] = tmparr
        tmparr = numpy.zeros(log_length)
        tmparr[:] = self.log_time[-log_length:]
        self.log_time = tmparr
    self.log_length = log_length
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset_logs(self):\n # reset log arrays\n try:\n bc = self.petra.BeamCurrent\n except:\n bc = numpy.nan\n try:\n pac = self.tserver.read_attribute('PosAndAvgCurr').value\n except:\n pac = numpy.array([numpy.nan, numpy.nan, numpy.nan])\n server_query = numpy.append(pac, bc)\n for log_group, log_arrays in self.log_names.items():\n omit_group = ['log_sens']\n if log_group not in omit_group:\n for n, log_array in enumerate(log_arrays):\n self.log_arrays[log_array] = numpy.full(self.log_length, server_query[n])\n # reset sensitivity log\n for log_array in self.log_names['log_sens']:\n self.log_arrays[log_array] = numpy.full(self.log_length, numpy.nan)\n # reset time array\n length = self.log_time.size\n t0 = self.timestamp() - self.backlog\n t1 = self.timestamp()\n self.log_time = numpy.linspace(t0, t1, length)", "def _wipe_log(self):\n self.temp_log.clear()\n self.temp_log = [[], []] # init a 2D array", "def clear_logging_arr(self):\n self.log_arr = []", "def correct_length_of_all_items(self):\n if self.need_correct_length:\n if not is_power_of_2(self.max_length) and self.length_is_power_of_2:\n self.max_length = 2 ** next_power_of_2(self.max_length)\n for i in self.items:\n i.correct_length(self.max_length)\n self.need_correct_length = False", "def set_length(self, new_length):\n if(new_length == None):\n self._logger.write(\"Error! new_length cannot be a NoneType\")\n elif(type(new_length) != float):\n self._logger.write(\"Error! new_length must be of type float\")\n else:\n try:\n self._length = new_length\n except Exception as e:\n self._logger.write(\"Error! Could not set the new length:\\n %s\" % e)", "def _auto_clear_log(self):\n if self.log_size() > self.MAX_LOGSIZE:\n self.clear()", "def _update_length(self):\n\n if self._data_type == 'coords':\n return \"length adjustment not allowed for coordinate-style data\"\n elif self._data_type == 'image':\n self._vectors = self._convert_to_vector_type(self._current_data)\n vertices, triangles = self._generate_meshes(self.vectors,\n self.width)\n self._mesh_vertices = vertices\n self._mesh_triangles = triangles", "def _grow(self): \n limit = 0\n #Iterating through the list to find the number of elements\n for i in xrange(len(self)):\n if self._items[i] != self._fillValue:\n #There's an element at index i, so update the limit\n limit = i\n \n #Only grow the array if the limit+1 and the physical size is the same.\n if limit+1 == len(self):\n temp = Array(len(self)*2)\n \n #Copy existing elements to the new Array\n for i in xrange(len(self)):\n temp[i] = self._items[i]\n \n #Initialize the new elements to the fillValue\n for j in xrange(len(self), len(self)*2):\n temp[j] = self._fillValue\n self._items = temp", "def _extend(self, newlen: int) -> None:\n diff = newlen - len(self)\n if diff > 0:\n self.extend([0] * diff)", "def _resize_list(self, new_size: int):\n for _ in range((new_size + 1) - len(self)):\n self.append(0)", "def granulate(self, length):\n if length == self._flen:\n return\n\n self._flen = length\n frame_count = int(math.ceil(len(self) / float(length)))\n #TODO: Recalculate findex (index will be the same)\n new_findex = 0\n frames = []\n for frame in range(frame_count):\n frames.append(self[frame * length:frame * length + length])\n self._frames = frames\n self._findex = new_findex\n self._index = 0 # temporary", "def Resize(self):\n\n self.history_length = int( round( self.owner['time_span']/self.owner['sample_speed']))\n self.FreshStart()", "def __len__(self):\n return 9 # logsfr_ratios has 6 bins", "def _setVals(self, 
cmd_length=0):\n self.cmd_length = cmd_length", "def modify_rec(self, events):\n\n pres_events = (events['trial'] == self._trial) & (events['type'] == 'CHEST') & (events['item_name'] != '')\n pres_events_inc_empty = (events['trial'] == self._trial) & (events['type'] == 'CHEST')\n listLength = np.sum(pres_events)\n for ind in np.where(pres_events_inc_empty)[0]:\n events[ind].listLength = listLength\n events[-1].listLength = listLength\n return events", "def log_inplace(a):", "def _resize_arr(self, new_capacity):\n if new_capacity < self._length:\n raise RuntimeError('New capacity is lower than length')\n\n # Copy values to array with new capacity\n longer_arr = self._create_array(new_capacity)\n for i in range(self._length):\n longer_arr[i] = self._arr[i]\n\n # Set the arr to the new array\n self._arr = longer_arr\n self._capacity = new_capacity", "def reset_results_arrays(self):\n raw_dims = self._dataset.raw_dims\n raw_dims = list(raw_dims[::-1])\n \n if len(self.data) != raw_dims:\n self.freq_all = np.zeros(raw_dims, complex)\n self.time_all = np.zeros(raw_dims, complex)", "def reset(self):\n for k in self.data_keys:\n setattr(self, k, [])\n self.size = 0", "def log_elements(self, log_elements):\n\n self._log_elements = log_elements", "def set_size(self, new_size: int):\n self.__tab_size = new_size\n self.__check_interpreter()\n self.__vals = [0 for _ in range(self.__tab_size)]", "def appendsize(self, numents):\n pass", "def _truncate(self):\n dif = len(self) - self._maxLen\n if dif > 0:\n #return\n self[:dif] = []", "def reset_results_arrays(self):\n raw_dim0 = self._dataset.raw_dims[0]\n \n if len(self.data) != raw_dim0:\n self.freq_current = np.zeros(raw_dim0, complex)", "def setLength(self, new_length):\n\n self.length = new_length", "def set_log_from_main(self, *args):\n if self.logarithmic:\n self.scale.props.adjustment.props.value = \\\n self.smart_log(self.adjustment.props.value)", "def len_all(self) :\n\n return settings.N_AUGMENTATIONS * len(self.run_pairs)", "def _increase_size(self) -> None:\n keys_vals_to_move = [item for item in self.HashMap if item]\n self.length = 0\n self.capacity = self.capacity * 2\n self.HashMap = [None] * self.capacity\n for item in keys_vals_to_move:\n while len(item) > 0:\n self.add(item[0], item[1])\n item.pop(0)\n item.pop(0)", "def grow(self):\r\n\r\n old = self._data\r\n self._capacity = 2 * self._capacity\r\n self._data = [0] * self._capacity\r\n\r\n for i in range(self._size):\r\n\r\n self._data[i] = old[i]", "def test_expand_data_1500_correct_len():\n # TODO: should it round up to allow last snippet of time?\n exp = expand_data(log, 1500)\n assert len(exp) == (log['end'].iloc[-1] / 1500)" ]
[ "0.64319164", "0.62030226", "0.6094597", "0.57537055", "0.5732013", "0.5701708", "0.568778", "0.563325", "0.5563749", "0.5548157", "0.55446655", "0.5532464", "0.5499947", "0.54449874", "0.538284", "0.53404", "0.53337634", "0.5331929", "0.5322957", "0.530966", "0.5293828", "0.52935344", "0.52921325", "0.527761", "0.52600455", "0.5221421", "0.5220474", "0.5219117", "0.52051336", "0.52027714" ]
0.78655356
0
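The resizing logic in change_log_length above reduces to: when growing, pad the front with the oldest value and keep the existing samples at the end; when shrinking, keep only the most recent samples. The following is a self-contained sketch of that idea for a single array (an assumption-laden simplification: the real method also rebuilds the time axis and loops over several named logs).

import numpy as np

def resize_log(arr, new_length):
    # Sketch of the grow/shrink behaviour of change_log_length() above.
    if new_length > arr.size:
        out = np.full(new_length, arr[0])   # pad the front with the oldest value
        out[-arr.size:] = arr               # keep measured values in place at the end
    else:
        out = arr[-new_length:].copy()      # keep only the most recent samples
    return out

log = np.array([1.0, 2.0, 3.0, 4.0])
print(resize_log(log, 6))   # [1. 1. 1. 2. 3. 4.]
print(resize_log(log, 2))   # [3. 4.]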
Changes the backlog length. If the requested backlog is smaller than the minimum implied by the box_length of the rolling average (box_length / frequency), the backlog is set to that minimum.
def change_backlog(self, backlog):
    min_backlog = int(numpy.ceil(self.box_length / self.frequency))
    if backlog < min_backlog:
        backlog = min_backlog
    log_length = self.calc_log_length(backlog, self.frequency)
    self.backlog = backlog
    self.change_log_length(log_length)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_log_length(self, backlog, frequency):\n return int(numpy.ceil(backlog * frequency))", "def change_tail_length(self, value):\n self.layer.tail_length = value", "def _on_len_change(self, event=None):\n with self.layer.events.length.blocker():\n self.lengthSpinBox.setValue(self.layer.length)", "def change_length(self, value):\n self.layer.length = value\n self.lengthSpinBox.clearFocus()\n self.setFocus()", "def change_backlog(self):\n if not self.lltext.text():\n return\n backlog = int(self.lltext.text())\n self.qbpm.change_backlog(backlog)\n self.lltext.setText(str(self.qbpm.backlog))", "def set_length(self, ak_tpl: BKT, newLength: float): # -> None:\n ...", "def _on_tail_length_change(self, event=None):\n with self.layer.events.tail_length.blocker():\n value = self.layer.tail_length\n value = np.clip(value, 1, MAX_TAIL_LENGTH)\n self.tail_length_slider.setValue(value)", "def setLength(self, new_length):\n\n self.length = new_length", "def change_tail_width(self, value):\n self.layer.tail_width = float(value) / 2.0", "def length_changed(self):\n\t\tprint \"length changed\"\n\t\tlength_cbox = self.ui.findChild(QWidget, \"length_cbox\")\t\n\t\tnew_length = length_cbox.currentText()\n\t\t# todo: calculate new values", "def update_max_fringe_size(self, fringe_len):\n if self.max_fringe_size < fringe_len:\n self.max_fringe_size = fringe_len", "def _on_tail_width_change(self, event=None):\n with self.layer.events.tail_width.blocker():\n value = self.layer.tail_width\n value = np.clip(int(2 * value), 1, MAX_TAIL_WIDTH)\n self.tail_width_slider.setValue(value)", "def set_length(self, new_length):\n if(new_length == None):\n self._logger.write(\"Error! new_length cannot be a NoneType\")\n elif(type(new_length) != float):\n self._logger.write(\"Error! new_length must be of type float\")\n else:\n try:\n self._length = new_length\n except Exception as e:\n self._logger.write(\"Error! 
Could not set the new length:\\n %s\" % e)", "def set_length(self, ak_spec: Union[str, BKT], val: float) -> None:\n ...", "def updateSize(self):\n self.currentsize = len(self.intervals)\n try:\n assert self.currentsize <= self.size\n except AssertionError:\n print(self.currentsize)\n print(self.size)\n sys.exit('[', sys.arg[0] + ']: Size problem')", "def setGoalLength(self, length):\n assert isinstance(length, int)\n self.goal_length = length", "def length(self, length: Union[int, float]):\n self._length = length\n self._update_length()\n self.events.length()\n\n self.refresh()", "def numBinsChanged(self, val):\n self.numBins = val", "def _set_length(self):\n if self.nb_points <= 1:\n self.length = 0\n else:\n ldiff_degree = self.coord_list[1:] - self.coord_list[:-1]\n ldiff_meter = ldiff_degree * np.pi * EQUATORIAL_EARTH_RADIUS / 180\n ldiff_meter[:, 0] *= np.cos(self.mean_pos[1] * np.pi / 180)\n self.length = np.sum(\n np.sqrt(ldiff_meter[:, 0] ** 2 + ldiff_meter[:, 1] ** 2)\n )", "def change_log_length(self, log_length):\n len_diff = abs(self.log_length - log_length)\n if log_length > self.log_length:\n for log_group in self.log_names.values():\n for log_array in log_group:\n tmparr = numpy.full(log_length, self.log_arrays[log_array][0]) # generate tmparr with first value from array\n tmparr[-self.log_arrays[log_array].size:] = self.log_arrays[log_array] # fill end with current array\n self.log_arrays[log_array] = tmparr\n tmparr = numpy.zeros(log_length)\n tmparr[:len_diff] = numpy.linspace(self.log_time[0] - len_diff/self.frequency,\n self.log_time[0], len_diff)\n tmparr[-self.log_time.size:] = self.log_time\n self.log_time = tmparr\n else:\n for log_group in self.log_names.values():\n for log_array in log_group:\n tmparr = numpy.zeros(log_length)\n tmparr[:] = self.log_arrays[log_array][-log_length:]\n self.log_arrays[log_array] = tmparr\n tmparr = numpy.zeros(log_length)\n tmparr[:] = self.log_time[-log_length:]\n self.log_time = tmparr\n self.log_length = log_length", "def set_length(stage, num):\n stage_maxes[stage] = num\n set_nums(stage)\n\n canvas.delete('tick_' + stage)\n\n if num == 0:\n return # No ticks\n\n # Draw the ticks in...\n _, y1, _, y2 = canvas.coords('bar_' + stage)\n\n dist = (width - 40) / num\n if round(dist) <= 1:\n # Don't have ticks if they're right next to each other\n return\n tag = 'tick_' + stage\n for i in range(num):\n pos = int(20 + dist*i)\n canvas.create_line(\n pos, y1, pos, y2,\n fill='#00785A',\n tags=tag,\n )\n canvas.tag_lower('tick_' + stage, 'bar_' + stage)", "def setLength(self, length):\n self.vector.norm = length", "def updateSize(self, *args):\n width = self.width.get()\n height = self.height.get()\n self.initialXScale.config(to=width)\n self.initialYScale.config(to=height)\n # error check that state is not outside bounds\n for ball, state in self.ballStates.items():\n if state[0] > width:\n state[0] = width\n if state[1] > height:\n state[1] = height", "async def gpt2_set_length(self, ctx, *, arg=None):\n print('Command gpt2_set_length triggered')\n if arg:\n try:\n i = int(arg)\n assert (i > 0) and (i < 1024)\n except ValueError or AssertionError:\n ctx.send(\"ERROR: Argument must be a positive integer number\")\n self.update_config(length=arg)\n else:\n await ctx.send(\"ERROR: Argument required\")", "def as_length(self, value):\n new_vec = self.copy()\n new_vec.length = value\n return new_vec", "def set_auto_throats_length(self):\n\n for n1, n2 in self.graph.edges:\n self.graph[n1][n2]['length'] = self._compute_auto_throat_length(\n 
n1, n2)", "def customize_weapon_bay_size(self, current_gameboard, new_size):\n current_gameboard['weapon_bay_size'] = new_size", "def Resize(self):\n\n self.history_length = int( round( self.owner['time_span']/self.owner['sample_speed']))\n self.FreshStart()", "def length(self, length):\n\n self._length = length", "def sn_size(self, val):\n if isinstance(val, int) and val >= 1:\n if val != self._faux._sn_size:\n self._faux._sn_size = val\n self._faux._update()\n else:\n warn(\"`val` not valid, no update performed\")" ]
[ "0.62607", "0.6225002", "0.61845237", "0.5975838", "0.5848272", "0.58313876", "0.5740493", "0.5715802", "0.5671967", "0.56239045", "0.5602261", "0.55881214", "0.5486976", "0.543901", "0.5392246", "0.53808755", "0.5377744", "0.5327991", "0.53013253", "0.527878", "0.5277581", "0.52509505", "0.5250277", "0.52177197", "0.52077633", "0.5181546", "0.51735556", "0.51621354", "0.5155727", "0.5128245" ]
0.7401362
0
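Putting the last two records together: the backlog (in seconds) is clamped so the rolling-average box always has enough samples, and the log length in samples then follows as ceil(backlog * frequency), the formula shown in the calc_log_length negative above. A small worked example, assuming a 10-sample averaging box at 5 Hz polling:

import math

def clamp_backlog(backlog_s, frequency_hz, box_length):
    # Worked example of the clamping in change_backlog() above (sketch).
    min_backlog = int(math.ceil(box_length / frequency_hz))
    backlog_s = max(backlog_s, min_backlog)                 # never shorter than the averaging box
    log_length = int(math.ceil(backlog_s * frequency_hz))   # samples kept in the log
    return backlog_s, log_length

print(clamp_backlog(1, 5.0, 10))    # (2, 10)   -> backlog raised to 2 s
print(clamp_backlog(120, 5.0, 10))  # (120, 600)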