query (stringlengths 9–9.05k) | document (stringlengths 10–222k) | metadata (dict) | negatives (listlengths 30) | negative_scores (listlengths 30) | document_score (stringlengths 4–10) | document_rank (stringclasses 2 values) |
---|---|---|---|---|---|---|
send measurement to self.pubTemp | def sendTemperature(self):
    if len(self.controller.myContainer.temperature) != 0:
        temp = sum(self.controller.myContainer.temperature) / len(self.controller.myContainer.temperature)
    else:
        temp = 0
    payload = ('{"ts": '+ str(int(time())) + ', "temp":' + str(temp) +
               '"data": { "status": ' + str(self.controller.status) + ', "setpoint": '+ str(self.controller.setpoint) + ' }}' )
    res, self.midTemp = self.client.publish(self.pubTemp, payload, qos=1, retain=False)
    if debug: print("Sent: ", payload , "on", self.pubTemp, "mid: ", self.midTemp)
    self.controller.myContainer.resetTempAccumulators()
    filename = self.pubTemp.replace("/", "-") + ".txt"
    if self.storeTempLocal:
        f = open(filename, 'a+')
        f.write(self.lastTempPayload+"\n")
        f.close()
    self.storeLocalTemp = True
    self.lastTempPayload = payload | {
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} | [
"def send_temp(context):\n job = context.job\n t1 = __sauna.control.getPortValue(\"Temperature Sensor 2\")\n t2 = float(\"{:.1f}\".format(t1))\n context.bot.send_message(job.context, text=\"Current Temp \" + str(t2) + \" Grad\")",
"def sendMeasurement(self, metric, value, source, timestamp=None):\n sys.stdout.write('{0} {1} {2} {3}\\n'.format(metric, value, source, timestamp).decode('utf-8'))\n sys.stdout.flush()",
"def publish():\n while True:\n mqttClient.reconnect()\n\n energy_data = getEnergyUsage()\n wats = float(energy_data['power_mw']) / 1000\n wat_hours = float(energy_data['total_wh'])\n\n sentPayload(name=\"power\", site=\"bathroom\", value=wats)\n sentPayload(name=\"energy_total\", site=\"bathroom\", value=wat_hours)\n\n time.sleep(updateInterval)",
"def sendEnergy(self):\n if len(self.controller.myContainer.vrms) != 0:\n vrms = sum(self.controller.myContainer.vrms) / len(self.controller.myContainer.vrms)\n irms = sum(self.controller.myContainer.irms) / len(self.controller.myContainer.irms)\n watts = sum(self.controller.myContainer.watts) / len(self.controller.myContainer.watts)\n else:\n vrms = irms = watts = 0\n payload = ('{\"ts\": '+ str(int(time())) + ', \"ace\": ' + str(self.controller.myContainer.ace_accum)\n + ', \"dce\": ' + str(self.controller.myContainer.dce_accum)+\n ', \"data\": { \"watt\": ' + str(watts) + ', \"vrms\": '+ str(vrms) + ', \"irms\": '+ str(irms) + ' }}' )\n\n res, self.midEnergy = self.client.publish(self.pubEnergy, payload, qos=1, retain=False)\n if debug: print(\"Sent: \", payload , \"on\", self.pubEnergy, \"mid: \", self.midEnergy)\n self.controller.myContainer.resetEnergyAccumulators()\n filename = self.pubEnergy.replace(\"/\", \"-\") + \".txt\"\n if self.storeEnergyLocal:\n f = open(filename, 'a+')\n f.write(self.lastEnergyPayload+\"\\n\")\n f.close()\n self.storeLocalEnergy = True\n self.lastEnergyPayload = payload",
"def get_measurements(self):\r\n self.msg_send_upr.data[0] = b\"\\xff\"[0]\r\n self.send_and_flush(self.msg_send_upr)",
"def mqtt_publish_fan_tempon():\n if not mqtt.connected:\n return\n cfg_option = 'fan_status_tempon'\n cfg_section = mqtt.GROUP_TOPICS\n value = round_temp(dev_fan.temperature_on)\n try:\n mqtt.publish(str(value), cfg_option, cfg_section)\n logger.debug(\n 'Published fan temperature ON=%s°C to MQTT topic %s',\n value, mqtt.topic_name(cfg_option, cfg_section))\n except Exception as errmsg:\n logger.error(\n 'Publishing fan temperature ON=%s°C to MQTT topic %s failed: %s',\n value, mqtt.topic_name(cfg_option, cfg_section), errmsg)",
"def temp(update: Update, context: CallbackContext) -> None:\n t1 = __sauna.control.getPortValue(\"Temperature Sensor\")\n t2 = float(\"{:.1f}\".format(t1))\n update.message.reply_text(\"Current Temp \" + str(t2) + \" Grad\")",
"def write_point(datum):\n measurement = {\n \"measurement\": \"weather\",\n \"tags\": {\n \"location\": LOCATION\n },\n \"time\": datetime.now().isoformat(),\n \"fields\": datum\n }\n CHANNEL.basic_publish(exchange='',\n routing_key='scribe',\n body=json.dumps(measurement))",
"def publishTimeTaken(self, data):\n time_taken = Float32()\n time_taken.data = data\n self.time_taken_pub.publish(data)",
"def Temp(t):\n return 20 # Need to link to data",
"def generate_message(self):\n meter = Meter.objects.get_or_create(name=\"4530303237303030303130313334353136\")[0]\n\n measurement = Measurement()\n measurement.meter = meter\n measurement.power_usage_current = random.randint(300,400) / 1000\n measurement.power_usage_total_low = 0\n measurement.power_usage_total_normal = 0\n measurement.power_supply_current = random.randint(300,400) / 1000\n measurement.power_supply_total_low = 0\n measurement.power_supply_total_normal = 0\n measurement.timestamp = datetime.now(pytz.utc)\n\n if(measurement.power_usage_current < measurement.power_supply_current):\n measurement.power_usage_current = 0\n else :\n measurement.power_supply_current = 0\n\n return measurement",
"def digital_temp_data(self): # This function will give the initial digital format for temperature data \n self._bus.write_byte(self._addr, 0x58) \n time.sleep(0.05) \n tempadcbytes = self._bus.read_i2c_block_data(self._addr, 0x00) \n time.sleep(0.05) \n self.tempadc=tempadcbytes[0]*65536.0+tempadcbytes[1]*256.0+tempadcbytes[2]",
"async def send_temp_change(self, newtemp):\n if not self.connected:\n return\n\n # Check if the temp is valid for the heatmode\n if (newtemp < self.tmin[self.temprange][self.tempscale] or\n newtemp > self.tmax[self.temprange][self.tempscale]):\n self.log.error(\"Attempt to set temp outside of boundary of heatmode\")\n return\n\n data = bytearray(8)\n data[0] = M_START\n data[1] = 6\n data[2] = mtypes[BMTS_SET_TEMP][0]\n data[3] = mtypes[BMTS_SET_TEMP][1]\n data[4] = mtypes[BMTS_SET_TEMP][2]\n\n if self.tempscale == self.TSCALE_C:\n newtemp *= 2.0\n val = int(round(newtemp))\n data[5] = val\n data[6] = messages.Message.crc(data[1:6])\n data[7] = M_END\n\n self.writer.write(data)\n await self.writer.drain()",
"def update_temp(self):\n\t\tcurrent_temp = self.thin.temperature\n\t\toutside_temp = self.outside.temperature\n\t\tself.thin.temperature = current_temp + 0.01*self.config.valve_coef*self.thin._actuation_value - self.config.out_temp_coef*(current_temp - outside_temp)",
"def update_out_temp(self):\n\t\tpass # Constant for now",
"def callback(ch, method, properties, body):\n print(f\" [x] Received {str(body)} kW.\")\n\n try:\n timestamp = properties.timestamp\n current_time = datetime.utcfromtimestamp(timestamp).replace(\n tzinfo=timezone.utc\n )\n except AttributeError:\n # If we don't get a timestamp from the broker, add a timestamp here.\n current_time = datetime.now().replace(tzinfo=timezone.utc)\n\n pv_photovoltaic = generate_pv_output(current_time)\n\n report_item = PVMeterReportItem(\n timestamp=current_time.isoformat(),\n pv_meter=int(body),\n pv_photovoltaic=pv_photovoltaic,\n )\n generate_report(report_item)\n\n ch.basic_ack(delivery_tag=method.delivery_tag)",
"def writeHAL_currTemp(self, val):\r\n self.hal['curr-temp'] = val",
"def _measurement_update(self):\n pass",
"def give_temperature(self, value):\n self.temperature = value",
"def arduPusherWPM(self):\r\n tmp = (self.wpm / self.maxWPM) * self.available_degrees \\\r\n if self.wpm < self.maxWPM else self.available_degrees\r\n # send number as char \r\n self.ardu.write(chr(int(tmp)))",
"def on_publish(self, client, userdata, mid):\n if debug: print(\"on_publish ... \\nuserdata: \", userdata, \"\\nmid: \", mid)\n\n if mid == self.midTemp:\n self.storeLocalTemp = False\n elif mid == self.midEnergy:\n self.storeLocalEnergy = False\n elif mid == self.midControls:\n self.storeLocalControls = False",
"def mqtt_publish_fan_tempoff():\n if not mqtt.connected:\n return\n cfg_option = 'fan_status_tempoff'\n cfg_section = mqtt.GROUP_TOPICS\n value = round_temp(dev_fan.temperature_off)\n try:\n mqtt.publish(str(value), cfg_option, cfg_section)\n logger.debug(\n 'Published fan temperature OFF=%s°C to MQTT topic %s',\n value, mqtt.topic_name(cfg_option, cfg_section))\n except Exception as errmsg:\n logger.error(\n 'Publishing fan temperature OFF=%s°C to MQTT topic %s failed: %s',\n value, mqtt.topic_name(cfg_option, cfg_section), errmsg)",
"def sendMessage(self):\n #print('sendMessage\\r')\n self.pub.publish(Twist(linear=self.linearVector, angular=self.angularVector))",
"def publish(self):\n msg_imu1, msg_mag1, msg_imu2, msg_mag2, msg_imu, msg_mag= self._create_msg()\n self.pub_imu.publish(msg_imu)\n self.pub_mag.publish(msg_mag)\n #------Uncomment to publish IMUs data separately------",
"def measure():\n print(\"alias, timestamp, current, total, power, voltage, err_code\")\n message_str = MeasurementRequest(None).to_json()\n socket_object = UdpSocket()\n s = UDPSendThread(message_str, socket_object)\n r = UDPRecvThread(socket_object, measurement_output_parser)\n s.start()\n r.start()\n\n wait((s, r))",
"def sendMessage(self):\n print('sendMessage')\n self.pub.publish(Twist(linear=self.linearVector, angular=self.angularVector))",
"def sendMessage(self):\n print(\"sendMessage\")\n self.pub.publish(Twist(linear=self.linearVector, angular=self.angularVector))",
"def simulate_temperature(self, temp):\n self._inside._mcp = CurrentTemperatureLibrary.SimAdc(self._inside, temp)",
"async def _handle_temperature(\n self, msg: uavcan.si.sample.temperature.Scalar_1_0, metadata: pyuavcan.transport.TransferFrom\n ) -> None:\n print(\"TEMPERATURE\", msg.kelvin - 273.15, \"C\")\n\n # Publish the message synchronously, using await, blocking this task until the message is pushed down to\n # the media layer.\n if not await self._pub_diagnostic_record.publish(\n uavcan.diagnostic.Record_1_1(\n severity=uavcan.diagnostic.Severity_1_0(uavcan.diagnostic.Severity_1_0.TRACE),\n text=f\"Temperature {msg.kelvin:0.3f} K from {metadata.source_node_id} \"\n f\"time={metadata.timestamp.system} tid={metadata.transfer_id} prio={metadata.priority}\",\n )\n ):\n print(\n f\"Diagnostic publication timed out in {self._pub_diagnostic_record.send_timeout} seconds\",\n file=sys.stderr,\n )",
"def temp(self):\n\t\ttemp_out = self.read16(MPU9250_ADDRESS, TEMP_DATA)\n\t\ttemp = temp_out / 333.87 + 21.0 # these are from the datasheets\n\t\treturn temp"
]
| [
"0.62318873",
"0.61939365",
"0.6153396",
"0.6117912",
"0.6080702",
"0.6041283",
"0.6015182",
"0.5937347",
"0.587916",
"0.58472216",
"0.57501924",
"0.5739049",
"0.57359487",
"0.57344073",
"0.56908",
"0.56750673",
"0.56663686",
"0.5665518",
"0.56356996",
"0.56098765",
"0.5607006",
"0.56017333",
"0.55741686",
"0.5572755",
"0.5566376",
"0.5561137",
"0.55606365",
"0.5545329",
"0.55368096",
"0.5534212"
]
| 0.7455403 | 0 |
send the manual control updates to the server | def sendControls(self):
    if self.controller.status:
        mode = '"cool3"'
        temp = self.controller.setpoint
    else:
        mode = '"off"'
        temp = self.controller.setpoint
    payload = '{"mode": ' + mode + ', "temp": ' + str(temp) + '}'
    res, self.midControls = self.client.publish(self.pubControls, payload, qos=1, retain=False)
    if debug: print("Sent", payload, "on", self.pubControls, "mid: ", self.midControls)
    filename = self.pubTemp.replace("/", "-") + ".txt"
    if self.storeControlsLocal:
        f = open(filename, 'a+')
        f.write(self.lastControlsPayload+"\n")
        f.close()
    self.storeControlsTemp = True
    self.lastControlsPayload = payload | {
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} | [
"def update_control(self):\n self._control_ctr += 0x01",
"def sendControls(self, status, tempset):\n outString = str(status) + '?' + str(tempset) + '?control'\n self.sendBytesToSTM(outString.encode(\"utf-8\"))",
"def updateControls(self, onoff=False, radio=True):\n self.myContainer.sendControls(self.status, self.setpoint)\n if onoff and self.status: self.myContainer.sendIRcode(\"cool3\", \"62\")\n elif onoff and not self.status: self.myContainer.sendIRcode(\"off\", \"0\")\n if radio:\n self.myRadio.sendControls()",
"def update_controller(self):",
"def manual_update(self):\n for cb, status in zip(self._manual_cbs, self.status_objs):\n cb(status)",
"def change_status():\n if self.on:\n connect.SOCKET.sendall(bytes(\"OFF\\n\", \"utf-8\"))\n self.on = False\n else:\n connect.SOCKET.sendall(bytes(\"ON\\n\", \"utf-8\"))\n self.on = True",
"def run(self):\n run=0\n wx.CallAfter(Publisher().sendMessage, \"update\", \"\")\n time.sleep(10)\n while (run==0):\n wx.CallAfter(Publisher().sendMessage, \"updatebuttons\", \"\")\n time.sleep(10)",
"def update():",
"def update():",
"def update( ):\r\n pass",
"def send_reg_command(self):\n button = self.sender()\n if button in self.btns.keys():\n state: str = button.text()\n move = None\n else:\n state = self.state.range\n move = (self.SpinRough.value() // 500) * 500\n param = registers.get_reg_str(state, move)\n answer: str = self.UsbHost.send_command(self.state.ser, \"SetAdf14\", str(self.state.device_id),\n param)\n if answer == 'Ok':\n self.statusbar.showMessage(self.result_dict[button])\n # при установлении диапазона, он записывается в интерфейсе везде\n if button in self.btn.keys():\n self.state.range = button.text()\n self.LblFreqVal.setText(button.text())\n self.LblResVal.setText(str(states_dict[button.text()][0] +\n self.SpinFine.value() + self.SpinRough.value()))\n # все кнопки серые, кроме кнопки режима\n for btn in self.btns.keys():\n btn.setStyleSheet(\"\")\n self.btns[btn].setStyleSheet(\"font: 16px\")\n button.setStyleSheet(\"background-color : rgb(70, 210, 00)\")\n self.btns[button].setStyleSheet(\"color: red; font: bold 16px\")\n else:\n error_message(self.error_dict[button])\n self.statusbar.showMessage(answer_translate[answer])\n\n # add commands to log\n param = registers.get_reg_str(state, move).replace(\" \", '\\n')\n param.replace(\" \", \"\\n\")\n self.create_log_message(button.text(), answer, param)\n\n # set sw command\n if answer == 'Ok' and button in self.btn.keys():\n params = '0 1' if button == self.BtnL1 else \"1 0\"\n self.set_sw(params)",
"def sendUpdate(self):\n\n self.timerCounter += 1\n if self.timerCounter % 1 == 0:\n self.update0_1s.emit()\n if (self.timerCounter + 5) % 10 == 0:\n self.update1s.emit()\n if (self.timerCounter + 10) % 30 == 0:\n self.update3s.emit()\n if (self.timerCounter + 20) % 100 == 0:\n self.update10s.emit()\n if (self.timerCounter + 25) % 600 == 0:\n self.update60s.emit()\n if (self.timerCounter + 12) % 1800 == 0:\n self.update3m.emit()\n if (self.timerCounter + 13) % 6000 == 0:\n self.update10m.emit()\n if (self.timerCounter + 14) % 18000 == 0:\n self.update30m.emit()\n if (self.timerCounter + 15) % 36000 == 0:\n self.update1h.emit()\n return True",
"def update(self):\n self.wc.update()",
"async def async_turn_on(self):\n self._ctrl.run_script(self._data[\"name\"])\n await self._ctrl.force_update()",
"def commandUpdate(self):\n pass",
"def send_output(self):\n self.__status_handler.io.async_refresh()",
"def en_update_msg(self, enable=True, freq=64):\n if enable:\n self.__send_short(self.MGMSG_HW_START_UPDATEMSGS, freq, 0x00)\n else:\n self.__send_short(self.MGMSG_HW_STOP_UPDATEMSGS, 0x00, 0x00)",
"def manual(self):\n self.status_message = \"State: Manual - Use sliders to control arm\"\n self.current_state = \"manual\"",
"def update(self):",
"def update(self):",
"def update(self):",
"def Update(self, controller):\n pass",
"def autosync(self):\n if self.atbaudrate != 115200:\n self.setBaudrate(self.atbaudrate)\n print(\"Trying autosync\")\n self.send('\\r\\n')\n time.sleep(1.0)\n self.send('+++')\n self.expect('OK', timeout=1.1)\n for i in range(5):\n self.send('\\r\\nATI\\r\\n')\n if not self.expect('SiK .* on', timeout=0.5):\n print(\"Failed to get SiK banner\")\n continue\n self.send('\\r\\n')\n time.sleep(0.2)\n self.port.flushInput()\n self.send('AT&UPDATE\\r\\n')\n time.sleep(0.7)\n self.port.flushInput()\n if self.atbaudrate != 115200:\n self.setBaudrate(115200)\n print(\"Sent update command\")\n return True\n if self.atbaudrate != 115200:\n self.setBaudrate(115200)\n return False",
"def takeControl(self):\n mainloop()",
"def takeControl(self):\n mainloop()",
"def update(self):\r\n pass",
"def send_control_change(self, cc=0, value=0, ch=None):\n self.send_channel_message(CONTROL_CHANGE, cc, value, ch=ch)",
"def updateGui(self, msg):\n TRIES = 10\n for i in range(1, TRIES+1):\n try: # attempt up to 10 times if socket error\n #self.__gui_app.sendUpdate(msg)\n # LJR - Fixed Python 2.7 bug by reconnecting to server locally on each request\n g= xmlrpclib.ServerProxy('http://localhost:%d' % self.__xmlrpc_port)\n g.sendUpdate(msg)\n break\n except socket.error, info: # most likely 'connection refused'\n if i == TRIES:\n print \"ERROR: XML-RPC call failed all %d tries! %s\" % (i,info)\n sys.exit(-1)\n time.sleep(0.1)",
"def _send_blocking(self, info_type, info):\n\n # skip if API key don't work or service down\n if not self.enabled:\n return 'disabled'\n\n url = self._url_join(self.base_url, 'v1/update')\n self.status = send_to_backend(url, self.api_key, info_type, info)",
"def take_control(self):\n pass"
]
| [
"0.6650497",
"0.66212636",
"0.6336081",
"0.6305258",
"0.6170719",
"0.600657",
"0.59971505",
"0.59700865",
"0.59700865",
"0.5958515",
"0.594598",
"0.593457",
"0.5918884",
"0.587391",
"0.58698344",
"0.5818733",
"0.579478",
"0.57647264",
"0.5762704",
"0.5762704",
"0.5762704",
"0.5719861",
"0.571948",
"0.56991506",
"0.56991506",
"0.5695609",
"0.5693999",
"0.56125546",
"0.560998",
"0.55900097"
]
| 0.705174 | 0 |
Set up the HTU21D sensor. | async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    name = config.get(CONF_NAME)
    bus_number = config.get(CONF_I2C_BUS)
    temp_unit = hass.config.units.temperature_unit
    bus = smbus.SMBus(config.get(CONF_I2C_BUS))
    sensor = await hass.async_add_executor_job(partial(HTU21D, bus, logger=_LOGGER))
    if not sensor.sample_ok:
        _LOGGER.error("HTU21D sensor not detected in bus %s", bus_number)
        return False
    sensor_handler = await hass.async_add_executor_job(HTU21DHandler, sensor)
    dev = [
        HTU21DSensor(sensor_handler, name, SENSOR_TEMPERATURE, temp_unit),
        HTU21DSensor(sensor_handler, name, SENSOR_HUMIDITY, PERCENTAGE),
    ]
    async_add_entities(dev) | {
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} | [
"def setupHw():\n\n pin.setupHw()\n pin.setupOutPins(traffic_lights)\n pin.setDebug(False)",
"def _initialize_hardware(self):\n # Import\n try:\n import board\n import busio\n import adafruit_vl6180x\n except Exception as ex:\n logging.error(\n '\\n *** ERROR importing Adafruit libraries: {}'.format(\n ex,\n ),\n )\n\n # Things failed, so we must be running locally, not on a widget;\n # don't bother hooking up the VL6180X\n return\n\n # Initialize I2C and VL6180X\n try:\n i2c = busio.I2C(board.SCL, board.SDA)\n self._sensor = adafruit_vl6180x.VL6180X(i2c)\n except Exception as ex:\n logging.error(\n '\\n *** ERROR initializing I2C/LSM303: {}'.format(ex),\n )\n\n self._initialize_id_led()",
"def setUp(self):\n self.ser = Serial()\n self.device_obj = ZBSensor(self.ser)",
"def activate_sensors(self):\n self._sense_hat = SenseHat()\n\n # Scroll Init message over HAT screen\n self._show_message('Init Sensors', (255, 255, 0), (0, 0, 255))\n\n # Init sensors, to be sure first effective run uses correct sensors values\n self._sense_hat.get_humidity()\n self._sense_hat.get_pressure()\n\n # Setup Sense Hat stick\n self._sense_hat.stick.direction_up = self._change_weather_entity\n self._sense_hat.stick.direction_down = self._change_weather_entity\n self._sense_hat.stick.direction_left = self._change_weather_entity\n self._sense_hat.stick.direction_right = self._change_weather_entity",
"def setup_platform(hass, config, add_entities, discovery_info=None):\n available_sensors = {\n \"AM2302\": adafruit_dht.DHT22,\n \"DHT11\": adafruit_dht.DHT11,\n \"DHT22\": adafruit_dht.DHT22,\n }\n sensor = available_sensors.get(config[CONF_SENSOR])\n pin = config[CONF_PIN]\n temperature_offset = config[CONF_TEMPERATURE_OFFSET]\n humidity_offset = config[CONF_HUMIDITY_OFFSET]\n name = config[CONF_NAME]\n\n if not sensor:\n _LOGGER.error(\"DHT sensor type is not supported\")\n return False\n\n data = DHTClient(sensor, pin, name)\n\n monitored_conditions = config[CONF_MONITORED_CONDITIONS]\n entities = [\n DHTSensor(data, name, temperature_offset, humidity_offset, description)\n for description in SENSOR_TYPES\n if description.key in monitored_conditions\n ]\n\n add_entities(entities, True)",
"def __init__(self, hass, device: Warmup4IEDevice, client: WarmupClient):\n _LOGGER.info(\"Setting up Warmup Thermostat %s\", device.get_room_name())\n self._device = device\n self._client = client\n\n self.attributes = {}\n self._update_attributes_from_device()\n\n self._current_temperature = device.current_temperature\n self._target_temperature = device.target_temperature\n self._min_temp = device.min_temp\n self._max_temp = device.max_temp\n self._name = device.get_room_name()\n self._attr_unique_id = \"warmup_\" + device.get_serial_number()\n\n self._current_operation_mode = device.run_mode\n self._away = False\n self._on = True",
"def setup(self):\n if not self._gpio_setup:\n if self._gpio is None:\n try:\n import RPi.GPIO as GPIO\n self._gpio = GPIO\n except ImportError:\n raise ImportError('This library requires the RPi.GPIO module\\nInstall with: sudo apt install python-rpi.gpio')\n self._gpio.setmode(self._gpio.BCM)\n self._gpio.setwarnings(False)\n self._gpio.setup(self.cs_pin, self._gpio.OUT)\n self._gpio.setup(self.dc_pin, self._gpio.OUT, initial=self._gpio.LOW, pull_up_down=self._gpio.PUD_OFF)\n self._gpio.setup(self.reset_pin, self._gpio.OUT, initial=self._gpio.HIGH, pull_up_down=self._gpio.PUD_OFF)\n self._gpio.setup(self.busy_pin, self._gpio.IN, pull_up_down=self._gpio.PUD_OFF)\n\n if self._spi_bus is None:\n import spidev\n self._spi_bus = spidev.SpiDev()\n\n self._spi_bus.open(0, self.cs_channel)\n self._spi_bus.no_cs = True\n self._spi_bus.max_speed_hz = 5000000\n\n self._gpio_setup = True\n\n self._gpio.output(self.reset_pin, self._gpio.LOW)\n time.sleep(0.1)\n self._gpio.output(self.reset_pin, self._gpio.HIGH)\n time.sleep(0.1)\n\n self._gpio.output(self.reset_pin, self._gpio.LOW)\n time.sleep(0.1)\n self._gpio.output(self.reset_pin, self._gpio.HIGH)\n\n self._busy_wait(1.0)\n\n # Sending init commands to display\n self._send_command(AC073TC1_CMDH, [0x49, 0x55, 0x20, 0x08, 0x09, 0x18])\n\n self._send_command(AC073TC1_PWR, [0x3F, 0x00, 0x32, 0x2A, 0x0E, 0x2A])\n\n self._send_command(AC073TC1_PSR, [0x5F, 0x69])\n\n self._send_command(AC073TC1_POFS, [0x00, 0x54, 0x00, 0x44])\n\n self._send_command(AC073TC1_BTST1, [0x40, 0x1F, 0x1F, 0x2C])\n\n self._send_command(AC073TC1_BTST2, [0x6F, 0x1F, 0x16, 0x25])\n\n self._send_command(AC073TC1_BTST3, [0x6F, 0x1F, 0x1F, 0x22])\n\n self._send_command(AC073TC1_IPC, [0x00, 0x04])\n\n self._send_command(AC073TC1_PLL, [0x02])\n\n self._send_command(AC073TC1_TSE, [0x00])\n\n self._send_command(AC073TC1_CDI, [0x3F])\n\n self._send_command(AC073TC1_TCON, [0x02, 0x00])\n\n self._send_command(AC073TC1_TRES, [0x03, 0x20, 0x01, 0xE0])\n\n self._send_command(AC073TC1_VDCS, [0x1E])\n\n self._send_command(AC073TC1_T_VDCS, [0x00])\n\n self._send_command(AC073TC1_AGID, [0x00])\n\n self._send_command(AC073TC1_PWS, [0x2F])\n\n self._send_command(AC073TC1_CCSET, [0x00])\n\n self._send_command(AC073TC1_TSSET, [0x00])",
"def initialize_electronics(self):\n\n self.electronics = ArduinoModel(**self.config['electronics']['arduino'])\n self.logger.info('Initializing electronics arduino')\n self.electronics.initialize()",
"def setup_usb(self):\n global DEVICE\n global epBulkWriter\n global epBulkReader\n global VID\n global PID\n\n DEVICE = usb.core.find(idVendor=0x2AB9,idProduct=0xFFFF)\n if DEVICE is None:#If not a LVPM, look for an HVPM.\n DEVICE = usb.core.find(idVendor=0x04d8,idProduct=0x000b)\n VID = '0x4d8'\n PID = '0xb'\n if \"Linux\" == platform.system():\n try:\n DEVICE.detach_kernel_driver(0)\n except:\n pass # already unregistered\n DEVICE.set_configuration()\n\n cfg = DEVICE.get_active_configuration()\n intf = cfg[(0,0)]\n\n epBulkWriter = usb.util.find_descriptor(\n intf,\n custom_match = \\\n lambda e: \\\n usb.util.endpoint_direction(e.bEndpointAddress) == \\\n usb.util.ENDPOINT_OUT)\n epBulkReader = usb.util.find_descriptor(\n intf,\n custom_match = \\\n lambda e: \\\n usb.util.endpoint_direction(e.bEndpointAddress) == \\\n usb.util.ENDPOINT_IN)",
"def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n host = config[CONF_HOST]\n monitored_variables = config[CONF_MONITORED_VARIABLES]\n\n charger = openevsewifi.Charger(host)\n\n entities = [\n OpenEVSESensor(charger, description)\n for description in SENSOR_TYPES\n if description.key in monitored_variables\n ]\n\n add_entities(entities, True)",
"def init():\n\n global leftDriverStick\n global rightDriverStick\n global goGamePad\n\n try:\n leftDriverStick = T16000M(0)\n except:\n print('OI: Error - Could not instantiate Left Driver Stick on USB port 0!!!')\n\n try:\n rightDriverStick = T16000M(1)\n except:\n print('OI: Error - Could not instantiate Right Driver Stick on USB port 0!!!')\n\n try:\n goGamePad = Joystick(2)\n except:\n print('OI: Error - Could not instantiate Right Driver Stick on USB port 2!!!')\n\n\n # ----------------------------------------------------------\n # Driver Controls\n # ----------------------------------------------------------\n #global resetYawBtn\n #resetYawBtn = JoystickButton(rightDriverStick, config.btnResetYawAngleIndex)\n #resetYawBtn.whenPressed(NavxResetYawAngle())\n\n global btnDriveSlow\n btnDriveSlow = JoystickButton(leftDriverStick, config.btnDriveSlow)\n \n global btnEnableLightSensor\n btnEnableLightSensor = JoystickButton(leftDriverStick, config.btnEnableLightSensorIndex)\n\n global btnExtendAll\n btnExtendAll = JoystickButton(rightDriverStick, config.btnExtendAllIndex)\n btnExtendAll.whenPressed(ExtendAll())\n\n global btnRetract\n btnRetract = JoystickButton(rightDriverStick, config.btnRetractAllIndex)\n btnRetract.whenPressed(RetractAll())\n\n global btnExtendFront\n btnExtendFront = JoystickButton(rightDriverStick, config.btnExtendFrontIndex)\n btnExtendFront.whenPressed(ExtendFront())\n\n global btnExtendBack\n btnExtendBack = JoystickButton(rightDriverStick, config.btnExtendBackIndex)\n btnExtendBack.whenPressed(ExtendBack())\n\n global btnRetractFront\n btnRetractFront = JoystickButton(rightDriverStick, config.btnRetractFrontIndex)\n btnRetractFront.whenPressed(RetractFront())\n\n global btnCargoGrabTog\n btnCargoGrabTog = JoystickButton(goGamePad, config.btnHatchGrabTogIndex)\n btnCargoGrabTog.whenPressed(ExtendBack())\n \n \"\"\"\n global btnResetEncoders\n btnResetEncoders = JoystickButton(leftDriverStick, config.btnResetEncodersIndex)\n btnResetEncoders.whenPressed(TankDriveResetEncoders())\n \"\"\"\n\n \"\"\"\n global axisElevator\n axisElevator = JoystickAxis(goGamePad, config.axisElevatorIndex)\n axisElevator. #??? idk how to configure joystick axis\n \"\"\"\n\n \"\"\"\n global btnRampTog\n btnRampTog = JoystickButton(goGamePad, config.btnRampTogIndex)\n btnRampTog.whenPressed(ExtendFront())\n \"\"\"\n #global btnResetEncoders\n #btnResetEncoders = JoystickButton(leftDriverStick, config.btnResetEncodersIndex)\n #btnResetEncoders.whenPressed(TankDriveResetEncoders())\n\n # These variable names are inconsistent, need to be fixed!!!!\n #global btnRampExtendTog\n #btnRampExtendTog = JoystickButton(goGamePad, config.btnRampExtendTogIndex)\n #btnRampExtendTog.whenPressed(RampExtend())\n\n #global btnRampRetractTog\n #btnRampRetractTog = JoystickButton(goGamePad, config.btnRampRetractTogIndex)\n #btnRampRetractTog.whenPressed(RampRetract())",
"def __init__(self):\r\n # Check device ID.\r\n chip_id = self._read_byte(_BME280_REGISTER_CHIPID)\r\n if _BME280_CHIPID != chip_id:\r\n raise RuntimeError('Failed to find BME280! Chip ID 0x%x' % chip_id)\r\n self._write_register_byte(_BME280_REGISTER_SOFTRESET, 0xB6)\r\n time.sleep(0.5)\r\n self._read_coefficients()\r\n self.sea_level_pressure = 1013.25\r\n \"\"\"Pressure in hectoPascals at sea level. Used to calibrate `altitude`.\"\"\"\r\n # turn on humidity oversample 16x\r\n self._write_register_byte(_BME280_REGISTER_CTRL_HUM, 0x03)\r\n self._t_fine = None",
"def __init__(self):\n # FIXME: IS this needed?\n super(ArduinoStation, self).__init__()\n\n self.serial_port_pattern = '/dev/ttyACM{port_num}'\n self.serial_port_num = None\n self.baudrate = 9600\n self.ser = self._setup_serial_connection()\n\n\n # Sensor 1 (DHT11) has 2 readings, Sensor 2 has 1\n ## FIXME: Should look for key pairs in list and submit when no more unique readings are coming through\n if config.SCB_CONFIGURATION == 'standard':\n self.lines_per_observation = 3\n else:\n self.lines_per_observation = 7 # Allows for up to 5 DS18B20 along w/ DHT-11.",
"def setup(self):\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(self.Motor_A_EN, GPIO.OUT)\n GPIO.setup(self.Motor_B_EN, GPIO.OUT)\n GPIO.setup(self.Motor_A_Pin1, GPIO.OUT)\n GPIO.setup(self.Motor_A_Pin2, GPIO.OUT)\n GPIO.setup(self.Motor_B_Pin1, GPIO.OUT)\n GPIO.setup(self.Motor_B_Pin2, GPIO.OUT)\n self.motorStop() # Avoids automatic motor rotation after initialization\n try: # Try is used here to avoid errors due to repeated setting of PWM\n self.pwm_A = GPIO.PWM(self.Motor_A_EN, 1000)\n self.pwm_B = GPIO.PWM(self.Motor_B_EN, 1000)\n except:\n pass",
"def setup():\n GPIO.setmode(GPIO.BCM)\n for pin in [CHURCH, CHURCH + 1, HALL, HALL + 1]:\n GPIO.setup(pin, GPIO.OUT, initial=GPIO.HIGH)",
"def setup():\n GPIO.setmode(GPIO.BCM)\n for pin in [config.gpio_pin_p1_stretch,\n config.gpio_pin_p1_serve,\n config.gpio_pin_p2_stretch,\n config.gpio_pin_p2_serve]:\n GPIO.setup(pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n\n input_reader_thread = threading.Thread(target=input_reader_worker)\n input_reader_thread.setDaemon(True)\n input_reader_thread.start()",
"def _setup_sensor ( self ):\n self.spectral = Spectral ( np.array([450, 520, 630, 770., 1550, 2090.] ),\n np.array([ 520, 600, 690, 900., 1750., 2350.] ) )",
"def _setup_sensor ( self ):\n self.spectral = Spectral ( np.array([500, 610, 780, 1580.] ),\n np.array([590, 680, 890, 1750.] ) )",
"def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n host: str = config[CONF_HOST]\n port: int = config[CONF_PORT]\n name: str = config[CONF_NAME]\n url = f\"http://{host}:{port}/api/LiveData.xml\"\n\n gateway = Ted5000Gateway(url)\n\n # Get MUT information to create the sensors.\n gateway.update()\n\n entities = []\n for mtu in gateway.data:\n for description in SENSORS:\n entities.append(Ted5000Sensor(gateway, name, mtu, description))\n\n add_entities(entities)",
"def setup(self):\n \n # Board refers to the P1 header of the Raspberry Pi board\n GPIO.setmode(GPIO.BOARD)\n\n # Set up pin as an input with a pull up resistor to 3.3V\n GPIO.setup(self.__pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)",
"def __init__(self, sensor, temperature_resolution, humidity_resolution):\n self.sensor = sensor\n self.sensor.turnHeaterOn() \n time.sleep(1.0) # Burn off condensed stuff.\n self.sensor.turnHeaterOff() \n self.update()\n # Main Program\n #print \"------------\"\n #print \"Manfacturer ID=0x%X\"% self.sensor.readManufacturerID() \n #print \"Device ID=0x%X\"% self.sensor.readDeviceID() \n #print \"Serial Number ID=0x%X\"% self.sensor.readSerialNumber() \n \n # change temperature resolution\n self.sensor.setTemperatureResolution(temperature_resolution)\n self.sensor.setHumidityResolution(humidity_resolution)",
"def setup(self):\n self.pi.set_pull_up_down(self.gpio, pigpio.PUD_OFF)\n self.pi.set_watchdog(self.gpio, 0)\n self.register_callbacks()",
"def __init__(self):\n GPIO.setwarnings(False)\n GPIO.cleanup() # Reset the high and low levels of the GPIO port\n #The following code defines the GPIO used to control the L298N chip. This definition is different for different Raspberry Pi driver boards.\n self.Motor_A_EN = 17\n self.Motor_B_EN = 4\n self.Motor_A_Pin1 = 27\n self.Motor_A_Pin2 = 18\n self.Motor_B_Pin1 = 21\n self.Motor_B_Pin2 = 26\n self.setup()",
"def setup(self):\n\n self._enable_torque(self._reg.TORQUE_ENABLE)\n self.change_operating_mode(self._reg.MODE_EXT_POSI)\n # set to max velocity\n self.change_veloity(self._default_velocity)",
"def robotInit(self):\n\n #Initialize Networktables\n self.sd = NetworkTables.getTable('SmartDashboard')\n\n \n #Set up motors to drive robot\n self.M2 = wpilib.VictorSP(2)\n self.M3 = wpilib.VictorSP(3)\n #self.M2.setInverted(True)\n #self.M3.setInverted(True)\n self.left = wpilib.SpeedControllerGroup(self.M2,self.M3)\n \n self.M0 = wpilib.VictorSP(0)\n self.M1 = wpilib.VictorSP(1)\n self.right = wpilib.SpeedControllerGroup(self.M0,self.M1)\n self.drive = wpilib.drive.DifferentialDrive(self.left, self.right)\n \n \n self.stick = wpilib.Joystick(1)\n self.timer = wpilib.Timer()\n #Camera\n wpilib.CameraServer.launch()\n #Servo\n self.SV1 = wpilib.Servo(9)\n self.SV2 = wpilib.Servo(8) \n #Dashboard\n NetworkTables.initialize(server='10.61.62.2')\n #Switches\n self.SW0 = wpilib.DigitalInput(0)\n self.SW1 = wpilib.DigitalInput(1)\n #Elevator\n self.E = wpilib.VictorSP(5)\n self.prepareCubeFlag = 0\n self.grabCubeFlag = 0\n self.deliverCubeFlag = 0\n self.adjustLeftFlag=0\n self.adjustRightFlag=0\n self.driveFlag=0\n #Gyro\n self.gyro = wpilib.ADXRS450_Gyro(0)\n self.gyro.reset()\n #All possible autonomous routines in a sendable chooser\n '''\n self.chooser = wpilib.SendableChooser()\n self.chooser.addDefault(\"None\", '4')\n self.chooser.addObject(\"left-LeftScale\", '1')\n self.chooser.addObject(\"Middle-LeftScale\", '2')\n self.chooser.addObject(\"Right-LeftScale\", '3')\n self.chooser.addObject(\"Left-RightScale\", '5')\n '''\n #wpilib.SmartDashboard.putData('Choice', self.chooser)\n #Encoders\n self.EC1 = wpilib.Encoder(2,3)\n self.EC2 = wpilib.Encoder(4,5)\n self.EC1.reset()\n self.EC2.reset()",
"def setup_sensors(self):\n super(EddRoach2ProductController, self).setup_sensors()\n self._firmware_server_sensor = Sensor.string(\n \"firmware-server\",\n description=\"The address of the firmware server started by this product\",\n default=\"\",\n initial_status=Sensor.UNKNOWN)\n self.add_sensor(self._firmware_server_sensor)\n self._parent.mass_inform(Message.inform('interface-changed'))",
"def setup(hass, config):\n\n logger = logging.getLogger(__name__)\n\n try:\n import tellcore.telldus as telldus\n import tellcore.constants as tellcore_constants\n except ImportError:\n logger.exception(\n \"Failed to import tellcore\")\n return False\n\n core = telldus.TelldusCore()\n\n sensors = core.sensors()\n\n if len(sensors) == 0:\n logger.error(\"No Tellstick sensors found\")\n return False\n\n sensor_value_descriptions = {\n tellcore_constants.TELLSTICK_TEMPERATURE:\n DatatypeDescription(\n 'temperature', config[DOMAIN]['temperature_scale']),\n\n tellcore_constants.TELLSTICK_HUMIDITY:\n DatatypeDescription('humidity', ' %'),\n\n tellcore_constants.TELLSTICK_RAINRATE:\n DatatypeDescription('rain rate', ''),\n\n tellcore_constants.TELLSTICK_RAINTOTAL:\n DatatypeDescription('rain total', ''),\n\n tellcore_constants.TELLSTICK_WINDDIRECTION:\n DatatypeDescription('wind direction', ''),\n\n tellcore_constants.TELLSTICK_WINDAVERAGE:\n DatatypeDescription('wind average', ''),\n\n tellcore_constants.TELLSTICK_WINDGUST:\n DatatypeDescription('wind gust', '')\n }\n\n def update_sensor_value_state(sensor_name, sensor_value):\n \"Update the state of a sensor value\"\n sensor_value_description = \\\n sensor_value_descriptions[sensor_value.datatype]\n sensor_value_name = '{} {}'.format(\n sensor_name, sensor_value_description.name)\n\n entity_id = ENTITY_ID_FORMAT.format(\n util.slugify(sensor_value_name))\n\n state = sensor_value.value\n\n state_attr = {\n ATTR_FRIENDLY_NAME: sensor_value_name,\n ATTR_UNIT_OF_MEASUREMENT: sensor_value_description.unit\n }\n\n hass.states.set(entity_id, state, state_attr)\n\n sensor_value_datatypes = [\n tellcore_constants.TELLSTICK_TEMPERATURE,\n tellcore_constants.TELLSTICK_HUMIDITY,\n tellcore_constants.TELLSTICK_RAINRATE,\n tellcore_constants.TELLSTICK_RAINTOTAL,\n tellcore_constants.TELLSTICK_WINDDIRECTION,\n tellcore_constants.TELLSTICK_WINDAVERAGE,\n tellcore_constants.TELLSTICK_WINDGUST\n ]\n\n def update_sensor_state(sensor):\n \"Updates all the sensor values from the sensor\"\n try:\n sensor_name = config[DOMAIN][str(sensor.id)]\n except KeyError:\n if 'only_named' in config[DOMAIN]:\n return\n sensor_name = str(sensor.id)\n\n for datatype in sensor_value_datatypes:\n if datatype & int(config[DOMAIN]['datatype_mask']) and \\\n sensor.has_value(datatype):\n update_sensor_value_state(sensor_name, sensor.value(datatype))\n\n # pylint: disable=unused-argument\n def update_sensors_state(time):\n \"Update the state of all sensors\"\n for sensor in sensors:\n update_sensor_state(sensor)\n\n update_sensors_state(None)\n\n hass.track_time_change(update_sensors_state, second=[0, 30])\n\n return True",
"def setup(hass, base_config):\n from pyhusmow import API as HUSMOW_API\n\n config = base_config.get(DOMAIN)\n\n if hass.data.get(DOMAIN) is None:\n hass.data[DOMAIN] = { 'devices': [] }\n\n api = HUSMOW_API()\n api.login(config.get(CONF_USERNAME), config.get(CONF_PASSWORD))\n\n robots = api.list_robots()\n\n if not robots:\n return False\n\n for robot in robots:\n hass.data[DOMAIN]['devices'].append(AutomowerDevice(robot, api))\n\n for component in AUTOMOWER_COMPONENTS:\n discovery.load_platform(hass, component, DOMAIN, {}, base_config)\n\n return True",
"def __init__(self):\n Thread.__init__(self)\n self.temperature_c = None\n self.humidity = None\n self.running = True\n self._sensor = adafruit_dht.DHT22(board.D17, use_pulseio=False)\n self._current_temperature_reading = None\n self._current_humidity_reading = None",
"def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n name = config.get(CONF_NAME)\n mac = config.get(CONF_MAC)\n _LOGGER.debug(\"Setting up\")\n\n mon = Monitor(hass, mac, name)\n add_entities([SkybeaconTemp(name, mon)])\n add_entities([SkybeaconHumid(name, mon)])\n\n def monitor_stop(_service_or_event):\n \"\"\"Stop the monitor thread.\"\"\"\n _LOGGER.info(\"Stopping monitor for %s\", name)\n mon.terminate()\n\n hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, monitor_stop)\n mon.start()"
]
| [
"0.6785495",
"0.60709953",
"0.6040685",
"0.6019263",
"0.5967158",
"0.59412783",
"0.5906236",
"0.5894144",
"0.5891464",
"0.58776057",
"0.584603",
"0.58359516",
"0.58337975",
"0.58261347",
"0.5811149",
"0.58005816",
"0.5769889",
"0.5766768",
"0.5764947",
"0.5758379",
"0.5749928",
"0.571215",
"0.56975174",
"0.5684353",
"0.5684313",
"0.56721944",
"0.56631845",
"0.566291",
"0.5648933",
"0.56465095"
]
| 0.65923154 | 1 |
Analyze and print the bond orders, and optionally use for the bonding in the structure. | def _bond_orders(self, control, bond_order_matrix, configuration):
    text = ""
    n_atoms = configuration.n_atoms
    bond_i = []
    bond_j = []
    bond_order = []
    bond_order_str = []
    orders = []
    ij = 0
    for j in range(n_atoms):
        for i in range(j + 1):
            if i != j:
                order = bond_order_matrix[ij]
                if order > 0.5:
                    bond_i.append(i)
                    bond_j.append(j)
                    if order > 1.3 and order < 1.7:
                        bond_order.append(5)
                        bond_order_str.append("aromatic")
                    else:
                        bond_order.append(round(order))
                        bond_order_str.append(str(round(order)))
                    orders.append(order)
            ij += 1
    symbols = configuration.atoms.symbols
    options = self.parent.options
    text_lines = []
    if len(symbols) <= int(options["max_atoms_to_print"]):
        if "name" in configuration.atoms:
            name = configuration.atoms.get_column_data("name")
        else:
            name = []
            count = {}
            for symbol in symbols:
                if symbol not in count:
                    count[symbol] = 1
                else:
                    count[symbol] += 1
                name.append(f"{symbol}{count[symbol]}")
        table = {
            "i": [name[i] for i in bond_i],
            "j": [name[j] for j in bond_j],
            "bond order": orders,
            "bond multiplicity": bond_order_str,
        }
        tmp = tabulate(
            table,
            headers="keys",
            tablefmt="pretty",
            disable_numparse=True,
            colalign=("center", "center", "right", "center"),
        )
        length = len(tmp.splitlines()[0])
        text_lines.append("\n")
        text_lines.append("Bond Orders".center(length))
        text_lines.append(
            tabulate(
                table,
                headers="keys",
                tablefmt="psql",
                colalign=("center", "center", "decimal", "center"),
            )
        )
        text += "\n\n"
        text += textwrap.indent("\n".join(text_lines), self.indent + 7 * " ")
    if control == "yes, and apply to structure":
        ids = configuration.atoms.ids
        iatoms = [ids[i] for i in bond_i]
        jatoms = [ids[j] for j in bond_j]
        configuration.bonds.delete()
        configuration.bonds.append(i=iatoms, j=jatoms, bondorder=bond_order)
        text2 = (
            "\nReplaced the bonds in the configuration with those from the "
            "calculated bond orders.\n"
        )
        text += str(__(text2, indent=self.indent + 4 * " "))
    return text | {
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} | [
"def payment_info_and_status(report):\n\n order_data = open(report)\n for line in order_data:\n order = lines_into_list(line) # function will split each line by '|' and get a list of strings\n # each order has 4 strings\n order[0:1] = []\n name = order.pop(0)\n order_as_floats = convert_to_floats(order)\n melon_count, paid = order_as_floats\n expected_cost = calculate_expected_cost(melon_cost, melon_count)\n print(f\"{name} paid ${paid:.2f}, expected ${expected_cost:.2f}\")\n check_order(name, expected_cost, paid)\n order_data.close()",
"def _printOrderStatus(self, targetorders):\n result = list(self.getList(self.root))\n open_order = filter(lambda y: (y[1] % 2) != 0, result)\n close_order = filter(lambda y: ((y[1] % 2) == 0 and y[1] != 0), result)\n open = list(open_order)\n close = list(close_order)\n close_order_count = 0\n for x in close:\n result = x[1] // 2\n close_order_count += result\n open_order_count = 0\n for x in open:\n result = x[1] + 1 // 2\n open_order_count += result\n balance = targetorders - (open_order_count + close_order_count)\n print(f'Open Orders: {open_order_count}')\n print(f'Closed Orders: {close_order_count}')\n print(f'Yet to be fulfilled: {balance}')\n print('------------------------------------')",
"def print_order(self):\r\n print(\"Order: Symbol=%s, Type=%s, Quantity=%s, Direction=%s\") % \\\r\n (self.symbol, self.order_type, self.quantity, self.direction)",
"def order_report():",
"def proposed_order_print(proposed_order_list):\n for item_details in proposed_order_list:\n proposed_order_item_print(item_details)",
"def do_orders(self,args):\n try:\n orders = bitstamp.open_orders()\n orders = sorted(orders, key=lambda x: float(x['price']))\n buytotal,selltotal = 0,0\n numbuys,numsells = 0,0\n amtbuys,amtsells = 0,0\n buyavg,sellavg = 0,0\n numorder = 0 \n for order in orders:\n ordertype=\"Sell\" if order['type'] == 1 else \"Buy\"\n numorder += 1\n print '%s = %s | $%s @ %s BTC %s' % (numorder,ordertype,order['price'],order['amount'],order['id']) \n if order['type'] == 0:\n buytotal += D(order['price'])*D(order['amount'])\n numbuys += D('1')\n amtbuys += D(order['amount'])\n elif order['type'] == 1:\n selltotal += D(order['price'])*D(order['amount'])\n numsells += D('1')\n amtsells += D(order['amount'])\n if amtbuys:\n buyavg = D(buytotal/amtbuys).quantize(cPrec)\n if amtsells:\n sellavg = D(selltotal/amtsells).quantize(cPrec)\n print \"There are %s Buys. There are %s Sells\" % (numbuys,numsells)\n print \"Avg Buy Price: $%s. Avg Sell Price: $%s\" % (buyavg,sellavg)\n except Exception as e:\n print e",
"def printOrders(self, event):\n \n pass",
"def processOrders(self, printOutput=False):\n orderData = self.trader.tradeData.get('orders',None)\n if orderData.get('success') == 0: #order data contains failed api call\n logging.error('Success=0: orderData: %s' % orderData)\n orderData = self.trader.tapi.getOrders()\n if printOutput:\n try:\n for key in orderData.get('return').keys():\n order = orderData.get('return')[key]\n print('ID: %s %s %s %s at %s' %(key,\n order['pair'],\n order['type'],\n order['amount'],\n order['rate']))\n except TypeError as e:\n # TODO add debug flag for printing output to console on errors\n print'TypeError in processOrders:'\n print e\n logging.error('Type error in helper.processOrders: %s' % e)\n logging.info('orderData: %s' % orderData)\n except KeyError as e:\n print'KeyError in processOrders'\n print e\n logging.error('Key error in helper.processOrders: %s' % e)\n logging.info('orderData: %s' % orderData)\n return orderData",
"def _write_bond_information(xml_file, structure, ref_distance, ref_energy):\n unique_bond_types = set()\n xml_file.write(\"<bond>\\n\")\n for bond in structure.bonds:\n t1, t2 = bond.atom1.type, bond.atom2.type\n if t1 == \"\" or t2 == \"\":\n t1, t2 = bond.atom1.name, bond.atom2.name\n t1, t2 = sorted([t1, t2])\n try:\n bond_type = (\"-\".join((t1, t2)), bond.type.k, bond.type.req)\n except AttributeError: # no forcefield applied, bond.type is None\n bond_type = (\"-\".join((t1, t2)), 0.0, 0.0)\n unique_bond_types.add(bond_type)\n xml_file.write(\n \"{} {} {}\\n\".format(bond_type[0], bond.atom1.idx, bond.atom2.idx)\n )\n xml_file.write(\"</bond>\\n\")\n xml_file.write(\"<bond_coeffs>\\n\")\n xml_file.write(\"<!-- type k r_eq -->\\n\")\n for bond_type, k, req in unique_bond_types:\n xml_file.write(\n \"{} {} {}\\n\".format(\n bond_type,\n k * 2.0 / ref_energy * ref_distance**2.0,\n req / ref_distance,\n )\n )\n xml_file.write(\"</bond_coeffs>\\n\")",
"def print(self):\n size_bid = len(self.bid)\n size_offer = len(self.offer)\n print(\"Book[%s]: %d bids, %d offers --> mid @ %f\" % (self.security,\n size_bid, size_offer, self.mid()))\n print(\"{0: ^32} | {1: ^32}\".format(\"bid\", \"offer\"))\n print(\"{0:^10},{1:^10},{2:^10} | {3:^10}, {4:^10}, {5:^10}\".format(\n \"count\", \"qty\", \"price\", \"price\", \"qty\", \"count\"))\n\n empty_level = OrderBookLevel(\"-\", \"-\", \"-\")\n for i in range(max(size_bid, size_offer)):\n bid = self.bid[-(i+1)] if i < size_bid else empty_level\n offer = self.offer[i] if i < size_offer else empty_level\n print(\"{0:^10},{1:^10},{2:^10} | {3:^10}, {4:^10}, {5:^10}\".format(\n bid.order_count, bid.qty, bid.price, offer.price, offer.qty, offer.order_count))",
"def displayBalance(self):\n orders = self.trader.tradeData.get(\n 'openOrders',\n 'Failed to read orderCount')\n# uncomment 3 lines below for orderType debug printing\n## ordertype = type(orders)\n# print'DEBUG: helper.displayBalance orders TYPE is',ordertype\n# print'DEBUG: helper.displayBalance orders:',orders\n if isinstance(orders, int) and orders > 0:\n print\"Open Orders:\", orders\n self.processOrders(printOutput=True)\n self.separator()\n print'Available Balances:'\n funds = self.trader.tradeData['funds']\n for bal in funds.keys():\n if funds[bal] >= 0.01:\n print bal.upper() + ':', funds[bal]\n self.separator()",
"def place_order(self):\n\n order_total = self.get_order_total()\n\n if self.person.can_afford(order_total):\n print 'This person is stinkin rich!'\n else:\n print \"No soup for you!\"",
"def show_orders(self):\n\n data = cur.execute(\"\"\"SELECT * FROM orders\"\"\").fetchall()\n print(tabulate(data, headers=[\"Order ID\", \"Status\", \"Customer\", \"Address\", \"Delivery Method\"]))",
"def test_order_atoms(self):\n mol1 = converter.s_bonds_mol_from_xyz(self.xyz10['dict'])\n mol2 = converter.s_bonds_mol_from_xyz(self.xyz10['dict_diff_order1'])\n mol3 = converter.s_bonds_mol_from_xyz(self.xyz10['dict_diff_order2'])\n converter.order_atoms(ref_mol=mol1, mol=mol2)\n for atom1, atom2 in zip(mol1.atoms, mol2.atoms):\n self.assertEqual(atom1.symbol, atom2.symbol)\n converter.order_atoms(ref_mol=mol3, mol=mol1)\n for atom1, atom2 in zip(mol3.atoms, mol1.atoms):\n self.assertEqual(atom1.symbol, atom2.symbol)\n\n ref_mol = Molecule(smiles='[CH](CC[CH]c1ccccc1)c1ccccc1')\n mol_list = ref_mol.copy(deep=True).generate_resonance_structures(keep_isomorphic=False, filter_structures=True)\n for mol in mol_list:\n converter.order_atoms(ref_mol=ref_mol, mol=mol)\n bond_dict = dict()\n for index1, atom1 in enumerate(ref_mol.atoms):\n for atom2 in atom1.edges.keys():\n index2 = ref_mol.atoms.index(atom2)\n if index1 < index2:\n if index1 not in bond_dict:\n bond_dict[index1] = [index2]\n else:\n bond_dict[index1].append(index2)\n for index1, atom1 in enumerate(mol.atoms):\n for atom2 in atom1.edges.keys():\n index2 = mol.atoms.index(atom2)\n if index1 < index2:\n self.assertIn(index2, bond_dict[index1]) # check that these atoms are connected in all mols",
"def _explain(self):\n self._explain_simple_consensus()\n logger.LOGGER.info(\"One or more groups have stances on both sides of this bill:\")\n logger.LOGGER.info(self._decision.split_group)",
"def manage_orders(self):\r\n for coin, pair_info in self.usdt_pairs.items():\r\n orders = self.kc.get_orders(pair_info[\"symbol\"], status=\"active\")\r\n self.log(coin, orders[\"totalNum\"])\r\n if orders[\"totalNum\"]:\r\n self.log(len(orders[\"items\"]))\r\n for order in orders[\"items\"]:\r\n self.log(order)\r\n\r\n self.log(mp.mpf())\r\n\r\n # ticker = current price action, bid/ask, etc\r\n ticker = self.kc.get_ticker(pair_info[\"symbol\"])\r\n self.log(ticker)\r\n return",
"def get_bond_info(self):\n return",
"def DumpDetails(self, sentences, label=\"N.A.\"):\n AdjR = 0.0\n adjAll = []\n for sentence in sentences:\n # if sentence[\"Text\"].startswith(\"Joanie is not helpful\"):\n # x = 1\n adjectives, dependencies = self.ExtractSentDetails(sentence)\n adjAll.extend(adjectives)\n allAdjectives = adjectives | Angel.GlobalAdjList\n AdjS = 0.0\n words = wordpunct_tokenize(sentence[\"Text\"])\n if len(words) <= 3:\n allAdjectives |= set([x.lower() for x in words])\n for i in range(len(words)):\n word = words[i].lower()\n if word in {\"but\", \"if\"}:\n AdjS = 0.0\n print words[i],\n elif word in allAdjectives and word in self.lexicon:\n multiplier = self.PredictMultiplier(word, dependencies[word], words, i)\n score = float(self.lexicon[word]) * multiplier\n if multiplier < 1:\n colortext = colored(words[i] + \" (\" + '{:.3}'.format(score) + \")\", 'red',None,['underline'])\n elif multiplier > 1:\n colortext = colored(words[i] + \" (\" + '{:.3}'.format(score) + \")\", 'red',None,['bold'])\n else:\n colortext = colored(words[i] + \" (\" + '{:.3}'.format(score) + \")\", 'red')\n AdjS += score\n print colortext,\n else:\n print words[i],\n print\n colortext = colored(\"Adjectives: \" + '{:.3}'.format(AdjS),'red')\n print colortext\n AdjR += AdjS\n print\n print \"Label:\", label\n base = self.PredictBase(adjAll)\n colortext = colored(\"Adjectives: \" + str(AdjR) + \"*\" + str(base) + \" = \" + str(AdjR*base),'red')\n print colortext",
"def get_bond_infos(self):\n\n yield from self._molecule_state.get_bond_infos()",
"def print_methods():\n print('''1. Sobol Variance Based:\n first and total order''')\n print('''2. Regional Sensitivity Analysis:\n also called Monte Carlo Filtering''')\n print('''3. Morris Screening Method:\n with pre-optimized defined trajects and group option''')\n print('''4. Sampled-OAT:\n Latin HYpercube or Sobol sampling with OAT sensitivity''')\n print('''5. Standardized Regression Coefficients:\n Latin HYpercube or Sobol sampling with linear regression''')\n print('''6. DYNamic Identifiability Analysis:\n Latin HYpercube or Sobol sampling with time-sliced based\n evaluation''')",
"def printordering(cls):\n print(ordering)",
"def _print_order(self, node):\n if node:\n self._print_order(node.left)\n print node.data\n self._print_order(node.right)",
"def final(order_dict):\r\n gap = (\"=\" * 50)\r\n overall_potential_cost = 0\r\n print(\"Thank you for using this program\")\r\n print(gap)\r\n print(\"Registrations of Interest in EV Subsidy Received:\")\r\n print(gap)\r\n \"\"\"\r\n for orders in order_dict:\r\n print(\"Order: {}\".format(orders))\r\n overall_potential_cost += order_dict[orders][\"Total Subsidy\"]\r\n for items in order_dict[orders]:\r\n if items == \"Cars on order\":\r\n occ_of_car = {}\r\n for car in order_dict[orders][\"Cars on order\"]:\r\n occ_of_car = order_dict[orders][\"Cars on order\"].count(car)\r\n print(\"{} x {} @ ${}\".format(occ_of_car, car, 200))\r\n else:\r\n print(\"{}: {}\".format(items, order_dict[orders][items]))\r\n \"\"\"\r\n for orders in order_dict:\r\n if order_dict[orders] != \"canceled\":\r\n print(\"Order: {}\".format(orders))\r\n overall_potential_cost += order_dict[orders][\"Total Subsidy\"]\r\n for items in order_dict[orders]:\r\n if items == \"Cars on order\":\r\n cars_dict = {}\r\n for car in order_dict[orders][\"Cars on order\"]:\r\n if car in cars_dict.keys():\r\n cars_dict[car] += 1\r\n else:\r\n cars_dict[car] = 1\r\n\r\n for car_name in cars_dict.keys():\r\n print(\"{} x {}\".format(car_name, cars_dict[car_name]))\r\n\r\n else:\r\n print(\"{}: {}\".format(items, order_dict[orders][items]))\r\n \r\n else:\r\n print(\"Order {} was canceled\".format(orders))\r\n print(gap)\r\n\r\n print(\"Total orders: {}\".format(len(order_dict)))\r\n print(\"Overall potential cost of these orders = {}\".format(overall_potential_cost))\r\n print(gap)\r\n return",
"def get_all_bond_orders(molecule):\n return nps.vtk_to_numpy(molecule.GetBondOrdersArray())",
"def print(self):\n for i, v in enumerate(self._adj):\n if v:\n print(\"vertex {0}\".format(i))\n for e in v:\n print(e)\n print()",
"def print_orders(self, as_var=False, tabs=1):\n \n out_str = ''\n for o in self.order_lst:\n ord_id = o.get_orderId()\n item_count = o.count()\n out_str += \"\\n%sOrder Id: %s\\n\" % (str('\\t' * tabs), ord_id)\n \n if as_var: return out_str\n \n print(out_str)",
"def print_out(self):\n for node in self.vertices:\n for arc in self.out_arcs_lists[node]:\n s = self.arc_info[arc]['start']\n t = self.arc_info[arc]['destin']\n w = self.arc_info[arc]['weight']\n lb = self.arc_info[arc]['lower_bound']\n u = self.arc_info[arc]['upper_bound']\n print(\"{} {} {} {} flow={}, edgeId={}\".format(s, t, lb, u, w,\n arc))",
"def print_pairing_info(melon_types):\n\n # Fill in the rest",
"def test_bond_order_method_passing(self, model, toolkit):\n mol = Molecule.from_smiles(\"CCO\")\n\n # Test that default model works\n mol.assign_fractional_bond_orders()\n\n mol.assign_fractional_bond_orders(\n bond_order_model=model,\n )\n\n mol.assign_fractional_bond_orders(\n bond_order_model=model,\n toolkit_registry=toolkit(),\n )\n\n mol.assign_fractional_bond_orders(\n bond_order_model=model,\n toolkit_registry=ToolkitRegistry([toolkit()]),\n )",
"def run(self):\n # take care of self.symmetry_list\n refatomtype = []\n for i in self.symmetry_list:\n if isinstance(i,int):\n line = self.topfile[self.atomndx[i-1]]\n refatomtype.append(line.split()[1])\n else:\n line_1 = self.topfile[self.atomndx[i[0]-1]]\n atype = line_1.split()[1]\n if len(i) > 1:\n for j in i[1:]:\n line = self.topfile[self.atomndx[j-1]]\n if atype != line.split()[1]:\n print('symmetry_list:')\n print(line_1[:-1])\n print(line[:-1])\n raise ValueError('not equivalent!')\n refatomtype.append(atype)\n\n totatomtype = []\n for i in self.atomtypendx:\n ltmp = self.topfile[i].split()\n totatomtype.append(ltmp[0])\n\n atomlist = []\n for i in self.atomndx:\n ltmp = self.topfile[i].split()\n atomlist.append(ltmp[1])\n\n self.outfile = []\n for charge in self.prochargefile[:self.gennm]:\n\n # ATTENTION! Here is very important !!!\n # make a copy of self.topfile, avoide the same memory address\n # This copy has the same effect like the DEEP copy\n topfile = self.topfile[:]\n\n count = 0\n for pair in charge:\n atype = refatomtype[count]\n try:\n ndx = totatomtype.index(atype)\n except:\n print(atype)\n raise ValueError('not defined')\n nm = self.atomtypendx[ndx]\n line = topfile[nm]\n ltmp = self.procomments(line).split()\n subline = ''\n if len(ltmp) == 6:\n ltmp[2] = pair\n else:\n ltmp[3] = pair\n for ch in ltmp:\n subline += '{:>8} '.format(ch)\n topfile[nm] = subline + '\\n'\n\n # process the [atoms] directive\n scount = 0\n for i in atomlist:\n if i == atype:\n snm = self.atomndx[scount]\n\n line = topfile[snm]\n ltmp = self.procomments(line).split()\n subline = ''\n if len(ltmp) == 6:\n ltmp.append(pair)\n else:\n ltmp[6] = pair\n for ch in ltmp:\n subline += '{:>8} '.format(ch)\n topfile[snm] = subline + '\\n'\n scount += 1\n\n count += 1\n\n self.outfile.append(topfile)"
]
| [
"0.5906417",
"0.58164936",
"0.57983583",
"0.572823",
"0.5678574",
"0.5646116",
"0.5638378",
"0.5605157",
"0.5403813",
"0.53988194",
"0.53677356",
"0.5344988",
"0.5310984",
"0.5300891",
"0.52989656",
"0.52847356",
"0.5280561",
"0.5248943",
"0.5236017",
"0.5175988",
"0.51714003",
"0.51631325",
"0.51491904",
"0.5146076",
"0.5124842",
"0.51189053",
"0.50981826",
"0.5079742",
"0.50698537",
"0.50323063"
]
| 0.7156463 | 0 |
The non-social agent should pump to where it wants to go. However, if P1 pumps above the agent's EV, it should adjust its EV accordingly in order to win. | def non_social_action(self):
if not self.agent.done:
if self.opponenet.cashed and self.opponenet.pumps >= self.agent.pumps:
self.EV = self.opponenet.pumps + np.random.randint(1,5)
self.action_gating() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def hesitant_action(self):\n if not self.agent.done:\n if not self.opponenet.done:\n self.EV = self.opponenet.pumps - np.random.randint(1,5)\n else:\n if self.opponenet.cashed:\n self.EV = self.opponenet.pumps + 1\n elif self.opponenet.popped:\n if not self.stopCount:\n if self.agent.pumps == 0:\n self.EV = np.random.randint(1,10)\n else:\n self.EV = self.agent.pumps\n self.stopCount = True\n self.action_gating()",
"def propagation(self, other):\n if self.infected_pop >= 1 and other.infected_pop == 0:\n if random.random() <= self.infected_ratio()/10:\n other.infected_pop = 1",
"def send_proposes(self):\n neighbors = self.model.space.get_neighbors(self.pos, self.range, include_center=False)\n neighbors = list(filter(lambda x: x.type == 'guest', neighbors))\n\n if len(neighbors) > 0:\n options = list(map(lambda x: (x.role, self.action), neighbors))\n know = list(map(lambda x: self.knowledge[x], options))\n # print(\"Knowledges\", probs)\n probs = list(map(lambda x: np.exp(x), know))\n # print(\"Softmax\", probs)\n probs = list(map(lambda x: x / sum(probs), probs))\n # print(\"Normed\", probs)\n if len(neighbors) > 1:\n print(self.unique_id, neighbors, probs, know)\n\n other_agent = random.choices(neighbors, probs)[0]\n self.propose_interaction(other_agent, self.action)",
"def demote(name):\r\n print('Office Worker is the lowest position')",
"def which_pump (self):\n if self.msg == b'1': #left\n self.pump_it_80(1)\n # self.pump_it(1)\n # self.pump_it(1)\n # self.pump_it(1)\n # self.pump_it(1)\n elif self.msg == b'2': #right\n self.pump_it_20(2)\n # self.pump_it(2)\n # self.pump_it(2)\n # self.pump_it(2)\n # self.pump_it(2)",
"def ai_event(self): \n self.choice = (-1, -1)\n any_human_agents = isinstance(self.agents[Player.WHITE], HumanAgent) or \\\n isinstance(self.agents[Player.BLACK], HumanAgent)\n\n if self.timestep_watch.time() >= self.TIMESTEP_DELAY:\n self.timestep_watch.reset()\n if not any_human_agents:\n self.event = Event.next(self.event)\n if Event.is_valid_placement_stage(self.event):\n self.choice = self.agents[self.env.turn].act()",
"def move(self):\n for agent in self.agents:\n if not agent.fidelity:\n options = agent.get_move_options(agent.hex, self.kernel_size, None, extend=True)\n target = random36.choices(population=options,weights=[x.quality**2 for x in options])\n agent.move(target[0])",
"def aggressive_action(self):\n if not self.agent.done:\n if not self.opponenet.done:\n self.EV = self.opponenet.pumps + np.random.randint(1,5)\n else:\n if not self.stopCount:\n if self.opponenet.cashed:\n self.EV = self.agent.pumps + np.random.randint(1,5)\n elif self.opponenet.popped:\n self.EV = self.agent.pumps + np.random.randint(1,10)\n self.stopCount = True\n self.action_gating()",
"def play_game(self, p1, p2):\n self.state_manager.init_game()\n state = self.state_manager.get_state()\n players = [p1, p2]\n player = random.choice([1, 2])\n actions = []\n p1_wins = 0\n p2_wins = 0\n while not self.state_manager.is_game_over(state):\n current_agent = players[player - 1]\n actor_chosen_action = current_agent.target_policy(state, player, is_top_policy=True) # is_top_policy = True to ensure that the agents uses the ANET and not the random exploration\n actions.append(actor_chosen_action)\n self.state_manager.perform_action(actor_chosen_action)\n\n state = self.state_manager.get_state()\n player = change_player(player)\n if player == 1:\n p2_wins += 1\n else:\n p1_wins += 1\n winning_agent = players[change_player(player)-1] # Since player is changed in end of while, the winning player at winning state is the previous player\n # print(p1.name + \" vs. \" + p2.name + \", winner: \" + winning_agent.name + \", actions: \" + str(actions))\n self.results[winning_agent.name] += 1\n return p1_wins, p2_wins, actions",
"def step(self):\n if self.model.schedule.steps < self.model.residential_steps:\n residential_move = True\n else:\n residential_move = False\n\n\n if residential_move:\n # only step the agents if the number considered is not exhausted\n if self.model.total_considered < self.model.residential_moves_per_step:\n # move residential\n U_res = self.get_res_satisfaction(self.pos)\n self.model.res_satisfaction.append(U_res)\n\n # print(\"U_res\",U_res)\n if U_res < self.T:\n\n # todo: implement different move schemes, for now only random\n # find all empty places\n # rank them\n # take one with boltzmann probability.\n self.evaluate_move(U_res, school=False)\n\n else:\n self.model.res_happy += 1\n\n self.model.total_considered += 1\n #print(\"considered\",self.model.total_considered)\n\n\n else:\n if self.model.total_considered < self.model.school_moves_per_step:\n # school moves\n # satisfaction in current school\n U = self.get_school_satisfaction(self.school, self.dist_to_school)\n self.model.satisfaction.append(U)\n\n # If unhappy, compared to threshold move:\n if U < self.T:\n #print('unhappy')\n self.evaluate_move(U, school=True)\n\n else:\n self.model.happy += 1\n if self.model.total_considered>0:\n self.model.percent_happy = np.ma(self.model.happy/self.model.total_considered)",
"def action(self):\n # --- Ruled Based Test Policy ---\n # Stay still just send communication event\n if self.uid == 0:\n if random.choice(list(range(50))) == 1 and self.comm_count < self.comm_limit:\n action = 3\n action_param = {}\n self.comm_count += 1\n else:\n action = 1\n action_param = {\"ang_accel\": (0 * math.pi / 180), \"accel\": 0}\n return action, action_param\n\n # Others\n # If wall in vision, rotate\n vision_array = self.vision[1]\n if 1 in vision_array[0]:\n accel = -1 if self.speed > 0 else 0\n action = 1\n action_param = {\"ang_accel\": (random.randint(20, 45) * math.pi / 180), \"accel\": accel}\n\n # If hider in front, tag\n elif self.agt_class == 3 and 2 in vision_array[0] and vision_array[1][list(vision_array[0]).index(2)] < 60:\n action = 2\n action_param = {}\n\n # Randomly invoked communication event\n # elif random.choice(list(range(50))) == 1 and self.comm_count < self.comm_limit:\n # action = 3\n # action_param = {}\n # self.comm_count += 1\n\n # If communication received head towards nearest comm. agent for three steps\n elif len(self.comm) > 0:\n closest_agent = min(self.comm, key=lambda x: x[0])\n\n # Calculate target angle to the event sender\n target_angle = closest_agent[1] + self.angle\n target_angle = 2*math.pi + target_angle if target_angle < 0 else target_angle\n target_angle = target_angle - 2*math.pi if target_angle > 2*math.pi else target_angle\n\n # Add target angle to history such that the agent moves until it finds the target angle\n self.history.append(target_angle)\n direction = closest_agent[1]/abs(closest_agent[1])\n action = 1\n action_param = {\"ang_accel\": direction*math.pi/18, \"accel\": -1 if self.speed > 0 else 0}\n\n # If target angle not found, continue searching\n elif len(self.history) > 0:\n direction = self.history[-1]/abs(self.history[-1])\n action = 1\n action_param = {\"ang_accel\": direction*math.pi/18, \"accel\": -1 if self.speed > 0 else 0}\n if self.history[-1] - math.pi/9 < self.angle < self.history[-1] + math.pi/9:\n self.history.pop(-1)\n\n # When there isn't a special event, just move forward\n else:\n st_rate = self.stamina/self.max_stamina\n if st_rate > 0.75:\n accel = np.random.normal(3, 1, 1)\n elif st_rate > 0.4:\n accel = np.random.randint(-1, 3)\n else:\n accel = -1\n action = 1\n action_param = {\"ang_accel\": (0 * math.pi / 180), \"accel\": accel}\n\n return action, action_param",
"def event706():\n header(706, 0)\n\n if_event_flag_on(-1, 710)\n if_event_flag_on(-1, EVENT.WarpAbilityAtSunChamber)\n if_condition_true(0, -1)\n\n flag.enable(706) # Enable warping.\n\n # WARPING IS ACTIVE WHILE PENDING HERE.\n\n if_event_flag_on(-1, 11705170) # Player in Archive Tower ...\n if_in_world_area(-1, 11, 0) # OR player in Painted World ...\n if_in_world_area(7, 15, 1) # OR (Player in Anor Londo AND Dark Anor Londo active AND Jareel not dead)\n if_event_flag_on(7, 11510400)\n if_event_flag_off(7, 11510901)\n if_condition_true(-1, 7)\n if_in_world_area(6, 14, 1) # OR (Player in Lost Izalith AND Jeremiah present)\n if_event_flag_on(-2, EVENT.JeremiahInRuins)\n if_event_flag_on(-2, EVENT.JeremiahInIzalith)\n if_event_flag_on(-2, EVENT.JeremiahImpatient)\n if_event_flag_on(-2, EVENT.JeremiahFleeingIzalith)\n if_condition_true(6, -2)\n if_condition_true(-1, 6)\n if_condition_true(0, -1)\n flag.disable(706)\n\n # WARPING IS NOT ACTIVE WHILE PENDING HERE.\n\n if_event_flag_off(1, 11705170) # Player not in Archive Tower ...\n if_not_in_world_area(1, 11, 0) # AND player not in Painted World ...\n if_not_in_world_area(-7, 15, 1) # AND (player not in AL OR not Dark Anor Londo OR Jareel dead)\n if_event_flag_off(-7, 11510400)\n if_event_flag_on(-7, 11510901)\n if_condition_true(1, -7)\n if_not_in_world_area(-6, 14, 1) # AND (player not in Izalith OR Jeremiah gone)\n if_event_flag_off(2, EVENT.JeremiahInRuins)\n if_event_flag_off(2, EVENT.JeremiahInIzalith)\n if_event_flag_off(2, EVENT.JeremiahImpatient)\n if_event_flag_off(2, EVENT.JeremiahFleeingIzalith)\n if_condition_true(-6, 2)\n if_condition_true(1, -6)\n if_condition_true(0, 1)\n restart()",
"def non_social_action_SB(self):\n self.pumpRate = 0.4\n if not self.agent.done:\n self.action_gating()",
"def playerForfeit(self):\n self.handleWin(self.currentplayer*-1)",
"def Peacekeaper(self):\n\t\tprint(self.name.title() + \" is now shotting.\")",
"def poke(self):\n self._messaged.emit((\"poke\",None,0,None))",
"def plant_food(self):\n self.phase.set(0)\n #self.broadcast_phase()\n self.players[self.first_player].take_turn()",
"def set_pump(self, ON):\n cmd = protocol.SET_PUMP.format(1 if ON else 0)\n response = self.__send_and_receive(cmd)\n if response.startswith(protocol.OK.lower()):\n return True\n else:\n return False",
"def do_in_progress_switches(self):\n event = self.get_event_from_args(self.lhs, check_host=True)\n if \"movehere\" in self.switches:\n loc = self.caller.char_ob.location\n self.event_manager.move_event(event, loc)\n self.msg(\"Event moved to your room.\")\n elif \"endevent\" in self.switches:\n self.event_manager.finish_event(event)\n self.msg(\"You have ended the event.\")",
"def plp_no_preconditions(self):\n self.publisher.publish(\n PlpMessage(None, \"Waypoint\", \"info\", \"PLP triggered, but its preconditions have not been met (yet)\"))",
"def event_player_bust(self) -> None:\n print(f\"Your hand contains {min(self.user.hand.value)}, you're bust\")\n self.event_house_wins()",
"def lock_in_soda_can(self):\n move_msg = Move()\n move_msg.lane = director.coldring\n move_msg.location = 1 # 1: fully down\n self.move_pub.publish(move_msg)",
"def _step_their_paddle(self):\n if random.random() < self.their_update_probability:\n if self.paddle_l.y < self.ball.y:\n if self.paddle_l.top_bound < self.top_bound:\n self.paddle_l.up()\n else:\n if self.paddle_l.bottom_bound > self.bottom_bound:\n self.paddle_l.down()",
"def performOverflow(self, call):\n overFlowDest = self.getOverflowDest()\n if not overFlowDest:\n self.huntGroup.member_to_distribute = 0\n PrintLog(\"+++++++Debug: Under construction+++++\")\n return\n PrintLog(\"Waiting overflow timeout %s sec\" % self.overflowTimeout)\n time.sleep(self.overflowTimeout)\n if overFlowDest.tserver <> self.tserver:\n overFlowDest = self.trunk(self, overFlowDest)\n if InTrue(GetOption(\"CofFeature\")):\n call.ViaExtRouter = 1\n call.external = 1\n pt = self.partyToDistribute()\n thirdPartyDNRole = PartyRole.Destination\n if pt.Role == PartyRole.ConferenceMember and len(pt.Call.PartyList) >= 3:\n thirdPartyDNRole = PartyRole.ConferenceMember\n thirdPartyDN = \"Trunk\"\n addPrm = {\"ThirdPartyDN\": thirdPartyDN, \"ThirdPartyDNRole\": thirdPartyDNRole}\n if not self.routeRequestOnQueued:\n ev = self.mayBeEvent(EventName.Diverted, pt, timeout=3, addPrm=addPrm)\n else:\n addPrmRU = {\"ReferenceID\": 0, \"Reasons\": None, \"ThirdPartyDN\": thirdPartyDN,\n \"ThirdPartyDNRole\": thirdPartyDNRole}\n ev = self.mayBeEvent(EventName.RouteUsed, pt, timeout=3, addPrm=addPrmRU)\n ev = self.mayBeEvent(EventName.Diverted, pt, timeout=3, addPrm=addPrm)\n if not ev:\n pt.postponedAbandonedOrDiverted = 1\n self.postponedAbandonedOrDiverted = self.postponedAbandonedOrDiverted + 1\n pt.removeFromCall()\n ringPt = overFlowDest.ring(call)\n return ringPt",
"def go_to_waiting(self):\n if self.in_front_of_home:\n if self.dock():\n self.in_front_of_home = False\n elif self.goto_goal(self.home_x, self.home_y):\n self.in_front_of_home = True",
"def forwarder_state_changed(self, ev):\n\n\n dp = ev.dp\n ofp = dp.ofproto\n parser = dp.ofproto_parser\n\n\n if ev.enter is True:\n # in plain MAC setup, this should install only ICMP and ARP re-route rules, watchout for hardcoded DP id\n self.on_inner_dp_join(dp)\n\t ##For evry new forwarder we send out discovery ICMP packets out of every port except OFPP_CONTROLLER\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dp.id) + ' saying hello to Unifycore Controller, Unifycore warmly welcomes you!')\n for port in dp.ports:\n if port != (ofp.OFPP_CONTROLLER):\n LOG.debug('TOPO MNGR: Controller is sending topology discovery ICMPs to forwarder: ' + str(dp.id) + ', port: ' + str(port))\n _icmp_send(dp,port,DISCOVERY_IP_SRC, DISCOVERY_IP_DST)\n\n ##For evry new forwarder we send out discovery ARP packets out of every port except OFPP_CONTROLLER to find APN\n for apn in APN_POOL:\n if apn.ip_addr != None:\n LOG.debug('TOPO MNGR: Forwarder: '+str(dp.id)+', port: '+ str(port) + ' is looking for APN: ' + str(apn.name) +' at IP: '+str(apn.ip_addr)+' with ARP search source IP: ' + str(apn.arp_origin_ip))\n _arp_send(dp=dp, port_out=port, arp_code=1, ip_target=apn.ip_addr, ip_sender=apn.arp_origin_ip)\n\n\n\n\n\n if ev.enter is False:\n\t ##TODO: We need to scan if any tunnels were affected, and if so, if any PDP COntexts were affected\n ##JUST REMOVING NODE FROM TOPOLOGY ISNT ENOUGH!\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dp.id) + ' is leaving topology. It was a pleasure for us!')\n topo.del_forwarder(dp.id)",
"def testTriggerPause(self):\n\n # Pause the proxy so data sent to it builds up in its buffer.\n self.proxy.pauseProducing()\n self.assertFalse(self.parentProducer.paused, \"don't pause yet\")\n self.proxy.write(\"x\" * 51)\n self.assertFalse(self.parentProducer.paused, \"don't pause yet\")\n self.proxy.write(\"x\" * 51)\n self.assertTrue(self.parentProducer.paused)",
"def decide(self):\n self.maybe_shoot()\n next(self.move_cycle)\n\n pass",
"async def evaluate(self):\n if self.players[1].id == bot.user.id:\n self.p2_move = random.choice((\"Rock\", \"Paper\", \"Scissors\"))\n\n if None in self.moves:\n return\n\n if len(self.moves) == 1:\n tie_embed = discord.Embed(title=\"It's a Draw\")\n await self.channel.send(embed=tie_embed)\n return await self.end()\n\n if self.moves == {\"Rock\", \"Paper\"}:\n winner = \"Paper\"\n elif self.moves == {\"Scissors\", \"Paper\"}:\n winner = \"Scissors\"\n elif self.moves == {\"Rock\", \"Scissors\"}:\n winner = \"Rock\"\n\n # P1 Wins\n if self.p1_move == winner:\n embed = discord.Embed(\n title=f\"{self.players[0].name}'s **{winner}** beats {self.players[1].name}'s **{self.p2_move}**\")\n await self.channel.send(embed=embed)\n await self.end(winner=self.players[0])\n\n # P2 Wins\n elif self.p2_move == winner:\n embed = discord.Embed(\n title=f\"{self.players[1].name}'s **{winner}** beats {self.players[0].name}'s **{self.p1_move}**\")\n await self.channel.send(embed=embed)\n await self.end(winner=self.players[1])",
"def step_to_princess(self):\n if ((self.robot[0] >= self.princess[0]) and\n (self.robot[1] >= self.princess[1])):\n if ((self.robot[0] - self.princess[0]) >\n (self.robot[1] - self.princess[1])):\n self.robot[0] -= 1\n self.path.append(\"LEFT\")\n else:\n self.robot[1] -= 1\n self.path.append(\"UP\")\n elif ((self.robot[0] >= self.princess[0]) and\n (self.robot[1] <= self.princess[1])):\n if ((self.robot[0] - self.princess[0]) >\n (self.princess[1] - self.robot[1])):\n self.robot[0] -= 1\n self.path.append(\"LEFT\")\n else:\n self.robot[1] += 1\n self.path.append(\"DOWN\")\n elif ((self.robot[0] <= self.princess[0]) and\n (self.robot[1] >= self.princess[1])):\n if ((self.princess[0] - self.robot[0]) >\n (self.robot[1] - self.princess[1])):\n self.robot[0] += 1\n self.path.append(\"RIGHT\")\n else:\n self.robot[1] -= 1\n self.path.append(\"UP\")\n else:\n if ((self.princess[0] - self.robot[0]) >\n (self.princess[1] - self.robot[1])):\n self.robot[0] += 1\n self.path.append(\"RIGHT\")\n else:\n self.robot[1] += 1\n self.path.append(\"DOWN\")"
]
| [
"0.58771443",
"0.57470256",
"0.5713751",
"0.5505406",
"0.5483104",
"0.5479817",
"0.5391838",
"0.53856057",
"0.5380091",
"0.5310722",
"0.5270674",
"0.52652335",
"0.5252042",
"0.5229873",
"0.52257144",
"0.5217264",
"0.52000123",
"0.51954573",
"0.5180291",
"0.51767766",
"0.5165507",
"0.51593864",
"0.5157781",
"0.5149787",
"0.5140042",
"0.5134528",
"0.511569",
"0.50933003",
"0.50825447",
"0.5078004"
]
| 0.58396876 | 1 |
generates the DDL (create table and column types) | def ddl_table(self, tabela):
sql = """SELECT COLUMN_NAME as coluna,
CASE DATA_TYPE
WHEN 'uniqueidentifier' THEN 'varchar'
WHEN 'datetime' THEN 'timestamp'
WHEN 'varbinary' THEN 'bytea'
WHEN 'char' THEN 'varchar'
WHEN 'nvarchar' THEN 'varchar'
WHEN 'image'THEN 'bytea'
WHEN 'bit'THEN 'boolean'
ELSE DATA_TYPE END AS tipo,
ORDINAL_POSITION as column_id,
DATA_TYPE
FROM INFORMATION_SCHEMA.COLUMNS
WHERE TABLE_NAME = '%s'
ORDER BY ORDINAL_POSITION""" % (tabela)
res = self.cur_origem.execute(sql)
table = "CREATE TABLE IF NOT EXISTS %s (" % tabela
tipos = {}
for coluna, tipo, id, data_type, in res.fetchall():
# print('"%s,%s"'%(coluna,tipo))
if coluna == 'ROW_VERSION' or coluna == 'ROWVERSION':
continue
else:
col = coluna
table += "%s %s," % (col.strip(), tipo.strip())
if tipo == 'bytea':
if data_type == 'image':
coluna = 'CONVERT(VARCHAR(1000), cast(%s as varbinary(max)), 2)' % coluna
else:
coluna = 'CONVERT(VARCHAR(1000), %s, 2)' % coluna
tipos[id] = [coluna.strip(), data_type]
table = table[:-1]+");"
return table, tipos | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_table(self):\n pass",
"def ddl_table(self, tabela):\r\n sql = \"\"\"SELECT col.column_name AS coluna,\r\n CASE\r\n WHEN col.data_type LIKE 'NUMBER%%' THEN 'NUMERIC'\r\n WHEN col.data_type LIKE 'NCHAR%%' THEN 'CHAR'\r\n WHEN col.data_type LIKE 'VARCHAR2%%' THEN 'TEXT'\r\n WHEN col.data_type LIKE 'NVARCHAR2%%' THEN 'TEXT'\r\n WHEN col.data_type LIKE 'VARCHAR%%' THEN col.DATA_TYPE||'('||col.DATA_LENGTH||')'\r\n WHEN col.data_type LIKE 'BLOB%%' THEN 'TEXT'\r\n WHEN col.data_type LIKE 'CLOB%%' THEN 'TEXT'\r\n WHEN col.data_type LIKE 'NCLOB%%' THEN 'TEXT'\r\n WHEN col.data_type LIKE 'LONG%%' THEN 'TEXT'\r\n WHEN col.data_type LIKE 'RAW%%' THEN 'TEXT'\r\n WHEN col.data_type LIKE 'BFILE%%' THEN 'TEXT'\r\n WHEN col.data_type LIKE 'LONG RAW%%' THEN 'TEXT'\r\n WHEN col.data_type LIKE 'FLOAT%%' THEN 'NUMERIC'\r\n WHEN col.data_type LIKE 'BINARY_FLOAT%%' THEN 'NUMERIC'\r\n WHEN col.data_type LIKE 'BINARY_DOUBLE%%' THEN 'NUMERIC'\r\n WHEN col.data_type LIKE 'TIMESTAMP%%' THEN 'TIMESTAMP'\r\n WHEN col.data_type LIKE 'INTERVAL%%' THEN 'TEXT'\r\n ELSE col.DATA_TYPE\r\n END AS tipo,\r\n col.column_id,\r\n col.data_type\r\n FROM\r\n all_tab_columns col\r\n WHERE\r\n upper(table_name) = '%s'\r\n ORDER BY col.column_id\"\"\" % (tabela)\r\n res = self.cur_origem.execute(sql)\r\n table = \"CREATE TABLE IF NOT EXISTS %s (\" % tabela\r\n tipos = {}\r\n for coluna, tipo, id, data_type, in res.fetchall():\r\n # EXECOES (palavras reservadas no postgres)\r\n if coluna.strip() == \"NATURAL\":\r\n col = \"NATURALDE\"\r\n elif coluna.strip() == \"SIMILAR\":\r\n col = \"SIMILARR\"\r\n else:\r\n col = coluna\r\n table += \"%s %s,\" % (col.strip(), tipo.strip())\r\n tipos[id] = [coluna.strip(), data_type]\r\n table = table[:-1]+\");\"\r\n return table, tipos",
"def dump(self):\n # This is pretty, but we could just return the ddl_string\n outputs = [\"Table : %s\\n\" % self.name]\n # We show the columns in sequence order, using DSU\n # DSU = Decorate, Sort, Undecorate - a.k.a Schwartzian transform\n deco_cols = [ (x['sequence'], x) for x in list(self.columns.values()) ]\n deco_cols.sort()\n cols = [ col for seq, col in deco_cols ]\n for column in cols:\n outputs.append(\" %-30s\" % column['name'])\n if 'length' in column and column['length'] != None:\n if 'precision' in column and column['precision'] != None:\n # This column is a numeric data type\n column_defn = column['type']+self.__class__.calc_precision(column['type'], column['length'], column['precision'], column['scale'])\n else:\n # This column is a text data type\n column_defn = '%s(%d)' % (column['type'], column['length'])\n else:\n # This column is a simple data type such as date or boolean\n column_defn = column['type']\n outputs.append(\" %-15s \" % column_defn)\n if not column['nullable']:\n outputs.append(\" NOT NULL\")\n if 'special' in column:\n # Special case for e.g. 'enum' in MySQL\n outputs.append(' %s' % column['special'])\n outputs.append(\"\\n\")\n # Constraints please\n if len(self.constraints) != 0:\n outputs.append(\" Constraints;\\n\")\n for constraint_name, constraint in list(self.constraints.items()):\n outputs.append(\" %s, \" % constraint_name)\n outputs.append(\"%s \" % (constraint['type']))\n if 'columns' in constraint:\n outputs.append(\": \")\n outputs.append(', '.join(constraint['columns']))\n outputs.append(\"\\n\")\n # Indexes\n if len(self.indexes) > 0:\n outputs.append(\" Indexes:\\n\")\n for index_name, index in list(self.indexes.items()):\n outputs.append(\" %s, \" % index_name)\n outputs.append(\"%s\\n\" % index['type'])\n # Don't check number of columns because there must be at least 1\n outputs.append(\" Columns: \")\n outputs.append(\", \".join(index['columns']))\n outputs.append(\"\\n\")\n # LOG.debug(\"Table Dump output: \" + \"\".join(outputs))\n return \"\".join(outputs)",
"def _create_table(self) :\n\n cur = self.con.cursor()\n delete_sql = 'DROP TABLE IF EXISTS \"%s\"' % self.name\n cur.execute(delete_sql)\n\n col_sql = ','.join(['\"%s\" %s' % (self.cols[i], self.types[i])\n for i in range(len(self.cols))])\n create_sql = 'CREATE TABLE \"%s\" ( %s );' % (self.name, col_sql)\n cur.execute(create_sql)",
"def create_table_statements() -> [str]:\n pass",
"def create_lectures_table(conn):\n execute_sql_script(conn, \"05_create_lectures_table.sql\")",
"def create_table(self):\n self.db.query(f\"\"\"\n CREATE TABLE IF NOT EXISTS {self.table} (\n id INT UNSIGNED NOT NULL AUTO_INCREMENT,\n name VARCHAR(140) NOT NULL,\n PRIMARY KEY (id)\n )\n \"\"\")",
"def crearTabla(self):\n mensaje = self.base.createTable()\n showinfo('Resultado', mensaje)",
"def make(self):\n fp = open('delicious.ddl', 'r')\n dml = fp.read()\n print dml\n self.connection.executescript(dml)",
"def create_tables(self, script):\n for query in script:\n try:\n self.cursor.execute(query)\n except:\n print(\"\\n Une erreur s'est produite lors \"\n \"de la création des tables \\n\")\n print(\"\\n Les tables ont bien été créées \"\n f\"dans la base de données |{DB_NAME}| \\n\")",
"def make_new_tbl(self):\n debug = False\n default_dd = getdata.get_default_db_dets()\n con, cur = default_dd.con, default_dd.cur\n oth_name_types = getdata.get_oth_name_types(self.settings_data)\n tblname = self.tblname_lst[0]\n if debug: print(f'DBE in make_new_tbl is: {default_dd.dbe}')\n getdata.make_sofa_tbl(\n con, cur, tblname, oth_name_types, headless=False)\n wx.MessageBox(\n _('Your new table has been added to the default SOFA database'))",
"def _create_TableDescriptor(self):\n\n self.conn.cursor.execute(\"PRAGMA table_info(\" + self.table_name + \")\")\n descriptions = self.conn.cursor.fetchall()\n column_map = {}\n for description in descriptions:\n column_map[description[1]] = description[2]\n td = TD(self.table_name, column_map) \n\n# self.conn.cursor.execute(\"SELECT sql FROM sqlite_master WHERE name='{tb}'\"\\\n# .format(tb=self.table_name))\n# aa = str(self.conn.cursor.fetchone()[0])\n# sindx = aa.find(\"(\")\n# eindx = aa.find(\")\")\n# aa = aa[sindx+1:eindx]\n# aa = aa.split(\",\")\n# column_map = {kyval.split()[0]:kyval.split()[1] for kyval in aa}\n# td = TD(self.table_name, column_map) \n\n return td",
"def create_table_db(self):\n table_name = self.name_table.get()\n if len(table_name) > 0:\n self.table_db = table_name\n self.sql_database.db_name = self.db\n if self.sql_database.create_table(self.table_db):\n msg.showinfo(\n message=\"\".join([str(self.table_db), \" created\"]))\n self.name_table.delete(0, tk.END)\n self.show_table_combobox()\n else:\n msg.showinfo(message=\"Failed\")\n else:\n msg.showinfo(message=\"Write table name!\")",
"def ddl_table(self, tabela):\r\n sql = \"\"\"SELECT\r\n RF.RDB$FIELD_NAME FIELD_NAME,\r\n CASE F.RDB$FIELD_TYPE\r\n WHEN 7 THEN\r\n CASE F.RDB$FIELD_SUB_TYPE\r\n WHEN 0 THEN 'INT'\r\n WHEN 1 THEN 'NUMERIC(' || F.RDB$FIELD_PRECISION || ', ' || (-F.RDB$FIELD_SCALE) || ')'\r\n WHEN 2 THEN 'DECIMAL'\r\n END\r\n WHEN 8 THEN\r\n CASE F.RDB$FIELD_SUB_TYPE\r\n WHEN 0 THEN 'INTEGER'\r\n WHEN 1 THEN 'NUMERIC(' || F.RDB$FIELD_PRECISION || ', ' || (-F.RDB$FIELD_SCALE) || ')'\r\n WHEN 2 THEN 'DECIMAL'\r\n END\r\n WHEN 9 THEN 'QUAD'\r\n WHEN 10 THEN 'FLOAT'\r\n WHEN 12 THEN 'DATE'\r\n WHEN 13 THEN 'TIME'\r\n WHEN 14 THEN 'CHAR(' || (TRUNC(F.RDB$FIELD_LENGTH / COALESCE(CH.RDB$BYTES_PER_CHARACTER,1))) || ') '\r\n WHEN 16 THEN\r\n CASE F.RDB$FIELD_SUB_TYPE\r\n WHEN 0 THEN 'BIGINT'\r\n WHEN 1 THEN 'NUMERIC(' || F.RDB$FIELD_PRECISION || ', ' || (-F.RDB$FIELD_SCALE) || ')'\r\n WHEN 2 THEN 'DECIMAL'\r\n END\r\n WHEN 27 THEN 'NUMERIC'\r\n WHEN 35 THEN 'TIMESTAMP'\r\n WHEN 37 THEN 'VARCHAR(' || (TRUNC(F.RDB$FIELD_LENGTH / COALESCE(CH.RDB$BYTES_PER_CHARACTER,1))) || ')'\r\n WHEN 40 THEN 'CSTRING' || (TRUNC(F.RDB$FIELD_LENGTH / COALESCE(CH.RDB$BYTES_PER_CHARACTER,1))) || ')'\r\n WHEN 45 THEN 'BLOB_ID'\r\n WHEN 261 THEN 'TEXT'\r\n ELSE 'RDB$FIELD_TYPE: ' || F.RDB$FIELD_TYPE || '?'\r\n END FIELD_TYPE\r\n FROM RDB$RELATION_FIELDS RF\r\n JOIN RDB$FIELDS F ON (F.RDB$FIELD_NAME = RF.RDB$FIELD_SOURCE)\r\n LEFT OUTER JOIN RDB$CHARACTER_SETS CH ON (CH.RDB$CHARACTER_SET_ID = F.RDB$CHARACTER_SET_ID)\r\n LEFT OUTER JOIN RDB$COLLATIONS DCO ON ((DCO.RDB$COLLATION_ID = F.RDB$COLLATION_ID) AND (DCO.RDB$CHARACTER_SET_ID = F.RDB$CHARACTER_SET_ID))\r\n WHERE (RF.RDB$RELATION_NAME = '%s') AND (COALESCE(RF.RDB$SYSTEM_FLAG, 0) = 0)\r\n ORDER BY RF.RDB$FIELD_POSITION;\"\"\" % (tabela)\r\n res = self.cur_origem.execute(sql)\r\n table = \"CREATE TABLE IF NOT EXISTS %s (\" % tabela\r\n tipos = {}\r\n for coluna, tipo, in res.fetchall():\r\n table += \"%s %s,\" % (coluna.strip(), tipo.strip())\r\n tipos[coluna.strip()] = tipo\r\n table = table[:-1]+\");\"\r\n return table, tipos",
"def bd_createTable(self, _c):\n\n _c.execute('CREATE TABLE IF NOT EXISTS package (id TEXT, num INT, desc TEXT, status TEXT, source_env TEXT, dest_env TEXT, app TEXT, last_rev TEXT)')",
"def _do_action_tables_create(self):\n\n schema_shell = os.path.join(self.bento_home, \"schema-shell\", \"bin\", \"kiji-schema-shell\")\n assert os.path.isfile(schema_shell), schema_shell\n\n # Delete the table first!\n cmd = (\n \"kiji delete --target={kiji_uri} --interactive=false; \" +\n \"kiji install --kiji={kiji_uri}\" ).format(kiji_uri=self.kiji_uri)\n self._run_kiji_job(cmd)\n\n for ddl in self.ddls:\n ddl_full_path = os.path.join(self.movie_advisor_home, ddl)\n assert os.path.isfile(ddl_full_path)\n cmd = \"{schema_shell} --kiji={kiji_uri} --file={ddl_full_path}\".format(\n schema_shell=schema_shell,\n kiji_uri=self.kiji_uri,\n ddl_full_path=ddl_full_path)\n self._run_kiji_job(cmd)",
"def create_table(self):\n self.db.query(f\"\"\"\n CREATE TABLE IF NOT EXISTS {self.table} (\n substitut_id bigint unsigned references product(id),\n original_id bigint unsigned references product(id),\n PRIMARY KEY (substitut_id, original_id)\n )\n \"\"\")",
"def _get_sql_create_table(self, table_attr):\n template = 'CREATE TABLE IF NOT EXISTS \"%s\" (\\n %s );'\n columns_pri, columns_ref, columns, columns_ignore = \\\n PsqlParser._get_categorized_columns(table_attr['columns'])\n v2_columns = []\n for columnName, columnAttr in merge_dicts(columns_pri, columns_ref, columns).iteritems():\n v2_columns.append(PsqlParser._get_sql_column(columnAttr))\n return template % (table_attr['name'], \", \\n \".join(v2_columns))",
"def create_table(self, name: str, fields: Iterable[Field]) -> DbTable:",
"def create_table():\n\tCURSOR.execute(\"\"\"CREATE TABLE IF NOT EXISTS {} (\n\t\t\t[ID] NVARCHAR NOT NULL PRIMARY KEY,\n\t\t\t[Name] NVARCHAR,\n\t\t\t[Definition] NVARCHAR)\"\"\".format(TABLE_NAME))",
"def create_table(self):\n self.db.query(f\"\"\"\n CREATE TABLE IF NOT EXISTS {self.table} (\n id INT UNSIGNED NOT NULL AUTO_INCREMENT,\n name VARCHAR(140) NOT NULL,\n PRIMARY KEY (id)\n )\n \"\"\")\n\n self.db.query(\"\"\"\n CREATE TABLE IF NOT EXISTS product_category (\n product_id bigint unsigned,\n category_id int unsigned,\n CONSTRAINT pfk_product_2\n FOREIGN KEY (product_id)\n REFERENCES product(id),\n CONSTRAINT pfk_category_2\n FOREIGN KEY (category_id)\n REFERENCES category(id),\n PRIMARY KEY (product_id, category_id)\n )\n \"\"\")",
"def product_tables(self): \r\n\r\n self.mycursor.execute('CREATE TABLE IF NOT EXISTS product(\\\r\n PROD_id BIGINT PRIMARY KEY,\\\r\n PROD_name VARCHAR(100) NOT NULL,\\\r\n PROD_grade CHAR(1) NOT NULL,\\\r\n PROD_url VARCHAR(150) NOT NULL UNIQUE)')",
"def insert_tables_docstring(cur, conn):",
"def get_ddl(self):\n raise NotImplementedError",
"def get_ddl(self):\n raise NotImplementedError",
"def get_ddl(self):\n raise NotImplementedError",
"def get_ddl(self):\n raise NotImplementedError",
"def get_ddl(self):\n raise NotImplementedError",
"def category_table(self):\r\n\r\n self.mycursor.execute('CREATE TABLE IF NOT EXISTS category(\\\r\n CAT_id int PRIMARY KEY AUTO_INCREMENT,\\\r\n CAT_nom VARCHAR(50) UNIQUE)')",
"def _create_table(self, table_name: str, column_name_type: List[tuple]):\n self.cursor.execute(f'''CREATE TABLE {table_name}\n ({', '.join([f\"{column} {column_type}\" for column, column_type in column_name_type])})''')\n self.conn.commit()"
]
| [
"0.70581096",
"0.69772404",
"0.6690184",
"0.6676371",
"0.65414095",
"0.6525927",
"0.65184194",
"0.6486767",
"0.6476043",
"0.64362794",
"0.6377072",
"0.63573045",
"0.6342024",
"0.63180053",
"0.63128495",
"0.6280136",
"0.6240983",
"0.6212543",
"0.6198705",
"0.61973125",
"0.61784655",
"0.6160025",
"0.61521304",
"0.614941",
"0.614941",
"0.614941",
"0.614941",
"0.614941",
"0.6144713",
"0.61249995"
]
| 0.71857804 | 0 |
Return a background callable for this task specification. Returns a collections.abc.Callable accepting arguments ``send`` and ``cancelled``. The callable can use ``send`` to send messages and ``cancelled`` to check whether cancellation has been requested. | def background_task(self):
return CallBackgroundTask(
callable=self.callable,
args=self.args,
kwargs=self.kwargs.copy(),
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def submit_call(executor, callable, *args, **kwargs):\n task = BackgroundCall(callable=callable, args=args, kwargs=kwargs)\n return executor.submit(task)",
"def to_async(func: Callable, scheduler=None) -> Callable:\n from ..operators.observable.toasync import to_async\n return to_async(func, scheduler)",
"def background(f):\n @functools.wraps(f)\n def wrapped(*args, **kwargs):\n # The background task needs to be decorated with Flask's\n # copy_current_request_context to have access to context globals.\n @copy_current_request_context\n def task():\n global background_tasks\n try:\n # invoke the wrapped function and record the returned\n # response in the background_tasks dictionary\n background_tasks[id] = make_response(f(*args, **kwargs))\n except:\n # the wrapped function raised an exception, return a 500\n # response\n background_tasks[id] = make_response(internal_server_error())\n\n # store the background task under a randomly generated identifier\n # and start it\n global background_tasks\n id = uuid.uuid4().hex\n background_tasks[id] = Thread(target=task)\n background_tasks[id].start()\n\n # return a 202 Accepted response with the location of the task status\n # resource\n return jsonify({}), 202, {'Location': url_for('get_task_status', id=id)}\n return wrapped",
"def submit(self, callable_, *args, **kwargs):\n return self.submit_to_queue('', None, callable_, *args, **kwargs)",
"def background(func):\n @functools.wraps(func)\n def background_func(*args, **kwargs):\n thread = threading.Thread(target=func, args=args, kwargs=kwargs)\n thread.start()\n return thread\n return background_func",
"def background(func):\n def func_wrapper(*args, **kwargs):\n if settings.TEST or settings.DEBUG:\n # In a test environment we just wanna run everything synchronously\n # so just run the function right away. We should get these tasks\n # running in a local environment, but for the time being, just run\n # those synchronously as well.\n func(*args, **kwargs)\n elif settings.BACKGROUND or settings.SHELL:\n # If we're in the background we wanna run the function, but we need\n # some kind of error reporting mechanism.\n try:\n func(*args, **kwargs)\n except Exception:\n log_error('{} error'.format(func.__name__), exc_info=sys.exc_info())\n else:\n # Otherwise we're on web or something and we wanna kick this function\n # off to a background thread.\n try:\n q.enqueue(func, *args, **kwargs)\n except Exception as ex:\n search_for = \"OOM command not allowed when used memory > 'maxmemory'\"\n is_response_error = isinstance(ex, ResponseError)\n if is_response_error and ex.args[0].find(search_for) >= 0:\n message = (\n 'I fixed this one time by running\\n'\n 'heroku addons:destroy redistogo -a stayd-prod\\n'\n 'heroku addons:create redistogo:nano -a stayd-prod\\n'\n 'heroku restart -a stayd-prod')\n log_error('Redis is out of memory', message, sys.exc_info())\n else:\n log_error(\n 'Unknown error enquing background task', func.__name__,\n sys.exc_info())\n\n return func_wrapper",
"def send(self, func, *args, **kwargs):\n\n # Argument Check\n if not callable(func):\n raise TypeError(\"The argument 2 'func' is requested to be \"\n \"callable.\")\n\n with self.__lock:\n if self.__is_killed:\n raise error.DeadPoolError(\"Pool.send is called after killed.\")\n\n # Wake up workers waiting task.\n self.__lock.notify()\n\n future = _future.PoolFuture(func, *args, **kwargs)\n self.__futures.append(future)\n return future",
"def request(self, *args, **kwargs):\r\n func = sup = super(FuturesSession, self).request\r\n\r\n background_callback = kwargs.pop('background_callback', None)\r\n if background_callback:\r\n def wrap(*args_, **kwargs_):\r\n resp = sup(*args_, **kwargs_)\r\n background_callback(self, resp)\r\n return resp\r\n\r\n func = wrap\r\n\r\n return self.executor.submit(func, *args, **kwargs)",
"def submit(self, func: Callable, *args, **kwargs)->Future:\n task = self.pool.submit(func, *args, **kwargs)\n self.task_stack.append(task)\n return task",
"def getCallable():",
"def submit_to_executor(self, func: Callable, *args, **kwargs) -> Future:\n\n callback = kwargs.pop(\"callback\", None)\n\n # get stuff we'll need to fake scheduler call\n sched_data = {\n \"id\": uuid.uuid4().hex,\n \"name\": self.name,\n \"objectid\": self.AD.app_management.objects[self.name][\"id\"],\n \"type\": \"scheduler\",\n \"function\": callback,\n \"pin_app\": self.get_app_pin(),\n \"pin_thread\": self.get_pin_thread(),\n }\n\n def callback_inner(f):\n try:\n # @todo : use our own callback type instead of borrowing\n # from scheduler\n rargs = {}\n rargs[\"result\"] = f.result()\n sched_data[\"kwargs\"] = rargs\n self.create_task(self.AD.threading.dispatch_worker(self.name, sched_data))\n\n # callback(f.result(), kwargs)\n except Exception as e:\n self.error(e, level=\"ERROR\")\n\n f = self.AD.executor.submit(func, *args, **kwargs)\n\n if callback is not None:\n self.logger.debug(\"Adding add_done_callback for future %s for %s\", f, self.name)\n f.add_done_callback(callback_inner)\n\n self.AD.futures.add_future(self.name, f)\n return f",
"def function(self) -> Callable:\n\n if self._function is None:\n # Create the underlying continuous function.\n\n if self._domain.size != 0 and self._range.size != 0:\n self._function = self._extrapolator(\n self._interpolator(\n self._domain, self._range, **self._interpolator_kwargs\n ),\n **self._extrapolator_kwargs,\n )\n else:\n\n def _undefined_function(\n *args: Any, **kwargs: Any # noqa: ARG001\n ):\n \"\"\"\n Raise a :class:`ValueError` exception.\n\n Other Parameters\n ----------------\n args\n Arguments.\n kwargs\n Keywords arguments.\n\n Raises\n ------\n ValueError\n \"\"\"\n\n raise ValueError(\n \"Underlying signal interpolator function does not \"\n 'exists, please ensure that both \"domain\" and \"range\" '\n \"variables are defined!\"\n )\n\n self._function = cast(Callable, _undefined_function)\n\n return cast(Callable, self._function)",
"def _async_submit(self, callable, *args, **kwargs):\n if not self._async_executor:\n self._async_executor = ThreadPoolExecutor(max_workers=self._thread_pool_count)\n return self._async_executor.submit(callable, args, kwargs)",
"def _submit_one(self, callable_, *args, **kwargs):\n def postamble():\n self._tasks.release()\n self._maybe_schedule_task()\n return self._pool.submit(_run_callable_with_postamble(postamble, callable_, *args, **kwargs))",
"def send_async_ex_method(\n self, call: Callable[[Any, str, Message], Awaitable[str]]\n ) -> Callable[[Any, str, Message], Awaitable[str]]:\n assert self._send_async_raw_message_ex_call is None\n self._send_async_raw_message_ex_call = call\n return call",
"def func(cls):\n return cls.get_wrapper()(cls.callable)",
"def __call__(self, *args, **kw):\n return Task(self, **self.__options)(*args, **kw)",
"def apply(self, external_callable, *args, **kwargs):\n self.work_request_queue.put((external_callable, args, kwargs))\n return self.result_queue.get()",
"def background(func):\n def do_stuff(*args, **kwargs):\n Thread(target=func, args=(args), kwargs=(kwargs)).start()\n return\n return do_stuff",
"async def run_in_executor(self, func: Callable, *args, **kwargs) -> Callable:\n\n return await utils.run_in_executor(self, func, *args, **kwargs)",
"def future_func(func):\n @wraps(func)\n def func_wrapper(*args, **kwargs):\n return make_future(func(*args, **kwargs))\n return func_wrapper",
"def callFromWorker(cls, func, args, on_success=None, on_failure=None, on_complete=None):\n worker = cls(func, args)\n if on_success is not None:\n worker.job_succeeded.connect(on_success)\n if on_failure is not None:\n worker.job_failed.connect(on_failure)\n if on_complete is not None:\n worker.finished.connect(on_complete)\n worker.start()\n\n return worker",
"def task(self, *args, **options):\n\n def inner_create_task_cls(**options):\n\n def _create_task_cls(fun):\n options[\"app\"] = self\n options.setdefault(\"accept_magic_kwargs\", False)\n base = options.pop(\"base\", None) or self.Task\n\n @wraps(fun, assigned=(\"__module__\", \"__name__\"))\n def run(self, *args, **kwargs):\n return fun(*args, **kwargs)\n\n # Save the argspec for this task so we can recognize\n # which default task kwargs we're going to pass to it later.\n # (this happens in celery.utils.fun_takes_kwargs)\n run.argspec = getargspec(fun)\n\n cls_dict = dict(options, run=run,\n __module__=fun.__module__,\n __doc__=fun.__doc__)\n T = type(fun.__name__, (base, ), cls_dict)()\n return registry.tasks[T.name] # global instance.\n\n return _create_task_cls\n\n if len(args) == 1 and callable(args[0]):\n return inner_create_task_cls(**options)(*args)\n return inner_create_task_cls(**options)",
"def send_async_method(\n self, call: Callable[[Any, str], Awaitable[str]]\n ) -> Callable[[Any, str], Awaitable[str]]:\n assert self._send_async_raw_message_call is None\n self._send_async_raw_message_call = call\n return call",
"def schedule_task(self, callback, delay=1.0, repeat=False, execute_now=False):\n task_name = str(uuid.uuid4())\n\n self.xmpp.schedule(task_name, delay, callback, repeat=repeat)\n\n return _generate_cancel_method(task_name, self.xmpp.scheduler)",
"def __call__(self, func):\n LOG.debug(\"@function %s\", func)\n\n func.handler = True\n func.function = True\n\n # Circuits properties\n func.names = self.names\n func.priority = self.kwargs.get(\"priority\", 0)\n func.channel = self.kwargs.get(\"channel\", \",\".join([\"functions.{}\".format(name) for name in self.names]))\n func.override = self.kwargs.get(\"override\", False)\n \n # If getfullargspec if available to us \n if hasattr(_inspect, 'getfullargspec'):\n args = _inspect.getfullargspec(func)[0]\n else: # fall back to deprecated getargspec\n args = _inspect.getargspec(func)[0]\n\n if args and args[0] == \"self\":\n del args[0]\n func.event = getattr(func, \"event\", bool(args and args[0] == \"event\"))\n\n @wraps(func)\n def decorated(itself, event, *args, **kwargs):\n \"\"\"the decorated function\"\"\"\n LOG.debug(\"decorated\")\n function_parameters = event.message.get(\"inputs\", {})\n\n def _the_task(event, *args, **kwargs):\n return func(itself, event, *args, **kwargs)\n\n def _call_the_task(evt, **kwds):\n # On the worker thread, call the function, and handle a single or generator result.\n LOG.debug(\"%s: _call_the_task\", threading.currentThread().name)\n result_list = []\n task_result_or_gen = _the_task(evt, *args, **kwds)\n if not isinstance(task_result_or_gen, GeneratorType):\n task_result_or_gen = [task_result_or_gen]\n for val in task_result_or_gen:\n if isinstance(val, StatusMessage):\n # Fire the wrapped status message event to notify resilient\n LOG.info(\"[%s] StatusMessage: %s\", evt.name, val)\n itself.fire(StatusMessageEvent(parent=evt, message=val.text))\n elif isinstance(val, FunctionResult):\n # Collect the result for return\n LOG.debug(\"[%s] FunctionResult: %s\", evt.name, val)\n val.name = evt.name\n result_list.append(val)\n elif isinstance(val, Event):\n # Some other event, just fire it\n LOG.debug(val)\n itself.fire(val)\n elif isinstance(val, FunctionError_):\n LOG.error(\"[%s] FunctionError: %s\", evt.name, val)\n itself.fire(FunctionErrorEvent(parent=evt, message=str(val)))\n evt.success = False\n return # Don't wait for more results!\n elif isinstance(val, Exception):\n raise val\n else:\n # Whatever this is, add it to the results\n LOG.debug(val)\n result_list.append(val)\n return result_list\n\n the_task = task(_call_the_task, event, **function_parameters)\n ret = yield itself.call(the_task, \"functionworker\")\n xxx = ret.value\n # Return value is the result_list that was yielded from the wrapped function\n yield xxx\n return decorated",
"def _spawn_worker(self, func, *args, **kwargs):\n if self._worker_pool.free():\n return self._worker_pool.spawn(func, *args, **kwargs)\n else:\n raise exception.NoFreeConductorWorker()",
"async def call(fn: Callable, *args, **kwargs) -> Any:\n async with websockets.connect(WS_SERVER_URI) as websocket:\n\n task = serialize((fn, args, kwargs))\n\n await websocket.send(task)\n message = await websocket.recv()\n\n results = deserialize(message)\n\n if isinstance(results, TaskExecutionError):\n raise results\n\n return results",
"def task(self):\n return import_path_to_callable(self.func)",
"def submit(self, func, *args, **kwargs):\n errors = []\n arguments = []\n keyword_arguments = {}\n result = None\n try:\n for arg in args:\n if isinstance(arg, futures.Future) and arg.failed:\n exc = arg._exception\n if isinstance(exc, exceptions.MultipleExceptions):\n errors.extend(exc.exceptions)\n else:\n errors.append(exc)\n else:\n arguments.append(executor.get_actual_value(arg))\n\n for key, val in kwargs.iteritems():\n if isinstance(val, futures.Future) and val.failed:\n exc = val._exception\n if isinstance(exc, exceptions.MultipleExceptions):\n errors.extend(exc.exceptions)\n else:\n errors.append(val._exception)\n else:\n keyword_arguments[key] = executor.get_actual_value(val)\n\n except exceptions.ExecutionBlocked:\n result = futures.Future()\n finally:\n if errors:\n result = futures.Future()\n result._state = futures.FINISHED\n result._exception = exceptions.MultipleExceptions(\n 'futures failed',\n errors,\n )\n if result is not None:\n return result\n\n try:\n if isinstance(func, Activity):\n make_task = self.make_activity_task\n elif issubclass(func, Workflow):\n make_task = self.make_workflow_task\n else:\n raise TypeError\n task = make_task(func, *arguments, **keyword_arguments)\n except TypeError:\n raise TypeError('invalid type {} for {}'.format(\n type(func), func))\n\n return self.resume(task, *arguments, **keyword_arguments)"
]
| [
"0.6046948",
"0.57481253",
"0.5699998",
"0.56213224",
"0.55187863",
"0.542474",
"0.539684",
"0.53696924",
"0.53572047",
"0.5343441",
"0.5229373",
"0.52166164",
"0.52069044",
"0.51850367",
"0.51710916",
"0.51294076",
"0.5113927",
"0.5096022",
"0.50803524",
"0.5024802",
"0.5010762",
"0.499393",
"0.4964223",
"0.49640745",
"0.49314392",
"0.49291533",
"0.48977616",
"0.48674938",
"0.48547143",
"0.4851437"
]
| 0.680307 | 0 |
Convenience function to submit a background call to an executor. | def submit_call(executor, callable, *args, **kwargs):
task = BackgroundCall(callable=callable, args=args, kwargs=kwargs)
return executor.submit(task) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _async_submit(self, callable, *args, **kwargs):\n if not self._async_executor:\n self._async_executor = ThreadPoolExecutor(max_workers=self._thread_pool_count)\n return self._async_executor.submit(callable, args, kwargs)",
"def submit(self, *args, **kwargs):\n return self.executor.submit(*args, **kwargs)",
"def background(func):\n def do_stuff(*args, **kwargs):\n Thread(target=func, args=(args), kwargs=(kwargs)).start()\n return\n return do_stuff",
"def submit(self, callable_, *args, **kwargs):\n return self.submit_to_queue('', None, callable_, *args, **kwargs)",
"def background_task(self):\n return CallBackgroundTask(\n callable=self.callable,\n args=self.args,\n kwargs=self.kwargs.copy(),\n )",
"def request(self, *args, **kwargs):\r\n func = sup = super(FuturesSession, self).request\r\n\r\n background_callback = kwargs.pop('background_callback', None)\r\n if background_callback:\r\n def wrap(*args_, **kwargs_):\r\n resp = sup(*args_, **kwargs_)\r\n background_callback(self, resp)\r\n return resp\r\n\r\n func = wrap\r\n\r\n return self.executor.submit(func, *args, **kwargs)",
"def submit(self, func: Callable, *args, **kwargs)->Future:\n task = self.pool.submit(func, *args, **kwargs)\n self.task_stack.append(task)\n return task",
"def background(func):\n @functools.wraps(func)\n def background_func(*args, **kwargs):\n thread = threading.Thread(target=func, args=args, kwargs=kwargs)\n thread.start()\n return thread\n return background_func",
"def submit_to_executor(self, func: Callable, *args, **kwargs) -> Future:\n\n callback = kwargs.pop(\"callback\", None)\n\n # get stuff we'll need to fake scheduler call\n sched_data = {\n \"id\": uuid.uuid4().hex,\n \"name\": self.name,\n \"objectid\": self.AD.app_management.objects[self.name][\"id\"],\n \"type\": \"scheduler\",\n \"function\": callback,\n \"pin_app\": self.get_app_pin(),\n \"pin_thread\": self.get_pin_thread(),\n }\n\n def callback_inner(f):\n try:\n # @todo : use our own callback type instead of borrowing\n # from scheduler\n rargs = {}\n rargs[\"result\"] = f.result()\n sched_data[\"kwargs\"] = rargs\n self.create_task(self.AD.threading.dispatch_worker(self.name, sched_data))\n\n # callback(f.result(), kwargs)\n except Exception as e:\n self.error(e, level=\"ERROR\")\n\n f = self.AD.executor.submit(func, *args, **kwargs)\n\n if callback is not None:\n self.logger.debug(\"Adding add_done_callback for future %s for %s\", f, self.name)\n f.add_done_callback(callback_inner)\n\n self.AD.futures.add_future(self.name, f)\n return f",
"def background(func):\n def func_wrapper(*args, **kwargs):\n if settings.TEST or settings.DEBUG:\n # In a test environment we just wanna run everything synchronously\n # so just run the function right away. We should get these tasks\n # running in a local environment, but for the time being, just run\n # those synchronously as well.\n func(*args, **kwargs)\n elif settings.BACKGROUND or settings.SHELL:\n # If we're in the background we wanna run the function, but we need\n # some kind of error reporting mechanism.\n try:\n func(*args, **kwargs)\n except Exception:\n log_error('{} error'.format(func.__name__), exc_info=sys.exc_info())\n else:\n # Otherwise we're on web or something and we wanna kick this function\n # off to a background thread.\n try:\n q.enqueue(func, *args, **kwargs)\n except Exception as ex:\n search_for = \"OOM command not allowed when used memory > 'maxmemory'\"\n is_response_error = isinstance(ex, ResponseError)\n if is_response_error and ex.args[0].find(search_for) >= 0:\n message = (\n 'I fixed this one time by running\\n'\n 'heroku addons:destroy redistogo -a stayd-prod\\n'\n 'heroku addons:create redistogo:nano -a stayd-prod\\n'\n 'heroku restart -a stayd-prod')\n log_error('Redis is out of memory', message, sys.exc_info())\n else:\n log_error(\n 'Unknown error enquing background task', func.__name__,\n sys.exc_info())\n\n return func_wrapper",
"def run_in_executor(loop, executor, func, *args, tracer=None):\n tracer = tracer or ddtrace.tracer\n current_ctx = tracer.current_trace_context()\n\n # prepare the future using an executor wrapper\n future = loop.run_in_executor(executor, _wrap_executor, func, args, tracer, current_ctx)\n return future",
"def _submit(self,\n func: Callable,\n fn_args: Any,\n p_kwargs: Dict,\n timeout: float,\n callback_timeout: Callable[[Any], Any],\n daemon: bool):\n p_args = fn_args if isinstance(fn_args, tuple) else (fn_args,)\n queue = Queue()\n logger.debug(\"Executor: starting {} {}\".format(func.__name__, p_args))\n p = Process(target=self._process_run,\n args=(queue, func, *p_args,), kwargs=p_kwargs)\n\n if daemon:\n p.daemon = True\n self.process = p\n\n p.start()\n p.join(timeout=timeout)\n if not queue.empty():\n return queue.get()\n if callback_timeout:\n callback_timeout(*p_args, **p_kwargs)\n if p.is_alive():\n logger.debug('Executor: terminating by timeout')\n p.terminate()\n p.join()",
"def _run_async(fn, **inputs):\n tp = concurrent.futures.ThreadPoolExecutor(1)\n future = tp.submit(fn=fn, **inputs)\n tp.shutdown(False)\n return future",
"def start_background_task(target, *args, **kwargs):\n context.application.socket_io.start_background_task(target, *args, **kwargs)",
"def _submit_one(self, callable_, *args, **kwargs):\n def postamble():\n self._tasks.release()\n self._maybe_schedule_task()\n return self._pool.submit(_run_callable_with_postamble(postamble, callable_, *args, **kwargs))",
"def _submit_run(webclient, config, cpachecker_args, counter=0):\n limits = {}\n if config.memorylimit:\n limits[\"memlimit\"] = config.memorylimit\n if config.timelimit:\n limits[\"timelimit\"] = config.timelimit\n if config.corelimit:\n limits[\"corelimit\"] = config.corelimit\n\n run = _parse_cpachecker_args(cpachecker_args)\n\n run_result_future = webclient.submit(\n run, limits, config.cpu_model, config.result_file_pattern, config.cloud_priority\n )\n webclient.flush_runs()\n return run_result_future.result()",
"def submit(self, func, *args, **kwargs):\n \n if self._executor_bad_state.is_set():\n raise self._executor_exception\n\n # here call the start function\n\n pilot = self.start\n umgr = self.unit_manager\n report.header('submit pilots')\n\n self._task_counter += 1\n task_id = self._task_counter\n\n report.header('submit %d units' % max_tasks)\n\n # Register the ComputePilot in a UnitManager object.\n umgr.add_pilots(pilot)\n\n for i in range(0, self.max_tasks):\n \n task = rp.ComputeUnitDescription()\n self.tasks[task_id] = Future()\n task executable = func\n task.arguments = [args, kwargs]\n task.cpu_processes = self.cores_per_task\n self.tasks.append(task)\n \n umgr.submit_units(tasks)\n umgr.wait_units()",
"def submit_foreground_work_and_wait(self, work, workunit_parent=None):\r\n return self.run_tracker.foreground_worker_pool().submit_work_and_wait(\r\n work, workunit_parent=workunit_parent)",
"async def run_in_executor(self, func: Callable, *args, **kwargs) -> Callable:\n\n return await utils.run_in_executor(self, func, *args, **kwargs)",
"def __call__(self, *args, **kwargs):\n with ThreadPoolExecutor(max_workers=2) as executor:\n executor.submit(self.perform_callback, *args, **kwargs)\n self.database_query = executor.submit(mark_task_as_done(self.sql_command), *self.database_args, **self.database_kwargs)",
"def apply(self, external_callable, *args, **kwargs):\n self.work_request_queue.put((external_callable, args, kwargs))\n return self.result_queue.get()",
"def submit(self, fn, *args, **kwargs):\n fn = self._prepare_fn(fn)\n future = self._self.submit(fn, *args, **kwargs)\n for callback in self._default_done_callbacks:\n future.add_done_callback(callback)\n return FutureProxy(future, self)",
"def submit(cls, background_job):\n background_job.save()\n request_es_backup.schedule(args=(background_job.id,), delay=10)",
"def schedule(executor: Executor) -> asyncio.Future:\n awaitable = executor.start()\n future = asyncio.ensure_future(awaitable)\n return future",
"def submit(self, func, *args, **kwargs):\n errors = []\n arguments = []\n keyword_arguments = {}\n result = None\n try:\n for arg in args:\n if isinstance(arg, futures.Future) and arg.failed:\n exc = arg._exception\n if isinstance(exc, exceptions.MultipleExceptions):\n errors.extend(exc.exceptions)\n else:\n errors.append(exc)\n else:\n arguments.append(executor.get_actual_value(arg))\n\n for key, val in kwargs.iteritems():\n if isinstance(val, futures.Future) and val.failed:\n exc = val._exception\n if isinstance(exc, exceptions.MultipleExceptions):\n errors.extend(exc.exceptions)\n else:\n errors.append(val._exception)\n else:\n keyword_arguments[key] = executor.get_actual_value(val)\n\n except exceptions.ExecutionBlocked:\n result = futures.Future()\n finally:\n if errors:\n result = futures.Future()\n result._state = futures.FINISHED\n result._exception = exceptions.MultipleExceptions(\n 'futures failed',\n errors,\n )\n if result is not None:\n return result\n\n try:\n if isinstance(func, Activity):\n make_task = self.make_activity_task\n elif issubclass(func, Workflow):\n make_task = self.make_workflow_task\n else:\n raise TypeError\n task = make_task(func, *arguments, **keyword_arguments)\n except TypeError:\n raise TypeError('invalid type {} for {}'.format(\n type(func), func))\n\n return self.resume(task, *arguments, **keyword_arguments)",
"def background(f):\n @functools.wraps(f)\n def wrapped(*args, **kwargs):\n # The background task needs to be decorated with Flask's\n # copy_current_request_context to have access to context globals.\n @copy_current_request_context\n def task():\n global background_tasks\n try:\n # invoke the wrapped function and record the returned\n # response in the background_tasks dictionary\n background_tasks[id] = make_response(f(*args, **kwargs))\n except:\n # the wrapped function raised an exception, return a 500\n # response\n background_tasks[id] = make_response(internal_server_error())\n\n # store the background task under a randomly generated identifier\n # and start it\n global background_tasks\n id = uuid.uuid4().hex\n background_tasks[id] = Thread(target=task)\n background_tasks[id].start()\n\n # return a 202 Accepted response with the location of the task status\n # resource\n return jsonify({}), 202, {'Location': url_for('get_task_status', id=id)}\n return wrapped",
"def run_op_async(self, *args):\n return self._executor.run_op_async(*args)",
"def start_new_background_thread(target, args, kwargs=None):\n\n if kwargs is None:\n kwargs = {}\n request = system_service_pb.StartBackgroundRequestRequest()\n response = system_service_pb.StartBackgroundRequestResponse()\n try:\n apiproxy_stub_map.MakeSyncCall('system', 'StartBackgroundRequest', request,\n response)\n except apiproxy_errors.ApplicationError as error:\n raise ERROR_MAP[error.application_error](error.error_detail)\n else:\n return background.EnqueueBackgroundThread(\n response.request_id(),\n target,\n args,\n kwargs)",
"def submit(self, target=None, name: str = None, args: Tuple = (), kwargs: Dict = None, *, daemon: bool = None):\n raise NotImplementedError",
"def async_thread_call(fun, *args, **kwargs):\n t = Thread(\n target=_decorated_mp,\n args=(host(), port(), fun) + args,\n kwargs=kwargs)\n\n t.start()\n return t"
]
| [
"0.6497439",
"0.64870363",
"0.62554556",
"0.6224032",
"0.6215283",
"0.6135851",
"0.6057905",
"0.60406923",
"0.59824437",
"0.58395463",
"0.58181906",
"0.5815557",
"0.5810637",
"0.57943904",
"0.57922715",
"0.5750794",
"0.5741174",
"0.5738223",
"0.56663096",
"0.55685973",
"0.54702836",
"0.5450842",
"0.5422651",
"0.5412377",
"0.53981113",
"0.5385666",
"0.5369451",
"0.53575146",
"0.5302548",
"0.5298535"
]
| 0.8166681 | 0 |
Sets the total of this Metadata. | def total(self, total):
self._total = total | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_total(self):\n\n self.total = 0\n for item in self.items.all():\n self.total += item.price\n self.save()",
"def set_total(self, valeurs):\r\n \r\n self._total = valeurs",
"def total_num(self, total_num):\n\n self._total_num = total_num",
"def total_amount(self, total_amount):\n\n self._total_amount = total_amount",
"def total(self, total):\n if self.local_vars_configuration.client_side_validation and total is None: # noqa: E501\n raise ValueError(\"Invalid value for `total`, must not be `None`\") # noqa: E501\n\n self._total = total",
"def total_cargo(self, total_cargo):\n\n self._total_cargo = total_cargo",
"def total_storage(self, total_storage):\n\n self._total_storage = total_storage",
"def total(self, total: float):\n if total is None:\n raise ValueError(\"Invalid value for `total`, must not be `None`\") # noqa: E501\n \n self._total = total",
"def update_total(self):\n self.objects[self.ids.AMOUNT].setText(\"Total Spend: \\xA3%.2f\" % (self.owner.total_price() / 100))",
"def total_quantity(self, total_quantity):\n\n self._total_quantity = total_quantity",
"def row_total(self, row_total):\n\n self._row_total = row_total",
"def total(self):\n\t\treturn self._total",
"def total(self) -> float:\n return self._total",
"def total(self) -> int:\n return self._total",
"def total_other(self, total_other):\n\n self._total_other = total_other",
"def total_memory(self, total_memory):\n\n self._total_memory = total_memory",
"def total_hits(self, total_hits):\n\n self._total_hits = total_hits",
"def total_data_processed(self, total_data_processed):\n\n self._total_data_processed = total_data_processed",
"def total(self):\n return sum(self.meta) + sum(child.total() for child in self.children)",
"def total_mass(self, total_mass):\n\n self._total_mass = total_mass",
"def GetTotal(self):\n return(self.total)",
"def total_copies(self, total_copies):\n\n self._total_copies = total_copies",
"def set_total_mem(self, mem):\n self.total_mem = mem\n return",
"def set_total_time(self, total_time):\n self._total_time = total_time",
"def total_heal(self, total_heal):\n\n self._total_heal = total_heal",
"def total(self):\n return sum(self.d.values())",
"def yield_total(self, yield_total):\n\n self._yield_total = yield_total",
"def total(self):\n total = sum(self.d.values())\n return total",
"def reads_total(self, reads_total):\n\n self._reads_total = reads_total",
"def totalStock(self, totalStock):\n\n self._totalStock = totalStock"
]
| [
"0.7345318",
"0.73374975",
"0.72063965",
"0.71454555",
"0.7011428",
"0.69185275",
"0.69073534",
"0.6881462",
"0.68273604",
"0.67822355",
"0.6774405",
"0.6726431",
"0.6698601",
"0.6680655",
"0.667271",
"0.6666874",
"0.66498363",
"0.66184264",
"0.6577569",
"0.65486485",
"0.6507367",
"0.64998364",
"0.6488543",
"0.6466039",
"0.6459016",
"0.64419264",
"0.6436584",
"0.64114255",
"0.63656884",
"0.6337758"
]
| 0.8169622 | 1 |
Sets the index of this Metadata. | def index(self, index):
self._index = index | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_index(self, index):\n self.index = index",
"def index(self, index):\n \"\"\"\n if index is None:\n raise ValueError(\"Invalid value for `index`, must not be `None`\")\n \"\"\"\n\n self.container['index'] = index",
"def setIndex(self, index):\n\n self._index = index\n\n return True",
"def set_index(self, nIndex):\n\t\tcall_sdk_function('PrlVmDev_SetIndex', self.handle, nIndex)",
"def setIndex(self,index):\n if isinstance(index,str):\n index = MaterialIndex(index)\n self[0].refractiveindex = index",
"def set_at_index(self, index: int, value: object) -> None:\n self.data[index] = value",
"def setSourceIndex(self, index):\n self.sourceIndex = index",
"def findex(self, findex):\n self.logger.debug(\"In 'findex' setter.\")\n\n self._findex = findex",
"def set_index(self, idx, rel, attrs):\n\n query = 'CREATE INDEX {} ON {} ({})'.format(idx, rel, ','.join(attrs))\n\n with self.tpch_cxn.cursor() as curs:\n try:\n curs.execute(query)\n except pg.ProgrammingError as e:\n print(e)",
"def add_index(self, index):\n self.add_index_sig(IndexSignature.from_index(index))",
"def add_index(self, index):\n self.add_index_sig(IndexSignature.from_index(index))",
"def set_primary_column_index(self, index: int):\n self._primary_column_index = index",
"def set_index(self, nDevIndex):\n\t\tcall_sdk_function('PrlBootDev_SetIndex', self.handle, nDevIndex)",
"def setIndexMode(self, mode):\n self.indexMode = mode",
"def setIndexFilePath(self, index_file_path):\n self.index_file_path = index_file_path",
"def set(self, index, data):\n self.data[index] = data",
"def setInternalIndex(self,ind):\n\t\tself.trMtrxNode_ind = ind",
"def setCurrentIndex(self, index):\n self._editor.setCurrentIndex(index)",
"def index(self, *index):\n # .index() resets\n s = self._clone()\n if not index:\n s._index = None\n else:\n s._index = (self._index or []) + list(index)\n return s",
"def set_default_by_index(self, index):\n if index >= len(self._datasets):\n raise DataInvalidIndex('A dataset with index {} does not exist'.format(index))\n\n self._default_index = index",
"def label_index(self, label_index):\n\n self._label_index = label_index",
"def label_index(self, label_index):\n\n self._label_index = label_index",
"def setIndexMonth(self,index):\n self.indexMonth = index",
"def index(self, index):\n index.column_protein[self.column].add((self.protein,self.protein_res))\n index.protein_domain[(self.protein.id,self.protein_res)] = (self.domain,self.domain_res)\n index.domain_structure[(self.domain.id,self.domain_res)].add((self.structure,self.structure_res))\n index.structure[(self.structure.index, self.structure_res)] = self",
"def __init__(self, index):\n self._index = index",
"def init_index(self):\n raise NotImplementedError",
"def __setitem__(self, index: Any, value: Any) -> None:\n self.contents[index] = value\n return",
"def addIndex(self, index):\r\n assert type(index)==int\r\n assert 0<=index and index < self._dataset.getSize()\r\n\r\n if not (index in self._indices):\r\n self._indices.append(index)",
"def rindex(self, rindex):\n self.logger.debug(\"In 'rindex' setter.\")\n\n self._rindex = rindex",
"def is_indexed(self, is_indexed):\n\n self._is_indexed = is_indexed"
]
| [
"0.8639466",
"0.8079269",
"0.8046353",
"0.75118023",
"0.73526555",
"0.7189295",
"0.6799408",
"0.6671059",
"0.6655663",
"0.6651388",
"0.6651388",
"0.65320814",
"0.65255284",
"0.6455516",
"0.64315563",
"0.63905156",
"0.6384226",
"0.6369748",
"0.6339082",
"0.63354546",
"0.63331246",
"0.63331246",
"0.63244",
"0.6320671",
"0.63161796",
"0.62780315",
"0.6266994",
"0.62557906",
"0.62335974",
"0.6224701"
]
| 0.8210771 | 1 |
export_courses get all courses from the database return all courses as a dictionary of course dictionaries | def export_courses():
courses = Course.query().fetch()
dictionary = {}
for course in courses:
dictionary[course.department + "" + course.number] = course.to_dict()
return dictionary | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_courses(self):\n\n self.search([]).unlink()\n token = self.env['odoo.moodle'].search([('create_uid', '=', self.env.user.id)]).token\n domain = \"http://localhost:8888\"\n webservice_url = \"/webservice/rest/server.php?\"\n parameters = {\n \"wstoken\":token,\n 'wsfunction': 'core_course_get_courses',\n 'moodlewsrestformat': 'json'\n }\n request = requests.get(url=domain+webservice_url, params=parameters)\n request = request.json()\n print(request)\n\n for req in request:\n try:\n if req['id']==1:\n pass\n else:\n self.create({\n 'course_id': req['id'], \n 'category':req['categoryid'],\n 'fullname':req['fullname'], \n 'shortname':req['shortname'],\n 'summary': req['summary']\n }\n )\n except Exception:\n print('Course not created')",
"def get_courses():\n courses = []\n courses_recs = Course._file.read_db()\n for course in courses_recs[\"courses\"]:\n courses.append(Course(**course))\n return courses",
"def export_courses(courses, output):\n courses = sorted(courses)\n writer = csv.writer(output)\n writer.writerow([\n 'College', 'Department', 'Code', 'Name', 'Credits', 'Tags',\n 'Prerequisites'\n ])\n\n for course in courses:\n writer.writerow([\n course.college, course.department, course.code, course.name,\n course.credits, ','.join(course.tags), ','.join(course.prerequisites)\n ])",
"def list_all_courses(request):\r\n courses = Course.objects.all()\r\n courses = [dict(course_name = c.course_name, course_code = c.course_code, course_year = c.year,\r\n course_url = '/course/%s/' % c.course_code.lower()) for c in courses]\r\n\r\n response = {'courses': courses}\r\n\r\n return render_to_response('all_courses.json', response, mimetype = 'application/json',\r\n context_instance = RequestContext(request))",
"def get_courses(db: Session = Depends(get_db)): # , _: models.User = Depends(get_current_user))\n return crud.course.get_multi(db, skip=0, limit=100)",
"def extract_courses():\n if settings.XPRO_COURSES_API_URL:\n return requests.get(settings.XPRO_COURSES_API_URL, timeout=20).json()\n return []",
"def get_courses(self):\r\n\r\n return self.def_ms.get_courses()",
"def load_courses(self):\r\n store = modulestore()\r\n\r\n # Add a course with a unicode name, if the modulestore\r\n # supports adding modules.\r\n if hasattr(store, 'create_xmodule'):\r\n CourseFactory.create(org=u'ëḋẌ',\r\n course=u'śíḿṕĺé',\r\n display_name=u'2012_Fáĺĺ',\r\n modulestore=store)\r\n\r\n courses = store.get_courses()\r\n # NOTE: if xml store owns these, it won't import them into mongo\r\n if SlashSeparatedCourseKey.from_deprecated_string(TEST_COURSE_ID) not in [c.id for c in courses]:\r\n import_from_xml(store, DATA_DIR, ['toy', 'simple'])\r\n\r\n return [course.id for course in store.get_courses()]",
"def retrieve_courses(self) -> pd.DataFrame:\n if self.courses_df is None:\n self.courses_df = pd.read_sql_query('SELECT * FROM courses', con=self.connection())\n\n return self.courses_df",
"def _get_courses(self) -> None:\n\n courses_content: NavigableString = self.soup.find(\"div\", \n {\"class\": \"coursesContent\"})\n course_items: ResultSet = courses_content.find_all(\"div\", \n {\"class\": \"courseItem\"})\n\n for item in course_items:\n course_name: str = item.a[\"href\"].split(\"/\")[-2].lower()\n course_data: ParseType = self._parse(item)\n self._update(course_name, course_data)",
"def dump_all_json():\n\n # Set up process logging.\n # Existence of error log file can tell us if errors occur.\n script_logging.clear_status_log()\n script_logging.clear_error_log()\n\n # Pull list of courses\n courses = canvas_data.pull_courses()\n\n # If there are course ID parameters, just load the specified courses\n if len(sys.argv) > 1:\n course_id_list = map(int, sys.argv[1:])\n courses = [course for course in courses if course['id'] in course_id_list]\n\n # course_id = int(sys.argv[1])\n # courses = [course for course in courses if course['id'] == course_id]\n\n script_logging.log_status('Storing courses JSON to %s' % (COURSES_FILE_NAME))\n with open(COURSES_FILE_NAME, 'w') as f:\n json.dump(courses, f, indent = 2)\n \n for course in courses:\n course_id = course['id']\n\n # Pull students in each course\n students = canvas_data.pull_course_students(course_id)\n dump_json(students, STUDENTS_FILE_NAME, course_id, \"course students\")\n\n # Pull users for each course.\n # We'll need this to look up comment submitters.\n users = canvas_data.pull_course_users(course_id)\n dump_json(users, USERS_FILE_NAME, course_id, \"course users\")\n\n # pull assignments for each course\n assignments = canvas_data.pull_assignments(course_id)\n dump_json(assignments, ASSIGNMENTS_FILE_NAME, course_id, 'course assignments')\n\n # pull submissions for each assignment\n for assignment in assignments:\n assignment_id = assignment[\"id\"]\n submissions = canvas_data.pull_submissions_with_comments(course_id, assignment_id)\n dump_json(submissions, SUBMISSIONS_FILE_NAME, assignment_id, 'assignment submissions')",
"def GetCourses(firebase: firebase) -> None:\n\n global courses\n obj_key_list = []\n\n result = firebase.get('/course', None)\n\n if result is None:\n return\n\n for i in result.keys():\n obj_key_list.append(i)\n\n for i in obj_key_list:\n course = Course()\n course.setId(i)\n course.setKnowledgeAreaId(result[i]['knowledgeareaid'])\n course.setCatalogId(result[i]['catalogid'])\n course.setTitle(result[i]['name'])\n course.setDescription(result[i]['description'])\n course.setInstructor(result[i]['instructor'])\n course.setFee(result[i]['fee'])\n courses.append(course)",
"def get_courses(self, depth=0):\r\n return self.courses.values()",
"def all_courses(records):\n \n course_and_id_dict = {} #This creates an empty dictionary\n for all_tuples in records:\n course_info_tuple = all_tuples[0] #Extracts all course information\n course_id = course_info_tuple[0]\n course_name = course_info_tuple[1]\n \n course_and_id_dict[course_id] = course_name\n \n return course_and_id_dict",
"def get_courses(self, *args):\n courses = []\n user = self.context['user']\n modules = user.profile.purchased_modules.all()\n for module in modules:\n course_id = self.course_in_courses(module.course.mnemo, courses)\n if course_id:\n courses[course_id[0]]['modules'].append({'mnemo': module.mnemo})\n else:\n courses.append({\n 'mnemo': module.course.mnemo,\n 'modules': [{'mnemo': module.mnemo}]\n })\n return courses",
"def generate_courses():\r\n for category in CourseCategory.objects.all():\r\n Course.objects.create(name=category.name, category=category, is_active=True,\r\n is_featured=True)",
"def create_courses():\n\n\t# create list for courses\n\tallcourses = []\n\n\t# load courses as classes in allcourses-list\n\twith open(\"../data/vakken.csv\", \"rt\") as coursefile:\n\n\t\t# clean text\n\t\tcourses = csv.reader(coursefile)\n\t\tfor row in courses:\n\t\t\tfor text in row:\n\t\t\t\tcourse_info = text.split(\";\")\n\n\t\t\t\t# add course name\n\t\t\t\tcourse_name = course_info[0]\n\n\t\t\t\t# add amount of lectures\n\t\t\t\tcourse_lectures = course_info[1]\n\n\t\t\t\t# add amount of seminars\n\t\t\t\tcourse_seminars = course_info[2]\n\n\t\t\t\t# add max amount seminars\n\t\t\t\tcourse_max_sem = course_info[3]\n\t\t\t\tif course_max_sem == \"nvt\":\n\t\t\t\t\tcourse_max_sem = 0\n\n\t\t\t\t# add amount of practicals\n\t\t\t\tcourse_practicals = course_info[4]\n\n\t\t\t\t# add max amount practicals\n\t\t\t\tcourse_max_prac = course_info[5]\n\t\t\t\tif course_max_prac == \"nvt\":\n\t\t\t\t\tcourse_max_prac = 0\n\n\t\t\t\t# add course to list\n\t\t\t\tallcourses.append(Course(course_name, course_lectures, course_seminars, course_max_sem, course_practicals, course_max_prac))\n\n\treturn allcourses",
"def get_course(data):\n\n return {item['course'] for item in data}",
"def dump(self):\n course = {\n \"course_id\": self.course_id,\n \"title\": self.title,\n \"chapters\": {}\n }\n for chapter_num in self.chapters:\n chapter = self.chapters[chapter_num]\n course[\"chapters\"][chapter.num] = {\n \"name\": chapter.name,\n \"lessons\": {lesson_num: lesson_data.name for lesson_num,\n lesson_data in chapter.lessons.items()}\n }\n with open(_JSON_PATH_FORMAT % self.course_id, \"w+\") as fp:\n _logger.debug(\"Dumping the data into a JSON file so that it can \"\n \"be accessed at a later time quickly and without \"\n \"need to scrape LearnItFirst.com, saving time and \"\n \"unnecessary requests.\")\n json.dump(course, fp)",
"def getCoursesList(self, pageSize=100):\n results = self.service.courses().list(pageSize=pageSize).execute()\n self.courses = results.get('courses', [])\n if not self.courses:\n return []\n return self.courses # Might not have to return self.courses, but it's useful for now",
"def get_courses_info(url, headers):\n dash = get_page_contents(url, headers)\n soup = BeautifulSoup(dash)\n courses_soup = soup.find_all('article', 'course')\n courses = []\n for course_soup in courses_soup:\n course_id = None\n course_name = course_soup.h3.text.strip()\n course_url = None\n course_state = 'Not yet'\n try:\n # started courses include the course link in the href attribute\n course_url = BASE_URL + course_soup.a['href']\n if course_url.endswith('info') or course_url.endswith('info/'):\n course_state = 'Started'\n # The id of a course in edX is composed by the path\n # {organization}/{course_number}/{course_run]\n course_id = course_soup.a['href'][9:-5]\n except KeyError:\n pass\n courses.append(Course(id=course_id,\n name=course_name,\n url=course_url,\n state=course_state))\n return courses",
"def _accessible_courses_list_from_groups(request):\r\n courses_list = {}\r\n\r\n instructor_courses = UserBasedRole(request.user, CourseInstructorRole.ROLE).courses_with_role()\r\n staff_courses = UserBasedRole(request.user, CourseStaffRole.ROLE).courses_with_role()\r\n all_courses = instructor_courses | staff_courses\r\n\r\n for course_access in all_courses:\r\n course_key = course_access.course_id\r\n if course_key not in courses_list:\r\n course = modulestore('direct').get_course(course_key)\r\n if course is None:\r\n raise ItemNotFoundError(course_key)\r\n courses_list[course_key] = course\r\n\r\n return courses_list.values()",
"def search_courses():\n current_user = view_helpers.get_current_user()\n courses, has_more = m.Course.search(flask.request.values, current_user)\n\n course_dicts, user_course_dicts, _ = (\n m.Course.get_course_and_user_course_dicts(courses, current_user))\n\n return api_util.jsonify({\n 'courses': course_dicts,\n 'user_courses': user_course_dicts,\n 'has_more': has_more,\n })",
"def scrape(url, filename):\n courselist = []\n headers = ['title', 'description', 'course number', 'duration', 'difficulty', 'instructors', 'course url']\n with open(filename, 'wb') as outfile:\n wr = csv.writer(outfile)\n wr.writerow(headers)\n courses = json.load(urllib2.urlopen(url))\n for course in courses['courses']:\n c = Course()\n c.title = course['title']\n c.desc = course['summary']\n c.course_number = course['key']\n c.duration = str(course['expected_duration']) + ' ' + str(course['expected_duration_unit'])\n c.difficulty = course['level']\n c.url = 'https://www.udacity.com/course/' + course['slug']\n l = len(course['instructors'])\n for i in xrange(l):\n if(i == 0):\n c.instructors += course['instructors'][i]['name']\n else:\n c.instructors += ';' + course['instructors'][i]['name']\n with open(filename, 'ab') as outfile:\n wr = csv.writer(outfile)\n wr.writerow(c.getaslist())",
"def _course_json(request, course_key):\r\n course_module = _get_course_module(course_key, request.user, depth=None)\r\n return _xmodule_json(course_module, course_module.id)",
"def get_courses_metadata(self):\n return Metadata(**settings.METADATA['course_ids'])",
"def see_teaching_courses(self, username: str, token: str) -> List[Dict[str, object]]:\n\n # Validate user first\n if not self.validate(username=username, token=token, check_privilege='instructor'):\n raise RuntimeError(\"User not verified!\")\n\n # Get UID from user's username\n uid = self.get_uid(username=username)\n\n # Query database for courses instructed by a user with this UID\n cursor = self._db_connection.cursor()\n cursor.execute(\n '''\n SELECT \n course_id,\n course_abbreviation,\n course_name, \n time,\n seats \n FROM \n courses\n WHERE \n instructor_id = ?\n ;\n ''', (uid,))\n\n db_results = cursor.fetchall()\n\n if db_results is None:\n print(\"No associated courses found!\")\n return []\n\n # Build information dicts for every course this user is instructing\n courses = []\n for result in db_results:\n # Get the number of students enrolled in this course already\n cursor.execute('''SELECT COUNT(*) FROM enrollment_records WHERE course_id = ?;''', (result[0],))\n students_enrolled = cursor.fetchone()[0]\n if students_enrolled is None:\n students_enrolled = 0\n\n # Build a course dict from the data\n courses.append({\n \"course_abbreviation\": result[1],\n \"course_name\": result[2],\n \"time\": result[3],\n \"students_enrolled\": students_enrolled,\n \"capacity\": result[4],\n })\n\n return courses",
"def getCourseData(self, course):\n\t\tif course == None:\n\t\t\treturn None\n\t\tcommand = \"SELECT name, description, author_id FROM courses WHERE id=?;\"\n\t\tparams = (course,)\n\n\t\tdata = self._run_command(command, params)\n\n\t\tif not data:\n\t\t\treturn None\n\n\t\tdata = data[0]\n\t\tresult = {\"name\": data[0] if data[0] else \"\", \n\t\t\"description\": data[1] if data[1] else \"\", \n\t\t\"author_id\": data[2]}\n\n\t\treturn result",
"def get_course_table(self, table):\n json_result = {}\n row_list = table.xpath('.//table[@id = \"s_course\"]/tr[position() > 1]')\n for row in row_list:\n session = row.xpath('./td[1]/text()')\n course_full_code_list = row.xpath('.//a[starts-with(@href, \"javascript:course_popup\")]/text()')\n course_name_list = row.xpath('.//font[@style = \"font-size:7pt;\"]/text()')\n course_list = []\n if len(course_full_code_list) != len(course_name_list):\n # year course design project would be count twice\n if (\"Design Project\" == course_name_list[0]) & \\\n (len(course_full_code_list) + 1 == len(course_name_list)):\n course_name_list = course_name_list[1:]\n else:\n raise ProfileException(\n \"Error: unmatched lists. course code list:\",\n course_full_code_list, \"\\n course name list:\", course_name_list)\n for i, full_code in enumerate(course_full_code_list):\n if re.match(re.compile('\\w{3}\\d{3}[YH]1\\s+[SFY]'), full_code) is None:\n raise ProfileException(\"Illegal course code!:\" + full_code)\n course_list.append({\n \"courseName\": course_name_list[i],\n \"courseCode\": full_code[0:6],\n \"courseTime\": full_code[-1],\n \"courseLength\": full_code[6:8]\n })\n # there is a empty session\n if session:\n json_result.update({session[0]: course_list})\n if json_result:\n return json_result\n else:\n raise ProfileException(\"Failed to get course_table table(row list is empty)\")",
"def course_listing(request):\r\n if GlobalStaff().has_user(request.user):\r\n # user has global access so no need to get courses from django groups\r\n courses = _accessible_courses_list(request)\r\n else:\r\n try:\r\n courses = _accessible_courses_list_from_groups(request)\r\n except ItemNotFoundError:\r\n # user have some old groups or there was some error getting courses from django groups\r\n # so fallback to iterating through all courses\r\n courses = _accessible_courses_list(request)\r\n\r\n def format_course_for_view(course):\r\n \"\"\"\r\n return tuple of the data which the view requires for each course\r\n \"\"\"\r\n return (\r\n course.display_name,\r\n reverse_course_url('course_handler', course.id),\r\n get_lms_link_for_item(course.location),\r\n course.display_org_with_default,\r\n course.display_number_with_default,\r\n course.location.name\r\n )\r\n\r\n return render_to_response('index.html', {\r\n 'courses': [format_course_for_view(c) for c in courses if not isinstance(c, ErrorDescriptor)],\r\n 'user': request.user,\r\n 'request_course_creator_url': reverse('contentstore.views.request_course_creator'),\r\n 'course_creator_status': _get_course_creator_status(request.user),\r\n 'allow_unicode_course_id': settings.FEATURES.get('ALLOW_UNICODE_COURSE_ID', False)\r\n })"
]
| [
"0.7052836",
"0.7041838",
"0.70088214",
"0.700561",
"0.6939108",
"0.66422844",
"0.66270727",
"0.6546052",
"0.64977354",
"0.649505",
"0.6464057",
"0.64300716",
"0.64214396",
"0.6378788",
"0.6374261",
"0.6343026",
"0.63298833",
"0.6263807",
"0.61841094",
"0.61821026",
"0.61341554",
"0.6030339",
"0.6026791",
"0.6025707",
"0.6013959",
"0.5952772",
"0.59439987",
"0.59339386",
"0.59133387",
"0.5912377"
]
| 0.85508007 | 0 |
update_db Updates the database with all the courses for the current semester | def update_db():
with open("courses_2016.json") as data:
data = data.read()
courses = json.loads(data)
for course in courses:
try:
[dept, course] = course.split(" ")
text = get_course(dept, course)
insert_course(dept, course, text)
except:
failures.append(course) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_course(self):\n # ensure that updating course is exists\n if self.is_course_exists():\n db = Course._file.read_db()\n for crs_i in range(len(db[\"courses\"])):\n if db[\"courses\"][crs_i][\"course_name\"] == self._course_name:\n\n # ensuring that user does not provided less number of limited places\n if db[\"courses\"][crs_i][\"total_place\"] > self._total_place:\n print(\"{} course's limited places number must be more than {}\".format(\n self._course_name,\n db[\"courses\"][crs_i][\"total_place\"]\n ))\n return\n\n db[\"courses\"][crs_i][\"teacher\"] = self._teacher\n db[\"courses\"][crs_i][\"total_place\"] = self._total_place\n break\n self._file.write_db(db)\n print(\"The course - {} is updated\".format(self._course_name))\n return self.get_course().course_info()",
"def update(request):\n\tcourse_id = request.GET.get('course_id')\n\tif request.method == 'POST':\n\t\tcourse_title = request.POST['course_title']\n\t\tinstitute_name = request.POST['institute_name']\n\t\tcourse_desc = request.POST['course_desc']\n\t\tcurrent_data = Course.objects.get(course_id = course_id)\n\t\tcurrent_data.course_title = course_title\n\t\tcurrent_data.institute_name = institute_name\n\t\tcurrent_data.course_desc = course_desc\n\t\tcurrent_data.save()\n\t\treturn HttpResponseRedirect(reverse('courseapp:index'))\n\tdata = Course.objects.get(course_id = course_id)\n\treturn render(request,'update.html',{'data':data})",
"def edit_course(self, course):\n EDIT_COURSE = \"\"\"UPDATE Course SET subject_code = %s, credit_hours = %s, description = %s WHERE name = %s\"\"\"\n\n self.db_cursor.execute(EDIT_COURSE, (\n course.subject_code, course.credit_hours, course.description, course.name))\n self.db_connection.commit()\n\n DELETE_COURSE_TOPICS = \"\"\"DELETE FROM CourseTopics WHERE course_name = %s\"\"\"\n self.db_cursor.execute(DELETE_COURSE_TOPICS, (course.name,))\n self.db_connection.commit()\n INSERT_COURSE_TOPICS = \"\"\"INSERT INTO CourseTopics (course_name, topic_id) VALUES (%s, %s)\"\"\"\n for ct in course.topics:\n self.db_cursor.execute(INSERT_COURSE_TOPICS, (course.name,ct))\n self.db_connection.commit()\n\n DELETE_COURSE_GOALS = \"\"\"DELETE FROM CourseGoals WHERE course_name = %s\"\"\"\n self.db_cursor.execute(DELETE_COURSE_GOALS, (course.name,))\n self.db_connection.commit()\n INSERT_COURSE_GOALS = \"\"\"INSERT INTO CourseGoals (course_name, goal_id) VALUES (%s, %s)\"\"\"\n for cg in course.goals:\n self.db_cursor.execute(INSERT_COURSE_GOALS, (course.name, cg))\n self.db_connection.commit()",
"def update_course_index(self, updated_index_entry):\r\n self.db_connection.update_course_index(updated_index_entry)",
"def save_course(self):\r\n self.course.save()\r\n self.store.update_item(self.course, self.user.id)",
"def update(self, semester, year, abbreviation=None, course_number=None):\n print({\n 'message': 'Updating sections.',\n 'semester': semester,\n 'year': year,\n 'abbreviation': abbreviation,\n 'course_number': course_number,\n })\n\n # Get list of courses for which to update sections, excluding those updated already today\n if abbreviation and course_number:\n courses = Course.objects.filter(abbreviation=abbreviation, course_number=course_number)\n else:\n midnight = datetime.datetime.combine(datetime.datetime.now().date(),datetime.time(0))\n courses = Course.objects.exclude(section__last_updated__gte=midnight).distinct()\n\n\n # Asynchronously perform an update for each course's sections\n i = AtomicInteger()\n def update_wrapper(course):\n i.inc()\n self._update_class(\n course=course,\n semester=semester,\n year=year,\n )\n p = ThreadPool(NUM_THREADS)\n result = p.map_async(update_wrapper, courses)\n\n # Log progress of updates\n print(BColors.OKGREEN + f'Starting job with {NUM_THREADS} workers.' + BColors.ENDC)\n while not result.ready():\n print(BColors.OKGREEN + f'Updating course {i.value()} of {len(courses)}.' + BColors.ENDC)\n time.sleep(5)",
"def _install_course_updates(self):\r\n url = STUDIO_BASE_URL + '/course_info_update/' + self._course_key + '/'\r\n\r\n for update in self._updates:\r\n\r\n # Add the update to the course\r\n date, content = update\r\n payload = json.dumps({'date': date, 'content': content})\r\n response = self.session.post(url, headers=self.headers, data=payload)\r\n\r\n if not response.ok:\r\n raise CourseFixtureError(\r\n \"Could not add update to course: {0} with {1}. Status was {2}\".format(\r\n update, url, response.status_code))",
"def refresh_course(self):\r\n self.course = modulestore().get_course(self.course.id)",
"def set_course(self, new_course, updating=False):\n COURSE_QUERY = \"\"\"UPDATE Course SET subject_code = %s, credit_hours = %s, description = %s WHERE name = %s\"\"\" if updating \\\n else \"\"\"INSERT INTO Course (subject_code, credit_hours, description, name) VALUES (%s, %s, %s, %s)\"\"\"\n\n self.db_cursor.execute(COURSE_QUERY, (new_course.subject_code, new_course.credit_hours, new_course.description, new_course.name))\n self.db_connection.commit()\n\n # Add course topics and course goals:\n for ct_id in new_course.topics:\n self.set_course_topic(ct_id, new_course.name)\n for cg_id in new_course.goals:\n self.set_course_goal(cg_id, new_course.name)",
"def test_update_entry_courses(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\r\n pass",
"def update_db(df, db, app=None):\n db = db\n\n print_or_log(\"Formatting data...\", app=app)\n\n # list to keep track of filtered data\n # keys are the date with the code concatenated\n entries = {}\n i = 1\n for key, sheet in df.items():\n\n # print progress\n print_or_log(\"{}/{}\".format(i, len(df.items())), end=\"\\r\", app=app)\n i += 1\n\n # prepare exam results from sheet period\n for index, row in sheet.iterrows():\n\n # skip if not exam\n if row[\"Provnamn\"] != \"Tentamen\":\n continue\n\n # retrieve course code\n code = row[\"Kurs\"]\n\n # retrieve course name\n name = row[\"Kursnamn\"]\n\n # retrieve grade\n grade = row[\"Betyg\"]\n\n # retrieve amount of results\n amount = row[\"Antal\"]\n\n # retrieve exam date\n date = row[\"Provdatum\"]\n if type(date) is pd.Timestamp:\n date = str(date.date())\n\n # necessary because of different date formats in sheets\n elif type(date) is str:\n date = str(parse(date).date())\n\n # if exam occasion hasn't been encountered yet, add it\n key = code + date\n if key not in entries:\n entries[key] = {\n \"taken\": date,\n \"code\": code,\n \"name\": name,\n \"failures\": 0,\n \"threes\": 0,\n \"fours\": 0,\n \"fives\": 0,\n }\n\n # now modify grade that this iteration concerns in occasion\n if grade == \"U\":\n entries[key][\"failures\"] = amount\n elif grade == \"3\":\n entries[key][\"threes\"] = amount\n elif grade == \"4\":\n entries[key][\"fours\"] = amount\n elif grade == \"5\":\n entries[key][\"fives\"] = amount\n\n print_or_log(\"\\nInserting data...\", app=app)\n insertions = 0\n i = 1\n\n # go through entries and add them if they're not already present in the database\n for key, entry in entries.items():\n\n # print progress\n print_or_log(\"{}/{}\".format(i, len(entries.items())), end=\"\\r\", app=app)\n i += 1\n\n # see if there is an entry matching the course code and exam date\n db_entry = db.query(\n \"SELECT * FROM results WHERE code=%s AND taken=%s\", (entry[\"code\"], entry[\"taken\"]))\n\n # if there isn't, we can insert the new result in the database\n if len(db_entry) == 0:\n insertions += 1\n\n db.query(\"INSERT INTO results (taken, code, name, failures, threes, fours, fives) \"\n \"VALUES (%s,%s,%s,%s,%s,%s,%s)\", (entry[\"taken\"], entry[\"code\"], entry[\"name\"], entry[\"failures\"],\n entry[\"threes\"], entry[\"fours\"], entry[\"fives\"]))\n\n print_or_log(\"\\nInserted \" + str(insertions) + \" entries in database\", app=app)",
"def _update(self, course_name: str, newdata: ParseType) -> None:\n\n self.courses[course_name] = newdata",
"def save_courses(user, token):\n\n GOOGLE_API_COURSES = 'https://classroom.googleapis.com/v1/courses/'\n headers = {\n 'content-type': 'application/json',\n }\n\n response = requests.get(\n GOOGLE_API_COURSES,\n params={'access_token': token},\n headers=headers)\n\n courses = response.json()['courses']\n\n Course.objects.filter(teacher=user).delete()\n\n for course in courses:\n saved_course = Course.objects.create(\n id=course['id'],\n teacher=user,\n name=course['name'].title(),\n section=course['section'].title(),\n state=course['courseState'],\n link=course['alternateLink'],\n teachers_email=course['teacherGroupEmail'],\n course_email=course['courseGroupEmail'],\n created_at=course['creationTime'],\n updated_at=course['updateTime']\n )\n save_course_topics(user, token, saved_course)",
"def run_end_of_semester():\n connection = get_connection()\n cursor = connection.cursor()\n sql_string = \"SELECT netID FROM MEMBER\"\n cursor.execute(sql_string)\n entries = cursor.fetchall()\n for entry in entries:\n #example string:\n #UPDATE Member SET dues_paid = (SELECT dues_paid FROM Member WHERE netID = 'jmrolf')-1\n #WHERE netID = 'jmrolf'\n sql_update_string = \"UPDATE Member SET dues_paid = (SELECT dues_paid FROM Member WHERE netID = '\"+str(entry[0])+ \\\n \"')-1 WHERE netID = '\"+str(entry[0])+\"'\"\n cursor.execute(sql_update_string)\n\n cursor.execute('SELECT netID, dues_paid FROM Member')\n to_delete_entries = cursor.fetchall()\n for entry in to_delete_entries:\n if entry[1] < 0:\n cursor.execute(\"DELETE FROM Member WHERE netID = '\"+entry[0]+\"'\")\n connection.commit()",
"def update_database():\n\n # We obtain the data from the official database\n df = getData.extractData()\n\n # We save the dataframe for later use in the API\n auxiliary.saveToCsv(df, 'app/resources')",
"def main():\n\n #Courses\n years = [2016, 2017, 2018, 2019, 2020]\n with sqlite3.connect(\"determined.db\") as conn:\n c = conn.cursor()\n c.execute(\n \"\"\"\n CREATE TABLE \"course\" (\n \"course_number\"\tint NOT NULL,\n \"dept_id\"\tvarchar(4) NOT NULL,\n \"title\"\tvarchar(100) NOT NULL,\n \"instructor_fname\"\tvarchar(35) DEFAULT NULL,\n \"instructor_lname\"\tvarchar(35) DEFAULT NULL,\n \"student_work_products\"\tjson DEFAULT NULL,\n `term` varchar(7) NOT NULL,\n `year` int NOT NULL,\n PRIMARY KEY(\"course_number\", \"term\", \"year\")) \n \"\"\"\n )\n conn.commit()\n courses = [\n (1370, \"CPSC\", \"Computer Literacy\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Fall\"),\n (1375, \"CPSC\", \"Programming I\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Fall\"),\n (2376, \"CPSC\", \"Intro to Game Programming\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Fall\"),\n (2380, \"CPSC\", \"Algorithms\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Fall\"),\n (2482, \"CPSC\", \"Computer Organization\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Spring\"),\n (3377, \"CPSC\", \"Advanced Game Programming\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Spring\"),\n (3380, \"CPSC\", \"Operating Systems\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Spring\"),\n (3383, \"CPSC\", \"Programming Languages\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Spring\"),\n (3384, \"CPSC\", \"Computer Networks\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Summer\"),\n (4360, \"CPSC\", \"Computer Security\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Summer\")\n ]\n #Adding years\n upload_courses = []\n for year in years:\n upload_courses += [x + (year,) for x in courses]\n #Making a few instructors teach multiple course\n new_courses = [\n (4557, \"CPSC\", \"Natural Language Processing\", ),\n (2375, \"CPSC\", \"Programming II\",),\n (2776, \"CPSC\", \"Data Structures and Algorithms\",),\n (4862, \"CPSC\", \"Image Recognition\", ),\n ]\n for i in range(0,len(new_courses)):\n year = choice(years)\n for y in range(0,2): #Number of times new course is taught\n c = upload_courses[i]\n new_data = (c[3], c[4], c[5], choice([\"Fall\", \"Spring\", \"Summer\"]), year+y)\n data = new_courses[i] + new_data\n upload_courses.append(data)\n #Adding solo instructors and solo courses\n upload_courses += [\n (4672, \"CPSC\", \"Programming Memes\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Spring\", choice(years)),\n (1872, \"CPSC\", \"Information Systems\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Summer\", choice(years)),\n (1123, \"CPSC\", \"Microsoft Office\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", 
\"Final Exam\", \"Project 1\"]}), \"Fall\", choice(years))\n ]\n\n with sqlite3.connect(\"determined.db\") as conn:\n c = conn.cursor()\n c.executemany('''INSERT INTO course (course_number, dept_id, title, instructor_fname, instructor_lname, student_work_products, term, year)\n VALUES ( ?, ?, ?, ?, ?, ?, ?, ?)''', upload_courses)\n conn.commit()\n\n #SWP\n with sqlite3.connect(\"determined.db\") as conn:\n c = conn.cursor()\n c.execute(\n \"\"\"\n CREATE TABLE `student_work_product` (\n `id` INTEGER PRIMARY KEY,\n `product` varchar(250) NOT NULL,\n `course_id` int NOT NULL,\n `dept_id` int NOT NULL,\n `student_fname` varchar(35) NOT NULL,\n `student_lname` varchar(35) NOT NULL,\n `student_outcome` int DEFAULT NULL,\n `score` int DEFAULT NULL,\n `term` varchar(7) NOT NULL,\n `year` int NOT NULL,\n CONSTRAINT `course` FOREIGN KEY (`course_id`) REFERENCES `course` (`course_number`)\n CONSTRAINT `course` FOREIGN KEY (`dept_id`) REFERENCES `course` (`dept_id`)\n )\n \"\"\"\n )\n conn.commit()\n \n swps = []\n with sqlite3.connect(\"determined.db\") as conn:\n conn.row_factory = sqlite3.Row\n c = conn.cursor()\n c.execute (\"Select * from course\")\n records = [dict(x) for x in c.fetchall()]\n #Generating 20 student records for each swp in each course\n for i, course in enumerate(records):\n student_names = []\n for _ in range(20):\n student_names.append({'fname': names.get_first_name(),\n 'lname': names.get_last_name()})\n for product in json.loads(course['student_work_products'])['swp']:\n for student in student_names:\n if i%7 == 0:\n score = int(triangular(50, 85))\n else:\n score = int(triangular(50, 100))\n if score >= 90: outcome = 4\n elif score >= 80: outcome = 3\n elif score >= 70: outcome = 2\n elif score >= 60: outcome = 1\n else: outcome = 0 \n swps.append((\n product,\n course['course_number'],\n \"CPSC\",\n student['fname'],\n student['lname'],\n outcome,\n score, \n course['term'], \n course['year']\n ))\n \n c.executemany('''INSERT INTO student_work_product (product, course_id, dept_id, student_fname, student_lname, student_outcome, score, term, year)\n VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?)''', swps)\n conn.commit()",
"def update_course_enrollment(self, student_id, course_id, course_section_id, term):\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n try:\n with conn:\n cursor.execute(\n \"\"\"\n UPDATE course_enrollments\n SET course_id = ?, course_section_id = ?\n WHERE student_id = ?\n (?,?,?)\"\"\",\n (course_id, course_section_id, student_id),\n )\n return 1\n except sqlite3.IntegrityError:\n return -1",
"def update(self) -> None:\n self.app.notifier.set_value(\"Database update started on {}\".format(datetime.datetime.now().strftime(\"%c\")))\n self.app.notifier.clear()\n self.app.logger.info(\"Starting database update\")\n card_parser = JSonCardParser(self.app)\n card_parser.check_update()\n try:\n self.__update(card_parser)\n except mysql.connector.errors.ProgrammingError as exc:\n if exc.errno == mysql.connector.errorcode.ER_BAD_DB_ERROR:\n self.setup_db()\n self.__update(card_parser)\n else:\n raise\n else:\n self.app.logger.info(\"Finished database update\")\n self.app.notifier.set_value(\"Database update finished on {}\".format(datetime.datetime.now().strftime(\"%c\")))\n self.app.notifier.clear()",
"def update_database(self) -> None:\n \n # Simulate that we update a database\n time.sleep(10)",
"def update():\r\n\t#print(\"Updating poeninja database...\")\r\n\t#poeninja.update_database()\r\n\t#print(\"Updateing poeninja name-dict...\")\r\n\t#poeninja.update_name_dict()\r\n\t#print(\"Updating stats...\")\r\n\t#update_stats()\r\n\t#print(\"Updating items...\")\r\n\t#update_items()\r\n\t#print(\"Updating mod MySQL database...\")\r\n\t#update_mod_database()\r\n\tmydb = database()\r\n\twhile True:\r\n\t\trandom_mod(mydb)",
"def partial_update_course(course_id: int, course_request: schemas.CourseRequestPartial, db: Session = Depends(get_db)):\n\n course = crud.course.get(db, obj_id=course_id)\n\n if not course:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f'Course {course_id} not found')\n\n try:\n return crud.course.update(db, db_obj=course, obj_in=course_request)\n except Exception as error:\n logger.error(f'{error}')\n raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=f'{error}')",
"def update_course(course):\r\n store = editable_modulestore()\r\n store.update_item(course, '**replace_user**')\r\n updated_course = store.get_course(course.id)\r\n return updated_course",
"def validate_new_curriculum_courses(self, curriculum_courses):\n\n for cur in curriculum_courses:\n # check to make sure its in the general courses table\n self.db_cursor.execute(\"\"\"SELECT COUNT(*) FROM Course WHERE name = %s\"\"\", (cur,))\n ct = self.db_cursor.fetchone()\n ct = ct[0]\n if ct == 0:\n print(\"course does not exist, we must create new one or cancel\") # todo\n\n return True",
"def reload_course(self):\r\n self.course = self.store.get_course(self.course.id)",
"def _get_courses(self) -> None:\n\n courses_content: NavigableString = self.soup.find(\"div\", \n {\"class\": \"coursesContent\"})\n course_items: ResultSet = courses_content.find_all(\"div\", \n {\"class\": \"courseItem\"})\n\n for item in course_items:\n course_name: str = item.a[\"href\"].split(\"/\")[-2].lower()\n course_data: ParseType = self._parse(item)\n self._update(course_name, course_data)",
"def update_course_index(self, course_index):\r\n self.course_index.update(\r\n son.SON([('org', course_index['org']), ('offering', course_index['offering'])]),\r\n course_index\r\n )",
"def upgrade_db():\n import publicprize.db_upgrade\n\n backup_db()\n for field, date in (\n (\"submission_start\", \"6/16/2017 12:0:0\"),\n (\"submission_end\", \"9/7/2017 12:0:0\"),\n (\"public_voting_start\", \"9/8/2017 12:0:0\"),\n (\"public_voting_end\", \"9/15/2017 12:0:0\"),\n (\"judging_start\", \"9/27/2017 12:0:0\"),\n (\"judging_end\", \"9/27/2017 19:0:0\"),\n ):\n set_contest_date_time('esprit-venture-challenge', date, field)\n db.session.commit()",
"def edit_current_schedule(current_courses, full_courses):\n\n days_list = ['mon', 'tues', 'wed','thurs', 'fri', 'sat', 'sun']\n valid_grades= [\"A\", \"A-\",\"B+\",\"B\",\"B-\",\"C+\",\"C\",\"C-\",\"D\",\"F\"]\n\n clear_screen()\n while True:\n try:\n print(\"Here are your current classes\")\n for val in current_courses:\n print(val)\n choice = int(input(\"Please select which one you'd like to edit:\\n1.Days\\n2.Time\\n3.Grade\\n4.Save and Quit \"))\n if choice !=4:\n class_code = input(\"Which class? \")\n if choice == 1:\n days = input(\"Please input days using style: mon,tues,wed,thurs,fri,sat,sun. Separate by comma \").split(',')\n for val in days:\n if val not in days_list:\n print(\"Invalid option\")\n days = current_courses[class_code][0]\n current_courses[class_code][0] = days\n else:\n current_courses[class_code][0] = days\n elif choice == 2:\n start_time = int(input(\"Using format 2400, input start time: \"))\n end_time = int(input(\"Using format 2400, input end time: \"))\n current_courses[class_code][1] = start_time\n current_courses[class_code][2] = end_time\n continue\n elif choice == 3:\n grade = input(\"Update current letter grade: \")\n if grade not in valid_grades:\n print(\"Invalid input\")\n grade = current_courses[class_code][3]\n current_courses[class_code][3] = grade.upper()\n full_courses[class_code][1] = grade.upper()\n else:\n current_courses[class_code][3] = grade.upper()\n full_courses[class_code][1] = grade.upper()\n continue\n else:\n with open('current_courses.json', 'w') as fp:\n json.dump(current_courses, fp)\n with open('full_courses.json', 'w') as f_file:\n json.dump(full_courses, f_file)\n break\n except ValueError:\n print(\"Invalid input.\")\n continue\n return 0",
"def set_curriculum_course(self, curriculum_name, course_name, required, updating=True):\n CURRICULUM_COURSE_QUERY = \"\"\"UPDATE CurriculumListings SET required = %s WHERE curriculum_name = %s AND course_name = %s\"\"\" if updating \\\n else \"\"\"INSERT INTO CurriculumListings (curriculum_name, course_name, required) VALUES (%s, %s, %s)\"\"\"\n\n if not updating:\n self.db_cursor.execute(\n CURRICULUM_COURSE_QUERY,\n (curriculum_name, course_name, required))\n else:\n self.db_cursor.execute(\n CURRICULUM_COURSE_QUERY,\n (required, curriculum_name, course_name))\n self.db_connection.commit()",
"def import_previous_grades_into_db(year, semester, db_name='./grades.sqlite3', filename='./grades.xls'):\n if not os.path.isfile(db_name):\n raise Exception(\"DB not found\")\n\n df1 = pd.read_excel(filename)\n\n try:\n cls = df1.filter(like='CL')\n except Exception as e:\n print(e)\n cls = None # no CLA's found\n\n try:\n ols = df1.filter(like='OL')\n except Exception as e:\n print(e)\n ols = None # no OLAs found\n\n try:\n ids = df1.filter(like='sername').values.ravel().tolist()\n ids_len = len(ids)\n except Exception as e:\n print('Was not able to parse user ids, check xls file you are trying to import: ', e)\n raise e # may be improved in the future - strange case\n try:\n names = df1.filter(like='Name').values.ravel().tolist()\n except Exception as e: # either does not exist or has different name\n print(e)\n names = None\n\n class_dict = get_ids_in_class_by_year_semester(year, semester, db_name)\n\n if (not class_dict and not names) or (class_dict and len(class_dict) < ids_len and not names):\n raise Exception('Did not find ids in table CLASS and did not find names in xls file')\n elif names and (not class_dict or (class_dict and len(class_dict) < ids_len)):\n print('Did not find existing students, but found names in xsl\\nAdding new students...\\n')\n existing_ids = get_pipeline_ids(db_name)\n need_to_update_students = False\n # otherwise just add ids to the class list\n if existing_ids:\n for sid in ids:\n if sid not in existing_ids:\n need_to_update_students = True\n else:\n need_to_update_students = True\n\n if need_to_update_students:\n fname, lname = zip(*(name.split(', ') for name in names))\n fname = (name.strip() for name in fname)\n lname = (name.strip() for name in lname)\n insert_students(ids, fname, lname, db_name)\n register_students_in_class(ids, year, semester, db_name)\n\n class_ids = [class_dict[sid] for sid in ids]\n if ols is None and cls is None or len(class_ids) == 0:\n raise Exception('No grades to load')\n\n grades_tupples = list()\n if ols is not None:\n for lab_name in ols:\n grades = (str(grade) for grade in ols[lab_name].values)\n grades_tupples += list(zip(class_ids, [lab_name] * ids_len, [-1] * ids_len, grades, ['TRUE'] * ids_len))\n\n if cls is not None:\n for lab_name in cls:\n grades = (str(grade) for grade in cls[lab_name].values)\n grades_tupples += list(zip(class_ids, [lab_name] * ids_len, [-1] * ids_len, grades, ['TRUE'] * ids_len))\n\n with lite.connect(db_name) as con:\n cur = con.cursor()\n cur.executemany('INSERT OR REPLACE INTO grades\\\n (class_id, lab, attempt, grade, pass_fail) VALUES (?, ?, ?, ?, ?)', grades_tupples)\n con.commit()"
]
| [
"0.6768394",
"0.63613737",
"0.63062465",
"0.6221332",
"0.60496604",
"0.59358865",
"0.5929495",
"0.59224737",
"0.5922039",
"0.565147",
"0.5638097",
"0.5626825",
"0.56209064",
"0.55687594",
"0.55594355",
"0.55489606",
"0.5509718",
"0.54877836",
"0.5472728",
"0.5467652",
"0.5453478",
"0.5444335",
"0.54354805",
"0.5434976",
"0.5390544",
"0.53843105",
"0.5373312",
"0.5365245",
"0.53644943",
"0.5353773"
]
| 0.7535003 | 0 |
check_db Compares the database entries with the entire list of courses Returns a list of courses that failed to insert into the database | def check_db():
with open("courses_2016.json") as data:
data = data.read()
courses = json.loads(data)
course_keys_in_db = Course.query().fetch(keys_only=True)
db_list = []
failures = []
for course in course_keys_in_db:
db_list.append(course.id())
failures = [i for i in courses if i.replace(" ","") not in db_list]
return failures | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate_new_curriculum_courses(self, curriculum_courses):\n\n for cur in curriculum_courses:\n # check to make sure its in the general courses table\n self.db_cursor.execute(\"\"\"SELECT COUNT(*) FROM Course WHERE name = %s\"\"\", (cur,))\n ct = self.db_cursor.fetchone()\n ct = ct[0]\n if ct == 0:\n print(\"course does not exist, we must create new one or cancel\") # todo\n\n return True",
"def check_courses(self):\n course_list = list(self.json_parsed_file['course'].items())\n course_found = False\n index = 0\n\n course_id = \"\"\n version = \"\"\n\n for courses in course_list:\n course_id = courses[0]\n version = courses[1]\n\n if Course.objects.filter(CourseID=course_id, Version=version).exists():\n course_found = True\n self.json_parsed_file['course'] = course_list[index]\n continue\n\n index += 1\n\n if course_list is []:\n self.is_parsed_pdf_valid = False\n self.output_message += \"No parsed course information found\\n\"\n\n elif not course_found:\n self.is_parsed_pdf_valid = False\n self.output_message += 'Courses: ' + str(course_id) + ', Version: ' + str(version) + \\\n ' Does not exist in the database.\\n'",
"def update_db():\n \n with open(\"courses_2016.json\") as data:\n data = data.read()\n\n courses = json.loads(data)\n\n for course in courses:\n try:\n [dept, course] = course.split(\" \")\n text = get_course(dept, course)\n insert_course(dept, course, text)\n except:\n failures.append(course)",
"def test_inserting_course(self):\n a_user = User.objects.create(first_name=\"2\", last_name=\"test\", username=\"test\")\n user = VSBUser.objects.create(user=a_user)\n inst = Institution.objects.create(name = \"UVA\")\n c1 = Course.objects.create(name = \"CS 3240\", institution= inst)\n c2 = Course.objects.create(name = \"CS 1110\", institution= inst)\n\n user.add_course(c1)\n user.add_course(c2)\n\n expected = 2\n\n received = len(user.get_courses())\n\n self.assertEqual(received, expected, msg=\"Course_And_Topic.inserting_course: Adding courses failed.\")",
"def check_db(db, nb_exps, nb_algos, nb_trials, nb_benchmarks, nb_child_exps=0):\n experiments = db.read(\"experiments\")\n assert len(experiments) == nb_exps\n assert len(db.read(\"algo\")) == nb_algos\n assert len(db.read(\"trials\")) == nb_trials\n assert len(db.read(\"benchmarks\")) == nb_benchmarks\n\n # Check we have expected number of child experiments.\n exp_map = {exp[\"_id\"]: exp for exp in experiments}\n assert len(exp_map) == nb_exps\n child_exps = []\n for exp in experiments:\n parent = exp[\"refers\"][\"parent_id\"]\n if parent is not None:\n assert parent in exp_map\n child_exps.append(exp)\n assert len(child_exps) == nb_child_exps",
"def verify_courses(self, courses):\n assert len(courses) == 1\n self.verify_course(courses[0])",
"def check_name_db ():\n db_checks = [DB_FIRST_MALE, DB_FIRST_FEMALE,\n DB_LAST_SIMPLE, DB_LAST_NAMESON,\n DB_LAST_GAELIC1, DB_LAST_GAELIC2,\n DB_LAST_COMBO1, DB_LAST_COMBO2,\n DB_LAST_UPPER1, DB_LAST_UPPER2]\n\n db_exists = db.database_exists\n for db_name in db_checks:\n if not db_exists(db_name):\n raise DatabaseException, db_name",
"def test_xml_get_courses(self):\r\n self.initdb('direct')\r\n courses = self.store.modulestores['xml'].get_courses()\r\n self.assertEqual(len(courses), 2)\r\n course_ids = [course.id for course in courses]\r\n self.assertIn(self.course_locations[self.XML_COURSEID1].course_key, course_ids)\r\n self.assertIn(self.course_locations[self.XML_COURSEID2].course_key, course_ids)\r\n # this course is in the directory from which we loaded courses but not in the map\r\n self.assertNotIn(\"edX/toy/TT_2012_Fall\", course_ids)",
"def test_course_not_available(self):\n \n user1 = User.objects.create(username=\"user1\", password=\"1234\", email=\"[email protected]\")\n user2 = User.objects.create(username=\"user2\", password=\"1234\", email=\"[email protected]\")\n \n course = Course.objects.first()\n course.registered_course.add(user1)\n course.registered_course.add(user2)\n \n self.assertFalse(course.is_course_available())",
"def check_empty_db(loaded_db):\n _Helpers.check_db(\n loaded_db, nb_exps=0, nb_algos=0, nb_trials=0, nb_benchmarks=0\n )",
"def test_adding_duplicate_course(self):\n a_user = User.objects.create(first_name=\"2\", last_name=\"test\", username=\"test\")\n user = VSBUser.objects.create(user=a_user)\n inst = Institution.objects.create(name = \"UVA\")\n c1 = Course.objects.create(name = \"CS 3240\", institution= inst)\n\n user.add_course(c1)\n user.add_course(c1)\n\n expected = 1\n\n received = len(user.get_courses())\n\n self.assertEqual(received, expected, msg=\"Course_And_Topic.adding_duplicate_course: Adding duplicate course test failed.\")",
"def test_instructor_table_db(self) -> None:\n rep: Repository = Repository(r\"C:\\Users\\harsh\\OneDrive\\Desktop\\My Files\\Assignment\")\n\n expected: List[str] = [\n ('Bezos, J', '10115', 'SSW 810', 'A', 'Rowland, J'),\n ('Bezos, J', '10115', 'CS 546', 'F', 'Hawking, S'),\n ('Gates, B', '11714', 'SSW 810', 'B-', 'Rowland, J'),\n ('Gates, B', '11714', 'CS 546', 'A', 'Cohen, R'),\n ('Gates, B', '11714', 'CS 570', 'A-', 'Hawking, S'),\n ('Jobs, S', '10103', 'SSW 810', 'A-', 'Rowland, J'),\n ('Jobs, S', '10103', 'CS 501', 'B', 'Hawking, S'),\n ('Musk, E', '10183', 'SSW 555', 'A', 'Rowland, J'),\n ('Musk, E', '10183', 'SSW 810', 'A', 'Rowland, J')\n ]\n\n res = list()\n\n for row in rep.instructor_table_db(r'C:\\Users\\harsh\\OneDrive\\Desktop\\My Files\\Assignment\\810_startup.db'):\n res.append(row)\n\n self.assertEqual(expected, res)",
"def test_get_course_list_with_invalid_course_location(self):\r\n request = self.factory.get('/course')\r\n request.user = self.user\r\n\r\n course_key = SlashSeparatedCourseKey('Org', 'Course', 'Run')\r\n self._create_course_with_access_groups(course_key, self.user)\r\n\r\n # get courses through iterating all courses\r\n courses_list = _accessible_courses_list(request)\r\n self.assertEqual(len(courses_list), 1)\r\n\r\n # get courses by reversing group name formats\r\n courses_list_by_groups = _accessible_courses_list_from_groups(request)\r\n self.assertEqual(len(courses_list_by_groups), 1)\r\n # check both course lists have same courses\r\n self.assertEqual(courses_list, courses_list_by_groups)\r\n\r\n # now delete this course and re-add user to instructor group of this course\r\n delete_course_and_groups(course_key, commit=True)\r\n\r\n CourseInstructorRole(course_key).add_users(self.user)\r\n\r\n # test that get courses through iterating all courses now returns no course\r\n courses_list = _accessible_courses_list(request)\r\n self.assertEqual(len(courses_list), 0)\r\n\r\n # now test that get courses by reversing group name formats gives 'ItemNotFoundError'\r\n with self.assertRaises(ItemNotFoundError):\r\n _accessible_courses_list_from_groups(request)",
"def _init_check_database(self):\n # FIXME add additional checks, for example that columns in BY,\n # ACROSS, ON are not the same ? (see task structure notes)\n # also that location columns are not used\n if self.verbose:\n print('checking input database {}'.format(self.database))\n\n # check that required columns are present\n cols = set(self.db.columns)\n message = (\n ' argument is invalid, check that all the provided attributes '\n 'are defined in the database {}'.format(self.database))\n # the argument of issuperset needs to be a list ...\n assert cols.issuperset(self.on), 'ON' + message\n assert cols.issuperset(self.across), 'ACROSS' + message\n assert cols.issuperset(self.by), 'BY' + message\n\n for col in cols:\n assert '_' not in col, \\\n col + ': you cannot use underscore in column names'\n assert '#' not in col, \\\n col + ': you cannot use \\'#\\' in column names'\n\n if self.verbose:\n print(\"input database verified\")",
"def test_database():\n sanity_tester = sanity.DatabaseSanity(Base, engine)\n sanity_tester.test()\n if sanity_tester.errors:\n for error in sanity_tester.errors:\n err_target, err_msg = str(error).split(' ', 1)\n message = ' '.join([click.style(err_target, bold=True), err_msg])\n output.warning(message)\n output.error('Database has failed sanity check; '\n 'run `cum repair-db` to repair database')\n exit(1)",
"def check(self):\n self.conn = psycopg2.connect(self.conn_string)\n self.cur = self.conn.cursor(\"rifflecursor\")\n self.cur.execute(\"\"\"\n SELECT * FROM yelp_stored WHERE business_id = %s;\n \"\"\", (self.bus_id,))\n sql_tup = self.cur.fetchall()\n self.conn.close()\n if sql_tup == []:\n return False\n else:\n return sql_tup",
"def test_course_info(self):\r\n # Regex of first 3 columns of course information table row for\r\n # test course loaded from git. Would not have sha1 if\r\n # git_info_for_course failed.\r\n table_re = re.compile(r\"\"\"\r\n <tr>\\s+\r\n <td>edX\\sAuthor\\sCourse</td>\\s+ # expected test git course name\r\n <td>MITx/edx4edx/edx4edx</td>\\s+ # expected test git course_id\r\n <td>[a-fA-F\\d]{40}</td> # git sha1 hash\r\n \"\"\", re.VERBOSE)\r\n\r\n self._setstaff_login()\r\n self._mkdir(getattr(settings, 'GIT_REPO_DIR'))\r\n\r\n # Make sure we don't have any git hashes on the page\r\n response = self.client.get(reverse('sysadmin_courses'))\r\n self.assertNotRegexpMatches(response.content, table_re)\r\n\r\n # Now add the course and make sure it does match\r\n response = self._add_edx4edx()\r\n self.assertRegexpMatches(response.content, table_re)",
"def course_tester(courses):\n\n return False",
"def test_valid_data_course_add(self, app, auth):\n app.admin.add_new_course()\n course_data = CreateCourse.random()\n app.course.create_course(course_data)\n assert (app.course.new_course_page() == course_data.full_course_name), \\\n \"The course was not created!\"",
"def database_script_check(table, bs_id, attempt_num):\n con = lite.connect(DB_FILE, timeout = TIMEOUT)\n con.row_factory = lite.Row\n with con:\n cur = con.cursor()\n #get script data\n cur.execute(\"SELECT * FROM {0} WHERE AttemptNum=? AND BSID=?\".format(table),\n (attempt_num, bs_id))\n rows = cur.fetchall()\n\n error_data = []\n for row in rows:\n if row['Started'] == None or row['Ended'] == None or row['Exit'] != 0:\n error_data.append([row['Command'], row['Arguments'], row['ExpProc']])\n return error_data",
"def get_courses(db: Session = Depends(get_db)): # , _: models.User = Depends(get_current_user))\n return crud.course.get_multi(db, skip=0, limit=100)",
"def check_db(self) -> None:\n a = self.cursor.execute(f'SELECT * from {table_users};').fetchall()\n print(a)\n print('#################################################')\n b = self.cursor.execute(f'SELECT * from {table_groups};').fetchall()\n print(b)\n print('#################################################')\n c = self.cursor.execute(f'SELECT * from {table_users_groups};').fetchall()\n print(c)\n print('#################################################')\n d = self.cursor.execute(f\"SELECT * FROM {table_users_settings};\").fetchall()\n print(d)\n print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n e = self.get_search_button_basic()\n print(e)\n print('________________________________________________________')",
"def validate_db():\n if not os.path.exists(app.config['DATABASE']):\n print(\"Init database!\")\n init()",
"def check_db(self):\n if self.db == 'user':\n db = USERS_LIST\n return db\n elif self.db == 'questions':\n db = QUESTIONS_LIST\n return db\n elif self.db == 'meetups':\n db = MEETUPS_LIST\n return db\n\n elif self.db == 'rsvp':\n db = RSVP_LIST\n return db",
"def assertExists(self):\n for db in self._db_tree:\n assert(db in self._datastore.conn.database_names)\n for collection in db['collections']:\n assert(collection['name'] in self._datastore[db['database']].collection_names())",
"def test_create_course_check_forum_seeding(self):\r\n test_course_data = self.assert_created_course(number_suffix=uuid4().hex)\r\n self.assertTrue(are_permissions_roles_seeded(_get_course_id(test_course_data)))",
"def is_course_exists(self):\n db = Course._file.read_db()\n courses = db[\"courses\"]\n for crs in courses:\n if crs[\"course_name\"] == self._course_name:\n return True\n break\n return False",
"def test_course_available(self):\n \n course = Course.objects.first()\n \n self.assertTrue(course.is_course_available())",
"def test_create_db(self):\r\n function_name = sys._getframe().f_code.co_name\r\n db_name = \"{}_{}\".format(function_name, \"db\")\r\n db_name_illegal_by_rdb = \"{}_{}\".format(\r\n db_name,\r\n self.ILLEGAL_BY_RDB\r\n )\r\n db_name_illegal_by_this_program = \"{}_{}\".format(\r\n db_name,\r\n self.ILLEGAL_BY_THIS_PROGRAM\r\n )\r\n\r\n test_list_1 = [db_name, False, None, None, False]\r\n test_list_2 = [db_name_illegal_by_rdb, None]\r\n test_list_3 = [db_name_illegal_by_this_program, None]\r\n\r\n test_list_1[len(test_list_1) - 1] = isinstance(\r\n crd(\r\n self.c,\r\n test_list_1[0],\r\n True\r\n ),\r\n r.ast.DbCreate\r\n )\r\n test_list_1[len(test_list_1) - 2] = crd(self.c, test_list_1[0])\r\n test_list_1[len(test_list_1) - 3] = crd(self.c, test_list_1[0])\r\n test_list_1[len(test_list_1) - 4] = isinstance(\r\n crd(\r\n self.c,\r\n test_list_1[0],\r\n True\r\n ),\r\n r.ast.DbCreate\r\n )\r\n dd(self.c, test_list_1[0])\r\n\r\n \"\"\"Test 1.\"\"\"\r\n with self.assertWarns(CDW):\r\n test_list_2[len(test_list_2) - 1] = crd(self.c, test_list_2[0])\r\n\r\n \"\"\"Test 2.\"\"\"\r\n with self.assertWarns(CDW):\r\n test_list_3[len(test_list_3) - 1] = crd(self.c, test_list_3[0])\r\n\r\n self.assertTrue(test_list_1[len(test_list_1) - 1]) # Test 3.\r\n self.assertIsNotNone(test_list_1[len(test_list_1) - 2]) # Test 4.\r\n self.assertIsNone(test_list_1[len(test_list_1) - 3]) # Test 5.\r\n self.assertFalse(test_list_1[len(test_list_1) - 4]) # Test 6.\r\n self.assertIsNone(test_list_2[len(test_list_2) - 1]) # Test 7.\r\n self.assertIsNone(test_list_3[len(test_list_3) - 1]) # Test 8.\r",
"def test_invalid_data_course_add(self, app, auth, field):\n app.admin.add_new_course()\n course_data = CreateCourse.random()\n setattr(course_data, field, None)\n app.course.create_course(course_data)\n assert (\n not app.course.all_required_fields_filled()\n ), \"Empty fields are ignored and user data changed successfully!\""
]
| [
"0.6906886",
"0.6675097",
"0.6283814",
"0.61212605",
"0.60753566",
"0.597159",
"0.5901944",
"0.58181834",
"0.5798651",
"0.5767637",
"0.5721752",
"0.5678129",
"0.564499",
"0.5597766",
"0.55770147",
"0.55652153",
"0.5563142",
"0.54699236",
"0.5439198",
"0.5401672",
"0.5396904",
"0.53924716",
"0.538384",
"0.5361862",
"0.53603506",
"0.534693",
"0.534093",
"0.5322632",
"0.53085166",
"0.5280787"
]
| 0.82807064 | 0 |
Uses eslsfetch to generate family specific fasta files out of seq_file which is provided as source (e.g. rfamseq11.fa). It will generate fasta files for all families by default | def generate_fasta(seq_file, out_dir):
LOGGER.info("Generating fasta file", seq_file)
sequence = ''
fp_out = None
    seq_bits = None
    rfam_acc = ""  # tracks the family whose fasta file is currently open
# logging sequences not exported
# rename this to family log
log_file = os.path.join(out_dir, "missing_seqs.log")
logging.basicConfig(filename=log_file, filemode='w', level=logging.INFO)
cnx = RfamDB.connect()
cursor = cnx.cursor(raw=True)
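    # raw cursor skips type conversion, hence the str() casts on the region fields below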
# fetch clan specific family full_region data and sequence description
query = ("SELECT fr.rfam_acc, fr.rfamseq_acc, fr.seq_start, fr.seq_end, rf.description\n"
"FROM full_region fr, rfamseq rf\n"
"WHERE fr.rfamseq_acc=rf.rfamseq_acc\n"
"AND fr.is_significant=1\n"
"ORDER BY fr.rfam_acc")
cursor.execute(query)
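    # regions arrive ordered by rfam_acc, so all sequences of one family are
    # contiguous and can be streamed into a single per-family fasta file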
for region in cursor:
# new family
if str(region[RFAM_ACC]) != rfam_acc:
            # close the previous family's file if one is open
if fp_out is not None:
fp_out.close()
# open new fasta file
fp_out = gzip.open(
os.path.join(out_dir, str(region[RFAM_ACC]) + ".fa.gz"), 'w')
rfam_acc = region[RFAM_ACC]
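        # esl-sfetch -c <start>/<end> fetches that coordinate range of the accession
        # from seq_file (assumes the file was indexed beforehand with esl-sfetch --index)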
cmd = "esl-sfetch -c %s/%s %s %s" % (str(region[START]), str(region[END]),
seq_file, str(region[SEQ_ACC]))
proc = subprocess.Popen(
cmd, shell=True, stdout=subprocess.PIPE)
seq = proc.communicate()[0]
# get sequence
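        # first line of the esl-sfetch output is the fasta header;
        # the remaining lines are the sequence itself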
        seq_bits = seq.split('\n')[1:]
        sequence = ''.join(seq_bits)
        # write the record only if the sequence is non-empty and valid
if sequence != '' and seq_validator(sequence) is True:
# write header
fp_out.write(">%s/%s-%s %s\n" % (str(region[SEQ_ACC]),
str(region[START]),
str(region[END]),
str(region[DESC])))
# write sequence
fp_out.write(sequence + '\n')
else:
            # log the region whose sequence could not be exported
            logging.info("%s/%s-%s", str(region[SEQ_ACC]), str(region[START]), str(region[END]))
# close last file
fp_out.close()
# disconnect from DB
cursor.close()
RfamDB.disconnect(cnx) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_fasta_single(seq_file, rfam_acc, out_dir):\n\n sequence = ''\n fp_out = None\n seq_bits = None\n\n # logging sequences not exported\n # rename this to family log\n log_file = os.path.join(out_dir, rfam_acc + \".log\")\n logging.basicConfig(\n filename=log_file, filemode='w', level=logging.INFO)\n\n # connect to db\n cnx = RfamDB.connect()\n\n # get a new buffered cursor\n cursor = cnx.cursor(raw=True)\n\n # fetch sequence accessions for specific family - significant only!!\n query = (\"SELECT fr.rfam_acc, fr.rfamseq_acc, fr.seq_start, fr.seq_end, rf.description\\n\"\n \"FROM full_region fr, rfamseq rf\\n\"\n \"WHERE fr.rfamseq_acc=rf.rfamseq_acc\\n\"\n \"AND fr.is_significant=1\\n\"\n \"AND fr.rfam_acc=\\'%s\\'\") % (rfam_acc)\n\n # execute the query\n cursor.execute(query)\n\n # open a new fasta output file\n fp_out = gzip.open(\n os.path.join(out_dir, str(rfam_acc) + \".fa.gz\"), 'w')\n\n for region in cursor:\n\n cmd = \"esl-sfetch -c %s/%s %s %s\" % (str(region[START]), str(region[END]),\n seq_file, str(region[SEQ_ACC]))\n\n proc = subprocess.Popen(\n cmd, shell=True, stdout=subprocess.PIPE)\n\n seq = proc.communicate()[0]\n\n # get sequence\n sequence = ''\n seq_bits = seq.split('\\n')[1:]\n sequence = sequence.join(seq_bits)\n\n # print sequence\n\n if sequence != '' and seq_validator(sequence) is True:\n # write header\n fp_out.write(\">%s/%s-%s %s\\n\" % (str(region[SEQ_ACC]),\n str(region[START]),\n str(region[END]),\n str(region[DESC])))\n\n # write sequence\n fp_out.write(sequence + '\\n')\n\n else:\n # logging sequences that have not been exported\n logging.info(str(region[SEQ_ACC]))\n\n # close last file\n fp_out.close()\n\n # disconnect from DB\n cursor.close()\n RfamDB.disconnect(cnx)",
"def gen_datafiles():\n\tnum_reads = 10000\n\tnum_samples = 100\n\tgen_sequences('hg38.fa', num_reads, num_samples, 1, 'hg38_train.txt')\n\tgen_sequences('HIV-1.fasta', num_reads, num_samples, 0, 'HIV-1_train.txt')\n\tgen_sequences('hg38.fa', num_reads, num_samples, 1, 'hg38_test.txt')\n\tgen_sequences('HIV-1.fasta', num_reads, num_samples, 0, 'HIV-1_test.txt')",
"def seq_download(name, organism=\"Homo sapiens\", gaba=False):\n\n subunits = {\n \"Alpha-1\": \"Gabra1\",\n \"Alpha-2\": \"Gabra2\",\n \"Alpha-3\": \"Gabra3\",\n \"Alpha-4\": \"Gabra4\",\n \"Alpha-5\": \"Gabra5\",\n \"Alpha-6\": \"Gabra6\",\n \"Beta-1\": \"Gabrb1\",\n \"Beta-2\": \"Gabrb2\",\n \"Beta-3\": \"Gabrb3\",\n \"Gamma-1\": \"Gabrg1\",\n \"Gamma-2\": \"Gabrg2\",\n \"Gamma-3\": \"Gabrg3\",\n \"Delta\": \"Gabrd\",\n \"Pi\": \"Gabrp\",\n \"Rho-1\": \"Gabrr1\",\n \"Rho-2\": \"Gabrr2\",\n \"Rho-3\": \"Gabrr3\",\n \"Epsilon\": \"Gabre\",\n \"Theta\": \"Gabrq\"\n }\n if gaba:\n results = search(subunits[name])\n else:\n results = search(name)\n results = results[results[\"Organism\"].str.contains(organism, na=False)]\n if len(results):\n if gaba:\n target = results[results[\"Gene names\"].str.contains(subunits[name].upper())][\"Entry\"].max()\n else:\n target = results[results[\"Gene names\"].str.contains(name)][\"Entry\"].max()\n response = urlopen(f\"https://www.uniprot.org/uniprot/{target}.fasta\").read().decode(\"utf-8\")\n with open(\"Temp_seq.fasta\", \"w\") as file:\n file.write(response)\n seq = SeqIO.read(\"Temp_seq.fasta\", \"fasta\")\n os.remove(\"Temp_seq.fasta\")\n\n return seq\n\n else:\n return -1",
"def run(input, output, additional=None, fasta_path=None, seed_path=None):\r\n version = \"##gff-version 3\\n\"\r\n gff3_columns = ['seqid', 'source', 'type', 'start', 'end', 'score', 'strand', 'phase', 'attributes']\r\n gff3 = pd.DataFrame(columns=gff3_columns)\r\n table = pd.read_csv(input, sep='\\t')\r\n\r\n if seed_path:\r\n seed_file = pd.read_csv(seed_path, sep='\\t')\r\n\r\n if fasta_path is not None:\r\n fasta_file = ''\r\n open(fasta_path, 'w').close()\r\n\r\n\r\n if additional:\r\n table_to_add = pd.read_csv(additional, sep='\\t')\r\n table = table.append(table_to_add)\r\n\r\n for index, row in table.iterrows():\r\n name = handleGivenName(row['name'], table, 'name')\r\n seqId = row['seqName']\r\n name5p = handleGivenName(row['5pname'], table, '5pname')\r\n seq5p = row['5pseq']\r\n name3p = handleGivenName(row['3pname'], table, '3pname')\r\n seq3p = row['3pseq']\r\n strand = row['strand']\r\n hairpin = row['hairpinSeq']\r\n start = row['start']\r\n end = row['end']\r\n\r\n if row['5pRC'] >= row['3pRC']:\r\n name5p += '|m'\r\n name3p += '|s'\r\n else:\r\n name5p += '|s'\r\n name3p += '|m'\r\n\r\n seq5p_freq = len(table[(table['5pseq'] == seq5p) | (table['3pseq'] == seq5p)])\r\n seq3p_freq = len(table[(table['5pseq'] == seq3p) | (table['3pseq'] == seq3p)])\r\n\r\n name5p += f'|{seq5p_freq}'\r\n name3p += f'|{seq3p_freq}'\r\n\r\n\r\n if seed_path is not None:\r\n if not pd.isnull(seq5p):\r\n seq5p_seed = seq5p[1:8].upper().replace(\"T\", \"U\")\r\n try:\r\n name5p += '|' + seed_file[seed_file['seed'] == seq5p_seed][\"miRBase_name\"].iloc[0]\r\n except:\r\n name5p += '|' + seq5p_seed\r\n\r\n if not pd.isnull(seq3p):\r\n seq3p_seed = seq3p[1:8].upper().replace(\"T\", \"U\")\r\n try:\r\n name3p += '|' + seed_file[seed_file['seed'] == seq3p_seed][\"miRBase_name\"].iloc[0]\r\n except:\r\n name3p += '|' + seq3p_seed\r\n \r\n if fasta_path is not None:\r\n if not pd.isnull(seq5p):\r\n fasta_file += f'>{name5p}\\n{seq5p}\\n'\r\n if not pd.isnull(seq3p):\r\n fasta_file += f'>{name3p}\\n{seq3p}\\n'\r\n\r\n if len(fasta_file) > 100000:\r\n with open(fasta_path, 'a+') as f:\r\n f.write(fasta_file)\r\n fasta_file = ''\r\n\r\n gff_row = [[seqId, '.', 'pre_miRNA', start, end, '.', strand, '.', f'ID={name}']]\r\n\r\n if strand == '+':\r\n try:\r\n offset5p = len(hairpin.split(seq5p)[0])\r\n start5p = start + offset5p\r\n end5p = start + offset5p + len(seq5p) - 1\r\n gff_row.append([seqId, '.', 'miRNA', start5p, end5p, '.', strand, '.', f'ID={name5p}'])\r\n except:\r\n pass\r\n\r\n try:\r\n offset3p = len(hairpin.split(seq3p)[0])\r\n start3p = start + offset3p\r\n end3p = start + offset3p + len(seq3p) - 1\r\n gff_row.append([seqId, '.', 'miRNA', start3p, end3p, '.', strand, '.', f'ID={name3p}'])\r\n except:\r\n pass\r\n\r\n else:\r\n try:\r\n offset5p = len(hairpin.split(seq5p)[0])\r\n end5p = end - offset5p\r\n start5p = end - offset5p - len(seq5p) + 1\r\n gff_row.append([seqId, '.', 'miRNA', start5p, end5p, '.', strand, '.', f'ID={name5p}'])\r\n except:\r\n pass\r\n\r\n try:\r\n offset3p = len(hairpin.split(seq3p)[0])\r\n end3p = end - offset3p\r\n start3p = end - offset3p - len(seq3p) + 1\r\n gff_row.append([seqId, '.', 'miRNA', start3p, end3p, '.', strand, '.', f'ID={name3p}'])\r\n except:\r\n pass\r\n\r\n miRNAs = pd.DataFrame(gff_row, columns=gff3_columns)\r\n\r\n gff3 = gff3.append(miRNAs)\r\n\r\n with open(output, 'w') as file:\r\n file.write(version)\r\n\r\n if fasta_path is not None:\r\n with open(fasta_path, 'a+') as f:\r\n f.write(fasta_file)\r\n\r\n gff3.to_csv(output, index=False, 
header=False, mode=\"a\", sep='\\t')",
"def seqs_from_file(filename, exit_on_err=False, return_qual=False):\n # VALIDATE INPUT\n if not isinstance(filename, str):\n msg = 'Filename has to be a string.'\n if exit_on_err:\n sys.stderr.write('Error: %s\\n'%msg)\n sys.exit(1)\n else: raise IOError(msg)\n if not os.path.exists(filename):\n msg = 'File \"%s\" does not exist.'%filename\n if exit_on_err:\n sys.stderr.write('Error: %s\\n'%msg)\n sys.exit(1)\n else: raise IOError(msg)\n \n # EXTRACT DATA\n with open_(filename,\"rt\") as f:\n query_seq_segments = []\n seq, name, desc, qual = '', '', '', ''\n add_segment = query_seq_segments.append\n for l in f:\n if len(l.strip()) == 0: continue\n #sys.stderr.write(\"%s\\n\"%line)\n fields=l.strip().split()\n if l.startswith(\">\"):\n # FASTA HEADER FOUND\n if query_seq_segments != []:\n # YIELD SEQUENCE AND RESET\n seq = ''.join(query_seq_segments)\n yield (seq, name, desc)\n seq, name, desc = '', '', ''\n del query_seq_segments[:]\n name = fields[0][1:]\n desc = ' '.join(fields[1:])\n \n elif l.startswith(\"@\"):\n # FASTQ HEADER FOUND\n name = fields[0][1:]\n desc = ' '.join(fields[1:])\n try:\n # EXTRACT FASTQ SEQUENCE\n seq = next(f).strip().split()[0]\n # SKIP SECOND HEADER LINE AND QUALITY SCORES\n l = next(f)\n qual = next(f).strip() # Qualities\n except:\n break\n else:\n # YIELD SEQUENCE AND RESET\n if return_qual:\n yield (seq, qual, name, desc)\n else:\n yield (seq, name, desc)\n seq, name, desc, qual = '', '', '', ''\n \n elif len(fields[0])>0:\n # EXTRACT FASTA SEQUENCE\n add_segment(fields[0])\n \n # CHECK FOR LAST FASTA SEQUENCE\n if query_seq_segments != []:\n # YIELD SEQUENCE\n seq = ''.join(query_seq_segments)\n yield (seq, name, desc)",
"def fix_seqname(sname):\r\n # protid is on each line of the FASTA file; splitting doesn't really do anything\r\n # protid = sname.split(' ')\r\n # TK 2020-07-22\r\n # Dictionary for filenames so that we know which CDS file to query for each\r\n # protein ID.\r\n lookups = {\r\n 'AET' : 'Aegilops_tauschii.Aet_v4.0.cds.all.fa',\r\n\t'PNS' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'PNT' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'KQJ' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'KQK' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'Dr' : 'Dioscorea_rotundata.TDr96_F1_Pseudo_Chromosome_v1.0.cds.all.fa',\r\n\t'Et' : 'Eragrostis_tef.ASM97063v1.cds.all.fa',\r\n\t'HORVU' : 'Hordeum_vulgare.IBSC_v2.cds.all.fa',\r\n\t'LPERR' : 'Leersia_perrieri.Lperr_V1.4.cds.all.fa',\r\n\t'GSMUA' : 'Musa_acuminata.ASM31385v1.cds.all.fa',\r\n\t'OBART' : 'Oryza_barthii.O.barthii_v1.cds.all.fa',\r\n\t'ORGLA' : 'Oryza_glaberrima.Oryza_glaberrima_V1.cds.all.fa',\r\n\t'ONIVA': 'Oryza_nivara.Oryza_nivara_v1.0.cds.all.fa',\r\n\t'ORUFI' : 'Oryza_rufipogon.OR_W1943.cds.all.fa',\r\n\t'PVH' : 'Panicum_hallii_fil2.PHallii_v3.1.cds.all.fa',\r\n\t'Sspon' : 'Saccharum_spontaneum.Sspon.HiC_chr_asm.cds.all.fa',\r\n\t'KQL' : 'Setaria_italica.Setaria_italica_v2.0.cds.all.fa',\r\n\t'TraesCS' : 'Triticum_aestivum.IWGSC.cds.all.fa',\r\n\t'Zm' : 'Zea_mays.B73_RefGen_v4.cds.all.fa',\r\n\t'Zlat': 'Zlat_V1.cds.fa',\r\n 'FUN': 'rice.transcripts.fa',\r\n 'Os': 'Oryza_sativa.IRGSP-1.0.cds.all.fa'\r\n }\r\n # Get the filename based on what the sequence starts with.\r\n for id_start, cds_file in lookups.items():\r\n if sname.startswith(id_start):\r\n target_file = cds_file\r\n break\r\n # Return the protein name and CDS target file as a tuple\r\n return (target_file, sname)\r\n\r\n # Make a lookup table to get the species name based on the protein ID.\r\n # lookups = [('Zlat*','Zizania_latifolia'),('FUN*','Zizania_palustris'),('Os*','Oryza_sativa')]\r\n # Initialize an empty species dictionary to assist in connecting protid (gene name) to species name\r\n # species_dict = {}\r\n # # This for loop will populate the species dictionary so that we can get species name keyed on the protid (gene name)\r\n # for i in protid:\r\n # species = lookup(i, lookups)\r\n # return species.encode, i\r\n # species_dict[protid] = species.encode()\r\n # return None\r",
"def main():\n\n (options, args) = parse_options(sys.argv)\n\n iterator = GFFParser.GFFAddingIterator() \n examiner = GFFParser.GFFExaminer()\n\n exon_map = dict()\n\n id_dict = examiner.available_limits(options.anno)['gff_id']\n intron_lists = dict()\n\n ### collect all available sources from gff-file\n source_dict = examiner.available_limits(options.anno)['gff_source_type']\n taken_sources = set()\n #types = ['gene', 'mRNA', 'exon', 'CDS']\n types = ['exon']\n\n ### parse only for exons and let the GFFparser \n ### infer the respective parents (otherwise doubled entries occured)\n ### we sanitize the structure later on anyways\n for key in [source[0] for source in source_dict.keys() if source[1] in types]:\n taken_sources.add(key)\n\n ### try different type, if sources are empty \n if len(taken_sources) == 0:\n types = ['CDS']\n for key in [source[0] for source in source_dict.keys() if source[1] in types]:\n taken_sources.add(key)\n\n ### print taken_sources\n if len(taken_sources) == 0:\n print >> sys.stderr, 'No suitable sources found!'\n sys.exit(-1)\n\n ### only show available sources - if neccessary\n if options.show_sources:\n print 'Parsed file %s\\n' % options.anno\n print 'Following sources are available:\\n'\n for source in taken_sources:\n print source \n print '\\nUse option -s to specify a comma-separated list of sources (-s source1,source2,source3), otherwise all sources are taken'\n sys.exit(0)\n\n if options.sources != '':\n user_sources = set(options.sources.split(','))\n taken_sources = taken_sources.intersection(user_sources)\n if len(taken_sources) == 0:\n print >> sys.stderr, 'The specified sources do not match any of the available sources - Please use option -S to get a list of available sources'\n sys.exit(-1)\n\n if options.verbose:\n print \"take sources %s\" % str(list(taken_sources))\n\n ### build up gff-parsing filter\n gff_sources = []\n for source in taken_sources:\n gff_sources.extend(zip([source] * len(types), types))\n\n ### parse gff-file\n for idx in id_dict.keys():\n print 'parsing chromosome %s' % idx\n if len(gff_sources) > 0:\n trans_dict = iterator.get_all_features(options.anno, {'gff_source_type':gff_sources, 'gff_id':idx})\n else:\n trans_dict = iterator.get_all_features(options.anno, {'gff_id':idx})\n ### since we parse only one chromosome, this loop is evaluated only once\n for chrm in trans_dict.keys():\n ### verify/sanitize the created dictionairy\n fix_structure(trans_dict[chrm])\n intron_lists[chrm] = dict()\n for gene in trans_dict[chrm].features:\n for trans in gene.sub_features:\n if trans.type == 'exon':\n print \"WARNING: Exon on transcript level:\"\n print trans\n print 'will continue\\n'\n continue\n elif len(trans.sub_features) > 1: ### at least two exons for one intron ...\n strand = trans.sub_features[0].strand\n contig_list = [(trans.sub_features[i].location.nofuzzy_start, trans.sub_features[i].location.nofuzzy_end) for i in range(len(trans.sub_features))]\n contig_list.sort(lambda u, v:u[0]-v[0])\n for exon in range(len(contig_list) - 1):\n ### update intron lists\n if contig_list[exon][1] - contig_list[exon + 1][0] == 0:\n continue\n try:\n assert(contig_list[exon][1] < contig_list[exon + 1][0])\n except AssertionError:\n print >> sys.stderr, 'exon_1 %i, exon_2 %i' % (contig_list[exon][1], contig_list[exon + 1][0]) \n print >> sys.stderr, contig_list[exon]\n print >> sys.stderr, contig_list[exon+1]\n print >> sys.stderr, exon\n sys.exit(-1)\n ### for now strand information is only dummy\n intron_lists[chrm][(0, 
contig_list[exon][1], contig_list[exon + 1][0])] = strand\n \n ### update exon map\n for exon in range(len(contig_list)):\n if not exon_map.has_key(chrm):\n exon_map[chrm] = dict()\n\n if not exon_map[chrm].has_key(trans.id):\n exon_map[chrm][trans.id] = dict()\n ### we assume, that an exon cannot occurr twice in the same transcript!\n ### the value in the dict is a binary encoding, if the left/right end is intronic 10 = 2 means, 5' end is intronic\n if len(contig_list) == 1:\n exon_map[chrm][trans.id][contig_list[exon]] = 0 ### 00 -> should never occurr\n elif exon == 0:\n exon_map[chrm][trans.id][contig_list[exon]] = 2 ### 10\n elif exon == len(contig_list) - 1:\n exon_map[chrm][trans.id][contig_list[exon]] = 1 ### 01\n else:\n exon_map[chrm][trans.id][contig_list[exon]] = 3 ### 11 \n\n outfile = open(options.outfile, 'w')\n cPickle.dump(intron_lists, outfile)\n outfile.close()\n \n outfile = open(options.outfile + '.' + 'cov', 'w')\n cPickle.dump(exon_map, outfile)\n outfile.close()",
"def main():\n args = setup_args()\n header_info = extract_header_info_from_probes(args.probe)\n\n for gene in header_info.keys():\n # check there is a folder for gene, else create it\n gene_out_dir = os.path.join(args.output_path, gene)\n if not os.path.exists(gene_out_dir):\n os.mkdir(gene_out_dir)\n\n gene_msa_fname = os.path.join(gene_out_dir, '{}_msa.fa'.format(gene))\n gene_ref = os.path.join(os.path.abspath(args.gene_refs), gene + '.fa')\n generate_msa_for_gene(gene, header_info[gene], gene_ref, gene_msa_fname)",
"def fetch_as_fasta(chrom,start,end,gindex,fname):\n \n # Print the sequence in fasta format.\n header = '>%s:%s-%s' % (chrom, start, end)\n fname.write('%s\\n%s\\n' % (header, gindex[chrom][start:end]))",
"def getFasta(fileGI,fileout = \"gis.fasta\", outfmt = \"fasta\"):\n myGIs = open(fileGI).read().split()\n gilist = [\",\".join(myGIs[i:i+500]) for i in range(0,len(myGIs),500)]\n from Bio import Entrez\n import time\n fout = open(fileout,\"w\")\n Entrez.email = \"[email protected]\"\n for ele in gilist:\n handle = Entrez.efetch(db = \"protein\", id = ele, rettype = outfmt, retmode = \"text\")\n fout.write(handle.read())\n time.sleep(3)\n fout.close()",
"def read_gene_families(gftxt, protfile = None, cdsfile = None, wrkdir = None):\n gene_families = []\n if protfile is None and cdsfile is None:\n logging.info(\"Gene families need to have sequences!\")\n with open(gftxt, 'r') as f:\n for line in f:\n line = line.rstrip()\n x = line.split()\n gf_id = x.pop(0)[:-1]\n gf_genes = x\n gene_families.append(GeneFamily(gf_id=gf_id, gf_members=gf_genes))\n return gene_families\n \n if protfile is not None:\n prot = SeqIO.to_dict(SeqIO.parse(protfile, \"fasta\"))\n\n if cdsfile is not None:\n cds = SeqIO.to_dict(SeqIO.parse(cdsfile, \"fasta\"))\n \n with open(gftxt, 'r') as handle:\n for line in handle:\n line = line.rstrip()\n x = line.split()\n gf_id = x.pop(0)[:-1]\n gf_genes = x\n gf_prot = {}\n gf_cds = {}\n for gid in x:\n if prot[gid][-1:].seq == '*':\n gf_prot[gid] = prot[gid][:-1]\n else:\n gf_prot[gid] =prot[gid]\n if cds[gid][-3:].seq == \"TAA\" or \\\n cds[gid][-3:].seq == \"TAG\" or \\\n cds[gid][-3:].seq == \"TGA\":\n gf_cds[gid] = cds[gid][:-3]\n else:\n gf_cds[gid] = cds[gid]\n gene_families.append(GeneFamily(gf_id = gf_id, gf_members = gf_genes, \n prot_seqs = gf_prot, cds_seqs = gf_cds, wrkdir=wrkdir))\n return gene_families",
"def load_yaafedata(params, \n n_learn_frames=2000,\n use_custom_stft=False):\n\n audio_file_path = getoptions(params, 'location', '/sons/voxforge/data/Learn/')\n # if no number specified, use n_learn_frames\n n_frames = getoptions(params, 'n_frames', n_learn_frames)\n sr = getoptions(params, 'sr', 16000)\n sigma_noise = getoptions(params, 'sigma', 0.0)\n random_seed = getoptions(params, 'shuffle', 1001)\n features = getoptions(params, 'features', [])\n wintime = getoptions(params, 'wintime', 0.032)\n steptime = getoptions(params, 'steptime', 0.008)\n startpoint = getoptions(params, 'startpoint', 0)\n forbid_list = getoptions(params, 'forbidden_names', [])\n mfnpf = getoptions(params, 'frame_num_per_file', 3000)\n# wintime = float(win_size)/float(sr)\n# steptime = float(step_size)/float(sr)\n \n win_size = int(wintime*sr)\n step_size = int(steptime*sr)\n# print wintime, steptime, win_size, step_size\n # apply sub_routine to all the files until a condition is met\n n_frames_reached = 0\n\n all_file_paths = get_filepaths(audio_file_path,\n random_seed,\n forbid_list = forbid_list)\n file_index = 0\n\n specseq = []\n featseq = []\n dataseq = []\n n_files_used = 0\n\n while (n_frames_reached < n_frames):\n file_index = file_index + 1\n filepath = all_file_paths[file_index]\n n_files_used = n_files_used + 1\n\n [loc_magSTFT, loc_Feats, locDatas] = load_data_one_audio_file(\n filepath, sr,\n wintime=wintime,\n steptime=steptime,\n max_frame_num_per_file=mfnpf,\n sigma_noise=sigma_noise,\n startpoint = startpoint,\n features=features)\n# if get_data:\n# [loc_magSTFT, loc_Feats, locDatas] = load_data_one_file_melspec(filepath, sr, sigma_noise, params);\n# Data = [Data , locDatas'];\n# else\n# [loc_magSTFT, loc_Feats, ~] = load_data_one_file_melspec(filepath, sr, sigma_noise, params);\n# end\n if not use_custom_stft:\n specseq.append(loc_magSTFT)\n else:\n specseq.append(np.abs(get_stft(locDatas,\n wsize=win_size,\n tstep=step_size,\n sigma = sigma_noise)).T)\n# print wintime, steptime, win_size, step_size\n# print loc_magSTFT.shape\n# print specseq[-1].shape\n# print locDatas.shape\n featseq.append(loc_Feats)\n dataseq.append(locDatas)\n \n n_frames_reached += min(loc_magSTFT.shape[0], loc_Feats.shape[0])\n print n_frames_reached\n \n Spectrums = np.vstack(specseq)\n Features = np.vstack(featseq)\n Data = np.hstack(dataseq)\n\n n_frames_reached = min(n_frames_reached, n_frames)\n Spectrums = Spectrums[0:n_frames_reached,:]\n Features = Features[0:n_frames_reached,:]\n used_files = all_file_paths[0:n_files_used]\n\n return Features, Spectrums, n_frames_reached, Data, used_files",
"def process_fasta(in_fh, args, cluster_size_re, rna_seq_objs):\n for record in SeqIO.parse(in_fh, 'fasta'):\n sequence = '%s%s%s'.replace('T', 'U') % (\n args.prefix, str(record.seq), args.suffix\n )\n cluster_size = 1\n try:\n cluster_size = cluster_size_re.search(record.description)\n cluster_size = cluster_size.group(1)\n except AttributeError:\n print 'Not able to find cluster size. Setting to 1.'\n if cluster_size is None:\n cluster_size = 1\n\n # find structure\n curr_seq = RNASequence(record.id, cluster_size, sequence)\n if args.run_mfold:\n curr_seq.structure, curr_seq.energy_dict = run_mfold(\n sequence, args\n )\n curr_seq.free_energy = curr_seq.energy_dict['dG']\n else:\n rnafold_out = run_rnafold(sequence, args)\n rnafold_out = rnafold_out.split('\\n')\n try:\n curr_seq.structure, curr_seq.free_energy = (\n rnafold_out[1].split(' (')\n )\n except (ValueError, IndexError):\n print 'Error running RNAfold:\\n%s\\nExiting.' % rnafold_out\n sys.exit(1)\n\n print '%s\\n' % rnafold_out\n try:\n curr_seq.free_energy = abs(\n float(curr_seq.free_energy.replace(')', ''))\n )\n curr_seq.ensemble_free_energy = abs(\n float(rnafold_out[2].split('[')[1].replace(']', ''))\n )\n curr_seq.ensemble_probability = abs(float(\n rnafold_out[4].split(';')[0].replace(\n ' frequency of mfe structure in ensemble ', ''\n )\n ))\n curr_seq.ensemble_diversity = abs(float(\n rnafold_out[4].split(';')[1].replace(\n ' ensemble diversity ', ''\n )\n ))\n except IndexError:\n print (\n 'Error parsing RNAfold output. '\n '(Couldn\\'t find statistics.) Please check '\n 'RNAfold options.'\n )\n sys.exit(1)\n rna_seq_objs.append(curr_seq)",
"def fasta2MSF():\n path = \"./data/\"\n for file in os.listdir(path):\n if file.endswith(\".fa\") or file.endswith(\".fasta\"):\n os.chdir(path)\n try:\n filecore = file.rstrip(\".fa\")\n except:\n filecore = file.rstrip(\".fasta\")\n fileout = filecore + \".msf2\"\n \n seqret = os.system(\"seqret fasta::\" + file + \\\n \" msf::\" + fileout)\n print seqret\n \n outmsf = filecore + \".msf\"\n out = open(outmsf, \"w\")\n op = open(fileout, \"r\")\n msf = op.readlines()\n op.close()\n for line in msf:\n if line[0] == \"\\n\":\n print >> out, line.rstrip(\"\\n\")\n elif line[0] != \"!\" and line[0] != \"/\" and \\\n line[0] != \"\\n\":\n line = line.replace(\".\", \"-\")\n line = line.replace(\"~\", \"-\")\n print >> out, line.rstrip(\"\\n\") \n else:\n print >> out, line.rstrip(\"\\n\")\n out.close()\n \n # remove the comment if you want to remove the\n # original file\n #os.remove(file)\n \n os.remove(fileout)\n os.chdir(\"../\")\n return",
"def readSources(self):\n for sourceCount, sourceElement in enumerate(self.root.findall(\".sources/source\")):\n # shall we just read the UFO here?\n filename = sourceElement.attrib.get('filename')\n # filename is a path relaive to the documentpath. resolve first.\n sourcePath = os.path.abspath(os.path.join(os.path.dirname(self.path), filename))\n sourceName = sourceElement.attrib.get('name')\n if sourceName is None:\n # if the source element has no name attribute\n # (some authoring tools do not need them)\n # then we should make a temporary one. We still need it for reference.\n sourceName = \"temp_master.%d\"%(sourceCount)\n self.reportProgress(\"prep\", 'load', sourcePath)\n if not os.path.exists(sourcePath):\n raise MutatorError(\"Source not found at %s\"%sourcePath)\n sourceObject = self._instantiateFont(sourcePath)\n # read the locations\n sourceLocationObject = None\n sourceLocationObject = self.locationFromElement(sourceElement)\n\n if sourceLocationObject is None:\n raise MutatorError(\"No location defined for source %s\"%sourceName)\n\n # read lib flag\n for libElement in sourceElement.findall('.lib'):\n if libElement.attrib.get('copy') == '1':\n self.libSource = sourceName\n\n # read the groups flag\n for groupsElement in sourceElement.findall('.groups'):\n if groupsElement.attrib.get('copy') == '1':\n self.groupsSource = sourceName\n\n # read the info flag\n for infoElement in sourceElement.findall(\".info\"):\n if infoElement.attrib.get('copy') == '1':\n self.infoSource = sourceName\n if infoElement.attrib.get('mute') == '1':\n self.muted['info'].append(sourceName)\n\n # read the features flag\n for featuresElement in sourceElement.findall(\".features\"):\n if featuresElement.attrib.get('copy') == '1':\n if self.featuresSource is not None:\n self.featuresSource = None\n else:\n self.featuresSource = sourceName\n\n mutedGlyphs = []\n for glyphElement in sourceElement.findall(\".glyph\"):\n glyphName = glyphElement.attrib.get('name')\n if glyphName is None:\n continue\n if glyphElement.attrib.get('mute') == '1':\n if not sourceName in self.muted['glyphs']:\n self.muted['glyphs'][sourceName] = []\n self.muted['glyphs'][sourceName].append(glyphName)\n\n for kerningElement in sourceElement.findall(\".kerning\"):\n if kerningElement.attrib.get('mute') == '1':\n self.muted['kerning'].append(sourceName)\n\n # store\n self.sources[sourceName] = sourceObject, sourceLocationObject\n self.reportProgress(\"prep\", 'done')",
"def some(args):\n from jcvi.utils.cbook import gene_name\n\n p = OptionParser(some.__doc__)\n p.add_option(\n \"--exclude\",\n default=False,\n action=\"store_true\",\n help=\"Output sequences not in the list file\",\n )\n p.add_option(\n \"--no_strip_names\",\n default=False,\n action=\"store_true\",\n help=\"Do not strip alternative splicing (e.g. At5g06540.1 -> At5g06540)\",\n )\n p.add_option(\n \"--uniprot\", default=False, action=\"store_true\", help=\"Header is from uniprot\"\n )\n\n opts, args = p.parse_args(args)\n\n if len(args) != 3:\n sys.exit(p.print_help())\n\n strip_names = not opts.no_strip_names\n fastafile, listfile, outfastafile = args\n outfastahandle = must_open(outfastafile, \"w\")\n qualfile = get_qual(fastafile)\n\n names = set(open(listfile).read().split())\n if qualfile:\n outqualfile = outfastafile + \".qual\"\n outqualhandle = open(outqualfile, \"w\")\n parser = iter_fasta_qual(fastafile, qualfile)\n else:\n parser = SeqIO.parse(fastafile, \"fasta\")\n\n recs = []\n seen = set()\n for rec in parser:\n name = rec.id\n if strip_names:\n name = gene_name(name)\n\n if name in seen: # Only report one instance\n continue\n\n if opts.uniprot:\n name = name.split(\"|\")[-1]\n\n if opts.exclude:\n if name in names:\n continue\n else:\n if name not in names:\n continue\n\n recs.append(rec)\n seen.add(name)\n\n for rec in recs:\n SeqIO.write([rec], outfastahandle, \"fasta\")\n if qualfile:\n SeqIO.write([rec], outqualhandle, \"qual\")\n\n logging.debug(\"A total of %d records written to `%s`\" % (len(recs), outfastafile))",
"def main(options):\n\n ref_fa=Fasta(options[\"ref\"])\n\n out0=gzip.open(options[\"out\"]+\".0.fa.gz\", \"w\")\n out1=gzip.open(options[\"out\"]+\".1.fa.gz\", \"w\")\n out0.write(\">\"+options[\"chrom\"]+\"\\n\")\n out1.write(\">\"+options[\"chrom\"]+\"\\n\")\n\n vcf=gdc.open2(options[\"vcf\"])\n sample_idx=None\n last_pos=0\n for line in vcf:\n if line.startswith(\"##\"):\n continue\n elif line.startswith(\"#\"):\n bits=line.split()\n sample_idx=bits.index(options[\"sample\"])\n else: #data line\n bits=line.split()\n gt=bits[sample_idx]\n pos=int(bits[1])\n if pos==last_pos:\n continue\n ref=bits[3]\n alt=bits[4]\n \n if len(ref)==1 and len(alt)==1 and gt in [\"0|0\", \"1|0\", \"0|1\", \"1|1\"]: #This is a phased biallelic site\n #This is the sequence from the last position to the base before the current position (note that pos is 1-based)\n ref_seq=ref_fa[options[\"chrom\"]][last_pos:(pos-1)].seq\n if options[\"refcheck\"] and ref_fa[options[\"chrom\"]][pos-1].seq!=ref:\n raise Exception(\"Reference mismatcah at pos \"+str(pos))\n \n if gt[0]==\"0\":\n out0.write(ref_seq+ref)\n elif gt[0]==\"1\":\n out0.write(ref_seq+alt)\n else:\n raise Exception(\"Untrapped bad genotype in haplotype 0 at pos\"+str(pos))\n \n if gt[2]==\"0\":\n out1.write(ref_seq+ref)\n elif gt[2]==\"1\":\n out1.write(ref_seq+alt)\n else:\n raise Exception(\"Untrapped bad genotype in haplotype 1 at pos\"+str(pos))\n \n else: #This is either unphased or missing or multiallelic\n out0.write(\"N\"*(pos-last_pos))\n out1.write(\"N\"*(pos-last_pos))\n \n last_pos=pos\n\n #Fill in the reference at the end and terminate with newline. \n tail_seq=ref_fa[options[\"chrom\"]][last_pos:].seq\n out0.write(tail_seq+\"\\n\")\n out1.write(tail_seq+\"\\n\")",
"def find_and_download_files(context):\n\n\n input_path = 'input/'\n if os.path.isdir(input_path):\n log.debug('Path already exists: ' + input_path)\n else:\n log.debug('Creating: ' + input_path)\n os.mkdir(input_path)\n\n fw = context.client\n\n if 'classification_measurement' in context.config:\n class_meas = context.config['classification_measurement'].split()\n else:\n class_meas = ['T1']\n\n # session and acquisition include/exclude lists can come from:\n # project info metadata,\n # subject info metadata, and\n # config options\n # The last one wins (how about getting it from an input file also, eh?)\n ses_exclude_list = None\n ses_include_list = None\n acq_exclude_list = None\n acq_include_list = None\n\n fs = 'freesurfer_longitudinal_'\n where = 'Found in project info'\n # check for exclude/include lists of regexs for sessions in project info\n sel = context.gear_dict['project'].info.get(fs + 'session_excludelist')\n if sel:\n ses_exclude_list = sel.split()\n log.info(where+' '+fs+'session_excludelist: \"'+sel+'\"')\n sil = context.gear_dict['project'].info.get(fs + 'session_includelist')\n if sil:\n ses_include_list = sil.split()\n log.info(where+' '+fs+'session_includelist: \"'+sil+'\"')\n # check for exclude/include lists of regexs for acquisitions in project info\n ael = context.gear_dict['project'].info.get(fs + 'acquisition_excludelist')\n if ael:\n acq_exclude_list = ael.split()\n log.info(where+' '+fs+'acquisition_excludelist: \"'+ael+'\"')\n ail = context.gear_dict['project'].info.get(fs + 'acquisition_includelist')\n if ail:\n acq_include_list = ail.split()\n log.info(where+' '+fs+'acquisition_includelist: \"'+ail+'\"')\n\n where = 'Found in subject info'\n # check for exclude/include lists of regexs for sessions in subject info\n sel = context.gear_dict['subject'].info.get(fs + 'session_excludelist')\n if sel:\n ses_exclude_list = sel.split()\n log.info(where+' '+fs+'session_excludelist: \"'+sel+'\"')\n sil = context.gear_dict['subject'].info.get(fs + 'session_includelist')\n if sil:\n ses_include_list = sil.split()\n log.info(where+' '+fs+'session_includelist: \"'+sil+'\"')\n # check for exclude/include lists of regexs for acquisitions in subject info\n ael = context.gear_dict['subject'].info.get(fs + 'acquisition_excludelist')\n if ael:\n acq_exclude_list = ael.split()\n log.info(where+' '+fs+'acquisition_excludelist: \"'+ael+'\"')\n ail = context.gear_dict['subject'].info.get(fs + 'acquisition_includelist')\n if ail:\n acq_include_list = ail.split()\n log.info(where+' '+fs+'acquisition_includelist: \"'+ail+'\"')\n\n where = 'Found in config'\n # set up exclude/include lists of reegexs for sessions in config\n if 'session_excludelist' in context.config:\n ses_exclude_list = context.config['session_excludelist'].split()\n log.info(where+' session_excludelist: \"'+str(ses_exclude_list)+'\"')\n if 'session_includelist' in context.config:\n ses_include_list = context.config['session_includelist'].split()\n log.info(where+' session_includelist: \"'+str(ses_include_list)+'\"')\n\n # set up exclude/include lists of reegexs for acquisitions in config\n if 'acquisition_excludelist' in context.config:\n acq_exclude_list = context.config['acquisition_excludelist'].split()\n log.info(where+' acquisition_excludelist: \"'+str(acq_exclude_list)+'\"')\n if 'acquisition_includelist' in context.config:\n acq_include_list = context.config['acquisition_includelist'].split()\n log.info(where+' acquisition_includelist: \"'+str(acq_include_list)+'\"')\n\n # go through all sessions, 
acquisitions to find files\n for session in context.gear_dict['subject'].sessions():\n\n lemme_out = False\n if ses_exclude_list:\n for regex in ses_exclude_list:\n if re.search(regex, session.label): # if excluded, skip\n log.info('Session \"' + session.label + '\" matches ' + \\\n 'exclusion regex, skipping it')\n lemme_out = True\n continue\n if lemme_out:\n continue\n\n if ses_include_list:\n match = False\n for regex in ses_include_list:\n if not re.search(regex, session.label):\n match = True\n if match:\n continue # if not included (matches any regex), skip\n else:\n log.info('Session \"' + session.label + '\" matches ' \\\n 'an inclusion regex, keeping it')\n\n for acquisition in fw.get_session_acquisitions(session.id):\n\n lemme_out = False\n if acq_exclude_list:\n for regex in acq_exclude_list:\n if re.search(regex, acquisition.label): # if excluded, skip\n log.info('Acquisition \"' + acquisition.label + \\\n '\" matches exclusion regex, skipping it')\n lemme_out = True\n continue\n if lemme_out:\n continue\n\n if acq_include_list:\n match = False\n for regex in acq_include_list:\n if not re.search(regex, acquisition.label):\n match = True\n if match:\n continue # if not included (matches any regex), skip\n else:\n log.info('Acquisition \"' + acquisition.label + '\" ' + \\\n 'matches an inclusion regex, keeping it')\n\n for afile in acquisition.files:\n\n # Scan must be nifti\n if afile.type == 'nifti':\n\n found_one = False\n for cm in class_meas:\n if 'Measurement' in afile.classification:\n if cm in afile.classification['Measurement']:\n found_one = True\n log.info('Found ' + cm + ' file')\n\n if found_one:\n download_it(fw, acquisition, afile.name, input_path)\n context.gear_dict['visits'].append(\n make_file_name_safe(session.label, '_'))\n else:\n log.info('Ignoring ' + afile.name)",
"def build_gff(annotations, faa):\n with open(faa, \"rt\") as faa_file:\n for line in faa_file:\n if \">\" not in line:\n continue\n\n # each fasta is suffixed on the annotated faa if a prefix _INT (_1 .. _n)\n contig_name, start, end, strand = parse_fasta_header(line)\n if None in (contig_name, start, end, strand):\n print(\n \"It was not possible to parse the \" + line, end=\"\", file=sys.stderr\n )\n continue\n\n clean_name = Annotation.clean_seq_name(contig_name)\n\n row_annotations = Annotation.merge(\n [ann.get() for ann in annotations.get(contig_name, [])]\n )\n\n ann_string = \";\".join(\n [\n \"{}={}\".format(k, \",\".join(v).strip())\n for k, v in row_annotations.items()\n ]\n )\n\n eggNOGScore = \"\".join(row_annotations.get(\"eggNOG_score\", []))\n\n if len(ann_string):\n yield [\n clean_name,\n \"eggNOG-v2\",\n \"CDS\",\n start,\n end,\n eggNOGScore or \".\",\n \"+\" if strand == \"1\" else \"-\",\n \".\",\n \"ID=\" + clean_name + \";\" + ann_string,\n ]",
"def fusion(first_fh, fused_fh, compare_file):\r\n # initialize\r\n ha_seq = \"\"\r\n ha_header = \"\"\r\n # parse through file\r\n for line in first_fh:\r\n # if a > is found assume it is header\r\n if line[0] == \">\":\r\n # ha_header = line\r\n # if the header is found (length > 0)\r\n if len(ha_header) > 0:\r\n # pull needed information from header to make new one\r\n matches = re.findall(\"(Strain Name:[AB]\\/[\\/A-Za-z 0-9()\\\\-_'.]+)\", ha_header)\r\n subtype_match = re.findall(\"(Subtype:[A-Za-z0-9]+)\", ha_header)\r\n organ = re.findall(\"(Organism:[\\/A-Za-z 0-9()\\\\-_'.]+)\", ha_header)\r\n ha_header = \">\" + organ[0] + \"|\" + matches[0] + \"|\" + subtype_match[0]\r\n # print(ha_header)\r\n # Call find_match function, input the file to search and the new header created.\r\n na_header, na_seq = find_match(compare_file, ha_header)\r\n # if return is equal then write to new file with two sequences fused\r\n if na_header == ha_header:\r\n write_data_2(fused_fh, ha_header, ha_seq.strip() + \"\\n\" + na_seq.strip())\r\n # reset variables\r\n ha_header = line\r\n ha_seq = \"\"\r\n\r\n else:\r\n # if it is part of the sequence\r\n ha_seq = ha_seq + line\r\n\r\n # To return/write the last entries in the files, won't get written in loop\r\n matches = re.findall(\"(Strain Name:[AB]\\/[\\/A-Za-z 0-9()\\\\-_'.]+)\", ha_header)\r\n subtype_match = re.findall(\"(Subtype:[A-Za-z0-9]+)\", ha_header)\r\n organ = re.findall(\"(Organism:[\\/A-Za-z 0-9()\\\\-_'.]+)\", ha_header)\r\n ha_header = \">\" + organ[0] + \"|\" + matches[0] + \"|\" + subtype_match[0]\r\n na_header, na_seq = find_match(compare_file, ha_header)\r\n if na_header == ha_header:\r\n # print(\"matches2\")\r\n # print(ha_header)\r\n write_data_2(fused_fh, ha_header, ha_seq.strip() + \"\\n\" + na_seq.strip())\r\n\r\n # Close Files\r\n first_fh.close()\r\n fused_fh.close()",
"def parse_facs_files():\n\n #Load parser settings\n parser_settings = getattr(settings,'FACS_PARSER_SETTINGS')\n\n files_to_parse = [parser_settings['facs_source_directory']+f for f in os.listdir(parser_settings['facs_source_directory']) if '.exp' in f]\n\n for filename in files_to_parse: \n\n #Compute MD5 hash\n facs_file = file(filename,'rbU')\n md5hash = hashlib.md5(facs_file.read()).hexdigest()\n facs_file.close()\n \n #Skip file if previously parsed.\n if FacsFile.objects.filter(original_filename=filename,md5hash=md5hash):\n print 'Skipping ', filename\n continue\n\n #Open file, remove null bytes and prepare csv reader\n facs_file = file(filename, 'rU')\n csv_reader = csv.reader((x.replace('\\0', '') for x in facs_file),dialect=csv.excel_tab)\n\n #Reader header\n csv_header = csv_reader.next()\n facs_file_results = []\n\n #Parse the file\n for csv_row in csv_reader:\n if csv_row[0]:\n facs_file_results.append(dict(zip(csv_header,csv_row)))\n\n #Close the file\n facs_file.close()\n\n #Save the information to database and archive file\n random_ints = ''.join([str(random.randint(0,9)) for n in range(10)])\n archive_filename = parser_settings['facs_archive_directory'] + filename.split('/')[-1][:-4].split('_')[0] + '_' + random_ints + '.exp'\n shutil.move(filename, archive_filename)\n\n facs_file = FacsFile(\n original_filename = filename,\n md5hash = md5hash,\n archive_filename = archive_filename,\n )\n facs_file.save()\n\n #Remove empty elements\n for result in facs_file_results:\n for key, data in result.items():\n if data == '.' or not(data):\n del result[key]\n\n #Cache test code and interface mappings\n test_codes = []\n for testcode_mapping in TestCodeMapping.objects.filter(interface_name=parser_settings['testcode_interface_name']):\n test_code = testcode_mapping.code\n code = test_code.code\n code_mapping = testcode_mapping.code_mapping\n\n test_codes.append((code, code_mapping, test_code))\n\n #Add results to database\n for result in facs_file_results:\n\n #Parse result date\n result_date = dateutil.parser.parse(result[parser_settings['result_datetime']])\n result_error_code = getattr(result, parser_settings['error_codes'], '')\n result_identifier = result[parser_settings['sample_identifier']]\n result_cytometer = result[parser_settings['cytometer_serial']]\n\n #Create the dictionnary of result items.\n new_result_item_dict = {}\n for test_code, facs_file_column, test_code_object in test_codes:\n new_result_item_dict[test_code] = ResultItem(\n test_code = test_code_object,\n result_item_value = result[facs_file_column],\n error_code = result_error_code,\n result_item_datetime = result_date,\n )\n\n #Search for possible duplicate result\n is_duplicate = False\n for possible_duplicate in FacsResult.objects.filter(result_identifier=result_identifier):\n if possible_duplicate.get_resultitem_dict() == new_result_item_dict:\n is_duplicate = True\n break\n\n #Save result and result item to data if it is not a duplicate\n if not is_duplicate:\n \n new_result = FacsResult(\n result_identifier=result_identifier,\n result_datetime=result_date,\n origin_facs_file=facs_file,\n cytometer_serial_number=result_cytometer,\n )\n \n new_result.save()\n \n #Add the reference to the result for each item and save it to database.\n for item in new_result_item_dict.values():\n item.result = new_result\n item.save()\n\n new_result.link_to_requisition()",
"def gff2FA(annotation, sequence, windows, output):\n df_gff = pd.read_csv(annotation, index_col=False, sep='\\t', header=None, comment=\"#\")\n df_gff.columns = ['seqname', 'source', 'feature', 'start', 'end', 'score', 'strand', 'frame', 'attribute']\n fasta_seq = SeqIO.parse(sequence, 'fasta')\n buffer_seqs = []\n cont = 0\n for record in fasta_seq:\n print(record.id)\n dff_extract = df_gff[df_gff.seqname == record.id]\n for key,val in dff_extract.iterrows():\n clean_seq = ''.join(str(record.seq).splitlines())\n if int(val.start) - windows < 0:\n start = 0\n else:\n start = int(val.start) - windows\n if int(val.end) + windows > len(clean_seq):\n end = len(clean_seq)\n else:\n end = int(val.end) + windows\n new_seq = clean_seq[start:end]\n att = val.attribute\n id = record.id + '_' + str(start) + '_' + str(end)\n desc = \"seq_id:\" + str(record.id)\n desc += \" feature_start:\" + str(val.start)\n desc += \" feature_end:\" + str(val.end)\n desc += \" genome_start:\" + str(start)\n desc += \" genome_end:\" + str(end)\n desc += \" feature:\" + str(val.feature)\n desc += \" attributes:\" + val.attribute\n seq = SeqRecord(Seq(new_seq), id=id, description=desc)\n buffer_seqs.append(seq)\n cont += 1\n if output:\n print('Saving...')\n SeqIO.write(buffer_seqs, output, \"fasta\")\n else:\n return buffer_seqs",
"def main():\n ref_seq = {}\n ent_spe_sero = {}\n tag_dict = {\"Contigs_with_VP1\":\"contigs\", \"P1_sequences\":\"p1\",\n \"VP1_sequences\":\"vp1\", \"5UTR_sequences\":\"5utr\", \"3D_sequences\":\"3d\"}\n args = get_arguments()\n # Load query elements\n print(\"Load resume file\")\n (query_dict, classify_list,\n classify_specie_list, serotype_list) = get_query(args.resume_file,\n args.tag,\n args.incomplete)\n print(\"{} descriptions loaded\".format(len(query_dict)))\n # Load specie association\n if args.ent_serotype_file and args.template_seq_file:\n # Load enterovirus serotype\n print(\"Load enterovirus serotype association\")\n ent_spe_sero = load_spe_sero(args.ent_serotype_file)\n # Load template sequence\n print(\"Load template sequence\")\n ref_seq = get_template_sequence(args.template_seq_file, ent_spe_sero)\n # Grab query sequence in the database\n print(\"Load database sequence\")\n sequence_data = get_sequence(query_dict, args.fasta_file)\n print(\"{} sequences loaded\".format(len(sequence_data)))\n # Write the new fasta file\n print(\"Write the new fasta\")\n write_sequence(args.results, sequence_data, query_dict, classify_list,\n tag_dict[args.tag], ref_seq, ent_spe_sero)\n #print(save_association)\n print(\"Write the itol label\")\n write_itol_label(args.itol_dir, sequence_data, query_dict, classify_list,\n tag_dict[args.tag])\n print(\"Write the itol tree color\")\n write_itol_tree_color(args.itol_dir, sequence_data, query_dict, classify_specie_list, serotype_list,\n tag_dict[args.tag])\n print(\"Done\")",
"def get_sequences(data_path, gene):\n \n sequence_file = os.path.join(data_path, gene + \".fasta\")\n try:\n sequences_gene = sequence.Sequence.create(file = sequence_file, input_format = 'fasta')\n except FileNotFoundError:\n print(\"Did not found {} in {}.\".format(gene,data_path))\n sequences_gene = \"Did not found {} in {}.\".format(gene,data_path)\n except:\n print(\"Unexpected Error while trying to get the sequence from {}.\".format(sequence_file))\n sequences_gene = \"Unexpected Error while trying to get the sequence from {}.\".format(sequence_file)\n # print(\"sequences_gene\", sequences_gene)\n return sequences_gene",
"def discoverFromVCF(cls, inputFname, outputFname, refFastaFname=None, VCFOutputType=2, \\\n\t\t\t\t\tminMinorAlleleCoverage=1/4., maxMinorAlleleCoverage=3/4.,\\\n\t\t\t\t\tmaxNoOfReads=2., minNoOfReads=1/4., \\\n\t\t\t\t\tmaxNoOfReadsForGenotypingError=1, maxMajorAlleleCoverage=7/8., maxNoOfReadsForAllSamples=1000,\\\n\t\t\t\t\tnt_set = set(['a','c','g','t','A','C','G','T']), isqID2coverage=None, defaultCoverage=10, \\\n\t\t\t\t\toutputDelimiter='\\t',\\\n\t\t\t\t\treport=0, site_type=1):\n\t\timport csv\n\t\tfrom pymodule.utils import runLocalCommand, getColName2IndexFromHeader\n\t\tsys.stderr.write(\"Looking for heterozygous SNPs in %s (%s<=MAC<=%s).\\n\"%(os.path.basename(inputFname), \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tminMinorAlleleCoverage, maxMinorAlleleCoverage))\n\t\treader =csv.reader(open(inputFname), delimiter='\\t')\n\t\t\n\t\t\n\t\tread_group2col_index = {'ref':0}\t#ref is at column 0. \"ref\" must not be equal to any read_group.\n\t\tread_group2coverage = {}\t#2011-9-2\n\t\tlocus_id2row_index = {}\n\t\tdata_matrix = []\n\t\t\n\t\ttid2refName = {}\t#dictionary storing the target references which have SNP calls\n\t\trefNameSet = set()\n\t\t\"\"\"\n\t\twriter = csv.writer(open(outputFname, 'w'), delimiter='\\t')\n\t\theader = ['sample', 'snp_id', 'chr', 'pos', 'qual', 'DP', 'minDP4', 'DP4_ratio', 'MQ']\n\t\tmoreHeader = ['GQ', 'GL', 'SB', 'QD', 'sndHighestGL', 'deltaGL']\n\t\t#['AF', 'AC','AN', 'Dels', 'HRun', 'HaplotypeScore','MQ0', 'QD']\t#2011-3-4 useless\n\t\tif VCFOutputType==2:\n\t\t\theader += moreHeader\n\t\tchr_pure_number_pattern = re.compile(r'[a-z_A-Z]+(\\d+)')\n\t\tchr_number_pattern = re.compile(r'chr(\\d+)')\n\t\t\"\"\"\n\t\t\n\t\tindividual_name2col_index = None\n\t\tcol_name2index = None\n\t\tcounter = 0\n\t\treal_counter = 0\n\t\t\n\t\t\n\t\tfor row in reader:\n\t\t\tif row[0] =='#CHROM':\n\t\t\t\trow[0] = 'CHROM'\t#discard the #\n\t\t\t\theader = row\n\t\t\t\tcol_name2index = getColName2IndexFromHeader(header, skipEmptyColumn=True)\n\t\t\t\tindividual_name2col_index = cls.getIndividual2ColIndex(header, col_name2index)\n\t\t\t\tcontinue\n\t\t\telif row[0][0]=='#':\t#2011-3-4\n\t\t\t\tcontinue\n\t\t\t\"\"\"\n\t\t\tif chr_number_pattern.search(row[0]):\n\t\t\t\tchr = chr_number_pattern.search(row[0]).group(1)\n\t\t\telif chr_pure_number_pattern.search(row[0]):\n\t\t\t\tchr = chr_pure_number_pattern.search(row[0]).group(1)\n\t\t\telse:\n\t\t\t\tsys.stderr.write(\"Couldn't parse the chromosome number/character from %s.\\n Exit.\\n\"%(row[0]))\n\t\t\t\tsys.exit(4)\n\t\t\t\"\"\"\n\t\t\tchr = row[0]\n\t\t\trefNameSet.add(chr)\n\t\t\t\n\t\t\tpos = row[1]\n\t\t\tquality = row[5]\n\t\t\t\n\t\t\toutputHet= False\n\t\t\t\n\t\t\tinfo = row[7]\n\t\t\tinfo_ls = info.split(';')\n\t\t\tinfo_tag2value = {}\n\t\t\tfor info in info_ls:\n\t\t\t\ttry:\n\t\t\t\t\ttag, value = info.split('=')\n\t\t\t\texcept:\n\t\t\t\t\t#sys.stderr.write(\"Error in splitting %s by =.\\n\"%info)\t###Error in splitting DS by =.\n\t\t\t\t\tcontinue\n\t\t\t\tinfo_tag2value[tag] = value\n\t\t\t\n\t\t\tcurrent_locus = '%s_%s'%(chr, pos)\n\t\t\trefBase = row[col_name2index['REF']]\n\t\t\taltBase = row[col_name2index['ALT']]\n\t\t\tif VCFOutputType==2:\t#2011-3-4 GATK\n\t\t\t\tformat_column = row[col_name2index['FORMAT']]\n\t\t\t\tformat_column_ls = format_column.split(':')\n\t\t\t\tformat_column_name2index = getColName2IndexFromHeader(format_column_ls)\n\t\t\t\tdata_row = ['NA']*(len(individual_name2col_index)+1)\t# extra 1 for the ref\n\t\t\t\tallele2count = {}\n\t\t\t\tfor individual_name, 
individual_col_index in individual_name2col_index.iteritems():\n\t\t\t\t\tread_group = individual_name\n\t\t\t\t\tif read_group not in read_group2col_index:\n\t\t\t\t\t\tread_group2col_index[read_group] = len(read_group2col_index)\n\t\t\t\t\t\t#2011-9-2\n\t\t\t\t\t\tif isqID2coverage:\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\tisqID = read_group.split('_')[1]\n\t\t\t\t\t\t\t\tisqID = int(isqID)\n\t\t\t\t\t\t\t\tcoverage = isqID2coverage.get(isqID, defaultCoverage)\n\t\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\t\tsys.stderr.write('Except type: %s\\n'%repr(sys.exc_info()))\n\t\t\t\t\t\t\t\timport traceback\n\t\t\t\t\t\t\t\ttraceback.print_exc()\n\t\t\t\t\t\t\t\tsys.stderr.write(\"Coverage for %s not available. use default=%s.\\n\"%(read_group, defaultCoverage))\n\t\t\t\t\t\t\t\tcoverage = defaultCoverage\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tcoverage = defaultCoverage\n\t\t\t\t\t\tread_group2coverage[read_group] = coverage\n\t\t\t\t\t\n\t\t\t\t\tcoverage = read_group2coverage[read_group]\n\t\t\t\t\tgenotype_data = row[individual_col_index]\n\t\t\t\t\tgenotype_data_ls = genotype_data.split(':')\n\t\t\t\t\tgenotype_call_index = format_column_name2index.get('GT')\n\t\t\t\t\tgenotype_quality_index = format_column_name2index.get('GQ')\n\t\t\t\t\tif genotype_quality_index is None:\n\t\t\t\t\t\tgenotype_quality_index = format_column_name2index.get('DP')\n\t\t\t\t\tdepth_index = format_column_name2index.get(\"DP\")\n\t\t\t\t\t#GL_index = format_column_name2index.get('GL')\n\t\t\t\t\tif len(genotype_data_ls)<len(format_column_name2index):\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif depth_index is None or genotype_call_index is None:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t#genotype_quality = genotype_data_ls[genotype_quality_index]\n\t\t\t\t\tgenotype_call = genotype_data_ls[genotype_call_index]\n\t\t\t\t\tdepth = int(genotype_data_ls[depth_index])\n\t\t\t\t\tif depth>maxNoOfReads*coverage or depth<minNoOfReads*coverage:\t#2011-3-29 skip. 
coverage too high or too low\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tallele = 'NA'\n\t\t\t\t\tif genotype_call=='0/1' or genotype_call =='1/0':\t#heterozygous, the latter notation is never used though.\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tGL_list = genotype_data_ls[GL_index]\n\t\t\t\t\t\tGL_list = GL_list.split(',')\n\t\t\t\t\t\tGL_list = map(float, GL_list)\n\t\t\t\t\t\tGL = GL_list[1]\n\t\t\t\t\t\tsndHighestGL = max([GL_list[0], GL_list[2]])\n\t\t\t\t\t\tdeltaGL = GL-sndHighestGL\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tAD = genotype_data_ls[format_column_name2index.get('AD')]\n\t\t\t\t\t\tAD = map(int, AD.split(','))\n\t\t\t\t\t\tminorAlleleCoverage = min(AD)\n\t\t\t\t\t\tmajorAlleleCoverage = max(AD)\n\t\t\t\t\t\t\n\t\t\t\t\t\tif minorAlleleCoverage<=maxMinorAlleleCoverage*coverage and minorAlleleCoverage>=minMinorAlleleCoverage*coverage \\\n\t\t\t\t\t\t\t\tand majorAlleleCoverage<=maxMajorAlleleCoverage*coverage:\n\t\t\t\t\t\t\tDP4_ratio = float(AD[0])/AD[1]\n\t\t\t\t\t\t\tallele = '%s%s'%(refBase, altBase)\n\t\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\t\tdata_row = [individual_name, 'chr%s:%s'%(chr, pos), chr, pos, quality, \\\n\t\t\t\t\t\t\t\t\tdepth, minorAlleleCoverage, DP4_ratio,\\\n\t\t\t\t\t\t\t\t\tinfo_tag2value.get('MQ'), genotype_quality, GL,\\\n\t\t\t\t\t\t\t\t\tinfo_tag2value.get('SB'), info_tag2value.get('QD'), sndHighestGL, deltaGL]\n\t\t\t\t\t\t\t#for i in range(3, len(moreHeader)):\n\t\t\t\t\t\t\t#\tinfo_tag = moreHeader[i]\n\t\t\t\t\t\t\t#\tdata_row.append(info_tag2value.get(info_tag))\n\t\t\t\t\t\t\twriter.writerow(data_row)\n\t\t\t\t\t\t\t\"\"\"\n\t\t\t\t\telif genotype_call=='./.':\t#missing\n\t\t\t\t\t\tcontinue\n\t\t\t\t\telif genotype_call =='1/1':\n\t\t\t\t\t\tallele = '%s%s'%(altBase, altBase)\n\t\t\t\t\telif genotype_call =='0/0':\n\t\t\t\t\t\tallele = '%s%s'%(refBase, refBase)\n\t\t\t\t\tcol_index = read_group2col_index.get(read_group)\n\t\t\t\t\tdata_row[col_index] = allele\n\t\t\t\t\tif allele!='NA':\n\t\t\t\t\t\tif allele not in allele2count:\n\t\t\t\t\t\t\tallele2count[allele] = 0\n\t\t\t\t\t\tallele2count[allele] += 1\n\t\t\t\t\n\t\t\t\tif len(allele2count)>site_type-1:\t#whether polymorphic across samples or all sites in vcf\n\t\t\t\t\treal_counter += 1\n\t\t\t\t\tlocus_id2row_index[current_locus] = len(locus_id2row_index)\n\t\t\t\t\tdata_matrix.append(data_row)\n\t\t\t\"\"\"\n\t\t\telif VCFOutputType==1:\t#samtools. 
2011-7-20 outdated.\n\t\t\t\tsample_id = row[8]\n\t\t\t\tfor tag in info_tag2value.keys():\n\t\t\t\t\tvalue = info_tag2value.get(tag)\n\t\t\t\t\tif tag=='DP4':\n\t\t\t\t\t\ttag = 'DP4_ratio'\n\t\t\t\t\t\tvalue = value.split(',')\n\t\t\t\t\t\tvalue = map(int, value)\n\t\t\t\t\t\tno_of_ref_allele = sum(value[0:2])\n\t\t\t\t\t\tno_of_non_ref_allele = sum(value[2:])\n\t\t\t\t\t\tMAC = min(no_of_ref_allele, no_of_non_ref_allele)\n\t\t\t\t\t\tif MAC<=maxMinorAlleleCoverage and MAC>=minMinorAlleleCoverage:\n\t\t\t\t\t\t\toutputHet = True\n\t\t\t\t\t\t\tvalue = float(no_of_ref_allele)/no_of_non_ref_allele\n\t\t\t\t\t\t\tinfo_tag2value['minDP4'] = min(no_of_ref_allele, no_of_non_ref_allele)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tvalue = None\n\t\t\t\t\t\tinfo_tag2value[tag] = value\n\t\t\t\tif outputHet:\n\t\t\t\t\treal_counter += 1\n\t\t\t\t\toutput_row = [sample_id, 'chr%s:%s'%(chr, pos), chr, pos, quality, info_tag2value.get('DP'), \\\n\t\t\t\t\t\t\t\tinfo_tag2value.get('minDP4'), info_tag2value.get('DP4_ratio'), info_tag2value.get('MQ')]\n\t\t\t\t\twriter.writerow(output_row)\n\t\t\t\"\"\"\n\t\t\tcounter += 1\n\t\t\tif counter%2000==0 and report:\n\t\t\t\tsys.stderr.write(\"%s\\t%s\\t%s\"%(\"\\x08\"*80, counter, real_counter))\n\t\tdel reader\n\t\t\n\t\tcls.outputCallMatrix(data_matrix, refFastaFname, outputFname=outputFname, refNameSet=refNameSet, \\\n\t\t\t\t\tread_group2col_index=read_group2col_index, \\\n\t\t\t\t\tlocus_id2row_index=locus_id2row_index, outputDelimiter=outputDelimiter)\n\t\t\n\t\tsys.stderr.write(\"%s\\t%s\\t%s.\\n\"%(\"\\x08\"*80, counter, real_counter))",
"def read_fasta(src, remove_gaps=False):\n file_obj = None\n if isinstance(src, str):\n try:\n file_obj = open(src, \"r\")\n except IOError:\n print((\"The file `%s` does not exist, exiting gracefully\" % src))\n elif isinstance(src, filetypes):\n file_obj = src\n else:\n raise TypeError('FASTA reader cannot recognize the source of %s, %s, %s' % (src,type(src),isinstance(src, filetypes)))\n name = None\n seq_list = list()\n for line_number, i in enumerate(file_obj):\n if i.startswith('>'):\n if name:\n if remove_gaps:\n yield name, ''.join(seq_list).replace('-', '')\n else:\n yield name, ''.join(seq_list)\n seq_list = list()\n name = i[1:].strip()\n else:\n #seq = ''.join(i.strip().upper().split())\n seq = ''.join(i.strip().split())\n #if not is_sequence_legal(seq):\n # raise Exception(\"Error: illegal characeters in sequence at line %d\" % line_number)\n seq_list.append(seq)\n if name:\n if remove_gaps:\n yield name, ''.join(seq_list).replace('-', '')\n else:\n yield name, ''.join(seq_list)\n if isinstance(src, str):\n file_obj.close()",
"def test_prep_sffs_in_dir(self):\r\n prep_sffs_in_dir(self.sff_dir, self.sff_dir, make_flowgram=True)\r\n prep_sffs_in_dir(self.gz_sff_dir, self.gz_sff_dir, make_flowgram=True)\r\n\r\n fna_fp = os.path.join(self.sff_dir, 'test.fna')\r\n fna_gz_fp = os.path.join(self.gz_sff_dir, 'test_gz.fna')\r\n self.assertEqual(open(fna_fp).read(), fna_txt)\r\n self.assertEqual(open(fna_gz_fp).read(), fna_txt)\r\n\r\n qual_fp = os.path.join(self.sff_dir, 'test.qual')\r\n qual_gz_fp = os.path.join(self.gz_sff_dir, 'test_gz.qual')\r\n self.assertEqual(open(qual_fp).read(), qual_txt)\r\n self.assertEqual(open(qual_gz_fp).read(), qual_txt)\r\n\r\n flow_fp = os.path.join(self.sff_dir, 'test.txt')\r\n flow_gz_fp = os.path.join(self.gz_sff_dir, 'test_gz.txt')\r\n self.assertEqual(open(flow_fp).read(), flow_txt)\r\n self.assertEqual(open(flow_gz_fp).read(), flow_txt)",
"def create_pfam_seq_batches(family_accessions,\n batch_size,\n test=False,\n samples=None,\n epochs=1,\n drop_remainder=False,\n buffer_size=None,\n shuffle_seed=0,\n sample_random_state=0,\n data_partitions_dirpath='random_split/',\n gcs_bucket='neuralblast_public',\n as_numpy=False):\n\n pfam_df = create_pfam_df(family_accessions,\n test=test,\n samples=samples,\n random_state=sample_random_state,\n data_partitions_dirpath=data_partitions_dirpath,\n gcs_bucket=gcs_bucket)\n\n pfam_batches = create_data_iterator(df=pfam_df,\n input_col='one_hot_inds',\n output_col='index',\n batch_size=batch_size,\n epochs=epochs,\n buffer_size=buffer_size,\n seed=shuffle_seed,\n drop_remainder=drop_remainder,\n add_outputs=False,\n as_numpy=as_numpy)\n\n return pfam_batches",
"def generate_gff( mapfile, funtax_orf_file ):\n annotation2assembly_map = pd.read_table(mapfile,\n names=['annotation','assembly','length'],\n index_col='annotation')\n funtax_gff = pd.read_table( funtax_orf_file.name, engine='python', encoding='ISO-8859-1', quoting=3)\n funtax_gff['seqid'] = funtax_gff.join(annotation2assembly_map, on='Contig_Name')['assembly']\n funtax_gff['source'] = 'Prodigal_v2.00'\n funtax_gff['type'] = 'CDS'\n funtax_gff['score'] = 100.0\n funtax_gff['phase'] = 0\n funtax_gff['attributes'] = funtax_gff['ORF_ID'].str.replace(r'(.*)', r'ID=\\1;')\n return funtax_gff[['seqid','source', 'type','start', 'end', 'score', 'strand','phase','attributes']]",
"def __main__():\n\n args = parse_command_line(sys.argv)\n\n identifiers = []\n if args.input and args.column:\n [\n identifiers.append(line.split(\"\\t\")[args.column - 1].strip())\n for line in open(args.input, \"r\")\n ]\n elif args.text:\n identifiers = [a.strip() for a in args.text.split() if a.strip()]\n\n fetch_fasta(identifiers, args)"
]
| [
"0.682649",
"0.5828559",
"0.5778012",
"0.5694929",
"0.5649731",
"0.56244236",
"0.5572279",
"0.5545438",
"0.55044687",
"0.5495895",
"0.53976244",
"0.53522515",
"0.52965266",
"0.52827317",
"0.5257475",
"0.5205217",
"0.5178159",
"0.5138306",
"0.51249355",
"0.51217663",
"0.51090467",
"0.5095201",
"0.50836927",
"0.5071274",
"0.5064007",
"0.50469553",
"0.50433064",
"0.5042698",
"0.5024368",
"0.4982982"
]
| 0.6929186 | 0 |
Uses esl-sfetch to generate family-specific fasta files from seq_file, which is provided as the source. Works on a single family identified by rfam_acc. Files are generated in compressed .fa.gz format | def generate_fasta_single(seq_file, rfam_acc, out_dir):
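"""
Export the sequences of the family given by rfam_acc from seq_file into out_dir
as a gzip-compressed fasta file (<rfam_acc>.fa.gz). Each significant full_region
hit is fetched with esl-sfetch; sequences that fail validation are logged to
<rfam_acc>.log instead of being written out.
"""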
sequence = ''
fp_out = None
seq_bits = None
# per-family log for sequences that could not be exported
log_file = os.path.join(out_dir, rfam_acc + ".log")
logging.basicConfig(
filename=log_file, filemode='w', level=logging.INFO)
# connect to db
cnx = RfamDB.connect()
# get a new buffered cursor
cursor = cnx.cursor(raw=True)
# fetch sequence accessions for specific family - significant only!!
query = ("SELECT fr.rfam_acc, fr.rfamseq_acc, fr.seq_start, fr.seq_end, rf.description\n"
"FROM full_region fr, rfamseq rf\n"
"WHERE fr.rfamseq_acc=rf.rfamseq_acc\n"
"AND fr.is_significant=1\n"
"AND fr.rfam_acc=\'%s\'") % (rfam_acc)
# execute the query
cursor.execute(query)
# open a new fasta output file
fp_out = gzip.open(
os.path.join(out_dir, str(rfam_acc) + ".fa.gz"), 'w')
for region in cursor:
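# fetch the seq_start-seq_end subsequence of this accession from seq_file via esl-sfetch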
cmd = "esl-sfetch -c %s/%s %s %s" % (str(region[START]), str(region[END]),
seq_file, str(region[SEQ_ACC]))
proc = subprocess.Popen(
cmd, shell=True, stdout=subprocess.PIPE)
seq = proc.communicate()[0]
# drop the fasta header line returned by esl-sfetch and join the sequence lines
seq_bits = seq.split('\n')[1:]
sequence = ''.join(seq_bits)
if sequence != '' and seq_validator(sequence) is True:
# write header
fp_out.write(">%s/%s-%s %s\n" % (str(region[SEQ_ACC]),
str(region[START]),
str(region[END]),
str(region[DESC])))
# write sequence
fp_out.write(sequence + '\n')
else:
# logging sequences that have not been exported
logging.info(str(region[SEQ_ACC]))
# close the output file
fp_out.close()
# disconnect from DB
cursor.close()
RfamDB.disconnect(cnx) | {
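# Example usage (a minimal sketch; the paths and accession below are hypothetical
# and assume seq_file has already been indexed for esl-sfetch):
#   generate_fasta_single("/path/to/Rfam.fa", "RF00001", "/path/to/out_dir")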
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_fasta(seq_file, out_dir):\n\n LOGGER.info(\"Generating fasta file\", seq_file)\n\n sequence = ''\n fp_out = None\n seq_bits = None\n\n # logging sequences not exported\n # rename this to family log\n log_file = os.path.join(out_dir, \"missing_seqs.log\")\n logging.basicConfig(filename=log_file, filemode='w', level=logging.INFO)\n\n cnx = RfamDB.connect()\n cursor = cnx.cursor(raw=True)\n\n # fetch clan specific family full_region data and sequence description\n query = (\"SELECT fr.rfam_acc, fr.rfamseq_acc, fr.seq_start, fr.seq_end, rf.description\\n\"\n \"FROM full_region fr, rfamseq rf\\n\"\n \"WHERE fr.rfamseq_acc=rf.rfamseq_acc\\n\"\n \"AND fr.is_significant=1\\n\"\n \"ORDER BY fr.rfam_acc\")\n\n cursor.execute(query)\n\n for region in cursor:\n\n # new family\n if str(region[RFAM_ACC]) != rfam_acc:\n # check if there's no open file\n if fp_out is not None:\n fp_out.close()\n\n # open new fasta file\n fp_out = gzip.open(\n os.path.join(out_dir, str(region[RFAM_ACC]) + \".fa.gz\"), 'w')\n\n rfam_acc = region[RFAM_ACC]\n\n cmd = \"esl-sfetch -c %s/%s %s %s\" % (str(region[START]), str(region[END]),\n seq_file, str(region[SEQ_ACC]))\n\n proc = subprocess.Popen(\n cmd, shell=True, stdout=subprocess.PIPE)\n\n seq = proc.communicate()[0]\n\n # get sequence\n sequence = ''\n seq_bits = seq.split('\\n')[1:]\n sequence = sequence.join(seq_bits)\n\n # print sequence\n\n if sequence != '' and seq_validator(sequence) is True:\n # write header\n fp_out.write(\">%s/%s-%s %s\\n\" % (str(region[SEQ_ACC]),\n str(region[START]),\n str(region[END]),\n str(region[DESC])))\n\n # write sequence\n fp_out.write(sequence + '\\n')\n\n else:\n # logging sequences that have not been exported\n logging.info(sequence)\n\n # close last file\n fp_out.close()\n\n # disconnect from DB\n cursor.close()\n RfamDB.disconnect(cnx)",
"def fetch_as_fasta(chrom,start,end,gindex,fname):\n \n # Print the sequence in fasta format.\n header = '>%s:%s-%s' % (chrom, start, end)\n fname.write('%s\\n%s\\n' % (header, gindex[chrom][start:end]))",
"def getFasta(fileGI,fileout = \"gis.fasta\", outfmt = \"fasta\"):\n myGIs = open(fileGI).read().split()\n gilist = [\",\".join(myGIs[i:i+500]) for i in range(0,len(myGIs),500)]\n from Bio import Entrez\n import time\n fout = open(fileout,\"w\")\n Entrez.email = \"[email protected]\"\n for ele in gilist:\n handle = Entrez.efetch(db = \"protein\", id = ele, rettype = outfmt, retmode = \"text\")\n fout.write(handle.read())\n time.sleep(3)\n fout.close()",
"def process_fasta(in_fh, args, cluster_size_re, rna_seq_objs):\n for record in SeqIO.parse(in_fh, 'fasta'):\n sequence = '%s%s%s'.replace('T', 'U') % (\n args.prefix, str(record.seq), args.suffix\n )\n cluster_size = 1\n try:\n cluster_size = cluster_size_re.search(record.description)\n cluster_size = cluster_size.group(1)\n except AttributeError:\n print 'Not able to find cluster size. Setting to 1.'\n if cluster_size is None:\n cluster_size = 1\n\n # find structure\n curr_seq = RNASequence(record.id, cluster_size, sequence)\n if args.run_mfold:\n curr_seq.structure, curr_seq.energy_dict = run_mfold(\n sequence, args\n )\n curr_seq.free_energy = curr_seq.energy_dict['dG']\n else:\n rnafold_out = run_rnafold(sequence, args)\n rnafold_out = rnafold_out.split('\\n')\n try:\n curr_seq.structure, curr_seq.free_energy = (\n rnafold_out[1].split(' (')\n )\n except (ValueError, IndexError):\n print 'Error running RNAfold:\\n%s\\nExiting.' % rnafold_out\n sys.exit(1)\n\n print '%s\\n' % rnafold_out\n try:\n curr_seq.free_energy = abs(\n float(curr_seq.free_energy.replace(')', ''))\n )\n curr_seq.ensemble_free_energy = abs(\n float(rnafold_out[2].split('[')[1].replace(']', ''))\n )\n curr_seq.ensemble_probability = abs(float(\n rnafold_out[4].split(';')[0].replace(\n ' frequency of mfe structure in ensemble ', ''\n )\n ))\n curr_seq.ensemble_diversity = abs(float(\n rnafold_out[4].split(';')[1].replace(\n ' ensemble diversity ', ''\n )\n ))\n except IndexError:\n print (\n 'Error parsing RNAfold output. '\n '(Couldn\\'t find statistics.) Please check '\n 'RNAfold options.'\n )\n sys.exit(1)\n rna_seq_objs.append(curr_seq)",
"def seq_download(name, organism=\"Homo sapiens\", gaba=False):\n\n subunits = {\n \"Alpha-1\": \"Gabra1\",\n \"Alpha-2\": \"Gabra2\",\n \"Alpha-3\": \"Gabra3\",\n \"Alpha-4\": \"Gabra4\",\n \"Alpha-5\": \"Gabra5\",\n \"Alpha-6\": \"Gabra6\",\n \"Beta-1\": \"Gabrb1\",\n \"Beta-2\": \"Gabrb2\",\n \"Beta-3\": \"Gabrb3\",\n \"Gamma-1\": \"Gabrg1\",\n \"Gamma-2\": \"Gabrg2\",\n \"Gamma-3\": \"Gabrg3\",\n \"Delta\": \"Gabrd\",\n \"Pi\": \"Gabrp\",\n \"Rho-1\": \"Gabrr1\",\n \"Rho-2\": \"Gabrr2\",\n \"Rho-3\": \"Gabrr3\",\n \"Epsilon\": \"Gabre\",\n \"Theta\": \"Gabrq\"\n }\n if gaba:\n results = search(subunits[name])\n else:\n results = search(name)\n results = results[results[\"Organism\"].str.contains(organism, na=False)]\n if len(results):\n if gaba:\n target = results[results[\"Gene names\"].str.contains(subunits[name].upper())][\"Entry\"].max()\n else:\n target = results[results[\"Gene names\"].str.contains(name)][\"Entry\"].max()\n response = urlopen(f\"https://www.uniprot.org/uniprot/{target}.fasta\").read().decode(\"utf-8\")\n with open(\"Temp_seq.fasta\", \"w\") as file:\n file.write(response)\n seq = SeqIO.read(\"Temp_seq.fasta\", \"fasta\")\n os.remove(\"Temp_seq.fasta\")\n\n return seq\n\n else:\n return -1",
"def seqs_from_file(filename, exit_on_err=False, return_qual=False):\n # VALIDATE INPUT\n if not isinstance(filename, str):\n msg = 'Filename has to be a string.'\n if exit_on_err:\n sys.stderr.write('Error: %s\\n'%msg)\n sys.exit(1)\n else: raise IOError(msg)\n if not os.path.exists(filename):\n msg = 'File \"%s\" does not exist.'%filename\n if exit_on_err:\n sys.stderr.write('Error: %s\\n'%msg)\n sys.exit(1)\n else: raise IOError(msg)\n \n # EXTRACT DATA\n with open_(filename,\"rt\") as f:\n query_seq_segments = []\n seq, name, desc, qual = '', '', '', ''\n add_segment = query_seq_segments.append\n for l in f:\n if len(l.strip()) == 0: continue\n #sys.stderr.write(\"%s\\n\"%line)\n fields=l.strip().split()\n if l.startswith(\">\"):\n # FASTA HEADER FOUND\n if query_seq_segments != []:\n # YIELD SEQUENCE AND RESET\n seq = ''.join(query_seq_segments)\n yield (seq, name, desc)\n seq, name, desc = '', '', ''\n del query_seq_segments[:]\n name = fields[0][1:]\n desc = ' '.join(fields[1:])\n \n elif l.startswith(\"@\"):\n # FASTQ HEADER FOUND\n name = fields[0][1:]\n desc = ' '.join(fields[1:])\n try:\n # EXTRACT FASTQ SEQUENCE\n seq = next(f).strip().split()[0]\n # SKIP SECOND HEADER LINE AND QUALITY SCORES\n l = next(f)\n qual = next(f).strip() # Qualities\n except:\n break\n else:\n # YIELD SEQUENCE AND RESET\n if return_qual:\n yield (seq, qual, name, desc)\n else:\n yield (seq, name, desc)\n seq, name, desc, qual = '', '', '', ''\n \n elif len(fields[0])>0:\n # EXTRACT FASTA SEQUENCE\n add_segment(fields[0])\n \n # CHECK FOR LAST FASTA SEQUENCE\n if query_seq_segments != []:\n # YIELD SEQUENCE\n seq = ''.join(query_seq_segments)\n yield (seq, name, desc)",
"def gff2FA(annotation, sequence, windows, output):\n df_gff = pd.read_csv(annotation, index_col=False, sep='\\t', header=None, comment=\"#\")\n df_gff.columns = ['seqname', 'source', 'feature', 'start', 'end', 'score', 'strand', 'frame', 'attribute']\n fasta_seq = SeqIO.parse(sequence, 'fasta')\n buffer_seqs = []\n cont = 0\n for record in fasta_seq:\n print(record.id)\n dff_extract = df_gff[df_gff.seqname == record.id]\n for key,val in dff_extract.iterrows():\n clean_seq = ''.join(str(record.seq).splitlines())\n if int(val.start) - windows < 0:\n start = 0\n else:\n start = int(val.start) - windows\n if int(val.end) + windows > len(clean_seq):\n end = len(clean_seq)\n else:\n end = int(val.end) + windows\n new_seq = clean_seq[start:end]\n att = val.attribute\n id = record.id + '_' + str(start) + '_' + str(end)\n desc = \"seq_id:\" + str(record.id)\n desc += \" feature_start:\" + str(val.start)\n desc += \" feature_end:\" + str(val.end)\n desc += \" genome_start:\" + str(start)\n desc += \" genome_end:\" + str(end)\n desc += \" feature:\" + str(val.feature)\n desc += \" attributes:\" + val.attribute\n seq = SeqRecord(Seq(new_seq), id=id, description=desc)\n buffer_seqs.append(seq)\n cont += 1\n if output:\n print('Saving...')\n SeqIO.write(buffer_seqs, output, \"fasta\")\n else:\n return buffer_seqs",
"def run(input, output, additional=None, fasta_path=None, seed_path=None):\r\n version = \"##gff-version 3\\n\"\r\n gff3_columns = ['seqid', 'source', 'type', 'start', 'end', 'score', 'strand', 'phase', 'attributes']\r\n gff3 = pd.DataFrame(columns=gff3_columns)\r\n table = pd.read_csv(input, sep='\\t')\r\n\r\n if seed_path:\r\n seed_file = pd.read_csv(seed_path, sep='\\t')\r\n\r\n if fasta_path is not None:\r\n fasta_file = ''\r\n open(fasta_path, 'w').close()\r\n\r\n\r\n if additional:\r\n table_to_add = pd.read_csv(additional, sep='\\t')\r\n table = table.append(table_to_add)\r\n\r\n for index, row in table.iterrows():\r\n name = handleGivenName(row['name'], table, 'name')\r\n seqId = row['seqName']\r\n name5p = handleGivenName(row['5pname'], table, '5pname')\r\n seq5p = row['5pseq']\r\n name3p = handleGivenName(row['3pname'], table, '3pname')\r\n seq3p = row['3pseq']\r\n strand = row['strand']\r\n hairpin = row['hairpinSeq']\r\n start = row['start']\r\n end = row['end']\r\n\r\n if row['5pRC'] >= row['3pRC']:\r\n name5p += '|m'\r\n name3p += '|s'\r\n else:\r\n name5p += '|s'\r\n name3p += '|m'\r\n\r\n seq5p_freq = len(table[(table['5pseq'] == seq5p) | (table['3pseq'] == seq5p)])\r\n seq3p_freq = len(table[(table['5pseq'] == seq3p) | (table['3pseq'] == seq3p)])\r\n\r\n name5p += f'|{seq5p_freq}'\r\n name3p += f'|{seq3p_freq}'\r\n\r\n\r\n if seed_path is not None:\r\n if not pd.isnull(seq5p):\r\n seq5p_seed = seq5p[1:8].upper().replace(\"T\", \"U\")\r\n try:\r\n name5p += '|' + seed_file[seed_file['seed'] == seq5p_seed][\"miRBase_name\"].iloc[0]\r\n except:\r\n name5p += '|' + seq5p_seed\r\n\r\n if not pd.isnull(seq3p):\r\n seq3p_seed = seq3p[1:8].upper().replace(\"T\", \"U\")\r\n try:\r\n name3p += '|' + seed_file[seed_file['seed'] == seq3p_seed][\"miRBase_name\"].iloc[0]\r\n except:\r\n name3p += '|' + seq3p_seed\r\n \r\n if fasta_path is not None:\r\n if not pd.isnull(seq5p):\r\n fasta_file += f'>{name5p}\\n{seq5p}\\n'\r\n if not pd.isnull(seq3p):\r\n fasta_file += f'>{name3p}\\n{seq3p}\\n'\r\n\r\n if len(fasta_file) > 100000:\r\n with open(fasta_path, 'a+') as f:\r\n f.write(fasta_file)\r\n fasta_file = ''\r\n\r\n gff_row = [[seqId, '.', 'pre_miRNA', start, end, '.', strand, '.', f'ID={name}']]\r\n\r\n if strand == '+':\r\n try:\r\n offset5p = len(hairpin.split(seq5p)[0])\r\n start5p = start + offset5p\r\n end5p = start + offset5p + len(seq5p) - 1\r\n gff_row.append([seqId, '.', 'miRNA', start5p, end5p, '.', strand, '.', f'ID={name5p}'])\r\n except:\r\n pass\r\n\r\n try:\r\n offset3p = len(hairpin.split(seq3p)[0])\r\n start3p = start + offset3p\r\n end3p = start + offset3p + len(seq3p) - 1\r\n gff_row.append([seqId, '.', 'miRNA', start3p, end3p, '.', strand, '.', f'ID={name3p}'])\r\n except:\r\n pass\r\n\r\n else:\r\n try:\r\n offset5p = len(hairpin.split(seq5p)[0])\r\n end5p = end - offset5p\r\n start5p = end - offset5p - len(seq5p) + 1\r\n gff_row.append([seqId, '.', 'miRNA', start5p, end5p, '.', strand, '.', f'ID={name5p}'])\r\n except:\r\n pass\r\n\r\n try:\r\n offset3p = len(hairpin.split(seq3p)[0])\r\n end3p = end - offset3p\r\n start3p = end - offset3p - len(seq3p) + 1\r\n gff_row.append([seqId, '.', 'miRNA', start3p, end3p, '.', strand, '.', f'ID={name3p}'])\r\n except:\r\n pass\r\n\r\n miRNAs = pd.DataFrame(gff_row, columns=gff3_columns)\r\n\r\n gff3 = gff3.append(miRNAs)\r\n\r\n with open(output, 'w') as file:\r\n file.write(version)\r\n\r\n if fasta_path is not None:\r\n with open(fasta_path, 'a+') as f:\r\n f.write(fasta_file)\r\n\r\n gff3.to_csv(output, index=False, 
header=False, mode=\"a\", sep='\\t')",
"def main(options):\n\n ref_fa=Fasta(options[\"ref\"])\n\n out0=gzip.open(options[\"out\"]+\".0.fa.gz\", \"w\")\n out1=gzip.open(options[\"out\"]+\".1.fa.gz\", \"w\")\n out0.write(\">\"+options[\"chrom\"]+\"\\n\")\n out1.write(\">\"+options[\"chrom\"]+\"\\n\")\n\n vcf=gdc.open2(options[\"vcf\"])\n sample_idx=None\n last_pos=0\n for line in vcf:\n if line.startswith(\"##\"):\n continue\n elif line.startswith(\"#\"):\n bits=line.split()\n sample_idx=bits.index(options[\"sample\"])\n else: #data line\n bits=line.split()\n gt=bits[sample_idx]\n pos=int(bits[1])\n if pos==last_pos:\n continue\n ref=bits[3]\n alt=bits[4]\n \n if len(ref)==1 and len(alt)==1 and gt in [\"0|0\", \"1|0\", \"0|1\", \"1|1\"]: #This is a phased biallelic site\n #This is the sequence from the last position to the base before the current position (note that pos is 1-based)\n ref_seq=ref_fa[options[\"chrom\"]][last_pos:(pos-1)].seq\n if options[\"refcheck\"] and ref_fa[options[\"chrom\"]][pos-1].seq!=ref:\n raise Exception(\"Reference mismatcah at pos \"+str(pos))\n \n if gt[0]==\"0\":\n out0.write(ref_seq+ref)\n elif gt[0]==\"1\":\n out0.write(ref_seq+alt)\n else:\n raise Exception(\"Untrapped bad genotype in haplotype 0 at pos\"+str(pos))\n \n if gt[2]==\"0\":\n out1.write(ref_seq+ref)\n elif gt[2]==\"1\":\n out1.write(ref_seq+alt)\n else:\n raise Exception(\"Untrapped bad genotype in haplotype 1 at pos\"+str(pos))\n \n else: #This is either unphased or missing or multiallelic\n out0.write(\"N\"*(pos-last_pos))\n out1.write(\"N\"*(pos-last_pos))\n \n last_pos=pos\n\n #Fill in the reference at the end and terminate with newline. \n tail_seq=ref_fa[options[\"chrom\"]][last_pos:].seq\n out0.write(tail_seq+\"\\n\")\n out1.write(tail_seq+\"\\n\")",
"def fasta2MSF():\n path = \"./data/\"\n for file in os.listdir(path):\n if file.endswith(\".fa\") or file.endswith(\".fasta\"):\n os.chdir(path)\n try:\n filecore = file.rstrip(\".fa\")\n except:\n filecore = file.rstrip(\".fasta\")\n fileout = filecore + \".msf2\"\n \n seqret = os.system(\"seqret fasta::\" + file + \\\n \" msf::\" + fileout)\n print seqret\n \n outmsf = filecore + \".msf\"\n out = open(outmsf, \"w\")\n op = open(fileout, \"r\")\n msf = op.readlines()\n op.close()\n for line in msf:\n if line[0] == \"\\n\":\n print >> out, line.rstrip(\"\\n\")\n elif line[0] != \"!\" and line[0] != \"/\" and \\\n line[0] != \"\\n\":\n line = line.replace(\".\", \"-\")\n line = line.replace(\"~\", \"-\")\n print >> out, line.rstrip(\"\\n\") \n else:\n print >> out, line.rstrip(\"\\n\")\n out.close()\n \n # remove the comment if you want to remove the\n # original file\n #os.remove(file)\n \n os.remove(fileout)\n os.chdir(\"../\")\n return",
"def fusion(first_fh, fused_fh, compare_file):\r\n # initialize\r\n ha_seq = \"\"\r\n ha_header = \"\"\r\n # parse through file\r\n for line in first_fh:\r\n # if a > is found assume it is header\r\n if line[0] == \">\":\r\n # ha_header = line\r\n # if the header is found (length > 0)\r\n if len(ha_header) > 0:\r\n # pull needed information from header to make new one\r\n matches = re.findall(\"(Strain Name:[AB]\\/[\\/A-Za-z 0-9()\\\\-_'.]+)\", ha_header)\r\n subtype_match = re.findall(\"(Subtype:[A-Za-z0-9]+)\", ha_header)\r\n organ = re.findall(\"(Organism:[\\/A-Za-z 0-9()\\\\-_'.]+)\", ha_header)\r\n ha_header = \">\" + organ[0] + \"|\" + matches[0] + \"|\" + subtype_match[0]\r\n # print(ha_header)\r\n # Call find_match function, input the file to search and the new header created.\r\n na_header, na_seq = find_match(compare_file, ha_header)\r\n # if return is equal then write to new file with two sequences fused\r\n if na_header == ha_header:\r\n write_data_2(fused_fh, ha_header, ha_seq.strip() + \"\\n\" + na_seq.strip())\r\n # reset variables\r\n ha_header = line\r\n ha_seq = \"\"\r\n\r\n else:\r\n # if it is part of the sequence\r\n ha_seq = ha_seq + line\r\n\r\n # To return/write the last entries in the files, won't get written in loop\r\n matches = re.findall(\"(Strain Name:[AB]\\/[\\/A-Za-z 0-9()\\\\-_'.]+)\", ha_header)\r\n subtype_match = re.findall(\"(Subtype:[A-Za-z0-9]+)\", ha_header)\r\n organ = re.findall(\"(Organism:[\\/A-Za-z 0-9()\\\\-_'.]+)\", ha_header)\r\n ha_header = \">\" + organ[0] + \"|\" + matches[0] + \"|\" + subtype_match[0]\r\n na_header, na_seq = find_match(compare_file, ha_header)\r\n if na_header == ha_header:\r\n # print(\"matches2\")\r\n # print(ha_header)\r\n write_data_2(fused_fh, ha_header, ha_seq.strip() + \"\\n\" + na_seq.strip())\r\n\r\n # Close Files\r\n first_fh.close()\r\n fused_fh.close()",
"def fix_seqname(sname):\r\n # protid is on each line of the FASTA file; splitting doesn't really do anything\r\n # protid = sname.split(' ')\r\n # TK 2020-07-22\r\n # Dictionary for filenames so that we know which CDS file to query for each\r\n # protein ID.\r\n lookups = {\r\n 'AET' : 'Aegilops_tauschii.Aet_v4.0.cds.all.fa',\r\n\t'PNS' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'PNT' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'KQJ' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'KQK' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'Dr' : 'Dioscorea_rotundata.TDr96_F1_Pseudo_Chromosome_v1.0.cds.all.fa',\r\n\t'Et' : 'Eragrostis_tef.ASM97063v1.cds.all.fa',\r\n\t'HORVU' : 'Hordeum_vulgare.IBSC_v2.cds.all.fa',\r\n\t'LPERR' : 'Leersia_perrieri.Lperr_V1.4.cds.all.fa',\r\n\t'GSMUA' : 'Musa_acuminata.ASM31385v1.cds.all.fa',\r\n\t'OBART' : 'Oryza_barthii.O.barthii_v1.cds.all.fa',\r\n\t'ORGLA' : 'Oryza_glaberrima.Oryza_glaberrima_V1.cds.all.fa',\r\n\t'ONIVA': 'Oryza_nivara.Oryza_nivara_v1.0.cds.all.fa',\r\n\t'ORUFI' : 'Oryza_rufipogon.OR_W1943.cds.all.fa',\r\n\t'PVH' : 'Panicum_hallii_fil2.PHallii_v3.1.cds.all.fa',\r\n\t'Sspon' : 'Saccharum_spontaneum.Sspon.HiC_chr_asm.cds.all.fa',\r\n\t'KQL' : 'Setaria_italica.Setaria_italica_v2.0.cds.all.fa',\r\n\t'TraesCS' : 'Triticum_aestivum.IWGSC.cds.all.fa',\r\n\t'Zm' : 'Zea_mays.B73_RefGen_v4.cds.all.fa',\r\n\t'Zlat': 'Zlat_V1.cds.fa',\r\n 'FUN': 'rice.transcripts.fa',\r\n 'Os': 'Oryza_sativa.IRGSP-1.0.cds.all.fa'\r\n }\r\n # Get the filename based on what the sequence starts with.\r\n for id_start, cds_file in lookups.items():\r\n if sname.startswith(id_start):\r\n target_file = cds_file\r\n break\r\n # Return the protein name and CDS target file as a tuple\r\n return (target_file, sname)\r\n\r\n # Make a lookup table to get the species name based on the protein ID.\r\n # lookups = [('Zlat*','Zizania_latifolia'),('FUN*','Zizania_palustris'),('Os*','Oryza_sativa')]\r\n # Initialize an empty species dictionary to assist in connecting protid (gene name) to species name\r\n # species_dict = {}\r\n # # This for loop will populate the species dictionary so that we can get species name keyed on the protid (gene name)\r\n # for i in protid:\r\n # species = lookup(i, lookups)\r\n # return species.encode, i\r\n # species_dict[protid] = species.encode()\r\n # return None\r",
"def getSequence(ref, fasta):\n\n fasta_header = \"\"\n\n fh_fasta = open(fasta, \"r\")\n entry = (x[1] for x in groupby(fh_fasta, lambda line: line[0] == \">\"))\n\n for header in entry:\n headerStr = header.__next__()[1:].strip()\n\n seq = \"\".join(s.strip() for s in entry.__next__())\n\n if ref == headerStr.replace('>',''):\n filename = os.path.join(os.getcwd(), ref.replace('/','_').split('|')[0])\n fasta_header = replace_char(headerStr)\n\n with open(filename + '.fa', \"w\") as output_file:\n output_file.write(\">\" + fasta_header + \"\\\\n\" + seq.upper() + \"\\\\n\")\n\n fh_fasta.close()\n return fasta_header",
"def read_fasta(src, remove_gaps=False):\n file_obj = None\n if isinstance(src, str):\n try:\n file_obj = open(src, \"r\")\n except IOError:\n print((\"The file `%s` does not exist, exiting gracefully\" % src))\n elif isinstance(src, filetypes):\n file_obj = src\n else:\n raise TypeError('FASTA reader cannot recognize the source of %s, %s, %s' % (src,type(src),isinstance(src, filetypes)))\n name = None\n seq_list = list()\n for line_number, i in enumerate(file_obj):\n if i.startswith('>'):\n if name:\n if remove_gaps:\n yield name, ''.join(seq_list).replace('-', '')\n else:\n yield name, ''.join(seq_list)\n seq_list = list()\n name = i[1:].strip()\n else:\n #seq = ''.join(i.strip().upper().split())\n seq = ''.join(i.strip().split())\n #if not is_sequence_legal(seq):\n # raise Exception(\"Error: illegal characeters in sequence at line %d\" % line_number)\n seq_list.append(seq)\n if name:\n if remove_gaps:\n yield name, ''.join(seq_list).replace('-', '')\n else:\n yield name, ''.join(seq_list)\n if isinstance(src, str):\n file_obj.close()",
"def build_gff(annotations, faa):\n with open(faa, \"rt\") as faa_file:\n for line in faa_file:\n if \">\" not in line:\n continue\n\n # each fasta is suffixed on the annotated faa if a prefix _INT (_1 .. _n)\n contig_name, start, end, strand = parse_fasta_header(line)\n if None in (contig_name, start, end, strand):\n print(\n \"It was not possible to parse the \" + line, end=\"\", file=sys.stderr\n )\n continue\n\n clean_name = Annotation.clean_seq_name(contig_name)\n\n row_annotations = Annotation.merge(\n [ann.get() for ann in annotations.get(contig_name, [])]\n )\n\n ann_string = \";\".join(\n [\n \"{}={}\".format(k, \",\".join(v).strip())\n for k, v in row_annotations.items()\n ]\n )\n\n eggNOGScore = \"\".join(row_annotations.get(\"eggNOG_score\", []))\n\n if len(ann_string):\n yield [\n clean_name,\n \"eggNOG-v2\",\n \"CDS\",\n start,\n end,\n eggNOGScore or \".\",\n \"+\" if strand == \"1\" else \"-\",\n \".\",\n \"ID=\" + clean_name + \";\" + ann_string,\n ]",
"def test_make_fna(self):\r\n fna_fp = os.path.join(self.sff_dir, 'test.fna')\r\n fna_gz_fp = os.path.join(self.gz_sff_dir, 'test_gz.fna')\r\n make_fna(self.sff_fp, fna_fp)\r\n make_fna(self.sff_gz_fp, fna_gz_fp)\r\n self.assertEqual(open(fna_fp).read(), fna_txt)\r\n self.assertEqual(open(fna_gz_fp).read(), fna_txt)",
"def parse_facs_files():\n\n #Load parser settings\n parser_settings = getattr(settings,'FACS_PARSER_SETTINGS')\n\n files_to_parse = [parser_settings['facs_source_directory']+f for f in os.listdir(parser_settings['facs_source_directory']) if '.exp' in f]\n\n for filename in files_to_parse: \n\n #Compute MD5 hash\n facs_file = file(filename,'rbU')\n md5hash = hashlib.md5(facs_file.read()).hexdigest()\n facs_file.close()\n \n #Skip file if previously parsed.\n if FacsFile.objects.filter(original_filename=filename,md5hash=md5hash):\n print 'Skipping ', filename\n continue\n\n #Open file, remove null bytes and prepare csv reader\n facs_file = file(filename, 'rU')\n csv_reader = csv.reader((x.replace('\\0', '') for x in facs_file),dialect=csv.excel_tab)\n\n #Reader header\n csv_header = csv_reader.next()\n facs_file_results = []\n\n #Parse the file\n for csv_row in csv_reader:\n if csv_row[0]:\n facs_file_results.append(dict(zip(csv_header,csv_row)))\n\n #Close the file\n facs_file.close()\n\n #Save the information to database and archive file\n random_ints = ''.join([str(random.randint(0,9)) for n in range(10)])\n archive_filename = parser_settings['facs_archive_directory'] + filename.split('/')[-1][:-4].split('_')[0] + '_' + random_ints + '.exp'\n shutil.move(filename, archive_filename)\n\n facs_file = FacsFile(\n original_filename = filename,\n md5hash = md5hash,\n archive_filename = archive_filename,\n )\n facs_file.save()\n\n #Remove empty elements\n for result in facs_file_results:\n for key, data in result.items():\n if data == '.' or not(data):\n del result[key]\n\n #Cache test code and interface mappings\n test_codes = []\n for testcode_mapping in TestCodeMapping.objects.filter(interface_name=parser_settings['testcode_interface_name']):\n test_code = testcode_mapping.code\n code = test_code.code\n code_mapping = testcode_mapping.code_mapping\n\n test_codes.append((code, code_mapping, test_code))\n\n #Add results to database\n for result in facs_file_results:\n\n #Parse result date\n result_date = dateutil.parser.parse(result[parser_settings['result_datetime']])\n result_error_code = getattr(result, parser_settings['error_codes'], '')\n result_identifier = result[parser_settings['sample_identifier']]\n result_cytometer = result[parser_settings['cytometer_serial']]\n\n #Create the dictionnary of result items.\n new_result_item_dict = {}\n for test_code, facs_file_column, test_code_object in test_codes:\n new_result_item_dict[test_code] = ResultItem(\n test_code = test_code_object,\n result_item_value = result[facs_file_column],\n error_code = result_error_code,\n result_item_datetime = result_date,\n )\n\n #Search for possible duplicate result\n is_duplicate = False\n for possible_duplicate in FacsResult.objects.filter(result_identifier=result_identifier):\n if possible_duplicate.get_resultitem_dict() == new_result_item_dict:\n is_duplicate = True\n break\n\n #Save result and result item to data if it is not a duplicate\n if not is_duplicate:\n \n new_result = FacsResult(\n result_identifier=result_identifier,\n result_datetime=result_date,\n origin_facs_file=facs_file,\n cytometer_serial_number=result_cytometer,\n )\n \n new_result.save()\n \n #Add the reference to the result for each item and save it to database.\n for item in new_result_item_dict.values():\n item.result = new_result\n item.save()\n\n new_result.link_to_requisition()",
"def readFastaFile(filename):",
"def Parse_folder_to_multi_faa(target_dir,faa_filename):\n os.chdir(target_dir)\n output_handle = open(faa_filename, \"w\")\n for gbk_filename in FileGen(target_dir):\n with open(gbk_filename, \"r\") as input_handle:\n for seq_record in SeqIO.parse(input_handle, \"genbank\") :\n print(\"Dealing with GenBank record %s\" % seq_record.id)\n for seq_feature in seq_record.features :\n if seq_feature.type==\"CDS\" :\n assert len(seq_feature.qualifiers['translation'])==1\n try:\n name = seq_feature.qualifiers['locus_tag'][0]\n except KeyError:\n name = seq_feature.qualifiers['product'][0]\n output_handle.write(\">%s from %s\\n%s\\n\" % (\n name,\n gbk_filename.split(\"/\")[-1],\n seq_feature.qualifiers['translation'][0])) \n output_handle.close()",
"def generate_siaf_pre_flight_reference_files_fgs(verbose=False, mode='siaf'):\n instrument = 'FGS'\n\n center_offset_x = 1023.5\n center_offset_y = 1023.5\n\n # hardcoded pixelscale, reference?\n scale = 0.06738281367 # arcsec/pixel\n\n if mode == 'siaf':\n # write focal plane alignment reference file\n outfile = os.path.join(JWST_SOURCE_DATA_ROOT, instrument, '{}_siaf_alignment.txt'.format(instrument.lower()))\n oss_flags = [False, True]\n elif mode == 'fsw':\n outfile = os.path.join(JWST_SOURCE_DATA_ROOT, instrument, '{}_fsw_coefficients.txt'.format(instrument.lower()))\n oss_flags = [True]\n\n if os.path.isfile(outfile):\n os.remove(outfile)\n\n siaf_alignment = None\n counter = 0\n\n for aperture_id in 'FGS1 FGS2'.split():\n\n if aperture_id == 'FGS1':\n V2Ref = 207.1900\n V3Ref = -697.5000\n\n # coefficients copied from Cox' makeSIAF.py to reproduce PRDOPSSOC-H-015\n # February 2015 FGS delivery\n # these numbers match the `To be Updated for CV3` column in the Tables on page 6ff\n # of an unpublished word document entitled `FGS Transformation for CV3.docx` by\n # Julia Zhou, e.g. C = IDEALPTOREALPXCOE_N\n\n # Initialize the parameters\n A = np.array(\n [-2.33369320E+01, 9.98690490E-01, 1.05024970E-02, 2.69889020E-06, 6.74362640E-06,\n 9.91415010E-07, 1.21090320E-09, -2.84802930E-11, 1.27892930E-09, -1.91322470E-11,\n 5.34567520E-14, 9.29791010E-14, 8.27060020E-14, 9.70576590E-14, 1.94203870E-14])\n\n B = np.array(\n [-2.70337440E+01, -2.54596080E-03, 1.01166810E+00, 2.46371870E-06, 2.08880620E-06,\n 9.32489680E-06, -4.11885660E-11, 1.26383770E-09, -7.60173360E-11, 1.36525900E-09,\n 2.70499280E-14, 5.70198270E-14, 1.43943080E-13, 7.02321790E-14, 1.21579450E-13])\n\n C = np.array(\n [2.31013520E+01, 1.00091800E+00, -1.06389620E-02, -2.65680980E-06, -6.51704610E-06,\n -7.45631440E-07, -1.29600400E-09, -4.27453220E-12, -1.27808870E-09, 5.01165140E-12,\n 2.72622090E-15, 5.42715750E-15, 3.46979980E-15, 2.49124350E-15, 1.22848570E-15])\n\n D = np.array(\n [2.67853100E+01, 2.26545910E-03, 9.87816850E-01, -2.35598140E-06, -1.91455620E-06,\n -8.92779540E-06, -3.24201520E-11, -1.30056630E-09, -1.73730700E-11,\n -1.27341590E-09, 1.84205730E-15, 3.13647160E-15, -2.99705840E-16, 1.98589690E-15,\n -1.26523200E-15])\n\n elif aperture_id == 'FGS2':\n V2Ref = 24.4300\n V3Ref = -697.5000\n\n A = np.array(\n [-3.28410900E+01, 1.03455010E+00, 2.11920160E-02, -9.08746430E-06, -1.43516480E-05,\n -3.93814140E-06, 1.60956450E-09, 5.82814640E-10, 2.02870570E-09, 2.08582470E-10,\n -2.79748590E-14, -8.11622820E-14, -4.76943000E-14, -9.01937740E-14,\n -8.76203780E-15])\n\n B = np.array(\n [-7.76806220E+01, 2.92234710E-02, 1.07790000E+00, -6.31144890E-06, -7.87266390E-06,\n -2.14170580E-05, 2.13293560E-10, 2.03376270E-09, 6.74607790E-10, 2.41463060E-09,\n -2.30267730E-14, -3.63681270E-14, -1.35117660E-13, -4.22207660E-14,\n -1.16201020E-13])\n\n C = np.array(\n [3.03390890E+01, 9.68539030E-01, -1.82288450E-02, 7.72758330E-06, 1.17536430E-05,\n 2.71516870E-06, -1.28167820E-09, -6.34376120E-12, -1.24563160E-09, -9.26192040E-12,\n 8.14604260E-16, -5.93798790E-16, -2.69247540E-15, -4.05196100E-15, 2.14529600E-15])\n\n D = np.array(\n [7.13783150E+01, -2.55191710E-02, 9.30941560E-01, 5.01322910E-06, 5.10548510E-06,\n 1.68083960E-05, 9.41565630E-12, -1.29749490E-09, -1.89194230E-11, -1.29425530E-09,\n -2.81501600E-15, -1.73025000E-15, 2.57732600E-15, 1.75268080E-15, 2.95238320E-15])\n\n number_of_coefficients = len(A)\n polynomial_degree = int((np.sqrt(8 * number_of_coefficients + 1) - 3) / 2)\n\n # generate distortion 
coefficient files\n siaf_index = []\n exponent_x = []\n exponent_y = []\n for i in range(polynomial_degree + 1):\n for j in np.arange(i + 1):\n siaf_index.append('{:d}{:d}'.format(i, j))\n exponent_x.append(i-j)\n exponent_y.append(j)\n\n\n print('*'*100)\n aperture_name = '{}_FULL'.format(aperture_id)\n for oss in oss_flags:\n\n if oss:\n aperture_name = aperture_name + '_OSS'\n oss_factor = 1.\n else:\n oss_factor = -1.\n\n print('{}'.format(aperture_name))\n\n if mode == 'fsw':\n (AX, BX, CX, DX) = (A, B, C, D)\n\n AS = polynomial.shift_coefficients(AX, center_offset_x, center_offset_y)\n BS = polynomial.shift_coefficients(BX, center_offset_x, center_offset_y)\n\n AS0 = copy.deepcopy(AS[0])\n BS0 = copy.deepcopy(BS[0])\n AS[0] = 0.0\n BS[0] = 0.0\n\n betaY = np.arctan2(AS[2], BS[2])\n print('Beta Y', np.degrees(betaY))\n print('Shift zeros', AS0, BS0)\n\n AR = AS * np.cos(betaY) - BS * np.sin(betaY)\n BR = AS * np.sin(betaY) + BS * np.cos(betaY)\n\n\n AR[0] = center_offset_x\n BR[0] = center_offset_y\n\n AF = polynomial.shift_coefficients(AR, -center_offset_x, -center_offset_y)\n BF = polynomial.shift_coefficients(BR, -center_offset_x, -center_offset_y)\n\n # Inverse matrices\n xc = polynomial.poly(AX, center_offset_x, center_offset_y)\n yc = polynomial.poly(BX, center_offset_x, center_offset_y)\n # CS1 = 1.0*C1 # Force a real copy\n CS = polynomial.shift_coefficients(CX, xc, yc)\n DS = polynomial.shift_coefficients(DX, xc, yc)\n CS0 = copy.deepcopy(CS[0])\n DS0 = copy.deepcopy(DS[0])\n\n CS[0] = 0.0\n DS[0] = 0.0\n CR = polynomial.prepend_rotation_to_polynomial(CS, np.degrees(betaY))\n DR = polynomial.prepend_rotation_to_polynomial(DS, np.degrees(betaY))\n CR[0] = CS0\n DR[0] = DS0\n CF = polynomial.shift_coefficients(CR, -center_offset_x, -center_offset_y)\n DF = polynomial.shift_coefficients(DR, -center_offset_x, -center_offset_y)\n distortion_reference_table = Table((siaf_index, exponent_x, exponent_y, AF, BF, CF, DF),\n names=('siaf_index', 'exponent_x', 'exponent_y', 'Sci2IdlX', 'Sci2IdlY', 'Idl2SciX','Idl2SciY'))\n\n V3angle = 0\n betaX = 0\n\n\n else:\n # Scale to arcsec\n (AX, BX, CX, DX) = polynomial.rescale(A, B, C, D, scale)\n\n\n V2c = polynomial.poly(AX, center_offset_x, center_offset_y)\n V3c = polynomial.poly(BX, center_offset_x, center_offset_y)\n\n AS = polynomial.shift_coefficients(AX, center_offset_x, center_offset_y)\n AS[0] = 0.0\n BS = polynomial.shift_coefficients(BX, center_offset_x, center_offset_y)\n BS[0] = 0.0\n CS = polynomial.shift_coefficients(CX, V2c, V3c)\n CS[0] = 0.0\n DS = polynomial.shift_coefficients(DX, V2c, V3c)\n DS[0] = 0.0\n\n if aperture_id == 'FGS1':\n if oss is False:\n AF = -polynomial.flip_x(polynomial.flip_y(AS))\n BF = -polynomial.flip_x(polynomial.flip_y(BS))\n CF = -polynomial.flip_x(polynomial.flip_y(CS))\n DF = -polynomial.flip_x(polynomial.flip_y(DS))\n else:\n AF = AS # For OSS detector and science are identical\n BF = -BS\n CF = polynomial.flip_y(CS)\n DF = polynomial.flip_y(DS)\n elif aperture_id == 'FGS2':\n if oss is False:\n AF = -polynomial.flip_x(AS)\n BF = polynomial.flip_x(BS)\n CF = -polynomial.flip_x(CS)\n DF = polynomial.flip_x(DS)\n else:\n AF = AS # For OSS detector and science are identical\n BF = BS\n CF = CS\n DF = DS\n\n betaX = np.arctan2(oss_factor * AF[1], BF[1])\n betaY = np.arctan2(oss_factor * AF[2], BF[2])\n\n V3angle = copy.deepcopy(betaY)\n if (abs(V3angle) > np.pi/2):\n V3angle = V3angle - np.copysign(np.pi, V3angle)\n\n (AR,BR) = polynomial.add_rotation(AF, BF, -1 * oss_factor * 
np.rad2deg(V3angle))\n\n # take out the rotation, carried separately in V3IdlYangle\n CR = polynomial.prepend_rotation_to_polynomial(CF, oss_factor * np.rad2deg(V3angle))\n DR = polynomial.prepend_rotation_to_polynomial(DF, oss_factor * np.rad2deg(V3angle))\n distortion_reference_table = Table((siaf_index, exponent_x, exponent_y, AR, BR, CR, DR),\n names=('siaf_index', 'exponent_x', 'exponent_y', 'Sci2IdlX', 'Sci2IdlY', 'Idl2SciX','Idl2SciY'))\n\n print('{} {}'.format(aperture_name, np.rad2deg(betaY)))\n # if aperture_name == 'FGS1_FULL': # first in loop\n if counter == 0: # first in loop\n siaf_alignment = Table()\n siaf_alignment['AperName'] = ['{:>30}'.format(aperture_name)]\n siaf_alignment['V3IdlYAngle'] = [np.rad2deg(V3angle)]\n siaf_alignment['V3SciXAngle'] = [np.rad2deg(betaX)]\n siaf_alignment['V3SciYAngle'] = [np.rad2deg(betaY)]\n siaf_alignment['V2Ref'] = [V2Ref]\n siaf_alignment['V3Ref'] = [V3Ref]\n else:\n siaf_alignment.add_row(['{:>30}'.format(aperture_name), np.rad2deg(V3angle), np.rad2deg(betaX), np.rad2deg(betaY), V2Ref, V3Ref])\n\n counter += 1\n\n\n distortion_reference_table.add_column(Column([aperture_name] * len(distortion_reference_table), name='AperName'), index=0)\n if mode == 'fsw':\n distortion_reference_file_name = os.path.join(JWST_SOURCE_DATA_ROOT, instrument, 'fgs_fsw_distortion_{}.txt'.format(aperture_name.lower()))\n else:\n distortion_reference_file_name = os.path.join(JWST_SOURCE_DATA_ROOT, instrument, 'fgs_siaf_distortion_{}.txt'.format(aperture_name.lower()))\n\n comments = []\n comments.append('FGS distortion reference file for SIAF\\n')\n comments.append('')\n comments.append('Based on coefficients delivered to STScI in February 2015.')\n comments.append('These parameters are stored in PRDOPSSOC-H-014.')\n comments.append('')\n comments.append('Generated {} {}'.format(timestamp.isot, timestamp.scale))\n comments.append('by {}'.format(username))\n comments.append('')\n distortion_reference_table.meta['comments'] = comments\n distortion_reference_table.write(distortion_reference_file_name, format='ascii.fixed_width', delimiter=',', delimiter_pad=' ', bookend=False, overwrite=True)\n\n comments = []\n comments.append('{} alignment parameter reference file for SIAF'.format(instrument))\n comments.append('')\n comments.append('This file contains the focal plane alignment parameters calibrated during FGS-SI alignment.')\n comments.append('')\n comments.append('Generated {} {}'.format(timestamp.isot, timestamp.scale))\n comments.append('by {}'.format(username))\n comments.append('')\n siaf_alignment.meta['comments'] = comments\n siaf_alignment.write(outfile, format='ascii.fixed_width', delimiter=',',\n delimiter_pad=' ', bookend=False, overwrite=True)",
"def parseFasta(self, fastaRef):\n\n seq = \"\"\n prevId = \"\"\n with open(fastaRef, 'r') as f:\n\n for line in f:\n if \">\" == line[0]:\n # asserting the regex don't fail...\n found = GENEIDRULE.search(line)\n if(found):\n alternate = found.group(1)\n geneName = found.group(2)\n self._transcripts[alternate] = geneName\n else:\n print(\"EnsemblFasta: NOT FOUND\")\n print(line)\n exit()\n\n if(prevId and seq):\n geneName = self._transcripts[prevId]\n if geneName in self._genes:\n gene = self._genes[geneName]\n else:\n gene = Gene(geneName)\n self._genes[geneName] = gene\n\n gene.addTranscripts(prevId, seq)\n seq = \"\"\n prevId = alternate\n else:\n seq += line.rstrip(\"\\n\")\n gene.addTranscripts(prevId, seq)",
"def format_binary_sff_as_fna(sff_file, output_file=None, qual=False):\r\n # TODO: Move to PyCogent\r\n if output_file is None:\r\n output_file = StringIO()\r\n _, reads = parse_binary_sff(sff_file)\r\n for read in reads:\r\n output_file.write(format_read_as_fna(read, qual))\r\n return output_file",
"def load_yaafedata(params, \n n_learn_frames=2000,\n use_custom_stft=False):\n\n audio_file_path = getoptions(params, 'location', '/sons/voxforge/data/Learn/')\n # if no number specified, use n_learn_frames\n n_frames = getoptions(params, 'n_frames', n_learn_frames)\n sr = getoptions(params, 'sr', 16000)\n sigma_noise = getoptions(params, 'sigma', 0.0)\n random_seed = getoptions(params, 'shuffle', 1001)\n features = getoptions(params, 'features', [])\n wintime = getoptions(params, 'wintime', 0.032)\n steptime = getoptions(params, 'steptime', 0.008)\n startpoint = getoptions(params, 'startpoint', 0)\n forbid_list = getoptions(params, 'forbidden_names', [])\n mfnpf = getoptions(params, 'frame_num_per_file', 3000)\n# wintime = float(win_size)/float(sr)\n# steptime = float(step_size)/float(sr)\n \n win_size = int(wintime*sr)\n step_size = int(steptime*sr)\n# print wintime, steptime, win_size, step_size\n # apply sub_routine to all the files until a condition is met\n n_frames_reached = 0\n\n all_file_paths = get_filepaths(audio_file_path,\n random_seed,\n forbid_list = forbid_list)\n file_index = 0\n\n specseq = []\n featseq = []\n dataseq = []\n n_files_used = 0\n\n while (n_frames_reached < n_frames):\n file_index = file_index + 1\n filepath = all_file_paths[file_index]\n n_files_used = n_files_used + 1\n\n [loc_magSTFT, loc_Feats, locDatas] = load_data_one_audio_file(\n filepath, sr,\n wintime=wintime,\n steptime=steptime,\n max_frame_num_per_file=mfnpf,\n sigma_noise=sigma_noise,\n startpoint = startpoint,\n features=features)\n# if get_data:\n# [loc_magSTFT, loc_Feats, locDatas] = load_data_one_file_melspec(filepath, sr, sigma_noise, params);\n# Data = [Data , locDatas'];\n# else\n# [loc_magSTFT, loc_Feats, ~] = load_data_one_file_melspec(filepath, sr, sigma_noise, params);\n# end\n if not use_custom_stft:\n specseq.append(loc_magSTFT)\n else:\n specseq.append(np.abs(get_stft(locDatas,\n wsize=win_size,\n tstep=step_size,\n sigma = sigma_noise)).T)\n# print wintime, steptime, win_size, step_size\n# print loc_magSTFT.shape\n# print specseq[-1].shape\n# print locDatas.shape\n featseq.append(loc_Feats)\n dataseq.append(locDatas)\n \n n_frames_reached += min(loc_magSTFT.shape[0], loc_Feats.shape[0])\n print n_frames_reached\n \n Spectrums = np.vstack(specseq)\n Features = np.vstack(featseq)\n Data = np.hstack(dataseq)\n\n n_frames_reached = min(n_frames_reached, n_frames)\n Spectrums = Spectrums[0:n_frames_reached,:]\n Features = Features[0:n_frames_reached,:]\n used_files = all_file_paths[0:n_files_used]\n\n return Features, Spectrums, n_frames_reached, Data, used_files",
"def gen_datafiles():\n\tnum_reads = 10000\n\tnum_samples = 100\n\tgen_sequences('hg38.fa', num_reads, num_samples, 1, 'hg38_train.txt')\n\tgen_sequences('HIV-1.fasta', num_reads, num_samples, 0, 'HIV-1_train.txt')\n\tgen_sequences('hg38.fa', num_reads, num_samples, 1, 'hg38_test.txt')\n\tgen_sequences('HIV-1.fasta', num_reads, num_samples, 0, 'HIV-1_test.txt')",
"def get_orfs(gtf, args, config, is_annotated=False, is_de_novo=False):\n\n call = not args.do_not_call\n chr_name_file = os.path.join(config[\"star_index\"], \"chrName.txt\")\n chr_name_str = \"--chr-name-file {}\".format(chr_name_file)\n\n logging_str = logging_utils.get_logging_options_string(args)\n cpus_str = \"--num-cpus {}\".format(args.num_cpus)\n\n # extract a BED12 of the annotated ORFs\n transcript_bed = filenames.get_bed(\n config[\"genome_base_path\"],\n config[\"genome_name\"],\n is_annotated=is_annotated,\n is_de_novo=is_de_novo,\n )\n\n cmd = \"gtf-to-bed12 {} {} {} {} {}\".format(\n gtf, transcript_bed, chr_name_str, cpus_str, logging_str\n )\n in_files = [gtf]\n out_files = [transcript_bed]\n shell_utils.call_if_not_exists(\n cmd, out_files, in_files=in_files, overwrite=args.overwrite, call=call\n )\n\n # extract the transcript fasta\n transcript_fasta = filenames.get_transcript_fasta(\n config[\"genome_base_path\"],\n config[\"genome_name\"],\n is_annotated=is_annotated,\n is_de_novo=is_de_novo,\n )\n\n cmd = \"extract-bed-sequences {} {} {} {}\".format(\n transcript_bed, config[\"fasta\"], transcript_fasta, logging_str\n )\n in_files = [transcript_bed, config[\"fasta\"]]\n out_files = [transcript_fasta]\n shell_utils.call_if_not_exists(\n cmd, out_files, in_files=in_files, overwrite=args.overwrite, call=call\n )\n\n # extract ORFs from the transcripts using genomic coordinates\n orfs_genomic = filenames.get_orfs(\n config[\"genome_base_path\"],\n config[\"genome_name\"],\n note=config.get(\"orf_note\"),\n is_annotated=is_annotated,\n is_de_novo=is_de_novo,\n )\n\n start_codons_str = utils.get_config_argument(\n config, \"start_codons\", default=default_start_codons\n )\n\n stop_codons_str = utils.get_config_argument(\n config, \"stop_codons\", default=default_stop_codons\n )\n\n cmd = \"extract-orf-coordinates {} {} {} {} {} {} {}\".format(\n transcript_bed,\n transcript_fasta,\n orfs_genomic,\n cpus_str,\n start_codons_str,\n stop_codons_str,\n logging_str,\n )\n in_files = [transcript_fasta, transcript_bed]\n out_files = [orfs_genomic]\n shell_utils.call_if_not_exists(\n cmd, out_files, in_files=in_files, overwrite=args.overwrite, call=call\n )\n\n # write the ORF exons, used to label the ORFs\n exons_file = filenames.get_exons(\n config[\"genome_base_path\"],\n config[\"genome_name\"],\n note=config.get(\"orf_note\"),\n is_annotated=is_annotated,\n is_de_novo=is_de_novo,\n )\n\n cmd = \"split-bed12-blocks {} {} --num-cpus {} {}\".format(\n orfs_genomic, exons_file, args.num_cpus, logging_str\n )\n in_files = [orfs_genomic]\n out_files = [exons_file]\n shell_utils.call_if_not_exists(\n cmd, out_files, in_files=in_files, overwrite=args.overwrite, call=call\n )\n\n # label the ORFs\n labeled_orfs = filenames.get_labels(\n config[\"genome_base_path\"],\n config[\"genome_name\"],\n note=config.get(\"orf_note\"),\n is_annotated=is_annotated,\n is_de_novo=is_de_novo,\n )\n\n annotated_bed = filenames.get_bed(\n config[\"genome_base_path\"],\n config[\"genome_name\"],\n is_annotated=True,\n )\n\n orf_exons_str = \"--orf-exons {}\".format(exons_file)\n\n de_novo_str = \"\"\n if is_de_novo:\n de_novo_str = '--label-prefix \"novel_\" --filter --nonoverlapping-label \"novel\"'\n\n cmd = \"label-orfs {} {} {} {} {} {} {}\".format(\n annotated_bed,\n orfs_genomic,\n labeled_orfs,\n orf_exons_str,\n de_novo_str,\n logging_str,\n cpus_str,\n )\n in_files = [annotated_bed, orfs_genomic, exons_file]\n # ** this function overwrites the input file `orfs_genomic`\n out_files = 
[labeled_orfs]\n shell_utils.call_if_not_exists(\n cmd, out_files, in_files=in_files, overwrite=args.overwrite, call=call\n )",
"def discoverFromVCF(cls, inputFname, outputFname, refFastaFname=None, VCFOutputType=2, \\\n\t\t\t\t\tminMinorAlleleCoverage=1/4., maxMinorAlleleCoverage=3/4.,\\\n\t\t\t\t\tmaxNoOfReads=2., minNoOfReads=1/4., \\\n\t\t\t\t\tmaxNoOfReadsForGenotypingError=1, maxMajorAlleleCoverage=7/8., maxNoOfReadsForAllSamples=1000,\\\n\t\t\t\t\tnt_set = set(['a','c','g','t','A','C','G','T']), isqID2coverage=None, defaultCoverage=10, \\\n\t\t\t\t\toutputDelimiter='\\t',\\\n\t\t\t\t\treport=0, site_type=1):\n\t\timport csv\n\t\tfrom pymodule.utils import runLocalCommand, getColName2IndexFromHeader\n\t\tsys.stderr.write(\"Looking for heterozygous SNPs in %s (%s<=MAC<=%s).\\n\"%(os.path.basename(inputFname), \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tminMinorAlleleCoverage, maxMinorAlleleCoverage))\n\t\treader =csv.reader(open(inputFname), delimiter='\\t')\n\t\t\n\t\t\n\t\tread_group2col_index = {'ref':0}\t#ref is at column 0. \"ref\" must not be equal to any read_group.\n\t\tread_group2coverage = {}\t#2011-9-2\n\t\tlocus_id2row_index = {}\n\t\tdata_matrix = []\n\t\t\n\t\ttid2refName = {}\t#dictionary storing the target references which have SNP calls\n\t\trefNameSet = set()\n\t\t\"\"\"\n\t\twriter = csv.writer(open(outputFname, 'w'), delimiter='\\t')\n\t\theader = ['sample', 'snp_id', 'chr', 'pos', 'qual', 'DP', 'minDP4', 'DP4_ratio', 'MQ']\n\t\tmoreHeader = ['GQ', 'GL', 'SB', 'QD', 'sndHighestGL', 'deltaGL']\n\t\t#['AF', 'AC','AN', 'Dels', 'HRun', 'HaplotypeScore','MQ0', 'QD']\t#2011-3-4 useless\n\t\tif VCFOutputType==2:\n\t\t\theader += moreHeader\n\t\tchr_pure_number_pattern = re.compile(r'[a-z_A-Z]+(\\d+)')\n\t\tchr_number_pattern = re.compile(r'chr(\\d+)')\n\t\t\"\"\"\n\t\t\n\t\tindividual_name2col_index = None\n\t\tcol_name2index = None\n\t\tcounter = 0\n\t\treal_counter = 0\n\t\t\n\t\t\n\t\tfor row in reader:\n\t\t\tif row[0] =='#CHROM':\n\t\t\t\trow[0] = 'CHROM'\t#discard the #\n\t\t\t\theader = row\n\t\t\t\tcol_name2index = getColName2IndexFromHeader(header, skipEmptyColumn=True)\n\t\t\t\tindividual_name2col_index = cls.getIndividual2ColIndex(header, col_name2index)\n\t\t\t\tcontinue\n\t\t\telif row[0][0]=='#':\t#2011-3-4\n\t\t\t\tcontinue\n\t\t\t\"\"\"\n\t\t\tif chr_number_pattern.search(row[0]):\n\t\t\t\tchr = chr_number_pattern.search(row[0]).group(1)\n\t\t\telif chr_pure_number_pattern.search(row[0]):\n\t\t\t\tchr = chr_pure_number_pattern.search(row[0]).group(1)\n\t\t\telse:\n\t\t\t\tsys.stderr.write(\"Couldn't parse the chromosome number/character from %s.\\n Exit.\\n\"%(row[0]))\n\t\t\t\tsys.exit(4)\n\t\t\t\"\"\"\n\t\t\tchr = row[0]\n\t\t\trefNameSet.add(chr)\n\t\t\t\n\t\t\tpos = row[1]\n\t\t\tquality = row[5]\n\t\t\t\n\t\t\toutputHet= False\n\t\t\t\n\t\t\tinfo = row[7]\n\t\t\tinfo_ls = info.split(';')\n\t\t\tinfo_tag2value = {}\n\t\t\tfor info in info_ls:\n\t\t\t\ttry:\n\t\t\t\t\ttag, value = info.split('=')\n\t\t\t\texcept:\n\t\t\t\t\t#sys.stderr.write(\"Error in splitting %s by =.\\n\"%info)\t###Error in splitting DS by =.\n\t\t\t\t\tcontinue\n\t\t\t\tinfo_tag2value[tag] = value\n\t\t\t\n\t\t\tcurrent_locus = '%s_%s'%(chr, pos)\n\t\t\trefBase = row[col_name2index['REF']]\n\t\t\taltBase = row[col_name2index['ALT']]\n\t\t\tif VCFOutputType==2:\t#2011-3-4 GATK\n\t\t\t\tformat_column = row[col_name2index['FORMAT']]\n\t\t\t\tformat_column_ls = format_column.split(':')\n\t\t\t\tformat_column_name2index = getColName2IndexFromHeader(format_column_ls)\n\t\t\t\tdata_row = ['NA']*(len(individual_name2col_index)+1)\t# extra 1 for the ref\n\t\t\t\tallele2count = {}\n\t\t\t\tfor individual_name, 
individual_col_index in individual_name2col_index.iteritems():\n\t\t\t\t\tread_group = individual_name\n\t\t\t\t\tif read_group not in read_group2col_index:\n\t\t\t\t\t\tread_group2col_index[read_group] = len(read_group2col_index)\n\t\t\t\t\t\t#2011-9-2\n\t\t\t\t\t\tif isqID2coverage:\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\tisqID = read_group.split('_')[1]\n\t\t\t\t\t\t\t\tisqID = int(isqID)\n\t\t\t\t\t\t\t\tcoverage = isqID2coverage.get(isqID, defaultCoverage)\n\t\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\t\tsys.stderr.write('Except type: %s\\n'%repr(sys.exc_info()))\n\t\t\t\t\t\t\t\timport traceback\n\t\t\t\t\t\t\t\ttraceback.print_exc()\n\t\t\t\t\t\t\t\tsys.stderr.write(\"Coverage for %s not available. use default=%s.\\n\"%(read_group, defaultCoverage))\n\t\t\t\t\t\t\t\tcoverage = defaultCoverage\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tcoverage = defaultCoverage\n\t\t\t\t\t\tread_group2coverage[read_group] = coverage\n\t\t\t\t\t\n\t\t\t\t\tcoverage = read_group2coverage[read_group]\n\t\t\t\t\tgenotype_data = row[individual_col_index]\n\t\t\t\t\tgenotype_data_ls = genotype_data.split(':')\n\t\t\t\t\tgenotype_call_index = format_column_name2index.get('GT')\n\t\t\t\t\tgenotype_quality_index = format_column_name2index.get('GQ')\n\t\t\t\t\tif genotype_quality_index is None:\n\t\t\t\t\t\tgenotype_quality_index = format_column_name2index.get('DP')\n\t\t\t\t\tdepth_index = format_column_name2index.get(\"DP\")\n\t\t\t\t\t#GL_index = format_column_name2index.get('GL')\n\t\t\t\t\tif len(genotype_data_ls)<len(format_column_name2index):\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif depth_index is None or genotype_call_index is None:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t#genotype_quality = genotype_data_ls[genotype_quality_index]\n\t\t\t\t\tgenotype_call = genotype_data_ls[genotype_call_index]\n\t\t\t\t\tdepth = int(genotype_data_ls[depth_index])\n\t\t\t\t\tif depth>maxNoOfReads*coverage or depth<minNoOfReads*coverage:\t#2011-3-29 skip. 
coverage too high or too low\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tallele = 'NA'\n\t\t\t\t\tif genotype_call=='0/1' or genotype_call =='1/0':\t#heterozygous, the latter notation is never used though.\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tGL_list = genotype_data_ls[GL_index]\n\t\t\t\t\t\tGL_list = GL_list.split(',')\n\t\t\t\t\t\tGL_list = map(float, GL_list)\n\t\t\t\t\t\tGL = GL_list[1]\n\t\t\t\t\t\tsndHighestGL = max([GL_list[0], GL_list[2]])\n\t\t\t\t\t\tdeltaGL = GL-sndHighestGL\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tAD = genotype_data_ls[format_column_name2index.get('AD')]\n\t\t\t\t\t\tAD = map(int, AD.split(','))\n\t\t\t\t\t\tminorAlleleCoverage = min(AD)\n\t\t\t\t\t\tmajorAlleleCoverage = max(AD)\n\t\t\t\t\t\t\n\t\t\t\t\t\tif minorAlleleCoverage<=maxMinorAlleleCoverage*coverage and minorAlleleCoverage>=minMinorAlleleCoverage*coverage \\\n\t\t\t\t\t\t\t\tand majorAlleleCoverage<=maxMajorAlleleCoverage*coverage:\n\t\t\t\t\t\t\tDP4_ratio = float(AD[0])/AD[1]\n\t\t\t\t\t\t\tallele = '%s%s'%(refBase, altBase)\n\t\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\t\tdata_row = [individual_name, 'chr%s:%s'%(chr, pos), chr, pos, quality, \\\n\t\t\t\t\t\t\t\t\tdepth, minorAlleleCoverage, DP4_ratio,\\\n\t\t\t\t\t\t\t\t\tinfo_tag2value.get('MQ'), genotype_quality, GL,\\\n\t\t\t\t\t\t\t\t\tinfo_tag2value.get('SB'), info_tag2value.get('QD'), sndHighestGL, deltaGL]\n\t\t\t\t\t\t\t#for i in range(3, len(moreHeader)):\n\t\t\t\t\t\t\t#\tinfo_tag = moreHeader[i]\n\t\t\t\t\t\t\t#\tdata_row.append(info_tag2value.get(info_tag))\n\t\t\t\t\t\t\twriter.writerow(data_row)\n\t\t\t\t\t\t\t\"\"\"\n\t\t\t\t\telif genotype_call=='./.':\t#missing\n\t\t\t\t\t\tcontinue\n\t\t\t\t\telif genotype_call =='1/1':\n\t\t\t\t\t\tallele = '%s%s'%(altBase, altBase)\n\t\t\t\t\telif genotype_call =='0/0':\n\t\t\t\t\t\tallele = '%s%s'%(refBase, refBase)\n\t\t\t\t\tcol_index = read_group2col_index.get(read_group)\n\t\t\t\t\tdata_row[col_index] = allele\n\t\t\t\t\tif allele!='NA':\n\t\t\t\t\t\tif allele not in allele2count:\n\t\t\t\t\t\t\tallele2count[allele] = 0\n\t\t\t\t\t\tallele2count[allele] += 1\n\t\t\t\t\n\t\t\t\tif len(allele2count)>site_type-1:\t#whether polymorphic across samples or all sites in vcf\n\t\t\t\t\treal_counter += 1\n\t\t\t\t\tlocus_id2row_index[current_locus] = len(locus_id2row_index)\n\t\t\t\t\tdata_matrix.append(data_row)\n\t\t\t\"\"\"\n\t\t\telif VCFOutputType==1:\t#samtools. 
2011-7-20 outdated.\n\t\t\t\tsample_id = row[8]\n\t\t\t\tfor tag in info_tag2value.keys():\n\t\t\t\t\tvalue = info_tag2value.get(tag)\n\t\t\t\t\tif tag=='DP4':\n\t\t\t\t\t\ttag = 'DP4_ratio'\n\t\t\t\t\t\tvalue = value.split(',')\n\t\t\t\t\t\tvalue = map(int, value)\n\t\t\t\t\t\tno_of_ref_allele = sum(value[0:2])\n\t\t\t\t\t\tno_of_non_ref_allele = sum(value[2:])\n\t\t\t\t\t\tMAC = min(no_of_ref_allele, no_of_non_ref_allele)\n\t\t\t\t\t\tif MAC<=maxMinorAlleleCoverage and MAC>=minMinorAlleleCoverage:\n\t\t\t\t\t\t\toutputHet = True\n\t\t\t\t\t\t\tvalue = float(no_of_ref_allele)/no_of_non_ref_allele\n\t\t\t\t\t\t\tinfo_tag2value['minDP4'] = min(no_of_ref_allele, no_of_non_ref_allele)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tvalue = None\n\t\t\t\t\t\tinfo_tag2value[tag] = value\n\t\t\t\tif outputHet:\n\t\t\t\t\treal_counter += 1\n\t\t\t\t\toutput_row = [sample_id, 'chr%s:%s'%(chr, pos), chr, pos, quality, info_tag2value.get('DP'), \\\n\t\t\t\t\t\t\t\tinfo_tag2value.get('minDP4'), info_tag2value.get('DP4_ratio'), info_tag2value.get('MQ')]\n\t\t\t\t\twriter.writerow(output_row)\n\t\t\t\"\"\"\n\t\t\tcounter += 1\n\t\t\tif counter%2000==0 and report:\n\t\t\t\tsys.stderr.write(\"%s\\t%s\\t%s\"%(\"\\x08\"*80, counter, real_counter))\n\t\tdel reader\n\t\t\n\t\tcls.outputCallMatrix(data_matrix, refFastaFname, outputFname=outputFname, refNameSet=refNameSet, \\\n\t\t\t\t\tread_group2col_index=read_group2col_index, \\\n\t\t\t\t\tlocus_id2row_index=locus_id2row_index, outputDelimiter=outputDelimiter)\n\t\t\n\t\tsys.stderr.write(\"%s\\t%s\\t%s.\\n\"%(\"\\x08\"*80, counter, real_counter))",
"def main():\n\n (options, args) = parse_options(sys.argv)\n\n iterator = GFFParser.GFFAddingIterator() \n examiner = GFFParser.GFFExaminer()\n\n exon_map = dict()\n\n id_dict = examiner.available_limits(options.anno)['gff_id']\n intron_lists = dict()\n\n ### collect all available sources from gff-file\n source_dict = examiner.available_limits(options.anno)['gff_source_type']\n taken_sources = set()\n #types = ['gene', 'mRNA', 'exon', 'CDS']\n types = ['exon']\n\n ### parse only for exons and let the GFFparser \n ### infer the respective parents (otherwise doubled entries occured)\n ### we sanitize the structure later on anyways\n for key in [source[0] for source in source_dict.keys() if source[1] in types]:\n taken_sources.add(key)\n\n ### try different type, if sources are empty \n if len(taken_sources) == 0:\n types = ['CDS']\n for key in [source[0] for source in source_dict.keys() if source[1] in types]:\n taken_sources.add(key)\n\n ### print taken_sources\n if len(taken_sources) == 0:\n print >> sys.stderr, 'No suitable sources found!'\n sys.exit(-1)\n\n ### only show available sources - if neccessary\n if options.show_sources:\n print 'Parsed file %s\\n' % options.anno\n print 'Following sources are available:\\n'\n for source in taken_sources:\n print source \n print '\\nUse option -s to specify a comma-separated list of sources (-s source1,source2,source3), otherwise all sources are taken'\n sys.exit(0)\n\n if options.sources != '':\n user_sources = set(options.sources.split(','))\n taken_sources = taken_sources.intersection(user_sources)\n if len(taken_sources) == 0:\n print >> sys.stderr, 'The specified sources do not match any of the available sources - Please use option -S to get a list of available sources'\n sys.exit(-1)\n\n if options.verbose:\n print \"take sources %s\" % str(list(taken_sources))\n\n ### build up gff-parsing filter\n gff_sources = []\n for source in taken_sources:\n gff_sources.extend(zip([source] * len(types), types))\n\n ### parse gff-file\n for idx in id_dict.keys():\n print 'parsing chromosome %s' % idx\n if len(gff_sources) > 0:\n trans_dict = iterator.get_all_features(options.anno, {'gff_source_type':gff_sources, 'gff_id':idx})\n else:\n trans_dict = iterator.get_all_features(options.anno, {'gff_id':idx})\n ### since we parse only one chromosome, this loop is evaluated only once\n for chrm in trans_dict.keys():\n ### verify/sanitize the created dictionairy\n fix_structure(trans_dict[chrm])\n intron_lists[chrm] = dict()\n for gene in trans_dict[chrm].features:\n for trans in gene.sub_features:\n if trans.type == 'exon':\n print \"WARNING: Exon on transcript level:\"\n print trans\n print 'will continue\\n'\n continue\n elif len(trans.sub_features) > 1: ### at least two exons for one intron ...\n strand = trans.sub_features[0].strand\n contig_list = [(trans.sub_features[i].location.nofuzzy_start, trans.sub_features[i].location.nofuzzy_end) for i in range(len(trans.sub_features))]\n contig_list.sort(lambda u, v:u[0]-v[0])\n for exon in range(len(contig_list) - 1):\n ### update intron lists\n if contig_list[exon][1] - contig_list[exon + 1][0] == 0:\n continue\n try:\n assert(contig_list[exon][1] < contig_list[exon + 1][0])\n except AssertionError:\n print >> sys.stderr, 'exon_1 %i, exon_2 %i' % (contig_list[exon][1], contig_list[exon + 1][0]) \n print >> sys.stderr, contig_list[exon]\n print >> sys.stderr, contig_list[exon+1]\n print >> sys.stderr, exon\n sys.exit(-1)\n ### for now strand information is only dummy\n intron_lists[chrm][(0, 
contig_list[exon][1], contig_list[exon + 1][0])] = strand\n \n ### update exon map\n for exon in range(len(contig_list)):\n if not exon_map.has_key(chrm):\n exon_map[chrm] = dict()\n\n if not exon_map[chrm].has_key(trans.id):\n exon_map[chrm][trans.id] = dict()\n ### we assume, that an exon cannot occurr twice in the same transcript!\n ### the value in the dict is a binary encoding, if the left/right end is intronic 10 = 2 means, 5' end is intronic\n if len(contig_list) == 1:\n exon_map[chrm][trans.id][contig_list[exon]] = 0 ### 00 -> should never occurr\n elif exon == 0:\n exon_map[chrm][trans.id][contig_list[exon]] = 2 ### 10\n elif exon == len(contig_list) - 1:\n exon_map[chrm][trans.id][contig_list[exon]] = 1 ### 01\n else:\n exon_map[chrm][trans.id][contig_list[exon]] = 3 ### 11 \n\n outfile = open(options.outfile, 'w')\n cPickle.dump(intron_lists, outfile)\n outfile.close()\n \n outfile = open(options.outfile + '.' + 'cov', 'w')\n cPickle.dump(exon_map, outfile)\n outfile.close()",
"def generate_gff( mapfile, funtax_orf_file ):\n annotation2assembly_map = pd.read_table(mapfile,\n names=['annotation','assembly','length'],\n index_col='annotation')\n funtax_gff = pd.read_table( funtax_orf_file.name, engine='python', encoding='ISO-8859-1', quoting=3)\n funtax_gff['seqid'] = funtax_gff.join(annotation2assembly_map, on='Contig_Name')['assembly']\n funtax_gff['source'] = 'Prodigal_v2.00'\n funtax_gff['type'] = 'CDS'\n funtax_gff['score'] = 100.0\n funtax_gff['phase'] = 0\n funtax_gff['attributes'] = funtax_gff['ORF_ID'].str.replace(r'(.*)', r'ID=\\1;')\n return funtax_gff[['seqid','source', 'type','start', 'end', 'score', 'strand','phase','attributes']]",
"def readFasta(self, fp):\n\t\t\n\t\tfor head, seq in self.parseFasta(fp):\n\t\t\t#analyzing the sequence\n\t\t\tself.analyzeSequence(seq)\n\t\t\t#saving the header\n\t\t\tif head == '':\n\t\t\t\tcontinue\n\t\t\telse:\t\n\t\t\t\tself.header.append(head)",
"def sequence(self, f, asstring=True):\n\n assert \"chr\" in f, \"`chr` field required\"\n name = f[\"chr\"]\n\n assert name in self, \"feature: %s not in `%s`\" % (f, self.filename)\n\n fasta = self[f[\"chr\"]]\n\n seq = Fasta.subseq(fasta, f.get(\"start\"), f.get(\"stop\"), f.get(\"strand\"))\n\n if asstring:\n return str(seq)\n\n return seq"
]
| [
"0.75194967",
"0.58702254",
"0.58072424",
"0.57938075",
"0.577255",
"0.5770231",
"0.5736186",
"0.57126784",
"0.5697429",
"0.5694916",
"0.55857223",
"0.5553263",
"0.5482889",
"0.54565287",
"0.54469514",
"0.5441418",
"0.5434659",
"0.541446",
"0.536763",
"0.5361356",
"0.53465176",
"0.5307874",
"0.5300702",
"0.52938324",
"0.5284584",
"0.5282846",
"0.5270726",
"0.52594894",
"0.5235935",
"0.52325016"
]
| 0.7629038 | 0 |
Checks if the sequence provided is a valid fasta sequence. Returns True if the sequence is valid, otherwise returns False. | def seq_validator(sequence):
# checks for ascii characters that should not appear in a fasta sequence
seq_val = re.compile(r"[.-@|\s| -)|z-~|Z-`|EFIJLOPQX|efijlopqx+,]+")
if seq_val.search(sequence) is None:
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def isSequenceValid(sequence):\n if not sequence:\n return False\n allowed_chars = set('GCAU')\n return set(sequence).issubset(allowed_chars)",
"def validate_fasta_seq(sequence):\n sequence = sequence.replace(\" \", \"\")\n sequence.upper()\n regex = re.compile('>\\S*\\n[ACTGNRYSWKMBDHVEFILPQSXZ]*', re.MULTILINE)\n if regex.search(sequence) is not None:\n return True\n else:\n return False",
"def validate_single_fasta_seq(sequence):\n sequence = sequence.replace(\" \", \"\")\n sequence.upper()\n regex = re.compile('>\\S*\\n[ACTGNRYSWKMBDHVEFILPQSXZ]', re.MULTILINE)\n if regex.search(sequence) is not None:\n return True\n else:\n return False",
"def validate_seq(sequence):\n sequence = sequence.strip()\n sequence = sequence.replace(\" \", \"\")\n sequence.upper()\n regex = re.compile('^[ACTGNRYSWKMBDHVEFILPQSXZ]*$', re.I)\n if regex.search(sequence) is not None:\n return True\n else:\n return False",
"def isValidPeptide(self, sequence):\n for position, disallowedAAs in self._rules.iteritems():\n nextAA = sequence[position - 1].upper()\n if nextAA in disallowedAAs:\n return False\n return True",
"def is_valid_sequence(dna):\n \n nucleotides = 'ATCG'\n error = 0\n \n for char in dna:\n if not char in nucleotides:\n error = error + 1\n return error == 0",
"def is_valid_sequence(dna):\n num_char = 0\n \n for char in dna:\n if not char in 'ATCG':\n num_char += 1\n\n return num_char == 0",
"def verify_valid_fasta_format(input_fasta_fp):\r\n\r\n fasta_f = open(input_fasta_fp, \"U\")\r\n\r\n try:\r\n for label, seq in parse_fasta(fasta_f):\r\n continue\r\n except RecordError:\r\n raise RecordError(\"Input fasta file not valid fasta format. Error \" +\r\n \"found at %s label and %s sequence \" % (label, seq))\r\n\r\n fasta_f.close()",
"def is_fasta(filename: Path) -> bool:\n filename = Path(filename)\n if filename.exists():\n fasta = list(SeqIO.parse(str(filename), \"fasta\"))\n return any(fasta)\n else:\n return False",
"def is_legit_DNA_sequence(record_seq: str) -> bool:\n nts = {\"A\", \"G\", \"T\", \"C\", \"N\"}\n seq_symbols = {s.upper() for s in record_seq}\n return seq_symbols.issubset(nts)",
"def _is_proper_sequence(seq):\n return (isinstance(seq, collections.abc.Sequence) and\n not isinstance(seq, str))",
"def CheckSeq(Seq):\n OkNucleo = (\"A\", \"C\", \"G\", \"T\")\n for i in Seq:\n if i not in OkNucleo:\n raise InputError(Seq,\"malformed input\")",
"def is_legit_peptide_sequence(record_seq: str) -> bool:\n aas = {\n \"A\",\n \"C\",\n \"D\",\n \"E\",\n \"F\",\n \"G\",\n \"H\",\n \"I\",\n \"K\",\n \"L\",\n \"M\",\n \"N\",\n \"P\",\n \"Q\",\n \"R\",\n \"S\",\n \"T\",\n \"V\",\n \"W\",\n \"Y\",\n \"*\",\n }\n seq_symbols = {s.upper() for s in record_seq}\n return seq_symbols.issubset(aas)",
"def check_if_fasta(file):\n if os.path.splitext(file)[1] != \".zip\":\n with open(file, \"r\") as handle:\n fasta = SeqIO.parse(handle, \"fasta\")\n return any(fasta)\n else:\n return False",
"def is_aligned_dna(sequence):\r\n #ensure that the given sequence is uppercase\r\n sequence = sequence.upper()\r\n \r\n #replace all A C G and T and compare length with 0\r\n if len(sequence.replace(\"A\", \"\").replace(\"C\", \"\").replace(\"G\",\"\").replace(\"T\",\"\").replace(\"-\",\"\")) == 0:\r\n return True\r\n else:\r\n return False",
"def validate_sequence(outcome):\n from collections.abc import Sequence\n if not isinstance(outcome, Sequence):\n raise ditException('Outcome class is not a sequence.')\n else:\n return True",
"def validate_sequence_numbers(self):\n return self.hive_sequence1() == self.hive_sequence2()",
"def __validate(self, seqdata):\n\n _Sequence = namedtuple('Seq', ['name', 'data'])\n\n # file-like object\n # isinstance(obj, file) does not hold in Py3\n if hasattr(seqdata, 'read') and hasattr(seqdata, 'name'):\n self.logger.debug('Reading data from file-like object {}'.format(seqdata.name))\n fname = seqdata.name\n\n elif isinstance(seqdata, basestring):\n self.logger.debug('Reading data from file path {}'.format(seqdata))\n fname = seqdata\n\n # can be file name string or sequence\n if not os.path.isfile(fname):\n raise OSError('Sequence file not found: {}'.format(seqdata))\n else:\n raise TypeError('Sequence input format not recognized: {}'.format(seqdata))\n\n # parse and validate sequences\n # defining these two a prior just in case later we decide to support more stuff\n _seq_alphabet = IUPACProtein()\n _seq_format = 'fasta'\n\n seq_iterator = SeqIO.parse(seqdata, _seq_format, alphabet=_seq_alphabet)\n for seq_i, seq_record in enumerate(seq_iterator, start=1):\n\n seq_name = seq_record.name\n seq_raw = str(seq_record.seq)\n if not _verify_alphabet(seq_record.seq):\n msg = 'Entry #{} ({}) in {} is not a valid protein sequence'\n raise ParseError(msg.format(seq_i, seq_name, fname))\n\n self.sequences.append(_Sequence(seq_name, seq_raw))\n\n return self.sequences",
"def valid(self):\n try:\n if self.getPret() > 0 and self.getAn() > 0 and self.validProgram(self.getProgram()):\n return True\n except:\n return False\n return False",
"def __validate_pdu_sequence(pdu_sequence: PDUs) -> None:\n if not isinstance(pdu_sequence, (tuple, list)):\n raise TypeError(\"'pdu_sequence' is not list or tuple type\")\n if not all([isinstance(pdu, AbstractPDU) for pdu in pdu_sequence]):\n raise ValueError(\"'pdu_sequence' does not contain AbstractPDU instances only\")",
"def isValidSequence(self, root: TreeNode, arr: List[int]) -> bool:\n self.arr = arr\n self.found = False\n\n self.dfs(root, [root.val])\n\n return self.found",
"def validate_csv_seq(sequence):\n if sequence.find(',') != -1 or sequence.find(';') != -1:\n return True\n else:\n return False",
"def __contains__(self, seq):\n return bool(libhts.faidx_has_seq(self._fai, seq))",
"def verifyFasta(head,seq,pred):\n\treturn True",
"def check_gapped(sequence):\n w_regexp = re.compile('n|N')\n regexp_obj = w_regexp.search(sequence)\n if (regexp_obj):\n return True\n else:\n return False",
"def validate_bowtie_seq(sequence_file):\n too_long = False\n for seq_record in SeqIO.parse(sequence_file, \"fasta\"):\n seq = str(seq_record.seq)\n\n if len(seq) > 1000:\n too_long = True\n return too_long",
"def test_check_fasta_seqs_all_valid(self):\r\n\r\n # Test against all valid data\r\n\r\n sample_barcodes = set(['ACCATACC', 'CCAGATTACG'])\r\n sample_primers = set(['ACATTATTTT', 'TTATTACCGAT'])\r\n total_seq_count = 3\r\n\r\n perc_invalid_chars, perc_barcodes_detected, perc_primers_detected,\\\r\n perc_bcs_seq_start =\\\r\n check_fasta_seqs(self.sample_fasta_fp, sample_barcodes,\r\n sample_primers, total_seq_count)\r\n\r\n expected_perc_invalid_chars = \"%1.3f\" % 0\r\n expected_perc_barcodes_detected = \"%1.3f\" % 0\r\n expected_perc_primers_detected = \"%1.3f\" % 0\r\n\r\n self.assertEqual(perc_invalid_chars, expected_perc_invalid_chars)\r\n self.assertEqual(perc_barcodes_detected,\r\n expected_perc_barcodes_detected)\r\n self.assertEqual(perc_primers_detected,\r\n expected_perc_primers_detected)",
"def check_fasta(filename):\n fa_exts = [\".fa\", \".fasta\"]\n p, ext = os.path.splitext(filename)\n if not ext.lower() in fa_exts: # TODO: check if bowtie2, STAR handle gzipped fa files\n raise ValueError(\"Error: \\\"\" + filename + \"\\\" does not match expected extensions: \" + str(fa_exts))",
"def validate_strand(strand: str) -> bool:\n strand = strand.upper()\n count = dict(Counter(strand))\n for k in count.keys():\n if k not in NUCLEOTIDES:\n raise Exception(\"Invalid DNA sequence\")\n return True",
"def check_seq_file(filename):\n if filename.endswith(\".vcf.gz\") or filename.endswith(\".bcf\"):\n return all([file_exists(filename), file_exists(filename + \".csi\")])\n if filename.endswith(\".bam\"):\n return all([file_exists(filename), file_exists(filename + \".bai\")])"
]
| [
"0.76752746",
"0.75949574",
"0.7480253",
"0.72784495",
"0.7131281",
"0.69375044",
"0.6605551",
"0.64831626",
"0.6424644",
"0.63862854",
"0.6311217",
"0.6299898",
"0.62989056",
"0.61712056",
"0.61065215",
"0.6092603",
"0.60289305",
"0.5995795",
"0.5978732",
"0.5963744",
"0.5960768",
"0.59573984",
"0.595407",
"0.59470594",
"0.59341776",
"0.59339035",
"0.5885375",
"0.5846877",
"0.575519",
"0.57293683"
]
| 0.79304975 | 0 |
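Illustrative usage sketch, not part of the dataset record above: the seq_validator document assumes re is imported at module level, and its character class flags header characters such as '>', whitespace, digits, punctuation and letters outside the IUPAC nucleotide alphabet. The function is repeated below with hypothetical inputs so the sketch is self-contained and runnable.

import re

def seq_validator(sequence):
    # Character class copied verbatim from the document above: it matches header
    # characters such as '>', whitespace, digits, punctuation and letters outside
    # the IUPAC nucleotide alphabet (e.g. E, F, I, J, L, O, P, Q, X, Z), so any
    # hit means the string is not a bare fasta sequence body.
    seq_val = re.compile(r"[.-@|\s| -)|z-~|Z-`|EFIJLOPQX|efijlopqx+,]+")
    if seq_val.search(sequence) is None:
        return True
    return False

print(seq_validator("ACGTACGTNNACGT"))   # True  - plain nucleotide string passes
print(seq_validator(">seq1\nACGTACGT"))  # False - header line and newline are rejected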
Display the top 10 retail traded stocks for the last days | def display_top_retail(
limit: int = 3, export: str = "", sheet_name: Optional[str] = None
):
retails = nasdaq_model.get_retail_tickers()
if retails.empty:
return
for date, df in retails.head(limit * 10).groupby("Date"):
df = df.drop(columns=["Date"])
df = df.reset_index(drop=True)
print_rich_table(
df,
headers=[x.title() for x in df.columns],
show_index=False,
title=f"[bold]{date} Top Retail:[/bold]",
export=bool(export),
)
console.print("")
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"rtat",
retails,
sheet_name,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def top_performers(self, market, country, number_of_stocks=5, percent_change=1):\n # creating api-object\n # using defined function above to retrieve dataframe of all instruments\n instruments = self.instruments_with_meta_data()\n # filtering out the instruments with correct market and country\n filtered_instruments = instruments.loc[(instruments['market'] == market) & (instruments['country'] == country)]\n # creating new, empty dataframe\n stock_prices = pd.DataFrame()\n # looping through all rows in filtered dataframe\n for index, instrument in filtered_instruments.iterrows():\n # fetching the stock prices for the current instrument\n instrument_stock_price = self._borsdata_api.get_instrument_stock_prices(int(instrument['ins_id']))\n # calculating the current instruments percent change\n instrument_stock_price['pct_change'] = instrument_stock_price['close'].pct_change(percent_change)\n # getting the last row of the dataframe, i.e. the last days values\n last_row = instrument_stock_price.iloc[[-1]]\n # appending the instruments name and last days percent change to new dataframe\n stock_prices = stock_prices.append({'stock': instrument['name'], 'pct_change': round(last_row['pct_change'].values[0]*100, 2)}, ignore_index=True)\n # printing the top sorted by pct_change-column\n print(stock_prices.sort_values('pct_change', ascending=False).head(number_of_stocks))\n return stock_prices",
"def get_top_10(df):\n\n grouped_df = df.groupby(\"country\").max()\n\n # Confirmed cases\n print(grouped_df.sort_values(\"confirmed\",\n ascending=False)[\"confirmed\"][:10])\n\n # Deaths\n print(grouped_df.sort_values(\"deaths\", ascending=False)[\"deaths\"][:10])\n\n # Recoveries\n print(grouped_df.sort_values(\"recovered\",\n ascending=False)[\"recovered\"][:10])\n\n a = grouped_df.sort_values(\"recovered\", ascending=False)[\"recovered\"][:10]\n print(a.to_markdown())",
"def get_crypto_daily_price(cryptotickers = [], allData=False,limit = 90):\n api_key = os.getenv(\"CC_API\")\n ticker_list = cryptotickers\n crypto_df = pd.DataFrame()\n\n for ticker in ticker_list:\n #if allData is true, then it gets all the data available. If not, select data according to limit.\n if allData:\n url = f\"https://min-api.cryptocompare.com/data/v2/histoday?fsym={ticker}&tsym=USD&allData=true&api_key={api_key}\"\n else:\n url = f\"https://min-api.cryptocompare.com/data/v2/histoday?fsym={ticker}&tsym=USD&limit={limit}&api_key={api_key}\"\n \n raw_data = read_json(url)\n #print(json.dumps(raw_data, indent=5))\n df = pd.DataFrame(raw_data['Data']['Data'])\n df['time'] = pd.to_datetime(df['time'],unit='s')\n df.set_index(df['time'], inplace=True)\n df['close'] = df['close'].astype(float)\n crypto_df[ticker] = df['close']\n \n #\n new_columns = pd.MultiIndex.from_product([ crypto_df.columns, [\"close\"] ])\n crypto_df.columns = new_columns\n\n return crypto_df",
"def trending(request):\n assert isinstance(request, HttpRequest)\n try:\n stocks = StockList.objects.all()\n hold = []\n count = 0\n except StockList.DoesNotExist:\n return print(\"No Stocks Available\")\n\n\n\n while len(hold) < 8:\n for stock in stocks:\n stock.trend = stock.positiveSentimentCount + stock.negativeSentimentCount\n if stock.trend>= count:\n hold.append(stock)\n count = stock.trend\n \n \n\n context = {\n 'title': 'Trending',\n 'year': datetime.now().year,\n 'user': request.user,\n 'stocks': stocks,\n 'hold': hold,\n\n }\n\n \n return render(\n request,\n 'app/trending.html',\n context,\n )",
"def get_todays_posts():\n \n return sorted(requests.get(TODAY_URL).json()['hunts'], \n key=lambda post: post['rank'])",
"def getTopMovies(endpoint, date, count=10):\n\n try:\n response = urlreq.urlopen(endpoint.format(date))\n soup = BeautifulSoup(response.read(), \"html.parser\")\n table = soup.find('table', border=\"0\", cellpadding=\"5\", cellspacing=\"1\")\n tdata = []\n\n for i, row in enumerate(table.find_all('tr')[1:], start=1):\n if i > count:\n break\n\n cells = row.find_all('td')\n tdict = {}\n\n tdict['rank'] = i\n tdict['title'] = cells[2].text.strip()\n tdict['daily_gross'] = int(re.sub(r'[^\\d]', '', cells[4].text))\n tdict['theaters'] = int(re.sub(r'[^\\d]', '', cells[7].text))\n tdict['todate_gross'] = int(re.sub(r'[^\\d]', '', cells[9].text))\n tdict['release_day'] = int(cells[10].text)\n\n tdata.append(tdict)\n\n tdata = pd.DataFrame(tdata)\n tdata['gross_date'] = date\n return tdata\n\n except urlerr.URLError as err:\n print(\"\\nThere was an error retrieving daily revenue information\")\n print(err)\n return None\n except Exception:\n print(\"\\nThere's something wrong with the BOMojo daily revenue page\")\n return None",
"def trending(request):\n\titems = Item.objects.all()\n\ttrending = []\n\n\tfor item in Item.objects.order_by('-dailyVisits'):\n\t\t#Include items that have been uploaded within the past day and havent been sold\n\t\tif (date.today() - item.datePosted).days <= 0 and item.sold_to == None:\n\t\t\tif (len(trending) <= 5):\n\t\t\t\ttrending.append(item)\n\t\telse:\n\t\t\titem.dailyVisits = 0\n\t\t\titem.save()\n\n\t#If there are not enough items in the trending list, add older items to the list\n\tif len(trending) <= 5:\n\t\tfor item in Item.objects.order_by('-dailyVisits'):\n\t\t\tif ((len(trending) <= 5) and (item.sold_to == None) and (item not in trending)):\n\t\t\t\ttrending.append(item)\n\n\tcontext_dict = {\"trendingItems\": trending[0:3], \"search_bar\" :Search_bar()}\n\treturn render(request, 'tailored/index.html', context_dict)",
"def top_products(data_frame):\n data_frame.loc[:, 'total_payment'] = (data_frame['unit_rental_price']\n * data_frame['quantity_rented']\n * data_frame['rental_period_months'])\n data_set = data_frame.groupby(['product_name']).agg({'total_payment': 'sum'})\n data_set = data_set.nlargest(10, 'total_payment')\n return data_set",
"def display_top_lobbying(\n limit: int = 10,\n raw: bool = False,\n export: str = \"\",\n sheet_name: Optional[str] = None,\n external_axes: bool = False,\n):\n df_lobbying = quiverquant_model.get_top_lobbying()\n\n if df_lobbying.empty:\n return\n\n df_lobbying[\"Amount\"] = df_lobbying.Amount.astype(float).fillna(0) / 100_000\n\n lobbying_by_ticker = pd.DataFrame(\n df_lobbying.groupby(\"Ticker\")[\"Amount\"].agg(\"sum\")\n ).sort_values(by=\"Amount\", ascending=False)\n\n df = lobbying_by_ticker.head(limit)\n\n fig = OpenBBFigure(xaxis_title=\"Ticker\", yaxis_title=\"Total Amount ($100k)\")\n fig.set_title(f\"Corporate Lobbying Spent since {df_lobbying['Date'].min()}\")\n\n fig.add_bar(\n x=df.index,\n y=df.Amount,\n name=\"Amount ($100k)\",\n marker_color=theme.get_colors(),\n )\n\n export_data(\n export,\n os.path.dirname(os.path.abspath(__file__)),\n \"lobbying\",\n df_lobbying,\n sheet_name,\n fig,\n )\n\n if raw:\n return print_rich_table(\n lobbying_by_ticker,\n headers=[\"Amount ($100k)\"],\n show_index=True,\n title=\"Top Lobbying Tickers\",\n export=bool(export),\n limit=limit,\n )\n\n return fig.show(external=external_axes)",
"def do_top(cs, args):\n resp, data = cs.repositories.get_top(args.count)\n utils.print_list(data, ['name', 'count'], sortby='count')",
"def top(self, **kwargs):\n return self.client.api.top(self.id, **kwargs)",
"def get_stock_price(df_excld):\n\n ts = TimeSeries(os.environ['ALPHA_VANTAGE_KEY'])\n\n info = []\n symbols = []\n counter = 0\n\n for t in df_excld['Ticker']:\n\n if counter % 5 == 0:\n time.sleep(65)\n\n i, m = ts.get_daily(symbol=t, outputsize='full')\n info.append(i)\n symbols.append(m['2. Symbol'])\n counter += 1\n\n return info, symbols",
"def top_ten(request):\n if request.method == 'GET':\n movies = Movie.objects.filter(date_of_release__lte=datetime.date.today())\n movies = movies.order_by('-rating')[:10]\n serializer = MovieSerializer(movies, many=True)\n return Response(serializer.data)",
"def get_esg_top_ten(self,\n measure: ESGMeasure,\n pricing_date: dt.date = None):\n return self._get_esg_ranked_card(ESGCard.TOP_TEN_RANKED, measure, pricing_date)",
"def active_roast():\n c = mongo.db[app.config['INVENTORY_COLLECTION']]\n items = c.find({'user': current_user.get_id()})\n output = list()\n for x in items:\n x['id'] = str(x['_id'])\n if int(x['stock']) < 100:\n continue\n if app.config['SIMULATE_ROAST'] and x['label'] != 'Test Beans':\n continue\n output.append(x)\n output.sort(key=lambda x: x['datetime'], reverse=True)\n return render_template('roast.html', inventory=output)",
"def gather_stock_data(tickers, save=True):\n prices = pd.DataFrame()\n ts = TimeSeries(key='EY2QBMV6MD9FX9CP', output_format='pandas')\n\n for ticker in tickers:\n successful_grab = False\n ticker_daily_adj = None\n\n while successful_grab is not True:\n try:\n ticker_daily_adj = ts.get_daily_adjusted(ticker, outputsize='full')[0]\n successful_grab = True\n except ValueError:\n print('Waiting for API to let me in')\n time.sleep(10)\n\n ticker_daily_adj.loc[:, '0. ticker'] = ticker\n ticker_daily_adj = ticker_daily_adj[sorted(ticker_daily_adj.columns)]\n\n prices = pd.concat([prices, ticker_daily_adj])\n\n prices.sort_index(inplace=True)\n prices.reset_index(inplace=True)\n prices['date'] = pd.to_datetime(prices['date'])\n if save:\n prices.to_csv('stockdata.csv', index=True)\n\n return prices",
"async def get_top_trending_tags_summary():\n # Same results, more overhead:\n #return [tag['name'] for tag in await get_trending_tags('', 50)]\n sql = \"\"\"\n SELECT category\n FROM hive_posts_cache\n WHERE is_paidout = '0'\n GROUP BY category\n ORDER BY SUM(payout) DESC\n LIMIT 50\n \"\"\"\n return query_col(sql)",
"def old_start_to_scrape_stocks():\n # the way it works is: 20 stocks are displayed per page, and the r= paramater in the url tells where to start listing with the stocks\n res = req.get(stocks_url.format('1'), headers={'user-agent': ua.random})\n soup = bs(res.content, 'lxml')\n # get last page number to get pages that need to be iterated through\n last_page_num = int(soup.findAll('a', {'class': 'screener-pages'})[-1].text)\n # the last page should be the (last page number - 1) * 20 + 1\n last_r = (last_page_num - 1) * 20 + 1 + 1 # add another one for range to work\n for p in range(21, last_r, 20):\n pass",
"def getLatestItems():\n return session.query(\n Item.name.label('i_n'),\n Category.name.label(\n 'c_n')).outerjoin(\n Category,\n Item.category_id == Category.id).order_by(\n Item.time.desc()).filter(Item.time > 0).limit(10).all()",
"def get_popular_stocks(self):\n response = requests.get('https://brokerage-static.s3.amazonaws.com/popular_stocks/data.json')\n response_json = response.json()\n assert len(response_json) == 1\n return response_json[0]",
"def get_max_increase_from_yesterday():\n start = datetime.now() - timedelta(2)\n end = datetime.now()\n f = open(\"nasdaqtraded.txt\")\n stock_symbols, nasdaq_stock_symbols = extract_symbols(f)\n\n result = {}\n\n for i in range(len(nasdaq_stock_symbols)):\n symbol = nasdaq_stock_symbols[i]\n if isinstance(symbol, str):\n if not symbol.isalpha():\n continue\n else:\n continue\n\n try:\n df = get_data(start, end, symbol)\n except:\n continue\n\n if len(df['Close'].tolist()) != 2:\n continue\n yes, td = df['Close'].tolist()[:]\n\n inc = (td - yes) / yes\n if len(result) < 100:\n result[symbol] = inc\n else:\n min = list(result.keys())[0]\n for key in result:\n if result[key] < result[min]:\n min = key\n if result[min] > inc:\n result.pop(min)\n result[symbol] = inc\n print(result)\n return result",
"def getTopTen():\n\n if moviesRanked > 10:\n return moviesRanked[0:10]\n else: \n return moviesRanked",
"def main():\n print \"Apple Inc. (AAPL) Daily Closing Prices:\"\n for i in stock_data:\n t_data = i.findAll('td', {\"class\":\"yfnc_tabledata1\"})\n if len(t_data) is 7:\n date = t_data[0].contents[0]\n close = t_data[6].contents[0]\n print (\"Date: {}, Closing Price: {}\").format(date, close)",
"def top_rated(self, **kwargs):\n\n path = self._get_movie_id_path('top_rated')\n resp = self._get_method(path, kwargs)\n return resp",
"def history():\n\n rows = db.execute('SELECT operation, symbol, shares, price, date FROM transactions WHERE id = :id',\n id=session['user_id'])\n\n return render_template('history.html', stocks=rows[::-1])",
"def stock_data_query(ticker=['AMZN'], db_name='test_stock_raw', collection_name='amzn_raw', past_days=5*365):\n\n stock_db = db_client[db_name]\n stock_collection = stock_db[collection_name]\n\n if collection_name in stock_db.list_collection_names():\n past_days_start = datetime.now() - timedelta(days=past_days+5) #manually added 5 more days\n today = datetime.now()\n\n # query stock data for the past 30 days ago from today\n query_result = stock_collection.find({ 'Stock':ticker,\n 'Datetime': {'$gte': past_days_start, '$lte': today}})\n \n raw_df = pd.DataFrame(list(query_result)).sort_values(by=['Datetime'], ascending=True)\n\n else:\n raw_df = pd.DataFrame({'Datetime':[]})\n print(f'Query process interrupted... No collection {collection_name} in DB {db_name} exists!!')\n\n return raw_df",
"def top_ten(subreddit):\n url = \"https://www.reddit.com/r/\" + subreddit + \"/hot.json?limit=10\"\n identify = {\"User-Agent\": \"Requests library from Python\",\n \"From\": \"[email protected]\"}\n to_print = []\n hot = requests.get(url, headers=identify, allow_redirects=False)\n if hot.status_code == 404:\n print(\"None\")\n return 0\n if hot.status_code == 200:\n hot = hot.json()\n hot = hot[\"data\"]\n hot = hot[\"children\"]\n for items in hot:\n del items[\"kind\"]\n for data in hot:\n to_print.append(data[\"data\"])\n hot = to_print\n to_print = []\n for dictio in hot:\n to_print.append(dictio[\"title\"])\n for itera in to_print:\n print(itera)",
"def top_ten(subreddit):\n\n limit = \"10\"\n\n url = \"https://www.reddit.com/r/{}/hot.json?limit={}\".format(subreddit,\n limit)\n\n user_agent = {\"User-Agent\": \"Python\"}\n response = requests.get(url, headers=user_agent, allow_redirects=False)\n if response.status_code >= 300:\n print(\"None\")\n else:\n for elem in response.json().get(\"data\").get(\"children\"):\n print(elem.get(\"data\").get(\"title\"))",
"def top_ten(subreddit):\n header = {\"User-Agent\": \"Holberton\"}\n url = \"https://www.reddit.com/r/{}/hot.json?limit=10\".format(subreddit)\n response = requests.get(url, headers=header, allow_redirects=False)\n if response.status_code == 200:\n\n for item in response.json().get(\"data\", None).get(\"children\", None):\n print(item.get(\"data\", None).get(\"title\", None))\n else:\n print(None)\n return",
"def top_ten(subreddit):\n req = get(\n \"https://www.reddit.com/r/{}/hot.json\".format(subreddit),\n headers={\n \"User-Agent\": \"alx_app\"},\n params={\n \"limit\": 10},\n allow_redirects=False)\n if req.status_code != 200:\n print(None)\n else:\n posts = req.json().get(\"data\").get(\"children\")\n for post in posts:\n print(post.get(\"data\").get(\"title\"))"
]
| [
"0.60119677",
"0.600845",
"0.59116054",
"0.5886201",
"0.5875277",
"0.5846949",
"0.5815054",
"0.5814599",
"0.5805478",
"0.5791384",
"0.57870483",
"0.5783525",
"0.5782132",
"0.5722076",
"0.57188046",
"0.570357",
"0.5679663",
"0.5674654",
"0.562725",
"0.5620135",
"0.56172734",
"0.561492",
"0.56001407",
"0.5598137",
"0.55846477",
"0.5578953",
"0.55783474",
"0.552998",
"0.5515211",
"0.55116314"
]
| 0.68973005 | 0 |
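Illustrative sketch, not part of the record above: the display_top_retail document depends on OpenBB helpers (nasdaq_model.get_retail_tickers, print_rich_table, export_data, console). The hypothetical standalone snippet below reproduces only the core logic it demonstrates - take the first limit * 10 rows and print one block of tickers per date - using plain pandas and a toy frame.

import pandas as pd

def show_top_retail(retails: pd.DataFrame, limit: int = 3) -> None:
    # Mirrors the document's loop: first limit*10 rows, grouped and printed per date
    if retails.empty:
        return
    for date, df in retails.head(limit * 10).groupby("Date"):
        block = df.drop(columns=["Date"]).reset_index(drop=True)
        print(f"{date} Top Retail:")
        print(block.to_string(index=False), "\n")

# Toy stand-in for nasdaq_model.get_retail_tickers(): 10 rows per trading day
toy = pd.DataFrame({
    "Date": ["2023-01-03"] * 10 + ["2023-01-04"] * 10,
    "Ticker": [f"TICK{i:02d}" for i in range(20)],
    "Activity": [round(2.5 - 0.1 * i, 2) for i in range(20)],
})
show_top_retail(toy, limit=2)  # prints two dated blocks of ten rows each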
Display NASDAQ dividend calendar | def display_dividend_calendar(
date: Optional[str] = None,
sortby: str = "Dividend",
ascend: bool = False,
limit: int = 10,
export: str = "",
sheet_name: Optional[str] = None,
):
if date is None:
date = datetime.today().strftime("%Y-%m-%d")
div_map = {
"symbol": "Symbol",
"companyName": "Name",
"dividend_Ex_Date": "Ex-Dividend Date",
"payment_Date": "Payment Date",
"record_Date": "Record Date",
"dividend_Rate": "Dividend",
"indicated_Annual_Dividend": "Annual Dividend",
"announcement_Date": "Announcement Date",
}
calendar = nasdaq_model.get_dividend_cal(date)
if calendar.empty:
console.print(
"No data found. Check that the date provided is a market day. If it is then try this function"
" again as the request may have not gone through.\n"
)
return
calendar = calendar.drop(columns=["announcement_Date"])
calendar.columns = calendar.columns.map(div_map)
calendar = calendar.sort_values(by=sortby, ascending=ascend)
print_rich_table(
calendar,
headers=[x.title() for x in calendar.columns],
title=f"[bold]Dividend Calendar for {date}[/bold]",
export=bool(export),
limit=limit,
)
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"divcal",
calendar,
sheet_name,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cal():\n this_cal = Kalendar()\n to_display = \"\"\n\n for elements in this_cal.get_all_elements():\n to_display += elements[\"key\"] + \":<BR>\"\n for element in elements[\"value\"]:\n to_display += \" \" + str(element) + \"<BR>\"\n\n return to_display",
"def display_calendar(daze, month, year):\n log = daze.dateDict\n if not year:\n year = date.today().year # defaults to this year\n if month:\n first = date(year, month, 1)\n last = max([day for day in cal.itermonthdates(year, month) if day.month == month])\n s, ndates, firstdate, lastdate = daze.summarize(firstdate=first, lastdate=last)\n else:\n s, ndates, firstdate, lastdate = daze.summarize()\n places = sorted(s, key=s.get, reverse=True)\n colors = ['green', 'magenta', 'white', 'cyan', 'blue', 'red', 'yellow']\n months = calendar.month_name[1:]\n dates = [firstdate + timedelta(days=i) for i in range((lastdate - firstdate).days + 1)]\n\n matches = {p: c for (p, c) in zip(places, colors)}\n\n for (p, c) in matches.items():\n click.secho(\" %s \" % p, bg=c, fg='black', bold=True)\n\n for _date in dates:\n if _date.day == 1 or _date == firstdate:\n click.echo('')\n click.echo(\"\\n\" + months[_date.month - 1])\n if (_date.isoweekday() != 7):\n click.echo(\" \" * 3 * _date.isoweekday(), nl=False)\n if _date in log:\n p = log[_date]\n click.secho(\"%s\" % str(_date.day).rjust(3),\n fg='black',\n bg=matches[p],\n nl=(_date.isoweekday() == 6))\n else:\n click.secho(\"%s\" % str(_date.day).rjust(3),\n fg='black', nl=(_date.isoweekday() == 6))\n\n click.echo('\\n\\n\\n')",
"def display(self, date):\n mod = date.get_modifier()\n cal = date.get_calendar()\n qual = date.get_quality()\n start = date.get_start_date()\n newyear = date.get_new_year()\n\n qual_str = self._qual_str[qual]\n \n if mod == Date.MOD_TEXTONLY:\n return date.get_text()\n elif start == Date.EMPTY:\n return \"\"\n elif mod == Date.MOD_SPAN:\n d1 = self.display_cal[cal](start)\n d2 = self.display_cal[cal](date.get_stop_date())\n scal = self.format_extras(cal, newyear)\n return \"%s%s %s %s %s%s\" % (qual_str, 'des de', d1, 'fins a', d2, scal)\n elif mod == Date.MOD_RANGE:\n d1 = self.display_cal[cal](start)\n d2 = self.display_cal[cal](date.get_stop_date())\n scal = self.format_extras(cal, newyear)\n return \"%s%s %s %s %s%s\" % (qual_str, 'entre', d1, 'i', d2, scal)\n else:\n text = self.display_cal[date.get_calendar()](start)\n scal = self.format_extras(cal, newyear)\n return \"%s%s%s%s\" % (qual_str, self._mod_str[mod], text, scal)",
"def print_calendar(month, year):\n print MONTH_NAME[month - 1] + ', ' + str(year)\n\n calendar = calculate_date(month, year)\n for i in DAY_NAME:\n print(i),\n\n print\n\n for i in range(len(calendar)):\n if calendar[i] == 0:\n print(align_day_block(0)),\n else:\n print(align_day_block(calendar[i])),\n\n if i % 7 == 0:\n print",
"def nasaCalendar(self):\n return requests.get(self.nasaURL).text",
"def command_show(calendar):\n cal = {k: v for k, v in sorted(calendar.items(), key=lambda item: (\n item[0][0:4], item[0][5:7], item[0][8:]))}\n cal = {k: v for k, v in sorted(\n calendar.items(), key=lambda item: item[1][0][\"start\"])}\n\n cal_str = \"\\n\"\n for key in cal.keys():\n cal_str += f\"{key} : \\n\"\n\n for event in cal[key]:\n for sub_key in event.keys():\n if sub_key in (\"start\", \"end\"):\n cal_str += f\" {sub_key} : {str(event[sub_key]).zfill(2)}:00,\\n\"\n else:\n cal_str += f\" {sub_key} : {event[sub_key]}\\n\"\n if len(cal[key]) > 1:\n cal_str += \"\\n\"\n cal_str = cal_str.rstrip()\n cal_str += \"\\n\"\n return cal_str.rstrip()",
"def calender(self, month, year):\n\n day = ['S', ' M', ' T', ' W', ' Th', 'F', ' S']\n\n days = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n\n values = 1\n d = 1\n\n m = month\n y = year\n y0 = y - (14 - m) // 12\n x = y0 + y0 // 4 - y0 // 100 + y0 // 400\n m0 = m + 12 * ((14 - m) // 12) - 2\n d0 = (d + x + 31 * m0 // 12) % 7\n\n if utility_obj.isleap_year(str(year)):\n days[1] = 29\n row = 6\n column = 7\n two_d_array = [[0 for j in range(column)] for i in range(row)]\n\n print('Your Calender is Ready\\n')\n\n for i in range(0, 6 + 1):\n print(day[i], end=' ')\n print()\n for i in range(row):\n\n for j in range(column):\n\n if values <= days[m - 1]:\n if i == 0 and j < d0:\n two_d_array[i][j] = ' '\n continue\n\n two_d_array[i][j] = values\n values += 1\n\n for i in range(row):\n\n for j in range(column):\n if two_d_array[i][j] != 0:\n x = two_d_array[i][j]\n x1 = str(x).ljust(2)\n print(x1, end=\" \")\n\n print()",
"def banner(self):\n\t\trundays = 0\n\t\tsqr = self.sqc.cursor()\n\t\tsqr.execute(\"SELECT value FROM sord WHERE name = 'gdays'\")\n\t\tfor value in sqr.fetchall():\n\t\t\trundays = value[0]\n\t\tthismsg = \"\\r\\n\"+self.cntransi(self.ESC+\"32mSaga Of The Red Dragon\"+self.ESC+\"0m\")+\"\\r\\n\"\n\t\tthismsg += self.cntransi(self.ESC+\"32m\"+self.ESC+\"1m\"+self.config.host)+\"\\r\\n\"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.cntransi(self.ESC+\"32mCompiled June 25, 2009: Version \"+self.ESC+\"1m\"+self.ESC+\"37m\"+self.config.version+self.ESC+\"0m\")+\"\\r\\n\"\n\t\tthismsg += self.cntransi(self.ESC+\"22m\"+self.ESC+\"32m(c) pre-2009 by Someone Else\\r\\n\\r\\n\"+self.ESC+\"0m\")+\"\\r\\n\"\n\t\tthismsg += self.cntransi(self.ESC+\"32m\"+self.ESC+\"1m\"+self.ESC+\"37mREGISTERED TO \"+self.ESC+\"0m\"+self.ESC+\"1m\"+self.ESC+\"34m\"+self.config.admin+self.ESC+\"0m\")+\"\\r\\n\\r\\n\"\n\t\tthismsg += self.cntransi(self.ESC+\"32mThe current game has been running for \"+self.ESC+\"1m\"+str(rundays)+self.ESC+\"22m game days.\\r\\n\"+self.ESC+\"0m\")+\"\\r\\n\"\n\t\tthismsg += self.cntransi(self.ESC+\"32mPlayers are deleted after \"+self.ESC+\"1m\"+str(self.config.delinactive)+self.ESC+\"22m real days of inactivity.\"+self.ESC+\"0m\")+\"\\r\\n\"\n\t\tthismsg += self.cntransi(self.ESC+\"32mPlayers are enjoying \"+self.ESC+\"1m\"+str(self.config.ffight)+self.ESC+\"22m forest fights per day.\"+self.ESC+\"0m\")+\"\\r\\n\"\n\t\tthismsg += self.cntransi(self.ESC+\"32mPlayers are enjoying \"+self.ESC+\"1m\"+str(self.config.pfight)+self.ESC+\"22m player fights per day.\"+self.ESC+\"0m\")+\"\\r\\n\"\n\t\tthismsg += self.cntransi(self.ESC+\"32mPlayers are enjoying \"+self.ESC+\"1m\"+str(self.config.bankinterest)+\"%\"+self.ESC+\"22m interest at the bank per day.\"+self.ESC+\"0m\")+\"\\r\\n\"\n\t\tthismsg += self.cntransi(self.ESC+\"32mThe current game day is \"+self.ESC+\"1m\"+str(self.config.daylength)+self.ESC+\"22m real hours long.\\r\\n\"+self.ESC+\"0m\")+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"32m (\"+self.ESC+\"1mE\"+self.ESC+\"22m)nter the realm of the Dragon\"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"32m (\"+self.ESC+\"1mL\"+self.ESC+\"22m)ist Warriors\"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"32m (\"+self.ESC+\"1mI\"+self.ESC+\"22m)nstructions\"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"32m (\"+self.ESC+\"1mQ\"+self.ESC+\"22m)uit the game server\\r\\n\"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"32m Your choice, warrior? [\"+self.ESC+\"1mE\"+self.ESC+\"22m]: \"+self.ESC+\"0m\"+self.ESC+\"0m \"\n\t\tsqr.close()\n\t\treturn thismsg",
"def show_cal(request, year=None, month=None):\n if year == None:\n # get the current comic as a starting point\n lToday = Comic.objects.filter(published=True).order_by('-date')[0].date\n year = lToday.year\n month = lToday.month\n\n return calendar(request, year, month)",
"def get_chartshow(html, year, month, day):\n soup = BeautifulSoup(html)\n results = \"\"\n\n if len(soup.findAll(\"table\", {\"class\": \"chartshow\"})) > 0:\n for l in soup.findAll(\"table\", {\"class\": \"chartshow\"})[0].findAll(\"tr\")[2:]:\n cells = l.findAll(\"td\")\n results = results + \",\".join([cells[0].contents[0], cells[3].contents[0], cells[4].contents[0]]) + \"\\n\"\n else:\n for p in soup.findAll('p'):\n if len(p.contents) == 41:\n for el in p:\n if el is not None and len(el) > 0 and re.match(\"^[0-9].*$\", el):\n results = results + el.strip() + \"\\n\"\n return results",
"def date_tag():\n import pylab\n pylab.figtext(0.04, 0.02, str(datetime.datetime.today())[:16], size=8)",
"def abrirCalendar():\n try:\n var.dlgcalendar.show()\n except Exception as error:\n print('Error: %s ' % str(error))",
"def display_calendar_redo(daze, year, month):\n log = daze.dateDict\n\n # Set first and last dates\n if year is None:\n year = date.today().year\n if month is None:\n first = date(year, 1, 1)\n if year == date.today().year:\n last = date.today()\n else:\n last = date(year, 12, 31)\n else:\n first = date(year, month, 1)\n last = date(2016, month, calendar.monthrange(2016, month)[1])\n\n # Get summarized data\n s, ndates, firstdate, lastdate = daze.summarize()\n places = sorted(s, key=s.get, reverse=True)\n colors = ['green', 'magenta', 'white', 'cyan', 'blue', 'red', 'yellow']",
"def get_tradingview_ecocal(width, height, show_copyright):\n return_data = ''\n theme = get_sa_theme()\n tradingview_copyright = ''\n\n if str(width) == '0':\n width = '\"100%\"'\n if str(height) == '0':\n height = '\"100%\"'\n\n if str(show_copyright) == '1':\n tradingview_copyright = ''+\\\n '<div class=\"tradingview-widget-copyright\">'+\\\n '<a href=\"https://www.tradingview.com/markets/currencies/economic-calendar/\" rel=\"noopener\" target=\"_blank\">'+\\\n '<span class=\"blue-text\">Economic Calendar</span></a> by TradingView'+\\\n '</div>'\n\n return_data = '' +\\\n '<div class=\"tradingview-widget-container\">'+\\\n ' <div class=\"tradingview-widget-container__widget\"></div>'+\\\n tradingview_copyright+\\\n ' <script type=\"text/javascript\" '+\\\n 'src=\"https://s3.tradingview.com/external-embedding/embed-widget-events.js\" async>'+\\\n ' {'+\\\n ' \"colorTheme\": \"'+ theme +'\",'+\\\n ' \"isTransparent\": true,'+\\\n ' \"width\": '+ width +','+\\\n ' \"height\": '+ height +','+\\\n ' \"locale\": \"en\",'+\\\n ' \"importanceFilter\": \"-1,0,1\"'+\\\n '}'+\\\n ' </script>'+\\\n '</div>'\n return return_data",
"def calendar(self):\r\n self.cal = QCalendarWidget()\r\n self.cal.setWindowTitle(\"Get Birthday\")\r\n self.cal.show()\r\n self.cal.clicked.connect(self.dateB)",
"def get_chartshow_html(year, month, day):\n u = CHARTSHOW_URL.format(year=year, month=str(month).zfill(2), day=str(day).zfill(2))\n return urlopen(u, timeout=60).read()",
"def day(d):\n\t\tx = db.cquery(\"day\",d)\n\t\tprint \"Total:\", x[0]\n\t\tf = raw_input(\"[L]ist [N]ew overview or [B]ack to home \").lower()\n\t\tif f == \"l\":\n\t\t\tfor i in x[1]:\n\t\t\t\tprint ui.statsid(), i[0], i[1], \" \", ui.statstimein(), i[2], ui.statstimeout(), i[3]\n\t\t\traw_input(\"[Enter] to go back to search\")\n\t\t\thome_stats()\n\t\telif f == \"n\":\n\t\t\thome_stats()\n\t\telif f == \"b\":\n\t\t\thome()\n\t\telse:\n\t\t\tpass",
"def on_btnCalendar_clicked(self, widget):\n try:\n variables.semaforo = 1\n variables.vencalendar.connect('delete-event', lambda w, e: w.hide() or True)\n variables.vencalendar.show()\n\n except:\n print('error abrir calendario')",
"def main():\n# year = int(input(\"Enter year for calendar: \"))\n# first_day = first_day_of_year(year)\n\n # Loop through months 1 through 12\n # for month in range(1, NUM_MONTHS + 1):\n# first_day = print_month(first_day, month, year)\n\n canvas = make_canvas(CANVAS_WIDTH, CANVAS_HEIGHT, 'Calendar')\n # present the header, today's date\n\n top_rows(canvas)\n # present two buttons: weekly display and monthly display\n weekly_display_type = True\n date_to_present = date.today()\n #button_weekly(canvas,weekly_display_type,date_to_present)\n #button_monthly(canvas, weekly_display_type, date_to_present)\n # present weekly display\n canvas.update()\n canvas.mainloop()",
"def formatDay(self, themonth, date, num_weeks):\n if date.month == themonth:\n day_class = 'day'\n else:\n day_class = 'noday' # day outside month\n\n html = '<td class=\"%s' % day_class\n\n # if this is today then highlight it\n if date == self.today:\n html += ' today'\n today_text = 'Today '\n else:\n today_text = ''\n\n # if this is the selected date then tag it\n if date == self.selected_date or (self.selected_record\n and date == self.selected_record.start_date):\n html += ' selected'\n # if a filter range is set then tag it\n elif (self.filter_start_date and self.filter_finish_date\n and self.filter_start_date <= date\n and date <= self.filter_finish_date):\n html += ' filtered'\n\n html += ('\" style=\"height: %f%%\"><div class=\"%s_header\">'\n '<a class=\"block\" '\n 'href=\"?year=%d&month=%d&day=%d&clear_recording_id=1\">'\n '%s%d</a></div>' % (90.0 / num_weeks, day_class,\n date.year, date.month, date.day, today_text, date.day))\n\n if self._storage:\n for recording in self._storage.getRecordings(date,\n station=self.filter_station):\n extra_div_class = \"\"\n if (self.selected_record\n and recording.id == self.selected_record.id):\n extra_div_class += \" selected_entry\"\n if ((self.filter_title and self.filter_title\n != recording.title)\n or (self.filter_start_date and self.filter_start_date\n > recording.finish_time.date())\n or (self.filter_finish_date and self.filter_finish_date\n < recording.start_time.date())):\n extra_div_class += \" filtered_out\"\n html += ('<div class=\"day_entry%s\"><a class=\"block\" '\n 'href=\"?year=%d&month=%d&recording_id=%d'\n '&set_recording_id=1\">\\n'\n '<span class=\"recording_time\">%s</span>\\n'\n '<span class=\"recording_station\">%s</span>\\n'\n '<span class=\"recording_title\">%s</span>\\n'\n '</a></div>\\n' % (extra_div_class, date.year,\n date.month, recording.id,\n formatTimeUI(recording.start_time, compact=True),\n formatStationName(recording.station, compact=True),\n recording.title))\n\n return html + '</td>'",
"def get_date_display(self, context):\n return '{year}/{month}/{day}'.format(\n year=self.get_year(),\n month=self.get_month().zfill(2),\n day=self.get_day().zfill(2))",
"def name(self):\n return 'D07 Indice de calidad de vida por manzanas'",
"def today():\n this_cal = Kalendar()\n to_display = \"TODAY:<BR><BR>\"\n\n elements = this_cal.get_all_day_elements(datetime.datetime.now())\n for element in elements:\n for key, values in element.items():\n to_display += key + \":<BR>\"\n for val in values:\n to_display += \" \" + val + \"<BR>\"\n\n return to_display",
"def display(self):\n caldesc = settings.CALENDAR_DESC[self.type]\n if \"display\" in caldesc:\n return caldesc[\"display\"](self)\n else:\n return self.pretty_duration()",
"def search_display_date(self):\n return ''",
"def build_period_rdi_chart(nuts_totals_df, start_date=None,\n end_date=None, charts_label=None,\n elem_fig_id=None,\n vits_fig_id=None,\n macros_fig_id=None):\n # calc num days\n if start_date is not None and end_date is not None:\n delta = end_date - start_date\n num_days = float(delta.days)\n print(f'num days: {num_days}')\n else:\n num_days = 1.\n\n usr_life_stg = ''\n usr_type = ''\n usr_age = ''\n usr_active_lvl = \"\"\n if current_user.is_authenticated:\n usr_life_stg = current_user.lifestage_grp\n usr_type = current_user.person_type\n usr_age = current_user.age\n usr_active_lvl = current_user.active_level\n\n # df of nuts by category with field values as %\n elems_df = pd.DataFrame(columns=list(rdi_elems_dict.keys()))\n vits_df = pd.DataFrame(columns=list(rdi_vits_dict.keys()))\n macros_df = pd.DataFrame(columns=list(rdi_macros_dict.keys()))\n\n # fill row 0 of each nut_type df with percentages\n for idx, row in nuts_totals_df.iterrows():\n # todo: need to process and take out brackets, extra words\n cnf_nut = row['Name'].lower()\n cnf_nut = preprocess_cnf_nuts(cnf_nut)\n cnf_amt = float(row['Value'])\n # todo: take out micro symbol from units but not used as units\n # taken from dicts_arrs in def find_type\n cnf_units = row['Units']\n if '\\xb5g' in cnf_units:\n cnf_units = cnf_units.replace(\"\\xb5g\", \"ug\")\n nut_type, rdi_nut, multiplier = find_type(cnf_nut, cnf_elems_dicts)\n if nut_type == \"\":\n nut_type, rdi_nut, multiplier = find_type(cnf_nut, cnf_vits_dicts)\n if nut_type == \"\":\n nut_type, rdi_nut, multiplier = find_type(cnf_nut, cnf_macros_dicts)\n\n # get start and exclusive end idx of rdi_df\n start_idx, end_idx = get_lifestage_idxs(usr_type)\n if nut_type == 'element':\n elems_df = fill_nut_df(nut_type, start_idx, end_idx, usr_life_stg,\n cnf_nut, cnf_amt, multiplier,\n elems_df,\n usr_type, usr_age, usr_active_lvl, num_days)\n\n elif nut_type == 'vitamin':\n vits_df = fill_nut_df(nut_type, start_idx, end_idx, usr_life_stg,\n cnf_nut, cnf_amt, multiplier,\n vits_df,\n usr_type, usr_age, usr_active_lvl, num_days)\n\n elif nut_type == 'macronutrient':\n macros_df = fill_nut_df(nut_type, start_idx, end_idx, usr_life_stg,\n cnf_nut, cnf_amt, multiplier,\n macros_df,\n usr_type, usr_age, usr_active_lvl, num_days)\n\n # make bar charts and html.Div containing them, return\n # style chart\n elem_colors = color_bars(elems_df)\n vits_colors = color_bars(vits_df)\n macros_colors = color_bars(macros_df)\n\n fig_elems = go.Figure(data=[go.Bar(\n x=list(elems_df.columns),\n y=list(elems_df.iloc[0]),\n marker_color=elem_colors\n )])\n fig_elems.update_layout(title_text=f'elements for{charts_label}')\n fig_vits = go.Figure(data=[go.Bar(x=list(vits_df.columns),\n y=list(vits_df.iloc[0]),\n marker_color=vits_colors)])\n fig_vits.update_layout(title_text=f'vitamins for{charts_label}')\n fig_macros = go.Figure(data=[go.Bar(x=list(macros_df.columns),\n y=list(macros_df.iloc[0]),\n marker_color=macros_colors)])\n fig_macros.update_layout(title_text=f\"macronutrients for{charts_label}\")\n\n figs_div = html.Div([\n dcc.Graph(\n figure=fig_elems,\n id=elem_fig_id\n ),\n dcc.Graph(\n figure=fig_vits,\n id=vits_fig_id\n ),\n dcc.Graph(\n figure=fig_macros,\n id=macros_fig_id\n )\n ])\n return figs_div",
"def display_month(month_name, num_days):\n print(\"---------------------------\")\n print(\" \" + str(month_name))\n print(\"---------------------------\")\n for x in range(1, num_days + 1):\n if x < 7:\n print(str(x) + \" \", end=\" \")\n elif x == 7:\n print(str(x) + \" \", end=\" \")\n print()\n elif x < 10:\n print(str(x) + \" \", end=\" \")\n elif x < 14:\n print(str(x) + \" \", end=\" \")\n elif x == 14:\n print(str(x) + \" \", end=\" \")\n print()\n elif x < 21:\n print(str(x) + \" \", end=\" \")\n elif x == 21:\n print(str(x) + \" \", end=\" \")\n print()\n elif x < 28:\n print(str(x) + \" \", end=\" \")\n elif x == 28:\n print(str(x) + \" \", end=\" \")\n print()\n elif x < num_days:\n print(str(x) + \" \", end=\" \")\n elif x == num_days:\n print(str(x) + \" \", end=\" \")\n print()\n print(\"---------------------------\")",
"def test_hcal(self):\n hc = microformats.models.hCalendar()\n hc.summary = 'Important Meeting'\n hc.location = 'BBC in London'\n hc.url = 'http://www.bbc.co.uk/'\n hc.dtstart = datetime.datetime(2009, 4, 11, 13, 30)\n hc.dtend = datetime.datetime(2009, 4, 11, 15, 30)\n hc.description = 'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.'\n hc.street_address = 'Broadcasting House'\n hc.extended_address = 'Portland Place'\n hc.locality = 'London'\n hc.region = ''\n hc.country_name = 'GB'\n hc.postal_code = 'W1A 1AA'\n hc.save()\n hc.save()\n result = hcal(hc, autoescape=True)\n expected = u'\\n<div id=\"hcalendar_1\" class=\"vevent\">\\n <a href=\"http://www.bbc.co.uk/\" class=\"url\">\\n \\n <abbr title=\"2009-04-11T13:30:00\" class=\"dtstart\">Sat 11 Apr 2009 1:30 p.m.</abbr>\\n \\n \\n - \\n \\n <abbr title=\"2009-04-11T15:30:00\" class=\"dtend\">All day event</abbr>\\n \\n \\n : \\n <span class=\"summary\">Important Meeting</span>\\n at <span class=\"location\">BBC in London</span>\\n </a>\\n \\n<div class=\"adr\">\\n <div class=\"street-address\">Broadcasting House</div>\\n <div class=\"extended-address\">Portland Place</div>\\n <span class=\"locality\">London</span> \\n \\n <span class=\"postal-code\">W1A 1AA</span> \\n <span class=\"country-name\">United Kingdom</span>\\n</div>\\n\\n <p class=\"description\">Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.</p> \\n</div>\\n'\n self.assertEquals(expected, result)\n # Make sure things render correctly *if* all_day_event = True\n hc.all_day_event = True\n hc.save()\n result = hcal(hc, autoescape=True)\n expected = u'\\n<div id=\"hcalendar_1\" class=\"vevent\">\\n <a href=\"http://www.bbc.co.uk/\" class=\"url\">\\n \\n <abbr title=\"2009-04-11T13:30:00\" class=\"dtstart\">Sat 11 Apr 2009</abbr>\\n \\n \\n - \\n \\n <abbr title=\"2009-04-11T15:30:00\" class=\"dtend\">All day event</abbr>\\n \\n \\n : \\n <span class=\"summary\">Important Meeting</span>\\n at <span class=\"location\">BBC in London</span>\\n </a>\\n \\n<div class=\"adr\">\\n <div class=\"street-address\">Broadcasting House</div>\\n <div class=\"extended-address\">Portland Place</div>\\n <span class=\"locality\">London</span> \\n \\n <span class=\"postal-code\">W1A 1AA</span> \\n <span class=\"country-name\">United Kingdom</span>\\n</div>\\n\\n <p class=\"description\">Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. 
Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.</p> \\n</div>\\n'\n self.assertEquals(expected, result)\n hc.all_day_event = False\n hc.save()\n # Lets cut things down to the essentials with a different end date\n hc.url = ''\n hc.location = ''\n hc.description = ''\n hc.street_address = ''\n hc.extended_address = ''\n hc.locality = ''\n hc.region = ''\n hc.country_name = ''\n hc.postal_code = ''\n hc.dtend = datetime.datetime(2009, 4, 15, 15, 30)\n hc.save()\n result = hcal(hc, autoescape=True)\n expected = u'\\n<div id=\"hcalendar_1\" class=\"vevent\">\\n \\n \\n <abbr title=\"2009-04-11T13:30:00\" class=\"dtstart\">Sat 11 Apr 2009 1:30 p.m.</abbr>\\n \\n \\n - \\n \\n <abbr title=\"2009-04-15T15:30:00\" class=\"dtend\">3:30 p.m.</abbr>\\n \\n \\n : \\n <span class=\"summary\">Important Meeting</span>\\n \\n \\n \\n<div class=\"adr\">\\n \\n \\n \\n \\n \\n \\n</div>\\n\\n \\n</div>\\n'\n self.assertEquals(expected, result)\n # Absolute minimum\n hc.dtend = None\n hc.dtstart = datetime.datetime(2009, 4, 15)\n result = hcal(hc, autoescape=True)\n # We probably want to separate the date and time of dtstart and\n # dtend so we don't default to midnight... ToDo: Fix date/time\n expected = u'\\n<div id=\"hcalendar_1\" class=\"vevent\">\\n \\n \\n <abbr title=\"2009-04-15T00:00:00\" class=\"dtstart\">Wed 15 Apr 2009 midnight</abbr>\\n \\n \\n : \\n <span class=\"summary\">Important Meeting</span>\\n \\n \\n \\n<div class=\"adr\">\\n \\n \\n \\n \\n \\n \\n</div>\\n\\n \\n</div>\\n'\n self.assertEquals(expected, result)",
"def output(self):\n return self.cal.to_ical()",
"def output(self):\n return self.cal.to_ical()"
]
| [
"0.61509955",
"0.6010468",
"0.58811027",
"0.5843441",
"0.5834613",
"0.5769455",
"0.57268524",
"0.56952477",
"0.5682419",
"0.5652854",
"0.55936015",
"0.55887353",
"0.55171776",
"0.55142546",
"0.5458473",
"0.5439535",
"0.535989",
"0.53140265",
"0.5308999",
"0.5267314",
"0.5266001",
"0.5262649",
"0.5248085",
"0.5241326",
"0.51889455",
"0.51670676",
"0.51668555",
"0.5162946",
"0.5133069",
"0.5133069"
]
| 0.67086595 | 0 |
This function will write a sitemap index file that references individual sitemaps for all the batches, issues, pages and titles that have been loaded. | def write_sitemaps():
sitemap_index = open('static/sitemaps/sitemap.xml', 'w')
sitemap_index.write('<?xml version="1.0" encoding="UTF-8"?>\n<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">\n')
max_urls = 50000
page_count = 0
url_count = 0
sitemap_file = None
for loc, last_mod in sitemap_urls():
# if we've maxed out the number of urls per sitemap
# close out the one we have open and open a new one
if url_count % max_urls == 0:
page_count += 1
if sitemap_file:
sitemap.write('</urlset>\n')
sitemap.close()
sitemap_file = 'sitemap-%05d.xml' % page_count
sitemap_path = 'static/sitemaps/%s' % sitemap_file
_logger.info("writing %s" % sitemap_path)
sitemap = open(sitemap_path, 'w')
sitemap.write('<?xml version="1.0" encoding="UTF-8"?>\n<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">\n')
sitemap_index.write('<sitemap><loc>http://chroniclingamerica.loc.gov/%s</loc></sitemap>\n' % sitemap_file)
# add a url to the sitemap
sitemap.write("<url><loc>http://chroniclingamerica.loc.gov%s</loc><lastmod>%s</lastmod></url>\n" % (loc, rfc3339(last_mod)))
url_count += 1
# necessary to avoid memory bloat when settings.DEBUG = True
if url_count % 1000 == 0:
reset_queries()
# wrap up some open files
sitemap.write('</urlset>\n')
sitemap.close()
sitemap_index.write('</sitemapindex>\n')
sitemap_index.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write_sitemap ( self ):\n try:\n self.output_fd = open ( file=dflt_cfg.DFLT_CFG[ OUTPUT_PATH ], mode='w' )\n self.print_url_links ( self.root )\n except (PermissionError, AttributeError) as err:\n self.logger.error ( \"Error {0} occurred. Output file {1} cannot be created\".format ( err, \\\n dflt_cfg.DFLT_CFG[\n OUTPUT_PATH ] ) )\n except Exception as err:\n self.logger.error ( \"Error {0} occurred while writing sitemap in output file: {1}\".format ( err, \\\n dflt_cfg.DFLT_CFG[ OUTPUT_PATH ] ) )\n self.output_fd.close ( )\n else:\n print(\"Sitemap for {} is written in {}.\".format(dflt_cfg.DFLT_CFG[DOMAIN], dflt_cfg.DFLT_CFG[ OUTPUT_PATH ]))\n print( \"Logs (Broken or dead URLs along with application logs) for domain {0} are available in {1} directory.\".format ( dflt_cfg.DFLT_CFG[DOMAIN], \"./logs\" ) )\n self.output_fd.close ( )",
"def build_finished(app, exception):\n filename = join(app.outdir, \"sitemap.xml\")\n\n links_iter = status_iterator(sorted(app.sitemap_links), \"adding links to sitemap... \", \"brown\", len(app.sitemap_links), app.verbosity)\n\n try:\n with open(filename, \"w\") as f:\n f.write(_header)\n for link in links_iter:\n http_link = escape(link.strip().replace(\"https://\", \"http://\"))\n f.write(_item.format(link=http_link))\n f.write(_footer)\n except OSError as e:\n raise SphinxError(f\"cannot write sitemap.txt, reason: {e}\")",
"def save(self):\n for page in self.pages.get_published_pages():\n site_path = page.path_to_page.replace('.md', '').replace(\n self.source_path, '').strip('/')\n save_path = self.output_path\n\n # ensure we are not creating a directory for the index file that\n # that lives at the source_path\n if page.full_path() != f'{self.source_path}{os.sep}index.md':\n site_path = slugify_path(site_path)\n save_path = os.path.join('', self.output_path, site_path)\n\n try:\n os.makedirs(save_path, exist_ok=True)\n except Exception as e:\n log((f'unable to create directories: {save_path}'\n f' because: {e}'), True)\n continue\n\n try:\n save_file = os.path.join(save_path, 'index.html')\n log(f'saving {save_file}')\n\n published = self.pages.get_published_pages()\n prev_page = self.pages.get_previous_page(page)\n next_page = self.pages.get_next_page(page)\n content = page.render(published_pages=published,\n previous_page=prev_page, next_page=next_page)\n write(save_file, content)\n except Exception as e:\n log(f'unable to save file: {save_file} -- {e}', True)\n\n unpublished = self.pages.get_unpublished_pages()\n if len(unpublished):\n log('')\n log('these pages were unpublished and not rendered:', True)\n for up in unpublished:\n log(up.path_to_page, True)\n log('')\n\n # build the _tags pages\n for tag, pages in self.tags.pages.items():\n content = self.tags.render(tag, pages)\n tag_index_dir = f'{self.tag_dir}/{slugify(tag)}'\n tag_index = f'{tag_index_dir}/index.html'\n os.makedirs(tag_index_dir, exist_ok=True)\n write(tag_index, content)\n\n log('finished builidng site')",
"def _newfile(counter):\n name = '%s/sitemap-%s.xml.gz' % (settings.SITEMAPS_DIR,\n counter)\n fp = gzip.open(name, 'wb')\n fp.write(\"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<urlset xmlns=\"http://www.sitemaps.org/schemas/sitemap/0.9\">\\n\"\"\")\n return fp",
"def generate(self, conf, env, data):\n\n path = joinurl(conf['output_dir'], self.path)\n sm = Map()\n\n pages = []\n for i in env.globals.pages:\n pages.append(i.slug)\n for i in env.globals.entrylist:\n pages.append(i.slug)\n\n if exists(path) and not self.modified and not conf.modified:\n event.skip('sitemap', path)\n raise StopIteration\n for ns, fname in self.files:\n\n if ns == 'page' or ns == 'entry' or ns == 'index':\n pass\n else:\n continue\n\n permalink = '/' + fname.replace(conf['output_dir'], '')\n permalink = rchop(permalink, 'index.html')\n url = conf['www_root'] + permalink\n\n priority, changefreq = self.scores.get(ns, (0.5, 'weekly'))\n\n _pages = env.globals.entrylist + env.globals.pages\n\n for i in _pages:\n if permalink.strip('/') in pages:\n if i.slug.strip('/') == permalink.strip('/'):\n pages.remove(permalink.strip('/'))\n video = re.search('youtube.com/(v/|watch\\?v=|embed/)([a-zA-Z0-9\\-_]*)', i.content)\n if video:\n yt = video.group().split('/')[-1]\n sm.add(url, yt, i.title, i.metadesc, getmtime(fname), changefreq, priority)\n\n sm.finish()\n yield sm, path",
"def sitemap_urls():\n for batch in m.Batch.objects.all():\n yield batch.url, batch.released\n yield rdf_uri(batch), batch.released\n for issue in batch.issues.all():\n yield issue.url, batch.released\n yield rdf_uri(issue), batch.released\n for page in issue.pages.all():\n yield page.url, batch.released\n yield rdf_uri(page), batch.released\n\n paginator = Paginator(m.Title.objects.all(), 10000)\n for page_num in range(1, paginator.num_pages + 1):\n page = paginator.page(page_num)\n for title in page.object_list:\n yield title.url, title.created",
"def sitemap():\n pages=[]\n ten_days_ago=datetime.now() - timedelta(days=10).date().isoformat()\n # static pages\n for rule in current_app.url_map.iter_rules():\n if \"GET\" in rule.methods and len(rule.arguments)==0:\n pages.append(\n [rule.rule,ten_days_ago]\n )\n \n # user model pages\n users=User.query.order_by(User.modified_time).all()\n for user in users:\n url=url_for('user.pub',name=user.name)\n modified_time=user.modified_time.date().isoformat()\n pages.append([url,modified_time]) \n\n sitemap_xml = render_template('frontend/sitemap_template.xml', pages=pages)\n response= make_response(sitemap_xml)\n response.headers[\"Content-Type\"] = \"application/xml\" \n \n return responseapp.route('/')",
"def write(self):\n\n mr = MakoRenderer(self._TEMPLATE, StaticFlowEnvironment.rootTemplatePath, {'sitemap':self})\n result = mr.render()\n\n if not mr.success:\n self.site.writeLogError(unicode(mr.errorMessage))\n return False\n\n if not FileUtils.putContents(result, self.targetPath):\n self.site.writeLogError(u'Unable to save sitemap file at: \"%s\"' % self.targetPath)\n return False\n\n self.site.writeLogSuccess(u'SITEMAP', u'Created sitemap at: \"%s\"' % self.targetPath)\n return True",
"def sitemap():\n pages = []\n pages.append(['http://mattcarter.co/', '2018-07-21'])\n pages.append(['http://mattcarter.co/blog', '2018-07-21'])\n\n content_path = os.path.join(app.root_path, 'content')\n for file in os.listdir(content_path):\n if not file.endswith('.md'):\n continue\n full_path = os.path.join(content_path, file)\n post_obj = parse_markdown_post(full_path)\n url = 'http://mattcarter.co/blog/%s' % file.replace('.md', '')\n last_mod = post_obj.date\n pages.append([url, last_mod])\n\n response= make_response(render_template(\"sitemap.xml\", pages=pages))\n response.headers['Content-Type'] = 'application/xml'\n return response",
"def gen_links(self, sitemap):\n ctr = 0\n # links = []\n with open(sitemap, 'r') as fh:\n logger.info('Loading sitemap file %s' % sitemap)\n\n js = json.load(fh)\n logger.info('Loaded, number of packages: %s' % len(js))\n\n for rec in js:\n try:\n burl = utils.strip_leading_slash(rec['url'])\n artifact_detected = rec['artifact_detected']\n if not artifact_detected:\n continue\n\n artifact_id = utils.get_last_url_segment(burl)\n versions = [x['v'] for x in rec['versions']]\n if len(versions) == 0:\n continue\n\n group_id = get_group_id(burl)\n for cur_version in pick_versions(versions):\n url = '%s/%s' % (burl, cur_version)\n base_name = '%s-%s' % (artifact_id, cur_version)\n meta = {'burl': burl,\n 'artifact_id': artifact_id,\n 'group_id': group_id,\n 'max_version': cur_version\n }\n pom_link = '%s/%s.pom' % (url, base_name)\n yield Request(pom_link, callback=self.spider.parse_pom, meta=dict(meta))\n\n ctr += 1\n\n except Exception as e:\n logger.error('Exception in parsing %s' % e)\n logger.debug(traceback.format_exc())\n\n # logger.info('Generated %s links' % len(links))\n # return links",
"def write_main_index(self):\n\n for miEntry in self.mainIndex:\n self.db_file.write(miEntry.get_representation())",
"def sitemap():\n host_components = urlparse(request.host_url)\n host_base = host_components.scheme + \"://\" + host_components.netloc\n\n static_url_list = {'blog_index':'main.blog_index'}\n # Static routes with static content\n static_urls = list()\n for key, value in static_url_list.items():\n print 'rule', key, value \n url = {\n \"loc\": \"{}/{}\".format(host_base, url_for(value))\n }\n static_urls.append(url)\n\n # Dynamic routes with dynamic content\n dynamic_urls = list()\n blog_posts = Post.query.all()\n for post in blog_posts:\n if post.is_blog is True:\n url_ext = url_for('.blogpost', id=post.id, header=post.header)\n else:\n url_ext = url_for('main.post', id=post.id)\n\n url = {\n \"loc\": \"{}/{}\".format(host_base, url_ext),\n \"lastmod\": post.timestamp.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n }\n dynamic_urls.append(url)\n\n xml_sitemap = render_template(\"sitemap.xml\", static_urls=static_urls, dynamic_urls=dynamic_urls, host_base=host_base)\n response = make_response(xml_sitemap)\n response.headers[\"Content-Type\"] = \"application/xml\"\n\n return response",
"def sitemap_xml(self):\n if self.should_noindex():\n return []\n return self.main_menu()",
"def save(self, content_dir):\n print_verbose(\n \"INFO : Writing random HTML documents to files...\",\n self.args.verbose,\n )\n for i in range(self.n):\n dir_path = content_dir + \"/\" + \"staticpage\" + str(i)\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n index_file = os.path.join(dir_path, \"index.html\") \n with open(index_file, \"w\") as file:\n file.write(self.doc_strings[i].decode(\"utf-8\"))",
"def walkthrough(software_map):\n\n for i in software_map:\n\n if not i[\"is_file\"]:\n\n # for each directory: make a index.md\n dname = \"./docs/\" + i[\"name\"]\n index = \"./docs/\" + i[\"name\"] + \"/index.md\"\n print(index)\n os.mkdir(dname)\n\n with open(index, \"w+\") as f:\n\n children = i[\"children\"]\n\n # list files\n f.write(\"Files:\\n\\n\")\n for i in children:\n if i[\"is_file\"]:\n\n fname = i[\"name\"]\n fext = fname.split(\".\")\n if len(fext) == 2:\n fext = fext[1]\n else:\n fext = \"none\"\n # for each file, note name and extension\n f.write(fname + \" : \" + fext + \"\\n\")\n\n # list subdirectories\n f.write(\"\\nSubdirectories:\\n\\n\")\n for i in children:\n if not i[\"is_file\"]:\n\n dirname = i[\"name\"]\n\n # note the number of files and subdirs in it\n num_files, num_dirs = 0, 0\n for child in i[\"children\"]:\n if child[\"is_file\"]:\n num_files += 1\n elif not child[\"is_file\"]:\n num_dirs += 1\n\n # note down name and numbers for each dir\n f.write(dirname + \" : \" + str(num_files) + \" files, \" +\n str(num_dirs) + \" directories\\n\")\n\n # goto subdir\n if len(i[\"children\"]) > 0:\n walkthrough(i[\"children\"])",
"def index_indexes(folder,saveAs=\"index2.html\"):\n indexes=[]\n saveAs=os.path.abspath(os.path.join(folder,saveAs))\n for subFolder in glob.glob(folder+\"/*/\"):\n if os.path.exists(subFolder+\"/SWH2P/index.html\"):\n indexes.append(os.path.abspath(subFolder+\"/SWH2P/index.html\"))\n\n html='<html><body><h1>Automatic Index</h1><ul>'\n for item in sorted(indexes):\n html+='<li><a href=\"%s\">%s</a>'%(item,\n os.path.basename(os.path.dirname(os.path.dirname(item))))\n html+='</ul></body></html>'\n with open(saveAs,'w') as f:\n f.write(html)\n print(\"saved\",saveAs)\n webbrowser.open(saveAs)",
"def cmd_sitemap(arguments):\r\n\r\n # create a sitemap job\r\n job = jobs.encode(jobs.sitemap())\r\n\r\n # get a producer\r\n producer = getProducer()\r\n\r\n # get an item, set content and make it ready\r\n item = producer.item()\r\n item.content = job\r\n producer.ready(item)\r\n\r\n print \"Sitemap command put in queue\"\r\n\r\n return 0",
"def write_root_index(self):\n self.logger.info('writing package index')\n temp_dir = self.output_path / 'simple'\n with tempfile.NamedTemporaryFile(mode='w', dir=str(temp_dir),\n encoding='utf-8',\n delete=False) as index:\n try:\n index.file.write('<!DOCTYPE html>\\n')\n index.file.write(\n tag.html(\n tag.head(\n tag.title('Pi Wheels Simple Index'),\n tag.meta(name='api-version', value=2),\n ),\n tag.body(\n (tag.a(package, href=package), tag.br())\n for package in self.package_cache\n )\n )\n )\n except BaseException:\n index.delete = True\n raise\n else:\n os.fchmod(index.file.fileno(), 0o644)\n os.replace(index.name,\n str(self.output_path / 'simple' / 'index.html'))",
"def sitemap_xml(request, project):\n\n def priorities_generator():\n \"\"\"\n Generator returning ``priority`` needed by sitemap.xml.\n\n It generates values from 1 to 0.1 by decreasing in 0.1 on each\n iteration. After 0.1 is reached, it will keep returning 0.1.\n \"\"\"\n priorities = [1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2]\n yield from itertools.chain(priorities, itertools.repeat(0.1))\n\n def hreflang_formatter(lang):\n \"\"\"\n sitemap hreflang should follow correct format.\n\n Use hyphen instead of underscore in language and country value.\n ref: https://en.wikipedia.org/wiki/Hreflang#Common_Mistakes\n \"\"\"\n if '_' in lang:\n return lang.replace(\"_\", \"-\")\n return lang\n\n def changefreqs_generator():\n \"\"\"\n Generator returning ``changefreq`` needed by sitemap.xml.\n\n It returns ``weekly`` on first iteration, then ``daily`` and then it\n will return always ``monthly``.\n\n We are using ``monthly`` as last value because ``never`` is too\n aggressive. If the tag is removed and a branch is created with the same\n name, we will want bots to revisit this.\n \"\"\"\n changefreqs = ['weekly', 'daily']\n yield from itertools.chain(changefreqs, itertools.repeat('monthly'))\n\n if project.privacy_level == constants.PRIVATE:\n raise Http404\n\n sorted_versions = sort_version_aware(\n Version.internal.public(\n project=project,\n only_active=True,\n ),\n )\n\n # This is a hack to swap the latest version with\n # stable version to get the stable version first in the sitemap.\n # We want stable with priority=1 and changefreq='weekly' and\n # latest with priority=0.9 and changefreq='daily'\n # More details on this: https://github.com/rtfd/readthedocs.org/issues/5447\n if (\n len(sorted_versions) >= 2 and\n sorted_versions[0].slug == LATEST and\n sorted_versions[1].slug == STABLE\n ):\n sorted_versions[0], sorted_versions[1] = sorted_versions[1], sorted_versions[0]\n\n versions = []\n for version, priority, changefreq in zip(\n sorted_versions,\n priorities_generator(),\n changefreqs_generator(),\n ):\n element = {\n 'loc': version.get_subdomain_url(),\n 'priority': priority,\n 'changefreq': changefreq,\n 'languages': [],\n }\n\n # Version can be enabled, but not ``built`` yet. We want to show the\n # link without a ``lastmod`` attribute\n last_build = version.builds.order_by('-date').first()\n if last_build:\n element['lastmod'] = last_build.date.isoformat()\n\n if project.translations.exists():\n for translation in project.translations.all():\n translation_versions = (\n Version.internal.public(project=translation)\n .values_list('slug', flat=True)\n )\n if version.slug in translation_versions:\n href = project.get_docs_url(\n version_slug=version.slug,\n lang_slug=translation.language,\n private=False,\n )\n element['languages'].append({\n 'hreflang': hreflang_formatter(translation.language),\n 'href': href,\n })\n\n # Add itself also as protocol requires\n element['languages'].append({\n 'hreflang': project.language,\n 'href': element['loc'],\n })\n\n versions.append(element)\n\n context = {\n 'versions': versions,\n }\n return render(\n request,\n 'sitemap.xml',\n context,\n content_type='application/xml',\n )",
"def write_index(self):\n self.Lock = True\n self.file_out.seek(self.index_offset)\n for identifier, offset in self.index.items():\n self._write_identifier(identifier)\n self._write_offset(offset)",
"def build_index():\n for site in get_sites():\n text = read_site(site)\n while text == False:\n text = read_site(site) # keep attempting to read until successful\n index_site(site, text)",
"def build_index():\n for site in get_sites():\n text = read_site(site)\n while text == False:\n text = read_site(site) # keep attempting to read until successful\n index_site(site, text)",
"def sitemap(request):\n upysitemap = UPYSitemap(request)\n return HttpResponse(upysitemap._do_sitemap(), content_type = \"text/xml\")",
"async def _write_all_index_records_to_file(\n commons_url, output_filename, num_processes, max_concurrent_requests\n):\n index = Gen3Index(commons_url)\n logging.debug(f\"requesting indexd stats...\")\n num_files = int(index.get_stats().get(\"fileCount\"))\n logging.debug(f\"number files: {num_files}\")\n # paging is 0-based, so subtract 1 from ceiling\n # note: float() is necessary to force Python 3 to not floor the result\n max_page = int(math.ceil(float(num_files) / INDEXD_RECORD_PAGE_SIZE)) - 1\n logging.debug(f\"max page: {max_page}\")\n logging.debug(f\"num processes: {num_processes}\")\n\n pages = [x for x in range(max_page + 1)]\n\n # batch pages into subprocesses\n chunk_size = int(math.ceil(float(len(pages)) / num_processes))\n logging.debug(f\"page chunk size: {chunk_size}\")\n\n if not chunk_size:\n page_chunks = []\n else:\n page_chunks = [\n pages[i : i + chunk_size] for i in range(0, len(pages), chunk_size)\n ]\n\n processes = []\n for x in range(len(page_chunks)):\n pages = \",\".join(map(str, page_chunks[x]))\n\n # call the cli function below and pass in chunks of pages for each process\n command = (\n f\"python {CURRENT_DIR}/download_manifest.py --commons_url \"\n f\"{commons_url} --pages {pages} --num_processes {num_processes} \"\n f\"--max_concurrent_requests {max_concurrent_requests}\"\n )\n logging.info(command)\n\n process = await asyncio.create_subprocess_shell(command)\n\n logging.info(f\"Process_{process.pid} - Started w/: {command}\")\n processes.append(process)\n\n for process in processes:\n # wait for the subprocesses to finish\n stdout, stderr = await process.communicate()\n\n if process.returncode == 0:\n logging.info(f\"Process_{process.pid} - Done\")\n else:\n logging.info(f\"Process_{process.pid} - FAILED\")\n\n logging.info(f\"done processing, combining outputs to single file {output_filename}\")\n\n # remove existing output if it exists\n if os.path.isfile(output_filename):\n os.unlink(output_filename)\n\n with open(output_filename, \"wb\") as outfile:\n outfile.write(\"guid,urls,authz,acl,md5,file_size,file_name\\n\".encode(\"utf8\"))\n for filename in glob.glob(TMP_FOLDER + \"*\"):\n if output_filename == filename:\n # don't want to copy the output into the output\n continue\n logging.info(f\"combining {filename} into {output_filename}\")\n with open(filename, \"rb\") as readfile:\n shutil.copyfileobj(readfile, outfile)\n\n logging.info(f\"done writing output to file {output_filename}\")",
"def write_genre_index(self):\n for giEntry in self.genreIndex:\n # Write to file\n self.db_file.write(giEntry.get_representation())",
"def save(self):\n self.index.saveIndex(c.index_path('hnsw.index'))\n joblib.dump(self.ys, \"%s.ys\" % self.index_file_prefix)",
"def output(sitemap, domain):\n labels = dict((n, n +\"\\n\" + (\"-\" * len(n)) + \"\\n\" + \"\\n\".join(d['statics'])) for n,d in sitemap.nodes(data=True))\n # These work decreasingly well for larger sites.\n # I guess that's to be expected, as top notch visualisation of\n # these kind of graphs is, itself an interesting problem.\n # Possibly out of scope here.\n networkx.draw(sitemap, labels=labels)\n # Adjust the size up to the point at which it's useful\n F = pylab.gcf()\n DPI = F.get_dpi()\n DEF = F.get_size_inches()\n F.set_size_inches(DEF[0] * 5, DEF[1] * 5)\n\n filename = '{0}.sitemap.png'.format(domain)\n plt.savefig(filename)\n return filename",
"def _newurl(counter):\n return \"%s/sitemap-%s.xml.gz\" % (settings.SITEMAPS_BASE_URL, counter)",
"def write_sub_index(self):\n for sie in self.subIndex:\n self.db_file.write(sie.get_representation())",
"def create_map(\n datapointsPath: Union[Path, str],\n linksPath: Union[Path, str],\n datapointAttrPath: Union[Path, str],\n node_attr_map: Dict[str, str],\n link_attr_map: Dict[str, str],\n snapshots: List[Dict] = [],\n playerSettings: Dict[str, Any] = {},\n outFolder: Union[Path, str] = \"data_out\",\n):\n\n # create folders and copy the index file\n print(f\">> creating folders\")\n out_dir = Path(outFolder)\n out_data_path = out_dir / \"data\"\n if not out_data_path.exists():\n print(f\"\\t- new folder - {out_data_path}\")\n out_data_path.mkdir(parents=True, exist_ok=True)\n else:\n print(f\"\\t- found existing. overwriting - {out_data_path}\")\n\n # copy the index and run scripts to out directory\n shutil.copy(\"src/index.html\", out_dir)\n print(f\"\\t- copied {out_dir}/index.html\")\n\n shutil.copy(\"src/run_local.sh\", out_dir)\n print(f\"\\t- copied {out_dir}/run_local.sh\\n\")\n\n # write the files\n print(f\">> building dataset\")\n __write_dataset_file(datapointsPath, datapointAttrPath, out_data_path)\n print(f\"\\t- new dataset file written to {out_data_path / 'nodes.json'}.\\n\")\n\n print(f\">> building network\")\n __write_network_file(datapointsPath, linksPath, node_attr_map, link_attr_map, out_data_path)\n print(f\"\\t- new network file written to {out_data_path / 'links.json'}.\\n\")\n\n print(f\">> building settings\")\n __write_settings_file(snapshots, playerSettings, out_data_path)\n print(f\"\\t- new settings file written to {out_data_path / 'settings.json'}.\\n\")"
]
| [
"0.73367053",
"0.65047354",
"0.62692606",
"0.6251964",
"0.6188501",
"0.6039944",
"0.5989667",
"0.5955969",
"0.589843",
"0.5862699",
"0.5834666",
"0.5832418",
"0.5780622",
"0.5770587",
"0.57512975",
"0.5719937",
"0.56961554",
"0.56895816",
"0.56689733",
"0.5664793",
"0.56093293",
"0.56093293",
"0.5591249",
"0.555818",
"0.5536609",
"0.55204606",
"0.5436156",
"0.5412423",
"0.53912306",
"0.5391072"
]
| 0.8053461 | 0 |
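A minimal sketch of the chunk-and-index pattern that the write_sitemaps record above walks through: URLs are split across numbered sitemap files capped at 50,000 entries each, and a single sitemap index references every file. The helper below is a hypothetical sketch, not taken from the record: build_sitemaps, base_url and the (loc, lastmod) input pairs are assumed names, only the Python standard library is used, and it returns the XML strings rather than writing files so the chunking is easy to test.

from xml.sax.saxutils import escape

def build_sitemaps(urls, base_url, max_urls=50000):
    # urls: iterable of (loc, lastmod) string pairs; returns {filename: xml text}.
    urls = list(urls)
    files = {}
    index_rows = []
    for page, start in enumerate(range(0, len(urls), max_urls), 1):
        name = 'sitemap-%05d.xml' % page
        rows = ''.join(
            '<url><loc>%s</loc><lastmod>%s</lastmod></url>\n' % (escape(loc), escape(lastmod))
            for loc, lastmod in urls[start:start + max_urls]
        )
        # one <urlset> file per chunk of at most max_urls entries
        files[name] = (
            '<?xml version="1.0" encoding="UTF-8"?>\n'
            '<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">\n'
            + rows + '</urlset>\n'
        )
        index_rows.append('<sitemap><loc>%s/%s</loc></sitemap>\n' % (base_url, name))
    # the index file points at every generated sitemap
    files['sitemap.xml'] = (
        '<?xml version="1.0" encoding="UTF-8"?>\n'
        '<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">\n'
        + ''.join(index_rows) + '</sitemapindex>\n'
    )
    return files

Keeping the writer pure (tuples in, strings out) makes the 50,000-URL chunking straightforward to unit test; a caller can then write each value to disk exactly as the record above does.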
A generator that yields the URL and last-modified time, as a (url, modified) tuple, for all the batches, issues, pages and titles. | def sitemap_urls():
for batch in m.Batch.objects.all():
yield batch.url, batch.released
yield rdf_uri(batch), batch.released
for issue in batch.issues.all():
yield issue.url, batch.released
yield rdf_uri(issue), batch.released
for page in issue.pages.all():
yield page.url, batch.released
yield rdf_uri(page), batch.released
paginator = Paginator(m.Title.objects.all(), 10000)
for page_num in range(1, paginator.num_pages + 1):
page = paginator.page(page_num)
for title in page.object_list:
yield title.url, title.created | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def urls(self) -> list[str]:\r\n ...",
"def getURLs():",
"async def org_info_below_13(org_urls13):\n org_info_till13 = []\n project_urls_till13 = []\n for url in org_urls13:\n # General information about the org\n try:\n soup = await get_page(url)\n org_name = basename(url)\n org_info = soup.find_all('p')\n web_page = org_info[0].text.splitlines()[-1].strip()\n mailing_list = org_info[1].text.split(\":\")[-1].strip()\n detail = org_info[2].text\n org_info_till13.append({'name': org_name, 'about': detail,\n 'page': web_page, 'mail': mailing_list,\n 'link': url})\n project_urls_till13.extend(grab_project_links(soup))\n\n except IndexError:\n print(url)\n\n return org_info_till13, get_project_info(project_urls_till13)",
"def data(urls):\r\n for url in urls:\r\n d = dict(url)\r\n d['url'] = url.hashed.url\r\n yield d",
"def get_urls():\n return (constants.UNREVIEWED.col_values(3) +\n constants.REVIEWED.col_values(3) +\n constants.LAST.col_values(3))",
"def get_filename_and_url( filenames ):\n\tfor filename in filenames:\n\t\tfor url in get_links_from_file( filename ):\n\t\t\t yield ( filename, url )",
"def job_info(url):\n for job in requests.get(url).json():\n yield job",
"def ListUrlEntries(self):\n return [WprUrlEntry(request, self._http_archive[request])\n for request in self._http_archive.get_requests()]",
"def get_urls():\r\n return []",
"def do_batch(self, batches: List[Batch]) -> Iterator[Tuple[List[str], str]]:\n crawler = self.do_records(batches)\n\n try:\n first_record = next(crawler)\n except StopIteration:\n logging.error(\"nothing to crawl\")\n return\n\n current_seq = first_record[1]\n current_headers = [first_record[0]]\n\n crawler = (\n tqdm(crawler, initial=1, desc=self.desc, total=self.count_records(batches))\n if self.verbose\n else crawler\n )\n\n for record in crawler:\n if current_seq == record[1]:\n current_headers.append(record[0])\n else:\n yield (current_headers, current_seq)\n current_seq = record[1]\n current_headers = [record[0]]\n\n yield (current_headers, current_seq)",
"def get_task_urls(self):\n # get the count of task\n task_data = ()\n try:\n task_db_con = pymysql.connect(**self._taskdb_config)\n with task_db_con.cursor() as task_cursor:\n task_cursor.execute('SELECT DISTINCT url FROM mv')\n task_data = task_cursor.fetchall()\n finally:\n task_db_con.close()\n\n try:\n result_db_con = pymysql.connect(**self._resultdb_config)\n with result_db_con.cursor() as result_cursor:\n # get all result\n result_cursor.execute('SELECT DISTINCT result FROM dytt8')\n data = result_cursor.fetchall()\n\n # compare task count with result count\n if len(task_data) < len(data):\n return map(lambda row_item: json.loads(row_item[0]).get(\"mv_url\"), data)\n return iter(task_data)\n finally:\n result_db_con.close()",
"def iter_page_links(self) -> Iterable[str]:\n base_url = 'https://www.med.navy.mil'\n r = requests.get(self.starting_url, verify=CERTIFICATE_DIR + '/cat3.pem')\n soup = bs4.BeautifulSoup(r.content, features=\"html.parser\")\n\n # get target column of list items\n issuance_list = soup.find('div', attrs={'class': 'noindex ms-wpContentDivSpace'})\n matches = [\"Publications\", \"BUMEDNotes\", \"BUMEDInstructions\"]\n # extract links\n links = [link for link in issuance_list.find_all('a')]\n for link in links[2:-1]:\n if any(x in str(link) for x in matches):\n if not link['href'].startswith('http'):\n url = base_url + link['href']\n else:\n url = link['href']\n yield url",
"async def org_info_above_14(orgs_urls14):\n org_info_14 = []\n project_urls_from14 = []\n for url in orgs_urls14:\n try:\n soup = await get_page(url)\n org_name = basename(url)\n org_info = soup.find_all('p')\n web_page = org_info[1].text.splitlines()[-1].strip()\n mailing_list = org_info[2].text.split(\":\")[-1].strip()\n description = soup.find('div', {'class': 'main mdl-cell mdl-cell--8-col\\\n mdl-card mdl-shadow--4dp'})\n detail = description.find_all('p')[2].nextSibling\n org_info_14.append({'name': org_name, 'page': web_page,\n 'about': detail, 'mail': mailing_list,\n 'link': url})\n project_urls_from14.extend(grab_project_links(soup))\n except IndexError:\n print(url)\n\n return org_info_14, get_project_info(project_urls_from14)",
"def start_requests(self):\n authors_pandas = conf.read_from_data('authors.json')\n author_link_list = list(\n map(lambda obj: (obj['keyUrl'], conf.gd_base_url + obj['article_url'], obj['article_url']),\n authors_pandas))\n for link in author_link_list:\n yield Request(url=link[1])",
"def _urls(*, repository, commit, mirrors):\n result_with_nulls = [\n _format_url(\n pattern = x,\n repository = repository,\n commit = commit,\n )\n for x in mirrors.get(\"github\")\n ]\n return [\n url\n for url in result_with_nulls\n if url != None\n ]",
"def URLs(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('urls', default)\n return [HEP.URLObject(i) for i in tmp]",
"def extract_urls(genome):\n itemid = genome.get('metadata').get('identifier')\n urls = set([url for url in genome['urls'] if 'archive.org' not in url])\n db_urls_found(itemid, urls)",
"def getUrlsList(self):\n\t\ttry:\n\t\t\tf = ur.urlopen(self.sitemap_url)\n\t\t\tres = f.readlines()\n\t\t\tfor d in res:\n\t\t\t data = re.findall('<loc>(https?:\\/\\/.+?)<\\/loc>',d)\n\t\t\t for i in data:\n\t\t\t\tself.urls.append(i)\n\t\texcept Exception as e:\n\t\t\tself.app.printflush(str(e))\n\t\t\tself.app.printflush(traceback.format_exc())\n\t\tself.fetched_count = len(self.urls)",
"async def parse_links(\n self, response: aiohttp.ClientResponse\n ) -> typing.Tuple[UrlStatistic, set]:\n links = set()\n content_type = None\n encoding = None\n body = await response.read()\n\n if response.status == 200:\n content_type = response.headers.get('content-type')\n enc: typing.Dict[str, str] = {}\n\n if content_type:\n content_type, enc = cgi.parse_header(content_type)\n\n encoding = enc.get('charset', 'utf-8')\n\n if content_type in ('text/html', 'application/xml'):\n text = await response.text()\n\n # replace href with (?:href|src) to follow image links\n urls = set(re.findall(r'''(?i)href=[\"']([^\\s\"'<>]+)''', text))\n if urls:\n LOGGER.info(\n f'found {len(urls)} distinct urls via {response.url.human_repr()}' # pylint: disable=C0301 # noqa: E501\n )\n\n for url in urls:\n normalized = urllib.parse.urljoin(str(response.url), url)\n\n defragmented, _ = urllib.parse.urldefrag(normalized)\n\n if self.is_url_valid(defragmented):\n links.add(defragmented)\n\n stat = UrlStatistic(\n url=response.url,\n next_url=None,\n status=response.status,\n exception=None,\n size=len(body),\n content_type=content_type,\n encoding=encoding,\n num_urls=len(links),\n num_new_urls=len(links - self.crawled_urls),\n )\n\n return stat, links",
"def fetch_report_urls(start, end, batch_size):\n db = db_connect()\n db_ensure_init(db)\n\n with open('log.csv', 'w', newline='') as log:\n logwriter = csv.writer(log)\n\n cmd = db.execute(\"\"\"\n SELECT ix.id, ix.conm, ix.type, ix.cik, ix.date, ix.path\n FROM \"index\" ix\n LEFT JOIN reports r ON ix.id = r.index_id\n WHERE ix.type = '10-K' AND r.id IS NULL AND\n CAST(strftime('%Y', DATE(ix.date)) as INT) >= {start} AND\n CAST(strftime('%Y', DATE(ix.date)) as INT) <= {end}\n ORDER BY ix.date DESC\n \"\"\".format(start=start, end=end))\n\n for batch in iter(lambda: cmd.fetchmany(batch_size), []):\n to_insert = list()\n for r in batch:\n # print(r)\n log_row = r\n\n response = requests.get(r[5])\n href = parse_href(response.content)\n url = fix_url(href, r[5])\n print(url)\n\n filetype = mimetypes.guess_type(url)[0]\n print(filetype)\n\n filename = os.path.basename(urlparse(url).path)\n print(filename)\n\n to_insert.append((r[0], r[1], r[2], r[3], r[4], url, filetype, filename))\n\n logwriter.writerow(log_row)\n\n db_insert(db, to_insert)",
"def URLs(self, default=[{}]):\n tmp = self.data.get('urls', default)\n return [HEP.URLObject(i) for i in tmp]",
"def parse_urls(data):\n testing = [0] * len(data[\"Commit_URL\"])\n build = [0] * len(data[\"Commit_URL\"])\n maintenance = [0] * len(data[\"Commit_URL\"])\n for ii in range(len(data[\"Commit_URL\"])):\n try:\n html = urlopen(data[\"Commit_URL\"].iloc[ii])\n bsObj = BeautifulSoup(html, \"html.parser\")\n paths = bsObj.findAll(\"a\", {\"href\": re.compile(r\"#diff-[a-z0-9]+\")})\n for path in paths:\n if len(path.attrs) == 1:\n if re.match(r\".*(build|pom).*\", str(path)):\n build[ii] = 1\n if re.match(r\".*(test|tests|tester).*\", str(path)):\n testing[ii] = 1\n if re.match(r\".*(u|U)til.*\", str(path)) or re.match(r\".*(h|H)elper.*\", str(path)):\n maintenance[ii] = 1\n except HTTPError as e:\n print(data[\"Commit_ID\"].iloc[ii])\n except URLError as e:\n print(\"The server could not be found!\")\n data[\"Testing\"] = testing\n data[\"Build\"] = build\n data[\"Maintenance\"] = maintenance\n return data",
"def batches_list(project='batch', n_batches=5):\n\tbatches_links = [(project, i+1, f\"Batch {i+1}\") for i in range(n_batches)]\n\treturn batches_links",
"def iter_pages(self) -> Generator[Tuple[Optional[List[dict]], int], None, None]:\n # retrieves the data for the given url\n data_list, response, result = self.retrieve_data(self.url)\n\n if result != GithubApiResult.SUCCESS:\n self.logger.debug(\"Failed to retrieve the data even though 10 attempts were given\")\n yield None, None\n return\n\n # this retrieves the page for the given url\n page_number = get_url_page_number(self.url)\n\n # yields the first page of data and its page number\n yield data_list, page_number\n\n while 'next' in response.links.keys():\n\n # gets the next page from the last responses header\n next_page = response.links['next']['url']\n\n # Here we don't need to pass in params with the page, or the default params because the url from the headers already has those values\n data_list, response, result = self.retrieve_data(next_page)\n\n if result != GithubApiResult.SUCCESS:\n self.logger.debug(f\"Failed to retrieve the data for even though 10 attempts were given. Url: {next_page}\")\n return\n\n page_number = get_url_page_number(next_page)\n\n # if either the data or response is None then yield None and return\n if data_list is None or response is None:\n return\n\n # yield the data from the page and its number\n yield data_list, page_number",
"def get_guide_urls(self):\n # data structures for returns\n urls = []\n link_labels = []\n link_class = []\n # data structures for tracking classes for links\n cur_class = None\n dict_counter = {}\n for tag in self.post_div.find_all(\"a\"):\n url = tag[\"href\"]\n # update class for the links if boundary found\n if url in url_to_class:\n dict_count = min(dict_counter.get(url, 0), len(url_to_class[url]) - 1)\n cur_class = url_to_class[url][dict_count]\n dict_counter[url] = dict_counter.get(url, 0) + 1\n # record the data for the link\n if cur_class is not None:\n urls += [url]\n link_labels += [tag.text]\n link_class += [cur_class]\n return urls, link_labels, link_class",
"def load_links(self) -> Tuple[List[str], List[str]]:\n\n with open(URL_FILE, 'r') as txt_file:\n lines = txt_file.read().split()\n\n urls = []\n for line in lines:\n urls.append(line.split(',')[0])\n \n return lines, urls",
"def create_url_batch(cls, path, batch_size, params={}):\n url_list = []\n for page in range(1, batch_size + 1):\n params[\"page\"] = page\n url = (path, dict(params))\n url_list.append(url)\n return url_list",
"def extract_linked_items(pages):\n for page in pages:\n for iterate in iterate_on_items(page):\n yield((iterate[1:])[:-1])",
"def gather_headlines(urls):\n pass",
"def __fetch_data(file_spider, links_titles, white_list, target_format, time_out, path,\n recursion_depth, recursion_max_depth, prev_link_size, first_run):\n if file_spider == 'yes':\n Crawler.__check_target_link(links_titles, white_list, target_format,\n time_out, path)\n return [recursion_depth, prev_link_size, first_run]\n\n else:\n url_size = len(str(links_titles[0]).rstrip('/').split('/'))\n recursion_settings = Crawler.__count_recursion_depth(url_size, recursion_depth,\n prev_link_size, first_run)\n recursion_depth = recursion_settings[0]\n prev_link_size = recursion_settings[1]\n first_run = recursion_settings[2]\n\n if recursion_depth > recursion_max_depth and recursion_depth != 0:\n return [recursion_depth, prev_link_size, first_run]\n else:\n Crawler.__download_link_contents(links_titles, time_out, path)\n return [recursion_depth, prev_link_size, first_run]"
]
| [
"0.5946937",
"0.5943982",
"0.58248746",
"0.58053154",
"0.5790465",
"0.57346326",
"0.57095873",
"0.5661363",
"0.56141263",
"0.5576062",
"0.5567747",
"0.5489056",
"0.54827327",
"0.54785705",
"0.54465747",
"0.54260623",
"0.54244757",
"0.54214203",
"0.5409607",
"0.5382013",
"0.5359606",
"0.5342528",
"0.53390694",
"0.5317825",
"0.5305901",
"0.5300679",
"0.5292488",
"0.52904814",
"0.5289218",
"0.5283849"
]
| 0.6931409 | 0 |
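A hedged sketch of the same yield-one-tuple-at-a-time idea from the sitemap_urls record above, written without the Django ORM: batches, issues, pages and titles are assumed here to be plain dicts carrying url/released/created values, and paginate and iter_sitemap_urls are made-up names. The small paginate helper stands in for Django's Paginator so a large title list is walked one fixed-size page at a time.

def paginate(items, per_page):
    # Yield successive fixed-size slices of a sequence (a stand-in for Paginator).
    for start in range(0, len(items), per_page):
        yield items[start:start + per_page]

def iter_sitemap_urls(batches, titles, per_page=10000):
    # Yield (url, modified) tuples for every batch, issue, page and title.
    for batch in batches:
        yield batch['url'], batch['released']
        for issue in batch.get('issues', []):
            yield issue['url'], batch['released']
            for page in issue.get('pages', []):
                yield page['url'], batch['released']
    for chunk in paginate(titles, per_page):
        for title in chunk:
            yield title['url'], title['created']

Because the result is a generator, a consumer such as the sitemap writer sketched earlier can pull (url, modified) tuples lazily; paging the titles mirrors the Paginator loop in the record above.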
Not exactly the same as the executor's definition of get_field_def: in this statically evaluated environment we do not always have an Object type, and we need to handle Interface and Union types. | def get_field_def(schema, parent_type, field_ast):
name = field_ast.name.value
if name == SchemaMetaFieldDef.name and schema.get_query_type() == parent_type:
return SchemaMetaFieldDef
elif name == TypeMetaFieldDef.name and schema.get_query_type() == parent_type:
return TypeMetaFieldDef
elif name == TypeNameMetaFieldDef.name and \
isinstance(parent_type, (
GraphQLObjectType,
GraphQLInterfaceType,
GraphQLUnionType,
)):
return TypeNameMetaFieldDef
elif isinstance(parent_type, (GraphQLObjectType, GraphQLInterfaceType)):
return parent_type.get_fields().get(name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def GetFieldDef(fielddef, fields=\"format_, addrdef, baseaddr, bits, bitshift, strindex, datadef, arraydef, validate, cmd, group, tasmotacmnd, converter, readconverter, writeconverter\"):\n format_ = addrdef = baseaddr = datadef = arraydef = validate = cmd = group = tasmotacmnd = converter = readconverter = writeconverter = strindex = None\n bits = bitshift = 0\n\n # calling with nothing is wrong\n if fielddef is None:\n print('<fielddef> is None', file=sys.stderr)\n raise SyntaxError('<fielddef> error')\n\n # get top level items\n if len(fielddef) == 3:\n # converter not present\n format_, addrdef, datadef = fielddef\n elif len(fielddef) == 4:\n # converter present\n format_, addrdef, datadef, converter = fielddef\n else:\n print('wrong <fielddef> {} length ({}) in setting'.format(fielddef, len(fielddef)), file=sys.stderr)\n raise SyntaxError('<fielddef> error')\n\n # ignore calls with 'root' setting\n if isinstance(format_, dict) and baseaddr is None and datadef is None:\n return eval(fields)\n\n if not isinstance(format_, (str,dict)):\n print('wrong <format> {} type {} in <fielddef> {}'.format(format_, type(format_), fielddef), file=sys.stderr)\n raise SyntaxError('<fielddef> error')\n\n # extract addrdef items\n baseaddr = addrdef\n if isinstance(baseaddr, (list,tuple)):\n if len(baseaddr) == 3:\n # baseaddr bit definition\n baseaddr, bits, bitshift = baseaddr\n if not isinstance(bits, int):\n print('<bits> must be defined as integer in <fielddef> {}'.format(fielddef), file=sys.stderr)\n raise SyntaxError('<fielddef> error')\n if not isinstance(bitshift, int):\n print('<bitshift> must be defined as integer in <fielddef> {}'.format(fielddef), file=sys.stderr)\n raise SyntaxError('<fielddef> error')\n elif len(baseaddr) == 2:\n # baseaddr string definition\n baseaddr, strindex = baseaddr\n if not isinstance(strindex, int):\n print('<strindex> must be defined as integer in <fielddef> {}'.format(fielddef), file=sys.stderr)\n raise SyntaxError('<fielddef> error')\n if strindex >= SettingsTextIndex.index('SET_MAX'):\n print('<strindex> out of range [0,{}] in <fielddef> {}'.format(SettingsTextIndex.index('SET_MAX'), fielddef), file=sys.stderr)\n raise SyntaxError('<fielddef> error')\n else:\n print('wrong <addrdef> {} length ({}) in <fielddef> {}'.format(addrdef, len(addrdef), fielddef), file=sys.stderr)\n raise SyntaxError('<fielddef> error')\n if not isinstance(baseaddr, int):\n print('<baseaddr> {} must be defined as integer in <fielddef> {}'.format(baseaddr, fielddef), file=sys.stderr)\n raise SyntaxError('<fielddef> error')\n\n # extract datadef items\n arraydef = datadef\n if isinstance(datadef, (tuple)):\n if len(datadef) == 2:\n # datadef has a validator\n arraydef, validate = datadef\n elif len(datadef) == 3:\n # datadef has a validator and cmd set\n arraydef, validate, cmd = datadef\n # cmd must be a tuple with 2 objects\n if isinstance(cmd, tuple) and len(cmd) == 2:\n group, tasmotacmnd = cmd\n if group is not None and not isinstance(group, str):\n print('wrong <group> {} in <fielddef> {}'.format(group, fielddef), file=sys.stderr)\n raise SyntaxError('<fielddef> error')\n if isinstance(tasmotacmnd, tuple):\n for tcmnd in tasmotacmnd:\n if tcmnd is not None and not callable(tcmnd) and not isinstance(tcmnd, str):\n print('wrong <tasmotacmnd> {} in <fielddef> {}'.format(tcmnd, fielddef), file=sys.stderr)\n raise SyntaxError('<fielddef> error')\n else:\n if tasmotacmnd is not None and not callable(tasmotacmnd) and not isinstance(tasmotacmnd, str):\n print('wrong <tasmotacmnd> {} in 
<fielddef> {}'.format(tasmotacmnd, fielddef), file=sys.stderr)\n raise SyntaxError('<fielddef> error')\n else:\n print('wrong <cmd> {} length ({}) in <fielddef> {}'.format(cmd, len(cmd), fielddef), file=sys.stderr)\n raise SyntaxError('<fielddef> error')\n else:\n print('wrong <datadef> {} length ({}) in <fielddef> {}'.format(datadef, len(datadef), fielddef), file=sys.stderr)\n raise SyntaxError('<fielddef> error')\n\n if validate is not None and (not isinstance(validate, str) and not callable(validate)):\n print('wrong <validate> {} type {} in <fielddef> {}'.format(validate, type(validate), fielddef), file=sys.stderr)\n raise SyntaxError('<fielddef> error')\n\n # convert single int into one-dimensional list\n if isinstance(arraydef, int):\n arraydef = [arraydef]\n\n if arraydef is not None and not isinstance(arraydef, (list)):\n print('wrong <arraydef> {} type {} in <fielddef> {}'.format(arraydef, type(arraydef), fielddef), file=sys.stderr)\n raise SyntaxError('<fielddef> error')\n\n # get read/write converter items\n readconverter = converter\n if isinstance(converter, (tuple)):\n if len(converter) == 2:\n # converter has read/write converter\n readconverter, writeconverter = converter\n if readconverter is not None and not isinstance(readconverter, str) and not callable(readconverter):\n print('wrong <readconverter> {} type {} in <fielddef> {}'.format(readconverter, type(readconverter), fielddef), file=sys.stderr)\n raise SyntaxError('<fielddef> error')\n if writeconverter is not None and (not isinstance(writeconverter, (bool,str)) and not callable(writeconverter)):\n print('wrong <writeconverter> {} type {} in <fielddef> {}'.format(writeconverter, type(writeconverter), fielddef), file=sys.stderr)\n raise SyntaxError('<fielddef> error')\n else:\n print('wrong <converter> {} length ({}) in <fielddef> {}'.format(converter, len(converter), fielddef), file=sys.stderr)\n raise SyntaxError('<fielddef> error')\n\n\n return eval(fields)",
"def get_def(self, _def):\n if not isinstance(_def, AnodeObjectDefinition):\n if isinstance(_def, AnodeObjectType):\n _type = _def\n _def = _type.latest_def\n if is_hash(_def):\n hash = _def\n if not hash in self.def_by_hash:\n raise AnodeObjectError('ION object hash \"%s\" not in registry' % (hash))\n _def = self.def_by_hash[hash]\n elif isinstance(_def, str): # Don't support unicode strings\n name = _def\n if not name in self.type_by_name:\n raise AnodeObjectError('ION object type \"%s\" not in registry' % (name))\n _type = self.type_by_name[name]\n _def = _type.latest_def\n else:\n raise AnodeObjectError(\"Invalid ION object definition\")\n return _def",
"def to_field(obj):\r\n\r\n\r\n if isinstance(obj, Field):\r\n field = obj\r\n else:\r\n d = { \"storage_type\": \"unknown\" }\r\n\r\n if isinstance(obj, basestring):\r\n d[\"name\"] = obj\r\n elif type(obj) == tuple or type(obj) == list:\r\n d[\"name\"] = obj[0]\r\n try:\r\n d[\"storage_type\"] = obj[1]\r\n try:\r\n d[\"analytical_type\"] = obj[2]\r\n except:\r\n pass\r\n except:\r\n pass\r\n else: # assume dictionary\r\n d[\"name\"] = obj[\"name\"]\r\n d[\"label\"] = obj.get(\"label\")\r\n d[\"storage_type\"] = obj.get(\"storage_type\")\r\n d[\"analytical_type\"] = obj.get(\"analytical_type\")\r\n d[\"adapter_storage_type\"] = obj.get(\"adapter_storage_type\")\r\n\r\n if \"analytical_type\" not in d:\r\n storage_type = d.get(\"storage_type\")\r\n if storage_type:\r\n deftype = default_analytical_types.get(storage_type)\r\n d[\"analytical_type\"] = deftype or \"typeless\"\r\n else:\r\n d[\"analytical_type\"] = \"typeless\"\r\n\r\n field = Field(**d)\r\n return field",
"def _get_decl_for_model_field(field: Field) -> Optional[declarations.BaseDeclaration]:\n if isinstance(field, PartialDateField):\n return factory.Faker('date')\n internal_type = field.get_internal_type()\n declaration = None\n if internal_type in ('CharField', 'TextField'):\n if field.unique:\n declaration = UniqueFaker('word')\n else:\n declaration = factory.Faker('word')\n elif internal_type in (\n 'IntegerField', 'PositiveSmallIntegerField', 'SmallIntegerField',\n 'BigIntegerField'):\n if field.unique:\n declaration = factory.Sequence(lambda n: n)\n else:\n declaration = factory.Faker('pyint')\n elif internal_type in ('BooleanField', 'NullBooleanField'):\n declaration = factory.Faker('pybool')\n elif internal_type in ('DateField', 'DateTimeField', 'TimeField'):\n # The providers for these fields are called 'date','date_time','time'.\n # Derive the provider name from the internal_type.\n provider = ''\n for i, c in enumerate(internal_type.replace('Field', '')):\n if i and c.isupper():\n provider += '_'\n provider += c.lower()\n declaration = factory.Faker(provider)\n elif internal_type == 'DurationField':\n declaration = factory.Faker('time_delta')\n if declaration is None:\n raise Exception(\n f\"Could not find a faker declaration appropriate for model field {field!r}\"\n )\n return declaration",
"def _get_def(self,oname,obj):\n\n # There used to be a lot of fancy code here, until I realized that the\n # proper way of calling formatargspec() is with a * in the args! Now\n # this function is trivial.\n try:\n return oname + inspect.formatargspec(*self.getargspec(obj)), 1\n except:\n return 'Could not get definition header for ' + `oname` , 0",
"def get_resolved_calculation(fieldObj, allFieldsObj, resolutionType=None):\n \"\"\" options: calc (default); caption; calcwithref \"\"\"\n if not _field_has_calculation(fieldObj):\n return None\n\n print('checker: ', fieldObj.name, _field_has_calculation(fieldObj))\n\n ## Resolve non-named calculated fields\n # look for the fields with 'Calculation_' ids\n #TODO make the function call to resolve the names\n #this is the target field which we're resoliving calculation names to\n mappedField = fieldObj.calculation\n while mappedField.find('[') >= 0:\n n = mappedField.find('[') #starting point of calc\n toMap = mappedField[n:mappedField.find(']', n)+1] #find the end of the calc name\n\n #set output to the specified resolutionType\n #TODO need to check if the field is a calculation or just an attributes\n if resolutionType is None or resolutionType == 'calc':\n # if toMap == '[Returned]':\n # print ('RETURN FOUND: PRINTING OBJECTS IN LIST')\n # for x in allFieldsObj:\n # print x, x[0]\n # print \"GETTING: \", toMap, ' ,', allFieldsObj[toMap],\" | RESULT: \", _get_calc_value(allFieldsObj[toMap], 'name')\n rfield = _get_calc_value(allFieldsObj[toMap], 'name')\n # rfield = allFieldsObj[toMap].calculation\n # elif resolutionType == 'caption':\n # rfield = allFieldsObj[toMap].caption\n # elif resolutionType == 'calcwithref':\n # rfield = '{[{}] {}}'.format(allFieldsObj[toMap].caption, allFieldsObj[toMap].calculation)\n # else:\n # rfield = 'could not resolve type'\n # print (\"invalid parameter [{}] passed for 'resolutionType' \".format(resolutionType))\n # print toMap\n mappedField = mappedField.replace(toMap, 'rfield')\n return mappedField\n # return _resolve_calculation_parts(fieldObj, allFieldsObj, resolutionType)",
"def GetSubfieldDef(fielddef):\n\n format_, addrdef, datadef, arraydef, validate, cmd, converter = GetFieldDef(fielddef, fields='format_, addrdef, datadef, arraydef, validate, cmd, converter')\n\n # create new arraydef\n if len(arraydef) > 1:\n arraydef = arraydef[1:]\n else:\n arraydef = None\n\n # create new datadef\n if isinstance(datadef, tuple):\n if cmd is not None:\n datadef = (arraydef, validate, cmd)\n else:\n datadef = (arraydef, validate)\n else:\n datadef = arraydef\n\n # set new field def\n subfielddef = None\n if converter is not None:\n subfielddef = (format_, addrdef, datadef, converter)\n else:\n subfielddef = (format_, addrdef, datadef)\n\n return subfielddef",
"def GetField(dobj, fieldname, fielddef, raw=False, addroffset=0):\n\n if isinstance(dobj, str):\n dobj = bytearray(dobj)\n\n valuemapping = None\n\n # get field definition\n format_, baseaddr, strindex, arraydef, group = GetFieldDef(fielddef, fields='format_, baseaddr, strindex, arraydef, group')\n\n # filter groups\n if not IsFilterGroup(group):\n return valuemapping\n\n # <arraydef> contains a integer list\n if isinstance(arraydef, list) and len(arraydef) > 0:\n valuemapping = []\n offset = 0\n for i in range(0, arraydef[0]):\n subfielddef = GetSubfieldDef(fielddef)\n length = GetFieldLength(subfielddef)\n if length != 0:\n if strindex is not None:\n value = GetField(dobj, fieldname, subfielddef, raw=raw, addroffset=i)\n else:\n value = GetField(dobj, fieldname, subfielddef, raw=raw, addroffset=addroffset+offset)\n valuemapping.append(value)\n offset += length\n\n # <format> contains a dict\n elif isinstance(format_, dict):\n mapping_value = {}\n # -> iterate through format\n for name in format_:\n value = None\n value = GetField(dobj, name, format_[name], raw=raw, addroffset=addroffset)\n if value is not None:\n mapping_value[name] = value\n # copy complete returned mapping\n valuemapping = copy.deepcopy(mapping_value)\n\n # a simple value\n elif isinstance(format_, (str, bool, int, float)):\n if GetFieldLength(fielddef) != 0:\n if strindex is not None:\n value = GetFieldValue(fielddef, dobj, baseaddr, addroffset)\n else:\n value = GetFieldValue(fielddef, dobj, baseaddr+addroffset)\n valuemapping = ReadWriteConverter(value, fielddef, read=True, raw=raw)\n\n else:\n exit(ExitCode.INTERNAL_ERROR, \"Wrong mapping format definition: '{}'\".format(format_), type_=LogType.WARNING, doexit=not args.ignorewarning, line=inspect.getlineno(inspect.currentframe()))\n\n return valuemapping",
"def field_mapping(self):\n fields = self.fields\n if self.target_field is not None:\n del fields[self.target_field.get('name')]\n field_labels = list(self.fields.keys())\n\n field_mapping = {\n name: (\n field_labels.index(name),\n lambda value, e=e: self.parse_type(value, e)\n )\n for name, e in fields.items()\n if e.tag == f'{{{self.namespace}}}DataField'\n }\n\n field_mapping.update({\n name: (\n field_labels.index(self.find(e, 'FieldRef').get('field')),\n lambda value, e=e: self.parse_type(value, e)\n )\n for name, e in fields.items()\n if e.tag == f'{{{self.namespace}}}DerivedField'\n })\n\n return field_mapping",
"def objectFields(self):\n raise NotImplementedError",
"def field_type(self):\n return \"\"",
"def _classify_object_field(field: s_obj.Field[Any]) -> FieldStorage:\n\n ftype = field.type\n shadow_ptr_kind = None\n shadow_ptr_type = None\n fieldtype = FieldType.OTHER\n\n is_array = is_multiprop = False\n if issubclass(ftype, s_obj.MultiPropSet):\n is_multiprop = True\n ftype = ftype.type\n elif (\n issubclass(\n ftype,\n (checked.CheckedList, checked.FrozenCheckedList,\n checked.CheckedSet, checked.FrozenCheckedSet))\n and not issubclass(ftype, s_expr.ExpressionList)\n ):\n is_array = True\n ftype = ftype.type # type: ignore\n\n if issubclass(ftype, s_obj.ObjectCollection):\n ptr_kind = 'multi link'\n ptr_type = 'schema::Object'\n if issubclass(ftype, s_obj.ObjectDict):\n fieldtype = FieldType.OBJ_DICT\n\n elif issubclass(ftype, s_obj.Object):\n ptr_kind = 'link'\n ptr_type = f'schema::{ftype.__name__}'\n\n elif issubclass(ftype, s_expr.Expression):\n shadow_ptr_kind = 'property'\n shadow_ptr_type = 'tuple<text: str, refs: array<uuid>>'\n ptr_kind = 'property'\n ptr_type = 'str'\n fieldtype = FieldType.EXPR\n\n elif issubclass(ftype, s_expr.ExpressionList):\n shadow_ptr_kind = 'property'\n shadow_ptr_type = (\n 'array<tuple<text: str, refs: array<uuid>>>'\n )\n ptr_kind = 'property'\n ptr_type = 'array<str>'\n fieldtype = FieldType.EXPR_LIST\n\n elif issubclass(ftype, s_expr.ExpressionDict):\n shadow_ptr_kind = 'property'\n shadow_ptr_type = '''array<tuple<\n name: str,\n expr: tuple<text: str, refs: array<uuid>>\n >>'''\n ptr_kind = 'property'\n ptr_type = 'array<tuple<name: str, expr: str>>'\n fieldtype = FieldType.EXPR_DICT\n\n elif issubclass(ftype, collections.abc.Mapping):\n ptr_kind = 'property'\n ptr_type = 'json'\n\n elif issubclass(ftype, (str, sn.Name)):\n ptr_kind = 'property'\n ptr_type = 'str'\n\n if field.name == 'name':\n # TODO: consider shadow-reflecting names as tuples\n shadow_ptr_kind = 'property'\n shadow_ptr_type = 'str'\n\n elif issubclass(ftype, bool):\n ptr_kind = 'property'\n ptr_type = 'bool'\n\n elif issubclass(ftype, int):\n ptr_kind = 'property'\n ptr_type = 'int64'\n\n elif issubclass(ftype, uuid.UUID):\n ptr_kind = 'property'\n ptr_type = 'uuid'\n\n elif issubclass(ftype, verutils.Version):\n ptr_kind = 'property'\n ptr_type = '''\n tuple<\n major: std::int64,\n minor: std::int64,\n stage: sys::VersionStage,\n stage_no: std::int64,\n local: array<std::str>,\n >\n '''\n else:\n raise RuntimeError(\n f'no metaschema reflection for field {field.name} of type {ftype}'\n )\n\n if is_multiprop:\n ptr_kind = 'multi property'\n if is_array:\n ptr_type = f'array<{ptr_type}>'\n\n return FieldStorage(\n fieldtype=fieldtype,\n ptrkind=ptr_kind,\n ptrtype=ptr_type,\n shadow_ptrkind=shadow_ptr_kind,\n shadow_ptrtype=shadow_ptr_type,\n )",
"def field_mapping_for(field_obj: Field):\n field_cls = type(field_obj)\n\n if field_cls is Auto:\n if field_obj.increment is True:\n return sa_types.Integer\n else:\n return _get_identity_type()\n\n return field_mapping.get(field_cls)",
"def field_type(self) -> Optional[NameObject]:\n return self.get(\"/FT\")",
"def _init_fields(self):\n if self._fields is None:\n M.mset('U', \"^\") # DBS Calls Require this\n f = self._fields = {}\n attrs = self.fieldnames = {}\n fieldid = \"0\"\n while 1:\n # Subscript 0 is field description, .1 is the title, 3 is help\n fieldid, info, title, fieldhelp = M.ddwalk(self._fileid, fieldid)\n #fieldid, info, title, fieldhelp = M.mexec(\n # \"\"\"set s0=$order(^DD(s2,s0)) Q:s0'=+s0 s s1=$G(^DD(s2,s0,0)),s3=$G(^DD(s2,s0,.1)),s4=$G(^DD(s2,s0,3))\"\"\",\n # M.INOUT(str(fieldid)), M.INOUT(\"\"), str(self._fileid), M.INOUT(\"\"), M.INOUT(\"\"))\n if fieldid == \"\" or fieldid[0] not in \"0123456789.\":\n break\n\n info = info.split(\"^\", 4) \n label = self._clean_label(info[0])\n try:\n ftype = info[1]\n except:\n ftype = None\n if ftype:\n finst = None\n for klass in FIELD_TYPES:\n if klass.isa(ftype):\n finst = f[fieldid] = klass(fieldid, label, info)\n finst.fileid = self.fileid\n finst.ownerdd = self\n attrs[label] = fieldid\n break\n if finst is None:\n print finst, \"FIELD [%s], spec [%s] was not identified\" % (label, ftype)\n continue\n finst.title = title\n finst.fieldhelp = fieldhelp\n else:\n assert finst, \"FIELD [%s] %s has no fieldspec\" % (label, info)\n\n return self._fields",
"def on_get_field(self, ins, const, obj):\n pass",
"def _make_field(index, field_desc, names):\n field_schema = schema_from_json_data(\n json_data=field_desc['type'],\n names=names,\n )\n other_props = (\n dict(filter_keys_out(items=field_desc, keys=FIELD_RESERVED_PROPS)))\n return Field(\n data_type=field_schema,\n name=field_desc['name'],\n index=index,\n has_default=('default' in field_desc),\n default=field_desc.get('default', _NO_DEFAULT),\n order=field_desc.get('order', None),\n doc=field_desc.get('doc', None),\n other_props=other_props,\n )",
"def get_column_def(self):\r\n db_type = self.db_type.format(self.value_type.db_type)\r\n return '{} {}'.format(self.cql, db_type)",
"def get_column_def(self):\r\n return '{} {}'.format(self.cql, self.db_type)",
"def parse_def(self, sql):\n parsed = sqlparse.parse(sql)[0]\n\n # extract the parenthesis which holds column definitions\n _, par = parsed.token_next_by(i=sqlparse.sql.Parenthesis)\n columns = self.extract_definitions(par)\n\n r = []\n for column in columns:\n s = {}\n s['key'] = column[0]\n s['type'] = column[1:]\n r.append(s)\n #print('NAME: {name!s:12} DEFINITION: {definition}'.format(\n # name=column[0], definition=' '.join(str(t) for t in column[1:])))\n return r",
"def get_drupal_field_defaults(db_obj, db_cur, entity_type, bundle):\n\n # query string and arguments\n query_str = (\n'''\nSELECT fci.field_name, fci.data\nFROM field_config_instance as fci\nLEFT JOIN field_config as fc\nON fc.id = fci.field_id\nWHERE fci.entity_type = %s\nAND fci.bundle = %s\nAND fc.deleted = 0\n'''\n )\n query_args = [entity_type, bundle]\n\n # execute the query\n if not db_obj.execute(db_cur, query_str.strip(), query_args,\n has_results=True):\n return None\n ret = db_obj.fetchall(db_cur)\n if not ret[0]:\n return None\n if not ret[1]:\n return []\n\n # before we worry about the phpserialize module, make sure there are\n # actually defaults\n found_default = 0\n for row in ret[1]:\n if re.search('s:13:\"default_value\";(?!N;)', row[1]):\n found_default = 1\n if found_default == 0:\n return []\n\n if 'phpserialize' not in sys.modules:\n nori.core.email_logger.error(\n'''Warning: there are defaults for Drupal fields under entity type\n{0} and bundle {1}, but the 'phpserialize' module\nis not available, so they can't be interpreted.''' .\n format(*map(nori.pps, [entity_type, bundle]))\n )\n return None\n\n # massage the defaults - not implemented yet\n nori.core.email_logger.error(\n'''Warning: there are defaults for Drupal fields under entity type\n{0} and bundle {1}, but the interpretation code\nhasn't been implemented yet.''' .\n format(*map(nori.pps, [entity_type, bundle]))\n )\n return None\n #ret[1]\n #field_name: endpoints, field_ram, etc.\n #phpserialize.loads(data)['default_value'][0]['value'] -> '2222'",
"def _GetField(bpo_field_id):\n bpo_field = BPOField.objects.get(id=bpo_field_id)\n # Get BPO Field based on type.\n if bpo_field.type == 'auto_sum':\n return BPOAutoSumField.objects.get(id=bpo_field_id)\n elif bpo_field.type == 'multiple_choice':\n return BPOMultipleChoiceField.objects.get(id=bpo_field_id)\n elif bpo_field.type == 'float_input':\n return BPOFloatField.objects.get(id=bpo_field_id)\n elif bpo_field.type == 'integer_input':\n return BPOIntegerField.objects.get(id=bpo_field_id)\n elif bpo_field.type == 'date_input':\n return BPODateField.objects.get(id=bpo_field_id)\n elif bpo_field.type == 'table':\n return BPOTable.objects.get(id=bpo_field_id)\n else:\n return bpo_field",
"def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n fields: Dict[str, Callable[[Any], None]] = {\n \"assignedDateTime\": lambda n : setattr(self, 'assigned_date_time', n.get_datetime_value()),\n \"capabilityStatus\": lambda n : setattr(self, 'capability_status', n.get_str_value()),\n \"@odata.type\": lambda n : setattr(self, 'odata_type', n.get_str_value()),\n \"service\": lambda n : setattr(self, 'service', n.get_str_value()),\n \"servicePlanId\": lambda n : setattr(self, 'service_plan_id', n.get_uuid_value()),\n }\n return fields",
"def get_column_def(self):\r\n db_type = self.db_type.format(\r\n self.key_type.db_type,\r\n self.value_type.db_type\r\n )\r\n return '{} {}'.format(self.cql, db_type)",
"def field(base : SetupVal, field_name : str) -> SetupVal:\n if not isinstance(base, SetupVal):\n raise ValueError('field expected a SetupVal, but got {base!r}')\n if not isinstance(field_name, str):\n raise ValueError('field expected a str, but got {field_name!r}')\n return FieldVal(base, field_name)",
"def get_data_structure(entries, types, field_object):\n\n data = {\n 'parameters': [],\n 'variables': [],\n 'exceptions': [],\n 'return': {},\n 'references': [],\n }\n\n def make_param(_id, _description, _type=None, _required=None):\n ret = {\n 'id': _id,\n 'description': _description.strip(\" \\n\\r\\t\")\n }\n if _type:\n ret['type'] = _type\n\n if _required is not None:\n ret['isRequired'] = _required\n\n return ret\n\n def transform_para(para_field):\n if isinstance(para_field, addnodes.pending_xref):\n return transform_node(para_field)\n else:\n return para_field.astext()\n\n def resolve_type(data_type):\n # Remove @ ~ and \\n for cross reference in parameter/return value type to apply to docfx correctly\n data_type = re.sub('[@~\\n]', '', data_type)\n\n # Add references for docfx to resolve ref if type contains TYPE_SEP_PATTERN\n _spec_list = []\n _spec_fullnames = re.split(TYPE_SEP_PATTERN, data_type)\n\n _added_reference = {}\n if len(_spec_fullnames) > 1:\n _added_reference_name = ''\n for _spec_fullname in _spec_fullnames:\n if _spec_fullname != '':\n _spec = {}\n _spec['name'] = _spec_fullname.split('.')[-1]\n _spec['fullName'] = _spec_fullname\n if re.match(TYPE_SEP_PATTERN, _spec_fullname) is None:\n _spec['uid'] = _spec_fullname\n _spec_list.append(_spec)\n _added_reference_name += _spec['name']\n\n _added_reference = {\n 'uid': data_type,\n 'name': _added_reference_name,\n 'fullName': data_type,\n 'spec.python': _spec_list\n }\n\n return data_type, _added_reference\n\n def extract_exception_desc(field_object):\n ret = []\n if len(field_object) > 0:\n for field in field_object:\n if 'field_name' == field[0].tagname and field[0].astext() == 'Raises':\n assert field[1].tagname == 'field_body'\n field_body = field[1]\n\n children = [n for n in field_body\n if not isinstance(n, nodes.Invisible)]\n\n for child in children:\n if isinstance (child, nodes.paragraph):\n pending_xref_index = child.first_child_matching_class(addnodes.pending_xref)\n if pending_xref_index is not None:\n pending_xref = child[pending_xref_index]\n raise_type_index = pending_xref.first_child_matching_class(nodes.literal)\n if raise_type_index is not None:\n raise_type = pending_xref[raise_type_index]\n ret.append({'type': pending_xref['reftarget'], 'desc': raise_type.astext()})\n\n return ret\n\n for entry in entries:\n if isinstance(entry, nodes.field):\n # pass-through old field\n pass\n else:\n fieldtype, content = entry\n fieldtypes = types.get(fieldtype.name, {})\n if fieldtype.name == 'exceptions':\n for _type, _description in content:\n data['exceptions'].append({\n 'type': _type,\n 'description': transform_node(_description[0]).strip(\" \\n\\r\\t\")\n })\n if fieldtype.name == 'returntype':\n for returntype_node in content[1]:\n returntype_ret = transform_node(returntype_node)\n if returntype_ret:\n # Support or in returntype\n for returntype in re.split('[ \\n]or[ \\n]', returntype_ret):\n returntype, _added_reference = resolve_type(returntype)\n if _added_reference:\n if len(data['references']) == 0:\n data['references'].append(_added_reference)\n elif any(r['uid'] != _added_reference['uid'] for r in data['references']):\n data['references'].append(_added_reference)\n\n data['return'].setdefault('type', []).append(returntype)\n if fieldtype.name == 'returnvalue':\n returnvalue_ret = transform_node(content[1][0])\n if returnvalue_ret:\n data['return']['description'] = returnvalue_ret.strip(\" \\n\\r\\t\")\n if fieldtype.name in ['parameter', 'variable', 'keyword']:\n for field, node_list in 
content:\n _id = field\n _description = transform_node(node_list[0])\n if field in fieldtypes:\n _type = u''.join(transform_para(n) for n in fieldtypes[field])\n else:\n _type = None\n\n _para_types = []\n if fieldtype.name == 'parameter' or fieldtype.name == 'keyword':\n if _type:\n # Support or in parameter type\n for _s_type in re.split('[ \\n]or[ \\n]', _type):\n _s_type, _added_reference = resolve_type(_s_type)\n if _added_reference:\n if len(data['references']) == 0:\n data['references'].append(_added_reference)\n elif any(r['uid'] != _added_reference['uid'] for r in data['references']):\n data['references'].append(_added_reference)\n\n _para_types.append(_s_type)\n\n\n\n _data = make_param(_id=_id, _type=_para_types, _description=_description, _required=False if fieldtype.name == 'keyword' else True)\n data['parameters'].append(_data)\n\n if fieldtype.name == 'variable':\n if _type:\n # Support or in variable type\n for _s_type in re.split('[ \\n]or[ \\n]', _type):\n _s_type, _added_reference = resolve_type(_s_type)\n if _added_reference:\n if len(data['references']) == 0:\n data['references'].append(_added_reference)\n elif any(r['uid'] != _added_reference['uid'] for r in data['references']):\n data['references'].append(_added_reference)\n\n _para_types.append(_s_type)\n\n _data = make_param(_id=_id, _type=_para_types, _description=_description)\n data['variables'].append(_data)\n\n ret_list = extract_exception_desc(field_object)\n for ret in ret_list:\n # only use type in exceptions\n data.setdefault('exceptions', []).append({\n 'type': ret['type']\n })\n\n return data",
"def field(self) -> 'outputs.PreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldField':\n return pulumi.get(self, \"field\")",
"def __parse_field(self, field, tuple_descriptor, alias_on_complex_types, make_visible):\r\n alias = None\r\n field_type = None\r\n return_type = None\r\n underlying_fields = None\r\n aggregate_factory = None\r\n literal_value = None\r\n func_factory = None\r\n fields_to_verify = []\r\n parsed_fds = []\r\n field_backup = list(field)\r\n self.__clean_list(field)\r\n \r\n # parse aliases if they exist\r\n if (len(field) >= 4) and (field[-2] == QueryTokens.AS):\r\n alias = field[-1]\r\n field = field[:-2]\r\n if (field[0] == QueryTokens.STRING_LITERAL) or \\\r\n (field[0] == QueryTokens.INTEGER_LITERAL) or \\\r\n (field[0] == QueryTokens.FLOAT_LITERAL): \r\n alias = self.unnamed_operator_name()\r\n underlying_fields = []\r\n field_type = FieldType.LITERAL\r\n literal_value = field[1]\r\n if field[0] == QueryTokens.STRING_LITERAL:\r\n return_type = ReturnType.STRING\r\n elif field[0] == QueryTokens.INTEGER_LITERAL:\r\n return_type = ReturnType.INTEGER\r\n literal_value = int(literal_value)\r\n elif field[0] == QueryTokens.FLOAT_LITERAL:\r\n return_type = ReturnType.FLOAT\r\n literal_value = float(literal_value)\r\n elif field[0] == QueryTokens.COLUMN_NAME: # field or alias\r\n if alias == None:\r\n alias = field[1]\r\n field_descriptor = tuple_descriptor.get_descriptor(field[1])\r\n if field_descriptor == None: # underlying field not yet defined. mark to check later\r\n field_type = FieldType.UNDEFINED\r\n underlying_fields = [field[1]]\r\n # check alias and underlying once this process is done to\r\n # find yet-undefined fields\r\n fields_to_verify.append(field[1])\r\n fields_to_verify.append(alias)\r\n else: # field found, copy information\r\n field_type = field_descriptor.field_type\r\n return_type = field_descriptor.return_type\r\n underlying_fields = field_descriptor.underlying_fields\r\n aggregate_factory = field_descriptor.aggregate_factory\r\n func_factory = field_descriptor.func_factory\r\n elif field[0] == QueryTokens.FUNCTION_OR_AGGREGATE: # function or aggregate \r\n if alias == None:\r\n if alias_on_complex_types:\r\n raise QueryException(\"Must specify alias (AS clause) for '%s'\" % (field[1]))\r\n else:\r\n alias = self.unnamed_operator_name()\r\n underlying_field_list = field[2:]\r\n underlying_fields = []\r\n for underlying in underlying_field_list:\r\n (parsed_fd_list, parsed_verify) = self.__parse_field(underlying, tuple_descriptor, False, False)\r\n for parsed_fd in parsed_fd_list:\r\n parsed_fd.visible = False\r\n fields_to_verify.extend(parsed_verify)\r\n parsed_fds.extend(parsed_fd_list)\r\n underlying_fields.append(parsed_fd_list[0].alias)\r\n aggregate_factory = get_aggregate_factory(field[1])\r\n if aggregate_factory != None: # found an aggregate function\r\n field_type = FieldType.AGGREGATE\r\n return_type = ReturnType.FLOAT\r\n else:\r\n function_information = self.function_registry.get_function(field[1])\r\n if function_information != None:\r\n field_type = FieldType.FUNCTION\r\n func_factory = function_information.func_factory\r\n return_type = function_information.return_type\r\n else:\r\n raise QueryException(\"'%s' is neither an aggregate or a registered function\" % (field[1]))\r\n else:\r\n raise QueryException(\"Empty field clause found: %s\" % (\"\".join(field_backup)))\r\n fd = FieldDescriptor(alias, underlying_fields, field_type, return_type, aggregate_factory, func_factory, literal_value)\r\n fd.visible = make_visible\r\n parsed_fds.insert(0, fd)\r\n return (parsed_fds, fields_to_verify)",
"def define_fields(cls, dbmanager):\n return []",
"def GetFieldFromConfigText(self,\n fieldname_as_written: str,\n run_unsafe: bool = False) -> Optional[OptWrapper]:\n try:\n # Check the field as if it's fully qualified.\n return self.GetField(fieldname_as_written, run_unsafe)\n except TypeError:\n pass\n\n # Field is unqualified so it is either global or type-namespace-local\n # Check for a locally defined field first using type's namespace\n field = self._GetField(\n self.namespace.namespace + '/' + fieldname_as_written, run_unsafe)\n if not field:\n # Check field as if it's in the global namespace\n field = self._GetField('/' + fieldname_as_written, run_unsafe)\n return field"
]
| [
"0.66650426",
"0.61259234",
"0.5725323",
"0.5587269",
"0.55751014",
"0.54820305",
"0.53778094",
"0.53522205",
"0.5343069",
"0.52950495",
"0.52775973",
"0.5277331",
"0.5243576",
"0.52401596",
"0.52392125",
"0.5216343",
"0.51968074",
"0.5188637",
"0.5114995",
"0.511223",
"0.5089547",
"0.5085712",
"0.5076501",
"0.50722057",
"0.50649756",
"0.5061819",
"0.5056445",
"0.50547445",
"0.50520927",
"0.5032552"
]
| 0.6130992 | 1 |
Draws a texture on the internal image. If the texture is smaller than the image, it loops. | def __draw_board_texture(self, texture):
textureWidth, textureHeight = texture.size
for x in range(0, self.width, textureWidth):
for y in range(0, self.height, textureHeight):
self.baseImage.paste(texture, (x, y)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def draw(self):\n\n glEnable(self.texture.target)\n glBindTexture(self.texture.target, self.texture.id)\n if self.mipmaps:\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR)",
"def draw(self):\r\n self.screen.blit(self.image, self.image.get_rect())",
"def setupTexture( self ):\n glEnable(GL_TEXTURE_2D)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)\n glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL)\n glBindTexture(GL_TEXTURE_2D, self.imageID)",
"def setupTexture(self):\r\n # Configure the texture rendering parameters\r\n glEnable(GL_TEXTURE_2D)\r\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)\r\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)\r\n glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL)\r\n\r\n # Re-select our texture, could use other generated textures\r\n # if we had generated them earlier.\r\n glBindTexture(GL_TEXTURE_2D, self.imageID)",
"def populate_texture(self, texture):\n texture.blit_buffer(self._cbuffer, colorfmt='bgr', bufferfmt='ubyte')",
"def draw(self, surface):\r\n surface.blit(self.image, self.rect)",
"def draw(self):\n self.screen.blit(self.image, self.rect)",
"def draw(self, surface):\n surface.blit(self.image, self.rect)",
"def draw(self, surface):\n surface.blit(self.image, self.rect)",
"def drawFloor(width, height, texture):\n glBindTexture(GL_TEXTURE_2D, texture)\n glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE) # try GL_DECAL/GL_REPLACE/GL_MODULATE\n glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST) # try GL_NICEST/GL_FASTEST\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE) # try GL_CLAMP/GL_REPEAT/GL_CLAMP_TO_EDGE\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR) # try GL_LINEAR/GL_NEAREST\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)\n\n sx = width / 2\n ex = -sx\n sz = height / 2\n ez = -sz\n\n # Enable/Disable each time or OpenGL ALWAYS expects texturing!\n glEnable(GL_TEXTURE_2D)\n\n glBegin(GL_QUADS)\n glTexCoord2f(0, 0)\n glVertex3f(sx, 0, sz)\n glTexCoord2f(0, 1)\n glVertex3f(sx, 0, ez)\n glTexCoord2f(1, 1)\n glVertex3f(ex, 0, ez)\n glTexCoord2f(1, 0)\n glVertex3f(ex, 0, sz)\n glEnd()\n\n glDisable(GL_TEXTURE_2D)",
"def RenderTexture(self, vtkVolume, vtkRenderer, p_int=..., p_int=..., *args, **kwargs):\n ...",
"def draw(self, img, tile_img, tiles):\n rect = get_tile_rect(self.pos)\n rect = Rect([rect.x + self.anim_offset.x, rect.y + self.anim_offset.y, rect.w, rect.h])\n img.blit(tile_img, rect, tiles[self.tile])",
"def draw_offscreen(context):\n offscreen = SprytileGui.offscreen\n target_img = SprytileGui.texture_grid\n tex_size = SprytileGui.tex_size\n\n offscreen.bind()\n glClear(GL_COLOR_BUFFER_BIT)\n glDisable(GL_DEPTH_TEST)\n glEnable(GL_BLEND)\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n gluOrtho2D(0, tex_size[0], 0, tex_size[1])\n\n def draw_full_quad():\n texco = [(0, 0), (0, 1), (1, 1), (1, 0)]\n verco = [(0, 0), (0, tex_size[1]), (tex_size[0], tex_size[1]), (tex_size[0], 0)]\n glBegin(bgl.GL_QUADS)\n for i in range(4):\n glTexCoord2f(texco[i][0], texco[i][1])\n glVertex2f(verco[i][0], verco[i][1])\n glEnd()\n\n glColor4f(0.0, 0.0, 0.0, 0.5)\n draw_full_quad()\n\n if target_img is not None:\n glColor4f(1.0, 1.0, 1.0, 1.0)\n target_img.gl_load(0, GL_NEAREST, GL_NEAREST)\n glBindTexture(GL_TEXTURE_2D, target_img.bindcode[0])\n # We need to backup and restore the MAG_FILTER to avoid messing up the Blender viewport\n old_mag_filter = Buffer(GL_INT, 1)\n glGetTexParameteriv(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, old_mag_filter)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)\n glEnable(GL_TEXTURE_2D)\n draw_full_quad()\n glTexParameteriv(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, old_mag_filter)\n\n # Translate the gl context by grid matrix\n grid_matrix = sprytile_utils.get_grid_matrix(SprytileGui.loaded_grid)\n matrix_vals = [grid_matrix[j][i] for i in range(4) for j in range(4)]\n grid_buff = bgl.Buffer(bgl.GL_FLOAT, 16, matrix_vals)\n\n glMatrixMode(GL_MODELVIEW)\n glPushMatrix()\n glLoadIdentity()\n glLoadMatrixf(grid_buff)\n\n glDisable(GL_TEXTURE_2D)\n\n # Get data for drawing additional overlays\n grid_size = SprytileGui.loaded_grid.grid\n padding = SprytileGui.loaded_grid.padding\n margin = SprytileGui.loaded_grid.margin\n curr_sel = SprytileGui.loaded_grid.tile_selection\n is_pixel_grid = sprytile_utils.grid_is_single_pixel(SprytileGui.loaded_grid)\n is_use_mouse = context.scene.sprytile_ui.use_mouse\n is_selecting = SprytileGui.is_selecting\n\n glLineWidth(1)\n\n # Draw box for currently selected tile(s)\n # Pixel grid selection is drawn in draw_tile_select_ui\n sprytile_data = context.scene.sprytile_data\n is_not_base_layer = sprytile_data.work_layer != \"BASE\"\n draw_outline = sprytile_data.outline_preview or is_not_base_layer\n if draw_outline and is_selecting is False and not is_pixel_grid:\n if is_not_base_layer:\n glColor4f(0.98, 0.94, 0.12, 1.0)\n elif SprytileGui.is_moving:\n glColor4f(1.0, 0.0, 0.0, 1.0)\n else:\n glColor4f(1.0, 1.0, 1.0, 1.0)\n curr_sel_min, curr_sel_max = SprytileGui.get_sel_bounds(\n grid_size, padding, margin,\n curr_sel[0], curr_sel[1],\n curr_sel[2], curr_sel[3]\n )\n SprytileGui.draw_selection(curr_sel_min, curr_sel_max)\n\n # Inside gui, draw appropriate selection for under mouse\n if is_use_mouse and is_selecting is False and SprytileGui.cursor_grid_pos is not None:\n\n cursor_pos = SprytileGui.cursor_grid_pos\n # In pixel grid, draw cross hair\n if is_pixel_grid and SprytileGui.is_moving is False:\n glColor4f(1.0, 1.0, 1.0, 0.5)\n glBegin(GL_LINE_STRIP)\n glVertex2i(0, int(cursor_pos.y + 1))\n glVertex2i(tex_size[0], int(cursor_pos.y + 1))\n glEnd()\n\n glBegin(GL_LINE_STRIP)\n glVertex2i(int(cursor_pos.x + 1), 0)\n glVertex2i(int(cursor_pos.x + 1), tex_size[1])\n glEnd()\n # Draw box around selection\n elif SprytileGui.is_moving is False:\n glColor4f(1.0, 0.0, 0.0, 1.0)\n cursor_min, cursor_max = SprytileGui.get_sel_bounds(grid_size, padding, margin,\n int(cursor_pos.x), int(cursor_pos.y),)\n 
SprytileGui.draw_selection(cursor_min, cursor_max)\n\n glPopMatrix()\n offscreen.unbind()",
"def draw(self, screen, size_block):\n pos = self.board.coordinate_to_position(self.coordinate)\n screen.blit(pygame.transform.scale(self.image, (size_block, size_block)), (pos[0], pos[1]))",
"def draw_img(self, i, j, k):\n if k < len(self.images):\n img = self.images[k]\n r = self.get_rect(i, j)\n self.screen.blit(img, r)",
"def draw_background(imname, width, height):\n \n # load background image (should be .bmp) to OpenGL texture\n bg_image = pygame.image.load(imname).convert()\n bg_data = pygame.image.tostring(bg_image,\"RGBX\",1)\n \n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n \n # bind the texture\n glEnable(GL_TEXTURE_2D)\n glBindTexture(GL_TEXTURE_2D,glGenTextures(1))\n glTexImage2D(GL_TEXTURE_2D,0,GL_RGBA,width,height,0,GL_RGBA,GL_UNSIGNED_BYTE,bg_data)\n glTexParameterf(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_NEAREST)\n glTexParameterf(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_NEAREST)\n \n # create quad to fill the whole window\n glBegin(GL_QUADS)\n glTexCoord2f(0.0,0.0); glVertex3f(-1.0,-1.0,-1.0)\n glTexCoord2f(1.0,0.0); glVertex3f( 1.0,-1.0,-1.0)\n glTexCoord2f(1.0,1.0); glVertex3f( 1.0, 1.0,-1.0)\n glTexCoord2f(0.0,1.0); glVertex3f(-1.0, 1.0,-1.0)\n glEnd()\n \n # clear the texture\n glDeleteTextures(1)",
"def draw(self):\n if self.dirty or (self.image is None):\n self._render()\n self.screen.blit(self.image, self.rect)",
"def blitme(self):\r\n self.screen.blit(self.image, self.rect)",
"def blitme(self):\r\n self.screen.blit(self.image, self.rect)",
"def blitme(self):\r\n self.screen.blit(self.image, self.rect)",
"def blitme(self):\r\n self.screen.blit(self.image, self.rect)",
"def draw(self, screen):\n pg.draw.rect(screen, self.bg_color, self.rect)\n\n for y, surf in enumerate(self.images):\n # Don't blit below the rect area.\n if y * self.font_height + self.font_height > self.rect.h:\n break\n screen.blit(surf, (self.rect.x, self.rect.y+y*self.font_height))",
"def render(self):\n self._surface.fill(Color('black'))\n for y in range(0, self.height):\n for x in range(0, self.length):\n if self.grid.get_cell(x, y) == CellType.snake:\n self._surface.blit(self.snake_cell_image,\n (x * self.cell_size, y * self.cell_size))\n elif self.grid.get_cell(x, y) == CellType.apple:\n self._surface.blit(self.apple_cell_image,\n (x * self.cell_size, y * self.cell_size))\n pg.display.update()",
"def draw(self):\n self.game.screen.blit(self.image, self.game.off(self.pos))",
"def blitme(self):\n self.screen.blit(self.image, self.rect)",
"def draw(self, screen):\n\n if self.exist:\n screen.blit(self._img, self._rect)",
"def blit(self):\n self.screen.blit(self.image, self.rect)",
"def BuildTexture(path):\n # Catch exception here if image file couldn't be loaded\n try:\n # Note, NYI, path specified as URL's could be access using python url lib\n # OleLoadPicturePath () supports url paths, but that capability isn't critcial to this tutorial.\n Picture = Image.open(path)\n except:\n print(\"Unable to open image file '%s'.\" % (path))\n return False, 0\n\n glMaxTexDim = glGetIntegerv(GL_MAX_TEXTURE_SIZE)\n\n WidthPixels = Picture.size[0]\n HeightPixels = Picture.size[1]\n\n if (WidthPixels > glMaxTexDim) or (HeightPixels > glMaxTexDim):\n # The image file is too large. Shrink it to fit within the texture dimensions\n # support by our rendering context for a GL texture.\n # Note, Feel free to experiemnt and force a resize by placing a small val into\n # glMaxTexDim (e.g. 32,64,128).\n if WidthPixels > HeightPixels:\n # Width is the domainant dimension.\n resizeWidthPixels = glMaxTexDim\n squash = float(resizeWidthPixels) / float(WidthPixels)\n resizeHeightPixels = int(HeighPixels * squash)\n else:\n resizeHeightPixels = glMaxTexDim\n squash = float(resizeHeightPixels) / float(HeightPixels)\n resizeWidthPixels = int(WidthPixels * squash)\n else:\n # // Resize Image To Closest Power Of Two\n if WidthPixels > HeightPixels:\n # Width is the domainant dimension.\n resizeWidthPixels = next_p2(WidthPixels)\n squash = float(resizeWidthPixels) / float(WidthPixels)\n resizeHeightPixels = int(HeighPixels * squash)\n else:\n resizeHeightPixels = next_p2(HeightPixels)\n squash = float(resizeHeightPixels) / float(HeightPixels)\n resizeWidthPixels = int(WidthPixels * squash)\n #\n # Resize the image to be used as a texture.\n # The Python image library provides a handy method resize ().\n # Several filtering options are available.\n # If you don't specify a filtering option will default NEAREST\n Picture = Picture.resize((resizeWidthPixels, resizeHeightPixels), Image.BICUBIC)\n lWidthPixels = next_p2(resizeWidthPixels)\n lHeightPixels = next_p2(resizeWidthPixels)\n # Now we create an image that has the padding needed\n newpicture = Image.new(\"RGB\", (lWidthPixels, lHeightPixels), (0, 0, 0))\n newpicture.paste(Picture)\n\n # Create a raw string from the image data - data will be unsigned bytes\n # RGBpad, no stride (0), and first line is top of image (-1)\n pBits = (\n newpicture.tostring(\"raw\", \"RGBX\", 0, -1)\n if hasattr(newpicture, \"tostring\")\n else newpicture.tobytes(\"raw\", \"RGBX\", 0, -1)\n )\n\n # // Typical Texture Generation Using Data From The Bitmap\n texid = glGenTextures(1)\n # // Create The Texture\n glBindTexture(GL_TEXTURE_2D, texid)\n # // Bind To The Texture ID\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)\n # // (Modify This For The Type Of Filtering You Want)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)\n # // (Modify This For The Type Of Filtering You Want)\n\n # // (Modify This If You Want Mipmaps)\n glTexImage2D(\n GL_TEXTURE_2D,\n 0,\n 3,\n lWidthPixels,\n lHeightPixels,\n 0,\n GL_RGBA,\n GL_UNSIGNED_BYTE,\n pBits,\n )\n\n # Cleanup (python actually handles all memory for you, so this isn't necessary)\n # // Decrements IPicture Reference Count\n Picture = None\n newpicture = None\n return True, texid # // Return True (All Good)",
"def blitme(self):\n self.screen.blit(self.image, self.rect)",
"def blitme(self):\n self.screen.blit(self.image, self.rect)"
]
| [
"0.7347973",
"0.64104",
"0.6329882",
"0.6255792",
"0.62519455",
"0.62173605",
"0.6149818",
"0.6123493",
"0.6123493",
"0.6082988",
"0.6069757",
"0.6025694",
"0.59712917",
"0.59451574",
"0.59443766",
"0.59195346",
"0.58604574",
"0.58564085",
"0.58564085",
"0.58564085",
"0.58564085",
"0.58476615",
"0.58195263",
"0.5816389",
"0.5803701",
"0.57969713",
"0.5794447",
"0.5784183",
"0.5759226",
"0.5759226"
]
| 0.715414 | 1 |
Get the appropriate static method for exec'ing a QFileDialog. | def get_file_dialog_exec_func(mode):
if mode not in _STATIC_METHOD_NAMES:
raise ValueError("Unknown file dialog mode: '%s'" % mode)
return getattr(QFileDialog, _STATIC_METHOD_NAMES[mode]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def runOpenFileDialog(\n self,\n c: Cmdr,\n title: str,\n filetypes: list[str],\n defaultextension: str='',\n multiple: bool=False,\n startpath: str=None,\n ) -> Union[list[str], str]: # Return type depends on the evil multiple keyword.\n if g.unitTesting:\n return ''\n\n # 2018/03/14: Bug fixes:\n # - Use init_dialog_folder only if a path is not given\n # - *Never* Use os.curdir by default!\n if not startpath:\n # Returns c.last_dir or os.curdir\n startpath = g.init_dialog_folder(c, c.p, use_at_path=True)\n filter_ = self.makeFilter(filetypes)\n dialog = QtWidgets.QFileDialog()\n self.attachLeoIcon(dialog)\n func = dialog.getOpenFileNames if multiple else dialog.getOpenFileName\n if c:\n try:\n c.in_qt_dialog = True\n val = func(parent=None, caption=title, directory=startpath, filter=filter_)\n finally:\n c.in_qt_dialog = False\n else:\n val = func(parent=None, caption=title, directory=startpath, filter=filter_)\n # This is a *PyQt* change, not a Qt change.\n val, junk_selected_filter = val\n if multiple:\n files = [g.os_path_normslashes(s) for s in val]\n if c and files:\n c.last_dir = g.os_path_dirname(files[-1])\n # A consequence of the evil \"multiple\" kwarg.\n return files\n s = g.os_path_normslashes(val)\n if c and s:\n c.last_dir = g.os_path_dirname(s)\n return s",
"def browse(self):\n formats = [\n \"Text - comma separated (*.csv, *)\",\n \"Text - tab separated (*.tsv, *)\",\n \"Text - all files (*)\"\n ]\n\n dlg = QFileDialog(\n self, windowTitle=\"Open Data File\",\n acceptMode=QFileDialog.AcceptOpen,\n fileMode=QFileDialog.ExistingFile\n )\n dlg.setNameFilters(formats)\n state = self.dialog_state\n lastdir = state.get(\"directory\", \"\")\n lastfilter = state.get(\"filter\", \"\")\n\n if lastdir and os.path.isdir(lastdir):\n dlg.setDirectory(lastdir)\n if lastfilter:\n dlg.selectNameFilter(lastfilter)\n\n status = dlg.exec_()\n dlg.deleteLater()\n if status == QFileDialog.Accepted:\n self.dialog_state[\"directory\"] = dlg.directory().absolutePath()\n self.dialog_state[\"filter\"] = dlg.selectedNameFilter()\n\n selected_filter = dlg.selectedNameFilter()\n path = dlg.selectedFiles()[0]\n # pre-flight check; try to determine the nature of the file\n mtype = _mime_type_for_path(path)\n if not mtype.inherits(\"text/plain\"):\n mb = QMessageBox(\n parent=self,\n windowTitle=\"\",\n icon=QMessageBox.Question,\n text=\"The '{basename}' may be a binary file.\\n\"\n \"Are you sure you want to continue?\".format(\n basename=os.path.basename(path)),\n standardButtons=QMessageBox.Cancel | QMessageBox.Yes\n )\n mb.setWindowModality(Qt.WindowModal)\n if mb.exec() == QMessageBox.Cancel:\n return\n\n # initialize dialect based on selected extension\n if selected_filter in formats[:-1]:\n filter_idx = formats.index(selected_filter)\n if filter_idx == 0:\n dialect = csv.excel()\n elif filter_idx == 1:\n dialect = csv.excel_tab()\n else:\n dialect = csv.excel_tab()\n header = True\n else:\n try:\n dialect, header = sniff_csv_with_path(path)\n except Exception:\n dialect, header = csv.excel(), True\n\n options = None\n # Search for path in history.\n # If found use the stored params to initialize the import dialog\n items = self.itemsFromSettings()\n idx = index_where(items, lambda t: samepath(t[0], path))\n if idx is not None:\n _, options_ = items[idx]\n if options_ is not None:\n options = options_\n\n if options is None:\n if not header:\n rowspec = []\n else:\n rowspec = [(range(0, 1), RowSpec.Header)]\n options = Options(\n encoding=\"utf-8\", dialect=dialect, rowspec=rowspec)\n\n dlg = CSVImportDialog(\n self, windowTitle=\"Import Options\", sizeGripEnabled=True)\n dlg.setWindowModality(Qt.WindowModal)\n dlg.setPath(path)\n dlg.setOptions(options)\n status = dlg.exec_()\n dlg.deleteLater()\n if status == QDialog.Accepted:\n self.set_selected_file(path, dlg.options())",
"def open_fileDialog(self):\n\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n fileName, _ = QFileDialog.getOpenFileName(self, \"Открыть исходный файл\", os.path.expanduser(\"~\"),\n \"XML Файлы (*.xml);;JSON Файлы (*.json)\", options=options)\n if fileName:\n file_format = fileName.split('.')[1]\n if file_format == 'xml':\n self.data_from_xml(fileName)\n elif file_format == 'json':\n self.data_from_json(fileName)\n self.msg2Statusbar.emit('Импорт из файла {0}'.format(fileName))",
"def on_open_file(self):\n return tkFileDialog.askopenfilename(\n filetypes=[('default', '*.txt'), ('All files', '*.*')])",
"def locatefile(self):\r\n dm = DialogManager()\r\n print \"Opening file chooser ...\"\r\n file = dm.choosefile(\"Choose Raw File\")\r\n return file",
"def callDialog(self):\n self.pathTuple = filedialog.askopenfilenames(filetypes=[(\"Excel files\", \".xlsx .xls .xlsm .xlsb\")])\n self.fileNames = [basename(path.abspath(name)) for name in self.pathTuple]",
"def get_file_dialog(*, defaultpath=None, extensionfilter=None):\n qapp = QApplicationStarter()\n kwargs = {'directory': defaultpath,\n 'filter': extensionfilter}\n directory = QtGui.QFileDialog.getOpenFileName(\n None, \"Choose a file\", **kwargs)\n return directory",
"def select_file_upload_method():\n\n if not Settings.prompt(\"upload files\"): \n return \"unset\"\n Settings.print(\"Select an upload source\")\n sources = Settings.get_source_options()\n question = {\n 'type': 'list',\n 'name': 'upload',\n 'message': 'Upload:',\n 'choices': [src.title() for src in sources]\n }\n upload = PyInquirer.prompt(question)[\"upload\"]\n\n\n # everything after this part should be in another function\n # this should just return the string of the upload source\n\n\n if str(upload) == \"Local\":\n return File.select_files()\n elif str(upload) == \"Google\":\n return Google_File.select_files()\n # elif str(upload) == \"Dropbox\":\n # return Dropbox.select_files()\n elif str(upload) == \"Remote\":\n return Remote.select_files()\n return File.select_files()",
"def fileDialog(self, startingDir, fileFilter='All files (*.*)'):\n\n\t\tdialog = QtWidgets.QFileDialog.getOpenFileName(\n\t\t\tself, self.tr('Files'), startingDir, fileFilter)\n\n\t\ttry:\n\t\t\treturn dialog[0]\n\t\texcept IndexError:\n\t\t\treturn None",
"def __init__(self,path='.',pattern='*.*',exist=False,multi=False,dir=False):\n QtGui.QFileDialog.__init__(self)\n if os.path.isfile(path):\n self.setDirectory(os.path.dirname(path))\n self.selectFile(path)\n else:\n self.setDirectory(path)\n if type(pattern) == str:\n self.setFilter(pattern)\n else: # should be a list of patterns\n self.setFilters(pattern)\n if dir:\n mode = QtGui.QFileDialog.Directory\n caption = \"Select a directory\"\n elif exist:\n if multi:\n mode = QtGui.QFileDialog.ExistingFiles\n caption = \"Select existing files\"\n else:\n mode = QtGui.QFileDialog.ExistingFile\n caption = \"Open existing file\"\n else:\n mode = QtGui.QFileDialog.AnyFile\n caption = \"Save file as\"\n self.setFileMode(mode)\n self.setWindowTitle(caption)\n if exist:\n self.setLabelText(QtGui.QFileDialog.Accept,'Open')\n else:\n self.setLabelText(QtGui.QFileDialog.Accept,'Save')",
"def __showPathPickerDialog(self):\n if self.__mode == E5PathPickerModes.NoMode:\n return\n \n if self.__mode == E5PathPickerModes.CustomMode:\n self.pickerButtonClicked.emit()\n return\n \n self.aboutToShowPathPickerDialog.emit()\n \n windowTitle = self.__windowTitle\n if not windowTitle:\n if self.__mode == E5PathPickerModes.OpenFileMode:\n windowTitle = self.tr(\"Choose a file to open\")\n elif self.__mode == E5PathPickerModes.OpenFilesMode:\n windowTitle = self.tr(\"Choose files to open\")\n elif self.__mode in [\n E5PathPickerModes.SaveFileMode,\n E5PathPickerModes.SaveFileEnsureExtensionMode,\n E5PathPickerModes.SaveFileOverwriteMode]:\n windowTitle = self.tr(\"Choose a file to save\")\n elif self.__mode == E5PathPickerModes.DirectoryMode:\n windowTitle = self.tr(\"Choose a directory\")\n \n directory = self._editorText()\n if not directory and self.__defaultDirectory:\n directory = self.__defaultDirectory\n if self.__mode == E5PathPickerModes.OpenFilesMode:\n directory = os.path.expanduser(directory.split(\";\")[0])\n else:\n directory = os.path.expanduser(directory)\n if not os.path.isabs(directory) and self.__defaultDirectory:\n directory = os.path.join(self.__defaultDirectory, directory)\n directory = Utilities.fromNativeSeparators(directory)\n \n if self.__mode == E5PathPickerModes.OpenFileMode:\n path = E5FileDialog.getOpenFileName(\n self,\n windowTitle,\n directory,\n self.__filters)\n path = Utilities.toNativeSeparators(path)\n elif self.__mode == E5PathPickerModes.OpenFilesMode:\n paths = E5FileDialog.getOpenFileNames(\n self,\n windowTitle,\n directory,\n self.__filters)\n path = \";\".join([Utilities.toNativeSeparators(path)\n for path in paths])\n elif self.__mode == E5PathPickerModes.SaveFileMode:\n path = E5FileDialog.getSaveFileName(\n self,\n windowTitle,\n directory,\n self.__filters,\n E5FileDialog.Options(E5FileDialog.DontConfirmOverwrite))\n path = Utilities.toNativeSeparators(path)\n elif self.__mode == E5PathPickerModes.SaveFileEnsureExtensionMode:\n path, selectedFilter = E5FileDialog.getSaveFileNameAndFilter(\n self,\n windowTitle,\n directory,\n self.__filters,\n None,\n E5FileDialog.Options(E5FileDialog.DontConfirmOverwrite))\n path = Utilities.toNativeSeparators(path)\n if path:\n ext = QFileInfo(path).suffix()\n if not ext:\n ex = selectedFilter.split(\"(*\")[1].split(\")\")[0]\n if ex:\n path += ex\n elif self.__mode == E5PathPickerModes.SaveFileOverwriteMode:\n path = E5FileDialog.getSaveFileName(\n self,\n windowTitle,\n directory,\n self.__filters)\n path = Utilities.toNativeSeparators(path)\n elif self.__mode == E5PathPickerModes.DirectoryMode:\n path = E5FileDialog.getExistingDirectory(\n self,\n windowTitle,\n directory,\n E5FileDialog.Options(E5FileDialog.ShowDirsOnly))\n path = Utilities.toNativeSeparators(path)\n while path.endswith(os.sep):\n path = path[:-1]\n elif self.__mode == E5PathPickerModes.DirectoryShowFilesMode:\n path = E5FileDialog.getExistingDirectory(\n self,\n windowTitle,\n directory,\n E5FileDialog.Options(E5FileDialog.DontUseNativeDialog))\n path = Utilities.toNativeSeparators(path)\n while path.endswith(os.sep):\n path = path[:-1]\n \n if path:\n self._setEditorText(path)\n self.pathSelected.emit(path)",
"def runSaveFileDialog(self,\n c: Cmdr, title: str='Save', filetypes: list[str]=None, defaultextension: str='',\n ) -> str:\n if g.unitTesting:\n return ''\n dialog = QtWidgets.QFileDialog()\n if c:\n # dialog.setStyleSheet(c.active_stylesheet)\n self.attachLeoIcon(dialog)\n try:\n c.in_qt_dialog = True\n obj = dialog.getSaveFileName(\n None, # parent\n title,\n # os.curdir,\n g.init_dialog_folder(c, c.p, use_at_path=True),\n self.makeFilter(filetypes or []),\n )\n finally:\n c.in_qt_dialog = False\n else:\n self.attachLeoIcon(dialog)\n obj = dialog.getSaveFileName(\n None, # parent\n title,\n # os.curdir,\n g.init_dialog_folder(None, None, use_at_path=True),\n self.makeFilter(filetypes or []),\n )\n # Bizarre: PyQt5 version can return a tuple!\n s = obj[0] if isinstance(obj, (list, tuple)) else obj\n s = s or ''\n if c and s:\n c.last_dir = g.os_path_dirname(s)\n return s",
"def fileDialog(*args, application: bool=True, defaultFileName: AnyStr=\"\", directoryMask:\n AnyStr=\"\", mode: int=0, title: AnyStr=\"\", **kwargs)->AnyStr:\n pass",
"def popup(self, title, callfn, initialdir=None, filename=None):\n self.cb = callfn\n filenames = QtGui.QFileDialog.getOpenFileNames(\n self.parent, title, initialdir, filename)\n\n # Special handling for PyQt5, see\n # https://www.reddit.com/r/learnpython/comments/2xhagb/pyqt5_trouble_with_openinggetting_the_name_of_the/\n filenames = filenames[0]\n\n all_paths = []\n for filename in filenames:\n\n # Special handling for wildcard or extension.\n # This is similar to open_files() in FBrowser plugin.\n if '*' in filename or '[' in filename:\n info = iohelper.get_fileinfo(filename)\n ext = iohelper.get_hdu_suffix(info.numhdu)\n files = glob.glob(info.filepath) # Expand wildcard\n paths = ['{0}{1}'.format(f, ext) for f in files]\n if self.all_at_once:\n all_paths.extend(paths)\n else:\n for path in paths:\n self.cb(path)\n\n else:\n # Normal load\n if self.all_at_once:\n all_paths.append(filename)\n else:\n self.cb(filename)\n\n if self.all_at_once and len(all_paths) > 0:\n self.cb(all_paths)",
"def askOpen(parent,title='',defaultDir='',defaultFile='',wildcard='',style=wx.OPEN):\r\n defaultDir,defaultFile = [GPath(x).s for x in (defaultDir,defaultFile)]\r\n dialog = wx.FileDialog(parent,title,defaultDir,defaultFile,wildcard, style )\r\n if dialog.ShowModal() != wx.ID_OK: \r\n result = False\r\n elif style & wx.MULTIPLE:\r\n result = map(GPath,dialog.GetPaths())\r\n else:\r\n result = GPath(dialog.GetPath())\r\n dialog.Destroy()\r\n return result",
"def _get_filename_from_dialog(file_type):\n\n if file_type is 'res':\n caption = 'Select a results file.'\n filter = 'Adams Results Files (*.res)'\n # Bring up a dialog for the user to select a results file\n filename = PyQt4.QtGui.QFileDialog.getOpenFileName(caption=caption, filter=filter)\n\n elif file_type is 'csv':\n caption='Select location to save the csv results file.'\n filter='CSV Files (*.csv)'\n # Bring up a dialog for the user to select a results file\n filename = PyQt4.QtGui.QFileDialog.getSaveFileName(caption=caption, filter=filter) \n\n return filename",
"def fileDialog2(*args, buttonBoxOrientation: int=0, cancelCaption: AnyStr=\"\", caption:\n AnyStr=\"\", dialogStyle: int=0, fileFilter: AnyStr=\"\", fileMode: int=0,\n fileTypeChanged: Script=None, hideNameEdit: bool=True, okCaption: AnyStr=\"\",\n optionsUICancel: Script=None, optionsUICommit: Script=None, optionsUICommit2:\n Script=None, optionsUICreate: Script=None, optionsUIInit: Script=None,\n returnFilter: bool=True, selectFileFilter: AnyStr=\"\", selectionChanged:\n Script=None, setProjectBtnEnabled: bool=True, startingDirectory: AnyStr=\"\",\n **kwargs)->AnyStr:\n pass",
"def menu_Open():\n asdf = tkFileDialog.askopenfilename()\n print(asdf)",
"def openFileExplorer(self, caption=''):\n\n file_path = None\n file_path, idk = QFileDialog.getOpenFileName(caption=caption)\n\n if file_path == '':\n file_path = None\n\n return file_path",
"def fileBrowserDialog(*args, actionName: AnyStr=\"\", dialogStyle: int=0, fileCommand:\n Script=None, fileType: AnyStr=\"\", filterList: Union[AnyStr,\n List[AnyStr]]=\"\", includeName: AnyStr=\"\", mode: int=0, operationMode:\n AnyStr=\"\", tipMessage: AnyStr=\"\", windowTitle: AnyStr=\"\",\n **kwargs)->AnyStr:\n pass",
"def choose_file(self):\n pass",
"def __init__(\n self,\n title:str=\"Universal File Dialog\",\n icon:str=\"\",\n show_hidden:bool=False,\n include_files:bool=True,\n multiselect:bool=True,\n select_dirs:bool=True,\n select_files:bool=True,\n unix_delimiter:bool=True,\n stdout:bool=False\n ):\n\n if not isinstance(title, str):\n raise TypeError(\"Argument title must be type string.\")\n\n self.title = title\n\n if icon:\n if not isinstance(icon, str):\n raise TypeError(\"Argument icon must be type string.\")\n\n if not isfile(icon):\n raise FileNotFoundError(f\"File not found: {icon}\")\n\n self.icon = icon\n\n else: \n self.icon = \"\"\n\n if show_hidden:\n self.show_hidden = True\n else:\n self.show_hidden = False\n\n if include_files:\n self.include_files = True\n else:\n self.include_files = False\n\n if multiselect:\n self.multiselect = True\n else:\n self.multiselect = False\n\n if select_dirs:\n self.select_dirs = True\n else:\n self.select_dirs = False\n\n if select_files:\n self.select_files = True\n else:\n self.select_files = False\n\n if unix_delimiter:\n self.unix_delimiter = True\n else:\n self.unix_delimiter = False\n\n if stdout:\n self.stdout = True\n else:\n self.stdout = False\n\n # Tkinter:\n self.dialog = Tk()\n self.dialog.withdraw()\n self.dialog.title(self.title)\n self.dialog.minsize(width=300, height=200)\n self.dialog.geometry(\"500x300\")\n self.dialog.update_idletasks()\n\n self.file_icon=PhotoImage(\n file=f\"{dirname(__file__)}/file.gif\",\n master=self.dialog\n ).subsample(50)\n\n self.folder_icon=PhotoImage(\n file=f\"{dirname(__file__)}/folder.gif\",\n master=self.dialog\n ).subsample(15)\n \n self.disk_icon=PhotoImage(\n file=f\"{dirname(__file__)}/disk.gif\",\n master=self.dialog\n ).subsample(15)\n\n if self.icon:\n self.dialog.iconbitmap(self.icon)\n else:\n self.dialog.iconbitmap(f\"{dirname(__file__)}/icon.ico\")\n \n # Widgets:\n self.paneview = PanedWindow(\n self.dialog,\n sashwidth=7,\n bg=\"#cccccc\",\n bd=0,\n )\n\n self.left_pane = PanedWindow(self.paneview)\n self.right_pane = PanedWindow(self.paneview)\n self.paneview.add(self.left_pane)\n self.paneview.add(self.right_pane)\n\n self.treeview_x_scrollbar=Scrollbar(self.left_pane, orient=\"horizontal\")\n self.treeview_y_scrollbar=Scrollbar(self.left_pane, orient=\"vertical\")\n self.list_box_x_scrollbar=Scrollbar(self.right_pane, orient=\"horizontal\")\n self.list_box_y_scrollbar=Scrollbar(self.right_pane, orient=\"vertical\")\n \n # tstyle = Style().configure(\".\", )\n\n self.treeview=Treeview(\n self.left_pane,\n xscrollcommand=self.treeview_x_scrollbar.set,\n yscrollcommand=self.treeview_y_scrollbar.set,\n show=\"tree\",\n selectmode=\"browse\",\n # style=tstyle\n )\n\n\n self.list_box=Listbox(\n self.right_pane,\n xscrollcommand=self.list_box_x_scrollbar.set,\n yscrollcommand=self.list_box_y_scrollbar.set,\n width=34,\n highlightthickness=0,\n bd=2,\n relief=\"ridge\"\n )\n\n if self.multiselect:\n self.list_box.config(selectmode=\"extended\")\n else:\n self.list_box.config(selectmode=\"browse\")\n\n self.cancel_button = Button(\n self.left_pane,\n text=\"Cancel\",\n command=self.cancel\n )\n\n self.submit_button = Button(\n self.right_pane,\n text=\"Submit\",\n command=self.submit\n )\n\n self.treeview_x_scrollbar.config(command=self.treeview.xview)\n self.treeview_y_scrollbar.config(command=self.treeview.yview)\n self.list_box_x_scrollbar.config(command=self.list_box.xview)\n self.list_box_y_scrollbar.config(command=self.list_box.yview)\n \n #Layout:\n self.dialog.rowconfigure(0, weight=1)\n 
self.dialog.columnconfigure(0, weight=1)\n\n self.left_pane.grid_rowconfigure(0, weight=1)\n self.left_pane.grid_columnconfigure(0, weight=1)\n self.right_pane.grid_rowconfigure(0, weight=1)\n self.right_pane.grid_columnconfigure(0, weight=1)\n\n self.paneview.paneconfigure(\n self.left_pane,\n minsize=100,\n #Start off w/ the sash centered in the GUI:\n width=(self.dialog.winfo_width() / 2) - \n ceil((self.paneview.cget(\"sashwidth\") * 1.5)),\n )\n self.paneview.paneconfigure(self.right_pane, minsize=100)\n\n self.paneview.grid(\n row=0,\n column=0,\n sticky=\"nsew\"\n )\n\n self.treeview.grid(\n row=0,\n column=0,\n sticky=\"nsew\"\n )\n self.treeview_y_scrollbar.grid(\n row=0,\n column=1,\n sticky=\"ns\"\n )\n self.treeview_x_scrollbar.grid(\n row=1,\n column=0,\n columnspan=2,\n sticky=\"ew\"\n )\n\n self.list_box.grid(\n row=0,\n column=0,\n sticky=\"nsew\"\n )\n self.list_box_y_scrollbar.grid(\n row=0,\n column=1,\n sticky=\"ns\"\n )\n self.list_box_x_scrollbar.grid(\n row=1,\n column=0,\n columnspan=2,\n sticky=\"ew\"\n )\n\n self.cancel_button.grid(\n row=2,\n column=0,\n sticky=\"w\",\n padx=10, \n pady=10\n )\n self.submit_button.grid(\n row=2,\n column=0,\n columnspan=2,\n sticky=\"e\",\n padx=10,\n pady=10\n )\n \n #Bindings, Protocols, & Misc:\n self.dialog.bind(\"<Control-w>\", self.cancel)\n self.treeview.bind(\"<<TreeviewSelect>>\", self.treeview_select)\n self.treeview.bind(\"<Double-Button-1>\", self.dialog_populate)\n self.treeview.bind(\"<Return>\", self.dialog_populate)\n self.treeview.bind(\"<Right>\", self.dialog_populate)\n self.list_box.bind(\"<<ListboxSelect>>\", self.list_box_select)\n self.list_box.bind(\"<Return>\", self.submit)\n self.dialog.protocol(\"WM_DELETE_WINDOW\", self.cancel)\n\n self.dialog_selection = deque()\n self.selection_paths = deque()\n\n for disk in self.get_disks():\n self.treeview.insert(\n \"\",\n index=\"end\",\n text=disk,\n image=self.disk_icon,\n )\n\n self.dialog.focus()",
"def open_file_dialog(gui, progress_bar):\n\n # File dialog\n fname, ftype = QFileDialog\\\n .getOpenFileName(caption=\"Open URDF File\",\n filter=\"Supported files (*.urdf *.dhparams)\"\n \";;All files (*)\",\n directory=path + '../Examples')\n if fname == '':\n return\n global robot_obj\n # Open the file\n if fname.split(\".\")[-1].lower() == \"urdf\":\n with open(fname) as file:\n urdf_obj = URDF.URDF(file)\n robot_obj = cr.RobotURDF(urdf_obj, progress_bar)\n\n elif fname.split(\".\")[-1].lower() == \"dhparams\":\n dh_obj = dh(fname)\n robot_obj = cr.RobotDH(dh_obj)\n init_gui_from_robot(gui, robot_obj)",
"def getFilename(self,timeout=None):\n self.show(timeout,modal=True)\n self.exec_()\n if self.result() == QtGui.QDialog.Accepted:\n files = map(str,self.selectedFiles())\n if self.fileMode() == QtGui.QFileDialog.ExistingFiles:\n return files\n else:\n return files[0]\n else:\n return None",
"def _launch_file_b(self):\n types = [\n (\"JPG\", \"*.jpg\"),\n (\"Bitmap\", \"*.bmp\"),\n (\"PNG\", \"*.png\"),\n (\"GIF\", \"*.gif\"),\n (\"All files\", \"*\")]\n dialog = tkFileDialog.Open(self, filetypes = types)\n self._file_path = dialog.show()\n\n self._file_name = self._scrub_name(self._file_path)\n self._move_img()\n return self._file_name",
"def seleccionar(self):\n try:\n button = self.sender()\n if button:\n file = QFileDialog.getOpenFileName(None, \"Selecciona la coleccion\", \"/home/\", \"Zip-files: *\")\n print(\"Ruta seleccionada: \", file[0])\n self.ruta = file[0]\n return file\n except Exception:\n msg = QMessageBox()\n msg.setIcon(QMessageBox.Critical)\n msg.setText(\"Ruta de archivo no valida\")\n msg.setInformativeText('Esto puede ser debido a que se ha introducido una ruta de una archivo no valido o '\n 'no se ha indicado ninguna ruta')\n msg.setWindowTitle(\"Error\")\n msg.exec_()\n return None",
"def filepicker():\n import tkinter as tk\n from tkinter import filedialog\n\n root = tk.Tk()\n root.withdraw()\n\n file_path = filedialog.askopenfilename()\n return file_path",
"def PromptPathOpenCmd(self):\n defaultPath = DEFAULT_PATH_TST_FILES\n defaultName = \"\"\n \n dlgSave = wxFileDialog(self, \"Run Command File\", defaultPath, defaultName,\n \"*.tst|*.*\", wxOPEN|wxCHANGE_DIR)\n if dlgSave.ShowModal() == wxID_OK:\n path = dlgSave.GetPath()\n else:\n path = None\n dlgSave.Destroy()\n return path",
"def choosefile(self, diagtitle):\r\n root = Tk()\r\n root.withdraw()\r\n sfile = tkFileDialog.askopenfilename(\r\n parent=root,\r\n filetypes = [('.TXT files', '.txt')],\r\n title=diagtitle )\r\n return sfile",
"def get_path_via_file_ui():\n\n import Tkinter as tk\n import tkFileDialog as filedialog\n root = tk.Tk()\n root.withdraw()\n return filedialog.askopenfilename()"
]
| [
"0.6026292",
"0.5952529",
"0.5807682",
"0.5787794",
"0.5634657",
"0.5631036",
"0.56236327",
"0.5597713",
"0.5584776",
"0.5563499",
"0.5553169",
"0.5518008",
"0.5509124",
"0.5458683",
"0.5433406",
"0.5431244",
"0.5426807",
"0.54229605",
"0.53799635",
"0.5375665",
"0.5331219",
"0.53218836",
"0.53172934",
"0.53146607",
"0.52739567",
"0.52722013",
"0.5262766",
"0.52589554",
"0.5258032",
"0.52394396"
]
| 0.78081536 | 0 |
Load only the last saved tensor. | def load_last_tensor(path: str, params: Optional[Params]) -> Optional[th.Tensor]:
path = get_path_with_hash(path, params)
f = get_last_file(path)
return th.load(f) if f else None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_latest_save(self, device=None):\n return torch.load(str(self.previous_saves()[-1].absolute()), map_location=device)",
"def load(self):\n try:\n if self.model.is_cuda:\n self.model.load_state_dict(torch.load(os.path.join(self.save_path, \"save_point.pth\")))\n else:\n self.model.load_state_dict(torch.load(os.path.join(self.save_path, \\\n \"save_point.pth\"), map_location=\"cpu\"))\n except:\n sys.exit(\"Unable to load previous model\")",
"def load(self, sess, file_path, verbose=True):\n if(verbose): print(\"Loading model from: \" + str(file_path))\n self.tf_saver.restore(sess, file_path)\n if(verbose): print(\"Done!\")",
"def load_full_model(self, session, model_dir):\n self.full_saver.restore(session, model_dir)",
"def load(self):\r\n checkpoint = torch.load(self.checkpoint_path,\r\n map_location=self.device)\r\n self.load_state_dict(checkpoint)\r\n del checkpoint",
"def load_checkpoint(self, model):\n print(f\"load model {self.save_model_path}\")\n model.load_state_dict(torch.load(self.save_model_path))",
"def get_saved_state( self ):\n try:\n return torch.load(\"{}/model.torch\".format( self.config.neuron.full_path ))\n except Exception as e:\n logger.exception('Failed to reload model with error: {}', e)\n return None",
"def load(self):\n checkpoint = torch.load(self.checkpoint_path,\n map_location=self.net.device)\n self.load_state_dict(checkpoint)\n del checkpoint",
"def restore(self):\n if self._restored_model:\n return\n with self.eval_graph.graph.as_default():\n last_checkpoint = self._find_last_checkpoint()\n # TODO(rbharath): Is setting train=False right here?\n saver = tf.train.Saver()\n saver.restore(self._get_shared_session(train=False), last_checkpoint)\n self._restored_model = True",
"def load(self, tensor):\n # TODO mock for now, load will use worker's store in a future work\n if self.tracing:\n return PlaceHolder.create_from(tensor, role=self, tracing=True)\n else:\n return tensor",
"def restore_model(self, path):\n # if cuda is not available load everything to cpu\n if not self.use_cuda:\n state = torch.load(path, map_location=lambda storage, loc: storage)\n else:\n state = torch.load(path)\n self.net.load_state_dict(state['state_dict'])\n self.optimizer.load_state_dict(state['optimizer'])\n self.epoch_counter = state['epoch'] # counts number of epochs",
"def load(self, io=None):\n if io is not None:\n io._download_dir_from_bucket(self.save_path, self.save_path, True)\n\n step_var = tf.Variable(1, dtype=tf.int32, trainable=False)\n epoch_var = tf.Variable(1, dtype=tf.int32, trainable=False)\n ckpt = tf.train.Checkpoint(model=self.model, step=step_var, epoch=epoch_var)\n ckpt.restore(tf.train.latest_checkpoint(self.save_path))\n logging.info(f\"Restored model from {tf.train.latest_checkpoint(self.save_path)} [step:{int(step_var)}, epoch:{int(epoch_var)}]\")\n print_summary(self.model)",
"def restore(self, sess, path=None, var_list=None):\n\n saver = tf.train.Saver(var_list)\n if path is None:\n path = tf.train.latest_checkpoint(os.path.dirname(self.config.CHECKPOINTS_PATH))\n saver.restore(sess, path)\n print(\"model restored from %s\" % path)",
"def load_model_state(filename: str) -> OrderedDictType[str, torch.Tensor]:\n return torch.load(filename)",
"def load_trainable_variables (self, sess, savefn):\r\n self.state = utils.train.load_trainable_variables(sess, savefn)",
"def load_resnet(self, resnet_dir, keep_last=False):\n ckpt = tf.train.latest_checkpoint(resnet_dir)\n with tf.Session(config=self.config) as sess:\n # init model\n init = [tf.global_variables_initializer(), tf.local_variables_initializer()]\n sess.run(init)\n if keep_last:\n restore_var = [v for v in tf.global_variables() if 'global_step' not in v.name and 'mode' not in v.name]\n else:\n restore_var = [v for v in tf.global_variables() if 'global_step' not in v.name and 'mode' not in v.name\n and 'conv6' not in v.name]\n loader = tf.train.Saver(var_list=restore_var)\n # load model\n self.load(ckpt, sess, loader)",
"def restore(self, sess: tf.Session) -> None:\n super().restore(sess)\n BaseModel._restore_checkpoint(self.pretrained_saver, sess, path=FLAGS.pretrained_checkpoint)",
"def load(self, sess, step=None):\n if step==None:\n ckpt_path = tf.train.latest_checkpoint(self.model.ckpt_dir)\n else:\n ckpt_path = os.path.join(self.model.ckpt_dir, 'model-'+str(step))\n self.saver.restore(sess, ckpt_path)\n step = tf.train.global_step(sess, self.gstep)\n print('Load model at step {} from check point {}.'.format(step, ckpt_path))",
"def restore(self, PATH):\n self._saver.restore(self._sess, PATH)",
"def init_game_setting(self):\n np.random.seed(1) \n self.s_prev = np.zeros((80, 80, 1))\n print('loading trained model from {}'.format(self.model_path))\n self.sess = tf.InteractiveSession(graph=self.model)\n self.saver.restore(self.sess, self.model_path)",
"def restore(self, checkpoint_path):\n start_time = time.time()\n latest_checkpoint = train_util.get_latest_chekpoint(checkpoint_path)\n if latest_checkpoint is not None:\n checkpoint = tf.train.Checkpoint(model=self)\n checkpoint.restore(latest_checkpoint).expect_partial()\n logging.info('Loaded checkpoint %s', latest_checkpoint)\n logging.info('Loading model took %.1f seconds', time.time() - start_time)\n else:\n logging.info('Could not find checkpoint to load at %s, skipping.',\n checkpoint_path)",
"def load_checkpoint(model, save_path):\n model.load_state_dict(torch.load(save_path))",
"def load_network(self, sess, filename):\n dir_path = os.path.dirname(os.path.realpath(__file__))\n dir_path += '/Models/'\n dir_path += filename\n saver = tf.train.Saver()\n saver.restore(sess, dir_path)",
"def load_last_chkpnt(self, directory):\n last_chkpnt = sorted(list(directory.glob('chkpnt_epoch*.pth')))[-1]\n self.load_checkpoint(last_chkpnt)",
"def _load_restored(self, dataset_path):\n for group in ['knowledge', 'source', 'target']:\n if getattr(self, group + '_format') != 'none':\n text_data = load_restored(dataset_path, group + '.', ignore_file='vocab')[0]\n setattr(self, group + '_text_data', text_data)\n idx2token, token2idx = load_restored(dataset_path, ignore_file='data')\n setattr(self, 'idx2token', idx2token)\n setattr(self, 'token2idx', token2idx)\n self.max_vocab_size = len(self.idx2token)\n self.logger.info(\"Restore finished!\")",
"def _load_training_data(self):\n self._save_training_data()",
"def restore(self):\n\n self.brain.restore_checkpoint()",
"def load(self, outdir=None, predict=False, restore=True, allow_missing=True):\n if outdir is None:\n outdir = self.outdir\n with self.sess.as_default():\n with self.sess.graph.as_default():\n if not self.initialized():\n self.sess.run(tf.global_variables_initializer())\n tf.tables_initializer().run()\n if restore and os.path.exists(outdir + '/checkpoint'):\n self._restore_inner(outdir + '/model.ckpt', predict=predict, allow_missing=allow_missing)\n else:\n if predict:\n stderr('No EMA checkpoint available. Leaving internal variables unchanged.\\n')",
"def _load_target(self, id_: str) -> Tensor:\n filename = os.path.join(self.root, \"output\", id_ + \"_m.png\")\n with Image.open(filename) as img:\n array = np.array(img.convert(\"L\"))\n tensor: Tensor = torch.from_numpy(array) # type: ignore[attr-defined]\n return tensor",
"def init_saver(self):\n self.saver = tf.train.Saver(max_to_keep=self.config.max_to_keep, save_relative_paths=True)"
]
| [
"0.6790029",
"0.643399",
"0.6366274",
"0.62869316",
"0.6262909",
"0.6245039",
"0.6235464",
"0.6201017",
"0.6193098",
"0.61869633",
"0.6171663",
"0.6100199",
"0.60631746",
"0.6054893",
"0.6017372",
"0.6001262",
"0.5974573",
"0.5973511",
"0.5964038",
"0.5951946",
"0.5906625",
"0.5905858",
"0.5892144",
"0.58547884",
"0.5848538",
"0.58329356",
"0.5813807",
"0.58112985",
"0.5810529",
"0.58090895"
]
| 0.71581215 | 0 |
Same as the anchor target layer in original Fast/er RCNN | def anchor_target_layer(rpn_cls_score, gt_boxes, im_info, _feat_stride, all_anchors, num_anchors):
A = num_anchors
total_anchors = all_anchors.shape[0]
K = total_anchors / num_anchors
# allow boxes to sit over the edge by a small amount
_allowed_border = 0
# map of shape (..., H, W)
height, width = rpn_cls_score.shape[1:3]
# only keep anchors inside the image
inds_inside = np.where(
(all_anchors[:, 0] >= -_allowed_border) &
(all_anchors[:, 1] >= -_allowed_border) &
(all_anchors[:, 2] < im_info[1] + _allowed_border) & # width
(all_anchors[:, 3] < im_info[0] + _allowed_border) # height
)[0]
# keep only inside anchors
anchors = all_anchors[inds_inside, :]
  # label: 1 is positive, 0 is negative, -1 is don't care
labels = np.empty((len(inds_inside),), dtype=np.float32)
labels.fill(-1)
# overlaps between the anchors and the gt boxes
# overlaps (ex, gt)
overlaps = bbox_overlaps(
np.ascontiguousarray(anchors, dtype=np.float),
np.ascontiguousarray(gt_boxes, dtype=np.float))
argmax_overlaps = overlaps.argmax(axis=1)
max_overlaps = overlaps[np.arange(len(inds_inside)), argmax_overlaps]
gt_argmax_overlaps = overlaps.argmax(axis=0)
gt_max_overlaps = overlaps[gt_argmax_overlaps,
np.arange(overlaps.shape[1])]
gt_argmax_overlaps = np.where(overlaps == gt_max_overlaps)[0]
if not cfg.TRAIN.RPN_CLOBBER_POSITIVES:
# assign bg labels first so that positive labels can clobber them
# first set the negatives
labels[max_overlaps < cfg.TRAIN.RPN_NEGATIVE_OVERLAP] = 0
# fg label: for each gt, anchor with highest overlap
labels[gt_argmax_overlaps] = 1
# fg label: above threshold IOU
labels[max_overlaps >= cfg.TRAIN.RPN_POSITIVE_OVERLAP] = 1
if cfg.TRAIN.RPN_CLOBBER_POSITIVES:
# assign bg labels last so that negative labels can clobber positives
labels[max_overlaps < cfg.TRAIN.RPN_NEGATIVE_OVERLAP] = 0
# subsample positive labels if we have too many
num_fg = int(cfg.TRAIN.RPN_FG_FRACTION * cfg.TRAIN.RPN_BATCHSIZE)
fg_inds = np.where(labels == 1)[0]
if len(fg_inds) > num_fg:
disable_inds = npr.choice(
fg_inds, size=(len(fg_inds) - num_fg), replace=False)
labels[disable_inds] = -1
# subsample negative labels if we have too many
num_bg = cfg.TRAIN.RPN_BATCHSIZE - np.sum(labels == 1)
bg_inds = np.where(labels == 0)[0]
if len(bg_inds) > num_bg:
disable_inds = npr.choice(
bg_inds, size=(len(bg_inds) - num_bg), replace=False)
labels[disable_inds] = -1
bbox_targets = np.zeros((len(inds_inside), 4), dtype=np.float32)
bbox_targets = _compute_targets(anchors, gt_boxes[argmax_overlaps, :])
bbox_inside_weights = np.zeros((len(inds_inside), 4), dtype=np.float32)
# only the positive ones have regression targets
bbox_inside_weights[labels == 1, :] = np.array(cfg.TRAIN.RPN_BBOX_INSIDE_WEIGHTS)
bbox_outside_weights = np.zeros((len(inds_inside), 4), dtype=np.float32)
if cfg.TRAIN.RPN_POSITIVE_WEIGHT < 0:
# uniform weighting of examples (given non-uniform sampling)
num_examples = np.sum(labels >= 0)
positive_weights = np.ones((1, 4)) * 1.0 / num_examples
negative_weights = np.ones((1, 4)) * 1.0 / num_examples
else:
assert ((cfg.TRAIN.RPN_POSITIVE_WEIGHT > 0) &
(cfg.TRAIN.RPN_POSITIVE_WEIGHT < 1))
positive_weights = (cfg.TRAIN.RPN_POSITIVE_WEIGHT /
np.sum(labels == 1))
negative_weights = ((1.0 - cfg.TRAIN.RPN_POSITIVE_WEIGHT) /
np.sum(labels == 0))
bbox_outside_weights[labels == 1, :] = positive_weights
bbox_outside_weights[labels == 0, :] = negative_weights
# map up to original set of anchors
labels = _unmap(labels, total_anchors, inds_inside, fill=-1)
bbox_targets = _unmap(bbox_targets, total_anchors, inds_inside, fill=0)
bbox_inside_weights = _unmap(bbox_inside_weights, total_anchors, inds_inside, fill=0)
bbox_outside_weights = _unmap(bbox_outside_weights, total_anchors, inds_inside, fill=0)
# labels
labels = labels.reshape((1, height, width, A)).transpose(0, 3, 1, 2)
labels = labels.reshape((1, 1, A * height, width))
rpn_labels = labels
# bbox_targets
bbox_targets = bbox_targets \
.reshape((1, height, width, A * 4))
rpn_bbox_targets = bbox_targets
# bbox_inside_weights
bbox_inside_weights = bbox_inside_weights \
.reshape((1, height, width, A * 4))
rpn_bbox_inside_weights = bbox_inside_weights
# bbox_outside_weights
bbox_outside_weights = bbox_outside_weights \
.reshape((1, height, width, A * 4))
rpn_bbox_outside_weights = bbox_outside_weights
return rpn_labels, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __call__(self, src, label):\r\n \"\"\"color distort\"\"\"\r\n # img = random_color_distort(src)\r\n\r\n # print(\"previous label shape = \", label.shape)\r\n target = np.zeros(shape=(label.shape[0],))\r\n\r\n \"\"\"Pyramid Anchor sampling\"\"\"\r\n img, boxes, label = self.random_baiducrop(src, label[:, :4], target)\r\n # print(\"label shape = \", label.shape)\r\n # print('boxes shape =', boxes.shape)\r\n bbox = boxes\r\n # img = mx.nd.array(img)\r\n\r\n \"\"\"color distort\"\"\"\r\n img = mx.nd.array(img)\r\n img = random_color_distort(img)\r\n\r\n # \"\"\"random crop, keep aspect ration=1\"\"\"\r\n # h, w, _ = img.shape\r\n # bbox, crop_size = random_crop_with_constraints(label, (w, h))\r\n # x_offset, y_offset, new_width, new_height = crop_size\r\n # img = mx.image.fixed_crop(img, x_offset, y_offset, new_width, new_height)\r\n\r\n \"\"\"resize with random interpolation\"\"\"\r\n h, w, _ = img.shape\r\n interp = np.random.randint(0, 5)\r\n img = gimage.imresize(img, self._width, self._height, interp=interp)\r\n bbox = gbbox.resize(bbox, (w, h), (self._width, self._height))\r\n\r\n \"\"\"random horizontal flip\"\"\"\r\n h, w, _ = img.shape\r\n img, flips = gimage.random_flip(img, px=0.5)\r\n bbox = gbbox.flip(bbox, (w, h), flip_x=flips[0])\r\n\r\n \"\"\"To Tensor & Normalization\"\"\"\r\n img = mx.nd.image.to_tensor(img)\r\n img = mx.nd.image.normalize(img, mean=self._mean, std=self._std)\r\n\r\n if self._anchors is None:\r\n return img, bbox\r\n\r\n # @TODO: generating training target so cpu workers can help reduce the workload on gpu\r\n face_anchors, head_anchors, body_anchors = self._anchors\r\n gt_bboxes = mx.nd.array(bbox[:, :4]).expand_dims(0)\r\n gt_ids = mx.nd.zeros((1, gt_bboxes.shape[1], 1), dtype=gt_bboxes.dtype)\r\n\r\n face_cls_targets, face_box_targets, _ = self._target_generator(\r\n face_anchors, None, gt_bboxes, gt_ids)\r\n\r\n head_cls_targets, head_box_targets, _ = self._target_generator(\r\n head_anchors, None, gt_bboxes, gt_ids)\r\n\r\n body_cls_targets, body_box_targets, _ = self._target_generator(\r\n body_anchors, None, gt_bboxes, gt_ids)\r\n\r\n return img, \\\r\n face_cls_targets[0], head_cls_targets[0], body_cls_targets[0], \\\r\n face_box_targets[0], head_box_targets[0], body_box_targets[0]",
"def forward_target(self, img, **kwargs):\n assert img.dim() == 5, \\\n \"Input must have 5 dims, got: {}\".format(img.dim())\n\n img_v_l = []\n idx_unshuffle_v_l = []\n for idx in range(img.shape[1]):\n img_vi = img[:, idx, ...].contiguous()\n img_vi, idx_unshuffle_vi = self._batch_shuffle_ddp(img_vi)\n img_v_l.append(img_vi)\n idx_unshuffle_v_l.append(idx_unshuffle_vi)\n\n if self.start_block > 0:\n for idx in range(img.shape[1]):\n for i, best_path in enumerate(self.best_paths):\n img_v_l[idx] = self.target_backbone(img_v_l[idx],\n start_block=i,\n forward_op=best_path,\n block_op=True)[0]\n\n self.forward_op_target = self.forward_op_online\n proj_target_v1 = 0\n proj_target_v2 = 0\n v2_idx = img.shape[1]//2\n with torch.no_grad():\n for op_idx, forward_singleop_target in enumerate(self.forward_op_target):\n temp_v1 = self.target_neck(self.target_backbone(img_v_l[op_idx],\n start_block=self.start_block,\n forward_op=forward_singleop_target))[\n 0].clone().detach()\n temp_v2 = self.target_neck(self.target_backbone(img_v_l[v2_idx + op_idx],\n start_block=self.start_block,\n forward_op=forward_singleop_target))[\n 0].clone().detach()\n temp_v1 = nn.functional.normalize(temp_v1, dim=1)\n temp_v1 = self._batch_unshuffle_ddp(temp_v1, idx_unshuffle_v_l[op_idx])\n\n temp_v2 = nn.functional.normalize(temp_v2, dim=1)\n temp_v2 = self._batch_unshuffle_ddp(temp_v2, idx_unshuffle_v_l[v2_idx + op_idx])\n\n proj_target_v1 += temp_v1\n proj_target_v2 += temp_v2\n\n self.proj_target_v1 = proj_target_v1 / (len(self.forward_op_target))\n self.proj_target_v2 = proj_target_v2 / (len(self.forward_op_target))",
"def test_anchor_output():\n anchorDict = {\n \"ReplicationFactor\": 2,\n # Accl factor must divide batch size\n \"AccumulationFactor\": 4,\n \"Pipelining\": True,\n \"ReturnType\": \"ALL\",\n }\n label_array = np.ones([BATCH_SIZE]).astype(np.int32)\n\n micro_batch_size = BATCH_SIZE // (\n anchorDict[\"AccumulationFactor\"] * anchorDict[\"ReplicationFactor\"]\n )\n\n builder = popart.Builder()\n input_shape = [micro_batch_size, CHANNELS, DATA_LEN, DATA_LEN]\n\n data_shape = popart.TensorInfo(\"FLOAT\", input_shape)\n lbl_shape = popart.TensorInfo(\"INT32\", [micro_batch_size])\n w = builder.addInitializedInputTensor(\n np.random.random_sample(input_shape).astype(np.float32)\n )\n\n ip = builder.addInputTensor(data_shape)\n lb = builder.addInputTensor(lbl_shape)\n\n a = builder.aiOnnx.matmul([ip, w])\n o = builder.reshape_const(\n builder.aiOnnx, [a], [micro_batch_size, CHANNELS * DATA_LEN * DATA_LEN]\n )\n o = builder.aiOnnx.relu([o])\n o = builder.aiOnnx.softmax([o])\n nll = builder.aiGraphcore.nllloss([o, lb])\n\n GRAD = popart.reservedGradientPrefix() + w\n ACCL = popart.reservedAccumPrefix() + w\n art = popart.AnchorReturnType(\"All\")\n data_flow = popart.DataFlow(\n BATCHES_PER_STEP, {o: art, a: art, ip: art, w: art, GRAD: art, ACCL: art}\n )\n\n opts, deviceContext = return_options(anchorDict)\n with deviceContext as device:\n if device is None:\n pytest.skip(\"Test needs to run on IPU, but none are available\")\n\n session = popart.TrainingSession(\n fnModel=builder.getModelProto(),\n dataFlow=data_flow,\n loss=nll,\n optimizer=popart.ConstSGD(LEARNING_RATE),\n userOptions=opts,\n deviceInfo=device,\n )\n\n session.prepareDevice()\n\n if anchorDict[\"ReplicationFactor\"] > 1:\n input_shape = [anchorDict[\"ReplicationFactor\"]] + input_shape\n label_array = label_array.reshape([anchorDict[\"ReplicationFactor\"], -1])\n if anchorDict[\"AccumulationFactor\"] > 1:\n input_shape = [anchorDict[\"AccumulationFactor\"]] + input_shape\n label_array = label_array.reshape([anchorDict[\"AccumulationFactor\"], -1])\n if BATCHES_PER_STEP > 1:\n input_shape = [BATCHES_PER_STEP] + input_shape\n label_array = np.repeat(label_array[np.newaxis], BATCHES_PER_STEP, 0)\n\n anchors = session.initAnchorArrays()\n in_array = np.random.random_sample(input_shape).astype(np.float32)\n\n stepio = popart.PyStepIO({ip: in_array, lb: label_array}, anchors)\n session.weightsFromHost()\n\n session.run(stepio)\n\n # Returned anchors will be of shape\n # [bps, grad_accl_factor, repl_factor, micro_batch_size, channels, data_len, data_len]\n for batch in range(anchors[w].shape[0]):\n for replica in range(anchors[w].shape[1]):\n # Weights should not change over the gradient accumulation\n # dimension - only after gradAccl steps.\n assert np.allclose(\n anchors[w][batch, 0, :, :, :, :, :],\n anchors[w][batch, replica, :, :, :, :, :],\n )\n\n # Check that the accumulated gradient plus the weights for the current batch\n # equals the weights for the next batch.\n # Batch loop\n for batch in range(anchors[w].shape[0] - 1):\n calc_weight = {}\n # Replica loop.\n for replica in range(anchors[w].shape[2]):\n # For each replica in each batch, take the relevant replica's\n # last weight tensor in the accumulation loop minus\n # the sum of the accumulated gradients across replicas\n calc_weight[replica] = anchors[w][\n batch, -1, replica, :, :, :, :\n ] - np.sum(anchors[ACCL][batch, -1, :, :, :, :, :], axis=0)\n # Then compare against the last weight tensor of the next batch,\n # for the relevant replica. 
These should match.\n assert np.allclose(\n calc_weight[replica], anchors[w][batch + 1, -1, replica, :, :, :, :]\n )",
"def tgn_link_predictor(src: tf.Tensor, dst: tf.Tensor) -> tf.Tensor:\n assert src.shape == dst.shape\n feature_size = int(src.shape[-1])\n\n with tf.variable_scope(\"hidden\"):\n hidden = tf.nn.relu(\n linear(tf.concat([src, dst], axis=-1), feature_size))\n with tf.variable_scope(\"output\"):\n return linear(hidden, 1)[..., 0]",
"def __call__(self, src, label):\n # resize shorter side but keep in max_size\n h, w, _ = src.shape\n img = timage.resize_short_within(src, self._short, self._max_size, interp=1)\n img = img.astype(np.float32)\n\n if self.augmentation:\n img = self.random_color_aug(img)\n bbox = tbbox.resize(label, (w, h), (img.shape[1], img.shape[0]))\n\n # random horizontal flip\n h, w, _ = img.shape\n img, flips = timage.random_flip(img, px=0.5)\n bbox = tbbox.flip(bbox, (w, h), flip_x=flips[0])\n\n # to tensor\n img = mx.nd.image.to_tensor(img)\n img = mx.nd.image.normalize(img, mean=self._mean, std=self._std)\n\n if self._anchors is None:\n return img, bbox.astype(img.dtype)\n\n # generate RPN target so cpu workers can help reduce the workload\n # feat_h, feat_w = (img.shape[1] // self._stride, img.shape[2] // self._stride)\n oshape = self._feat_sym.infer_shape(data=(1, 3, img.shape[1], img.shape[2]))[1][0]\n anchor = self._anchors[:, :, :oshape[2], :oshape[3], :].reshape((-1, 4))\n gt_bboxes = mx.nd.array(bbox[:, :4])\n cls_target, box_target, box_mask = self._target_generator(\n gt_bboxes, anchor, img.shape[2], img.shape[1])\n return img, bbox.astype(img.dtype), cls_target, box_target, box_mask",
"def __init__(self, \n mask, \n anchors, \n scale_anchors = 1, \n num_extras = 0, \n ignore_thresh = 0.7, \n truth_thresh = 1, \n loss_type = \"mse\", \n iou_normalizer = 1.0,\n cls_normalizer = 1.0, \n scale_x_y = 1.0,\n nms_kind = \"greedynms\",\n beta_nms = 0.6,\n reduction = tf.keras.losses.Reduction.AUTO, \n name=None, \n dtype = tf.float32,\n **kwargs):\n super(Yolo_Loss, self).__init__(reduction = reduction, name = name, **kwargs)\n self.dtype = dtype\n self._anchors = tf.convert_to_tensor([anchors[i] for i in mask], dtype= self.dtype)/scale_anchors #<- division done for testing\n\n self._num = tf.cast(len(mask), dtype = tf.int32)\n self._num_extras = tf.cast(num_extras, dtype = self.dtype)\n self._truth_thresh = tf.cast(truth_thresh, dtype = self.dtype) \n self._ignore_thresh = tf.cast(ignore_thresh, dtype = self.dtype)\n\n # used (mask_n >= 0 && n != best_n && l.iou_thresh < 1.0f) for id n != nest_n\n # checks all anchors to see if another anchor was used on this ground truth box to make a prediction\n # if iou > self._iou_thresh then the network check the other anchors, so basically \n # checking anchor box 1 on prediction for anchor box 2\n self._iou_thresh = tf.cast(0.213, dtype = self.dtype) # recomended use = 0.213 in [yolo]\n \n self._loss_type = loss_type\n self._iou_normalizer= tf.cast(iou_normalizer, dtype = self.dtype)\n self._cls_normalizer = tf.cast(cls_normalizer, dtype = self.dtype)\n self._scale_x_y = tf.cast(scale_x_y, dtype = self.dtype)\n\n #used in detection filtering\n self._beta_nms = tf.cast(beta_nms, dtype = self.dtype)\n self._nms_kind = nms_kind\n return",
"def forward(self, bbox, anchor, width, height):\n F = mx.nd\n with autograd.pause():\n # anchor with shape (N, 4)\n a_xmin, a_ymin, a_xmax, a_ymax = self._bbox_split(anchor)\n # invalid anchor mask with shape (N, 1)\n imask = (\n (a_xmin >= -self._allowed_border) *\n (a_ymin >= -self._allowed_border) *\n (a_xmax <= (width + self._allowed_border)) *\n (a_ymax <= (height + self._allowed_border))) <= 0\n imask = mx.nd.array(np.where(imask.asnumpy() > 0)[0], ctx=anchor.context)\n\n # calculate ious between (N, 4) anchors and (M, 4) bbox ground-truths\n # ious is (N, M)\n ious = F.contrib.box_iou(anchor, bbox, format='corner').transpose((1, 0, 2))\n ious[:, imask, :] = -1\n matches = self._matcher(ious)\n samples = self._sampler(matches, ious)\n samples = samples.as_in_context(anchor.context)\n # training targets for RPN\n cls_target, _ = self._cls_encoder(samples)\n box_target, box_mask = self._box_encoder(\n samples, matches, anchor.expand_dims(axis=0), bbox)\n return cls_target, box_target, box_mask",
"def __init__(self, target_real_label=1.0, target_fake_label=0.0):\n super(GANLocalLoss, self).__init__()\n # self.pooling = nn.MaxPool2d(kernel_size=4, stride=2, padding=1)\n self.adaptivepooling = nn.AdaptiveAvgPool2d(64)",
"def build_targets(pred_boxes, pred_conf, pred_cls, target, anchors, num_anchors, num_classes, grid_size, ignore_thres, img_dim):\n nB = target.size(0)\n nA = num_anchors\n nC = num_classes\n nG = grid_size\n mask = torch.zeros(nB, nA, nG, nG)\n conf_mask = torch.ones(nB, nA, nG, nG)\n tx = torch.zeros(nB, nA, nG, nG)\n ty = torch.zeros(nB, nA, nG, nG)\n tw = torch.zeros(nB, nA, nG, nG)\n th = torch.zeros(nB, nA, nG, nG)\n tconf = torch.ByteTensor(nB, nA, nG, nG).fill_(0)\n tcls = torch.ByteTensor(nB, nA, nG, nG, nC).fill_(0)\n\n nGT = 0\n nCorrect = 0\n for b in range(nB):\n for t in range(target.shape[1]):\n if target[b, t].sum() == 0:\n # pad\n continue\n nGT += 1\n # Convert to position relative to box\n gx = target[b, t, 1] * nG\n gy = target[b, t, 2] * nG\n gw = target[b, t, 3] * nG\n gh = target[b, t, 4] * nG\n # Get grid box indices\n gi = int(gx)\n gj = int(gy)\n # Get shape of gt box\n gt_box = torch.FloatTensor(\n np.array([0, 0, gw, gh])).unsqueeze(0)\n # Get shape of anchor box\n anchor_shapes = torch.FloatTensor(np.concatenate(\n (np.zeros((len(anchors), 2)), np.array(anchors)), 1))\n\n # Calculate iou between gt and anchor shapes\n # 1 on 3\n anch_ious = bbox_iou(gt_box, anchor_shapes)\n # Where the overlap is larger than threshold set mask to zero (ignore)\n conf_mask[b, anch_ious > ignore_thres, gj, gi] = 0\n # Find the best matching anchor box\n\n best_n = np.argmax(anch_ious)\n # Get ground truth box\n gt_box = torch.FloatTensor(\n np.array([gx, gy, gw, gh])).unsqueeze(0)\n # Get the best prediction\n pred_box = pred_boxes[b, best_n, gj, gi].unsqueeze(0)\n # Masks\n mask[b, best_n, gj, gi] = 1\n conf_mask[b, best_n, gj, gi] = 1\n # Coordinates\n tx[b, best_n, gj, gi] = gx - gi\n ty[b, best_n, gj, gi] = gy - gj\n # Width and height\n tw[b, best_n, gj, gi] = math.log(\n gw / anchors[best_n][0] + 1e-16)\n th[b, best_n, gj, gi] = math.log(\n gh / anchors[best_n][1] + 1e-16)\n # One-hot encoding of label\n target_label = int(target[b, t, 0])\n tcls[b, best_n, gj, gi, target_label] = 1\n tconf[b, best_n, gj, gi] = 1\n\n # Calculate iou between ground truth and best matching prediction\n iou = bbox_iou(gt_box, pred_box, x1y1x2y2=False)\n pred_label = torch.argmax(pred_cls[b, best_n, gj, gi])\n score = pred_conf[b, best_n, gj, gi]\n if iou > 0.5 and pred_label == target_label and score > 0.5:\n nCorrect += 1\n\n return nGT, nCorrect, mask, conf_mask, tx, ty, tw, th, tconf, tcls",
"def narration_target(self):",
"def forward(self, image, dtype=torch.float32):\n image_shape = image.shape[2:]\n\n if image_shape == self.last_shape and image.device in self.last_anchors:\n return self.last_anchors[image.device]\n\n if self.last_shape is None or self.last_shape != image_shape:\n self.last_shape = image_shape\n\n if dtype == torch.float16:\n dtype = np.float16\n else:\n dtype = np.float32\n\n boxes_all = []\n for stride in self.strides:\n boxes_level = []\n for scale, ratio in itertools.product(self.scales, self.ratios):\n if image_shape[1] % stride != 0:\n raise ValueError('input size must be divided by the stride.')\n base_anchor_size = self.anchor_scale * stride * scale\n anchor_size_x_2 = base_anchor_size * ratio[0] / 2.0\n anchor_size_y_2 = base_anchor_size * ratio[1] / 2.0\n\n x = np.arange(stride / 2, image_shape[1], stride)\n y = np.arange(stride / 2, image_shape[0], stride)\n xv, yv = np.meshgrid(x, y)\n xv = xv.reshape(-1)\n yv = yv.reshape(-1)\n\n # y1,x1,y2,x2\n boxes = np.vstack((yv - anchor_size_y_2, xv - anchor_size_x_2,\n yv + anchor_size_y_2, xv + anchor_size_x_2))\n boxes = np.swapaxes(boxes, 0, 1)\n boxes_level.append(np.expand_dims(boxes, axis=1))\n # concat anchors on the same level to the reshape NxAx4\n boxes_level = np.concatenate(boxes_level, axis=1)\n boxes_all.append(boxes_level.reshape([-1, 4]))\n\n anchor_boxes = np.vstack(boxes_all)\n\n anchor_boxes = torch.from_numpy(anchor_boxes.astype(dtype)).to(image.device)\n anchor_boxes = anchor_boxes.unsqueeze(0)\n\n # save it for later use to reduce overhead\n self.last_anchors[image.device] = anchor_boxes\n return anchor_boxes",
"def addTarget(self, data):\n #tgt: length x n_feature, n_feture is 5, AMR_CAT, AMR_LE, AMR_AUX, AMR_SENSE, AMR_CAN_COPY\n # after make all node aligned to a word or NULL word, length is equal to the length of tokes.\n if \"amr_id\" in data:\n self.tgt.append(torch.LongTensor(data[\"amr_id\"])) # lemma,cat, lemma_sense,ner,is_high\n # align_index, simple append all the aligned index\n # align_index = [[[i1,ij],[i2,ik] ]]\n self.align_index.append(data[\"amr_index\"]) # this index is also recategorized id\n\n amrl = len(data[\"amr_id\"])\n for i in data[\"amr_rel_index\"]:\n assert i <amrl,data\n #rel\n self.rel_seq.append(torch.LongTensor(data[\"amr_rel_id\"])) # lemma,cat, lemma_sense, the order is in gold amr node order\n self.rel_index.append(torch.LongTensor(data[\"amr_rel_index\"])) # index of head node from recategorized node order\n # here use rel dict to exchange the roleStr into id., mats is a matrix [real_gold_amr_len x real_gold_amr_len]\n mats = role_mat_to_sparse(data[\"amr_roles_mat\"], self.rel_dict)\n\n self.rel_mat.append(mats) #role, index\n self.root.append(data[\"amr_root\"]) #role, index for gold amr nodes\n\n #source means raw contents before becoming a tensor\n self.tgt_source.append([data[\"amr_rel_seq\"],data[\"amr_rel_triples\"],data[\"amr_convertedl_seq\"],data[\"amr_seq\"]])\n elif \"psd_id\" in data:\n self.tgt.append(torch.LongTensor(data[\"psd_id\"])) # lemma,cat, lemma_sense,ner,is_high\n # align_index, simple append all the aligned index\n # align_index = [[[i1,ij],[i2,ik] ]]\n self.align_index.append(data[\"psd_index\"])\n\n amrl = len(data[\"psd_id\"])\n for i in data[\"psd_rel_index\"]:\n assert i <amrl,data\n #rel\n self.rel_seq.append(torch.LongTensor(data[\"psd_rel_id\"])) # lemma,cat, lemma_sense\n self.rel_index.append(torch.LongTensor(data[\"psd_rel_index\"]))\n # here use rel dict to exchange the roleStr into id.\n mats = role_mat_to_sparse(data[\"psd_roles_mat\"], self.rel_dict)\n self.rel_mat.append(mats) #role, index\n self.root.append(data[\"psd_root\"]) #role, index\n\n #source means raw contents before becoming a tensor\n self.tgt_source.append([data[\"psd_rel_seq\"],data[\"psd_rel_triples\"],data[\"psd_convertedl_seq\"],data[\"psd_seq\"]])\n elif \"dm_id\" in data:\n self.tgt.append(torch.LongTensor(data[\"dm_id\"])) # lemma,cat, lemma_sense,ner,is_high\n # align_index, simple append all the aligned index\n # align_index = [[[i1,ij],[i2,ik] ]]\n self.align_index.append(data[\"dm_index\"])\n\n amrl = len(data[\"dm_id\"])\n for i in data[\"dm_rel_index\"]:\n assert i <amrl,data\n #rel\n self.rel_seq.append(torch.LongTensor(data[\"dm_rel_id\"])) # lemma,cat, lemma_sense\n self.rel_index.append(torch.LongTensor(data[\"dm_rel_index\"]))\n # here use rel dict to exchange the roleStr into id.\n mats = role_mat_to_sparse(data[\"dm_roles_mat\"], self.rel_dict)\n self.rel_mat.append(mats) #role, index\n self.root.append(data[\"dm_root\"]) #role, index\n\n #source means raw contents before becoming a tensor\n self.tgt_source.append([data[\"dm_rel_seq\"],data[\"dm_rel_triples\"],data[\"dm_convertedl_seq\"],data[\"dm_seq\"]])",
"def _init_layers(self):\n self.relu = nn.ReLU(inplace=True)\n self.rpn_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.rpn_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.rpn_cls = nn.Conv2d(\n self.feat_channels, self.num_anchors * 1, 3, padding=1)\n self.rpn_reg = nn.Conv2d(\n self.feat_channels, self.num_anchors * 4 * (self.reg_max + 1), 3, padding=1)\n self.rpn_iou = nn.Conv2d(\n self.feat_channels, self.num_anchors * 1, 3, padding=1)\n self.scales = nn.ModuleList(\n [Scale(1.0) for _ in self.anchor_generator.strides])\n\n ##############V2################\n conf_vector = [nn.Conv2d(self.num_anchors * 4 * self.total_dim, self.num_anchors * self.reg_channels, 1)]\n conf_vector += [self.relu]\n conf_vector += [nn.Conv2d(self.num_anchors * self.reg_channels, self.num_anchors, 1), nn.Sigmoid()]\n\n self.reg_conf = nn.Sequential(*conf_vector)\n ##############V2################",
"def _get_target_single(self,\n flat_anchors,\n valid_flags,\n num_level_anchors,\n gt_bboxes,\n gt_bboxes_ignore,\n gt_labels,\n img_meta,\n label_channels=1,\n unmap_outputs=True):\n inside_flags = self.anchor_inside_flags(flat_anchors, valid_flags,\n img_meta['img_shape'][:2],\n self.train_cfg.allowed_border)\n if not inside_flags.any():\n return (None,) * 7\n # assign gt and sample anchors\n anchors = flat_anchors[inside_flags, :]\n\n num_level_anchors_inside = self.get_num_level_anchors_inside(\n num_level_anchors, inside_flags)\n assign_result = self.assigner.assign(anchors, num_level_anchors_inside,\n gt_bboxes, gt_bboxes_ignore,\n gt_labels)\n\n sampling_result = self.sampler.sample(assign_result, anchors,\n gt_bboxes)\n\n num_valid_anchors = anchors.shape[0]\n bbox_targets = torch.zeros_like(anchors)\n bbox_weights = torch.zeros_like(anchors)\n labels = anchors.new_full((num_valid_anchors,),\n self.num_classes,\n dtype=torch.long)\n label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)\n\n pos_inds = sampling_result.pos_inds\n neg_inds = sampling_result.neg_inds\n if len(pos_inds) > 0:\n pos_bbox_targets = self.bbox_coder.encode(\n sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)\n bbox_targets[pos_inds, :] = pos_bbox_targets\n bbox_weights[pos_inds, :] = 1.0\n if gt_labels is None:\n # Only rpn gives gt_labels as None\n # Foreground is the first class since v2.5.0\n labels[pos_inds] = 0\n else:\n labels[pos_inds] = gt_labels[\n sampling_result.pos_assigned_gt_inds]\n if self.train_cfg.pos_weight <= 0:\n label_weights[pos_inds] = 1.0\n else:\n label_weights[pos_inds] = self.train_cfg.pos_weight\n if len(neg_inds) > 0:\n label_weights[neg_inds] = 1.0\n\n # map up to original set of anchors\n if unmap_outputs:\n num_total_anchors = flat_anchors.size(0)\n anchors = self.unmap(anchors, num_total_anchors, inside_flags)\n labels = self.unmap(\n labels, num_total_anchors, inside_flags, fill=self.num_classes)\n label_weights = self.unmap(label_weights, num_total_anchors,\n inside_flags)\n bbox_targets = self.unmap(bbox_targets, num_total_anchors, inside_flags)\n bbox_weights = self.unmap(bbox_weights, num_total_anchors, inside_flags)\n\n return (anchors, labels, label_weights, bbox_targets, bbox_weights,\n pos_inds, neg_inds)",
"def __init__(self):\n # def __init__(self, embed_size, input_channels):\n super(OffsetCNN, self).__init__()\n self.conv0 = nn.Conv3d(1, 1, (1, 1, 1), padding=(0,0,0))\n self.bn0 = nn.BatchNorm3d(1)\n self.lrelu = nn.LeakyReLU()",
"def forward(self, tgt: Tensor, tgt_mask: Optional[Tensor] = None,\n\t\t\t\ttgt_key_padding_mask: Optional[Tensor] = None) -> Tensor:\n\t\ttgt2 = self.self_attn(tgt, tgt, tgt, attn_mask=tgt_mask,\n\t\t\t\t\t\t\t key_padding_mask=tgt_key_padding_mask)[0]\n\t\ttgt = tgt + self.dropout1(tgt2)\n\t\ttgt = self.norm1(tgt)\n\t\t# tgt2 = self.multihead_attn(tgt, memory, memory, attn_mask=memory_mask,\n\t\t# \t\t\t\t\t\t key_padding_mask=memory_key_padding_mask)[0]\n\t\t# tgt = tgt + self.dropout2(tgt2)\n\t\t# tgt = self.norm2(tgt)\n\t\ttgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))\n\t\ttgt = tgt + self.dropout3(tgt2)\n\t\ttgt = self.norm3(tgt)\n\t\treturn tgt",
"def _get_target_single(self,\n flat_anchors,\n valid_flags,\n cls_scores,\n bbox_preds,\n num_level_anchors,\n gt_bboxes,\n gt_bboxes_ignore,\n gt_labels,\n img_meta,\n label_channels=1,\n unmap_outputs=True,\n is_cls_assigner=True):\n inside_flags = anchor_inside_flags(flat_anchors, valid_flags,\n img_meta['img_shape'][:2],\n self.train_cfg.allowed_border)\n if not inside_flags.any():\n return (None, ) * 7\n # assign gt and sample anchors\n anchors = flat_anchors[inside_flags, :]\n\n num_level_anchors_inside = self.get_num_level_anchors_inside(\n num_level_anchors, inside_flags)\n bbox_preds_valid = bbox_preds[inside_flags, :]\n cls_scores_valid = cls_scores[inside_flags, :]\n\n assigner = self.cls_assigner if is_cls_assigner else self.reg_assigner\n\n # decode prediction out of assigner\n bbox_preds_valid = self.bbox_coder.decode(anchors, bbox_preds_valid)\n assign_result = assigner.assign(anchors, num_level_anchors_inside,\n gt_bboxes, gt_bboxes_ignore, gt_labels,\n cls_scores_valid, bbox_preds_valid)\n sampling_result = self.sampler.sample(assign_result, anchors,\n gt_bboxes)\n\n num_valid_anchors = anchors.shape[0]\n bbox_targets = torch.zeros_like(anchors)\n bbox_weights = torch.zeros_like(anchors)\n labels = anchors.new_full((num_valid_anchors, ),\n self.num_classes,\n dtype=torch.long)\n label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)\n\n pos_inds = sampling_result.pos_inds\n neg_inds = sampling_result.neg_inds\n if len(pos_inds) > 0:\n if hasattr(self, 'bbox_coder'):\n pos_bbox_targets = self.bbox_coder.encode(\n sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)\n else:\n # used in VFNetHead\n pos_bbox_targets = sampling_result.pos_gt_bboxes\n bbox_targets[pos_inds, :] = pos_bbox_targets\n bbox_weights[pos_inds, :] = 1.0\n if gt_labels is None:\n # Only rpn gives gt_labels as None\n # Foreground is the first class since v2.5.0\n labels[pos_inds] = 0\n else:\n labels[pos_inds] = gt_labels[\n sampling_result.pos_assigned_gt_inds]\n if self.train_cfg.pos_weight <= 0:\n label_weights[pos_inds] = 1.0\n else:\n label_weights[pos_inds] = self.train_cfg.pos_weight\n if len(neg_inds) > 0:\n label_weights[neg_inds] = 1.0\n\n # map up to original set of anchors\n if unmap_outputs:\n num_total_anchors = flat_anchors.size(0)\n anchors = unmap(anchors, num_total_anchors, inside_flags)\n labels = unmap(\n labels, num_total_anchors, inside_flags, fill=self.num_classes)\n label_weights = unmap(label_weights, num_total_anchors,\n inside_flags)\n bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)\n bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)\n\n return (anchors, labels, label_weights, bbox_targets, bbox_weights,\n pos_inds, neg_inds)",
"def predict_from(self, inputs, to_layers):",
"def __getitem__(self, index):\n # Read the target image.\n image_path = self.image_paths[index]\n target_image = self.load_and_process_image(image_path)\n\n # Read the layer IUVs and convert to network inputs.\n people_layers = [self.load_and_process_iuv(self.iuv_paths[l - 1][index], index) for l in\n self.composite_order[index]]\n iuv_h, iuv_w = people_layers[0][0].shape[-2:]\n\n # Create the background layer UV from homographies.\n background_layer = self.get_background_inputs(index, iuv_w, iuv_h)\n\n uv_maps, masks, pids = zip(*([background_layer] + people_layers))\n uv_maps = torch.cat(uv_maps) # [L*2, H, W]\n masks = torch.stack(masks) # [L, H, W]\n pids = torch.stack(pids) # [L, H, W]\n\n if self.opt.use_mask_images:\n for i in range(1, len(people_layers)):\n mask_path = os.path.join(self.opt.dataroot, 'mask', f'{i:02d}', os.path.basename(image_path))\n if os.path.exists(mask_path):\n mask = Image.open(mask_path).convert('L').resize((masks.shape[-1], masks.shape[-2]))\n mask = transforms.ToTensor()(mask) * 2 - 1\n masks[i] = mask\n\n transform_params = self.get_params(do_jitter=self.opt.phase=='train')\n pids = self.apply_transform(pids, transform_params, 'nearest')\n masks = self.apply_transform(masks, transform_params, 'bilinear')\n uv_maps = self.apply_transform(uv_maps, transform_params, 'nearest')\n image_transform_params = transform_params\n if self.opt.do_upsampling:\n image_transform_params = { p: transform_params[p] * 2 for p in transform_params}\n target_image = self.apply_transform(target_image, image_transform_params, 'bilinear')\n\n return {'image': target_image, 'uv_map': uv_maps, 'mask': masks, 'pids': pids, 'image_path': image_path}",
"def forward(self, pred, target):\n if self.mask:\n target, mask = target\n # todo: loss with mask\n else:\n # todo: loss w/o mask\n pass\n return loss",
"def __call__(self, src, label, segm):\n # resize shorter side but keep in max_size\n h, w, _ = src.shape\n img = timage.resize_short_within(src, self._short, self._max_size, interp=1)\n bbox = tbbox.resize(label, (w, h), (img.shape[1], img.shape[0]))\n segm = [tmask.resize(polys, (w, h), (img.shape[1], img.shape[0])) for polys in segm]\n\n # random horizontal flip\n h, w, _ = img.shape\n img, flips = timage.random_flip(img, px=0.5)\n bbox = tbbox.flip(bbox, (w, h), flip_x=flips[0])\n segm = [tmask.flip(polys, (w, h), flip_x=flips[0]) for polys in segm]\n\n # gt_masks (n, im_height, im_width) of uint8 -> float32 (cannot take uint8)\n masks = [mx.nd.array(tmask.to_mask(polys, (w, h))) for polys in segm]\n # n * (im_height, im_width) -> (n, im_height, im_width)\n masks = mx.nd.stack(*masks, axis=0)\n\n # to tensor\n img = mx.nd.image.to_tensor(img)\n img = mx.nd.image.normalize(img, mean=self._mean, std=self._std)\n\n if self._anchors is None:\n return img, bbox.astype(img.dtype), masks\n\n # generate RPN target so cpu workers can help reduce the workload\n # feat_h, feat_w = (img.shape[1] // self._stride, img.shape[2] // self._stride)\n oshape = self._feat_sym.infer_shape(data=(1, 3, img.shape[1], img.shape[2]))[1][0]\n anchor = self._anchors[:, :, :oshape[2], :oshape[3], :].reshape((-1, 4))\n gt_bboxes = mx.nd.array(bbox[:, :4])\n cls_target, box_target, box_mask = self._target_generator(\n gt_bboxes, anchor, img.shape[2], img.shape[1])\n return img, bbox.astype(img.dtype), masks, cls_target, box_target, box_mask",
"def target(self):",
"def forward(self, fmap):\n rez = self._reshape_channels(self.conv(fmap))\n rez = rez.view(rez.size(0), rez.size(1), rez.size(2), self._A, self.anchor_target_dim)\n return rez",
"def __call__(self, src, label):\n\n h, w, _ = src.shape\n # interp = np.random.randint(0, 5)\n img = timage.resize_short_within(src, self._short, self._max_size, interp=1)\n img, flips = timage.random_flip(img, px=0.5)\n img = img.astype(np.float32)\n\n if self.teacher_aug:\n target_image_1 = self.random_color_aug(img)\n else:\n target_image_1 = img\n target_image_2 = self.random_color_aug(img)\n\n # target_image_1 = mx.nd.image.to_tensor(target_image_1)\n target_image_1 = mx.nd.image.to_tensor(target_image_1)\n target_image_1 = mx.nd.image.normalize(target_image_1, mean=self._mean, std=self._std)\n\n target_image_2 = mx.nd.image.to_tensor(target_image_2)\n target_image_2 = mx.nd.image.normalize(target_image_2, mean=self._mean, std=self._std)\n\n return target_image_1, target_image_2",
"def forward(self, x, target, k, a, m):\n x = x.view(-1, 28*28)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n y = F.relu(self.fc3(x))\n loss = self.loss(y, target, k, a, m)\n return y, loss",
"def create_target_image(self, builder, target, base_image, parameters):",
"def __getitem__(self, index):\n\n totensor = transforms.Compose(\n [transforms.Resize((224, 224)),\n transforms.ToTensor()\n ])\n\n assert (index < len(self.data))\n assert (index < self.len)\n images = self.data[index]\n # print(images)\n img = cv2.imread(os.path.join(self.dataset.directory, images))\n\n target = self.bbox[index]\n\n scale = np.array(img.shape) / 224\n\n # img = cv2.rectangle(img, (target[0]-10, target[1]-10), (target[2]+10, target[3]+10),\n # color=(255, 255, 0), thickness=10)\n\n # cv2.imwrite(os.path.join(\"res\", str(index)+\".jpg\"), draw)\n\n # print(img.shape, scale)\n img = cv2.resize(img, (224, 224))\n\n # print(target)\n\n target[0] = int(target[0] / scale[1] - 5)\n target[1] = int(target[1] / scale[0] - 5)\n target[2] = int(target[2] / scale[1] + 5)\n target[3] = int(target[3] / scale[0] + 5)\n\n # print(target)\n t = target\n if self.transform is not None:\n seq_det = self.transform.to_deterministic() # call this for each batch again, NOT only once at the start\n\n keypoints_on_images = []\n keypoints = []\n keypoints.append(ia.Keypoint(x=target[0], y=target[1]))\n keypoints.append(ia.Keypoint(x=target[2], y=target[3]))\n\n keypoints_on_images.append(ia.KeypointsOnImage(keypoints, shape=np.asarray(img).shape[:-1]))\n\n # augment keypoints and images\n img = seq_det.augment_image(np.asarray(img))\n after_aug = []\n\n target = seq_det.augment_keypoints(keypoints_on_images)\n for point in target[0].keypoints:\n # print(point)\n x_new, y_new = point.x, point.y\n after_aug.append(point.x)\n after_aug.append(point.y)\n target = after_aug\n # print(after_aug)\n newImg = Image.fromarray(img)\n reg_targets = np.float32(np.array(target))\n\n b=self.labels[index]\n\n #a = np.array(self.labels[index])\n #b = np.zeros((a.size, 2))\n #b[np.arange(a.size), a] = 1\n\n #print(\"B=\",b,self.labels[index])\n\n #print(targets)\n ##draw = cv2.rectangle(cv2.resize(np.array(newImg), (224, 224)), (t[1], t[0]), (t[3], t[2]), color=(0, 0, 0),\n # thickness=6)\n\n #draw = cv2.rectangle(cv2.resize(np.array(draw), (224, 224)), (targets[0], targets[1]), (targets[2], targets[3]),\n # color=(0, 255, 0), thickness=3)\n\n #cv2.imwrite(os.path.join(\"res\", str(index) + \".jpg\"), draw)\n #print(reg_targets)\n\n return totensor(newImg), reg_targets,b ,index",
"def adjust_anchors(self):\n pass",
"def build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):\n # RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral\n rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)\n # RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]\n rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))\n\n # Handle COCO crowds\n # A crowd box in COCO is a bounding box around several instances. Exclude\n # them from training. A crowd box is given a negative class ID.\n crowd_ix = np.where(gt_class_ids < 0)[0]\n if crowd_ix.shape[0] > 0:\n # Filter out crowds from ground truth class IDs and boxes\n non_crowd_ix = np.where(gt_class_ids > 0)[0]\n crowd_boxes = gt_boxes[crowd_ix]\n gt_class_ids = gt_class_ids[non_crowd_ix]\n gt_boxes = gt_boxes[non_crowd_ix]\n # Compute overlaps with crowd boxes [anchors, crowds]\n crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes)\n crowd_iou_max = np.amax(crowd_overlaps, axis=1)\n no_crowd_bool = (crowd_iou_max < 0.001)\n else:\n # All anchors don't intersect a crowd\n no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)\n\n # Compute overlaps [num_anchors, num_gt_boxes]\n overlaps = utils.compute_overlaps(anchors, gt_boxes)\n\n # Match anchors to GT Boxes\n # If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.\n # If an anchor overlaps a GT box with IoU < 0.3 then it's negative.\n # Neutral anchors are those that don't match the conditions above,\n # and they don't influence the loss function.\n # However, don't keep any GT box unmatched (rare, but happens). Instead,\n # match it to the closest anchor (even if its max IoU is < 0.3).\n #\n # 1. Set negative anchors first. They get overwritten below if a GT box is\n # matched to them. Skip boxes in crowd areas.\n anchor_iou_argmax = np.argmax(overlaps, axis=1)\n anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]\n rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1\n # 2. Set an anchor for each GT box (regardless of IoU value).\n # TODO: If multiple anchors have the same IoU match all of them\n gt_iou_argmax = np.argmax(overlaps, axis=0)\n rpn_match[gt_iou_argmax] = 1\n # 3. 
Set anchors with high overlap as positive.\n rpn_match[anchor_iou_max >= 0.7] = 1\n\n # Subsample to balance positive and negative anchors\n # Don't let positives be more than half the anchors\n ids = np.where(rpn_match == 1)[0]\n extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)\n if extra > 0:\n # Reset the extra ones to neutral\n ids = np.random.choice(ids, extra, replace=False)\n rpn_match[ids] = 0\n # Same for negative proposals\n ids = np.where(rpn_match == -1)[0]\n extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE - np.sum(rpn_match == 1))\n if extra > 0:\n # Rest the extra ones to neutral\n ids = np.random.choice(ids, extra, replace=False)\n rpn_match[ids] = 0\n\n # For positive anchors, compute shift and scale needed to transform them\n # to match the corresponding GT boxes.\n ids = np.where(rpn_match == 1)[0]\n ix = 0 # index into rpn_bbox\n # TODO: use box_refinement() rather than duplicating the code here\n for i, a in zip(ids, anchors[ids]):\n # Closest gt box (it might have IoU < 0.7)\n gt = gt_boxes[anchor_iou_argmax[i]]\n\n # Convert coordinates to center plus width/height.\n # GT Box\n gt_h = gt[2] - gt[0]\n gt_w = gt[3] - gt[1]\n gt_center_y = gt[0] + 0.5 * gt_h\n gt_center_x = gt[1] + 0.5 * gt_w\n # Anchor\n a_h = a[2] - a[0]\n a_w = a[3] - a[1]\n a_center_y = a[0] + 0.5 * a_h\n a_center_x = a[1] + 0.5 * a_w\n\n # Compute the bbox refinement that the RPN should predict.\n rpn_bbox[ix] = [\n (gt_center_y - a_center_y) / a_h,\n (gt_center_x - a_center_x) / a_w,\n np.log(gt_h / a_h),\n np.log(gt_w / a_w),\n ]\n # Normalize\n rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV\n ix += 1\n\n return rpn_match, rpn_bbox",
"def get_model_edge_cnn(point_cloud, img_cnn, is_training, bn_decay=None):\n batch_size = point_cloud.get_shape()[0].value\n num_point = point_cloud.get_shape()[1].value\n end_points = {}\n l0_xyz = point_cloud#tf.slice(point_cloud, [0,0,0], [-1,-1,3])\n l0_points = point_cloud#tf.slice(point_cloud, [0,0,3], [-1,-1,3])\n\n # Set Abstraction layers\n l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=512, radius=0.2, nsample=64, mlp=[64,64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1')\n l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=128, radius=0.4, nsample=64, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')\n l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3')\n\n # CNN layers for edge detection\n # Convolutional Layer #1\n conv1 = tf.layers.conv2d(inputs=img_cnn,filters=64,kernel_size=[5, 5],padding=\"same\",activation=tf.nn.relu)#32\n pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)\n conv2 = tf.layers.conv2d(inputs=pool1,filters=128,kernel_size=[5, 5],padding=\"same\",activation=tf.nn.relu)#64\n pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)\n net_edge = tf.reshape(pool2, [batch_size, -1])\n net_edge = tf_util.fully_connected(net_edge, 2048, bn=True, is_training=is_training, scope='fc1_edge', bn_decay=bn_decay)\n net_edge = tf_util.dropout(net_edge, keep_prob=0.5, is_training=is_training, scope='dp1_edge')\n net_edge = tf_util.fully_connected(net_edge, 1024, bn=True, is_training=is_training, scope='fc2_edge', bn_decay=bn_decay)\n\n # Fully connected layers for classification\n net_class = tf.reshape(l3_points, [batch_size, -1])\n net_class = tf.concat([net_class,net_edge],axis=-1)\n net_class = tf_util.fully_connected(net_class, 512, bn=True, is_training=is_training, scope='fc1_class', bn_decay=bn_decay)\n net_class = tf_util.dropout(net_class, keep_prob=0.5, is_training=is_training, scope='dp1_class')\n net_class = tf_util.fully_connected(net_class, 256, bn=True, is_training=is_training, scope='fc2_class', bn_decay=bn_decay)\n net_class = tf_util.dropout(net_class, keep_prob=0.5, is_training=is_training, scope='dp2_class')#256\n net_class = tf_util.fully_connected(net_class, 10, activation_fn=None, scope='fc3_class')\n\n # Fully connected layers for center estimation and offset\n net_pose = tf.reshape(l3_points, [batch_size, -1])\n net_pose = tf.concat([net_pose,net_edge],axis=-1)\n net_pose = tf_util.fully_connected(net_pose, 512, bn=True, is_training=is_training, scope='fc1_pose', bn_decay=bn_decay)\n net_pose = tf_util.dropout(net_pose, keep_prob=0.5, is_training=is_training, scope='dp1_pose')\n net_pose = tf_util.fully_connected(net_pose, 256, bn=True, is_training=is_training, scope='fc2_pose', bn_decay=bn_decay)\n net_pose = tf_util.dropout(net_pose, keep_prob=0.5, is_training=is_training, scope='dp2_pose')\n net_pose = tf_util.fully_connected(net_pose, 7, activation_fn=None, scope='fc3_pose')\n\n # Fully connected layers for anchor box classification\n net_anchor = tf.reshape(l3_points, [batch_size, -1])\n net_anchor = tf.concat([net_anchor,net_edge],axis=-1)\n net_anchor = tf_util.fully_connected(net_anchor, 512, bn=True, is_training=is_training, scope='fc1_anchor', bn_decay=bn_decay)#512\n net_anchor = 
tf_util.dropout(net_anchor, keep_prob=0.5, is_training=is_training, scope='dp1_anchor')\n net_anchor = tf_util.fully_connected(net_anchor, 256, bn=True, is_training=is_training, scope='fc2_anchor', bn_decay=bn_decay)\n net_anchor = tf_util.dropout(net_anchor, keep_prob=0.5, is_training=is_training, scope='dp2_anchor')#256\n net_anchor = tf_util.fully_connected(net_anchor, 4, activation_fn=None, scope='fc3_anchor')\n \n # Fully connected layers for anchor box classification\n net_anchor_theta = tf.reshape(l3_points, [batch_size, -1])\n net_anchor_theta = tf.concat([net_anchor_theta,net_edge],axis=-1)\n net_anchor_theta = tf_util.fully_connected(net_anchor_theta, 512, bn=True, is_training=is_training, scope='fc1_anchor_theta', bn_decay=bn_decay)#512\n net_anchor_theta = tf_util.dropout(net_anchor_theta, keep_prob=0.5, is_training=is_training, scope='dp1_anchor_theta')\n net_anchor_theta = tf_util.fully_connected(net_anchor_theta, 256, bn=True, is_training=is_training, scope='fc2_anchor_theta', bn_decay=bn_decay)\n net_anchor_theta = tf_util.dropout(net_anchor_theta, keep_prob=0.5, is_training=is_training, scope='dp2_anchor_theta')#256\n net_anchor_theta = tf_util.fully_connected(net_anchor_theta, 4, activation_fn=None, scope='fc3_anchor_theta')\n \n return net_class, net_pose, net_anchor,end_points,net_anchor_theta"
]
| [
"0.6049695",
"0.59141093",
"0.59044313",
"0.5859675",
"0.5762134",
"0.56918836",
"0.5655732",
"0.5647954",
"0.5621027",
"0.55535257",
"0.5541016",
"0.55154335",
"0.5508736",
"0.55031645",
"0.5502034",
"0.5467258",
"0.5453469",
"0.5444473",
"0.53857285",
"0.53718",
"0.5365043",
"0.53627306",
"0.5343544",
"0.53431636",
"0.5336957",
"0.53351223",
"0.5331612",
"0.5312327",
"0.53084487",
"0.52998126"
]
| 0.61300606 | 0 |
adds the passed params to the list of query params map | def __add_query_param(self, params_to_map, params, overwrite=True):
for key, value in params_to_map.items():
(is_query_param, tkey) = self.__get_solr_key_from_app_key(key)
if is_query_param:
if not overwrite:
# check if the param is already present, if it is don't overwrite
# since the param can have aliases or equivalents, treat equivalent as present
aliases = [tkey]
for group in self.__config.equivalent_params:
if key in self.__config.equivalent_params[group]:
aliases = self.__config.equivalent_params[group]
break
present_in_map = False
for alias in aliases:
(is_qp, akey) = self.__get_solr_key_from_app_key(alias)
if is_qp and (akey in params[self.QUERY_FILTER_KEY] or akey in params[self.FILTER_KEY]):
present_in_map = True
# don't add this param move on to next param since it is present in the map
if present_in_map:
continue
# query params are filter query params if they are present in filter_query_params list
if tkey in self.__config.filter_query_params:
params[self.QUERY_FILTER_KEY][tkey] = value
else:
params[self.FILTER_KEY][tkey] = value
else:
params[tkey] = value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def addParams(self, *params):\n for param in params:\n self.addParam(param)\n self.params = list(set(self.params))",
"def add_params(self, params):\n return self.set_param('params', params)",
"def add_params(self, params: t.Mapping[str, str]) -> 'Request':\n return replace(self, params={**self.params, **params})",
"def query_append(*query_params):\n li = []\n for qp in query_params:\n qs = urlencode_s(query_unflatten(qp))\n if qs:\n li.append(qs)\n return \"&\".join(li)",
"def updatekwargs(self,request):\n updated_dict={}\n if isinstance(request.query_params,QueryDict):\n updated_dict = {k:','.join(v) for k,v in request.query_params.iterlists()}\n else:\n updated_dict = request.query_params\n updated_dict.update(self.kwargs)\n\n self.kwargs = updated_dict",
"def _build_param_request(self):\n search_params = []\n for param in self.params:\n # print(param)\n if self.params[param] is not None:\n search_params.append(param + '={}'.format(self.params[param]))\n search_params = '&' + '&'.join(search_params)\n return search_params",
"def add_query_params(\n url: str, **params: Mapping[str, Union[str, List[str]]]\n ) -> str:\n o = urlparse(url)\n qp = parse_qs(o.query, keep_blank_values=True)\n\n for k, v in params.items():\n if isinstance(v, str):\n v = [v]\n try:\n qp[k].extend(v)\n except KeyError:\n qp[k] = v\n\n qs = urlencode(qp, doseq=True, quote_via=quote)\n return urlunparse(o._replace(query=qs))",
"def query_add(*query_params):\n d = {}\n for qp in query_params:\n qp = query_unflatten(qp)\n for name, value in qp.items():\n if name in d:\n d[name].extend(value)\n else:\n d[name] = value\n return d",
"def modify_query_params(context, **kwargs):\n request = context['request']\n params = request.GET.copy()\n for key, value in kwargs.items():\n if value == '':\n if key in params:\n del params[key]\n else:\n params[key] = value\n return ('?' + params.urlencode()) if params else ''",
"def updated_querystring(request, params):\n original_params = request.GET.copy()\n for key in params:\n if key in original_params:\n original_params.pop(key)\n original_params.update(params)\n return original_params.urlencode()",
"def updated_querystring(request, params):\n original_params = request.GET.copy()\n for key in params:\n if key in original_params:\n original_params.pop(key)\n original_params.update(params)\n return original_params.urlencode()",
"def _get_query_params(self, req):\n params = {\n 'filters': self._get_filters(req),\n 'limit': self._get_limit(req),\n 'sort_key': [self._get_sort_key(req)],\n 'sort_dir': [self._get_sort_dir(req)],\n 'marker': self._get_marker(req),\n }\n\n for key, value in params.items():\n if value is None:\n del params[key]\n\n return params",
"def append_query_params(original_url, **kwargs):\n scheme, netloc, path, query_string, fragment = urlsplit(original_url)\n query_params = parse_qs(query_string)\n if kwargs is not None:\n for key, value in kwargs.items():\n query_params[key] = [value]\n\n new_query_string = urlencode(query_params, doseq=True)\n new_url = urlunsplit((scheme, netloc, path, new_query_string, fragment))\n return new_url",
"def append_query_params(original_url, **kwargs):\n scheme, netloc, path, query_string, fragment = urlsplit(original_url)\n query_params = parse_qs(query_string)\n if kwargs is not None:\n for key, value in kwargs.items():\n query_params[key] = [value]\n\n new_query_string = urlencode(query_params, doseq=True)\n new_url = urlunsplit((scheme, netloc, path, new_query_string, fragment))\n return new_url",
"def add_querystring(context, **kwargs):\n\n updated = context['request'].GET.copy()\n\n # have to iterate over and not use .update as it's a QueryDict not a dict\n for k, v in kwargs.items():\n updated[k] = v\n\n return '?{}'.format(updated.urlencode()) if updated else ''",
"def _get_query_params(self, req):\n params = {}\n for PARAM in SUPPORTED_PARAMS:\n if req and PARAM in req:\n params[PARAM] = req.get(PARAM)\n\n return params",
"def _prepare_params(self, params):\n for key, value in params.items():\n if type(value) is list:\n params[key] = [(6, 0, value)]\n\n return params",
"def replace_query_params(cls, url: str, **params: Mapping[str, str]) -> str:\n url, _ = cls.separate_query_params(url, params.keys())\n return cls.add_query_params(url, **params)",
"def set_params(self, params):\n for step_id, step_params in _iteritems(params):\n for name, value in _iteritems(step_params):\n self.add_param(step_id, name, value)",
"def set_query_parameters(url, params):\n url_parts = list(urlparse(url))\n\n query = dict(parse_qsl(url_parts[4]))\n query.update(params)\n url_parts[4] = urlencode(query)\n\n return urlunparse(url_parts)",
"def _extend_url(self, url, params):\n # filter out None parameters\n params = {k:v for k,v in params.items() if v is not None}\n for key in params:\n url = url + \"&{}={}\".format(key, params[key])\n return url",
"def add_parameters(self, path, method, params: set):\n if path not in self.param_infos:\n self.param_infos[path] = {}\n\n method = method.upper()\n if method not in self.param_infos[path]:\n self.param_infos[path][method] = set()\n\n self.param_infos[path][method] |= params",
"def _GetUrlParams(self, query=None):\n params = sum([c_w_c._GetUrlParams() for c_w_c in self.reactants], [])\n params.extend(self.aq_params._GetUrlParams())\n \n if query is not None:\n for arrow in constants.POSSIBLE_REACTION_ARROWS:\n tmp_query = query.replace(arrow, '=>')\n params.append('query=%s' % urllib.quote(tmp_query))\n \n return params",
"def get_params(self):\r\n query = urlparse.urlparse(self.path).query\r\n\r\n # By default, `parse_qs` returns a list of values for each param\r\n # For convenience, we replace lists of 1 element with just the element\r\n return {\r\n k:v[0] if len(v) == 1 else v\r\n for k,v in urlparse.parse_qs(query).items()\r\n }",
"def _add_query_param(self, route_path, name, type_, default=None):\n route = self._find_route(route_path)\n # logging.info(\"Before:\", route.dependant.query_params)\n query_param = create_query_param(name, type_, default)\n route.dependant.query_params.append(query_param)\n # logging.info(\"After:\", route.dependant.query_params)",
"def set_params(self, params):\n\n self.url_params.update(params)\n return self",
"def addParams(self, params, container = None):\n\n\t\tfor param, value in params.iteritems():\n\t\t\tself.addParam([param, value], container)\n\n\t\treturn self",
"def _inject_params(self, params):\n\n params.extend([LocaleParam(), CompileDomainsParam(),\n UseFuzzyParam(), StatisticsParam(),\n DirectoryParam(), OutputFileParam()])\n\n return super()._inject_params(params)",
"def _params(self, qs):\n return [str_id for str_id in qs.split(',')]",
"def __update_params(self,**kwargs):\n updatedArgSet = set(self._updateParamsArgs) & kwargs.viewkeys()\n if len(updatedArgSet) > 0:\n args = self._subDictionary(self._updateParamsArgs)\n newArgs = self._onParamsUpdate(**args)\n updatedArgs =dict()\n for k in updatedArgSet:\n try:\n updatedArgs[k] = newArgs[k]\n except:\n pass\n\n self.__dictionary.update(newArgs)\n else:\n pass"
]
| [
"0.7209539",
"0.7026987",
"0.69236857",
"0.68808097",
"0.6757168",
"0.67167974",
"0.6687688",
"0.6580263",
"0.6561481",
"0.6480852",
"0.6480852",
"0.64774287",
"0.6393934",
"0.6393934",
"0.63533145",
"0.632442",
"0.6317008",
"0.6285332",
"0.62216014",
"0.6214522",
"0.6209007",
"0.6183783",
"0.61709577",
"0.6161991",
"0.6159219",
"0.6142925",
"0.61381316",
"0.6134795",
"0.6113741",
"0.611246"
]
| 0.7101635 | 1 |
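A minimal sketch (standard library only, illustrative values) of why the query-string helpers above pass doseq=True to urlencode: parse_qs maps every key to a list of values, and doseq expands those lists back into repeated query parameters instead of stringifying them.

from urllib.parse import parse_qs, urlencode

params = parse_qs("q=a&tag=x&tag=y")   # {'q': ['a'], 'tag': ['x', 'y']}
print(urlencode(params, doseq=True))   # q=a&tag=x&tag=y  (lists become repeated keys)
print(urlencode(params))               # without doseq, each list is stringified and percent-encoded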
Internal method that returns the name of the app parameter given the solr parameter name. Returns the mapped key if a mapping is found, else the default value. The returned value is a tuple whose first element indicates whether a mapping was found and whose second element is the mapped key, or the original key if no mapping exists. | def __get_app_key_from_solr_key(self, key):
keys = self.__config.solr_response_map['map']
if key in keys:
return (True, keys[key])
return (False, key) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mapping_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"mapping_name\")",
"def __get_solr_key_from_app_key(self, key):\n keys = self.__config.get_inverse_map()\n if key in keys:\n return (True, keys[key])\n\n return (False, key)",
"def get_param_with_name(self, param_name):\n return self.params[param_name]",
"def _get_app_param_info(app_info, resultCount=1, resultKey='primaryGenreId'):\n try:\n assert app_info['results'][resultCount - 1][resultKey] is not None, \"Null item\"\n return app_info['results'][resultCount - 1][resultKey]\n except AssertionError as e:\n print(\"get_app_param_info\", e)\n sys.exit(\"Exit script with error code %s\" % e)\n except TypeError as e:\n print(\"get_app_param_info\", e)\n sys.exit(\"Exit script with error code %s\" % e)\n except:\n e = sys.exc_info()[0]\n print(\"Error: get_app_param_info %s\" % e)\n sys.exit(\"Exit script with error code %s\" % e)",
"def get(self, name):\n parts = name.split('.', 1)\n return getattr(self, parts[0]).get(parts[1], self.input_params_default[parts[1]])",
"def key_pair_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key_pair_name\")",
"def key_pair_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key_pair_name\")",
"def app_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"app_name\")",
"def getParam(self, params, name):\n return params.get(name)",
"def GetUniqueParam(self, name):\n if not hasattr(self, '_params'):\n self._params = cgi.parse_qs(self.path[self.path.find('?') + 1:])\n\n param_list = self._params.get(name, [])\n if len(param_list) == 1:\n return param_list[0]\n return None",
"def _get_app_name(app):\n return app[APP_NAME_KEY]",
"def _getConfigParam(self, name, default=None):\n return self.config.get(self._configPrefix + name.lower(), default)",
"def get_parameter(self, name: str) -> any:\r\n if name in self.kwargs:\r\n return self.kwargs[name]\r\n for x in self.args:\r\n if isinstance(x, dict) and name in x:\r\n return x[name]\r\n else:\r\n return None",
"def get_parm_value(parameters, name, env_name, default_value):\n value = parameters.get(name, '')\n return os.environ.get(env_name, default=default_value) if not value else value",
"def appsetting(self, workflow, name):\n\n if workflow:\n config = workflow.get(\"app\")\n if config:\n return config.get(name)\n\n return None",
"def application_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"application_name\")",
"def get_param(name, default=''):\n return request.GET.get(name, default).strip()",
"def _get_packed_param_name(self, seen_q_op_info: SeenQOpInfo) -> Optional[str]:\n return self.idx_to_packed_weight_name.get(seen_q_op_info.idx, None)",
"def lookup_name(self, name, default=None):\n for frames in reversed(self._arg_transformer):\n if name in frames:\n return frames[name]\n return name if default is None else default",
"def AttributeName(self, param_name):\n for attribute_name, p in six.iteritems(self.attribute_to_params_map):\n if p == param_name:\n return attribute_name",
"def getparam(name, default=None, stash=None, params=None):\n v = getparamlist(name, stash=stash, params=params)\n if len(v) > 0: return v[0]\n return default",
"def getStrParam(self, paramkey, default=None):\n value = self.request.getParameter(paramkey)\n if value is None: return default\n return value",
"def get_param_name(self, param_id, syselem):\n\n with self.__connection.cursor() as cursor:\n query = \"SELECT NAME FROM %s WHERE PID= '%s' AND SYSTEM_ELEMENT= '%s'\" % (self.__schema, param_id, syselem)\n cursor.execute(query)\n result = cursor.fetchone()\n return result['NAME']",
"def get_param(self, key, default=None):\n\n return self._request.query_params[\n key] if key in self._request.query_params else default",
"def _get_param_from_request(self, nombreParametro):\n\t\treturn request.args.get(nombreParametro)",
"def configuration_configmap_name(self) -> Optional[str]:\n return pulumi.get(self, \"configuration_configmap_name\")",
"def get_argument(self, name):\n val = self.arguments.get(name)\n if val:\n return val[0]\n return None",
"def mapping(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"mapping\")",
"def mapping(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"mapping\")",
"def get_parameter_value(self, parameter_name):\n if parameter_name in self.description[\"config\"][\"values\"].keys():\n return self.description[\"config\"][\"values\"][parameter_name][\"value\"]\n else:\n return \"No such parameter\""
]
| [
"0.637135",
"0.6053645",
"0.60306513",
"0.60296565",
"0.59799975",
"0.5863355",
"0.5863355",
"0.58111364",
"0.5795304",
"0.57818747",
"0.573486",
"0.573421",
"0.5727314",
"0.5697753",
"0.5696839",
"0.56940734",
"0.5673148",
"0.566544",
"0.56475675",
"0.55933285",
"0.55770475",
"0.55766094",
"0.55602163",
"0.5554151",
"0.55450106",
"0.5538573",
"0.55366635",
"0.55248696",
"0.55248696",
"0.55151004"
]
| 0.6488492 | 0 |
Creates a box from a contour representation of the box. | def create(cls, boxCon):
array = boxCon.tolist()
startX = min(boxCon, key = lambda vertex : vertex[0][0])[0][0]
endX = max(boxCon, key = lambda vertex : vertex[0][0])[0][0]
startY = min(boxCon, key = lambda vertex : vertex[0][1])[0][1]
endY = max(boxCon, key = lambda vertex : vertex[0][1])[0][1]
box = cls(startX, startY, endX - startX, endY - startY)
return box | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_box(self, scale, color):\n box = Box()\n box.set_color(c=color)\n box.set_location(0, 0, 0)\n box.set_size(scale, scale, scale)\n return box",
"def make_box(self, scale, color):\n box = Box()\n box.set_color(c=color)\n box.set_location(0, 0, 0)\n box.set_size(scale, scale, scale)\n return box",
"def from_bounds(cls, bounds) -> \"Box\":\n return cls(\n x=bounds[\"left\"],\n y=bounds[\"top\"],\n width=bounds[\"right\"] - bounds[\"left\"],\n height=bounds[\"bottom\"] - bounds[\"top\"],\n )",
"def covering_box(boxes):\n x_min = np.amin([b.x for b in boxes])\n x_max = np.amax([b.x + b.width for b in boxes])\n y_min = np.amin([b.y for b in boxes])\n y_max = np.amax([b.y + b.height for b in boxes])\n cover = Box(x_min, y_min, x_max - x_min, y_max - y_min)\n return cover",
"def make_box(self, name=None) -> 'Box':\n\n if self.size().x == 0:\n box = Rect(self.size().z, self.size().y, name=name)\n box.ry(90)\n elif self.size().y == 0:\n box = Rect(self.size().x, self.size().z, name=name)\n box.rx(90)\n elif self.size().z == 0:\n box = Rect(self.size().x, self.size().y, name=name)\n else:\n box = Box(*self.size().asArray(), name=name)\n\n box.place(\n ~box == ~self,\n ~box == ~self,\n ~box == ~self)\n return box",
"def create_box(self, a, b, c):\n proj_to_xy = lambda x: x[:2]\n get_angle = lambda x,y: (x @ y) / (np.linalg.norm(x) * np.linalg.norm(y))\n\n ab = proj_to_xy(b) - proj_to_xy(a)\n ac = proj_to_xy(c) - proj_to_xy(a)\n bc = proj_to_xy(c) - proj_to_xy(b)\n\n ab_ac = np.abs(get_angle(ab, ac))\n ab_bc = np.abs(get_angle(ab, bc))\n\n x1, y1, z1 = a\n x2, y2, z2 = b\n x3, y3, z3 = c\n\n z = (z1 + z2)/2\n\n down = np.array([0., 0., z - z3])\n\n if ab_ac < ab_bc: # 3. point is bottom-left\n back = np.array([ac[0], ac[1], 0])\n else: # 3. point is bottom-right\n back = np.array([bc[0], bc[1], 0])\n\n tfl = np.array([x1, y1, z])\n tfr = np.array([x2, y2, z])\n\n tbl = tfl + back\n tbr = tfr + back\n\n bfl = tfl - down\n bfr = tfr - down\n\n bbl = bfl + back\n bbr = bfr + back\n\n return np.array([\n tfl, tfr,\n tbl, tbr,\n bfl, bfr,\n bbl, bbr\n ])",
"def box(self) -> math.Box:\n area = self.__dict__[\"area\"]\n if area is None:\n return math.Box()\n return math.Box(math.Point(*area[:2]), math.Point(*area[-2:]))",
"def createBox( size=(1,1,1), position=(0,0,0), colour=(0.6,0.6,0.6) ):\r\n \r\n size = PyUtils.toVector3d(size)\r\n position = PyUtils.toPoint3d(position)\r\n vertices = []\r\n delta = MathLib.Vector3d()\r\n for repeat in range(3):\r\n for x in (-0.5,0.5) :\r\n delta.x = size.x * x\r\n for y in (-0.5,0.5) :\r\n delta.y = size.y * y\r\n for z in (-0.5,0.5) :\r\n delta.z = size.z * z\r\n vertices.append( position + delta )\r\n \r\n faces = [(0,1,3,2),(5,4,6,7), # YZ Faces\r\n (9,13,15,11),(12,8,10,14), # XY Faces\r\n (18,19,23,22),(17,16,20,21)] # XZ Faces\r\n \r\n return create( vertices, faces, colour )",
"def createContinuosBox(self):\n return box(self._startCorner[0],\n self._startCorner[1],\n self._endCorner[0] + 1,\n self._endCorner[1] + 1)",
"def box(self):\n b = Bnd_Box()\n brepbndlib_Add(self.topods_shape(), b)\n return geom_utils.box_to_geometry(b)",
"def box_to_rect(box, color, linewidth=3):\r\n box = box.asnumpy()\r\n return plt.Rectangle(\r\n (box[0], box[1]), box[2]-box[0], box[3]-box[1],\r\n fill=False, edgecolor=color, linewidth=linewidth)",
"def box_to_rect(box, color, linewidth=3):\r\n box = box.asnumpy()\r\n return plt.Rectangle(\r\n (box[0], box[1]), box[2] - box[0], box[3] - box[1],\r\n fill=False, edgecolor=color, linewidth=linewidth)",
"def box_to_rect(box, color, linewidth=3):\n box = box.asnumpy()\n return plt.Rectangle(\n (box[0], box[1]), box[2] - box[0], box[3] - box[1],\n fill=False, edgecolor=color, linewidth=linewidth)",
"def add_box(self, l, w, h, x, y, z, comment=\"\"):\n self.data['shape']['compound'].append({'box': {'#': comment, 'pose': {'x': x, 'y': y, 'z': z},\n 'size': {'x': l, 'y': w, 'z': h}}})",
"def box2cs(box):\r\n input_size = (256, 256)\r\n\r\n x, y, w, h = box[:4]\r\n aspect_ratio = input_size[0] / input_size[1]\r\n center = np.array([x + w * 0.5, y + h * 0.5], dtype=np.float32)\r\n\r\n if w > aspect_ratio * h:\r\n h = w * 1.0 / aspect_ratio\r\n elif w < aspect_ratio * h:\r\n w = h * aspect_ratio\r\n\r\n # pixel std is 200.0\r\n scale = np.array([w / 200.0, h / 200.0], dtype=np.float32)\r\n\r\n scale = scale * 1.25\r\n\r\n return center, scale",
"def Box(bounds=(-1.0, 1.0, -1.0, 1.0, -1.0, 1.0), level=0, quads=True):\n if np.array(bounds).size != 6:\n raise TypeError(\n 'Bounds must be given as length 6 tuple: (xMin, xMax, yMin, yMax, zMin, zMax)'\n )\n src = _vtk.vtkTessellatedBoxSource()\n src.SetLevel(level)\n src.SetQuads(quads)\n src.SetBounds(bounds)\n src.Update()\n return wrap(src.GetOutput())",
"def create_human_box(self, i):\n self.box = self.detections[0, 0, i, 3:7] * np.array([self.w, self.h, self.w, self.h])\n (self.startX, self.startY, self.endX, self.endY) = self.box.astype(\"int\")",
"def convert(size, box):\n # TODO rewrite box to be [TL, BR] coordinates\n #pdb.set_trace()\n dw = 1./size[0]\n dh = 1./size[1]\n x = (box[0] + box[1])/2.0\n y = (box[2] + box[3])/2.0\n w = box[1] - box[0]\n h = box[3] - box[2]\n x = x*dw\n w = w*dw\n y = y*dh\n h = h*dh\n return (x,y,w,h)",
"def __init__(self, boxCoord):\n self.boxCoord =boxCoord\n v1 = self.boxCoord[0] \n \n self.xmin = v1[0]\n self.ymin = v1[1]\n self.zmin =v1[2]\n\n v8 = self.boxCoord[7] \n \n self.xmax = v8[0]\n self.ymax = v8[1]\n self.zmax = v8[2]\n\n x = abs(self.xmax-self.xmin)\n y = abs(self.ymax-self.ymin)\n z = abs(self.zmax-self.zmin)\n\n XYZBox.__init__(self, x,y,z)",
"def box(original, diameter):\n return Box(original, h.ones(original.size()) * diameter, None).checkSizes()",
"def create_box(world, space, density, lx, ly, lz):\r\n\r\n\t\t# Create body\r\n\t\tbody = ode.Body(world)\r\n\t\tM = ode.Mass()\r\n\t\tM.setBox(density, lx, ly, lz)\r\n\t\tbody.setMass(M)\r\n\r\n\t\t# Set parameters for drawing the body\r\n\t\tbody.shape = \"box\"\r\n\t\tbody.boxsize = (lx, ly, lz)\r\n\r\n\t\t# Create a box geom for collision detection\r\n\t\tgeom = ode.GeomBox(space, lengths = body.boxsize)\r\n\t\tgeom.setBody(body)\r\n\r\n\t\treturn body, geom",
"def draw_box(image, box, color, thickness=2):\n b = np.array(box).astype(int)\n cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]), color, thickness, cv2.LINE_AA)",
"def cut(image,box,margin=0,bg=0,dtype=None):\n (r0,c0,r1,c1) = box\n return sl.cut(image,sl.box(r0,r1,c0,c1),margin=margin,bg=bg,dtype=dtype)",
"def resize_and_crop_box(self):\n box = self.scale_box(self._box, self._scaled_width, self._scaled_height)\n box = self.offset_box(box, self._crop_offset_x, self._crop_offset_y)\n box = self.clip_boxes(box)\n return box",
"def build_box(blcorner, trcorner):\n modelbox = [[], []]\n if blcorner != None:\n x, y = literal_eval(blcorner)\n modelbox[0].append(str(x))\n modelbox[0].append(str(y))\n else:\n raise TelemacException(\\\n '... could not find your bounding box bottom left'\n 'corner. Please use --bl option (, delimited, no '\n 'spaces).\\n\\n')\n if trcorner != None:\n x, y = literal_eval(trcorner)\n modelbox[1].append(str(x))\n modelbox[1].append(str(y))\n else:\n raise TelemacException(\\\n '... could not find your bounding box top right '\n 'corner. Please use --tr option (, delimited, no '\n 'spaces).\\n\\n')\n return modelbox",
"def coco_box_to_bbox(box):\n bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]], dtype=np.float32)\n return bbox",
"def make_box_square(box, offset_scale=0.05):\n\n x_min, y_min, x_max, y_max = box[:4]\n center_x = (x_max + x_min) / 2.\n center_y = (y_max + y_min) / 2.\n width = x_max - x_min\n height = y_max - y_min\n\n if height >= width:\n half_box = height / 2.\n x_min = center_x - half_box\n x_max = center_x + half_box\n if width > height:\n half_box = width / 2.\n y_min = center_y - half_box\n y_max = center_y + half_box\n\n box_side_lenght = (x_max + x_min) / 2.\n offset = offset_scale * box_side_lenght\n x_min = x_min - offset\n x_max = x_max + offset\n y_min = y_min - offset\n y_max = y_max + offset\n return (int(x_min), int(y_min), int(x_max), int(y_max))",
"def box(original, radius):\n batches = original.size()[0]\n num_elem = h.product(original.size()[1:])\n ei = h.getEi(batches,num_elem)\n \n if len(original.size()) > 2:\n ei = ei.contiguous().view(num_elem, *original.size())\n\n return HBox(original, None, ei * radius).checkSizes()",
"def get_box(ra0, ra1, dec0, dec1):\n\n box = np.array([[dec0, ra1], [dec1, ra0]]) * np.pi / 180\n\n return box",
"def make_lattice(box):\n from quippy.atoms import make_lattice\n if box.shape == (3, 3):\n\t# http://lammps.sandia.gov/doc/Section_howto.html#howto-12 Describes the\n\t# methodology (look for the section entitled \"6.12. Triclinic\n\t# (non-orthogonal) simulation boxes\") The [a, b, c, alpha, beta, gamma]\n\t# vector can be passed to the ase.Atoms object as a definition for the\n\t# triclinic box (note that the quippy.Atoms class inherits from\n\t# ase.Atoms) Make sure that you note that the data is provided:\n\t# \n\t# ITEM: BOX BOUNDS xy xz yz ....\n\t# xlo_bound xhi_bound xy\n\t# ylo_bound yhi_bound xz\n\t# zlo_bound zhi_bound yz\n\t# \n\t# whereas we need xlo, xhi, etc. not xlo_bound, xhi_bound, etc.\n\txlo = box[0][0] - min(0.0, box[0][2], box[1][2], box[0][2] + box[1][2])\n\txhi = box[0][1] - max(0.0, box[0][2], box[1][2], box[0][2] + box[1][2])\n\tylo = box[1][0] - min(0.0, box[2][2])\n\tyhi = box[1][1] - max(0.0, box[2][2])\n\tzlo = box[2][0]\n\tzhi = box[2][1]\n\n\ta = (xhi - xlo)\n\tb = np.sqrt((yhi - ylo)**2 + (box[0][2])**2)\n\tc = np.sqrt((zhi - zlo)**2 + (box[1][2])**2 + (box[2][2])**2)\n\talpha = np.arccos((box[0][2] * box[1][2] + (yhi - ylo) * box[2][2]) / (b * c))\n\tbeta = np.arccos(box[1][2] / c)\n\tgamma = np.arccos(box[0][2] / b)\n\treturn make_lattice(a, b, c, alpha, beta, gamma)\n \n elif box.shape == (3, 2):\n\treturn make_lattice(box[0][1] - box[0][0],\n box[1][1] - box[1][0],\n box[2][1] - box[2][0])\n else:\n raise ValueError(\"Unexpected box size/parameters: {}\".format(box))"
]
| [
"0.6670145",
"0.6670145",
"0.65071213",
"0.63027024",
"0.62290746",
"0.617087",
"0.60887074",
"0.60454017",
"0.60419816",
"0.5995471",
"0.59096074",
"0.58819383",
"0.58666605",
"0.58454734",
"0.5770305",
"0.571959",
"0.5706984",
"0.5702862",
"0.56961715",
"0.568718",
"0.56661737",
"0.5623803",
"0.5583088",
"0.5582596",
"0.5569761",
"0.5563401",
"0.5557761",
"0.55505943",
"0.5534427",
"0.5529203"
]
| 0.68660736 | 0 |
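A minimal sketch of the bounding-box computation performed by the create classmethod above, using NumPy reductions over a contour stored in OpenCV's (N, 1, 2) vertex layout; the sample contour values are assumptions for illustration.

import numpy as np

# one (x, y) vertex per row, as produced by cv2.findContours
contour = np.array([[[10, 5]], [[40, 5]], [[40, 30]], [[10, 30]]], dtype=np.int32)

xs, ys = contour[:, 0, 0], contour[:, 0, 1]
start_x, start_y = int(xs.min()), int(ys.min())
width, height = int(xs.max() - start_x), int(ys.max() - start_y)
print(start_x, start_y, width, height)   # 10 5 30 25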
Access the coordinates of this vector by index. | def __getitem__(self, index):
if index == 0:
return self.x
elif index == 1:
return self.y
raise IndexError | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __getitem__(self, index):\n return self.points[index]",
"def __getitem__(self, index: int) -> float:\n if index == 0:\n return self.x\n elif index == 1:\n return self.y\n else:\n raise IndexError",
"def __getitem__(self, j):\n\t\treturn self._coords[j]",
"def __getitem__(self, index):\n return [self.points][index]",
"def get_element(self, index):\n return self.name[index], self.voxels[index], self.vertices[index]",
"def __getitem__(self, index):\n return self.position[index]",
"def __getitem__(self, k):\n return self._coords[k]",
"def xyz(self, i):\n return self.xp[i], self.yp[i], self.zp[i]",
"def __getitem__(self, i):\n return self.__points[i]",
"def __getitem__(self, idx):\n return self.transform(self.X[idx]), self.y[idx]",
"def __getitem__(self, ind):\n if not isinstance(ind, (str, unicode)):\n raise TypeError('Supply a valid str for the index')\n if self.indices[0] == ind:\n return self.x\n if self.indices[1] == ind:\n return self.y\n if self.indices[2] == ind:\n return self.z\n else:\n raise ValueError('Not a defined index')",
"def __getitem__(self, index):\n if self._list_like(index):\n len_var = len(index)\n if len_var==0:\n raise IndexError(\"Received empty index.\")\n elif len_var==1:\n return self._points[index[0]]\n elif len_var==2:\n return self._points[index[0]][index[1]]\n else:\n raise IndexError(\"Received too long index.\")\n return self._points[index]",
"def __getitem__(self, index):\n group = self.groups[index]\n return self.get_x_y(group)",
"def ind2coord(self, index):\n\n # assert (index >= 0)\n # assert(index < self.n - 1)\n\n col = index // self.rows\n row = index % self.rows\n\n return [row, col]",
"def getCoord(self, i):\n _x = self.__xpts[i]\n _y = self.__ypts[i]\n return _x, _y",
"def __getitem__(self, i):\n return self.__x[i]",
"def position_tuple_for_index(self, index):\n x = self.base_values[index % self.size]\n y = self.base_values[index // self.size]\n return x, y",
"def __getitem__(self, index):\n return self._value_at(index)",
"def __getitem__(self, index):\n return self._value_at(index)",
"def __getitem__(self, index):\n return (index, self.data_cube[0, index, :])",
"def __getitem__(self, i):\n if i < X:\n raise IndexError(\"point3d::__getitem__: negative index {0}\".format(i))\n if i == X:\n return self._x\n if i == Y:\n return self._y\n if i == Z:\n return self._z\n # beyond Z\n raise IndexError(\"point3d::__getitem__: index too large {0}\".format(i))",
"def coordinates_of(self, coord_index_list):\n return [self.transformed_coords[i] for i in coord_index_list]",
"def get_coords(self):\r\n # get the coordinates from V, skipping every 3 since it's a vector\r\n meshpts = self.V.tabulate_dof_coordinates()[::3]\r\n\r\n # create r vector if not already created\r\n try:\r\n self.r\r\n except:\r\n self.r = Function(self.V)\r\n\r\n # set the r vector\r\n self.r.vector()[:] = meshpts.flatten()",
"def location_of(self, index):\n img_w, img_h = self.conv_dims()\n \n x = (index % img_w) * self.stride\n y = (index // img_w) * self.stride\n \n return x, y",
"def vector(self,\n i: int,\n j: int) -> np.ndarray:\n return self[j].coord - self[i].coord",
"def vector(self, x):\n if isinstance(x, tuple):\n index = self.tuple_to_index[x]\n elif isinstance(x, str):\n index = self.string_to_index[x]\n else:\n index = x\n\n return self.vectors[index]",
"def __getitem__(self, index):\n\n return self.user_item_coordinates[index, :], self.rating[index]",
"def xyz(self) -> np.ndarray:\n return self._vector[0:3]",
"def coord(self, i):\n assert(i >= 0 and i < self.nAtoms())\n assert(self._c_structure is not NULL)\n cdef const double *coord = freesasa_structure_coord_array(self._c_structure)\n return [coord[3*i], coord[3*i+1], coord[3*i+2]]",
"def __getitem__(self, index):\n assert 0 <= index < len(self), \"Array subscript out of range\"\n return self._elements[index]"
]
| [
"0.73434335",
"0.71966803",
"0.7094703",
"0.7090151",
"0.7011572",
"0.6990462",
"0.6972173",
"0.6955546",
"0.683827",
"0.6811535",
"0.6756246",
"0.6756099",
"0.66974694",
"0.6631126",
"0.66226995",
"0.66208637",
"0.6599712",
"0.6575773",
"0.6575773",
"0.6520088",
"0.64941",
"0.64860374",
"0.64601",
"0.6440644",
"0.64373344",
"0.6421142",
"0.6394995",
"0.6387894",
"0.63775104",
"0.63751715"
]
| 0.7436619 | 0 |
Verifies that the node returns a shifted pose according to the specified linear offset. | def test_pose_shifter(self):
self.dyn_client.update_configuration({"linear_offset_x":0.1, "linear_offset_y":0.0, "linear_offset_z":0.05})
pose_in = geometry_msgs.msg.PoseStamped()
expected = geometry_msgs.msg.PoseStamped()
pose_in.header.frame_id = "base_link"
expected.header.frame_id = "base_link"
pose_in.pose.position.x = 1.0
pose_in.pose.position.y = 2.0
pose_in.pose.position.z = 3.0
pose_in.pose.orientation.x = 0.0
pose_in.pose.orientation.y = 0.0
pose_in.pose.orientation.z = 0.0
pose_in.pose.orientation.w = 1.0
# shift of 10 cm in X and 5 cm in Z
expected.pose.position.x = 1.1
expected.pose.position.y = 2.0
expected.pose.position.z = 3.05
expected.pose.orientation.x = 0.0
expected.pose.orientation.y = 0.0
expected.pose.orientation.z = 0.0
expected.pose.orientation.w = 1.0
self.pose_in_pub.publish(pose_in)
while not self.wait_for_result:
self.event_out.publish('e_start')
self.assertEqual(self.result.header.frame_id, expected.header.frame_id)
self.assertEqual(self.result.pose, expected.pose) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_validate_input_offset(self):\n final_config = self.dtm2.validate_input('01010101')\n nose.assert_equal(final_config[0], 'q4')\n nose.assert_equal(str(final_config[1]), 'TMTape(\\'yyx1010101\\')')",
"def is_ahead_of(self, pose, x, y):\n x1 = pose.position.x\n y1 = pose.position.y\n orientation = pose.orientation\n euler = tf.transformations.euler_from_quaternion(\n [orientation.x, orientation.y, orientation.z, orientation.w])\n yaw = euler[2]\n return ((x - x1) * math.cos(yaw) + (y - y1) * math.sin(yaw)) > 0",
"def testOffsetSetNegative(self):\n offset = [-1.3782, 278.32, 0.738378233782]\n self.node.offset = offset\n offsetD = tuple([Decimal(str(i)) for i in offset])\n\n self.assertEqual(\n offsetD,\n self.node.offset\n )",
"def get_offset_check_function(consumer, tp, target_date, margin):\n\n def check_offset(offset, consumer=consumer, tp=tp, target_date=target_date, margin=margin):\n consumer.seek(tp, offset)\n message = next(consumer)\n change = change_from_kafka_message(message)\n diff = (change.metadata.publish_timestamp - target_date).total_seconds()\n if diff < 0 and abs(diff) < margin:\n return 0\n return diff\n\n return check_offset",
"def is_valid_date_offset(user_date, offset):\n return",
"def _handle_scene_offset_changed(self, master, slave, offset_delta):\n target_offset = self._get_target_scene_offset(master, slave, offset_delta)\n if target_offset >= 0:\n slave.set_offsets(slave.track_offset_method(), target_offset)",
"def offset(self, offset):\n raise NotImplementedError(\"This should have been implemented.\")",
"def testGetOffset(self):\n # Bypass setter\n self.node._offset = [12.8, 1.2, 1.4]\n\n self.assertEqual(\n (12.8, 1.2, 1.4),\n self.node.offset,\n )",
"def check_offset(self):\n\n for d in range(self.n_dmps):\n if abs(self.y0[d] - self.goal[d]) < 1e-4:\n self.goal[d] += 1e-4",
"def check_position(self, desired_pos):\n new_pos = self.get_current_position()\n for i, pos in enumerate(new_pos):\n if abs(float(pos) - float(desired_pos[i])) > 0.5: # up to a half micrometer\n self.log.error(\n \"Table movement failed. Position: \"\n + str(new_pos)\n + \" is not equal to desired position: \"\n + str(desired_pos)\n )\n return False\n return True",
"def test_stark_offset_always_positive(self):\n exp = StarkRamseyXY(physical_qubits=[0], stark_amp=0.1)\n with self.assertRaises(ValueError):\n exp.set_experiment_options(stark_freq_offset=-10e6)",
"def offset(self, offset):\n return Line3d(self.p + offset * self.cross.normalized(), self.v)",
"def ReturnOffset(offset):\r\n return _hiew.ReturnOffset(offset)",
"def contains_offset(self, offset):\n return (offset >= self.offset) and (offset < self.offset + self.filesize)",
"def is_valid_pos(self, pos_step):\n return not (self.pos.x % pos_step or self.pos.y % pos_step)",
"def test_shift_point(self):\n point = (0,0)\n new_point = utils.shift_point(point, 3, 4)\n self.assertEqual((3,4), new_point)\n\n point = (-2.34, 1.19)\n new_point = utils.shift_point(point, 2.34, -1.19)\n self.assertEqual((0,0), new_point)",
"def testSetOffsetWithNegativeInt(self):\n self.node.offset = -20\n\n self.assertEqual(\n (Decimal('-20.0'), Decimal('-20.0'), Decimal('-20.0')),\n self.node.offset\n )",
"def testOffsetBadLength(self):\n def setOffset():\n self.node.offset = ['banana']\n\n self.assertRaises(\n ValueError,\n setOffset\n )",
"def testSetOffsetWithNegativeString(self):\n self.node.offset = '-20'\n\n self.assertEqual(\n (Decimal('-20.0'), Decimal('-20.0'), Decimal('-20.0')),\n self.node.offset\n )",
"def test_first_pos() -> None:\n assert sw.walk_to(1) == sw.Coordinate(0, 0)",
"def test_rover_position(self):\n rover = Rover(self.plateau_dimensions, self.rover_initial_position, Rover.DIRECTIONS.get('E'))\n rover.execute_instructions(\"LMLM\")\n self.assertEqual(rover._position.x, 1)\n self.assertEqual(rover._position.y, 2)\n self.assertEqual(rover.get_heading, 'W')",
"def test_findXCoordinateFromDirection_left(self):\n actual_result = rules.findXCoordinateFromDirection(8)\n expected_result = -2\n self.assertEqual(actual_result, expected_result)",
"def isSetOffset(self):\n return _libsbml.Unit_isSetOffset(self)",
"def testSetOffsetWithNegativeFloat(self):\n self.node.offset = -20.1\n\n self.assertEqual(\n (Decimal('-20.1'), Decimal('-20.1'), Decimal('-20.1')),\n self.node.offset\n )",
"def test_pos():\n # Test for positive special method with scalar Rnode object\n x = Rnode(5)\n z = +x\n try:\n assert z.value == 1 * x.value\n except AssertionError as e:\n print(e)\n raise AssertionError",
"def offset(self, offset):\n return Line(self.p + offset * self.cross_z.normalized(), self.v)",
"def test_transform(self):\n pt = np.array([1.0, 2.0, 3.0])\n tr = pose.Pose()\n tr.position = onp.array([4.0, 5.0, 6.0])\n pt2 = tr.transform(pt)\n self.assertLess(np.linalg.norm(pt2 - np.array([5.0, 7.0, 9.0])), 1e-6)",
"def test_coord_preceding_fs(self):",
"def test_to_delta_time_positive_difference(with_tf_random_seed, np_time_points):\n time_points = tf.constant(np_time_points, dtype=default_float())\n\n with pytest.raises(InvalidArgumentError) as exp:\n to_delta_time(time_points)\n\n assert exp.value.message.find(\"Condition x >= y\") >= 0",
"def significant_position_change(self, timestamp, new_position):\n timediff = (timestamp - self.timestamp).total_seconds()\n posdiff = (new_position - self.position) / 1000\n diffdiff = timediff - posdiff\n\n if abs(diffdiff) > 5:\n return True\n return False"
]
| [
"0.60130155",
"0.5325522",
"0.52897584",
"0.5204398",
"0.5195322",
"0.5172869",
"0.5134362",
"0.50898653",
"0.5074088",
"0.50511104",
"0.50483215",
"0.50256354",
"0.50066537",
"0.49883357",
"0.49804953",
"0.49800643",
"0.49333018",
"0.49173224",
"0.49115375",
"0.49074942",
"0.49001443",
"0.48891103",
"0.48863244",
"0.4886196",
"0.48723674",
"0.4852869",
"0.48412067",
"0.48212048",
"0.48210835",
"0.4818445"
]
| 0.55197906 | 1 |
Return the x coordinate of the rectangle's left edge. | def _rect_left(self):
return min(self.x, self.x + self.w) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def left(self):\n return self.points['topLeft'].x",
"def left(self):\n return self.points['topLeft'].x",
"def pos_left(self, x=1):\n\n self.x -= x\n return self.pos(self.x, self.y)",
"def left(self):\n x, y = (self.loc[0] - 1, self.loc[1])\n\n if x < 0:\n return None # None\n\n return self.garden.cells[y][x]",
"def getAbsoluteLeft(self):\n return self.canvas.getAbsoluteLeft()",
"def getLeft(self):\n return self.left",
"def get_x_position(self):\n return self.rect.x",
"def left_edge(f: SwimmingFish) -> float:\n if f.dx > 0:\n return f.posn.x\n else:\n return f.posn.x - f.fish.size",
"def border_box_x(self):\n return self.position_x + self.margin_left",
"def get_left(self):\n return self.left",
"def left_distance(self):\n return self.x",
"def getupperleft(self):\n return (self.rect.x, self.rect.y)",
"def getMouseLeft(self):\n return self.mouseData.leftActive",
"def downleft(self):\n return Coord([self.x - 1, self.y + 1])",
"def left(self):\n # type: () -> float\n return self._left",
"def get_left(self):\n return self.__left",
"def x(self):\n return self.left",
"def left(self):\n\t\treturn self._left",
"def left(self):\n return self._left",
"def left(self):\n return self._left",
"def getExceedingBoxLeft(self):\n return self.exceedingBoxLeft",
"def top_left(self):\n return Point(self.left, self.top)",
"def get_x(self):\n\t\treturn self._collision_rect.x + 14",
"def get_left_side(grid):\n right = int(grid.width / 2)\n left_side = Grid(\n grid=grid, crop=Crop(left=0, right=right, top=0, bottom=0))\n left_side.find_grid_lines()\n left_side.vert_insert_line(0, distance=-80)\n left_side.get_cells()\n left_side.get_row_labels()\n return left_side",
"def left(self):\n\n return self._left",
"def _get_x(self):\n return self.position.x",
"def left(self, p):\n node = self._validate_position(p)\n return self._make_position(node.left)",
"def get_x(self):\n return self.posX",
"def left(self):\n return self.l",
"def left(self, p):\n node = self._validate(p)\n return self._make_position(node._left)"
]
| [
"0.7934868",
"0.7934868",
"0.75278914",
"0.75106096",
"0.74849534",
"0.7438955",
"0.73301005",
"0.72666276",
"0.7224344",
"0.7210953",
"0.7207516",
"0.716287",
"0.7159352",
"0.7111415",
"0.7107011",
"0.7096342",
"0.70923966",
"0.7085268",
"0.7046702",
"0.7046702",
"0.70441395",
"0.6966913",
"0.6952905",
"0.6940754",
"0.6940278",
"0.69137096",
"0.6894875",
"0.68824375",
"0.687068",
"0.68555677"
]
| 0.8043434 | 0 |
Return the x coordinate of the rectangle's right edge. | def _rect_right(self):
return max(self.x, self.x + self.w) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def right(self):\n return self.points['topRight'].x",
"def right(self):\n return self.points['topRight'].x",
"def right(self):\n return self.left + self.width",
"def get_rightmost_edge(self):\n return self.offset + sum(self.get_screen_widths())",
"def getupperleft(self):\n return (self.rect.x, self.rect.y)",
"def _rect_left(self):\n\treturn min(self.x, self.x + self.w)",
"def get_right(self):\n return self.right",
"def pos_right(self, x=1):\n\n self.x += x\n return self.pos(self.x, self.y)",
"def right(self):\n\t\treturn self._right",
"def getRight(self):\n return self.right",
"def right(self):\n # type: () -> float\n return self._right",
"def get_right(self):\n return self.__right",
"def right(self):\n return self._right",
"def right(self):\n return self._right",
"def right(self):\n return self.r",
"def right_edge(f: SwimmingFish) -> float:\n if f.dx > 0:\n return f.posn.x + f.fish.size\n else:\n return f.posn.x",
"def right(self):\n\n return self._right",
"def topRightCorner(self):\n self._updateExtents()\n return (self._mMaxX,self._mMinY)",
"def getMouseRight(self):\n return self.mouseData.rightActive",
"def getExceedingBoxRight(self):\n return self.exceedingBoxRight",
"def downright(self):\n return Coord([self.x + 1, self.y + 1])",
"def right_distance(self):\n return self.board.length - 1 - self.x",
"def left(self):\n return self.points['topLeft'].x",
"def left(self):\n return self.points['topLeft'].x",
"def downleft(self):\n return Coord([self.x - 1, self.y + 1])",
"def x(self):\n return self.left",
"def right(self):\n return self.__r",
"def right(self, node):\r\n if self._col(node.count) < self.width - 1:\r\n return self.nodes[node.count + 1]\r\n else:\r\n return None",
"def rightChild(self, pos):\n return (2 * pos) + 1",
"def rightChild(self, pos):\n return (2 * pos) + 1"
]
| [
"0.78304136",
"0.78304136",
"0.752309",
"0.71365297",
"0.71340376",
"0.7098199",
"0.6944949",
"0.6943818",
"0.6876187",
"0.6863866",
"0.6857044",
"0.6842752",
"0.68269175",
"0.68269175",
"0.6822077",
"0.67144597",
"0.6675918",
"0.66704386",
"0.6663189",
"0.6648827",
"0.6647672",
"0.65750074",
"0.6558969",
"0.6558969",
"0.65012425",
"0.6472909",
"0.64481217",
"0.6443038",
"0.64273036",
"0.64273036"
]
| 0.78578556 | 0 |
Return the y coordinate of the rectangle's top edge. | def _rect_top(self):
return max(self.y, self.y + self.h) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def top(self):\n return self.points['topRight'].y",
"def top(self):\n return self.points['topRight'].y",
"def y(self):\n return self.top",
"def top_y(self):\r\n return self.position.y + self.size.y + self.bulk",
"def get_y_position(self): \n return self.rect.y",
"def border_box_y(self):\n return self.position_y + self.margin_top",
"def get_y(self):\n\t\treturn self._collision_rect.y + 25",
"def top_distance(self):\n return self.y",
"def _rect_bottom(self):\n\treturn min(self.y, self.y + self.h)",
"def _get_y(self):\n return self.position.y",
"def get_y(self):\n return self.posY",
"def get_y(self):\n return self.coords[1]",
"def get_y_position(self):\n return self.actual_coordinates[1]",
"def getAbsoluteTop(self):\n return self.canvas.getAbsoluteTop()",
"def height(self):\n return self.upper_right.y - self.lower_left.y",
"def getY(self):\n return _libsbml.BoundingBox_getY(self)",
"def content_box_y(self):\n return self.position_y + self.margin_top + self.padding_top + \\\n self.border_top_width",
"def ymax(self):\n return self.bbox[1][1]",
"def padding_box_y(self):\n return self.position_y + self.margin_top + self.border_top_width",
"def bottom(self):\n return self.points['bottomRight'].y",
"def bottom(self):\n return self.points['bottomRight'].y",
"def topRightCorner(self):\n self._updateExtents()\n return (self._mMaxX,self._mMinY)",
"def y(self):\n return self._coords[1]",
"def y(self):\n return _libsbml.Point_y(self)",
"def get_pos_y(self):\n return self.__pos_y",
"def getYpos(self):\n return self.y",
"def y(self):\n return self.coords[1]",
"def top_height_px(self):\n return self.top_pieces * PipePair.PIECE_HEIGHT",
"def getY(self):\n return self.position[1]",
"def bottom(self):\n return self.top + self.height"
]
| [
"0.8013071",
"0.8013071",
"0.7716812",
"0.7624312",
"0.7223911",
"0.7139694",
"0.7086764",
"0.70743144",
"0.68563634",
"0.6853508",
"0.68473446",
"0.68470025",
"0.6838489",
"0.68368363",
"0.68299663",
"0.6772753",
"0.67421156",
"0.67209494",
"0.6692827",
"0.6545961",
"0.6545961",
"0.6531015",
"0.6528902",
"0.65162253",
"0.65145314",
"0.64947003",
"0.6479815",
"0.6476836",
"0.64735717",
"0.64532644"
]
| 0.8257693 | 0 |
Return the y coordinate of the rectangle's bottom edge. | def _rect_bottom(self):
return min(self.y, self.y + self.h) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def bottom(self):\n return self.points['bottomRight'].y",
"def bottom(self):\n return self.points['bottomRight'].y",
"def bottom_y(self):\r\n return self.position.y - self.size.y - self.bulk",
"def bottom(self):\n return self.top + self.height",
"def _rect_top(self):\n\treturn max(self.y, self.y + self.h)",
"def height(self):\n return self.upper_right.y - self.lower_left.y",
"def bottom_distance(self):\n return self.board.height - 1 - self.y",
"def bb_bottom(self) -> float:\n return self._bb_bottom",
"def get_y(self):\n\t\treturn self._collision_rect.y + 25",
"def get_y_position(self): \n return self.rect.y",
"def border_box_y(self):\n return self.position_y + self.margin_top",
"def bottom_right(self):\n return Point(self.right, self.bottom)",
"def y(self):\n return self.top",
"def bottom(self):\n return self._bottom",
"def bottom(self):\n # type: () -> float\n return self._bottom",
"def height(self):\n return self.y.max() - self.y.min()",
"def bottom(self):\n\n return self._bottom",
"def _get_y(self):\n return self.position.y",
"def height(self):\n return self.maxy - self.miny",
"def bottomRightCorner(self):\n self._updateExtents()\n return (self._mMaxX,self._mMaxY)",
"def top_y(self):\r\n return self.position.y + self.size.y + self.bulk",
"def bottom(self):\n return self.__b",
"def top(self):\n return self.points['topRight'].y",
"def top(self):\n return self.points['topRight'].y",
"def getExceedingBoxBottom(self):\n return self.exceedingBoxBottom",
"def bottom_height_px(self):\n return self.bottom_pieces * PipePair.PIECE_HEIGHT",
"def ymax(self):\n return self.bbox[1][1]",
"def get_y(self):\n return self.coords[1]",
"def bottom(self) -> float:\n points = self.get_adjusted_points()\n y_points = [point[1] for point in points]\n return min(y_points)",
"def getY(self):\n return _libsbml.BoundingBox_getY(self)"
]
| [
"0.7935119",
"0.7935119",
"0.7601335",
"0.7535251",
"0.73022354",
"0.7227902",
"0.71988606",
"0.71584624",
"0.7158076",
"0.71382874",
"0.7126317",
"0.70913297",
"0.70295787",
"0.6968889",
"0.69069374",
"0.6902811",
"0.6878907",
"0.6869381",
"0.67873216",
"0.67635727",
"0.67492706",
"0.67469203",
"0.674177",
"0.674177",
"0.6736869",
"0.67365766",
"0.6712105",
"0.67029595",
"0.6682646",
"0.6669189"
]
| 0.8172904 | 0 |
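A minimal consolidated sketch of the four rectangle-edge helpers from the preceding records (_rect_left, _rect_right, _rect_top, _rect_bottom): the min/max calls keep the edges correct even when w or h is negative. The Rect class name and sample values are assumptions for illustration.

from dataclasses import dataclass

@dataclass
class Rect:
    x: float
    y: float
    w: float
    h: float

    @property
    def left(self):
        return min(self.x, self.x + self.w)

    @property
    def right(self):
        return max(self.x, self.x + self.w)

    @property
    def top(self):
        return max(self.y, self.y + self.h)

    @property
    def bottom(self):
        return min(self.y, self.y + self.h)

r = Rect(x=4.0, y=3.0, w=-2.0, h=-1.0)
print(r.left, r.right, r.top, r.bottom)   # 2.0 4.0 3.0 2.0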
Compute weights minimizing Maximum Mean Discrepancy. X: samples matrix. target_X: target samples matrix. method: method of weights computation. | def _compute_weights(self, X, target_X, method='l2'):
if method == 'proj_l2' or method == 'proj_l2_nonorm':
#
# At first calculate unrestricted weights: (X.T)^-1
# Then project answer onto Unit simplex
#
target_center = np.mean(target_X, axis=0) # * X.shape[0]
# Solve the system
w = np.linalg.lstsq(X.T, target_center.T, rcond='warn')[0]
weights = project_onto_simplex(w, normalize=True if method == 'proj_l2' else False)
print(f"Weights sum: {np.sum(weights)}")
weights /= np.sum(weights)
return weights
#
# Pure solution, which make unrestricted weights
#
# Compute target center multiplied by number of source rows
# target_center = np.mean(target_X, axis=0) * X.shape[0]
# Solve the system
# print("X^T shape: ({}), target_center^T shape: ({})".format(X.T.shape, target_center.T.shape))
# w = np.linalg.lstsq(X.T, target_center.T, rcond='warn')[0]
# print(w)
# return w.T
if method == 'dist' or method == 'dist2':
print("Using distance weighting")
target_center = np.mean(target_X, axis=0)
residuals = X - target_center
norm = np.linalg.norm(residuals, axis=1)
print(f"Max norm: {np.max(norm)}")
if method == 'dist':
weights = np.max(norm) - norm # inverse weights
elif method == 'dist2':
small_eps = 1e-9
weights = 1.0 / (norm + small_eps)
weights = np.exp(weights) # softmax
print(f"Weights sum: {np.sum(weights)}")
weights /= np.sum(weights)
return weights
# Compute target center multiplied by number of source rows
target_center = np.mean(target_X, axis=0) # * X.shape[0]
# Solve the system
q = cp.Constant(value=target_center.flatten())
x_ = cp.Constant(value=X)
w = cp.Variable(X.shape[0])
# lam = self.optimization_lambda # 0.001
# M = len(J)
M = np.linalg.norm(X) ** 2 # target_X)
print("M:", M)
lam = self.reg_lambda # 0.1
if lam == 0:
print("No regularization")
# cp.norm2(cp.matmul(X, beta) - Y)**2
objective = cp.Minimize(cp.sum_squares(q.T - w.T @ x_)) # cp.Minimize(cp.sum_squares(q - x_ * w))
else:
objective = cp.Minimize(cp.sum_squares(q.T - w.T @ x_) / M + lam * cp.norm2(w)) # + lam * cp.norm2(w))
constraints = [w >= 0, cp.sum_entries(w) == 1] #, w >= self.simplex_lower_boundary]
prob = cp.Problem(objective, constraints)
print("Problem is prepared")
try:
result = prob.solve()
except Exception as ex:
print("Exception occurred: {}".format(ex))
print("Using SCS solver")
result = prob.solve(solver=cp.SCS, verbose=False)
print("Problem status: {}".format(prob.status))
try:
weights = w.value.A.flatten()
except Exception as ex:
print("Can't compute weights, use uniform distribution")
weights = np.ones((X.shape[0],)) / X.shape[0]
print(weights)
weights[weights < 0] = 0
weights_sum = np.sum(weights)
print("Weights sum: {}".format(weights_sum))
if weights_sum != 1.0: # probably always true
weights /= weights_sum
return weights | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _update_weights(self, xi, target):\n output = self.net_input(xi)\n error= (target - output)\n self.w_[1:] += self.eta * xi.dot(error)\n self.w_[0] += self.eta * error\n cost =0.5* error**2\n return cost",
"def _update_weights(self, xi, target):\n output = self.net_input(xi)\n error = (target - output)\n self.w_[1:] += self.eta * xi.dot(error)\n self.w_[0] += self.eta * error\n cost = 0.5 * error**2\n return cost",
"def _update_weights(self, xi, target):\n output = self.net_input(xi)\n error = (target - output)\n self.w_[1:] += self.eta * xi.dot(error)\n self.w_[0] += self.eta * error\n cost = 0.5 * error**2\n return cost",
"def _weights_for_target(self, target):\n\n self._update_global_transform(target)\n projected_target = self.global_transform.pseudoinverse().apply(target)\n # now we have the target in model space, project it to recover the\n # weights\n new_weights = self.model.project(projected_target)\n # TODO investigate the impact of this, could be problematic\n # the model can't perfectly reproduce the target we asked for -\n # reset the global_transform.target to what it CAN produce\n #refined_target = self._target_for_weights(new_weights)\n #self.global_transform.target = refined_target\n return new_weights",
"def fit(self, features, targets):\n w = np.array([1, 1, 1])\n old_w = np.array([42, 42, 42])\n itercount = 0\n while itercount < self.max_iterations:#not np.array_equal(w, old_w) or \n for itc, example in enumerate(features):\n this_target = targets[itc]\n one_example = np.insert(example, 0, 1)\n prediction = 1 if w.dot(one_example)*this_target >= 0 else -1\n if prediction < 0:\n old_w = w\n w = w + one_example * this_target\n itercount += 1\n print(\"learned weights are\", w)\n self.w = w",
"def maximum_mean_discrepancy(source_samples, target_samples, weight=1., minimum=0., **args):\n\n sigmas = [\n 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 5, 10, 15, 20, 25, 30, 35, 100,\n 1e3, 1e4, 1e5, 1e6\n ]\n gaussian_kernel = partial(gaussian_kernel_matrix, sigmas=sigmas)\n loss_value = mmd_kernel(source_samples, target_samples, kernel=gaussian_kernel)\n loss_value = tf.maximum(minimum, loss_value) * weight\n return loss_value",
"def gen_weights(self, f_target):\n\n # calculate x and psi\n x_track = self.cs.rollout()\n psi_track = self.gen_psi(x_track)\n\n # efficiently calculate BF weights using weighted linear regression\n self.w = jnp.zeros((self.n_dmps, self.n_bfs))\n for d in range(self.n_dmps):\n # spatial scaling term\n k = self.goal[d] - self.y0[d]\n for b in range(self.n_bfs):\n numer = jnp.sum(x_track * psi_track[:, b] * f_target[:, d])\n denom = jnp.sum(x_track ** 2 * psi_track[:, b])\n self.w[d, b] = numer / denom\n if abs(k) > 1e-5:\n self.w[d, b] /= k\n\n self.w = jnp.nan_to_num(self.w)",
"def maximum_mean_discrepancy(source_samples, target_samples, weight=1., minimum=0.):\n\n sigmas = [\n 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 5, 10, 15, 20, 25, 30, 35, 100,\n 1e3, 1e4, 1e5, 1e6\n ]\n gaussian_kernel = partial(gaussian_kernel_matrix, sigmas=sigmas)\n loss_value = mmd_kernel(source_samples, target_samples, kernel=gaussian_kernel)\n loss_value = tf.maximum(minimum, loss_value) * weight\n return loss_value",
"def forward_pass(X, target_Y, W):\n\n pred_Y = activation(weighted_sum(W, X))\n print(\"\\tI/P:\", X, \" O/P:\", target_Y, \" W:\", W, \" W_Sum:\", round(weighted_sum(W, X), 3))\n\n if pred_Y != target_Y:\n for j in range(len(W)):\n W[j] = update_weight(W[j], pred_Y, target_Y, X[j])\n\n return W",
"def maximum_mean_discrepancy(source_samples, target_samples, weight=1., minimum=0., **_kwargs):\n\n sigmas = [\n 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 5, 10, 15, 20, 25, 30, 35, 100,\n 1e3, 1e4, 1e5, 1e6\n ]\n gaussian_kernel = partial(_gaussian_kernel_matrix, sigmas=sigmas)\n loss_value = _mmd_kernel(source_samples, target_samples, kernel=gaussian_kernel)\n loss_value = tf.maximum(minimum, loss_value) * weight\n return loss_value",
"def ml_weights(inputs, targets):\n Phi = np.matrix(inputs)\n targets = np.matrix(targets).reshape((len(targets),1))\n weights = linalg.inv(Phi.transpose()*Phi)*Phi.transpose()*targets\n return np.array(weights).flatten()",
"def compute_cost_function(target: 'xr.DataArray',\n weights_matrix: 'xr.DataArray',\n performance_sigma: float) -> float:\n percentiles = PERCENTILES\n inside_ratio_reference = percentiles[1] - percentiles[0]\n\n # calculate the equally weighted case once as baseline\n if len(confidence_test_values['baseline']) == 0:\n percentiles_spread, inside_ratio = calculate_percentiles(\n target, weights_matrix, percentiles, weighted=False)\n confidence_test_values['baseline']['percentile_spread'] = (\n percentiles_spread)\n confidence_test_values['baseline']['inside_ratio'] = inside_ratio\n\n percentiles_spread, inside_ratio = calculate_percentiles(\n target, weights_matrix, percentiles)\n confidence_test_values[performance_sigma] = {\n 'percentile_spread': percentiles_spread,\n 'inside_ratio': inside_ratio\n }\n\n difference = inside_ratio - inside_ratio_reference\n\n if difference < 0: # overconfident\n return 99 - difference\n return performance_sigma",
"def negSamplingCostAndGradient(predicted, target, outputVectors, dataset,\n K=10):\n\n # Sampling of indices is done for you. Do not modify this if you\n # wish to match the autograder and receive points!\n indices = [target]\n indices.extend(getNegativeSamples(target, dataset, K))\n\n ### YOUR CODE HERE\n #The cost now is maximize the predicted and target pair,\n #minimizing the predicted and the sampled pairs\n cost = 0\n gradPred = np.zeros(predicted.shape[0])\n grad = np.zeros(outputVectors.shape)\n\n for indice in indices:\n target_vector = outputVectors[indice]\n vector_product = np.dot(target_vector,predicted)\n if indice != target:\n cost += -np.log(sigmoid(-vector_product))\n gradPred += sigmoid(vector_product)*target_vector\n # use += in case of repeatance\n grad[indice,:] += sigmoid(vector_product)*predicted\n else:\n cost += -np.log(sigmoid(vector_product))\n gradPred += (sigmoid(vector_product) - 1)*target_vector\n grad[indice,:] += (sigmoid(vector_product)-1)*predicted\n ### END YOUR CODE\n #print indices\n\n return cost, gradPred, grad",
"def computeDiff(self, input, target):\n\t\tdiffWeight = [np.zeros(weight.shape) for weight in self.weights]\n\t\tdiffBias = [np.zeros(bias.shape) for bias in self.biases]\n\n\t\t# Forward\n\t\t# layerSum contents all the result of nodes\n\t\t# layerAct = fActivation(layerSum)\n\t\tlayerSum = []\n\t\tlastRes = input\n\t\tlayerAct = [lastRes]\n\t\tfor layer in range(self.layersNumber):\n\t\t\tlayerRes = np.dot(self.weights[layer], lastRes) + self.biases[layer]\n\t\t\tlastRes = fActivation(layerRes)\n\t\t\tlayerSum.append(layerRes)\n\t\t\tlayerAct.append(lastRes)\n\n\t\tclassified = False\n\t\tif (np.argmax(lastRes) == np.argmax(target)) :\n\t\t\tclassified = True\n\n\t\t# Backward\n\t\tdiffError = sum(fCost(lastRes, target))\n\t\tdelta = dCost(lastRes, target) * dActivation(lastRes)\n\t\tdiffBias[-1] = delta\n\t\tdiffWeight[-1] = np.dot(delta, layerAct[-2].transpose())\n\t\tfor layer in reversed(range(self.layersNumber-1)):\n\t\t\tdelta = np.dot(self.weights[layer+1].transpose(), delta) *\\\n\t\t\t\tdActivation(layerSum[layer])\n\t\t\tdiffBias[layer] = delta\n\t\t\tdiffWeight[layer] = np.dot(delta, layerAct[layer].transpose())\n\n\t\treturn diffWeight, diffBias, diffError, classified",
"def fit(self, X: np.ndarray, y: np.ndarray, weights: np.ndarray) -> None:\n if not len(X) == len(y) == len(weights):\n raise ValueError(\"First dimension of arguments must be equal.\")\n if abs(weights).sum() == 0:\n raise ValueError(\"Weights must not be all 0.\")\n\n best_error = np.inf\n best_indices: Tuple[int, int] = (0, 0)\n for i in range(len(X)):\n for j in range(X.shape[1]):\n left_indices = X[:, j] < X[i, j]\n right_indices = np.logical_not(left_indices)\n left_weights = weights[left_indices]\n right_weights = weights[right_indices]\n left_y = y[left_indices]\n right_y = y[right_indices]\n\n error = (\n left_weights[left_y != -1].sum()\n + right_weights[right_y != -1].sum() # THIS IS CORRECT\n )\n error = error / weights.sum()\n if error < best_error:\n best_error = error\n best_indices = (i, j)\n\n self.threshold = X[best_indices]\n self.feature = best_indices[1]",
"def calculate_cost(x, y, weights):\r\n predictions = compute_prediction(x, weights)\r\n cost = np.mean(-y * np.log(predictions) - (1 - y) * np.log(1 - predictions))\r\n return cost",
"def optimize_reference(self, num_targets):\n \n target_weights = np.ones(num_targets)\n target_weights = target_weights/sum(target_weights)\n \n #target_weights = [0.8,0.2]\n\n x0 = np.asarray(np.insert(np.zeros(self._refs.shape[0]-len(target_weights)),0,target_weights))\n print(x0)\n \n # num_weights = self._refs.shape[0]\n # x0 = np.ones(num_weights)/sum(np.ones(num_weights))\n\n #x0 = np.asarray([0.8,0.2,0,0,0,0,0,0])\n\n assert num_targets >= 1, 'num_targets >= 1'\n assert num_targets <= len(x0), 'num_targets < len(x0)'\n\n\n lsq = sciopt.least_squares(self.residual, \n x0, \n #args=(np.reshape(self._true, (1,36)), num_targets), \n args=(self._true, num_targets), \n bounds=(0,1), \n method='trf', \n tr_solver='lsmr', \n diff_step=0.1, \n loss='soft_l1',\n tr_options={'damp':1.0},\n ftol=1e-6,\n verbose=2) #diff_step=0.2, verbose=2)#, loss='linear')\n\n return (lsq.x/sum(lsq.x), lsq)",
"def learn(self, Xtrain, ytrain):\n pass\n self.weights = np.zeros(Xtrain.shape[1],)\n\n ### YOUR CODE HERE\n \n lmbd = self.params['lamb']\n \n numsamples = Xtrain.shape[0]\n # Xless = Xtrain[:,self.params['features']]\n Xless = Xtrain\n self.weights = np.random.rand(Xless.shape[1])\n err = 10000;\n #cw =0;\n tolerance = 10*np.exp(-4)\n i=0;\n \n \n w1 = self.weights\n # cw_v =(np.dot(Xless, self.weights)-ytrain)\n #cw = (np.linalg.norm(cw_v)**2)/(2*numsamples)\n cw_v = np.dot(Xless, self.weights.T)\n cw = self.logit_cost(cw_v, Xless, ytrain) + lmbd * self.regularizer[0](self.weights)\n # print(cw)\n errors = []\n runtm = []\n epch = []\n \n err = 1\n iteration= 1000\n #tm= time.time()\n while (abs(cw-err)>tolerance) and (i <iteration):\n err = cw\n g = self.logit_cost_grad(cw_v, Xless, ytrain)\n obj = cw\n j=0\n ita = -1* self.params['stepsize']\n w = self.weights\n # w1 = np.add(w,np.dot(ita,g))\n while(j<iteration):\n w1 = np.add(w,np.dot(ita,g))\n # cw_v =(np.dot(Xless, w1)-ytrain)\n # cw = (np.linalg.norm(cw_v)**2)/(2*numsamples)\n cw_v = np.dot(Xless, w1.T)\n cw = self.logit_cost(cw_v, Xless, ytrain)+lmbd * self.regularizer[0](w1)\n ## print (cw)\n \n if(cw<np.absolute(obj-tolerance)): ############################################\n break\n ita = 0.7*ita\n j=j+1\n \n if(j==iteration):\n self.weights=w\n ita =0\n else:\n self.weights = w1\n \n # cw_v =(np.dot(Xless, self.weights)-ytrain)\n #cw = (np.linalg.norm(cw_v)**2)/(2*numsamples)\n cw_v = np.dot(Xless, self.weights.T)\n cw = self.logit_cost(cw_v, Xless, ytrain)\n #tm1 = time.time()-tm\n #runtm.append(tm1)\n #err = cw\n errors.append(err)\n i=i+1\n epch.append(i)",
"def negSamplingCostAndGradient(predicted, target, outputVectors, dataset,\n K=10):\n\n # Sampling of indices is done for you. Do not modify this if you\n # wish to match the autograder and receive points!\n indices = [target]\n indices.extend(getNegativeSamples(target, dataset, K))\n\n ### YOUR CODE HERE\n # Similar to softmax, we find the target matrix\n # v_c structured as 1xD matrix\n # u_o assume to be 1xD matrix\n # u_k assume to be K x D matrix\n # we pull the data assuming that each row represent one vector\n v_c = predicted\n u_o = outputVectors[target]\n u_k = outputVectors[indices]\n\n\n # The intermediary matrix outputs\n # z_o, h_o: single scalar number\n # z_k, h_k: K x 1 vector, wich each number associated with a neg sample\n z_o = np.dot(u_o, v_c)\n h_o = sigmoid(z_o)\n z_k = np.dot(u_k, v_c)\n h_k = sigmoid( - z_k)\n\n J_1 = - np.log(h_o)\n J_2 = - np.sum( np.log(h_k) )\n cost = J_1+ J_2\n\n # Return the gradient for the prediction function\n # the prediction vector interacts with both the predicted vector\n # the negative sample vectors so below are both parts of the gradient\n # here we are trying to increase the prediction matrix to maximize\n # the similarity with the predicted vector\n # output is a 1 x D matrix\n grad_pred_o = - (1 - h_o)*u_o\n\n # the second part is tyring to decrease\n # similarity with the negative sample vectors\n # K x 1 multiply be input is a k x D matrix, we will need to sum all negative samples\n # along the rows. output is a 1 x D matrix\n # reshape h_k so that it can multiple\n grad_pred_k = np.dot(( 1 - h_k).T, u_k)\n # find the predicted matrix gradient\n # output is a 1 x D matrix\n gradPred = grad_pred_o + grad_pred_k\n\n\n # Return the gradient of the output vector\n # create a matrix the same shape as outputVector\n grad = np.zeros(outputVectors.shape)\n # first find the gradient wrt to the target output vector\n # here we want to increase the similarity between\n # the target output vector and the center vector\n # outputs is a 1 x D matrix\n grad_u_o = - (1-h_o)*v_c\n\n # print('***************grad_u_o************')\n # print(grad_u_o)\n # print(grad_u_o.shape)\n # replace the target row in output vector gradient\n grad[target, ] = grad_u_o\n # then find the gradient descent of all the u_k matrices\n # K x 1 matrix multiply by 1 x 3\n # K x D\n grad_uk = - np.outer((h_k - 1), v_c)\n # print('***************grad_uk************')\n # print(grad_uk)\n # for each token (row) replace gradient\n for k in xrange(u_k.shape[0]):\n index = indices[k]\n grad[index] += grad_uk[k]\n\n ### END YOUR CODE\n return cost, gradPred, grad",
"def adjust_weights(weights, target, learn_rate):\r\n\r\n for w in range(0, len(target)):\r\n weights[w] += learn_rate * (target[w] - weights[w])",
"def fit(self, X, y):\n # X = as_float_array(X)\n # X, y = check_array(X, dtype=np.float64)\n if not isinstance(X, sp.csr_matrix):\n X = sp.csr_matrix(X)\n\n self._validate_params()\n\n self.t_ = 1.0\n self.max_target_ = y.max()\n self.min_target_ = y.min()\n\n # convert member variables to ints for use in cython\n k0 = self._bool_to_int(self.k0)\n k1 = self._bool_to_int(self.k1)\n shuffle_training = self._bool_to_int(self.shuffle_training)\n verbose = self._bool_to_int(self.verbose)\n learning_rate_schedule = self._get_learning_rate_type(self.learning_rate_schedule)\n task = self._get_task(self.task)\n\n # use sklearn to create a validation dataset for lambda updates\n if self.verbose:\n print(\"Creating validation dataset of %.2f of training for adaptive regularization\"\n % self.validation_size)\n X_train, validation, train_labels, validation_labels = cross_validation.train_test_split(\n X, y, test_size=self.validation_size, random_state=self.seed)\n\n self.n_features_ = X_train.shape[1]\n\n # Convert datasets to sklearn sequential datasets for fast traversal\n X_train_dataset = _make_dataset(X_train, train_labels)\n validation_dataset = _make_dataset(validation, validation_labels)\n\n # Set up params\n self.w0 = 0.0\n self.w = np.zeros(self.n_features_, dtype=np.float64)\n rng = np.random.RandomState(self.seed)\n self.v = rng.normal(scale=self.init_stdev,\n size=(self.num_factors, self.n_features_)).astype(np.float64)\n\n self.fm_fast = FM_fast(self.w,\n self.v,\n self.num_factors,\n self.n_features_,\n self.num_iter,\n k0,\n k1,\n self.w0,\n self.t_,\n self.t0,\n self.power_t,\n self.min_target_,\n self.max_target_,\n self.eta0,\n learning_rate_schedule,\n shuffle_training,\n task,\n self.seed,\n verbose)\n\n self.fm_fast.fit(X_train_dataset, validation_dataset)\n return self",
"def compute_weights(self):\n # Do the leave-one-out experiments\n loocv = np.zeros((self.M, self.nump))\n for i in range(self.M):\n for j in range(self.nump):\n loocv[i, j] = self.surrogate_list[i][j].eval(self.x[j, :])\n\n # Compute the model characteristics\n corr_coeff = np.ones(self.M)\n for i in range(self.M):\n corr_coeff[i] = np.corrcoef(np.vstack(\n (loocv[i, :], self.get_fx().flatten())))[0, 1]\n\n root_mean_sq_err = np.ones(self.M)\n for i in range(self.M):\n root_mean_sq_err[i] = 1.0 / math.sqrt(\n self._mean_squared_error(self.get_fx().flatten(), loocv[i, :]))\n\n mean_abs_err = np.ones(self.M)\n for i in range(self.M):\n mean_abs_err[i] = 1.0 / self._mean_abs_err(\n self.get_fx().flatten(), loocv[i, :])\n\n # Make sure no correlations are negative\n corr_coeff[np.where(corr_coeff < 0.0)] = 0.0\n if np.max(corr_coeff) == 0.0:\n corr_coeff += 1.0\n\n # Normalize the test statistics\n corr_coeff /= np.sum(corr_coeff)\n root_mean_sq_err /= np.sum(root_mean_sq_err)\n mean_abs_err /= np.sum(mean_abs_err)\n\n # Create mass functions based on the model characteristics\n m1 = self._prob_to_mass(corr_coeff)\n m2 = self._prob_to_mass(root_mean_sq_err)\n m3 = self._prob_to_mass(mean_abs_err)\n\n # Compute pignistic probabilities from Dempster-Shafer theory\n pignistic = m1.combine_conjunctive([m2, m3]).to_dict()\n self.weights = np.ones(self.M)\n for i in range(self.M):\n self.weights[i] = pignistic.get(str(i+1))",
"def compute_test_best(self,\n x: Array2D,\n target: float) -> Tuple[float, bool]:",
"def propagate(w, b, X, Y):\n\n m = X.shape[1];\n\n # A.shape should be (1, m), m -- number of training examples\n A = sigmoid(np.dot(w.T, X) + b);\n # print(\"A = \" +str(A))\n\n # single training exmaple cost = - (y * log(A) + (1-y) * log(1-A))\n # cost = (-1/m) * (np.dot(Y, np.log(A).T) + np.dot((1-Y), np.log(1-A).T));\n cost = (-1/m)*(np.dot(Y, np.log(A).T) + np.dot((1-Y), np.log(1-A).T));\n # print(\"cost in function: \" +str(cost))\n\n # Backward propagation \n dw = (1/m) * np.dot(X, (A - Y).T)\n # np.sum() : axis = 0 means along the column and axis = 1 means working along the row.\n db = (1/m) * ((A - Y).sum(1))\n\n # dw.shape should be (num_px * nump_px * 3, 1), db should be a scalar\n assert(dw.shape == w.shape)\n assert(db.dtype == float)\n\n # np.squeeze(arr) -- remove one dimentional entry from the shape of given array. \n # -- arr: input array\n cost = np.squeeze(cost)\n assert(cost.shape == ())\n \n grads = {\"dw\": dw, \"db\": db}\n \n return grads, cost",
"def softmax(self,Weights,X,b):\n N = X.shape[0]\n D = X.shape[1]\n C = Weights.shape[1]\n \n #P = np.zeros((N,C))\n #print P.shape\n\n P1 = np.dot(X,Weights) + b\n P1 = np.exp(P1)\n \n for i in range(N):\n P1[i,:] = P1[i,:]/P1[i,:].sum()\n # print P1\n return P1",
"def cost_func(w, X, y):\n y_pred = np.dot(X, w)\n err = np.sum(np.square(y_pred - y)) / (2 * len(y))\n\n return err",
"def objective(self,w):\n l = 0\n for i in range(len(self.x)):\n # Each example contributes log(sigma(y_i * x_i . w))\n l -= log(sigmoid(self.y[i] * np.dot(w, self.x[i,:])))\n # regularisation 1/2 * alpha * ||w||^2\n l += 0.5 * self.alpha * np.dot(w,w)\n return l",
"def _update_samples_weight(self):\n m, n = 0, self.u.shape[0]\n T = self.u.shape[1]\n N = n + T\n d_0 = matrix(self.d_0.reshape(n, 1))\n\n # Linear Inequallity Constraints, Gx <= h\n G = matrix(-1 * np.eye(N))\n h = matrix(np.zeros(shape=(N, 1)))\n\n # Linear Equality Constraints, Ax = b\n A = matrix(np.concatenate((np.ones(shape=(T, 1)), np.zeros(shape=(n, 1))), axis=0).T)\n b = matrix(1.0)\n\n def F(x=None, z=None):\n if x is None: return 0, matrix(0.5, (N, 1))\n w = x[:T, :]\n phi = x[T:, :]\n reg_inv = 1 / self.reg\n\n weighted_u = np.dot(self.u, w) # n x 1\n scores = -1 * reg_inv * (weighted_u + phi) # n x 1\n\n # Numeric correction\n scores -= max(scores)\n\n # Auxilliaries\n weighted_scores_exp = np.multiply(d_0, np.exp(scores))\n sum_weighted_scores_exp = np.sum(weighted_scores_exp)\n sum_weighted_scores_exp_square = sum_weighted_scores_exp ** 2\n squared_weighted_scores_exp = np.square(weighted_scores_exp)\n weighted_scores_exp_mults = np.dot(weighted_scores_exp, weighted_scores_exp.T)\n uw_mult = np.multiply(self.u, weighted_scores_exp)\n uw_mult_sum = np.sum(np.multiply(self.u, weighted_scores_exp), axis=0)\n\n f = self.reg * np.log(sum_weighted_scores_exp) + self.kappa * np.sum(phi) # f(x)\n\n dfdw = -1 * uw_mult_sum.T / sum_weighted_scores_exp\n dfdphi = (-1 * weighted_scores_exp / sum_weighted_scores_exp) + self.kappa\n Df = np.concatenate((dfdw, dfdphi), axis=0) # Gradient\n\n mf = matrix(f)\n mDf = matrix(Df.T)\n if z is None:\n return mf, mDf\n # Assumes d_0 is uniform\n H = np.zeros(shape=(N, N)) # Hessian\n dfdwiwi = np.zeros(shape=(T, 1))\n dfdphiiphij = -1 * reg_inv * (np.tril(weighted_scores_exp_mults)) / sum_weighted_scores_exp_square\n dfdphiiphii = reg_inv * (np.multiply(weighted_scores_exp,\n sum_weighted_scores_exp - weighted_scores_exp) / sum_weighted_scores_exp_square)\n # dfdwiwj, dfwiphij are zeros\n dfdphiiwj = reg_inv * ((\n uw_mult * sum_weighted_scores_exp - weighted_scores_exp * uw_mult_sum) / sum_weighted_scores_exp_square)\n\n H[T:, T:] = dfdphiiphij\n H[T:, :T] = dfdphiiwj\n H_diagonal = np.concatenate((dfdwiwi, dfdphiiphii), axis=0)\n np.fill_diagonal(H, H_diagonal)\n\n mH = matrix(z[0] * H)\n return mf, mDf, mH\n\n prev_w = self.w\n prev_slacks = self.slacks\n try:\n wphi = solvers.cp(F, G=G, h=h, A=A, b=b)['x']\n self.w = wphi[:T, :]\n self.slacks = wphi[T:, :]\n except Exception as e: # Catch rank errors and continue to next iteration\n self.slacks = prev_slacks\n self.w = prev_w\n try:\n self.w = np.concatenate((self.w, [[1 / (len(self.w) + 1)]]), axis=0)\n except:\n self.w = np.concatenate((self.w, [1 / (len(self.w) + 1)]), axis=0)\n self.w /= np.sum(self.w)\n\n scores = ((-1 / self.reg) * np.squeeze(np.asarray(np.dot(self.u, self.w) + self.slacks))) + np.log(\n self.d_0) # Update according to Equation (6)\n return self.softmax(scores)",
"def _update(self, x: np.ndarray, y: int):\n decision = self.weights.dot(x)\n v_t = x @ np.diag(np.diag(self._sigma)) @ x.T\n m_t = y * decision\n loss = (self._phi * math.sqrt(v_t) - m_t)\n #print(loss)\n if loss > 0:\n # We scale our learning rate (alpha) using the weight/cost\n alpha_t = self.class_weight_[y] * self._get_alpha(m_t, v_t)\n u_t = 0.25 * (-alpha_t * v_t * self._phi + math.sqrt(\n alpha_t ** 2 * v_t ** 2 * self._phi ** 2 + 4 * v_t)) ** 2\n beta_t = alpha_t * self._phi / (math.sqrt(u_t) +\n alpha_t * self._phi * v_t)\n sigma = np.expand_dims(x @ self._sigma, axis=0)\n self.weights += alpha_t * y * np.squeeze(sigma)\n self._sigma -= beta_t * sigma.T @ sigma",
"def apply_weights(self):\n return self.X.dot(self.get_weights())"
]
| [
"0.6762532",
"0.6708177",
"0.6708177",
"0.6503242",
"0.6470267",
"0.6403376",
"0.63818663",
"0.6355758",
"0.628095",
"0.6246446",
"0.6223636",
"0.62204105",
"0.6191204",
"0.61547375",
"0.6106272",
"0.6090991",
"0.6075102",
"0.60460854",
"0.6040239",
"0.603885",
"0.60202414",
"0.60014683",
"0.59903204",
"0.5986103",
"0.596282",
"0.59317124",
"0.5921624",
"0.5921293",
"0.5917694",
"0.5914632"
]
| 0.7669641 | 0 |
Returns the formula string, but with elements anonymized by atom type using a greater number of anonymizations than the normal fuzzy_formula method. e.g., Y2SiO5 becomes Tm2X1Y5 | def get_fuzzy_formula_strict(composition):
x = defaultdict(float)
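    # Accumulate element amounts into anonymized buckets keyed by atom type
    # (e.g. alkali -> "A", transition metal -> "Tm", metalloid -> "X", halogen -> "Z").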
for e in composition.elements:
if e.is_lanthanoid:
x["Ln"] += composition[e]
elif e.is_actinoid:
x["Ac"] += composition[e]
elif e.is_alkali:
x["A"] += composition[e]
elif e.is_alkaline:
x["B"] += composition[e]
elif e.is_transition_metal:
x["Tm"] += composition[e]
elif e.is_metalloid:
x["X"] += composition[e]
elif e.Z in [13, 31, 49, 50, 81, 82, 83]:
x["C"] += composition[e]
elif e.Z in [6, 7, 8, 15, 16, 34]:
x["Y"] += composition[e]
elif e.is_halogen:
x["Z"] += composition[e]
elif e.is_noble_gas:
x["Nb"] += composition[e]
else:
x["U"] += composition[e]
c = Composition(x).get_reduced_composition_and_factor()[0]
my_form=""
for label in ["A", "B", "C", "Tm", "Ln", "Ac", "X", "Y", "Z", "Nb", "U"]:
if c[label]:
amt = c[label] if c[label] != int(c[label]) else int(c[label])
my_form += "{}{}".format(label, amt)
return my_form | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_fuzzy_formula(composition):\n x = defaultdict(float)\n\n for e in composition.elements:\n if e.is_lanthanoid or e.is_actinoid:\n x[\"Re\"] += composition[e]\n elif e.is_alkali or e.is_alkaline:\n x[\"A\"] += composition[e]\n elif e.is_transition_metal:\n x[\"Tm\"] += composition[e]\n elif e.Z in [13, 31, 49, 50, 81, 82, 83]:\n x[\"B\"] += composition[e]\n elif e.is_metalloid or e.Z in [6, 7, 8, 15, 16, 34] or e.is_halogen:\n x[\"Y\"] += composition[e]\n elif e.is_noble_gas:\n x[\"Nb\"] += composition[e]\n else:\n x[\"U\"] += composition[e]\n\n c = Composition(x).get_reduced_composition_and_factor()[0]\n\n my_form=\"\"\n for label in [\"A\", \"B\", \"Tm\", \"Re\", \"Y\", \"Nb\", \"U\"]:\n if c[label]:\n amt = c[label] if c[label] != int(c[label]) else int(c[label])\n my_form += \"{}{}\".format(label, amt)\n\n return my_form",
"def format_formula(self):\n label_formula = re.sub(\"([0-9])\", \"_\\\\1\", self.formula)\n label_formula = '$\\mathregular{'+label_formula+'}$'\n return label_formula",
"def getFormula(self):\n dataDict = self.__dict__\n # get formula dictionary\n dd = {}\n for ca in self.chemAtoms:\n if isinstance(ca, ChemAtom):\n ss = ca.elementSymbol\n ii = dd.get(ss)\n if ii is None:\n dd[ss] = 1\n else:\n dd[ss] = ii + 1\n \n # generate formula\n ll = [None] * (len(dd)*2)\n next = 0\n for ss in ('C', 'H'):\n ii = dd.get(ss)\n if ii is not None:\n ll[next] = ss\n ll[next+1] = str(ii)\n del dd[ss]\n next = next + 2\n \n items = dd.items()\n items.sort()\n for ss, ii in items:\n ll[next] = ss\n ll[next+1] = str(ii)\n next = next + 2\n #\n result = ''.join(ll)\n return result",
"def clean_formula(formula: str) -> str:\n formula_splitted = COLUMN_PATTERN.split(formula)\n return ''.join(clean_formula_element(elem) for elem in formula_splitted)",
"def phonetize(accented_spell: str) -> str:\n result = accented_spell\n for t in phon_transforms:\n result = t.apply_to(result)\n return result",
"def __str__(self):\n value = []\n for term in self.terms:\n value += [term.termname]\n return '<formula: %s>' % ' + '.join(value)",
"def __str__(self):\n value = []\n for term in self.terms:\n value += [term.termname]\n return '<formula: %s>' % ' + '.join(value)",
"def rewrite(self, axiom) -> str:\n return \"\".join(self.rules.get(c, c) for c in axiom)",
"def synthesize_for_model(model: Model) -> Formula:\r\n # the idea is -> first step put the var or ~var\r\n # than each time do - > add '(' at first\r\n # '(' + the_string '&' + the_new_string + ')'\r\n \"\"\"\r\n We solve this equation by using CNF.\r\n every var that is false we doing ~var, and connecting all the var by '&'\r\n and this will provide us with formula which is true just \r\n for the given model\r\n \"\"\"\r\n assert is_model(model)\r\n # Task 2.6\r\n first = True\r\n str_formula = \"\"\r\n for key, value in model.items():\r\n if first:\r\n first = False\r\n if not value:\r\n str_formula += '~'\r\n str_formula += key\r\n else:\r\n str_formula = \"(\" + str_formula + \"&\"\r\n if not value:\r\n str_formula += '~'\r\n str_formula += key\r\n str_formula += \")\"\r\n # creating a list, that list[0] contain the string, because that what\r\n # list_to_string function is required\r\n list_of_string = list()\r\n list_of_string.append(str_formula)\r\n return str_to_form(list_of_string)",
"def create_automaton(formula, predicates):\n \n if \"--spot\" in sys.argv:\n spot = True\n else:\n spot = False\n if \"--rabit\" in sys.argv:\n rabit = True\n else:\n rabit = False\n \n stack=[]\n atom=[]\n first=True \n for element in formula:\n if element!=\")\":\n stack.append(element)\n else:\n atom.append(element)\n # pop everything to '(' and add to atom\n while(stack[-1]!=\"(\"):\n atom.append(stack.pop())\n atom.append(stack.pop())\n atom.reverse()\n error=False\n\n # user-defined predicates\n if atom[1] in predicates.keys():\n a = deepcopy(predicates[atom[1]][1])\n for i in range(predicates[atom[1]][0]):\n for j in range(len(a.transitions)):\n a.transitions[j][1] = a.transitions[j][1].replace(\"#\"+str(i+1), atom[i+2])\n new_alphabet = set()\n for symbol in a.alphabet:\n new = symbol.replace(\"#\"+str(i+1), atom[i+2])\n new_alphabet.add(new)\n a.alphabet = deepcopy(new_alphabet)\n\n # operations with automata\n elif atom[1]==\"exists\":\n if not (isinstance(atom[3], Automaton)):\n error=True\n else:\n a=exists(atom[2],atom[3])\n elif atom[1]==\"forall\":\n if not (isinstance(atom[3], Automaton)):\n error=True\n else:\n a = atom[3]\n if spot:\n a = spot_complement(a)\n else:\n a=comp2(a)\n a = exists(atom[2], a)\n if rabit:\n a = rabit_reduction(a)\n if spot:\n a = spot_complement(a)\n else:\n a=comp2(a)\n elif atom[1]==\"and\":\n if not (isinstance(atom[2], Automaton) and isinstance(atom[3], Automaton)):\n error=True\n else:\n a=intersection(atom[2],atom[3])\n elif atom[1]==\"or\":\n if not (isinstance(atom[2], Automaton) and isinstance(atom[3], Automaton)):\n error=True\n else:\n a=union(atom[2],atom[3])\n elif atom[1]==\"neg\":\n if not (isinstance(atom[2], Automaton)):\n error=True\n else:\n a = atom[2]\n if spot:\n a = spot_complement(a)\n else:\n a=comp2(a)\n elif atom[1]==\"implies\":\n if not (isinstance(atom[2], Automaton) and isinstance(atom[3], Automaton)):\n error=True\n else:\n a = atom[2]\n if spot:\n a = spot_complement(a)\n else:\n a = comp2(a)\n if rabit:\n a = rabit_reduction(a)\n a=union(a, atom[3])\n\n # atomic automata\n elif atom[1]==\"zeroin\":\n a=zeroin(atom[2])\n elif atom[1]==\"sing\":\n a=sing(atom[2])\n elif atom[1]==\"sub\":\n a=sub(atom[2],atom[3])\n elif atom[1]==\"succ\":\n a=succ(atom[2],atom[3])\n elif atom[1]==\"<\":\n a=less(atom[2],atom[3])\n \n else:\n if (not first) or len(atom)!=4:\n raise SyntaxError('Invalid form of input formula near \"{}\".'.format(' '.join(map(str,atom))))\n if isinstance(atom[2], Automaton) or isinstance(atom[3], Automaton):\n raise SyntaxError('Invalid form of input formula near \"{}\".'.format(atom[1]))\n\n # arguments of succ or sub can be in parentheses\n atom.remove('(')\n atom.remove(')')\n atom.reverse()\n for i in range(len(atom)):\n stack.append(atom[len(atom)-i-1])\n atom=[]\n first=False\n continue\n\n if error:\n raise SyntaxError('Invalid form of input formula near \"{}\".'.format(atom[1]))\n stack.append(a)\n first=True\n atom=[]\n\n # reduction\n if rabit:\n a = rabit_reduction(a)\n\n return a",
"def generate_analogy(word1, word2, word3, model):\n LoM = model.most_similar(positive=[word2, word3], negative=[word1], topn=100)\n return LoM",
"def str_all_completions(self, maxn=10, use_precompute=True) -> str:\n rows = [f\"{self.weight} -- {self.value} -- {self.str_mks()}\"]\n if self._info is not None:\n rows.append(\"------------------\")\n for el in self._info._log_imp:\n rows.append(str(el))\n for i in range(len(self.value)):\n prefix = self.value[:i]\n rows.append(\"------------------\")\n rows.append(f\"i={i} - {prefix}\")\n completions = self._info._completions.get(prefix, [])\n for i2, el in enumerate(completions):\n ar = \" \" if el.value != self.value else \"-> \"\n add = \"{5}{0}:{1} -- {2}{4}-- {3}\".format(\n i2,\n el.weight,\n el.value,\n el.str_mks(),\n \" \" * (20 - len(el.value)),\n ar,\n )\n rows.append(add)\n else:\n rows.append(\"NO INFO\")\n return \"\\n\".join(rows)",
"def get_model_formula(self):\n\n if self._response_variable and self._explanatory_variables[0]:\n\n model_formula = self._response_variable + ' ~ ' + ' + '.join(self._explanatory_variables)\n\n else:\n\n model_formula = None\n\n return model_formula",
"def _group2formula(self, elem_dict):\n formula = \"\"\n for key, value in elem_dict:\n formula += \"{}{}\".format(key, value)\n return formula",
"def get_formula(oncat_formula):\n regular_expression = r'\\$(?P<formula>.+)\\`'\n m = re.search(regular_expression, oncat_formula)\n if m:\n return m.group('formula')\n else:\n return \"\"",
"def antonym(self, sense=0):\n s = self._synset(self.text, sense=sense)\n\n if not s:\n return []\n\n lemmas = s.lemmas()\n\n result = list()\n\n for lemma in lemmas:\n if lemma.antonyms():\n result.append(lemma.antonyms()[0].name())\n\n return result if result else []",
"def sanitize(formula):\n TR_UNICODE = {u\"·\": u\"*\", u\"−\": u\"-\"}\n return \"\".join(TR_UNICODE.get(ch, ch) for ch in formula if ch > \" \")",
"def fuzz(text):\r\n\r\n return ' '.join([fuzz_word(word) for word in text.split()])",
"def atoms(formula):\n pass\n # ======== YOUR CODE HERE ========",
"def simplify(self, text: str):\n # sanitise the input, remove common words and swap out similar words\n text = self.sanitise(text)\n # Decided not to use stopwords because that would take out too many and then get false positives\n common_words = [\"what\", \"i\", \"you\", \"do\", \"a\", \"thing\", \"of\", \"was\", \"would\", \"were\", \"are\", \"been\"\n \"reddit\", \"why\",\n \"your\", \"it\", \"is\", \"the\", \"that\", \"has\", \"had\", \"for\", \"at\", \"in\", \"on\",\n \"with\", \"if\", \"to\", \"be\", \"and\", \"some\"]\n for common_word in common_words:\n text = text.replace(\" \" + common_word + \" \", \" \")\n\n # Replace common synonyms\n synonyms = [[\"funniest\", \"funny\", \"hilarious\"], [\"food\", \"diet\"], [\"life\", \"lifetime\"],\n [\"stop\", \"end\", \"quit\"], [\"advice\", \"help\"],\n [\"grossest\", \"worst\"], [\"buy\", \"bought\", \"purchase\"],\n [\"scary\", \"scariest\", \"spookiest\"]]\n for synonym in synonyms:\n for word in synonym:\n text = text.replace(\" \" + word + \" \", \" \" + synonym[0] + \" \")\n\n # Stem words\n from nltk.stem.porter import PorterStemmer\n ps = PorterStemmer()\n text = text.split()\n text = [ps.stem(word) for word in text]\n text = \" \".join(text)\n text = \" \" + text + \" \"\n\n # Missed words that still need to be stemmed:\n text = text.replace(\" given \", \" give \")\n text = text.replace(\" younger \", \" young \")\n\n return text",
"def _atoms_string(self):\n return_str = 'Atoms\\n\\n'\n for atom in self.atoms:\n return_str += '{}\\n'.format(atom.input_string())\n return_str += '\\n'\n return return_str",
"def isFormula(string):\r\n string = string.replace(' ', '')\r\n if string == '':\r\n return True\r\n elif re.sub(r\"\\w|\\d|->|_|\\(|\\)|~\", '', string):\r\n return False\r\n elif re.findall(r\"(?<!\\w_)\\d+|(?<!\\w)\\d+|->->\", string):\r\n return False\r\n else:\r\n string1 = string.replace('~', '').replace('->', '+')\r\n info = re.findall(r'\\w_\\d+|\\w\\d*', string1)\r\n for part in info:\r\n string1 = string1.replace(part, '(-1)')\r\n try:\r\n eval(string1)\r\n except:\r\n return False\r\n string2 = string.replace('~', '-').replace('->', '+')\r\n info = re.findall(r'\\w_\\d+|\\w\\d*', string2)\r\n for part in info:\r\n string2 = string2.replace(part, '(-1)')\r\n try:\r\n eval(string2)\r\n except:\r\n return False\r\n return True",
"def getAtom(formula : str, i : int) -> tuple:\n acc = formula[i]\n running = True\n\n while running:\n\n c = formula[i+1] # Index shift because the i-te char has already been read\n\n if c not in string.ascii_lowercase:\n running = False\n else:\n acc += c\n i += 1\n\n return (acc, i)",
"def uniquely_rename_quantified_variables(formula: Formula) -> \\\r\n Tuple[Formula, Proof]:\r\n # Task 11.5\r\n\r\n prover = Prover(Prover.AXIOMS.union(ADDITIONAL_QUANTIFICATION_AXIOMS))\r\n\r\n if is_quantifier_free(formula):\r\n prover.add_tautology(equivalence_of(formula, formula))\r\n return formula, prover.qed()\r\n\r\n if is_unary(formula.root):\r\n\r\n form, proof = uniquely_rename_quantified_variables(formula.first)\r\n new_form = Formula(\"~\", form)\r\n\r\n ccl = equivalence_of(formula, new_form)\r\n step1 = prover.add_proof(proof.conclusion, proof)\r\n prover.add_tautological_implication(ccl, [step1])\r\n\r\n return new_form, prover.qed()\r\n\r\n\r\n if is_quantifier(formula.root):\r\n\r\n return __quantifier_helper(formula, prover)\r\n\r\n else: # is_binary\r\n\r\n form1, proof1 = uniquely_rename_quantified_variables(formula.first)\r\n form2, proof2 = uniquely_rename_quantified_variables(formula.second)\r\n\r\n new_form = Formula(formula.root, form1, form2)\r\n ccl = equivalence_of(formula, new_form)\r\n\r\n step1 = prover.add_proof(proof1.conclusion, proof1)\r\n step2 = prover.add_proof(proof2.conclusion, proof2)\r\n\r\n prover.add_tautological_implication(ccl, [step2, step1])\r\n return new_form, prover.qed()",
"def formulaToString(*args):\n return _libsbml.formulaToString(*args)",
"def fuzzy_token_sort_ratio(thing_1, thing_2):\n return fuzz.token_sort_ratio(thing_1, thing_2)",
"def atom_featurizer(atom):\n\n return str(\n (\n atom.GetSymbol(),\n atom.GetNumRadicalElectrons(),\n atom.GetFormalCharge(),\n atom.GetChiralTag(),\n atom.GetIsAromatic(),\n get_ring_size(atom, max_size=6),\n atom.GetDegree(),\n atom.GetTotalNumHs(includeNeighbors=True),\n )\n )",
"def get_model_formula(self):\n\n if self._response_variable and self._explanatory_variables[0]:\n\n explanatory_variable = self.get_explanatory_variable()\n\n model_formula = self._response_variable + ' ~ ' + explanatory_variable\n\n else:\n\n model_formula = None\n\n return model_formula",
"def AdjustAromaticNs(m, Npatt='[n&D2&H0;r5,r6]'):\n Chem.GetSymmSSSR(m)\n m.UpdatePropertyCache(False)\n\n # break non-ring bonds linking rings:\n em = Chem.EditableMol(m)\n linkers = m.GetSubstructMatches(Chem.MolFromSmarts('[r]!@[r]'))\n plsFix=set()\n for a,b in linkers:\n em.RemoveBond(a,b)\n plsFix.add(a)\n plsFix.add(b)\n nm = em.GetMol()\n for at in plsFix:\n at=nm.GetAtomWithIdx(at)\n if at.GetIsAromatic() and at.GetAtomicNum()==7:\n at.SetNumExplicitHs(1)\n at.SetNoImplicit(True)\n\n # build molecules from the fragments:\n fragLists = Chem.GetMolFrags(nm)\n frags = [Normalize._FragIndicesToMol(nm,x) for x in fragLists]\n\n # loop through the fragments in turn and try to aromatize them:\n ok=True\n for i,frag in enumerate(frags):\n cp = Chem.Mol(frag.ToBinary())\n try:\n Chem.SanitizeMol(cp)\n except ValueError:\n matches = [x[0] for x in frag.GetSubstructMatches(Chem.MolFromSmarts(Npatt))]\n lres,indices = Normalize._recursivelyModifyNs(frag,matches)\n if not lres:\n #print 'frag %d failed (%s)'%(i,str(fragLists[i]))\n ok=False\n break\n else:\n revMap={}\n for k,v in frag._idxMap.items():\n revMap[v]=k\n for idx in indices:\n oatom = m.GetAtomWithIdx(revMap[idx])\n oatom.SetNoImplicit(True)\n oatom.SetNumExplicitHs(1)\n if not ok:\n return None\n return m",
"def antecedents_patterns(self,\n graph: Graph,\n subject_uri: URIRef,\n relation_uri: URIRef,\n object_uri: URIRef) -> Tuple[str, Optional[Literal]]:\n # contains the concatenated SPARQL patterns of the literals, i.e. the SPARQL filter to match nodes that conform\n # with all literals in the premise\n patterns = \"\"\n\n # subject of a matching literal\n matched_literal_subject = None\n\n # object of a matching literal\n matched_literal_object = None\n\n # the literal that matches the new fact\n matched_literal = None\n\n # test if a literal in the premise handles the same relation that is in the new fact\n # save the literal and its subject and object if such an literal exists\n for antecedent in self.antecedents:\n antecedent_relation_uri = antecedent.relation.uri\n if antecedent_relation_uri == relation_uri:\n matched_literal_subject = f\"?{antecedent.literal_subject}\"\n matched_literal_object = f\"?{antecedent.literal_object}\"\n matched_literal = antecedent\n break\n\n # concatenate the SPARQL pattern fo every literal to query nodes matching all literals\n # exclude the literal with a matching relation type since it is already satisfied by the new fact that will be\n # added\n for antecedent in self.antecedents:\n if antecedent.relation != relation_uri:\n patterns += antecedent.sparql_patterns()\n\n subject_entity = f\"<{subject_uri}>\"\n object_entity = f\"<{object_uri}>\"\n\n if matched_literal_subject is not None:\n patterns = patterns.replace(matched_literal_subject, subject_entity)\n\n if matched_literal_object is not None:\n patterns = patterns.replace(matched_literal_object, object_entity)\n\n return patterns, matched_literal"
]
| [
"0.6042614",
"0.53327316",
"0.5252985",
"0.5109102",
"0.4933846",
"0.49012175",
"0.49012175",
"0.48788637",
"0.4818534",
"0.48048475",
"0.4786649",
"0.47793177",
"0.4746812",
"0.47359288",
"0.4733897",
"0.47329667",
"0.47292286",
"0.47193587",
"0.46981952",
"0.4692537",
"0.4631509",
"0.46188524",
"0.4585061",
"0.45779365",
"0.4569077",
"0.45571253",
"0.45553505",
"0.45516914",
"0.4532639",
"0.45295957"
]
| 0.61645776 | 0 |
Function that takes a player's history data and returns an altair chart showing their winning percentage based on their hand totals and the dealer's up card | def result_heatmap(data, result="win", title=None,
width=500, height=500):
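    # 'data' may be a DataFrame or a player's history list of dicts;
    # 'result' selects which outcome probability is shown on the heatmap.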
possible_results = ["win", "loss", "push", "surrender"]
assert result in possible_results, (
"'result' must be 'win', 'loss', or 'push'"
)
if not title:
title = f"{result.title()} Percentage"
# convert data to a DataFrame if it's just a player's history list
if isinstance(data, list):
data = pd.DataFrame(data)
# remove any hands where the dealer had blackjack or the player busted
sub_data = data[(data["dealer_blackjack"] == 0) &
(data["total"] <= 21)].copy()
    # calculate result percentages for each total and dealer up card combo
grouped_pct = sub_data.groupby(
["total", "dealer_up"]
).apply(results_pct, as_series=False)
# unpack the tuple returned by groupby function and rename columns
grouped_pct = grouped_pct.apply(pd.Series)
grouped_pct.columns = possible_results
# reset index and sort for plotting
pct_data = grouped_pct.reset_index().sort_values("total", ascending=False)
# dynamically determine how the legend should be labeled
min_val = round(min(pct_data[possible_results].min()), 1)
max_val = round(max(pct_data[possible_results].max()), 1)
min_int = int(min_val * 10)
max_int = int(max_val * 10)
values = [
round(x * 0.1, 1) for x in range(min_int, max_int + 1)
]
# create altair heatmap
chart = alt.Chart(
pct_data, title=title, width=width, height=height
).mark_rect(binSpacing=1).encode(
x=alt.X(
"dealer_up:O",
axis=alt.Axis(orient="top", labelAngle=0),
title="Dealer Up Card"
),
y=alt.Y(
"total:O",
title="Player Total",
sort=alt.EncodingSortField(op="mean", order="descending")
),
color=alt.Color(
f"{result}:Q",
legend=alt.Legend(
title=f"{result.title()} Probability",
values=values
)
),
tooltip=[
alt.Tooltip("dealer_up", title="Dealer Up Card"),
alt.Tooltip("total", title="Player Total"),
alt.Tooltip(f"{result}", title=f"{result.title()} Probability")
]
)
return chart | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def outcome_bars(data, name=None, width=100):\n # if it's a dataframe already, just add the name for the legend\n if isinstance(data, pd.DataFrame):\n data_list = [data]\n elif isinstance(data, list):\n # check if it's a list of dicionaries, like player history, or a list\n # of lists\n for item in data:\n l_o_d = isinstance(item, dict)\n # if it's a list of dictionaries, just convert them\n if l_o_d:\n data_list = [pd.DataFrame(data)]\n else:\n data_list = [pd.DataFrame(item) for item in data]\n else:\n msg = \"'data' must be a DataFrame or list\"\n raise TypeError(msg)\n # calculate percentages\n # assign name to data\n if not name:\n name = [f\"Game{i}\" for i in range(len(data))]\n plot_data_list = [] # list to hold dataframes that will be plotted\n for _name, _data in zip(name, data_list):\n win, loss, push, surrender = results_pct(_data, as_series=False)\n plot_data_list.append(\n {\"game\": _name, \"result\": \"Win\", \"pct\": win, \"order\": 1},\n )\n plot_data_list.append(\n {\"game\": _name, \"result\": \"Loss\", \"pct\": loss, \"order\": 2}\n )\n plot_data_list.append(\n {\"game\": _name, \"result\": \"Push\", \"pct\": push, \"order\": 3}\n )\n plot_data_list.append(\n {\"game\": _name, \"result\": \"Surrender\", \"pct\": surrender, \"order\": 3}\n )\n plot_data = pd.DataFrame(plot_data_list)\n\n # create altair chart\n chart = alt.Chart(plot_data, width=width).mark_bar().encode(\n x=alt.X(\n \"game\",\n axis=alt.Axis(labelAngle=-45),\n title=None,\n sort=[\"Win\", \"Loss\", \"Push\"]\n ),\n y=alt.Y(\n \"pct:Q\"\n ),\n color=alt.Color(\n \"game:O\",\n legend=None\n ),\n column=alt.Column(\n \"result:O\",\n title=\"Result\"\n ),\n tooltip=[\n alt.Tooltip(\"pct\", title=\"Pct\")\n ]\n )\n return chart",
"def history(df, team, opponent):\n if opponent:\n games = df[(df.team == team) & (df.opponent == opponent)]#team_games(df, team)\n else:\n games = df[df.team == team]#team_games(df, team)\n\n games['dragkills'] = (games['teamdragkills'] + games['oppdragkills'])\n games['turrkills'] = (games['teamtowerkills'] + games['opptowerkills'])\n result = games[['team', 'opponent', 'player', 'champion', 'fb', 'fd', 'ft', 'fbaron', 'result', 'turrkills', 'dragkills', 'gamelength']]\n\n result = result[result.player == 'Team'].sort_values('gamelength')\n\n return result.round(2)",
"def get_player_data(responses) -> pd.DataFrame:\n result = []\n for p in responses:\n data = p.decode(\"utf8\").replace(\"'\", '\"')\n data = json.loads(data)\n latest_stats = data.get(\"league\").get(\"standard\").get(\"stats\").get(\n \"latest\")\n stats_dict = {\n \"FGP\":\n latest_stats.get(\"fgp\"),\n \"FTP\":\n latest_stats.get(\"ftp\"),\n \"3PM\":\n round(\n int(latest_stats.get(\"tpm\")) / int(\n latest_stats.get(\"gamesPlayed\")), 1),\n \"PPG\":\n latest_stats.get(\"ppg\"),\n \"APG\":\n latest_stats.get(\"apg\"),\n \"RPG\":\n latest_stats.get(\"rpg\"),\n \"SPG\":\n latest_stats.get(\"spg\"),\n \"BPG\":\n latest_stats.get(\"bpg\"),\n \"TPG\":\n latest_stats.get(\"topg\"),\n \"MPG\":\n round(\n int(latest_stats.get(\"min\")) / int(\n latest_stats.get(\"gamesPlayed\")), 1)\n }\n result.append(stats_dict)\n return pd.DataFrame(result)",
"def history():\n hist = db.execute(\"SELECT * FROM shares WHERE userid = :uid ORDER BY date DESC\", uid=session[\"user_id\"])\n for h in hist:\n h[\"total\"] = round(h[\"value\"]*h[\"quantity\"],2)\n return render_template(\"history.html\", context=hist)",
"def tally_results(self, verbose=False):\n\t\tself.tally={\n\t\t\tself.player1.name: {\n\t\t\t\t'wins': 0,\n\t\t\t\t'draws': 0,\n\t\t\t\t'losses': 0\n\t\t\t},\n\t\t\tself.player2.name: {\n\t\t\t\t'wins': 0,\n\t\t\t\t'draws': 0,\n\t\t\t\t'losses': 0\n\t\t\t}\n\t\t}\n\t\tfor key, value in self.winner_dict.items():\n\t\t\tself.tally[self.player1.name]['wins']+=1 if value == self.player1.name else 0\n\t\t\tself.tally[self.player1.name]['draws']+=1 if value is None else 0\n\t\t\tself.tally[self.player1.name]['losses']+=1 if value == self.player2.name else 0\n\t\t\tself.tally[self.player2.name]['wins']+=1 if value == self.player2.name else 0\n\t\t\tself.tally[self.player2.name]['draws']+=1 if value is None else 0\n\t\t\tself.tally[self.player2.name]['losses']+=1 if value == self.player1.name else 0\n\t\tif verbose:\n\t\t\tprint('\\n--- FINAL RESULT ---\\n')\n\t\t\ttally_pretty=pd.DataFrame(self.tally).to_markdown()\n\t\t\tprint(tally_pretty)\n\t\t\tif self.tally[self.player1.name]['wins'] == self.tally[self.player2.name]['wins']:\n\t\t\t\tprint('\\nIt\\'s a draw!\\n')\n\t\t\telse:\n\t\t\t\twinner=self.player1.name if self.tally[self.player1.name]['wins'] > self.tally[self.player2.name]['wins'] else self.player2.name\n\t\t\t\tprint('\\n{player} wins the game!\\n'.format(player=winner))",
"def visualize_data(total_rewards):\n\n x_values = arange(0, len(total_rewards), 1)\n y_values = total_rewards\n plot(x_values, y_values)\n xlabel('episodes')\n ylabel('cumulative rewards')\n title('Reward by Episode')\n grid(True)\n show()",
"def plot_country_representation():\n\n # Get all player data, drops duplicates\n all_players = players.copy().drop_duplicates(subset=\"name\", keep=\"first\")\n # Groupy origin, count unique names (unique since there are no duplicates)\n all_players = all_players.groupby(\"origin\")[\"name\"].count()\n # Push name and origin into columns\n all_players = pd.DataFrame(all_players.reset_index())\n\n # Get all top30 player data, drop duplicates\n top30_players = current_lineups.drop_duplicates(\n subset=\"name\", keep=\"first\")\n # Groupy origin, count unique names (unique since there are no duplicates)\n top30_players = top30_players.groupby(\"origin\")[\"name\"].count()\n # Push name and origin into columns\n top30_players = pd.DataFrame(top30_players.reset_index())\n\n # Get all player data\n majors = players.copy()\n # Filter so only players that have attended Major Tournaments are present\n majors = majors[majors[\"tournament\"].isin(large_tourneys)]\n # Drop duplicates\n majors = majors.drop_duplicates(subset=\"name\", keep=\"first\")\n # Groupby origin, count names\n majors = majors.groupby(\"origin\")[\"name\"].count()\n # Add name and origin back to columns\n majors = pd.DataFrame(majors.reset_index())\n\n # Sort values by count of player\n all_players = all_players.sort_values(by=\"name\", ascending=False)\n top30_players = top30_players.sort_values(by=\"name\", ascending=False)\n majors = majors.sort_values(by=\"name\", ascending=False)\n\n # Renaming columns to better describe data\n top30_players = top30_players.rename(\n columns={\"name\": \"Number of Players\", \"origin\": \"Country\"})\n all_players = all_players.rename(\n columns={\"name\": \"Number of Players\", \"origin\": \"Country\"})\n majors = majors.rename(\n columns={\"name\": \"Number of Players\", \"origin\": \"Country\"})\n\n return top30_players",
"def house_edge(player, params):\n data = pd.DataFrame(player.history)\n results = np.where(\n np.logical_and(data[\"result\"] == \"win\", data[\"blackjack\"]),\n \"blackjack\", data['result']\n )\n results = pd.Series(np.where(\n np.logical_and(data[\"double_down\"], data[\"result\"] == \"win\"),\n \"double\", results\n )).value_counts(normalize=True)\n\n ev = (\n params.payout * results.get(\"win\", 0) +\n (params.blackjack_payout * results.get(\"blackjack\", 0)) +\n (2 * results.get(\"double\", 0)) -\n results.get(\"loss\", 0) -\n (params.surrender_pct * results.get(\"surrender\", 0))\n )\n\n print(\n \"Because all in-game situations may not occur during a simulation, \"\n \"the expected value calculated should be interpreted as an \"\n \"approximation\"\n )\n\n return ev",
"def update_calculated_stats(self, player_data):\n # Updates calculated statistics\n fga = player_data['FGA']\n fgm = player_data['FGM']\n pa3 = player_data['3FGA']\n pm3 = player_data['3FGM']\n try:\n player_data['FG%'] = fgm/fga\n except:\n player_data['FG%'] = 0.0\n try:\n player_data['3FG%'] = pm3/pa3\n except:\n player_data['3FG%'] = 0.0\n return(player_data)",
"def get_pp_ind_rankings_hist(self, player):\n\n if len(player) != 3:\n raise exceptions.DBValueError(\"Player must be complete\")\n\n rank_hist = []\n self._logger.debug(\"Getting ping pong individual ranking history for player\")\n\n try:\n self.check_if_db_connected()\n cursor = self._db_conn.cursor()\n cursor.execute(\"SELECT player_id FROM player WHERE \\\nfirst_name = '{0}' AND last_name = '{1}' AND nickname = \\\n'{2}'\".format(player[0], player[1], player[2]))\n player_id = cursor.fetchone()[0]\n cursor.execute(\"SELECT rating, time FROM pp_ind_rating_hist \\\nWHERE player = {0} ORDER BY time DESC\".format(player_id))\n results = cursor.fetchall()\n\n for rating, timestamp in results:\n cursor.execute(\"SELECT mu, sigma FROM rating WHERE rating_id \\\n= {0}\".format(rating))\n mu, sigma = cursor.fetchall()[0]\n rank = float(mu) - (3 * float(sigma))\n intermediate_rank = (round(rank, 4), timestamp)\n rank_hist.append(intermediate_rank)\n del intermediate_rank\n\n except MySQLdb.OperationalError:\n self._logger.error(\"MySQL operational error occured\")\n traceback.print_exc()\n raise exceptions.DBConnectionError(\"Cannot connect to MySQL server\")\n\n except MySQLdb.ProgrammingError:\n self._logger.error(\"MySQL programming error\")\n traceback.print_exc()\n raise exceptions.DBSyntaxError(\"MySQL syntax error\")\n\n else:\n return rank_hist",
"def displayReport(trade_history):\n stock_history = dict()\n if len(trade_history) > 0:\n for action in trade_history:\n buysell = action.split()\n if stock_history.get(buysell[3]) != None: \n if buysell[2] == 'BUY':\n stock_history[buysell[3]][0] += 1\n else:\n stock_history[buysell[3]][1] += 1\n stock_history[buysell[3]][2] += float(buysell[6])\n else:\n if buysell[2] == 'BUY':\n stock_history[buysell[3]] = [1,0,0]\n else:\n stock_history[buysell[3]] = [0,1,float(buysell[6])]\n\n print(\"Stock: Buys Sells Total Return\")\n for k, v in stock_history.items():\n print(f\"{k}: {v[0]} {v[1]} {v[2] : .2f}\")",
"def get_fifa_stats(match, player_stats):\n\n # Define variables\n match_id = int(match.match_api_id)\n date = match['date']\n players = ['home_player_1', 'home_player_2', 'home_player_3', \"home_player_4\", \"home_player_5\",\n \"home_player_6\", \"home_player_7\", \"home_player_8\", \"home_player_9\", \"home_player_10\",\n \"home_player_11\", \"away_player_1\", \"away_player_2\", \"away_player_3\", \"away_player_4\",\n \"away_player_5\", \"away_player_6\", \"away_player_7\", \"away_player_8\", \"away_player_9\",\n \"away_player_10\", \"away_player_11\"]\n\n overall_ratings = np.array([])\n for player in players:\n # Get player ID\n player_id = match[player]\n\n # Get player stats\n stats = player_stats[player_stats.player_api_id == player_id]\n\n # Identify current stats\n current_stats = stats[stats.date < date].sort_values(by='date', ascending=False).iloc[0]\n\n # get overall rating for every player, this cannot be nan since we \"dropna\" in main()\n overall_ratings = np.concatenate((overall_ratings, [current_stats[\"overall_rating\"]]))\n\n colNames = np.core.defchararray.add(players, '_overall_rating')\n player_stats_new = pd.Series(overall_ratings, index=colNames)\n player_stats_new['match_api_id'] = match_id\n # print(player_stats_new)\n return player_stats_new",
"def grant_dollars_barchart(dframe):\n # prepare dataframe\n dframe = df.copy()\n dframe.columns = [col.lower().replace(' ','_') for col in dframe.columns]\n dframe = dframe[dframe['organization_name'].notnull()]\n dframe.drop(['thank_you_sent','report_due','report_sent'],axis=1,\n inplace=True)\n dframe.set_index(dframe['date_application_sent'],inplace=True)\n\n # create chart\n color_dict = {'awarded':'#adebad','not approved':'#d6746f',\n 'submitted':'#ffffb3'}\n grant_stage = []\n [grant_stage.append(status.lower().strip()) for status in dframe.stage]\n dframe['stage'] = grant_stage\n grant_status = [] # merge status to 3 primary categories, make 'awarded' tag\n for status in dframe.stage:\n if status not in ['obligations complete','pledged','posted']:\n grant_status.append(status)\n else:\n grant_status.append('awarded')\n dframe['grant_status'] = grant_status\n\n # create chart\n grant_outcomes_trace = []\n for status in dframe.grant_status.unique():\n # sum 'amount' column totals for awarded grants\n if status == 'awarded':\n grant_outcomes_trace.append((go.Bar(\n x = dframe[dframe.grant_status==status].resample('Q')['amount'].count().index,\n y = dframe[dframe.grant_status==status].resample('Q')['amount'].sum(),\n name = status,\n marker = {'color': color_dict[status]},\n opacity = .8)))\n\n else:\n # sum 'requested amount' column totals for submitted and not approved\n grant_outcomes_trace.append((go.Bar(\n x = dframe[dframe.grant_status==status].resample('Q')['requested_amount'].count().index,\n y = dframe[dframe.grant_status==status].resample('Q')['requested_amount'].sum(),\n name = status,\n marker = {'color': color_dict[status]},\n opacity = .8)))\n\n layout = {'barmode':'stack',\n 'hovermode':'closest',\n 'legend': {'font': {'color': '#CCCCCC'}},\n 'paper_bgcolor': '#303939',\n 'plot_bgcolor': '#303939',\n 'yaxis':\n {'title':'US$',\n 'tickfont':{'color':'#CCCCCC'},\n 'titlefont': {'color':'#CCCCCC'},\n 'showgrid':False},\n 'xaxis':{'title':'quarter submitted',\n 'titlefont': {'color':'#CCCCCC'},\n 'tickfont': {'color':'#CCCCCC'}},\n 'title':'Grant Application<br>Outcomes Overview',\n 'titlefont': {'color':'#CCCCCC'}}\n\n fig = {'data':grant_outcomes_trace,'layout':layout}\n return fig",
"def construct_team_dataframe(game_dict):\n from .models import Game\n\n logger = logging.getLogger(__name__)\n\n # WHEN NOT SPECIFIED ALL STATS RELATE TO US ONLY\n\n # create list of 'vs opponent' row titles to use as index\n # indexlist = [str('vs ' + game.opposing_team.team_name) if game.opposing_team else game.game_ID for game in Game.objects.filter(pk__in=game_dict.keys())]\n\n indexlist = [game.game_ID for game in Game.objects.filter(pk__in=game_dict.keys())]\n\n tf = pd.DataFrame(index=indexlist)\n\n # final score of each game\n scores = [str(frame.iloc[-1]['ourscore_EOP']) + '-' + str(frame.iloc[-1]['theirscore_EOP']) for gameid, frame in game_dict.items()]\n score = pd.Series(scores, index=tf.index)\n\n # total passes per game\n passes = [frame['passes'].sum() for gameid, frame in game_dict.items()]\n passes = pd.Series(passes, index=tf.index)\n\n # turnovers we had\n turnovers = [frame['turnovers'].sum() for gameid, frame in game_dict.items()]\n t_series = pd.Series(turnovers, index=tf.index)\n\n completion = []\n for g_pass, g_turn in zip(passes, turnovers):\n if g_pass:\n comp_pct = round((g_pass - g_turn) / g_pass, 2)*100\n else:\n comp_pct = None\n\n completion.append(comp_pct)\n # completion %\n completion = pd.Series(completion, index=tf.index)\n\n # total possessions we had\n possessions = pd.Series([frame['possessions'].sum() for gameid, frame in game_dict.items()], index=tf.index)\n\n # turnovers our opponent committed\n opp_turns = pd.Series([frame['opp_turns'].sum() for gameid, frame in game_dict.items()], index=tf.index)\n\n # % of our possessions we scored on\n # ( goals / our_possessions ) * 100%\n goals = [frame.iloc[-1]['ourscore_EOP'] for gameid, frame in game_dict.items()]\n goals = pd.Series(goals, index=tf.index)\n score_pct = round((goals / possessions) * 100, 2)\n\n # number of possessions our opponents had\n opp_poss = [frame['opp_poss'].sum() for gameid, frame in game_dict.items()]\n opp_poss = pd.Series(opp_poss, index=tf.index)\n\n # number of blocks we got\n blocks = [frame['blocks'].sum() for gameid, frame in game_dict.items()]\n blocks = pd.Series(blocks, index=tf.index)\n\n # % of our opponents possessions where we got a block\n # ( blocks / opp_possessions ) * 100%\n block_pct = round((blocks / opp_poss) * 100, 2)\n\n tf = tf.assign(score=score)\n tf = tf.assign(passes=passes)\n tf = tf.assign(completion_pct=completion)\n tf = tf.assign(possessions=possessions)\n tf = tf.assign(score_pct=score_pct)\n tf = tf.assign(blocks=blocks)\n\n tf = tf.assign(turns=t_series)\n tf = tf.assign(opp_turns=opp_turns)\n tf = tf.assign(opp_poss=opp_poss)\n\n tf = tf.assign(block_pct=block_pct)\n\n return tf",
"def gamestats(self, table, curr_team):\n\n # Drop unneeded header \n tmp = table.iloc[1:,]\n # Fix the column names by reading line 0\n tmp.columns = [x.replace(\" \", \"\").replace(\"/\",\"\").replace(\".\",\"\") for x in tmp.iloc[0]]\n # Drop row zero which held the header row\n tmp = tmp.drop(tmp.index[0])\n # Forward fill the dates for defensive split later \n tmp['Date'].fillna(method='ffill', inplace = True)\n # Add in the team \n tmp['Team'] = curr_team\n # Create an offense/defense variable\n tmp['OffenseDefense'] = tmp['Opponent']\n # If it's not a defensive total then it's offense - set that in the offensedefense variable\n tmp['OffenseDefense'] = tmp['OffenseDefense'].apply(lambda x: \"Defense\" if x == \"Defensive Totals\" else \"Offense\")\n # Set the defensive totals in the opponent varaible to nullls\n tmp['Opponent'] = tmp['Opponent'].apply(lambda x: None if x == \"Defensive Totals\" else x)\n # Forward fill the opponents in for analysis later\n tmp['Opponent'].fillna(method='ffill', inplace = True)\n # Forward fill the results in for analysis later \n tmp['Result'].fillna(method='ffill', inplace = True)\n return tmp",
"def report():\n for player, dat in players.items():\n if \"Arca\" in dat:\n edad[dat[\"Edad\"]] += arca(dat[\"Arca\"])\n if \"Observatorio\" in dat:\n edad[dat[\"Edad\"]] += observatorio(dat[\"Observatorio\"])\n if \"Atomium\" in dat:\n edad[dat[\"Edad\"]] += atomium(dat[\"Atomium\"])\n\n # En el caso de los edificios gremiales los materiales que se indican\n # en la descripción son siempre la suma de los 5 bienes de la edad\n if \"Dirigible\" in dat:\n for ed in dat[\"Dirigible\"]:\n edad[ed] += 4 # 20/5\n\n if \"Estatua\" in dat:\n for ed in dat[\"Estatua\"]:\n edad[ed[\"Edad\"]] += 2*ed[\"Nivel\"] # 10/5\n\n if \"Baño\" in dat:\n edad[dat[\"Baño\"][\"Edad\"]] += bano(dat[\"Baño\"][\"Nivel\"])\n\n # Calculo coste de expedición\n anterior = expeOrden[dat[\"Edad\"]]\n expeCoste[anterior] += expeLevel2[dat[\"Edad\"]]\n expeCoste[dat[\"Edad\"]] += 2*expeLevel2[dat[\"Edad\"]] + \\\n 4*expeLevel2[dat[\"Edad\"]]\n\n txt = \"\"\n for ed in edades:\n txt += \"%-25s\\t%5i \\t %4i\" % (ed, edad[ed], -expeCoste[ed])\n txt += os.linesep\n return txt",
"def output_summary_stats(self):\n total_return = self.equity_curve['equity_curve'][-1]\n returns = self.equity_curve['returns']\n pnl = self.equity_curve['equity_curve']\n \n sharpe_ratio = create_sharpe_ratio(returns) #, periods=252*6.5*60) ??? \n drawdown, max_dd, dd_duration = create_drawdowns(pnl)\n self.equity_curve['drawdown'] = drawdown\n \n stats = [(\"Total Return\", \"%0.2f%%\" % ((total_return - 1.0) * 100.0)), \n (\"Sharpe Ratio\", \"%0.2f\" % sharpe_ratio), \n (\"Max Drawdown\", \"%0.2f%%\" % (max_dd * 100.0)), \n (\"Drawdown Duration\", \"%d\" % dd_duration)]\n \n self.equity_curve.to_csv('equity.csv')\n \n return stats",
"def user_contributions_chart():\n\n #get current user according to Flask session\n user = dbwrangler.get_current_user()\n\n #get all users from the user table sharing their address (so, their housemates)\n all_housemates = User.query.filter_by(address=user.address).all()\n\n #calculate total minutes of labor wach month for the user and their housemates\n total_household_labor = dbwrangler.total_houehold_labor(user)\n\n #build a data dictionary to feed jsonify. First entry in the labels list is unclaied labor... total household labor from which we will subtract housemate contributions\n dd_labels = [\"Unclaimed\"]\n\n #dd_data will be the minutes per user. Instantiate as an empty list.\n dd_data = []\n\n #each user gets a unique color\n dd_bgcolors = helpers.color_picker(len(all_housemates)+1)\n\n #Chart.js lets you select bgcolor for each wedge as one hovers over the chart\n dd_hoverbg = [\"#a6a6a6\", \"#a6a6a6\",\"#a6a6a6\",\"#a6a6a6\"]\n\n leftover_labor = total_household_labor\n\n #build lists in each field of our data dictionary, with each member of the household at their own index\n for housemate in all_housemates:\n dd_labels.append(housemate.name)\n individual_labor = dbwrangler.individual_labor(housemate.user_id)\n dd_data.append(individual_labor)\n leftover_labor -= individual_labor\n\n #zeroth index is the unclaimed labor (this from 21 lines above)\n dd_data = [leftover_labor] + dd_data\n\n data_dict = {\n \"labels\": dd_labels, \n \"datasets\":[{\"data\":dd_data, \n \"backgroundColor\":dd_bgcolors, \n \"hoverBackgroundColor\":dd_hoverbg}]\n }\n \n return jsonify(data_dict)",
"def first_round_history(self):\n self.ts_dict = self.get_tourney_slots()\n self.tsr_dict = self.match_seeds()\n first_seed_win = 0\n second_seed_win = 0\n third_seed_win = 0\n fourth_seed_win = 0\n fifth_seed_win = 0\n sixth_seed_win = 0\n seventh_seed_win = 0\n eighth_seed_win = 0\n total_games = 128\n\n for year1 in self.ts_dict: \n for slot, match_up in self.ts_dict[year1].items():\n if slot[:2] == \"R1\":\n for year2 in self.tsr_dict:\n if year1 == year2:\n for winning, losing in self.tsr_dict[year2].items():\n if winning[5:] == match_up[:3]:\n seed = winning[6:] \n if seed == \"01\":\n first_seed_win += 1\n elif seed == \"02\":\n second_seed_win += 1\n elif seed == \"03\":\n third_seed_win += 1\n elif seed == \"04\":\n fourth_seed_win += 1\n elif seed == \"05\":\n fifth_seed_win += 1\n elif seed == \"06\":\n sixth_seed_win += 1\n elif seed == \"07\":\n seventh_seed_win += 1\n elif seed == \"08\":\n eighth_seed_win += 1 \n \n #print(first_seed_win, second_seed_win, third_seed_win, fourth_seed_win, fifth_seed_win, sixth_seed_win, seventh_seed_win, eighth_seed_win, total_games)\n\n gauge = pygal.SolidGauge(inner_radius=0.70, title=\"NCAA First Round Results\")\n ratio_first_seed = int(first_seed_win / total_games * 100)\n ratio_second_seed = int(second_seed_win / total_games * 100)\n ratio_third_seed = int(third_seed_win / total_games * 100)\n ratio_fourth_seed = int(fourth_seed_win / total_games * 100)\n ratio_fifth_seed = int(fifth_seed_win / total_games * 100)\n ratio_sixth_seed = int(sixth_seed_win / total_games * 100)\n ratio_seventh_seed = int(seventh_seed_win / total_games * 100)\n ratio_eighth_seed = int(eighth_seed_win / total_games * 100) \n\n percent_formatter = lambda x: '{:.10g}%'.format(x)\n gauge.value_formatter = percent_formatter\n gauge.add('1 vs. 16', [{'value': ratio_first_seed, 'max_value': 100}])\n gauge.add('2 vs. 15', [{'value': ratio_second_seed, 'max_value': 100}])\n gauge.add('3 vs. 14', [{'value': ratio_third_seed, 'max_value': 100}])\n gauge.add('4 vs. 13', [{'value': ratio_fourth_seed, 'max_value': 100}])\n gauge.add('5 vs. 12', [{'value': ratio_fifth_seed, 'max_value': 100}])\n gauge.add('6 vs. 11', [{'value': ratio_sixth_seed, 'max_value': 100}])\n gauge.add('7 vs. 10', [{'value': ratio_seventh_seed, 'max_value': 100}])\n gauge.add('8 vs. 9', [{'value': ratio_eighth_seed, 'max_value': 100}])\n \n gauge.render_to_file('chart.svg')",
"def chartdata():\n chart = billboard.ChartData('hot-100')\n chart_data = []\n for song in chart:\n song_data = (song.title, song.artist)\n chart_data.append(song_data)\n \n return chart_data",
"def statsbomb(player):\n tiros = pd.read_csv(\"./data/average_cleaned.csv\")\n zones = [\"PLAYER_NAME\", \"% LEFT-CORNER-3\",\"% LEFT-MIDR-2\",\"% LEFT-ELBOW-3\",\"% LEFT-ELB/CENT-2\",\n \"% LEFT-CENTER-3\",\"% LEFT-PAINT\",\"% RIGHT-PAINT\",\"% RIGHT-CENTER-3\",\"% RIGHT-ELB/CENT-2\",\n \"% RIGHT-ELBOW-3\",\"% RIGHT-MIDR-2\",\"% RIGHT-CORNER-3\"]\n player_polygon = tiros[zones]\n values = player_polygon[player_polygon[\"PLAYER_NAME\"] == player]\n values = values.values.tolist()\n values = values[0][1:]\n\n #URLs to get fonts for the plots\n\n URL1 = ('https://github.com/googlefonts/SourceSerifProGFVersion/blob/main/'\n 'fonts/SourceSerifPro-Regular.ttf?raw=true')\n serif_regular = FontManager(URL1)\n URL2 = ('https://github.com/googlefonts/SourceSerifProGFVersion/blob/main/'\n 'fonts/SourceSerifPro-ExtraLight.ttf?raw=true')\n serif_extra_light = FontManager(URL2)\n URL3 = ('https://github.com/google/fonts/blob/main/ofl/rubikmonoone/'\n 'RubikMonoOne-Regular.ttf?raw=true')\n rubik_regular = FontManager(URL3)\n URL4 = 'https://github.com/googlefonts/roboto/blob/main/src/hinted/Roboto-Thin.ttf?raw=true'\n robotto_thin = FontManager(URL4)\n URL5 = 'https://github.com/googlefonts/roboto/blob/main/src/hinted/Roboto-Regular.ttf?raw=true'\n robotto_regular = FontManager(URL5)\n URL6 = 'https://github.com/googlefonts/roboto/blob/main/src/hinted/Roboto-Bold.ttf?raw=true'\n robotto_bold = FontManager(URL6)\n\n # parameter names of the statistics we want to show\n params = [\"% LEFT-CORNER-3\",\"% LEFT-MIDR-2\",\"% LEFT-ELBOW-3\",\"% LEFT-ELB/CENT-2\",\n \"% LEFT-CENTER-3\",\"% LEFT-PAINT\",\"% RIGHT-PAINT\",\"% RIGHT-CENTER-3\",\"% RIGHT-ELB/CENT-2\",\n \"% RIGHT-ELBOW-3\",\"% RIGHT-MIDR-2\",\"% RIGHT-CORNER-3\"]\n\n # The lower and upper boundaries for the statistics\n low = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n high = [0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99,]\n \n radar = Radar(params, low, high,\n # whether to round any of the labels to integers instead of decimal places\n round_int=[False]*12,\n num_rings=4, # the number of concentric circles (excluding center circle)\n # if the ring_width is more than the center_circle_radius then\n # the center circle radius will be wider than the width of the concentric circles\n ring_width=1, center_circle_radius=1)\n \n # creating the figure using the function defined above:\n fig, axs = radar_mosaic(radar_height=0.915, title_height=0.06, figheight=14)\n\n # plot the radar\n radar.setup_axis(ax=axs['radar'], facecolor='None')\n rings_inner = radar.draw_circles(ax=axs['radar'], facecolor='#28252c', edgecolor='#39353f', lw=1.5)\n radar_output = radar.draw_radar(values, ax=axs['radar'])\n radar_poly, rings_outer, vertices = radar_output\n range_labels = radar.draw_range_labels(ax=axs['radar'], fontsize=25, color='#fcfcfc',\n fontproperties=robotto_thin.prop)\n param_labels = radar.draw_param_labels(ax=axs['radar'], fontsize=25, color='#fcfcfc',\n fontproperties=robotto_regular.prop)\n\n # adding the endnote and title text (these axes range from 0-1, i.e. 0, 0 is the bottom left)\n # Note we are slightly offsetting the text from the edges by 0.01 (1%, e.g. 
0.99)\n endnote_text = axs['endnote'].text(0.99, 0.5, 'Inspired By: StatsBomb / Rami Moghadam',\n color='#fcfcfc', fontproperties=robotto_thin.prop,\n fontsize=15, ha='right', va='center')\n title1_text = axs['title'].text(0.01, 0.65, player, fontsize=25,\n fontproperties=robotto_bold.prop,\n ha='left', va='center', color='#e4dded')\n\n fig.set_facecolor('#121212')\n # save the figure\n fig.tight_layout()\n plt.savefig('./images/plot.png', dpi=300)",
"def get_player_career_reg_season_stats(player_id, player_data_url, headers):\n\tplayer_data_json=json.loads(requests.get(player_data_url+str(player_id), headers=headers).content)\n\tcareer_totals=player_data_json['resultSets'][1]\n\tdf_career_totals=pd.DataFrame.from_records(career_totals['rowSet'], columns=career_totals['headers'])\n\tdf_career_totals.PLAYER_ID=df_career_totals.PLAYER_ID.astype(int)\n\treturn df_career_totals",
"def display_stats(trackers):\n if len(trackers) == 0:\n # number of trackers\n trackers = tracker_list()\n print(\"{} tracker(s)\".format(len(trackers)))\n\n # total number of entries\n entries = count_entries()\n print(\"{} total entries\".format(entries))\n\n # number of entries per tracker\n for t in trackers:\n print()\n print(t)\n t_entries = count_entries(t)\n print(\" {} entries\".format(t_entries))\n\n elif len(trackers) == 2:\n t1 = trackers[0]\n t2 = trackers[1]\n if not tracker_exists(t1):\n error(\"Cannot show stats for unknown tracker '\" + t1 + \"'\")\n if not tracker_exists(t2):\n error(\"Cannot show stats for unknown tracker '\" + t2 + \"'\")\n r = correlation(fetch_all(t1), fetch_all(t2))\n if r == -1:\n error(\"Error: '{}' data has no variance\".format(t1))\n elif r == -2:\n error(\"Error: '{}' data has no variance\".format(t2))\n print(\"Correlation coefficient: {}\".format(r))\n else:\n days = [\"Mon\", \"Tue\", \"Wed\", \"Thu\", \"Fri\", \"Sat\", \"Sun\"]\n for t in trackers:\n if not tracker_exists(t):\n error(\"Cannot show stats for unknown tracker '\" + t + \"'\")\n results = fetch_all(t)\n num_entries = len(results)\n counts = [0] * len(days)\n tally = [0] * len(days)\n mins = [None] * len(days)\n maxs = [None] * len(days)\n min_value = None\n max_value = None\n for r in results:\n count = r[2]\n day = str_to_date(r[1])\n day_index = day.weekday()\n counts[day_index] += count\n tally[day_index] += 1\n if not min_value or min_value[0] > count:\n min_value = (count, day)\n if not max_value or max_value[0] < count:\n max_value = (count, day)\n if not mins[day_index] or count < mins[day_index]:\n mins[day_index] = count\n if not maxs[day_index] or count > maxs[day_index]:\n maxs[day_index] = count\n\n\n print(\"{}: {} entries\\n\".format(t, num_entries))\n\n # TODO: decide how to format output for better readability\n # Give general tracker stats\n total = sum(counts)\n avg = total/sum(tally)\n gen = [total, avg, min_value[0], max_value[0]]\n gen = ['{:,.3f}'.format(v) for v in gen]\n gen_len = len(max(gen, key=len))\n\n # format string for single data point\n point = \" {:3} -- {}\"\n print(point.format(\"sum\", gen[0].rjust(gen_len)))\n print(point.format(\"avg\", gen[1].rjust(gen_len)))\n print(point.format(\"min\", gen[2].rjust(gen_len)) +\n \" (\" + str(min_value[1]) + \")\")\n print(point.format(\"max\", gen[3].rjust(gen_len)) +\n \" (\" + str(max_value[1]) + \")\")\n print()\n\n # Give stats about consistency\n streak = longest_streak(results)\n gap = longest_break(results)\n\n # format string for streak data\n alt_point = \" {:14} -- {:2} day(s) ({} to {})\"\n print(alt_point.format(\"longest streak\", streak[0], streak[1], streak[2]))\n print(alt_point.format(\"longest break\", gap[0], gap[1], gap[2]))\n print()\n\n # prepare min/max data for printing\n mins = [v if v else 0 for v in mins]\n maxs = [v if v else 0 for v in maxs]\n\n\n # Format data into justified columns\n counts_str, count_len = justify_column(counts)\n avgs = [None] * len(counts)\n for i in range(-1,6):\n avgs[i] = 0 if tally[i] is 0 else counts[i]/tally[i]\n avgs_str, avgs_len = justify_column(avgs)\n mins_str, mins_len = justify_column(mins)\n maxs_str, maxs_len = justify_column(maxs)\n\n row = \" {:8}{:<22}{:<22}{:<22}{:<22}\"\n title_row = '\\033[1m' + row + '\\033[0m'\n print(title_row.format(\n \"\",\n \"Total\".rjust(count_len),\n \"Avg\".rjust(avgs_len),\n \"Min\".rjust(mins_len),\n \"Max\".rjust(maxs_len)))\n\n for i in range(-1,6):\n print(row.format(days[i] + \" --\",\n 
counts_str[i],\n avgs_str[i],\n mins_str[i],\n maxs_str[i]))",
"def result(app: dash.Dash, data: GameData) -> html:\n item = data.items[data.current_round]\n img_raw = str(item.picture_raw)\n img_explained = item.picture_explained\n ai_prediction: Dict[str, Any] = {'x': [], 'y': []}\n ai_prediction['type'] = 'bar'\n ai_prediction['orientation'] = 'h'\n ai_prediction['marker'] = {'color': COLOR_STATWORX}\n max_axis_value = ceil(item.prediction_ai[0].certainty * 10.0) / 10.0\n\n # Prepare data for the plot\n for ai_item in item.prediction_ai:\n ai_prediction['y'].append(ai_item.brand + ' ' + ai_item.model)\n ai_prediction['x'].append(ai_item.certainty)\n\n # Determine result and color\n if item.prediction_user == item.ground_truth:\n clr_user = 'success'\n\n else:\n clr_user = 'danger'\n\n if item.prediction_ai[0] == item.ground_truth:\n clr_ai = 'success'\n\n else:\n clr_ai = 'danger'\n\n layout = dbc.Container([\n dbc.Row(children=[\n dbc.Col(dbc.Card(dbc.CardImg(src=img_raw))),\n dbc.Col(\n dbc.Card([\n dbc.ListGroup([\n dbc.ListGroupItem([\n dbc.ListGroupItemHeading(\"Your Prediction:\"),\n dbc.ListGroupItemText(item.prediction_user.brand + ' - ' +\n item.prediction_user.model)\n ],\n color=clr_user),\n dbc.ListGroupItem([\n dbc.ListGroupItemHeading(\"AI Prediction:\"),\n dbc.ListGroupItemText(item.prediction_ai[0].brand + ' - ' +\n item.prediction_ai[0].model)\n ],\n color=clr_ai),\n dbc.ListGroupItem([\n dbc.ListGroupItemHeading(\"Correct Answer:\"),\n dbc.ListGroupItemText(item.ground_truth.brand + ' - ' +\n item.ground_truth.model)\n ],\n color='secondary',\n className='mb-3')\n ]),\n dbc.ButtonGroup(\n dbc.Button(\"Continue!\",\n id=\"btn\",\n color='primary',\n style={\n \"background-color\": COLOR_STATWORX,\n \"border-color\": COLOR_STATWORX\n }))\n ],\n body=True))\n ],\n className='mb-4'),\n dbc.Row(\n children=[\n dbc.Col(\n dbc.Card([\n # dbc.CardBody(\n # html.H4(\"How the AI sees the car\", className=\"card-title\")),\n dbc.CardImg(src=img_explained)\n ])),\n dbc.Col(\n dbc.Card(\n dbc.CardBody([\n # html.H4(\"Top 5 AI Predictions\", className=\"card-title\"),\n dcc.Graph(figure={\n 'data': [ai_prediction],\n 'layout': {\n 'margin': {\n 'l': 100,\n 'r': 0,\n 'b': 0,\n 't': 0\n },\n 'yaxis': {\n 'automargin': True,\n 'autorange': 'reversed'\n },\n 'xaxis': {\n 'automargin': True,\n 'tickformat': '.2%',\n 'range': [0, max_axis_value]\n },\n 'autosize': True\n }\n },\n config={\n 'showTips': False,\n 'displayModeBar': False,\n 'doubleClick': False,\n },\n style={\n 'flex': 1,\n 'margin': '10px'\n })\n ]), ))\n ],\n className='mb-4'),\n # Needed to circumvent dash limitations\n # See: https://community.plot.ly/t/you-have-already-assigned-a-callback-to-the-output/25334\n html.Div([\n dcc.Input(id='car-brand', value='ignore'),\n dcc.Input(id='car-model', value='ignore')\n ],\n hidden=True)\n ])\n\n return layout",
"def stats_strategy_response(data: OTreeSessionData, ws=None):\n\n rounds = [data.get_round(i + 1) for i in range(data.num_rounds())]\n rnd_stats = {}\n for rnd, (pr, ne) in enumerate(zip(rounds[:-1], rounds[1:]), 2):\n stats = {k1: {k2: 0 for k2 in product(('Node', 'Edge'), repeat=2)}\n for k1 in product(list('CD'), repeat=2)}\n print(f'\\rCalculating strategy response (round {rnd}) ... ', end='')\n for pid in range(1, ne.num_players() + 1):\n nep = ne.loc[pid]\n prp = pr.loc[pid]\n stats[(nep.player.choice_L, prp.player.choice_nei_L)][(nep.player.type, prp.player.type_L)] += 1\n stats[(nep.player.choice_U, prp.player.choice_nei_U)][(nep.player.type, prp.player.type_U)] += 1\n stats[(nep.player.choice_R, prp.player.choice_nei_R)][(nep.player.type, prp.player.type_R)] += 1\n stats[(nep.player.choice_D, prp.player.choice_nei_D)][(nep.player.type, prp.player.type_D)] += 1\n if ws:\n ws.append((rnd, pid, nep.player.type,\n nep.player.choice_L, nep.player.type_L, prp.player.choice_nei_L,\n nep.player.choice_U, nep.player.type_U, prp.player.choice_nei_U,\n nep.player.choice_R, nep.player.type_R, prp.player.choice_nei_R,\n nep.player.choice_D, nep.player.type_D, prp.player.choice_nei_D))\n rnd_stats[rnd] = stats\n print('Done')\n global_vars['rnd_stats'] = rnd_stats\n return rnd_stats",
"def backtest(cache):\n if not cache:\n history = getBars(MARKET, TF)\n else:\n cachefile = \"cache/{}-{}.csv\".format(MARKET, TF)\n try:\n history = pd.read_csv(\n cachefile, index_col=\"datetime\", parse_dates=True)\n except:\n history = getBars(MARKET, TF)\n history.to_csv(cachefile)\n history['ma'] = history['close'].rolling(MEAN).mean()\n weAreLong = False\n PRICE_DIPPED = False\n\n entry = 0.0\n exit = None\n pl = 0.0\n history['pandl'] = 0.0\n trades = 1\n\n ### BEGIN STRATEGY DEFINITION ###\n count = 1\n\n for i in history.index:\n candle_close_rate = history['ma'][i]\n ma = history['close'][i]\n if count > MEAN:\n # playing revert to mean (RTM)\n if not weAreLong and buySignaled(candle_close_rate, ma, PRICE_DIPPED):\n entry = candle_close_rate\n exit = candle_close_rate * (1.0 + (EXIT_PERCENT / 100.0))\n stop = candle_close_rate * (1.0 - (STOP_PERC / 100.0))\n weAreLong = True\n history['pandl'][i] = pl\n elif weAreLong and history['high'][i] >= exit:\n weAreLong = False\n pl += (((exit - entry) / entry) * 100.0 ) - (2.0 * FEE)\n history['pandl'][i] = pl\n trades += 1\n elif weAreLong and candle_close_rate <= stop:\n weAreLong = False\n pl += (((candle_close_rate - entry) / entry) * 100.0 ) - (2.0 * FEE)\n history['pandl'][i] = pl\n trades += 1\n else:\n if weAreLong:\n # fpl = ((history['close'][i] * (1.0 - FEE)) - (entry * (1.0 + FEE)))\n # fpl = ( (history['close'][i] * (100.0 - FEE)) - (entry * (100.0 + FEE)) ) / 100.0\n fpl = ((candle_close_rate - entry) / entry) * 100.0\n history['pandl'][i] = pl + fpl\n else:\n fpl = 0.0\n history['pandl'][i] = pl + fpl\n count += 1\n if candle_close_rate <= ma:\n PRICE_DIPPED = True\n else:\n PRICE_DIPPED = False\n ### END STRATEGY DEFINITION ###\n\n days = (len(history) * TF) / (60 * 24)\n sharpe_ratio = getSharpe(list(history['pandl']), days)\n fig, ax = plt.subplots(1)\n plt.plot(history['pandl'])\n fig.autofmt_xdate()\n plt.ylabel('cumulative %')\n props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)\n # place a text box in upper left in axes coords\n textstr = \"\"\"{}\n Days: {}\n Trades: {}\n Settings:\n TF = {}\n MEAN = {}\n BREAKOUT = {}\n RTM = {}\n RTM_PERCENT = {}\n BO_PERCENT = {}\n EXIT_PERCENT = {},\n STOP = {}\n Sharpe = {}\n \"\"\".format(MARKET,\n days,\n trades,\n TF,\n MEAN,\n BREAKOUT,\n RTM,\n RTM_PERCENT,\n BO_PERCENT,\n EXIT_PERCENT,\n STOP_PERC,\n sharpe_ratio)\n ax.text(0.05, 0.95, textstr, transform=ax.transAxes, fontsize=14,\n verticalalignment='top', bbox=props)\n plt.title(\"BACKTEST {}\".format(MARKET))\n plt.savefig(BACKTESTFILE)\n print(\"{},{},{},{},{},{},{},{},{},{},{},{}\".format(days,\n trades,\n MARKET,\n TF,\n MEAN,\n BREAKOUT,\n RTM,\n RTM_PERCENT,\n BO_PERCENT,\n EXIT_PERCENT,\n STOP_PERC,\n sharpe_ratio))\n # plt.show()",
"def get_player_stats_from_game(team, year, week):",
"def players(self, game_id: int) -> DataFrame[Any]:",
"def userReport():\n for player, dat in players.items():\n if \"Arca\" not in dat and \"Observatorio\" not in dat \\\n and \"Atomium\" not in dat and \"Dirigible\" not in dat \\\n and \"Estatua\" not in dat and \"Baño\" not in dat:\n continue\n\n print(\"-------------------------------------------------------------\")\n print(player, \" - \", dat[\"Edad\"])\n if \"Arca\" in dat:\n print(\" Arca %i\" % dat[\"Arca\"])\n if \"Observatorio\" in dat:\n print(\" Observatorio %i\" % dat[\"Observatorio\"])\n if \"Atomium\" in dat:\n print(\" Atomium %i\" % dat[\"Atomium\"])\n\n if \"Estatua\" in dat:\n for ed in dat[\"Estatua\"]:\n print(\" Estatua %i - %s\" % (ed[\"Nivel\"], ed[\"Edad\"]))\n\n if \"Dirigible\" in dat:\n for ed in dat[\"Dirigible\"]:\n print(\" Dirigible 11 - %s\" % ed)\n if \"Baño\" in dat:\n print(\" Baño Real %i - %s\" % (\n dat[\"Baño\"][\"Nivel\"], dat[\"Baño\"][\"Edad\"]))\n\n print()",
"def history():\n \n u_row = db.execute(\"SELECT * FROM users WHERE id=:id\", id=session['user_id'])\n username = u_row[0]['username']\n \n result = db.execute(\"SELECT * FROM history WHERE username=:username\", username=username)\n \n if result:\n dict = {}\n dict['symbol'] = []\n dict['shares'] = []\n dict['price'] = []\n dict['time'] = []\n \n for row in result:\n symbol = row['symbol']\n shares = row['shares']\n time = row['time']\n \n quote = lookup(symbol)\n name = quote['name']\n price = quote['price']\n total = shares * price\n \n dict['symbol'].append(symbol)\n dict['shares'].append(shares)\n dict['price'].append(usd(price))\n dict['time'].append(time)\n \n length = len(dict['symbol'])\n \n return render_template(\"history.html\",length=length,dict=dict)\n \n else:\n return render_template(\"history.html\",length=0,dict=[])"
]
| [
"0.634474",
"0.6184851",
"0.586159",
"0.57326776",
"0.5692181",
"0.5665392",
"0.5642855",
"0.5554835",
"0.55420274",
"0.553956",
"0.5507848",
"0.5477072",
"0.5455402",
"0.5449548",
"0.5444684",
"0.5435415",
"0.53835887",
"0.5383032",
"0.5378751",
"0.5371841",
"0.53710645",
"0.53615737",
"0.53509307",
"0.5343361",
"0.53406286",
"0.5334193",
"0.53336877",
"0.53256094",
"0.531553",
"0.5301121"
]
| 0.62488574 | 1 |
Calculate the probability of busting in a given dataset | def pct_bust(data):
return round((data["new_total"] > 21).sum() / len(data), 3) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def probability(self, samples):\n pass",
"def calculate_probability(self):\n return 0",
"def test_bp_example():\n signal = np.array([4, 7, 9, 10, 6, 11, 3])\n\n pe = permutation_entropy(signal, 2)\n\n assert 0.91 < pe < 0.92 # Should be approx 0.918.\n\n pe = permutation_entropy(signal, 3)\n\n assert 1.52 < pe < 1.53 # Should be approx 1.522.",
"def calc_entropy(data_set): #calculates total entropy of the dataset\r\n republicans = 0\r\n democrats = 0\r\n total = 0\r\n for data_point in data_set:\r\n party = data_point.dat_party\r\n if party == \"R\":\r\n republicans+=1\r\n elif party == \"D\":\r\n democrats+=1\r\n total+=1\r\n\r\n if total == 0: return 0\r\n prob_dem = democrats/total\r\n prob_rep = republicans/total\r\n if prob_dem == 0: return -(prob_rep * math.log(prob_rep, 2))\r\n if prob_rep == 0: return -(prob_dem * math.log(prob_dem, 2))\r\n\r\n entropy = (-prob_dem * math.log(prob_dem, 2)) -(prob_rep * math.log(prob_rep, 2))\r\n return entropy",
"def theoretical_effective(dataset):\n return float(sum(dataset))/len(dataset)",
"def purity(y_true, y_pred, sample_weight=None):\n if sample_weight is None:\n sample_weight = np.ones_like(y_true)\n TP = np.sum((y_pred) * y_true * sample_weight)\n FP = np.sum((y_pred) * (y_true == 0) * sample_weight)\n return TP / (TP + FP)",
"def sample(preds, temperature=1.0):\n preds = np.asarray(preds).astype('float64')\n preds = np.log(preds) / temperature\n exp_preds = np.exp(preds)\n preds = exp_preds / np.sum(exp_preds)\n probas = np.random.multinomial(1, preds, 1)\n return(np.argmax(probas))",
"def _cluster_hitprobability(self, x, y):\n hm_count = np.zeros_like(y).astype(float)\n hm = np.zeros_like(y).astype(float)\n #skf = StratifiedShuffleSplit(n_splits=self.n_iter, test_size=self.shuffle_test_split, random_state=self.random_state)\n\n ind = self._cluster(x, 35)\n\n for cluster_id in np.unique(ind):\n test = np.argwhere(ind == cluster_id)[:, 0]\n train = np.argwhere(ind != cluster_id)[:, 0]\n #print test\n self.basemodel.fit(x[train, :], y[train], hyperparams_optim=False)\n hm_count[test] += 1.\n hm[test] += (self.basemodel.predict(x[test, :]) == y[test]).astype(float)\n\n proba = hm / hm_count\n if self.verbose:\n # print('H/M count:')\n # print(hm_count)\n print('Proba:')\n print(proba)\n self.basemodel.fit(x, y, hyperparams_optim=False)\n return proba",
"def sample_prob(probs):\n return tf.to_float(tf.random_uniform(tf.shape(probs)) <= probs)",
"def sample_prob(probs):\n return tf.to_float(tf.random_uniform(tf.shape(probs)) <= probs)",
"def pbias(self) -> float:\n return float(100.0 * sum(self.predicted - self.true) / sum(self.true))",
"def _sample(preds, temperature=1.0):\n preds = np.asarray(preds).astype('float64')\n preds = np.log(preds) / temperature\n exp_preds = np.exp(preds)\n preds = exp_preds / np.sum(exp_preds)\n probas = np.random.multinomial(1, preds, 1)\n return np.argmax(probas)",
"def probability(s, a, b):\r\n return s.cdf(b) - s.cdf(a)",
"def calc_prob(data):\n total = len(data)\n frequencies = sorted(Counter(data).items())\n probabilities = OrderedDict()\n for (key, value) in frequencies:\n probabilities[key] = value / total\n return probabilities",
"def probability(p):\n return p > random.uniform(0.0, 1.0)",
"def bidProbability(self, totalDice, bidCount):\n result = sum([self.confirmProbability(totalDice, i) for i in range(bidCount, totalDice + 1)])\n return result",
"def test_probability(self, dataset = None):\n\n\t\tdataset = self.vectorize(dataset) if (dataset != None) else self.testing_set_vector;\n\n\t\tprediction = self.classifier.decision_function(dataset)\n\n\t\treturn list(map(lambda p: (1 / (1 + math.exp(-p))), prediction))",
"def get_bernoulli_sample(probs):\n return tf.ceil(probs - tf.random_uniform(tf.shape(probs)))",
"def probability(self, X_):\n X = np.c_[np.ones(X_.shape[0]), X_] # Add one for bias to the first columns\n probs = np.zeros(X.shape[0])\n ### YOUR CODE HERE\n z = X.dot(self.w)\n probs = log_reg.logistic(z)\n ### END CODE\n assert probs.shape == (X.shape[0],)\n return probs",
"def test_probabilities_are_ok(self, seed):\n bins = defaultdict(int)\n probs = (0.1, 0.2, 0.3, 0.4)\n categories = (\"asdfa\", \"2\", \"3\", \"4\")\n categories = OrderedDict(zip(categories, probs))\n dim = Categorical(\"yolo\", categories)\n for _ in range(500):\n sample = dim.sample(seed=seed)[0]\n bins[sample] += 1\n for keys in bins.keys():\n bins[keys] /= float(500)\n for key, value in categories.items():\n assert abs(bins[key] - value) < 0.01",
"def prob4():\n\n\n N = 500000\n random_draws = np.random.multivariate_normal(mean = [-1,1], cov =[[1,0],[0,1]], size = N)\n\n h = lambda x: x[0] < -1 and x[1] > 1\n f = lambda x: stats.multivariate_normal(mean = [ 0, 0]).pdf(x)\n g = lambda x: stats.multivariate_normal(mean = [-1, 1]).pdf(x)\n\n probability = [h(random_draws[i]) * f(random_draws[i]) / g(random_draws[i]) for i in range(N)]\n\n return 1./N * np.sum(probability)",
"def occupation_distribution(data):",
"def entropy(probabilities):\n return -(sum([p * log(p, 2) if p > 0 else 0 for p in probabilities]))",
"def boost_probability_for(fixation):\n probabilities = np.zeros(Number_of_locs) #MOD Number_of_locs deleted\n for possible_target_location in xrange(Number_of_locs): #MOD Number_of_locs deleted\n Lib_c.set_target(possible_target_location)\n probabilities[possible_target_location] = integrate.quad(\n Lib_c.function,\n -np.inf, np.inf,\n epsabs=0,\n limit=50,\n full_output=1\n )[0]\n return np.sum(Post_probs * probabilities) #MOD Post_probs deleted",
"def getBeliefDistribution(self):\n # This essentially gives a point to a location for each particle there, then \n # normalizes the point values so they add up to 1.\n dist = util.Counter()\n for part in self.particles: dist[part] += 1\n dist.normalize()\n return dist",
"def predictability(self):\n temp = self.probs\n for n in range(10):\n temp = temp.dot(temp)\n final = temp[0,:]\n #Let's assume that all words have unique initial letters\n probs = map(len, self.words)\n probs = array(probs)\n probs = (probs + self.probs.max(1)-1)/probs\n return sum(final*probs)",
"def calculate_purity(D, k):\n ti = np.array(D.groupby(by=\"cluster\").count()['x1'])\n ci = np.array(D.groupby(by=\"label\").count()['x1'])\n total_observations = 0\n for i in range(k):\n total_observations += min(ti[i], ci[i])\n purity = total_observations / D.shape[0]\n return purity",
"def mixed_prob( means,stds,weights,validt):",
"def probability(distances):\n v = [1.0/(d + 1) for d in distances]\n s = sum(v)\n return [i/s for i in v]",
"def gain(X, y, column):\n prior_entropy = entropy(y)\n total = y.size\n\n values = X[column].unique()\n proportions = X[column].value_counts() / total\n return prior_entropy - sum(proportions[i] * \\\n entropy(y[np.array(X[column]) == i]) for i in values)"
]
| [
"0.6277068",
"0.59250945",
"0.5895558",
"0.58948094",
"0.58774555",
"0.58189124",
"0.577898",
"0.57713586",
"0.57664675",
"0.57664675",
"0.5738593",
"0.573379",
"0.5720934",
"0.5656361",
"0.56534046",
"0.5645686",
"0.5632636",
"0.56128347",
"0.5610137",
"0.560257",
"0.55929995",
"0.5587092",
"0.55797565",
"0.5575732",
"0.5565211",
"0.55620295",
"0.55426395",
"0.5537576",
"0.5525062",
"0.5524342"
]
| 0.6100775 | 1 |
Function for calculating house edge. | def house_edge(player, params):
data = pd.DataFrame(player.history)
results = np.where(
np.logical_and(data["result"] == "win", data["blackjack"]),
"blackjack", data['result']
)
results = pd.Series(np.where(
np.logical_and(data["double_down"], data["result"] == "win"),
"double", results
)).value_counts(normalize=True)
ev = (
params.payout * results.get("win", 0) +
(params.blackjack_payout * results.get("blackjack", 0)) +
(2 * results.get("double", 0)) -
results.get("loss", 0) -
(params.surrender_pct * results.get("surrender", 0))
)
print(
"Because all in-game situations may not occur during a simulation, "
"the expected value calculated should be interpreted as an "
"approximation"
)
return ev | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def house_oriented_TLE(self, grid):\n # write your code here\n # write your code here\n n, m = len(grid), len(grid[0])\n dist = [[0 for _ in range(m)] for _ in range(n)]\n reachable_count = [[0 for _ in range(m)] for _ in range(n)]\n \n house_count = 0\n for i in range(n):\n for j in range(m):\n if grid[i][j] == DataType.HOUSE:\n house_count += 1\n self.bfs_house_oriented(grid, i, j, dist, reachable_count)\n \n # print(f'dist: {dist}')\n # print(f'reachable_count: {reachable_count}')\n \n min_dist = sys.maxsize\n for i in range(n):\n for j in range(m):\n if reachable_count[i][j] == house_count and dist[i][j] < min_dist : # and dist[i][j] != 0: # shouldn't be a HOUSE, should be handled in reachable_count\n min_dist = dist[i][j]\n \n if min_dist == sys.maxsize:\n return -1\n return min_dist",
"def getEdgeDistance():\n '''\n a\n ◿\n b c\n\n hypotenuse\n ◿ adjacent\n opposite\n\n tan(a) = opposite/adjacent\n adjacent * tan(a) = opposite\n '''\n\n # An estimated multiplier to take into account the larger infrared dot\n # observed when further away from as surface - think torch beam onto a\n # wall getting larger as it gets further away, but only the radius\n # (center downwards) being relevant.\n # TODO: Maybe move into infrared sensor code?\n MULTI = 1.2\n\n edgeDistance = BOT_HEIGHT * math.tan(math.radians(getEdgeAngle()))\n edgeDistance *= MULTI\n\n if DEBUG:\n print \"Distance to edge: \", int(round(edgeDistance))\n\n return edgeDistance",
"def edge_ground(X):\n gradient_x = img_conv(X, kernel_sobel_x)\n gradient_y = img_conv(X, kernel_sobel_x.transpose())\n mag = (gradient_x ** 2.0 + gradient_y ** 2.0) ** 0.5\n is_edge = mag > 1.0\n return is_edge.astype('f')",
"def cube_area(edge : number) -> number:\n area = 6*edge*edge\n\n return area",
"def edgeCurl(self):\n if getattr(self, '_edgeCurl', None) is None:\n assert self.dim > 1, \"Edge Curl only programed for 2 or 3D.\"\n\n n = self.vnC # The number of cell centers in each direction\n L = self.edge # Compute lengths of cell edges\n S = self.area # Compute areas of cell faces\n\n # Compute divergence operator on faces\n if self.dim == 2:\n\n D21 = sp.kron(ddx(n[1]), speye(n[0]))\n D12 = sp.kron(speye(n[1]), ddx(n[0]))\n C = sp.hstack((-D21, D12), format=\"csr\")\n self._edgeCurl = C*sdiag(1/S)\n\n elif self.dim == 3:\n\n D32 = kron3(ddx(n[2]), speye(n[1]), speye(n[0]+1))\n D23 = kron3(speye(n[2]), ddx(n[1]), speye(n[0]+1))\n D31 = kron3(ddx(n[2]), speye(n[1]+1), speye(n[0]))\n D13 = kron3(speye(n[2]), speye(n[1]+1), ddx(n[0]))\n D21 = kron3(speye(n[2]+1), ddx(n[1]), speye(n[0]))\n D12 = kron3(speye(n[2]+1), speye(n[1]), ddx(n[0]))\n\n O1 = spzeros(np.shape(D32)[0], np.shape(D31)[1])\n O2 = spzeros(np.shape(D31)[0], np.shape(D32)[1])\n O3 = spzeros(np.shape(D21)[0], np.shape(D13)[1])\n\n C = sp.vstack((sp.hstack((O1, -D32, D23)),\n sp.hstack((D31, O2, -D13)),\n sp.hstack((-D21, D12, O3))), format=\"csr\")\n\n self._edgeCurl = sdiag(1/S)*(C*sdiag(L))\n return self._edgeCurl",
"def edges(self):\n return self.dovetails + self.containments + self.internals",
"def energy_cost(edge):\n return edge_weight(edge) * 1.2",
"def mamajek08_logRpHK_edge():\n return -4.355226174791392",
"def edge_between_neighbors(cell_a, cell_b):\n edge = np.logical_and(dilate_simple(cell_a), dilate_simple(cell_b))\n return edge",
"def edge_num(self,row1,col1,row2,col2):\n\n row = row1\n col = col1\n row_n = row2\n col_n = col2\n \n if row2 < row1 or col2 < col1:\n row = row2\n col = col2\n row_n = row1\n col_n = col1\n \n if not ((row == row_n and col == col_n - 1) or (row == row_n-1 and col == col_n)):\n return -1\n\n if row < 0 or row_n >= self.rows or col < 0 or col_n >= self.cols:\n return -1\n \n node1 = row*self.rows+col+1\n node2 = row_n*self.rows+col_n+1\n edge_number = self.edge2index[(node1,node2)]\n #print \"%s %s: %d\" % (str(node1),str(node2),edge_number)\n \"\"\"\n #THIS DOWN HERE WOULD WORK IF GRAPHILLION NUMBERED EDGES CORRECTLY BUT IT DOESNT\n #print \"(%d,%d) (%d,%d)\" % (row,col,row_n,col_n)\n if row + col < self.cols - 1:\n if col_n == col + 1: \n #print \"(%d,%d) (%d,%d)\" % (row, col, row, col + 1)\n edge_number = self.diags[row + col] + 2 * row\n #edges[edge_number] = 1\n elif row_n == row + 1:\n #print \"(%d,%d) (%d,%d)\" % (row, col, row + 1, col)\n edge_number = self.diags[row + col] + 1 + 2 * row\n #edges[edge_number] = 1\n else:\n col_dist = self.cols - col - 1\n if col_n == col + 1: \n #print \"(%d,%d) (%d,%d)\" % (row, col, row, col + 1)\n edge_number = self.diags[row + col] + 2 * col_dist - 1\n #edges[edge_number] = 1\n elif row_n == row + 1:\n #print \"(%d,%d) (%d,%d)\" % (row, col, row + 1, col)\n edge_number = self.diags[row + col] + 2 * col_dist\n #edges[edge_number] = 1\n \"\"\"\n\n return edge_number",
"def cell_edges(self):",
"def find_inner_edge(wrap, dist=25, prom=0.08): # used to be named as find_cell_corner\n if len(wrap.shape) == 2:\n wrap_g = wrap\n elif len(wrap.shape) == 3:\n wrap_g = cv.cvtColor(wrap, cv.COLOR_BGR2GRAY)\n\n sum_x = np.sum(wrap_g, axis=0)\n sum_x = sum_x / np.max(sum_x)\n peak_x, _ = signal.find_peaks(-sum_x, distance=dist, prominence=prom)\n\n sum_y = np.sum(wrap_g, axis=1)\n sum_y = sum_y / np.max(sum_y)\n peak_y, _ = signal.find_peaks(-sum_y, distance=dist, prominence=prom)\n\n return peak_x, peak_y",
"def get_elevation_along_edge(self, from_, to):\n pass",
"def east_edge(self, x, y, z, width=10, length=10, height=10, details=None, name=\"wall\", mergeshape=None):\r\n global wallnum\r\n n = z + width / 2\r\n s = z - width / 2\r\n e = x + length / 2\r\n w = x - length / 2\r\n\r\n model = Plane(w=width, h=self.ceilingthickness, name=name+str(wallnum))\r\n mergeshape.add(model, e,y+height+self.ceilingthickness / 2,z, rx=0.0,ry=90.0,rz=0.0)\r\n\r\n wallnum += 1",
"def calculate_distance_edge(self):\n mu_star = -np.sqrt(1. - (self.cell_xl / self.x)**2)\n\n if self.mu <= mu_star:\n\n l_edge = (-self.mu * self.x -\n np.sqrt(self.mu**2 * self.x**2 -\n self.x**2 + self.cell_xl**2))\n self.next_cell_index = self.cell_index - 1\n\n else:\n\n l_edge = (-self.mu * self.x +\n np.sqrt(self.mu**2 * self.x**2 -\n self.x**2 + self.cell_xr**2))\n self.next_cell_index = self.cell_index + 1\n\n return l_edge",
"def mamajek08_logRpHK_Ro_edge():\n Ro_edge = 0.31935816876122064\n return Ro_edge",
"def GetBoundaryEdgesHex(self):\n\n p = self.InferPolynomialDegree()\n # DO NOT COMPUTE IF ALREADY COMPUTED\n if isinstance(self.edges,np.ndarray):\n if self.edges.shape[0] > 1:\n # IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION\n if self.edges.shape[1] == 2 and p > 1:\n pass\n else:\n return\n\n\n # FIRST GET BOUNDARY FACES\n if not isinstance(self.faces,np.ndarray):\n self.GetBoundaryFacesHex()\n\n # BUILD A 2D MESH\n tmesh = Mesh()\n tmesh.element_type = \"quad\"\n tmesh.elements = self.faces\n tmesh.nelem = tmesh.elements.shape[0]\n del tmesh.faces\n del tmesh.points\n\n # ALL THE EDGES CORRESPONDING TO THESE BOUNDARY FACES ARE BOUNDARY EDGES\n self.edges = tmesh.GetEdgesQuad()",
"def GetBoundaryEdgesPent(self):\n\n p = self.InferPolynomialDegree()\n\n # DO NOT COMPUTE IF ALREADY COMPUTED\n if isinstance(self.edges,np.ndarray):\n if self.edges.shape[0] > 1:\n # IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION\n if self.edges.shape[1] == 2 and p > 1:\n pass\n else:\n return\n\n node_arranger = np.array([\n [0,1],\n [1,2],\n [2,3],\n [3,4],\n [4,0],\n ])\n\n # GET ALL EDGES FROM THE ELEMENT CONNECTIVITY\n all_edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],\n self.elements[:,node_arranger[2,:]],self.elements[:,node_arranger[3,:]],\n self.elements[:,node_arranger[4,:]]),axis=0).astype(np.uint64)\n\n # GET UNIQUE ROWS\n uniques, idx, inv = unique2d(all_edges,consider_sort=True,order=False,return_index=True,return_inverse=True)\n\n # ROWS THAT APPEAR ONLY ONCE CORRESPOND TO BOUNDARY EDGES\n freqs_inv = itemfreq(inv)\n edges_ext_flags = freqs_inv[freqs_inv[:,1]==1,0]\n # NOT ARRANGED\n self.edges = uniques[edges_ext_flags,:]\n\n # DETERMINE WHICH FACE OF THE ELEMENT THEY ARE\n boundary_edge_to_element = np.zeros((edges_ext_flags.shape[0],2),dtype=np.int64)\n\n # FURTHER RE-ARRANGEMENT / ARANGE THE NODES BASED ON THE ORDER THEY APPEAR\n # IN ELEMENT CONNECTIVITY\n # THIS STEP IS NOT NECESSARY INDEED - ITS JUST FOR RE-ARANGMENT OF EDGES\n all_edges_in_edges = in2d(all_edges,self.edges,consider_sort=True)\n all_edges_in_edges = np.where(all_edges_in_edges==True)[0]\n\n boundary_edge_to_element[:,0] = all_edges_in_edges % self.elements.shape[0]\n boundary_edge_to_element[:,1] = all_edges_in_edges // self.elements.shape[0]\n\n # ARRANGE FOR ANY ORDER OF BASES/ELEMENTS AND ASSIGN DATA MEMBERS\n self.edges = self.elements[boundary_edge_to_element[:,0][:,None],node_arranger[boundary_edge_to_element[:,1],:]]\n self.edges = self.edges.astype(np.uint64)\n self.boundary_edge_to_element = boundary_edge_to_element\n\n return self.edges",
"def solve_for_edge_dimensionality(n):\n return int(round(np.sqrt(2 * n + 2.25) - 1.5))",
"def edgeDetection(image):\n output = np.array(image)\n #ksize determines details\n median_filtimg2 = cv2.medianBlur(output,5)\n lowThreshold = 100\n edges = cv2.Canny(median_filtimg2,lowThreshold,lowThreshold*2)\n\n #kernel size determines the bold\n kernel = np.ones((1,1), np.uint8)\n dialateimg = cv2.dilate(edges, kernel)\n #Inverts every bit of an array.\n edges_inv = cv2.bitwise_not(dialateimg)\n ret,thresh = cv2.threshold(edges_inv,127,255, 0)\n contours, hierarchy = cv2.findContours(thresh,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)\n img_contours = cv2.drawContours(thresh, contours, -1, (0,255,0), 3)\n return edges_inv",
"def edge(self, which='inner'):\n def edge_shape(site):\n def in_shape(x): return self.shape(Site(site.family, site.tag + x))\n sites = [in_shape(x) for x in self._directions]\n if which == 'inner':\n return self.shape(site) and not all(sites)\n elif which == 'outer':\n return not self.shape(site) and any(sites)\n return Shape(edge_shape)",
"def generate_edges(self):\n for i, n in enumerate(self.points):\n for i1, p in enumerate(self.points[i+1:]):\n d = 0\n differences = 0\n for x in range(3):\n d += math.fabs(n.xyz[x] - p.xyz[x])\n if math.fabs(n.xyz[x] - p.xyz[x]) != 0:\n differences += 1\n\n if differences > 1:\n continue\n\n if d == -1 * self.side_length or d == self.side_length:\n self.edges.append([i, i1 + i + 1])",
"def detect_edges_hed(self):\n inp = cv2.dnn.blobFromImage(self.input_image, scalefactor=1.0,\n size=(self.WIDTH, self.HEIGHT),\n mean=(104.00698793, 116.66876762, 122.67891434),\n swapRB=False, crop=False)\n self.net.setInput(inp)\n out = self.net.forward()\n hed = out[0, 0]\n hed = (255 * hed).astype(\"uint8\")\n hed = cv2.fastNlMeansDenoising(hed, SMOOTH_FACTOR, SMOOTH_FACTOR, 21)\n save_image_opencv(hed, 'hed')\n hed_skel = skeletonize(hed).astype(\"uint8\")\n save_image_opencv(hed_skel, 'hed_skel')\n thresheld_hed = cv2.threshold(hed_skel, 0, 255,\n cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n save_image_opencv(thresheld_hed, 'thresheld_hed')\n return hed, thresheld_hed",
"def findEdge(self):\n edgeObjectList = list()\n for gridpoint in self.points:\n neighbors = self.analyzeData.iterateDiagonal(gridpoint.x,\n gridpoint.y)\n nonEqualNeighborList = list()\n equalNeighborList = list()\n for _x, _y, elevation in neighbors:\n if elevation != self.elevation:\n nonEqualNeighborList.append(GridPoint(_x, _y, elevation))\n elif elevation == self.elevation:\n equalNeighborList.append(GridPoint(_x, _y, elevation))\n\n if nonEqualNeighborList:\n edgeObjectList.append(EdgePoint(gridpoint.x,\n gridpoint.y,\n self.elevation,\n nonEqualNeighborList,\n equalNeighborList))\n return GridPointContainer(edgeObjectList)",
"def row_to_edge(row):\r\n return float(row[\"Dem\"]) - float(row[\"Rep\"])",
"def houses(self):\n num = 0\n points = 0\n # TODO: add pattern matching\n if \"s\" in self.__as_str:\n num += 1\n if \"f\" in self.__as_str:\n num += 1\n if \"1\" in self.__as_str or \"2\" in self.__as_str or \"3\" in self.__as_str or \"4\" in self.__as_str:\n num += 1\n if \"o\" in self.__as_str:\n num += 1\n if \"p\" in self.__as_str:\n num += 1\n for i in range(4):\n for j in range(4):\n if self.as_list[i][j] == 'h':\n if 'f' in self.neighbours(i, j):\n points += 1\n else:\n points += num\n return points",
"def cw_face_edges(self,face):\n\n l0 = self.region_link[face]\n if face == self.left_region[l0]:\n l0 = (l0[1], l0[0])\n l = l0\n\n traversing = True\n edges = []\n while traversing:\n edges.append(l)\n r = self.right_region[l]\n if r == face:\n l = self.succ_right[l]\n else:\n l = self.succ_left[l]\n if l == l0:\n traversing = False\n return edges",
"def generate_edge(self, img):\n rgb_im = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n edgearray = self.edge_gen.detectEdges(img/255.0)\n orientationarray = self.edge_gen.computeOrientation(edgearray)\n suppressed_edgearray = self.edge_gen.edgesNms(edgearray, orientationarray)\n return edgearray, orientationarray, suppressed_edgearray",
"def get_band_edges():\n # Vacuum level energy from LOCPOT.\n locpot = Locpot.from_file('LOCPOT')\n evac = max(locpot.get_average_along_axis(2))\n\n vasprun = Vasprun('vasprun.xml')\n bs = vasprun.get_band_structure()\n eigenvals = vasprun.eigenvalues\n efermi = vasprun.efermi - evac\n\n if bs.is_metal():\n edges = {'up_cbm': None, 'up_vbm': None, 'dn_cbm': None, 'dn_vbm': None,\n 'efermi': efermi}\n\n elif bs.is_spin_polarized:\n up_cbm = min(\n [min([e[0] for e in eigenvals[Spin.up][i] if not e[1]])\n for i in range(len(eigenvals[Spin.up]))]) - evac\n up_vbm = max(\n [max([e[0] for e in eigenvals[Spin.up][i] if e[1]])\n for i in range(len(eigenvals[Spin.up]))]) - evac\n dn_cbm = min(\n [min([e[0] for e in eigenvals[Spin.down][i] if not e[1]])\n for i in range(len(eigenvals[Spin.down]))]) - evac\n dn_vbm = max(\n [max([e[0] for e in eigenvals[Spin.down][i] if e[1]])\n for i in range(len(eigenvals[Spin.down]))]) - evac\n edges = {'up_cbm': up_cbm, 'up_vbm': up_vbm, 'dn_cbm': dn_cbm,\n 'dn_vbm': dn_vbm, 'efermi': efermi}\n\n else:\n cbm = bs.get_cbm()['energy'] - evac\n vbm = bs.get_vbm()['energy'] - evac\n edges = {'up_cbm': cbm, 'up_vbm': vbm, 'dn_cbm': cbm, 'dn_vbm': vbm,\n 'efermi': efermi}\n\n return edges",
"def row_to_edge(row):\n return float(row[\"Dem\"]) - float(row[\"Rep\"])"
]
| [
"0.6256715",
"0.62175244",
"0.6126858",
"0.6026825",
"0.6026479",
"0.59540725",
"0.58980346",
"0.5889512",
"0.58494496",
"0.58221847",
"0.5807132",
"0.57258004",
"0.57251084",
"0.5721338",
"0.5703475",
"0.57021576",
"0.5696279",
"0.5675031",
"0.5668957",
"0.5659139",
"0.56333834",
"0.56331366",
"0.5632824",
"0.5632527",
"0.5612605",
"0.55861145",
"0.55751085",
"0.5574331",
"0.55641615",
"0.55631685"
]
| 0.6363273 | 0 |
Create the producer, check for provided serializable message and publish it to the topic | def publish_message(self, topic, message):
def delivery_report(err, msg):
""" Called once for each message produced to indicate delivery result.
Triggered by poll() or flush(). """
if err is not None:
print('Message delivery failed: {}'.format(err))
else:
print('Message delivered to {} [{}]'.format(msg.topic(), msg.partition()))
# Trigger any available delivery report callbacks from previous produce() calls
self.producer.poll(0)
# Asynchronously produce a message, the delivery report callback
# will be triggered from poll() above, or flush() below, when the message has
# been successfully delivered or failed permanently.
value_to_publish = message
if self.handle_json_message_data:
if type(message) not in (dict, list):
raise MessageValueException("Your message should be json serializable!")
value_to_publish = json.dumps(value_to_publish)
self.producer.produce(topic, value_to_publish.encode('utf8'), callback=delivery_report)
# Wait for any outstanding messages to be delivered and delivery report
# callbacks to be triggered.
self.producer.flush() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def publish_message(producer_instance, topic_name, key, value):\n key_serializer = repr(key).encode()\n value_serializer = repr(value).encode()\n\n producer_instance.send(topic_name, key=key_serializer, value=value_serializer)\n producer_instance.flush()\n print('Message published successfully.')",
"def create(self):\n topic = self.__conn__.create_topic(self.__topic__)\n return topic.get_producer(*self.__args__, **self.__kargs__)",
"def push(self, message, topic=None):\n topic = topic or self._topic\n\n try:\n compiled = serialize(message) if self._serializer is 'protobuf' else self.compile_avro(message)\n self.confluent_producer.produce(topic,\n key=str(random.randint(1, 100)),\n value=compiled)\n except (ValueError, AttributeError):\n print('Failed to compile avro message')\n except KafkaException:\n print('Failed to push kafka message')\n except Exception:\n print('Unexpected error while pushing kafka message')",
"def produce(self, message):\n self.producer.send(self.topic, message)",
"def producer(self, topic, msg, e=None):\n producer = KafkaProducer(bootstrap_servers=['HOST_IP', 'HOST_IP', 'HOST_IP']\n ,api_version=(2, 2, 1),security_protocol='SSL',\n ssl_check_hostname=True,\n ssl_cafile='/home/oulu/certs/ca-cert',\n ssl_certfile='/home/oulu/certs/cutler-p3-c1-00.crt',\n ssl_keyfile='/home/oulu/certs/cutler-p3-c1-00.key')\n\n msg_b = str.encode(msg)\n producer.send(topic, msg_b).get(timeout=30)\n\n if (e):\n logging.exception('exception happened')",
"def producer():\n\n connection = pika.BlockingConnection(pika.ConnectionParameters('rabbit'))\n channel = connection.channel()\n\n channel.queue_declare(queue=QUEUE_NAME)\n\n # Create two unique device ids to provide more example data\n timestamp = arrow.now().timestamp\n device_name = b'A' if timestamp % 2 == 0 else b'B'\n '''\n This creates the same hash value each time so we can use the Raspberry Pi\n serial number to create a unique ID for each device\n '''\n device_id = hashlib.sha1(device_name).hexdigest()\n\n # Currently a python dict\n data = {\n 'device_id': device_id,\n 'timestamp': timestamp,\n 'data': {\n 'key': 'value'\n }\n }\n\n channel.basic_publish(exchange='',\n routing_key=QUEUE_NAME,\n body=json.dumps(data)) # Encode as a JSON string\n msg = f' [x] Sent {data}'\n print(msg)\n logging.info(msg)\n connection.close()",
"def kafka_publish_message(self, message):\n self.kf_sender = self.kf_producer.send(self.kf_topic, value=message.encode('utf-8'));",
"def publish(self, message: str) -> None:",
"def publish(self, topic, msg):\n formatted_msg = json.dumps(msg)\n self.client.publish(topic, formatted_msg) # json converting cause of mqtt's data transfer limit.",
"def sendMessage(topic, data, key, producer):\n producer.poll(0)\n producer.produce(topic, data.encode('utf-8'), key, callback=delivery_report)\n producer.flush()",
"def test_publish(self):\n target_arn = 'testing'\n supercuboid_key = 'acd123'\n message_id = '123456'\n receipt_handle = 'a1b2c3d4'\n message = serializer.encodeIngestMessage(supercuboid_key, message_id, receipt_handle)\n self.sns.publish(self.topic_arn, message)\n message = self.sns.subscribe(self.topic_arn)",
"def publish( self, topic, data, qos = 1, retain = False ):\n logging.info( \"Publishing to topic %s\" %topic )\n self.client.publish( topic, data, qos = qos, retain = retain )",
"def publish(topic, message):\n if DEBUG:\n print(\"Publish: '\" + message + \"' (topic: '\" + topic + \"')\")\n DATA[\"client\"].publish(topic, message)",
"def _publish(self, topic_name, message):\n msg = {\n 'op': 'publish',\n 'topic': topic_name,\n 'msg': message\n }\n json_msg = json.dumps(msg)\n self.ws.send(json_msg)",
"def publish(self, topic, msg):\n\t\tself.topic = topic\n\t\tself.msg = msg \n\t\tself.client.publish(self.topic, self.msg)",
"def message_sender(m):\n my_producer = KafkaProducer(\n bootstrap_servers='localhost:9092',\n value_serializer=lambda v: json.dumps(v).encode('utf-8'))\n my_producer.send(cfg.end_topic,m)\n return m",
"def message_sender(m):\n my_producer = KafkaProducer(\n bootstrap_servers='localhost:9092',\n value_serializer=lambda v: json.dumps(v).encode('utf-8'))\n my_producer.send(cfg.end_topic,m)\n return m",
"def publish_to_simulation(self, topic, message, **kwargs):\n pass",
"def publish(self, message):\n logger.info(\"Publishing to topic [{0}]: {1}\".format(self._topic_name, message))\n self._executor.send(json.dumps({\n 'op': 'publish',\n 'id': 'publish:{0}:{1}'.format(self._topic_name, self._id),\n 'topic': self._topic_name,\n 'msg': message\n }))",
"def test_producer(self):\n try:\n producer = Producer()\n producer.send()\n except (Exception) as error:\n logging.error(\"\\n\\nProducer's connection to\"\n \"kafka failed with error: {}\\n\\n\".format(error))\n assert(False)",
"def publish(self, message: model.MQTTMessage):\n self.client.publish(message.topic, payload=message.get_payload())",
"def __init__(self, topic, message_type): \n self.topic = topic\n \n # find message type\n package, message = message_type.split('/')\n m = load_pkg_module(package)\n\n m2 = getattr(m, 'msg')\n self.message = getattr(m2, message)\n self.publisher = rospy.Publisher(topic, self.message)",
"def create_producer(self, stream, isCollectionStream=False, local=False, producer_name=None,\n initial_sequence_id=None, send_timeout_millis=30000,\n compression_type=COMPRESSION_TYPES.NONE,\n max_pending_messages=1000,\n batching_enabled=False,\n batching_max_messages=1000,\n batching_max_allowed_size_in_bytes=131072,\n batching_max_publish_delay_ms=10,\n message_routing_mode=ROUTING_MODE.ROUND_ROBIN_PARTITION\n ):\n if isCollectionStream is False:\n if local is True:\n type_constant = constants.STREAM_LOCAL_NS_PREFIX\n elif local is False:\n type_constant = constants.STREAM_GLOBAL_NS_PREFIX\n\n stream = type_constant.replace(\".\", \"\")+\"s.\"+stream\n elif isCollectionStream is False:\n stream = stream\n print(\"Calling has steram from create_producer: \", stream, local)\n flag = self.fabric.has_stream(stream, local=local, isCollectionStream=isCollectionStream)\n if flag:\n namespace = type_constant + self.fabric_name\n topic = \"producer/persistent/%s/%s/%s\" % (self.tenant_name, namespace,\n stream)\n params = {\n \"producerName\":producer_name,\n \"initialSequenceId\":initial_sequence_id,\n \"sendTimeoutMillis\":send_timeout_millis,\n \"compressionType\":compression_type,\n \"maxPendingMessages\":max_pending_messages,\n \"batchingEnabled\":batching_enabled,\n \"batchingMaxMessages\":batching_max_messages,\n \"batchingMaxPublishDelay\":batching_max_publish_delay_ms,\n \"messageRoutingMode\":message_routing_mode\n }\n\n params = {k: v for k, v in params.items() if v is not None}\n url = self._ws_url + topic \n print(url)\n return websocket.create_connection(url, header={'Authorization' : self.header['Authorization']}, class_=Base64Socket)\n\n raise ex.StreamProducerError(\n \"No stream present with name:\" + stream +\n \". Please create a stream and then stream producer\"\n )",
"def _create_pub(name, rostype, *args, **kwargs):\n # counting publisher instance per topic name\n if name in TopicBack.pub_instance_count.keys():\n TopicBack.pub_instance_count[name] += 1\n else:\n TopicBack.pub_instance_count[name] = 1\n\n return rospy.Publisher(name, rostype, *args, **kwargs)",
"def publish(self, node, topic, data={}, on_publish=None, on_response=None):\n pass",
"async def publish(self, topic: str, *args: aiowamp.WAMPType,\n kwargs: aiowamp.WAMPDict = None,\n acknowledge: bool = None,\n blackwhitelist: aiowamp.BlackWhiteList = None,\n exclude_me: bool = None,\n disclose_me: bool = None,\n resource_key: str = None,\n options: aiowamp.WAMPDict = None) -> None:\n ...",
"def __init__(self,\n hostname,\n port,\n topic_name,\n level=logging.NOTSET,\n _producer_class=None):\n\n super().__init__(level=level)\n\n if _producer_class is None:\n _producer_class = kafka.KafkaProducer\n\n class KafkaProducer(_producer_class):\n isend = functools.partialmethod(func=kafka.KafkaProducer.send,\n topic=topic_name)\n\n self._producer = KafkaProducer(\n bootstrap_servers=[hostname + ':' + str(port)],\n value_serializer=self._serialize_value)",
"def publish(self, message: None):\n response = self.client.publish(TopicArn=self.params['topic_arn'], Message=message)\n return response",
"def test_topic_type(self):\n self.failureResultOf(self.producer.send_messages(1234, msgs=[b\"\"]), TypeError)",
"def create_producer(self, topic_id: str) -> Producer:\n backend = None\n if self.vendor == 'kafka':\n backend = KafkaClient(topic_id, self.configs['kafka_servers'])\n else:\n project_id = os.getenv(\"GOOGLE_CLOUD_PROJECT\")\n subscription_id = os.getenv(\"GOOGLE_PUBSUB_SUB_ID\")\n backend = GooglePubSubClient(project_id=project_id, topic=topic_id,\n subscription_id=subscription_id, gcp_configs=self.configs)\n\n return Producer(backend)"
]
| [
"0.7313503",
"0.6905501",
"0.6878873",
"0.68761635",
"0.68431354",
"0.6827728",
"0.68188864",
"0.66390926",
"0.65988934",
"0.6591432",
"0.65635467",
"0.653336",
"0.65256566",
"0.6479786",
"0.64766586",
"0.64306474",
"0.64306474",
"0.6413492",
"0.6377593",
"0.63338226",
"0.62791777",
"0.62733454",
"0.6254986",
"0.62121695",
"0.61907995",
"0.61764604",
"0.6168284",
"0.6166596",
"0.61418295",
"0.6138421"
]
| 0.6998185 | 1 |
Like `str.format`, but takes tuples with a thread id and text instead. Return a `Message` object, with the formatted string and relevant mentions. >>> Message.format_mentions("Hey {!r}! My name is {}", ("1234", "Peter"), ("4321", "Michael")) | def format_mentions(cls, text, *args, **kwargs):
result = ""
mentions = list()
offset = 0
f = Formatter()
field_names = [field_name[1] for field_name in f.parse(text)]
automatic = "" in field_names
i = 0
for (literal_text, field_name, format_spec, conversion) in f.parse(text):
offset += len(literal_text)
result += literal_text
if field_name is None:
continue
if field_name == "":
field_name = str(i)
i += 1
elif automatic and field_name.isdigit():
raise ValueError(
"cannot switch from automatic field numbering to manual field specification"
)
thread_id, name = f.get_field(field_name, args, kwargs)[0]
if format_spec:
name = f.format_field(name, format_spec)
if conversion:
name = f.convert_field(name, conversion)
result += name
mentions.append(
Mention(thread_id=thread_id, offset=offset, length=len(name))
)
offset += len(name)
message = cls(text=result, mentions=mentions)
return message | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _format_msg(self, format_str, *args):\n if not args:\n format_str = six.moves.urllib.parse.unquote(format_str)\n return \"{} - - [{}] {}\\n\".format(\n self.client_address[0],\n self.log_date_time_string(),\n format_str % args\n )",
"def _format_msg(self, format_str, *args):\r\n return u\"{0} - - [{1}] {2}\\n\".format(\r\n self.client_address[0],\r\n self.log_date_time_string(),\r\n format_str % args\r\n )",
"def format_log_message(message, transaction=None, *args):\n if transaction or args:\n format_args = [transaction]\n format_args.extend(args)\n return message % tuple(format_args)\n else:\n return message",
"def _make_formatter(*args, **kwargs):\n # pylint: disable = no-else-return\n\n assert not(args and kwargs)\n\n if args:\n # tuples are given for the whole command string but applied per token.\n # We need to supply only the tuples which are needed for the current\n # token.\n args = list(args[::-1])\n pcents = _re.compile(r'%[^%]').findall\n\n def formatter(value):\n \"\"\" Tuple formatter \"\"\"\n count = len(pcents(value))\n torepl = []\n while len(torepl) < count:\n torepl.append(args.pop())\n return value % tuple(torepl)\n return formatter\n\n elif kwargs:\n return lambda x: x % kwargs\n\n return lambda x: x",
"def get_mentions(db, addressbook, thread_id, versioninfo):\n mentions = {}\n\n if versioninfo.are_mentions_supported():\n query = db.execute(\n \"SELECT _id, message_id, recipient_id, range_start, range_length \"\n \"FROM mention WHERE thread_id=?\",\n (thread_id,),\n )\n mentions_data = query.fetchall()\n\n for (\n _id,\n message_id,\n recipient_id,\n range_start,\n range_length,\n ) in mentions_data:\n name = addressbook.get_recipient_by_address(str(recipient_id)).name\n mention = Mention(\n mention_id=_id,\n name=name,\n length=range_length,\n )\n if not message_id in mentions.keys():\n mentions[message_id] = {}\n mentions[message_id][range_start] = mention\n\n return mentions",
"def sformatf(cls, msg, *args):\n #formats = {\"%t\": \"%d\", \"%0t\": \"%0d\"}\n #for s in formats:\n # msg = msg.replace(s, formats[s])\n #return sformatf(msg, *args)\n # TODO substitute old types %s/%d etc with {}\n #new_msg = cls.STR_RE.sub(r'{:\\1}', msg)\n #print(\"new_msg is \" + new_msg)\n for s in cls.formats:\n if s == \"%h\" or s == \"%0h\":\n msg = msg.replace(s, \"{:X}\")\n else:\n msg = msg.replace(s, \"{}\")\n return msg.format(*args)",
"def formatter(in_tuple):\n length = len(in_tuple)\n form_string = (\"the {} numbers are: \" + \", \".join([\"{}\"]*length)).format(length, *in_tuple)\n return form_string.format(in_tuple)",
"def format_msg(self, text: str, variables=None) -> str:\n formatter = VariableFormatter(self, variables)\n text = formatter.format(text)\n return text",
"def send(self, text, thread, mentions=None, reply=None):\n # TODO: more settings (in kwargs), like attachments\n if thread is None:\n raise Exception('Could not send message: `thread` is None')\n message = None\n if mentions is not None:\n message = models.Message.formatMentions(text, *mentions)\n if message is None:\n message = models.Message(text=text)\n if reply is not None:\n message.reply_to_id = reply\n log.info('Sending a message to thread %s', repr(thread))\n return self.fbchat_client.send(\n message,\n thread_id=thread.id_,\n thread_type=thread.type_\n )",
"def formatter(in_tuple):\n in_tuple_length = len(in_tuple)\n form_string = \"the {} numbers are: \".format(in_tuple_length)\n form_string += ', '.join(['{:d}'] * in_tuple_length)\n\n return form_string.format(*in_tuple)",
"def _message(message):\n str_thread = \"Thread-%d\" % threading.current_thread().ident\n return \"%s\\t%s\" % (str_thread, message)",
"def conversation_formatter(text_fmt, prompt, **kwargs):\n assert prompt.startswith('Hi '), 'Prompt must start with \"Hi \".'\n name = sent_tokenize(prompt)[0].replace('Hi ', '').rstrip('.')\n # Still trying to think of a good backward-compatible way to pass img_path\n # on to caller. Thinking it may be simplest to either change logic to\n # always download to some constant temp filename, or to just make GUI load\n # the most recently created/changed file in the temp dir.\n summary, *_ = wiki_data(name, **kwargs)\n return text_fmt.format(name=name, summary=summary, message=prompt)",
"def fmt(message):\n return \"---------- \" + message + \" ----------\"",
"def formatter(t: tuple):\n s = 'The {} numbers are: ' + '{}, '*(len(t)-1) + '{}'\n return s.format(len(t),*t)",
"def format_string_3(*tmp_tuple):\n\n total_items = len(tmp_tuple)\n formated_string = \"the {} numbers are: \"\n formated_string += \", \".join([\"{}\"] * total_items)\n result = formated_string.format(total_items, *tmp_tuple)\n\n print(result)\n return result",
"def format_message(self, message):\n return \"%s at %s\" % (\n message[0], time.asctime(time.localtime(message[1])))",
"def formatMesg(message, levelno, frame, fmt):\n\n info = inspect.getframeinfo(frame[0])\n\n asctime = datetime.datetime.now().strftime('%F %H:%M:%S,%f')[:-3]\n created = time.time()\n filename = os.path.basename(info.filename) or ''\n funcName = info.function or ''\n levelname = logging.getLevelName(levelno)\n lineno = info.lineno\n module = '' # not implemented\n msec = datetime.datetime.now().strftime('%f')[:-3]\n name = '' # not implemented\n pathname = info.filename\n process = '' # not implemented\n processName = '' # not implemented\n relativeCreated = '' # not implemented\n thread = '' # not implemented\n threadName = '' # not implemented\n\n return fmt % {'asctime': asctime, 'created': created, 'filename': filename, 'funcName': funcName,\n 'levelname': levelname, 'levelno': levelno, 'lineno': lineno, 'message': message,\n 'module': module, 'msec': msec, 'name': name, 'pathname': pathname, 'process': process,\n 'processName': processName, 'relativeCreated': relativeCreated, 'thread': thread,\n 'threadName': threadName}",
"def _process_str(self, fmt, *args, **kwargs):\n log_str = fmt\n if len(args) > 0 or len(kwargs) > 0:\n log_str = fmt.format(*args, **kwargs)\n\n return log_str",
"def _parse_text_as_direct_mention(\n message_text: str,\n ) -> tuple[Optional[str], Optional[str]]:\n matches = re.search(MENTION_REGEX, message_text)\n # the first group contains the username, the second group contains the remaining message\n if matches:\n return matches.group(1), matches.group(2).strip()\n return None, None",
"def _FormatMessage(self, message):\n script_name = os.path.basename(sys.argv[0])\n timestamp = datetime.now().isoformat()\n formatted_message = '[{0:s}] {1:s}: {2:s} - {3:s}\\n'.format(\n timestamp, script_name, self._sender, message)\n return formatted_message",
"def format_template(template, *args):\n return textwrap.dedent(template % args).strip()",
"def parse_direct_mention(message_text):\n matches = re.search(MENTION_REGEX, message_text)\n # the first group contains the username, the second group contains the remaining message\n return (matches.group(1), matches.group(2).strip()) if matches else (None, message_text.strip())",
"def format(self, tweet):\n return self._format_string.format(tweet=tweet)",
"def format_message(rel_dict, template): # noqa: E306\n # TODO add length truncation; append full body onto deque buffer at\n # _user_targets[\"backspool\"]. User can retrieve (pop) later with:\n # /last <context>; or just use CBuffer to handle this\n #\n msgfmt = template[\"format\"]\n msgfmt_args = defaultdict(str, **rel_dict)\n #\n if \"{focus}\" in msgfmt:\n msgfmt_args[\"focus\"] = \"\"\n if (hasattr(self, \"_session\") and self._session[\"focus\"] and\n self._session[\"focus\"] == msgfmt_args[\"context\"]):\n from .ootil import unescape_unicode_char\n try:\n focus = unescape_unicode_char(template[\"focus_char\"])\n except ValueError:\n self.print_traceback()\n else:\n msgfmt_args[\"focus\"] = focus\n #\n if \"Action\" in name:\n msgfmt_args[\"body\"] = \" \".join((\"*\", msgfmt_args[\"nick\"],\n msgfmt_args[\"body\"]))\n elif \"CTCP\" in name:\n msgfmt_args[\"body\"] = msgfmt_args[\"body\"].replace(\n \"ACTION\", f\"*{msgfmt_args['nick']}\", 1\n )\n return msgfmt.format(**msgfmt_args)",
"def format(self, message):",
"def format_multiline_message(self, *args, color=None, start='', multiline=''):\n if not self.pretty:\n return '\\n'.join([str(x) for x in args])\n start = self.color(start, fg=color, style='bright')\n if len(args) > 0:\n first = True\n new_args = []\n ret = None\n for arg in args:\n if isinstance(arg, LogStr):\n new_args.append(arg)\n continue\n assert isinstance(arg, str)\n new_args.extend(arg.split('\\n'))\n for arg in new_args:\n if first:\n ret = start + arg\n first = False\n continue\n ret += '\\n'\n ret += self.color(multiline, fg=color, style='bright') + arg\n return ret\n return start",
"def format(self, *args, **kwargs) -> String:\n pass",
"def t(cls, partial_msg_id: str, args: dict = None) -> str:\n return lang.t(cls.resolve_msg_id(partial_msg_id), args)",
"def format_message(self, login: str, message: dict) -> str:\n login_from = login if message['incoming'] else self.username\n login_to = self.username if message['incoming'] else login\n return f'{login_from} -> {login_to}:\\n{message[\"text\"]}'",
"def parse_direct_mention(message_text):\n matches = re.search(MENTION_REGEX, message_text)\n # the first group contains the username, the second group contains\n # the remaining message\n return (matches.group(1),\n matches.group(2).strip()) if matches else (None, None)"
]
| [
"0.5809069",
"0.57020247",
"0.5651881",
"0.56004184",
"0.5461424",
"0.5386012",
"0.5381233",
"0.5335397",
"0.5301617",
"0.52856266",
"0.52818877",
"0.52577573",
"0.5242129",
"0.5230628",
"0.5182866",
"0.5172696",
"0.5172185",
"0.5059956",
"0.50580597",
"0.5057411",
"0.5011646",
"0.4968553",
"0.4958657",
"0.49478698",
"0.4931934",
"0.49182197",
"0.48882765",
"0.48783517",
"0.4863739",
"0.48577493"
]
| 0.75185233 | 0 |
Response when someone shout out to Bob | def shouting(self):
return 'Whoa, chill out!' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sendShout(self, user, message):\n pass",
"def whoelse(sock, request):\n users = set()\n for s in connections.values():\n users.add(s['user'])\n response = { \"echo\": \"\\n\".join(users) }\n sock['conn'].sendall(json.dumps(response))",
"def say_hello_to_boy(friend_name):\r\n card_title = \"Greeting Message\"\r\n greeting_string = \"Hi \"+friend_name+\"! Welcome to Mayank's adobe. This is unusual to have a guy in mikki's room. Anyway, I welcome you here.\"# Have you brought him anything to eat?\"\r\n should_end_session = True\r\n session_attributes = { \r\n \"speech_output\": greeting_string,\r\n \"friend_name\" : friend_name\r\n \r\n }\r\n\r\n return build_response(session_attributes, build_speechlet_response(card_title, greeting_string, \"Ask me to say hello...\", should_end_session))",
"def on_me_joined(self, raw_msg, **kwargs):",
"def say_hello(bot, msg):\n bot.reply(\"Hi @{}!\".format(msg[u'user_name']))",
"def on_whoisuser(self, raw_msg, nick, user, host, **kwargs):",
"def so(self, message):\n # TODO: Add a command to be able to set the shout_out_str from within twitch chat, or at least somewhere\n user = self.ts.get_user(message)\n me = SOCKET_ARGS['channel']\n msg_list = self.ts.get_human_readable_message(message).split(' ')\n if len(msg_list) > 1:\n channel = msg_list[1]\n url = 'https://api.twitch.tv/kraken/channels/{channel}'.format(channel=channel.lower())\n for attempt in range(5):\n try:\n r = requests.get(url)\n r.raise_for_status()\n game = r.json()['game']\n channel_url = r.json()['url']\n shout_out_str = 'Friends, {channel} is worth a follow. They last played {game}. If that sounds appealing to you, check out {channel} at {url}! Tell \\'em {I} sent you!'.format(\n channel=channel, game=game, url=channel_url, I=me)\n self._add_to_chat_queue(shout_out_str)\n except requests.exceptions.HTTPError:\n self._add_to_chat_queue('Hey {}, that\\'s not a real streamer!'.format(user))\n break\n except ValueError:\n continue\n else:\n break\n else:\n self._add_to_chat_queue(\n \"Sorry, there was a problem talking to the twitch api. Maybe wait a bit and retry your command?\")\n else:\n self._add_to_chat_queue('Sorry {}, you need to specify a caster to shout out.'.format(user))",
"def say(session_id, context, msg):\n pass",
"def say_hi(user_mention):\n response_template = random.choice(['Sup, {mention}...',\n 'Yo!',\n 'Hola {mention}',\n 'Bonjour!'])\n return response_template.format(mention=user_mention)",
"def say_hello_to_girl(friend_name):\r\n card_title = \"Greeting Message\"\r\n greeting_string = \"Hi \"+friend_name+\"! Welcome to Mayank's adobe. He has told me about you. \"\\\r\n \"You seem to be more beautiful then he has mentioned. \"\\\r\n \"Do you want to listen anything specific about him. \"\\\r\n \"Say Joke or Secret or Songs. You can also say food or quote or game.\"\r\n\r\n should_end_session = False\r\n session_attributes = { \r\n \"speech_output\": greeting_string,\r\n \"friend_name\" : friend_name\r\n }\r\n\r\n return build_response(session_attributes, build_speechlet_response(card_title, greeting_string, \"Ask me to say hello...\", should_end_session))",
"def whoami(bot, event, *args):\n\n if bot.memory.exists(['user_data', event.user_id.chat_id, \"nickname\"]):\n try:\n fullname = '{0} ({1})'.format(event.user.full_name.split(' ', 1)[0]\n , bot.get_memory_suboption(event.user_id.chat_id, 'nickname'))\n except TypeError:\n fullname = event.user.full_name\n else:\n fullname = event.user.full_name\n\n yield from bot.coro_send_message(event.conv, _(\"<b><pre>{}</pre></b>, chat_id = <i>{}</i>\").format(fullname, event.user.id_.chat_id))",
"def ShouldI(sc, event):\n options = ['Yes, you should!',\n 'I think that would be best.',\n 'Hrmm... yes!',\n 'Signs point to yes!',\n 'That\\'s the best idea I\\'ve ever heard!',\n 'D\\'uh! Of course!',\n 'Wow! What a great idea!',\n 'What an incredible idea! You\\'re a genius!',\n 'Yes, yes! A thousand times, yes!',\n 'Of course you should!',\n 'I\\'ve never heard of a better idea!',\n 'Why didn\\'t I think of that? You\\'re brilliant!']\n response = random.choice(options)\n sc.api_call('chat.postMessage', as_user='true',\n channel=event['channel'], text=response)",
"async def treatme(self, ctx):\n await ctx.send(await self.cure_user(ctx, ctx.author))",
"def command_who(self, bot, update):\n\n messages = [\n 'Myles Braithwaite lives in Toronto where he runs a small '\n 'consluting company called [Monkey in your Soul]'\n '(https://monkeyinyoursoul.com/) (you should hire him because '\n \"he's awesome).\",\n 'You should follow him on [Twitter](https://twitter.com/mylesb) '\n 'or [Instagram](https://instagram.com/myles).',\n 'You can find his programming stuff on [GitHub]'\n '(https://github.com/myles) or [CodePen]'\n '(http://codepen.io/mylesb/).'\n ]\n\n self.send_messages(bot, update, messages)",
"def hello_monkey():\n\n resp = MessagingResponse().message(\"Hey hey you logged a brag! Nice!!\")\n return str(resp)",
"async def say(self, ctx, tosay):\n author = ctx.message.author.name\n await ctx.send('_**{}** says:_\\n{}'.format(author, tosay))",
"def on_welcome(self, raw_msg, server, port, nickname, **kwargs):",
"async def hi_yuki(ctx):\n await ctx.send('Hi! I\\'m {0.user}!'.format(bot))",
"def whisper(sock, user, msg):\r\n chat(sock, \"/w {} {}\".format(user, msg))",
"def greet_user():\n\tusername = get_stored_username()\n\tif username:\n\t\tprint(f\"WElcome back {username}!\")\n\telse:\n\t\tusername = get_new_username()\n\t\tprint(f\"We'll remember you when you come back again {username}! \")",
"async def say(self, ctx, *args):\n if not args:\n await ctx.send('did you want me to say something?')\n return\n message = ' '.join(args)\n message = profanity_filter(message)\n await ctx.send(message)",
"def say_meow(self):\n\n pylog.info('My master calls me {} and meow!'.format(self.name))",
"def handle_whoami(self, client_socket):\n user = self.clients[client_socket]['data'].decode('utf-8')\n print(f'User {user} queried their identity')\n msg = f'You are currently user {user}'\n self.log_and_send(client_socket, msg)",
"def whoami(bot, event, *args):\n\n if bot.memory.exists(['user_data', event.user_id.chat_id, \"nickname\"]):\n try:\n fullname = '{0} ({1})'.format(event.user.full_name.split(' ', 1)[0]\n , bot.get_memory_suboption(event.user_id.chat_id, 'nickname'))\n except TypeError:\n fullname = event.user.full_name\n else:\n fullname = event.user.full_name\n\n bot.send_message_parsed(event.conv, _(\"<b>{}</b>, chat_id = <i>{}</i>\").format(fullname, event.user.id_.chat_id))",
"def greet_user():\n username = get_stored_username()\n if username:\n print(\"Welcome back, \" + username['Name'] + \"!\")\n else:\n username = get_new_username()\n print(\"We'll remember you when you come back, \" + username + \"!\")",
"def say_bye(user_mention):\n response_template = random.choice(['see you later, alligator...',\n 'adios amigo',\n 'Bye {mention}!',\n 'Au revoir!'])\n return response_template.format(mention=user_mention)",
"async def me(self, ctx):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n await ctx.send(users.print_account(ctx.user_object))",
"def greet_user(self):\n print(f\"Hiya {self.username}!\")",
"def hey(self, msg):\n if issilence(msg):\n return \"Fine. Be that way.\"\n elif isshouting(msg):\n return \"Woah, chill out!\"\n elif isquestion(msg):\n return \"Sure.\"\n else:\n return \"Whatever.\"",
"def whereami(bot, event, *args):\n\n yield from bot.coro_send_message(\n event.conv,\n _(\"You are at <b><pre>{}</pre></b>, conv_id = <i><pre>{}</pre></i>\").format(\n bot.conversations.get_name(event.conv),\n event.conv.id_))"
]
| [
"0.68223554",
"0.62321746",
"0.613027",
"0.61272854",
"0.6083503",
"0.6044151",
"0.60427237",
"0.60396767",
"0.6015404",
"0.5978478",
"0.5938269",
"0.59224814",
"0.58828086",
"0.5871032",
"0.5866333",
"0.58441913",
"0.58326113",
"0.5827812",
"0.5786388",
"0.5778596",
"0.57460403",
"0.57442987",
"0.5742787",
"0.5716629",
"0.5686776",
"0.5681241",
"0.56655335",
"0.5662572",
"0.5637514",
"0.56156343"
]
| 0.6643294 | 1 |
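A note on the shouting row above: the positive document returns 'Whoa, chill out!' for shouted input. A minimal, self-contained Python sketch of how such a shouting check is commonly written (the is_shouting and respond names are illustrative, not taken from the dataset):

def is_shouting(message: str) -> bool:
    # Shouting here means the message contains letters and every letter is upper-case.
    return message.isupper()

def respond(message: str) -> str:
    # Only the shouting case exercised by the positive document above is handled.
    return "Whoa, chill out!" if is_shouting(message) else "Whatever."

assert respond("WATCH OUT!") == "Whoa, chill out!"
assert respond("hello") == "Whatever."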
Wait for the start of an MV build, ensuring that it has saved some progress | def _wait_for_view_build_start(self, session, ks, view, wait_minutes=2):
start = time.time()
while True:
try:
query = "SELECT COUNT(*) FROM %s WHERE keyspace_name='%s' AND view_name='%s'" %\
(self._build_progress_table(), ks, view)
result = list(session.execute(query))
assert 0 == result[0].count
except AssertionError:
break
elapsed = (time.time() - start) / 60
if elapsed > wait_minutes:
pytest.fail("The MV build hasn't started in {} minutes.".format(wait_minutes))
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def waitUntilFinished():",
"def waitUntilFinished():",
"def waitUntilFinished():",
"def waitUntilFinished():",
"def wait(self):\n self.mainloop().wait()",
"def wait_progress(self):\n pass",
"def wait_progress(self):\n pass",
"def wait(self):\n self.event.wait()",
"def wait(self):\n pass",
"def wait(self):\n pass",
"async def wait_until_done(self) -> None:\n ...",
"def do_wait(self):\n pass",
"def wait(self):\n\t\traise NotImplementedError(\"must be redeclared\")",
"def wait_until_ready(self):\n while not self.is_ready():\n time.sleep(0.01)",
"def wait(self):\n time.sleep(0.010)",
"def _wait_for_completion(self):\n if self.do_timing:\n self.timer.start(\"Running.\")\n\n while self.state != State.COMPLETED:\n self._update_state()\n\n if self.do_timing:\n self.timer.stop()",
"def ready(self) -> None:\n self._ready.set()\n self._go.wait()\n self._go.clear()\n self._context.error = None",
"def is_ready(self) -> bool:\n return self.build_progress == 1.0",
"def wait(self):\n self.Popen.wait()",
"def _wait_for_ready(self):\n if not self._ready:\n self.expect(self._terminal_ready_str, timeout=15)\n self._ready = True\n return self",
"def wait_for_build(self, who_is_waiting):\n self.logger.info(\"Member waiting for me to build: %s\" % who_is_waiting)\n # This lock is in an acquired state until this image definitively succeeds or fails.\n # It is then released. Child images waiting on this image should block here.\n with self.build_lock:\n if not self.build_status:\n raise IOError(\n \"Error building image: %s (%s was waiting)\" % (self.metadata.qualified_name, who_is_waiting))\n else:\n self.logger.info(\"Member successfully waited for me to build: %s\" % who_is_waiting)",
"def _wait(self,):\n #modlogger.debug( \"%s: waiting\"%self)\n self.closing = True\n with self.not_complete_lock:\n if not self.not_complete: return\n self._checkpoint()",
"def _checkpoint(self,):\n self.outstanding.wait()",
"def run_and_wait():\n self.busy.put(True)\n action()\n self.busy.put(False)\n status._finished(success=True)",
"def waitrobot(robot):\n while not robot.GetController().IsDone():\n time.sleep(0.01)",
"def waitrobot(robot):\n while not robot.GetController().IsDone():\n time.sleep(0.01)",
"def waitrobot(robot):\n while not robot.GetController().IsDone():\n time.sleep(0.01)",
"def waitrobot(robot):\n while not robot.GetController().IsDone():\n time.sleep(0.01)",
"def waitrobot(robot):\n while not robot.GetController().IsDone():\n time.sleep(0.01)",
"def waitrobot(robot):\n while not robot.GetController().IsDone():\n time.sleep(0.01)"
]
| [
"0.6729482",
"0.6729482",
"0.6729482",
"0.6729482",
"0.65039194",
"0.64148176",
"0.64148176",
"0.641136",
"0.63505125",
"0.63505125",
"0.6345326",
"0.6255207",
"0.6221448",
"0.6187781",
"0.61466134",
"0.6146286",
"0.6141253",
"0.6140755",
"0.6135329",
"0.60878426",
"0.6061043",
"0.6041031",
"0.6029754",
"0.60268795",
"0.6026845",
"0.6026845",
"0.6026845",
"0.6026845",
"0.6026845",
"0.6026845"
]
| 0.7093879 | 0 |
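The _wait_for_view_build_start document above is a poll-until-progress loop with a timeout. A generic sketch of the same pattern using only the standard library (wait_until is a hypothetical helper name, not part of the dataset):

import time

def wait_until(condition, timeout_s=120.0, poll_s=1.0):
    # Re-evaluate condition() until it returns True or the timeout elapses,
    # mirroring the structure of _wait_for_view_build_start above.
    start = time.time()
    while time.time() - start < timeout_s:
        if condition():
            return True
        time.sleep(poll_s)
    raise TimeoutError("condition not met within {:.0f} seconds".format(timeout_s))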
Test the materialized view creation | def test_create(self):
session = self.prepare(user_table=True)
result = list(session.execute(("SELECT * FROM system_schema.views "
"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
assert len(result) == 1, "Expecting 1 materialized view, got " + str(result)
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_migrate_view_fields(self):\n self.test_view = RecordView.create(\n self.testcoll, test_view_id, test_view_create_values\n )\n migrate_coll_data(self.testcoll)\n # Read field definition and check for inline field list\n view_data = self.check_entity_values(\n \"_view\", test_view_id, check_values=test_view_migrated_values\n )\n return",
"def test_view_metadata_cleanup(self):\n session = self.prepare(rf=2, nodes=2)\n\n def populate_data(session, rows):\n logger.debug(\"populate base data\")\n for v in range(rows):\n session.execute(\"INSERT INTO t(k,c,a,b,e,f) VALUES({v},{v},{v},{v},{v},{v})\".format(v=v))\n\n def verify_data(session, rows, views):\n logger.debug(\"verify view data\")\n for v in range(rows):\n for view in range(views):\n assert_one(session, \"SELECT * FROM mv{} WHERE k={v} AND c={v}\".format(view, v=v), [v, v, v, v, v, v])\n\n def create_keyspace(session, ks=\"ks1\", rf=2):\n create_ks(session, ks, rf)\n\n def create_table(session):\n logger.debug(\"create base table\")\n session.execute(\"CREATE TABLE t (k int, c int, a int, b int, e int, f int, primary key(k, c))\")\n\n def create_views(session, views, keyspace=\"ks1\"):\n logger.debug(\"create view\")\n for view in range(views):\n session.execute(\"CREATE MATERIALIZED VIEW mv{} AS SELECT * FROM t \"\n \"WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c,k)\".format(view),\n timeout=60)\n self._wait_for_view(keyspace, \"mv{}\".format(view))\n\n def drop_keyspace(session, keyspace=\"ks1\"):\n logger.debug(\"drop keyspace {}\".format(keyspace))\n session.execute(\"DROP KEYSPACE IF EXISTS {}\".format(keyspace),\n timeout=60)\n\n def drop_views(session, views):\n logger.debug(\"drop all views\")\n for view in range(views):\n session.execute(\"DROP MATERIALIZED VIEW IF EXISTS mv{}\".format(view))\n\n rows = 100\n views = 5\n\n create_keyspace(session)\n create_table(session)\n populate_data(session, rows)\n create_views(session, views)\n verify_data(session, rows, views)\n\n self._assert_view_meta(session, views)\n drop_keyspace(session)\n self._assert_view_meta(session, views, exists=False)\n\n create_keyspace(session)\n create_table(session)\n populate_data(session, rows)\n create_views(session, views)\n verify_data(session, rows, views)\n\n self._assert_view_meta(session, views)\n drop_views(session, views)\n self._assert_view_meta(session, views, exists=False)",
"def materialized_view(self) -> 'outputs.MaterializedViewDefinitionResponse':\n return pulumi.get(self, \"materialized_view\")",
"def test_complex_mv_select_statements(self):\n cluster = self.cluster\n cluster.set_configuration_options({'enable_materialized_views': 'true'})\n cluster.populate(3).start()\n node1 = cluster.nodelist()[0]\n session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)\n\n logger.debug(\"Creating keyspace\")\n session.execute(\"CREATE KEYSPACE mvtest WITH replication = \"\n \"{'class': 'SimpleStrategy', 'replication_factor': '3'}\")\n session.execute('USE mvtest')\n\n mv_primary_keys = [\"((a, b), c)\",\n \"((b, a), c)\",\n \"(a, b, c)\",\n \"(c, b, a)\",\n \"((c, a), b)\"]\n\n for mv_primary_key in mv_primary_keys:\n\n session.execute(\"CREATE TABLE test (a int, b int, c int, d int, PRIMARY KEY (a, b, c))\")\n\n insert_stmt = session.prepare(\"INSERT INTO test (a, b, c, d) VALUES (?, ?, ?, ?)\")\n update_stmt = session.prepare(\"UPDATE test SET d = ? WHERE a = ? AND b = ? AND c = ?\")\n delete_stmt1 = session.prepare(\"DELETE FROM test WHERE a = ? AND b = ? AND c = ?\")\n delete_stmt2 = session.prepare(\"DELETE FROM test WHERE a = ?\")\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n rows = [(0, 0, 0, 0),\n (0, 0, 1, 0),\n (0, 1, 0, 0),\n (0, 1, 1, 0),\n (1, 0, 0, 0),\n (1, 0, 1, 0),\n (1, 1, -1, 0),\n (1, 1, 0, 0),\n (1, 1, 1, 0)]\n\n for row in rows:\n session.execute(insert_stmt, row)\n\n logger.debug(\"Testing MV primary key: {}\".format(mv_primary_key))\n\n session.execute(\"CREATE MATERIALIZED VIEW mv AS SELECT * FROM test WHERE \"\n \"a = 1 AND b IS NOT NULL AND c = 1 PRIMARY KEY {}\".format(mv_primary_key))\n time.sleep(3)\n\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # insert new rows that does not match the filter\n session.execute(insert_stmt, (0, 0, 1, 0))\n session.execute(insert_stmt, (1, 1, 0, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # insert new row that does match the filter\n session.execute(insert_stmt, (1, 2, 1, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # update rows that does not match the filter\n session.execute(update_stmt, (1, 1, -1, 0))\n session.execute(update_stmt, (0, 1, 1, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # update a row that does match the filter\n session.execute(update_stmt, (2, 1, 1, 1))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete rows that does not match the filter\n session.execute(delete_stmt1, (1, 1, -1))\n session.execute(delete_stmt1, (2, 0, 1))\n session.execute(delete_stmt2, (0,))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete a row that does match the filter\n session.execute(delete_stmt1, (1, 1, 1))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete a partition that matches the filter\n session.execute(delete_stmt2, (1,))\n assert_all(session, \"SELECT a, b, c, d FROM mv\", [], 
cl=ConsistencyLevel.QUORUM)\n\n # Cleanup\n session.execute(\"DROP MATERIALIZED VIEW mv\")\n session.execute(\"DROP TABLE test\")",
"def creates_view(self):\n return self.statements[0].creates_view()",
"def test_view_set_construction(empty_model):\n viewset = ViewSet(model=empty_model)\n assert viewset.model is empty_model\n assert count(viewset.dynamic_views) == 0",
"def test_drop_while_building(self):\n\n session = self.prepare(options={'concurrent_materialized_view_builders': 4}, install_byteman=True)\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n\n logger.debug(\"Inserting initial data\")\n for i in range(5000):\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS\".format(v=i))\n\n logger.debug(\"Slowing down MV build with byteman\")\n for node in self.cluster.nodelist():\n node.byteman_submit([mk_bman_path('4.0/view_builder_task_sleep.btm')])\n\n logger.debug(\"Create a MV\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"Wait and ensure the MV build has started. Waiting up to 2 minutes.\")\n self._wait_for_view_build_start(session, 'ks', 't_by_v', wait_minutes=2)\n\n logger.debug(\"Drop the MV while it is still building\")\n session.execute(\"DROP MATERIALIZED VIEW t_by_v\")\n\n logger.debug(\"Verify that the build has been stopped before its finalization without errors\")\n for node in self.cluster.nodelist():\n self.check_logs_for_errors()\n assert not node.grep_log('Marking view', filename='debug.log')\n assert node.grep_log('Stopping current view builder due to schema change', filename='debug.log')\n\n logger.debug(\"Verify that the view has been removed\")\n failed = False\n try:\n session.execute(\"SELECT COUNT(*) FROM t_by_v\")\n except InvalidRequest:\n failed = True\n self.assertTrue(failed, \"The view shouldn't be queryable\")\n self._assert_view_meta(session, views=1, exists=False)\n\n logger.debug(\"Create the MV again\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"Verify that the MV has been successfully created\")\n self._wait_for_view('ks', 't_by_v')\n assert_one(session, \"SELECT COUNT(*) FROM t_by_v\", [5000])",
"def test_create_view(self):\n config = config_factory()\n db = config.arango_db\n\n # Create analyzer\n analyzer = ArangoAnalyzer(\"analyzer_sample\")\n analyzer.set_stopwords(\n language=\"english\",\n custom_stopwords=[\"stop\", \"word\"],\n include_default=False,\n )\n analyzer.type = ArangoAnalyzer._TYPE_TEXT\n\n analyzer.create(db)\n\n # Create Link - a view can hvae 0 to * links\n link = Link(name=\"TextNode\") # Name of a collection in the database\n linkAnalyzers = AnalyzerList([\"identity\"])\n link.analyzers = linkAnalyzers\n\n # A link can have 0..* fields\n field = Field(\n \"text\",\n AnalyzerList([\"text_en\", \"invalid_analyzer\", \"analyzer_sample\"]),\n ) # text_en is a predifined analyzer from arango\n field.analyzers.filter_invalid_analyzers(\n db, verbose=1\n ) # filters out the analyzer that are not defined in the database\n\n assert (\n str(field.analyzers)\n == \"AnalyzerList(analyzerList=['text_en', 'analyzer_sample'], database=None)\"\n )\n\n link.add_field(field)\n\n ## Show the dict format of all the fields in a link\n assert link.get_fields_dict() == {\n \"text\": {\"analyzers\": [\"text_en\", \"analyzer_sample\"]}\n }\n\n # create view\n view = View(\"sample_view\", view_type=\"arangosearch\")\n ## add the link (can have 0 or 1 link)\n view.add_link(link)\n\n ## can have 0..* primary sort\n view.add_primary_sort(\"text\", asc=False)\n view.add_stored_value([\"text\", \"timestamp\"], compression=\"lz4\")\n\n assert view.summary() == {\n \"name\": \"sample_view\",\n \"viewType\": \"arangosearch\",\n \"properties\": {\n \"cleanupintervalstep\": 0,\n \"cleanupIntervalStep\": 0,\n \"commitIntervalMsec\": 1000,\n \"consolidationIntervalMsec\": 0,\n \"consolidationPolicy\": {\n \"type\": \"tier\",\n \"segmentsMin\": 1,\n \"segmentsMax\": 10,\n \"segmentsBytesMax\": 5368709120,\n \"segmentsBytesFloor\": 2097152,\n \"minScore\": 0,\n },\n \"primarySortCompression\": \"lz4\",\n \"writebufferIdle\": 64,\n \"writebufferActive\": 0,\n \"writebufferMaxSize\": 33554432,\n },\n \"links\": {\n \"TextNode\": {\n \"analyzers\": [\"identity\"],\n \"fields\": {\n \"text\": {\"analyzers\": [\"text_en\", \"analyzer_sample\"]}\n },\n \"includeAllFields\": False,\n \"trackListPositions\": False,\n \"inBackground\": False,\n }\n },\n \"primarySort\": [{\"field\": \"text\", \"asc\": False}],\n \"storedValues\": [\n {\"fields\": [\"text\"], \"compression\": \"lz4\"},\n {\"fields\": [\"timestamp\"], \"compression\": \"lz4\"},\n ],\n }\n\n ## creates the view in the database\n view.create(db)\n\n assert db.view(\"sample_view\")[\"name\"] == \"sample_view\"",
"def test_populate_mv_after_insert(self):\n session = self.prepare(consistency_level=ConsistencyLevel.QUORUM)\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int)\")\n\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({v}, {v})\".format(v=i))\n\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL \"\n \"AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"wait for view to build\")\n self._wait_for_view(\"ks\", \"t_by_v\")\n\n logger.debug(\"wait that all batchlogs are replayed\")\n self._replay_batchlogs()\n\n for i in range(1000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(i), [i, i])",
"def createViews(views):\n ...",
"def test_storage(self):\n return SnapshotView(self, self.test_data)",
"def create_view(self, repo, view, sql):\n return self.user_con.create_view(\n repo=repo, view=view, sql=sql)",
"def propagate_view_creation_over_non_existing_table(self):\n\n cluster = self.cluster\n cluster.populate(3)\n cluster.start()\n node1, node2, node3 = self.cluster.nodelist()\n session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)\n create_ks(session, 'ks', 3)\n\n session.execute('CREATE TABLE users (username varchar PRIMARY KEY, state varchar)')\n\n # create a materialized view only in nodes 1 and 2\n node3.stop(wait_other_notice=True)\n session.execute(('CREATE MATERIALIZED VIEW users_by_state AS '\n 'SELECT * FROM users WHERE state IS NOT NULL AND username IS NOT NULL '\n 'PRIMARY KEY (state, username)'))\n\n # drop the base table only in node 3\n node1.stop(wait_other_notice=True)\n node2.stop(wait_other_notice=True)\n node3.start(wait_for_binary_proto=True)\n session = self.patient_cql_connection(node3, consistency_level=ConsistencyLevel.QUORUM)\n session.execute('DROP TABLE ks.users')\n\n # restart the cluster\n cluster.stop()\n cluster.start()\n\n # node3 should have received and ignored the creation of the MV over the dropped table\n assert node3.grep_log('Not adding view users_by_state because the base table')",
"def test_adding_dynamic_view(empty_model):\n viewset = ViewSet(model=empty_model)\n view = viewset.create_dynamic_view(key=\"dyn1\", description=\"test\")\n assert view.model is empty_model\n assert view.get_viewset() is viewset\n assert view.description == \"test\"\n assert view in viewset.dynamic_views",
"def create_view(self, start: int = 0, stop: int = 0):\n stmt = f\"\"\"create or replace view {self._view_name} as {self.qry}\"\"\"\n if start != 0 or stop != 0:\n sql = stmt + f\" limit {stop} offset {start}\"\n else:\n sql = stmt\n self.execquery(sql)",
"def test_drop_mv(self):\n session = self.prepare(user_table=True)\n\n # create another materialized view\n session.execute((\"CREATE MATERIALIZED VIEW users_by_birth_year AS \"\n \"SELECT * FROM users WHERE birth_year IS NOT NULL AND \"\n \"username IS NOT NULL PRIMARY KEY (birth_year, username)\"))\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 2, \"Expecting {} materialized view, got {}\".format(2, len(result))\n\n session.execute(\"DROP MATERIALIZED VIEW ks.users_by_state;\")\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 1, \"Expecting {} materialized view, got {}\".format(1, len(result))",
"def test_create_view_returns_empty(dummy_request):\n from learning_journal.views.default import new_entry\n assert new_entry(dummy_request) == {}",
"def test_view_tombstone(self):\n\n self.prepare(rf=3, options={'hinted_handoff_enabled': False})\n node1, node2, node3 = self.cluster.nodelist()\n\n session = self.patient_exclusive_cql_connection(node1)\n session.max_trace_wait = 120\n session.execute('USE ks')\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n # Set initial values TS=0, verify\n session.execute(SimpleStatement(\"INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 0\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'a', 3.0]\n )\n\n session.execute(SimpleStatement(\"INSERT INTO t (id, v2) VALUES (1, 'b') USING TIMESTAMP 1\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0]\n )\n\n # change v's value and TS=3, tombstones v=1 and adds v=0 record\n session.execute(SimpleStatement(\"UPDATE t USING TIMESTAMP 3 SET v = 0 WHERE id = 1\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n\n assert_none(session, \"SELECT * FROM t_by_v WHERE v = 1\")\n\n logger.debug('Shutdown node2')\n node2.stop(wait_other_notice=True)\n\n session.execute(SimpleStatement(\"UPDATE t USING TIMESTAMP 4 SET v = 1 WHERE id = 1\",\n consistency_level=ConsistencyLevel.QUORUM))\n self._replay_batchlogs()\n\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0]\n )\n\n node2.start(wait_for_binary_proto=True)\n\n # We should get a digest mismatch\n query = SimpleStatement(\"SELECT * FROM t_by_v WHERE v = 1\",\n consistency_level=ConsistencyLevel.ALL)\n\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), True)\n\n # We should not get a digest mismatch the second time\n query = SimpleStatement(\"SELECT * FROM t_by_v WHERE v = 1\", consistency_level=ConsistencyLevel.ALL)\n\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n\n # Verify values one last time\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0],\n cl=ConsistencyLevel.ALL\n )",
"def test_dynamic_view_hydrated(empty_viewset):\n viewset = empty_viewset\n system1 = viewset.model.add_software_system(name=\"sys1\")\n viewset.create_dynamic_view(key=\"dyn1\", description=\"dynamic\", element=system1)\n io = ViewSetIO.from_orm(viewset)\n\n new_viewset = ViewSet.hydrate(io, viewset.model)\n assert count(new_viewset.dynamic_views) == 1\n view = list(new_viewset.dynamic_views)[0]\n assert view.description == \"dynamic\"\n assert view.element is system1",
"def add_views(apps, schema_editor):\n connection = schema_editor.connection\n with connection.cursor() as cur:\n for view in reversed(OCP_ALL_VIEWS):\n LOG.info(f\"\"\"Dropping materialized view \"{view}\" with cascade\"\"\")\n cur.execute(f\"\"\"DROP MATERIALIZED VIEW \"{view}\" CASCADE;\"\"\")\n\n for view in OCP_ALL_VIEWS:\n view_sql = pkgutil.get_data(\"reporting.provider.all.openshift\", f\"sql/views/{view}.sql\")\n view_sql = view_sql.decode(\"utf-8\")\n LOG.info(f\"\"\"Creating materialized view \"{view}\"...\"\"\")\n with connection.cursor() as cursor:\n cursor.execute(view_sql)",
"def _create_view(self, view, schema=None, config=None):\n viewname, vschema = view[\"__tablename__\"].split(' ')[0], view[\"__schema__\"].split(' ')[0]\n try:\n dve = SQL('NULL from {}.{}').format(Identifier(vschema),\n Identifier(viewname))\n veq = self.__session.query(self._sql_to_string(dve)).limit(1)\n self.__session.execute(veq)\n self._commit()\n except ProgrammingError:\n self._rollback()\n like = text(\"information_schema.routines.routine_name like 'crosstab%'\")\n count = self.__session.query('* FROM information_schema.routines')\n count = count.filter(like).count()\n if int(count) == 0:\n self._create_extension(config)\n self.exschema = 'public'\n else:\n like = text(\"information_schema.routines.routine_name like 'crosstab%'\")\n count = self.__session.query('routine_schema FROM'\n ' information_schema.routines')\n count = count.filter(like).limit(1)\n count = self.__session.execute(count).fetchone()[0]\n self._commit()\n self.exschema = count\n like = text(\"SELECT has_schema_privilege(:exschema, 'USAGE')\")\n like = self.__session.execute(like,\n {\"exschema\": self.exschema}).fetchone()[0]\n self._commit()\n if not like:\n self._grant_access(config)\n viewst, raw = self._sql_to_string(view[\"__statement__\"]), '{}.crosstab'\n defsch = self._sql_to_string(SQL(raw).format(Identifier(schema)))\n exsch = SQL(raw).format(Identifier(self.exschema))\n self.__session.execute(viewst.replace(defsch, self._sql_to_string(exsch)))\n self._commit()\n except Exception:\n self._rollback()\n self._reset_session()\n raise",
"def test_create_view_returns_empty_dict_on_get(dummy_request):\n from learning_journal.views.default import create_view\n result = create_view(dummy_request)\n assert result == {}",
"def test_getting_view_by_key(empty_viewset):\n viewset = empty_viewset\n system1 = viewset.model.add_software_system(name=\"sys1\")\n container_view = viewset.create_container_view(\n key=\"container1\", description=\"container\", software_system=system1\n )\n\n assert viewset.get_view(\"container1\") is container_view\n assert viewset.get_view(\"bogus\") is None\n assert viewset[\"container1\"] is container_view\n with pytest.raises(KeyError):\n viewset[\"bogus\"]",
"def _test_mv_with_default_ttl(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1, node2, node3 = self.cluster.nodelist()\n session.execute('USE ks')\n\n logger.debug(\"MV with same key and unselected columns\")\n session.execute(\"CREATE TABLE t2 (k int, a int, b int, c int, primary key(k, a)) with default_time_to_live=600\")\n session.execute((\"CREATE MATERIALIZED VIEW mv2 AS SELECT k,a,b FROM t2 \"\n \"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (a, k)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n self.update_view(session, \"UPDATE t2 SET c=1 WHERE k=1 AND a=1;\", flush)\n assert_one(session, \"SELECT k,a,b,c FROM t2\", [1, 1, None, 1])\n assert_one(session, \"SELECT k,a,b FROM mv2\", [1, 1, None])\n\n self.update_view(session, \"UPDATE t2 SET c=null WHERE k=1 AND a=1;\", flush)\n assert_none(session, \"SELECT k,a,b,c FROM t2\")\n assert_none(session, \"SELECT k,a,b FROM mv2\")\n\n self.update_view(session, \"UPDATE t2 SET c=2 WHERE k=1 AND a=1;\", flush)\n assert_one(session, \"SELECT k,a,b,c FROM t2\", [1, 1, None, 2])\n assert_one(session, \"SELECT k,a,b FROM mv2\", [1, 1, None])\n\n self.update_view(session, \"DELETE c FROM t2 WHERE k=1 AND a=1;\", flush)\n assert_none(session, \"SELECT k,a,b,c FROM t2\")\n assert_none(session, \"SELECT k,a,b FROM mv2\")\n\n if flush:\n self.cluster.compact()\n assert_none(session, \"SELECT * FROM t2\")\n assert_none(session, \"SELECT * FROM mv2\")\n\n # test with user-provided ttl\n self.update_view(session, \"INSERT INTO t2(k,a,b,c) VALUES(2,2,2,2) USING TTL 5\", flush)\n self.update_view(session, \"UPDATE t2 USING TTL 100 SET c=1 WHERE k=2 AND a=2;\", flush)\n self.update_view(session, \"UPDATE t2 USING TTL 50 SET c=2 WHERE k=2 AND a=2;\", flush)\n self.update_view(session, \"DELETE c FROM t2 WHERE k=2 AND a=2;\", flush)\n\n time.sleep(5)\n\n assert_none(session, \"SELECT k,a,b,c FROM t2\")\n assert_none(session, \"SELECT k,a,b FROM mv2\")\n\n if flush:\n self.cluster.compact()\n assert_none(session, \"SELECT * FROM t2\")\n assert_none(session, \"SELECT * FROM mv2\")\n\n logger.debug(\"MV with extra key\")\n session.execute(\"CREATE TABLE t (k int PRIMARY KEY, a int, b int) with default_time_to_live=600\")\n session.execute((\"CREATE MATERIALIZED VIEW mv AS SELECT * FROM t \"\n \"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 1, 1);\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1])\n\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 2, 1);\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 2, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 2, 1])\n\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 3, 1);\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 3, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 3, 1])\n\n if flush:\n self.cluster.compact()\n assert_one(session, \"SELECT * FROM t\", [1, 3, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 3, 1])\n\n # user provided ttl\n start = time.time()\n self.update_view(session, \"UPDATE t USING TTL 100 SET a = 4 WHERE k = 1\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 4, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 4, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 
100:\n pytest.fail(\"Please increase the 100 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n start = time.time()\n self.update_view(session, \"UPDATE t USING TTL 80 SET a = 5 WHERE k = 1\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 5, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 5, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 80:\n pytest.fail(\"Please increase the 80 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n start = time.time()\n self.update_view(session, \"UPDATE t USING TTL 60 SET a = 6 WHERE k = 1\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 6, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 6, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 60:\n pytest.fail(\"Please increase the 60 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n if flush:\n self.cluster.compact()\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 6, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 6, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 60:\n pytest.fail(\"Please increase the 60 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae",
"def test_backup_restore_with_views(self):\n if \"ephemeral\" in self.input.param(\"bucket_type\", 'membase'):\n self.log.info(\"\\n****** view does not support on ephemeral bucket ******\")\n return\n rest_src = RestConnection(self.backupset.cluster_host)\n if \"community\" in self.cb_version:\n rest_src.add_node(self.servers[1].rest_username, self.servers[1].rest_password,\n self.servers[1].cluster_ip, services=['kv', 'index', 'n1ql'])\n else:\n rest_src.add_node(self.servers[1].rest_username, self.servers[1].rest_password,\n self.servers[1].cluster_ip, services=['index', 'kv'])\n rebalance = self.cluster.async_rebalance(self.cluster_to_backup, [], [])\n rebalance.result()\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n default_map_func = \"function (doc) {\\n emit(doc._id, doc);\\n}\"\n default_view_name = \"test\"\n default_ddoc_name = \"ddoc_test\"\n prefix = \"dev_\"\n query = {\"full_set\": \"true\", \"stale\": \"false\", \"connection_timeout\": 60000}\n view = View(default_view_name, default_map_func)\n task = self.cluster.async_create_view(self.backupset.cluster_host,\n default_ddoc_name, view, \"default\")\n task.result()\n self.backup_cluster_validate()\n rest_target = RestConnection(self.backupset.restore_cluster_host)\n if self.input.clusters[0][1].ip != self.servers[1].ip:\n rest_target.add_node(self.input.clusters[0][1].rest_username,\n self.input.clusters[0][1].rest_password,\n self.input.clusters[0][1].cluster_ip, services=['kv', 'index'])\n rebalance = self.cluster.async_rebalance(self.cluster_to_restore, [], [])\n rebalance.result()\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\">=\")\n try:\n result = self.cluster.query_view(self.backupset.restore_cluster_host,\n prefix + default_ddoc_name,\n default_view_name, query, timeout=30)\n self.assertEqual(len(result['rows']), self.num_items,\n \"Querying view on restore cluster did not return expected number of items\")\n self.log.info(\"Querying view on restore cluster returned expected number of items\")\n except TimeoutError:\n self.fail(\"View could not be queried in restore cluster within timeout\")",
"def test_create_collection(self):\n pass",
"def test_resume_stopped_build(self):\n\n session = self.prepare(options={'concurrent_materialized_view_builders': 4}, install_byteman=True)\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n nodes = self.cluster.nodelist()\n self.fixture_dtest_setup.ignore_log_patterns = [r'Compaction interrupted: View build']\n\n logger.debug(\"Inserting initial data\")\n for i in range(5000):\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS\".format(v=i))\n\n logger.debug(\"Slowing down MV build with byteman\")\n for node in nodes:\n node.byteman_submit([mk_bman_path('4.0/view_builder_task_sleep.btm')])\n\n logger.debug(\"Create a MV\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"Wait and ensure the MV build has started. Waiting up to 2 minutes.\")\n self._wait_for_view_build_start(session, 'ks', 't_by_v', wait_minutes=2)\n\n logger.debug(\"Stopping all running view build tasks with nodetool\")\n for node in nodes:\n node.watch_log_for('Starting new view build for range', filename='debug.log', timeout=120)\n node.nodetool('stop VIEW_BUILD')\n\n logger.debug(\"Checking logs to verify that some view build tasks have been stopped\")\n for node in nodes:\n node.watch_log_for('Stopped build for view', filename='debug.log', timeout=120)\n node.watch_log_for('Compaction interrupted: View build', filename='system.log', timeout=120)\n node.watch_log_for('Interrupted build for view', filename='debug.log', timeout=120)\n assert not node.grep_log('Marking view', filename='debug.log')\n self.check_logs_for_errors()\n\n logger.debug(\"Check that MV shouldn't be built yet.\")\n assert len(list(session.execute(\"SELECT COUNT(*) FROM t_by_v\"))) != 5000\n\n logger.debug(\"Restart the cluster\")\n self.cluster.stop()\n marks = [node.mark_log() for node in nodes]\n self.cluster.start()\n session = self.patient_cql_connection(nodes[0])\n\n logger.debug(\"Verify that the MV has been successfully created\")\n self._wait_for_view('ks', 't_by_v')\n assert_one(session, \"SELECT COUNT(*) FROM ks.t_by_v\", [5000])\n\n logger.debug(\"Checking logs to verify that the view build has been resumed and completed after restart\")\n for node, mark in zip(nodes, marks):\n assert node.grep_log('Resuming view build', filename='debug.log', from_mark=mark)\n assert node.grep_log('Marking view', filename='debug.log', from_mark=mark)\n self.check_logs_for_errors()",
"def test_secondary_index(self):\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n assert_invalid(session, \"CREATE INDEX ON t_by_v (v2)\",\n \"Secondary indexes are not supported on materialized views\")",
"def test_duplicate_key_raises_error(empty_viewset):\n viewset = empty_viewset\n system1 = viewset.model.add_software_system(name=\"sys1\")\n\n viewset.create_container_view(\n key=\"container1\", description=\"container\", software_system=system1\n )\n with pytest.raises(ValueError, match=\"View already exists\"):\n viewset.create_container_view(\n key=\"container1\", description=\"container\", software_system=system1\n )",
"def test_transform(self):\n shape = (3, 4, 5)\n index = (0, 2, 1)\n t = View(shape=shape, index=index)\n a = numpy.zeros(shape)\n a[index] = 2\n assert t.transform(a) == 2"
]
| [
"0.7338154",
"0.6931388",
"0.66762626",
"0.647838",
"0.64327127",
"0.63275766",
"0.6172521",
"0.61617875",
"0.61125934",
"0.61102265",
"0.60945475",
"0.60887665",
"0.60561234",
"0.6020806",
"0.59891534",
"0.5975811",
"0.5974465",
"0.591563",
"0.5910595",
"0.58976763",
"0.5882325",
"0.5867877",
"0.5838759",
"0.5836655",
"0.58131695",
"0.5803774",
"0.578455",
"0.5743809",
"0.5709255",
"0.5697086"
]
| 0.7793154 | 0 |
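For the test_create row above, a hedged sketch of the kind of statement whose effect the test checks in system_schema.views, written with the DataStax Python driver and assuming a local Cassandra with materialized views enabled and the same ks.users table the test fixture creates:

from cassandra.cluster import Cluster

session = Cluster(["127.0.0.1"]).connect("ks")
session.execute(
    "CREATE MATERIALIZED VIEW IF NOT EXISTS users_by_state AS "
    "SELECT * FROM users "
    "WHERE state IS NOT NULL AND username IS NOT NULL "
    "PRIMARY KEY (state, username)"
)
# The test then expects exactly one view for this base table in system_schema.views.
views = list(session.execute(
    "SELECT view_name FROM system_schema.views WHERE keyspace_name='ks'"
))
print([row.view_name for row in views])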
Verify that it's not possible to create or set a too low gc_grace_seconds on MVs | def test_gcgs_validation(self):
session = self.prepare(user_table=True)
# Shouldn't be able to alter the gc_grace_seconds of the base table to 0
assert_invalid(session,
"ALTER TABLE users WITH gc_grace_seconds = 0",
"Cannot alter gc_grace_seconds of the base table of a materialized view "
"to 0, since this value is used to TTL undelivered updates. Setting "
"gc_grace_seconds too low might cause undelivered updates to expire "
"before being replayed.")
# But can alter the gc_grace_seconds of the base table to a value != 0
session.execute("ALTER TABLE users WITH gc_grace_seconds = 10")
# Shouldn't be able to alter the gc_grace_seconds of the MV to 0
assert_invalid(session,
"ALTER MATERIALIZED VIEW users_by_state WITH gc_grace_seconds = 0",
"Cannot alter gc_grace_seconds of a materialized view to 0, since "
"this value is used to TTL undelivered updates. Setting gc_grace_seconds "
"too low might cause undelivered updates to expire before being replayed.")
# Now let's drop MV
session.execute("DROP MATERIALIZED VIEW ks.users_by_state;")
# Now we should be able to set the gc_grace_seconds of the base table to 0
session.execute("ALTER TABLE users WITH gc_grace_seconds = 0")
# Now we shouldn't be able to create a new MV on this table
assert_invalid(session,
"CREATE MATERIALIZED VIEW users_by_state AS "
"SELECT * FROM users WHERE STATE IS NOT NULL AND username IS NOT NULL "
"PRIMARY KEY (state, username)",
"Cannot create materialized view 'users_by_state' for base table 'users' "
"with gc_grace_seconds of 0, since this value is used to TTL undelivered "
"updates. Setting gc_grace_seconds too low might cause undelivered updates"
" to expire before being replayed.") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_gc_min_max(self):\n if not self.allow_open_amount:\n return\n\n if self.gc_min < 0 or self.gc_max < 0:\n self.raise_user_error(\"negative_amount_not_allowed\")\n\n if self.gc_min > self.gc_max:\n self.raise_user_error(\"invalid_amount\")",
"def test_too_many_gigabytes(self):\n volume1 = self.start_service('volume', host='host1')\n volume2 = self.start_service('volume', host='host2')\n volume_ids1 = []\n volume_ids2 = []\n for index in xrange(FLAGS.max_gigabytes):\n volume_id = self._create_volume()\n volume1.create_volume(self.context, volume_id)\n volume_ids1.append(volume_id)\n volume_id = self._create_volume()\n volume2.create_volume(self.context, volume_id)\n volume_ids2.append(volume_id)\n volume_id = self._create_volume()\n self.assertRaises(driver.NoValidHost,\n self.scheduler.driver.schedule_create_volume,\n self.context,\n volume_id)\n for volume_id in volume_ids1:\n volume1.delete_volume(self.context, volume_id)\n for volume_id in volume_ids2:\n volume2.delete_volume(self.context, volume_id)\n volume1.kill()\n volume2.kill()",
"def test_instance_too_small_gcp():\n args = argparse.Namespace(cfg=os.path.join(TEST_DATA_DIR, 'instance-too-small-gcp.ini'))\n with pytest.raises(UserReportError) as err:\n cfg = ElasticBlastConfig(configure(args), task = ElbCommand.SUBMIT)\n cfg.validate()\n assert err.value.returncode == INPUT_ERROR\n print(err.value.message)\n assert 'does not have enough memory' in err.value.message",
"def test_create_instance_with_oversubscribed_ram_fail(self):\n self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)\n self.rt.update_available_resource(self.context.elevated(), NODENAME)\n\n # get total memory as reported by virt driver:\n resources = self.compute.driver.get_available_resource(NODENAME)\n total_mem_mb = resources['memory_mb']\n\n oversub_limit_mb = total_mem_mb * 1.5\n instance_mb = int(total_mem_mb * 1.55)\n\n # build an instance, specifying an amount of memory that exceeds\n # both total_mem_mb and the oversubscribed limit:\n params = {\"flavor\": {\"memory_mb\": instance_mb, \"root_gb\": 128,\n \"ephemeral_gb\": 128}}\n instance = self._create_fake_instance_obj(params)\n\n filter_properties = {'limits': {'memory_mb': oversub_limit_mb}}\n\n self.compute.build_and_run_instance(self.context, instance,\n {}, {}, filter_properties, [],\n block_device_mapping=[])",
"def graceful(self):\n self._graceful = True",
"def test_negative_filesystem_limit(self):\n command_line = [\"pool\", \"set-fs-limit\", \"thispool\", \"-1\"]\n for prefix in [[], [\"-propagate\"]]:\n self.check_system_exit(prefix + command_line, _PARSE_ERROR)",
"def gc_disable():\n raise NotImplementedError()",
"def gc_enabled():\n raise NotImplementedError()",
"def test_prevent_wrong_memory(self):\n self.assertRaises(cinv.host.Error, self.wrong_memory)",
"def checkGracePeriodDuration(self):\n if (not self.isInGraceInvulnerability):\n return\n if (time.time()-self.gracePeriodStartTime > shipDamagedInvulerabilityGracePeriodLength):\n #if the grace period is over...\n self.disableGracePeriod()",
"def gc_enable():\n raise NotImplementedError()",
"def _check_ceph_mon_growth(ceph_mon_gib):\n controller_fs_list = pecan.request.dbapi.controller_fs_get_list()\n\n cgtsvg_max_free_GiB = _get_controller_cgtsvg_limit()\n\n LOG.info(\"_check_ceph_mon_growth ceph_mon_gib = %s, \"\n \"cgtsvg_max_free_GiB = %s\" % (ceph_mon_gib, cgtsvg_max_free_GiB))\n\n _check_relative_controller_fs(None, controller_fs_list)\n\n rootfs_configured_size_GiB = \\\n _total_size_controller_fs(None, controller_fs_list) + ceph_mon_gib\n LOG.info(\"_check_ceph_mon_growth rootfs_configured_size_GiB = %s\" %\n rootfs_configured_size_GiB)\n\n utils.check_node_ceph_mon_growth(None, ceph_mon_gib, cgtsvg_max_free_GiB)",
"def test_mem_limit_too_high():\n args = argparse.Namespace(cfg=os.path.join(TEST_DATA_DIR, 'mem-limit-too-high.ini'))\n with pytest.raises(UserReportError) as err:\n cfg = ElasticBlastConfig(configure(args), task = ElbCommand.SUBMIT)\n assert err.value.returncode == INPUT_ERROR\n m = re.match(r'Memory limit.*exceeds', err.value.message)\n assert m is not None",
"def _get_controller_cgtsvg_limit():\n cgtsvg0_free_mib = 0\n cgtsvg1_free_mib = 0\n cgtsvg_max_free_GiB = 0\n\n chosts = pecan.request.dbapi.ihost_get_by_personality(\n constants.CONTROLLER)\n for chost in chosts:\n if chost.hostname == constants.CONTROLLER_0_HOSTNAME:\n ipvs = pecan.request.dbapi.ipv_get_by_ihost(chost.uuid)\n for ipv in ipvs:\n if (ipv.lvm_vg_name == constants.LVG_CGTS_VG and\n ipv.pv_state != constants.PROVISIONED):\n msg = _(\"Cannot resize filesystem. There are still \"\n \"unprovisioned physical volumes on controller-0.\")\n raise wsme.exc.ClientSideError(msg)\n\n ilvgs = pecan.request.dbapi.ilvg_get_by_ihost(chost.uuid)\n for ilvg in ilvgs:\n if (ilvg.lvm_vg_name == constants.LVG_CGTS_VG and\n ilvg.lvm_vg_size and ilvg.lvm_vg_total_pe):\n cgtsvg0_free_mib = (int(ilvg.lvm_vg_size) *\n int(ilvg.lvm_vg_free_pe) // int(\n ilvg.lvm_vg_total_pe)) // (1024 * 1024)\n break\n\n else:\n ipvs = pecan.request.dbapi.ipv_get_by_ihost(chost.uuid)\n for ipv in ipvs:\n if (ipv.lvm_vg_name == constants.LVG_CGTS_VG and\n ipv.pv_state != constants.PROVISIONED):\n msg = _(\"Cannot resize filesystem. There are still \"\n \"unprovisioned physical volumes on controller-1.\")\n raise wsme.exc.ClientSideError(msg)\n\n ilvgs = pecan.request.dbapi.ilvg_get_by_ihost(chost.uuid)\n for ilvg in ilvgs:\n if (ilvg.lvm_vg_name == constants.LVG_CGTS_VG and\n ilvg.lvm_vg_size and ilvg.lvm_vg_total_pe):\n cgtsvg1_free_mib = (int(ilvg.lvm_vg_size) *\n int(ilvg.lvm_vg_free_pe) // int(\n ilvg.lvm_vg_total_pe)) // (1024 * 1024)\n break\n\n LOG.info(\"_get_controller_cgtsvg_limit cgtsvg0_free_mib=%s, \"\n \"cgtsvg1_free_mib=%s\" % (cgtsvg0_free_mib, cgtsvg1_free_mib))\n\n if cgtsvg0_free_mib > 0 and cgtsvg1_free_mib > 0:\n cgtsvg_max_free_GiB = min(cgtsvg0_free_mib, cgtsvg1_free_mib) // 1024\n LOG.info(\"min of cgtsvg0_free_mib=%s and cgtsvg1_free_mib=%s is \"\n \"cgtsvg_max_free_GiB=%s\" %\n (cgtsvg0_free_mib, cgtsvg1_free_mib, cgtsvg_max_free_GiB))\n elif cgtsvg1_free_mib > 0:\n cgtsvg_max_free_GiB = cgtsvg1_free_mib // 1024\n else:\n cgtsvg_max_free_GiB = cgtsvg0_free_mib // 1024\n\n LOG.info(\"SYS_I filesystem limits cgtsvg0_free_mib=%s, \"\n \"cgtsvg1_free_mib=%s, cgtsvg_max_free_GiB=%s\"\n % (cgtsvg0_free_mib, cgtsvg1_free_mib, cgtsvg_max_free_GiB))\n\n return cgtsvg_max_free_GiB",
"def test_deploy_more_vms_than_limit_allows(self):\n self.test_limits(vm_limit=2)",
"def test_with_process_crash(self):\n if self.num_replicas < 2:\n self.assertTrue(False, msg=\"Required: num_replicas > 1\")\n\n # Override num_of_nodes affected to 1 (Positive case)\n self.num_nodes_affected = 1\n\n error_sim = dict()\n shell_conn = dict()\n cbstat_obj = dict()\n failover_info = dict()\n vb_info_info = dict()\n active_vbs_in_target_nodes = list()\n failover_info[\"init\"] = dict()\n failover_info[\"afterCrud\"] = dict()\n vb_info_info[\"init\"] = dict()\n vb_info_info[\"afterCrud\"] = dict()\n\n self.log.info(\"Selecting nodes to simulate error condition\")\n target_nodes = DurabilityHelper.getTargetNodes(self.cluster,\n self.nodes_init,\n self.num_nodes_affected)\n\n self.log.info(\"Will simulate error condition on %s\" % target_nodes)\n for node in target_nodes:\n cbstat_obj[node.ip] = Cbstats(node)\n active_vbs_in_target_nodes += cbstat_obj[node.ip].vbucket_list(\n self.bucket.name,\n \"active\")\n vb_info_info[\"init\"][node.ip] = cbstat_obj[node.ip].vbucket_seqno(\n self.bucket.name)\n failover_info[\"init\"][node.ip] = \\\n cbstat_obj[node.ip].failover_stats(self.bucket.name)\n\n # Remove active vbuckets from doc_loading to avoid errors\n load_spec = dict()\n load_spec[\"doc_crud\"] = dict()\n load_spec[\"doc_crud\"][\n MetaCrudParams.DocCrud.CREATE_PERCENTAGE_PER_COLLECTION] = 100\n load_spec[\"doc_crud\"][\n MetaCrudParams.DocCrud.UPDATE_PERCENTAGE_PER_COLLECTION] = 25\n load_spec[\"doc_crud\"][\n MetaCrudParams.DocCrud.DELETE_PERCENTAGE_PER_COLLECTION] = 25\n load_spec[\"doc_crud\"][\n MetaCrudParams.DocCrud.COMMON_DOC_KEY] = \"test_collections\"\n load_spec[\"target_vbuckets\"] = list(set(range(0, 1024))\n ^ set(active_vbs_in_target_nodes))\n\n self.log.info(\"Perform 'create', 'update', 'delete' mutations\")\n doc_loading_task = \\\n self.bucket_util.run_scenario_from_spec(\n self.task,\n self.cluster,\n self.cluster.buckets,\n load_spec,\n mutation_num=1,\n async_load=True)\n\n self.sleep(5, \"Wait for doc loaders to start loading data\")\n\n for node in target_nodes:\n # Create shell_connections\n shell_conn[node.ip] = RemoteMachineShellConnection(node)\n\n # Perform specified action\n error_sim[node.ip] = CouchbaseError(self.log,\n shell_conn[node.ip],\n node=node)\n error_sim[node.ip].create(self.simulate_error,\n bucket_name=self.bucket.name)\n\n # Perform new scope/collection creation during doc ops in parallel\n self.__perform_collection_crud()\n\n # Wait for document_loader tasks to complete\n self.task_manager.get_task_result(doc_loading_task)\n self.bucket_util.validate_doc_loading_results(doc_loading_task)\n if doc_loading_task.result is False:\n self.log_failure(\"Doc CRUDs failed with process crash\")\n\n if self.simulate_error \\\n not in [DiskError.DISK_FULL, DiskError.DISK_FAILURE]:\n # Revert the induced error condition\n for node in target_nodes:\n error_sim[node.ip].revert(self.simulate_error,\n bucket_name=self.bucket.name)\n\n # Disconnect the shell connection\n shell_conn[node.ip].disconnect()\n self.sleep(10, \"Wait for node recovery to complete\")\n\n # In case of error with Ephemeral bucket, need to rebalance\n # to make sure data is redistributed properly\n if self.bucket_type == Bucket.Type.EPHEMERAL:\n retry_num = 0\n result = None\n while retry_num != 2:\n result = self.task.rebalance(\n self.servers[0:self.nodes_init],\n [], [])\n if result:\n break\n retry_num += 1\n self.sleep(10, \"Wait before retrying rebalance\")\n\n self.assertTrue(result, \"Rebalance failed\")\n\n # Fetch latest failover stats and validate the values are 
updated\n self.log.info(\"Validating failover and seqno cbstats\")\n for node in target_nodes:\n vb_info_info[\"afterCrud\"][node.ip] = \\\n cbstat_obj[node.ip].vbucket_seqno(self.bucket.name)\n failover_info[\"afterCrud\"][node.ip] = \\\n cbstat_obj[node.ip].failover_stats(self.bucket.name)\n\n # Failover stat validation\n if self.simulate_error == CouchbaseError.KILL_MEMCACHED:\n val = failover_info[\"init\"][node.ip] \\\n != failover_info[\"afterCrud\"][node.ip]\n else:\n if self.simulate_error != CouchbaseError.STOP_MEMCACHED \\\n and self.bucket_type == Bucket.Type.EPHEMERAL:\n val = failover_info[\"init\"][node.ip] \\\n != failover_info[\"afterCrud\"][node.ip]\n else:\n val = failover_info[\"init\"][node.ip] \\\n == failover_info[\"afterCrud\"][node.ip]\n error_msg = \"Failover stats mismatch after error condition:\" \\\n \" %s != %s\" \\\n % (failover_info[\"init\"][node.ip],\n failover_info[\"afterCrud\"][node.ip])\n self.assertTrue(val, msg=error_msg)\n\n # Seq_no validation (High level)\n val = \\\n vb_info_info[\"init\"][node.ip] \\\n != vb_info_info[\"afterCrud\"][node.ip]\n self.assertTrue(val, msg=\"vbucket seq_no not updated after CRUDs\")\n\n # Doc count validation\n self.validate_test_failure()\n self.bucket_util.validate_docs_per_collections_all_buckets(\n self.cluster)",
"def test_set_glass_capacity__with_invalid_numbers__returns_expected():\n glass = moet.create_glass(\"A\")\n with pytest.raises(ValueError):\n glass.capacity = -100",
"def precheck(self):\n if self.__memory_size is None:\n self.logger.exception(\"[Memory] Please set memory size.\")\n raise ArgsNotCorrect(\"Please set memory size.\")",
"def test_update_privilege_with_invalid_max_size(self):\n\n # Create a tenant\n tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)\n\n # Create a privilege without volume size settings\n privilege = vim.vcs.storage.DatastoreAccessPrivilege()\n privilege.datastore = self.datastore\n privilege.allow_create = True\n privilege.volume_total_size = 1024\n\n # Add privilege to the tenant\n self.tenantMgr.AddPrivilege(tenant, privilege)\n\n # Update the privilege with invalid volume size\n with self.assertRaises(vmodl.fault.InvalidArgument):\n self.tenantMgr.UpdatePrivilege(tenant, self.datastore, volume_max_size=2048)",
"def test_patch_cluster_resource_quota_status(self):\n pass",
"def test_replace_cluster_resource_quota_status(self):\n pass",
"def test_non_integer_filesystem_limit(self):\n command_line = [\"pool\", \"set-fs-limit\", \"thispool\", \"1.2\"]\n for prefix in [[], [\"-propagate\"]]:\n self.check_system_exit(prefix + command_line, _PARSE_ERROR)",
"async def test_age_limit_expiry(hass: HomeAssistant) -> None:\n now = dt_util.utcnow()\n current_time = datetime(now.year + 1, 8, 2, 12, 23, tzinfo=dt_util.UTC)\n\n with freeze_time(current_time) as freezer:\n assert await async_setup_component(\n hass,\n \"sensor\",\n {\n \"sensor\": [\n {\n \"platform\": \"statistics\",\n \"name\": \"test\",\n \"entity_id\": \"sensor.test_monitored\",\n \"state_characteristic\": \"mean\",\n \"sampling_size\": 20,\n \"max_age\": {\"minutes\": 4},\n },\n ]\n },\n )\n await hass.async_block_till_done()\n\n for value in VALUES_NUMERIC:\n current_time += timedelta(minutes=1)\n freezer.move_to(current_time)\n async_fire_time_changed(hass, current_time)\n hass.states.async_set(\n \"sensor.test_monitored\",\n str(value),\n {ATTR_UNIT_OF_MEASUREMENT: UnitOfTemperature.CELSIUS},\n )\n await hass.async_block_till_done()\n\n # After adding all values, we should only see 5 values in memory\n\n state = hass.states.get(\"sensor.test\")\n new_mean = round(sum(VALUES_NUMERIC[-5:]) / len(VALUES_NUMERIC[-5:]), 2)\n assert state is not None\n assert state.state == str(new_mean)\n assert state.attributes.get(\"buffer_usage_ratio\") == round(5 / 20, 2)\n assert state.attributes.get(\"age_coverage_ratio\") == 1.0\n\n # Values expire over time. Only two are left\n\n current_time += timedelta(minutes=3)\n freezer.move_to(current_time)\n async_fire_time_changed(hass, current_time)\n await hass.async_block_till_done()\n\n state = hass.states.get(\"sensor.test\")\n new_mean = round(sum(VALUES_NUMERIC[-2:]) / len(VALUES_NUMERIC[-2:]), 2)\n assert state is not None\n assert state.state == str(new_mean)\n assert state.attributes.get(\"buffer_usage_ratio\") == round(2 / 20, 2)\n assert state.attributes.get(\"age_coverage_ratio\") == 1 / 4\n\n # Values expire over time. Only one is left\n\n current_time += timedelta(minutes=1)\n freezer.move_to(current_time)\n async_fire_time_changed(hass, current_time)\n await hass.async_block_till_done()\n\n state = hass.states.get(\"sensor.test\")\n new_mean = float(VALUES_NUMERIC[-1])\n assert state is not None\n assert state.state == str(new_mean)\n assert state.attributes.get(\"buffer_usage_ratio\") == round(1 / 20, 2)\n assert state.attributes.get(\"age_coverage_ratio\") == 0\n\n # Values expire over time. Buffer is empty\n\n current_time += timedelta(minutes=1)\n freezer.move_to(current_time)\n async_fire_time_changed(hass, current_time)\n await hass.async_block_till_done()\n\n state = hass.states.get(\"sensor.test\")\n assert state is not None\n assert state.state == STATE_UNKNOWN\n assert state.attributes.get(\"buffer_usage_ratio\") == round(0 / 20, 2)\n assert state.attributes.get(\"age_coverage_ratio\") is None",
"def test_verify_fails_expected_metric_kwargs(perfectModelEnsemble_initialized_control):\n pm = perfectModelEnsemble_initialized_control\n pm = pm - pm.mean(\"time\").mean(\"init\")\n with pytest.raises(ValueError) as excinfo:\n pm.verify(\n metric=\"threshold_brier_score\", comparison=\"m2c\", dim=[\"init\", \"member\"]\n )\n assert \"Please provide threshold.\" == str(excinfo.value)",
"def _safe_limit_check(self):\n if self.rem == 40:\n self.time_start = time.time()\n elif time.time() - self.time_start >= 11:\n self.rem = 40\n self.time_start = time.time()\n elif self.rem <= 0:\n t = 11 - (time.time() - self.time_start)\n\n if t <= 0:\n self.rem = 40\n self.time_start = time.time()\n else:\n if self.policy == Limit.Sleep:\n time.sleep(t)\n elif self.policy == Limit.Ignore:\n return False\n\n self.rem -= 1\n return True",
"def device_gc():\n safe_call(backend.get().af_device_gc())",
"def test_patch_cluster_resource_quota(self):\n pass",
"def auditmemallocfail(self) :\n\t\ttry :\n\t\t\treturn self._auditmemallocfail\n\t\texcept Exception as e:\n\t\t\traise e",
"def test_replace_cluster_resource_quota(self):\n pass",
"def test_pool_timeout_hw(self):\n self.test_pool_timeout()"
]
| [
"0.5917992",
"0.5739292",
"0.5642895",
"0.56359136",
"0.5613127",
"0.55982673",
"0.55579895",
"0.5507703",
"0.546689",
"0.5466805",
"0.54514426",
"0.5366381",
"0.53661144",
"0.52998894",
"0.52954394",
"0.5257324",
"0.52490336",
"0.523056",
"0.5204648",
"0.51972055",
"0.51803213",
"0.5178224",
"0.515616",
"0.5145222",
"0.5143518",
"0.51332355",
"0.5105544",
"0.509961",
"0.50819486",
"0.5074507"
]
| 0.6689494 | 0 |
Test that a view is OK when created over existing data with wide rows | def test_populate_mv_after_insert_wide_rows(self):
session = self.prepare(consistency_level=ConsistencyLevel.QUORUM)
session.execute("CREATE TABLE t (id int, v int, PRIMARY KEY (id, v))")
session.cluster.control_connection.wait_for_schema_agreement()
for i in range(5):
for j in range(10000):
session.execute("INSERT INTO t (id, v) VALUES ({}, {})".format(i, j))
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL "
"AND id IS NOT NULL PRIMARY KEY (v, id)"))
session.cluster.control_connection.wait_for_schema_agreement()
logger.debug("wait for view to build")
self._wait_for_view("ks", "t_by_v")
logger.debug("wait that all batchlogs are replayed")
self._replay_batchlogs()
for i in range(5):
for j in range(10000):
assert_one(session, "SELECT * FROM t_by_v WHERE id = {} AND v = {}".format(i, j), [j, i]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_migrate_view_fields(self):\n self.test_view = RecordView.create(\n self.testcoll, test_view_id, test_view_create_values\n )\n migrate_coll_data(self.testcoll)\n # Read field definition and check for inline field list\n view_data = self.check_entity_values(\n \"_view\", test_view_id, check_values=test_view_migrated_values\n )\n return",
"def _test_no_base_column_in_view_pk_complex_timestamp(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1, node2, node3 = self.cluster.nodelist()\n\n session.execute('USE ks')\n session.execute(\"CREATE TABLE t (k int, c int, a int, b int, e int, f int, primary key(k, c))\")\n session.execute((\"CREATE MATERIALIZED VIEW mv AS SELECT k,c,a,b FROM t \"\n \"WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c, k)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n # update unselected, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 1 SET e=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, 1, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # remove unselected, add selected column, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 2 SET e=null, b=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, 1, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, 1])\n\n # remove selected column, view row is removed\n self.update_view(session, \"UPDATE t USING TIMESTAMP 2 SET e=null, b=null WHERE k=1 AND c=1;\", flush)\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")\n\n # update unselected with ts=3, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET f=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # insert livenesssInfo, view row should be alive\n self.update_view(session, \"INSERT INTO t(k,c) VALUES(1,1) USING TIMESTAMP 3\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # remove unselected, view row should be alive because of base livenessInfo alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET f=null WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # add selected column, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET a=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n\n # update unselected, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 SET f=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n\n # delete with ts=3, view row should be alive due to unselected@ts4\n self.update_view(session, \"DELETE FROM t USING TIMESTAMP 3 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # remove unselected, view row should be removed\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 SET f=null WHERE k=1 AND c=1;\", flush)\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")\n\n # add selected with ts=7, view row is alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 7 SET b=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, 
\"SELECT * FROM t\", [1, 1, None, 1, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, 1])\n\n # remove selected with ts=7, view row is dead\n self.update_view(session, \"UPDATE t USING TIMESTAMP 7 SET b=null WHERE k=1 AND c=1;\", flush)\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")\n\n # add selected with ts=5, view row is alive (selected column should not affects each other)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 5 SET a=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n\n start = time.time()\n # add selected with ttl=30 (we apparently need a long ttl because the flushing etc that self.update_view does can take a long time)\n update_time = self.update_view(session, \"UPDATE t USING TTL 30 SET a=1 WHERE k=1 AND c=1;\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n except AssertionError as ae:\n if (time.time() - start) >= 30:\n pytest.fail(\"Please increase the 30 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n wait_time = update_time + 30 - time.time()\n if wait_time > 0:\n time.sleep(wait_time)\n\n start = time.time()\n # update unselected with ttl=30, view row should be alive\n update_time = self.update_view(session, \"UPDATE t USING TTL 30 SET f=1 WHERE k=1 AND c=1;\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n except AssertionError as ae:\n if (time.time() - start) >= 30:\n pytest.fail(\"Please increase the 30 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n wait_time = update_time + 30 - time.time()\n if wait_time > 0:\n time.sleep(wait_time)\n\n # view row still alive due to base livenessInfo\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")",
"def test_view_metadata_cleanup(self):\n session = self.prepare(rf=2, nodes=2)\n\n def populate_data(session, rows):\n logger.debug(\"populate base data\")\n for v in range(rows):\n session.execute(\"INSERT INTO t(k,c,a,b,e,f) VALUES({v},{v},{v},{v},{v},{v})\".format(v=v))\n\n def verify_data(session, rows, views):\n logger.debug(\"verify view data\")\n for v in range(rows):\n for view in range(views):\n assert_one(session, \"SELECT * FROM mv{} WHERE k={v} AND c={v}\".format(view, v=v), [v, v, v, v, v, v])\n\n def create_keyspace(session, ks=\"ks1\", rf=2):\n create_ks(session, ks, rf)\n\n def create_table(session):\n logger.debug(\"create base table\")\n session.execute(\"CREATE TABLE t (k int, c int, a int, b int, e int, f int, primary key(k, c))\")\n\n def create_views(session, views, keyspace=\"ks1\"):\n logger.debug(\"create view\")\n for view in range(views):\n session.execute(\"CREATE MATERIALIZED VIEW mv{} AS SELECT * FROM t \"\n \"WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c,k)\".format(view),\n timeout=60)\n self._wait_for_view(keyspace, \"mv{}\".format(view))\n\n def drop_keyspace(session, keyspace=\"ks1\"):\n logger.debug(\"drop keyspace {}\".format(keyspace))\n session.execute(\"DROP KEYSPACE IF EXISTS {}\".format(keyspace),\n timeout=60)\n\n def drop_views(session, views):\n logger.debug(\"drop all views\")\n for view in range(views):\n session.execute(\"DROP MATERIALIZED VIEW IF EXISTS mv{}\".format(view))\n\n rows = 100\n views = 5\n\n create_keyspace(session)\n create_table(session)\n populate_data(session, rows)\n create_views(session, views)\n verify_data(session, rows, views)\n\n self._assert_view_meta(session, views)\n drop_keyspace(session)\n self._assert_view_meta(session, views, exists=False)\n\n create_keyspace(session)\n create_table(session)\n populate_data(session, rows)\n create_views(session, views)\n verify_data(session, rows, views)\n\n self._assert_view_meta(session, views)\n drop_views(session, views)\n self._assert_view_meta(session, views, exists=False)",
"def test_create(self):\n session = self.prepare(user_table=True)\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 1, \"Expecting 1 materialized view == got\" + str(result)",
"def test_copying_layout(empty_model):\n assert 1 == 0 # TODO",
"def test_table_widget_parser_with_single_columns_as_unique_key():\n execute_table_widget_parser_test('bh-management_operations_single-property_detailed-resident-records')",
"def test_overall_report_columns():\n assert (len(overall_data['columns']) == 31)",
"def test_duplicate_key_raises_error(empty_viewset):\n viewset = empty_viewset\n system1 = viewset.model.add_software_system(name=\"sys1\")\n\n viewset.create_container_view(\n key=\"container1\", description=\"container\", software_system=system1\n )\n with pytest.raises(ValueError, match=\"View already exists\"):\n viewset.create_container_view(\n key=\"container1\", description=\"container\", software_system=system1\n )",
"def test_build_dataframe(self):\n insert_good_data()\n dataframe = get_dataframe()\n # 1 2 3\n self.assertIs(type(dataframe['Total'][0]), numpy.float64)\n self.assertIs(type(dataframe['InvoiceDate'][0]), str)\n self.assertIs(type(dataframe['Count'][0]), numpy.int64)\n # 4\n self.assertEqual(dataframe['Total'][0], 8198.79)\n # 5\n self.assertDataframeEqual(dataframe, get_equal_dataframe())\n alt_dataframe = get_alter_dataframe(self.database_connection)\n # 6\n self.assertNotEqual(alt_dataframe['Count'][0], dataframe['Count'][0])\n # 7\n with self.assertRaises(AssertionError):\n self.assertDataframeEqual(alt_dataframe, dataframe)\n # 8\n self.assertEqual(dataframe['Total'][0], alt_dataframe['Total'][0])",
"def test_simple(cls):\n table_data = [\n ['Name', 'Color', 'Type'],\n ['Avocado', 'green', 'nut'],\n ['Tomato', 'red', 'fruit'],\n ['Lettuce', 'green', 'vegetable'],\n ]\n table = cls(table_data) # '| Lettuce | green | vegetable |'\n\n assert 56 == table.column_max_width(0)\n assert 54 == table.column_max_width(1)\n assert 58 == table.column_max_width(2)\n\n table_data.append(['Watermelon', 'green', 'fruit'])\n assert 56 == table.column_max_width(0)\n assert 51 == table.column_max_width(1)\n assert 55 == table.column_max_width(2)",
"def test_wrap_long_char_wider_than_max_width():\n column_1 = Column(\"Col 1\", width=1)\n tc = TableCreator([column_1])\n row = tc.generate_row(row_data=['深'], is_header=False)\n assert row == '…'",
"def _test_base_column_in_view_pk_complex_timestamp(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1, node2, node3 = self.cluster.nodelist()\n\n session.execute('USE ks')\n session.execute(\"CREATE TABLE t (k int PRIMARY KEY, a int, b int)\")\n session.execute((\"CREATE MATERIALIZED VIEW mv AS SELECT * FROM t \"\n \"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n # Set initial values TS=1\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 1, 1) USING TIMESTAMP 1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1])\n\n # increase b ts to 10\n self.update_view(session, \"UPDATE t USING TIMESTAMP 10 SET b = 2 WHERE k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 1, 2, 10])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 1, 2, 10])\n\n # switch entries. shadow a = 1, insert a = 2\n self.update_view(session, \"UPDATE t USING TIMESTAMP 2 SET a = 2 WHERE k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 2, 2, 10])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 2, 2, 10])\n\n # switch entries. shadow a = 2, insert a = 1\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET a = 1 WHERE k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 1, 2, 10])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 1, 2, 10])\n\n # switch entries. shadow a = 1, insert a = 2\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 SET a = 2 WHERE k = 1;\", flush, compact=True)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 2, 2, 10])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 2, 2, 10])\n\n # able to shadow view row even if base-column in view pk's ts is smaller than row timestamp\n # set row TS = 20, a@6, b@20\n self.update_view(session, \"DELETE FROM t USING TIMESTAMP 5 where k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, None, 2, 10])\n assert_none(session, \"SELECT k,a,b,writetime(b) FROM mv\")\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 1, 1) USING TIMESTAMP 6;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 1, 2, 10])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 1, 2, 10])\n self.update_view(session, \"INSERT INTO t (k, b) VALUES (1, 1) USING TIMESTAMP 20;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 1, 1, 20])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 1, 1, 20])\n\n # switch entries. shadow a = 1, insert a = 2\n self.update_view(session, \"UPDATE t USING TIMESTAMP 7 SET a = 2 WHERE k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(a),writetime(b) FROM t\", [1, 2, 1, 7, 20])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 2, 1, 20])\n\n # switch entries. 
shadow a = 2, insert a = 1\n self.update_view(session, \"UPDATE t USING TIMESTAMP 8 SET a = 1 WHERE k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(a),writetime(b) FROM t\", [1, 1, 1, 8, 20])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 1, 1, 20])\n\n # create another view row\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (2, 2, 2);\", flush)\n assert_one(session, \"SELECT k,a,b FROM t WHERE k = 2\", [2, 2, 2])\n assert_one(session, \"SELECT k,a,b FROM mv WHERE k = 2\", [2, 2, 2])\n\n # stop node2, node3\n logger.debug('Shutdown node2')\n node2.stop(wait_other_notice=True)\n logger.debug('Shutdown node3')\n node3.stop(wait_other_notice=True)\n # shadow a = 1, create a = 2\n query = SimpleStatement(\"UPDATE t USING TIMESTAMP 9 SET a = 2 WHERE k = 1\", consistency_level=ConsistencyLevel.ONE)\n self.update_view(session, query, flush)\n # shadow (a=2, k=2) after 3 second\n query = SimpleStatement(\"UPDATE t USING TTL 3 SET a = 2 WHERE k = 2\", consistency_level=ConsistencyLevel.ONE)\n self.update_view(session, query, flush)\n\n logger.debug('Starting node2')\n node2.start(wait_for_binary_proto=True)\n logger.debug('Starting node3')\n node3.start(wait_for_binary_proto=True)\n\n # For k = 1 & a = 1, We should get a digest mismatch of tombstones and repaired\n query = SimpleStatement(\"SELECT * FROM mv WHERE k = 1 AND a = 1\", consistency_level=ConsistencyLevel.ALL)\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), True)\n assert 0 == len(result.current_rows)\n\n # For k = 1 & a = 1, second time no digest mismatch\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n assert_none(session, \"SELECT * FROM mv WHERE k = 1 AND a = 1\")\n assert 0 == len(result.current_rows)\n\n # For k = 1 & a = 2, We should get a digest mismatch of data and repaired for a = 2\n query = SimpleStatement(\"SELECT * FROM mv WHERE k = 1 AND a = 2\", consistency_level=ConsistencyLevel.ALL)\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), True)\n assert 1 == len(result.current_rows)\n\n # For k = 1 & a = 2, second time no digest mismatch\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n assert 1 == len(result.current_rows)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv WHERE k = 1\", [1, 2, 1, 20])\n\n time.sleep(3)\n # For k = 2 & a = 2, We should get a digest mismatch of expired and repaired\n query = SimpleStatement(\"SELECT * FROM mv WHERE k = 2 AND a = 2\", consistency_level=ConsistencyLevel.ALL)\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), True)\n logger.debug(result.current_rows)\n assert 0 == len(result.current_rows)\n\n # For k = 2 & a = 2, second time no digest mismatch\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n assert 0 == len(result.current_rows)",
"def test_002_range_columns(self):\n assert(len(\n self.range_transformer.fit_transform(\n self.data[self.range_col]\n ).columns\n ) == 1)",
"def _test_base_column_in_view_pk_commutative_tombstone_(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1 = self.cluster.nodelist()[0]\n\n session.execute('USE ks')\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n for node in self.cluster.nodelist():\n node.nodetool(\"disableautocompaction\")\n\n # sstable 1, Set initial values TS=1\n self.update_view(session, \"INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 1\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, 'a', 3.0])\n\n # sstable 2, change v's value and TS=2, tombstones v=1 and adds v=0 record\n self.update_view(session, \"DELETE FROM t USING TIMESTAMP 2 WHERE id = 1;\", flush)\n assert_none(session, \"SELECT * FROM t_by_v\")\n assert_none(session, \"SELECT * FROM t\")\n\n # sstable 3, tombstones of mv created by base deletion should remain.\n self.update_view(session, \"INSERT INTO t (id, v) VALUES (1, 1) USING TIMESTAMP 3\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None])\n\n # sstable 4, shadow view row (id=1, v=1), insert (id=1, v=2, ts=4)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 set v = 2 WHERE id = 1;\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [2, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 2, None, None])\n\n # sstable 5, shadow view row (id=1, v=2), insert (id=1, v=1 ts=5)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 5 set v = 1 WHERE id = 1;\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None]) # data deleted by row-tombstone@2 should not resurrect\n\n if flush:\n self.cluster.compact()\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None]) # data deleted by row-tombstone@2 should not resurrect\n\n # shadow view row (id=1, v=1)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 5 set v = null WHERE id = 1;\", flush)\n assert_none(session, \"SELECT * FROM t_by_v\")\n assert_one(session, \"SELECT * FROM t\", [1, None, None, None])",
"def test_lwt(self):\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"Inserting initial data using IF NOT EXISTS\")\n for i in range(1000):\n session.execute(\n \"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS\".format(v=i)\n )\n self._replay_batchlogs()\n\n logger.debug(\"All rows should have been inserted\")\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )\n\n logger.debug(\"Tyring to UpInsert data with a different value using IF NOT EXISTS\")\n for i in range(1000):\n v = i * 2\n session.execute(\n \"INSERT INTO t (id, v, v2, v3) VALUES ({id}, {v}, 'a', 3.0) IF NOT EXISTS\".format(id=i, v=v)\n )\n self._replay_batchlogs()\n\n logger.debug(\"No rows should have changed\")\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )\n\n logger.debug(\"Update the 10 first rows with a different value\")\n for i in range(1000):\n v = i + 2000\n session.execute(\n \"UPDATE t SET v={v} WHERE id = {id} IF v < 10\".format(id=i, v=v)\n )\n self._replay_batchlogs()\n\n logger.debug(\"Verify that only the 10 first rows changed.\")\n results = list(session.execute(\"SELECT * FROM t_by_v;\"))\n assert len(results) == 1000\n for i in range(1000):\n v = i + 2000 if i < 10 else i\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(v),\n [v, i, 'a', 3.0]\n )\n\n logger.debug(\"Deleting the first 10 rows\")\n for i in range(1000):\n v = i + 2000\n session.execute(\n \"DELETE FROM t WHERE id = {id} IF v = {v} \".format(id=i, v=v)\n )\n self._replay_batchlogs()\n\n logger.debug(\"Verify that only the 10 first rows have been deleted.\")\n results = list(session.execute(\"SELECT * FROM t_by_v;\"))\n assert len(results) == 990\n for i in range(10, 1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )",
"def test_complex_mv_select_statements(self):\n cluster = self.cluster\n cluster.set_configuration_options({'enable_materialized_views': 'true'})\n cluster.populate(3).start()\n node1 = cluster.nodelist()[0]\n session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)\n\n logger.debug(\"Creating keyspace\")\n session.execute(\"CREATE KEYSPACE mvtest WITH replication = \"\n \"{'class': 'SimpleStrategy', 'replication_factor': '3'}\")\n session.execute('USE mvtest')\n\n mv_primary_keys = [\"((a, b), c)\",\n \"((b, a), c)\",\n \"(a, b, c)\",\n \"(c, b, a)\",\n \"((c, a), b)\"]\n\n for mv_primary_key in mv_primary_keys:\n\n session.execute(\"CREATE TABLE test (a int, b int, c int, d int, PRIMARY KEY (a, b, c))\")\n\n insert_stmt = session.prepare(\"INSERT INTO test (a, b, c, d) VALUES (?, ?, ?, ?)\")\n update_stmt = session.prepare(\"UPDATE test SET d = ? WHERE a = ? AND b = ? AND c = ?\")\n delete_stmt1 = session.prepare(\"DELETE FROM test WHERE a = ? AND b = ? AND c = ?\")\n delete_stmt2 = session.prepare(\"DELETE FROM test WHERE a = ?\")\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n rows = [(0, 0, 0, 0),\n (0, 0, 1, 0),\n (0, 1, 0, 0),\n (0, 1, 1, 0),\n (1, 0, 0, 0),\n (1, 0, 1, 0),\n (1, 1, -1, 0),\n (1, 1, 0, 0),\n (1, 1, 1, 0)]\n\n for row in rows:\n session.execute(insert_stmt, row)\n\n logger.debug(\"Testing MV primary key: {}\".format(mv_primary_key))\n\n session.execute(\"CREATE MATERIALIZED VIEW mv AS SELECT * FROM test WHERE \"\n \"a = 1 AND b IS NOT NULL AND c = 1 PRIMARY KEY {}\".format(mv_primary_key))\n time.sleep(3)\n\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # insert new rows that does not match the filter\n session.execute(insert_stmt, (0, 0, 1, 0))\n session.execute(insert_stmt, (1, 1, 0, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # insert new row that does match the filter\n session.execute(insert_stmt, (1, 2, 1, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # update rows that does not match the filter\n session.execute(update_stmt, (1, 1, -1, 0))\n session.execute(update_stmt, (0, 1, 1, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # update a row that does match the filter\n session.execute(update_stmt, (2, 1, 1, 1))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete rows that does not match the filter\n session.execute(delete_stmt1, (1, 1, -1))\n session.execute(delete_stmt1, (2, 0, 1))\n session.execute(delete_stmt2, (0,))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete a row that does match the filter\n session.execute(delete_stmt1, (1, 1, 1))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete a partition that matches the filter\n session.execute(delete_stmt2, (1,))\n assert_all(session, \"SELECT a, b, c, d FROM mv\", [], 
cl=ConsistencyLevel.QUORUM)\n\n # Cleanup\n session.execute(\"DROP MATERIALIZED VIEW mv\")\n session.execute(\"DROP TABLE test\")",
"def test_collidium_rows(self):\n good_colls = collisions_clean(\"seattlecollision/data/raw_data/raw_collisions_input.csv\")\n good_builds_1 = buildings_clean(\n \"seattlecollision/data/raw_data/raw_buildings_input.csv\").head(1)\n processed_collidium_output = create_collidium_table(good_colls, good_builds_1)\n self.assertTrue(processed_collidium_output.shape[0] >= 10)",
"def test_index_lt_3(self):\n self.insert()\n data = self.tbl[:6]\n assert self.check(self.idata[:2], data)",
"def test_create_from_dataframe(self):\n self.insert()\n data = self.tbl.select()\n data.index.name = None\n tbl = Table.create(':memory:', \"Foo_2\", data, verbose=True,\n primary_key='id', autoincrement=True)\n self.check(self.idata, tbl.select())",
"def test_index_12(self):\n self.insert()\n data = self.tbl[2:6]\n assert self.check(self.idata[:2], data)",
"def test_fetch_from_wide_table(self):\n try:\n self.storage.store(RECORD_TABLE, value=\"a\", extra_column=\"EEK!\")\n a = self.clerk.fetch(Record, 1)\n a.value=\"aa\"\n self.clerk.store(a)\n except AttributeError:\n self.fail(\"shouldn't die when columns outnumber attributes\")",
"def test_num_columns(self):\n pass",
"def test_make_headless_row_has_single_col_tag(self):\n for as_type in ('p', 'ul', 'fieldset'):\n row_tag = 'li' if as_type == 'ul' else 'p'\n col_tag, single_col_tag, col_head_tag = 'span', 'div', None\n html_args = (row_tag, col_head_tag, col_tag, single_col_tag, as_type, False)\n html_el = \"This is some test content. \"\n column_count = 3\n col_attr = ' id=\"test-col\"'\n row_attr = ' class=\"row\"'\n result = self.form.make_headless_row(html_args, html_el, column_count, col_attr, row_attr)\n html_el = self.form._html_tag(single_col_tag, html_el, col_attr)\n expected = self.form._html_tag(row_tag, html_el, row_attr)\n self.assertEqual(expected, result, f\"Failed on as_{as_type}. \")",
"def test_view_delete_with_scope(self):\n table = Table(\n {\"id\": int, \"msg\": str, \"val\": float},\n index=\"id\",\n )\n table.view(\n computed_columns=[\n {\n \"column\": \"inverted\",\n \"computed_function_name\": \"invert\",\n \"inputs\": [\"val\"],\n }\n ],\n columns=[\"inverted\"],\n )\n table.update(\n [\n {\n \"id\": 1,\n \"msg\": \"test\",\n \"val\": 1.0,\n }\n ]\n )",
"def test_invalid_columns():\n train = ((\"Lorem ipsum dolor sit amet\", 3),\n (\"Sed ut perspiciatis unde\", 5.5))\n with pytest.raises(ValueError):\n TabularDataset(train, named_columns=['some_random_col'])",
"def test_slice_name_age(self):\n self.insert()\n data = self.tbl['name', 'age']\n assert self.check(self.idata[:, [0, 1, 2]], data)",
"def test_view_tombstone(self):\n\n self.prepare(rf=3, options={'hinted_handoff_enabled': False})\n node1, node2, node3 = self.cluster.nodelist()\n\n session = self.patient_exclusive_cql_connection(node1)\n session.max_trace_wait = 120\n session.execute('USE ks')\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n # Set initial values TS=0, verify\n session.execute(SimpleStatement(\"INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 0\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'a', 3.0]\n )\n\n session.execute(SimpleStatement(\"INSERT INTO t (id, v2) VALUES (1, 'b') USING TIMESTAMP 1\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0]\n )\n\n # change v's value and TS=3, tombstones v=1 and adds v=0 record\n session.execute(SimpleStatement(\"UPDATE t USING TIMESTAMP 3 SET v = 0 WHERE id = 1\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n\n assert_none(session, \"SELECT * FROM t_by_v WHERE v = 1\")\n\n logger.debug('Shutdown node2')\n node2.stop(wait_other_notice=True)\n\n session.execute(SimpleStatement(\"UPDATE t USING TIMESTAMP 4 SET v = 1 WHERE id = 1\",\n consistency_level=ConsistencyLevel.QUORUM))\n self._replay_batchlogs()\n\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0]\n )\n\n node2.start(wait_for_binary_proto=True)\n\n # We should get a digest mismatch\n query = SimpleStatement(\"SELECT * FROM t_by_v WHERE v = 1\",\n consistency_level=ConsistencyLevel.ALL)\n\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), True)\n\n # We should not get a digest mismatch the second time\n query = SimpleStatement(\"SELECT * FROM t_by_v WHERE v = 1\", consistency_level=ConsistencyLevel.ALL)\n\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n\n # Verify values one last time\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0],\n cl=ConsistencyLevel.ALL\n )",
"def test_create_from_dataframe_invalid_pk(self):\n self.insert()\n data = self.tbl.select()\n Table.create(\n ':memory:', \"Foo_2\", data,\n primary_key='foo', verbose=True)",
"def test_row_from_columns_no_errors(self):\n errors_on_separate_row = True\n field_setup = None\n error_names = ['non-field_name', 'not_a_field']\n for as_type in ('p', 'ul', 'fieldset'):\n setup = self.setup_row_from_columns(as_type, field_setup, error_names, errors_on_separate_row)\n for row in setup:\n self.assertEqual(len(row['expected']), 1)\n self.assertEqual(len(row['actual']), 1)\n self.assertEqual(row['expected'], row['actual'])",
"def test_row_from_columns_no_errors_table(self):\n errors_on_separate_row = True\n field_setup = None\n error_names = ['non-field_name', 'not_a_field']\n for as_type in ('p', 'ul', 'fieldset'):\n setup = self.setup_row_from_columns(as_type, field_setup, error_names, errors_on_separate_row)\n for row in setup:\n self.assertEqual(len(row['expected']), 1)\n self.assertEqual(len(row['actual']), 1)\n self.assertEqual(row['expected'], row['actual'])"
]
| [
"0.62218446",
"0.6083495",
"0.60534817",
"0.58710885",
"0.5809294",
"0.5730357",
"0.5714876",
"0.56728816",
"0.5636616",
"0.5632922",
"0.5626984",
"0.5605452",
"0.5581437",
"0.5578789",
"0.54950875",
"0.54854596",
"0.5480552",
"0.5479986",
"0.5469593",
"0.54688376",
"0.54524624",
"0.5440794",
"0.5432775",
"0.5423065",
"0.5415046",
"0.539169",
"0.5391308",
"0.5388885",
"0.5369581",
"0.53557205"
]
| 0.65026 | 0 |
Test that the crc_check_chance parameter is properly populated after materialized view creation and update | def test_crc_check_chance(self):
session = self.prepare()
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL "
"AND id IS NOT NULL PRIMARY KEY (v, id) WITH crc_check_chance = 0.5"))
assert_crc_check_chance_equal(session, "t_by_v", 0.5, view=True)
session.execute("ALTER MATERIALIZED VIEW t_by_v WITH crc_check_chance = 0.3")
assert_crc_check_chance_equal(session, "t_by_v", 0.3, view=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_crc():\n status_update = bytes.fromhex('7E1DFFAF13000064082D00000100000400000000000000000064000000067E')\n status_update_crc = 0x06\n\n conf_req = bytes.fromhex('7E050ABF04777E')\n conf_req_crc = 0x77\n\n spa = balboa.BalboaSpaWifi('gnet-37efed')\n\n result = spa.balboa_calc_cs(conf_req[1:], 4)\n print('Expected CRC={0} got {1}'.format(hex(conf_req_crc), hex(result)))\n if result != conf_req_crc:\n return 1\n\n result = spa.balboa_calc_cs(status_update[1:], 28)\n print('Expected CRC={0} got {1}'.format(hex(status_update_crc), hex(result)))\n if result != status_update_crc:\n return 1",
"def check_corr_rando(rand_no, personal_rand, prev_ledg_update):\n\n combined_rand = bytes(str((personal_rand + prev_ledg_update) % 2**512),'utf-8') \n combined_rand = wp.gen_rand_no(combined_rand)\n\n if combined_rand == rand_no:\n\n return True\n\n else:\n\n return False",
"def hmVerifyMsgCRCOK(destination, protocol, source, expectedFunction, expectedLength, datal) :\r\n badresponse = 0\r\n if protocol == constants.HMV3_ID:\r\n checksum = datal[len(datal)-2:]\r\n rxmsg = datal[:len(datal)-2]\r\n crc = crc16() # Initialises the CRC\r\n expectedchecksum = crc.run(rxmsg)\r\n if expectedchecksum == checksum:\r\n print(\"CRC is correct\")\r\n else:\r\n print(\"CRC is INCORRECT\")\r\n s = \"Incorrect CRC: %s Expected: %s \\n\" % (datal, expectedchecksum)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n # Check the response\r\n dest_addr = datal[0]\r\n frame_len_l = datal[1]\r\n frame_len_h = datal[2]\r\n frame_len = (frame_len_h << 8) | frame_len_l\r\n source_addr = datal[3]\r\n func_code = datal[4]\r\n\r\n\r\n\r\n if (dest_addr != 129 and dest_addr != 160):\r\n print(\"dest_addr is ILLEGAL\")\r\n s = \"%s : Controller %s : Illegal Dest Addr: %s\\n\" % (localtime, loop, dest_addr)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (dest_addr != destination):\r\n print(\"dest_addr is INCORRECT\")\r\n s = \"%s : Controller %s : Incorrect Dest Addr: %s\\n\" % (localtime, loop, dest_addr)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (source_addr < 1 or source_addr > 32):\r\n print(\"source_addr is ILLEGAL\")\r\n s = \"%s : Controller %s : Illegal Src Addr: %s\\n\" % (localtime, loop, source_addr)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (source_addr != source):\r\n print(\"source addr is INCORRECT\")\r\n s = \"%s : Controller %s : Incorrect Src Addr: %s\\n\" % (localtime, loop, source_addr)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (func_code != constants.FUNC_WRITE and func_code != constants.FUNC_READ):\r\n print(\"Func Code is UNKNWON\")\r\n s = \"%s : Controller %s : Unknown Func Code: %s\\n\" % (localtime, loop, func_code)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (func_code != expectedFunction):\r\n print(\"Func Code is UNEXPECTED\")\r\n s = \"%s : Controller %s : Unexpected Func Code: %s\\n\" % (localtime, loop, func_code)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (func_code == constants.FUNC_WRITE and frame_len != 7):\r\n # Reply to Write is always 7 long\r\n print(\"response length is INCORRECT\")\r\n s = \"%s : Controller %s : Incorrect length: %s\\n\" % (localtime, loop, frame_len)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (len(datal) != frame_len):\r\n print(\"response length MISMATCHES header\")\r\n s = \"%s : Controller %s : Mismatch length: %s %s\\n\" % (localtime, loop, len(datal), frame_len)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n \"\"\"if (func_code == constants.FUNC_READ and expectedLength !=len(datal) ):\r\n # Read response length is wrong\r\n print(\"response length not EXPECTED value\")\r\n print(len(datal))\r\n print(datal)\r\n s = \"%s : Controller %s : Incorrect length: %s\\n\" % (localtime, loop, frame_len)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\"\"\"\r\n if (badresponse == 0):\r\n return True\r\n else:\r\n return False\r\n\r\n else:\r\n assert 0, \"Un-supported protocol found %s\" % protocol",
"def testChecksumCondition(self):\n file_defs = [\n {'name': 'file_0_byte.txt', 'path': '', 'size': 0},\n {'name': 'file_1_byte.txt', 'path': '', 'size': 1},\n {'name': 'file_320k_minus 1_byte.txt', 'path': 'folder1', 'size': 320 * 1024 - 1, 'mod_inc': -1},\n ]\n\n self._setup_test_store(file_defs)\n\n # Check if this provider supports checksum as change condition\n drive = self.drive_class(self.account_id, self.config_file_dir, self.config_pw)\n if drive.files_differ_on_hash(\n os.path.join(self.test_local_dir, file_defs[0]['path'], file_defs[0]['name']),\n 'dummy_hash') is None:\n self.skipTest('Checksum change condition not supported for this provider.')\n\n self._sync_drives()\n\n # Modify files\n for file_def in file_defs:\n file_path =\\\n os.path.join(self.test_local_dir, file_def['path'], file_def['name'])\n test_utils.make_random_file(\n file_path, file_def['size'], leave_existing=False,\n modify_timestamp_ns=os.stat(file_path).st_mtime_ns)\n\n self._sync_drives()\n self._download_store()\n self.assertDirectoriesAreEqual(self.test_local_dir, self.test_download_dir)",
"def test_c(self):\n self.failIf(cgs.speed_of_light/mks.speed_of_light!=100)",
"def test_change_brightness_back_to_10():",
"def check_crc(function_specific_data, crc):\n crc_cal = calculate_crc(function_specific_data)\n \n if crc == crc_cal:\n return True\n else:\n return False",
"def test_right_checksum(self):\n self.assertEqual(utils.checksum('fooo'), 'L')",
"def check(self):\n self.init()\n self.calculate_output()\n self.compare_outputs_with_expects()",
"def test_check_cost():",
"def test_set_check(identifier, test_ratio):\n return crc32(np.int64(identifier)) & 0xffffffff < test_ratio * 2**32",
"def test_010(self):\n calculator = checksum.get_checksum_calculator_by_dataone_designator('SHA-1')\n calculator.update('test')\n self.assertTrue(calculator.hexdigest())",
"def testrescorr():\n tau = np.zeros((2,50))\n tau[0,25] = 2\n tau[1,23] = 3\n tau2 = spec_utils.res_corr(tau, 2, 8)\n #Check flux conserved\n assert np.abs(np.sum(tau2[0,:])/ np.sum(tau[0,:]) -1) < 1e-6\n assert np.abs(np.sum(tau2[1,:])/ np.sum(tau[1,:]) -1) < 1e-6\n #Check expanded by expected amount\n for i in (0,1):\n assert np.size(np.where(tau2[i,:]> 0)) == 15",
"def check_crc(self,message_from_sensor, check_value_from_sensor):\n remainder = message_from_sensor << 8 #Pad with 8 bits because we have to add in the check value\n remainder |= check_value_from_sensor #Add on the check value\n\n divsor = SHIFTED_DIVISOR\n\n for i in range(0, 16): #Operate on only 16 positions of max 24. The remaining 8 are our remainder and should be zero when we're done.\n if ((remainder & 1 << (23 - i)) > 0): #Check if there is a one in the left position\n remainder ^= divsor\n divsor >>= 1 #Rotate the divsor max 16 times so that we have 8 bits left of a remainder\n \n return remainder",
"def test_cmor_checker_called(self):\n checker = Mock()\n checker.return_value = Mock()\n with patch('esmvalcore.cmor._fixes.fix.Fix.get_fixes',\n return_value=[]):\n with patch('esmvalcore.cmor.fix._get_cmor_checker',\n return_value=checker) as get_mock:\n fix_data(self.cube, 'short_name', 'CMIP6', 'model', 'mip',\n 'frequency')\n get_mock.assert_called_once_with(\n table='CMIP6',\n automatic_fixes=True,\n check_level=CheckLevels.DEFAULT,\n fail_on_error=False,\n frequency='frequency',\n mip='mip',\n short_name='short_name',\n )\n checker.assert_called_once_with(self.cube)\n checker.return_value.check_data.assert_called_once_with()",
"def _get_checksum(self, arg):",
"def test_wrong_ref_power_mfcc():\n with raises(FeatureParamsError):\n MFCC(file_struct, FeatureTypes.framesync, ref_power=\"caca\")",
"def test_valid_input_succeeds(self, async_patch, chan_patch):\n self.assertTrue(send_rotate_to_can(self.USER, self.BIN_NUM))\n async_patch.assert_called_once()\n chan_patch.assert_called_once()",
"def test_update9(self):\n pass",
"def test_pm_Completeness(self):\n pass",
"def luck_check(chance):\n return randint(0, 100) < chance",
"def test_crc32():\n for data,seed in ((b'abc',0), (b'abc',0xFFFFFFFF), (b'abcdefg', 0), (b'abc', 0x99999999), (b'abc', 0x12345678)):\n print(\"%08x %s\" % (seed, binascii.b2a_hex(data)))\n racrc0 = crc32_binascii(reversevalue(seed,32), data)\n racrc1 = crc32_binascii(reversevalue(seed,32)^0xFFFFFFFF, data)\n print(\"ra -> (0)%08x (-1)%08x , xorred: (0)%08x (-1)%08x\" % (racrc0, racrc1, racrc0^0xFFFFFFFF, racrc1^0xFFFFFFFF))\n\n bacrc0 = crc32_binascii(seed, data)\n bacrc1 = crc32_binascii(seed^0xFFFFFFFF, data)\n print(\"ba -> (0)%08x (-1)%08x , xorred: (0)%08x (-1)%08x\" % (bacrc0, bacrc1, bacrc0^0xFFFFFFFF, bacrc1^0xFFFFFFFF))\n plcrc0 = crc32_poly2(seed, data)\n plcrc1 = crc32_poly2(seed^0xFFFFFFFF, data)\n print(\"pl -> (0)%08x (-1)%08x , xorred: (0)%08x (-1)%08x\" % (plcrc0, plcrc1, plcrc0^0xFFFFFFFF, plcrc1^0xFFFFFFFF))\n\n orcrc0 = calccrc32(seed, data)\n orcrc1 = calccrc32(seed^0xFFFFFFFF, data)\n print(\"or -> (0)%08x (-1)%08x , xorred: (0)%08x (-1)%08x\" % (orcrc0, orcrc1, orcrc0^0xFFFFFFFF, orcrc1^0xFFFFFFFF))",
"async def testcog_load_rescheduled(self):\n self.cog._reschedule = mock.create_autospec(self.cog._reschedule)\n await self.cog.cog_load()\n self.cog._reschedule.assert_awaited_once_with()",
"def test_phantom_roll(self):\n self.assertAlmostEqual(self.cheese.catphan_roll, self.expected_roll, delta=0.3)",
"def test_ref_power_mfcc():\n run_ref_power(MFCC)",
"def test_update_r():\n\n color = Color(100, 142, 438)\n\n assert color.get_r() == 100\n assert color.get_g() == 142\n assert color.get_b() == 438\n\n update_r(color, 202)\n\n assert color.get_r() == 202\n assert color.get_g() == 142\n assert color.get_b() == 438",
"def test_exist_and_change(self):\n colorList = ColorList()\n prev = colorList.pickColor()\n self.assertIsNotNone(prev)\n for i in range(100):\n color = colorList.pickColor()\n self.assertIsNotNone(color)\n self.assertTrue(color.r != prev.r or color.g != prev.g or color.b != prev.b)\n prev = color",
"def test_cmor_checker_called(self):\n checker = Mock()\n checker.return_value = Mock()\n with patch('esmvalcore.cmor._fixes.fix.Fix.get_fixes',\n return_value=[]):\n with patch('esmvalcore.cmor.fix._get_cmor_checker',\n return_value=checker) as get_mock:\n fix_metadata(\n cubes=[self.cube],\n short_name='short_name',\n project='CMIP6',\n dataset='dataset',\n mip='mip',\n frequency='frequency',\n )\n get_mock.assert_called_once_with(\n automatic_fixes=True,\n fail_on_error=False,\n frequency='frequency',\n mip='mip',\n short_name='short_name',\n table='CMIP6',\n check_level=CheckLevels.DEFAULT,)\n checker.assert_called_once_with(self.cube)\n checker.return_value.check_metadata.assert_called_once_with()",
"def ensure_crc(crc):\n\n crc = str(crc)\n if len(crc) == 1:\n return '0000'+crc\n elif len(crc) == 2:\n return '000'+crc\n elif len(crc) == 3:\n return '00'+crc\n elif len(crc) == 4:\n return '0'+crc\n elif len(crc) == 5:\n return crc\n else:\n print('There was a problem with the number ensure_crc')",
"def ensure_crc(crc):\n\n crc = str(crc)\n if len(crc) == 1:\n return '0000'+crc\n elif len(crc) == 2:\n return '000'+crc\n elif len(crc) == 3:\n return '00'+crc\n elif len(crc) == 4:\n return '0'+crc\n elif len(crc) == 5:\n return crc\n else:\n print('There was a problem with the number ensure_crc')"
]
| [
"0.65232795",
"0.59417105",
"0.5777849",
"0.56471217",
"0.5601775",
"0.55940324",
"0.5572456",
"0.5551666",
"0.5518868",
"0.5510873",
"0.54753995",
"0.5463696",
"0.5428021",
"0.5407845",
"0.5405547",
"0.54040843",
"0.53722864",
"0.53687423",
"0.53552634",
"0.5338046",
"0.532021",
"0.5301705",
"0.529",
"0.52849376",
"0.5280454",
"0.5277363",
"0.5270254",
"0.525637",
"0.52437115",
"0.52437115"
]
| 0.7335866 | 0 |
Test that a materialized view is immutable | def test_immutable(self):
session = self.prepare(user_table=True)
# cannot insert
assert_invalid(session, "INSERT INTO users_by_state (state, username) VALUES ('TX', 'user1');",
"Cannot directly modify a materialized view")
# cannot update
assert_invalid(session, "UPDATE users_by_state SET session_token='XYZ' WHERE username='user1' AND state = 'TX';",
"Cannot directly modify a materialized view")
# cannot delete a row
assert_invalid(session, "DELETE from users_by_state where state='TX';",
"Cannot directly modify a materialized view")
# cannot delete a cell
assert_invalid(session, "DELETE session_token from users_by_state where state='TX';",
"Cannot directly modify a materialized view")
# cannot alter a table
assert_invalid(session, "ALTER TABLE users_by_state ADD first_name varchar",
"Cannot use ALTER TABLE on Materialized View") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_view_tombstone(self):\n\n self.prepare(rf=3, options={'hinted_handoff_enabled': False})\n node1, node2, node3 = self.cluster.nodelist()\n\n session = self.patient_exclusive_cql_connection(node1)\n session.max_trace_wait = 120\n session.execute('USE ks')\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n # Set initial values TS=0, verify\n session.execute(SimpleStatement(\"INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 0\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'a', 3.0]\n )\n\n session.execute(SimpleStatement(\"INSERT INTO t (id, v2) VALUES (1, 'b') USING TIMESTAMP 1\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0]\n )\n\n # change v's value and TS=3, tombstones v=1 and adds v=0 record\n session.execute(SimpleStatement(\"UPDATE t USING TIMESTAMP 3 SET v = 0 WHERE id = 1\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n\n assert_none(session, \"SELECT * FROM t_by_v WHERE v = 1\")\n\n logger.debug('Shutdown node2')\n node2.stop(wait_other_notice=True)\n\n session.execute(SimpleStatement(\"UPDATE t USING TIMESTAMP 4 SET v = 1 WHERE id = 1\",\n consistency_level=ConsistencyLevel.QUORUM))\n self._replay_batchlogs()\n\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0]\n )\n\n node2.start(wait_for_binary_proto=True)\n\n # We should get a digest mismatch\n query = SimpleStatement(\"SELECT * FROM t_by_v WHERE v = 1\",\n consistency_level=ConsistencyLevel.ALL)\n\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), True)\n\n # We should not get a digest mismatch the second time\n query = SimpleStatement(\"SELECT * FROM t_by_v WHERE v = 1\", consistency_level=ConsistencyLevel.ALL)\n\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n\n # Verify values one last time\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0],\n cl=ConsistencyLevel.ALL\n )",
"def test_migrate_view_fields(self):\n self.test_view = RecordView.create(\n self.testcoll, test_view_id, test_view_create_values\n )\n migrate_coll_data(self.testcoll)\n # Read field definition and check for inline field list\n view_data = self.check_entity_values(\n \"_view\", test_view_id, check_values=test_view_migrated_values\n )\n return",
"def isImmutable(self):\n if self.isPrimaryKey():\n return True\n else:\n return self._immutable",
"def test_transform(self):\n shape = (3, 4, 5)\n index = (0, 2, 1)\n t = View(shape=shape, index=index)\n a = numpy.zeros(shape)\n a[index] = 2\n assert t.transform(a) == 2",
"def _test_base_column_in_view_pk_commutative_tombstone_(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1 = self.cluster.nodelist()[0]\n\n session.execute('USE ks')\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n for node in self.cluster.nodelist():\n node.nodetool(\"disableautocompaction\")\n\n # sstable 1, Set initial values TS=1\n self.update_view(session, \"INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 1\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, 'a', 3.0])\n\n # sstable 2, change v's value and TS=2, tombstones v=1 and adds v=0 record\n self.update_view(session, \"DELETE FROM t USING TIMESTAMP 2 WHERE id = 1;\", flush)\n assert_none(session, \"SELECT * FROM t_by_v\")\n assert_none(session, \"SELECT * FROM t\")\n\n # sstable 3, tombstones of mv created by base deletion should remain.\n self.update_view(session, \"INSERT INTO t (id, v) VALUES (1, 1) USING TIMESTAMP 3\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None])\n\n # sstable 4, shadow view row (id=1, v=1), insert (id=1, v=2, ts=4)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 set v = 2 WHERE id = 1;\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [2, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 2, None, None])\n\n # sstable 5, shadow view row (id=1, v=2), insert (id=1, v=1 ts=5)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 5 set v = 1 WHERE id = 1;\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None]) # data deleted by row-tombstone@2 should not resurrect\n\n if flush:\n self.cluster.compact()\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None]) # data deleted by row-tombstone@2 should not resurrect\n\n # shadow view row (id=1, v=1)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 5 set v = null WHERE id = 1;\", flush)\n assert_none(session, \"SELECT * FROM t_by_v\")\n assert_one(session, \"SELECT * FROM t\", [1, None, None, None])",
"def _test_no_base_column_in_view_pk_complex_timestamp(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1, node2, node3 = self.cluster.nodelist()\n\n session.execute('USE ks')\n session.execute(\"CREATE TABLE t (k int, c int, a int, b int, e int, f int, primary key(k, c))\")\n session.execute((\"CREATE MATERIALIZED VIEW mv AS SELECT k,c,a,b FROM t \"\n \"WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c, k)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n # update unselected, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 1 SET e=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, 1, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # remove unselected, add selected column, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 2 SET e=null, b=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, 1, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, 1])\n\n # remove selected column, view row is removed\n self.update_view(session, \"UPDATE t USING TIMESTAMP 2 SET e=null, b=null WHERE k=1 AND c=1;\", flush)\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")\n\n # update unselected with ts=3, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET f=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # insert livenesssInfo, view row should be alive\n self.update_view(session, \"INSERT INTO t(k,c) VALUES(1,1) USING TIMESTAMP 3\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # remove unselected, view row should be alive because of base livenessInfo alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET f=null WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # add selected column, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET a=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n\n # update unselected, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 SET f=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n\n # delete with ts=3, view row should be alive due to unselected@ts4\n self.update_view(session, \"DELETE FROM t USING TIMESTAMP 3 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # remove unselected, view row should be removed\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 SET f=null WHERE k=1 AND c=1;\", flush)\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")\n\n # add selected with ts=7, view row is alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 7 SET b=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, 
\"SELECT * FROM t\", [1, 1, None, 1, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, 1])\n\n # remove selected with ts=7, view row is dead\n self.update_view(session, \"UPDATE t USING TIMESTAMP 7 SET b=null WHERE k=1 AND c=1;\", flush)\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")\n\n # add selected with ts=5, view row is alive (selected column should not affects each other)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 5 SET a=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n\n start = time.time()\n # add selected with ttl=30 (we apparently need a long ttl because the flushing etc that self.update_view does can take a long time)\n update_time = self.update_view(session, \"UPDATE t USING TTL 30 SET a=1 WHERE k=1 AND c=1;\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n except AssertionError as ae:\n if (time.time() - start) >= 30:\n pytest.fail(\"Please increase the 30 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n wait_time = update_time + 30 - time.time()\n if wait_time > 0:\n time.sleep(wait_time)\n\n start = time.time()\n # update unselected with ttl=30, view row should be alive\n update_time = self.update_view(session, \"UPDATE t USING TTL 30 SET f=1 WHERE k=1 AND c=1;\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n except AssertionError as ae:\n if (time.time() - start) >= 30:\n pytest.fail(\"Please increase the 30 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n wait_time = update_time + 30 - time.time()\n if wait_time > 0:\n time.sleep(wait_time)\n\n # view row still alive due to base livenessInfo\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")",
"def test_simple_immutable_storage(self):\n immutables_rev = list(reversed(self.immutables))\n test_root = zip(immutables_rev, self.immutables)\n for key, value in test_root:\n self.db[key] = value\n\n self.db.close()\n self.db.open()\n\n for key, value in test_root:\n self.assertIn(key, self.db.keys())\n self.assertIn(value, self.db.values())\n self.assertEqual(self.db[key], value)",
"def _test_mv_with_default_ttl(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1, node2, node3 = self.cluster.nodelist()\n session.execute('USE ks')\n\n logger.debug(\"MV with same key and unselected columns\")\n session.execute(\"CREATE TABLE t2 (k int, a int, b int, c int, primary key(k, a)) with default_time_to_live=600\")\n session.execute((\"CREATE MATERIALIZED VIEW mv2 AS SELECT k,a,b FROM t2 \"\n \"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (a, k)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n self.update_view(session, \"UPDATE t2 SET c=1 WHERE k=1 AND a=1;\", flush)\n assert_one(session, \"SELECT k,a,b,c FROM t2\", [1, 1, None, 1])\n assert_one(session, \"SELECT k,a,b FROM mv2\", [1, 1, None])\n\n self.update_view(session, \"UPDATE t2 SET c=null WHERE k=1 AND a=1;\", flush)\n assert_none(session, \"SELECT k,a,b,c FROM t2\")\n assert_none(session, \"SELECT k,a,b FROM mv2\")\n\n self.update_view(session, \"UPDATE t2 SET c=2 WHERE k=1 AND a=1;\", flush)\n assert_one(session, \"SELECT k,a,b,c FROM t2\", [1, 1, None, 2])\n assert_one(session, \"SELECT k,a,b FROM mv2\", [1, 1, None])\n\n self.update_view(session, \"DELETE c FROM t2 WHERE k=1 AND a=1;\", flush)\n assert_none(session, \"SELECT k,a,b,c FROM t2\")\n assert_none(session, \"SELECT k,a,b FROM mv2\")\n\n if flush:\n self.cluster.compact()\n assert_none(session, \"SELECT * FROM t2\")\n assert_none(session, \"SELECT * FROM mv2\")\n\n # test with user-provided ttl\n self.update_view(session, \"INSERT INTO t2(k,a,b,c) VALUES(2,2,2,2) USING TTL 5\", flush)\n self.update_view(session, \"UPDATE t2 USING TTL 100 SET c=1 WHERE k=2 AND a=2;\", flush)\n self.update_view(session, \"UPDATE t2 USING TTL 50 SET c=2 WHERE k=2 AND a=2;\", flush)\n self.update_view(session, \"DELETE c FROM t2 WHERE k=2 AND a=2;\", flush)\n\n time.sleep(5)\n\n assert_none(session, \"SELECT k,a,b,c FROM t2\")\n assert_none(session, \"SELECT k,a,b FROM mv2\")\n\n if flush:\n self.cluster.compact()\n assert_none(session, \"SELECT * FROM t2\")\n assert_none(session, \"SELECT * FROM mv2\")\n\n logger.debug(\"MV with extra key\")\n session.execute(\"CREATE TABLE t (k int PRIMARY KEY, a int, b int) with default_time_to_live=600\")\n session.execute((\"CREATE MATERIALIZED VIEW mv AS SELECT * FROM t \"\n \"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 1, 1);\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1])\n\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 2, 1);\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 2, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 2, 1])\n\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 3, 1);\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 3, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 3, 1])\n\n if flush:\n self.cluster.compact()\n assert_one(session, \"SELECT * FROM t\", [1, 3, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 3, 1])\n\n # user provided ttl\n start = time.time()\n self.update_view(session, \"UPDATE t USING TTL 100 SET a = 4 WHERE k = 1\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 4, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 4, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 
100:\n pytest.fail(\"Please increase the 100 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n start = time.time()\n self.update_view(session, \"UPDATE t USING TTL 80 SET a = 5 WHERE k = 1\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 5, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 5, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 80:\n pytest.fail(\"Please increase the 80 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n start = time.time()\n self.update_view(session, \"UPDATE t USING TTL 60 SET a = 6 WHERE k = 1\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 6, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 6, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 60:\n pytest.fail(\"Please increase the 60 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n if flush:\n self.cluster.compact()\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 6, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 6, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 60:\n pytest.fail(\"Please increase the 60 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae",
"def test_queryset_is_immutable(self):\r\n query1 = TestModel.objects(test_id=5)\r\n assert len(query1._where) == 1\r\n\r\n query2 = query1.filter(expected_result__gte=1)\r\n assert len(query2._where) == 2\r\n assert len(query1._where) == 1",
"def test_mutate(self, change: Statement) -> None:\n self.assertThat(\n statement_mutates(change.statement()),\n Equals(True),\n )",
"def immutable(self) -> bool:\n return self._immutable",
"def make_immutable(self):\n # just set the flag to make object immutable and hashable\n self.immutable = True",
"def test_complex_mv_select_statements(self):\n cluster = self.cluster\n cluster.set_configuration_options({'enable_materialized_views': 'true'})\n cluster.populate(3).start()\n node1 = cluster.nodelist()[0]\n session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)\n\n logger.debug(\"Creating keyspace\")\n session.execute(\"CREATE KEYSPACE mvtest WITH replication = \"\n \"{'class': 'SimpleStrategy', 'replication_factor': '3'}\")\n session.execute('USE mvtest')\n\n mv_primary_keys = [\"((a, b), c)\",\n \"((b, a), c)\",\n \"(a, b, c)\",\n \"(c, b, a)\",\n \"((c, a), b)\"]\n\n for mv_primary_key in mv_primary_keys:\n\n session.execute(\"CREATE TABLE test (a int, b int, c int, d int, PRIMARY KEY (a, b, c))\")\n\n insert_stmt = session.prepare(\"INSERT INTO test (a, b, c, d) VALUES (?, ?, ?, ?)\")\n update_stmt = session.prepare(\"UPDATE test SET d = ? WHERE a = ? AND b = ? AND c = ?\")\n delete_stmt1 = session.prepare(\"DELETE FROM test WHERE a = ? AND b = ? AND c = ?\")\n delete_stmt2 = session.prepare(\"DELETE FROM test WHERE a = ?\")\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n rows = [(0, 0, 0, 0),\n (0, 0, 1, 0),\n (0, 1, 0, 0),\n (0, 1, 1, 0),\n (1, 0, 0, 0),\n (1, 0, 1, 0),\n (1, 1, -1, 0),\n (1, 1, 0, 0),\n (1, 1, 1, 0)]\n\n for row in rows:\n session.execute(insert_stmt, row)\n\n logger.debug(\"Testing MV primary key: {}\".format(mv_primary_key))\n\n session.execute(\"CREATE MATERIALIZED VIEW mv AS SELECT * FROM test WHERE \"\n \"a = 1 AND b IS NOT NULL AND c = 1 PRIMARY KEY {}\".format(mv_primary_key))\n time.sleep(3)\n\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # insert new rows that does not match the filter\n session.execute(insert_stmt, (0, 0, 1, 0))\n session.execute(insert_stmt, (1, 1, 0, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # insert new row that does match the filter\n session.execute(insert_stmt, (1, 2, 1, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # update rows that does not match the filter\n session.execute(update_stmt, (1, 1, -1, 0))\n session.execute(update_stmt, (0, 1, 1, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # update a row that does match the filter\n session.execute(update_stmt, (2, 1, 1, 1))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete rows that does not match the filter\n session.execute(delete_stmt1, (1, 1, -1))\n session.execute(delete_stmt1, (2, 0, 1))\n session.execute(delete_stmt2, (0,))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete a row that does match the filter\n session.execute(delete_stmt1, (1, 1, 1))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete a partition that matches the filter\n session.execute(delete_stmt2, (1,))\n assert_all(session, \"SELECT a, b, c, d FROM mv\", [], 
cl=ConsistencyLevel.QUORUM)\n\n # Cleanup\n session.execute(\"DROP MATERIALIZED VIEW mv\")\n session.execute(\"DROP TABLE test\")",
"def test_context_immutable():\n with pytest.raises(ImmutableStateError):\n Context().abc = 1",
"def test_update_node_state_readonly(self):\n pass",
"def test_populate_mv_after_insert(self):\n session = self.prepare(consistency_level=ConsistencyLevel.QUORUM)\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int)\")\n\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({v}, {v})\".format(v=i))\n\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL \"\n \"AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"wait for view to build\")\n self._wait_for_view(\"ks\", \"t_by_v\")\n\n logger.debug(\"wait that all batchlogs are replayed\")\n self._replay_batchlogs()\n\n for i in range(1000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(i), [i, i])",
"def test_drop_mv(self):\n session = self.prepare(user_table=True)\n\n # create another materialized view\n session.execute((\"CREATE MATERIALIZED VIEW users_by_birth_year AS \"\n \"SELECT * FROM users WHERE birth_year IS NOT NULL AND \"\n \"username IS NOT NULL PRIMARY KEY (birth_year, username)\"))\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 2, \"Expecting {} materialized view, got {}\".format(2, len(result))\n\n session.execute(\"DROP MATERIALIZED VIEW ks.users_by_state;\")\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 1, \"Expecting {} materialized view, got {}\".format(1, len(result))",
"def test_create(self):\n session = self.prepare(user_table=True)\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 1, \"Expecting 1 materialized view == got\" + str(result)",
"def _test_base_column_in_view_pk_complex_timestamp(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1, node2, node3 = self.cluster.nodelist()\n\n session.execute('USE ks')\n session.execute(\"CREATE TABLE t (k int PRIMARY KEY, a int, b int)\")\n session.execute((\"CREATE MATERIALIZED VIEW mv AS SELECT * FROM t \"\n \"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n # Set initial values TS=1\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 1, 1) USING TIMESTAMP 1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1])\n\n # increase b ts to 10\n self.update_view(session, \"UPDATE t USING TIMESTAMP 10 SET b = 2 WHERE k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 1, 2, 10])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 1, 2, 10])\n\n # switch entries. shadow a = 1, insert a = 2\n self.update_view(session, \"UPDATE t USING TIMESTAMP 2 SET a = 2 WHERE k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 2, 2, 10])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 2, 2, 10])\n\n # switch entries. shadow a = 2, insert a = 1\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET a = 1 WHERE k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 1, 2, 10])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 1, 2, 10])\n\n # switch entries. shadow a = 1, insert a = 2\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 SET a = 2 WHERE k = 1;\", flush, compact=True)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 2, 2, 10])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 2, 2, 10])\n\n # able to shadow view row even if base-column in view pk's ts is smaller than row timestamp\n # set row TS = 20, a@6, b@20\n self.update_view(session, \"DELETE FROM t USING TIMESTAMP 5 where k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, None, 2, 10])\n assert_none(session, \"SELECT k,a,b,writetime(b) FROM mv\")\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 1, 1) USING TIMESTAMP 6;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 1, 2, 10])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 1, 2, 10])\n self.update_view(session, \"INSERT INTO t (k, b) VALUES (1, 1) USING TIMESTAMP 20;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 1, 1, 20])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 1, 1, 20])\n\n # switch entries. shadow a = 1, insert a = 2\n self.update_view(session, \"UPDATE t USING TIMESTAMP 7 SET a = 2 WHERE k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(a),writetime(b) FROM t\", [1, 2, 1, 7, 20])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 2, 1, 20])\n\n # switch entries. 
shadow a = 2, insert a = 1\n self.update_view(session, \"UPDATE t USING TIMESTAMP 8 SET a = 1 WHERE k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(a),writetime(b) FROM t\", [1, 1, 1, 8, 20])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 1, 1, 20])\n\n # create another view row\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (2, 2, 2);\", flush)\n assert_one(session, \"SELECT k,a,b FROM t WHERE k = 2\", [2, 2, 2])\n assert_one(session, \"SELECT k,a,b FROM mv WHERE k = 2\", [2, 2, 2])\n\n # stop node2, node3\n logger.debug('Shutdown node2')\n node2.stop(wait_other_notice=True)\n logger.debug('Shutdown node3')\n node3.stop(wait_other_notice=True)\n # shadow a = 1, create a = 2\n query = SimpleStatement(\"UPDATE t USING TIMESTAMP 9 SET a = 2 WHERE k = 1\", consistency_level=ConsistencyLevel.ONE)\n self.update_view(session, query, flush)\n # shadow (a=2, k=2) after 3 second\n query = SimpleStatement(\"UPDATE t USING TTL 3 SET a = 2 WHERE k = 2\", consistency_level=ConsistencyLevel.ONE)\n self.update_view(session, query, flush)\n\n logger.debug('Starting node2')\n node2.start(wait_for_binary_proto=True)\n logger.debug('Starting node3')\n node3.start(wait_for_binary_proto=True)\n\n # For k = 1 & a = 1, We should get a digest mismatch of tombstones and repaired\n query = SimpleStatement(\"SELECT * FROM mv WHERE k = 1 AND a = 1\", consistency_level=ConsistencyLevel.ALL)\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), True)\n assert 0 == len(result.current_rows)\n\n # For k = 1 & a = 1, second time no digest mismatch\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n assert_none(session, \"SELECT * FROM mv WHERE k = 1 AND a = 1\")\n assert 0 == len(result.current_rows)\n\n # For k = 1 & a = 2, We should get a digest mismatch of data and repaired for a = 2\n query = SimpleStatement(\"SELECT * FROM mv WHERE k = 1 AND a = 2\", consistency_level=ConsistencyLevel.ALL)\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), True)\n assert 1 == len(result.current_rows)\n\n # For k = 1 & a = 2, second time no digest mismatch\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n assert 1 == len(result.current_rows)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv WHERE k = 1\", [1, 2, 1, 20])\n\n time.sleep(3)\n # For k = 2 & a = 2, We should get a digest mismatch of expired and repaired\n query = SimpleStatement(\"SELECT * FROM mv WHERE k = 2 AND a = 2\", consistency_level=ConsistencyLevel.ALL)\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), True)\n logger.debug(result.current_rows)\n assert 0 == len(result.current_rows)\n\n # For k = 2 & a = 2, second time no digest mismatch\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n assert 0 == len(result.current_rows)",
"def constrained_lens_object_test():\n return # TODO",
"def test_meta_mutable(self, cosmo):\n key = tuple(cosmo.meta.keys())[0] # select some key\n cosmo.meta[key] = cosmo.meta.pop(key) # will error if immutable",
"def make_immutable(mat):\n if issparse(mat):\n mat.data.flags.writeable = False\n if mat.format in {\"csr\", \"csc\", \"bsr\"}:\n mat.indices.flags.writeable = False\n mat.indptr.flags.writeable = False\n elif mat.format == \"coo\":\n mat.row.flags.writeable = False\n mat.col.flags.writeable = False\n else:\n mat.flags.writeable = False",
"def test_mutable(self):\n shutil.copyfile(TEST_RDF, TEST_MUTABLE_RDF)\n oi = SparqlImplementation(OntologyResource(slug=str(TEST_MUTABLE_RDF)))\n label = oi.label(NUCLEUS)\n preds = [IS_A, PART_OF]\n preds2 = [IS_A, FAKE_PREDICATE]\n ancestors = list(oi.ancestors(NUCLEUS, predicates=preds, reflexive=False))\n descendants = list(oi.descendants(NUCLEUS, predicates=preds, reflexive=False))\n oi.migrate_curies({NUCLEUS: FAKE_ID, PART_OF: FAKE_PREDICATE})\n self.assertEqual(label, oi.label(FAKE_ID))\n self.assertIsNone(oi.label(NUCLEUS))\n self.assertCountEqual(ancestors, oi.ancestors(FAKE_ID, predicates=preds2, reflexive=False))\n self.assertCountEqual([], list(oi.ancestors(NUCLEUS, predicates=preds, reflexive=False)))\n self.assertCountEqual(\n descendants, oi.descendants(FAKE_ID, predicates=preds2, reflexive=False)\n )\n self.assertCountEqual([], list(oi.descendants(NUCLEUS, predicates=preds, reflexive=False)))\n oi.save()",
"def test_matrix_testing(self):\n george = self.george\n alva = self.alva\n john = self.john\n bikes = self.bikes\n bikers = self.bikers\n harpers = self.harpers\n\n assertResourceUserState(self, bikes, [george], [], [])\n assertUserResourceState(self, george, [bikes], [], [])\n assertUserResourceState(self, alva, [], [], [])\n assertUserResourceState(self, john, [], [], [])\n assertUserGroupState(self, george, [harpers, bikers], [], [])\n assertUserGroupState(self, alva, [], [], [])\n assertUserGroupState(self, john, [], [], [])\n\n george.uaccess.share_resource_with_user(\n bikes, alva, PrivilegeCodes.CHANGE)\n\n assertResourceUserState(self, bikes, [george], [alva], [])\n assertUserResourceState(self, george, [bikes], [], [])\n assertUserResourceState(self, alva, [], [bikes], [])\n assertUserResourceState(self, john, [], [], [])\n\n george.uaccess.share_resource_with_user(\n bikes, john, PrivilegeCodes.VIEW)\n\n assertResourceUserState(self, bikes, [george], [alva], [john])\n assertUserResourceState(self, george, [bikes], [], [])\n assertUserResourceState(self, alva, [], [bikes], [])\n assertUserResourceState(self, john, [], [], [bikes])\n\n bikes.raccess.immutable = True\n bikes.raccess.save()\n\n assertResourceUserState(\n self, bikes, [george], [], [\n alva, john]) # squashes CHANGE\n assertUserResourceState(self, george, [bikes], [], [])\n # immutable squashes CHANGE\n assertUserResourceState(self, alva, [], [], [bikes])\n assertUserResourceState(self, john, [], [], [bikes])\n\n assertGroupUserState(self, bikers, [george], [], [])\n assertGroupUserState(self, harpers, [george], [], [])\n assertUserGroupState(self, george, [bikers, harpers], [], [])\n assertUserGroupState(self, alva, [], [], [])\n assertUserGroupState(self, john, [], [], [])\n\n george.uaccess.share_group_with_user(\n bikers, alva, PrivilegeCodes.CHANGE)\n\n assertGroupUserState(self, bikers, [george], [alva], [])\n assertGroupUserState(self, harpers, [george], [], [])\n assertUserGroupState(self, george, [bikers, harpers], [], [])\n assertUserGroupState(self, alva, [], [bikers], [])\n assertUserGroupState(self, john, [], [], [])\n\n george.uaccess.share_group_with_user(bikers, john, PrivilegeCodes.VIEW)\n\n assertGroupUserState(self, bikers, [george], [alva], [john])\n assertGroupUserState(self, harpers, [george], [], [])\n assertUserGroupState(self, george, [bikers, harpers], [], [])\n assertUserGroupState(self, alva, [], [bikers], [])\n assertUserGroupState(self, john, [], [], [bikers])\n\n assertResourceGroupState(self, bikes, [], [])\n assertGroupResourceState(self, bikers, [], [])\n\n george.uaccess.share_resource_with_group(\n bikes, bikers, PrivilegeCodes.CHANGE)\n\n # immutable squashes state\n assertResourceGroupState(self, bikes, [], [bikers])\n # immutable squashes state\n assertGroupResourceState(self, bikers, [], [bikes])\n\n bikes.raccess.immutable = False\n bikes.raccess.save()\n\n # without immutable, CHANGE returns\n assertResourceGroupState(self, bikes, [bikers], [])\n # without immutable, CHANGE returns\n assertGroupResourceState(self, bikers, [bikes], [])",
"def test_view_delete_with_scope(self):\n table = Table(\n {\"id\": int, \"msg\": str, \"val\": float},\n index=\"id\",\n )\n table.view(\n computed_columns=[\n {\n \"column\": \"inverted\",\n \"computed_function_name\": \"invert\",\n \"inputs\": [\"val\"],\n }\n ],\n columns=[\"inverted\"],\n )\n table.update(\n [\n {\n \"id\": 1,\n \"msg\": \"test\",\n \"val\": 1.0,\n }\n ]\n )",
"def test_drop_while_building(self):\n\n session = self.prepare(options={'concurrent_materialized_view_builders': 4}, install_byteman=True)\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n\n logger.debug(\"Inserting initial data\")\n for i in range(5000):\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS\".format(v=i))\n\n logger.debug(\"Slowing down MV build with byteman\")\n for node in self.cluster.nodelist():\n node.byteman_submit([mk_bman_path('4.0/view_builder_task_sleep.btm')])\n\n logger.debug(\"Create a MV\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"Wait and ensure the MV build has started. Waiting up to 2 minutes.\")\n self._wait_for_view_build_start(session, 'ks', 't_by_v', wait_minutes=2)\n\n logger.debug(\"Drop the MV while it is still building\")\n session.execute(\"DROP MATERIALIZED VIEW t_by_v\")\n\n logger.debug(\"Verify that the build has been stopped before its finalization without errors\")\n for node in self.cluster.nodelist():\n self.check_logs_for_errors()\n assert not node.grep_log('Marking view', filename='debug.log')\n assert node.grep_log('Stopping current view builder due to schema change', filename='debug.log')\n\n logger.debug(\"Verify that the view has been removed\")\n failed = False\n try:\n session.execute(\"SELECT COUNT(*) FROM t_by_v\")\n except InvalidRequest:\n failed = True\n self.assertTrue(failed, \"The view shouldn't be queryable\")\n self._assert_view_meta(session, views=1, exists=False)\n\n logger.debug(\"Create the MV again\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"Verify that the MV has been successfully created\")\n self._wait_for_view('ks', 't_by_v')\n assert_one(session, \"SELECT COUNT(*) FROM t_by_v\", [5000])",
"def test_update_visibility_query4(self):\n pass",
"def test_lwt(self):\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"Inserting initial data using IF NOT EXISTS\")\n for i in range(1000):\n session.execute(\n \"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS\".format(v=i)\n )\n self._replay_batchlogs()\n\n logger.debug(\"All rows should have been inserted\")\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )\n\n logger.debug(\"Tyring to UpInsert data with a different value using IF NOT EXISTS\")\n for i in range(1000):\n v = i * 2\n session.execute(\n \"INSERT INTO t (id, v, v2, v3) VALUES ({id}, {v}, 'a', 3.0) IF NOT EXISTS\".format(id=i, v=v)\n )\n self._replay_batchlogs()\n\n logger.debug(\"No rows should have changed\")\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )\n\n logger.debug(\"Update the 10 first rows with a different value\")\n for i in range(1000):\n v = i + 2000\n session.execute(\n \"UPDATE t SET v={v} WHERE id = {id} IF v < 10\".format(id=i, v=v)\n )\n self._replay_batchlogs()\n\n logger.debug(\"Verify that only the 10 first rows changed.\")\n results = list(session.execute(\"SELECT * FROM t_by_v;\"))\n assert len(results) == 1000\n for i in range(1000):\n v = i + 2000 if i < 10 else i\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(v),\n [v, i, 'a', 3.0]\n )\n\n logger.debug(\"Deleting the first 10 rows\")\n for i in range(1000):\n v = i + 2000\n session.execute(\n \"DELETE FROM t WHERE id = {id} IF v = {v} \".format(id=i, v=v)\n )\n self._replay_batchlogs()\n\n logger.debug(\"Verify that only the 10 first rows have been deleted.\")\n results = list(session.execute(\"SELECT * FROM t_by_v;\"))\n assert len(results) == 990\n for i in range(10, 1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )",
"def test_populate_mv_after_insert_wide_rows(self):\n session = self.prepare(consistency_level=ConsistencyLevel.QUORUM)\n\n session.execute(\"CREATE TABLE t (id int, v int, PRIMARY KEY (id, v))\")\n session.cluster.control_connection.wait_for_schema_agreement()\n\n for i in range(5):\n for j in range(10000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({}, {})\".format(i, j))\n\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL \"\n \"AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug(\"wait for view to build\")\n self._wait_for_view(\"ks\", \"t_by_v\")\n\n logger.debug(\"wait that all batchlogs are replayed\")\n self._replay_batchlogs()\n for i in range(5):\n for j in range(10000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} AND v = {}\".format(i, j), [j, i])",
"def testDirtyRefresh(self):\n \n pass"
]
| [
"0.6292278",
"0.6224354",
"0.62184346",
"0.6176855",
"0.61021394",
"0.60555184",
"0.5991923",
"0.58763736",
"0.580347",
"0.5795407",
"0.5791623",
"0.57561356",
"0.57543343",
"0.5753366",
"0.57337886",
"0.57281095",
"0.5682763",
"0.567763",
"0.5645807",
"0.5579052",
"0.5550505",
"0.55311257",
"0.55134386",
"0.55121946",
"0.54990697",
"0.5498524",
"0.5495269",
"0.5490372",
"0.5482931",
"0.54828197"
]
| 0.74655896 | 0 |
Test that we can drop a view properly | def test_drop_mv(self):
session = self.prepare(user_table=True)
# create another materialized view
session.execute(("CREATE MATERIALIZED VIEW users_by_birth_year AS "
"SELECT * FROM users WHERE birth_year IS NOT NULL AND "
"username IS NOT NULL PRIMARY KEY (birth_year, username)"))
result = list(session.execute(("SELECT * FROM system_schema.views "
"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
        assert len(result) == 2, "Expecting {} materialized views, got {}".format(2, len(result))
session.execute("DROP MATERIALIZED VIEW ks.users_by_state;")
result = list(session.execute(("SELECT * FROM system_schema.views "
"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
assert len(result) == 1, "Expecting {} materialized view, got {}".format(1, len(result)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_drop_while_building(self):\n\n session = self.prepare(options={'concurrent_materialized_view_builders': 4}, install_byteman=True)\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n\n logger.debug(\"Inserting initial data\")\n for i in range(5000):\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS\".format(v=i))\n\n logger.debug(\"Slowing down MV build with byteman\")\n for node in self.cluster.nodelist():\n node.byteman_submit([mk_bman_path('4.0/view_builder_task_sleep.btm')])\n\n logger.debug(\"Create a MV\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"Wait and ensure the MV build has started. Waiting up to 2 minutes.\")\n self._wait_for_view_build_start(session, 'ks', 't_by_v', wait_minutes=2)\n\n logger.debug(\"Drop the MV while it is still building\")\n session.execute(\"DROP MATERIALIZED VIEW t_by_v\")\n\n logger.debug(\"Verify that the build has been stopped before its finalization without errors\")\n for node in self.cluster.nodelist():\n self.check_logs_for_errors()\n assert not node.grep_log('Marking view', filename='debug.log')\n assert node.grep_log('Stopping current view builder due to schema change', filename='debug.log')\n\n logger.debug(\"Verify that the view has been removed\")\n failed = False\n try:\n session.execute(\"SELECT COUNT(*) FROM t_by_v\")\n except InvalidRequest:\n failed = True\n self.assertTrue(failed, \"The view shouldn't be queryable\")\n self._assert_view_meta(session, views=1, exists=False)\n\n logger.debug(\"Create the MV again\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"Verify that the MV has been successfully created\")\n self._wait_for_view('ks', 't_by_v')\n assert_one(session, \"SELECT COUNT(*) FROM t_by_v\", [5000])",
"def test_drop_table(self):\n session = self.prepare(user_table=True)\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 1, \"Expecting {} materialized view, got {}\".format(1, len(result))\n\n assert_invalid(\n session,\n \"DROP TABLE ks.users;\",\n \"Cannot drop table when materialized views still depend on it\"\n )\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 1, \"Expecting {} materialized view, got {}\".format(1, len(result))\n\n session.execute(\"DROP MATERIALIZED VIEW ks.users_by_state;\")\n session.execute(\"DROP TABLE ks.users;\")\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 0, \"Expecting {} materialized view, got {}\".format(1, len(result))",
"def test_drop_with_stopped_build(self):\n\n session = self.prepare(options={'concurrent_materialized_view_builders': 4}, install_byteman=True)\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n nodes = self.cluster.nodelist()\n\n logger.debug(\"Inserting initial data\")\n for i in range(5000):\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS\".format(v=i))\n\n logger.debug(\"Slowing down MV build with byteman\")\n for node in nodes:\n node.byteman_submit([mk_bman_path('4.0/view_builder_task_sleep.btm')])\n\n logger.debug(\"Create a MV\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"Wait and ensure the MV build has started. Waiting up to 2 minutes.\")\n self._wait_for_view_build_start(session, 'ks', 't_by_v', wait_minutes=2)\n\n logger.debug(\"Stopping all running view build tasks with nodetool\")\n for node in nodes:\n node.watch_log_for('Starting new view build for range', filename='debug.log', timeout=120)\n node.nodetool('stop VIEW_BUILD')\n\n logger.debug(\"Checking logs to verify that some view build tasks have been stopped\")\n for node in nodes:\n node.watch_log_for('Stopped build for view', filename='debug.log', timeout=120)\n node.watch_log_for('Compaction interrupted: View build', filename='system.log', timeout=120)\n self.check_logs_for_errors()\n\n logger.debug(\"Drop the MV while it is still building\")\n session.execute(\"DROP MATERIALIZED VIEW t_by_v\")\n\n logger.debug(\"Verify that the build has been stopped before its finalization without errors\")\n for node in nodes:\n self.check_logs_for_errors()\n assert not node.grep_log('Marking view', filename='debug.log')\n assert node.grep_log('Stopping current view builder due to schema change', filename='debug.log')\n\n logger.debug(\"Verify that the view has been removed\")\n failed = False\n try:\n session.execute(\"SELECT COUNT(*) FROM t_by_v\")\n except InvalidRequest:\n failed = True\n assert failed, \"The view shouldn't be queryable\"\n\n logger.debug(\"Create the MV again\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"Verify that the MV has been successfully created\")\n self._wait_for_view('ks', 't_by_v')\n # The original byteman delay it's still there and can make this flaky CASSANDRA-16962\n for i in range(10):\n try:\n assert_one(session, \"SELECT COUNT(*) FROM t_by_v\", [5000])\n except AssertionError:\n time.sleep(1)\n else:\n break\n\n assert_one(session, \"SELECT COUNT(*) FROM t_by_v\", [5000])",
"def test_drop_column(self):\n session = self.prepare(user_table=True)\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 1, \"Expecting {} materialized view, got {}\".format(1, len(result))\n\n assert_invalid(\n session,\n \"ALTER TABLE ks.users DROP state;\",\n \"Cannot drop column state on base table with materialized views.\"\n )",
"def test_view_metadata_cleanup(self):\n session = self.prepare(rf=2, nodes=2)\n\n def populate_data(session, rows):\n logger.debug(\"populate base data\")\n for v in range(rows):\n session.execute(\"INSERT INTO t(k,c,a,b,e,f) VALUES({v},{v},{v},{v},{v},{v})\".format(v=v))\n\n def verify_data(session, rows, views):\n logger.debug(\"verify view data\")\n for v in range(rows):\n for view in range(views):\n assert_one(session, \"SELECT * FROM mv{} WHERE k={v} AND c={v}\".format(view, v=v), [v, v, v, v, v, v])\n\n def create_keyspace(session, ks=\"ks1\", rf=2):\n create_ks(session, ks, rf)\n\n def create_table(session):\n logger.debug(\"create base table\")\n session.execute(\"CREATE TABLE t (k int, c int, a int, b int, e int, f int, primary key(k, c))\")\n\n def create_views(session, views, keyspace=\"ks1\"):\n logger.debug(\"create view\")\n for view in range(views):\n session.execute(\"CREATE MATERIALIZED VIEW mv{} AS SELECT * FROM t \"\n \"WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c,k)\".format(view),\n timeout=60)\n self._wait_for_view(keyspace, \"mv{}\".format(view))\n\n def drop_keyspace(session, keyspace=\"ks1\"):\n logger.debug(\"drop keyspace {}\".format(keyspace))\n session.execute(\"DROP KEYSPACE IF EXISTS {}\".format(keyspace),\n timeout=60)\n\n def drop_views(session, views):\n logger.debug(\"drop all views\")\n for view in range(views):\n session.execute(\"DROP MATERIALIZED VIEW IF EXISTS mv{}\".format(view))\n\n rows = 100\n views = 5\n\n create_keyspace(session)\n create_table(session)\n populate_data(session, rows)\n create_views(session, views)\n verify_data(session, rows, views)\n\n self._assert_view_meta(session, views)\n drop_keyspace(session)\n self._assert_view_meta(session, views, exists=False)\n\n create_keyspace(session)\n create_table(session)\n populate_data(session, rows)\n create_views(session, views)\n verify_data(session, rows, views)\n\n self._assert_view_meta(session, views)\n drop_views(session, views)\n self._assert_view_meta(session, views, exists=False)",
"def _removeView(self, win):\n raise RuntimeError('Not implemented')",
"def drop(self):\n pass",
"def drop(self):\n pass",
"def test_drop(self):\n client_cik, client_rid = self.makeClient(self.cik)\n isok, response = self.onep.drop(self.cik, client_rid)\n self.assertTrue(isok, 'client drop succeeded')\n isok, response = self.onep.info(self.cik, client_rid)\n self.assertFalse(isok, 'dropped client was really dropped')",
"def test_drop_file_view_with_url(self):\n url = reverse('profiles:drop_file')\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(\n response,\n 'profiles/drop_file.html'\n )",
"def test_snapshot_and_restore_drop_table_remove_dropped_column(self):\n cluster = self.cluster\n cluster.populate(1).start()\n node1, = cluster.nodelist()\n session = self.patient_cql_connection(node1)\n\n # Create schema and insert some data\n create_ks(session, 'ks', 1)\n session.execute(\"CREATE TABLE ks.cf (k int PRIMARY KEY, a text, b text)\")\n session.execute(\"INSERT INTO ks.cf (k, a, b) VALUES (1, 'a', 'b')\")\n assert_one(session, \"SELECT * FROM ks.cf\", [1, \"a\", \"b\"])\n\n # Take a snapshot and drop the column and then drop table\n snapshot_dir = self.make_snapshot(node1, 'ks', 'cf', 'basic')\n session.execute(\"ALTER TABLE ks.cf DROP b\")\n assert_one(session, \"SELECT * FROM ks.cf\", [1, \"a\"])\n session.execute(\"DROP TABLE ks.cf\")\n\n # Restore schema and data from snapshot, data should be the same as input\n self.restore_snapshot_schema(snapshot_dir, node1, 'ks', 'cf')\n self.restore_snapshot(snapshot_dir, node1, 'ks', 'cf')\n node1.nodetool('refresh ks cf')\n assert_one(session, \"SELECT * FROM ks.cf\", [1, \"a\", \"b\"])\n\n # Clean up\n logger.debug(\"removing snapshot_dir: \" + snapshot_dir)\n shutil.rmtree(snapshot_dir)",
"def test_shift_view_delete(self):\n \n path = reverse('api:id-shifts', kwargs={'id': self.shift.id})\n request = self.factory.delete(path)\n response = ShiftView.delete(self, request, id=self.shift.id)\n assert response.status_code == 204\n assert Shift.objects.filter(id=self.shift.id).count() == 0",
"def on_drop(self):\n print(\"You have dropped\", self.name)",
"def test_delete_composition(self):\n pass",
"def get_drop(s):\n s = match_token(s, 'DROP')\n s = match_token(s, 'ROLLUP')\n s = match_token(s, 'VIEW')\n s, rollup_view_name = get_token(s)\n t = drop_rollup_str(rollup_view_name)\n return (s, t)",
"def test_drop_medical_file_view_with_url(self):\n url = reverse('profiles:drop_medical_file')\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(\n response,\n 'profiles/drop_medical_file.html'\n )",
"def test_drop_table(self):\n schema: t.List[DiffableTable] = []\n schema_snapshot: t.List[DiffableTable] = [\n DiffableTable(class_name=\"Band\", tablename=\"band\", columns=[])\n ]\n schema_differ = SchemaDiffer(\n schema=schema, schema_snapshot=schema_snapshot, auto_input=\"y\"\n )\n\n self.assertTrue(len(schema_differ.drop_tables.statements) == 1)\n self.assertEqual(\n schema_differ.drop_tables.statements[0],\n \"manager.drop_table(class_name='Band', tablename='band')\",\n )",
"def drop(self):\n init_x = self.x\n init_y = self.y\n init_z = self.z\n drop_z = self.drop_height\n \n #drop to given height\n self.move_to(init_x, init_y, drop_z)\n \n #open gripper\n self.gripper.command_position(100)\n \n #return to initial position\n self.move_to(init_x, init_y, init_z)",
"def test_drop_column(self):\n name_column = Varchar()\n name_column._meta.name = \"name\"\n\n genre_column = Varchar()\n genre_column._meta.name = \"genre\"\n\n schema: t.List[DiffableTable] = [\n DiffableTable(\n class_name=\"Band\",\n tablename=\"band\",\n columns=[name_column],\n )\n ]\n schema_snapshot: t.List[DiffableTable] = [\n DiffableTable(\n class_name=\"Band\",\n tablename=\"band\",\n columns=[name_column, genre_column],\n )\n ]\n\n schema_differ = SchemaDiffer(\n schema=schema, schema_snapshot=schema_snapshot, auto_input=\"y\"\n )\n\n self.assertTrue(len(schema_differ.drop_columns.statements) == 1)\n self.assertEqual(\n schema_differ.drop_columns.statements[0],\n \"manager.drop_column(table_class_name='Band', tablename='band', column_name='genre', db_column_name='genre')\", # noqa\n )",
"def remove(self, view_id):\n raise NotImplementedError(\"calling abstract method\")",
"def test_drop(self):\n my_conn = MySQL(*self.conn_params)\n sql = \"CREATE TABLE table1 (id integer, column1 varchar(100), \" \\\n \"column2 double)\"\n my_conn.execute(sql)\n my_conn.get_table('table1')\n my_conn.drop('table1') # DROP example\n with self.assertRaises(InvalidRequestError):\n my_conn.get_table('table1')",
"def test_migrate_view_fields(self):\n self.test_view = RecordView.create(\n self.testcoll, test_view_id, test_view_create_values\n )\n migrate_coll_data(self.testcoll)\n # Read field definition and check for inline field list\n view_data = self.check_entity_values(\n \"_view\", test_view_id, check_values=test_view_migrated_values\n )\n return",
"def test_drop_id_file_view_with_url(self):\n url = reverse('profiles:drop_id_file')\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(\n response,\n 'profiles/drop_id_file.html'\n )",
"def validate_view_naming(view_file):\n parsed = sqlparse.parse(view_file.read_text())[0]\n tokens = [\n t\n for t in parsed.tokens\n if not (t.is_whitespace or isinstance(t, sqlparse.sql.Comment))\n ]\n is_view_statement = (\n \" \".join(tokens[0].normalized.split()) == \"CREATE OR REPLACE\"\n and tokens[1].normalized == \"VIEW\"\n )\n if is_view_statement:\n target_view = str(tokens[2]).strip().split()[0]\n try:\n [project_id, dataset_id, view_id] = target_view.replace(\"`\", \"\").split(\".\")\n if not (\n view_file.parent.name == view_id\n and view_file.parent.parent.name == dataset_id\n and view_file.parent.parent.parent.name == project_id\n ):\n print(\n f\"{view_file} ERROR\\n\"\n f\"View name {target_view} not matching directory structure.\"\n )\n return False\n except Exception:\n print(f\"{view_file} ERROR\\n{target_view} missing project ID qualifier.\")\n return False\n else:\n print(\n f\"ERROR: {view_file} does not appear to be \"\n \"a CREATE OR REPLACE VIEW statement! Quitting...\"\n )\n return False\n return True",
"def test_delete_boat(self):\n pass",
"def test_handle_view_lookup_error(self):\n self.db.query.return_value = []\n self.assertTupleEqual(self.testcommand.handle(\"team view brs\", user),\n (self.testcommand.lookup_error, 200))",
"def drop(self, tube):\n cmd = tube.cmd('drop')\n args = ()\n\n the_tuple = self.tnt.call(cmd, args)\n\n return bool(the_tuple.return_code == 0)",
"def toolDropped(*args, **kwargs)->None:\n pass",
"def help_drop(self):\n print(DROP)",
"def test_view_name(self):\n if self.test_ctxtype == context.ContextTypes.STANDALONE:\n return self.skipTest(\"Skipping unicode view tests for standalone.\")\n view_names = [u\"®®®®\", u\"™¬⊕⇔\"]\n topo = Topology()\n\n view0 = topo.source([\"hello\"]).view(name=view_names[0])\n view1 = topo.source([\"view!\"]).view(name=view_names[1])\n\n self.tester = Tester(topo)\n self.tester.local_check = self._check_view_names\n\n self.tester.test(self.test_ctxtype, self.test_config)"
]
| [
"0.67889017",
"0.6657126",
"0.6492653",
"0.6312419",
"0.6294251",
"0.6155534",
"0.61430365",
"0.61430365",
"0.61031616",
"0.5975371",
"0.57409096",
"0.5730362",
"0.5695469",
"0.56953293",
"0.5690902",
"0.5669387",
"0.56591684",
"0.56381315",
"0.563404",
"0.56134856",
"0.55903566",
"0.55617017",
"0.556114",
"0.5553096",
"0.55456257",
"0.5528779",
"0.55279267",
"0.5526795",
"0.55221826",
"0.5509692"
]
| 0.6910934 | 0 |
Test that we cannot drop a column if it is used by a MV | def test_drop_column(self):
session = self.prepare(user_table=True)
result = list(session.execute(("SELECT * FROM system_schema.views "
"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
assert len(result) == 1, "Expecting {} materialized view, got {}".format(1, len(result))
assert_invalid(
session,
"ALTER TABLE ks.users DROP state;",
"Cannot drop column state on base table with materialized views."
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_drop_column(self):\n name_column = Varchar()\n name_column._meta.name = \"name\"\n\n genre_column = Varchar()\n genre_column._meta.name = \"genre\"\n\n schema: t.List[DiffableTable] = [\n DiffableTable(\n class_name=\"Band\",\n tablename=\"band\",\n columns=[name_column],\n )\n ]\n schema_snapshot: t.List[DiffableTable] = [\n DiffableTable(\n class_name=\"Band\",\n tablename=\"band\",\n columns=[name_column, genre_column],\n )\n ]\n\n schema_differ = SchemaDiffer(\n schema=schema, schema_snapshot=schema_snapshot, auto_input=\"y\"\n )\n\n self.assertTrue(len(schema_differ.drop_columns.statements) == 1)\n self.assertEqual(\n schema_differ.drop_columns.statements[0],\n \"manager.drop_column(table_class_name='Band', tablename='band', column_name='genre', db_column_name='genre')\", # noqa\n )",
"def test_column_presence(self):\n\n columns = [\"feature_is_filtered\", \"feature_biotype\"]\n\n for component_name in [\"var\", \"raw.var\"]:\n for column in columns:\n if column == \"feature_is_filtered\" and component_name == \"raw.var\":\n continue\n with self.subTest(component_name=component_name, column=column):\n\n # Resetting validator\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n\n component = Validator.getattr_anndata(\n self.validator.adata, component_name\n )\n component.drop(column, axis=1, inplace=True)\n\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n f\"ERROR: Dataframe '{component_name}' is missing \"\n f\"column '{column}'.\"\n ],\n )",
"def test_drop_mv(self):\n session = self.prepare(user_table=True)\n\n # create another materialized view\n session.execute((\"CREATE MATERIALIZED VIEW users_by_birth_year AS \"\n \"SELECT * FROM users WHERE birth_year IS NOT NULL AND \"\n \"username IS NOT NULL PRIMARY KEY (birth_year, username)\"))\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 2, \"Expecting {} materialized view, got {}\".format(2, len(result))\n\n session.execute(\"DROP MATERIALIZED VIEW ks.users_by_state;\")\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 1, \"Expecting {} materialized view, got {}\".format(1, len(result))",
"def test_column_presence(self):\n\n columns = [\n \"assay_ontology_term_id\",\n \"development_stage_ontology_term_id\",\n \"disease_ontology_term_id\",\n \"ethnicity_ontology_term_id\",\n \"is_primary_data\",\n \"sex_ontology_term_id\",\n \"tissue_ontology_term_id\",\n ]\n\n for column in columns:\n with self.subTest(column=column):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n\n self.validator.adata.obs.drop(column, axis=1, inplace=True)\n # Remove batch condition because it has a dependency with is_primary_data\n self.validator.adata.uns.pop(\"batch_condition\")\n\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [f\"ERROR: Dataframe 'obs' is missing \" f\"column '{column}'.\"],\n )",
"def test_run_missing_column(self):\n input_df = self.spark.createDataFrame(\n data=[\n (\"a\",),\n ],\n schema=StructType([\n StructField(\"baz\", StringType()),\n ])\n )\n with raises(AnalysisException):\n self.check.run(input_df)",
"def test_drop_zero_variance_columns_omiting_NA_will_drop_a_column(data):\n step = DropZVColumnsStep(naomit=True)\n bdf = step.prepare(data).bake(data)\n\n assert 'name' in bdf.columns\n assert 'released' in bdf.columns\n assert 'episodes' not in bdf.columns",
"def test_drop_zero_variance_on_subset_columns_with_zv_removals(data):\n step = DropZVColumnsStep(['released', 'episodes'], naomit=True)\n bdf = step.prepare(data).bake(data)\n\n assert 'name' in bdf.columns\n assert 'released' in bdf.columns\n assert 'episodes' not in bdf.columns",
"def test_remove_column(self):\n self.spy_on(DataGrid.remove_column)\n\n hook = DataGridColumnsHook(extension=self.extension,\n datagrid_cls=DataGrid,\n columns=[Column(id='sandbox2')])\n\n hook.disable_hook()\n\n self.assertTrue(DataGrid.remove_column.called)",
"def test_column_missing(self):\n columns = self.normalizer().config['columns']\n for column in columns:\n df = self.df.copy()\n df = df.drop(column, axis=1)\n with self.assertRaises(ValueError):\n self.normalizer().normalize(df, **self.kwargs)",
"def drop_columns(self, col):\n try:\n self.cleaned_data.drop(col, axis=1, inplace=True)\n except Exception as e:\n raise e",
"def test_drop_zero_variance_columns_considering_NA_will_not_drop_any_column(data):\n step = DropZVColumnsStep()\n bdf = step.prepare(data).bake(data)\n\n assert 'name' in bdf.columns\n assert 'released' in bdf.columns\n assert 'episodes' in bdf.columns",
"def test_drop_table(self):\n session = self.prepare(user_table=True)\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 1, \"Expecting {} materialized view, got {}\".format(1, len(result))\n\n assert_invalid(\n session,\n \"DROP TABLE ks.users;\",\n \"Cannot drop table when materialized views still depend on it\"\n )\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 1, \"Expecting {} materialized view, got {}\".format(1, len(result))\n\n session.execute(\"DROP MATERIALIZED VIEW ks.users_by_state;\")\n session.execute(\"DROP TABLE ks.users;\")\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 0, \"Expecting {} materialized view, got {}\".format(1, len(result))",
"def test_column_presence_organism(self):\n\n self.validator.adata.obs.drop(\"organism_ontology_term_id\", axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: Dataframe 'obs' is missing column \"\n \"'organism_ontology_term_id'.\",\n \"ERROR: Checking values with dependencies failed for \"\n \"adata.obs['ethnicity_ontology_term_id'], this is likely due \"\n \"to missing dependent column in adata.obs.\",\n \"ERROR: Checking values with dependencies failed for \"\n \"adata.obs['development_stage_ontology_term_id'], this is likely due \"\n \"to missing dependent column in adata.obs.\",\n ],\n )",
"def test_invalid_columns():\n train = ((\"Lorem ipsum dolor sit amet\", 3),\n (\"Sed ut perspiciatis unde\", 5.5))\n with pytest.raises(ValueError):\n TabularDataset(train, named_columns=['some_random_col'])",
"def test_frame_invalid_column(self):\n with self.assertRaises(ValueError):\n self.frame.take(100, columns=['not_in'])",
"def test_does_not_move(self):\n Herbivore.set_parameters({\"mu\": 0})\n nt.assert_false(self.herb.check_migrate())",
"def test_drop_zero_variance_on_subset_columns(data):\n step = DropZVColumnsStep(['name', 'released'], naomit=True)\n bdf = step.prepare(data).bake(data)\n\n assert 'name' in bdf.columns\n assert 'released' in bdf.columns\n assert 'episodes' in bdf.columns",
"def verify(self):\n for col in self.columns:\n if col not in self.table_obj.columns.keys():\n raise Exception('{} column not found in {}'.format(\n col, self.table_obj))",
"def test_no_column(self):\n\n self.assertRaises(ValueError, self.table.where, 'True')",
"def _dataframe_column_check(df: DataFrame, compulsory_columns: Sequence) -> None:\n if not set(compulsory_columns).issubset(df.columns):\n diff = set(compulsory_columns).difference(df.columns)\n msg = (\n \"The following compulsory column(s) are missing from the \"\n f\"DataFrame: {diff}\"\n )\n raise ValueError(msg)",
"def _drop_cols(self, duplicate_cols):\n self._hybrid_meta.drop(\n duplicate_cols + DROPPED_COLUMNS,\n axis=1, inplace=True, errors='ignore'\n )",
"def test_remove_columns(self):\n table = Table('table1', key=['col1', 'col2'])[\n Column('col1'),\n Column('col2'),\n Column('col3'),\n Column('col4'),\n ]\n\n table.remove_columns(('col2', 'col3'))\n\n self.assertEqual(2, len(table.columns))\n self.assertEqual('col1', table.columns[0].name)\n self.assertEqual('col4', table.columns[1].name)\n self.assertEqual([], table.key)",
"def check_for_column(self, column_name):\n if column_name not in self.data.columns:\n raise RuntimeError(\"Source {} has no '{}' column\".format(\n self.name, column_name))",
"def checkIfColumnControlledVocab(self, column_name):\n try:\n con = self.getMetadataDatabaseConnection()\n valid_controlled_column=0\n db_output=con.cursor().callproc('check_if_column_controlled',\n [column_name.upper(),\\\n valid_controlled_column])\n if db_output[1]==0:\n return False\n else:\n return True\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), str(e))\n return False",
"def test_extract_invalid_column(self):\n self.dicom.extract_keywords([\"invalid\"])\n\n # ensure column was added\n columns = self.dicom.metadata.column_names\n if u'invalid' not in columns:\n raise Exception(\"Invalid column not added\")\n\n # compare expected and actual result\n invalid_column = self.dicom.metadata.take(self.count, columns=[u'invalid'])\n expected_result = [[None] for x in range(0, self.count)]\n self.assertEqual(invalid_column, expected_result)",
"def dropTableColumn(self, tablename, columnname):\n\n # Check if the table exists\n if tablename in self.getTableNames():\n\n # Check that the column exists\n if columnname in self.getColumnNames(tablename):\n\n #Allow columnames with spaces\n columname = '`'+columnname+'`'\n\n # ALTER TABLE DROP COLUMN IS ONLY SUPPORTED IN MYSQL\n if self.connector == 'mysql':\n\n sqlcmd = ('ALTER TABLE ' + tablename + ' DROP COLUMN ' +\n columnname)\n self._c.execute(sqlcmd)\n\n # Commit changes\n self._conn.commit()\n\n else:\n print('Error deleting column. Column drop not supported for SQLITE')\n\n else:\n print('Error deleting column. The column does not exist')\n print(tablename, columnname)\n\n else:\n print('Error deleting column. Please, select a valid table name' +\n ' from the list')\n print(self.getTableNames())\n\n return",
"def verify(self):\n for col in self._columns:\n if col not in self._table_obj.columns.keys():\n raise GaiaException('{} column not found in {}'.format(\n col, self._table_obj))",
"def bad_column_positions(self, x):\n return x.is_null()",
"def test_columns_not_in_X_error(self):\n\n df = d.create_df_1()\n\n x = BaseTransformer(columns=[\"a\", \"z\"])\n\n with pytest.raises(ValueError):\n\n x.columns_check(X=df)",
"def test_columns_not_in_raw_var(self):\n\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns[\"X_normalization\"] = \"CPM\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"],\n )"
]
| [
"0.6937958",
"0.675997",
"0.6724233",
"0.6671695",
"0.63737416",
"0.63224864",
"0.6310467",
"0.6293324",
"0.62916225",
"0.6285022",
"0.62181425",
"0.6184214",
"0.61387783",
"0.6126988",
"0.610433",
"0.60856223",
"0.6075637",
"0.6050881",
"0.60451186",
"0.60119206",
"0.6000019",
"0.5967202",
"0.5943567",
"0.59366494",
"0.592403",
"0.5910035",
"0.58814394",
"0.58772576",
"0.58653706",
"0.5808846"
]
| 0.75503546 | 0 |
Test that we can use clustering columns as primary key for a materialized view | def test_clustering_column(self):
session = self.prepare(consistency_level=ConsistencyLevel.QUORUM)
session.execute(("CREATE TABLE users (username varchar, password varchar, gender varchar, "
"session_token varchar, state varchar, birth_year bigint, "
"PRIMARY KEY (username, state, birth_year));"))
# create a materialized view that uses a compound key
session.execute(("CREATE MATERIALIZED VIEW users_by_state_birth_year "
"AS SELECT * FROM users WHERE state IS NOT NULL AND birth_year IS NOT NULL "
"AND username IS NOT NULL PRIMARY KEY (state, birth_year, username)"))
session.cluster.control_connection.wait_for_schema_agreement()
self._insert_data(session)
result = list(session.execute("SELECT * FROM ks.users_by_state_birth_year WHERE state='TX'"))
assert len(result) == 2, "Expecting {} users, got {}".format(2, len(result))
result = list(session.execute("SELECT * FROM ks.users_by_state_birth_year WHERE state='TX' AND birth_year=1968"))
assert len(result) == 1, "Expecting {} users, got {}".format(1, len(result)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_complex_mv_select_statements(self):\n cluster = self.cluster\n cluster.set_configuration_options({'enable_materialized_views': 'true'})\n cluster.populate(3).start()\n node1 = cluster.nodelist()[0]\n session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)\n\n logger.debug(\"Creating keyspace\")\n session.execute(\"CREATE KEYSPACE mvtest WITH replication = \"\n \"{'class': 'SimpleStrategy', 'replication_factor': '3'}\")\n session.execute('USE mvtest')\n\n mv_primary_keys = [\"((a, b), c)\",\n \"((b, a), c)\",\n \"(a, b, c)\",\n \"(c, b, a)\",\n \"((c, a), b)\"]\n\n for mv_primary_key in mv_primary_keys:\n\n session.execute(\"CREATE TABLE test (a int, b int, c int, d int, PRIMARY KEY (a, b, c))\")\n\n insert_stmt = session.prepare(\"INSERT INTO test (a, b, c, d) VALUES (?, ?, ?, ?)\")\n update_stmt = session.prepare(\"UPDATE test SET d = ? WHERE a = ? AND b = ? AND c = ?\")\n delete_stmt1 = session.prepare(\"DELETE FROM test WHERE a = ? AND b = ? AND c = ?\")\n delete_stmt2 = session.prepare(\"DELETE FROM test WHERE a = ?\")\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n rows = [(0, 0, 0, 0),\n (0, 0, 1, 0),\n (0, 1, 0, 0),\n (0, 1, 1, 0),\n (1, 0, 0, 0),\n (1, 0, 1, 0),\n (1, 1, -1, 0),\n (1, 1, 0, 0),\n (1, 1, 1, 0)]\n\n for row in rows:\n session.execute(insert_stmt, row)\n\n logger.debug(\"Testing MV primary key: {}\".format(mv_primary_key))\n\n session.execute(\"CREATE MATERIALIZED VIEW mv AS SELECT * FROM test WHERE \"\n \"a = 1 AND b IS NOT NULL AND c = 1 PRIMARY KEY {}\".format(mv_primary_key))\n time.sleep(3)\n\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # insert new rows that does not match the filter\n session.execute(insert_stmt, (0, 0, 1, 0))\n session.execute(insert_stmt, (1, 1, 0, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # insert new row that does match the filter\n session.execute(insert_stmt, (1, 2, 1, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # update rows that does not match the filter\n session.execute(update_stmt, (1, 1, -1, 0))\n session.execute(update_stmt, (0, 1, 1, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # update a row that does match the filter\n session.execute(update_stmt, (2, 1, 1, 1))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete rows that does not match the filter\n session.execute(delete_stmt1, (1, 1, -1))\n session.execute(delete_stmt1, (2, 0, 1))\n session.execute(delete_stmt2, (0,))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete a row that does match the filter\n session.execute(delete_stmt1, (1, 1, 1))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete a partition that matches the filter\n session.execute(delete_stmt2, (1,))\n assert_all(session, \"SELECT a, b, c, d FROM mv\", [], 
cl=ConsistencyLevel.QUORUM)\n\n # Cleanup\n session.execute(\"DROP MATERIALIZED VIEW mv\")\n session.execute(\"DROP TABLE test\")",
"def test_partition_keys(self):\r\n class ModelWithPartitionKeys(cqlengine.Model):\r\n id = columns.UUID(primary_key=True, default=lambda:uuid4())\r\n c1 = cqlengine.Text(primary_key=True)\r\n p1 = cqlengine.Text(partition_key=True)\r\n p2 = cqlengine.Text(partition_key=True)\r\n\r\n cols = ModelWithPartitionKeys._columns\r\n\r\n self.assertTrue(cols['c1'].primary_key)\r\n self.assertFalse(cols['c1'].partition_key)\r\n\r\n self.assertTrue(cols['p1'].primary_key)\r\n self.assertTrue(cols['p1'].partition_key)\r\n self.assertTrue(cols['p2'].primary_key)\r\n self.assertTrue(cols['p2'].partition_key)\r\n\r\n obj = ModelWithPartitionKeys(p1='a', p2='b')\r\n self.assertEquals(obj.pk, ('a', 'b'))",
"def test_create_primary_key(self):\n assert self.tbl.primary_key == 'id'",
"def test_indexes_arent_allowed_on_models_with_multiple_primary_keys(self):",
"def _test_no_base_column_in_view_pk_complex_timestamp(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1, node2, node3 = self.cluster.nodelist()\n\n session.execute('USE ks')\n session.execute(\"CREATE TABLE t (k int, c int, a int, b int, e int, f int, primary key(k, c))\")\n session.execute((\"CREATE MATERIALIZED VIEW mv AS SELECT k,c,a,b FROM t \"\n \"WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c, k)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n # update unselected, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 1 SET e=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, 1, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # remove unselected, add selected column, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 2 SET e=null, b=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, 1, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, 1])\n\n # remove selected column, view row is removed\n self.update_view(session, \"UPDATE t USING TIMESTAMP 2 SET e=null, b=null WHERE k=1 AND c=1;\", flush)\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")\n\n # update unselected with ts=3, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET f=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # insert livenesssInfo, view row should be alive\n self.update_view(session, \"INSERT INTO t(k,c) VALUES(1,1) USING TIMESTAMP 3\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # remove unselected, view row should be alive because of base livenessInfo alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET f=null WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # add selected column, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET a=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n\n # update unselected, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 SET f=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n\n # delete with ts=3, view row should be alive due to unselected@ts4\n self.update_view(session, \"DELETE FROM t USING TIMESTAMP 3 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # remove unselected, view row should be removed\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 SET f=null WHERE k=1 AND c=1;\", flush)\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")\n\n # add selected with ts=7, view row is alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 7 SET b=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, 
\"SELECT * FROM t\", [1, 1, None, 1, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, 1])\n\n # remove selected with ts=7, view row is dead\n self.update_view(session, \"UPDATE t USING TIMESTAMP 7 SET b=null WHERE k=1 AND c=1;\", flush)\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")\n\n # add selected with ts=5, view row is alive (selected column should not affects each other)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 5 SET a=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n\n start = time.time()\n # add selected with ttl=30 (we apparently need a long ttl because the flushing etc that self.update_view does can take a long time)\n update_time = self.update_view(session, \"UPDATE t USING TTL 30 SET a=1 WHERE k=1 AND c=1;\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n except AssertionError as ae:\n if (time.time() - start) >= 30:\n pytest.fail(\"Please increase the 30 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n wait_time = update_time + 30 - time.time()\n if wait_time > 0:\n time.sleep(wait_time)\n\n start = time.time()\n # update unselected with ttl=30, view row should be alive\n update_time = self.update_view(session, \"UPDATE t USING TTL 30 SET f=1 WHERE k=1 AND c=1;\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n except AssertionError as ae:\n if (time.time() - start) >= 30:\n pytest.fail(\"Please increase the 30 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n wait_time = update_time + 30 - time.time()\n if wait_time > 0:\n time.sleep(wait_time)\n\n # view row still alive due to base livenessInfo\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")",
"def test_normal_fields_can_be_defined_between_primary_keys(self):",
"def test_index_keys(engine):\n class Model(engine.model):\n id = Column(UUID, hash_key=True)\n other = Column(DateTime, range_key=True)\n another = Column(UUID)\n last = Column(String)\n\n by_last = GlobalSecondaryIndex(hash_key=\"another\", range_key=\"last\")\n by_another = LocalSecondaryIndex(range_key=\"last\")\n\n assert Model.by_last.hash_key is Model.another\n assert Model.by_last.range_key is Model.last\n\n assert Model.by_another.hash_key is Model.id\n assert Model.by_another.range_key is Model.last",
"def test_secondary_index(self):\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n assert_invalid(session, \"CREATE INDEX ON t_by_v (v2)\",\n \"Secondary indexes are not supported on materialized views\")",
"def test_add_node_after_very_wide_mv(self):\n\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int, v int, PRIMARY KEY (id, v))\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n for i in range(5):\n for j in range(5000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=j))\n\n self.cluster.flush()\n\n for i in range(5):\n for j in range(5000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n node4 = new_node(self.cluster, data_center=\"dc1\")\n node4.set_configuration_options(values={'max_mutation_size_in_kb': 25}) # CASSANDRA-11670\n logger.debug(\"Start join at {}\".format(time.strftime(\"%H:%M:%S\")))\n node4.start(wait_for_binary_proto=True, jvm_args=[\"-Dcassandra.migration_task_wait_in_seconds={}\".format(MIGRATION_WAIT)])\n\n session2 = self.patient_exclusive_cql_connection(node4)\n\n for i in range(5):\n for j in range(5000):\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n for i in range(5):\n for j in range(5100):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=j))\n\n for i in range(5):\n for j in range(5100):\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])",
"def _add_pk(self, conn, *, tblname, pk_columns):\n idx_metadatum = partition_utils.IndexMetadatum(idx_cols=pk_columns, is_unique=True)\n partition_utils.add_indices(conn, tbl_name=tblname, idx_metadata=[idx_metadatum])",
"def test_index_projections(engine):\n Global, Local = GlobalSecondaryIndex, LocalSecondaryIndex\n\n class Model(engine.model):\n id = Column(UUID, hash_key=True)\n other = Column(UUID, range_key=True)\n another = Column(UUID)\n date = Column(DateTime)\n boolean = Column(Boolean)\n\n g_all = Global(hash_key=\"another\", range_key=\"date\", projection=\"all\")\n g_key = Global(hash_key=\"another\", projection=\"keys_only\")\n g_inc = Global(hash_key=\"other\", projection=[\"another\", \"date\"])\n\n l_all = Local(range_key=\"another\", projection=\"all\")\n l_key = Local(range_key=\"another\", projection=\"keys_only\")\n l_inc = Local(range_key=\"another\", projection=[\"date\"])\n\n uuids = set([Model.id, Model.other, Model.another])\n no_boolean = set(Model.Meta.columns)\n no_boolean.remove(Model.boolean)\n\n assert Model.g_all.projection == \"ALL\"\n assert Model.g_all.projection_attributes == set(Model.Meta.columns)\n assert Model.g_key.projection == \"KEYS_ONLY\"\n assert Model.g_key.projection_attributes == uuids\n assert Model.g_inc.projection == \"INCLUDE\"\n assert Model.g_inc.projection_attributes == no_boolean\n\n assert Model.l_all.projection == \"ALL\"\n assert Model.l_all.projection_attributes == set(Model.Meta.columns)\n assert Model.l_key.projection == \"KEYS_ONLY\"\n assert Model.l_key.projection_attributes == uuids\n assert Model.l_inc.projection == \"INCLUDE\"\n assert Model.l_inc.projection_attributes == no_boolean",
"def test_defining_a_primary_key_counter_column_fails(self):\r\n with self.assertRaises(TypeError):\r\n class model(Model):\r\n partition = columns.UUID(primary_key=True, default=uuid4)\r\n cluster = columns.Counter(primary_ley=True)\r\n counter = columns.Counter()\r\n\r\n # force it\r\n with self.assertRaises(ModelDefinitionException):\r\n class model(Model):\r\n partition = columns.UUID(primary_key=True, default=uuid4)\r\n cluster = columns.Counter()\r\n cluster.primary_key = True\r\n counter = columns.Counter()",
"def test_local_index_no_range_key(engine):\n with pytest.raises(ValueError):\n class Model(engine.model):\n id = Column(UUID, hash_key=True)\n another = Column(UUID)\n by_another = LocalSecondaryIndex(range_key=\"another\")",
"def pg_secondary_keys(self):",
"def test_primary_keys_metadata(sdc_builder, sdc_executor, database, values):\n if not database.is_ct_enabled:\n pytest.skip('Test only runs against SQL Server with CT enabled.')\n\n pipeline = None\n table_name = get_random_string(string.ascii_lowercase, 20)\n\n try:\n connection = database.engine.connect()\n\n if values == 'numeric':\n connection.execute(get_create_table_query_numeric(table_name, database))\n else:\n connection.execute(get_create_table_query_non_numeric(table_name, database))\n\n # Create the pipeline\n pipeline_builder = sdc_builder.get_pipeline_builder()\n sql_server_change_tracking = pipeline_builder.add_stage('SQL Server Change Tracking Client')\n sql_server_change_tracking.set_attributes(\n table_configs=[{\n 'initialOffset': 0,\n 'schema': 'dbo',\n 'tablePattern': f'{table_name}'\n }]\n )\n wiretap = pipeline_builder.add_wiretap()\n sql_server_change_tracking >> wiretap.destination\n\n pipeline = pipeline_builder.build(\"SQL Server CT Pipeline\").configure_for_environment(database)\n sdc_executor.add_pipeline(pipeline)\n connection = database.engine.connect()\n # enable change tracking on table\n connection.execute(f'ALTER TABLE {table_name} ENABLE change_tracking WITH (track_columns_updated = on)')\n sdc_executor.start_pipeline(pipeline)\n\n if values == 'numeric':\n connection.execute(get_insert_query_numeric(table_name, database))\n primary_key_specification_expected = PRIMARY_KEY_NUMERIC_METADATA_SQLSERVER\n else:\n connection.execute(get_insert_query_non_numeric(table_name, database))\n primary_key_specification_expected = PRIMARY_KEY_NON_NUMERIC_METADATA_SQLSERVER\n\n sdc_executor.wait_for_pipeline_metric(pipeline, 'input_record_count', 1)\n\n assert len(wiretap.output_records) == 1\n\n record = wiretap.output_records[0]\n assert \"jdbc.primaryKeySpecification\" in record.header.values\n assert record.header.values[\"jdbc.primaryKeySpecification\"] is not None\n\n primary_key_specification_json = json.dumps(\n json.loads(record.header.values[\"jdbc.primaryKeySpecification\"]),\n sort_keys=True\n )\n\n primary_key_specification_expected_json = json.dumps(\n json.loads(primary_key_specification_expected),\n sort_keys=True\n )\n\n assert primary_key_specification_json == primary_key_specification_expected_json\n\n finally:\n logger.info('Dropping table %s in %s database...', table_name, database.type)\n connection.execute(f'drop table if exists {table_name}')\n\n if pipeline and (sdc_executor.get_pipeline_status(pipeline).response.json().get('status') == 'RUNNING'):\n sdc_executor.stop_pipeline(pipeline)",
"def test_assign_clusters_sparse(self, new_data, filename):\n\n sqlalchemy_conn_str = open('../conf/sqlalchemy_conn_str.txt', 'r').read()\n engine = create_engine(sqlalchemy_conn_str)\n \n print('creating test sparse matrix...')\n if self.split_type == 'random':\n averages_seg = pd.read_sql('SELECT * FROM clust_sparse_avebysegment_random',con=engine)\n averages_rt = pd.read_sql('SELECT * FROM clust_sparse_avebyrt_random',con=engine)\n if self.split_type == 'date':\n averages_seg = pd.read_sql('SELECT * FROM clust_sparse_avebysegment_date',con=engine)\n averages_rt = pd.read_sql('SELECT * FROM clust_sparse_avebyrt_date',con=engine)\n\n averages_seg['exists'] = 1\n test_data_exists = pd.merge(new_data, averages_seg[['segment_id', 'exists']], on=['segment_id'])\n test_exists = test_data_exists[test_data_exists['exists']==1]\n test_notexists = test_data_exists[test_data_exists['exists']!=1] \n \n test_matrix_exists = pd.merge(test_exists[['segment_id', 'road_type']], averages_seg, how='left', on=['segment_id'])\n test_matrix_notexists = pd.merge(test_notexists[['segment_id', 'road_type']], averages_rt, how='left', on=['road_type'])\n test_matrix = pd.concat([test_matrix_exists, test_matrix_notexists])\n test_matrix = test_matrix.fillna(0) \n \n test_sparse_matrix = test_matrix.drop(columns = ['segment_id', 'road_type', 'exists', 'index', 'roadtypekey', 'segmentskey'])\n num = list(range(len(list(averages_seg))-4))\n columns = [str(item) for item in num]\n test_sparse_matrix = test_sparse_matrix[columns] \n \n print('clustering new data...')\n cluster_model = joblib.load(filename)\n cluster_predictions = cluster_model.predict(test_sparse_matrix)\n \n clusterdf = pd.DataFrame(cluster_predictions,columns = ['cluster_sparse'])\n clusterdf['index'] = clusterdf.index\n segmentdf = test_matrix['segment_id'].to_frame()\n segmentdf['index'] = segmentdf.index\n test_cluster_df_sparse = pd.merge(clusterdf, segmentdf, on=['index'])\n test_cluster_df_sparse = test_cluster_df_sparse[['segment_id','cluster_sparse']].groupby(['segment_id','cluster_sparse']).count()\n \n return test_cluster_df_sparse.reset_index()",
"def test_autofield_add_primary_key(self):\n sql = \"\"\"\n CREATE TABLE address_no_primary_key\n (\n id serial NOT NULL,\n address character varying(255),\n geometry geometry(Point,4326)\n );\n \"\"\"\n cursor = self.conn.get_connection().cursor()\n cursor.execute(sql)\n\n layer = DataBaseLayer()\n layer.db_connection = self.conn\n layer.name = 'address_no_primary_key'\n layer.table = 'address_no_primary_key'\n layer.pk_field = 'id'\n layer.geom_field = 'geometry'\n layer.anonymous_view = True\n layer.anonymous_add = True\n layer.anonymous_update = True\n layer.anonymous_delete = True\n layer.save()\n\n with ModelFactory(layer) as Model:\n primary_key = None\n for f in Model._meta.fields:\n if getattr(f, 'primary_key', None):\n primary_key = f.name\n break\n self.assertEqual(primary_key, 'id')",
"def _test_base_column_in_view_pk_commutative_tombstone_(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1 = self.cluster.nodelist()[0]\n\n session.execute('USE ks')\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n for node in self.cluster.nodelist():\n node.nodetool(\"disableautocompaction\")\n\n # sstable 1, Set initial values TS=1\n self.update_view(session, \"INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 1\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, 'a', 3.0])\n\n # sstable 2, change v's value and TS=2, tombstones v=1 and adds v=0 record\n self.update_view(session, \"DELETE FROM t USING TIMESTAMP 2 WHERE id = 1;\", flush)\n assert_none(session, \"SELECT * FROM t_by_v\")\n assert_none(session, \"SELECT * FROM t\")\n\n # sstable 3, tombstones of mv created by base deletion should remain.\n self.update_view(session, \"INSERT INTO t (id, v) VALUES (1, 1) USING TIMESTAMP 3\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None])\n\n # sstable 4, shadow view row (id=1, v=1), insert (id=1, v=2, ts=4)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 set v = 2 WHERE id = 1;\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [2, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 2, None, None])\n\n # sstable 5, shadow view row (id=1, v=2), insert (id=1, v=1 ts=5)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 5 set v = 1 WHERE id = 1;\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None]) # data deleted by row-tombstone@2 should not resurrect\n\n if flush:\n self.cluster.compact()\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None]) # data deleted by row-tombstone@2 should not resurrect\n\n # shadow view row (id=1, v=1)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 5 set v = null WHERE id = 1;\", flush)\n assert_none(session, \"SELECT * FROM t_by_v\")\n assert_one(session, \"SELECT * FROM t\", [1, None, None, None])",
"def test_get_primary_key_column(self):\n db_introspection = DatabaseIntrospection(self.connection)\n cursor = mock.MagicMock()\n\n def run_sql_in_snapshot(*args, **kwargs):\n return [[\"PK_column\"]]\n\n cursor.run_sql_in_snapshot = run_sql_in_snapshot\n primary_key = db_introspection.get_primary_key_column(\n cursor=cursor, table_name=\"Table_1\"\n )\n self.assertEqual(\n primary_key,\n \"PK_column\",\n )",
"def _test_base_column_in_view_pk_complex_timestamp(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1, node2, node3 = self.cluster.nodelist()\n\n session.execute('USE ks')\n session.execute(\"CREATE TABLE t (k int PRIMARY KEY, a int, b int)\")\n session.execute((\"CREATE MATERIALIZED VIEW mv AS SELECT * FROM t \"\n \"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n # Set initial values TS=1\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 1, 1) USING TIMESTAMP 1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1])\n\n # increase b ts to 10\n self.update_view(session, \"UPDATE t USING TIMESTAMP 10 SET b = 2 WHERE k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 1, 2, 10])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 1, 2, 10])\n\n # switch entries. shadow a = 1, insert a = 2\n self.update_view(session, \"UPDATE t USING TIMESTAMP 2 SET a = 2 WHERE k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 2, 2, 10])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 2, 2, 10])\n\n # switch entries. shadow a = 2, insert a = 1\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET a = 1 WHERE k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 1, 2, 10])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 1, 2, 10])\n\n # switch entries. shadow a = 1, insert a = 2\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 SET a = 2 WHERE k = 1;\", flush, compact=True)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 2, 2, 10])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 2, 2, 10])\n\n # able to shadow view row even if base-column in view pk's ts is smaller than row timestamp\n # set row TS = 20, a@6, b@20\n self.update_view(session, \"DELETE FROM t USING TIMESTAMP 5 where k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, None, 2, 10])\n assert_none(session, \"SELECT k,a,b,writetime(b) FROM mv\")\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 1, 1) USING TIMESTAMP 6;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 1, 2, 10])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 1, 2, 10])\n self.update_view(session, \"INSERT INTO t (k, b) VALUES (1, 1) USING TIMESTAMP 20;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 1, 1, 20])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 1, 1, 20])\n\n # switch entries. shadow a = 1, insert a = 2\n self.update_view(session, \"UPDATE t USING TIMESTAMP 7 SET a = 2 WHERE k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(a),writetime(b) FROM t\", [1, 2, 1, 7, 20])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 2, 1, 20])\n\n # switch entries. 
shadow a = 2, insert a = 1\n self.update_view(session, \"UPDATE t USING TIMESTAMP 8 SET a = 1 WHERE k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(a),writetime(b) FROM t\", [1, 1, 1, 8, 20])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 1, 1, 20])\n\n # create another view row\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (2, 2, 2);\", flush)\n assert_one(session, \"SELECT k,a,b FROM t WHERE k = 2\", [2, 2, 2])\n assert_one(session, \"SELECT k,a,b FROM mv WHERE k = 2\", [2, 2, 2])\n\n # stop node2, node3\n logger.debug('Shutdown node2')\n node2.stop(wait_other_notice=True)\n logger.debug('Shutdown node3')\n node3.stop(wait_other_notice=True)\n # shadow a = 1, create a = 2\n query = SimpleStatement(\"UPDATE t USING TIMESTAMP 9 SET a = 2 WHERE k = 1\", consistency_level=ConsistencyLevel.ONE)\n self.update_view(session, query, flush)\n # shadow (a=2, k=2) after 3 second\n query = SimpleStatement(\"UPDATE t USING TTL 3 SET a = 2 WHERE k = 2\", consistency_level=ConsistencyLevel.ONE)\n self.update_view(session, query, flush)\n\n logger.debug('Starting node2')\n node2.start(wait_for_binary_proto=True)\n logger.debug('Starting node3')\n node3.start(wait_for_binary_proto=True)\n\n # For k = 1 & a = 1, We should get a digest mismatch of tombstones and repaired\n query = SimpleStatement(\"SELECT * FROM mv WHERE k = 1 AND a = 1\", consistency_level=ConsistencyLevel.ALL)\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), True)\n assert 0 == len(result.current_rows)\n\n # For k = 1 & a = 1, second time no digest mismatch\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n assert_none(session, \"SELECT * FROM mv WHERE k = 1 AND a = 1\")\n assert 0 == len(result.current_rows)\n\n # For k = 1 & a = 2, We should get a digest mismatch of data and repaired for a = 2\n query = SimpleStatement(\"SELECT * FROM mv WHERE k = 1 AND a = 2\", consistency_level=ConsistencyLevel.ALL)\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), True)\n assert 1 == len(result.current_rows)\n\n # For k = 1 & a = 2, second time no digest mismatch\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n assert 1 == len(result.current_rows)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv WHERE k = 1\", [1, 2, 1, 20])\n\n time.sleep(3)\n # For k = 2 & a = 2, We should get a digest mismatch of expired and repaired\n query = SimpleStatement(\"SELECT * FROM mv WHERE k = 2 AND a = 2\", consistency_level=ConsistencyLevel.ALL)\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), True)\n logger.debug(result.current_rows)\n assert 0 == len(result.current_rows)\n\n # For k = 2 & a = 2, second time no digest mismatch\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n assert 0 == len(result.current_rows)",
"def primary_key(table_name: str) -> str:\n\n return f\"\"\"\n SELECT\n a.attname AS column_name,\n format_type(a.atttypid, a.atttypmod) AS data_type\n FROM\n pg_index i\n JOIN\n pg_attribute a\n ON\n a.attrelid = i.indrelid AND\n a.attnum = ANY(i.indkey)\n WHERE\n i.indrelid = '{table_name}'::regclass AND\n i.indisprimary\n \"\"\"",
"def test_assign_clusters_nonsparse(self, new_data, filename):\n\n sqlalchemy_conn_str = open('../conf/sqlalchemy_conn_str.txt', 'r').read()\n engine = create_engine(sqlalchemy_conn_str)\n if self.split_type == 'random':\n averages_seg = pd.read_sql('SELECT * FROM clust_nonsparse_avebysegment_random',con=engine)\n averages_rt = pd.read_sql('SELECT * FROM clust_nonsparse_avebyrt_random',con=engine)\n elif self.split_type == 'date':\n averages_seg = pd.read_sql('SELECT * FROM clust_nonsparse_avebysegment_date',con=engine)\n averages_rt = pd.read_sql('SELECT * FROM clust_nonsparse_avebyrt_date',con=engine)\n \n averages_seg['exists'] = 1\n test_data_exists = pd.merge(new_data, averages_seg[['segment_id', 'day_of_week', 'time_idx', 'exists']], on=['segment_id', 'day_of_week', 'time_idx'])\n test_exists = test_data_exists[test_data_exists['exists']==1]\n test_notexists = test_data_exists[test_data_exists['exists']!=1]\n \n test_exists_tmp = test_exists[['date','time','date_idx', 'time_idx', 'day_of_week', 'segment_id', 'road_type', 'lat1', 'lat2', 'lon1', 'lon2']]\n test_notexists_tmp = test_notexists[['date','time','date_idx', 'time_idx', 'day_of_week', 'segment_id', 'road_type', 'lat1', 'lat2', 'lon1', 'lon2']]\n test_matrix_exists = pd.merge(test_exists_tmp, averages_seg, how='left', on=['segment_id', 'day_of_week', 'time_idx'])\n test_matrix_notexists = pd.merge(test_notexists_tmp, averages_rt, how='left', on=['road_type', 'day_of_week', 'time_idx'])\n test_matrix = pd.concat([test_matrix_exists, test_matrix_notexists])\n test_matrix = test_matrix.fillna(0)\n \n test_nonsparse_matrix = test_matrix[['segment_id','date','time','date_idx', 'time_idx', 'day_of_week', 'road_type', 'lat1', 'lat2', 'lon1', 'lon2', 'level_binary', 'level_min', 'level_max', 'level_mean', 'level_count']]\n test_nonsparse_matrix = self.scale_matrix(test_nonsparse_matrix)\n\n print('clustering new data...')\n cluster_model = joblib.load(filename)\n cluster_predictions = cluster_model.predict(test_nonsparse_matrix.drop(columns = ['segment_id','date','time']))\n \n clusterdf = pd.DataFrame(cluster_predictions,columns = ['cluster_nonsparse']).reset_index()\n keydf = test_matrix[['segment_id','date','time']].reset_index()\n test_cluster_df_sparse = pd.merge(clusterdf, keydf, on=['index'])\n \n return test_cluster_df_sparse[['segment_id','date','time','cluster_nonsparse']]",
"def test_primary_keys_headers(sdc_builder, sdc_executor, database):\n if not database.is_ct_enabled:\n pytest.skip('Test only runs against SQL Server with CT enabled.')\n\n pipeline = None\n table_name = get_random_string(string.ascii_lowercase, 20)\n connection = database.engine.connect()\n\n try:\n logger.info('Creating source table %s in %s database ...', table_name, database.type)\n table = sqlalchemy.Table(\n table_name,\n sqlalchemy.MetaData(database.engine),\n sqlalchemy.Column('name', sqlalchemy.String(64), primary_key=True),\n sqlalchemy.Column('pokedex_id', sqlalchemy.Integer, primary_key=True),\n sqlalchemy.Column('type', sqlalchemy.String(64)),\n sqlalchemy.Column('generation', sqlalchemy.Integer)\n )\n table.create(database.engine)\n\n # Create the pipeline\n pipeline_builder = sdc_builder.get_pipeline_builder()\n sql_server_change_tracking = pipeline_builder.add_stage('SQL Server Change Tracking Client')\n sql_server_change_tracking.set_attributes(\n table_configs=[{\n 'initialOffset': 0,\n 'schema': 'dbo',\n 'tablePattern': f'{table_name}'\n }]\n )\n wiretap = pipeline_builder.add_wiretap()\n sql_server_change_tracking >> wiretap.destination\n\n pipeline = pipeline_builder.build(\"SQL Server CT Pipeline\").configure_for_environment(database)\n sdc_executor.add_pipeline(pipeline)\n connection = database.engine.connect()\n # enable change tracking on table\n connection.execute(f'ALTER TABLE {table_name} ENABLE change_tracking WITH (track_columns_updated = on)')\n\n sdc_executor.start_pipeline(pipeline)\n\n # Define the data for each statement\n initial_data = {'name': 'Azurill', 'pokedex_id': 298, 'type': 'Normal', 'generation': 3}\n updated_data = {'name': 'Azurill', 'pokedex_id': 298, 'type': 'Normal/Fairy', 'generation': 6}\n\n # Insert some data and update it\n connection.execute(f\"\"\"\n insert into {table_name}\n values (\n '{initial_data.get(\"name\")}',\n {initial_data.get(\"pokedex_id\")},\n '{initial_data.get(\"type\")}',\n {initial_data.get(\"generation\")}\n )\n \"\"\")\n\n # In order to ensure all changes are tracked, a pause is added between changes so no record is lost\n sleep(5)\n\n connection.execute(f\"\"\"\n update {table_name}\n set type = '{updated_data.get(\"type\")}', generation = {updated_data.get(\"generation\")}\n where name = '{updated_data.get(\"name\")}' and pokedex_id = {updated_data.get(\"pokedex_id\")}\n \"\"\")\n\n sleep(5)\n\n connection.execute(f\"delete from {table_name}\")\n\n sdc_executor.wait_for_pipeline_metric(pipeline, 'input_record_count', 3)\n assert len(wiretap.output_records) == 3\n\n primary_key_before_prefix = \"jdbc.primaryKey.before.\"\n primary_key_after_prefix = \"jdbc.primaryKey.after.\"\n\n for index in range(0, 3):\n header_values = wiretap.output_records[index].header.values\n\n assert primary_key_before_prefix + \"type\" not in header_values\n assert primary_key_before_prefix + \"generation\" not in header_values\n assert primary_key_after_prefix + \"type\" not in header_values\n assert primary_key_after_prefix + \"generation\" not in header_values\n\n if index == 1:\n assert header_values['sdc.operation.type'] == '3'\n assert header_values['jdbc.SYS_CHANGE_OPERATION'] == 'U'\n\n assert primary_key_before_prefix + \"name\" in header_values\n assert primary_key_before_prefix + \"pokedex_id\" in header_values\n assert primary_key_after_prefix + \"name\" in header_values\n assert primary_key_after_prefix + \"pokedex_id\" in header_values\n\n assert header_values[primary_key_before_prefix + \"name\"] is not None\n assert 
header_values[primary_key_before_prefix + \"pokedex_id\"] is not None\n assert header_values[primary_key_after_prefix + \"name\"] is not None\n assert header_values[primary_key_after_prefix + \"pokedex_id\"] is not None\n\n assert header_values[f\"{primary_key_before_prefix}name\"] == initial_data.get(\"name\")\n assert header_values[f\"{primary_key_before_prefix}pokedex_id\"] == f'{initial_data.get(\"pokedex_id\")}'\n assert header_values[f\"{primary_key_after_prefix}name\"] == updated_data.get(\"name\")\n assert header_values[f\"{primary_key_after_prefix}pokedex_id\"] == f'{updated_data.get(\"pokedex_id\")}'\n else:\n if index == 0:\n assert header_values['sdc.operation.type'] == '1'\n assert header_values['jdbc.SYS_CHANGE_OPERATION'] == 'I'\n else:\n assert header_values['sdc.operation.type'] == '2'\n assert header_values['jdbc.SYS_CHANGE_OPERATION'] == 'D'\n\n assert primary_key_before_prefix + \"name\" not in header_values\n assert primary_key_before_prefix + \"pokedex_id\" not in header_values\n assert primary_key_after_prefix + \"name\" not in header_values\n assert primary_key_after_prefix + \"pokedex_id\" not in header_values\n\n finally:\n logger.info('Dropping table %s in %s database...', table_name, database.type)\n connection.execute(f'drop table if exists {table_name}')\n\n if pipeline and (sdc_executor.get_pipeline_status(pipeline).response.json().get('status') == 'RUNNING'):\n sdc_executor.stop_pipeline(pipeline)",
"def test_pk(self):\n\n class Version(object):\n def __init__(self, id, version):\n self.id = id\n self.version = version\n def __composite_values__(self):\n # a tuple this time\n return (self.id, self.version)\n def __eq__(self, other):\n return other.id == self.id and other.version == self.version\n def __ne__(self, other):\n return not self.__eq__(other)\n\n class Graph(object):\n def __init__(self, version):\n self.version = version\n\n mapper(Graph, graphs, properties={\n 'version':composite(Version, graphs.c.id, graphs.c.version_id)\n })\n\n sess = create_session()\n g = Graph(Version(1, 1))\n sess.save(g)\n sess.flush()\n\n sess.clear()\n g2 = sess.query(Graph).get([1, 1])\n assert g.version == g2.version\n sess.clear()\n\n g2 = sess.query(Graph).get(Version(1, 1))\n assert g.version == g2.version",
"def test_code_add_primary_key(self):\n sql = \"\"\"\n CREATE TABLE public.address_no_primary_key\n (\n code character varying(10),\n address character varying(255),\n geometry geometry(Point,4326)\n );\n \"\"\"\n cursor = self.conn.get_connection().cursor()\n cursor.execute(sql)\n\n layer = DataBaseLayer()\n layer.db_connection = self.conn\n layer.name = 'address_no_primary_key'\n layer.table = 'address_no_primary_key'\n layer.pk_field = 'code'\n layer.geom_field = 'geometry'\n layer.anonymous_view = True\n layer.anonymous_add = True\n layer.anonymous_update = True\n layer.anonymous_delete = True\n layer.save()\n\n with ModelFactory(layer) as Model:\n primary_key = None\n for f in Model._meta.fields:\n if getattr(f, 'primary_key', None):\n primary_key = f.name\n break\n self.assertEqual(primary_key, 'code')",
"def _check_primary_key(df: \"pd.DataFrame\", primary_key_name: str):\n if primary_key_name in df.columns and primary_key_name == df.index.name:\n raise primary_key.Ambiguous(\n f\"Index {primary_key_name} has the same name as column {primary_key_name}\"\n )\n elif primary_key_name not in df.columns and primary_key_name != df.index.name:\n raise primary_key.NotFound(\n f\"Primary key: {primary_key_name} is not DataFrame index name: {df.index.name} or in\"\n f\" DataFrame column names: {df.columns}\"\n )",
"def test_really_complex_repair(self):\n session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)\n node1, node2, node3, node4, node5 = self.cluster.nodelist()\n\n # we create the base table with gc_grace_seconds=5 so batchlog will expire after 5 seconds\n session.execute(\"CREATE TABLE ks.t (id int, v int, v2 text, v3 decimal, PRIMARY KEY(id, v, v2))\"\n \"WITH gc_grace_seconds = 1\")\n session.execute((\"CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL AND v IS NOT NULL AND \"\n \"v2 IS NOT NULL PRIMARY KEY (v2, v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Shutdown node2 and node3')\n node2.stop(wait_other_notice=True)\n node3.stop(wait_other_notice=True)\n\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0)\")\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'a', 3.0)\")\n self._replay_batchlogs()\n logger.debug('Verify the data in the MV on node1 with CL=ONE')\n assert_all(session, \"SELECT * FROM ks.t_by_v WHERE v2 = 'a'\", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]])\n\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'b', 3.0)\")\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'b', 3.0)\")\n self._replay_batchlogs()\n logger.debug('Verify the data in the MV on node1 with CL=ONE')\n assert_all(session, \"SELECT * FROM ks.t_by_v WHERE v2 = 'b'\", [['b', 1, 1, 3.0], ['b', 2, 2, 3.0]])\n\n session.shutdown()\n\n logger.debug('Shutdown node1, node4 and node5')\n node1.stop()\n node4.stop()\n node5.stop()\n\n logger.debug('Start nodes 2 and 3')\n node2.start()\n node3.start(wait_for_binary_proto=True)\n\n session2 = self.patient_cql_connection(node2)\n session2.execute('USE ks')\n\n logger.debug('Verify the data in the MV on node2 with CL=ONE. 
No rows should be found.')\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'a'\")\n\n logger.debug('Write new data in node2 that overlap those in node1')\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'c', 3.0)\")\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'c', 3.0)\")\n self._replay_batchlogs()\n assert_all(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'c'\", [['c', 1, 1, 3.0], ['c', 2, 2, 3.0]])\n\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'd', 3.0)\")\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'd', 3.0)\")\n self._replay_batchlogs()\n assert_all(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'd'\", [['d', 1, 1, 3.0], ['d', 2, 2, 3.0]])\n\n logger.debug(\"Composite delete of everything\")\n session2.execute(\"DELETE FROM ks.t WHERE id = 1 and v = 1\")\n session2.execute(\"DELETE FROM ks.t WHERE id = 2 and v = 2\")\n self._replay_batchlogs()\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'c'\")\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'd'\")\n\n logger.debug('Wait for batchlogs to expire from node2 and node3')\n time.sleep(5)\n\n logger.debug('Start remaining nodes')\n node1.start(wait_for_binary_proto=True)\n node4.start(wait_for_binary_proto=True)\n node5.start(wait_for_binary_proto=True)\n\n # at this point the data isn't repaired so we have an inconsistency.\n # this value should return None\n assert_all(\n session2,\n \"SELECT * FROM ks.t_by_v WHERE v2 = 'a'\", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]],\n cl=ConsistencyLevel.QUORUM\n )\n\n logger.debug('Run global repair on node1')\n node1.repair()\n\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'a'\", cl=ConsistencyLevel.QUORUM)",
"def test_duplicate_primary_key(self):\n view = SchemaView(SCHEMA)\n patcher = ObjectChanger(schemaview=view)\n dataset = Dataset()\n patcher.apply(AddObject(value=Person(id='P1', name='p1')), dataset)\n patcher.apply(AddObject(value=Person(id='P1', name='p2')), dataset)\n assert dataset.persons[0].id == 'P1'\n self.assertEqual(len(dataset.persons), 2)\n logging.info(dataset.persons[0])\n logging.info(dataset.persons[1])\n patcher.apply(RemoveObject(value=Person(id='P1')), dataset)\n self.assertEqual(len(dataset.persons), 1)",
"def test_create(self):\n session = self.prepare(user_table=True)\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 1, \"Expecting 1 materialized view == got\" + str(result)",
"def test_creating_index_type(self):"
]
| [
"0.70326984",
"0.63601124",
"0.6233337",
"0.6105476",
"0.6045581",
"0.6044368",
"0.6027007",
"0.6023061",
"0.6000726",
"0.5922775",
"0.5874505",
"0.576385",
"0.5734012",
"0.57260954",
"0.5708586",
"0.56991565",
"0.56796235",
"0.5655274",
"0.5647864",
"0.56471586",
"0.5637265",
"0.563603",
"0.5626946",
"0.5611875",
"0.5610047",
"0.55999666",
"0.5552305",
"0.55424696",
"0.5537504",
"0.5505333"
]
| 0.68584615 | 1 |
CASSANDRA-10978 Test that materialized views work as expected when adding a node. | def test_add_node_after_mv(self):
session = self.prepare()
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
for i in range(1000):
session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))
for i in range(1000):
assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
node4 = new_node(self.cluster, data_center="dc1")
node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])
session2 = self.patient_exclusive_cql_connection(node4)
"""
@jira_ticket CASSANDRA-12984
Assert that MVs are marked as built after bootstrap. Otherwise newly streamed MVs will be built again.
"""
assert_one(session2, "SELECT count(*) FROM system.built_views WHERE keyspace_name = 'ks' AND view_name = 't_by_v'", [1])
for i in range(1000):
assert_one(session2, "SELECT * FROM ks.t_by_v WHERE v = {}".format(-i), [-i, i])
for i in range(1000, 1100):
session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))
for i in range(1000, 1100):
assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_add_node_after_very_wide_mv(self):\n\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int, v int, PRIMARY KEY (id, v))\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n for i in range(5):\n for j in range(5000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=j))\n\n self.cluster.flush()\n\n for i in range(5):\n for j in range(5000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n node4 = new_node(self.cluster, data_center=\"dc1\")\n node4.set_configuration_options(values={'max_mutation_size_in_kb': 25}) # CASSANDRA-11670\n logger.debug(\"Start join at {}\".format(time.strftime(\"%H:%M:%S\")))\n node4.start(wait_for_binary_proto=True, jvm_args=[\"-Dcassandra.migration_task_wait_in_seconds={}\".format(MIGRATION_WAIT)])\n\n session2 = self.patient_exclusive_cql_connection(node4)\n\n for i in range(5):\n for j in range(5000):\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n for i in range(5):\n for j in range(5100):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=j))\n\n for i in range(5):\n for j in range(5100):\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])",
"def test_add_write_survey_node_after_mv(self):\n\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=-i))\n\n for i in range(1000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(-i), [-i, i])\n\n node4 = new_node(self.cluster, data_center=\"dc1\")\n node4.start(wait_for_binary_proto=True, jvm_args=[\"-Dcassandra.write_survey=true\", \"-Dcassandra.migration_task_wait_in_seconds={}\".format(MIGRATION_WAIT)])\n\n for i in range(1000, 1100):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=-i))\n\n for i in range(1100):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(-i), [-i, i])",
"def test_migrate_view_fields(self):\n self.test_view = RecordView.create(\n self.testcoll, test_view_id, test_view_create_values\n )\n migrate_coll_data(self.testcoll)\n # Read field definition and check for inline field list\n view_data = self.check_entity_values(\n \"_view\", test_view_id, check_values=test_view_migrated_values\n )\n return",
"def test_create(self):\n session = self.prepare(user_table=True)\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 1, \"Expecting 1 materialized view == got\" + str(result)",
"def test_add_node_after_wide_mv_with_range_deletions(self):\n\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int, v int, PRIMARY KEY (id, v)) WITH compaction = { 'class': 'SizeTieredCompactionStrategy', 'enabled': 'false' }\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n for i in range(10):\n for j in range(100):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=j))\n\n self.cluster.flush()\n\n for i in range(10):\n for j in range(100):\n assert_one(session, \"SELECT * FROM t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n for i in range(10):\n for j in range(100):\n if j % 10 == 0:\n session.execute(\"DELETE FROM t WHERE id = {} AND v >= {} and v < {}\".format(i, j, j + 2))\n\n self.cluster.flush()\n\n for i in range(10):\n for j in range(100):\n if j % 10 == 0 or (j - 1) % 10 == 0:\n assert_none(session, \"SELECT * FROM t WHERE id = {} and v = {}\".format(i, j))\n assert_none(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j))\n else:\n assert_one(session, \"SELECT * FROM t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n node4 = new_node(self.cluster, data_center=\"dc1\")\n node4.set_configuration_options(values={'max_mutation_size_in_kb': 25}) # CASSANDRA-11670\n logger.debug(\"Start join at {}\".format(time.strftime(\"%H:%M:%S\")))\n node4.start(wait_for_binary_proto=True, jvm_args=[\"-Dcassandra.migration_task_wait_in_seconds={}\".format(MIGRATION_WAIT)])\n\n session2 = self.patient_exclusive_cql_connection(node4)\n\n for i in range(10):\n for j in range(100):\n if j % 10 == 0 or (j - 1) % 10 == 0:\n assert_none(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j))\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j))\n else:\n assert_one(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n for i in range(10):\n for j in range(100, 110):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=j))\n\n for i in range(10):\n for j in range(110):\n if j < 100 and (j % 10 == 0 or (j - 1) % 10 == 0):\n assert_none(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j))\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j))\n else:\n assert_one(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])",
"def test_view_tombstone(self):\n\n self.prepare(rf=3, options={'hinted_handoff_enabled': False})\n node1, node2, node3 = self.cluster.nodelist()\n\n session = self.patient_exclusive_cql_connection(node1)\n session.max_trace_wait = 120\n session.execute('USE ks')\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n # Set initial values TS=0, verify\n session.execute(SimpleStatement(\"INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 0\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'a', 3.0]\n )\n\n session.execute(SimpleStatement(\"INSERT INTO t (id, v2) VALUES (1, 'b') USING TIMESTAMP 1\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0]\n )\n\n # change v's value and TS=3, tombstones v=1 and adds v=0 record\n session.execute(SimpleStatement(\"UPDATE t USING TIMESTAMP 3 SET v = 0 WHERE id = 1\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n\n assert_none(session, \"SELECT * FROM t_by_v WHERE v = 1\")\n\n logger.debug('Shutdown node2')\n node2.stop(wait_other_notice=True)\n\n session.execute(SimpleStatement(\"UPDATE t USING TIMESTAMP 4 SET v = 1 WHERE id = 1\",\n consistency_level=ConsistencyLevel.QUORUM))\n self._replay_batchlogs()\n\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0]\n )\n\n node2.start(wait_for_binary_proto=True)\n\n # We should get a digest mismatch\n query = SimpleStatement(\"SELECT * FROM t_by_v WHERE v = 1\",\n consistency_level=ConsistencyLevel.ALL)\n\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), True)\n\n # We should not get a digest mismatch the second time\n query = SimpleStatement(\"SELECT * FROM t_by_v WHERE v = 1\", consistency_level=ConsistencyLevel.ALL)\n\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n\n # Verify values one last time\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0],\n cl=ConsistencyLevel.ALL\n )",
"def propagate_view_creation_over_non_existing_table(self):\n\n cluster = self.cluster\n cluster.populate(3)\n cluster.start()\n node1, node2, node3 = self.cluster.nodelist()\n session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)\n create_ks(session, 'ks', 3)\n\n session.execute('CREATE TABLE users (username varchar PRIMARY KEY, state varchar)')\n\n # create a materialized view only in nodes 1 and 2\n node3.stop(wait_other_notice=True)\n session.execute(('CREATE MATERIALIZED VIEW users_by_state AS '\n 'SELECT * FROM users WHERE state IS NOT NULL AND username IS NOT NULL '\n 'PRIMARY KEY (state, username)'))\n\n # drop the base table only in node 3\n node1.stop(wait_other_notice=True)\n node2.stop(wait_other_notice=True)\n node3.start(wait_for_binary_proto=True)\n session = self.patient_cql_connection(node3, consistency_level=ConsistencyLevel.QUORUM)\n session.execute('DROP TABLE ks.users')\n\n # restart the cluster\n cluster.stop()\n cluster.start()\n\n # node3 should have received and ignored the creation of the MV over the dropped table\n assert node3.grep_log('Not adding view users_by_state because the base table')",
"def test_add_node_to_pool_with_large_ppseqno_diff_views(do_view_change, looper, txnPoolNodeSet, tconf, sdk_pool_handle,\n sdk_wallet_steward, tdir, allPluginsPath):\n\n ensure_several_view_change(looper, txnPoolNodeSet, do_view_change, custom_timeout=tconf.NEW_VIEW_TIMEOUT)\n\n cur_ppseqno = get_pp_seq_no(txnPoolNodeSet)\n big_ppseqno = cur_ppseqno + tconf.LOG_SIZE * 2 + 2300\n assert (big_ppseqno > cur_ppseqno)\n\n # ensure pool is working properly\n sdk_ensure_pool_functional(looper, txnPoolNodeSet,\n sdk_wallet_steward,\n sdk_pool_handle)\n assert (cur_ppseqno < get_pp_seq_no(txnPoolNodeSet))\n\n _set_ppseqno(txnPoolNodeSet, big_ppseqno)\n cur_ppseqno = get_pp_seq_no(txnPoolNodeSet)\n assert (big_ppseqno == cur_ppseqno)\n sdk_ensure_pool_functional(looper, txnPoolNodeSet,\n sdk_wallet_steward,\n sdk_pool_handle)\n\n assert (cur_ppseqno < get_pp_seq_no(txnPoolNodeSet))\n\n\n\n # Disable view change after adding new node as it will not be able to finish due to fake ppSeqNo set\n for n in txnPoolNodeSet:\n n.write_manager.node_reg_handler.internal_bus = None\n\n new_steward_name = \"testClientSteward\" + randomString(4)\n new_node_name = \"TestTheta\" + randomString(4)\n new_steward_wallet_handle, new_node = sdk_add_new_steward_and_node(\n looper, sdk_pool_handle, sdk_wallet_steward,\n new_steward_name, new_node_name, tdir, tconf,\n allPluginsPath=allPluginsPath)\n txnPoolNodeSet.append(new_node)\n looper.run(checkNodesConnected(txnPoolNodeSet))\n\n sdk_ensure_pool_functional(looper, txnPoolNodeSet,\n new_steward_wallet_handle,\n sdk_pool_handle)\n\n waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1])\n\n sdk_ensure_pool_functional(looper, txnPoolNodeSet,\n sdk_wallet_steward,\n sdk_pool_handle)\n\n waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1])",
"def test_openshift_node_with_node_access_view(self):\n with schema_context(self.schema_name):\n expected = (\n OCPCostSummaryByNodeP.objects.annotate(**{\"value\": F(\"node\")})\n .values(\"value\")\n .distinct()\n .filter(node__in=[RBAC_NODE])\n .count()\n )\n self.assertTrue(expected)\n url = reverse(\"openshift-nodes\")\n response = self.client.get(url, **self.headers)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n json_result = response.json()\n self.assertIsNotNone(json_result.get(\"data\"))\n self.assertIsInstance(json_result.get(\"data\"), list)\n self.assertEqual(len(json_result.get(\"data\")), expected)",
"def test_view_metadata_cleanup(self):\n session = self.prepare(rf=2, nodes=2)\n\n def populate_data(session, rows):\n logger.debug(\"populate base data\")\n for v in range(rows):\n session.execute(\"INSERT INTO t(k,c,a,b,e,f) VALUES({v},{v},{v},{v},{v},{v})\".format(v=v))\n\n def verify_data(session, rows, views):\n logger.debug(\"verify view data\")\n for v in range(rows):\n for view in range(views):\n assert_one(session, \"SELECT * FROM mv{} WHERE k={v} AND c={v}\".format(view, v=v), [v, v, v, v, v, v])\n\n def create_keyspace(session, ks=\"ks1\", rf=2):\n create_ks(session, ks, rf)\n\n def create_table(session):\n logger.debug(\"create base table\")\n session.execute(\"CREATE TABLE t (k int, c int, a int, b int, e int, f int, primary key(k, c))\")\n\n def create_views(session, views, keyspace=\"ks1\"):\n logger.debug(\"create view\")\n for view in range(views):\n session.execute(\"CREATE MATERIALIZED VIEW mv{} AS SELECT * FROM t \"\n \"WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c,k)\".format(view),\n timeout=60)\n self._wait_for_view(keyspace, \"mv{}\".format(view))\n\n def drop_keyspace(session, keyspace=\"ks1\"):\n logger.debug(\"drop keyspace {}\".format(keyspace))\n session.execute(\"DROP KEYSPACE IF EXISTS {}\".format(keyspace),\n timeout=60)\n\n def drop_views(session, views):\n logger.debug(\"drop all views\")\n for view in range(views):\n session.execute(\"DROP MATERIALIZED VIEW IF EXISTS mv{}\".format(view))\n\n rows = 100\n views = 5\n\n create_keyspace(session)\n create_table(session)\n populate_data(session, rows)\n create_views(session, views)\n verify_data(session, rows, views)\n\n self._assert_view_meta(session, views)\n drop_keyspace(session)\n self._assert_view_meta(session, views, exists=False)\n\n create_keyspace(session)\n create_table(session)\n populate_data(session, rows)\n create_views(session, views)\n verify_data(session, rows, views)\n\n self._assert_view_meta(session, views)\n drop_views(session, views)\n self._assert_view_meta(session, views, exists=False)",
"def test_adding_dynamic_view(empty_model):\n viewset = ViewSet(model=empty_model)\n view = viewset.create_dynamic_view(key=\"dyn1\", description=\"test\")\n assert view.model is empty_model\n assert view.get_viewset() is viewset\n assert view.description == \"test\"\n assert view in viewset.dynamic_views",
"def test_adding_node_single_role(self):\n name = Nodes().nodes_discovered[0].name.text\n Nodes().nodes_discovered[0].checkbox.click()\n RolesPanel().controller.click()\n Nodes().apply_changes.click()\n time.sleep(1)\n with Nodes() as n:\n self.assertTrue(n.env_summary.is_displayed())\n self.assertEqual(len(n.nodes), 1, 'Nodes amount')\n self.assertEqual(n.nodes[0].name.text, name, 'Node name')\n self.assertIn(ROLE_CONTROLLER, n.nodes[0].roles.text, 'Node role')",
"def checkReadNodeViews(read):\n ...",
"def test_view_set_construction(empty_model):\n viewset = ViewSet(model=empty_model)\n assert viewset.model is empty_model\n assert count(viewset.dynamic_views) == 0",
"def _test_mv_with_default_ttl(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1, node2, node3 = self.cluster.nodelist()\n session.execute('USE ks')\n\n logger.debug(\"MV with same key and unselected columns\")\n session.execute(\"CREATE TABLE t2 (k int, a int, b int, c int, primary key(k, a)) with default_time_to_live=600\")\n session.execute((\"CREATE MATERIALIZED VIEW mv2 AS SELECT k,a,b FROM t2 \"\n \"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (a, k)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n self.update_view(session, \"UPDATE t2 SET c=1 WHERE k=1 AND a=1;\", flush)\n assert_one(session, \"SELECT k,a,b,c FROM t2\", [1, 1, None, 1])\n assert_one(session, \"SELECT k,a,b FROM mv2\", [1, 1, None])\n\n self.update_view(session, \"UPDATE t2 SET c=null WHERE k=1 AND a=1;\", flush)\n assert_none(session, \"SELECT k,a,b,c FROM t2\")\n assert_none(session, \"SELECT k,a,b FROM mv2\")\n\n self.update_view(session, \"UPDATE t2 SET c=2 WHERE k=1 AND a=1;\", flush)\n assert_one(session, \"SELECT k,a,b,c FROM t2\", [1, 1, None, 2])\n assert_one(session, \"SELECT k,a,b FROM mv2\", [1, 1, None])\n\n self.update_view(session, \"DELETE c FROM t2 WHERE k=1 AND a=1;\", flush)\n assert_none(session, \"SELECT k,a,b,c FROM t2\")\n assert_none(session, \"SELECT k,a,b FROM mv2\")\n\n if flush:\n self.cluster.compact()\n assert_none(session, \"SELECT * FROM t2\")\n assert_none(session, \"SELECT * FROM mv2\")\n\n # test with user-provided ttl\n self.update_view(session, \"INSERT INTO t2(k,a,b,c) VALUES(2,2,2,2) USING TTL 5\", flush)\n self.update_view(session, \"UPDATE t2 USING TTL 100 SET c=1 WHERE k=2 AND a=2;\", flush)\n self.update_view(session, \"UPDATE t2 USING TTL 50 SET c=2 WHERE k=2 AND a=2;\", flush)\n self.update_view(session, \"DELETE c FROM t2 WHERE k=2 AND a=2;\", flush)\n\n time.sleep(5)\n\n assert_none(session, \"SELECT k,a,b,c FROM t2\")\n assert_none(session, \"SELECT k,a,b FROM mv2\")\n\n if flush:\n self.cluster.compact()\n assert_none(session, \"SELECT * FROM t2\")\n assert_none(session, \"SELECT * FROM mv2\")\n\n logger.debug(\"MV with extra key\")\n session.execute(\"CREATE TABLE t (k int PRIMARY KEY, a int, b int) with default_time_to_live=600\")\n session.execute((\"CREATE MATERIALIZED VIEW mv AS SELECT * FROM t \"\n \"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 1, 1);\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1])\n\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 2, 1);\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 2, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 2, 1])\n\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 3, 1);\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 3, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 3, 1])\n\n if flush:\n self.cluster.compact()\n assert_one(session, \"SELECT * FROM t\", [1, 3, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 3, 1])\n\n # user provided ttl\n start = time.time()\n self.update_view(session, \"UPDATE t USING TTL 100 SET a = 4 WHERE k = 1\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 4, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 4, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 
100:\n pytest.fail(\"Please increase the 100 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n start = time.time()\n self.update_view(session, \"UPDATE t USING TTL 80 SET a = 5 WHERE k = 1\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 5, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 5, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 80:\n pytest.fail(\"Please increase the 80 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n start = time.time()\n self.update_view(session, \"UPDATE t USING TTL 60 SET a = 6 WHERE k = 1\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 6, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 6, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 60:\n pytest.fail(\"Please increase the 60 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n if flush:\n self.cluster.compact()\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 6, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 6, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 60:\n pytest.fail(\"Please increase the 60 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae",
"def test_create_view_returns_empty(dummy_request):\n from learning_journal.views.default import new_entry\n assert new_entry(dummy_request) == {}",
"def test_complex_mv_select_statements(self):\n cluster = self.cluster\n cluster.set_configuration_options({'enable_materialized_views': 'true'})\n cluster.populate(3).start()\n node1 = cluster.nodelist()[0]\n session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)\n\n logger.debug(\"Creating keyspace\")\n session.execute(\"CREATE KEYSPACE mvtest WITH replication = \"\n \"{'class': 'SimpleStrategy', 'replication_factor': '3'}\")\n session.execute('USE mvtest')\n\n mv_primary_keys = [\"((a, b), c)\",\n \"((b, a), c)\",\n \"(a, b, c)\",\n \"(c, b, a)\",\n \"((c, a), b)\"]\n\n for mv_primary_key in mv_primary_keys:\n\n session.execute(\"CREATE TABLE test (a int, b int, c int, d int, PRIMARY KEY (a, b, c))\")\n\n insert_stmt = session.prepare(\"INSERT INTO test (a, b, c, d) VALUES (?, ?, ?, ?)\")\n update_stmt = session.prepare(\"UPDATE test SET d = ? WHERE a = ? AND b = ? AND c = ?\")\n delete_stmt1 = session.prepare(\"DELETE FROM test WHERE a = ? AND b = ? AND c = ?\")\n delete_stmt2 = session.prepare(\"DELETE FROM test WHERE a = ?\")\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n rows = [(0, 0, 0, 0),\n (0, 0, 1, 0),\n (0, 1, 0, 0),\n (0, 1, 1, 0),\n (1, 0, 0, 0),\n (1, 0, 1, 0),\n (1, 1, -1, 0),\n (1, 1, 0, 0),\n (1, 1, 1, 0)]\n\n for row in rows:\n session.execute(insert_stmt, row)\n\n logger.debug(\"Testing MV primary key: {}\".format(mv_primary_key))\n\n session.execute(\"CREATE MATERIALIZED VIEW mv AS SELECT * FROM test WHERE \"\n \"a = 1 AND b IS NOT NULL AND c = 1 PRIMARY KEY {}\".format(mv_primary_key))\n time.sleep(3)\n\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # insert new rows that does not match the filter\n session.execute(insert_stmt, (0, 0, 1, 0))\n session.execute(insert_stmt, (1, 1, 0, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # insert new row that does match the filter\n session.execute(insert_stmt, (1, 2, 1, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # update rows that does not match the filter\n session.execute(update_stmt, (1, 1, -1, 0))\n session.execute(update_stmt, (0, 1, 1, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # update a row that does match the filter\n session.execute(update_stmt, (2, 1, 1, 1))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete rows that does not match the filter\n session.execute(delete_stmt1, (1, 1, -1))\n session.execute(delete_stmt1, (2, 0, 1))\n session.execute(delete_stmt2, (0,))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete a row that does match the filter\n session.execute(delete_stmt1, (1, 1, 1))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete a partition that matches the filter\n session.execute(delete_stmt2, (1,))\n assert_all(session, \"SELECT a, b, c, d FROM mv\", [], 
cl=ConsistencyLevel.QUORUM)\n\n # Cleanup\n session.execute(\"DROP MATERIALIZED VIEW mv\")\n session.execute(\"DROP TABLE test\")",
"def add_views(apps, schema_editor):\n connection = schema_editor.connection\n with connection.cursor() as cur:\n for view in reversed(OCP_ALL_VIEWS):\n LOG.info(f\"\"\"Dropping materialized view \"{view}\" with cascade\"\"\")\n cur.execute(f\"\"\"DROP MATERIALIZED VIEW \"{view}\" CASCADE;\"\"\")\n\n for view in OCP_ALL_VIEWS:\n view_sql = pkgutil.get_data(\"reporting.provider.all.openshift\", f\"sql/views/{view}.sql\")\n view_sql = view_sql.decode(\"utf-8\")\n LOG.info(f\"\"\"Creating materialized view \"{view}\"...\"\"\")\n with connection.cursor() as cursor:\n cursor.execute(view_sql)",
"def test_updated_nodes():\n assert_missing_node(10000)\n assert_cached_node(10001, (10.0, 40.0))\n assert_cached_node(10002, (10.1, 40.0))\n place_10001 = query_row(db_conf, 'osm_places', 10001)\n assert place_10001['name'] == 'Bar', place_10001\n place_10002 = query_row(db_conf, 'osm_places', 10002)\n assert place_10002['name'] == 'Baz', place_10002",
"def test_add_delete_swap_nodes(self) -> None:\n collection = collection_domain.Collection.create_default_collection(\n '0')\n self.assertEqual(len(collection.nodes), 0)\n\n collection.add_node('test_exp')\n self.assertEqual(len(collection.nodes), 1)\n\n with self.assertRaisesRegex(\n ValueError,\n 'Exploration is already part of this collection: test_exp'\n ):\n collection.add_node('test_exp')\n\n collection.add_node('another_exp')\n self.assertEqual(len(collection.nodes), 2)\n\n collection.swap_nodes(0, 1)\n self.assertEqual(collection.nodes[0].exploration_id, 'another_exp')\n self.assertEqual(collection.nodes[1].exploration_id, 'test_exp')\n with self.assertRaisesRegex(\n ValueError,\n 'Both indices point to the same collection node.'\n ):\n collection.swap_nodes(0, 0)\n\n collection.delete_node('another_exp')\n self.assertEqual(len(collection.nodes), 1)\n\n with self.assertRaisesRegex(\n ValueError,\n 'Exploration is not part of this collection: another_exp'\n ):\n collection.delete_node('another_exp')\n\n collection.delete_node('test_exp')\n self.assertEqual(len(collection.nodes), 0)",
"def test_create_hyperflex_node_profile(self):\n pass",
"def test_create_view(self):\n config = config_factory()\n db = config.arango_db\n\n # Create analyzer\n analyzer = ArangoAnalyzer(\"analyzer_sample\")\n analyzer.set_stopwords(\n language=\"english\",\n custom_stopwords=[\"stop\", \"word\"],\n include_default=False,\n )\n analyzer.type = ArangoAnalyzer._TYPE_TEXT\n\n analyzer.create(db)\n\n # Create Link - a view can hvae 0 to * links\n link = Link(name=\"TextNode\") # Name of a collection in the database\n linkAnalyzers = AnalyzerList([\"identity\"])\n link.analyzers = linkAnalyzers\n\n # A link can have 0..* fields\n field = Field(\n \"text\",\n AnalyzerList([\"text_en\", \"invalid_analyzer\", \"analyzer_sample\"]),\n ) # text_en is a predifined analyzer from arango\n field.analyzers.filter_invalid_analyzers(\n db, verbose=1\n ) # filters out the analyzer that are not defined in the database\n\n assert (\n str(field.analyzers)\n == \"AnalyzerList(analyzerList=['text_en', 'analyzer_sample'], database=None)\"\n )\n\n link.add_field(field)\n\n ## Show the dict format of all the fields in a link\n assert link.get_fields_dict() == {\n \"text\": {\"analyzers\": [\"text_en\", \"analyzer_sample\"]}\n }\n\n # create view\n view = View(\"sample_view\", view_type=\"arangosearch\")\n ## add the link (can have 0 or 1 link)\n view.add_link(link)\n\n ## can have 0..* primary sort\n view.add_primary_sort(\"text\", asc=False)\n view.add_stored_value([\"text\", \"timestamp\"], compression=\"lz4\")\n\n assert view.summary() == {\n \"name\": \"sample_view\",\n \"viewType\": \"arangosearch\",\n \"properties\": {\n \"cleanupintervalstep\": 0,\n \"cleanupIntervalStep\": 0,\n \"commitIntervalMsec\": 1000,\n \"consolidationIntervalMsec\": 0,\n \"consolidationPolicy\": {\n \"type\": \"tier\",\n \"segmentsMin\": 1,\n \"segmentsMax\": 10,\n \"segmentsBytesMax\": 5368709120,\n \"segmentsBytesFloor\": 2097152,\n \"minScore\": 0,\n },\n \"primarySortCompression\": \"lz4\",\n \"writebufferIdle\": 64,\n \"writebufferActive\": 0,\n \"writebufferMaxSize\": 33554432,\n },\n \"links\": {\n \"TextNode\": {\n \"analyzers\": [\"identity\"],\n \"fields\": {\n \"text\": {\"analyzers\": [\"text_en\", \"analyzer_sample\"]}\n },\n \"includeAllFields\": False,\n \"trackListPositions\": False,\n \"inBackground\": False,\n }\n },\n \"primarySort\": [{\"field\": \"text\", \"asc\": False}],\n \"storedValues\": [\n {\"fields\": [\"text\"], \"compression\": \"lz4\"},\n {\"fields\": [\"timestamp\"], \"compression\": \"lz4\"},\n ],\n }\n\n ## creates the view in the database\n view.create(db)\n\n assert db.view(\"sample_view\")[\"name\"] == \"sample_view\"",
"def test_backup_restore_with_views(self):\n if \"ephemeral\" in self.input.param(\"bucket_type\", 'membase'):\n self.log.info(\"\\n****** view does not support on ephemeral bucket ******\")\n return\n rest_src = RestConnection(self.backupset.cluster_host)\n if \"community\" in self.cb_version:\n rest_src.add_node(self.servers[1].rest_username, self.servers[1].rest_password,\n self.servers[1].cluster_ip, services=['kv', 'index', 'n1ql'])\n else:\n rest_src.add_node(self.servers[1].rest_username, self.servers[1].rest_password,\n self.servers[1].cluster_ip, services=['index', 'kv'])\n rebalance = self.cluster.async_rebalance(self.cluster_to_backup, [], [])\n rebalance.result()\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n default_map_func = \"function (doc) {\\n emit(doc._id, doc);\\n}\"\n default_view_name = \"test\"\n default_ddoc_name = \"ddoc_test\"\n prefix = \"dev_\"\n query = {\"full_set\": \"true\", \"stale\": \"false\", \"connection_timeout\": 60000}\n view = View(default_view_name, default_map_func)\n task = self.cluster.async_create_view(self.backupset.cluster_host,\n default_ddoc_name, view, \"default\")\n task.result()\n self.backup_cluster_validate()\n rest_target = RestConnection(self.backupset.restore_cluster_host)\n if self.input.clusters[0][1].ip != self.servers[1].ip:\n rest_target.add_node(self.input.clusters[0][1].rest_username,\n self.input.clusters[0][1].rest_password,\n self.input.clusters[0][1].cluster_ip, services=['kv', 'index'])\n rebalance = self.cluster.async_rebalance(self.cluster_to_restore, [], [])\n rebalance.result()\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\">=\")\n try:\n result = self.cluster.query_view(self.backupset.restore_cluster_host,\n prefix + default_ddoc_name,\n default_view_name, query, timeout=30)\n self.assertEqual(len(result['rows']), self.num_items,\n \"Querying view on restore cluster did not return expected number of items\")\n self.log.info(\"Querying view on restore cluster returned expected number of items\")\n except TimeoutError:\n self.fail(\"View could not be queried in restore cluster within timeout\")",
"def test_create_node_table(populated_workspace, managed_user, server):\n workspace, _, node_table, edge_table = populated_workspace\n\n aql = f\"FOR doc in {node_table} RETURN doc\"\n new_table_name = \"new_table\"\n\n with conftest.login(managed_user, server):\n resp = server.post(\n f\"/api/workspaces/{workspace}/tables\",\n data=aql,\n query_string={\"table\": new_table_name},\n )\n\n assert resp.status_code == 200\n assert resp.data.decode() == new_table_name",
"def test_populate_mv_after_insert(self):\n session = self.prepare(consistency_level=ConsistencyLevel.QUORUM)\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int)\")\n\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({v}, {v})\".format(v=i))\n\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL \"\n \"AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"wait for view to build\")\n self._wait_for_view(\"ks\", \"t_by_v\")\n\n logger.debug(\"wait that all batchlogs are replayed\")\n self._replay_batchlogs()\n\n for i in range(1000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(i), [i, i])",
"def _add_dc_after_mv_test(self, rf, nts):\n\n session = self.prepare(rf=rf)\n\n logger.debug(\"Creating schema\")\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"Writing 1k to base\")\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=-i))\n\n logger.debug(\"Reading 1k from view\")\n for i in range(1000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(-i), [-i, i])\n\n logger.debug(\"Reading 1k from base\")\n for i in range(1000):\n assert_one(session, \"SELECT * FROM t WHERE id = {}\".format(i), [i, -i])\n\n logger.debug(\"Bootstrapping new node in another dc\")\n node4 = new_node(self.cluster, data_center='dc2')\n node4.start(wait_for_binary_proto=True,\n jvm_args=[\"-Dcassandra.migration_task_wait_in_seconds={}\".format(MIGRATION_WAIT),\n \"-Dcassandra.reset_bootstrap_progress=false\"])\n\n logger.debug(\"Bootstrapping new node in another dc\")\n node5 = new_node(self.cluster, remote_debug_port='1414', data_center='dc2')\n node5.start(jvm_args=[\"-Dcassandra.migration_task_wait_in_seconds={}\".format(MIGRATION_WAIT),\n \"-Dcassandra.reset_bootstrap_progress=false\"],\n wait_for_binary_proto=True)\n if nts:\n session.execute(\"alter keyspace ks with replication = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1}\")\n session.execute(\"alter keyspace system_auth with replication = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1}\")\n session.execute(\"alter keyspace system_traces with replication = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1}\")\n node4.nodetool('rebuild dc1')\n node5.nodetool('rebuild dc1')\n\n cl = ConsistencyLevel.LOCAL_ONE if nts else ConsistencyLevel.ONE\n session2 = self.patient_exclusive_cql_connection(node4, consistency_level=cl)\n\n logger.debug(\"Verifying data from new node in view\")\n for i in range(1000):\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(-i), [-i, i])\n\n logger.debug(\"Inserting 100 into base\")\n for i in range(1000, 1100):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=-i))\n\n logger.debug(\"Verify 100 in view\")\n for i in range(1000, 1100):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(-i), [-i, i])",
"def test_node_exists():\n assert Node",
"def test_node_exists():\n assert Node",
"def createViews(views):\n ...",
"def test_drop_while_building(self):\n\n session = self.prepare(options={'concurrent_materialized_view_builders': 4}, install_byteman=True)\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n\n logger.debug(\"Inserting initial data\")\n for i in range(5000):\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS\".format(v=i))\n\n logger.debug(\"Slowing down MV build with byteman\")\n for node in self.cluster.nodelist():\n node.byteman_submit([mk_bman_path('4.0/view_builder_task_sleep.btm')])\n\n logger.debug(\"Create a MV\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"Wait and ensure the MV build has started. Waiting up to 2 minutes.\")\n self._wait_for_view_build_start(session, 'ks', 't_by_v', wait_minutes=2)\n\n logger.debug(\"Drop the MV while it is still building\")\n session.execute(\"DROP MATERIALIZED VIEW t_by_v\")\n\n logger.debug(\"Verify that the build has been stopped before its finalization without errors\")\n for node in self.cluster.nodelist():\n self.check_logs_for_errors()\n assert not node.grep_log('Marking view', filename='debug.log')\n assert node.grep_log('Stopping current view builder due to schema change', filename='debug.log')\n\n logger.debug(\"Verify that the view has been removed\")\n failed = False\n try:\n session.execute(\"SELECT COUNT(*) FROM t_by_v\")\n except InvalidRequest:\n failed = True\n self.assertTrue(failed, \"The view shouldn't be queryable\")\n self._assert_view_meta(session, views=1, exists=False)\n\n logger.debug(\"Create the MV again\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"Verify that the MV has been successfully created\")\n self._wait_for_view('ks', 't_by_v')\n assert_one(session, \"SELECT COUNT(*) FROM t_by_v\", [5000])"
]
| [
"0.7012901",
"0.7006532",
"0.64824516",
"0.6481415",
"0.6395929",
"0.6262773",
"0.62038887",
"0.6178619",
"0.6132794",
"0.6126011",
"0.6086672",
"0.6069375",
"0.60276246",
"0.5952837",
"0.58752394",
"0.5852436",
"0.58396053",
"0.58316773",
"0.5810097",
"0.57016176",
"0.5686971",
"0.5642592",
"0.5614806",
"0.5594826",
"0.5587754",
"0.5587582",
"0.5587352",
"0.5587352",
"0.55683434",
"0.55595475"
]
| 0.73405486 | 0 |
CASSANDRA-11670 Test that materialized views work with very wide materialized views as expected when adding a node. | def test_add_node_after_very_wide_mv(self):
    session = self.prepare()
    session.execute("CREATE TABLE t (id int, v int, PRIMARY KEY (id, v))")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
    for i in range(5):
        for j in range(5000):
            session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=j))
    self.cluster.flush()
    for i in range(5):
        for j in range(5000):
            assert_one(session, "SELECT * FROM t_by_v WHERE id = {} and v = {}".format(i, j), [j, i])
    node4 = new_node(self.cluster, data_center="dc1")
    node4.set_configuration_options(values={'max_mutation_size_in_kb': 25})  # CASSANDRA-11670
    logger.debug("Start join at {}".format(time.strftime("%H:%M:%S")))
    node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])
    session2 = self.patient_exclusive_cql_connection(node4)
    for i in range(5):
        for j in range(5000):
            assert_one(session2, "SELECT * FROM ks.t_by_v WHERE id = {} and v = {}".format(i, j), [j, i])
    for i in range(5):
        for j in range(5100):
            session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=j))
    for i in range(5):
        for j in range(5100):
            assert_one(session, "SELECT * FROM t_by_v WHERE id = {} and v = {}".format(i, j), [j, i]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_add_node_after_mv(self):\n\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=-i))\n\n for i in range(1000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(-i), [-i, i])\n\n node4 = new_node(self.cluster, data_center=\"dc1\")\n node4.start(wait_for_binary_proto=True, jvm_args=[\"-Dcassandra.migration_task_wait_in_seconds={}\".format(MIGRATION_WAIT)])\n\n session2 = self.patient_exclusive_cql_connection(node4)\n\n \"\"\"\n @jira_ticket CASSANDRA-12984\n\n Assert that MVs are marked as build after bootstrap. Otherwise newly streamed MVs will be built again\n \"\"\"\n assert_one(session2, \"SELECT count(*) FROM system.built_views WHERE keyspace_name = 'ks' AND view_name = 't_by_v'\", [1])\n\n for i in range(1000):\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(-i), [-i, i])\n\n for i in range(1000, 1100):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=-i))\n\n for i in range(1000, 1100):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(-i), [-i, i])",
"def test_add_node_after_wide_mv_with_range_deletions(self):\n\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int, v int, PRIMARY KEY (id, v)) WITH compaction = { 'class': 'SizeTieredCompactionStrategy', 'enabled': 'false' }\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n for i in range(10):\n for j in range(100):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=j))\n\n self.cluster.flush()\n\n for i in range(10):\n for j in range(100):\n assert_one(session, \"SELECT * FROM t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n for i in range(10):\n for j in range(100):\n if j % 10 == 0:\n session.execute(\"DELETE FROM t WHERE id = {} AND v >= {} and v < {}\".format(i, j, j + 2))\n\n self.cluster.flush()\n\n for i in range(10):\n for j in range(100):\n if j % 10 == 0 or (j - 1) % 10 == 0:\n assert_none(session, \"SELECT * FROM t WHERE id = {} and v = {}\".format(i, j))\n assert_none(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j))\n else:\n assert_one(session, \"SELECT * FROM t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n node4 = new_node(self.cluster, data_center=\"dc1\")\n node4.set_configuration_options(values={'max_mutation_size_in_kb': 25}) # CASSANDRA-11670\n logger.debug(\"Start join at {}\".format(time.strftime(\"%H:%M:%S\")))\n node4.start(wait_for_binary_proto=True, jvm_args=[\"-Dcassandra.migration_task_wait_in_seconds={}\".format(MIGRATION_WAIT)])\n\n session2 = self.patient_exclusive_cql_connection(node4)\n\n for i in range(10):\n for j in range(100):\n if j % 10 == 0 or (j - 1) % 10 == 0:\n assert_none(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j))\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j))\n else:\n assert_one(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n for i in range(10):\n for j in range(100, 110):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=j))\n\n for i in range(10):\n for j in range(110):\n if j < 100 and (j % 10 == 0 or (j - 1) % 10 == 0):\n assert_none(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j))\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j))\n else:\n assert_one(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])",
"def test_add_write_survey_node_after_mv(self):\n\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=-i))\n\n for i in range(1000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(-i), [-i, i])\n\n node4 = new_node(self.cluster, data_center=\"dc1\")\n node4.start(wait_for_binary_proto=True, jvm_args=[\"-Dcassandra.write_survey=true\", \"-Dcassandra.migration_task_wait_in_seconds={}\".format(MIGRATION_WAIT)])\n\n for i in range(1000, 1100):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=-i))\n\n for i in range(1100):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(-i), [-i, i])",
"def test_view_tombstone(self):\n\n self.prepare(rf=3, options={'hinted_handoff_enabled': False})\n node1, node2, node3 = self.cluster.nodelist()\n\n session = self.patient_exclusive_cql_connection(node1)\n session.max_trace_wait = 120\n session.execute('USE ks')\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n # Set initial values TS=0, verify\n session.execute(SimpleStatement(\"INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 0\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'a', 3.0]\n )\n\n session.execute(SimpleStatement(\"INSERT INTO t (id, v2) VALUES (1, 'b') USING TIMESTAMP 1\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0]\n )\n\n # change v's value and TS=3, tombstones v=1 and adds v=0 record\n session.execute(SimpleStatement(\"UPDATE t USING TIMESTAMP 3 SET v = 0 WHERE id = 1\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n\n assert_none(session, \"SELECT * FROM t_by_v WHERE v = 1\")\n\n logger.debug('Shutdown node2')\n node2.stop(wait_other_notice=True)\n\n session.execute(SimpleStatement(\"UPDATE t USING TIMESTAMP 4 SET v = 1 WHERE id = 1\",\n consistency_level=ConsistencyLevel.QUORUM))\n self._replay_batchlogs()\n\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0]\n )\n\n node2.start(wait_for_binary_proto=True)\n\n # We should get a digest mismatch\n query = SimpleStatement(\"SELECT * FROM t_by_v WHERE v = 1\",\n consistency_level=ConsistencyLevel.ALL)\n\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), True)\n\n # We should not get a digest mismatch the second time\n query = SimpleStatement(\"SELECT * FROM t_by_v WHERE v = 1\", consistency_level=ConsistencyLevel.ALL)\n\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n\n # Verify values one last time\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0],\n cl=ConsistencyLevel.ALL\n )",
"def test_complex_mv_select_statements(self):\n cluster = self.cluster\n cluster.set_configuration_options({'enable_materialized_views': 'true'})\n cluster.populate(3).start()\n node1 = cluster.nodelist()[0]\n session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)\n\n logger.debug(\"Creating keyspace\")\n session.execute(\"CREATE KEYSPACE mvtest WITH replication = \"\n \"{'class': 'SimpleStrategy', 'replication_factor': '3'}\")\n session.execute('USE mvtest')\n\n mv_primary_keys = [\"((a, b), c)\",\n \"((b, a), c)\",\n \"(a, b, c)\",\n \"(c, b, a)\",\n \"((c, a), b)\"]\n\n for mv_primary_key in mv_primary_keys:\n\n session.execute(\"CREATE TABLE test (a int, b int, c int, d int, PRIMARY KEY (a, b, c))\")\n\n insert_stmt = session.prepare(\"INSERT INTO test (a, b, c, d) VALUES (?, ?, ?, ?)\")\n update_stmt = session.prepare(\"UPDATE test SET d = ? WHERE a = ? AND b = ? AND c = ?\")\n delete_stmt1 = session.prepare(\"DELETE FROM test WHERE a = ? AND b = ? AND c = ?\")\n delete_stmt2 = session.prepare(\"DELETE FROM test WHERE a = ?\")\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n rows = [(0, 0, 0, 0),\n (0, 0, 1, 0),\n (0, 1, 0, 0),\n (0, 1, 1, 0),\n (1, 0, 0, 0),\n (1, 0, 1, 0),\n (1, 1, -1, 0),\n (1, 1, 0, 0),\n (1, 1, 1, 0)]\n\n for row in rows:\n session.execute(insert_stmt, row)\n\n logger.debug(\"Testing MV primary key: {}\".format(mv_primary_key))\n\n session.execute(\"CREATE MATERIALIZED VIEW mv AS SELECT * FROM test WHERE \"\n \"a = 1 AND b IS NOT NULL AND c = 1 PRIMARY KEY {}\".format(mv_primary_key))\n time.sleep(3)\n\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # insert new rows that does not match the filter\n session.execute(insert_stmt, (0, 0, 1, 0))\n session.execute(insert_stmt, (1, 1, 0, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # insert new row that does match the filter\n session.execute(insert_stmt, (1, 2, 1, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # update rows that does not match the filter\n session.execute(update_stmt, (1, 1, -1, 0))\n session.execute(update_stmt, (0, 1, 1, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # update a row that does match the filter\n session.execute(update_stmt, (2, 1, 1, 1))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete rows that does not match the filter\n session.execute(delete_stmt1, (1, 1, -1))\n session.execute(delete_stmt1, (2, 0, 1))\n session.execute(delete_stmt2, (0,))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete a row that does match the filter\n session.execute(delete_stmt1, (1, 1, 1))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete a partition that matches the filter\n session.execute(delete_stmt2, (1,))\n assert_all(session, \"SELECT a, b, c, d FROM mv\", [], 
cl=ConsistencyLevel.QUORUM)\n\n # Cleanup\n session.execute(\"DROP MATERIALIZED VIEW mv\")\n session.execute(\"DROP TABLE test\")",
"def propagate_view_creation_over_non_existing_table(self):\n\n cluster = self.cluster\n cluster.populate(3)\n cluster.start()\n node1, node2, node3 = self.cluster.nodelist()\n session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)\n create_ks(session, 'ks', 3)\n\n session.execute('CREATE TABLE users (username varchar PRIMARY KEY, state varchar)')\n\n # create a materialized view only in nodes 1 and 2\n node3.stop(wait_other_notice=True)\n session.execute(('CREATE MATERIALIZED VIEW users_by_state AS '\n 'SELECT * FROM users WHERE state IS NOT NULL AND username IS NOT NULL '\n 'PRIMARY KEY (state, username)'))\n\n # drop the base table only in node 3\n node1.stop(wait_other_notice=True)\n node2.stop(wait_other_notice=True)\n node3.start(wait_for_binary_proto=True)\n session = self.patient_cql_connection(node3, consistency_level=ConsistencyLevel.QUORUM)\n session.execute('DROP TABLE ks.users')\n\n # restart the cluster\n cluster.stop()\n cluster.start()\n\n # node3 should have received and ignored the creation of the MV over the dropped table\n assert node3.grep_log('Not adding view users_by_state because the base table')",
"def test_create(self):\n session = self.prepare(user_table=True)\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 1, \"Expecting 1 materialized view == got\" + str(result)",
"def test_view_metadata_cleanup(self):\n session = self.prepare(rf=2, nodes=2)\n\n def populate_data(session, rows):\n logger.debug(\"populate base data\")\n for v in range(rows):\n session.execute(\"INSERT INTO t(k,c,a,b,e,f) VALUES({v},{v},{v},{v},{v},{v})\".format(v=v))\n\n def verify_data(session, rows, views):\n logger.debug(\"verify view data\")\n for v in range(rows):\n for view in range(views):\n assert_one(session, \"SELECT * FROM mv{} WHERE k={v} AND c={v}\".format(view, v=v), [v, v, v, v, v, v])\n\n def create_keyspace(session, ks=\"ks1\", rf=2):\n create_ks(session, ks, rf)\n\n def create_table(session):\n logger.debug(\"create base table\")\n session.execute(\"CREATE TABLE t (k int, c int, a int, b int, e int, f int, primary key(k, c))\")\n\n def create_views(session, views, keyspace=\"ks1\"):\n logger.debug(\"create view\")\n for view in range(views):\n session.execute(\"CREATE MATERIALIZED VIEW mv{} AS SELECT * FROM t \"\n \"WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c,k)\".format(view),\n timeout=60)\n self._wait_for_view(keyspace, \"mv{}\".format(view))\n\n def drop_keyspace(session, keyspace=\"ks1\"):\n logger.debug(\"drop keyspace {}\".format(keyspace))\n session.execute(\"DROP KEYSPACE IF EXISTS {}\".format(keyspace),\n timeout=60)\n\n def drop_views(session, views):\n logger.debug(\"drop all views\")\n for view in range(views):\n session.execute(\"DROP MATERIALIZED VIEW IF EXISTS mv{}\".format(view))\n\n rows = 100\n views = 5\n\n create_keyspace(session)\n create_table(session)\n populate_data(session, rows)\n create_views(session, views)\n verify_data(session, rows, views)\n\n self._assert_view_meta(session, views)\n drop_keyspace(session)\n self._assert_view_meta(session, views, exists=False)\n\n create_keyspace(session)\n create_table(session)\n populate_data(session, rows)\n create_views(session, views)\n verify_data(session, rows, views)\n\n self._assert_view_meta(session, views)\n drop_views(session, views)\n self._assert_view_meta(session, views, exists=False)",
"def add_views(apps, schema_editor):\n connection = schema_editor.connection\n with connection.cursor() as cur:\n for view in reversed(OCP_ALL_VIEWS):\n LOG.info(f\"\"\"Dropping materialized view \"{view}\" with cascade\"\"\")\n cur.execute(f\"\"\"DROP MATERIALIZED VIEW \"{view}\" CASCADE;\"\"\")\n\n for view in OCP_ALL_VIEWS:\n view_sql = pkgutil.get_data(\"reporting.provider.all.openshift\", f\"sql/views/{view}.sql\")\n view_sql = view_sql.decode(\"utf-8\")\n LOG.info(f\"\"\"Creating materialized view \"{view}\"...\"\"\")\n with connection.cursor() as cursor:\n cursor.execute(view_sql)",
"def _test_mv_with_default_ttl(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1, node2, node3 = self.cluster.nodelist()\n session.execute('USE ks')\n\n logger.debug(\"MV with same key and unselected columns\")\n session.execute(\"CREATE TABLE t2 (k int, a int, b int, c int, primary key(k, a)) with default_time_to_live=600\")\n session.execute((\"CREATE MATERIALIZED VIEW mv2 AS SELECT k,a,b FROM t2 \"\n \"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (a, k)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n self.update_view(session, \"UPDATE t2 SET c=1 WHERE k=1 AND a=1;\", flush)\n assert_one(session, \"SELECT k,a,b,c FROM t2\", [1, 1, None, 1])\n assert_one(session, \"SELECT k,a,b FROM mv2\", [1, 1, None])\n\n self.update_view(session, \"UPDATE t2 SET c=null WHERE k=1 AND a=1;\", flush)\n assert_none(session, \"SELECT k,a,b,c FROM t2\")\n assert_none(session, \"SELECT k,a,b FROM mv2\")\n\n self.update_view(session, \"UPDATE t2 SET c=2 WHERE k=1 AND a=1;\", flush)\n assert_one(session, \"SELECT k,a,b,c FROM t2\", [1, 1, None, 2])\n assert_one(session, \"SELECT k,a,b FROM mv2\", [1, 1, None])\n\n self.update_view(session, \"DELETE c FROM t2 WHERE k=1 AND a=1;\", flush)\n assert_none(session, \"SELECT k,a,b,c FROM t2\")\n assert_none(session, \"SELECT k,a,b FROM mv2\")\n\n if flush:\n self.cluster.compact()\n assert_none(session, \"SELECT * FROM t2\")\n assert_none(session, \"SELECT * FROM mv2\")\n\n # test with user-provided ttl\n self.update_view(session, \"INSERT INTO t2(k,a,b,c) VALUES(2,2,2,2) USING TTL 5\", flush)\n self.update_view(session, \"UPDATE t2 USING TTL 100 SET c=1 WHERE k=2 AND a=2;\", flush)\n self.update_view(session, \"UPDATE t2 USING TTL 50 SET c=2 WHERE k=2 AND a=2;\", flush)\n self.update_view(session, \"DELETE c FROM t2 WHERE k=2 AND a=2;\", flush)\n\n time.sleep(5)\n\n assert_none(session, \"SELECT k,a,b,c FROM t2\")\n assert_none(session, \"SELECT k,a,b FROM mv2\")\n\n if flush:\n self.cluster.compact()\n assert_none(session, \"SELECT * FROM t2\")\n assert_none(session, \"SELECT * FROM mv2\")\n\n logger.debug(\"MV with extra key\")\n session.execute(\"CREATE TABLE t (k int PRIMARY KEY, a int, b int) with default_time_to_live=600\")\n session.execute((\"CREATE MATERIALIZED VIEW mv AS SELECT * FROM t \"\n \"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 1, 1);\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1])\n\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 2, 1);\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 2, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 2, 1])\n\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 3, 1);\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 3, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 3, 1])\n\n if flush:\n self.cluster.compact()\n assert_one(session, \"SELECT * FROM t\", [1, 3, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 3, 1])\n\n # user provided ttl\n start = time.time()\n self.update_view(session, \"UPDATE t USING TTL 100 SET a = 4 WHERE k = 1\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 4, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 4, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 
100:\n pytest.fail(\"Please increase the 100 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n start = time.time()\n self.update_view(session, \"UPDATE t USING TTL 80 SET a = 5 WHERE k = 1\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 5, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 5, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 80:\n pytest.fail(\"Please increase the 80 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n start = time.time()\n self.update_view(session, \"UPDATE t USING TTL 60 SET a = 6 WHERE k = 1\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 6, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 6, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 60:\n pytest.fail(\"Please increase the 60 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n if flush:\n self.cluster.compact()\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 6, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 6, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 60:\n pytest.fail(\"Please increase the 60 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae",
"def test_really_complex_repair(self):\n session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)\n node1, node2, node3, node4, node5 = self.cluster.nodelist()\n\n # we create the base table with gc_grace_seconds=5 so batchlog will expire after 5 seconds\n session.execute(\"CREATE TABLE ks.t (id int, v int, v2 text, v3 decimal, PRIMARY KEY(id, v, v2))\"\n \"WITH gc_grace_seconds = 1\")\n session.execute((\"CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL AND v IS NOT NULL AND \"\n \"v2 IS NOT NULL PRIMARY KEY (v2, v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Shutdown node2 and node3')\n node2.stop(wait_other_notice=True)\n node3.stop(wait_other_notice=True)\n\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0)\")\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'a', 3.0)\")\n self._replay_batchlogs()\n logger.debug('Verify the data in the MV on node1 with CL=ONE')\n assert_all(session, \"SELECT * FROM ks.t_by_v WHERE v2 = 'a'\", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]])\n\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'b', 3.0)\")\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'b', 3.0)\")\n self._replay_batchlogs()\n logger.debug('Verify the data in the MV on node1 with CL=ONE')\n assert_all(session, \"SELECT * FROM ks.t_by_v WHERE v2 = 'b'\", [['b', 1, 1, 3.0], ['b', 2, 2, 3.0]])\n\n session.shutdown()\n\n logger.debug('Shutdown node1, node4 and node5')\n node1.stop()\n node4.stop()\n node5.stop()\n\n logger.debug('Start nodes 2 and 3')\n node2.start()\n node3.start(wait_for_binary_proto=True)\n\n session2 = self.patient_cql_connection(node2)\n session2.execute('USE ks')\n\n logger.debug('Verify the data in the MV on node2 with CL=ONE. 
No rows should be found.')\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'a'\")\n\n logger.debug('Write new data in node2 that overlap those in node1')\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'c', 3.0)\")\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'c', 3.0)\")\n self._replay_batchlogs()\n assert_all(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'c'\", [['c', 1, 1, 3.0], ['c', 2, 2, 3.0]])\n\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'd', 3.0)\")\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'd', 3.0)\")\n self._replay_batchlogs()\n assert_all(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'd'\", [['d', 1, 1, 3.0], ['d', 2, 2, 3.0]])\n\n logger.debug(\"Composite delete of everything\")\n session2.execute(\"DELETE FROM ks.t WHERE id = 1 and v = 1\")\n session2.execute(\"DELETE FROM ks.t WHERE id = 2 and v = 2\")\n self._replay_batchlogs()\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'c'\")\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'd'\")\n\n logger.debug('Wait for batchlogs to expire from node2 and node3')\n time.sleep(5)\n\n logger.debug('Start remaining nodes')\n node1.start(wait_for_binary_proto=True)\n node4.start(wait_for_binary_proto=True)\n node5.start(wait_for_binary_proto=True)\n\n # at this point the data isn't repaired so we have an inconsistency.\n # this value should return None\n assert_all(\n session2,\n \"SELECT * FROM ks.t_by_v WHERE v2 = 'a'\", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]],\n cl=ConsistencyLevel.QUORUM\n )\n\n logger.debug('Run global repair on node1')\n node1.repair()\n\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'a'\", cl=ConsistencyLevel.QUORUM)",
"def test_populate_mv_after_insert_wide_rows(self):\n session = self.prepare(consistency_level=ConsistencyLevel.QUORUM)\n\n session.execute(\"CREATE TABLE t (id int, v int, PRIMARY KEY (id, v))\")\n session.cluster.control_connection.wait_for_schema_agreement()\n\n for i in range(5):\n for j in range(10000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({}, {})\".format(i, j))\n\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL \"\n \"AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug(\"wait for view to build\")\n self._wait_for_view(\"ks\", \"t_by_v\")\n\n logger.debug(\"wait that all batchlogs are replayed\")\n self._replay_batchlogs()\n for i in range(5):\n for j in range(10000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} AND v = {}\".format(i, j), [j, i])",
"def materialized_view(self) -> 'outputs.MaterializedViewDefinitionResponse':\n return pulumi.get(self, \"materialized_view\")",
"def test_add_node_to_pool_with_large_ppseqno_diff_views(do_view_change, looper, txnPoolNodeSet, tconf, sdk_pool_handle,\n sdk_wallet_steward, tdir, allPluginsPath):\n\n ensure_several_view_change(looper, txnPoolNodeSet, do_view_change, custom_timeout=tconf.NEW_VIEW_TIMEOUT)\n\n cur_ppseqno = get_pp_seq_no(txnPoolNodeSet)\n big_ppseqno = cur_ppseqno + tconf.LOG_SIZE * 2 + 2300\n assert (big_ppseqno > cur_ppseqno)\n\n # ensure pool is working properly\n sdk_ensure_pool_functional(looper, txnPoolNodeSet,\n sdk_wallet_steward,\n sdk_pool_handle)\n assert (cur_ppseqno < get_pp_seq_no(txnPoolNodeSet))\n\n _set_ppseqno(txnPoolNodeSet, big_ppseqno)\n cur_ppseqno = get_pp_seq_no(txnPoolNodeSet)\n assert (big_ppseqno == cur_ppseqno)\n sdk_ensure_pool_functional(looper, txnPoolNodeSet,\n sdk_wallet_steward,\n sdk_pool_handle)\n\n assert (cur_ppseqno < get_pp_seq_no(txnPoolNodeSet))\n\n\n\n # Disable view change after adding new node as it will not be able to finish due to fake ppSeqNo set\n for n in txnPoolNodeSet:\n n.write_manager.node_reg_handler.internal_bus = None\n\n new_steward_name = \"testClientSteward\" + randomString(4)\n new_node_name = \"TestTheta\" + randomString(4)\n new_steward_wallet_handle, new_node = sdk_add_new_steward_and_node(\n looper, sdk_pool_handle, sdk_wallet_steward,\n new_steward_name, new_node_name, tdir, tconf,\n allPluginsPath=allPluginsPath)\n txnPoolNodeSet.append(new_node)\n looper.run(checkNodesConnected(txnPoolNodeSet))\n\n sdk_ensure_pool_functional(looper, txnPoolNodeSet,\n new_steward_wallet_handle,\n sdk_pool_handle)\n\n waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1])\n\n sdk_ensure_pool_functional(looper, txnPoolNodeSet,\n sdk_wallet_steward,\n sdk_pool_handle)\n\n waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1])",
"def checkReadNodeViews(read):\n ...",
"def test_drop_while_building(self):\n\n session = self.prepare(options={'concurrent_materialized_view_builders': 4}, install_byteman=True)\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n\n logger.debug(\"Inserting initial data\")\n for i in range(5000):\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS\".format(v=i))\n\n logger.debug(\"Slowing down MV build with byteman\")\n for node in self.cluster.nodelist():\n node.byteman_submit([mk_bman_path('4.0/view_builder_task_sleep.btm')])\n\n logger.debug(\"Create a MV\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"Wait and ensure the MV build has started. Waiting up to 2 minutes.\")\n self._wait_for_view_build_start(session, 'ks', 't_by_v', wait_minutes=2)\n\n logger.debug(\"Drop the MV while it is still building\")\n session.execute(\"DROP MATERIALIZED VIEW t_by_v\")\n\n logger.debug(\"Verify that the build has been stopped before its finalization without errors\")\n for node in self.cluster.nodelist():\n self.check_logs_for_errors()\n assert not node.grep_log('Marking view', filename='debug.log')\n assert node.grep_log('Stopping current view builder due to schema change', filename='debug.log')\n\n logger.debug(\"Verify that the view has been removed\")\n failed = False\n try:\n session.execute(\"SELECT COUNT(*) FROM t_by_v\")\n except InvalidRequest:\n failed = True\n self.assertTrue(failed, \"The view shouldn't be queryable\")\n self._assert_view_meta(session, views=1, exists=False)\n\n logger.debug(\"Create the MV again\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"Verify that the MV has been successfully created\")\n self._wait_for_view('ks', 't_by_v')\n assert_one(session, \"SELECT COUNT(*) FROM t_by_v\", [5000])",
"def _test_base_view_consistency_on_crash(self, fail_phase):\n\n self.cluster.set_batch_commitlog(enabled=True, use_batch_window = self.cluster.version() < '5.0')\n self.fixture_dtest_setup.ignore_log_patterns = [r'Dummy failure', r\"Failed to force-recycle all segments\"]\n self.prepare(rf=1, install_byteman=True)\n node1, node2, node3 = self.cluster.nodelist()\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Make node1 fail {} view writes'.format(fail_phase))\n node1.byteman_submit([mk_bman_path('fail_{}_view_write.btm'.format(fail_phase))])\n\n logger.debug('Write 1000 rows - all node1 writes should fail')\n\n failed = False\n for i in range(1, 1000):\n try:\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) USING TIMESTAMP {v}\".format(v=i))\n except WriteFailure:\n failed = True\n\n assert failed, \"Should fail at least once.\"\n assert node1.grep_log(\"Dummy failure\"), \"Should throw Dummy failure\"\n\n missing_entries = 0\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n for i in range(1, 1000):\n view_entry = rows_to_list(session.execute(SimpleStatement(\"SELECT * FROM t_by_v WHERE id = {} AND v = {}\".format(i, i),\n consistency_level=ConsistencyLevel.ONE)))\n base_entry = rows_to_list(session.execute(SimpleStatement(\"SELECT * FROM t WHERE id = {}\".format(i),\n consistency_level=ConsistencyLevel.ONE)))\n\n if not base_entry:\n missing_entries += 1\n if not view_entry:\n missing_entries += 1\n\n logger.debug(\"Missing entries {}\".format(missing_entries))\n assert missing_entries > 0\n\n logger.debug('Restarting node1 to ensure commit log is replayed')\n node1.stop(wait_other_notice=True)\n # Set batchlog.replay_timeout_seconds=1 so we can ensure batchlog will be replayed below\n node1.start(jvm_args=[\"-Dcassandra.batchlog.replay_timeout_in_ms=1\"])\n\n logger.debug('Replay batchlogs')\n time.sleep(0.001) # Wait batchlog.replay_timeout_in_ms=1 (ms)\n self._replay_batchlogs()\n\n logger.debug('Verify that both the base table entry and view are present after commit and batchlog replay')\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n for i in range(1, 1000):\n view_entry = rows_to_list(session.execute(SimpleStatement(\"SELECT * FROM t_by_v WHERE id = {} AND v = {}\".format(i, i),\n consistency_level=ConsistencyLevel.ONE)))\n base_entry = rows_to_list(session.execute(SimpleStatement(\"SELECT * FROM t WHERE id = {}\".format(i),\n consistency_level=ConsistencyLevel.ONE)))\n\n assert base_entry, \"Both base {} and view entry {} should exist.\".format(base_entry, view_entry)\n assert view_entry, \"Both base {} and view entry {} should exist.\".format(base_entry, view_entry)",
"def test_migrate_view_fields(self):\n self.test_view = RecordView.create(\n self.testcoll, test_view_id, test_view_create_values\n )\n migrate_coll_data(self.testcoll)\n # Read field definition and check for inline field list\n view_data = self.check_entity_values(\n \"_view\", test_view_id, check_values=test_view_migrated_values\n )\n return",
"def test_complex_repair(self):\n session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)\n node1, node2, node3, node4, node5 = self.cluster.nodelist()\n\n # we create the base table with gc_grace_seconds=5 so batchlog will expire after 5 seconds\n session.execute(\"CREATE TABLE ks.t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\"\n \"WITH gc_grace_seconds = 5\")\n session.execute((\"CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Shutdown node2 and node3')\n node2.stop()\n node3.stop(wait_other_notice=True)\n\n logger.debug('Write initial data to node1 (will be replicated to node4 and node5)')\n for i in range(1000):\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)\".format(v=i))\n\n logger.debug('Verify the data in the MV on node1 with CL=ONE')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )\n\n logger.debug('Close connection to node1')\n session.cluster.shutdown()\n logger.debug('Shutdown node1, node4 and node5')\n node1.stop()\n node4.stop()\n node5.stop()\n\n logger.debug('Start nodes 2 and 3')\n node2.start()\n node3.start(wait_for_binary_proto=True)\n\n session2 = self.patient_cql_connection(node2)\n\n logger.debug('Verify the data in the MV on node2 with CL=ONE. No rows should be found.')\n for i in range(1000):\n assert_none(\n session2,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(i)\n )\n\n logger.debug('Write new data in node2 and node3 that overlap those in node1, node4 and node5')\n for i in range(1000):\n # we write i*2 as value, instead of i\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)\".format(v=i * 2))\n\n logger.debug('Verify the new data in the MV on node2 with CL=ONE')\n for i in range(1000):\n v = i * 2\n assert_one(\n session2,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(v),\n [v, v, 'a', 3.0]\n )\n\n logger.debug('Wait for batchlogs to expire from node2 and node3')\n time.sleep(5)\n\n logger.debug('Start remaining nodes')\n node1.start(wait_for_binary_proto=True)\n node4.start(wait_for_binary_proto=True)\n node5.start(wait_for_binary_proto=True)\n\n session = self.patient_cql_connection(node1)\n\n logger.debug('Read data from MV at QUORUM (old data should be returned)')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0],\n cl=ConsistencyLevel.QUORUM\n )\n\n logger.debug('Run global repair on node1')\n node1.repair()\n\n logger.debug('Read data from MV at quorum (new data should be returned after repair)')\n for i in range(1000):\n v = i * 2\n assert_one(\n session,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(v),\n [v, v, 'a', 3.0],\n cl=ConsistencyLevel.QUORUM\n )",
"def _add_dc_after_mv_test(self, rf, nts):\n\n session = self.prepare(rf=rf)\n\n logger.debug(\"Creating schema\")\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"Writing 1k to base\")\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=-i))\n\n logger.debug(\"Reading 1k from view\")\n for i in range(1000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(-i), [-i, i])\n\n logger.debug(\"Reading 1k from base\")\n for i in range(1000):\n assert_one(session, \"SELECT * FROM t WHERE id = {}\".format(i), [i, -i])\n\n logger.debug(\"Bootstrapping new node in another dc\")\n node4 = new_node(self.cluster, data_center='dc2')\n node4.start(wait_for_binary_proto=True,\n jvm_args=[\"-Dcassandra.migration_task_wait_in_seconds={}\".format(MIGRATION_WAIT),\n \"-Dcassandra.reset_bootstrap_progress=false\"])\n\n logger.debug(\"Bootstrapping new node in another dc\")\n node5 = new_node(self.cluster, remote_debug_port='1414', data_center='dc2')\n node5.start(jvm_args=[\"-Dcassandra.migration_task_wait_in_seconds={}\".format(MIGRATION_WAIT),\n \"-Dcassandra.reset_bootstrap_progress=false\"],\n wait_for_binary_proto=True)\n if nts:\n session.execute(\"alter keyspace ks with replication = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1}\")\n session.execute(\"alter keyspace system_auth with replication = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1}\")\n session.execute(\"alter keyspace system_traces with replication = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1}\")\n node4.nodetool('rebuild dc1')\n node5.nodetool('rebuild dc1')\n\n cl = ConsistencyLevel.LOCAL_ONE if nts else ConsistencyLevel.ONE\n session2 = self.patient_exclusive_cql_connection(node4, consistency_level=cl)\n\n logger.debug(\"Verifying data from new node in view\")\n for i in range(1000):\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(-i), [-i, i])\n\n logger.debug(\"Inserting 100 into base\")\n for i in range(1000, 1100):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=-i))\n\n logger.debug(\"Verify 100 in view\")\n for i in range(1000, 1100):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(-i), [-i, i])",
"def _test_base_column_in_view_pk_commutative_tombstone_(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1 = self.cluster.nodelist()[0]\n\n session.execute('USE ks')\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n for node in self.cluster.nodelist():\n node.nodetool(\"disableautocompaction\")\n\n # sstable 1, Set initial values TS=1\n self.update_view(session, \"INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 1\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, 'a', 3.0])\n\n # sstable 2, change v's value and TS=2, tombstones v=1 and adds v=0 record\n self.update_view(session, \"DELETE FROM t USING TIMESTAMP 2 WHERE id = 1;\", flush)\n assert_none(session, \"SELECT * FROM t_by_v\")\n assert_none(session, \"SELECT * FROM t\")\n\n # sstable 3, tombstones of mv created by base deletion should remain.\n self.update_view(session, \"INSERT INTO t (id, v) VALUES (1, 1) USING TIMESTAMP 3\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None])\n\n # sstable 4, shadow view row (id=1, v=1), insert (id=1, v=2, ts=4)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 set v = 2 WHERE id = 1;\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [2, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 2, None, None])\n\n # sstable 5, shadow view row (id=1, v=2), insert (id=1, v=1 ts=5)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 5 set v = 1 WHERE id = 1;\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None]) # data deleted by row-tombstone@2 should not resurrect\n\n if flush:\n self.cluster.compact()\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None]) # data deleted by row-tombstone@2 should not resurrect\n\n # shadow view row (id=1, v=1)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 5 set v = null WHERE id = 1;\", flush)\n assert_none(session, \"SELECT * FROM t_by_v\")\n assert_one(session, \"SELECT * FROM t\", [1, None, None, None])",
"def test_populate_mv_after_insert(self):\n session = self.prepare(consistency_level=ConsistencyLevel.QUORUM)\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int)\")\n\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({v}, {v})\".format(v=i))\n\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL \"\n \"AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"wait for view to build\")\n self._wait_for_view(\"ks\", \"t_by_v\")\n\n logger.debug(\"wait that all batchlogs are replayed\")\n self._replay_batchlogs()\n\n for i in range(1000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(i), [i, i])",
"def _test_no_base_column_in_view_pk_complex_timestamp(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1, node2, node3 = self.cluster.nodelist()\n\n session.execute('USE ks')\n session.execute(\"CREATE TABLE t (k int, c int, a int, b int, e int, f int, primary key(k, c))\")\n session.execute((\"CREATE MATERIALIZED VIEW mv AS SELECT k,c,a,b FROM t \"\n \"WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c, k)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n # update unselected, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 1 SET e=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, 1, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # remove unselected, add selected column, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 2 SET e=null, b=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, 1, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, 1])\n\n # remove selected column, view row is removed\n self.update_view(session, \"UPDATE t USING TIMESTAMP 2 SET e=null, b=null WHERE k=1 AND c=1;\", flush)\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")\n\n # update unselected with ts=3, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET f=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # insert livenesssInfo, view row should be alive\n self.update_view(session, \"INSERT INTO t(k,c) VALUES(1,1) USING TIMESTAMP 3\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # remove unselected, view row should be alive because of base livenessInfo alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET f=null WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # add selected column, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET a=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n\n # update unselected, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 SET f=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n\n # delete with ts=3, view row should be alive due to unselected@ts4\n self.update_view(session, \"DELETE FROM t USING TIMESTAMP 3 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # remove unselected, view row should be removed\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 SET f=null WHERE k=1 AND c=1;\", flush)\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")\n\n # add selected with ts=7, view row is alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 7 SET b=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, 
\"SELECT * FROM t\", [1, 1, None, 1, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, 1])\n\n # remove selected with ts=7, view row is dead\n self.update_view(session, \"UPDATE t USING TIMESTAMP 7 SET b=null WHERE k=1 AND c=1;\", flush)\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")\n\n # add selected with ts=5, view row is alive (selected column should not affects each other)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 5 SET a=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n\n start = time.time()\n # add selected with ttl=30 (we apparently need a long ttl because the flushing etc that self.update_view does can take a long time)\n update_time = self.update_view(session, \"UPDATE t USING TTL 30 SET a=1 WHERE k=1 AND c=1;\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n except AssertionError as ae:\n if (time.time() - start) >= 30:\n pytest.fail(\"Please increase the 30 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n wait_time = update_time + 30 - time.time()\n if wait_time > 0:\n time.sleep(wait_time)\n\n start = time.time()\n # update unselected with ttl=30, view row should be alive\n update_time = self.update_view(session, \"UPDATE t USING TTL 30 SET f=1 WHERE k=1 AND c=1;\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n except AssertionError as ae:\n if (time.time() - start) >= 30:\n pytest.fail(\"Please increase the 30 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n wait_time = update_time + 30 - time.time()\n if wait_time > 0:\n time.sleep(wait_time)\n\n # view row still alive due to base livenessInfo\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")",
"def test_resume_stopped_build(self):\n\n session = self.prepare(options={'concurrent_materialized_view_builders': 4}, install_byteman=True)\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n nodes = self.cluster.nodelist()\n self.fixture_dtest_setup.ignore_log_patterns = [r'Compaction interrupted: View build']\n\n logger.debug(\"Inserting initial data\")\n for i in range(5000):\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS\".format(v=i))\n\n logger.debug(\"Slowing down MV build with byteman\")\n for node in nodes:\n node.byteman_submit([mk_bman_path('4.0/view_builder_task_sleep.btm')])\n\n logger.debug(\"Create a MV\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"Wait and ensure the MV build has started. Waiting up to 2 minutes.\")\n self._wait_for_view_build_start(session, 'ks', 't_by_v', wait_minutes=2)\n\n logger.debug(\"Stopping all running view build tasks with nodetool\")\n for node in nodes:\n node.watch_log_for('Starting new view build for range', filename='debug.log', timeout=120)\n node.nodetool('stop VIEW_BUILD')\n\n logger.debug(\"Checking logs to verify that some view build tasks have been stopped\")\n for node in nodes:\n node.watch_log_for('Stopped build for view', filename='debug.log', timeout=120)\n node.watch_log_for('Compaction interrupted: View build', filename='system.log', timeout=120)\n node.watch_log_for('Interrupted build for view', filename='debug.log', timeout=120)\n assert not node.grep_log('Marking view', filename='debug.log')\n self.check_logs_for_errors()\n\n logger.debug(\"Check that MV shouldn't be built yet.\")\n assert len(list(session.execute(\"SELECT COUNT(*) FROM t_by_v\"))) != 5000\n\n logger.debug(\"Restart the cluster\")\n self.cluster.stop()\n marks = [node.mark_log() for node in nodes]\n self.cluster.start()\n session = self.patient_cql_connection(nodes[0])\n\n logger.debug(\"Verify that the MV has been successfully created\")\n self._wait_for_view('ks', 't_by_v')\n assert_one(session, \"SELECT COUNT(*) FROM ks.t_by_v\", [5000])\n\n logger.debug(\"Checking logs to verify that the view build has been resumed and completed after restart\")\n for node, mark in zip(nodes, marks):\n assert node.grep_log('Resuming view build', filename='debug.log', from_mark=mark)\n assert node.grep_log('Marking view', filename='debug.log', from_mark=mark)\n self.check_logs_for_errors()",
"def test_view_set_construction(empty_model):\n viewset = ViewSet(model=empty_model)\n assert viewset.model is empty_model\n assert count(viewset.dynamic_views) == 0",
"def test_adding_dynamic_view(empty_model):\n viewset = ViewSet(model=empty_model)\n view = viewset.create_dynamic_view(key=\"dyn1\", description=\"test\")\n assert view.model is empty_model\n assert view.get_viewset() is viewset\n assert view.description == \"test\"\n assert view in viewset.dynamic_views",
"def test_dynamic_view_hydrated(empty_viewset):\n viewset = empty_viewset\n system1 = viewset.model.add_software_system(name=\"sys1\")\n viewset.create_dynamic_view(key=\"dyn1\", description=\"dynamic\", element=system1)\n io = ViewSetIO.from_orm(viewset)\n\n new_viewset = ViewSet.hydrate(io, viewset.model)\n assert count(new_viewset.dynamic_views) == 1\n view = list(new_viewset.dynamic_views)[0]\n assert view.description == \"dynamic\"\n assert view.element is system1",
"def test_backup_restore_with_views(self):\n if \"ephemeral\" in self.input.param(\"bucket_type\", 'membase'):\n self.log.info(\"\\n****** view does not support on ephemeral bucket ******\")\n return\n rest_src = RestConnection(self.backupset.cluster_host)\n if \"community\" in self.cb_version:\n rest_src.add_node(self.servers[1].rest_username, self.servers[1].rest_password,\n self.servers[1].cluster_ip, services=['kv', 'index', 'n1ql'])\n else:\n rest_src.add_node(self.servers[1].rest_username, self.servers[1].rest_password,\n self.servers[1].cluster_ip, services=['index', 'kv'])\n rebalance = self.cluster.async_rebalance(self.cluster_to_backup, [], [])\n rebalance.result()\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n default_map_func = \"function (doc) {\\n emit(doc._id, doc);\\n}\"\n default_view_name = \"test\"\n default_ddoc_name = \"ddoc_test\"\n prefix = \"dev_\"\n query = {\"full_set\": \"true\", \"stale\": \"false\", \"connection_timeout\": 60000}\n view = View(default_view_name, default_map_func)\n task = self.cluster.async_create_view(self.backupset.cluster_host,\n default_ddoc_name, view, \"default\")\n task.result()\n self.backup_cluster_validate()\n rest_target = RestConnection(self.backupset.restore_cluster_host)\n if self.input.clusters[0][1].ip != self.servers[1].ip:\n rest_target.add_node(self.input.clusters[0][1].rest_username,\n self.input.clusters[0][1].rest_password,\n self.input.clusters[0][1].cluster_ip, services=['kv', 'index'])\n rebalance = self.cluster.async_rebalance(self.cluster_to_restore, [], [])\n rebalance.result()\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\">=\")\n try:\n result = self.cluster.query_view(self.backupset.restore_cluster_host,\n prefix + default_ddoc_name,\n default_view_name, query, timeout=30)\n self.assertEqual(len(result['rows']), self.num_items,\n \"Querying view on restore cluster did not return expected number of items\")\n self.log.info(\"Querying view on restore cluster returned expected number of items\")\n except TimeoutError:\n self.fail(\"View could not be queried in restore cluster within timeout\")",
"def test_drop_with_stopped_build(self):\n\n session = self.prepare(options={'concurrent_materialized_view_builders': 4}, install_byteman=True)\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n nodes = self.cluster.nodelist()\n\n logger.debug(\"Inserting initial data\")\n for i in range(5000):\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS\".format(v=i))\n\n logger.debug(\"Slowing down MV build with byteman\")\n for node in nodes:\n node.byteman_submit([mk_bman_path('4.0/view_builder_task_sleep.btm')])\n\n logger.debug(\"Create a MV\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"Wait and ensure the MV build has started. Waiting up to 2 minutes.\")\n self._wait_for_view_build_start(session, 'ks', 't_by_v', wait_minutes=2)\n\n logger.debug(\"Stopping all running view build tasks with nodetool\")\n for node in nodes:\n node.watch_log_for('Starting new view build for range', filename='debug.log', timeout=120)\n node.nodetool('stop VIEW_BUILD')\n\n logger.debug(\"Checking logs to verify that some view build tasks have been stopped\")\n for node in nodes:\n node.watch_log_for('Stopped build for view', filename='debug.log', timeout=120)\n node.watch_log_for('Compaction interrupted: View build', filename='system.log', timeout=120)\n self.check_logs_for_errors()\n\n logger.debug(\"Drop the MV while it is still building\")\n session.execute(\"DROP MATERIALIZED VIEW t_by_v\")\n\n logger.debug(\"Verify that the build has been stopped before its finalization without errors\")\n for node in nodes:\n self.check_logs_for_errors()\n assert not node.grep_log('Marking view', filename='debug.log')\n assert node.grep_log('Stopping current view builder due to schema change', filename='debug.log')\n\n logger.debug(\"Verify that the view has been removed\")\n failed = False\n try:\n session.execute(\"SELECT COUNT(*) FROM t_by_v\")\n except InvalidRequest:\n failed = True\n assert failed, \"The view shouldn't be queryable\"\n\n logger.debug(\"Create the MV again\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"Verify that the MV has been successfully created\")\n self._wait_for_view('ks', 't_by_v')\n # The original byteman delay it's still there and can make this flaky CASSANDRA-16962\n for i in range(10):\n try:\n assert_one(session, \"SELECT COUNT(*) FROM t_by_v\", [5000])\n except AssertionError:\n time.sleep(1)\n else:\n break\n\n assert_one(session, \"SELECT COUNT(*) FROM t_by_v\", [5000])",
"def _simple_repair_test(self, repair_base=False, repair_view=False):\n\n session = self.prepare(rf=3, options={'hinted_handoff_enabled': False})\n node1, node2, node3 = self.cluster.nodelist()\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Shutdown node2')\n node2.stop(wait_other_notice=True)\n\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)\".format(v=i))\n\n self._replay_batchlogs()\n\n logger.debug('Verify the data in the MV with CL=ONE')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )\n\n logger.debug('Verify the data in the MV with CL=ALL. All should be unavailable.')\n for i in range(1000):\n statement = SimpleStatement(\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n consistency_level=ConsistencyLevel.ALL\n )\n\n assert_unavailable(\n session.execute,\n statement\n )\n\n logger.debug('Start node2, and repair')\n node2.start(wait_for_binary_proto=True)\n if repair_base:\n node1.nodetool(\"repair ks t\")\n if repair_view:\n node1.nodetool(\"repair ks t_by_v\")\n\n logger.debug('Verify the data in the MV with CL=ALL. All should be available now and no digest mismatch')\n for i in range(1000):\n query = SimpleStatement(\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n consistency_level=ConsistencyLevel.ALL\n )\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n assert self._rows_to_list(result.current_rows), [[i, i, 'a' == 3.0]]"
]
| [
"0.71924317",
"0.68652076",
"0.6806992",
"0.65596986",
"0.6539991",
"0.6482074",
"0.64745986",
"0.6339147",
"0.6170395",
"0.61196786",
"0.61022896",
"0.60363084",
"0.60322237",
"0.6024784",
"0.5951842",
"0.59104854",
"0.5875463",
"0.58679336",
"0.585328",
"0.57316285",
"0.5693421",
"0.56843853",
"0.56815135",
"0.5673074",
"0.56105006",
"0.5508381",
"0.5492923",
"0.54919314",
"0.54802805",
"0.5436984"
]
| 0.7392547 | 0 |
CASSANDRA-10621 CASSANDRA-10978 Test that materialized views work as expected when adding a node in write survey mode. | def test_add_write_survey_node_after_mv(self):
session = self.prepare()
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
for i in range(1000):
session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))
for i in range(1000):
assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
node4 = new_node(self.cluster, data_center="dc1")
node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.write_survey=true", "-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])
for i in range(1000, 1100):
session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))
for i in range(1100):
assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_add_node_after_mv(self):\n\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=-i))\n\n for i in range(1000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(-i), [-i, i])\n\n node4 = new_node(self.cluster, data_center=\"dc1\")\n node4.start(wait_for_binary_proto=True, jvm_args=[\"-Dcassandra.migration_task_wait_in_seconds={}\".format(MIGRATION_WAIT)])\n\n session2 = self.patient_exclusive_cql_connection(node4)\n\n \"\"\"\n @jira_ticket CASSANDRA-12984\n\n Assert that MVs are marked as build after bootstrap. Otherwise newly streamed MVs will be built again\n \"\"\"\n assert_one(session2, \"SELECT count(*) FROM system.built_views WHERE keyspace_name = 'ks' AND view_name = 't_by_v'\", [1])\n\n for i in range(1000):\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(-i), [-i, i])\n\n for i in range(1000, 1100):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=-i))\n\n for i in range(1000, 1100):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(-i), [-i, i])",
"def test_add_node_after_very_wide_mv(self):\n\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int, v int, PRIMARY KEY (id, v))\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n for i in range(5):\n for j in range(5000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=j))\n\n self.cluster.flush()\n\n for i in range(5):\n for j in range(5000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n node4 = new_node(self.cluster, data_center=\"dc1\")\n node4.set_configuration_options(values={'max_mutation_size_in_kb': 25}) # CASSANDRA-11670\n logger.debug(\"Start join at {}\".format(time.strftime(\"%H:%M:%S\")))\n node4.start(wait_for_binary_proto=True, jvm_args=[\"-Dcassandra.migration_task_wait_in_seconds={}\".format(MIGRATION_WAIT)])\n\n session2 = self.patient_exclusive_cql_connection(node4)\n\n for i in range(5):\n for j in range(5000):\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n for i in range(5):\n for j in range(5100):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=j))\n\n for i in range(5):\n for j in range(5100):\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])",
"def test_migrate_view_fields(self):\n self.test_view = RecordView.create(\n self.testcoll, test_view_id, test_view_create_values\n )\n migrate_coll_data(self.testcoll)\n # Read field definition and check for inline field list\n view_data = self.check_entity_values(\n \"_view\", test_view_id, check_values=test_view_migrated_values\n )\n return",
"def test_view_metadata_cleanup(self):\n session = self.prepare(rf=2, nodes=2)\n\n def populate_data(session, rows):\n logger.debug(\"populate base data\")\n for v in range(rows):\n session.execute(\"INSERT INTO t(k,c,a,b,e,f) VALUES({v},{v},{v},{v},{v},{v})\".format(v=v))\n\n def verify_data(session, rows, views):\n logger.debug(\"verify view data\")\n for v in range(rows):\n for view in range(views):\n assert_one(session, \"SELECT * FROM mv{} WHERE k={v} AND c={v}\".format(view, v=v), [v, v, v, v, v, v])\n\n def create_keyspace(session, ks=\"ks1\", rf=2):\n create_ks(session, ks, rf)\n\n def create_table(session):\n logger.debug(\"create base table\")\n session.execute(\"CREATE TABLE t (k int, c int, a int, b int, e int, f int, primary key(k, c))\")\n\n def create_views(session, views, keyspace=\"ks1\"):\n logger.debug(\"create view\")\n for view in range(views):\n session.execute(\"CREATE MATERIALIZED VIEW mv{} AS SELECT * FROM t \"\n \"WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c,k)\".format(view),\n timeout=60)\n self._wait_for_view(keyspace, \"mv{}\".format(view))\n\n def drop_keyspace(session, keyspace=\"ks1\"):\n logger.debug(\"drop keyspace {}\".format(keyspace))\n session.execute(\"DROP KEYSPACE IF EXISTS {}\".format(keyspace),\n timeout=60)\n\n def drop_views(session, views):\n logger.debug(\"drop all views\")\n for view in range(views):\n session.execute(\"DROP MATERIALIZED VIEW IF EXISTS mv{}\".format(view))\n\n rows = 100\n views = 5\n\n create_keyspace(session)\n create_table(session)\n populate_data(session, rows)\n create_views(session, views)\n verify_data(session, rows, views)\n\n self._assert_view_meta(session, views)\n drop_keyspace(session)\n self._assert_view_meta(session, views, exists=False)\n\n create_keyspace(session)\n create_table(session)\n populate_data(session, rows)\n create_views(session, views)\n verify_data(session, rows, views)\n\n self._assert_view_meta(session, views)\n drop_views(session, views)\n self._assert_view_meta(session, views, exists=False)",
"def checkReadNodeViews(read):\n ...",
"def test_create(self):\n session = self.prepare(user_table=True)\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 1, \"Expecting 1 materialized view == got\" + str(result)",
"def test_add_node_to_pool_with_large_ppseqno_diff_views(do_view_change, looper, txnPoolNodeSet, tconf, sdk_pool_handle,\n sdk_wallet_steward, tdir, allPluginsPath):\n\n ensure_several_view_change(looper, txnPoolNodeSet, do_view_change, custom_timeout=tconf.NEW_VIEW_TIMEOUT)\n\n cur_ppseqno = get_pp_seq_no(txnPoolNodeSet)\n big_ppseqno = cur_ppseqno + tconf.LOG_SIZE * 2 + 2300\n assert (big_ppseqno > cur_ppseqno)\n\n # ensure pool is working properly\n sdk_ensure_pool_functional(looper, txnPoolNodeSet,\n sdk_wallet_steward,\n sdk_pool_handle)\n assert (cur_ppseqno < get_pp_seq_no(txnPoolNodeSet))\n\n _set_ppseqno(txnPoolNodeSet, big_ppseqno)\n cur_ppseqno = get_pp_seq_no(txnPoolNodeSet)\n assert (big_ppseqno == cur_ppseqno)\n sdk_ensure_pool_functional(looper, txnPoolNodeSet,\n sdk_wallet_steward,\n sdk_pool_handle)\n\n assert (cur_ppseqno < get_pp_seq_no(txnPoolNodeSet))\n\n\n\n # Disable view change after adding new node as it will not be able to finish due to fake ppSeqNo set\n for n in txnPoolNodeSet:\n n.write_manager.node_reg_handler.internal_bus = None\n\n new_steward_name = \"testClientSteward\" + randomString(4)\n new_node_name = \"TestTheta\" + randomString(4)\n new_steward_wallet_handle, new_node = sdk_add_new_steward_and_node(\n looper, sdk_pool_handle, sdk_wallet_steward,\n new_steward_name, new_node_name, tdir, tconf,\n allPluginsPath=allPluginsPath)\n txnPoolNodeSet.append(new_node)\n looper.run(checkNodesConnected(txnPoolNodeSet))\n\n sdk_ensure_pool_functional(looper, txnPoolNodeSet,\n new_steward_wallet_handle,\n sdk_pool_handle)\n\n waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1])\n\n sdk_ensure_pool_functional(looper, txnPoolNodeSet,\n sdk_wallet_steward,\n sdk_pool_handle)\n\n waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1])",
"def test_view_tombstone(self):\n\n self.prepare(rf=3, options={'hinted_handoff_enabled': False})\n node1, node2, node3 = self.cluster.nodelist()\n\n session = self.patient_exclusive_cql_connection(node1)\n session.max_trace_wait = 120\n session.execute('USE ks')\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n # Set initial values TS=0, verify\n session.execute(SimpleStatement(\"INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 0\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'a', 3.0]\n )\n\n session.execute(SimpleStatement(\"INSERT INTO t (id, v2) VALUES (1, 'b') USING TIMESTAMP 1\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0]\n )\n\n # change v's value and TS=3, tombstones v=1 and adds v=0 record\n session.execute(SimpleStatement(\"UPDATE t USING TIMESTAMP 3 SET v = 0 WHERE id = 1\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n\n assert_none(session, \"SELECT * FROM t_by_v WHERE v = 1\")\n\n logger.debug('Shutdown node2')\n node2.stop(wait_other_notice=True)\n\n session.execute(SimpleStatement(\"UPDATE t USING TIMESTAMP 4 SET v = 1 WHERE id = 1\",\n consistency_level=ConsistencyLevel.QUORUM))\n self._replay_batchlogs()\n\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0]\n )\n\n node2.start(wait_for_binary_proto=True)\n\n # We should get a digest mismatch\n query = SimpleStatement(\"SELECT * FROM t_by_v WHERE v = 1\",\n consistency_level=ConsistencyLevel.ALL)\n\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), True)\n\n # We should not get a digest mismatch the second time\n query = SimpleStatement(\"SELECT * FROM t_by_v WHERE v = 1\", consistency_level=ConsistencyLevel.ALL)\n\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n\n # Verify values one last time\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0],\n cl=ConsistencyLevel.ALL\n )",
"def test_complex_mv_select_statements(self):\n cluster = self.cluster\n cluster.set_configuration_options({'enable_materialized_views': 'true'})\n cluster.populate(3).start()\n node1 = cluster.nodelist()[0]\n session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)\n\n logger.debug(\"Creating keyspace\")\n session.execute(\"CREATE KEYSPACE mvtest WITH replication = \"\n \"{'class': 'SimpleStrategy', 'replication_factor': '3'}\")\n session.execute('USE mvtest')\n\n mv_primary_keys = [\"((a, b), c)\",\n \"((b, a), c)\",\n \"(a, b, c)\",\n \"(c, b, a)\",\n \"((c, a), b)\"]\n\n for mv_primary_key in mv_primary_keys:\n\n session.execute(\"CREATE TABLE test (a int, b int, c int, d int, PRIMARY KEY (a, b, c))\")\n\n insert_stmt = session.prepare(\"INSERT INTO test (a, b, c, d) VALUES (?, ?, ?, ?)\")\n update_stmt = session.prepare(\"UPDATE test SET d = ? WHERE a = ? AND b = ? AND c = ?\")\n delete_stmt1 = session.prepare(\"DELETE FROM test WHERE a = ? AND b = ? AND c = ?\")\n delete_stmt2 = session.prepare(\"DELETE FROM test WHERE a = ?\")\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n rows = [(0, 0, 0, 0),\n (0, 0, 1, 0),\n (0, 1, 0, 0),\n (0, 1, 1, 0),\n (1, 0, 0, 0),\n (1, 0, 1, 0),\n (1, 1, -1, 0),\n (1, 1, 0, 0),\n (1, 1, 1, 0)]\n\n for row in rows:\n session.execute(insert_stmt, row)\n\n logger.debug(\"Testing MV primary key: {}\".format(mv_primary_key))\n\n session.execute(\"CREATE MATERIALIZED VIEW mv AS SELECT * FROM test WHERE \"\n \"a = 1 AND b IS NOT NULL AND c = 1 PRIMARY KEY {}\".format(mv_primary_key))\n time.sleep(3)\n\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # insert new rows that does not match the filter\n session.execute(insert_stmt, (0, 0, 1, 0))\n session.execute(insert_stmt, (1, 1, 0, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # insert new row that does match the filter\n session.execute(insert_stmt, (1, 2, 1, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # update rows that does not match the filter\n session.execute(update_stmt, (1, 1, -1, 0))\n session.execute(update_stmt, (0, 1, 1, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # update a row that does match the filter\n session.execute(update_stmt, (2, 1, 1, 1))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete rows that does not match the filter\n session.execute(delete_stmt1, (1, 1, -1))\n session.execute(delete_stmt1, (2, 0, 1))\n session.execute(delete_stmt2, (0,))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete a row that does match the filter\n session.execute(delete_stmt1, (1, 1, 1))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete a partition that matches the filter\n session.execute(delete_stmt2, (1,))\n assert_all(session, \"SELECT a, b, c, d FROM mv\", [], 
cl=ConsistencyLevel.QUORUM)\n\n # Cleanup\n session.execute(\"DROP MATERIALIZED VIEW mv\")\n session.execute(\"DROP TABLE test\")",
"def test_adding_dynamic_view(empty_model):\n viewset = ViewSet(model=empty_model)\n view = viewset.create_dynamic_view(key=\"dyn1\", description=\"test\")\n assert view.model is empty_model\n assert view.get_viewset() is viewset\n assert view.description == \"test\"\n assert view in viewset.dynamic_views",
"def _create_or_alter_view(self, survey_data):\n self.log.info(\"Creating or altering view vw_AllSurveyData \")\n edit_view = self._get_query('edit_view') + \"( \" + survey_data + \" )\"\n self.db.execute_query(edit_view)\n self.log.info(\"View was edited successfully\")",
"def test_add_node_after_wide_mv_with_range_deletions(self):\n\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int, v int, PRIMARY KEY (id, v)) WITH compaction = { 'class': 'SizeTieredCompactionStrategy', 'enabled': 'false' }\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n for i in range(10):\n for j in range(100):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=j))\n\n self.cluster.flush()\n\n for i in range(10):\n for j in range(100):\n assert_one(session, \"SELECT * FROM t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n for i in range(10):\n for j in range(100):\n if j % 10 == 0:\n session.execute(\"DELETE FROM t WHERE id = {} AND v >= {} and v < {}\".format(i, j, j + 2))\n\n self.cluster.flush()\n\n for i in range(10):\n for j in range(100):\n if j % 10 == 0 or (j - 1) % 10 == 0:\n assert_none(session, \"SELECT * FROM t WHERE id = {} and v = {}\".format(i, j))\n assert_none(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j))\n else:\n assert_one(session, \"SELECT * FROM t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n node4 = new_node(self.cluster, data_center=\"dc1\")\n node4.set_configuration_options(values={'max_mutation_size_in_kb': 25}) # CASSANDRA-11670\n logger.debug(\"Start join at {}\".format(time.strftime(\"%H:%M:%S\")))\n node4.start(wait_for_binary_proto=True, jvm_args=[\"-Dcassandra.migration_task_wait_in_seconds={}\".format(MIGRATION_WAIT)])\n\n session2 = self.patient_exclusive_cql_connection(node4)\n\n for i in range(10):\n for j in range(100):\n if j % 10 == 0 or (j - 1) % 10 == 0:\n assert_none(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j))\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j))\n else:\n assert_one(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n for i in range(10):\n for j in range(100, 110):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=j))\n\n for i in range(10):\n for j in range(110):\n if j < 100 and (j % 10 == 0 or (j - 1) % 10 == 0):\n assert_none(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j))\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j))\n else:\n assert_one(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])",
"def test_visuThreeD1(self):\n\n visu_logic = slicer.modules.visuThreeDWidget.logic\n #visu_logic.set_user_table(self.user_table)\n #visu_logic.set_user_file('/work/maria5/EBDS_CIVILITY/DataShare/TestMatricesForVisualization/AAL78/PerNodeMetrics/Conte_EigenVectorCentrality_4Yr_AAL78Regions.csv')\n #visu_logic.set_user_file('/Users/Wieke/Documents/visuThreeD/neo-0042-4year_AvgSym_normFull.csv')\n # visu_logic.create_node_actors()\n # visu_logic.create_line_actors()\n # visu_logic.update()\n #visu_logic.set_node_range()",
"def test_view_set_construction(empty_model):\n viewset = ViewSet(model=empty_model)\n assert viewset.model is empty_model\n assert count(viewset.dynamic_views) == 0",
"def test_openshift_node_with_node_access_view(self):\n with schema_context(self.schema_name):\n expected = (\n OCPCostSummaryByNodeP.objects.annotate(**{\"value\": F(\"node\")})\n .values(\"value\")\n .distinct()\n .filter(node__in=[RBAC_NODE])\n .count()\n )\n self.assertTrue(expected)\n url = reverse(\"openshift-nodes\")\n response = self.client.get(url, **self.headers)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n json_result = response.json()\n self.assertIsNotNone(json_result.get(\"data\"))\n self.assertIsInstance(json_result.get(\"data\"), list)\n self.assertEqual(len(json_result.get(\"data\")), expected)",
"def test_adding_node_single_role(self):\n name = Nodes().nodes_discovered[0].name.text\n Nodes().nodes_discovered[0].checkbox.click()\n RolesPanel().controller.click()\n Nodes().apply_changes.click()\n time.sleep(1)\n with Nodes() as n:\n self.assertTrue(n.env_summary.is_displayed())\n self.assertEqual(len(n.nodes), 1, 'Nodes amount')\n self.assertEqual(n.nodes[0].name.text, name, 'Node name')\n self.assertIn(ROLE_CONTROLLER, n.nodes[0].roles.text, 'Node role')",
"def test_create_view(self):\n config = config_factory()\n db = config.arango_db\n\n # Create analyzer\n analyzer = ArangoAnalyzer(\"analyzer_sample\")\n analyzer.set_stopwords(\n language=\"english\",\n custom_stopwords=[\"stop\", \"word\"],\n include_default=False,\n )\n analyzer.type = ArangoAnalyzer._TYPE_TEXT\n\n analyzer.create(db)\n\n # Create Link - a view can hvae 0 to * links\n link = Link(name=\"TextNode\") # Name of a collection in the database\n linkAnalyzers = AnalyzerList([\"identity\"])\n link.analyzers = linkAnalyzers\n\n # A link can have 0..* fields\n field = Field(\n \"text\",\n AnalyzerList([\"text_en\", \"invalid_analyzer\", \"analyzer_sample\"]),\n ) # text_en is a predifined analyzer from arango\n field.analyzers.filter_invalid_analyzers(\n db, verbose=1\n ) # filters out the analyzer that are not defined in the database\n\n assert (\n str(field.analyzers)\n == \"AnalyzerList(analyzerList=['text_en', 'analyzer_sample'], database=None)\"\n )\n\n link.add_field(field)\n\n ## Show the dict format of all the fields in a link\n assert link.get_fields_dict() == {\n \"text\": {\"analyzers\": [\"text_en\", \"analyzer_sample\"]}\n }\n\n # create view\n view = View(\"sample_view\", view_type=\"arangosearch\")\n ## add the link (can have 0 or 1 link)\n view.add_link(link)\n\n ## can have 0..* primary sort\n view.add_primary_sort(\"text\", asc=False)\n view.add_stored_value([\"text\", \"timestamp\"], compression=\"lz4\")\n\n assert view.summary() == {\n \"name\": \"sample_view\",\n \"viewType\": \"arangosearch\",\n \"properties\": {\n \"cleanupintervalstep\": 0,\n \"cleanupIntervalStep\": 0,\n \"commitIntervalMsec\": 1000,\n \"consolidationIntervalMsec\": 0,\n \"consolidationPolicy\": {\n \"type\": \"tier\",\n \"segmentsMin\": 1,\n \"segmentsMax\": 10,\n \"segmentsBytesMax\": 5368709120,\n \"segmentsBytesFloor\": 2097152,\n \"minScore\": 0,\n },\n \"primarySortCompression\": \"lz4\",\n \"writebufferIdle\": 64,\n \"writebufferActive\": 0,\n \"writebufferMaxSize\": 33554432,\n },\n \"links\": {\n \"TextNode\": {\n \"analyzers\": [\"identity\"],\n \"fields\": {\n \"text\": {\"analyzers\": [\"text_en\", \"analyzer_sample\"]}\n },\n \"includeAllFields\": False,\n \"trackListPositions\": False,\n \"inBackground\": False,\n }\n },\n \"primarySort\": [{\"field\": \"text\", \"asc\": False}],\n \"storedValues\": [\n {\"fields\": [\"text\"], \"compression\": \"lz4\"},\n {\"fields\": [\"timestamp\"], \"compression\": \"lz4\"},\n ],\n }\n\n ## creates the view in the database\n view.create(db)\n\n assert db.view(\"sample_view\")[\"name\"] == \"sample_view\"",
"def add_views(apps, schema_editor):\n connection = schema_editor.connection\n with connection.cursor() as cur:\n for view in reversed(OCP_ALL_VIEWS):\n LOG.info(f\"\"\"Dropping materialized view \"{view}\" with cascade\"\"\")\n cur.execute(f\"\"\"DROP MATERIALIZED VIEW \"{view}\" CASCADE;\"\"\")\n\n for view in OCP_ALL_VIEWS:\n view_sql = pkgutil.get_data(\"reporting.provider.all.openshift\", f\"sql/views/{view}.sql\")\n view_sql = view_sql.decode(\"utf-8\")\n LOG.info(f\"\"\"Creating materialized view \"{view}\"...\"\"\")\n with connection.cursor() as cursor:\n cursor.execute(view_sql)",
"def test_view_in_studio_link_mongo_backed(self):\r\n self.setup_mongo_course()\r\n result_fragment = self.module.render('student_view')\r\n self.assertIn('View Unit in Studio', result_fragment.content)",
"def test_create_view_returns_empty(dummy_request):\n from learning_journal.views.default import new_entry\n assert new_entry(dummy_request) == {}",
"def test_view_in_studio_link_xml_authored(self):\r\n self.setup_mongo_course(course_edit_method='XML')\r\n result_fragment = self.module.render('student_view')\r\n self.assertNotIn('View Unit in Studio', result_fragment.content)",
"def test_view_in_studio_link_xml_authored(self):\r\n self.setup_mongo_course(course_edit_method='XML')\r\n result_fragment = self.module.render('student_view')\r\n self.assertNotIn('View Unit in Studio', result_fragment.content)",
"def _test_mv_with_default_ttl(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1, node2, node3 = self.cluster.nodelist()\n session.execute('USE ks')\n\n logger.debug(\"MV with same key and unselected columns\")\n session.execute(\"CREATE TABLE t2 (k int, a int, b int, c int, primary key(k, a)) with default_time_to_live=600\")\n session.execute((\"CREATE MATERIALIZED VIEW mv2 AS SELECT k,a,b FROM t2 \"\n \"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (a, k)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n self.update_view(session, \"UPDATE t2 SET c=1 WHERE k=1 AND a=1;\", flush)\n assert_one(session, \"SELECT k,a,b,c FROM t2\", [1, 1, None, 1])\n assert_one(session, \"SELECT k,a,b FROM mv2\", [1, 1, None])\n\n self.update_view(session, \"UPDATE t2 SET c=null WHERE k=1 AND a=1;\", flush)\n assert_none(session, \"SELECT k,a,b,c FROM t2\")\n assert_none(session, \"SELECT k,a,b FROM mv2\")\n\n self.update_view(session, \"UPDATE t2 SET c=2 WHERE k=1 AND a=1;\", flush)\n assert_one(session, \"SELECT k,a,b,c FROM t2\", [1, 1, None, 2])\n assert_one(session, \"SELECT k,a,b FROM mv2\", [1, 1, None])\n\n self.update_view(session, \"DELETE c FROM t2 WHERE k=1 AND a=1;\", flush)\n assert_none(session, \"SELECT k,a,b,c FROM t2\")\n assert_none(session, \"SELECT k,a,b FROM mv2\")\n\n if flush:\n self.cluster.compact()\n assert_none(session, \"SELECT * FROM t2\")\n assert_none(session, \"SELECT * FROM mv2\")\n\n # test with user-provided ttl\n self.update_view(session, \"INSERT INTO t2(k,a,b,c) VALUES(2,2,2,2) USING TTL 5\", flush)\n self.update_view(session, \"UPDATE t2 USING TTL 100 SET c=1 WHERE k=2 AND a=2;\", flush)\n self.update_view(session, \"UPDATE t2 USING TTL 50 SET c=2 WHERE k=2 AND a=2;\", flush)\n self.update_view(session, \"DELETE c FROM t2 WHERE k=2 AND a=2;\", flush)\n\n time.sleep(5)\n\n assert_none(session, \"SELECT k,a,b,c FROM t2\")\n assert_none(session, \"SELECT k,a,b FROM mv2\")\n\n if flush:\n self.cluster.compact()\n assert_none(session, \"SELECT * FROM t2\")\n assert_none(session, \"SELECT * FROM mv2\")\n\n logger.debug(\"MV with extra key\")\n session.execute(\"CREATE TABLE t (k int PRIMARY KEY, a int, b int) with default_time_to_live=600\")\n session.execute((\"CREATE MATERIALIZED VIEW mv AS SELECT * FROM t \"\n \"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 1, 1);\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1])\n\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 2, 1);\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 2, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 2, 1])\n\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 3, 1);\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 3, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 3, 1])\n\n if flush:\n self.cluster.compact()\n assert_one(session, \"SELECT * FROM t\", [1, 3, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 3, 1])\n\n # user provided ttl\n start = time.time()\n self.update_view(session, \"UPDATE t USING TTL 100 SET a = 4 WHERE k = 1\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 4, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 4, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 
100:\n pytest.fail(\"Please increase the 100 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n start = time.time()\n self.update_view(session, \"UPDATE t USING TTL 80 SET a = 5 WHERE k = 1\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 5, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 5, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 80:\n pytest.fail(\"Please increase the 80 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n start = time.time()\n self.update_view(session, \"UPDATE t USING TTL 60 SET a = 6 WHERE k = 1\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 6, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 6, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 60:\n pytest.fail(\"Please increase the 60 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n if flush:\n self.cluster.compact()\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 6, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 6, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 60:\n pytest.fail(\"Please increase the 60 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae",
"def test_add_documents(empty_index, small_movies):\n index = empty_index()\n response = index.add_documents(small_movies)\n assert isinstance(response, TaskInfo)\n assert response.task_uid is not None\n update = index.wait_for_task(response.task_uid)\n assert index.get_primary_key() == \"id\"\n assert update.status == \"succeeded\"",
"def test_simengine_rest_snapshot_simulation_node_post(self):\n pass",
"def test_project_admin_views(self):\n \n self._check_project_admin_view(self.testproject,\"admin:index\")\n \n # check page add view \n self._check_project_admin_view(self.testproject,\"admin:comicmodels_page_add\")\n \n # check page edit view for first page in project\n firstpage = get_first_page(self.testproject) \n self._check_project_admin_view(self.testproject,\"admin:comicmodels_page_change\",args=[firstpage.pk])\n \n # check page history view for first page in project\n firstpage = get_first_page(self.testproject)\n self._check_project_admin_view(self.testproject,\"admin:comicmodels_page_history\",args=[firstpage.pk])\n \n # check overview of all pages\n self._check_project_admin_view(self.testproject,\"admin:comicmodels_page_changelist\")\n \n \n # Do the same for registration requests: check of standard views do not crash\n \n # Create some registrationrequests \n rr1 = RegistrationRequest.objects.create(user=self.participant,project=self.testproject)\n rr2 = RegistrationRequest.objects.create(user=self.participant,project=self.testproject,status=RegistrationRequest.REJECTED)\n rr3 = RegistrationRequest.objects.create(user=self.participant,project=self.testproject,status=RegistrationRequest.ACCEPTED)\n \n # Using root here because projectadmin cannot see objects created above. Don't know why but this is not tested here.\n self._check_project_admin_view(self.testproject,\"admin:comicmodels_registrationrequest_change\",args=[rr1.pk],user=self.root)\n \n self._check_project_admin_view(self.testproject,\"admin:comicmodels_registrationrequest_history\",args=[rr1.pk],user=self.root)\n \n self._check_project_admin_view(self.testproject,\"admin:comicmodels_registrationrequest_changelist\",user=self.root)\n \n # see if adding a page crashes the admin\n create_page_in_projectadmin(self.testproject,\"test_project_admin_page_add\")\n \n # Projectadminsite has the special feature that any 'comicsite' field in a form is automatically\n # set to the project this projectadmin is for. Test this by creating a\n # page without a project. \n create_page_in_projectadmin(self.testproject,\"test_project_admin_page_add_without_comicsite\",comicsite_for_page=None)\n \n # check that expected links are present in main admin page",
"def test_view_security(self):\n # create empty XML payload\n data = self._node('NewDataSet')\n data = etree.tostring(data)\n # GET method not allowed\n response = self._get(data)\n self.assertEqual(response.status_code, 405)\n # Unauthorized, requires add_patientdatapayload permission\n self.client.login(username='test', password='pass')\n response = self._post(data)\n self.assertEqual(response.status_code, 401)\n permission = Permission.objects.get(codename='add_patientdatapayload')\n self.user.user_permissions.add(permission)\n response = self._post(data)\n self.assertEqual(response.status_code, 200)",
"def test_meeting_registrant_question_update(self):\n pass",
"def run(self):\n print('Running test of the markups in different views')\n\n #\n # first load the data\n #\n import SampleData\n sampleDataLogic = SampleData.SampleDataLogic()\n print(\"Getting MR Head Volume\")\n mrHeadVolume = sampleDataLogic.downloadMRHead()\n\n #\n # link the viewers\n #\n sliceLogic = slicer.app.layoutManager().sliceWidget('Red').sliceLogic()\n compositeNode = sliceLogic.GetSliceCompositeNode()\n compositeNode.SetLinkedControl(1)\n\n #\n # MR Head in the background\n #\n sliceLogic.StartSliceCompositeNodeInteraction(1)\n compositeNode.SetBackgroundVolumeID(mrHeadVolume.GetID())\n sliceLogic.EndSliceCompositeNodeInteraction()\n\n #\n # switch to conventional layout\n #\n lm = slicer.app.layoutManager()\n lm.setLayout(2)\n\n # create a fiducial list\n displayNode = slicer.vtkMRMLMarkupsDisplayNode()\n slicer.mrmlScene.AddNode(displayNode)\n fidNode = slicer.vtkMRMLMarkupsFiducialNode()\n slicer.mrmlScene.AddNode(fidNode)\n fidNode.SetAndObserveDisplayNodeID(displayNode.GetID())\n\n # make it active\n selectionNode = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSelectionNodeSingleton\")\n if (selectionNode is not None):\n selectionNode.SetReferenceActivePlaceNodeID(fidNode.GetID())\n\n # add some known points to it\n eye1 = [33.4975, 79.4042, -10.2143]\n eye2 = [-31.283, 80.9652, -16.2143]\n nose = [4.61944, 114.526, -33.2143]\n index = fidNode.AddFiducialFromArray(eye1)\n fidNode.SetNthFiducialLabel(index, \"eye-1\")\n index = fidNode.AddFiducialFromArray(eye2)\n fidNode.SetNthFiducialLabel(index, \"eye-2\")\n # hide the second eye as a test of visibility flags\n fidNode.SetNthFiducialVisibility(index, 0)\n index = fidNode.AddFiducialFromArray(nose)\n fidNode.SetNthFiducialLabel(index, \"nose\")\n\n self.logicDelayDisplay(\"Placed 3 fiducials\")\n\n # self.printViewAndSliceNodes()\n\n if self.widgetVisible(fidNode, 'vtkMRMLViewNode1') == 0:\n self.logicDelayDisplay(\"Test failed: widget is not visible in view 1\")\n # self.printViewNodeIDs(displayNode)\n return False\n\n #\n # switch to 2 3D views layout\n #\n lm.setLayout(15)\n self.logicDelayDisplay(\"Switched to 2 3D views\")\n # self.printViewAndSliceNodes()\n\n if self.widgetVisible(fidNode, 'vtkMRMLViewNode1') == 0 or self.widgetVisible(fidNode, 'vtkMRMLViewNode2') == 0:\n self.logicDelayDisplay(\"Test failed: widget is not visible in view 1 and 2\")\n # self.printViewNodeIDs(displayNode)\n return False\n\n #\n # show only in view 2\n #\n displayNode.AddViewNodeID(\"vtkMRMLViewNode2\")\n self.logicDelayDisplay(\"Showing only in view 2\")\n if self.widgetVisible(fidNode, 'vtkMRMLViewNode1') == 1:\n self.logicDelayDisplay(\"Test failed: widget is not supposed to be visible in view 1\")\n # self.printViewNodeIDs(displayNode)\n return False\n if self.widgetVisible(fidNode, 'vtkMRMLViewNode2') == 0:\n self.logicDelayDisplay(\"Test failed: widget is not visible in view 2\")\n # self.printViewNodeIDs(displayNode)\n return False\n\n #\n # remove it so show in all\n #\n displayNode.RemoveAllViewNodeIDs()\n self.logicDelayDisplay(\"Showing in both views\")\n if self.widgetVisible(fidNode, 'vtkMRMLViewNode1') == 0 or self.widgetVisible(fidNode, 'vtkMRMLViewNode2') == 0:\n self.logicDelayDisplay(\"Test failed: widget is not visible in view 1 and 2\")\n self.printViewNodeIDs(displayNode)\n return False\n\n #\n # show only in view 1\n #\n displayNode.AddViewNodeID(\"vtkMRMLViewNode1\")\n self.logicDelayDisplay(\"Showing only in view 1\")\n if self.widgetVisible(fidNode, 'vtkMRMLViewNode2') == 1:\n self.logicDelayDisplay(\"Test 
failed: widget is not supposed to be visible in view 2\")\n # self.printViewNodeIDs(displayNode)\n return False\n if self.widgetVisible(fidNode, 'vtkMRMLViewNode1') == 0:\n self.logicDelayDisplay(\"Test failed: widget is not visible in view 1\")\n # self.printViewNodeIDs(displayNode)\n return False\n\n # switch back to conventional\n lm.setLayout(2)\n self.logicDelayDisplay(\"Switched back to conventional layout\")\n # self.printViewAndSliceNodes()\n\n # test of the visibility in slice views\n displayNode.RemoveAllViewNodeIDs()\n\n # jump to the last fiducial\n slicer.modules.markups.logic().JumpSlicesToNthPointInMarkup(fidNode.GetID(), index, 1)\n # refocus the 3D cameras as well\n slicer.modules.markups.logic().FocusCamerasOnNthPointInMarkup(fidNode.GetID(), index)\n\n # show only in red\n displayNode.AddViewNodeID('vtkMRMLSliceNodeRed')\n self.logicDelayDisplay(\"Show only in red slice\")\n if self.widgetVisibleOnSlice(fidNode,'vtkMRMLSliceNodeRed') != 1:\n self.logicDelayDisplay(\"Test failed: widget not displayed on red slice\")\n # self.printViewNodeIDs(displayNode)\n return False\n\n # remove all, add green\n # print 'before remove all, after added red'\n # self.printViewNodeIDs(displayNode)\n displayNode.RemoveAllViewNodeIDs()\n # print 'after removed all'\n # self.printViewNodeIDs(displayNode)\n displayNode.AddViewNodeID('vtkMRMLSliceNodeGreen')\n self.logicDelayDisplay('Show only in green slice')\n if self.widgetVisibleOnSlice(fidNode,'vtkMRMLSliceNodeRed') != 0 or self.widgetVisibleOnSlice(fidNode,'vtkMRMLSliceNodeGreen') != 1:\n self.logicDelayDisplay(\"Test failed: widget not displayed only on green slice\")\n print '\\tred = ',self.widgetVisibleOnSlice(fidNode,'vtkMRMLSliceNodeRed')\n print '\\tgreen =',self.widgetVisibleOnSlice(fidNode,'vtkMRMLSliceNodeGreen')\n self.printViewNodeIDs(displayNode)\n return False\n\n return True",
"def _test_base_view_consistency_on_crash(self, fail_phase):\n\n self.cluster.set_batch_commitlog(enabled=True, use_batch_window = self.cluster.version() < '5.0')\n self.fixture_dtest_setup.ignore_log_patterns = [r'Dummy failure', r\"Failed to force-recycle all segments\"]\n self.prepare(rf=1, install_byteman=True)\n node1, node2, node3 = self.cluster.nodelist()\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Make node1 fail {} view writes'.format(fail_phase))\n node1.byteman_submit([mk_bman_path('fail_{}_view_write.btm'.format(fail_phase))])\n\n logger.debug('Write 1000 rows - all node1 writes should fail')\n\n failed = False\n for i in range(1, 1000):\n try:\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) USING TIMESTAMP {v}\".format(v=i))\n except WriteFailure:\n failed = True\n\n assert failed, \"Should fail at least once.\"\n assert node1.grep_log(\"Dummy failure\"), \"Should throw Dummy failure\"\n\n missing_entries = 0\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n for i in range(1, 1000):\n view_entry = rows_to_list(session.execute(SimpleStatement(\"SELECT * FROM t_by_v WHERE id = {} AND v = {}\".format(i, i),\n consistency_level=ConsistencyLevel.ONE)))\n base_entry = rows_to_list(session.execute(SimpleStatement(\"SELECT * FROM t WHERE id = {}\".format(i),\n consistency_level=ConsistencyLevel.ONE)))\n\n if not base_entry:\n missing_entries += 1\n if not view_entry:\n missing_entries += 1\n\n logger.debug(\"Missing entries {}\".format(missing_entries))\n assert missing_entries > 0\n\n logger.debug('Restarting node1 to ensure commit log is replayed')\n node1.stop(wait_other_notice=True)\n # Set batchlog.replay_timeout_seconds=1 so we can ensure batchlog will be replayed below\n node1.start(jvm_args=[\"-Dcassandra.batchlog.replay_timeout_in_ms=1\"])\n\n logger.debug('Replay batchlogs')\n time.sleep(0.001) # Wait batchlog.replay_timeout_in_ms=1 (ms)\n self._replay_batchlogs()\n\n logger.debug('Verify that both the base table entry and view are present after commit and batchlog replay')\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n for i in range(1, 1000):\n view_entry = rows_to_list(session.execute(SimpleStatement(\"SELECT * FROM t_by_v WHERE id = {} AND v = {}\".format(i, i),\n consistency_level=ConsistencyLevel.ONE)))\n base_entry = rows_to_list(session.execute(SimpleStatement(\"SELECT * FROM t WHERE id = {}\".format(i),\n consistency_level=ConsistencyLevel.ONE)))\n\n assert base_entry, \"Both base {} and view entry {} should exist.\".format(base_entry, view_entry)\n assert view_entry, \"Both base {} and view entry {} should exist.\".format(base_entry, view_entry)"
]
| [
"0.65297717",
"0.6427251",
"0.60055476",
"0.5885476",
"0.5847958",
"0.5842925",
"0.58080024",
"0.5760694",
"0.57584924",
"0.5724205",
"0.5716714",
"0.5659941",
"0.55928737",
"0.55318415",
"0.54757285",
"0.5458959",
"0.5454351",
"0.54295045",
"0.54149824",
"0.5407767",
"0.5404607",
"0.5404607",
"0.53760934",
"0.5365035",
"0.5333692",
"0.5310493",
"0.5303098",
"0.5285886",
"0.52639234",
"0.5261865"
]
| 0.7997364 | 0 |
Test that allow filtering works as usual for a materialized view | def test_allow_filtering(self):
session = self.prepare()
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
session.execute(("CREATE MATERIALIZED VIEW t_by_v2 AS SELECT * FROM t "
"WHERE v2 IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v2, id)"))
        # Populate the base table; each write is propagated to both views
        for i in range(1000):
session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))
        # Each view can be queried by its own partition key without ALLOW FILTERING
        for i in range(1000):
assert_one(session, "SELECT * FROM t_by_v WHERE v = {v}".format(v=i), [i, i, 'a', 3.0])
rows = list(session.execute("SELECT * FROM t_by_v2 WHERE v2 = 'a'"))
assert len(rows) == 1000, "Expected 1000 rows but got {}".format(len(rows))
assert_invalid(session, "SELECT * FROM t_by_v WHERE v = 1 AND v2 = 'a'")
assert_invalid(session, "SELECT * FROM t_by_v2 WHERE v2 = 'a' AND v = 1")
        # The same restrictions are accepted once ALLOW FILTERING is specified
        for i in range(1000):
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = {} AND v3 = 3.0 ALLOW FILTERING".format(i),
[i, i, 'a', 3.0]
)
assert_one(
session,
"SELECT * FROM t_by_v2 WHERE v2 = 'a' AND v = {} ALLOW FILTERING".format(i),
['a', i, i, 3.0]
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_filtered_view_hydrated(empty_viewset):\n viewset = empty_viewset\n system1 = viewset.model.add_software_system(name=\"sys1\")\n container_view = viewset.create_container_view(\n key=\"container1\", description=\"container\", software_system=system1\n )\n viewset.create_filtered_view(\n key=\"filter1\",\n view=container_view,\n description=\"filtered\",\n mode=FilterMode.Include,\n tags=[\"v2\"],\n )\n io = ViewSetIO.from_orm(viewset)\n\n new_viewset = ViewSet.hydrate(io, viewset.model)\n assert count(new_viewset.filtered_views) == 1\n view = list(new_viewset.filtered_views)[0]\n assert view.description == \"filtered\"\n assert isinstance(view.view, ContainerView)\n assert view.view.key == \"container1\"",
"def test_create(self):\n session = self.prepare(user_table=True)\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 1, \"Expecting 1 materialized view == got\" + str(result)",
"def test_queryset_is_immutable(self):\r\n query1 = TestModel.objects(test_id=5)\r\n assert len(query1._where) == 1\r\n\r\n query2 = query1.filter(expected_result__gte=1)\r\n assert len(query2._where) == 2\r\n assert len(query1._where) == 1",
"def test_complex_mv_select_statements(self):\n cluster = self.cluster\n cluster.set_configuration_options({'enable_materialized_views': 'true'})\n cluster.populate(3).start()\n node1 = cluster.nodelist()[0]\n session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)\n\n logger.debug(\"Creating keyspace\")\n session.execute(\"CREATE KEYSPACE mvtest WITH replication = \"\n \"{'class': 'SimpleStrategy', 'replication_factor': '3'}\")\n session.execute('USE mvtest')\n\n mv_primary_keys = [\"((a, b), c)\",\n \"((b, a), c)\",\n \"(a, b, c)\",\n \"(c, b, a)\",\n \"((c, a), b)\"]\n\n for mv_primary_key in mv_primary_keys:\n\n session.execute(\"CREATE TABLE test (a int, b int, c int, d int, PRIMARY KEY (a, b, c))\")\n\n insert_stmt = session.prepare(\"INSERT INTO test (a, b, c, d) VALUES (?, ?, ?, ?)\")\n update_stmt = session.prepare(\"UPDATE test SET d = ? WHERE a = ? AND b = ? AND c = ?\")\n delete_stmt1 = session.prepare(\"DELETE FROM test WHERE a = ? AND b = ? AND c = ?\")\n delete_stmt2 = session.prepare(\"DELETE FROM test WHERE a = ?\")\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n rows = [(0, 0, 0, 0),\n (0, 0, 1, 0),\n (0, 1, 0, 0),\n (0, 1, 1, 0),\n (1, 0, 0, 0),\n (1, 0, 1, 0),\n (1, 1, -1, 0),\n (1, 1, 0, 0),\n (1, 1, 1, 0)]\n\n for row in rows:\n session.execute(insert_stmt, row)\n\n logger.debug(\"Testing MV primary key: {}\".format(mv_primary_key))\n\n session.execute(\"CREATE MATERIALIZED VIEW mv AS SELECT * FROM test WHERE \"\n \"a = 1 AND b IS NOT NULL AND c = 1 PRIMARY KEY {}\".format(mv_primary_key))\n time.sleep(3)\n\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # insert new rows that does not match the filter\n session.execute(insert_stmt, (0, 0, 1, 0))\n session.execute(insert_stmt, (1, 1, 0, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # insert new row that does match the filter\n session.execute(insert_stmt, (1, 2, 1, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # update rows that does not match the filter\n session.execute(update_stmt, (1, 1, -1, 0))\n session.execute(update_stmt, (0, 1, 1, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # update a row that does match the filter\n session.execute(update_stmt, (2, 1, 1, 1))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete rows that does not match the filter\n session.execute(delete_stmt1, (1, 1, -1))\n session.execute(delete_stmt1, (2, 0, 1))\n session.execute(delete_stmt2, (0,))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete a row that does match the filter\n session.execute(delete_stmt1, (1, 1, 1))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete a partition that matches the filter\n session.execute(delete_stmt2, (1,))\n assert_all(session, \"SELECT a, b, c, d FROM mv\", [], 
cl=ConsistencyLevel.QUORUM)\n\n # Cleanup\n session.execute(\"DROP MATERIALIZED VIEW mv\")\n session.execute(\"DROP TABLE test\")",
"def test_search_collection_filters():\n col = Collection(search='forest', object_type=['layer'], filters={'provider': 'gee'}, app=['gfw'])\n assert len(col) > 1",
"def test_success_case(self):\r\n pk = uuid4()\r\n TimeUUIDQueryModel.create(partition=pk, time=uuid1(), data='1')\r\n time.sleep(0.2)\r\n TimeUUIDQueryModel.create(partition=pk, time=uuid1(), data='2')\r\n time.sleep(0.2)\r\n midpoint = datetime.utcnow()\r\n time.sleep(0.2)\r\n TimeUUIDQueryModel.create(partition=pk, time=uuid1(), data='3')\r\n time.sleep(0.2)\r\n TimeUUIDQueryModel.create(partition=pk, time=uuid1(), data='4')\r\n time.sleep(0.2)\r\n\r\n # test kwarg filtering\r\n q = TimeUUIDQueryModel.filter(partition=pk, time__lte=functions.MaxTimeUUID(midpoint))\r\n q = [d for d in q]\r\n assert len(q) == 2\r\n datas = [d.data for d in q]\r\n assert '1' in datas\r\n assert '2' in datas\r\n\r\n q = TimeUUIDQueryModel.filter(partition=pk, time__gte=functions.MinTimeUUID(midpoint))\r\n assert len(q) == 2\r\n datas = [d.data for d in q]\r\n assert '3' in datas\r\n assert '4' in datas\r\n\r\n # test query expression filtering\r\n q = TimeUUIDQueryModel.filter(\r\n TimeUUIDQueryModel.partition == pk,\r\n TimeUUIDQueryModel.time <= functions.MaxTimeUUID(midpoint)\r\n )\r\n q = [d for d in q]\r\n assert len(q) == 2\r\n datas = [d.data for d in q]\r\n assert '1' in datas\r\n assert '2' in datas\r\n\r\n q = TimeUUIDQueryModel.filter(\r\n TimeUUIDQueryModel.partition == pk,\r\n TimeUUIDQueryModel.time >= functions.MinTimeUUID(midpoint)\r\n )\r\n assert len(q) == 2\r\n datas = [d.data for d in q]\r\n assert '3' in datas\r\n assert '4' in datas",
"def test_filter_with_plone3_query(self):\n portal = self.layer['portal']\n req = test_request()\n # Search.filter_query() will get SearchableText from form if not\n # passed in explicit query argument:\n req.form['SearchableText'] = 'jobs'\n req.form['Title'] = 'Human resource'\n req.form['Description'] = ''\n req.form['created'] = [DateTime('1970/02/01 00:00:00 GMT+0')]\n req.form['created_usage'] = 'range:min'\n req.form['submit'] = 'Search'\n view = getMultiAdapter((portal, req), name=u'search')\n res = view.results(batch=False)\n self.assertEqual([], [r for r in res])",
"def test_filter_empty(self):\n portal = self.layer['portal']\n req = test_request()\n # Search.filter_query() will get SearchableText from form if not\n # passed in explicit query argument:\n req.form['SearchableText'] = 'spam'\n view = getMultiAdapter((portal, req), name=u'search')\n res = view.results(batch=False)\n self.assertTrue('my-page1' in [r.getId() for r in res],\n 'Test document is not found in the results.')\n # filter_query() will return None on invalid query (no real indexes):\n req = test_request()\n req.form['garbanzo'] = 'chickpea' # just noise, no index for this\n view = getMultiAdapter((portal, req), name=u'search')\n self.assertIsNone(view.filter_query({'b_start': 0, 'b_size': 10}))\n # resulting empty query, ergo no search performed, empty result:\n self.assertFalse(view.results(batch=False))\n # filter_query() succeeds if 1+ real index name added to request:\n req.form['portal_type'] = 'Document'\n self.assertIsNotNone(view.filter_query({'b_start': 0, 'b_size': 10}))\n res = view.results(batch=False)\n self.assertTrue('my-page1' in [r.getId() for r in res],\n 'Test document is not found in the results.')",
"def test_query_filter_field(self):\n obj = self.provision_single_asset()\n # TODO: Write a positive test for this\n ret = self.get('widget', 200,\n params={'__filter': [\n {'field': 'created_at', 'name': 'name', 'op': 'eq'}]})\n assert len(ret['objects']) == 0",
"def testUsingFilterTool(self):\n pass",
"def test_filter_wea_zero_entry():\n pass",
"def test_filteredQuery(self):\n answer, authority, additional = self._queryTest(True)\n self.assertEqual(\n answer,\n [RRHeader(b'foo.example.com', payload=Record_A('5.8.13.21', ttl=0))])\n self.assertEqual(authority, [])\n self.assertEqual(additional, [])",
"def test_by_statement_mininimal_data(minimal_mockdata, qfilter):\n res = qfilter.filter(minimal_mockdata, st='st1')\n assert len(res) == 1\n res = qfilter.filter(minimal_mockdata, st='xxx2')\n assert not res == 0",
"def filter():\n return get_filter_data(db, MyTable)",
"def test_collection_filtering(self):\n # Create a collection\n entries = Entry.objects.filter(id__in=(1, 5, 10, 15))\n collection = CollectionFactory(entries=entries)\n\n # Get a valid collection\n params = {\"collection\": collection.id}\n\n self._test_filtering(**params)",
"def _test_no_base_column_in_view_pk_complex_timestamp(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1, node2, node3 = self.cluster.nodelist()\n\n session.execute('USE ks')\n session.execute(\"CREATE TABLE t (k int, c int, a int, b int, e int, f int, primary key(k, c))\")\n session.execute((\"CREATE MATERIALIZED VIEW mv AS SELECT k,c,a,b FROM t \"\n \"WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c, k)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n # update unselected, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 1 SET e=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, 1, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # remove unselected, add selected column, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 2 SET e=null, b=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, 1, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, 1])\n\n # remove selected column, view row is removed\n self.update_view(session, \"UPDATE t USING TIMESTAMP 2 SET e=null, b=null WHERE k=1 AND c=1;\", flush)\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")\n\n # update unselected with ts=3, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET f=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # insert livenesssInfo, view row should be alive\n self.update_view(session, \"INSERT INTO t(k,c) VALUES(1,1) USING TIMESTAMP 3\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # remove unselected, view row should be alive because of base livenessInfo alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET f=null WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # add selected column, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET a=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n\n # update unselected, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 SET f=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n\n # delete with ts=3, view row should be alive due to unselected@ts4\n self.update_view(session, \"DELETE FROM t USING TIMESTAMP 3 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # remove unselected, view row should be removed\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 SET f=null WHERE k=1 AND c=1;\", flush)\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")\n\n # add selected with ts=7, view row is alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 7 SET b=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, 
\"SELECT * FROM t\", [1, 1, None, 1, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, 1])\n\n # remove selected with ts=7, view row is dead\n self.update_view(session, \"UPDATE t USING TIMESTAMP 7 SET b=null WHERE k=1 AND c=1;\", flush)\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")\n\n # add selected with ts=5, view row is alive (selected column should not affects each other)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 5 SET a=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n\n start = time.time()\n # add selected with ttl=30 (we apparently need a long ttl because the flushing etc that self.update_view does can take a long time)\n update_time = self.update_view(session, \"UPDATE t USING TTL 30 SET a=1 WHERE k=1 AND c=1;\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n except AssertionError as ae:\n if (time.time() - start) >= 30:\n pytest.fail(\"Please increase the 30 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n wait_time = update_time + 30 - time.time()\n if wait_time > 0:\n time.sleep(wait_time)\n\n start = time.time()\n # update unselected with ttl=30, view row should be alive\n update_time = self.update_view(session, \"UPDATE t USING TTL 30 SET f=1 WHERE k=1 AND c=1;\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n except AssertionError as ae:\n if (time.time() - start) >= 30:\n pytest.fail(\"Please increase the 30 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n wait_time = update_time + 30 - time.time()\n if wait_time > 0:\n time.sleep(wait_time)\n\n # view row still alive due to base livenessInfo\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")",
"def test_filter(self):\n file_name = \"test_filter.hdf5\"\n dataset_file_filter = h5py.File(os.path.join(tmp_path, file_name), \"w\")\n for view_index, (view_name, view, is_sparse) in enumerate(\n zip(self.view_names, self.views, self.are_sparse)):\n view_dataset = dataset_file_filter.create_dataset(\n \"View\" + str(view_index),\n view.shape,\n data=view)\n view_dataset.attrs[\"name\"] = view_name\n view_dataset.attrs[\"sparse\"] = is_sparse\n labels_dataset = dataset_file_filter.create_dataset(\"Labels\",\n shape=self.labels.shape,\n data=self.labels)\n labels_dataset.attrs[\"names\"] = [label_name.encode()\n for label_name in self.labels_names]\n meta_data_grp = dataset_file_filter.create_group(\"Metadata\")\n meta_data_grp.attrs[\"nbView\"] = len(self.views)\n meta_data_grp.attrs[\"nbClass\"] = len(np.unique(self.labels))\n meta_data_grp.attrs[\"datasetLength\"] = len(self.labels)\n dataset_object = dataset.HDF5Dataset(hdf5_file=dataset_file_filter)\n dataset_object.filter(np.array([0, 1, 0]), [\"0\", \"1\"], [1, 2, 3],\n [\"ViewN0\"], tmp_path)\n self.assertEqual(dataset_object.nb_view, 1)\n np.testing.assert_array_equal(dataset_object.get_labels(), [0, 1, 0])\n dataset_object.dataset.close()\n os.remove(os.path.join(tmp_path, \"test_filter_temp_filter.hdf5\"))\n os.remove(os.path.join(tmp_path, \"test_filter.hdf5\"))",
"def test_update_visibility_query(self):\n pass",
"def test_select():\n assert_that(users.select(), all_of(\n instance_of(SelectQuery),\n has_properties({\n 'collection': users,\n 'model': User,\n\n 'state': has_entries({\n 'properties': None\n })\n })\n ))",
"def testFilters(self):\n # This makes a matrix of rows and predicates with exactly one predicate set\n # per row.\n predicates = \"foo bar is so good\".split()\n for i in range(11):\n row_name = \"aff4:/row:%02d\" % i\n predicate = predicates[i % len(predicates)]\n data_store.DB.Set(row_name, \"metadata:%s\" % predicate,\n utils.SmartUnicode(row_name + predicate),\n token=self.token)\n data_store.DB.Set(row_name, \"aff4:type\", u\"test\", token=self.token)\n\n # Retrieve all subjects with prefix row1:\n rows = list(data_store.DB.Query(\n attributes=[\"metadata:foo\"],\n filter_obj=data_store.DB.filter.HasPredicateFilter(\"metadata:foo\"),\n token=self.token))\n\n for row in rows:\n self.assertEqual(row[\"metadata:foo\"][0][0], row[\"subject\"][0][0] + \"foo\")\n\n self.assertEqual(len(rows), 3)\n self.assertEqual(rows[0][\"subject\"][0][0], \"aff4:/row:00\")\n self.assertEqual(rows[1][\"subject\"][0][0], \"aff4:/row:05\")\n self.assertEqual(rows[2][\"subject\"][0][0], \"aff4:/row:10\")\n\n rows = list(data_store.DB.Query(\n filter_obj=data_store.DB.filter.AndFilter(\n data_store.DB.filter.HasPredicateFilter(\"metadata:foo\"),\n data_store.DB.filter.SubjectContainsFilter(\"row:[0-1]0\")),\n token=self.token))\n\n self.assertEqual(len(rows), 2)\n self.assertEqual(rows[0][\"subject\"][0][0], \"aff4:/row:00\")\n self.assertEqual(rows[1][\"subject\"][0][0], \"aff4:/row:10\")\n\n # Check that we can Query with a set of subjects\n rows = list(data_store.DB.Query(\n filter_obj=data_store.DB.filter.HasPredicateFilter(\"metadata:foo\"),\n subjects=[\"aff4:/row:00\", \"aff4:/row:10\"], token=self.token))\n\n self.assertEqual(len(rows), 2)\n self.assertEqual(rows[0][\"subject\"][0][0], \"aff4:/row:00\")\n self.assertEqual(rows[1][\"subject\"][0][0], \"aff4:/row:10\")\n\n rows = list(data_store.DB.Query(\n filter_obj=data_store.DB.filter.PredicateContainsFilter(\n \"metadata:foo\", \"row:0[0-9]foo\"),\n token=self.token))\n\n self.assertEqual(len(rows), 2)\n self.assertEqual(rows[0][\"subject\"][0][0], \"aff4:/row:00\")\n self.assertEqual(rows[1][\"subject\"][0][0], \"aff4:/row:05\")",
"def test_filter_errors(self):\n\n with self.assertRaises(ValueError):\n self.test_table.filter()\n\n with self.assertRaises(ValueError):\n self.test_table.filter(mode='wrongmode', Property='Property')",
"def test_list_filter_queryset_filtered_by_default(self):\n modeladmin = NotNinetiesListFilterAdmin(Book, site)\n request = self.request_factory.get(\"/\", {})\n request.user = self.alfred\n changelist = modeladmin.get_changelist_instance(request)\n changelist.get_results(request)\n self.assertEqual(changelist.full_result_count, 4)",
"def test_boolean_filter(self):\n self.assertEqual(len(self.collection.results()), 6)\n\n result = get_filter_items(\n self.collection_uid, \"exclude_from_nav\", cache_enabled=False\n )\n\n self.assertEqual(len(result), 3)\n self.assertEqual(get_data_by_val(result, \"all\")[\"count\"], 6)\n self.assertEqual(get_data_by_val(result, \"all\")[\"selected\"], True)\n self.assertEqual(get_data_by_val(result, True)[\"count\"], 2)\n self.assertEqual(get_data_by_val(result, False)[\"count\"], 4)\n\n # test narrowed down results\n narrowed_down_result = get_filter_items(\n self.collection_uid,\n \"exclude_from_nav\",\n request_params={\"exclude_from_nav\": True},\n narrow_down=True,\n show_count=True,\n cache_enabled=False,\n )\n\n self.assertEqual(\n len(narrowed_down_result), 2, msg=\"narrowed result length should be 2\"\n )\n self.assertEqual(\n get_data_by_val(narrowed_down_result, True)[\"selected\"],\n True, # noqa\n msg=\"Test that 'Yes' is selected, matching the query\",\n )\n self.assertEqual(\n get_data_by_val(narrowed_down_result, \"all\")[\"count\"],\n 6,\n msg=\"Test that there are 3 results if unselected\",\n )\n\n # test narrowed down results\n narrowed_down_result = get_filter_items(\n self.collection_uid,\n \"exclude_from_nav\",\n request_params={\"exclude_from_nav\": False},\n narrow_down=True,\n show_count=True,\n cache_enabled=False,\n )\n\n self.assertEqual(\n len(narrowed_down_result), 2, msg=\"narrowed result length should be 2\"\n )\n self.assertEqual(\n get_data_by_val(narrowed_down_result, False)[\"selected\"],\n True, # noqa\n msg=\"Test that 'No' is selected, matching the query\",\n )\n self.assertEqual(\n get_data_by_val(narrowed_down_result, \"all\")[\"count\"],\n 6,\n msg=\"Test that there are 3 results if unselected\",\n )",
"def test_filter_rows(self):\n self.insert_row()\n\n instance = Manager.objects().first().run_sync()\n dictionary = instance.to_dict(Manager.name)\n self.assertDictEqual(dictionary, {\"name\": \"Guido\"})",
"def test_explicit_filter(self):\n request = RequestFactory().get('/?status=archived')\n qs = MockQuerySet()\n filter = TestFilterSet(request.GET, qs)\n self.assertEquals(filter.data.getlist('status'), ['archived'])",
"def test_filter_chaining(self):\n\n class Blog(Document):\n id = StringField(primary_key=True)\n\n class BlogPost(Document):\n blog = ReferenceField(Blog)\n title = StringField()\n is_published = BooleanField()\n published_date = DateTimeField()\n\n @queryset_manager\n def published(doc_cls, queryset):\n return queryset(is_published=True)\n\n Blog.drop_collection()\n BlogPost.drop_collection()\n\n blog_1 = Blog(id=\"1\")\n blog_2 = Blog(id=\"2\")\n blog_3 = Blog(id=\"3\")\n\n blog_1.save()\n blog_2.save()\n blog_3.save()\n\n BlogPost.objects.create(\n blog=blog_1,\n title=\"Blog Post #1\",\n is_published=True,\n published_date=datetime.datetime(2010, 1, 5, 0, 0, 0),\n )\n BlogPost.objects.create(\n blog=blog_2,\n title=\"Blog Post #2\",\n is_published=True,\n published_date=datetime.datetime(2010, 1, 6, 0, 0, 0),\n )\n BlogPost.objects.create(\n blog=blog_3,\n title=\"Blog Post #3\",\n is_published=True,\n published_date=datetime.datetime(2010, 1, 7, 0, 0, 0),\n )\n\n # find all published blog posts before 2010-01-07\n published_posts = BlogPost.published()\n published_posts = published_posts.filter(\n published_date__lt=datetime.datetime(2010, 1, 7, 0, 0, 0)\n )\n assert published_posts.count() == 2\n\n blog_posts = BlogPost.objects\n blog_posts = blog_posts.filter(blog__in=[blog_1, blog_2])\n blog_posts = blog_posts.filter(blog=blog_3)\n assert blog_posts.count() == 0\n\n BlogPost.drop_collection()\n Blog.drop_collection()",
"def test_update_visibility_query2(self):\n pass",
"def test_filters(self):\n data = pd.DataFrame({'a': [1, 2, 3], 'ab': [6, 5, 4], 'c2': [1, 0, 1]})\n\n # test regex selector\n selector = ItemSelector(regex='[b2]$')\n selected_data = selector.transform(data)\n expected_data = pd.DataFrame({'ab': [6, 5, 4], 'c2': [1, 0, 1]})\n pd.testing.assert_frame_equal(selected_data, expected_data)\n\n # test items selector\n selector = ItemSelector(items=['a', 'c2'])\n selected_data = selector.transform(data)\n expected_data = pd.DataFrame({'a': [1, 2, 3], 'c2': [1, 0, 1]})\n pd.testing.assert_frame_equal(selected_data, expected_data)\n\n # test like selector\n selector = ItemSelector(like='a')\n selected_data = selector.transform(data)\n expected_data = pd.DataFrame({'a': [1, 2, 3], 'ab': [6, 5, 4]})\n pd.testing.assert_frame_equal(selected_data, expected_data)",
"def test_update_visibility_query4(self):\n pass",
"def test_update_visibility_query1(self):\n pass"
]
| [
"0.6839462",
"0.64977413",
"0.6263299",
"0.62301564",
"0.6035926",
"0.60172105",
"0.6004824",
"0.5993022",
"0.59713763",
"0.59470123",
"0.5942615",
"0.5900009",
"0.5886084",
"0.5870187",
"0.5866022",
"0.58232516",
"0.5784983",
"0.5762714",
"0.5761528",
"0.5744335",
"0.5738575",
"0.5731106",
"0.5730067",
"0.57276857",
"0.56906694",
"0.56760967",
"0.5650705",
"0.56506586",
"0.56490463",
"0.5638198"
]
| 0.7628281 | 0 |
Test that secondary indexes cannot be created on a materialized view | def test_secondary_index(self):
session = self.prepare()
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
assert_invalid(session, "CREATE INDEX ON t_by_v (v2)",
"Secondary indexes are not supported on materialized views") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_local_index_no_range_key(engine):\n with pytest.raises(ValueError):\n class Model(engine.model):\n id = Column(UUID, hash_key=True)\n another = Column(UUID)\n by_another = LocalSecondaryIndex(range_key=\"another\")",
"def test_missing_foreign_key_indices(db_session):\n\n from sqlalchemy_utils.functions import non_indexed_foreign_keys\n from kotti import metadata\n\n assert non_indexed_foreign_keys(metadata) == {}",
"def test_index_reflection(self):\n import warnings\n def capture_warnings(*args, **kw):\n capture_warnings._orig_showwarning(*args, **kw)\n capture_warnings.warnings.append(args)\n capture_warnings._orig_showwarning = warnings.warn\n capture_warnings.warnings = []\n\n m1 = MetaData(testing.db)\n t1 = Table('party', m1,\n Column('id', String(10), nullable=False),\n Column('name', String(20), index=True), \n Column('aname', String(20))\n )\n m1.create_all()\n \n testing.db.execute(\"\"\"\n create index idx1 on party ((id || name))\n \"\"\") \n testing.db.execute(\"\"\"\n create unique index idx2 on party (id) where name = 'test'\n \"\"\")\n \n testing.db.execute(\"\"\"\n create index idx3 on party using btree\n (lower(name::text), lower(aname::text))\n \"\"\")\n \n try:\n m2 = MetaData(testing.db)\n\n warnings.warn = capture_warnings\n t2 = Table('party', m2, autoload=True)\n \n wrn = capture_warnings.warnings\n assert str(wrn[0][0]) == (\n \"Skipped unsupported reflection of expression-based index idx1\")\n assert str(wrn[1][0]) == (\n \"Predicate of partial index idx2 ignored during reflection\")\n assert len(t2.indexes) == 2\n # Make sure indexes are in the order we expect them in\n tmp = [(idx.name, idx) for idx in t2.indexes]\n tmp.sort()\n \n r1, r2 = [idx[1] for idx in tmp]\n\n assert r1.name == 'idx2'\n assert r1.unique == True\n assert r2.unique == False\n assert [t2.c.id] == r1.columns\n assert [t2.c.name] == r2.columns\n finally:\n warnings.warn = capture_warnings._orig_showwarning\n m1.drop_all()",
"def test_indexes_arent_allowed_on_models_with_multiple_primary_keys(self):",
"def test_get_document_inexistent(empty_index):\n with pytest.raises(Exception):\n empty_index().get_document(\"123\")",
"def test_duplicate_key_raises_error(empty_viewset):\n viewset = empty_viewset\n system1 = viewset.model.add_software_system(name=\"sys1\")\n\n viewset.create_container_view(\n key=\"container1\", description=\"container\", software_system=system1\n )\n with pytest.raises(ValueError, match=\"View already exists\"):\n viewset.create_container_view(\n key=\"container1\", description=\"container\", software_system=system1\n )",
"def _test_base_view_consistency_on_crash(self, fail_phase):\n\n self.cluster.set_batch_commitlog(enabled=True, use_batch_window = self.cluster.version() < '5.0')\n self.fixture_dtest_setup.ignore_log_patterns = [r'Dummy failure', r\"Failed to force-recycle all segments\"]\n self.prepare(rf=1, install_byteman=True)\n node1, node2, node3 = self.cluster.nodelist()\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Make node1 fail {} view writes'.format(fail_phase))\n node1.byteman_submit([mk_bman_path('fail_{}_view_write.btm'.format(fail_phase))])\n\n logger.debug('Write 1000 rows - all node1 writes should fail')\n\n failed = False\n for i in range(1, 1000):\n try:\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) USING TIMESTAMP {v}\".format(v=i))\n except WriteFailure:\n failed = True\n\n assert failed, \"Should fail at least once.\"\n assert node1.grep_log(\"Dummy failure\"), \"Should throw Dummy failure\"\n\n missing_entries = 0\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n for i in range(1, 1000):\n view_entry = rows_to_list(session.execute(SimpleStatement(\"SELECT * FROM t_by_v WHERE id = {} AND v = {}\".format(i, i),\n consistency_level=ConsistencyLevel.ONE)))\n base_entry = rows_to_list(session.execute(SimpleStatement(\"SELECT * FROM t WHERE id = {}\".format(i),\n consistency_level=ConsistencyLevel.ONE)))\n\n if not base_entry:\n missing_entries += 1\n if not view_entry:\n missing_entries += 1\n\n logger.debug(\"Missing entries {}\".format(missing_entries))\n assert missing_entries > 0\n\n logger.debug('Restarting node1 to ensure commit log is replayed')\n node1.stop(wait_other_notice=True)\n # Set batchlog.replay_timeout_seconds=1 so we can ensure batchlog will be replayed below\n node1.start(jvm_args=[\"-Dcassandra.batchlog.replay_timeout_in_ms=1\"])\n\n logger.debug('Replay batchlogs')\n time.sleep(0.001) # Wait batchlog.replay_timeout_in_ms=1 (ms)\n self._replay_batchlogs()\n\n logger.debug('Verify that both the base table entry and view are present after commit and batchlog replay')\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n for i in range(1, 1000):\n view_entry = rows_to_list(session.execute(SimpleStatement(\"SELECT * FROM t_by_v WHERE id = {} AND v = {}\".format(i, i),\n consistency_level=ConsistencyLevel.ONE)))\n base_entry = rows_to_list(session.execute(SimpleStatement(\"SELECT * FROM t WHERE id = {}\".format(i),\n consistency_level=ConsistencyLevel.ONE)))\n\n assert base_entry, \"Both base {} and view entry {} should exist.\".format(base_entry, view_entry)\n assert view_entry, \"Both base {} and view entry {} should exist.\".format(base_entry, view_entry)",
"def test_create_index_fail_test(tcex: TcEx, monkeypatch: MonkeyPatch):\n\n # monkeypatch method\n def mp_post(*args, **kwargs): # pylint: disable=unused-argument\n return MockPost({}, ok=False)\n\n monkeypatch.setattr(tcex.session.tc, 'post', mp_post)\n\n # create index\n key = str(uuid.uuid4())\n try:\n tcex.api.tc.v2.datastore('local', key)\n assert False, 'Failed to catch error on ok=False'\n except RuntimeError:\n assert True",
"def test_raise_exception_bad_indices(self):\n print(\"Testing exception is raised if indices are bad\")\n\n with self.assertRaises(Exception) as no_index:\n get_region_data(self.wmo_boxes, self.float_name, self.config,\n [], self.pres)\n\n self.assertTrue('NO DATA FOUND' in str(no_index.exception))\n\n with self.assertRaises(Exception) as big_index:\n get_region_data(self.wmo_boxes, self.float_name, self.config,\n [99999999999999999], self.pres)\n\n self.assertTrue('NO DATA FOUND' in str(big_index.exception))",
"def index_ddl_if_exists(self):\n return exclusions.closed()",
"def test_index_over_non_existing_field(self, collection):\n collection.create_index(\n [(\"hello\", Database.DESCENDING), (\"idontexist\", Database.ASCENDING)],\n unique=True,\n )\n\n collection.insert_many([{\"hello\": \"here\"}, {\"hello\": 2}])\n assert collection._indexes == {\n \"_id_\": ((\"_id\",), {(1,), (2,), (3,)}),\n \"hello_1_idontexist_1\": (\n (\"hello\", \"idontexist\"),\n {(\"there\", None), (\"here\", None), (2, None)},\n ),\n }\n assert collection.find({}, selection={\"hello\": 1, \"idontexist\": 1}) == [\n {\"_id\": 1, \"hello\": \"there\", \"idontexist\": None},\n {\"_id\": 2, \"hello\": \"here\", \"idontexist\": None},\n {\"_id\": 3, \"hello\": 2, \"idontexist\": None},\n ]",
"def test_creating_index_type(self):",
"def test_integer_index():\n\tlib.backup_and_restore(\n\t\tlambda context: create_indexes(lib.create_integer_index),\n\t\tNone,\n\t\tlambda context: check_indexes(lib.check_simple_index, 12345)\n\t)",
"def test_no_key_raises_error(empty_viewset):\n viewset = empty_viewset\n system1 = viewset.model.add_software_system(name=\"sys1\")\n\n with pytest.raises(ValueError, match=\"A key must be specified\"):\n viewset.create_container_view(description=\"container\", software_system=system1)",
"def propagate_view_creation_over_non_existing_table(self):\n\n cluster = self.cluster\n cluster.populate(3)\n cluster.start()\n node1, node2, node3 = self.cluster.nodelist()\n session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)\n create_ks(session, 'ks', 3)\n\n session.execute('CREATE TABLE users (username varchar PRIMARY KEY, state varchar)')\n\n # create a materialized view only in nodes 1 and 2\n node3.stop(wait_other_notice=True)\n session.execute(('CREATE MATERIALIZED VIEW users_by_state AS '\n 'SELECT * FROM users WHERE state IS NOT NULL AND username IS NOT NULL '\n 'PRIMARY KEY (state, username)'))\n\n # drop the base table only in node 3\n node1.stop(wait_other_notice=True)\n node2.stop(wait_other_notice=True)\n node3.start(wait_for_binary_proto=True)\n session = self.patient_cql_connection(node3, consistency_level=ConsistencyLevel.QUORUM)\n session.execute('DROP TABLE ks.users')\n\n # restart the cluster\n cluster.stop()\n cluster.start()\n\n # node3 should have received and ignored the creation of the MV over the dropped table\n assert node3.grep_log('Not adding view users_by_state because the base table')",
"def test_index_container_privileges(self):\n pass",
"def test_complex_mv_select_statements(self):\n cluster = self.cluster\n cluster.set_configuration_options({'enable_materialized_views': 'true'})\n cluster.populate(3).start()\n node1 = cluster.nodelist()[0]\n session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)\n\n logger.debug(\"Creating keyspace\")\n session.execute(\"CREATE KEYSPACE mvtest WITH replication = \"\n \"{'class': 'SimpleStrategy', 'replication_factor': '3'}\")\n session.execute('USE mvtest')\n\n mv_primary_keys = [\"((a, b), c)\",\n \"((b, a), c)\",\n \"(a, b, c)\",\n \"(c, b, a)\",\n \"((c, a), b)\"]\n\n for mv_primary_key in mv_primary_keys:\n\n session.execute(\"CREATE TABLE test (a int, b int, c int, d int, PRIMARY KEY (a, b, c))\")\n\n insert_stmt = session.prepare(\"INSERT INTO test (a, b, c, d) VALUES (?, ?, ?, ?)\")\n update_stmt = session.prepare(\"UPDATE test SET d = ? WHERE a = ? AND b = ? AND c = ?\")\n delete_stmt1 = session.prepare(\"DELETE FROM test WHERE a = ? AND b = ? AND c = ?\")\n delete_stmt2 = session.prepare(\"DELETE FROM test WHERE a = ?\")\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n rows = [(0, 0, 0, 0),\n (0, 0, 1, 0),\n (0, 1, 0, 0),\n (0, 1, 1, 0),\n (1, 0, 0, 0),\n (1, 0, 1, 0),\n (1, 1, -1, 0),\n (1, 1, 0, 0),\n (1, 1, 1, 0)]\n\n for row in rows:\n session.execute(insert_stmt, row)\n\n logger.debug(\"Testing MV primary key: {}\".format(mv_primary_key))\n\n session.execute(\"CREATE MATERIALIZED VIEW mv AS SELECT * FROM test WHERE \"\n \"a = 1 AND b IS NOT NULL AND c = 1 PRIMARY KEY {}\".format(mv_primary_key))\n time.sleep(3)\n\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # insert new rows that does not match the filter\n session.execute(insert_stmt, (0, 0, 1, 0))\n session.execute(insert_stmt, (1, 1, 0, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # insert new row that does match the filter\n session.execute(insert_stmt, (1, 2, 1, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # update rows that does not match the filter\n session.execute(update_stmt, (1, 1, -1, 0))\n session.execute(update_stmt, (0, 1, 1, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # update a row that does match the filter\n session.execute(update_stmt, (2, 1, 1, 1))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete rows that does not match the filter\n session.execute(delete_stmt1, (1, 1, -1))\n session.execute(delete_stmt1, (2, 0, 1))\n session.execute(delete_stmt2, (0,))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete a row that does match the filter\n session.execute(delete_stmt1, (1, 1, 1))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete a partition that matches the filter\n session.execute(delete_stmt2, (1,))\n assert_all(session, \"SELECT a, b, c, d FROM mv\", [], 
cl=ConsistencyLevel.QUORUM)\n\n # Cleanup\n session.execute(\"DROP MATERIALIZED VIEW mv\")\n session.execute(\"DROP TABLE test\")",
"def test_indexable(self):\n # verify ----------------------\n try:\n self.collection[0]\n except TypeError:\n msg = \"'Collection' object does not support indexing\"\n self.fail(msg)\n except IndexError:\n pass",
"def test_geo_index():\n\tlib.backup_and_restore(\n\t\tlambda context: create_indexes(lib.create_geo_index),\n\t\tNone,\n\t\tlambda context: check_indexes(lib.check_geo_index, (0.0, 0.0))\n\t)",
"def test_collection_not_exists(self):\n def validate_vb_detail_stats():\n failed = durability_helper.verify_vbucket_details_stats(\n self.bucket, self.cluster_util.get_kv_nodes(self.cluster),\n vbuckets=self.cluster.vbuckets,\n expected_val=verification_dict)\n if failed:\n self.log_failure(\"vBucket_details validation failed\")\n self.bucket_util.validate_docs_per_collections_all_buckets(\n self.cluster)\n\n sync_write_enabled = DurabilityHelper.is_sync_write_enabled(\n self.bucket_durability_level, self.durability_level)\n num_cols_in_bucket = 0\n for _, scope in self.bucket.scopes.items():\n for _, _ in scope.collections.items():\n num_cols_in_bucket += 1\n\n verification_dict = dict()\n verification_dict[\"ops_create\"] = num_cols_in_bucket * self.num_items\n verification_dict[\"ops_update\"] = 0\n verification_dict[\"ops_delete\"] = 0\n verification_dict[\"rollback_item_count\"] = 0\n verification_dict[\"sync_write_aborted_count\"] = 0\n verification_dict[\"sync_write_committed_count\"] = 0\n\n durability_helper = DurabilityHelper(self.log,\n len(self.cluster.kv_nodes),\n durability=self.durability_level)\n\n drop_scope = self.input.param(\"drop_scope\", False)\n if self.scope_name != CbServer.default_scope:\n self.scope_name = self.bucket_util.get_random_name()\n if self.collection_name != CbServer.default_collection:\n self.collection_name = self.bucket_util.get_random_name()\n\n # Doc generator used for mutations\n doc_gen = doc_generator(\"test_col_not_exists\", 0, 10)\n\n # Acquire SDK client for mutations\n client = self.sdk_client_pool.get_client_for_bucket(\n self.bucket,\n self.scope_name,\n self.collection_name)\n\n doc_ttl, _ = \\\n SDKExceptionTests.__get_random_doc_ttl_and_durability_level()\n self.log.info(\"Creating docs with doc_ttl %s into %s:%s:%s\"\n % (doc_ttl,\n self.bucket.name,\n self.scope_name,\n self.collection_name))\n\n retry_reason = SDKException.RetryReason\n while doc_gen.has_next():\n key, value = doc_gen.next()\n result = client.crud(\"create\", key, value,\n exp=doc_ttl,\n durability=self.durability_level,\n timeout=30)\n if self.collection_name == CbServer.default_collection:\n if result[\"status\"] is False:\n self.log_failure(\"Create doc failed for key: %s\" % key)\n else:\n verification_dict[\"ops_create\"] += 1\n if sync_write_enabled:\n verification_dict[\"sync_write_committed_count\"] += 1\n self.bucket.scopes[\n self.scope_name].collections[\n self.collection_name].num_items += 1\n elif result[\"status\"] is True:\n self.log_failure(\"Create didn't fail as expected for key: %s\"\n % key)\n elif (SDKException.AmbiguousTimeoutException\n not in str(result[\"error\"])\n or retry_reason.COLLECTION_NOT_FOUND\n not in str(result[\"error\"])) \\\n and (\n SDKException.RequestCanceledException\n not in str(result[\"error\"])\n or retry_reason.COLLECTION_MAP_REFRESH_IN_PROGRESS\n not in str(result[\"error\"])):\n self.log_failure(\"Invalid exception for key %s: %s\"\n % (key, result[\"error\"]))\n\n validate_vb_detail_stats()\n # Create required scope/collection for successful CRUD operation\n self.create_scope_collection()\n\n # Reset doc_gen itr value for retry purpose\n doc_gen.reset()\n doc_ttl, _ = \\\n SDKExceptionTests.__get_random_doc_ttl_and_durability_level()\n self.log.info(\"Creating docs with doc_ttl %s into %s:%s:%s\"\n % (doc_ttl,\n self.bucket.name,\n self.scope_name,\n self.collection_name))\n op_type = \"create\"\n if self.collection_name == CbServer.default_collection:\n op_type = \"update\"\n\n while doc_gen.has_next():\n key, 
value = doc_gen.next()\n result = client.crud(op_type, key, value,\n exp=doc_ttl,\n durability=self.durability_level)\n if result[\"status\"] is False:\n self.log_failure(\"Create fail for key %s: %s\"\n % (key, result))\n else:\n if op_type == \"create\":\n verification_dict[\"ops_create\"] += 1\n self.bucket.scopes[\n self.scope_name].collections[\n self.collection_name].num_items += 1\n else:\n verification_dict[\"ops_update\"] += 1\n\n if sync_write_enabled:\n verification_dict[\"sync_write_committed_count\"] += 1\n validate_vb_detail_stats()\n self.validate_test_failure()\n\n if drop_scope:\n self.log.info(\"Dropping scope %s\" % self.scope_name)\n self.bucket_util.drop_scope(self.cluster.master,\n self.bucket,\n self.scope_name)\n else:\n self.log.info(\"Dropping collection %s:%s\" % (self.scope_name,\n self.collection_name))\n self.bucket_util.drop_collection(self.cluster.master,\n self.bucket,\n self.scope_name,\n self.collection_name)\n validate_vb_detail_stats()\n self.validate_test_failure()\n\n # Reset doc_gen itr value for retry purpose\n doc_gen.reset()\n while doc_gen.has_next():\n key, value = doc_gen.next()\n result = client.crud(\"create\", key, value,\n exp=doc_ttl,\n durability=self.durability_level)\n if result[\"status\"] is True:\n self.log_failure(\"Create doc succeeded for dropped collection\")\n validate_vb_detail_stats()\n self.validate_test_failure()\n\n # Re-create the dropped collection\n self.create_scope_collection(create_scope=drop_scope)\n\n if self.collection_name != CbServer.default_collection:\n doc_gen.reset()\n while doc_gen.has_next():\n key, value = doc_gen.next()\n result = client.crud(\"create\", key, value,\n exp=doc_ttl,\n durability=self.durability_level)\n if result[\"status\"] is False:\n self.log_failure(\"Create failed after collection recreate \"\n \"for key %s: %s\" % (key, result[\"error\"]))\n else:\n verification_dict[\"ops_create\"] += 1\n if sync_write_enabled:\n verification_dict[\"sync_write_committed_count\"] += 1\n self.bucket.scopes[\n self.scope_name].collections[\n self.collection_name].num_items += 1\n validate_vb_detail_stats()\n\n # Release the acquired client\n self.sdk_client_pool.release_client(client)\n self.validate_test_failure()",
"def test_index_keys(engine):\n class Model(engine.model):\n id = Column(UUID, hash_key=True)\n other = Column(DateTime, range_key=True)\n another = Column(UUID)\n last = Column(String)\n\n by_last = GlobalSecondaryIndex(hash_key=\"another\", range_key=\"last\")\n by_another = LocalSecondaryIndex(range_key=\"last\")\n\n assert Model.by_last.hash_key is Model.another\n assert Model.by_last.range_key is Model.last\n\n assert Model.by_another.hash_key is Model.id\n assert Model.by_another.range_key is Model.last",
"def test_create_index(self, collection):\n collection.create_index(\"hello\")\n assert collection._indexes == {\"_id_\": ((\"_id\",), {(1,)})}\n\n collection.create_index(\"hello\", unique=True)\n assert collection._indexes == {\n \"_id_\": ((\"_id\",), {(1,)}),\n \"hello_1\": ((\"hello\",), {(\"there\",)}),\n }",
"def ensure_indexes(self):\n self.db_connection.ensure_indexes()",
"def _check_indexes(cls, document: dict) -> bool:\n criteria = [\n field_name\n for field_name in cls._get_index_fields(IndexType.Other, document, \"\")\n ]\n unique_criteria = [\n field_name\n for field_name in cls._get_index_fields(IndexType.Unique, document, \"\")\n ]\n index_name = f\"idx{cls.__collection_name__}\"\n unique_index_name = f\"uidx{cls.__collection_name__}\"\n indexes = cls.__collection__.list_indexes()\n cls.logger.debug(f\"Checking existing indexes: {indexes}\")\n indexes = {\n index[\"name\"]: index[\"key\"].keys()\n for index in indexes\n if \"name\" in index and \"key\" in index\n }\n return (\n (criteria and index_name not in indexes)\n or (not criteria and index_name in indexes)\n or (criteria and index_name in indexes and criteria != indexes[index_name])\n or (unique_criteria and unique_index_name not in indexes)\n or (not unique_criteria and unique_index_name in indexes)\n or (\n unique_criteria\n and unique_index_name in indexes\n and unique_criteria != indexes[unique_index_name]\n )\n )",
"def test_empty_key_raises_error(empty_viewset):\n viewset = empty_viewset\n system1 = viewset.model.add_software_system(name=\"sys1\")\n\n with pytest.raises(ValueError, match=\"A key must be specified\"):\n viewset.create_container_view(\n key=\"\", description=\"container\", software_system=system1\n )",
"def test_index_projections(engine):\n Global, Local = GlobalSecondaryIndex, LocalSecondaryIndex\n\n class Model(engine.model):\n id = Column(UUID, hash_key=True)\n other = Column(UUID, range_key=True)\n another = Column(UUID)\n date = Column(DateTime)\n boolean = Column(Boolean)\n\n g_all = Global(hash_key=\"another\", range_key=\"date\", projection=\"all\")\n g_key = Global(hash_key=\"another\", projection=\"keys_only\")\n g_inc = Global(hash_key=\"other\", projection=[\"another\", \"date\"])\n\n l_all = Local(range_key=\"another\", projection=\"all\")\n l_key = Local(range_key=\"another\", projection=\"keys_only\")\n l_inc = Local(range_key=\"another\", projection=[\"date\"])\n\n uuids = set([Model.id, Model.other, Model.another])\n no_boolean = set(Model.Meta.columns)\n no_boolean.remove(Model.boolean)\n\n assert Model.g_all.projection == \"ALL\"\n assert Model.g_all.projection_attributes == set(Model.Meta.columns)\n assert Model.g_key.projection == \"KEYS_ONLY\"\n assert Model.g_key.projection_attributes == uuids\n assert Model.g_inc.projection == \"INCLUDE\"\n assert Model.g_inc.projection_attributes == no_boolean\n\n assert Model.l_all.projection == \"ALL\"\n assert Model.l_all.projection_attributes == set(Model.Meta.columns)\n assert Model.l_key.projection == \"KEYS_ONLY\"\n assert Model.l_key.projection_attributes == uuids\n assert Model.l_inc.projection == \"INCLUDE\"\n assert Model.l_inc.projection_attributes == no_boolean",
"def test_create(self):\n session = self.prepare(user_table=True)\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 1, \"Expecting 1 materialized view == got\" + str(result)",
"def test_slice_index_error(self):\n self.assertRaises(IndexError, lambda: self.table[0])",
"def test_missing_indices():\n\n data = pd.read_csv(\n \"https://raw.githubusercontent.com/facebook/prophet/main/examples/example_wp_log_peyton_manning.csv\"\n )\n data[\"ds\"] = pd.to_datetime(data[\"ds\"])\n data.set_index(\"ds\", inplace=True)\n data.index = data.index.to_period(\"D\")\n data.info()\n\n exp = TSForecastingExperiment()\n\n with pytest.raises(ValueError) as errmsg:\n exp.setup(data=data, fh=365, session_id=42)\n exceptionmsg = errmsg.value.args[0]\n\n assert \"Data has missing indices!\" in exceptionmsg",
"def test___getitem___invalid_index(self):\n with pytest.raises(TypeError):\n self.Person.objects()[\"a\"]"
]
| [
"0.69561404",
"0.66222596",
"0.64954585",
"0.6400075",
"0.6302062",
"0.6185816",
"0.6120662",
"0.6085855",
"0.60465896",
"0.60010475",
"0.5999763",
"0.59957534",
"0.59345144",
"0.58763325",
"0.5832284",
"0.58175516",
"0.5813598",
"0.5802429",
"0.5792589",
"0.5788268",
"0.5758962",
"0.57469225",
"0.57465094",
"0.5741913",
"0.57091016",
"0.5704433",
"0.56644297",
"0.5648932",
"0.5637306",
"0.56328"
]
| 0.80215204 | 0 |
Test that TTL works as expected for a materialized view. The TTL is propagated properly between tables. | def test_ttl(self):
session = self.prepare()
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 int, v3 int)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v2 AS SELECT * FROM t "
"WHERE v2 IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v2, id)"))
for i in range(100):
session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, {v}, {v}) USING TTL 10".format(v=i))
for i in range(100):
assert_one(session, "SELECT * FROM t_by_v2 WHERE v2 = {}".format(i), [i, i, i, i])
time.sleep(20)
rows = list(session.execute("SELECT * FROM t_by_v2"))
assert len(rows) == 0, "Expected 0 rows but got {}".format(len(rows)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _test_mv_with_default_ttl(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1, node2, node3 = self.cluster.nodelist()\n session.execute('USE ks')\n\n logger.debug(\"MV with same key and unselected columns\")\n session.execute(\"CREATE TABLE t2 (k int, a int, b int, c int, primary key(k, a)) with default_time_to_live=600\")\n session.execute((\"CREATE MATERIALIZED VIEW mv2 AS SELECT k,a,b FROM t2 \"\n \"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (a, k)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n self.update_view(session, \"UPDATE t2 SET c=1 WHERE k=1 AND a=1;\", flush)\n assert_one(session, \"SELECT k,a,b,c FROM t2\", [1, 1, None, 1])\n assert_one(session, \"SELECT k,a,b FROM mv2\", [1, 1, None])\n\n self.update_view(session, \"UPDATE t2 SET c=null WHERE k=1 AND a=1;\", flush)\n assert_none(session, \"SELECT k,a,b,c FROM t2\")\n assert_none(session, \"SELECT k,a,b FROM mv2\")\n\n self.update_view(session, \"UPDATE t2 SET c=2 WHERE k=1 AND a=1;\", flush)\n assert_one(session, \"SELECT k,a,b,c FROM t2\", [1, 1, None, 2])\n assert_one(session, \"SELECT k,a,b FROM mv2\", [1, 1, None])\n\n self.update_view(session, \"DELETE c FROM t2 WHERE k=1 AND a=1;\", flush)\n assert_none(session, \"SELECT k,a,b,c FROM t2\")\n assert_none(session, \"SELECT k,a,b FROM mv2\")\n\n if flush:\n self.cluster.compact()\n assert_none(session, \"SELECT * FROM t2\")\n assert_none(session, \"SELECT * FROM mv2\")\n\n # test with user-provided ttl\n self.update_view(session, \"INSERT INTO t2(k,a,b,c) VALUES(2,2,2,2) USING TTL 5\", flush)\n self.update_view(session, \"UPDATE t2 USING TTL 100 SET c=1 WHERE k=2 AND a=2;\", flush)\n self.update_view(session, \"UPDATE t2 USING TTL 50 SET c=2 WHERE k=2 AND a=2;\", flush)\n self.update_view(session, \"DELETE c FROM t2 WHERE k=2 AND a=2;\", flush)\n\n time.sleep(5)\n\n assert_none(session, \"SELECT k,a,b,c FROM t2\")\n assert_none(session, \"SELECT k,a,b FROM mv2\")\n\n if flush:\n self.cluster.compact()\n assert_none(session, \"SELECT * FROM t2\")\n assert_none(session, \"SELECT * FROM mv2\")\n\n logger.debug(\"MV with extra key\")\n session.execute(\"CREATE TABLE t (k int PRIMARY KEY, a int, b int) with default_time_to_live=600\")\n session.execute((\"CREATE MATERIALIZED VIEW mv AS SELECT * FROM t \"\n \"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 1, 1);\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1])\n\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 2, 1);\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 2, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 2, 1])\n\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 3, 1);\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 3, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 3, 1])\n\n if flush:\n self.cluster.compact()\n assert_one(session, \"SELECT * FROM t\", [1, 3, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 3, 1])\n\n # user provided ttl\n start = time.time()\n self.update_view(session, \"UPDATE t USING TTL 100 SET a = 4 WHERE k = 1\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 4, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 4, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 
100:\n pytest.fail(\"Please increase the 100 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n start = time.time()\n self.update_view(session, \"UPDATE t USING TTL 80 SET a = 5 WHERE k = 1\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 5, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 5, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 80:\n pytest.fail(\"Please increase the 80 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n start = time.time()\n self.update_view(session, \"UPDATE t USING TTL 60 SET a = 6 WHERE k = 1\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 6, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 6, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 60:\n pytest.fail(\"Please increase the 60 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n if flush:\n self.cluster.compact()\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 6, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 6, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 60:\n pytest.fail(\"Please increase the 60 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae",
"def test_select_ttl_failure(self):",
"def test_update_queryset_ttl_success_case(self):",
"def test_ttl_included_on_create(self):\r\n with mock.patch.object(ConnectionPool, 'execute') as m:\r\n TestTTLModel.ttl(60).create(text=\"hello blake\")\r\n\r\n query = m.call_args[0][0]\r\n self.assertIn(\"USING TTL\", query)",
"def _test_base_column_in_view_pk_complex_timestamp(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1, node2, node3 = self.cluster.nodelist()\n\n session.execute('USE ks')\n session.execute(\"CREATE TABLE t (k int PRIMARY KEY, a int, b int)\")\n session.execute((\"CREATE MATERIALIZED VIEW mv AS SELECT * FROM t \"\n \"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n # Set initial values TS=1\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 1, 1) USING TIMESTAMP 1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1])\n\n # increase b ts to 10\n self.update_view(session, \"UPDATE t USING TIMESTAMP 10 SET b = 2 WHERE k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 1, 2, 10])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 1, 2, 10])\n\n # switch entries. shadow a = 1, insert a = 2\n self.update_view(session, \"UPDATE t USING TIMESTAMP 2 SET a = 2 WHERE k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 2, 2, 10])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 2, 2, 10])\n\n # switch entries. shadow a = 2, insert a = 1\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET a = 1 WHERE k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 1, 2, 10])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 1, 2, 10])\n\n # switch entries. shadow a = 1, insert a = 2\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 SET a = 2 WHERE k = 1;\", flush, compact=True)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 2, 2, 10])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 2, 2, 10])\n\n # able to shadow view row even if base-column in view pk's ts is smaller than row timestamp\n # set row TS = 20, a@6, b@20\n self.update_view(session, \"DELETE FROM t USING TIMESTAMP 5 where k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, None, 2, 10])\n assert_none(session, \"SELECT k,a,b,writetime(b) FROM mv\")\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 1, 1) USING TIMESTAMP 6;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 1, 2, 10])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 1, 2, 10])\n self.update_view(session, \"INSERT INTO t (k, b) VALUES (1, 1) USING TIMESTAMP 20;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 1, 1, 20])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 1, 1, 20])\n\n # switch entries. shadow a = 1, insert a = 2\n self.update_view(session, \"UPDATE t USING TIMESTAMP 7 SET a = 2 WHERE k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(a),writetime(b) FROM t\", [1, 2, 1, 7, 20])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 2, 1, 20])\n\n # switch entries. 
shadow a = 2, insert a = 1\n self.update_view(session, \"UPDATE t USING TIMESTAMP 8 SET a = 1 WHERE k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(a),writetime(b) FROM t\", [1, 1, 1, 8, 20])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 1, 1, 20])\n\n # create another view row\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (2, 2, 2);\", flush)\n assert_one(session, \"SELECT k,a,b FROM t WHERE k = 2\", [2, 2, 2])\n assert_one(session, \"SELECT k,a,b FROM mv WHERE k = 2\", [2, 2, 2])\n\n # stop node2, node3\n logger.debug('Shutdown node2')\n node2.stop(wait_other_notice=True)\n logger.debug('Shutdown node3')\n node3.stop(wait_other_notice=True)\n # shadow a = 1, create a = 2\n query = SimpleStatement(\"UPDATE t USING TIMESTAMP 9 SET a = 2 WHERE k = 1\", consistency_level=ConsistencyLevel.ONE)\n self.update_view(session, query, flush)\n # shadow (a=2, k=2) after 3 second\n query = SimpleStatement(\"UPDATE t USING TTL 3 SET a = 2 WHERE k = 2\", consistency_level=ConsistencyLevel.ONE)\n self.update_view(session, query, flush)\n\n logger.debug('Starting node2')\n node2.start(wait_for_binary_proto=True)\n logger.debug('Starting node3')\n node3.start(wait_for_binary_proto=True)\n\n # For k = 1 & a = 1, We should get a digest mismatch of tombstones and repaired\n query = SimpleStatement(\"SELECT * FROM mv WHERE k = 1 AND a = 1\", consistency_level=ConsistencyLevel.ALL)\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), True)\n assert 0 == len(result.current_rows)\n\n # For k = 1 & a = 1, second time no digest mismatch\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n assert_none(session, \"SELECT * FROM mv WHERE k = 1 AND a = 1\")\n assert 0 == len(result.current_rows)\n\n # For k = 1 & a = 2, We should get a digest mismatch of data and repaired for a = 2\n query = SimpleStatement(\"SELECT * FROM mv WHERE k = 1 AND a = 2\", consistency_level=ConsistencyLevel.ALL)\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), True)\n assert 1 == len(result.current_rows)\n\n # For k = 1 & a = 2, second time no digest mismatch\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n assert 1 == len(result.current_rows)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv WHERE k = 1\", [1, 2, 1, 20])\n\n time.sleep(3)\n # For k = 2 & a = 2, We should get a digest mismatch of expired and repaired\n query = SimpleStatement(\"SELECT * FROM mv WHERE k = 2 AND a = 2\", consistency_level=ConsistencyLevel.ALL)\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), True)\n logger.debug(result.current_rows)\n assert 0 == len(result.current_rows)\n\n # For k = 2 & a = 2, second time no digest mismatch\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n assert 0 == len(result.current_rows)",
"def test_view_metadata_cleanup(self):\n session = self.prepare(rf=2, nodes=2)\n\n def populate_data(session, rows):\n logger.debug(\"populate base data\")\n for v in range(rows):\n session.execute(\"INSERT INTO t(k,c,a,b,e,f) VALUES({v},{v},{v},{v},{v},{v})\".format(v=v))\n\n def verify_data(session, rows, views):\n logger.debug(\"verify view data\")\n for v in range(rows):\n for view in range(views):\n assert_one(session, \"SELECT * FROM mv{} WHERE k={v} AND c={v}\".format(view, v=v), [v, v, v, v, v, v])\n\n def create_keyspace(session, ks=\"ks1\", rf=2):\n create_ks(session, ks, rf)\n\n def create_table(session):\n logger.debug(\"create base table\")\n session.execute(\"CREATE TABLE t (k int, c int, a int, b int, e int, f int, primary key(k, c))\")\n\n def create_views(session, views, keyspace=\"ks1\"):\n logger.debug(\"create view\")\n for view in range(views):\n session.execute(\"CREATE MATERIALIZED VIEW mv{} AS SELECT * FROM t \"\n \"WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c,k)\".format(view),\n timeout=60)\n self._wait_for_view(keyspace, \"mv{}\".format(view))\n\n def drop_keyspace(session, keyspace=\"ks1\"):\n logger.debug(\"drop keyspace {}\".format(keyspace))\n session.execute(\"DROP KEYSPACE IF EXISTS {}\".format(keyspace),\n timeout=60)\n\n def drop_views(session, views):\n logger.debug(\"drop all views\")\n for view in range(views):\n session.execute(\"DROP MATERIALIZED VIEW IF EXISTS mv{}\".format(view))\n\n rows = 100\n views = 5\n\n create_keyspace(session)\n create_table(session)\n populate_data(session, rows)\n create_views(session, views)\n verify_data(session, rows, views)\n\n self._assert_view_meta(session, views)\n drop_keyspace(session)\n self._assert_view_meta(session, views, exists=False)\n\n create_keyspace(session)\n create_table(session)\n populate_data(session, rows)\n create_views(session, views)\n verify_data(session, rows, views)\n\n self._assert_view_meta(session, views)\n drop_views(session, views)\n self._assert_view_meta(session, views, exists=False)",
"def test_view_tombstone(self):\n\n self.prepare(rf=3, options={'hinted_handoff_enabled': False})\n node1, node2, node3 = self.cluster.nodelist()\n\n session = self.patient_exclusive_cql_connection(node1)\n session.max_trace_wait = 120\n session.execute('USE ks')\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n # Set initial values TS=0, verify\n session.execute(SimpleStatement(\"INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 0\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'a', 3.0]\n )\n\n session.execute(SimpleStatement(\"INSERT INTO t (id, v2) VALUES (1, 'b') USING TIMESTAMP 1\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0]\n )\n\n # change v's value and TS=3, tombstones v=1 and adds v=0 record\n session.execute(SimpleStatement(\"UPDATE t USING TIMESTAMP 3 SET v = 0 WHERE id = 1\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n\n assert_none(session, \"SELECT * FROM t_by_v WHERE v = 1\")\n\n logger.debug('Shutdown node2')\n node2.stop(wait_other_notice=True)\n\n session.execute(SimpleStatement(\"UPDATE t USING TIMESTAMP 4 SET v = 1 WHERE id = 1\",\n consistency_level=ConsistencyLevel.QUORUM))\n self._replay_batchlogs()\n\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0]\n )\n\n node2.start(wait_for_binary_proto=True)\n\n # We should get a digest mismatch\n query = SimpleStatement(\"SELECT * FROM t_by_v WHERE v = 1\",\n consistency_level=ConsistencyLevel.ALL)\n\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), True)\n\n # We should not get a digest mismatch the second time\n query = SimpleStatement(\"SELECT * FROM t_by_v WHERE v = 1\", consistency_level=ConsistencyLevel.ALL)\n\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n\n # Verify values one last time\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0],\n cl=ConsistencyLevel.ALL\n )",
"def test_get_ttl(self):\n self.now = time.time()\n with patch('time.time', self.fake_time):\n storage = Storage()\n keys_to_set = {'1': 'hello',\n '2': 'bye',\n '3': [1, 2, 'three'],\n '4': {1: 'one', 2: 'two'}}\n moes = {'1': time.time() + 5, '4': time.time() + 10}\n for key in keys_to_set.keys():\n storage.set(key, keys_to_set[key], moes.get(key))\n # test at moment t\n self.assertEqual(keys_to_set['1'], storage.get('1'), \"Key '1' should still exist.\")\n # test at moment t+6, one key should expire\n self.now += 6\n keys_to_set.pop('1')\n moes.pop('1')\n self.assertRaises(StorageKeyError, storage.get, '1')\n self.assertEqual(keys_to_set['4'], storage.get('4'), \"Key '4' should still exist.\")\n self.assertEqual(keys_to_set, storage._keys_dict, \"Remaining keys are wrong\")\n self.assertEqual(moes, storage._moe_dict, \"Remaining moes are wrong\")\n # test at moment t+11\n self.now += 5\n keys_to_set.pop('4')\n moes.pop('4')\n self.assertRaises(StorageKeyError, storage.get, '1')\n self.assertRaises(StorageKeyError, storage.get, '4')\n self.assertEqual(keys_to_set, storage._keys_dict, \"Remaining keys are wrong\")\n self.assertEqual(moes, storage._moe_dict, \"Remaining moes are wrong\")",
"def test_create(self):\n session = self.prepare(user_table=True)\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 1, \"Expecting 1 materialized view == got\" + str(result)",
"def _test_no_base_column_in_view_pk_complex_timestamp(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1, node2, node3 = self.cluster.nodelist()\n\n session.execute('USE ks')\n session.execute(\"CREATE TABLE t (k int, c int, a int, b int, e int, f int, primary key(k, c))\")\n session.execute((\"CREATE MATERIALIZED VIEW mv AS SELECT k,c,a,b FROM t \"\n \"WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c, k)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n # update unselected, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 1 SET e=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, 1, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # remove unselected, add selected column, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 2 SET e=null, b=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, 1, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, 1])\n\n # remove selected column, view row is removed\n self.update_view(session, \"UPDATE t USING TIMESTAMP 2 SET e=null, b=null WHERE k=1 AND c=1;\", flush)\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")\n\n # update unselected with ts=3, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET f=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # insert livenesssInfo, view row should be alive\n self.update_view(session, \"INSERT INTO t(k,c) VALUES(1,1) USING TIMESTAMP 3\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # remove unselected, view row should be alive because of base livenessInfo alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET f=null WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # add selected column, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET a=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n\n # update unselected, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 SET f=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n\n # delete with ts=3, view row should be alive due to unselected@ts4\n self.update_view(session, \"DELETE FROM t USING TIMESTAMP 3 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # remove unselected, view row should be removed\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 SET f=null WHERE k=1 AND c=1;\", flush)\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")\n\n # add selected with ts=7, view row is alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 7 SET b=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, 
\"SELECT * FROM t\", [1, 1, None, 1, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, 1])\n\n # remove selected with ts=7, view row is dead\n self.update_view(session, \"UPDATE t USING TIMESTAMP 7 SET b=null WHERE k=1 AND c=1;\", flush)\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")\n\n # add selected with ts=5, view row is alive (selected column should not affects each other)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 5 SET a=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n\n start = time.time()\n # add selected with ttl=30 (we apparently need a long ttl because the flushing etc that self.update_view does can take a long time)\n update_time = self.update_view(session, \"UPDATE t USING TTL 30 SET a=1 WHERE k=1 AND c=1;\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n except AssertionError as ae:\n if (time.time() - start) >= 30:\n pytest.fail(\"Please increase the 30 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n wait_time = update_time + 30 - time.time()\n if wait_time > 0:\n time.sleep(wait_time)\n\n start = time.time()\n # update unselected with ttl=30, view row should be alive\n update_time = self.update_view(session, \"UPDATE t USING TTL 30 SET f=1 WHERE k=1 AND c=1;\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n except AssertionError as ae:\n if (time.time() - start) >= 30:\n pytest.fail(\"Please increase the 30 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n wait_time = update_time + 30 - time.time()\n if wait_time > 0:\n time.sleep(wait_time)\n\n # view row still alive due to base livenessInfo\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")",
"def test_instance_is_returned(self):\r\n o = TestTTLModel.create(text=\"whatever\")\r\n o.text = \"new stuff\"\r\n o = o.ttl(60)\r\n self.assertEqual(60, o._ttl)",
"def test_keys_ttl(self):\n self.now = time.time()\n with patch('time.time', self.fake_time):\n storage = Storage()\n storage.set('1', 'one', self.now + 5)\n storage.set('2', 'two')\n storage.set('3', 'three', self.now + 10)\n self.now += 6\n self.assertEqual(['2','3'], storage.keys('*'))\n self.assertEqual(['2','3'], list(storage._keys_dict.keys()))",
"def test_invalidate_template_cache_in_virtualization_realm(self):\n pass",
"def test_maxttl_setting(self):\n maxttl = int(self.input.param(\"maxttl\", None))\n self.run_multi_operations(buckets = self.buckets,\n query_definitions = self.query_definitions,\n create_index = True, drop_index = False,\n query_with_explain = False, query = False)\n self.sleep(20)\n self._verify_bucket_count_with_index_count()\n self.sleep(maxttl, \"waiting for docs to be expired automatically per maxttl rule\")\n self._expiry_pager(self.master)\n self.sleep(60, \"wait for expiry pager to run on all nodes...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Docs in source bucket is {0} after maxttl has elapsed\".format(items))\n if items != 0:\n self.fail(\"Docs in source bucket is not 0 after maxttl has elapsed\")\n self._verify_bucket_count_with_index_count()",
"def test_complex_mv_select_statements(self):\n cluster = self.cluster\n cluster.set_configuration_options({'enable_materialized_views': 'true'})\n cluster.populate(3).start()\n node1 = cluster.nodelist()[0]\n session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)\n\n logger.debug(\"Creating keyspace\")\n session.execute(\"CREATE KEYSPACE mvtest WITH replication = \"\n \"{'class': 'SimpleStrategy', 'replication_factor': '3'}\")\n session.execute('USE mvtest')\n\n mv_primary_keys = [\"((a, b), c)\",\n \"((b, a), c)\",\n \"(a, b, c)\",\n \"(c, b, a)\",\n \"((c, a), b)\"]\n\n for mv_primary_key in mv_primary_keys:\n\n session.execute(\"CREATE TABLE test (a int, b int, c int, d int, PRIMARY KEY (a, b, c))\")\n\n insert_stmt = session.prepare(\"INSERT INTO test (a, b, c, d) VALUES (?, ?, ?, ?)\")\n update_stmt = session.prepare(\"UPDATE test SET d = ? WHERE a = ? AND b = ? AND c = ?\")\n delete_stmt1 = session.prepare(\"DELETE FROM test WHERE a = ? AND b = ? AND c = ?\")\n delete_stmt2 = session.prepare(\"DELETE FROM test WHERE a = ?\")\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n rows = [(0, 0, 0, 0),\n (0, 0, 1, 0),\n (0, 1, 0, 0),\n (0, 1, 1, 0),\n (1, 0, 0, 0),\n (1, 0, 1, 0),\n (1, 1, -1, 0),\n (1, 1, 0, 0),\n (1, 1, 1, 0)]\n\n for row in rows:\n session.execute(insert_stmt, row)\n\n logger.debug(\"Testing MV primary key: {}\".format(mv_primary_key))\n\n session.execute(\"CREATE MATERIALIZED VIEW mv AS SELECT * FROM test WHERE \"\n \"a = 1 AND b IS NOT NULL AND c = 1 PRIMARY KEY {}\".format(mv_primary_key))\n time.sleep(3)\n\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # insert new rows that does not match the filter\n session.execute(insert_stmt, (0, 0, 1, 0))\n session.execute(insert_stmt, (1, 1, 0, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # insert new row that does match the filter\n session.execute(insert_stmt, (1, 2, 1, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # update rows that does not match the filter\n session.execute(update_stmt, (1, 1, -1, 0))\n session.execute(update_stmt, (0, 1, 1, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # update a row that does match the filter\n session.execute(update_stmt, (2, 1, 1, 1))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete rows that does not match the filter\n session.execute(delete_stmt1, (1, 1, -1))\n session.execute(delete_stmt1, (2, 0, 1))\n session.execute(delete_stmt2, (0,))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete a row that does match the filter\n session.execute(delete_stmt1, (1, 1, 1))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete a partition that matches the filter\n session.execute(delete_stmt2, (1,))\n assert_all(session, \"SELECT a, b, c, d FROM mv\", [], 
cl=ConsistencyLevel.QUORUM)\n\n # Cleanup\n session.execute(\"DROP MATERIALIZED VIEW mv\")\n session.execute(\"DROP TABLE test\")",
"def testTimestamps(self):\n predicate = \"metadata:predicate\"\n subject = \"aff4:/metadata:8\"\n\n # Extend the range of valid timestamps returned from the table to account\n # for potential clock skew.\n start = long(time.time() - 60) * 1e6\n data_store.DB.Set(subject, predicate, \"1\", token=self.token)\n\n (stored, ts) = data_store.DB.Resolve(subject, predicate, token=self.token)\n\n # Check the time is reasonable\n end = long(time.time() + 60) * 1e6\n\n self.assert_(ts >= start and ts <= end)\n self.assertEqual(stored, \"1\")",
"def _test_expired_liveness_with_limit(self, rf, nodes):\n session = self.prepare(rf=rf, nodes=nodes, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1 = self.cluster.nodelist()[0]\n\n session.execute('USE ks')\n session.execute(\"CREATE TABLE t (k int PRIMARY KEY, a int, b int)\")\n session.execute((\"CREATE MATERIALIZED VIEW mv AS SELECT * FROM t \"\n \"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n for k in range(100):\n session.execute(\"INSERT INTO t (k, a, b) VALUES ({}, {}, {})\".format(k, k, k))\n\n # generate view row with expired liveness except for row 50 and 99\n for k in range(100):\n if k == 50 or k == 99:\n continue\n session.execute(\"DELETE a FROM t where k = {};\".format(k))\n\n # there should be 2 live data\n assert_one(session, \"SELECT k,a,b FROM mv limit 1\", [50, 50, 50])\n assert_all(session, \"SELECT k,a,b FROM mv limit 2\", [[50, 50, 50], [99, 99, 99]])\n assert_all(session, \"SELECT k,a,b FROM mv\", [[50, 50, 50], [99, 99, 99]])\n\n # verify IN\n keys = range(100)\n assert_one(session, \"SELECT k,a,b FROM mv WHERE k in ({}) limit 1\".format(', '.join(str(x) for x in keys)),\n [50, 50, 50])\n assert_all(session, \"SELECT k,a,b FROM mv WHERE k in ({}) limit 2\".format(', '.join(str(x) for x in keys)),\n [[50, 50, 50], [99, 99, 99]])\n assert_all(session, \"SELECT k,a,b FROM mv WHERE k in ({})\".format(', '.join(str(x) for x in keys)),\n [[50, 50, 50], [99, 99, 99]])\n\n # verify fetch size\n session.default_fetch_size = 1\n assert_one(session, \"SELECT k,a,b FROM mv limit 1\", [50, 50, 50])\n assert_all(session, \"SELECT k,a,b FROM mv limit 2\", [[50, 50, 50], [99, 99, 99]])\n assert_all(session, \"SELECT k,a,b FROM mv\", [[50, 50, 50], [99, 99, 99]])",
"def set_ttl(self, ttl):",
"def test_really_complex_repair(self):\n session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)\n node1, node2, node3, node4, node5 = self.cluster.nodelist()\n\n # we create the base table with gc_grace_seconds=5 so batchlog will expire after 5 seconds\n session.execute(\"CREATE TABLE ks.t (id int, v int, v2 text, v3 decimal, PRIMARY KEY(id, v, v2))\"\n \"WITH gc_grace_seconds = 1\")\n session.execute((\"CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL AND v IS NOT NULL AND \"\n \"v2 IS NOT NULL PRIMARY KEY (v2, v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Shutdown node2 and node3')\n node2.stop(wait_other_notice=True)\n node3.stop(wait_other_notice=True)\n\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0)\")\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'a', 3.0)\")\n self._replay_batchlogs()\n logger.debug('Verify the data in the MV on node1 with CL=ONE')\n assert_all(session, \"SELECT * FROM ks.t_by_v WHERE v2 = 'a'\", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]])\n\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'b', 3.0)\")\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'b', 3.0)\")\n self._replay_batchlogs()\n logger.debug('Verify the data in the MV on node1 with CL=ONE')\n assert_all(session, \"SELECT * FROM ks.t_by_v WHERE v2 = 'b'\", [['b', 1, 1, 3.0], ['b', 2, 2, 3.0]])\n\n session.shutdown()\n\n logger.debug('Shutdown node1, node4 and node5')\n node1.stop()\n node4.stop()\n node5.stop()\n\n logger.debug('Start nodes 2 and 3')\n node2.start()\n node3.start(wait_for_binary_proto=True)\n\n session2 = self.patient_cql_connection(node2)\n session2.execute('USE ks')\n\n logger.debug('Verify the data in the MV on node2 with CL=ONE. 
No rows should be found.')\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'a'\")\n\n logger.debug('Write new data in node2 that overlap those in node1')\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'c', 3.0)\")\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'c', 3.0)\")\n self._replay_batchlogs()\n assert_all(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'c'\", [['c', 1, 1, 3.0], ['c', 2, 2, 3.0]])\n\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'd', 3.0)\")\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'd', 3.0)\")\n self._replay_batchlogs()\n assert_all(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'd'\", [['d', 1, 1, 3.0], ['d', 2, 2, 3.0]])\n\n logger.debug(\"Composite delete of everything\")\n session2.execute(\"DELETE FROM ks.t WHERE id = 1 and v = 1\")\n session2.execute(\"DELETE FROM ks.t WHERE id = 2 and v = 2\")\n self._replay_batchlogs()\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'c'\")\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'd'\")\n\n logger.debug('Wait for batchlogs to expire from node2 and node3')\n time.sleep(5)\n\n logger.debug('Start remaining nodes')\n node1.start(wait_for_binary_proto=True)\n node4.start(wait_for_binary_proto=True)\n node5.start(wait_for_binary_proto=True)\n\n # at this point the data isn't repaired so we have an inconsistency.\n # this value should return None\n assert_all(\n session2,\n \"SELECT * FROM ks.t_by_v WHERE v2 = 'a'\", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]],\n cl=ConsistencyLevel.QUORUM\n )\n\n logger.debug('Run global repair on node1')\n node1.repair()\n\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'a'\", cl=ConsistencyLevel.QUORUM)",
"def test_record_eq_record_different_ttl(self):\n zone = Zone('test.example.com')\n record_current = Record(zone, 'test-record', {'type': 'A', 'ttl': 30})\n record_desired = Record(zone, 'test-record', {'type': 'A', 'ttl': 300})\n self.assertTrue(record_current != record_desired)",
"def _test_base_column_in_view_pk_commutative_tombstone_(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1 = self.cluster.nodelist()[0]\n\n session.execute('USE ks')\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n for node in self.cluster.nodelist():\n node.nodetool(\"disableautocompaction\")\n\n # sstable 1, Set initial values TS=1\n self.update_view(session, \"INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 1\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, 'a', 3.0])\n\n # sstable 2, change v's value and TS=2, tombstones v=1 and adds v=0 record\n self.update_view(session, \"DELETE FROM t USING TIMESTAMP 2 WHERE id = 1;\", flush)\n assert_none(session, \"SELECT * FROM t_by_v\")\n assert_none(session, \"SELECT * FROM t\")\n\n # sstable 3, tombstones of mv created by base deletion should remain.\n self.update_view(session, \"INSERT INTO t (id, v) VALUES (1, 1) USING TIMESTAMP 3\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None])\n\n # sstable 4, shadow view row (id=1, v=1), insert (id=1, v=2, ts=4)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 set v = 2 WHERE id = 1;\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [2, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 2, None, None])\n\n # sstable 5, shadow view row (id=1, v=2), insert (id=1, v=1 ts=5)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 5 set v = 1 WHERE id = 1;\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None]) # data deleted by row-tombstone@2 should not resurrect\n\n if flush:\n self.cluster.compact()\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None]) # data deleted by row-tombstone@2 should not resurrect\n\n # shadow view row (id=1, v=1)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 5 set v = null WHERE id = 1;\", flush)\n assert_none(session, \"SELECT * FROM t_by_v\")\n assert_one(session, \"SELECT * FROM t\", [1, None, None, None])",
"def test_gcgs_validation(self):\n session = self.prepare(user_table=True)\n\n # Shouldn't be able to alter the gc_grace_seconds of the base table to 0\n assert_invalid(session,\n \"ALTER TABLE users WITH gc_grace_seconds = 0\",\n \"Cannot alter gc_grace_seconds of the base table of a materialized view \"\n \"to 0, since this value is used to TTL undelivered updates. Setting \"\n \"gc_grace_seconds too low might cause undelivered updates to expire \"\n \"before being replayed.\")\n\n # But can alter the gc_grace_seconds of the bease table to a value != 0\n session.execute(\"ALTER TABLE users WITH gc_grace_seconds = 10\")\n\n # Shouldn't be able to alter the gc_grace_seconds of the MV to 0\n assert_invalid(session,\n \"ALTER MATERIALIZED VIEW users_by_state WITH gc_grace_seconds = 0\",\n \"Cannot alter gc_grace_seconds of a materialized view to 0, since \"\n \"this value is used to TTL undelivered updates. Setting gc_grace_seconds \"\n \"too low might cause undelivered updates to expire before being replayed.\")\n\n # Now let's drop MV\n session.execute(\"DROP MATERIALIZED VIEW ks.users_by_state;\")\n\n # Now we should be able to set the gc_grace_seconds of the base table to 0\n session.execute(\"ALTER TABLE users WITH gc_grace_seconds = 0\")\n\n # Now we shouldn't be able to create a new MV on this table\n assert_invalid(session,\n \"CREATE MATERIALIZED VIEW users_by_state AS \"\n \"SELECT * FROM users WHERE STATE IS NOT NULL AND username IS NOT NULL \"\n \"PRIMARY KEY (state, username)\",\n \"Cannot create materialized view 'users_by_state' for base table 'users' \"\n \"with gc_grace_seconds of 0, since this value is used to TTL undelivered \"\n \"updates. Setting gc_grace_seconds too low might cause undelivered updates\"\n \" to expire before being replayed.\")",
"def test_crc_check_chance(self):\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL \"\n \"AND id IS NOT NULL PRIMARY KEY (v, id) WITH crc_check_chance = 0.5\"))\n\n assert_crc_check_chance_equal(session, \"t_by_v\", 0.5, view=True)\n\n session.execute(\"ALTER MATERIALIZED VIEW t_by_v WITH crc_check_chance = 0.3\")\n\n assert_crc_check_chance_equal(session, \"t_by_v\", 0.3, view=True)",
"def _test_base_view_consistency_on_crash(self, fail_phase):\n\n self.cluster.set_batch_commitlog(enabled=True, use_batch_window = self.cluster.version() < '5.0')\n self.fixture_dtest_setup.ignore_log_patterns = [r'Dummy failure', r\"Failed to force-recycle all segments\"]\n self.prepare(rf=1, install_byteman=True)\n node1, node2, node3 = self.cluster.nodelist()\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Make node1 fail {} view writes'.format(fail_phase))\n node1.byteman_submit([mk_bman_path('fail_{}_view_write.btm'.format(fail_phase))])\n\n logger.debug('Write 1000 rows - all node1 writes should fail')\n\n failed = False\n for i in range(1, 1000):\n try:\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) USING TIMESTAMP {v}\".format(v=i))\n except WriteFailure:\n failed = True\n\n assert failed, \"Should fail at least once.\"\n assert node1.grep_log(\"Dummy failure\"), \"Should throw Dummy failure\"\n\n missing_entries = 0\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n for i in range(1, 1000):\n view_entry = rows_to_list(session.execute(SimpleStatement(\"SELECT * FROM t_by_v WHERE id = {} AND v = {}\".format(i, i),\n consistency_level=ConsistencyLevel.ONE)))\n base_entry = rows_to_list(session.execute(SimpleStatement(\"SELECT * FROM t WHERE id = {}\".format(i),\n consistency_level=ConsistencyLevel.ONE)))\n\n if not base_entry:\n missing_entries += 1\n if not view_entry:\n missing_entries += 1\n\n logger.debug(\"Missing entries {}\".format(missing_entries))\n assert missing_entries > 0\n\n logger.debug('Restarting node1 to ensure commit log is replayed')\n node1.stop(wait_other_notice=True)\n # Set batchlog.replay_timeout_seconds=1 so we can ensure batchlog will be replayed below\n node1.start(jvm_args=[\"-Dcassandra.batchlog.replay_timeout_in_ms=1\"])\n\n logger.debug('Replay batchlogs')\n time.sleep(0.001) # Wait batchlog.replay_timeout_in_ms=1 (ms)\n self._replay_batchlogs()\n\n logger.debug('Verify that both the base table entry and view are present after commit and batchlog replay')\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n for i in range(1, 1000):\n view_entry = rows_to_list(session.execute(SimpleStatement(\"SELECT * FROM t_by_v WHERE id = {} AND v = {}\".format(i, i),\n consistency_level=ConsistencyLevel.ONE)))\n base_entry = rows_to_list(session.execute(SimpleStatement(\"SELECT * FROM t WHERE id = {}\".format(i),\n consistency_level=ConsistencyLevel.ONE)))\n\n assert base_entry, \"Both base {} and view entry {} should exist.\".format(base_entry, view_entry)\n assert view_entry, \"Both base {} and view entry {} should exist.\".format(base_entry, view_entry)",
"def test_add_node_after_wide_mv_with_range_deletions(self):\n\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int, v int, PRIMARY KEY (id, v)) WITH compaction = { 'class': 'SizeTieredCompactionStrategy', 'enabled': 'false' }\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n for i in range(10):\n for j in range(100):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=j))\n\n self.cluster.flush()\n\n for i in range(10):\n for j in range(100):\n assert_one(session, \"SELECT * FROM t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n for i in range(10):\n for j in range(100):\n if j % 10 == 0:\n session.execute(\"DELETE FROM t WHERE id = {} AND v >= {} and v < {}\".format(i, j, j + 2))\n\n self.cluster.flush()\n\n for i in range(10):\n for j in range(100):\n if j % 10 == 0 or (j - 1) % 10 == 0:\n assert_none(session, \"SELECT * FROM t WHERE id = {} and v = {}\".format(i, j))\n assert_none(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j))\n else:\n assert_one(session, \"SELECT * FROM t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n node4 = new_node(self.cluster, data_center=\"dc1\")\n node4.set_configuration_options(values={'max_mutation_size_in_kb': 25}) # CASSANDRA-11670\n logger.debug(\"Start join at {}\".format(time.strftime(\"%H:%M:%S\")))\n node4.start(wait_for_binary_proto=True, jvm_args=[\"-Dcassandra.migration_task_wait_in_seconds={}\".format(MIGRATION_WAIT)])\n\n session2 = self.patient_exclusive_cql_connection(node4)\n\n for i in range(10):\n for j in range(100):\n if j % 10 == 0 or (j - 1) % 10 == 0:\n assert_none(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j))\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j))\n else:\n assert_one(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n for i in range(10):\n for j in range(100, 110):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=j))\n\n for i in range(10):\n for j in range(110):\n if j < 100 and (j % 10 == 0 or (j - 1) % 10 == 0):\n assert_none(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j))\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j))\n else:\n assert_one(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])",
"def propagate_view_creation_over_non_existing_table(self):\n\n cluster = self.cluster\n cluster.populate(3)\n cluster.start()\n node1, node2, node3 = self.cluster.nodelist()\n session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)\n create_ks(session, 'ks', 3)\n\n session.execute('CREATE TABLE users (username varchar PRIMARY KEY, state varchar)')\n\n # create a materialized view only in nodes 1 and 2\n node3.stop(wait_other_notice=True)\n session.execute(('CREATE MATERIALIZED VIEW users_by_state AS '\n 'SELECT * FROM users WHERE state IS NOT NULL AND username IS NOT NULL '\n 'PRIMARY KEY (state, username)'))\n\n # drop the base table only in node 3\n node1.stop(wait_other_notice=True)\n node2.stop(wait_other_notice=True)\n node3.start(wait_for_binary_proto=True)\n session = self.patient_cql_connection(node3, consistency_level=ConsistencyLevel.QUORUM)\n session.execute('DROP TABLE ks.users')\n\n # restart the cluster\n cluster.stop()\n cluster.start()\n\n # node3 should have received and ignored the creation of the MV over the dropped table\n assert node3.grep_log('Not adding view users_by_state because the base table')",
"def test_trust_expire(self):\n expires_at = timeutils.utcnow() + datetime.timedelta(hours=1)\n # NOTE(ylobankov) In some cases the expiry time may be rounded up\n # because of microseconds. In fact, it depends on database and its\n # version. At least MySQL 5.6.16 does this.\n # For example, when creating a trust, we will set the expiry time of\n # the trust to 2015-02-17T17:34:01.907051Z. However, if we make a GET\n # request on the trust, the response will contain the time rounded up\n # to 2015-02-17T17:34:02.000000Z. That is why we set microsecond to\n # 0 when we invoke isoformat to avoid problems with rounding.\n expires_at = expires_at.replace(microsecond=0)\n # NOTE(ekhugen) Python datetime does not support military timezones\n # since we used UTC we'll add the Z so our compare works.\n expires_str = expires_at.isoformat() + 'Z'\n\n trust = self.create_trust(expires=expires_str)\n self.validate_trust(trust, expires=expires_str)\n\n trust_get = self.show_trust()\n\n self.validate_trust(trust_get, expires=expires_str)\n\n self.check_trust_roles()",
"def test_lwt(self):\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"Inserting initial data using IF NOT EXISTS\")\n for i in range(1000):\n session.execute(\n \"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS\".format(v=i)\n )\n self._replay_batchlogs()\n\n logger.debug(\"All rows should have been inserted\")\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )\n\n logger.debug(\"Tyring to UpInsert data with a different value using IF NOT EXISTS\")\n for i in range(1000):\n v = i * 2\n session.execute(\n \"INSERT INTO t (id, v, v2, v3) VALUES ({id}, {v}, 'a', 3.0) IF NOT EXISTS\".format(id=i, v=v)\n )\n self._replay_batchlogs()\n\n logger.debug(\"No rows should have changed\")\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )\n\n logger.debug(\"Update the 10 first rows with a different value\")\n for i in range(1000):\n v = i + 2000\n session.execute(\n \"UPDATE t SET v={v} WHERE id = {id} IF v < 10\".format(id=i, v=v)\n )\n self._replay_batchlogs()\n\n logger.debug(\"Verify that only the 10 first rows changed.\")\n results = list(session.execute(\"SELECT * FROM t_by_v;\"))\n assert len(results) == 1000\n for i in range(1000):\n v = i + 2000 if i < 10 else i\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(v),\n [v, i, 'a', 3.0]\n )\n\n logger.debug(\"Deleting the first 10 rows\")\n for i in range(1000):\n v = i + 2000\n session.execute(\n \"DELETE FROM t WHERE id = {id} IF v = {v} \".format(id=i, v=v)\n )\n self._replay_batchlogs()\n\n logger.debug(\"Verify that only the 10 first rows have been deleted.\")\n results = list(session.execute(\"SELECT * FROM t_by_v;\"))\n assert len(results) == 990\n for i in range(10, 1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )",
"def test_timed_reset(self):\n time = 0.005\n cache = TimedCache(max_age=time)\n\n cache[1] = 1\n assert 1 in cache\n assert cache[1] == 1\n sleep(time / 2)\n assert 1 in cache\n assert cache[1] == 1\n cache[1] = 1\n sleep(time / 2)\n assert 1 in cache\n assert cache[1] == 1\n sleep(time / 2)\n assert 1 not in cache\n with pytest.raises(KeyError):\n assert cache[1]",
"def test_queryset_total_time_no_records(db):\n assert models.TimeRecord.objects.total_time() == datetime.timedelta(0)"
]
| [
"0.7233449",
"0.6468026",
"0.64311516",
"0.6386992",
"0.62730235",
"0.6260327",
"0.6132396",
"0.6131993",
"0.6101104",
"0.6042274",
"0.593731",
"0.584508",
"0.581179",
"0.5782146",
"0.57157606",
"0.5675802",
"0.5638631",
"0.55612683",
"0.55408764",
"0.5497256",
"0.5477924",
"0.54489166",
"0.5428977",
"0.5416268",
"0.541446",
"0.5413444",
"0.5409802",
"0.54043204",
"0.54026264",
"0.53647393"
]
| 0.80929774 | 0 |
Test that a materialized view created with 'SELECT ' works as expected when adding a new column. The new column is not present in the view. | def test_query_new_column(self):
session = self.prepare(user_table=True)
session.execute(("CREATE MATERIALIZED VIEW users_by_state2 AS SELECT state, username FROM users "
"WHERE STATE IS NOT NULL AND USERNAME IS NOT NULL PRIMARY KEY (state, username)"))
self._insert_data(session)
assert_one(
session,
"SELECT * FROM users_by_state2 WHERE state = 'TX' AND username = 'user1'",
['TX', 'user1']
)
session.execute("ALTER TABLE users ADD first_name varchar;")
results = list(session.execute("SELECT * FROM users_by_state2 WHERE state = 'TX' AND username = 'user1'"))
assert len(results) == 1
assert not hasattr(results[0], 'first_name'), 'Column "first_name" found in view'
assert_one(
session,
"SELECT * FROM users_by_state2 WHERE state = 'TX' AND username = 'user1'",
['TX', 'user1']
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _test_no_base_column_in_view_pk_complex_timestamp(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1, node2, node3 = self.cluster.nodelist()\n\n session.execute('USE ks')\n session.execute(\"CREATE TABLE t (k int, c int, a int, b int, e int, f int, primary key(k, c))\")\n session.execute((\"CREATE MATERIALIZED VIEW mv AS SELECT k,c,a,b FROM t \"\n \"WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c, k)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n # update unselected, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 1 SET e=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, 1, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # remove unselected, add selected column, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 2 SET e=null, b=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, 1, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, 1])\n\n # remove selected column, view row is removed\n self.update_view(session, \"UPDATE t USING TIMESTAMP 2 SET e=null, b=null WHERE k=1 AND c=1;\", flush)\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")\n\n # update unselected with ts=3, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET f=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # insert livenesssInfo, view row should be alive\n self.update_view(session, \"INSERT INTO t(k,c) VALUES(1,1) USING TIMESTAMP 3\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # remove unselected, view row should be alive because of base livenessInfo alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET f=null WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # add selected column, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET a=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n\n # update unselected, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 SET f=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n\n # delete with ts=3, view row should be alive due to unselected@ts4\n self.update_view(session, \"DELETE FROM t USING TIMESTAMP 3 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # remove unselected, view row should be removed\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 SET f=null WHERE k=1 AND c=1;\", flush)\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")\n\n # add selected with ts=7, view row is alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 7 SET b=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, 
\"SELECT * FROM t\", [1, 1, None, 1, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, 1])\n\n # remove selected with ts=7, view row is dead\n self.update_view(session, \"UPDATE t USING TIMESTAMP 7 SET b=null WHERE k=1 AND c=1;\", flush)\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")\n\n # add selected with ts=5, view row is alive (selected column should not affects each other)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 5 SET a=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n\n start = time.time()\n # add selected with ttl=30 (we apparently need a long ttl because the flushing etc that self.update_view does can take a long time)\n update_time = self.update_view(session, \"UPDATE t USING TTL 30 SET a=1 WHERE k=1 AND c=1;\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n except AssertionError as ae:\n if (time.time() - start) >= 30:\n pytest.fail(\"Please increase the 30 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n wait_time = update_time + 30 - time.time()\n if wait_time > 0:\n time.sleep(wait_time)\n\n start = time.time()\n # update unselected with ttl=30, view row should be alive\n update_time = self.update_view(session, \"UPDATE t USING TTL 30 SET f=1 WHERE k=1 AND c=1;\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n except AssertionError as ae:\n if (time.time() - start) >= 30:\n pytest.fail(\"Please increase the 30 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n wait_time = update_time + 30 - time.time()\n if wait_time > 0:\n time.sleep(wait_time)\n\n # view row still alive due to base livenessInfo\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")",
"def test_migrate_view_fields(self):\n self.test_view = RecordView.create(\n self.testcoll, test_view_id, test_view_create_values\n )\n migrate_coll_data(self.testcoll)\n # Read field definition and check for inline field list\n view_data = self.check_entity_values(\n \"_view\", test_view_id, check_values=test_view_migrated_values\n )\n return",
"def test_create(self):\n session = self.prepare(user_table=True)\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 1, \"Expecting 1 materialized view == got\" + str(result)",
"def test_complex_mv_select_statements(self):\n cluster = self.cluster\n cluster.set_configuration_options({'enable_materialized_views': 'true'})\n cluster.populate(3).start()\n node1 = cluster.nodelist()[0]\n session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)\n\n logger.debug(\"Creating keyspace\")\n session.execute(\"CREATE KEYSPACE mvtest WITH replication = \"\n \"{'class': 'SimpleStrategy', 'replication_factor': '3'}\")\n session.execute('USE mvtest')\n\n mv_primary_keys = [\"((a, b), c)\",\n \"((b, a), c)\",\n \"(a, b, c)\",\n \"(c, b, a)\",\n \"((c, a), b)\"]\n\n for mv_primary_key in mv_primary_keys:\n\n session.execute(\"CREATE TABLE test (a int, b int, c int, d int, PRIMARY KEY (a, b, c))\")\n\n insert_stmt = session.prepare(\"INSERT INTO test (a, b, c, d) VALUES (?, ?, ?, ?)\")\n update_stmt = session.prepare(\"UPDATE test SET d = ? WHERE a = ? AND b = ? AND c = ?\")\n delete_stmt1 = session.prepare(\"DELETE FROM test WHERE a = ? AND b = ? AND c = ?\")\n delete_stmt2 = session.prepare(\"DELETE FROM test WHERE a = ?\")\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n rows = [(0, 0, 0, 0),\n (0, 0, 1, 0),\n (0, 1, 0, 0),\n (0, 1, 1, 0),\n (1, 0, 0, 0),\n (1, 0, 1, 0),\n (1, 1, -1, 0),\n (1, 1, 0, 0),\n (1, 1, 1, 0)]\n\n for row in rows:\n session.execute(insert_stmt, row)\n\n logger.debug(\"Testing MV primary key: {}\".format(mv_primary_key))\n\n session.execute(\"CREATE MATERIALIZED VIEW mv AS SELECT * FROM test WHERE \"\n \"a = 1 AND b IS NOT NULL AND c = 1 PRIMARY KEY {}\".format(mv_primary_key))\n time.sleep(3)\n\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # insert new rows that does not match the filter\n session.execute(insert_stmt, (0, 0, 1, 0))\n session.execute(insert_stmt, (1, 1, 0, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # insert new row that does match the filter\n session.execute(insert_stmt, (1, 2, 1, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # update rows that does not match the filter\n session.execute(update_stmt, (1, 1, -1, 0))\n session.execute(update_stmt, (0, 1, 1, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # update a row that does match the filter\n session.execute(update_stmt, (2, 1, 1, 1))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete rows that does not match the filter\n session.execute(delete_stmt1, (1, 1, -1))\n session.execute(delete_stmt1, (2, 0, 1))\n session.execute(delete_stmt2, (0,))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete a row that does match the filter\n session.execute(delete_stmt1, (1, 1, 1))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete a partition that matches the filter\n session.execute(delete_stmt2, (1,))\n assert_all(session, \"SELECT a, b, c, d FROM mv\", [], 
cl=ConsistencyLevel.QUORUM)\n\n # Cleanup\n session.execute(\"DROP MATERIALIZED VIEW mv\")\n session.execute(\"DROP TABLE test\")",
"def test_drop_column(self):\n session = self.prepare(user_table=True)\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 1, \"Expecting {} materialized view, got {}\".format(1, len(result))\n\n assert_invalid(\n session,\n \"ALTER TABLE ks.users DROP state;\",\n \"Cannot drop column state on base table with materialized views.\"\n )",
"def test_add_column(self):\n name_column = Varchar()\n name_column._meta.name = \"name\"\n\n genre_column = Varchar()\n genre_column._meta.name = \"genre\"\n\n schema: t.List[DiffableTable] = [\n DiffableTable(\n class_name=\"Band\",\n tablename=\"band\",\n columns=[name_column, genre_column],\n )\n ]\n schema_snapshot: t.List[DiffableTable] = [\n DiffableTable(\n class_name=\"Band\",\n tablename=\"band\",\n columns=[name_column],\n )\n ]\n\n schema_differ = SchemaDiffer(\n schema=schema, schema_snapshot=schema_snapshot, auto_input=\"y\"\n )\n\n self.assertTrue(len(schema_differ.add_columns.statements) == 1)\n self.assertEqual(\n schema_differ.add_columns.statements[0],\n \"manager.add_column(table_class_name='Band', tablename='band', column_name='genre', db_column_name='genre', column_class_name='Varchar', column_class=Varchar, params={'length': 255, 'default': '', 'null': False, 'primary_key': False, 'unique': False, 'index': False, 'index_method': IndexMethod.btree, 'choices': None, 'db_column_name': None})\", # noqa\n )",
"def test_dummydb_add_data_to_table_wrong_column_name(self):\n db = DummyDB()\n columns = {\n \"one\": int,\n \"two\": str,\n \"three\": bool,\n }\n db.create_table(\"new_table\", columns)\n result = db.select(\"new_table\", four=1)",
"def _test_mv_with_default_ttl(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1, node2, node3 = self.cluster.nodelist()\n session.execute('USE ks')\n\n logger.debug(\"MV with same key and unselected columns\")\n session.execute(\"CREATE TABLE t2 (k int, a int, b int, c int, primary key(k, a)) with default_time_to_live=600\")\n session.execute((\"CREATE MATERIALIZED VIEW mv2 AS SELECT k,a,b FROM t2 \"\n \"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (a, k)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n self.update_view(session, \"UPDATE t2 SET c=1 WHERE k=1 AND a=1;\", flush)\n assert_one(session, \"SELECT k,a,b,c FROM t2\", [1, 1, None, 1])\n assert_one(session, \"SELECT k,a,b FROM mv2\", [1, 1, None])\n\n self.update_view(session, \"UPDATE t2 SET c=null WHERE k=1 AND a=1;\", flush)\n assert_none(session, \"SELECT k,a,b,c FROM t2\")\n assert_none(session, \"SELECT k,a,b FROM mv2\")\n\n self.update_view(session, \"UPDATE t2 SET c=2 WHERE k=1 AND a=1;\", flush)\n assert_one(session, \"SELECT k,a,b,c FROM t2\", [1, 1, None, 2])\n assert_one(session, \"SELECT k,a,b FROM mv2\", [1, 1, None])\n\n self.update_view(session, \"DELETE c FROM t2 WHERE k=1 AND a=1;\", flush)\n assert_none(session, \"SELECT k,a,b,c FROM t2\")\n assert_none(session, \"SELECT k,a,b FROM mv2\")\n\n if flush:\n self.cluster.compact()\n assert_none(session, \"SELECT * FROM t2\")\n assert_none(session, \"SELECT * FROM mv2\")\n\n # test with user-provided ttl\n self.update_view(session, \"INSERT INTO t2(k,a,b,c) VALUES(2,2,2,2) USING TTL 5\", flush)\n self.update_view(session, \"UPDATE t2 USING TTL 100 SET c=1 WHERE k=2 AND a=2;\", flush)\n self.update_view(session, \"UPDATE t2 USING TTL 50 SET c=2 WHERE k=2 AND a=2;\", flush)\n self.update_view(session, \"DELETE c FROM t2 WHERE k=2 AND a=2;\", flush)\n\n time.sleep(5)\n\n assert_none(session, \"SELECT k,a,b,c FROM t2\")\n assert_none(session, \"SELECT k,a,b FROM mv2\")\n\n if flush:\n self.cluster.compact()\n assert_none(session, \"SELECT * FROM t2\")\n assert_none(session, \"SELECT * FROM mv2\")\n\n logger.debug(\"MV with extra key\")\n session.execute(\"CREATE TABLE t (k int PRIMARY KEY, a int, b int) with default_time_to_live=600\")\n session.execute((\"CREATE MATERIALIZED VIEW mv AS SELECT * FROM t \"\n \"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 1, 1);\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1])\n\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 2, 1);\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 2, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 2, 1])\n\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 3, 1);\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 3, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 3, 1])\n\n if flush:\n self.cluster.compact()\n assert_one(session, \"SELECT * FROM t\", [1, 3, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 3, 1])\n\n # user provided ttl\n start = time.time()\n self.update_view(session, \"UPDATE t USING TTL 100 SET a = 4 WHERE k = 1\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 4, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 4, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 
100:\n pytest.fail(\"Please increase the 100 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n start = time.time()\n self.update_view(session, \"UPDATE t USING TTL 80 SET a = 5 WHERE k = 1\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 5, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 5, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 80:\n pytest.fail(\"Please increase the 80 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n start = time.time()\n self.update_view(session, \"UPDATE t USING TTL 60 SET a = 6 WHERE k = 1\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 6, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 6, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 60:\n pytest.fail(\"Please increase the 60 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n if flush:\n self.cluster.compact()\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 6, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 6, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 60:\n pytest.fail(\"Please increase the 60 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae",
"def test_view_metadata_cleanup(self):\n session = self.prepare(rf=2, nodes=2)\n\n def populate_data(session, rows):\n logger.debug(\"populate base data\")\n for v in range(rows):\n session.execute(\"INSERT INTO t(k,c,a,b,e,f) VALUES({v},{v},{v},{v},{v},{v})\".format(v=v))\n\n def verify_data(session, rows, views):\n logger.debug(\"verify view data\")\n for v in range(rows):\n for view in range(views):\n assert_one(session, \"SELECT * FROM mv{} WHERE k={v} AND c={v}\".format(view, v=v), [v, v, v, v, v, v])\n\n def create_keyspace(session, ks=\"ks1\", rf=2):\n create_ks(session, ks, rf)\n\n def create_table(session):\n logger.debug(\"create base table\")\n session.execute(\"CREATE TABLE t (k int, c int, a int, b int, e int, f int, primary key(k, c))\")\n\n def create_views(session, views, keyspace=\"ks1\"):\n logger.debug(\"create view\")\n for view in range(views):\n session.execute(\"CREATE MATERIALIZED VIEW mv{} AS SELECT * FROM t \"\n \"WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c,k)\".format(view),\n timeout=60)\n self._wait_for_view(keyspace, \"mv{}\".format(view))\n\n def drop_keyspace(session, keyspace=\"ks1\"):\n logger.debug(\"drop keyspace {}\".format(keyspace))\n session.execute(\"DROP KEYSPACE IF EXISTS {}\".format(keyspace),\n timeout=60)\n\n def drop_views(session, views):\n logger.debug(\"drop all views\")\n for view in range(views):\n session.execute(\"DROP MATERIALIZED VIEW IF EXISTS mv{}\".format(view))\n\n rows = 100\n views = 5\n\n create_keyspace(session)\n create_table(session)\n populate_data(session, rows)\n create_views(session, views)\n verify_data(session, rows, views)\n\n self._assert_view_meta(session, views)\n drop_keyspace(session)\n self._assert_view_meta(session, views, exists=False)\n\n create_keyspace(session)\n create_table(session)\n populate_data(session, rows)\n create_views(session, views)\n verify_data(session, rows, views)\n\n self._assert_view_meta(session, views)\n drop_views(session, views)\n self._assert_view_meta(session, views, exists=False)",
"def test_drop_mv(self):\n session = self.prepare(user_table=True)\n\n # create another materialized view\n session.execute((\"CREATE MATERIALIZED VIEW users_by_birth_year AS \"\n \"SELECT * FROM users WHERE birth_year IS NOT NULL AND \"\n \"username IS NOT NULL PRIMARY KEY (birth_year, username)\"))\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 2, \"Expecting {} materialized view, got {}\".format(2, len(result))\n\n session.execute(\"DROP MATERIALIZED VIEW ks.users_by_state;\")\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 1, \"Expecting {} materialized view, got {}\".format(1, len(result))",
"def test_select_columns(self):\n self.insert()\n data = self.tbl.select()\n assert (u'id',) + tuple(data.columns) == self.tbl.columns",
"def _test_base_column_in_view_pk_complex_timestamp(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1, node2, node3 = self.cluster.nodelist()\n\n session.execute('USE ks')\n session.execute(\"CREATE TABLE t (k int PRIMARY KEY, a int, b int)\")\n session.execute((\"CREATE MATERIALIZED VIEW mv AS SELECT * FROM t \"\n \"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n # Set initial values TS=1\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 1, 1) USING TIMESTAMP 1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1])\n\n # increase b ts to 10\n self.update_view(session, \"UPDATE t USING TIMESTAMP 10 SET b = 2 WHERE k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 1, 2, 10])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 1, 2, 10])\n\n # switch entries. shadow a = 1, insert a = 2\n self.update_view(session, \"UPDATE t USING TIMESTAMP 2 SET a = 2 WHERE k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 2, 2, 10])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 2, 2, 10])\n\n # switch entries. shadow a = 2, insert a = 1\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET a = 1 WHERE k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 1, 2, 10])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 1, 2, 10])\n\n # switch entries. shadow a = 1, insert a = 2\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 SET a = 2 WHERE k = 1;\", flush, compact=True)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 2, 2, 10])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 2, 2, 10])\n\n # able to shadow view row even if base-column in view pk's ts is smaller than row timestamp\n # set row TS = 20, a@6, b@20\n self.update_view(session, \"DELETE FROM t USING TIMESTAMP 5 where k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, None, 2, 10])\n assert_none(session, \"SELECT k,a,b,writetime(b) FROM mv\")\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 1, 1) USING TIMESTAMP 6;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 1, 2, 10])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 1, 2, 10])\n self.update_view(session, \"INSERT INTO t (k, b) VALUES (1, 1) USING TIMESTAMP 20;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 1, 1, 20])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 1, 1, 20])\n\n # switch entries. shadow a = 1, insert a = 2\n self.update_view(session, \"UPDATE t USING TIMESTAMP 7 SET a = 2 WHERE k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(a),writetime(b) FROM t\", [1, 2, 1, 7, 20])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 2, 1, 20])\n\n # switch entries. 
shadow a = 2, insert a = 1\n self.update_view(session, \"UPDATE t USING TIMESTAMP 8 SET a = 1 WHERE k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(a),writetime(b) FROM t\", [1, 1, 1, 8, 20])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 1, 1, 20])\n\n # create another view row\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (2, 2, 2);\", flush)\n assert_one(session, \"SELECT k,a,b FROM t WHERE k = 2\", [2, 2, 2])\n assert_one(session, \"SELECT k,a,b FROM mv WHERE k = 2\", [2, 2, 2])\n\n # stop node2, node3\n logger.debug('Shutdown node2')\n node2.stop(wait_other_notice=True)\n logger.debug('Shutdown node3')\n node3.stop(wait_other_notice=True)\n # shadow a = 1, create a = 2\n query = SimpleStatement(\"UPDATE t USING TIMESTAMP 9 SET a = 2 WHERE k = 1\", consistency_level=ConsistencyLevel.ONE)\n self.update_view(session, query, flush)\n # shadow (a=2, k=2) after 3 second\n query = SimpleStatement(\"UPDATE t USING TTL 3 SET a = 2 WHERE k = 2\", consistency_level=ConsistencyLevel.ONE)\n self.update_view(session, query, flush)\n\n logger.debug('Starting node2')\n node2.start(wait_for_binary_proto=True)\n logger.debug('Starting node3')\n node3.start(wait_for_binary_proto=True)\n\n # For k = 1 & a = 1, We should get a digest mismatch of tombstones and repaired\n query = SimpleStatement(\"SELECT * FROM mv WHERE k = 1 AND a = 1\", consistency_level=ConsistencyLevel.ALL)\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), True)\n assert 0 == len(result.current_rows)\n\n # For k = 1 & a = 1, second time no digest mismatch\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n assert_none(session, \"SELECT * FROM mv WHERE k = 1 AND a = 1\")\n assert 0 == len(result.current_rows)\n\n # For k = 1 & a = 2, We should get a digest mismatch of data and repaired for a = 2\n query = SimpleStatement(\"SELECT * FROM mv WHERE k = 1 AND a = 2\", consistency_level=ConsistencyLevel.ALL)\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), True)\n assert 1 == len(result.current_rows)\n\n # For k = 1 & a = 2, second time no digest mismatch\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n assert 1 == len(result.current_rows)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv WHERE k = 1\", [1, 2, 1, 20])\n\n time.sleep(3)\n # For k = 2 & a = 2, We should get a digest mismatch of expired and repaired\n query = SimpleStatement(\"SELECT * FROM mv WHERE k = 2 AND a = 2\", consistency_level=ConsistencyLevel.ALL)\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), True)\n logger.debug(result.current_rows)\n assert 0 == len(result.current_rows)\n\n # For k = 2 & a = 2, second time no digest mismatch\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n assert 0 == len(result.current_rows)",
"def test_populate_mv_after_insert(self):\n session = self.prepare(consistency_level=ConsistencyLevel.QUORUM)\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int)\")\n\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({v}, {v})\".format(v=i))\n\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL \"\n \"AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"wait for view to build\")\n self._wait_for_view(\"ks\", \"t_by_v\")\n\n logger.debug(\"wait that all batchlogs are replayed\")\n self._replay_batchlogs()\n\n for i in range(1000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(i), [i, i])",
"def propagate_view_creation_over_non_existing_table(self):\n\n cluster = self.cluster\n cluster.populate(3)\n cluster.start()\n node1, node2, node3 = self.cluster.nodelist()\n session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)\n create_ks(session, 'ks', 3)\n\n session.execute('CREATE TABLE users (username varchar PRIMARY KEY, state varchar)')\n\n # create a materialized view only in nodes 1 and 2\n node3.stop(wait_other_notice=True)\n session.execute(('CREATE MATERIALIZED VIEW users_by_state AS '\n 'SELECT * FROM users WHERE state IS NOT NULL AND username IS NOT NULL '\n 'PRIMARY KEY (state, username)'))\n\n # drop the base table only in node 3\n node1.stop(wait_other_notice=True)\n node2.stop(wait_other_notice=True)\n node3.start(wait_for_binary_proto=True)\n session = self.patient_cql_connection(node3, consistency_level=ConsistencyLevel.QUORUM)\n session.execute('DROP TABLE ks.users')\n\n # restart the cluster\n cluster.stop()\n cluster.start()\n\n # node3 should have received and ignored the creation of the MV over the dropped table\n assert node3.grep_log('Not adding view users_by_state because the base table')",
"def test_view_delete_with_scope(self):\n table = Table(\n {\"id\": int, \"msg\": str, \"val\": float},\n index=\"id\",\n )\n table.view(\n computed_columns=[\n {\n \"column\": \"inverted\",\n \"computed_function_name\": \"invert\",\n \"inputs\": [\"val\"],\n }\n ],\n columns=[\"inverted\"],\n )\n table.update(\n [\n {\n \"id\": 1,\n \"msg\": \"test\",\n \"val\": 1.0,\n }\n ]\n )",
"def _test_base_column_in_view_pk_commutative_tombstone_(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1 = self.cluster.nodelist()[0]\n\n session.execute('USE ks')\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n for node in self.cluster.nodelist():\n node.nodetool(\"disableautocompaction\")\n\n # sstable 1, Set initial values TS=1\n self.update_view(session, \"INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 1\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, 'a', 3.0])\n\n # sstable 2, change v's value and TS=2, tombstones v=1 and adds v=0 record\n self.update_view(session, \"DELETE FROM t USING TIMESTAMP 2 WHERE id = 1;\", flush)\n assert_none(session, \"SELECT * FROM t_by_v\")\n assert_none(session, \"SELECT * FROM t\")\n\n # sstable 3, tombstones of mv created by base deletion should remain.\n self.update_view(session, \"INSERT INTO t (id, v) VALUES (1, 1) USING TIMESTAMP 3\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None])\n\n # sstable 4, shadow view row (id=1, v=1), insert (id=1, v=2, ts=4)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 set v = 2 WHERE id = 1;\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [2, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 2, None, None])\n\n # sstable 5, shadow view row (id=1, v=2), insert (id=1, v=1 ts=5)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 5 set v = 1 WHERE id = 1;\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None]) # data deleted by row-tombstone@2 should not resurrect\n\n if flush:\n self.cluster.compact()\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None]) # data deleted by row-tombstone@2 should not resurrect\n\n # shadow view row (id=1, v=1)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 5 set v = null WHERE id = 1;\", flush)\n assert_none(session, \"SELECT * FROM t_by_v\")\n assert_one(session, \"SELECT * FROM t\", [1, None, None, None])",
"def DEADcreate_v_fix_view():\n sql_view = \"\"\"create or replace view v_fix as\n SELECT \n fix.fix_ident, \n fix.fix_center,\n ST_Y(ST_Transform(fix.fix_center, 4326)) as fix_lat84,\n ST_X(ST_Transform(fix.fix_center, 4326)) as fix_lon84\n \n FROM \n fix\"\"\"\n conf.Cur.execute(sql_view)\n conf.Con.commit()",
"def test_locates_col(self):\n mapper(Order, orders, properties={\n 'description':deferred(orders.c.description)\n })\n\n sess = create_session()\n o1 = sess.query(Order).first()\n def go():\n assert o1.description == 'order 1'\n self.assert_sql_count(testing.db, go, 1)\n\n sess = create_session()\n o1 = sess.query(Order).add_column(orders.c.description).first()[0]\n def go():\n assert o1.description == 'order 1'\n self.assert_sql_count(testing.db, go, 0)",
"def test_add_column(self):\n self.spy_on(DataGrid.add_column)\n\n DataGridColumnsHook(extension=self.extension,\n datagrid_cls=DataGrid,\n columns=[Column(id='sandbox')])\n\n self.assertTrue(DataGrid.add_column.called)",
"def test_immutable(self):\n session = self.prepare(user_table=True)\n\n # cannot insert\n assert_invalid(session, \"INSERT INTO users_by_state (state, username) VALUES ('TX', 'user1');\",\n \"Cannot directly modify a materialized view\")\n\n # cannot update\n assert_invalid(session, \"UPDATE users_by_state SET session_token='XYZ' WHERE username='user1' AND state = 'TX';\",\n \"Cannot directly modify a materialized view\")\n\n # cannot delete a row\n assert_invalid(session, \"DELETE from users_by_state where state='TX';\",\n \"Cannot directly modify a materialized view\")\n\n # cannot delete a cell\n assert_invalid(session, \"DELETE session_token from users_by_state where state='TX';\",\n \"Cannot directly modify a materialized view\")\n\n # cannot alter a table\n assert_invalid(session, \"ALTER TABLE users_by_state ADD first_name varchar\",\n \"Cannot use ALTER TABLE on Materialized View\")",
"def test_view_tombstone(self):\n\n self.prepare(rf=3, options={'hinted_handoff_enabled': False})\n node1, node2, node3 = self.cluster.nodelist()\n\n session = self.patient_exclusive_cql_connection(node1)\n session.max_trace_wait = 120\n session.execute('USE ks')\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n # Set initial values TS=0, verify\n session.execute(SimpleStatement(\"INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 0\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'a', 3.0]\n )\n\n session.execute(SimpleStatement(\"INSERT INTO t (id, v2) VALUES (1, 'b') USING TIMESTAMP 1\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0]\n )\n\n # change v's value and TS=3, tombstones v=1 and adds v=0 record\n session.execute(SimpleStatement(\"UPDATE t USING TIMESTAMP 3 SET v = 0 WHERE id = 1\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n\n assert_none(session, \"SELECT * FROM t_by_v WHERE v = 1\")\n\n logger.debug('Shutdown node2')\n node2.stop(wait_other_notice=True)\n\n session.execute(SimpleStatement(\"UPDATE t USING TIMESTAMP 4 SET v = 1 WHERE id = 1\",\n consistency_level=ConsistencyLevel.QUORUM))\n self._replay_batchlogs()\n\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0]\n )\n\n node2.start(wait_for_binary_proto=True)\n\n # We should get a digest mismatch\n query = SimpleStatement(\"SELECT * FROM t_by_v WHERE v = 1\",\n consistency_level=ConsistencyLevel.ALL)\n\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), True)\n\n # We should not get a digest mismatch the second time\n query = SimpleStatement(\"SELECT * FROM t_by_v WHERE v = 1\", consistency_level=ConsistencyLevel.ALL)\n\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n\n # Verify values one last time\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0],\n cl=ConsistencyLevel.ALL\n )",
"def test_dummydb_add_data_to_table_wrong_column_type(self):\n db = DummyDB()\n columns = {\n \"one\": int,\n \"two\": str,\n \"three\": bool,\n }\n db.create_table(\"new_table\", columns)\n result = db.select(\"new_table\", two=1)",
"def test_drop_while_building(self):\n\n session = self.prepare(options={'concurrent_materialized_view_builders': 4}, install_byteman=True)\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n\n logger.debug(\"Inserting initial data\")\n for i in range(5000):\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS\".format(v=i))\n\n logger.debug(\"Slowing down MV build with byteman\")\n for node in self.cluster.nodelist():\n node.byteman_submit([mk_bman_path('4.0/view_builder_task_sleep.btm')])\n\n logger.debug(\"Create a MV\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"Wait and ensure the MV build has started. Waiting up to 2 minutes.\")\n self._wait_for_view_build_start(session, 'ks', 't_by_v', wait_minutes=2)\n\n logger.debug(\"Drop the MV while it is still building\")\n session.execute(\"DROP MATERIALIZED VIEW t_by_v\")\n\n logger.debug(\"Verify that the build has been stopped before its finalization without errors\")\n for node in self.cluster.nodelist():\n self.check_logs_for_errors()\n assert not node.grep_log('Marking view', filename='debug.log')\n assert node.grep_log('Stopping current view builder due to schema change', filename='debug.log')\n\n logger.debug(\"Verify that the view has been removed\")\n failed = False\n try:\n session.execute(\"SELECT COUNT(*) FROM t_by_v\")\n except InvalidRequest:\n failed = True\n self.assertTrue(failed, \"The view shouldn't be queryable\")\n self._assert_view_meta(session, views=1, exists=False)\n\n logger.debug(\"Create the MV again\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"Verify that the MV has been successfully created\")\n self._wait_for_view('ks', 't_by_v')\n assert_one(session, \"SELECT COUNT(*) FROM t_by_v\", [5000])",
"def test_secondary_index(self):\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n assert_invalid(session, \"CREATE INDEX ON t_by_v (v2)\",\n \"Secondary indexes are not supported on materialized views\")",
"def test_crc_check_chance(self):\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL \"\n \"AND id IS NOT NULL PRIMARY KEY (v, id) WITH crc_check_chance = 0.5\"))\n\n assert_crc_check_chance_equal(session, \"t_by_v\", 0.5, view=True)\n\n session.execute(\"ALTER MATERIALIZED VIEW t_by_v WITH crc_check_chance = 0.3\")\n\n assert_crc_check_chance_equal(session, \"t_by_v\", 0.3, view=True)",
"def test_dynamic_view_hydrated(empty_viewset):\n viewset = empty_viewset\n system1 = viewset.model.add_software_system(name=\"sys1\")\n viewset.create_dynamic_view(key=\"dyn1\", description=\"dynamic\", element=system1)\n io = ViewSetIO.from_orm(viewset)\n\n new_viewset = ViewSet.hydrate(io, viewset.model)\n assert count(new_viewset.dynamic_views) == 1\n view = list(new_viewset.dynamic_views)[0]\n assert view.description == \"dynamic\"\n assert view.element is system1",
"def testQueryColumns(self):\n scaffolder = plaso_sqlite.PlasoSQLiteScaffolder()\n test_string = (\n 'SELECT foobar as Foo, foobar.dot, random, reallylong AS long FROM '\n 'foobarengine WHERE foobar = 1')\n expected_columns = set(['foo', 'dot', 'random', 'long'])\n self._RunQueryTests(scaffolder, test_string, expected_columns)\n\n test_string = (\n 'select one, two as three, four as five, f.eight as EIGHTE FROM '\n 'foobar f, scode s WHERE f.id = s.id ORDER BY one')\n expected_columns = set(['one', 'three', 'five', 'eighte'])\n self._RunQueryTests(scaffolder, test_string, expected_columns)\n\n test_string = (\n 'this should not produce anything...')\n self._RunQueryTests(scaffolder, test_string, set())",
"def compile_refresh_materialized_view(element, compiler, **kw):\n text = \"REFRESH MATERIALIZED VIEW {name}\"\n return text.format(name=element.name)",
"def test_add_node_after_mv(self):\n\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=-i))\n\n for i in range(1000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(-i), [-i, i])\n\n node4 = new_node(self.cluster, data_center=\"dc1\")\n node4.start(wait_for_binary_proto=True, jvm_args=[\"-Dcassandra.migration_task_wait_in_seconds={}\".format(MIGRATION_WAIT)])\n\n session2 = self.patient_exclusive_cql_connection(node4)\n\n \"\"\"\n @jira_ticket CASSANDRA-12984\n\n Assert that MVs are marked as build after bootstrap. Otherwise newly streamed MVs will be built again\n \"\"\"\n assert_one(session2, \"SELECT count(*) FROM system.built_views WHERE keyspace_name = 'ks' AND view_name = 't_by_v'\", [1])\n\n for i in range(1000):\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(-i), [-i, i])\n\n for i in range(1000, 1100):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=-i))\n\n for i in range(1000, 1100):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(-i), [-i, i])",
"def test_query_all_new_column(self):\n session = self.prepare(user_table=True)\n\n self._insert_data(session)\n\n assert_one(\n session,\n \"SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'\",\n ['TX', 'user1', 1968, 'f', 'ch@ngem3a', None]\n )\n\n session.execute(\"ALTER TABLE users ADD first_name varchar;\")\n\n results = list(session.execute(\"SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'\"))\n assert len(results) == 1\n assert hasattr(results[0], 'first_name'), 'Column \"first_name\" not found'\n assert_one(\n session,\n \"SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'\",\n ['TX', 'user1', 1968, None, 'f', 'ch@ngem3a', None]\n )"
]
| [
"0.6892171",
"0.6611816",
"0.65615696",
"0.6400288",
"0.6197115",
"0.61784774",
"0.6161736",
"0.5996451",
"0.5949513",
"0.59209174",
"0.59142745",
"0.59110093",
"0.58645874",
"0.5767183",
"0.57138884",
"0.5691429",
"0.5675922",
"0.56391716",
"0.55949956",
"0.55837137",
"0.5579921",
"0.5570666",
"0.5569896",
"0.55563915",
"0.55493367",
"0.55042434",
"0.5494877",
"0.5483913",
"0.54732263",
"0.5468306"
]
| 0.74716026 | 0 |
Test that column renaming is atomically done between a table and its materialized views (CASSANDRA-12952) | def test_rename_column_atomicity(self):
session = self.prepare(nodes=1, user_table=True, install_byteman=True)
node = self.cluster.nodelist()[0]
self._insert_data(session)
assert_one(
session,
"SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'",
['TX', 'user1', 1968, 'f', 'ch@ngem3a', None]
)
# Rename a column with an injected byteman rule to kill the node after the first schema update
self.fixture_dtest_setup.allow_log_errors = True
script_version = '4x' if self.cluster.version() >= '4' else '3x'
node.byteman_submit([mk_bman_path('merge_schema_failure_{}.btm'.format(script_version))])
with pytest.raises(NoHostAvailable):
session.execute("ALTER TABLE users RENAME username TO user")
logger.debug('Restarting node')
node.stop()
node.start(wait_for_binary_proto=True)
session = self.patient_cql_connection(node, consistency_level=ConsistencyLevel.ONE)
# Both the table and its view should have the new schema after restart
assert_one(
session,
"SELECT * FROM ks.users WHERE state = 'TX' AND user = 'user1' ALLOW FILTERING",
['user1', 1968, 'f', 'ch@ngem3a', None, 'TX']
)
assert_one(
session,
"SELECT * FROM ks.users_by_state WHERE state = 'TX' AND user = 'user1'",
['TX', 'user1', 1968, 'f', 'ch@ngem3a', None]
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_rename_column(self):\n name_column = Varchar()\n name_column._meta.name = \"name\"\n\n title_column = Varchar()\n title_column._meta.name = \"title\"\n\n schema: t.List[DiffableTable] = [\n DiffableTable(\n class_name=\"Band\",\n tablename=\"band\",\n columns=[name_column],\n )\n ]\n schema_snapshot: t.List[DiffableTable] = [\n DiffableTable(\n class_name=\"Band\",\n tablename=\"band\",\n columns=[title_column],\n )\n ]\n\n schema_differ = SchemaDiffer(\n schema=schema, schema_snapshot=schema_snapshot, auto_input=\"y\"\n )\n\n self.assertTrue(len(schema_differ.rename_columns.statements) == 1)\n self.assertEqual(\n schema_differ.rename_columns.statements[0],\n \"manager.rename_column(table_class_name='Band', tablename='band', old_column_name='title', new_column_name='name', old_db_column_name='title', new_db_column_name='name')\", # noqa\n )",
"def test_rename_column(self):\n session = self.prepare(user_table=True)\n\n self._insert_data(session)\n\n assert_one(\n session,\n \"SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'\",\n ['TX', 'user1', 1968, 'f', 'ch@ngem3a', None]\n )\n\n session.execute(\"ALTER TABLE users RENAME username TO user\")\n\n results = list(session.execute(\"SELECT * FROM users_by_state WHERE state = 'TX' AND user = 'user1'\"))\n assert len(results) == 1\n assert hasattr(results[0], 'user'), 'Column \"user\" not found'\n assert_one(\n session,\n \"SELECT state, user, birth_year, gender FROM users_by_state WHERE state = 'TX' AND user = 'user1'\",\n ['TX', 'user1', 1968, 'f']\n )",
"def test_rename_table(self):\n name_column = Varchar()\n name_column._meta.name = \"name\"\n\n schema: t.List[DiffableTable] = [\n DiffableTable(\n class_name=\"Act\", tablename=\"act\", columns=[name_column]\n )\n ]\n schema_snapshot: t.List[DiffableTable] = [\n DiffableTable(\n class_name=\"Band\", tablename=\"band\", columns=[name_column]\n )\n ]\n\n schema_differ = SchemaDiffer(\n schema=schema, schema_snapshot=schema_snapshot, auto_input=\"y\"\n )\n\n self.assertTrue(len(schema_differ.rename_tables.statements) == 1)\n self.assertEqual(\n schema_differ.rename_tables.statements[0],\n \"manager.rename_table(old_class_name='Band', old_tablename='band', new_class_name='Act', new_tablename='act')\", # noqa\n )\n\n self.assertEqual(schema_differ.create_tables.statements, [])\n self.assertEqual(schema_differ.drop_tables.statements, [])",
"def test_rename_columns(dupcols):\n # Rename the first column\n d1 = rename(dupcols, columns='Name', names='Person')\n assert d1.columns[0] == 'Person'\n assert dupcols.columns[0] == 'Name'\n assert d1.columns[1] == 'A'\n assert d1.columns[2] == 'A'\n for col in d1.columns:\n assert isinstance(col, Column)\n assert d1.shape == (7, 3)\n # Rename the first column and the second column\n d1 = rename(dupcols, columns=['Name', 'A'], names=['Person', 'Col2'])\n assert d1.columns[0] == 'Person'\n assert d1.columns[1] == 'Col2'\n assert d1.columns[2] == 'A'\n for col in d1.columns:\n assert isinstance(col, Column)\n assert d1.shape == (7, 3)\n # Rename the first column and the last column\n d1 = rename(dupcols, columns=['Name', 2], names=['Person', 'Col2'])\n assert d1.columns[0] == 'Person'\n assert d1.columns[1] == 'A'\n assert d1.columns[2] == 'Col2'\n for col in d1.columns:\n assert isinstance(col, Column)\n assert d1.shape == (7, 3)",
"def test_get_column_names(self):\n table = 'test_table_cols'\n columns = ['col1', 'col2', 'col3']\n\n with self.dbh.table_recreate(table, columns, 'integer'):\n try:\n res = self.dbh.get_column_names(table)\n except Exception:\n self.dbh.rollback()\n raise\n\n self.assertEqual(res, columns)",
"def _test_base_column_in_view_pk_commutative_tombstone_(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1 = self.cluster.nodelist()[0]\n\n session.execute('USE ks')\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n for node in self.cluster.nodelist():\n node.nodetool(\"disableautocompaction\")\n\n # sstable 1, Set initial values TS=1\n self.update_view(session, \"INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 1\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, 'a', 3.0])\n\n # sstable 2, change v's value and TS=2, tombstones v=1 and adds v=0 record\n self.update_view(session, \"DELETE FROM t USING TIMESTAMP 2 WHERE id = 1;\", flush)\n assert_none(session, \"SELECT * FROM t_by_v\")\n assert_none(session, \"SELECT * FROM t\")\n\n # sstable 3, tombstones of mv created by base deletion should remain.\n self.update_view(session, \"INSERT INTO t (id, v) VALUES (1, 1) USING TIMESTAMP 3\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None])\n\n # sstable 4, shadow view row (id=1, v=1), insert (id=1, v=2, ts=4)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 set v = 2 WHERE id = 1;\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [2, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 2, None, None])\n\n # sstable 5, shadow view row (id=1, v=2), insert (id=1, v=1 ts=5)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 5 set v = 1 WHERE id = 1;\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None]) # data deleted by row-tombstone@2 should not resurrect\n\n if flush:\n self.cluster.compact()\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None]) # data deleted by row-tombstone@2 should not resurrect\n\n # shadow view row (id=1, v=1)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 5 set v = null WHERE id = 1;\", flush)\n assert_none(session, \"SELECT * FROM t_by_v\")\n assert_one(session, \"SELECT * FROM t\", [1, None, None, None])",
"def test_query_new_column(self):\n session = self.prepare(user_table=True)\n\n session.execute((\"CREATE MATERIALIZED VIEW users_by_state2 AS SELECT state, username FROM users \"\n \"WHERE STATE IS NOT NULL AND USERNAME IS NOT NULL PRIMARY KEY (state, username)\"))\n\n self._insert_data(session)\n\n assert_one(\n session,\n \"SELECT * FROM users_by_state2 WHERE state = 'TX' AND username = 'user1'\",\n ['TX', 'user1']\n )\n\n session.execute(\"ALTER TABLE users ADD first_name varchar;\")\n\n results = list(session.execute(\"SELECT * FROM users_by_state2 WHERE state = 'TX' AND username = 'user1'\"))\n assert len(results) == 1\n assert not hasattr(results[0], 'first_name'), 'Column \"first_name\" found in view'\n assert_one(\n session,\n \"SELECT * FROM users_by_state2 WHERE state = 'TX' AND username = 'user1'\",\n ['TX', 'user1']\n )",
"def _refactor_time_columns(write_cursor: 'DBCursor') -> None:\n log.debug('Enter _refactor_time_columns')\n write_cursor.execute('ALTER TABLE timed_balances RENAME COLUMN time TO timestamp')\n write_cursor.execute('ALTER TABLE timed_location_data RENAME COLUMN time TO timestamp')\n write_cursor.execute('ALTER TABLE trades RENAME COLUMN time TO timestamp')\n write_cursor.execute('ALTER TABLE asset_movements RENAME COLUMN time TO timestamp')\n log.debug('Exit _refactor_time_columns')",
"def test_single_column_factorize_columns():\n df = pd.DataFrame(\n {\"a\": [\"hello\", \"hello\", \"sup\"], \"b\": [1, 2, 3]}\n ).factorize_columns(column_names=\"a\")\n assert \"a_enc\" in df.columns",
"def test_multicolumn_factorize_columns_suffix_change():\n df = pd.DataFrame(\n {\n \"a\": [\"hello\", \"hello\", \"sup\"],\n \"b\": [1, 2, 3],\n \"c\": [\"aloha\", \"nihao\", \"nihao\"],\n }\n ).factorize_columns(column_names=[\"a\", \"c\"], suffix=\"_col\")\n assert \"a_col\" in df.columns\n assert \"c_col\" in df.columns\n assert \"a_enc\" not in df.columns\n assert \"c_enc\" not in df.columns",
"def _test_no_base_column_in_view_pk_complex_timestamp(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1, node2, node3 = self.cluster.nodelist()\n\n session.execute('USE ks')\n session.execute(\"CREATE TABLE t (k int, c int, a int, b int, e int, f int, primary key(k, c))\")\n session.execute((\"CREATE MATERIALIZED VIEW mv AS SELECT k,c,a,b FROM t \"\n \"WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c, k)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n # update unselected, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 1 SET e=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, 1, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # remove unselected, add selected column, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 2 SET e=null, b=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, 1, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, 1])\n\n # remove selected column, view row is removed\n self.update_view(session, \"UPDATE t USING TIMESTAMP 2 SET e=null, b=null WHERE k=1 AND c=1;\", flush)\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")\n\n # update unselected with ts=3, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET f=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # insert livenesssInfo, view row should be alive\n self.update_view(session, \"INSERT INTO t(k,c) VALUES(1,1) USING TIMESTAMP 3\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # remove unselected, view row should be alive because of base livenessInfo alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET f=null WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # add selected column, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET a=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n\n # update unselected, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 SET f=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n\n # delete with ts=3, view row should be alive due to unselected@ts4\n self.update_view(session, \"DELETE FROM t USING TIMESTAMP 3 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # remove unselected, view row should be removed\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 SET f=null WHERE k=1 AND c=1;\", flush)\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")\n\n # add selected with ts=7, view row is alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 7 SET b=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, 
\"SELECT * FROM t\", [1, 1, None, 1, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, 1])\n\n # remove selected with ts=7, view row is dead\n self.update_view(session, \"UPDATE t USING TIMESTAMP 7 SET b=null WHERE k=1 AND c=1;\", flush)\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")\n\n # add selected with ts=5, view row is alive (selected column should not affects each other)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 5 SET a=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n\n start = time.time()\n # add selected with ttl=30 (we apparently need a long ttl because the flushing etc that self.update_view does can take a long time)\n update_time = self.update_view(session, \"UPDATE t USING TTL 30 SET a=1 WHERE k=1 AND c=1;\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n except AssertionError as ae:\n if (time.time() - start) >= 30:\n pytest.fail(\"Please increase the 30 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n wait_time = update_time + 30 - time.time()\n if wait_time > 0:\n time.sleep(wait_time)\n\n start = time.time()\n # update unselected with ttl=30, view row should be alive\n update_time = self.update_view(session, \"UPDATE t USING TTL 30 SET f=1 WHERE k=1 AND c=1;\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n except AssertionError as ae:\n if (time.time() - start) >= 30:\n pytest.fail(\"Please increase the 30 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n wait_time = update_time + 30 - time.time()\n if wait_time > 0:\n time.sleep(wait_time)\n\n # view row still alive due to base livenessInfo\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")",
"def test_complex_mv_select_statements(self):\n cluster = self.cluster\n cluster.set_configuration_options({'enable_materialized_views': 'true'})\n cluster.populate(3).start()\n node1 = cluster.nodelist()[0]\n session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)\n\n logger.debug(\"Creating keyspace\")\n session.execute(\"CREATE KEYSPACE mvtest WITH replication = \"\n \"{'class': 'SimpleStrategy', 'replication_factor': '3'}\")\n session.execute('USE mvtest')\n\n mv_primary_keys = [\"((a, b), c)\",\n \"((b, a), c)\",\n \"(a, b, c)\",\n \"(c, b, a)\",\n \"((c, a), b)\"]\n\n for mv_primary_key in mv_primary_keys:\n\n session.execute(\"CREATE TABLE test (a int, b int, c int, d int, PRIMARY KEY (a, b, c))\")\n\n insert_stmt = session.prepare(\"INSERT INTO test (a, b, c, d) VALUES (?, ?, ?, ?)\")\n update_stmt = session.prepare(\"UPDATE test SET d = ? WHERE a = ? AND b = ? AND c = ?\")\n delete_stmt1 = session.prepare(\"DELETE FROM test WHERE a = ? AND b = ? AND c = ?\")\n delete_stmt2 = session.prepare(\"DELETE FROM test WHERE a = ?\")\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n rows = [(0, 0, 0, 0),\n (0, 0, 1, 0),\n (0, 1, 0, 0),\n (0, 1, 1, 0),\n (1, 0, 0, 0),\n (1, 0, 1, 0),\n (1, 1, -1, 0),\n (1, 1, 0, 0),\n (1, 1, 1, 0)]\n\n for row in rows:\n session.execute(insert_stmt, row)\n\n logger.debug(\"Testing MV primary key: {}\".format(mv_primary_key))\n\n session.execute(\"CREATE MATERIALIZED VIEW mv AS SELECT * FROM test WHERE \"\n \"a = 1 AND b IS NOT NULL AND c = 1 PRIMARY KEY {}\".format(mv_primary_key))\n time.sleep(3)\n\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # insert new rows that does not match the filter\n session.execute(insert_stmt, (0, 0, 1, 0))\n session.execute(insert_stmt, (1, 1, 0, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # insert new row that does match the filter\n session.execute(insert_stmt, (1, 2, 1, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # update rows that does not match the filter\n session.execute(update_stmt, (1, 1, -1, 0))\n session.execute(update_stmt, (0, 1, 1, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # update a row that does match the filter\n session.execute(update_stmt, (2, 1, 1, 1))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete rows that does not match the filter\n session.execute(delete_stmt1, (1, 1, -1))\n session.execute(delete_stmt1, (2, 0, 1))\n session.execute(delete_stmt2, (0,))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete a row that does match the filter\n session.execute(delete_stmt1, (1, 1, 1))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete a partition that matches the filter\n session.execute(delete_stmt2, (1,))\n assert_all(session, \"SELECT a, b, c, d FROM mv\", [], 
cl=ConsistencyLevel.QUORUM)\n\n # Cleanup\n session.execute(\"DROP MATERIALIZED VIEW mv\")\n session.execute(\"DROP TABLE test\")",
"def test_snapshot_and_restore_dropping_a_column(self):\n cluster = self.cluster\n cluster.populate(1).start()\n node1, = cluster.nodelist()\n session = self.patient_cql_connection(node1)\n\n # Create schema and insert some data\n create_ks(session, 'ks', 1)\n session.execute(\"CREATE TABLE ks.cf (k int PRIMARY KEY, a text, b text)\")\n session.execute(\"INSERT INTO ks.cf (k, a, b) VALUES (1, 'a', 'b')\")\n assert_one(session, \"SELECT * FROM ks.cf\", [1, \"a\", \"b\"])\n\n # Drop a column\n session.execute(\"ALTER TABLE ks.cf DROP b\")\n assert_one(session, \"SELECT * FROM ks.cf\", [1, \"a\"])\n\n # Take a snapshot and drop the table\n snapshot_dir = self.make_snapshot(node1, 'ks', 'cf', 'basic')\n session.execute(\"DROP TABLE ks.cf\")\n\n # Restore schema and data from snapshot\n self.restore_snapshot_schema(snapshot_dir, node1, 'ks', 'cf')\n self.restore_snapshot(snapshot_dir, node1, 'ks', 'cf')\n node1.nodetool('refresh ks cf')\n assert_one(session, \"SELECT * FROM ks.cf\", [1, \"a\"])\n\n # Clean up\n logger.debug(\"removing snapshot_dir: \" + snapshot_dir)\n shutil.rmtree(snapshot_dir)",
"def text_rename_column_errors(dupcols):\n # Incompatible column and name lists\n with pytest.raises(ValueError):\n rename(dupcols, columns='Name', names=['Person', 'Age'])\n # Unknown column name\n with pytest.raises(ValueError):\n rename(dupcols, columns='Names', names='Persons')\n # Column index out of range\n with pytest.raises(ValueError):\n rename(dupcols, columns=100, names='Persons')",
"def _test_mv_with_default_ttl(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1, node2, node3 = self.cluster.nodelist()\n session.execute('USE ks')\n\n logger.debug(\"MV with same key and unselected columns\")\n session.execute(\"CREATE TABLE t2 (k int, a int, b int, c int, primary key(k, a)) with default_time_to_live=600\")\n session.execute((\"CREATE MATERIALIZED VIEW mv2 AS SELECT k,a,b FROM t2 \"\n \"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (a, k)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n self.update_view(session, \"UPDATE t2 SET c=1 WHERE k=1 AND a=1;\", flush)\n assert_one(session, \"SELECT k,a,b,c FROM t2\", [1, 1, None, 1])\n assert_one(session, \"SELECT k,a,b FROM mv2\", [1, 1, None])\n\n self.update_view(session, \"UPDATE t2 SET c=null WHERE k=1 AND a=1;\", flush)\n assert_none(session, \"SELECT k,a,b,c FROM t2\")\n assert_none(session, \"SELECT k,a,b FROM mv2\")\n\n self.update_view(session, \"UPDATE t2 SET c=2 WHERE k=1 AND a=1;\", flush)\n assert_one(session, \"SELECT k,a,b,c FROM t2\", [1, 1, None, 2])\n assert_one(session, \"SELECT k,a,b FROM mv2\", [1, 1, None])\n\n self.update_view(session, \"DELETE c FROM t2 WHERE k=1 AND a=1;\", flush)\n assert_none(session, \"SELECT k,a,b,c FROM t2\")\n assert_none(session, \"SELECT k,a,b FROM mv2\")\n\n if flush:\n self.cluster.compact()\n assert_none(session, \"SELECT * FROM t2\")\n assert_none(session, \"SELECT * FROM mv2\")\n\n # test with user-provided ttl\n self.update_view(session, \"INSERT INTO t2(k,a,b,c) VALUES(2,2,2,2) USING TTL 5\", flush)\n self.update_view(session, \"UPDATE t2 USING TTL 100 SET c=1 WHERE k=2 AND a=2;\", flush)\n self.update_view(session, \"UPDATE t2 USING TTL 50 SET c=2 WHERE k=2 AND a=2;\", flush)\n self.update_view(session, \"DELETE c FROM t2 WHERE k=2 AND a=2;\", flush)\n\n time.sleep(5)\n\n assert_none(session, \"SELECT k,a,b,c FROM t2\")\n assert_none(session, \"SELECT k,a,b FROM mv2\")\n\n if flush:\n self.cluster.compact()\n assert_none(session, \"SELECT * FROM t2\")\n assert_none(session, \"SELECT * FROM mv2\")\n\n logger.debug(\"MV with extra key\")\n session.execute(\"CREATE TABLE t (k int PRIMARY KEY, a int, b int) with default_time_to_live=600\")\n session.execute((\"CREATE MATERIALIZED VIEW mv AS SELECT * FROM t \"\n \"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 1, 1);\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1])\n\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 2, 1);\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 2, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 2, 1])\n\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 3, 1);\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 3, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 3, 1])\n\n if flush:\n self.cluster.compact()\n assert_one(session, \"SELECT * FROM t\", [1, 3, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 3, 1])\n\n # user provided ttl\n start = time.time()\n self.update_view(session, \"UPDATE t USING TTL 100 SET a = 4 WHERE k = 1\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 4, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 4, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 
100:\n pytest.fail(\"Please increase the 100 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n start = time.time()\n self.update_view(session, \"UPDATE t USING TTL 80 SET a = 5 WHERE k = 1\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 5, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 5, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 80:\n pytest.fail(\"Please increase the 80 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n start = time.time()\n self.update_view(session, \"UPDATE t USING TTL 60 SET a = 6 WHERE k = 1\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 6, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 6, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 60:\n pytest.fail(\"Please increase the 60 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n if flush:\n self.cluster.compact()\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 6, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 6, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 60:\n pytest.fail(\"Please increase the 60 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae",
"def test_new_columns(self):\n rename = '{}*'\n expected = (list(self.df.columns) +\n list(rename.format(f) for f in self.formants))\n actual = self.normalizer(rename=rename, **self.kwargs).normalize(\n self.df).columns\n\n expected = sorted(expected)\n actual = sorted(actual)\n self.assertListEqual(actual, expected)",
"def test_column_name(self):\n field = self.base_field\n sch = SchemaField(field)\n self.assertEqual(sch.name, sch.column_name)\n self.assertNotEqual(sch.column_name, sch.title)",
"def test_column_synonyms(self):\n\n sess = create_session()\n\n assert_col = []\n class User(object):\n def _get_user_name(self):\n assert_col.append(('get', self._user_name))\n return self._user_name\n def _set_user_name(self, name):\n assert_col.append(('set', name))\n self._user_name = name\n user_name = property(_get_user_name, _set_user_name)\n\n mapper(Address, addresses)\n try:\n mapper(User, users, properties = {\n 'addresses':relation(Address, lazy=True),\n 'not_user_name':synonym('_user_name', map_column=True)\n })\n User.not_user_name\n assert False\n except exceptions.ArgumentError, e:\n assert str(e) == \"Can't compile synonym '_user_name': no column on table 'users' named 'not_user_name'\"\n\n clear_mappers()\n\n mapper(Address, addresses)\n mapper(User, users, properties = {\n 'addresses':relation(Address, lazy=True),\n 'user_name':synonym('_user_name', map_column=True)\n })\n\n # test compile\n assert not isinstance(User.user_name == 'jack', bool)\n\n assert hasattr(User, 'user_name')\n assert hasattr(User, '_user_name')\n\n u = sess.query(User).filter(User.user_name == 'jack').one()\n assert u.user_name == 'jack'\n u.user_name = 'foo'\n assert u.user_name == 'foo'\n assert assert_col == [('get', 'jack'), ('set', 'foo'), ('get', 'foo')]",
"def test_drop_column(self):\n name_column = Varchar()\n name_column._meta.name = \"name\"\n\n genre_column = Varchar()\n genre_column._meta.name = \"genre\"\n\n schema: t.List[DiffableTable] = [\n DiffableTable(\n class_name=\"Band\",\n tablename=\"band\",\n columns=[name_column],\n )\n ]\n schema_snapshot: t.List[DiffableTable] = [\n DiffableTable(\n class_name=\"Band\",\n tablename=\"band\",\n columns=[name_column, genre_column],\n )\n ]\n\n schema_differ = SchemaDiffer(\n schema=schema, schema_snapshot=schema_snapshot, auto_input=\"y\"\n )\n\n self.assertTrue(len(schema_differ.drop_columns.statements) == 1)\n self.assertEqual(\n schema_differ.drop_columns.statements[0],\n \"manager.drop_column(table_class_name='Band', tablename='band', column_name='genre', db_column_name='genre')\", # noqa\n )",
"def test_drop_column(self):\n session = self.prepare(user_table=True)\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 1, \"Expecting {} materialized view, got {}\".format(1, len(result))\n\n assert_invalid(\n session,\n \"ALTER TABLE ks.users DROP state;\",\n \"Cannot drop column state on base table with materialized views.\"\n )",
"def test_view_metadata_cleanup(self):\n session = self.prepare(rf=2, nodes=2)\n\n def populate_data(session, rows):\n logger.debug(\"populate base data\")\n for v in range(rows):\n session.execute(\"INSERT INTO t(k,c,a,b,e,f) VALUES({v},{v},{v},{v},{v},{v})\".format(v=v))\n\n def verify_data(session, rows, views):\n logger.debug(\"verify view data\")\n for v in range(rows):\n for view in range(views):\n assert_one(session, \"SELECT * FROM mv{} WHERE k={v} AND c={v}\".format(view, v=v), [v, v, v, v, v, v])\n\n def create_keyspace(session, ks=\"ks1\", rf=2):\n create_ks(session, ks, rf)\n\n def create_table(session):\n logger.debug(\"create base table\")\n session.execute(\"CREATE TABLE t (k int, c int, a int, b int, e int, f int, primary key(k, c))\")\n\n def create_views(session, views, keyspace=\"ks1\"):\n logger.debug(\"create view\")\n for view in range(views):\n session.execute(\"CREATE MATERIALIZED VIEW mv{} AS SELECT * FROM t \"\n \"WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c,k)\".format(view),\n timeout=60)\n self._wait_for_view(keyspace, \"mv{}\".format(view))\n\n def drop_keyspace(session, keyspace=\"ks1\"):\n logger.debug(\"drop keyspace {}\".format(keyspace))\n session.execute(\"DROP KEYSPACE IF EXISTS {}\".format(keyspace),\n timeout=60)\n\n def drop_views(session, views):\n logger.debug(\"drop all views\")\n for view in range(views):\n session.execute(\"DROP MATERIALIZED VIEW IF EXISTS mv{}\".format(view))\n\n rows = 100\n views = 5\n\n create_keyspace(session)\n create_table(session)\n populate_data(session, rows)\n create_views(session, views)\n verify_data(session, rows, views)\n\n self._assert_view_meta(session, views)\n drop_keyspace(session)\n self._assert_view_meta(session, views, exists=False)\n\n create_keyspace(session)\n create_table(session)\n populate_data(session, rows)\n create_views(session, views)\n verify_data(session, rows, views)\n\n self._assert_view_meta(session, views)\n drop_views(session, views)\n self._assert_view_meta(session, views, exists=False)",
"def _test_base_column_in_view_pk_complex_timestamp(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1, node2, node3 = self.cluster.nodelist()\n\n session.execute('USE ks')\n session.execute(\"CREATE TABLE t (k int PRIMARY KEY, a int, b int)\")\n session.execute((\"CREATE MATERIALIZED VIEW mv AS SELECT * FROM t \"\n \"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n # Set initial values TS=1\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 1, 1) USING TIMESTAMP 1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1])\n\n # increase b ts to 10\n self.update_view(session, \"UPDATE t USING TIMESTAMP 10 SET b = 2 WHERE k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 1, 2, 10])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 1, 2, 10])\n\n # switch entries. shadow a = 1, insert a = 2\n self.update_view(session, \"UPDATE t USING TIMESTAMP 2 SET a = 2 WHERE k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 2, 2, 10])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 2, 2, 10])\n\n # switch entries. shadow a = 2, insert a = 1\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET a = 1 WHERE k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 1, 2, 10])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 1, 2, 10])\n\n # switch entries. shadow a = 1, insert a = 2\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 SET a = 2 WHERE k = 1;\", flush, compact=True)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 2, 2, 10])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 2, 2, 10])\n\n # able to shadow view row even if base-column in view pk's ts is smaller than row timestamp\n # set row TS = 20, a@6, b@20\n self.update_view(session, \"DELETE FROM t USING TIMESTAMP 5 where k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, None, 2, 10])\n assert_none(session, \"SELECT k,a,b,writetime(b) FROM mv\")\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 1, 1) USING TIMESTAMP 6;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 1, 2, 10])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 1, 2, 10])\n self.update_view(session, \"INSERT INTO t (k, b) VALUES (1, 1) USING TIMESTAMP 20;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 1, 1, 20])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 1, 1, 20])\n\n # switch entries. shadow a = 1, insert a = 2\n self.update_view(session, \"UPDATE t USING TIMESTAMP 7 SET a = 2 WHERE k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(a),writetime(b) FROM t\", [1, 2, 1, 7, 20])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 2, 1, 20])\n\n # switch entries. 
shadow a = 2, insert a = 1\n self.update_view(session, \"UPDATE t USING TIMESTAMP 8 SET a = 1 WHERE k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(a),writetime(b) FROM t\", [1, 1, 1, 8, 20])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 1, 1, 20])\n\n # create another view row\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (2, 2, 2);\", flush)\n assert_one(session, \"SELECT k,a,b FROM t WHERE k = 2\", [2, 2, 2])\n assert_one(session, \"SELECT k,a,b FROM mv WHERE k = 2\", [2, 2, 2])\n\n # stop node2, node3\n logger.debug('Shutdown node2')\n node2.stop(wait_other_notice=True)\n logger.debug('Shutdown node3')\n node3.stop(wait_other_notice=True)\n # shadow a = 1, create a = 2\n query = SimpleStatement(\"UPDATE t USING TIMESTAMP 9 SET a = 2 WHERE k = 1\", consistency_level=ConsistencyLevel.ONE)\n self.update_view(session, query, flush)\n # shadow (a=2, k=2) after 3 second\n query = SimpleStatement(\"UPDATE t USING TTL 3 SET a = 2 WHERE k = 2\", consistency_level=ConsistencyLevel.ONE)\n self.update_view(session, query, flush)\n\n logger.debug('Starting node2')\n node2.start(wait_for_binary_proto=True)\n logger.debug('Starting node3')\n node3.start(wait_for_binary_proto=True)\n\n # For k = 1 & a = 1, We should get a digest mismatch of tombstones and repaired\n query = SimpleStatement(\"SELECT * FROM mv WHERE k = 1 AND a = 1\", consistency_level=ConsistencyLevel.ALL)\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), True)\n assert 0 == len(result.current_rows)\n\n # For k = 1 & a = 1, second time no digest mismatch\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n assert_none(session, \"SELECT * FROM mv WHERE k = 1 AND a = 1\")\n assert 0 == len(result.current_rows)\n\n # For k = 1 & a = 2, We should get a digest mismatch of data and repaired for a = 2\n query = SimpleStatement(\"SELECT * FROM mv WHERE k = 1 AND a = 2\", consistency_level=ConsistencyLevel.ALL)\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), True)\n assert 1 == len(result.current_rows)\n\n # For k = 1 & a = 2, second time no digest mismatch\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n assert 1 == len(result.current_rows)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv WHERE k = 1\", [1, 2, 1, 20])\n\n time.sleep(3)\n # For k = 2 & a = 2, We should get a digest mismatch of expired and repaired\n query = SimpleStatement(\"SELECT * FROM mv WHERE k = 2 AND a = 2\", consistency_level=ConsistencyLevel.ALL)\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), True)\n logger.debug(result.current_rows)\n assert 0 == len(result.current_rows)\n\n # For k = 2 & a = 2, second time no digest mismatch\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n assert 0 == len(result.current_rows)",
"def test_set_col_names(self):\n self.dboard.set_col_names([\"A\", \"B\"])\n self.assertTrue(hasattr(self.dboard, \"_col_names\"))\n self.assertEqual(2, len(self.dboard._col_names))",
"def _rename_columns_step(self, op: data_algebra.data_ops_types.OperatorPlatform, *, data_map: Dict[str, Any]):\n if op.node_name != \"RenameColumnsNode\":\n raise TypeError(\n \"op was supposed to be a data_algebra.data_ops.RenameColumnsNode\"\n )\n res = self._compose_polars_ops(op.sources[0], data_map=data_map)\n if isinstance(res, pl.LazyFrame):\n # work around https://github.com/pola-rs/polars/issues/5882#issue-1507040380\n res = res.collect()\n res = res.rename(op.reverse_mapping)\n res = res.select(op.columns_produced())\n if self.use_lazy_eval and isinstance(res, pl.DataFrame):\n res = res.lazy()\n return res",
"def test_add_node_after_very_wide_mv(self):\n\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int, v int, PRIMARY KEY (id, v))\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n for i in range(5):\n for j in range(5000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=j))\n\n self.cluster.flush()\n\n for i in range(5):\n for j in range(5000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n node4 = new_node(self.cluster, data_center=\"dc1\")\n node4.set_configuration_options(values={'max_mutation_size_in_kb': 25}) # CASSANDRA-11670\n logger.debug(\"Start join at {}\".format(time.strftime(\"%H:%M:%S\")))\n node4.start(wait_for_binary_proto=True, jvm_args=[\"-Dcassandra.migration_task_wait_in_seconds={}\".format(MIGRATION_WAIT)])\n\n session2 = self.patient_exclusive_cql_connection(node4)\n\n for i in range(5):\n for j in range(5000):\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n for i in range(5):\n for j in range(5100):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=j))\n\n for i in range(5):\n for j in range(5100):\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])",
"def test_add_node_after_mv(self):\n\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=-i))\n\n for i in range(1000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(-i), [-i, i])\n\n node4 = new_node(self.cluster, data_center=\"dc1\")\n node4.start(wait_for_binary_proto=True, jvm_args=[\"-Dcassandra.migration_task_wait_in_seconds={}\".format(MIGRATION_WAIT)])\n\n session2 = self.patient_exclusive_cql_connection(node4)\n\n \"\"\"\n @jira_ticket CASSANDRA-12984\n\n Assert that MVs are marked as build after bootstrap. Otherwise newly streamed MVs will be built again\n \"\"\"\n assert_one(session2, \"SELECT count(*) FROM system.built_views WHERE keyspace_name = 'ks' AND view_name = 't_by_v'\", [1])\n\n for i in range(1000):\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(-i), [-i, i])\n\n for i in range(1000, 1100):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=-i))\n\n for i in range(1000, 1100):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(-i), [-i, i])",
"def test_multicolumn_factorize_columns():\n df = pd.DataFrame(\n {\n \"a\": [\"hello\", \"hello\", \"sup\"],\n \"b\": [1, 2, 3],\n \"c\": [\"aloha\", \"nihao\", \"nihao\"],\n }\n ).factorize_columns(column_names=[\"a\", \"c\"])\n assert \"a_enc\" in df.columns\n assert \"c_enc\" in df.columns",
"def test_view_tombstone(self):\n\n self.prepare(rf=3, options={'hinted_handoff_enabled': False})\n node1, node2, node3 = self.cluster.nodelist()\n\n session = self.patient_exclusive_cql_connection(node1)\n session.max_trace_wait = 120\n session.execute('USE ks')\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n # Set initial values TS=0, verify\n session.execute(SimpleStatement(\"INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 0\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'a', 3.0]\n )\n\n session.execute(SimpleStatement(\"INSERT INTO t (id, v2) VALUES (1, 'b') USING TIMESTAMP 1\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0]\n )\n\n # change v's value and TS=3, tombstones v=1 and adds v=0 record\n session.execute(SimpleStatement(\"UPDATE t USING TIMESTAMP 3 SET v = 0 WHERE id = 1\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n\n assert_none(session, \"SELECT * FROM t_by_v WHERE v = 1\")\n\n logger.debug('Shutdown node2')\n node2.stop(wait_other_notice=True)\n\n session.execute(SimpleStatement(\"UPDATE t USING TIMESTAMP 4 SET v = 1 WHERE id = 1\",\n consistency_level=ConsistencyLevel.QUORUM))\n self._replay_batchlogs()\n\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0]\n )\n\n node2.start(wait_for_binary_proto=True)\n\n # We should get a digest mismatch\n query = SimpleStatement(\"SELECT * FROM t_by_v WHERE v = 1\",\n consistency_level=ConsistencyLevel.ALL)\n\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), True)\n\n # We should not get a digest mismatch the second time\n query = SimpleStatement(\"SELECT * FROM t_by_v WHERE v = 1\", consistency_level=ConsistencyLevel.ALL)\n\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n\n # Verify values one last time\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0],\n cl=ConsistencyLevel.ALL\n )",
"def test_drop_mv(self):\n session = self.prepare(user_table=True)\n\n # create another materialized view\n session.execute((\"CREATE MATERIALIZED VIEW users_by_birth_year AS \"\n \"SELECT * FROM users WHERE birth_year IS NOT NULL AND \"\n \"username IS NOT NULL PRIMARY KEY (birth_year, username)\"))\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 2, \"Expecting {} materialized view, got {}\".format(2, len(result))\n\n session.execute(\"DROP MATERIALIZED VIEW ks.users_by_state;\")\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 1, \"Expecting {} materialized view, got {}\".format(1, len(result))",
"def test_dummydb_add_data_to_table_wrong_column_name(self):\n db = DummyDB()\n columns = {\n \"one\": int,\n \"two\": str,\n \"three\": bool,\n }\n db.create_table(\"new_table\", columns)\n result = db.select(\"new_table\", four=1)"
]
| [
"0.70623285",
"0.63104355",
"0.61005694",
"0.60737306",
"0.5995365",
"0.5948483",
"0.58400935",
"0.5781707",
"0.57810545",
"0.5773753",
"0.57513714",
"0.5725413",
"0.56517184",
"0.5640607",
"0.56149244",
"0.5593741",
"0.5567643",
"0.5563331",
"0.5556346",
"0.555506",
"0.55446106",
"0.5542361",
"0.5532507",
"0.54591566",
"0.54534554",
"0.5419538",
"0.541868",
"0.5395817",
"0.5390776",
"0.5374354"
]
| 0.73969036 | 0 |
Test that MV whose build has been stopped with `nodetool stop` can be dropped | def test_drop_with_stopped_build(self):
session = self.prepare(options={'concurrent_materialized_view_builders': 4}, install_byteman=True)
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
nodes = self.cluster.nodelist()
logger.debug("Inserting initial data")
for i in range(5000):
session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i))
logger.debug("Slowing down MV build with byteman")
for node in nodes:
node.byteman_submit([mk_bman_path('4.0/view_builder_task_sleep.btm')])
logger.debug("Create a MV")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
logger.debug("Wait and ensure the MV build has started. Waiting up to 2 minutes.")
self._wait_for_view_build_start(session, 'ks', 't_by_v', wait_minutes=2)
logger.debug("Stopping all running view build tasks with nodetool")
for node in nodes:
node.watch_log_for('Starting new view build for range', filename='debug.log', timeout=120)
node.nodetool('stop VIEW_BUILD')
logger.debug("Checking logs to verify that some view build tasks have been stopped")
for node in nodes:
node.watch_log_for('Stopped build for view', filename='debug.log', timeout=120)
node.watch_log_for('Compaction interrupted: View build', filename='system.log', timeout=120)
self.check_logs_for_errors()
logger.debug("Drop the MV while it is still building")
session.execute("DROP MATERIALIZED VIEW t_by_v")
logger.debug("Verify that the build has been stopped before its finalization without errors")
for node in nodes:
self.check_logs_for_errors()
assert not node.grep_log('Marking view', filename='debug.log')
assert node.grep_log('Stopping current view builder due to schema change', filename='debug.log')
logger.debug("Verify that the view has been removed")
failed = False
try:
session.execute("SELECT COUNT(*) FROM t_by_v")
except InvalidRequest:
failed = True
assert failed, "The view shouldn't be queryable"
logger.debug("Create the MV again")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
logger.debug("Verify that the MV has been successfully created")
self._wait_for_view('ks', 't_by_v')
# The original byteman delay is still there and can make this flaky (CASSANDRA-16962)
for i in range(10):
try:
assert_one(session, "SELECT COUNT(*) FROM t_by_v", [5000])
except AssertionError:
time.sleep(1)
else:
break
assert_one(session, "SELECT COUNT(*) FROM t_by_v", [5000]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_resume_stopped_build(self):\n\n session = self.prepare(options={'concurrent_materialized_view_builders': 4}, install_byteman=True)\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n nodes = self.cluster.nodelist()\n self.fixture_dtest_setup.ignore_log_patterns = [r'Compaction interrupted: View build']\n\n logger.debug(\"Inserting initial data\")\n for i in range(5000):\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS\".format(v=i))\n\n logger.debug(\"Slowing down MV build with byteman\")\n for node in nodes:\n node.byteman_submit([mk_bman_path('4.0/view_builder_task_sleep.btm')])\n\n logger.debug(\"Create a MV\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"Wait and ensure the MV build has started. Waiting up to 2 minutes.\")\n self._wait_for_view_build_start(session, 'ks', 't_by_v', wait_minutes=2)\n\n logger.debug(\"Stopping all running view build tasks with nodetool\")\n for node in nodes:\n node.watch_log_for('Starting new view build for range', filename='debug.log', timeout=120)\n node.nodetool('stop VIEW_BUILD')\n\n logger.debug(\"Checking logs to verify that some view build tasks have been stopped\")\n for node in nodes:\n node.watch_log_for('Stopped build for view', filename='debug.log', timeout=120)\n node.watch_log_for('Compaction interrupted: View build', filename='system.log', timeout=120)\n node.watch_log_for('Interrupted build for view', filename='debug.log', timeout=120)\n assert not node.grep_log('Marking view', filename='debug.log')\n self.check_logs_for_errors()\n\n logger.debug(\"Check that MV shouldn't be built yet.\")\n assert len(list(session.execute(\"SELECT COUNT(*) FROM t_by_v\"))) != 5000\n\n logger.debug(\"Restart the cluster\")\n self.cluster.stop()\n marks = [node.mark_log() for node in nodes]\n self.cluster.start()\n session = self.patient_cql_connection(nodes[0])\n\n logger.debug(\"Verify that the MV has been successfully created\")\n self._wait_for_view('ks', 't_by_v')\n assert_one(session, \"SELECT COUNT(*) FROM ks.t_by_v\", [5000])\n\n logger.debug(\"Checking logs to verify that the view build has been resumed and completed after restart\")\n for node, mark in zip(nodes, marks):\n assert node.grep_log('Resuming view build', filename='debug.log', from_mark=mark)\n assert node.grep_log('Marking view', filename='debug.log', from_mark=mark)\n self.check_logs_for_errors()",
"def test_interrupt_build_process(self):\n\n options = {'hinted_handoff_enabled': False}\n if self.cluster.version() >= '4':\n options['concurrent_materialized_view_builders'] = 4\n\n session = self.prepare(options=options, install_byteman=True)\n node1, node2, node3 = self.cluster.nodelist()\n\n logger.debug(\"Avoid premature MV build finalization with byteman\")\n for node in self.cluster.nodelist():\n if self.cluster.version() >= '4':\n node.byteman_submit([mk_bman_path('4.0/skip_view_build_finalization.btm')])\n node.byteman_submit([mk_bman_path('4.0/skip_view_build_task_finalization.btm')])\n else:\n node.byteman_submit([mk_bman_path('pre4.0/skip_finish_view_build_status.btm')])\n node.byteman_submit([mk_bman_path('pre4.0/skip_view_build_update_distributed.btm')])\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n\n logger.debug(\"Inserting initial data\")\n for i in range(10000):\n session.execute(\n \"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS\".format(v=i)\n )\n\n logger.debug(\"Create a MV\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"Wait and ensure the MV build has started. Waiting up to 2 minutes.\")\n self._wait_for_view_build_start(session, 'ks', 't_by_v', wait_minutes=2)\n\n logger.debug(\"Stop the cluster. Interrupt the MV build process.\")\n self.cluster.stop()\n\n logger.debug(\"Checking logs to verify that the view build tasks have been created\")\n for node in self.cluster.nodelist():\n assert node.grep_log('Starting new view build', filename='debug.log')\n assert not node.grep_log('Resuming view build', filename='debug.log')\n node.mark_log(filename='debug.log')\n\n logger.debug(\"Restart the cluster\")\n self.cluster.start()\n session = self.patient_cql_connection(node1)\n session.execute(\"USE ks\")\n\n logger.debug(\"MV shouldn't be built yet.\")\n assert len(list(session.execute(\"SELECT COUNT(*) FROM t_by_v\"))) != 10000\n\n logger.debug(\"Wait and ensure the MV build resumed. Waiting up to 2 minutes.\")\n self._wait_for_view(\"ks\", \"t_by_v\")\n\n logger.debug(\"Verify all data\")\n assert_one(session, \"SELECT COUNT(*) FROM t_by_v\", [10000])\n for i in range(10000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0],\n cl=ConsistencyLevel.ALL\n )\n\n logger.debug(\"Checking logs to verify that some view build tasks have been resumed\")\n for node in self.cluster.nodelist():\n assert node.grep_log('Resuming view build', filename='debug.log')",
"def stopTestRun(self):",
"def testFailure():\n run(\"chariot-me\") #Start management-engine without initial deplflag\n egress()",
"def test_issue_stop_stop_watch(self):\n pass",
"def test_terminate_run(self):\n pass",
"def test_drop_while_building(self):\n\n session = self.prepare(options={'concurrent_materialized_view_builders': 4}, install_byteman=True)\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n\n logger.debug(\"Inserting initial data\")\n for i in range(5000):\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS\".format(v=i))\n\n logger.debug(\"Slowing down MV build with byteman\")\n for node in self.cluster.nodelist():\n node.byteman_submit([mk_bman_path('4.0/view_builder_task_sleep.btm')])\n\n logger.debug(\"Create a MV\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"Wait and ensure the MV build has started. Waiting up to 2 minutes.\")\n self._wait_for_view_build_start(session, 'ks', 't_by_v', wait_minutes=2)\n\n logger.debug(\"Drop the MV while it is still building\")\n session.execute(\"DROP MATERIALIZED VIEW t_by_v\")\n\n logger.debug(\"Verify that the build has been stopped before its finalization without errors\")\n for node in self.cluster.nodelist():\n self.check_logs_for_errors()\n assert not node.grep_log('Marking view', filename='debug.log')\n assert node.grep_log('Stopping current view builder due to schema change', filename='debug.log')\n\n logger.debug(\"Verify that the view has been removed\")\n failed = False\n try:\n session.execute(\"SELECT COUNT(*) FROM t_by_v\")\n except InvalidRequest:\n failed = True\n self.assertTrue(failed, \"The view shouldn't be queryable\")\n self._assert_view_meta(session, views=1, exists=False)\n\n logger.debug(\"Create the MV again\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"Verify that the MV has been successfully created\")\n self._wait_for_view('ks', 't_by_v')\n assert_one(session, \"SELECT COUNT(*) FROM t_by_v\", [5000])",
"def test_issue_delete_stop_watch(self):\n pass",
"def test_stop_project(self):\n support.create_project(self, 'homer3')\n support.add_step(self, contents='\\n'.join([\n 'import cauldron as cd',\n 'cd.shared.test = 0',\n 'cd.step.breathe()',\n 'cd.shared.test = 1',\n 'cd.project.stop()',\n 'cd.shared.test = 2'\n ]))\n support.add_step(self, contents='\\n'.join([\n 'import cauldron as cd',\n 'cd.shared.test = 3'\n ]))\n\n support.run_command('run')\n project = cd.project.get_internal_project()\n step = project.steps[1]\n\n self.assertEqual(project.shared.fetch('test'), 1)\n self.assertNotEqual(-1, step.dom.find('cd-StepStop'))",
"def stopTest(self, test):",
"def test_stop_machine(self, pretty_print, owner_api_token):\n machine = setup_data.get('stop_machine', {}).get(\n 'machine') or setup_data.get('machine') or 'my-machine'\n uri = MIST_URL + \\\n '/api/v2/machines/{machine}/actions/stop'.format(machine=machine)\n request = MistRequests(\n api_token=owner_api_token,\n uri=uri)\n request_method = getattr(request, 'POST'.lower())\n response = request_method()\n if 'stop_machine' in REDIRECT_OPERATIONS:\n assert_response_found(response)\n else:\n assert_response_ok(response)\n assert poll(\n api_token=owner_api_token,\n uri=setup_data['amazon_machine_uri'],\n data={'state': 'stopped', 'actions': {'resize': True}},\n timeout=_setup_module.DEFAULT_TIMEOUT)\n print('Success!!!')",
"def test_stop_process(self):\n error_to_simulate = self.input.param(\"simulate_error\", None)\n target_node = self.getTargetNode()\n remote = RemoteMachineShellConnection(target_node)\n error_sim = CouchbaseError(self.log, remote)\n target_vbuckets = Cbstats(target_node).vbucket_list(\n self.bucket.name, target_node)\n\n bucket_dict = BucketUtils.get_random_collections(\n self.cluster.buckets,\n req_num=1,\n consider_scopes=\"all\",\n consider_buckets=\"all\")\n\n bucket = BucketUtils.get_bucket_obj(self.cluster.buckets,\n bucket_dict.keys()[0])\n scope_name = bucket_dict[bucket.name][\"scopes\"].keys()[0]\n collection_name = bucket_dict[bucket.name][\n \"scopes\"][scope_name][\"collections\"].keys()[0]\n scope = BucketUtils.get_scope_obj(\n bucket, scope_name)\n collection = BucketUtils.get_collection_obj(scope, collection_name)\n\n if len(target_vbuckets) == 0:\n self.log.error(\"No target vbucket list generated to load data\")\n remote.disconnect()\n return\n\n self.start_doc_loading_tasks(target_vbuckets, scope_name, collection)\n\n # Induce the error condition\n error_sim.create(error_to_simulate)\n\n if self.allowed_hosts:\n self.set_allowed_hosts()\n\n self.sleep(20, \"Wait before reverting the error condition\")\n # Revert the simulated error condition and close the ssh session\n error_sim.revert(error_to_simulate)\n remote.disconnect()\n\n # Wait for doc loading task to complete\n self.task.jython_task_manager.get_task_result(self.doc_loading_task)\n if self.atomicity:\n self.task.jython_task_manager.get_task_result(\n self.transaction_load_task)\n elif self.N1qltxn:\n self.task.jython_task_manager.get_task_result(\n self.N1ql_load_task)\n\n if len(self.doc_loading_task.fail.keys()) != 0:\n if self.target_node == \"active\" or self.num_replicas in [2, 3]:\n self.log_failure(\"Unwanted failures for keys: %s\"\n % self.doc_loading_task.fail.keys())\n\n validate_passed = \\\n self.durability_helper.validate_durability_exception(\n self.doc_loading_task.fail,\n SDKException.DurabilityAmbiguousException)\n if not validate_passed:\n self.log_failure(\"Unwanted exception seen during validation\")\n\n # Get SDK client for CRUD retries\n sdk_client = self.sdk_client_pool.get_client_for_bucket(self.bucket)\n for doc_key, crud_result in self.doc_loading_task.fail.items():\n result = sdk_client.crud(DocLoading.Bucket.DocOps.CREATE,\n doc_key,\n crud_result[\"value\"],\n replicate_to=self.replicate_to,\n persist_to=self.persist_to,\n durability=self.durability_level,\n timeout=self.sdk_timeout)\n if result[\"status\"] is False:\n self.log_failure(\"Retry of doc_key %s failed: %s\"\n % (doc_key, result[\"error\"]))\n # Close the SDK connection\n self.sdk_client_pool.release_client(sdk_client)\n\n self.validate_test_failure()\n\n self.bucket_util._wait_for_stats_all_buckets(self.cluster,\n self.cluster.buckets)\n # Update self.num_items and validate docs per collection\n if not self.N1qltxn and self.atomicity is False:\n self.bucket_util.validate_docs_per_collections_all_buckets(\n self.cluster)",
"def test_999_stop(self):\n HEADING()\n self.db.stop()\n result = True\n assert result",
"def stopBuild(reason=\"<no reason given>\"):",
"def test_finished_no_vm(self):\n self.command.finished()",
"def test_expectation_failure_stop(self):\n with self.main.write_recipe('foo2') as recipe:\n del recipe.expectation['basic']\n with self.main.write_recipe('foo') as recipe:\n del recipe.expectation['basic']\n\n test_run = self._run_test('run', '--stop', should_fail=True)\n results = test_run.data['test_results']\n self.assertEqual(len(results), 1)\n self.assertEqual(results.values()[0].keys()[0], 'diff')",
"def test_vsg_for_multiple_vcpes_in_vsg_vm_with_one_vcpe_restart(self):",
"def stop_all():\n subprocess.check_call(\n ['./run.py --down'], shell=True,\n cwd=orc8_docker_path,\n )\n subprocess.check_call(\n 'docker-compose down', shell=True,\n cwd=feg_docker_integ_test_path,\n )\n subprocess.check_call(\n 'vagrant halt magma', shell=True,\n cwd=agw_path,\n )",
"def test_clean_exit(self):\n ch = connection_helper()\n qr = list_test_artifacts(None, ch.tables)\n self.assertFalse(bool(qr), \"\"\"Run 'removefacts --conf <config> --removetestlist' or \nexecute 'tests/scripts/removetestfacts.py' to fix\"\"\")",
"def test_memleaks():\n build()\n sh(\"%s psutil\\\\tests\\\\test_memleaks.py\" % PYTHON)",
"def test_issue_start_stop_watch(self):\n pass",
"def pytest_unconfigure() -> None: # pragma: no cover\n if PROC.exitcode is None:\n assert PROC.pid is not None # not sure if this can happen (mypy error); if it does, be explicit\n os.kill(PROC.pid, signal.SIGINT)\n PROC.join(5)\n if PROC.exitcode is None:\n PROC.kill()\n PROC.join()\n print(\"\\nServer app terminated, logs in logs/server.log\")",
"def test_no_immediate_stop(self):\n script = ControlScript()\n options = ControlOptions()\n options.parseOptions([b\"--data-path\", self.mktemp()])\n self.assertNoResult(script.main(MemoryCoreReactor(), options))",
"def test_relaunch_deployment_run(self):\n pass",
"def test_node_stop(self, m_docker_client, m_client):\n # Call method under test\n node.node_stop(True)\n\n # Assert\n m_client.remove_host.assert_called_once_with(node.hostname)\n m_docker_client.stop.assert_called_once_with('calico-node')",
"def pytest_runtest_teardown(item):\n config = item.config\n enabled = config.getvalue('yagot')\n if enabled:\n import yagot\n tracker = yagot.GarbageTracker.get_tracker()\n tracker.stop()\n location = \"{file}::{func}\". \\\n format(file=item.location[0], func=item.name)\n assert not tracker.garbage, tracker.assert_message(location)",
"def test_node_graceful_shutdown(self, proc_info, controller_node):\n launch_testing.asserts.assertExitCodes(proc_info, process=controller_node)",
"def tearDown(self):\n \n pass\n #execfile('stop_geoserver.py') ",
"def test_stop(set_env, container: Container, docker_client: DockerClient):\n # pylint: disable=unused-argument\n assert container\n\n from dockerdb.commands.stop import stop\n\n stop()\n\n # getting the container again in order to assert their status after we stopped it\n _container = docker_client.containers.get(container_id=container.name)\n assert _container.status == \"exited\"",
"def tearDown(self):\n self._procfs_mock.stop()"
]
| [
"0.68905497",
"0.66020924",
"0.638977",
"0.6324155",
"0.6261753",
"0.6236924",
"0.6183464",
"0.6149862",
"0.61090034",
"0.595898",
"0.5956245",
"0.5946295",
"0.593066",
"0.59184647",
"0.5911645",
"0.5901314",
"0.58555895",
"0.5782444",
"0.57531476",
"0.5745357",
"0.5744198",
"0.57180786",
"0.56917465",
"0.56819934",
"0.56768364",
"0.5667923",
"0.5637042",
"0.5630465",
"0.5625592",
"0.56239915"
]
| 0.7115608 | 0 |
Able to shadow old view row if all columns in base are removed including unselected. Able to recreate view row if at least one selected column is alive. CASSANDRA-11500 | def _test_no_base_column_in_view_pk_complex_timestamp(self, flush):
session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
node1, node2, node3 = self.cluster.nodelist()
session.execute('USE ks')
session.execute("CREATE TABLE t (k int, c int, a int, b int, e int, f int, primary key(k, c))")
session.execute(("CREATE MATERIALIZED VIEW mv AS SELECT k,c,a,b FROM t "
"WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c, k)"))
session.cluster.control_connection.wait_for_schema_agreement()
# update unselected, view row should be alive
self.update_view(session, "UPDATE t USING TIMESTAMP 1 SET e=1 WHERE k=1 AND c=1;", flush)
assert_one(session, "SELECT * FROM t", [1, 1, None, None, 1, None])
assert_one(session, "SELECT * FROM mv", [1, 1, None, None])
# remove unselected, add selected column, view row should be alive
self.update_view(session, "UPDATE t USING TIMESTAMP 2 SET e=null, b=1 WHERE k=1 AND c=1;", flush)
assert_one(session, "SELECT * FROM t", [1, 1, None, 1, None, None])
assert_one(session, "SELECT * FROM mv", [1, 1, None, 1])
# remove selected column, view row is removed
self.update_view(session, "UPDATE t USING TIMESTAMP 2 SET e=null, b=null WHERE k=1 AND c=1;", flush)
assert_none(session, "SELECT * FROM t")
assert_none(session, "SELECT * FROM mv")
# update unselected with ts=3, view row should be alive
self.update_view(session, "UPDATE t USING TIMESTAMP 3 SET f=1 WHERE k=1 AND c=1;", flush)
assert_one(session, "SELECT * FROM t", [1, 1, None, None, None, 1])
assert_one(session, "SELECT * FROM mv", [1, 1, None, None])
# insert livenessInfo, view row should be alive
self.update_view(session, "INSERT INTO t(k,c) VALUES(1,1) USING TIMESTAMP 3", flush)
assert_one(session, "SELECT * FROM t", [1, 1, None, None, None, 1])
assert_one(session, "SELECT * FROM mv", [1, 1, None, None])
# remove unselected, view row should be alive because the base livenessInfo is still alive
self.update_view(session, "UPDATE t USING TIMESTAMP 3 SET f=null WHERE k=1 AND c=1;", flush)
assert_one(session, "SELECT * FROM t", [1, 1, None, None, None, None])
assert_one(session, "SELECT * FROM mv", [1, 1, None, None])
# add selected column, view row should be alive
self.update_view(session, "UPDATE t USING TIMESTAMP 3 SET a=1 WHERE k=1 AND c=1;", flush)
assert_one(session, "SELECT * FROM t", [1, 1, 1, None, None, None])
assert_one(session, "SELECT * FROM mv", [1, 1, 1, None])
# update unselected, view row should be alive
self.update_view(session, "UPDATE t USING TIMESTAMP 4 SET f=1 WHERE k=1 AND c=1;", flush)
assert_one(session, "SELECT * FROM t", [1, 1, 1, None, None, 1])
assert_one(session, "SELECT * FROM mv", [1, 1, 1, None])
# delete with ts=3, view row should be alive due to unselected@ts4
self.update_view(session, "DELETE FROM t USING TIMESTAMP 3 WHERE k=1 AND c=1;", flush)
assert_one(session, "SELECT * FROM t", [1, 1, None, None, None, 1])
assert_one(session, "SELECT * FROM mv", [1, 1, None, None])
# remove unselected, view row should be removed
self.update_view(session, "UPDATE t USING TIMESTAMP 4 SET f=null WHERE k=1 AND c=1;", flush)
assert_none(session, "SELECT * FROM t")
assert_none(session, "SELECT * FROM mv")
# add selected with ts=7, view row is alive
self.update_view(session, "UPDATE t USING TIMESTAMP 7 SET b=1 WHERE k=1 AND c=1;", flush)
assert_one(session, "SELECT * FROM t", [1, 1, None, 1, None, None])
assert_one(session, "SELECT * FROM mv", [1, 1, None, 1])
# remove selected with ts=7, view row is dead
self.update_view(session, "UPDATE t USING TIMESTAMP 7 SET b=null WHERE k=1 AND c=1;", flush)
assert_none(session, "SELECT * FROM t")
assert_none(session, "SELECT * FROM mv")
# add selected with ts=5, view row is alive (selected columns should not affect each other)
self.update_view(session, "UPDATE t USING TIMESTAMP 5 SET a=1 WHERE k=1 AND c=1;", flush)
assert_one(session, "SELECT * FROM t", [1, 1, 1, None, None, None])
assert_one(session, "SELECT * FROM mv", [1, 1, 1, None])
start = time.time()
# add selected with ttl=30 (we apparently need a long ttl because the flushing etc that self.update_view does can take a long time)
update_time = self.update_view(session, "UPDATE t USING TTL 30 SET a=1 WHERE k=1 AND c=1;", flush)
try:
assert_one(session, "SELECT * FROM t", [1, 1, 1, None, None, None])
assert_one(session, "SELECT * FROM mv", [1, 1, 1, None])
except AssertionError as ae:
if (time.time() - start) >= 30:
pytest.fail("Please increase the 30 TTL which expired before we could test due to a slow env.")
else:
raise ae
wait_time = update_time + 30 - time.time()
if wait_time > 0:
time.sleep(wait_time)
start = time.time()
# update unselected with ttl=30, view row should be alive
update_time = self.update_view(session, "UPDATE t USING TTL 30 SET f=1 WHERE k=1 AND c=1;", flush)
try:
assert_one(session, "SELECT * FROM t", [1, 1, None, None, None, 1])
assert_one(session, "SELECT * FROM mv", [1, 1, None, None])
except AssertionError as ae:
if (time.time() - start) >= 30:
pytest.fail("Please increase the 30 TTL which expired before we could test due to a slow env.")
else:
raise ae
wait_time = update_time + 30 - time.time()
if wait_time > 0:
time.sleep(wait_time)
# unselected TTL=30 has expired, so both base and view rows should be removed
assert_none(session, "SELECT * FROM t")
assert_none(session, "SELECT * FROM mv") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def propagate_view_creation_over_non_existing_table(self):\n\n cluster = self.cluster\n cluster.populate(3)\n cluster.start()\n node1, node2, node3 = self.cluster.nodelist()\n session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)\n create_ks(session, 'ks', 3)\n\n session.execute('CREATE TABLE users (username varchar PRIMARY KEY, state varchar)')\n\n # create a materialized view only in nodes 1 and 2\n node3.stop(wait_other_notice=True)\n session.execute(('CREATE MATERIALIZED VIEW users_by_state AS '\n 'SELECT * FROM users WHERE state IS NOT NULL AND username IS NOT NULL '\n 'PRIMARY KEY (state, username)'))\n\n # drop the base table only in node 3\n node1.stop(wait_other_notice=True)\n node2.stop(wait_other_notice=True)\n node3.start(wait_for_binary_proto=True)\n session = self.patient_cql_connection(node3, consistency_level=ConsistencyLevel.QUORUM)\n session.execute('DROP TABLE ks.users')\n\n # restart the cluster\n cluster.stop()\n cluster.start()\n\n # node3 should have received and ignored the creation of the MV over the dropped table\n assert node3.grep_log('Not adding view users_by_state because the base table')",
"def _test_base_column_in_view_pk_commutative_tombstone_(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1 = self.cluster.nodelist()[0]\n\n session.execute('USE ks')\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n for node in self.cluster.nodelist():\n node.nodetool(\"disableautocompaction\")\n\n # sstable 1, Set initial values TS=1\n self.update_view(session, \"INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 1\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, 'a', 3.0])\n\n # sstable 2, change v's value and TS=2, tombstones v=1 and adds v=0 record\n self.update_view(session, \"DELETE FROM t USING TIMESTAMP 2 WHERE id = 1;\", flush)\n assert_none(session, \"SELECT * FROM t_by_v\")\n assert_none(session, \"SELECT * FROM t\")\n\n # sstable 3, tombstones of mv created by base deletion should remain.\n self.update_view(session, \"INSERT INTO t (id, v) VALUES (1, 1) USING TIMESTAMP 3\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None])\n\n # sstable 4, shadow view row (id=1, v=1), insert (id=1, v=2, ts=4)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 set v = 2 WHERE id = 1;\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [2, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 2, None, None])\n\n # sstable 5, shadow view row (id=1, v=2), insert (id=1, v=1 ts=5)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 5 set v = 1 WHERE id = 1;\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None]) # data deleted by row-tombstone@2 should not resurrect\n\n if flush:\n self.cluster.compact()\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None]) # data deleted by row-tombstone@2 should not resurrect\n\n # shadow view row (id=1, v=1)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 5 set v = null WHERE id = 1;\", flush)\n assert_none(session, \"SELECT * FROM t_by_v\")\n assert_one(session, \"SELECT * FROM t\", [1, None, None, None])",
"def clear_rows(self):\n ...",
"def get_cols_drop():",
"def refreshView(self):\n chldn = self.tDisp.get_children()\n for chld in chldn:\n self.tDisp.delete(chld)\n if len(self.conflict.infeasibles) > 0:\n self.conflict.recalculateFeasibleStates()\n for infeas in self.conflict.infeasibles:\n key = infeas.name\n self.tDisp.insert('', 'end', key, text=key)\n self.tDisp.set(key, 'state', key)\n self.tDisp.set(key, 'stDes', str(2**(key.count('-'))))\n self.tDisp.set(key, 'stRem', str(infeas.statesRemoved))",
"def _column_selection_change(self):\n selection_index = self._lb_tables.GetSelection()\n if selection_index != -1:\n table_id = self._tables[selection_index][0]\n selected_columns_indexes = [self._columns[i][0] for i in list(self._ckl_columns.GetCheckedItems())]\n database_columns_indexes = [tup[0] for tup in self._datafile.query(sciplot.database.Query(\"SELECT VariableID FROM TableColumn WHERE TableID = (?);\", [table_id], 1))[0]]\n\n to_add = []\n to_remove = []\n\n for i in selected_columns_indexes:\n if i not in database_columns_indexes:\n to_add.append(i)\n \n for i in database_columns_indexes:\n if i not in selected_columns_indexes:\n to_remove.append(i)\n \n queries = []\n for variable_id in to_add:\n queries.append(sciplot.database.Query(\"INSERT INTO TableColumn (TableID, VariableID, FormatPattern) VALUES ((?), (?), (?));\", [table_id, variable_id, \"*.*\"], 0)) #add new column to table with a generic format string\n \n for variable_id in to_remove:\n queries.append(sciplot.database.Query(\"DELETE FROM TableColumn WHERE VariableID = (?);\", [variable_id], 0)) #remove unselected column from the database\n \n self._datafile.query(queries)\n\n self.refresh_table() #update table to reflect the changed columns",
"def bg_reset(self):\n drop_column_idx = 6\n for row_i in range(self.people_table.rowCount()):\n drop_type = self.people_table.item(row_i, drop_column_idx).text()\n bg_qtcolor = background_drop_color(drop_type)\n for col_i in range(self.people_table.columnCount()):\n self.people_table.item(row_i, col_i).setBackground(bg_qtcolor)",
"def GenColsByModel(modelo, indices,tree):\n nCols = 0\n for i in indices:\n if i[2] ==\"boo\":\n render = gtk.CellRendererToggle()\n if len(i) ==4:\n if i[3] != False:\n render.connect('toggled', i[3], modelo) \n elif len(i) ==5:\n if i[3] != False:\n render.connect('toggled', i[3], modelo,i[0]) \n else:\n render.connect('toggled', fixed_toggled, modelo,i[0])\n \n column = gtk.TreeViewColumn(i[1], render, active=i[0])\n if len(i) ==4:\n if i[3] != False:\n column.set_clickable(True)\n column.connect('clicked', column_click_ok,modelo, tree, i[0],nCols)\n else:\n column.set_clickable(True)\n column.connect('clicked', column_click_ok,modelo, tree, i[0],nCols)\n elif i[2] ==\"pboo\":\n render = gtk.CellRendererToggle()\n if len(i) ==4:\n if i[3] != False:\n render.connect('toggled', i[3], modelo) \n elif len(i) ==5:\n if i[3] != False:\n render.connect('toggled', i[3], modelo,i[0][0]) \n else:\n render.connect('toggled', fixed_toggled, modelo,i[0][0])\n \n column = gtk.TreeViewColumn(i[1], render, active=i[0][0])\n if len(i) ==4:\n if i[3] != False:\n column.set_clickable(True)\n column.connect('clicked', column_click_ok,modelo, tree, i[0][0],nCols)\n else:\n column.set_clickable(True)\n column.connect('clicked', column_click_ok,modelo, tree, i[0][0],nCols)\n pix = gtk.CellRendererPixbuf()\n #column = gtk.TreeViewColumn(i[1])\n #pix.set_property('cell-background', 'red')\n column.pack_start(pix, True)\n column.set_attributes(pix, stock_id=i[0][1])\n else:\n if i[2] == \"pix\":\n render = gtk.CellRendererPixbuf()\n else:\n render = gtk.CellRendererText()\n \n if len(i) >= 4:\n if len(i) == 5:\n render.set_property('mode',gtk.CELL_RENDERER_MODE_EDITABLE)\n render.connect(\"editing-started\",edited_cc,i[4])\n if len(i) == 6:\n render.connect(\"edited\",edited_cb,modelo,i[0],i[3],i[5])\n else:\n render.connect(\"edited\",edited_cb,modelo,i[0],i[3])\n render.set_property('editable',True)\n if i[2] == \"pix\":\n column = gtk.TreeViewColumn(i[1])\n column.pack_start(render, False)\n column.set_attributes(render, stock_id=i[0])\n else:\n column = gtk.TreeViewColumn(i[1], render, markup=i[0])\n column.set_resizable(True)\n #column.set_attributes(render,markup=i[0])\n if i[2] ==\"str\":#str\n column.set_cell_data_func(render, columna_utf8, i[0])\n column.set_clickable(True)\n column.connect('clicked', column_click,modelo, tree, i[0],nCols)\n elif i[2] ==\"pstr\":#str\n #column.set_cell_data_func(render, columna_utf8, i[0])\n column.set_clickable(True)\n column.connect('clicked', column_click,modelo, tree, i[0][0],nCols)\n pix = gtk.CellRendererPixbuf()\n #column = gtk.TreeViewColumn(i[1])\n column.pack_start(pix, True)\n column.set_attributes(pix, stock_id=i[0][1])\n elif i[2] ==\"STR\":#str\n #column.set_cell_data_func(render, columna_utf8, i[0])\n column.set_clickable(True)\n column.connect('clicked', column_click,modelo, tree, i[0],nCols)\n elif i[2] ==\"dbl\":#float:\n column.set_cell_data_func(render, columna_real, i[0])\n column.set_clickable(True)\n column.connect('clicked', column_click,modelo, tree, i[0],nCols)\n elif i[2] ==\"int\":\n column.set_cell_data_func(render, columna_numerica, i[0])\n column.set_clickable(True)\n column.connect('clicked', column_click,modelo, tree, i[0],nCols)\n elif i[2] ==\"rut\":\n column.set_cell_data_func(render, columna_rut, i[0])\n column.set_clickable(True)\n column.connect('clicked', column_click,modelo, tree, i[0],nCols)\n \n elif i[2] ==\"dte\":\n column.set_clickable(True)\n column.connect('clicked', column_click,modelo, tree, i[0],nCols)\n 
column.set_cell_data_func(render, columna_fecha, i[0])\n elif i[2] == \"pix\":\n pass\n \n tree.append_column(column)\n nCols = nCols +1 \n \n tree.set_model(modelo)",
"def DEADcreate_v_fix_view():\n sql_view = \"\"\"create or replace view v_fix as\n SELECT \n fix.fix_ident, \n fix.fix_center,\n ST_Y(ST_Transform(fix.fix_center, 4326)) as fix_lat84,\n ST_X(ST_Transform(fix.fix_center, 4326)) as fix_lon84\n \n FROM \n fix\"\"\"\n conf.Cur.execute(sql_view)\n conf.Con.commit()",
"def migrate(cr, version):\n cr.execute(\"\"\"\n update ir_ui_view v\n set inherit_id = NULL, mode='primary'\n from ir_model_data mdata\n where\n v.id = mdata.res_id\n and mdata.model= 'ir.ui.view'\n and mdata.name = 'l10n_ch_swissqr_template'\n and mdata.module='l10n_ch';\n \"\"\")",
"def __init__(self, master, dataframe, edit_this_col=None):\n tk.Frame.__init__(self, master, bd=3, relief=tk.RIDGE)\n self.master = master\n\n# the dataframe\n self.df_orig = dataframe.copy()\n self.df = dataframe\n \n self.b_opt = {'bd':4,'relief':tk.RAISED}\n self.frame_opt = {'bd':2, 'relief':tk.RIDGE}\n\n self.topframe = tk.Frame( self) #, width=300, height=200)\n self.topframe.pack(expand=tk.YES, fill=tk.BOTH) #fill=tk.BOTH,expand=tk.YES)\n\n self.working_frame = tk.Frame( self.topframe) #, width=300, height=200)\n self.working_frame.pack()\n\n# option menu for selection of dataframe column to resolve\n self.init_lab = tk.Label(self.working_frame,text='Select a column to edit', foreground='white', background='darkgreen')\n \n \n############################################################################ \n #self.opt_var = tk.StringVar(self.working_frame)\n \n #self.opt = tk.OptionMenu( self.working_frame, self.opt_var, *list(self.df) )\n #self.opt_var.set(list(self.df)[0])\n\n self.opt_button = tk.Button( self.working_frame, text='select', command=self.CMD_select_col)\n\n if edit_this_col is not None:\n if edit_this_col in self.df:\n #self.opt_var.set(edit_this_col)\n self.the_selected_col = edit_this_col\n self._col_select()\n else:\n raise ValueError\n else:\n# make button for selecting column and spawning the next set of widgets\n self.the_selected_col = list( self.df)[0]\n self.sel_b = tk.Button(self.working_frame, text='Select', command = self._col_select )\n self._grid_init()",
"def test_view_delete_with_scope(self):\n table = Table(\n {\"id\": int, \"msg\": str, \"val\": float},\n index=\"id\",\n )\n table.view(\n computed_columns=[\n {\n \"column\": \"inverted\",\n \"computed_function_name\": \"invert\",\n \"inputs\": [\"val\"],\n }\n ],\n columns=[\"inverted\"],\n )\n table.update(\n [\n {\n \"id\": 1,\n \"msg\": \"test\",\n \"val\": 1.0,\n }\n ]\n )",
"def _rewrite_project(self, node: saldag.Project):\n\n selected_cols = node.selected_cols\n\n for in_col, out_col in zip(selected_cols, node.out_rel.columns):\n out_col.coll_sets |= copy.deepcopy(in_col.coll_sets)",
"def clean_table(self):\n return False",
"def _non_listed_ea_columns_check():\n for ea_row in unused_list:\n # dup Check in disposition\n ddi_index = views_index[ea_row[15]]\n for key, value in ea_index.items():\n # ea attributes that could be listed.\n if key == 'Datacenter' or key == 'IPR Designation':\n continue\n # Checks for empty src value and empty ddi data value.\n # Continues if True.\n if key not in ddi_data[ddi_index][ea_row[1]]['extattrs'] and \\\n ea_row[value] in ['', 'DDI']:\n continue\n # Checks a non-empty src value and updates if an\n # empty ddi data value.\n if key not in ddi_data[ddi_index][ea_row[1]]['extattrs'] \\\n and ea_row[value] not in ['', 'DDI']:\n import_merge.append([ea_row[15],\n ea_row[1],\n ea_row[14],\n {key: ea_row[value]}])\n continue\n # Checks diff against src value and a populated value in the\n # ddi data and replaces with src value.\n if ea_row[value] != \\\n ddi_data[ddi_index][\n ea_row[1]]['extattrs'][key]['value']:\n import_override.append([ea_row[15],\n ea_row[1],\n ea_row[14],\n {key: ea_row[value]}])\n continue",
"def column_selection_change():\n d = curdoc()\n _remove_fig(d)\n model_id, message_name, _ = run_handlers.get_modelid_messagename_type(d)\n sind = run_handlers.get_source_index(d.session_context.id, model_id, message_name)\n source = d.get_model_by_name(sind)\n _install_callback_and_cds(sind, model_id, message_name, stream_limit=1)\n sel_cols = d.get_model_by_name(COLUMN_MULTISELECT).value\n columns = [ TableColumn(field=c, title=c) for c in sel_cols ]\n data_table = DataTable(source=source, columns=columns, width=500, height=500)\n table_widget = widgetbox(data_table, name=FIGURE_MODEL)\n d.add_root(table_widget)",
"def Redraw(self):\n print(\"EMPTY VIEW REDRAW\")",
"def add_climatology_cols(df):\n return df",
"def delete_selected_rows(self):\n self._export_mode = 'delete'\n self._counter_update_data += 1",
"def remove_intermediate_columns(dataframe):\n\n combined_dataframe_dropped_cols = dataframe.drop(columns = ['measureland_qualifier_flag_speed',\n 'measureland_qualifier_flag_distance',\n 'measureland_qualifier_flag_acceleration',\n 'measureland_qualifier_flag_visual'])\n\n print(\"Dimensions of combined dataframe after dropping columns:\", combined_dataframe_dropped_cols.shape)\n print(\"Combined dataframe after dropping columns: \", combined_dataframe_dropped_cols.sample(10))\n\n return combined_dataframe_dropped_cols",
"def set_xviews(self, *args):\n self.xview(*args)\n self.tablecolheader.xview(*args)\n self.redrawVisible()\n return",
"def test_drop_column(self):\n session = self.prepare(user_table=True)\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 1, \"Expecting {} materialized view, got {}\".format(1, len(result))\n\n assert_invalid(\n session,\n \"ALTER TABLE ks.users DROP state;\",\n \"Cannot drop column state on base table with materialized views.\"\n )",
"def __init__(self, parent, columns, single_sel=True, virtual_list=None):\n\n if virtual_list is None:\n virtual_list = []\n\n flags = wx.LC_REPORT | wx.LC_VIRTUAL\n\n if single_sel:\n flags |= wx.LC_SINGLE_SEL\n\n super().__init__(\n parent, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize,\n style=flags\n )\n if not single_sel:\n # Select all\n self.set_keybindings(\n [\n (wx.ACCEL_CMD if util.platform() == \"macos\" else wx.ACCEL_CTRL, ord('A'), self.select_all)\n ]\n )\n self.Bind(wx.EVT_SYS_COLOUR_CHANGED, self.on_color_change)\n\n self.hidden_columns = set()\n self.main_window = self.GetParent().GetParent().GetParent().GetParent()\n self.sort_init = True\n self.complete = False\n self.resize_complete = False\n self.wait = DummyLock()\n self.column_count = len(columns)\n self.col2virt = {x: x for x in range(self.column_count)}\n self.virt2col = {v: k for k, v in self.col2virt.items()}\n self.setup_virtual(virtual_list)\n self.headers = columns\n self.itemDataMap = OrderedDict()\n self.first_resize = True\n self.size_sample = COLUMN_SAMPLE_SIZE\n self.widest_cell = [MINIMUM_COL_SIZE] * self.column_count\n self.dc = wx.ClientDC(self)\n self.dc.SetFont(self.GetFont())\n self.last_idx_sized = -1\n self.update_colors()\n self.setup_columns()\n self.itemIndexMap = []",
"def refresh_column_list(self):\n selection = self._ckl_columns.GetSelection() #store the index of the selected column so it can be reselected after the column is refreshed\n checked_items = self._ckl_columns.GetCheckedItems()\n\n self._ckl_columns.Clear() #clear UI ready for the updated column list\n self._columns.clear()\n\n variables = self._datafile.query(sciplot.database.Query(\"SELECT Symbol, VariableID FROM Variable\", [], 1))[0] #get all variables from database\n for variable_str, variable_id in variables:\n self._ckl_columns.Append(variable_str) #add to the list of columns\n self._columns.append((variable_id, variable_str))\n \n if selection != -1: #reselect selections that were unselected when all of the elements were removed\n self._ckl_columns.SetSelection(selection)\n self._ckl_columns.SetCheckedItems(checked_items)",
"def clean(df):",
"def _modify_columns(self, cols, X, y=None):",
"def cleanTable(self):\n self.currentGroup = None",
"def OnMouse(self, event):\r\n\r\n # we want to work with logical coords\r\n x, dummy = self._owner.CalcUnscrolledPosition(event.GetX(), 0)\r\n y = event.GetY()\r\n\r\n if event.Moving():\r\n \r\n col = self.XToCol(x)\r\n if col != self._hotTrackCol:\r\n \r\n # Refresh the col header so it will be painted with hot tracking\r\n # (if supported by the native renderer.)\r\n self.RefreshColLabel(col)\r\n\r\n # Also refresh the old hot header\r\n if self._hotTrackCol >= 0:\r\n self.RefreshColLabel(self._hotTrackCol)\r\n\r\n self._hotTrackCol = col\r\n \r\n if event.Leaving() and self._hotTrackCol >= 0:\r\n \r\n # Leaving the window so clear any hot tracking indicator that may be present\r\n self.RefreshColLabel(self._hotTrackCol)\r\n self._hotTrackCol = -1\r\n \r\n if self._isDragging:\r\n\r\n self.SendListEvent(wx.wxEVT_COMMAND_LIST_COL_DRAGGING, event.GetPosition())\r\n\r\n # we don't draw the line beyond our window, but we allow dragging it\r\n # there\r\n w, dummy = self.GetClientSize()\r\n w, dummy = self._owner.CalcUnscrolledPosition(w, 0)\r\n w -= 6\r\n\r\n # erase the line if it was drawn\r\n if self._currentX < w:\r\n self.DrawCurrent()\r\n\r\n if event.ButtonUp():\r\n self._isDragging = False\r\n if self.HasCapture():\r\n self.ReleaseMouse()\r\n self._dirty = True\r\n self.SetColumnWidth(self._column, self._currentX - self._minX)\r\n self.Refresh()\r\n self.SendListEvent(wx.wxEVT_COMMAND_LIST_COL_END_DRAG, event.GetPosition())\r\n else:\r\n self._currentX = max(self._minX + 7, x)\r\n\r\n # draw in the new location\r\n if self._currentX < w:\r\n self.DrawCurrent()\r\n \r\n else: # not dragging\r\n\r\n self._minX = 0\r\n hit_border = False\r\n\r\n # end of the current column\r\n xpos = 0\r\n\r\n # find the column where this event occured\r\n countCol = self.GetColumnCount()\r\n\r\n for column in xrange(countCol):\r\n\r\n if not self.IsColumnShown(column):\r\n continue # do next if not shown\r\n\r\n xpos += self.GetColumnWidth(column)\r\n self._column = column\r\n if abs (x-xpos) < 3 and y < 22:\r\n # near the column border\r\n hit_border = True\r\n break\r\n \r\n if x < xpos:\r\n # inside the column\r\n break\r\n \r\n self._minX = xpos\r\n \r\n if event.LeftDown() or event.RightUp():\r\n if hit_border and event.LeftDown():\r\n self._isDragging = True\r\n self.CaptureMouse()\r\n self._currentX = x\r\n self.DrawCurrent()\r\n self.SendListEvent(wx.wxEVT_COMMAND_LIST_COL_BEGIN_DRAG, event.GetPosition())\r\n else: # click on a column\r\n evt = (event.LeftDown() and [wx.wxEVT_COMMAND_LIST_COL_CLICK] or [wx.wxEVT_COMMAND_LIST_COL_RIGHT_CLICK])[0]\r\n self.SendListEvent(evt, event.GetPosition())\r\n \r\n elif event.LeftDClick() and hit_border:\r\n self.SetColumnWidth(self._column, self._owner.GetBestColumnWidth(self._column))\r\n self.Refresh()\r\n\r\n elif event.Moving():\r\n \r\n if hit_border:\r\n setCursor = self._currentCursor == wx.STANDARD_CURSOR\r\n self._currentCursor = self._resizeCursor\r\n else:\r\n setCursor = self._currentCursor != wx.STANDARD_CURSOR\r\n self._currentCursor = wx.STANDARD_CURSOR\r\n \r\n if setCursor:\r\n self.SetCursor(self._currentCursor)",
"def setAllColumns(self, newAllColumns):\n \n pass",
"def expand_cat_cols_rmv_cols(df,rmv=None,exclude=None):\n\n cols=get_non_num_cols(df)\n for col in cols:\n if exclude==None or( not exclude == None and not col in exclude):\n vals = df[col].unique()\n for val in vals:\n new_col_name=col + \"_\" +val\n df[new_col_name]=[int(i ==val) for i in df.loc[:,col].to_list()]\n print(col)\n if not rmv ==None and col in rmv:\n del df[col]"
]
| [
"0.5627029",
"0.54265463",
"0.54173553",
"0.5336454",
"0.5321225",
"0.5315297",
"0.5305727",
"0.5303478",
"0.52973366",
"0.51968604",
"0.51912946",
"0.51831746",
"0.5162054",
"0.5120009",
"0.51169086",
"0.5116364",
"0.5103139",
"0.50465274",
"0.5032975",
"0.50322765",
"0.50301975",
"0.5016676",
"0.50104374",
"0.5007379",
"0.49969614",
"0.4986166",
"0.4981294",
"0.4963312",
"0.4963304",
"0.49629116"
]
| 0.592299 | 0 |
Able to shadow old view row with column ts greater than pk's ts and reinsert the view row; able to shadow old view row with column ts smaller than pk's ts and reinsert the view row. CASSANDRA-11500 | def _test_base_column_in_view_pk_complex_timestamp(self, flush):
session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
node1, node2, node3 = self.cluster.nodelist()
session.execute('USE ks')
session.execute("CREATE TABLE t (k int PRIMARY KEY, a int, b int)")
session.execute(("CREATE MATERIALIZED VIEW mv AS SELECT * FROM t "
"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)"))
session.cluster.control_connection.wait_for_schema_agreement()
# Set initial values TS=1
self.update_view(session, "INSERT INTO t (k, a, b) VALUES (1, 1, 1) USING TIMESTAMP 1;", flush)
assert_one(session, "SELECT * FROM t", [1, 1, 1])
assert_one(session, "SELECT * FROM mv", [1, 1, 1])
# increase b ts to 10
self.update_view(session, "UPDATE t USING TIMESTAMP 10 SET b = 2 WHERE k = 1;", flush)
assert_one(session, "SELECT k,a,b,writetime(b) FROM t", [1, 1, 2, 10])
assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 1, 2, 10])
# switch entries. shadow a = 1, insert a = 2
self.update_view(session, "UPDATE t USING TIMESTAMP 2 SET a = 2 WHERE k = 1;", flush)
assert_one(session, "SELECT k,a,b,writetime(b) FROM t", [1, 2, 2, 10])
assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 2, 2, 10])
# switch entries. shadow a = 2, insert a = 1
self.update_view(session, "UPDATE t USING TIMESTAMP 3 SET a = 1 WHERE k = 1;", flush)
assert_one(session, "SELECT k,a,b,writetime(b) FROM t", [1, 1, 2, 10])
assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 1, 2, 10])
# switch entries. shadow a = 1, insert a = 2
self.update_view(session, "UPDATE t USING TIMESTAMP 4 SET a = 2 WHERE k = 1;", flush, compact=True)
assert_one(session, "SELECT k,a,b,writetime(b) FROM t", [1, 2, 2, 10])
assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 2, 2, 10])
# able to shadow view row even if base-column in view pk's ts is smaller than row timestamp
# set row TS = 20, a@6, b@20
self.update_view(session, "DELETE FROM t USING TIMESTAMP 5 where k = 1;", flush)
assert_one(session, "SELECT k,a,b,writetime(b) FROM t", [1, None, 2, 10])
assert_none(session, "SELECT k,a,b,writetime(b) FROM mv")
self.update_view(session, "INSERT INTO t (k, a, b) VALUES (1, 1, 1) USING TIMESTAMP 6;", flush)
assert_one(session, "SELECT k,a,b,writetime(b) FROM t", [1, 1, 2, 10])
assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 1, 2, 10])
self.update_view(session, "INSERT INTO t (k, b) VALUES (1, 1) USING TIMESTAMP 20;", flush)
assert_one(session, "SELECT k,a,b,writetime(b) FROM t", [1, 1, 1, 20])
assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 1, 1, 20])
# switch entries. shadow a = 1, insert a = 2
self.update_view(session, "UPDATE t USING TIMESTAMP 7 SET a = 2 WHERE k = 1;", flush)
assert_one(session, "SELECT k,a,b,writetime(a),writetime(b) FROM t", [1, 2, 1, 7, 20])
assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 2, 1, 20])
# switch entries. shadow a = 2, insert a = 1
self.update_view(session, "UPDATE t USING TIMESTAMP 8 SET a = 1 WHERE k = 1;", flush)
assert_one(session, "SELECT k,a,b,writetime(a),writetime(b) FROM t", [1, 1, 1, 8, 20])
assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 1, 1, 20])
# create another view row
self.update_view(session, "INSERT INTO t (k, a, b) VALUES (2, 2, 2);", flush)
assert_one(session, "SELECT k,a,b FROM t WHERE k = 2", [2, 2, 2])
assert_one(session, "SELECT k,a,b FROM mv WHERE k = 2", [2, 2, 2])
# stop node2, node3
logger.debug('Shutdown node2')
node2.stop(wait_other_notice=True)
logger.debug('Shutdown node3')
node3.stop(wait_other_notice=True)
# shadow a = 1, create a = 2
query = SimpleStatement("UPDATE t USING TIMESTAMP 9 SET a = 2 WHERE k = 1", consistency_level=ConsistencyLevel.ONE)
self.update_view(session, query, flush)
        # shadow (a=2, k=2) after 3 seconds
query = SimpleStatement("UPDATE t USING TTL 3 SET a = 2 WHERE k = 2", consistency_level=ConsistencyLevel.ONE)
self.update_view(session, query, flush)
logger.debug('Starting node2')
node2.start(wait_for_binary_proto=True)
logger.debug('Starting node3')
node3.start(wait_for_binary_proto=True)
# For k = 1 & a = 1, We should get a digest mismatch of tombstones and repaired
query = SimpleStatement("SELECT * FROM mv WHERE k = 1 AND a = 1", consistency_level=ConsistencyLevel.ALL)
result = session.execute(query, trace=True)
self.check_trace_events(result.get_query_trace(), True)
assert 0 == len(result.current_rows)
# For k = 1 & a = 1, second time no digest mismatch
result = session.execute(query, trace=True)
self.check_trace_events(result.get_query_trace(), False)
assert_none(session, "SELECT * FROM mv WHERE k = 1 AND a = 1")
assert 0 == len(result.current_rows)
# For k = 1 & a = 2, We should get a digest mismatch of data and repaired for a = 2
query = SimpleStatement("SELECT * FROM mv WHERE k = 1 AND a = 2", consistency_level=ConsistencyLevel.ALL)
result = session.execute(query, trace=True)
self.check_trace_events(result.get_query_trace(), True)
assert 1 == len(result.current_rows)
# For k = 1 & a = 2, second time no digest mismatch
result = session.execute(query, trace=True)
self.check_trace_events(result.get_query_trace(), False)
assert 1 == len(result.current_rows)
assert_one(session, "SELECT k,a,b,writetime(b) FROM mv WHERE k = 1", [1, 2, 1, 20])
time.sleep(3)
# For k = 2 & a = 2, We should get a digest mismatch of expired and repaired
query = SimpleStatement("SELECT * FROM mv WHERE k = 2 AND a = 2", consistency_level=ConsistencyLevel.ALL)
result = session.execute(query, trace=True)
self.check_trace_events(result.get_query_trace(), True)
logger.debug(result.current_rows)
assert 0 == len(result.current_rows)
# For k = 2 & a = 2, second time no digest mismatch
result = session.execute(query, trace=True)
self.check_trace_events(result.get_query_trace(), False)
assert 0 == len(result.current_rows) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _test_no_base_column_in_view_pk_complex_timestamp(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1, node2, node3 = self.cluster.nodelist()\n\n session.execute('USE ks')\n session.execute(\"CREATE TABLE t (k int, c int, a int, b int, e int, f int, primary key(k, c))\")\n session.execute((\"CREATE MATERIALIZED VIEW mv AS SELECT k,c,a,b FROM t \"\n \"WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c, k)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n # update unselected, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 1 SET e=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, 1, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # remove unselected, add selected column, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 2 SET e=null, b=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, 1, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, 1])\n\n # remove selected column, view row is removed\n self.update_view(session, \"UPDATE t USING TIMESTAMP 2 SET e=null, b=null WHERE k=1 AND c=1;\", flush)\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")\n\n # update unselected with ts=3, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET f=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # insert livenesssInfo, view row should be alive\n self.update_view(session, \"INSERT INTO t(k,c) VALUES(1,1) USING TIMESTAMP 3\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # remove unselected, view row should be alive because of base livenessInfo alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET f=null WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # add selected column, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET a=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n\n # update unselected, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 SET f=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n\n # delete with ts=3, view row should be alive due to unselected@ts4\n self.update_view(session, \"DELETE FROM t USING TIMESTAMP 3 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # remove unselected, view row should be removed\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 SET f=null WHERE k=1 AND c=1;\", flush)\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")\n\n # add selected with ts=7, view row is alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 7 SET b=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, 
\"SELECT * FROM t\", [1, 1, None, 1, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, 1])\n\n # remove selected with ts=7, view row is dead\n self.update_view(session, \"UPDATE t USING TIMESTAMP 7 SET b=null WHERE k=1 AND c=1;\", flush)\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")\n\n # add selected with ts=5, view row is alive (selected column should not affects each other)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 5 SET a=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n\n start = time.time()\n # add selected with ttl=30 (we apparently need a long ttl because the flushing etc that self.update_view does can take a long time)\n update_time = self.update_view(session, \"UPDATE t USING TTL 30 SET a=1 WHERE k=1 AND c=1;\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n except AssertionError as ae:\n if (time.time() - start) >= 30:\n pytest.fail(\"Please increase the 30 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n wait_time = update_time + 30 - time.time()\n if wait_time > 0:\n time.sleep(wait_time)\n\n start = time.time()\n # update unselected with ttl=30, view row should be alive\n update_time = self.update_view(session, \"UPDATE t USING TTL 30 SET f=1 WHERE k=1 AND c=1;\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n except AssertionError as ae:\n if (time.time() - start) >= 30:\n pytest.fail(\"Please increase the 30 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n wait_time = update_time + 30 - time.time()\n if wait_time > 0:\n time.sleep(wait_time)\n\n # view row still alive due to base livenessInfo\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")",
"def _test_base_column_in_view_pk_commutative_tombstone_(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1 = self.cluster.nodelist()[0]\n\n session.execute('USE ks')\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n for node in self.cluster.nodelist():\n node.nodetool(\"disableautocompaction\")\n\n # sstable 1, Set initial values TS=1\n self.update_view(session, \"INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 1\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, 'a', 3.0])\n\n # sstable 2, change v's value and TS=2, tombstones v=1 and adds v=0 record\n self.update_view(session, \"DELETE FROM t USING TIMESTAMP 2 WHERE id = 1;\", flush)\n assert_none(session, \"SELECT * FROM t_by_v\")\n assert_none(session, \"SELECT * FROM t\")\n\n # sstable 3, tombstones of mv created by base deletion should remain.\n self.update_view(session, \"INSERT INTO t (id, v) VALUES (1, 1) USING TIMESTAMP 3\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None])\n\n # sstable 4, shadow view row (id=1, v=1), insert (id=1, v=2, ts=4)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 set v = 2 WHERE id = 1;\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [2, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 2, None, None])\n\n # sstable 5, shadow view row (id=1, v=2), insert (id=1, v=1 ts=5)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 5 set v = 1 WHERE id = 1;\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None]) # data deleted by row-tombstone@2 should not resurrect\n\n if flush:\n self.cluster.compact()\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None]) # data deleted by row-tombstone@2 should not resurrect\n\n # shadow view row (id=1, v=1)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 5 set v = null WHERE id = 1;\", flush)\n assert_none(session, \"SELECT * FROM t_by_v\")\n assert_one(session, \"SELECT * FROM t\", [1, None, None, None])",
"def test_view_tombstone(self):\n\n self.prepare(rf=3, options={'hinted_handoff_enabled': False})\n node1, node2, node3 = self.cluster.nodelist()\n\n session = self.patient_exclusive_cql_connection(node1)\n session.max_trace_wait = 120\n session.execute('USE ks')\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n # Set initial values TS=0, verify\n session.execute(SimpleStatement(\"INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 0\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'a', 3.0]\n )\n\n session.execute(SimpleStatement(\"INSERT INTO t (id, v2) VALUES (1, 'b') USING TIMESTAMP 1\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0]\n )\n\n # change v's value and TS=3, tombstones v=1 and adds v=0 record\n session.execute(SimpleStatement(\"UPDATE t USING TIMESTAMP 3 SET v = 0 WHERE id = 1\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n\n assert_none(session, \"SELECT * FROM t_by_v WHERE v = 1\")\n\n logger.debug('Shutdown node2')\n node2.stop(wait_other_notice=True)\n\n session.execute(SimpleStatement(\"UPDATE t USING TIMESTAMP 4 SET v = 1 WHERE id = 1\",\n consistency_level=ConsistencyLevel.QUORUM))\n self._replay_batchlogs()\n\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0]\n )\n\n node2.start(wait_for_binary_proto=True)\n\n # We should get a digest mismatch\n query = SimpleStatement(\"SELECT * FROM t_by_v WHERE v = 1\",\n consistency_level=ConsistencyLevel.ALL)\n\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), True)\n\n # We should not get a digest mismatch the second time\n query = SimpleStatement(\"SELECT * FROM t_by_v WHERE v = 1\", consistency_level=ConsistencyLevel.ALL)\n\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n\n # Verify values one last time\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0],\n cl=ConsistencyLevel.ALL\n )",
"def test_lwt(self):\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"Inserting initial data using IF NOT EXISTS\")\n for i in range(1000):\n session.execute(\n \"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS\".format(v=i)\n )\n self._replay_batchlogs()\n\n logger.debug(\"All rows should have been inserted\")\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )\n\n logger.debug(\"Tyring to UpInsert data with a different value using IF NOT EXISTS\")\n for i in range(1000):\n v = i * 2\n session.execute(\n \"INSERT INTO t (id, v, v2, v3) VALUES ({id}, {v}, 'a', 3.0) IF NOT EXISTS\".format(id=i, v=v)\n )\n self._replay_batchlogs()\n\n logger.debug(\"No rows should have changed\")\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )\n\n logger.debug(\"Update the 10 first rows with a different value\")\n for i in range(1000):\n v = i + 2000\n session.execute(\n \"UPDATE t SET v={v} WHERE id = {id} IF v < 10\".format(id=i, v=v)\n )\n self._replay_batchlogs()\n\n logger.debug(\"Verify that only the 10 first rows changed.\")\n results = list(session.execute(\"SELECT * FROM t_by_v;\"))\n assert len(results) == 1000\n for i in range(1000):\n v = i + 2000 if i < 10 else i\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(v),\n [v, i, 'a', 3.0]\n )\n\n logger.debug(\"Deleting the first 10 rows\")\n for i in range(1000):\n v = i + 2000\n session.execute(\n \"DELETE FROM t WHERE id = {id} IF v = {v} \".format(id=i, v=v)\n )\n self._replay_batchlogs()\n\n logger.debug(\"Verify that only the 10 first rows have been deleted.\")\n results = list(session.execute(\"SELECT * FROM t_by_v;\"))\n assert len(results) == 990\n for i in range(10, 1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )",
"def _test_mv_with_default_ttl(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1, node2, node3 = self.cluster.nodelist()\n session.execute('USE ks')\n\n logger.debug(\"MV with same key and unselected columns\")\n session.execute(\"CREATE TABLE t2 (k int, a int, b int, c int, primary key(k, a)) with default_time_to_live=600\")\n session.execute((\"CREATE MATERIALIZED VIEW mv2 AS SELECT k,a,b FROM t2 \"\n \"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (a, k)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n self.update_view(session, \"UPDATE t2 SET c=1 WHERE k=1 AND a=1;\", flush)\n assert_one(session, \"SELECT k,a,b,c FROM t2\", [1, 1, None, 1])\n assert_one(session, \"SELECT k,a,b FROM mv2\", [1, 1, None])\n\n self.update_view(session, \"UPDATE t2 SET c=null WHERE k=1 AND a=1;\", flush)\n assert_none(session, \"SELECT k,a,b,c FROM t2\")\n assert_none(session, \"SELECT k,a,b FROM mv2\")\n\n self.update_view(session, \"UPDATE t2 SET c=2 WHERE k=1 AND a=1;\", flush)\n assert_one(session, \"SELECT k,a,b,c FROM t2\", [1, 1, None, 2])\n assert_one(session, \"SELECT k,a,b FROM mv2\", [1, 1, None])\n\n self.update_view(session, \"DELETE c FROM t2 WHERE k=1 AND a=1;\", flush)\n assert_none(session, \"SELECT k,a,b,c FROM t2\")\n assert_none(session, \"SELECT k,a,b FROM mv2\")\n\n if flush:\n self.cluster.compact()\n assert_none(session, \"SELECT * FROM t2\")\n assert_none(session, \"SELECT * FROM mv2\")\n\n # test with user-provided ttl\n self.update_view(session, \"INSERT INTO t2(k,a,b,c) VALUES(2,2,2,2) USING TTL 5\", flush)\n self.update_view(session, \"UPDATE t2 USING TTL 100 SET c=1 WHERE k=2 AND a=2;\", flush)\n self.update_view(session, \"UPDATE t2 USING TTL 50 SET c=2 WHERE k=2 AND a=2;\", flush)\n self.update_view(session, \"DELETE c FROM t2 WHERE k=2 AND a=2;\", flush)\n\n time.sleep(5)\n\n assert_none(session, \"SELECT k,a,b,c FROM t2\")\n assert_none(session, \"SELECT k,a,b FROM mv2\")\n\n if flush:\n self.cluster.compact()\n assert_none(session, \"SELECT * FROM t2\")\n assert_none(session, \"SELECT * FROM mv2\")\n\n logger.debug(\"MV with extra key\")\n session.execute(\"CREATE TABLE t (k int PRIMARY KEY, a int, b int) with default_time_to_live=600\")\n session.execute((\"CREATE MATERIALIZED VIEW mv AS SELECT * FROM t \"\n \"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 1, 1);\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1])\n\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 2, 1);\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 2, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 2, 1])\n\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 3, 1);\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 3, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 3, 1])\n\n if flush:\n self.cluster.compact()\n assert_one(session, \"SELECT * FROM t\", [1, 3, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 3, 1])\n\n # user provided ttl\n start = time.time()\n self.update_view(session, \"UPDATE t USING TTL 100 SET a = 4 WHERE k = 1\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 4, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 4, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 
100:\n pytest.fail(\"Please increase the 100 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n start = time.time()\n self.update_view(session, \"UPDATE t USING TTL 80 SET a = 5 WHERE k = 1\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 5, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 5, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 80:\n pytest.fail(\"Please increase the 80 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n start = time.time()\n self.update_view(session, \"UPDATE t USING TTL 60 SET a = 6 WHERE k = 1\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 6, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 6, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 60:\n pytest.fail(\"Please increase the 60 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n if flush:\n self.cluster.compact()\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 6, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 6, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 60:\n pytest.fail(\"Please increase the 60 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae",
"def test_populate_mv_after_insert_wide_rows(self):\n session = self.prepare(consistency_level=ConsistencyLevel.QUORUM)\n\n session.execute(\"CREATE TABLE t (id int, v int, PRIMARY KEY (id, v))\")\n session.cluster.control_connection.wait_for_schema_agreement()\n\n for i in range(5):\n for j in range(10000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({}, {})\".format(i, j))\n\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL \"\n \"AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug(\"wait for view to build\")\n self._wait_for_view(\"ks\", \"t_by_v\")\n\n logger.debug(\"wait that all batchlogs are replayed\")\n self._replay_batchlogs()\n for i in range(5):\n for j in range(10000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} AND v = {}\".format(i, j), [j, i])",
"def upgrade():\n op.create_table(\n \"dag_run_note\",\n sa.Column(\"user_id\", sa.Integer(), nullable=True),\n sa.Column(\"dag_run_id\", sa.Integer(), nullable=False),\n sa.Column(\n \"content\", sa.String(length=1000).with_variant(sa.Text(length=1000), \"mysql\"), nullable=True\n ),\n sa.Column(\"created_at\", UtcDateTime(timezone=True), nullable=False),\n sa.Column(\"updated_at\", UtcDateTime(timezone=True), nullable=False),\n sa.ForeignKeyConstraint(\n (\"dag_run_id\",), [\"dag_run.id\"], name=\"dag_run_note_dr_fkey\", ondelete=\"CASCADE\"\n ),\n sa.ForeignKeyConstraint((\"user_id\",), [\"ab_user.id\"], name=\"dag_run_note_user_fkey\"),\n sa.PrimaryKeyConstraint(\"dag_run_id\", name=op.f(\"dag_run_note_pkey\")),\n )\n\n op.create_table(\n \"task_instance_note\",\n sa.Column(\"user_id\", sa.Integer(), nullable=True),\n sa.Column(\"task_id\", StringID(), nullable=False),\n sa.Column(\"dag_id\", StringID(), nullable=False),\n sa.Column(\"run_id\", StringID(), nullable=False),\n sa.Column(\"map_index\", sa.Integer(), nullable=False),\n sa.Column(\n \"content\", sa.String(length=1000).with_variant(sa.Text(length=1000), \"mysql\"), nullable=True\n ),\n sa.Column(\"created_at\", UtcDateTime(timezone=True), nullable=False),\n sa.Column(\"updated_at\", UtcDateTime(timezone=True), nullable=False),\n sa.PrimaryKeyConstraint(\n \"task_id\", \"dag_id\", \"run_id\", \"map_index\", name=op.f(\"task_instance_note_pkey\")\n ),\n sa.ForeignKeyConstraint(\n (\"dag_id\", \"task_id\", \"run_id\", \"map_index\"),\n [\n \"task_instance.dag_id\",\n \"task_instance.task_id\",\n \"task_instance.run_id\",\n \"task_instance.map_index\",\n ],\n name=\"task_instance_note_ti_fkey\",\n ondelete=\"CASCADE\",\n ),\n sa.ForeignKeyConstraint((\"user_id\",), [\"ab_user.id\"], name=\"task_instance_note_user_fkey\"),\n )",
"def fix_incremental(meta, bind):\n meta.create_all(bind=bind, checkfirst=True)\n ref = inspect(bind)\n for table in meta.sorted_tables:\n orm_cols = set(col.name for col in table.c)\n ref_cols = set(col['name'] for col in ref.get_columns(table.name))\n col_to_create = orm_cols - ref_cols\n col_to_delete = ref_cols - orm_cols\n if col_to_create:\n print table.name, 'has diff to create', col_to_create\n with bind.begin() as conn:\n for col_name in col_to_create:\n col = table.c.get(col_name)\n column_sql = CreateColumn(col).compile(bind).string\n sql = 'ALTER TABLE {} ADD COLUMN {}'.format(table.name, column_sql)\n if col.default:\n sql += ' DEFAULT {!r}'.format(col.default.arg) # can break when a pickle type has callable default.\n if not col.nullable:\n sql += ' NOT NULL'\n print 'executing sql: ' + sql\n conn.execute(sql)\n\n # Workaround to ensure updated DBs start with \"False\" in ignore column\n if list(col_to_create)[0] == 'ignore':\n sessionmaker = get_sessionmaker(bind.url.database)\n session = sessionmaker()\n query_object = {'dttrialdff0s': DTTrialDff0, 'trials': Trial}[table.name]\n items = session.query(query_object).all()\n for item in items:\n item.ignore = False\n session.flush()\n\n if col_to_delete:\n print table.name, 'has diff to delete', col_to_delete, 'maybe later version.'\n \"\"\"\n BEGIN TRANSACTION;\n CREATE TEMPORARY TABLE t1_backup(a,b);\n INSERT INTO t1_backup SELECT a,b FROM t1;\n DROP TABLE t1;\n CREATE TABLE t1(a,b);\n INSERT INTO t1 SELECT a,b FROM t1_backup;\n DROP TABLE t1_backup;\n COMMIT;\n \"\"\"",
"def reindex(self):",
"def reindex(self):",
"def propagate_view_creation_over_non_existing_table(self):\n\n cluster = self.cluster\n cluster.populate(3)\n cluster.start()\n node1, node2, node3 = self.cluster.nodelist()\n session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)\n create_ks(session, 'ks', 3)\n\n session.execute('CREATE TABLE users (username varchar PRIMARY KEY, state varchar)')\n\n # create a materialized view only in nodes 1 and 2\n node3.stop(wait_other_notice=True)\n session.execute(('CREATE MATERIALIZED VIEW users_by_state AS '\n 'SELECT * FROM users WHERE state IS NOT NULL AND username IS NOT NULL '\n 'PRIMARY KEY (state, username)'))\n\n # drop the base table only in node 3\n node1.stop(wait_other_notice=True)\n node2.stop(wait_other_notice=True)\n node3.start(wait_for_binary_proto=True)\n session = self.patient_cql_connection(node3, consistency_level=ConsistencyLevel.QUORUM)\n session.execute('DROP TABLE ks.users')\n\n # restart the cluster\n cluster.stop()\n cluster.start()\n\n # node3 should have received and ignored the creation of the MV over the dropped table\n assert node3.grep_log('Not adding view users_by_state because the base table')",
"def before_update(mapper, conn, target):\n\n if target.id_ is None:\n\n if target.table:\n table_on = ObjectNumber.parse(target.table.vid)\n else:\n table_on = ObjectNumber.parse(target.t_vid)\n\n if not target.vid:\n target.vid = str(ColumnNumber(table_on, target.sequence_id))\n\n if not target.id_:\n target.id_ = str(ColumnNumber(table_on, target.sequence_id).rev(None))\n\n target.d_vid = str(ObjectNumber.parse(target.t_vid).as_dataset)",
"def _base_test_insert_during_range_movement(self, rf):\n\n session = self.prepare(rf=rf)\n\n logger.debug(\"Creating table and view\")\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"Starting new node4 in write survey mode\")\n node4 = new_node(self.cluster, data_center=\"dc1\")\n # Set batchlog.replay_timeout_seconds=1 so we can ensure batchlog will be replayed below\n node4.start(wait_for_binary_proto=True, jvm_args=[\"-Dcassandra.write_survey=true\",\n \"-Dcassandra.batchlog.replay_timeout_in_ms=1\"])\n\n logger.debug(\"Insert data while node4 is joining\")\n\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=-i))\n\n logger.debug(\"Finish joining node4\")\n node4.nodetool(\"join\")\n\n logger.debug('Replay batchlogs')\n time.sleep(0.001) # Wait batchlog.replay_timeout_in_ms=1 (ms)\n self._replay_batchlogs()\n\n logger.debug(\"Verify data\")\n for i in range(1000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(-i), [-i, i])",
"def update_vluln_table():",
"def test_populate_mv_after_insert(self):\n session = self.prepare(consistency_level=ConsistencyLevel.QUORUM)\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int)\")\n\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({v}, {v})\".format(v=i))\n\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL \"\n \"AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"wait for view to build\")\n self._wait_for_view(\"ks\", \"t_by_v\")\n\n logger.debug(\"wait that all batchlogs are replayed\")\n self._replay_batchlogs()\n\n for i in range(1000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(i), [i, i])",
"def normalize_keys(df: pd.DataFrame) -> None:\n renames = {'tripUpdate_trip_tripId': 'trip_id', 'tripUpdate_trip_startDate': 'start_date',\n 'tripUpdate_trip_directionId': 'direction_id', 'tripUpdate_trip_routeId': 'route_id',\n 'tripUpdate_trip_scheduleRelationship': 'schedule_relationship',\n 'tripUpdate_trip_startTime': 'start_time',\n 'tripUpdate_timestamp': 'timestamp', 'tripUpdate_vehicle_id': 'vehicle_id',\n 'stopSequence': 'stop_sequence', 'stopId': 'stop_id',\n 'scheduleRelationship': 'schedule_relationship2',\n 'vehicle_trip_tripId': 'trip_id', 'vehicle_trip_scheduleRelationship': 'schedule_relationship',\n 'vehicle_timestamp': 'timestamp', 'vehicle_vehicle_id': 'vehicle_id',\n 'vehicle_trip_startTime': 'start_time', 'vehicle_trip_startDate': 'start_date',\n 'vehicle_trip_routeId': 'route_id', 'vehicle_trip_directionId': 'direction_id',\n 'tripUpdate_stopTimeUpdate_stopSequence': 'stop_sequence',\n 'tripUpdate_stopTimeUpdate_stopId': 'stop_id',\n 'tripUpdate_stopTimeUpdate_arrival_delay': 'arrival_delay',\n 'tripUpdate_stopTimeUpdate_arrival_time': 'arrival_time',\n 'tripUpdate_stopTimeUpdate_departure_delay': 'departure_delay',\n 'tripUpdate_stopTimeUpdate_departure_time': 'departure_time',\n 'tripUpdate_stopTimeUpdate_arrival_uncertainty': 'arrival_uncertainty',\n 'tripUpdate_stopTimeUpdate_departure_uncertainty': 'departure_uncertainty',\n 'alert_activePeriod_start': 'period_start', 'alert_activePeriod_end': 'period_end',\n 'alert_informedEntity_routeId': 'route_id', 'alert_informedEntity_stopId': 'stop_id',\n 'alert_informedEntity_trip_tripId': 'trip_id',\n 'alert_informedEntity_trip_scheduleRelationship': 'schedule_relationship',\n 'alert_headerText_translation_text': 'header_text',\n 'alert_descriptionText_translation_text': 'description_text',\n }\n df.rename(columns=renames, inplace=True)",
"def test_create_from_dataframe_invalid_pk(self):\n self.insert()\n data = self.tbl.select()\n Table.create(\n ':memory:', \"Foo_2\", data,\n primary_key='foo', verbose=True)",
"def DEADcreate_v_fix_view():\n sql_view = \"\"\"create or replace view v_fix as\n SELECT \n fix.fix_ident, \n fix.fix_center,\n ST_Y(ST_Transform(fix.fix_center, 4326)) as fix_lat84,\n ST_X(ST_Transform(fix.fix_center, 4326)) as fix_lon84\n \n FROM \n fix\"\"\"\n conf.Cur.execute(sql_view)\n conf.Con.commit()",
"def _refactor_time_columns(write_cursor: 'DBCursor') -> None:\n log.debug('Enter _refactor_time_columns')\n write_cursor.execute('ALTER TABLE timed_balances RENAME COLUMN time TO timestamp')\n write_cursor.execute('ALTER TABLE timed_location_data RENAME COLUMN time TO timestamp')\n write_cursor.execute('ALTER TABLE trades RENAME COLUMN time TO timestamp')\n write_cursor.execute('ALTER TABLE asset_movements RENAME COLUMN time TO timestamp')\n log.debug('Exit _refactor_time_columns')",
"def refresh(self):\n new = self.table.records_updated_since(self.updated.max()).set_index(\"operator\")\n new = new.rename(columns={\"operator_alias\": \"alias\", \"fscore\": \"confidence\"})\n\n if not new.empty: # TODO: this is clunky. need to fix later\n self.update(new)\n for idx, values in new.iterrows():\n try:\n self.loc[\n idx\n ] # try to lookup the index. Insert record if the lookup fails.\n except KeyError:\n self.loc[idx] = values",
"def refresh(self):\n new = self.table.records_updated_since(self.updated.max()).set_index(\"operator\")\n new = new.rename(columns={\"operator_alias\": \"alias\", \"fscore\": \"confidence\"})\n\n if not new.empty: # TODO: this is clunky. need to fix later\n self.update(new)\n for idx, values in new.iterrows():\n try:\n self.loc[\n idx\n ] # try to lookup the index. Insert record if the lookup fails.\n except KeyError:\n self.loc[idx] = values",
"def fix_date(df):\n df.insert(2, \"timestamp\", df[\"TestDate\"])\n\n mask = df[\"TestDate\"] <= df[\"StorageDate\"]\n print(\"Removing %.2f%% of unusual data\" % ((len(df) - np.sum(mask)) * 100 / len(df)))\n df = df[mask]\n\n mask = df[\"StorageDate\"] - df[\"TestDate\"] > pd.Timedelta(days=90)\n print(\"Fixing %.2f%% of outdated data\" % (np.sum(mask) * 100 / len(df)))\n df[\"timestamp\"].values[mask] = df[\"StorageDate\"].values[mask]\n return df",
"def prune_data(self, ts):\n sql = \"delete from %s where dateTime < %d\" % (self.dbm.table_name, ts)\n self.dbm.getSql(sql)\n try:\n # sqlite databases need some help to stay small\n self.dbm.getSql('vacuum')\n except Exception as e:\n pass",
"def on_update_seatable(data, index, *args):\n row = convert_row(metadata, data)\n print(row)",
"def drupal_timestamp_callback(t_index, mode, scope, s_row, d_row,\n new_key_cv, new_value_cv, d_db, d_cur, diff_k,\n diff_i):\n return drupal_db_update_timestamps(d_db, d_cur, mode, scope, new_key_cv,\n new_value_cv)",
"def mapback(df):\n df.set_index(df['Datetime'], drop=False, inplace=True) #keep for later use\n df.sort_index(inplace=True)",
"def upgrade():\n\n op.add_column(\"risks\", sa.Column(\"due_date\",\n sa.Date,\n nullable=True))\n\n op.add_column(\"risks\", sa.Column(\"created_by_id\",\n sa.Integer,\n nullable=True))\n\n op.add_column(\"risks\", sa.Column(\"last_submitted_at\",\n sa.DateTime,\n nullable=True))\n\n op.add_column(\"risks\", sa.Column(\"last_submitted_by_id\",\n sa.Integer,\n nullable=True))\n\n op.add_column(\"risks\", sa.Column(\"last_verified_by_id\",\n sa.Integer,\n nullable=True))\n\n op.add_column(\"risks\", sa.Column(\"last_verified_at\",\n sa.DateTime,\n nullable=True))",
"def save_df_2(obj, cur):\n\n df = obj.value\n\n t_start = clock()\n\n t1 = clock()\n\n db_cols, db_type_map = get_db_cols(cur, 'test_dataframe_table')\n # print(db_cols)\n # print(db_type_map)\n\n t2 = clock()\n if t2 - t1 > DT_LIMIT:\n print(\"Get db specs: \", t2 - t1)\n\n t1 = clock()\n\n df_cols, df_rows, df_type_map = get_df_cols_rows(df)\n # print(df_cols)\n # print(df_type_map)\n\n t2 = clock()\n if t2 - t1 > DT_LIMIT:\n print(\"Get df specs: \", t2 - t1)\n\n t1 = clock()\n\n shared_items = [\n k for k in db_type_map\n if k in df_type_map and db_type_map[k] != df_type_map[k]\n ]\n if len(shared_items) != 0:\n print(\"A column type was changed, please don't do this.\")\n return\n\n inter_cols = [col for col in df_cols if col in db_cols]\n # print(inter_cols)\n add_cols = [col for col in df_cols if col not in db_cols]\n # print(add_cols)\n\n df_indices = tuple([r[0] for r in df_rows])\n\n t2 = clock()\n if t2 - t1 > DT_LIMIT:\n print(\"Inter_cols, add_cols and indices: \", t2 - t1)\n\n t1 = clock()\n\n max_rid_sql = \"SELECT max_rid FROM test_mrm_table WHERE index in %s\"\n cur.execute(max_rid_sql, (df_indices, ))\n max_rid_list = tuple([r[0] for r in cur])\n\n if len(max_rid_list) > 0:\n db_data_sql = \"\"\"SELECT rid,{} \n FROM test_dataframe_table \n WHERE rid in %s\n \"\"\".format(','.join(inter_cols))\n cur.execute(db_data_sql, (max_rid_list, ))\n r_list = [r for r in cur]\n else:\n r_list = []\n\n t2 = clock()\n if t2 - t1 > DT_LIMIT:\n print(\"Get data from db table: \", t2 - t1)\n\n t1 = clock()\n\n hash_list = [(hash(r[1:]), r[0]) for r in r_list]\n hash_dic = dict(hash_list)\n\n t2 = clock()\n if t2 - t1 > DT_LIMIT:\n print(\"Hash dic: \", t2 - t1)\n\n t1 = clock()\n\n rids = []\n update_rows = []\n new_rows = []\n for i, r in enumerate(df_rows):\n row = tuple([\n cast(r[0], df_type_map.get(r[1])) for r in zip(r, df_cols)\n if r[1] in db_cols\n ])\n # if i < 5:\n # print(row)\n rest_row = tuple(\n [r[0] for r in zip(r, df_cols) if r[1] not in db_cols])\n h = hash(row)\n if h in hash_dic:\n rid = hash_dic.get(h)\n update_rows.append((rid, rest_row))\n rids.append(rid)\n else:\n new_rows.append(r)\n\n t2 = clock()\n if t2 - t1 > DT_LIMIT:\n print(\"Compute update_rows and new_rows: \", t2 - t1)\n\n t1 = clock()\n\n if len(add_cols) != 0:\n alter_sql = \"ALTER TABLE test_dataframe_table {};\".format(','.join(\n map(lambda x: 'ADD COLUMN {} {}'.format(x, df_type_map.get(x)),\n add_cols)))\n cur.execute(alter_sql)\n update_sql = \"UPDATE test_dataframe_table SET {} WHERE rid = %s\".format(\n ','.join(map(lambda x: '{} = %s'.format(x), add_cols)))\n for (rid, rest_row) in update_rows:\n cur.execute(update_sql, (*rest_row, rid))\n\n t2 = clock()\n if t2 - t1 > DT_LIMIT:\n print(\"Alter table and update: \", t2 - t1)\n\n t1 = clock()\n\n insert_sql = \"\"\"\n INSERT INTO test_dataframe_table({})\n VALUES %s RETURNING rid, index\n \"\"\".format(','.join(df_cols))\n new_rids_indices = execute_values(cur, insert_sql, new_rows, fetch=True)\n update_max_rids_sql = \"\"\"\n INSERT INTO test_mrm_table(max_rid, index) \n VALUES %s \n ON CONFLICT (index) DO UPDATE SET max_rid = EXCLUDED.max_rid\"\"\"\n execute_values(cur, update_max_rids_sql, new_rids_indices)\n new_rids = [x[0] for x in new_rids_indices]\n print(len(new_rids))\n rids.extend(new_rids)\n\n t2 = clock()\n if t2 - t1 > DT_LIMIT:\n print(\"Insert new tuples: \", t2 - t1)\n\n t1 = clock()\n\n insert_versioning_sql = \"\"\"\n INSERT INTO test_dataframe_object(t, lineno, name, rlist, clist) \n VALUES (%s, %s, %s, %s, %s)\"\"\"\n 
args = (obj.time, obj.lineno, obj.name, rids, df_cols)\n cur.execute(insert_versioning_sql, args)\n\n t2 = clock()\n if t2 - t1 > DT_LIMIT:\n print(\"Save version: \", t2 - t1)\n\n t_end = clock()\n print(\"Total time: \", t_end - t_start)",
"def test_add_node_after_wide_mv_with_range_deletions(self):\n\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int, v int, PRIMARY KEY (id, v)) WITH compaction = { 'class': 'SizeTieredCompactionStrategy', 'enabled': 'false' }\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n for i in range(10):\n for j in range(100):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=j))\n\n self.cluster.flush()\n\n for i in range(10):\n for j in range(100):\n assert_one(session, \"SELECT * FROM t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n for i in range(10):\n for j in range(100):\n if j % 10 == 0:\n session.execute(\"DELETE FROM t WHERE id = {} AND v >= {} and v < {}\".format(i, j, j + 2))\n\n self.cluster.flush()\n\n for i in range(10):\n for j in range(100):\n if j % 10 == 0 or (j - 1) % 10 == 0:\n assert_none(session, \"SELECT * FROM t WHERE id = {} and v = {}\".format(i, j))\n assert_none(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j))\n else:\n assert_one(session, \"SELECT * FROM t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n node4 = new_node(self.cluster, data_center=\"dc1\")\n node4.set_configuration_options(values={'max_mutation_size_in_kb': 25}) # CASSANDRA-11670\n logger.debug(\"Start join at {}\".format(time.strftime(\"%H:%M:%S\")))\n node4.start(wait_for_binary_proto=True, jvm_args=[\"-Dcassandra.migration_task_wait_in_seconds={}\".format(MIGRATION_WAIT)])\n\n session2 = self.patient_exclusive_cql_connection(node4)\n\n for i in range(10):\n for j in range(100):\n if j % 10 == 0 or (j - 1) % 10 == 0:\n assert_none(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j))\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j))\n else:\n assert_one(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n for i in range(10):\n for j in range(100, 110):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=j))\n\n for i in range(10):\n for j in range(110):\n if j < 100 and (j % 10 == 0 or (j - 1) % 10 == 0):\n assert_none(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j))\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j))\n else:\n assert_one(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])",
"def _migrate_top(self, entry, updated=False):\r\n if updated:\r\n entry['schema'] = self.SCHEMA_VERSION\r\n entry_id = self._entry_id_to_son(entry['_id'])\r\n self.location_map.update({'_id': entry_id}, entry)\r\n\r\n return entry"
]
| [
"0.7177231",
"0.61059767",
"0.57408565",
"0.550788",
"0.5367041",
"0.5361094",
"0.53473574",
"0.5249827",
"0.5230615",
"0.5230615",
"0.520213",
"0.5197923",
"0.51811713",
"0.51491165",
"0.51447564",
"0.5142157",
"0.5130651",
"0.5109452",
"0.502001",
"0.50022817",
"0.50022817",
"0.5001704",
"0.499701",
"0.49688613",
"0.49638465",
"0.49580404",
"0.49521413",
"0.49449804",
"0.49302348",
"0.4916634"
]
| 0.7051648 | 1 |
Test MV with expired liveness limit is properly handled (CASSANDRA-13883) | def _test_expired_liveness_with_limit(self, rf, nodes):
session = self.prepare(rf=rf, nodes=nodes, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
node1 = self.cluster.nodelist()[0]
session.execute('USE ks')
session.execute("CREATE TABLE t (k int PRIMARY KEY, a int, b int)")
session.execute(("CREATE MATERIALIZED VIEW mv AS SELECT * FROM t "
"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)"))
session.cluster.control_connection.wait_for_schema_agreement()
for k in range(100):
session.execute("INSERT INTO t (k, a, b) VALUES ({}, {}, {})".format(k, k, k))
# generate view rows with expired liveness except for rows 50 and 99
for k in range(100):
if k == 50 or k == 99:
continue
session.execute("DELETE a FROM t where k = {};".format(k))
# there should be 2 live rows
assert_one(session, "SELECT k,a,b FROM mv limit 1", [50, 50, 50])
assert_all(session, "SELECT k,a,b FROM mv limit 2", [[50, 50, 50], [99, 99, 99]])
assert_all(session, "SELECT k,a,b FROM mv", [[50, 50, 50], [99, 99, 99]])
# verify IN
keys = range(100)
assert_one(session, "SELECT k,a,b FROM mv WHERE k in ({}) limit 1".format(', '.join(str(x) for x in keys)),
[50, 50, 50])
assert_all(session, "SELECT k,a,b FROM mv WHERE k in ({}) limit 2".format(', '.join(str(x) for x in keys)),
[[50, 50, 50], [99, 99, 99]])
assert_all(session, "SELECT k,a,b FROM mv WHERE k in ({})".format(', '.join(str(x) for x in keys)),
[[50, 50, 50], [99, 99, 99]])
# verify fetch size
session.default_fetch_size = 1
assert_one(session, "SELECT k,a,b FROM mv limit 1", [50, 50, 50])
assert_all(session, "SELECT k,a,b FROM mv limit 2", [[50, 50, 50], [99, 99, 99]])
assert_all(session, "SELECT k,a,b FROM mv", [[50, 50, 50], [99, 99, 99]]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_update_hyperflex_feature_limit_external(self):\n pass",
"def test_update_offline_status(self):\n pass",
"def test_update_hyperflex_feature_limit_internal(self):\n pass",
"def test_environmental_impact_compliance():\n emissions = 12000\n legal_limit = 300\n assert emissions < legal_limit",
"async def test_age_limit_expiry(hass: HomeAssistant) -> None:\n now = dt_util.utcnow()\n current_time = datetime(now.year + 1, 8, 2, 12, 23, tzinfo=dt_util.UTC)\n\n with freeze_time(current_time) as freezer:\n assert await async_setup_component(\n hass,\n \"sensor\",\n {\n \"sensor\": [\n {\n \"platform\": \"statistics\",\n \"name\": \"test\",\n \"entity_id\": \"sensor.test_monitored\",\n \"state_characteristic\": \"mean\",\n \"sampling_size\": 20,\n \"max_age\": {\"minutes\": 4},\n },\n ]\n },\n )\n await hass.async_block_till_done()\n\n for value in VALUES_NUMERIC:\n current_time += timedelta(minutes=1)\n freezer.move_to(current_time)\n async_fire_time_changed(hass, current_time)\n hass.states.async_set(\n \"sensor.test_monitored\",\n str(value),\n {ATTR_UNIT_OF_MEASUREMENT: UnitOfTemperature.CELSIUS},\n )\n await hass.async_block_till_done()\n\n # After adding all values, we should only see 5 values in memory\n\n state = hass.states.get(\"sensor.test\")\n new_mean = round(sum(VALUES_NUMERIC[-5:]) / len(VALUES_NUMERIC[-5:]), 2)\n assert state is not None\n assert state.state == str(new_mean)\n assert state.attributes.get(\"buffer_usage_ratio\") == round(5 / 20, 2)\n assert state.attributes.get(\"age_coverage_ratio\") == 1.0\n\n # Values expire over time. Only two are left\n\n current_time += timedelta(minutes=3)\n freezer.move_to(current_time)\n async_fire_time_changed(hass, current_time)\n await hass.async_block_till_done()\n\n state = hass.states.get(\"sensor.test\")\n new_mean = round(sum(VALUES_NUMERIC[-2:]) / len(VALUES_NUMERIC[-2:]), 2)\n assert state is not None\n assert state.state == str(new_mean)\n assert state.attributes.get(\"buffer_usage_ratio\") == round(2 / 20, 2)\n assert state.attributes.get(\"age_coverage_ratio\") == 1 / 4\n\n # Values expire over time. Only one is left\n\n current_time += timedelta(minutes=1)\n freezer.move_to(current_time)\n async_fire_time_changed(hass, current_time)\n await hass.async_block_till_done()\n\n state = hass.states.get(\"sensor.test\")\n new_mean = float(VALUES_NUMERIC[-1])\n assert state is not None\n assert state.state == str(new_mean)\n assert state.attributes.get(\"buffer_usage_ratio\") == round(1 / 20, 2)\n assert state.attributes.get(\"age_coverage_ratio\") == 0\n\n # Values expire over time. Buffer is empty\n\n current_time += timedelta(minutes=1)\n freezer.move_to(current_time)\n async_fire_time_changed(hass, current_time)\n await hass.async_block_till_done()\n\n state = hass.states.get(\"sensor.test\")\n assert state is not None\n assert state.state == STATE_UNKNOWN\n assert state.attributes.get(\"buffer_usage_ratio\") == round(0 / 20, 2)\n assert state.attributes.get(\"age_coverage_ratio\") is None",
"def test_pool_timeout_hw(self):\n self.test_pool_timeout()",
"def test_timeout_with_crud_failures(self):\n\n # Local methods to validate vb_seqno\n\n def compare_vb_stat(stat_1, stat_2, vb, comparison=\"!=\"):\n keys_to_check = [\"high_seqno\", \"high_completed_seqno\"]\n result = True\n for key in keys_to_check:\n if vb in stat_1.keys():\n if stat_1[vb][\"uuid\"] != stat_2[vb][\"uuid\"]:\n self.log_failure(\"Mismatch in vb-%s UUID. %s != %s\"\n % (vb, stat_1[vb][\"uuid\"],\n stat_2[vb][\"uuid\"]))\n if comparison == \"!=\":\n if stat_1[vb][key] != stat_2[vb][key]:\n result = False\n self.log.warning(\n \"Mismatch in vb-%s stat %s. %s != %s\"\n % (vb, key, stat_1[vb][key], stat_2[vb][key]))\n elif stat_1[vb][key] == stat_2[vb][key]:\n result = False\n self.log.warning(\"Stat not updated for vb-%s stat %s. \"\n \"%s == %s\"\n % (vb, key,\n stat_1[vb][key], stat_2[vb][key]))\n return result\n\n def validate_vb_seqno_stats():\n \"\"\"\n :return retry_validation: Boolean denoting to retry validation\n \"\"\"\n retry_validation = False\n vb_info[\"post_timeout\"][node.ip] = \\\n cbstat_obj[node.ip].vbucket_seqno(self.bucket.name)\n for tem_vb_num in range(self.cluster.vbuckets):\n tem_vb_num = str(tem_vb_num)\n if tem_vb_num not in affected_vbs:\n if compare_vb_stat(vb_info[\"init\"][node.ip],\n vb_info[\"post_timeout\"][node.ip],\n tem_vb_num) is False:\n self.log_failure(\"Unaffected vb-%s stat\" % tem_vb_num)\n elif int(tem_vb_num) in target_nodes_vbuckets[\"active\"]:\n if compare_vb_stat(vb_info[\"init\"][node.ip],\n vb_info[\"post_timeout\"][node.ip],\n tem_vb_num) is False:\n self.log.warning(\"%s - mismatch in %s vb-%s seq_no\"\n % (node.ip, \"active\", tem_vb_num))\n elif int(tem_vb_num) in target_nodes_vbuckets[\"replica\"]:\n if compare_vb_stat(vb_info[\"init\"][node.ip],\n vb_info[\"post_timeout\"][node.ip],\n tem_vb_num, comparison=\"==\") is False:\n retry_validation = True\n self.log.warning(\"%s - mismatch in %s vb-%s seq_no\"\n % (node.ip, \"replica\", tem_vb_num))\n return retry_validation\n\n shell_conn = dict()\n cbstat_obj = dict()\n error_sim = dict()\n target_nodes_vbuckets = dict()\n vb_info = dict()\n tasks = dict()\n doc_gen = dict()\n affected_vbs = list()\n\n target_nodes_vbuckets[\"active\"] = []\n target_nodes_vbuckets[\"replica\"] = []\n vb_info[\"init\"] = dict()\n vb_info[\"post_timeout\"] = dict()\n vb_info[\"afterCrud\"] = dict()\n\n # Override crud_batch_size to minimum value for testing\n self.crud_batch_size = 5\n self.key = \"test_collections\"\n self.sdk_timeout = 3\n\n # Select target vbucket type to load_docs\n target_vb_type = \"replica\"\n if self.simulate_error == CouchbaseError.STOP_PERSISTENCE \\\n and self.durability_level \\\n == Bucket.DurabilityLevel.MAJORITY_AND_PERSIST_TO_ACTIVE:\n target_vb_type = \"active\"\n\n # Create required scope/collection for successful CRUD operation\n if self.scope_name != CbServer.default_scope:\n self.scope_name = self.bucket_util.get_random_name()\n self.collection_name = self.bucket_util.get_random_name()\n self.log.info(\"Creating scope::collection %s::%s\"\n % (self.scope_name, self.collection_name))\n self.create_scope_collection()\n\n # Load docs into created collection\n self.log.info(\"Loading data into created collection\")\n load_gen = doc_generator(self.key, 0, self.num_items)\n task = self.task.async_load_gen_docs(\n self.cluster, self.bucket, load_gen, \"create\", 0,\n scope=self.scope_name,\n collection=self.collection_name,\n sdk_client_pool=self.sdk_client_pool,\n batch_size=200, process_concurrency=8,\n timeout_secs=60)\n 
self.task_manager.get_task_result(task)\n if self.subdoc_test:\n load_gen = sub_doc_generator(self.key, 0, self.num_items/2)\n task = self.task.async_load_gen_sub_docs(\n self.cluster, self.bucket,\n load_gen, Bucket_Op.SubDocOps.INSERT,\n timeout_secs=self.sdk_timeout,\n compression=self.sdk_compression,\n path_create=True,\n batch_size=100,\n process_concurrency=8,\n durability=self.durability_level,\n scope=self.scope_name, collection=self.collection_name,\n sdk_client_pool=self.sdk_client_pool)\n self.task_manager.get_task_result(task)\n\n self.bucket.scopes[self.scope_name].collections[\n self.collection_name].num_items = self.num_items\n\n target_nodes = DurabilityHelper.getTargetNodes(self.cluster,\n self.nodes_init,\n self.num_nodes_affected)\n for node in target_nodes:\n shell_conn[node.ip] = RemoteMachineShellConnection(node)\n cbstat_obj[node.ip] = Cbstats(node)\n target_nodes_vbuckets[\"active\"] += \\\n cbstat_obj[node.ip].vbucket_list(self.bucket.name,\n vbucket_type=\"active\")\n target_nodes_vbuckets[\"replica\"] += \\\n cbstat_obj[node.ip].vbucket_list(self.bucket.name,\n vbucket_type=\"replica\")\n vb_info[\"init\"][node.ip] = cbstat_obj[node.ip].vbucket_seqno(\n self.bucket.name)\n error_sim[node.ip] = CouchbaseError(self.log, shell_conn[node.ip])\n\n curr_time = int(time.time())\n expected_timeout = curr_time + self.sdk_timeout\n\n if target_vb_type == \"active\":\n target_vbs = list(\n set(target_nodes_vbuckets[target_vb_type])\n .difference(set(target_nodes_vbuckets[\"replica\"])))\n else:\n target_vbs = list(\n set(target_nodes_vbuckets[target_vb_type])\n .difference(set(target_nodes_vbuckets[\"active\"])))\n\n # Create required doc_generators\n doc_gen[\"create\"] = doc_generator(self.key, self.num_items,\n self.crud_batch_size,\n target_vbucket=target_vbs)\n doc_gen[\"delete\"] = doc_generator(self.key, 0,\n self.crud_batch_size,\n target_vbucket=target_vbs)\n doc_gen[\"read\"] = doc_generator(\n self.key, int(self.num_items/3),\n self.crud_batch_size,\n target_vbucket=target_vbs)\n doc_gen[\"update\"] = doc_generator(\n self.key, int(self.num_items/2),\n self.crud_batch_size,\n target_vbucket=target_vbs)\n\n # Create required subdoc generators\n doc_gen[\"insert\"] = sub_doc_generator(\n self.key, int(self.num_items/2), self.crud_batch_size,\n target_vbucket=target_vbs)\n doc_gen[\"upsert\"] = sub_doc_generator_for_edit(\n self.key, 0, self.crud_batch_size,\n template_index=1,\n target_vbucket=target_vbs)\n doc_gen[\"remove\"] = sub_doc_generator(\n self.key, 0, self.crud_batch_size,\n target_vbucket=target_vbs)\n\n # Perform specified action\n for node in target_nodes:\n error_sim[node.ip].create(self.simulate_error,\n bucket_name=self.bucket.name)\n self.sleep(5, \"Wait for error_simulation to take effect\")\n\n ops_to_perform = [Bucket_Op.DocOps.CREATE, Bucket_Op.DocOps.UPDATE,\n Bucket_Op.DocOps.READ, Bucket_Op.DocOps.DELETE]\n if self.subdoc_test:\n ops_to_perform = [Bucket_Op.SubDocOps.INSERT,\n Bucket_Op.SubDocOps.UPSERT,\n Bucket_Op.SubDocOps.REMOVE]\n\n for op_type in ops_to_perform:\n self.log.info(\"Starting doc op %s\" % op_type)\n if op_type in Bucket_Op.DOC_OPS:\n tasks[op_type] = self.task.async_load_gen_docs(\n self.cluster, self.bucket, doc_gen[op_type], op_type, 0,\n scope=self.scope_name,\n collection=self.collection_name,\n sdk_client_pool=self.sdk_client_pool,\n batch_size=1, process_concurrency=8,\n durability=self.durability_level,\n timeout_secs=self.sdk_timeout,\n suppress_error_table=True,\n print_ops_rate=False,\n 
skip_read_on_error=True)\n else:\n tasks[op_type] = self.task.async_load_gen_sub_docs(\n self.cluster, self.bucket, doc_gen[op_type], op_type, 0,\n scope=self.scope_name,\n collection=self.collection_name,\n sdk_client_pool=self.sdk_client_pool,\n path_create=True,\n batch_size=1, process_concurrency=8,\n durability=self.durability_level,\n timeout_secs=self.sdk_timeout,\n print_ops_rate=False)\n\n self.task.jython_task_manager.get_task_result(tasks[op_type])\n\n # Validate task failures\n if op_type == Bucket_Op.DocOps.READ:\n # Validation for read task\n if len(tasks[op_type].fail.keys()) != 0:\n self.log_failure(\"Read failed for few docs: %s\"\n % tasks[op_type].fail.keys())\n else:\n # Validation of CRUDs - Update / Create / Delete\n for doc_id, crud_result in tasks[op_type].fail.items():\n vb_num = self.bucket_util.get_vbucket_num_for_key(\n doc_id, self.cluster.vbuckets)\n if SDKException.DurabilityAmbiguousException \\\n not in str(crud_result[\"error\"]):\n self.log_failure(\n \"Invalid exception for doc %s, vb %s: %s\"\n % (doc_id, vb_num, crud_result))\n\n # Revert the specified error scenario\n for node in target_nodes:\n error_sim[node.ip].revert(self.simulate_error,\n bucket_name=self.bucket.name)\n\n # Check whether the timeout triggered properly\n if int(time.time()) < expected_timeout:\n self.log_failure(\"Timed-out before expected time\")\n\n for op_type in ops_to_perform:\n if op_type == Bucket_Op.DocOps.READ:\n continue\n while doc_gen[op_type].has_next():\n doc_id, _ = doc_gen[op_type].next()\n affected_vbs.append(\n str(self.bucket_util.get_vbucket_num_for_key(\n doc_id,\n self.cluster.vbuckets)))\n\n affected_vbs = list(set(affected_vbs))\n # Fetch latest stats and validate the seq_nos are not updated\n for node in target_nodes:\n retry_count = 0\n max_retry = 3\n while retry_count < max_retry:\n self.log.info(\"Trying to validate vbseq_no stats: %d\"\n % (retry_count+1))\n retry_count += 1\n retry_required = validate_vb_seqno_stats()\n if not retry_required:\n break\n self.sleep(5, \"Sleep for vbseq_no stats to update\")\n else:\n # This will be exited only if `break` condition is not met\n self.log_failure(\"validate_vb_seqno_stats verification failed\")\n\n self.validate_test_failure()\n\n # Get SDK Client from client_pool\n sdk_client = self.sdk_client_pool.get_client_for_bucket(\n self.bucket,\n self.scope_name,\n self.collection_name)\n\n # Doc error validation\n for op_type in ops_to_perform:\n task = tasks[op_type]\n\n if self.nodes_init == 1 \\\n and op_type != Bucket_Op.DocOps.READ \\\n and len(task.fail.keys()) != (doc_gen[op_type].end\n - doc_gen[op_type].start):\n self.log_failure(\"Failed keys %d are less than expected %d\"\n % (len(task.fail.keys()),\n (doc_gen[op_type].end\n - doc_gen[op_type].start)))\n\n # Create table objects for display\n table_view = TableView(self.log.error)\n ambiguous_table_view = TableView(self.log.info)\n table_view.set_headers([\"Key\", \"vBucket\", \"Exception\"])\n ambiguous_table_view.set_headers([\"Key\", \"vBucket\"])\n\n # Iterate failed keys for validation\n for doc_key, doc_info in task.fail.items():\n vb_for_key = self.bucket_util.get_vbucket_num_for_key(doc_key)\n\n if SDKException.DurabilityAmbiguousException \\\n not in str(doc_info[\"error\"]):\n table_view.add_row([doc_key, vb_for_key,\n doc_info[\"error\"]])\n\n ambiguous_table_view.add_row([doc_key, str(vb_for_key)])\n if op_type not in Bucket_Op.SUB_DOC_OPS:\n retry_success = \\\n self.durability_helper.retry_for_ambiguous_exception(\n sdk_client, op_type, 
doc_key, doc_info)\n if not retry_success:\n self.log_failure(\"%s failed in retry for %s\"\n % (op_type, doc_key))\n\n # Display the tables (if any errors)\n table_view.display(\"Unexpected exception during %s\" % op_type)\n ambiguous_table_view.display(\"D_Ambiguous exception during %s\"\n % op_type)\n\n # Release the acquired client\n self.sdk_client_pool.release_client(sdk_client)\n\n # Verify doc count after expected CRUD failure\n self.bucket_util._wait_for_stats_all_buckets(self.cluster,\n self.cluster.buckets)\n self.bucket_util.validate_docs_per_collections_all_buckets(\n self.cluster)\n\n # Fetch latest stats and validate the values are updated\n for node in target_nodes:\n vb_info[\"afterCrud\"][node.ip] = \\\n cbstat_obj[node.ip].vbucket_seqno(self.bucket.name)\n if vb_info[\"init\"][node.ip] == vb_info[\"afterCrud\"][node.ip]:\n self.log_failure(\"vBucket seq_no stats not updated\")\n\n # Disconnect the shell connection\n for node in target_nodes:\n shell_conn[node.ip].disconnect()\n\n self.validate_test_failure()",
"def test_select_ttl_failure(self):",
"def test_api_livesession_video_no_stopped_at_cache_has_timeout(\n self,\n ):\n # set the start at current time minus 30 seconds\n started = int(to_timestamp(timezone.now())) - 30\n video = VideoFactory(\n live_state=RUNNING,\n live_info={\"started_at\": str(started)},\n live_type=JITSI,\n )\n\n livesession = LiveSessionFactory(\n consumer_site=video.playlist.consumer_site,\n email=\"[email protected]\",\n live_attendance={started + 10: {\"onStage\": 0}, started + 20: {\"muted\": 0}},\n lti_id=str(video.playlist.lti_id),\n lti_user_id=\"56255f3807599c377bf0e5bf072359fd\",\n video=video,\n )\n\n # token with right context_id and lti_user_id\n jwt_token = InstructorOrAdminLtiTokenFactory(\n playlist=video.playlist,\n consumer_site=str(video.playlist.consumer_site.id),\n context_id=str(video.playlist.lti_id),\n )\n livesession.refresh_from_db()\n with self.assertNumQueries(3):\n response = self.client.get(\n self._get_url(video),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 200)\n response_json = {\n \"count\": 1,\n \"next\": None,\n \"previous\": None,\n \"results\": [\n {\n \"id\": str(livesession.id),\n \"display_name\": \"[email protected]\",\n \"is_registered\": False,\n \"live_attendance\": {\n str(started): {},\n str(started + 15): {\"onStage\": 0},\n str(started + 30): {\"muted\": 0},\n },\n }\n ],\n }\n self.assertEqual(response.json(), response_json)\n\n with self.assertNumQueries(0):\n response = self.client.get(\n self._get_url(video),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json(), response_json)\n\n # go over the cache limit\n new_time = timezone.now() + timedelta(\n settings.VIDEO_ATTENDANCES_CACHE_DURATION + 1\n )\n with mock.patch.object(\n timezone, \"now\", return_value=new_time\n ), mock.patch.object(time, \"time\", return_value=int(to_timestamp(new_time))):\n # we call again the same request,\n # results are not identical\n with self.assertNumQueries(3):\n response = self.client.get(\n self._get_url(video),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 200)\n self.assertNotEqual(response.json(), response_json)",
"def test_heartbeat(self):\n pass",
"def test_lbheartbeat(self):\n pass",
"def test_api_livesession_video_ended_cache_no_timeout(\n self,\n ):\n started = int(to_timestamp(timezone.now())) - 1000\n\n video = VideoFactory(\n live_state=STOPPED,\n live_info={\"started_at\": str(started), \"stopped_at\": str(started + 30)},\n live_type=JITSI,\n )\n\n livesession = LiveSessionFactory(\n consumer_site=video.playlist.consumer_site,\n email=\"[email protected]\",\n live_attendance={started + 10: {\"onStage\": 0}, started + 20: {\"muted\": 0}},\n lti_id=str(video.playlist.lti_id),\n lti_user_id=\"56255f3807599c377bf0e5bf072359fd\",\n video=video,\n )\n\n # token with right context_id and lti_user_id\n jwt_token = InstructorOrAdminLtiTokenFactory(\n playlist=video.playlist,\n consumer_site=str(video.playlist.consumer_site.id),\n context_id=str(video.playlist.lti_id),\n )\n livesession.refresh_from_db()\n with self.assertNumQueries(3):\n response = self.client.get(\n self._get_url(video),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 200)\n response_json = {\n \"count\": 1,\n \"next\": None,\n \"previous\": None,\n \"results\": [\n {\n \"id\": str(livesession.id),\n \"display_name\": \"[email protected]\",\n \"is_registered\": False,\n \"live_attendance\": {\n str(started): {},\n str(started + 15): {\"onStage\": 0},\n str(started + 30): {\"muted\": 0},\n },\n }\n ],\n }\n self.assertEqual(response.json(), response_json)\n\n # go over the cache limit\n new_time = timezone.now() + timedelta(\n settings.VIDEO_ATTENDANCES_CACHE_DURATION + 1\n )\n with mock.patch.object(\n timezone, \"now\", return_value=new_time\n ), mock.patch.object(time, \"time\", return_value=int(to_timestamp(new_time))):\n # cache has no timeout\n with self.assertNumQueries(0):\n response = self.client.get(\n self._get_url(video),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json(), response_json)",
"def test_polling_plugin_timeout(self):\n pass",
"def test_update_virtualization_realm_maximum_impact_level(self):\n pass",
"def test_update_instance_limit(self):\n pass",
"def test_change_default_throttling_settings_http_with_overwrite_throttled_burst_above_50():",
"def test_maxttl_setting(self):\n maxttl = int(self.input.param(\"maxttl\", None))\n self.run_multi_operations(buckets = self.buckets,\n query_definitions = self.query_definitions,\n create_index = True, drop_index = False,\n query_with_explain = False, query = False)\n self.sleep(20)\n self._verify_bucket_count_with_index_count()\n self.sleep(maxttl, \"waiting for docs to be expired automatically per maxttl rule\")\n self._expiry_pager(self.master)\n self.sleep(60, \"wait for expiry pager to run on all nodes...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Docs in source bucket is {0} after maxttl has elapsed\".format(items))\n if items != 0:\n self.fail(\"Docs in source bucket is not 0 after maxttl has elapsed\")\n self._verify_bucket_count_with_index_count()",
"def test_smoker_latest_get(self):\n pass",
"def test_change_default_throttling_settings_http_with_overwrite_throttled_rate_above_50():",
"def test_ttl(self):\n session = self.prepare()\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 int, v3 int)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v2 AS SELECT * FROM t \"\n \"WHERE v2 IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v2, id)\"))\n\n for i in range(100):\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, {v}, {v}) USING TTL 10\".format(v=i))\n\n for i in range(100):\n assert_one(session, \"SELECT * FROM t_by_v2 WHERE v2 = {}\".format(i), [i, i, i, i])\n\n time.sleep(20)\n\n rows = list(session.execute(\"SELECT * FROM t_by_v2\"))\n assert len(rows) == 0, \"Expected 0 rows but got {}\".format(len(rows))",
"def test_api_livesession_reset_cache(\n self,\n ):\n # set the start at current time minus 30 seconds\n started = int(to_timestamp(timezone.now())) - 30\n\n video = VideoFactory(\n id=\"a1a21411-bf2f-4926-b97f-3c48a124d528\",\n live_state=STOPPED,\n live_info={\n \"medialive\": {\n \"input\": {\n \"id\": \"medialive_input_1\",\n \"endpoints\": [\n \"https://live_endpoint1\",\n \"https://live_endpoint2\",\n ],\n },\n \"channel\": {\"id\": \"medialive_channel_1\"},\n },\n \"mediapackage\": {\n \"id\": \"mediapackage_channel_1\",\n \"endpoints\": {\n \"hls\": {\n \"id\": \"endpoint1\",\n \"url\": \"https://channel_endpoint1/live.m3u8\",\n },\n },\n },\n \"started_at\": started,\n \"stopped_at\": to_timestamp(timezone.now() + timedelta(minutes=10)),\n },\n live_type=RAW,\n )\n\n livesession = LiveSessionFactory(\n consumer_site=video.playlist.consumer_site,\n email=\"[email protected]\",\n live_attendance={started + 10: {\"onStage\": 0}, started + 20: {\"muted\": 0}},\n lti_id=str(video.playlist.lti_id),\n lti_user_id=\"56255f3807599c377bf0e5bf072359fd\",\n video=video,\n )\n\n livesession_public = AnonymousLiveSessionFactory(\n email=None,\n live_attendance={\n started + 5: {\"muted\": 1},\n started + 18: {\"muted\": 0}, # will be ignored\n started + 25: {\"fullscreen\": 1, \"muted\": 1},\n },\n video=video,\n )\n\n # token with right context_id and lti_user_id\n jwt_token = InstructorOrAdminLtiTokenFactory(\n playlist=video.playlist,\n consumer_site=str(video.playlist.consumer_site.id),\n context_id=str(video.playlist.lti_id),\n )\n livesession.refresh_from_db()\n livesession_public.refresh_from_db()\n prefix_key = f\"attendances:video:{video.id}\"\n\n with self.assertNumQueries(3):\n response = self.client.get(\n f\"{self._get_url(video)}?limit=99\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n response_json = response.json()\n cache_key = f\"{prefix_key}offset:Nonelimit:99\"\n self.assertEqual(response.status_code, 200)\n self.assertEqual(cache.get(prefix_key), [cache_key])\n\n # two queries are cached with no timeout\n # results are identical as it is cached, no queries are executed\n with self.assertNumQueries(3):\n response = self.client.get(\n f\"{self._get_url(video)}?limit=1&offset=1\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n cache_key_offset = f\"{prefix_key}offset:1limit:1\"\n\n response_offset_1 = response.json()\n self.assertEqual(response.status_code, 200)\n self.assertNotEqual(response_json, response_offset_1)\n self.assertEqual(cache.get(prefix_key), [cache_key, cache_key_offset])\n\n # go over the cache limit, the two queries are cached\n new_time = timezone.now() + timedelta(\n settings.VIDEO_ATTENDANCES_CACHE_DURATION + 1\n )\n with mock.patch.object(\n timezone, \"now\", return_value=new_time\n ), mock.patch.object(time, \"time\", return_value=int(to_timestamp(new_time))):\n with self.assertNumQueries(0):\n response = self.client.get(\n f\"{self._get_url(video)}?limit=99\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json(), response_json)\n\n with self.assertNumQueries(0):\n response = self.client.get(\n f\"{self._get_url(video)}?limit=1&offset=1\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json(), response_offset_1)\n\n # we now reset the video, keys must have been reset\n data = {\n \"logGroupName\": \"/aws/lambda/dev-test-marsha-medialive\",\n \"requestId\": 
\"7954d4d1-9dd3-47f4-9542-e7fd5f937fe6\",\n \"state\": \"running\",\n }\n signature = generate_hash(\"shared secret\", json.dumps(data).encode(\"utf-8\"))\n with mock.patch.object(api.video, \"update_id3_tags\"):\n response = self.client.patch(\n f\"/api/videos/{video.id}/update-live-state/\",\n data,\n content_type=\"application/json\",\n HTTP_X_MARSHA_SIGNATURE=signature,\n )\n self.assertEqual(response.status_code, 200)\n\n # results aren't cached anymore\n with self.assertNumQueries(3):\n response = self.client.get(\n self._get_url(video),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 200)\n\n with self.assertNumQueries(3):\n response = self.client.get(\n f\"{self._get_url(video)}?limit=1&offset=1\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 200)\n # listing of key are deleted\n self.assertEqual(cache.get(prefix_key, []), [])",
"def test_live_migration_src_check_volume_node_not_alive(self):\n\n instance_id = self._create_instance()\n i_ref = db.instance_get(self.context, instance_id)\n dic = {'instance_id': instance_id, 'size': 1}\n v_ref = db.volume_create(self.context, {'instance_id': instance_id,\n 'size': 1})\n t1 = utils.utcnow() - datetime.timedelta(1)\n dic = {'created_at': t1, 'updated_at': t1, 'binary': 'nova-volume',\n 'topic': 'volume', 'report_count': 0}\n s_ref = db.service_create(self.context, dic)\n\n self.assertRaises(exception.VolumeServiceUnavailable,\n self.scheduler.driver.schedule_live_migration,\n self.context, instance_id, i_ref['host'])\n\n db.instance_destroy(self.context, instance_id)\n db.service_destroy(self.context, s_ref['id'])\n db.volume_destroy(self.context, v_ref['id'])",
"def test_metrics_server(self):\n validate_metrics_server()",
"def _test_out_of_range(self):\n self.cdbconf.setup('KKG')\n self.cdbconf.setConfiguration('CUSTOM_OPT')\n az, el, latitude = [radians(50)] * 3\n site_info = {'latitude': latitude}\n self.p.setup(site_info, self.source, self.device)\n self.p.setRewindingMode('AUTO')\n offset = 20\n max_limit = self.device.getMaxLimit() \n min_limit = self.device.getMinLimit()\n Pis = max_limit - offset/2\n time.sleep(0.2) if self.using_mock else time.sleep(3)\n self.p.setPosition(Pis)\n time.sleep(0.2) # Wait a bit for the setup\n max_rewinding_steps = (max_limit - min_limit) // self.device.getStep()\n expected = Pis - max_rewinding_steps*self.device.getStep() + offset\n self.source.setAzimuth(az)\n self.source.setElevation(el)\n self.p.startUpdating('MNG_TRACK', 'ANT_NORTH', az, el, None, None)\n time.sleep(0.2) if self.using_mock else time.sleep(3)\n self.p.setOffset(offset)\n time.sleep(0.2) if self.using_mock else time.sleep(3)\n self.assertEqual(self.device.getActPosition(), expected)",
"def test_REFRESH_TIMEOUT(self):\n self.assertIsInstance(constants.REFRESH_TIMEOUT, int,\n \"constants.REFRESH_TIMEOUT must be an integer.\")",
"def testRefresh(self):\n \n pass",
"def test_too_far_scenario():\n start_too_far_scenario(config.MERAKI_CAMERAS[0][\"serial\"])\n time.sleep(WARN_EVENT_THRESHOLD)\n start_too_far_scenario(config.MERAKI_CAMERAS[0][\"serial\"])\n return \"ok\"",
"def test_liveness(self):\n with DockerHost('host1',\n additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS) as host1:\n retry_until_success(host1.assert_is_live, retries=30)",
"def test_redis_increase_replica_count_usual_case():",
"def test_remove_through_timeout(self):\n TestStorage.set_timeout(20)\n TestStorage.set_time(100)\n store = RatedStatisticStorage()\n store._RatedStatisticStorage__add_single_outcome(\n \"n!node3\", \"cpu\", Outcome.HIGH, rospy.Time(100))\n self.assertEqual(\n store.get_outcome(\"n!node3\", \"cpu\"), Outcome.HIGH)\n TestStorage.set_time(120)\n self.assertEqual(\n store.get_outcome(\"n!node3\", \"cpu\"), Outcome.UNKNOWN)"
]
| [
"0.61492723",
"0.6087286",
"0.6037791",
"0.6019407",
"0.6004978",
"0.5970144",
"0.5930014",
"0.5906103",
"0.59024256",
"0.5864831",
"0.58527917",
"0.57939404",
"0.57530594",
"0.57497495",
"0.57018834",
"0.566865",
"0.5648454",
"0.56078196",
"0.55687165",
"0.55558985",
"0.55473185",
"0.5544305",
"0.5540956",
"0.5524592",
"0.5518204",
"0.5510054",
"0.55040836",
"0.5490343",
"0.5482346",
"0.5473236"
]
| 0.7073038 | 0 |
Test that a materialized view is consistent after a simple repair. | def _simple_repair_test(self, repair_base=False, repair_view=False):
session = self.prepare(rf=3, options={'hinted_handoff_enabled': False})
node1, node2, node3 = self.cluster.nodelist()
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
session.cluster.control_connection.wait_for_schema_agreement()
logger.debug('Shutdown node2')
node2.stop(wait_other_notice=True)
for i in range(1000):
session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))
self._replay_batchlogs()
logger.debug('Verify the data in the MV with CL=ONE')
for i in range(1000):
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = {}".format(i),
[i, i, 'a', 3.0]
)
logger.debug('Verify the data in the MV with CL=ALL. All should be unavailable.')
for i in range(1000):
statement = SimpleStatement(
"SELECT * FROM t_by_v WHERE v = {}".format(i),
consistency_level=ConsistencyLevel.ALL
)
assert_unavailable(
session.execute,
statement
)
logger.debug('Start node2, and repair')
node2.start(wait_for_binary_proto=True)
if repair_base:
node1.nodetool("repair ks t")
if repair_view:
node1.nodetool("repair ks t_by_v")
logger.debug('Verify the data in the MV with CL=ALL. All should be available now and no digest mismatch')
for i in range(1000):
query = SimpleStatement(
"SELECT * FROM t_by_v WHERE v = {}".format(i),
consistency_level=ConsistencyLevel.ALL
)
result = session.execute(query, trace=True)
self.check_trace_events(result.get_query_trace(), False)
assert self._rows_to_list(result.current_rows) == [[i, i, 'a', 3.0]] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_really_complex_repair(self):\n session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)\n node1, node2, node3, node4, node5 = self.cluster.nodelist()\n\n # we create the base table with gc_grace_seconds=5 so batchlog will expire after 5 seconds\n session.execute(\"CREATE TABLE ks.t (id int, v int, v2 text, v3 decimal, PRIMARY KEY(id, v, v2))\"\n \"WITH gc_grace_seconds = 1\")\n session.execute((\"CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL AND v IS NOT NULL AND \"\n \"v2 IS NOT NULL PRIMARY KEY (v2, v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Shutdown node2 and node3')\n node2.stop(wait_other_notice=True)\n node3.stop(wait_other_notice=True)\n\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0)\")\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'a', 3.0)\")\n self._replay_batchlogs()\n logger.debug('Verify the data in the MV on node1 with CL=ONE')\n assert_all(session, \"SELECT * FROM ks.t_by_v WHERE v2 = 'a'\", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]])\n\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'b', 3.0)\")\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'b', 3.0)\")\n self._replay_batchlogs()\n logger.debug('Verify the data in the MV on node1 with CL=ONE')\n assert_all(session, \"SELECT * FROM ks.t_by_v WHERE v2 = 'b'\", [['b', 1, 1, 3.0], ['b', 2, 2, 3.0]])\n\n session.shutdown()\n\n logger.debug('Shutdown node1, node4 and node5')\n node1.stop()\n node4.stop()\n node5.stop()\n\n logger.debug('Start nodes 2 and 3')\n node2.start()\n node3.start(wait_for_binary_proto=True)\n\n session2 = self.patient_cql_connection(node2)\n session2.execute('USE ks')\n\n logger.debug('Verify the data in the MV on node2 with CL=ONE. 
No rows should be found.')\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'a'\")\n\n logger.debug('Write new data in node2 that overlap those in node1')\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'c', 3.0)\")\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'c', 3.0)\")\n self._replay_batchlogs()\n assert_all(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'c'\", [['c', 1, 1, 3.0], ['c', 2, 2, 3.0]])\n\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'd', 3.0)\")\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'd', 3.0)\")\n self._replay_batchlogs()\n assert_all(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'd'\", [['d', 1, 1, 3.0], ['d', 2, 2, 3.0]])\n\n logger.debug(\"Composite delete of everything\")\n session2.execute(\"DELETE FROM ks.t WHERE id = 1 and v = 1\")\n session2.execute(\"DELETE FROM ks.t WHERE id = 2 and v = 2\")\n self._replay_batchlogs()\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'c'\")\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'd'\")\n\n logger.debug('Wait for batchlogs to expire from node2 and node3')\n time.sleep(5)\n\n logger.debug('Start remaining nodes')\n node1.start(wait_for_binary_proto=True)\n node4.start(wait_for_binary_proto=True)\n node5.start(wait_for_binary_proto=True)\n\n # at this point the data isn't repaired so we have an inconsistency.\n # this value should return None\n assert_all(\n session2,\n \"SELECT * FROM ks.t_by_v WHERE v2 = 'a'\", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]],\n cl=ConsistencyLevel.QUORUM\n )\n\n logger.debug('Run global repair on node1')\n node1.repair()\n\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'a'\", cl=ConsistencyLevel.QUORUM)",
"def test_view_tombstone(self):\n\n self.prepare(rf=3, options={'hinted_handoff_enabled': False})\n node1, node2, node3 = self.cluster.nodelist()\n\n session = self.patient_exclusive_cql_connection(node1)\n session.max_trace_wait = 120\n session.execute('USE ks')\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n # Set initial values TS=0, verify\n session.execute(SimpleStatement(\"INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 0\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'a', 3.0]\n )\n\n session.execute(SimpleStatement(\"INSERT INTO t (id, v2) VALUES (1, 'b') USING TIMESTAMP 1\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0]\n )\n\n # change v's value and TS=3, tombstones v=1 and adds v=0 record\n session.execute(SimpleStatement(\"UPDATE t USING TIMESTAMP 3 SET v = 0 WHERE id = 1\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n\n assert_none(session, \"SELECT * FROM t_by_v WHERE v = 1\")\n\n logger.debug('Shutdown node2')\n node2.stop(wait_other_notice=True)\n\n session.execute(SimpleStatement(\"UPDATE t USING TIMESTAMP 4 SET v = 1 WHERE id = 1\",\n consistency_level=ConsistencyLevel.QUORUM))\n self._replay_batchlogs()\n\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0]\n )\n\n node2.start(wait_for_binary_proto=True)\n\n # We should get a digest mismatch\n query = SimpleStatement(\"SELECT * FROM t_by_v WHERE v = 1\",\n consistency_level=ConsistencyLevel.ALL)\n\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), True)\n\n # We should not get a digest mismatch the second time\n query = SimpleStatement(\"SELECT * FROM t_by_v WHERE v = 1\", consistency_level=ConsistencyLevel.ALL)\n\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n\n # Verify values one last time\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0],\n cl=ConsistencyLevel.ALL\n )",
"def test_complex_repair(self):\n session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)\n node1, node2, node3, node4, node5 = self.cluster.nodelist()\n\n # we create the base table with gc_grace_seconds=5 so batchlog will expire after 5 seconds\n session.execute(\"CREATE TABLE ks.t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\"\n \"WITH gc_grace_seconds = 5\")\n session.execute((\"CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Shutdown node2 and node3')\n node2.stop()\n node3.stop(wait_other_notice=True)\n\n logger.debug('Write initial data to node1 (will be replicated to node4 and node5)')\n for i in range(1000):\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)\".format(v=i))\n\n logger.debug('Verify the data in the MV on node1 with CL=ONE')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )\n\n logger.debug('Close connection to node1')\n session.cluster.shutdown()\n logger.debug('Shutdown node1, node4 and node5')\n node1.stop()\n node4.stop()\n node5.stop()\n\n logger.debug('Start nodes 2 and 3')\n node2.start()\n node3.start(wait_for_binary_proto=True)\n\n session2 = self.patient_cql_connection(node2)\n\n logger.debug('Verify the data in the MV on node2 with CL=ONE. No rows should be found.')\n for i in range(1000):\n assert_none(\n session2,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(i)\n )\n\n logger.debug('Write new data in node2 and node3 that overlap those in node1, node4 and node5')\n for i in range(1000):\n # we write i*2 as value, instead of i\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)\".format(v=i * 2))\n\n logger.debug('Verify the new data in the MV on node2 with CL=ONE')\n for i in range(1000):\n v = i * 2\n assert_one(\n session2,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(v),\n [v, v, 'a', 3.0]\n )\n\n logger.debug('Wait for batchlogs to expire from node2 and node3')\n time.sleep(5)\n\n logger.debug('Start remaining nodes')\n node1.start(wait_for_binary_proto=True)\n node4.start(wait_for_binary_proto=True)\n node5.start(wait_for_binary_proto=True)\n\n session = self.patient_cql_connection(node1)\n\n logger.debug('Read data from MV at QUORUM (old data should be returned)')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0],\n cl=ConsistencyLevel.QUORUM\n )\n\n logger.debug('Run global repair on node1')\n node1.repair()\n\n logger.debug('Read data from MV at quorum (new data should be returned after repair)')\n for i in range(1000):\n v = i * 2\n assert_one(\n session,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(v),\n [v, v, 'a', 3.0],\n cl=ConsistencyLevel.QUORUM\n )",
"def _base_replica_repair_test(self, fail_mv_lock=False):\n\n self.prepare(rf=3)\n node1, node2, node3 = self.cluster.nodelist()\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Write initial data')\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)\".format(v=i))\n\n self._replay_batchlogs()\n\n logger.debug('Verify the data in the MV with CL=ALL')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0],\n cl=ConsistencyLevel.ALL\n )\n\n logger.debug('Shutdown node1')\n node1.stop(wait_other_notice=True)\n logger.debug('Delete node1 data')\n node1.clear(clear_all=True)\n\n jvm_args = []\n if fail_mv_lock:\n if self.cluster.version() >= LooseVersion('3.10'): # CASSANDRA-10134\n jvm_args = ['-Dcassandra.allow_unsafe_replace=true', '-Dcassandra.replace_address={}'.format(node1.address())]\n jvm_args.append(\"-Dcassandra.test.fail_mv_locks_count=1000\")\n # this should not make Keyspace.apply throw WTE on failure to acquire lock\n node1.set_configuration_options(values={'write_request_timeout_in_ms': 100})\n logger.debug('Restarting node1 with jvm_args={}'.format(jvm_args))\n node1.start(wait_for_binary_proto=True, jvm_args=jvm_args)\n logger.debug('Shutdown node2 and node3')\n node2.stop(wait_other_notice=True)\n node3.stop(wait_other_notice=True)\n\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n\n logger.debug('Verify that there is no data on node1')\n for i in range(1000):\n assert_none(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i)\n )\n\n logger.debug('Restarting node2 and node3')\n node2.start(wait_for_binary_proto=True)\n node3.start(wait_for_binary_proto=True)\n\n # Just repair the base replica\n logger.debug('Starting repair on node1')\n node1.nodetool(\"repair ks t\")\n\n logger.debug('Verify data with cl=ALL')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )",
"def _test_base_view_consistency_on_crash(self, fail_phase):\n\n self.cluster.set_batch_commitlog(enabled=True, use_batch_window = self.cluster.version() < '5.0')\n self.fixture_dtest_setup.ignore_log_patterns = [r'Dummy failure', r\"Failed to force-recycle all segments\"]\n self.prepare(rf=1, install_byteman=True)\n node1, node2, node3 = self.cluster.nodelist()\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Make node1 fail {} view writes'.format(fail_phase))\n node1.byteman_submit([mk_bman_path('fail_{}_view_write.btm'.format(fail_phase))])\n\n logger.debug('Write 1000 rows - all node1 writes should fail')\n\n failed = False\n for i in range(1, 1000):\n try:\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) USING TIMESTAMP {v}\".format(v=i))\n except WriteFailure:\n failed = True\n\n assert failed, \"Should fail at least once.\"\n assert node1.grep_log(\"Dummy failure\"), \"Should throw Dummy failure\"\n\n missing_entries = 0\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n for i in range(1, 1000):\n view_entry = rows_to_list(session.execute(SimpleStatement(\"SELECT * FROM t_by_v WHERE id = {} AND v = {}\".format(i, i),\n consistency_level=ConsistencyLevel.ONE)))\n base_entry = rows_to_list(session.execute(SimpleStatement(\"SELECT * FROM t WHERE id = {}\".format(i),\n consistency_level=ConsistencyLevel.ONE)))\n\n if not base_entry:\n missing_entries += 1\n if not view_entry:\n missing_entries += 1\n\n logger.debug(\"Missing entries {}\".format(missing_entries))\n assert missing_entries > 0\n\n logger.debug('Restarting node1 to ensure commit log is replayed')\n node1.stop(wait_other_notice=True)\n # Set batchlog.replay_timeout_seconds=1 so we can ensure batchlog will be replayed below\n node1.start(jvm_args=[\"-Dcassandra.batchlog.replay_timeout_in_ms=1\"])\n\n logger.debug('Replay batchlogs')\n time.sleep(0.001) # Wait batchlog.replay_timeout_in_ms=1 (ms)\n self._replay_batchlogs()\n\n logger.debug('Verify that both the base table entry and view are present after commit and batchlog replay')\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n for i in range(1, 1000):\n view_entry = rows_to_list(session.execute(SimpleStatement(\"SELECT * FROM t_by_v WHERE id = {} AND v = {}\".format(i, i),\n consistency_level=ConsistencyLevel.ONE)))\n base_entry = rows_to_list(session.execute(SimpleStatement(\"SELECT * FROM t WHERE id = {}\".format(i),\n consistency_level=ConsistencyLevel.ONE)))\n\n assert base_entry, \"Both base {} and view entry {} should exist.\".format(base_entry, view_entry)\n assert view_entry, \"Both base {} and view entry {} should exist.\".format(base_entry, view_entry)",
"def test_view_metadata_cleanup(self):\n session = self.prepare(rf=2, nodes=2)\n\n def populate_data(session, rows):\n logger.debug(\"populate base data\")\n for v in range(rows):\n session.execute(\"INSERT INTO t(k,c,a,b,e,f) VALUES({v},{v},{v},{v},{v},{v})\".format(v=v))\n\n def verify_data(session, rows, views):\n logger.debug(\"verify view data\")\n for v in range(rows):\n for view in range(views):\n assert_one(session, \"SELECT * FROM mv{} WHERE k={v} AND c={v}\".format(view, v=v), [v, v, v, v, v, v])\n\n def create_keyspace(session, ks=\"ks1\", rf=2):\n create_ks(session, ks, rf)\n\n def create_table(session):\n logger.debug(\"create base table\")\n session.execute(\"CREATE TABLE t (k int, c int, a int, b int, e int, f int, primary key(k, c))\")\n\n def create_views(session, views, keyspace=\"ks1\"):\n logger.debug(\"create view\")\n for view in range(views):\n session.execute(\"CREATE MATERIALIZED VIEW mv{} AS SELECT * FROM t \"\n \"WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c,k)\".format(view),\n timeout=60)\n self._wait_for_view(keyspace, \"mv{}\".format(view))\n\n def drop_keyspace(session, keyspace=\"ks1\"):\n logger.debug(\"drop keyspace {}\".format(keyspace))\n session.execute(\"DROP KEYSPACE IF EXISTS {}\".format(keyspace),\n timeout=60)\n\n def drop_views(session, views):\n logger.debug(\"drop all views\")\n for view in range(views):\n session.execute(\"DROP MATERIALIZED VIEW IF EXISTS mv{}\".format(view))\n\n rows = 100\n views = 5\n\n create_keyspace(session)\n create_table(session)\n populate_data(session, rows)\n create_views(session, views)\n verify_data(session, rows, views)\n\n self._assert_view_meta(session, views)\n drop_keyspace(session)\n self._assert_view_meta(session, views, exists=False)\n\n create_keyspace(session)\n create_table(session)\n populate_data(session, rows)\n create_views(session, views)\n verify_data(session, rows, views)\n\n self._assert_view_meta(session, views)\n drop_views(session, views)\n self._assert_view_meta(session, views, exists=False)",
"def test_crc_check_chance(self):\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL \"\n \"AND id IS NOT NULL PRIMARY KEY (v, id) WITH crc_check_chance = 0.5\"))\n\n assert_crc_check_chance_equal(session, \"t_by_v\", 0.5, view=True)\n\n session.execute(\"ALTER MATERIALIZED VIEW t_by_v WITH crc_check_chance = 0.3\")\n\n assert_crc_check_chance_equal(session, \"t_by_v\", 0.3, view=True)",
"def _test_mv_with_default_ttl(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1, node2, node3 = self.cluster.nodelist()\n session.execute('USE ks')\n\n logger.debug(\"MV with same key and unselected columns\")\n session.execute(\"CREATE TABLE t2 (k int, a int, b int, c int, primary key(k, a)) with default_time_to_live=600\")\n session.execute((\"CREATE MATERIALIZED VIEW mv2 AS SELECT k,a,b FROM t2 \"\n \"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (a, k)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n self.update_view(session, \"UPDATE t2 SET c=1 WHERE k=1 AND a=1;\", flush)\n assert_one(session, \"SELECT k,a,b,c FROM t2\", [1, 1, None, 1])\n assert_one(session, \"SELECT k,a,b FROM mv2\", [1, 1, None])\n\n self.update_view(session, \"UPDATE t2 SET c=null WHERE k=1 AND a=1;\", flush)\n assert_none(session, \"SELECT k,a,b,c FROM t2\")\n assert_none(session, \"SELECT k,a,b FROM mv2\")\n\n self.update_view(session, \"UPDATE t2 SET c=2 WHERE k=1 AND a=1;\", flush)\n assert_one(session, \"SELECT k,a,b,c FROM t2\", [1, 1, None, 2])\n assert_one(session, \"SELECT k,a,b FROM mv2\", [1, 1, None])\n\n self.update_view(session, \"DELETE c FROM t2 WHERE k=1 AND a=1;\", flush)\n assert_none(session, \"SELECT k,a,b,c FROM t2\")\n assert_none(session, \"SELECT k,a,b FROM mv2\")\n\n if flush:\n self.cluster.compact()\n assert_none(session, \"SELECT * FROM t2\")\n assert_none(session, \"SELECT * FROM mv2\")\n\n # test with user-provided ttl\n self.update_view(session, \"INSERT INTO t2(k,a,b,c) VALUES(2,2,2,2) USING TTL 5\", flush)\n self.update_view(session, \"UPDATE t2 USING TTL 100 SET c=1 WHERE k=2 AND a=2;\", flush)\n self.update_view(session, \"UPDATE t2 USING TTL 50 SET c=2 WHERE k=2 AND a=2;\", flush)\n self.update_view(session, \"DELETE c FROM t2 WHERE k=2 AND a=2;\", flush)\n\n time.sleep(5)\n\n assert_none(session, \"SELECT k,a,b,c FROM t2\")\n assert_none(session, \"SELECT k,a,b FROM mv2\")\n\n if flush:\n self.cluster.compact()\n assert_none(session, \"SELECT * FROM t2\")\n assert_none(session, \"SELECT * FROM mv2\")\n\n logger.debug(\"MV with extra key\")\n session.execute(\"CREATE TABLE t (k int PRIMARY KEY, a int, b int) with default_time_to_live=600\")\n session.execute((\"CREATE MATERIALIZED VIEW mv AS SELECT * FROM t \"\n \"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 1, 1);\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1])\n\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 2, 1);\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 2, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 2, 1])\n\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 3, 1);\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 3, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 3, 1])\n\n if flush:\n self.cluster.compact()\n assert_one(session, \"SELECT * FROM t\", [1, 3, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 3, 1])\n\n # user provided ttl\n start = time.time()\n self.update_view(session, \"UPDATE t USING TTL 100 SET a = 4 WHERE k = 1\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 4, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 4, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 
100:\n pytest.fail(\"Please increase the 100 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n start = time.time()\n self.update_view(session, \"UPDATE t USING TTL 80 SET a = 5 WHERE k = 1\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 5, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 5, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 80:\n pytest.fail(\"Please increase the 80 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n start = time.time()\n self.update_view(session, \"UPDATE t USING TTL 60 SET a = 6 WHERE k = 1\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 6, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 6, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 60:\n pytest.fail(\"Please increase the 60 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n if flush:\n self.cluster.compact()\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 6, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 6, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 60:\n pytest.fail(\"Please increase the 60 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae",
"def _test_base_column_in_view_pk_commutative_tombstone_(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1 = self.cluster.nodelist()[0]\n\n session.execute('USE ks')\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n for node in self.cluster.nodelist():\n node.nodetool(\"disableautocompaction\")\n\n # sstable 1, Set initial values TS=1\n self.update_view(session, \"INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 1\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, 'a', 3.0])\n\n # sstable 2, change v's value and TS=2, tombstones v=1 and adds v=0 record\n self.update_view(session, \"DELETE FROM t USING TIMESTAMP 2 WHERE id = 1;\", flush)\n assert_none(session, \"SELECT * FROM t_by_v\")\n assert_none(session, \"SELECT * FROM t\")\n\n # sstable 3, tombstones of mv created by base deletion should remain.\n self.update_view(session, \"INSERT INTO t (id, v) VALUES (1, 1) USING TIMESTAMP 3\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None])\n\n # sstable 4, shadow view row (id=1, v=1), insert (id=1, v=2, ts=4)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 set v = 2 WHERE id = 1;\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [2, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 2, None, None])\n\n # sstable 5, shadow view row (id=1, v=2), insert (id=1, v=1 ts=5)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 5 set v = 1 WHERE id = 1;\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None]) # data deleted by row-tombstone@2 should not resurrect\n\n if flush:\n self.cluster.compact()\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None]) # data deleted by row-tombstone@2 should not resurrect\n\n # shadow view row (id=1, v=1)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 5 set v = null WHERE id = 1;\", flush)\n assert_none(session, \"SELECT * FROM t_by_v\")\n assert_one(session, \"SELECT * FROM t\", [1, None, None, None])",
"def test_base_replica_repair_with_contention(self):\n self._base_replica_repair_test(fail_mv_lock=True)",
"def test_complex_mv_select_statements(self):\n cluster = self.cluster\n cluster.set_configuration_options({'enable_materialized_views': 'true'})\n cluster.populate(3).start()\n node1 = cluster.nodelist()[0]\n session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)\n\n logger.debug(\"Creating keyspace\")\n session.execute(\"CREATE KEYSPACE mvtest WITH replication = \"\n \"{'class': 'SimpleStrategy', 'replication_factor': '3'}\")\n session.execute('USE mvtest')\n\n mv_primary_keys = [\"((a, b), c)\",\n \"((b, a), c)\",\n \"(a, b, c)\",\n \"(c, b, a)\",\n \"((c, a), b)\"]\n\n for mv_primary_key in mv_primary_keys:\n\n session.execute(\"CREATE TABLE test (a int, b int, c int, d int, PRIMARY KEY (a, b, c))\")\n\n insert_stmt = session.prepare(\"INSERT INTO test (a, b, c, d) VALUES (?, ?, ?, ?)\")\n update_stmt = session.prepare(\"UPDATE test SET d = ? WHERE a = ? AND b = ? AND c = ?\")\n delete_stmt1 = session.prepare(\"DELETE FROM test WHERE a = ? AND b = ? AND c = ?\")\n delete_stmt2 = session.prepare(\"DELETE FROM test WHERE a = ?\")\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n rows = [(0, 0, 0, 0),\n (0, 0, 1, 0),\n (0, 1, 0, 0),\n (0, 1, 1, 0),\n (1, 0, 0, 0),\n (1, 0, 1, 0),\n (1, 1, -1, 0),\n (1, 1, 0, 0),\n (1, 1, 1, 0)]\n\n for row in rows:\n session.execute(insert_stmt, row)\n\n logger.debug(\"Testing MV primary key: {}\".format(mv_primary_key))\n\n session.execute(\"CREATE MATERIALIZED VIEW mv AS SELECT * FROM test WHERE \"\n \"a = 1 AND b IS NOT NULL AND c = 1 PRIMARY KEY {}\".format(mv_primary_key))\n time.sleep(3)\n\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # insert new rows that does not match the filter\n session.execute(insert_stmt, (0, 0, 1, 0))\n session.execute(insert_stmt, (1, 1, 0, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # insert new row that does match the filter\n session.execute(insert_stmt, (1, 2, 1, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # update rows that does not match the filter\n session.execute(update_stmt, (1, 1, -1, 0))\n session.execute(update_stmt, (0, 1, 1, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # update a row that does match the filter\n session.execute(update_stmt, (2, 1, 1, 1))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete rows that does not match the filter\n session.execute(delete_stmt1, (1, 1, -1))\n session.execute(delete_stmt1, (2, 0, 1))\n session.execute(delete_stmt2, (0,))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete a row that does match the filter\n session.execute(delete_stmt1, (1, 1, 1))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete a partition that matches the filter\n session.execute(delete_stmt2, (1,))\n assert_all(session, \"SELECT a, b, c, d FROM mv\", [], 
cl=ConsistencyLevel.QUORUM)\n\n # Cleanup\n session.execute(\"DROP MATERIALIZED VIEW mv\")\n session.execute(\"DROP TABLE test\")",
"def test_create(self):\n session = self.prepare(user_table=True)\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 1, \"Expecting 1 materialized view == got\" + str(result)",
"def test_populate_mv_after_insert(self):\n session = self.prepare(consistency_level=ConsistencyLevel.QUORUM)\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int)\")\n\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({v}, {v})\".format(v=i))\n\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL \"\n \"AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"wait for view to build\")\n self._wait_for_view(\"ks\", \"t_by_v\")\n\n logger.debug(\"wait that all batchlogs are replayed\")\n self._replay_batchlogs()\n\n for i in range(1000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(i), [i, i])",
"def testConsistency(self):",
"def test_immutable(self):\n session = self.prepare(user_table=True)\n\n # cannot insert\n assert_invalid(session, \"INSERT INTO users_by_state (state, username) VALUES ('TX', 'user1');\",\n \"Cannot directly modify a materialized view\")\n\n # cannot update\n assert_invalid(session, \"UPDATE users_by_state SET session_token='XYZ' WHERE username='user1' AND state = 'TX';\",\n \"Cannot directly modify a materialized view\")\n\n # cannot delete a row\n assert_invalid(session, \"DELETE from users_by_state where state='TX';\",\n \"Cannot directly modify a materialized view\")\n\n # cannot delete a cell\n assert_invalid(session, \"DELETE session_token from users_by_state where state='TX';\",\n \"Cannot directly modify a materialized view\")\n\n # cannot alter a table\n assert_invalid(session, \"ALTER TABLE users_by_state ADD first_name varchar\",\n \"Cannot use ALTER TABLE on Materialized View\")",
"def test_lwt(self):\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"Inserting initial data using IF NOT EXISTS\")\n for i in range(1000):\n session.execute(\n \"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS\".format(v=i)\n )\n self._replay_batchlogs()\n\n logger.debug(\"All rows should have been inserted\")\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )\n\n logger.debug(\"Tyring to UpInsert data with a different value using IF NOT EXISTS\")\n for i in range(1000):\n v = i * 2\n session.execute(\n \"INSERT INTO t (id, v, v2, v3) VALUES ({id}, {v}, 'a', 3.0) IF NOT EXISTS\".format(id=i, v=v)\n )\n self._replay_batchlogs()\n\n logger.debug(\"No rows should have changed\")\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )\n\n logger.debug(\"Update the 10 first rows with a different value\")\n for i in range(1000):\n v = i + 2000\n session.execute(\n \"UPDATE t SET v={v} WHERE id = {id} IF v < 10\".format(id=i, v=v)\n )\n self._replay_batchlogs()\n\n logger.debug(\"Verify that only the 10 first rows changed.\")\n results = list(session.execute(\"SELECT * FROM t_by_v;\"))\n assert len(results) == 1000\n for i in range(1000):\n v = i + 2000 if i < 10 else i\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(v),\n [v, i, 'a', 3.0]\n )\n\n logger.debug(\"Deleting the first 10 rows\")\n for i in range(1000):\n v = i + 2000\n session.execute(\n \"DELETE FROM t WHERE id = {id} IF v = {v} \".format(id=i, v=v)\n )\n self._replay_batchlogs()\n\n logger.debug(\"Verify that only the 10 first rows have been deleted.\")\n results = list(session.execute(\"SELECT * FROM t_by_v;\"))\n assert len(results) == 990\n for i in range(10, 1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )",
"def test_drop_while_building(self):\n\n session = self.prepare(options={'concurrent_materialized_view_builders': 4}, install_byteman=True)\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n\n logger.debug(\"Inserting initial data\")\n for i in range(5000):\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS\".format(v=i))\n\n logger.debug(\"Slowing down MV build with byteman\")\n for node in self.cluster.nodelist():\n node.byteman_submit([mk_bman_path('4.0/view_builder_task_sleep.btm')])\n\n logger.debug(\"Create a MV\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"Wait and ensure the MV build has started. Waiting up to 2 minutes.\")\n self._wait_for_view_build_start(session, 'ks', 't_by_v', wait_minutes=2)\n\n logger.debug(\"Drop the MV while it is still building\")\n session.execute(\"DROP MATERIALIZED VIEW t_by_v\")\n\n logger.debug(\"Verify that the build has been stopped before its finalization without errors\")\n for node in self.cluster.nodelist():\n self.check_logs_for_errors()\n assert not node.grep_log('Marking view', filename='debug.log')\n assert node.grep_log('Stopping current view builder due to schema change', filename='debug.log')\n\n logger.debug(\"Verify that the view has been removed\")\n failed = False\n try:\n session.execute(\"SELECT COUNT(*) FROM t_by_v\")\n except InvalidRequest:\n failed = True\n self.assertTrue(failed, \"The view shouldn't be queryable\")\n self._assert_view_meta(session, views=1, exists=False)\n\n logger.debug(\"Create the MV again\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"Verify that the MV has been successfully created\")\n self._wait_for_view('ks', 't_by_v')\n assert_one(session, \"SELECT COUNT(*) FROM t_by_v\", [5000])",
"def propagate_view_creation_over_non_existing_table(self):\n\n cluster = self.cluster\n cluster.populate(3)\n cluster.start()\n node1, node2, node3 = self.cluster.nodelist()\n session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)\n create_ks(session, 'ks', 3)\n\n session.execute('CREATE TABLE users (username varchar PRIMARY KEY, state varchar)')\n\n # create a materialized view only in nodes 1 and 2\n node3.stop(wait_other_notice=True)\n session.execute(('CREATE MATERIALIZED VIEW users_by_state AS '\n 'SELECT * FROM users WHERE state IS NOT NULL AND username IS NOT NULL '\n 'PRIMARY KEY (state, username)'))\n\n # drop the base table only in node 3\n node1.stop(wait_other_notice=True)\n node2.stop(wait_other_notice=True)\n node3.start(wait_for_binary_proto=True)\n session = self.patient_cql_connection(node3, consistency_level=ConsistencyLevel.QUORUM)\n session.execute('DROP TABLE ks.users')\n\n # restart the cluster\n cluster.stop()\n cluster.start()\n\n # node3 should have received and ignored the creation of the MV over the dropped table\n assert node3.grep_log('Not adding view users_by_state because the base table')",
"def test_drop_mv(self):\n session = self.prepare(user_table=True)\n\n # create another materialized view\n session.execute((\"CREATE MATERIALIZED VIEW users_by_birth_year AS \"\n \"SELECT * FROM users WHERE birth_year IS NOT NULL AND \"\n \"username IS NOT NULL PRIMARY KEY (birth_year, username)\"))\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 2, \"Expecting {} materialized view, got {}\".format(2, len(result))\n\n session.execute(\"DROP MATERIALIZED VIEW ks.users_by_state;\")\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 1, \"Expecting {} materialized view, got {}\".format(1, len(result))",
"def sstable_repairedset_test(self):\n cluster = self.cluster\n cluster.set_configuration_options(values={'hinted_handoff_enabled': False})\n cluster.populate(2).start()\n node1, node2 = cluster.nodelist()\n node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=2)', 'compaction(strategy=SizeTieredCompactionStrategy,enabled=false)', '-rate', 'threads=50'])\n\n node1.flush()\n node2.flush()\n\n node2.stop(gently=False)\n\n node2.run_sstablerepairedset(keyspace='keyspace1')\n node2.start(wait_for_binary_proto=True)\n\n initialOut1 = node1.run_sstablemetadata(keyspace='keyspace1').stdout\n initialOut2 = node2.run_sstablemetadata(keyspace='keyspace1').stdout\n\n matches = findall('(?<=Repaired at:).*', '\\n'.join([initialOut1, initialOut2]))\n debug(\"Repair timestamps are: {}\".format(matches))\n\n uniquematches = set(matches)\n matchcount = Counter(matches)\n\n self.assertGreaterEqual(len(uniquematches), 2, uniquematches)\n\n self.assertGreaterEqual(max(matchcount), 1, matchcount)\n\n self.assertIn('Repaired at: 0', '\\n'.join([initialOut1, initialOut2]))\n\n node1.stop()\n node2.stress(['write', 'n=15K', 'no-warmup', '-schema', 'replication(factor=2)'])\n node2.flush()\n node1.start(wait_for_binary_proto=True)\n\n if cluster.version() >= \"2.2\":\n node1.repair()\n else:\n node1.nodetool(\"repair -par -inc\")\n\n finalOut1 = node1.run_sstablemetadata(keyspace='keyspace1').stdout\n finalOut2 = node2.run_sstablemetadata(keyspace='keyspace1').stdout\n\n matches = findall('(?<=Repaired at:).*', '\\n'.join([finalOut1, finalOut2]))\n\n debug(matches)\n\n uniquematches = set(matches)\n matchcount = Counter(matches)\n\n self.assertGreaterEqual(len(uniquematches), 2)\n\n self.assertGreaterEqual(max(matchcount), 2)\n\n self.assertNotIn('Repaired at: 0', '\\n'.join([finalOut1, finalOut2]))",
"def test_migrate_view_fields(self):\n self.test_view = RecordView.create(\n self.testcoll, test_view_id, test_view_create_values\n )\n migrate_coll_data(self.testcoll)\n # Read field definition and check for inline field list\n view_data = self.check_entity_values(\n \"_view\", test_view_id, check_values=test_view_migrated_values\n )\n return",
"def test_backup_restore_with_views(self):\n if \"ephemeral\" in self.input.param(\"bucket_type\", 'membase'):\n self.log.info(\"\\n****** view does not support on ephemeral bucket ******\")\n return\n rest_src = RestConnection(self.backupset.cluster_host)\n if \"community\" in self.cb_version:\n rest_src.add_node(self.servers[1].rest_username, self.servers[1].rest_password,\n self.servers[1].cluster_ip, services=['kv', 'index', 'n1ql'])\n else:\n rest_src.add_node(self.servers[1].rest_username, self.servers[1].rest_password,\n self.servers[1].cluster_ip, services=['index', 'kv'])\n rebalance = self.cluster.async_rebalance(self.cluster_to_backup, [], [])\n rebalance.result()\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n default_map_func = \"function (doc) {\\n emit(doc._id, doc);\\n}\"\n default_view_name = \"test\"\n default_ddoc_name = \"ddoc_test\"\n prefix = \"dev_\"\n query = {\"full_set\": \"true\", \"stale\": \"false\", \"connection_timeout\": 60000}\n view = View(default_view_name, default_map_func)\n task = self.cluster.async_create_view(self.backupset.cluster_host,\n default_ddoc_name, view, \"default\")\n task.result()\n self.backup_cluster_validate()\n rest_target = RestConnection(self.backupset.restore_cluster_host)\n if self.input.clusters[0][1].ip != self.servers[1].ip:\n rest_target.add_node(self.input.clusters[0][1].rest_username,\n self.input.clusters[0][1].rest_password,\n self.input.clusters[0][1].cluster_ip, services=['kv', 'index'])\n rebalance = self.cluster.async_rebalance(self.cluster_to_restore, [], [])\n rebalance.result()\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\">=\")\n try:\n result = self.cluster.query_view(self.backupset.restore_cluster_host,\n prefix + default_ddoc_name,\n default_view_name, query, timeout=30)\n self.assertEqual(len(result['rows']), self.num_items,\n \"Querying view on restore cluster did not return expected number of items\")\n self.log.info(\"Querying view on restore cluster returned expected number of items\")\n except TimeoutError:\n self.fail(\"View could not be queried in restore cluster within timeout\")",
"def test_populate_mv_after_insert_wide_rows(self):\n session = self.prepare(consistency_level=ConsistencyLevel.QUORUM)\n\n session.execute(\"CREATE TABLE t (id int, v int, PRIMARY KEY (id, v))\")\n session.cluster.control_connection.wait_for_schema_agreement()\n\n for i in range(5):\n for j in range(10000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({}, {})\".format(i, j))\n\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL \"\n \"AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug(\"wait for view to build\")\n self._wait_for_view(\"ks\", \"t_by_v\")\n\n logger.debug(\"wait that all batchlogs are replayed\")\n self._replay_batchlogs()\n for i in range(5):\n for j in range(10000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} AND v = {}\".format(i, j), [j, i])",
"def test_comprehensive_system(self):\n\n with ROV_Validity_Table() as db:\n\n # Run MRT_Parser to fill mrt_announcements table which will\n # be used as the input table for RPKI_Validator.\n input_table = MRT_Announcements_Table.name\n MRT_Parser().run()\n\n RPKI_Validator_Parser().run(table=input_table)\n\n initial_count = db.get_count()\n initial_rows = db.get_all()\n \n # all prefix-origin pairs from input should be in val table\n sql = f\"\"\"SELECT * FROM {input_table} a\n LEFT JOIN {db.name} b\n USING (prefix, origin)\n WHERE b.prefix IS NULL;\"\"\"\n assert len(db.execute(sql)) == 0\n\n # clear validity table and run with a wait before getting data\n # should be the same with and without waiting\n db.clear_table()\n\n RPKI_Validator_Parser().run(table=input_table, wait=True)\n\n second_count = db.get_count()\n second_rows = db.get_all()\n\n assert initial_count == second_count\n assert initial_rows == second_rows",
"def test_resume_stopped_build(self):\n\n session = self.prepare(options={'concurrent_materialized_view_builders': 4}, install_byteman=True)\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n nodes = self.cluster.nodelist()\n self.fixture_dtest_setup.ignore_log_patterns = [r'Compaction interrupted: View build']\n\n logger.debug(\"Inserting initial data\")\n for i in range(5000):\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS\".format(v=i))\n\n logger.debug(\"Slowing down MV build with byteman\")\n for node in nodes:\n node.byteman_submit([mk_bman_path('4.0/view_builder_task_sleep.btm')])\n\n logger.debug(\"Create a MV\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"Wait and ensure the MV build has started. Waiting up to 2 minutes.\")\n self._wait_for_view_build_start(session, 'ks', 't_by_v', wait_minutes=2)\n\n logger.debug(\"Stopping all running view build tasks with nodetool\")\n for node in nodes:\n node.watch_log_for('Starting new view build for range', filename='debug.log', timeout=120)\n node.nodetool('stop VIEW_BUILD')\n\n logger.debug(\"Checking logs to verify that some view build tasks have been stopped\")\n for node in nodes:\n node.watch_log_for('Stopped build for view', filename='debug.log', timeout=120)\n node.watch_log_for('Compaction interrupted: View build', filename='system.log', timeout=120)\n node.watch_log_for('Interrupted build for view', filename='debug.log', timeout=120)\n assert not node.grep_log('Marking view', filename='debug.log')\n self.check_logs_for_errors()\n\n logger.debug(\"Check that MV shouldn't be built yet.\")\n assert len(list(session.execute(\"SELECT COUNT(*) FROM t_by_v\"))) != 5000\n\n logger.debug(\"Restart the cluster\")\n self.cluster.stop()\n marks = [node.mark_log() for node in nodes]\n self.cluster.start()\n session = self.patient_cql_connection(nodes[0])\n\n logger.debug(\"Verify that the MV has been successfully created\")\n self._wait_for_view('ks', 't_by_v')\n assert_one(session, \"SELECT COUNT(*) FROM ks.t_by_v\", [5000])\n\n logger.debug(\"Checking logs to verify that the view build has been resumed and completed after restart\")\n for node, mark in zip(nodes, marks):\n assert node.grep_log('Resuming view build', filename='debug.log', from_mark=mark)\n assert node.grep_log('Marking view', filename='debug.log', from_mark=mark)\n self.check_logs_for_errors()",
"def _test_no_base_column_in_view_pk_complex_timestamp(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1, node2, node3 = self.cluster.nodelist()\n\n session.execute('USE ks')\n session.execute(\"CREATE TABLE t (k int, c int, a int, b int, e int, f int, primary key(k, c))\")\n session.execute((\"CREATE MATERIALIZED VIEW mv AS SELECT k,c,a,b FROM t \"\n \"WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c, k)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n # update unselected, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 1 SET e=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, 1, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # remove unselected, add selected column, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 2 SET e=null, b=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, 1, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, 1])\n\n # remove selected column, view row is removed\n self.update_view(session, \"UPDATE t USING TIMESTAMP 2 SET e=null, b=null WHERE k=1 AND c=1;\", flush)\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")\n\n # update unselected with ts=3, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET f=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # insert livenesssInfo, view row should be alive\n self.update_view(session, \"INSERT INTO t(k,c) VALUES(1,1) USING TIMESTAMP 3\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # remove unselected, view row should be alive because of base livenessInfo alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET f=null WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # add selected column, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET a=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n\n # update unselected, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 SET f=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n\n # delete with ts=3, view row should be alive due to unselected@ts4\n self.update_view(session, \"DELETE FROM t USING TIMESTAMP 3 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # remove unselected, view row should be removed\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 SET f=null WHERE k=1 AND c=1;\", flush)\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")\n\n # add selected with ts=7, view row is alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 7 SET b=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, 
\"SELECT * FROM t\", [1, 1, None, 1, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, 1])\n\n # remove selected with ts=7, view row is dead\n self.update_view(session, \"UPDATE t USING TIMESTAMP 7 SET b=null WHERE k=1 AND c=1;\", flush)\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")\n\n # add selected with ts=5, view row is alive (selected column should not affects each other)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 5 SET a=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n\n start = time.time()\n # add selected with ttl=30 (we apparently need a long ttl because the flushing etc that self.update_view does can take a long time)\n update_time = self.update_view(session, \"UPDATE t USING TTL 30 SET a=1 WHERE k=1 AND c=1;\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n except AssertionError as ae:\n if (time.time() - start) >= 30:\n pytest.fail(\"Please increase the 30 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n wait_time = update_time + 30 - time.time()\n if wait_time > 0:\n time.sleep(wait_time)\n\n start = time.time()\n # update unselected with ttl=30, view row should be alive\n update_time = self.update_view(session, \"UPDATE t USING TTL 30 SET f=1 WHERE k=1 AND c=1;\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n except AssertionError as ae:\n if (time.time() - start) >= 30:\n pytest.fail(\"Please increase the 30 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n wait_time = update_time + 30 - time.time()\n if wait_time > 0:\n time.sleep(wait_time)\n\n # view row still alive due to base livenessInfo\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")",
"def test_key_repair_lens(self):\n # Create new work trail and retrieve the HEAD workflow of the default\n # branch\n f_handle = self.filestore.upload_file(KEY_REPAIR_FILE)\n ds1 = self.datastore.load_dataset(f_handle=f_handle)\n # Missing Value Lens\n command = cmd.mimir_key_repair(DATASET_NAME, ds1.column_by_name('Empid').identifier)\n result = self.compute_lens_result(ds1, command)\n self.assertTrue(result.is_success)\n ds = self.datastore.get_dataset(result.provenance.write[DATASET_NAME].identifier)\n self.assertEqual(len(ds.columns), 4)\n self.assertEqual(ds.row_count, 2)\n names = set()\n empids = set()\n for row in ds.fetch_rows():\n empids.add(int(row.values[0]))\n names.add(row.values[1])\n self.assertTrue(1 in empids)\n self.assertTrue('Alice' in names or 'Bob' in names)\n self.assertFalse('Alice' in names and 'Bob' in names)\n self.assertTrue('Carla' in names)\n # Test error case and command text\n with self.assertRaises(ValueError):\n command = cmd.mimir_key_repair('MY DS', 'MY COL')\n result = self.compute_lens_result(ds, command)",
"def test_add_node_after_mv(self):\n\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=-i))\n\n for i in range(1000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(-i), [-i, i])\n\n node4 = new_node(self.cluster, data_center=\"dc1\")\n node4.start(wait_for_binary_proto=True, jvm_args=[\"-Dcassandra.migration_task_wait_in_seconds={}\".format(MIGRATION_WAIT)])\n\n session2 = self.patient_exclusive_cql_connection(node4)\n\n \"\"\"\n @jira_ticket CASSANDRA-12984\n\n Assert that MVs are marked as build after bootstrap. Otherwise newly streamed MVs will be built again\n \"\"\"\n assert_one(session2, \"SELECT count(*) FROM system.built_views WHERE keyspace_name = 'ks' AND view_name = 't_by_v'\", [1])\n\n for i in range(1000):\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(-i), [-i, i])\n\n for i in range(1000, 1100):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=-i))\n\n for i in range(1000, 1100):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(-i), [-i, i])",
"def test_change_provisioned_throughput_usual_case():",
"def verify_no_snapshot_reingestion(c: Composition) -> None:\n c.run(\"testdrive\", \"wait-for-snapshot.td\", \"postgres-disable-select-permission.td\")\n\n restart_mz(c)\n\n c.run(\n \"testdrive\",\n \"delete-rows-t1.td\",\n \"delete-rows-t2.td\",\n \"alter-table.td\",\n \"alter-mz.td\",\n )"
]
| [
"0.71252847",
"0.7041749",
"0.6935138",
"0.6930578",
"0.68625474",
"0.6740868",
"0.6521269",
"0.65040326",
"0.6496666",
"0.64616734",
"0.6252084",
"0.61252517",
"0.60395676",
"0.6006589",
"0.60032815",
"0.5981635",
"0.59799033",
"0.59473616",
"0.5926516",
"0.5917056",
"0.59159195",
"0.5866138",
"0.58238244",
"0.58087254",
"0.57657504",
"0.5746313",
"0.57386076",
"0.57187474",
"0.5702771",
"0.5688918"
]
| 0.75850284 | 0 |
Test repair does not fail when there is MV lock contention CASSANDRA-12905 | def test_base_replica_repair_with_contention(self):
self._base_replica_repair_test(fail_mv_lock=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _base_replica_repair_test(self, fail_mv_lock=False):\n\n self.prepare(rf=3)\n node1, node2, node3 = self.cluster.nodelist()\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Write initial data')\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)\".format(v=i))\n\n self._replay_batchlogs()\n\n logger.debug('Verify the data in the MV with CL=ALL')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0],\n cl=ConsistencyLevel.ALL\n )\n\n logger.debug('Shutdown node1')\n node1.stop(wait_other_notice=True)\n logger.debug('Delete node1 data')\n node1.clear(clear_all=True)\n\n jvm_args = []\n if fail_mv_lock:\n if self.cluster.version() >= LooseVersion('3.10'): # CASSANDRA-10134\n jvm_args = ['-Dcassandra.allow_unsafe_replace=true', '-Dcassandra.replace_address={}'.format(node1.address())]\n jvm_args.append(\"-Dcassandra.test.fail_mv_locks_count=1000\")\n # this should not make Keyspace.apply throw WTE on failure to acquire lock\n node1.set_configuration_options(values={'write_request_timeout_in_ms': 100})\n logger.debug('Restarting node1 with jvm_args={}'.format(jvm_args))\n node1.start(wait_for_binary_proto=True, jvm_args=jvm_args)\n logger.debug('Shutdown node2 and node3')\n node2.stop(wait_other_notice=True)\n node3.stop(wait_other_notice=True)\n\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n\n logger.debug('Verify that there is no data on node1')\n for i in range(1000):\n assert_none(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i)\n )\n\n logger.debug('Restarting node2 and node3')\n node2.start(wait_for_binary_proto=True)\n node3.start(wait_for_binary_proto=True)\n\n # Just repair the base replica\n logger.debug('Starting repair on node1')\n node1.nodetool(\"repair ks t\")\n\n logger.debug('Verify data with cl=ALL')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )",
"def test_complex_repair(self):\n session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)\n node1, node2, node3, node4, node5 = self.cluster.nodelist()\n\n # we create the base table with gc_grace_seconds=5 so batchlog will expire after 5 seconds\n session.execute(\"CREATE TABLE ks.t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\"\n \"WITH gc_grace_seconds = 5\")\n session.execute((\"CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Shutdown node2 and node3')\n node2.stop()\n node3.stop(wait_other_notice=True)\n\n logger.debug('Write initial data to node1 (will be replicated to node4 and node5)')\n for i in range(1000):\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)\".format(v=i))\n\n logger.debug('Verify the data in the MV on node1 with CL=ONE')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )\n\n logger.debug('Close connection to node1')\n session.cluster.shutdown()\n logger.debug('Shutdown node1, node4 and node5')\n node1.stop()\n node4.stop()\n node5.stop()\n\n logger.debug('Start nodes 2 and 3')\n node2.start()\n node3.start(wait_for_binary_proto=True)\n\n session2 = self.patient_cql_connection(node2)\n\n logger.debug('Verify the data in the MV on node2 with CL=ONE. No rows should be found.')\n for i in range(1000):\n assert_none(\n session2,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(i)\n )\n\n logger.debug('Write new data in node2 and node3 that overlap those in node1, node4 and node5')\n for i in range(1000):\n # we write i*2 as value, instead of i\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)\".format(v=i * 2))\n\n logger.debug('Verify the new data in the MV on node2 with CL=ONE')\n for i in range(1000):\n v = i * 2\n assert_one(\n session2,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(v),\n [v, v, 'a', 3.0]\n )\n\n logger.debug('Wait for batchlogs to expire from node2 and node3')\n time.sleep(5)\n\n logger.debug('Start remaining nodes')\n node1.start(wait_for_binary_proto=True)\n node4.start(wait_for_binary_proto=True)\n node5.start(wait_for_binary_proto=True)\n\n session = self.patient_cql_connection(node1)\n\n logger.debug('Read data from MV at QUORUM (old data should be returned)')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0],\n cl=ConsistencyLevel.QUORUM\n )\n\n logger.debug('Run global repair on node1')\n node1.repair()\n\n logger.debug('Read data from MV at quorum (new data should be returned after repair)')\n for i in range(1000):\n v = i * 2\n assert_one(\n session,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(v),\n [v, v, 'a', 3.0],\n cl=ConsistencyLevel.QUORUM\n )",
"def test_really_complex_repair(self):\n session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)\n node1, node2, node3, node4, node5 = self.cluster.nodelist()\n\n # we create the base table with gc_grace_seconds=5 so batchlog will expire after 5 seconds\n session.execute(\"CREATE TABLE ks.t (id int, v int, v2 text, v3 decimal, PRIMARY KEY(id, v, v2))\"\n \"WITH gc_grace_seconds = 1\")\n session.execute((\"CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL AND v IS NOT NULL AND \"\n \"v2 IS NOT NULL PRIMARY KEY (v2, v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Shutdown node2 and node3')\n node2.stop(wait_other_notice=True)\n node3.stop(wait_other_notice=True)\n\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0)\")\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'a', 3.0)\")\n self._replay_batchlogs()\n logger.debug('Verify the data in the MV on node1 with CL=ONE')\n assert_all(session, \"SELECT * FROM ks.t_by_v WHERE v2 = 'a'\", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]])\n\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'b', 3.0)\")\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'b', 3.0)\")\n self._replay_batchlogs()\n logger.debug('Verify the data in the MV on node1 with CL=ONE')\n assert_all(session, \"SELECT * FROM ks.t_by_v WHERE v2 = 'b'\", [['b', 1, 1, 3.0], ['b', 2, 2, 3.0]])\n\n session.shutdown()\n\n logger.debug('Shutdown node1, node4 and node5')\n node1.stop()\n node4.stop()\n node5.stop()\n\n logger.debug('Start nodes 2 and 3')\n node2.start()\n node3.start(wait_for_binary_proto=True)\n\n session2 = self.patient_cql_connection(node2)\n session2.execute('USE ks')\n\n logger.debug('Verify the data in the MV on node2 with CL=ONE. 
No rows should be found.')\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'a'\")\n\n logger.debug('Write new data in node2 that overlap those in node1')\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'c', 3.0)\")\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'c', 3.0)\")\n self._replay_batchlogs()\n assert_all(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'c'\", [['c', 1, 1, 3.0], ['c', 2, 2, 3.0]])\n\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'd', 3.0)\")\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'd', 3.0)\")\n self._replay_batchlogs()\n assert_all(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'd'\", [['d', 1, 1, 3.0], ['d', 2, 2, 3.0]])\n\n logger.debug(\"Composite delete of everything\")\n session2.execute(\"DELETE FROM ks.t WHERE id = 1 and v = 1\")\n session2.execute(\"DELETE FROM ks.t WHERE id = 2 and v = 2\")\n self._replay_batchlogs()\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'c'\")\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'd'\")\n\n logger.debug('Wait for batchlogs to expire from node2 and node3')\n time.sleep(5)\n\n logger.debug('Start remaining nodes')\n node1.start(wait_for_binary_proto=True)\n node4.start(wait_for_binary_proto=True)\n node5.start(wait_for_binary_proto=True)\n\n # at this point the data isn't repaired so we have an inconsistency.\n # this value should return None\n assert_all(\n session2,\n \"SELECT * FROM ks.t_by_v WHERE v2 = 'a'\", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]],\n cl=ConsistencyLevel.QUORUM\n )\n\n logger.debug('Run global repair on node1')\n node1.repair()\n\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'a'\", cl=ConsistencyLevel.QUORUM)",
"def test_change_provisioned_throughput_usual_case():",
"def _simple_repair_test(self, repair_base=False, repair_view=False):\n\n session = self.prepare(rf=3, options={'hinted_handoff_enabled': False})\n node1, node2, node3 = self.cluster.nodelist()\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Shutdown node2')\n node2.stop(wait_other_notice=True)\n\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)\".format(v=i))\n\n self._replay_batchlogs()\n\n logger.debug('Verify the data in the MV with CL=ONE')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )\n\n logger.debug('Verify the data in the MV with CL=ALL. All should be unavailable.')\n for i in range(1000):\n statement = SimpleStatement(\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n consistency_level=ConsistencyLevel.ALL\n )\n\n assert_unavailable(\n session.execute,\n statement\n )\n\n logger.debug('Start node2, and repair')\n node2.start(wait_for_binary_proto=True)\n if repair_base:\n node1.nodetool(\"repair ks t\")\n if repair_view:\n node1.nodetool(\"repair ks t_by_v\")\n\n logger.debug('Verify the data in the MV with CL=ALL. All should be available now and no digest mismatch')\n for i in range(1000):\n query = SimpleStatement(\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n consistency_level=ConsistencyLevel.ALL\n )\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n assert self._rows_to_list(result.current_rows), [[i, i, 'a' == 3.0]]",
"def verify_no_snapshot_reingestion(c: Composition) -> None:\n c.run(\"testdrive\", \"wait-for-snapshot.td\", \"postgres-disable-select-permission.td\")\n\n restart_mz(c)\n\n c.run(\n \"testdrive\",\n \"delete-rows-t1.td\",\n \"delete-rows-t2.td\",\n \"alter-table.td\",\n \"alter-mz.td\",\n )",
"def test_MB_51219(self):\n len_of_nodes_to_afo = len(self.failover_order[0].split(\":\"))\n nodes_to_fo = dict()\n nodes_in_cluster = self.rest.get_nodes()\n for node in nodes_in_cluster:\n if len_of_nodes_to_afo <= 0:\n break\n if str(self.cluster.master.ip) == str(node.ip):\n continue\n nodes_to_fo[node] = self.failover_method\n len_of_nodes_to_afo -= 1\n self.cluster_util.update_cluster_nodes_service_list(self.cluster)\n self.nodes_to_fail = nodes_to_fo\n self.__update_server_obj()\n try:\n failover_task = ConcurrentFailoverTask(\n task_manager=self.task_manager, master=self.orchestrator,\n servers_to_fail=self.nodes_to_fail,\n expected_fo_nodes=self.fo_events,\n task_type=\"induce_failure\")\n self.task_manager.add_new_task(failover_task)\n self.task_manager.get_task_result(failover_task)\n dictionary = dict(list(self.nodes_to_fail.items())[:1])\n failover_task = ConcurrentFailoverTask(\n task_manager=self.task_manager, master=self.orchestrator,\n servers_to_fail=dictionary,\n task_type=\"revert_failure\")\n self.task_manager.add_new_task(failover_task)\n self.task_manager.get_task_result(failover_task)\n timeout = int(time()) + 15\n task_id_changed = False\n self.prev_rebalance_status_id = None\n while not task_id_changed and int(time()) < timeout:\n server_task = self.rest.ns_server_tasks(\n task_type=\"rebalance\", task_sub_type=\"failover\")\n if server_task and server_task[\"statusId\"] != \\\n self.prev_rebalance_status_id:\n task_id_changed = True\n self.prev_rebalance_status_id = server_task[\"statusId\"]\n self.log.debug(\"New failover status id: %s\"\n % server_task[\"statusId\"])\n self.assertTrue(task_id_changed,\n \"Fail-over did not happen as expected\")\n self.bucket_util._wait_warmup_completed(self.cluster.buckets[0],\n servers=[\n self.cluster.master],\n wait_time=30)\n finally:\n # reverting failure from all the nodes\n failover_task = ConcurrentFailoverTask(\n task_manager=self.task_manager, master=self.orchestrator,\n servers_to_fail=self.nodes_to_fail,\n task_type=\"revert_failure\")\n self.task_manager.add_new_task(failover_task)\n self.task_manager.get_task_result(failover_task)\n result = self.cluster_util.rebalance(self.cluster)\n self.assertTrue(result, \"Final re-balance failed\")",
"def test_with_process_crash(self):\n if self.num_replicas < 2:\n self.assertTrue(False, msg=\"Required: num_replicas > 1\")\n\n # Override num_of_nodes affected to 1 (Positive case)\n self.num_nodes_affected = 1\n\n error_sim = dict()\n shell_conn = dict()\n cbstat_obj = dict()\n failover_info = dict()\n vb_info_info = dict()\n active_vbs_in_target_nodes = list()\n failover_info[\"init\"] = dict()\n failover_info[\"afterCrud\"] = dict()\n vb_info_info[\"init\"] = dict()\n vb_info_info[\"afterCrud\"] = dict()\n\n self.log.info(\"Selecting nodes to simulate error condition\")\n target_nodes = DurabilityHelper.getTargetNodes(self.cluster,\n self.nodes_init,\n self.num_nodes_affected)\n\n self.log.info(\"Will simulate error condition on %s\" % target_nodes)\n for node in target_nodes:\n cbstat_obj[node.ip] = Cbstats(node)\n active_vbs_in_target_nodes += cbstat_obj[node.ip].vbucket_list(\n self.bucket.name,\n \"active\")\n vb_info_info[\"init\"][node.ip] = cbstat_obj[node.ip].vbucket_seqno(\n self.bucket.name)\n failover_info[\"init\"][node.ip] = \\\n cbstat_obj[node.ip].failover_stats(self.bucket.name)\n\n # Remove active vbuckets from doc_loading to avoid errors\n load_spec = dict()\n load_spec[\"doc_crud\"] = dict()\n load_spec[\"doc_crud\"][\n MetaCrudParams.DocCrud.CREATE_PERCENTAGE_PER_COLLECTION] = 100\n load_spec[\"doc_crud\"][\n MetaCrudParams.DocCrud.UPDATE_PERCENTAGE_PER_COLLECTION] = 25\n load_spec[\"doc_crud\"][\n MetaCrudParams.DocCrud.DELETE_PERCENTAGE_PER_COLLECTION] = 25\n load_spec[\"doc_crud\"][\n MetaCrudParams.DocCrud.COMMON_DOC_KEY] = \"test_collections\"\n load_spec[\"target_vbuckets\"] = list(set(range(0, 1024))\n ^ set(active_vbs_in_target_nodes))\n\n self.log.info(\"Perform 'create', 'update', 'delete' mutations\")\n doc_loading_task = \\\n self.bucket_util.run_scenario_from_spec(\n self.task,\n self.cluster,\n self.cluster.buckets,\n load_spec,\n mutation_num=1,\n async_load=True)\n\n self.sleep(5, \"Wait for doc loaders to start loading data\")\n\n for node in target_nodes:\n # Create shell_connections\n shell_conn[node.ip] = RemoteMachineShellConnection(node)\n\n # Perform specified action\n error_sim[node.ip] = CouchbaseError(self.log,\n shell_conn[node.ip],\n node=node)\n error_sim[node.ip].create(self.simulate_error,\n bucket_name=self.bucket.name)\n\n # Perform new scope/collection creation during doc ops in parallel\n self.__perform_collection_crud()\n\n # Wait for document_loader tasks to complete\n self.task_manager.get_task_result(doc_loading_task)\n self.bucket_util.validate_doc_loading_results(doc_loading_task)\n if doc_loading_task.result is False:\n self.log_failure(\"Doc CRUDs failed with process crash\")\n\n if self.simulate_error \\\n not in [DiskError.DISK_FULL, DiskError.DISK_FAILURE]:\n # Revert the induced error condition\n for node in target_nodes:\n error_sim[node.ip].revert(self.simulate_error,\n bucket_name=self.bucket.name)\n\n # Disconnect the shell connection\n shell_conn[node.ip].disconnect()\n self.sleep(10, \"Wait for node recovery to complete\")\n\n # In case of error with Ephemeral bucket, need to rebalance\n # to make sure data is redistributed properly\n if self.bucket_type == Bucket.Type.EPHEMERAL:\n retry_num = 0\n result = None\n while retry_num != 2:\n result = self.task.rebalance(\n self.servers[0:self.nodes_init],\n [], [])\n if result:\n break\n retry_num += 1\n self.sleep(10, \"Wait before retrying rebalance\")\n\n self.assertTrue(result, \"Rebalance failed\")\n\n # Fetch latest failover stats and validate the values are 
updated\n self.log.info(\"Validating failover and seqno cbstats\")\n for node in target_nodes:\n vb_info_info[\"afterCrud\"][node.ip] = \\\n cbstat_obj[node.ip].vbucket_seqno(self.bucket.name)\n failover_info[\"afterCrud\"][node.ip] = \\\n cbstat_obj[node.ip].failover_stats(self.bucket.name)\n\n # Failover stat validation\n if self.simulate_error == CouchbaseError.KILL_MEMCACHED:\n val = failover_info[\"init\"][node.ip] \\\n != failover_info[\"afterCrud\"][node.ip]\n else:\n if self.simulate_error != CouchbaseError.STOP_MEMCACHED \\\n and self.bucket_type == Bucket.Type.EPHEMERAL:\n val = failover_info[\"init\"][node.ip] \\\n != failover_info[\"afterCrud\"][node.ip]\n else:\n val = failover_info[\"init\"][node.ip] \\\n == failover_info[\"afterCrud\"][node.ip]\n error_msg = \"Failover stats mismatch after error condition:\" \\\n \" %s != %s\" \\\n % (failover_info[\"init\"][node.ip],\n failover_info[\"afterCrud\"][node.ip])\n self.assertTrue(val, msg=error_msg)\n\n # Seq_no validation (High level)\n val = \\\n vb_info_info[\"init\"][node.ip] \\\n != vb_info_info[\"afterCrud\"][node.ip]\n self.assertTrue(val, msg=\"vbucket seq_no not updated after CRUDs\")\n\n # Doc count validation\n self.validate_test_failure()\n self.bucket_util.validate_docs_per_collections_all_buckets(\n self.cluster)",
"def test_no_sync_correctness(self):\n self.run_subtests(\n {\n \"sharding_strategy\": [\n ShardingStrategy.FULL_SHARD,\n ShardingStrategy.SHARD_GRAD_OP,\n ShardingStrategy.NO_SHARD,\n ],\n },\n self._test_no_sync_correctness,\n )",
"def test_double_corrupt(pid: int, otId: int) -> bool:\n box_mon = BoxMon()\n box_mon.personality = pid\n box_mon.otId = otId\n box_mon.sub(0).type0.species = 308\n box_mon.sub(0).type0.experience = 2195\n box_mon.sub(0).type0.friendship = 70\n sub1 = box_mon.sub(1).type1\n sub1.moves[0] = 33\n sub1.moves[1] = 253\n sub1.moves[2] = 185\n sub1.pp[0] = 35\n sub1.pp[1] = 10\n sub1.pp[2] = 20\n sub2 = box_mon.sub(2).type2\n sub2.attackEV = 22\n sub2.hpEV = 8\n sub3 = box_mon.sub(3).type3\n sub3.metLocation = 28\n sub3.metLevel = 14\n sub3.metGame = 3\n sub3.pokeBall = 2\n sub3.otGender = 1\n sub3.unk = 977594907\n box_mon.checksum = box_mon.calc_checksum()\n sum1 = box_mon.checksum\n box_mon.encrypt()\n box_mon.personality |= 0x40000000\n box_mon.decrypt()\n sum2 = box_mon.calc_checksum()\n box_mon.encrypt()\n box_mon.otId |= 0x40000000\n box_mon.decrypt()\n sum3 = box_mon.calc_checksum()\n if sum1 == sum2 == sum3 and box_mon.sub(3).type3.isEgg == 0:\n box_mon.encrypt()\n return True\n return False",
"def test_full_house_flush_ind(self):",
"def multiple_repair_test(self):\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n session = self.patient_cql_connection(node1)\n create_ks(session, 'ks', 3)\n create_cf(session, 'cf', read_repair=0.0, columns={'c1': 'text', 'c2': 'text'})\n\n debug(\"insert data\")\n\n insert_c1c2(session, keys=range(1, 50), consistency=ConsistencyLevel.ALL)\n node1.flush()\n\n debug(\"bringing down node 3\")\n node3.flush()\n node3.stop(gently=False)\n\n debug(\"inserting additional data into node 1 and 2\")\n insert_c1c2(session, keys=range(50, 100), consistency=ConsistencyLevel.TWO)\n node1.flush()\n node2.flush()\n\n debug(\"restarting and repairing node 3\")\n node3.start(wait_for_binary_proto=True)\n\n if cluster.version() >= \"2.2\":\n node3.repair()\n else:\n node3.nodetool(\"repair -par -inc\")\n\n # wait stream handlers to be closed on windows\n # after session is finished (See CASSANDRA-10644)\n if is_win:\n time.sleep(2)\n\n debug(\"stopping node 2\")\n node2.stop(gently=False)\n\n debug(\"inserting data in nodes 1 and 3\")\n insert_c1c2(session, keys=range(100, 150), consistency=ConsistencyLevel.TWO)\n node1.flush()\n node3.flush()\n\n debug(\"start and repair node 2\")\n node2.start(wait_for_binary_proto=True)\n\n if cluster.version() >= \"2.2\":\n node2.repair()\n else:\n node2.nodetool(\"repair -par -inc\")\n\n debug(\"replace node and check data integrity\")\n node3.stop(gently=False)\n node5 = Node('node5', cluster, True, ('127.0.0.5', 9160), ('127.0.0.5', 7000), '7500', '0', None, ('127.0.0.5', 9042))\n cluster.add(node5, False)\n node5.start(replace_address='127.0.0.3', wait_other_notice=True)\n\n assert_one(session, \"SELECT COUNT(*) FROM ks.cf LIMIT 200\", [149])",
"def test_unminimized(self):\n self.testcases[0].security_flag = True\n self.testcases[0].crash_state = 'abc\\ndef'\n self.testcases[0].crash_type = 'Heap-buffer-overflow\\nREAD {*}'\n self.testcases[0].minimized_keys = None\n self.testcases[1].security_flag = True\n self.testcases[1].crash_state = 'abc\\ndef'\n self.testcases[1].crash_type = 'Heap-buffer-overflow\\nREAD 3'\n\n for t in self.testcases:\n t.put()\n\n grouper.group_testcases()\n\n testcases = []\n for testcase_id in data_handler.get_open_testcase_id_iterator():\n testcases.append(data_handler.get_testcase_by_id(testcase_id))\n\n self.assertEqual(len(testcases), 2)\n self.assertEqual(testcases[0].group_id, 0)\n self.assertFalse(testcases[0].is_leader)\n self.assertEqual(testcases[1].group_id, 0)\n self.assertTrue(testcases[1].is_leader)",
"def multiple_subsequent_repair_test(self):\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n debug(\"Inserting data with stress\")\n node1.stress(['write', 'n=5M', 'no-warmup', '-rate', 'threads=10', '-schema', 'replication(factor=3)'])\n\n debug(\"Flushing nodes\")\n cluster.flush()\n\n debug(\"Waiting compactions to finish\")\n cluster.wait_for_compactions()\n\n if self.cluster.version() >= '2.2':\n debug(\"Repairing node1\")\n node1.nodetool(\"repair\")\n debug(\"Repairing node2\")\n node2.nodetool(\"repair\")\n debug(\"Repairing node3\")\n node3.nodetool(\"repair\")\n else:\n debug(\"Repairing node1\")\n node1.nodetool(\"repair -par -inc\")\n debug(\"Repairing node2\")\n node2.nodetool(\"repair -par -inc\")\n debug(\"Repairing node3\")\n node3.nodetool(\"repair -par -inc\")\n\n # Using \"print\" instead of debug() here is on purpose. The compactions\n # take a long time and don't print anything by default, which can result\n # in the test being timed out after 20 minutes. These print statements\n # prevent it from being timed out.\n print \"compacting node1\"\n node1.compact()\n print \"compacting node2\"\n node2.compact()\n print \"compacting node3\"\n node3.compact()\n\n # wait some time to be sure the load size is propagated between nodes\n debug(\"Waiting for load size info to be propagated between nodes\")\n time.sleep(45)\n\n load_size_in_kb = float(sum(map(lambda n: n.data_size(), [node1, node2, node3])))\n load_size = load_size_in_kb / 1024 / 1024\n debug(\"Total Load size: {}GB\".format(load_size))\n\n # There is still some overhead, but it's lot better. We tolerate 25%.\n expected_load_size = 4.5 # In GB\n assert_almost_equal(load_size, expected_load_size, error=0.25)",
"def test_key_repair_lens(self):\n # Create new work trail and retrieve the HEAD workflow of the default\n # branch\n f_handle = self.filestore.upload_file(KEY_REPAIR_FILE)\n ds1 = self.datastore.load_dataset(f_handle=f_handle)\n # Missing Value Lens\n command = cmd.mimir_key_repair(DATASET_NAME, ds1.column_by_name('Empid').identifier)\n result = self.compute_lens_result(ds1, command)\n self.assertTrue(result.is_success)\n ds = self.datastore.get_dataset(result.provenance.write[DATASET_NAME].identifier)\n self.assertEqual(len(ds.columns), 4)\n self.assertEqual(ds.row_count, 2)\n names = set()\n empids = set()\n for row in ds.fetch_rows():\n empids.add(int(row.values[0]))\n names.add(row.values[1])\n self.assertTrue(1 in empids)\n self.assertTrue('Alice' in names or 'Bob' in names)\n self.assertFalse('Alice' in names and 'Bob' in names)\n self.assertTrue('Carla' in names)\n # Test error case and command text\n with self.assertRaises(ValueError):\n command = cmd.mimir_key_repair('MY DS', 'MY COL')\n result = self.compute_lens_result(ds, command)",
"def _test_base_view_consistency_on_crash(self, fail_phase):\n\n self.cluster.set_batch_commitlog(enabled=True, use_batch_window = self.cluster.version() < '5.0')\n self.fixture_dtest_setup.ignore_log_patterns = [r'Dummy failure', r\"Failed to force-recycle all segments\"]\n self.prepare(rf=1, install_byteman=True)\n node1, node2, node3 = self.cluster.nodelist()\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Make node1 fail {} view writes'.format(fail_phase))\n node1.byteman_submit([mk_bman_path('fail_{}_view_write.btm'.format(fail_phase))])\n\n logger.debug('Write 1000 rows - all node1 writes should fail')\n\n failed = False\n for i in range(1, 1000):\n try:\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) USING TIMESTAMP {v}\".format(v=i))\n except WriteFailure:\n failed = True\n\n assert failed, \"Should fail at least once.\"\n assert node1.grep_log(\"Dummy failure\"), \"Should throw Dummy failure\"\n\n missing_entries = 0\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n for i in range(1, 1000):\n view_entry = rows_to_list(session.execute(SimpleStatement(\"SELECT * FROM t_by_v WHERE id = {} AND v = {}\".format(i, i),\n consistency_level=ConsistencyLevel.ONE)))\n base_entry = rows_to_list(session.execute(SimpleStatement(\"SELECT * FROM t WHERE id = {}\".format(i),\n consistency_level=ConsistencyLevel.ONE)))\n\n if not base_entry:\n missing_entries += 1\n if not view_entry:\n missing_entries += 1\n\n logger.debug(\"Missing entries {}\".format(missing_entries))\n assert missing_entries > 0\n\n logger.debug('Restarting node1 to ensure commit log is replayed')\n node1.stop(wait_other_notice=True)\n # Set batchlog.replay_timeout_seconds=1 so we can ensure batchlog will be replayed below\n node1.start(jvm_args=[\"-Dcassandra.batchlog.replay_timeout_in_ms=1\"])\n\n logger.debug('Replay batchlogs')\n time.sleep(0.001) # Wait batchlog.replay_timeout_in_ms=1 (ms)\n self._replay_batchlogs()\n\n logger.debug('Verify that both the base table entry and view are present after commit and batchlog replay')\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n for i in range(1, 1000):\n view_entry = rows_to_list(session.execute(SimpleStatement(\"SELECT * FROM t_by_v WHERE id = {} AND v = {}\".format(i, i),\n consistency_level=ConsistencyLevel.ONE)))\n base_entry = rows_to_list(session.execute(SimpleStatement(\"SELECT * FROM t WHERE id = {}\".format(i),\n consistency_level=ConsistencyLevel.ONE)))\n\n assert base_entry, \"Both base {} and view entry {} should exist.\".format(base_entry, view_entry)\n assert view_entry, \"Both base {} and view entry {} should exist.\".format(base_entry, view_entry)",
"def test_comprehensive_system(self):\n\n with ROV_Validity_Table() as db:\n\n # Run MRT_Parser to fill mrt_announcements table which will\n # be used as the input table for RPKI_Validator.\n input_table = MRT_Announcements_Table.name\n MRT_Parser().run()\n\n RPKI_Validator_Parser().run(table=input_table)\n\n initial_count = db.get_count()\n initial_rows = db.get_all()\n \n # all prefix-origin pairs from input should be in val table\n sql = f\"\"\"SELECT * FROM {input_table} a\n LEFT JOIN {db.name} b\n USING (prefix, origin)\n WHERE b.prefix IS NULL;\"\"\"\n assert len(db.execute(sql)) == 0\n\n # clear validity table and run with a wait before getting data\n # should be the same with and without waiting\n db.clear_table()\n\n RPKI_Validator_Parser().run(table=input_table, wait=True)\n\n second_count = db.get_count()\n second_rows = db.get_all()\n\n assert initial_count == second_count\n assert initial_rows == second_rows",
"def test_locked_file_03(self):\n \n f = open(\"tests/locked.db3\", \"a+\")\n fcntl.lockf(f.fileno(), fcntl.LOCK_EX) \n \n x = subprocess.Popen([\"sqlbak\", \"tests\", \"--ms-towait=4000\"],\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n\n time.sleep(3)\n fcntl.lockf(f.fileno(), fcntl.LOCK_UN)\n f.close()\n\n result = x.communicate()[0]\n\n self.assertTrue(\"cannot obtain lock\" not in result)",
"def sstable_repairedset_test(self):\n cluster = self.cluster\n cluster.set_configuration_options(values={'hinted_handoff_enabled': False})\n cluster.populate(2).start()\n node1, node2 = cluster.nodelist()\n node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=2)', 'compaction(strategy=SizeTieredCompactionStrategy,enabled=false)', '-rate', 'threads=50'])\n\n node1.flush()\n node2.flush()\n\n node2.stop(gently=False)\n\n node2.run_sstablerepairedset(keyspace='keyspace1')\n node2.start(wait_for_binary_proto=True)\n\n initialOut1 = node1.run_sstablemetadata(keyspace='keyspace1').stdout\n initialOut2 = node2.run_sstablemetadata(keyspace='keyspace1').stdout\n\n matches = findall('(?<=Repaired at:).*', '\\n'.join([initialOut1, initialOut2]))\n debug(\"Repair timestamps are: {}\".format(matches))\n\n uniquematches = set(matches)\n matchcount = Counter(matches)\n\n self.assertGreaterEqual(len(uniquematches), 2, uniquematches)\n\n self.assertGreaterEqual(max(matchcount), 1, matchcount)\n\n self.assertIn('Repaired at: 0', '\\n'.join([initialOut1, initialOut2]))\n\n node1.stop()\n node2.stress(['write', 'n=15K', 'no-warmup', '-schema', 'replication(factor=2)'])\n node2.flush()\n node1.start(wait_for_binary_proto=True)\n\n if cluster.version() >= \"2.2\":\n node1.repair()\n else:\n node1.nodetool(\"repair -par -inc\")\n\n finalOut1 = node1.run_sstablemetadata(keyspace='keyspace1').stdout\n finalOut2 = node2.run_sstablemetadata(keyspace='keyspace1').stdout\n\n matches = findall('(?<=Repaired at:).*', '\\n'.join([finalOut1, finalOut2]))\n\n debug(matches)\n\n uniquematches = set(matches)\n matchcount = Counter(matches)\n\n self.assertGreaterEqual(len(uniquematches), 2)\n\n self.assertGreaterEqual(max(matchcount), 2)\n\n self.assertNotIn('Repaired at: 0', '\\n'.join([finalOut1, finalOut2]))",
"def test_vacuum(self):\n # This will upgrade Dummy2 in-place; Dummy had its own upgrade.\n upgradeExplicitOid(self.store)\n\n # Make sure we push the upgraded items out of cache\n gc.collect()\n self.store.executeSQL('VACUUM')\n self.test_transition()",
"def test_vsg_for_multiple_vcpes_in_vsg_vm_with_one_vcpe_restart(self):",
"def testConsistency(self):",
"def test_update_node_state_smartfail(self):\n pass",
"def test_with_persistence_issues(self):\n\n if self.durability_level in [\n Bucket.DurabilityLevel.MAJORITY_AND_PERSIST_TO_ACTIVE,\n Bucket.DurabilityLevel.PERSIST_TO_MAJORITY]:\n self.log.critical(\"Test not valid for persistence durability\")\n return\n\n error_sim = dict()\n shell_conn = dict()\n cbstat_obj = dict()\n failover_info = dict()\n vb_info_info = dict()\n active_vbs_in_target_nodes = list()\n failover_info[\"init\"] = dict()\n failover_info[\"afterCrud\"] = dict()\n vb_info_info[\"init\"] = dict()\n vb_info_info[\"afterCrud\"] = dict()\n\n self.log.info(\"Selecting nodes to simulate error condition\")\n target_nodes = DurabilityHelper.getTargetNodes(self.cluster,\n self.nodes_init,\n self.num_nodes_affected)\n\n self.log.info(\"Simulate error condition on %s\" % target_nodes)\n for node in target_nodes:\n cbstat_obj[node.ip] = Cbstats(node)\n active_vbs_in_target_nodes += cbstat_obj[node.ip].vbucket_list(\n self.bucket.name,\n \"active\")\n vb_info_info[\"init\"][node.ip] = cbstat_obj[node.ip].vbucket_seqno(\n self.bucket.name)\n failover_info[\"init\"][node.ip] = \\\n cbstat_obj[node.ip].failover_stats(self.bucket.name)\n\n if self.simulate_error \\\n in [DiskError.DISK_FULL, DiskError.DISK_FAILURE]:\n error_sim = DiskError(self.log, self.task_manager,\n self.cluster.master, target_nodes,\n 60, 0, False, 120,\n disk_location=\"/data\")\n error_sim.create(action=self.simulate_error)\n else:\n for node in target_nodes:\n # Create shell_connections\n shell_conn[node.ip] = RemoteMachineShellConnection(node)\n\n # Perform specified action\n error_sim[node.ip] = CouchbaseError(self.log,\n shell_conn[node.ip],\n node=node)\n error_sim[node.ip].create(self.simulate_error,\n bucket_name=self.bucket.name)\n\n # Perform CRUDs with induced error scenario is active\n load_spec = dict()\n load_spec[\"doc_crud\"] = dict()\n load_spec[\"doc_crud\"][\n MetaCrudParams.DocCrud.CREATE_PERCENTAGE_PER_COLLECTION] = 100\n load_spec[\"doc_crud\"][\n MetaCrudParams.DocCrud.UPDATE_PERCENTAGE_PER_COLLECTION] = 25\n load_spec[\"doc_crud\"][\n MetaCrudParams.DocCrud.DELETE_PERCENTAGE_PER_COLLECTION] = 25\n load_spec[\"doc_crud\"][\n MetaCrudParams.DocCrud.COMMON_DOC_KEY] = \"test_collections\"\n\n self.log.info(\"Perform 'create', 'update', 'delete' mutations\")\n doc_loading_task = \\\n self.bucket_util.run_scenario_from_spec(\n self.task,\n self.cluster,\n self.cluster.buckets,\n load_spec,\n mutation_num=1,\n async_load=True)\n\n # Perform new scope/collection creation during doc ops in parallel\n self.__perform_collection_crud(mutation_num=2)\n\n # Wait for doc_loading to complete and validate the doc ops\n self.task_manager.get_task_result(doc_loading_task)\n self.bucket_util.validate_doc_loading_results(doc_loading_task)\n if doc_loading_task.result is False:\n self.log_failure(\"Doc CRUDs failed with persistence issue\")\n\n if self.simulate_error \\\n in [DiskError.DISK_FULL, DiskError.DISK_FAILURE]:\n error_sim.revert(self.simulate_error)\n else:\n # Revert the induced error condition\n for node in target_nodes:\n error_sim[node.ip].revert(self.simulate_error,\n bucket_name=self.bucket.name)\n\n # Disconnect the shell connection\n shell_conn[node.ip].disconnect()\n self.sleep(10, \"Wait for node recovery to complete\")\n\n # Doc count validation\n self.bucket_util._wait_for_stats_all_buckets(self.cluster,\n self.cluster.buckets)\n self.bucket_util.validate_docs_per_collections_all_buckets(\n self.cluster)\n\n # Fetch latest failover stats and validate the values are updated\n 
self.log.info(\"Validating failover and seqno cbstats\")\n for node in target_nodes:\n vb_info_info[\"afterCrud\"][node.ip] = \\\n cbstat_obj[node.ip].vbucket_seqno(self.bucket.name)\n failover_info[\"afterCrud\"][node.ip] = \\\n cbstat_obj[node.ip].failover_stats(self.bucket.name)\n\n # Failover validation\n val = \\\n failover_info[\"init\"][node.ip] \\\n == failover_info[\"afterCrud\"][node.ip]\n error_msg = \"Failover stats got updated\"\n self.assertTrue(val, msg=error_msg)\n\n # Seq_no validation (High level)\n val = \\\n vb_info_info[\"init\"][node.ip] \\\n != vb_info_info[\"afterCrud\"][node.ip]\n self.assertTrue(val, msg=\"vbucket seq_no not updated after CRUDs\")\n\n self.validate_test_failure()\n\n # Doc count validation\n self.bucket_util._wait_for_stats_all_buckets(self.cluster,\n self.cluster.buckets)\n self.bucket_util.validate_docs_per_collections_all_buckets(\n self.cluster)",
"def test_crash_process(self):\n def_bucket = self.cluster.buckets[0]\n target_node = self.getTargetNode()\n remote = RemoteMachineShellConnection(target_node)\n target_vbuckets = range(0, self.cluster.vbuckets)\n retry_exceptions = list()\n self.transaction_load_task = None\n self.doc_loading_task = None\n self.N1ql_load_task = None\n\n # If Memcached is killed, we should not perform KV ops on\n # particular node. If not we can target all nodes for KV operation.\n if self.process_name == \"memcached\":\n target_vbuckets = Cbstats(target_node).vbucket_list(\n def_bucket.name, self.target_node)\n if self.target_node == \"active\":\n retry_exceptions = [SDKException.TimeoutException]\n if len(target_vbuckets) == 0:\n self.log.error(\"No target vbucket list generated to load data\")\n remote.disconnect()\n return\n\n bucket_dict = BucketUtils.get_random_collections(\n self.cluster.buckets,\n req_num=1,\n consider_scopes=\"all\",\n consider_buckets=\"all\")\n\n bucket = BucketUtils.get_bucket_obj(self.cluster.buckets,\n bucket_dict.keys()[0])\n scope_name = bucket_dict[bucket.name][\"scopes\"].keys()[0]\n collection_name = bucket_dict[bucket.name][\n \"scopes\"][scope_name][\"collections\"].keys()[0]\n scope = BucketUtils.get_scope_obj(\n bucket, scope_name)\n collection = BucketUtils.get_collection_obj(\n scope, collection_name)\n\n self.start_doc_loading_tasks(target_vbuckets, scope_name, collection)\n\n task_info = dict()\n task_info[self.doc_loading_task] = \\\n self.bucket_util.get_doc_op_info_dict(\n def_bucket, DocLoading.Bucket.DocOps.CREATE, 0,\n replicate_to=self.replicate_to, persist_to=self.persist_to,\n durability=self.durability_level,\n timeout=self.sdk_timeout, time_unit=\"seconds\",\n retry_exceptions=retry_exceptions)\n\n self.sleep(10, \"Wait for doc_ops to start\")\n self.log.info(\"Killing {0}:{1} on node {2}\"\n .format(self.process_name, self.service_name,\n target_node.ip))\n remote.kill_process(self.process_name, self.service_name,\n signum=signum[self.sig_type])\n remote.disconnect()\n # Wait for tasks completion and validate failures\n if self.transaction_load_task:\n self.task.jython_task_manager.get_task_result(\n self.transaction_load_task)\n if self.N1qltxn:\n self.task.jython_task_manager.get_task_result(\n self.N1ql_load_task)\n self.task_manager.get_task_result(self.doc_loading_task)\n self.bucket_util.verify_doc_op_task_exceptions(task_info,\n self.cluster)\n self.bucket_util.log_doc_ops_task_failures(task_info)\n\n # Verification stats\n verification_dict = dict()\n verification_dict[\"ops_create\"] = 2*self.num_items\n verification_dict[\"sync_write_aborted_count\"] = 0\n verification_dict[\"rollback_item_count\"] = 0\n verification_dict[\"pending_writes\"] = 0\n if self.__is_sync_write_enabled:\n verification_dict[\"sync_write_committed_count\"] = 2*self.num_items\n\n if self.bucket_type == Bucket.Type.EPHEMERAL \\\n and self.process_name == \"memcached\":\n result = self.task.rebalance(self.cluster, [], [])\n self.assertTrue(result, \"Rebalance failed\")\n\n # Validate doc count\n if self.process_name != \"memcached\":\n stats_failed = \\\n self.durability_helper.verify_vbucket_details_stats(\n def_bucket, self.cluster_util.get_kv_nodes(self.cluster),\n vbuckets=self.cluster.vbuckets,\n expected_val=verification_dict)\n if stats_failed:\n self.fail(\"Cbstats verification failed\")\n\n # Doc count validation per collection\n if not self.N1qltxn and self.atomicity is False:\n self.bucket_util.validate_docs_per_collections_all_buckets(\n self.cluster)",
"def test_backup_restore_sanity(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self.log.info(\"*** start to load items to all buckets\")\n self._load_all_buckets(self.master, gen, \"create\", self.expires)\n self.log.info(\"*** done to load items to all buckets\")\n self.ops_type = self.input.param(\"ops-type\", \"update\")\n self.expected_error = self.input.param(\"expected_error\", None)\n if self.auto_failover:\n self.log.info(\"Enabling auto failover on \" + str(self.backupset.cluster_host))\n rest_conn = RestConnection(self.backupset.cluster_host)\n rest_conn.update_autofailover_settings(self.auto_failover, self.auto_failover_timeout)\n self.backup_create_validate()\n for i in range(1, self.backupset.number_of_backups + 1):\n if self.ops_type == \"update\":\n self.log.info(\"*** start to update items in all buckets\")\n self._load_all_buckets(self.master, gen, \"update\", self.expires)\n self.log.info(\"*** done update items in all buckets\")\n elif self.ops_type == \"delete\":\n self.log.info(\"*** start to delete items in all buckets\")\n self._load_all_buckets(self.master, gen, \"delete\", self.expires)\n self.log.info(\"*** done to delete items in all buckets\")\n self.sleep(10)\n self.log.info(\"*** start to validate backup cluster\")\n self.backup_cluster_validate()\n self.targetMaster = True\n start = randrange(1, self.backupset.number_of_backups + 1)\n if start == self.backupset.number_of_backups:\n end = start\n else:\n end = randrange(start, self.backupset.number_of_backups + 1)\n self.log.info(\"*** start to restore cluster\")\n restored = {\"{0}/{1}\".format(start, end): \"\"}\n for i in range(1, self.backupset.number_of_backups + 1):\n if self.reset_restore_cluster:\n self.log.info(\"\\n*** start to reset cluster\")\n self.backup_reset_clusters(self.cluster_to_restore)\n cmd_init = 'node-init'\n if self.same_cluster:\n self.log.info(\"Same cluster\")\n self._initialize_nodes(Cluster(), self.servers[:self.nodes_init])\n if self.hostname and self.master.ip.endswith(\".com\"):\n options = '--node-init-hostname ' + self.master.ip\n shell = RemoteMachineShellConnection(self.master)\n output, _ = shell.execute_couchbase_cli(cli_command=cmd_init,\n options=options,\n cluster_host=\"localhost\",\n user=self.master.rest_username,\n password=self.master.rest_password)\n shell.disconnect()\n if not self._check_output(\"SUCCESS: Node initialize\", output):\n raise(\"Failed to set hostname\")\n else:\n self.log.info(\"Different cluster\")\n shell = RemoteMachineShellConnection(self.backupset.restore_cluster_host)\n shell.enable_diag_eval_on_non_local_hosts()\n rest = RestConnection(self.backupset.restore_cluster_host)\n rest.force_eject_node()\n rest.init_node()\n if self.hostname and self.backupset.restore_cluster_host.ip.endswith(\".com\"):\n options = '--node-init-hostname ' + self.backupset.restore_cluster_host.ip\n output, _ = shell.execute_couchbase_cli(cli_command=cmd_init, options=options,\n cluster_host=\"localhost\",\n user=self.backupset.restore_cluster_host.rest_username,\n password=self.backupset.restore_cluster_host.rest_password)\n if not self._check_output(\"SUCCESS: Node initialize\", output):\n raise(\"Failed to set hostname\")\n shell.disconnect()\n self.log.info(\"\\n*** Done reset cluster\")\n self.sleep(10)\n\n \"\"\" Add built-in user cbadminbucket to second cluster \"\"\"\n self.add_built_in_server_user(node=self.input.clusters[0][:self.nodes_init][0])\n\n self.backupset.start = start\n self.backupset.end = 
end\n self.log.info(\"*** start restore validation\")\n self.backup_restore_validate(compare_uuid=False,\n seqno_compare_function=\">=\",\n expected_error=self.expected_error)\n if self.backupset.number_of_backups == 1:\n continue\n while \"{0}/{1}\".format(start, end) in restored:\n start = randrange(1, self.backupset.number_of_backups + 1)\n if start == self.backupset.number_of_backups:\n end = start\n else:\n end = randrange(start, self.backupset.number_of_backups + 1)\n restored[\"{0}/{1}\".format(start, end)] = \"\"",
"def test_recheck_fails(self):\n raise NotImplementedError",
"def test_re_balance(self):\n shards = []\n control_table_item_list = []\n kinesis_ramp1 = self.get_kinesis_ramp()\n kinesis_ramp2 = self.get_kinesis_ramp()\n kinesis_ramp3 = self.get_kinesis_ramp()\n worker_id = kinesis_ramp1.worker_id\n for i in range(1, 11):\n shard_id = \"shard-%s\" % str(i)\n shards.append(shard_id)\n if i == 11:\n # leave one shard to be claimed\n continue\n if i == 4:\n worker_id = kinesis_ramp2.worker_id\n elif i == 7:\n worker_id = kinesis_ramp3.worker_id\n # assign each worker 3 shards\n control_table_item_list.append(MockDynamoItem(\n shard_id=shard_id,\n checkpoint=0,\n worker_id=worker_id,\n heartbeat=0,\n ))\n table = MockControlTable(control_table_item_list) # create a shared control table\n\n kinesis_ramp1.control_table = table\n kinesis_ramp2.control_table = table\n kinesis_ramp3.control_table = table\n\n def change_heartbeat(seconds):\n for j in range(1, 10):\n # change the first nines heartbeat to exclude them from rebalancing\n table.get_item(Key={'shard_id': 'shard-%s' % j})['Item']['heartbeat'] += 1\n\n with patch('time.sleep', change_heartbeat) as mock_method:\n self.assertTrue(kinesis_ramp1.can_claim_shard(\"shard-10\"))\n self.assertTrue(kinesis_ramp1.claim_shard(\"shard-10\"))\n\n def change_heartbeat10(seconds):\n for j in range(1, 11):\n # change the heartbeat for all shards so no workers seems idle\n table.get_item(Key={'shard_id': 'shard-%s' % j})['Item']['heartbeat'] += 1\n\n with patch('time.sleep', change_heartbeat10) as mock_method:\n # we should not be able to claim shard 10 since we have a optimal distribution of 3,3,4 and no workers are marked as idle\n self.assertFalse(kinesis_ramp2.can_claim_shard(\"shard-10\"))",
"def test_add_node_after_mv(self):\n\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=-i))\n\n for i in range(1000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(-i), [-i, i])\n\n node4 = new_node(self.cluster, data_center=\"dc1\")\n node4.start(wait_for_binary_proto=True, jvm_args=[\"-Dcassandra.migration_task_wait_in_seconds={}\".format(MIGRATION_WAIT)])\n\n session2 = self.patient_exclusive_cql_connection(node4)\n\n \"\"\"\n @jira_ticket CASSANDRA-12984\n\n Assert that MVs are marked as build after bootstrap. Otherwise newly streamed MVs will be built again\n \"\"\"\n assert_one(session2, \"SELECT count(*) FROM system.built_views WHERE keyspace_name = 'ks' AND view_name = 't_by_v'\", [1])\n\n for i in range(1000):\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(-i), [-i, i])\n\n for i in range(1000, 1100):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=-i))\n\n for i in range(1000, 1100):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(-i), [-i, i])",
"def test_sync_cve_md_negative(self, pg_db_conn, monkeypatch, caplog, cleanup): # pylint: disable=unused-argument\n monkeypatch.setattr('vmaas_sync.vmaas_sync.vmaas_post_request', lambda endpoint, cve_request, session: None)\n\n with caplog.at_level(logging.INFO):\n with DatabasePool(1):\n result = sync_cve_md(TestMqueueWriter())\n assert not result\n assert caplog.records[0].msg == 'Syncing CVE metadata'\n assert caplog.records[1].msg.startswith('Downloading CVE metadata')\n assert caplog.records[2].msg == 'Finished syncing CVE metadata'\n caplog.clear()"
]
| [
"0.748205",
"0.69555116",
"0.6787574",
"0.6608144",
"0.6607815",
"0.65436244",
"0.6180294",
"0.61780506",
"0.61641556",
"0.613567",
"0.611566",
"0.6115433",
"0.61123097",
"0.6047326",
"0.6025254",
"0.5991636",
"0.595571",
"0.5936672",
"0.59278905",
"0.5926791",
"0.592278",
"0.5891785",
"0.58820236",
"0.58763",
"0.58755946",
"0.5869951",
"0.5865587",
"0.58481526",
"0.5823822",
"0.58064157"
]
| 0.78057253 | 0 |
Test that a materialized view is consistent after the repair of the base replica. | def _base_replica_repair_test(self, fail_mv_lock=False):
self.prepare(rf=3)
node1, node2, node3 = self.cluster.nodelist()
session = self.patient_exclusive_cql_connection(node1)
session.execute('USE ks')
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
session.cluster.control_connection.wait_for_schema_agreement()
logger.debug('Write initial data')
for i in range(1000):
session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))
self._replay_batchlogs()
logger.debug('Verify the data in the MV with CL=ALL')
for i in range(1000):
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = {}".format(i),
[i, i, 'a', 3.0],
cl=ConsistencyLevel.ALL
)
logger.debug('Shutdown node1')
node1.stop(wait_other_notice=True)
logger.debug('Delete node1 data')
node1.clear(clear_all=True)
jvm_args = []
if fail_mv_lock:
if self.cluster.version() >= LooseVersion('3.10'): # CASSANDRA-10134
jvm_args = ['-Dcassandra.allow_unsafe_replace=true', '-Dcassandra.replace_address={}'.format(node1.address())]
jvm_args.append("-Dcassandra.test.fail_mv_locks_count=1000")
# this should not make Keyspace.apply throw WTE on failure to acquire lock
node1.set_configuration_options(values={'write_request_timeout_in_ms': 100})
logger.debug('Restarting node1 with jvm_args={}'.format(jvm_args))
node1.start(wait_for_binary_proto=True, jvm_args=jvm_args)
logger.debug('Shutdown node2 and node3')
node2.stop(wait_other_notice=True)
node3.stop(wait_other_notice=True)
session = self.patient_exclusive_cql_connection(node1)
session.execute('USE ks')
logger.debug('Verify that there is no data on node1')
for i in range(1000):
assert_none(
session,
"SELECT * FROM t_by_v WHERE v = {}".format(i)
)
logger.debug('Restarting node2 and node3')
node2.start(wait_for_binary_proto=True)
node3.start(wait_for_binary_proto=True)
# Just repair the base replica
logger.debug('Starting repair on node1')
node1.nodetool("repair ks t")
logger.debug('Verify data with cl=ALL')
for i in range(1000):
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = {}".format(i),
[i, i, 'a', 3.0]
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _simple_repair_test(self, repair_base=False, repair_view=False):\n\n session = self.prepare(rf=3, options={'hinted_handoff_enabled': False})\n node1, node2, node3 = self.cluster.nodelist()\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Shutdown node2')\n node2.stop(wait_other_notice=True)\n\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)\".format(v=i))\n\n self._replay_batchlogs()\n\n logger.debug('Verify the data in the MV with CL=ONE')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )\n\n logger.debug('Verify the data in the MV with CL=ALL. All should be unavailable.')\n for i in range(1000):\n statement = SimpleStatement(\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n consistency_level=ConsistencyLevel.ALL\n )\n\n assert_unavailable(\n session.execute,\n statement\n )\n\n logger.debug('Start node2, and repair')\n node2.start(wait_for_binary_proto=True)\n if repair_base:\n node1.nodetool(\"repair ks t\")\n if repair_view:\n node1.nodetool(\"repair ks t_by_v\")\n\n logger.debug('Verify the data in the MV with CL=ALL. All should be available now and no digest mismatch')\n for i in range(1000):\n query = SimpleStatement(\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n consistency_level=ConsistencyLevel.ALL\n )\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n assert self._rows_to_list(result.current_rows), [[i, i, 'a' == 3.0]]",
"def test_base_replica_repair_with_contention(self):\n self._base_replica_repair_test(fail_mv_lock=True)",
"def test_view_tombstone(self):\n\n self.prepare(rf=3, options={'hinted_handoff_enabled': False})\n node1, node2, node3 = self.cluster.nodelist()\n\n session = self.patient_exclusive_cql_connection(node1)\n session.max_trace_wait = 120\n session.execute('USE ks')\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n # Set initial values TS=0, verify\n session.execute(SimpleStatement(\"INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 0\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'a', 3.0]\n )\n\n session.execute(SimpleStatement(\"INSERT INTO t (id, v2) VALUES (1, 'b') USING TIMESTAMP 1\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0]\n )\n\n # change v's value and TS=3, tombstones v=1 and adds v=0 record\n session.execute(SimpleStatement(\"UPDATE t USING TIMESTAMP 3 SET v = 0 WHERE id = 1\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n\n assert_none(session, \"SELECT * FROM t_by_v WHERE v = 1\")\n\n logger.debug('Shutdown node2')\n node2.stop(wait_other_notice=True)\n\n session.execute(SimpleStatement(\"UPDATE t USING TIMESTAMP 4 SET v = 1 WHERE id = 1\",\n consistency_level=ConsistencyLevel.QUORUM))\n self._replay_batchlogs()\n\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0]\n )\n\n node2.start(wait_for_binary_proto=True)\n\n # We should get a digest mismatch\n query = SimpleStatement(\"SELECT * FROM t_by_v WHERE v = 1\",\n consistency_level=ConsistencyLevel.ALL)\n\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), True)\n\n # We should not get a digest mismatch the second time\n query = SimpleStatement(\"SELECT * FROM t_by_v WHERE v = 1\", consistency_level=ConsistencyLevel.ALL)\n\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n\n # Verify values one last time\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0],\n cl=ConsistencyLevel.ALL\n )",
"def _test_base_view_consistency_on_crash(self, fail_phase):\n\n self.cluster.set_batch_commitlog(enabled=True, use_batch_window = self.cluster.version() < '5.0')\n self.fixture_dtest_setup.ignore_log_patterns = [r'Dummy failure', r\"Failed to force-recycle all segments\"]\n self.prepare(rf=1, install_byteman=True)\n node1, node2, node3 = self.cluster.nodelist()\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Make node1 fail {} view writes'.format(fail_phase))\n node1.byteman_submit([mk_bman_path('fail_{}_view_write.btm'.format(fail_phase))])\n\n logger.debug('Write 1000 rows - all node1 writes should fail')\n\n failed = False\n for i in range(1, 1000):\n try:\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) USING TIMESTAMP {v}\".format(v=i))\n except WriteFailure:\n failed = True\n\n assert failed, \"Should fail at least once.\"\n assert node1.grep_log(\"Dummy failure\"), \"Should throw Dummy failure\"\n\n missing_entries = 0\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n for i in range(1, 1000):\n view_entry = rows_to_list(session.execute(SimpleStatement(\"SELECT * FROM t_by_v WHERE id = {} AND v = {}\".format(i, i),\n consistency_level=ConsistencyLevel.ONE)))\n base_entry = rows_to_list(session.execute(SimpleStatement(\"SELECT * FROM t WHERE id = {}\".format(i),\n consistency_level=ConsistencyLevel.ONE)))\n\n if not base_entry:\n missing_entries += 1\n if not view_entry:\n missing_entries += 1\n\n logger.debug(\"Missing entries {}\".format(missing_entries))\n assert missing_entries > 0\n\n logger.debug('Restarting node1 to ensure commit log is replayed')\n node1.stop(wait_other_notice=True)\n # Set batchlog.replay_timeout_seconds=1 so we can ensure batchlog will be replayed below\n node1.start(jvm_args=[\"-Dcassandra.batchlog.replay_timeout_in_ms=1\"])\n\n logger.debug('Replay batchlogs')\n time.sleep(0.001) # Wait batchlog.replay_timeout_in_ms=1 (ms)\n self._replay_batchlogs()\n\n logger.debug('Verify that both the base table entry and view are present after commit and batchlog replay')\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n for i in range(1, 1000):\n view_entry = rows_to_list(session.execute(SimpleStatement(\"SELECT * FROM t_by_v WHERE id = {} AND v = {}\".format(i, i),\n consistency_level=ConsistencyLevel.ONE)))\n base_entry = rows_to_list(session.execute(SimpleStatement(\"SELECT * FROM t WHERE id = {}\".format(i),\n consistency_level=ConsistencyLevel.ONE)))\n\n assert base_entry, \"Both base {} and view entry {} should exist.\".format(base_entry, view_entry)\n assert view_entry, \"Both base {} and view entry {} should exist.\".format(base_entry, view_entry)",
"def test_really_complex_repair(self):\n session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)\n node1, node2, node3, node4, node5 = self.cluster.nodelist()\n\n # we create the base table with gc_grace_seconds=5 so batchlog will expire after 5 seconds\n session.execute(\"CREATE TABLE ks.t (id int, v int, v2 text, v3 decimal, PRIMARY KEY(id, v, v2))\"\n \"WITH gc_grace_seconds = 1\")\n session.execute((\"CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL AND v IS NOT NULL AND \"\n \"v2 IS NOT NULL PRIMARY KEY (v2, v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Shutdown node2 and node3')\n node2.stop(wait_other_notice=True)\n node3.stop(wait_other_notice=True)\n\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0)\")\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'a', 3.0)\")\n self._replay_batchlogs()\n logger.debug('Verify the data in the MV on node1 with CL=ONE')\n assert_all(session, \"SELECT * FROM ks.t_by_v WHERE v2 = 'a'\", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]])\n\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'b', 3.0)\")\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'b', 3.0)\")\n self._replay_batchlogs()\n logger.debug('Verify the data in the MV on node1 with CL=ONE')\n assert_all(session, \"SELECT * FROM ks.t_by_v WHERE v2 = 'b'\", [['b', 1, 1, 3.0], ['b', 2, 2, 3.0]])\n\n session.shutdown()\n\n logger.debug('Shutdown node1, node4 and node5')\n node1.stop()\n node4.stop()\n node5.stop()\n\n logger.debug('Start nodes 2 and 3')\n node2.start()\n node3.start(wait_for_binary_proto=True)\n\n session2 = self.patient_cql_connection(node2)\n session2.execute('USE ks')\n\n logger.debug('Verify the data in the MV on node2 with CL=ONE. 
No rows should be found.')\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'a'\")\n\n logger.debug('Write new data in node2 that overlap those in node1')\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'c', 3.0)\")\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'c', 3.0)\")\n self._replay_batchlogs()\n assert_all(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'c'\", [['c', 1, 1, 3.0], ['c', 2, 2, 3.0]])\n\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'd', 3.0)\")\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'd', 3.0)\")\n self._replay_batchlogs()\n assert_all(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'd'\", [['d', 1, 1, 3.0], ['d', 2, 2, 3.0]])\n\n logger.debug(\"Composite delete of everything\")\n session2.execute(\"DELETE FROM ks.t WHERE id = 1 and v = 1\")\n session2.execute(\"DELETE FROM ks.t WHERE id = 2 and v = 2\")\n self._replay_batchlogs()\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'c'\")\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'd'\")\n\n logger.debug('Wait for batchlogs to expire from node2 and node3')\n time.sleep(5)\n\n logger.debug('Start remaining nodes')\n node1.start(wait_for_binary_proto=True)\n node4.start(wait_for_binary_proto=True)\n node5.start(wait_for_binary_proto=True)\n\n # at this point the data isn't repaired so we have an inconsistency.\n # this value should return None\n assert_all(\n session2,\n \"SELECT * FROM ks.t_by_v WHERE v2 = 'a'\", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]],\n cl=ConsistencyLevel.QUORUM\n )\n\n logger.debug('Run global repair on node1')\n node1.repair()\n\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'a'\", cl=ConsistencyLevel.QUORUM)",
"def test_complex_repair(self):\n session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)\n node1, node2, node3, node4, node5 = self.cluster.nodelist()\n\n # we create the base table with gc_grace_seconds=5 so batchlog will expire after 5 seconds\n session.execute(\"CREATE TABLE ks.t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\"\n \"WITH gc_grace_seconds = 5\")\n session.execute((\"CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Shutdown node2 and node3')\n node2.stop()\n node3.stop(wait_other_notice=True)\n\n logger.debug('Write initial data to node1 (will be replicated to node4 and node5)')\n for i in range(1000):\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)\".format(v=i))\n\n logger.debug('Verify the data in the MV on node1 with CL=ONE')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )\n\n logger.debug('Close connection to node1')\n session.cluster.shutdown()\n logger.debug('Shutdown node1, node4 and node5')\n node1.stop()\n node4.stop()\n node5.stop()\n\n logger.debug('Start nodes 2 and 3')\n node2.start()\n node3.start(wait_for_binary_proto=True)\n\n session2 = self.patient_cql_connection(node2)\n\n logger.debug('Verify the data in the MV on node2 with CL=ONE. No rows should be found.')\n for i in range(1000):\n assert_none(\n session2,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(i)\n )\n\n logger.debug('Write new data in node2 and node3 that overlap those in node1, node4 and node5')\n for i in range(1000):\n # we write i*2 as value, instead of i\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)\".format(v=i * 2))\n\n logger.debug('Verify the new data in the MV on node2 with CL=ONE')\n for i in range(1000):\n v = i * 2\n assert_one(\n session2,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(v),\n [v, v, 'a', 3.0]\n )\n\n logger.debug('Wait for batchlogs to expire from node2 and node3')\n time.sleep(5)\n\n logger.debug('Start remaining nodes')\n node1.start(wait_for_binary_proto=True)\n node4.start(wait_for_binary_proto=True)\n node5.start(wait_for_binary_proto=True)\n\n session = self.patient_cql_connection(node1)\n\n logger.debug('Read data from MV at QUORUM (old data should be returned)')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0],\n cl=ConsistencyLevel.QUORUM\n )\n\n logger.debug('Run global repair on node1')\n node1.repair()\n\n logger.debug('Read data from MV at quorum (new data should be returned after repair)')\n for i in range(1000):\n v = i * 2\n assert_one(\n session,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(v),\n [v, v, 'a', 3.0],\n cl=ConsistencyLevel.QUORUM\n )",
"def _test_base_column_in_view_pk_commutative_tombstone_(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1 = self.cluster.nodelist()[0]\n\n session.execute('USE ks')\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n for node in self.cluster.nodelist():\n node.nodetool(\"disableautocompaction\")\n\n # sstable 1, Set initial values TS=1\n self.update_view(session, \"INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 1\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, 'a', 3.0])\n\n # sstable 2, change v's value and TS=2, tombstones v=1 and adds v=0 record\n self.update_view(session, \"DELETE FROM t USING TIMESTAMP 2 WHERE id = 1;\", flush)\n assert_none(session, \"SELECT * FROM t_by_v\")\n assert_none(session, \"SELECT * FROM t\")\n\n # sstable 3, tombstones of mv created by base deletion should remain.\n self.update_view(session, \"INSERT INTO t (id, v) VALUES (1, 1) USING TIMESTAMP 3\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None])\n\n # sstable 4, shadow view row (id=1, v=1), insert (id=1, v=2, ts=4)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 set v = 2 WHERE id = 1;\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [2, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 2, None, None])\n\n # sstable 5, shadow view row (id=1, v=2), insert (id=1, v=1 ts=5)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 5 set v = 1 WHERE id = 1;\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None]) # data deleted by row-tombstone@2 should not resurrect\n\n if flush:\n self.cluster.compact()\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None]) # data deleted by row-tombstone@2 should not resurrect\n\n # shadow view row (id=1, v=1)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 5 set v = null WHERE id = 1;\", flush)\n assert_none(session, \"SELECT * FROM t_by_v\")\n assert_one(session, \"SELECT * FROM t\", [1, None, None, None])",
"def test_crc_check_chance(self):\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL \"\n \"AND id IS NOT NULL PRIMARY KEY (v, id) WITH crc_check_chance = 0.5\"))\n\n assert_crc_check_chance_equal(session, \"t_by_v\", 0.5, view=True)\n\n session.execute(\"ALTER MATERIALIZED VIEW t_by_v WITH crc_check_chance = 0.3\")\n\n assert_crc_check_chance_equal(session, \"t_by_v\", 0.3, view=True)",
"def test_view_metadata_cleanup(self):\n session = self.prepare(rf=2, nodes=2)\n\n def populate_data(session, rows):\n logger.debug(\"populate base data\")\n for v in range(rows):\n session.execute(\"INSERT INTO t(k,c,a,b,e,f) VALUES({v},{v},{v},{v},{v},{v})\".format(v=v))\n\n def verify_data(session, rows, views):\n logger.debug(\"verify view data\")\n for v in range(rows):\n for view in range(views):\n assert_one(session, \"SELECT * FROM mv{} WHERE k={v} AND c={v}\".format(view, v=v), [v, v, v, v, v, v])\n\n def create_keyspace(session, ks=\"ks1\", rf=2):\n create_ks(session, ks, rf)\n\n def create_table(session):\n logger.debug(\"create base table\")\n session.execute(\"CREATE TABLE t (k int, c int, a int, b int, e int, f int, primary key(k, c))\")\n\n def create_views(session, views, keyspace=\"ks1\"):\n logger.debug(\"create view\")\n for view in range(views):\n session.execute(\"CREATE MATERIALIZED VIEW mv{} AS SELECT * FROM t \"\n \"WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c,k)\".format(view),\n timeout=60)\n self._wait_for_view(keyspace, \"mv{}\".format(view))\n\n def drop_keyspace(session, keyspace=\"ks1\"):\n logger.debug(\"drop keyspace {}\".format(keyspace))\n session.execute(\"DROP KEYSPACE IF EXISTS {}\".format(keyspace),\n timeout=60)\n\n def drop_views(session, views):\n logger.debug(\"drop all views\")\n for view in range(views):\n session.execute(\"DROP MATERIALIZED VIEW IF EXISTS mv{}\".format(view))\n\n rows = 100\n views = 5\n\n create_keyspace(session)\n create_table(session)\n populate_data(session, rows)\n create_views(session, views)\n verify_data(session, rows, views)\n\n self._assert_view_meta(session, views)\n drop_keyspace(session)\n self._assert_view_meta(session, views, exists=False)\n\n create_keyspace(session)\n create_table(session)\n populate_data(session, rows)\n create_views(session, views)\n verify_data(session, rows, views)\n\n self._assert_view_meta(session, views)\n drop_views(session, views)\n self._assert_view_meta(session, views, exists=False)",
"def test_single_partition_consistent_reads_after_write(self):\n self._consistent_reads_after_write_test(1)",
"def _test_mv_with_default_ttl(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1, node2, node3 = self.cluster.nodelist()\n session.execute('USE ks')\n\n logger.debug(\"MV with same key and unselected columns\")\n session.execute(\"CREATE TABLE t2 (k int, a int, b int, c int, primary key(k, a)) with default_time_to_live=600\")\n session.execute((\"CREATE MATERIALIZED VIEW mv2 AS SELECT k,a,b FROM t2 \"\n \"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (a, k)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n self.update_view(session, \"UPDATE t2 SET c=1 WHERE k=1 AND a=1;\", flush)\n assert_one(session, \"SELECT k,a,b,c FROM t2\", [1, 1, None, 1])\n assert_one(session, \"SELECT k,a,b FROM mv2\", [1, 1, None])\n\n self.update_view(session, \"UPDATE t2 SET c=null WHERE k=1 AND a=1;\", flush)\n assert_none(session, \"SELECT k,a,b,c FROM t2\")\n assert_none(session, \"SELECT k,a,b FROM mv2\")\n\n self.update_view(session, \"UPDATE t2 SET c=2 WHERE k=1 AND a=1;\", flush)\n assert_one(session, \"SELECT k,a,b,c FROM t2\", [1, 1, None, 2])\n assert_one(session, \"SELECT k,a,b FROM mv2\", [1, 1, None])\n\n self.update_view(session, \"DELETE c FROM t2 WHERE k=1 AND a=1;\", flush)\n assert_none(session, \"SELECT k,a,b,c FROM t2\")\n assert_none(session, \"SELECT k,a,b FROM mv2\")\n\n if flush:\n self.cluster.compact()\n assert_none(session, \"SELECT * FROM t2\")\n assert_none(session, \"SELECT * FROM mv2\")\n\n # test with user-provided ttl\n self.update_view(session, \"INSERT INTO t2(k,a,b,c) VALUES(2,2,2,2) USING TTL 5\", flush)\n self.update_view(session, \"UPDATE t2 USING TTL 100 SET c=1 WHERE k=2 AND a=2;\", flush)\n self.update_view(session, \"UPDATE t2 USING TTL 50 SET c=2 WHERE k=2 AND a=2;\", flush)\n self.update_view(session, \"DELETE c FROM t2 WHERE k=2 AND a=2;\", flush)\n\n time.sleep(5)\n\n assert_none(session, \"SELECT k,a,b,c FROM t2\")\n assert_none(session, \"SELECT k,a,b FROM mv2\")\n\n if flush:\n self.cluster.compact()\n assert_none(session, \"SELECT * FROM t2\")\n assert_none(session, \"SELECT * FROM mv2\")\n\n logger.debug(\"MV with extra key\")\n session.execute(\"CREATE TABLE t (k int PRIMARY KEY, a int, b int) with default_time_to_live=600\")\n session.execute((\"CREATE MATERIALIZED VIEW mv AS SELECT * FROM t \"\n \"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 1, 1);\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1])\n\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 2, 1);\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 2, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 2, 1])\n\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 3, 1);\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 3, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 3, 1])\n\n if flush:\n self.cluster.compact()\n assert_one(session, \"SELECT * FROM t\", [1, 3, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 3, 1])\n\n # user provided ttl\n start = time.time()\n self.update_view(session, \"UPDATE t USING TTL 100 SET a = 4 WHERE k = 1\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 4, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 4, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 
100:\n pytest.fail(\"Please increase the 100 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n start = time.time()\n self.update_view(session, \"UPDATE t USING TTL 80 SET a = 5 WHERE k = 1\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 5, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 5, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 80:\n pytest.fail(\"Please increase the 80 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n start = time.time()\n self.update_view(session, \"UPDATE t USING TTL 60 SET a = 6 WHERE k = 1\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 6, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 6, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 60:\n pytest.fail(\"Please increase the 60 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n if flush:\n self.cluster.compact()\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 6, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 6, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 60:\n pytest.fail(\"Please increase the 60 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae",
"def test_multi_partition_consistent_reads_after_write(self):\n self._consistent_reads_after_write_test(5)",
"def sstable_repairedset_test(self):\n cluster = self.cluster\n cluster.set_configuration_options(values={'hinted_handoff_enabled': False})\n cluster.populate(2).start()\n node1, node2 = cluster.nodelist()\n node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=2)', 'compaction(strategy=SizeTieredCompactionStrategy,enabled=false)', '-rate', 'threads=50'])\n\n node1.flush()\n node2.flush()\n\n node2.stop(gently=False)\n\n node2.run_sstablerepairedset(keyspace='keyspace1')\n node2.start(wait_for_binary_proto=True)\n\n initialOut1 = node1.run_sstablemetadata(keyspace='keyspace1').stdout\n initialOut2 = node2.run_sstablemetadata(keyspace='keyspace1').stdout\n\n matches = findall('(?<=Repaired at:).*', '\\n'.join([initialOut1, initialOut2]))\n debug(\"Repair timestamps are: {}\".format(matches))\n\n uniquematches = set(matches)\n matchcount = Counter(matches)\n\n self.assertGreaterEqual(len(uniquematches), 2, uniquematches)\n\n self.assertGreaterEqual(max(matchcount), 1, matchcount)\n\n self.assertIn('Repaired at: 0', '\\n'.join([initialOut1, initialOut2]))\n\n node1.stop()\n node2.stress(['write', 'n=15K', 'no-warmup', '-schema', 'replication(factor=2)'])\n node2.flush()\n node1.start(wait_for_binary_proto=True)\n\n if cluster.version() >= \"2.2\":\n node1.repair()\n else:\n node1.nodetool(\"repair -par -inc\")\n\n finalOut1 = node1.run_sstablemetadata(keyspace='keyspace1').stdout\n finalOut2 = node2.run_sstablemetadata(keyspace='keyspace1').stdout\n\n matches = findall('(?<=Repaired at:).*', '\\n'.join([finalOut1, finalOut2]))\n\n debug(matches)\n\n uniquematches = set(matches)\n matchcount = Counter(matches)\n\n self.assertGreaterEqual(len(uniquematches), 2)\n\n self.assertGreaterEqual(max(matchcount), 2)\n\n self.assertNotIn('Repaired at: 0', '\\n'.join([finalOut1, finalOut2]))",
"def test_backup_restore_with_views(self):\n if \"ephemeral\" in self.input.param(\"bucket_type\", 'membase'):\n self.log.info(\"\\n****** view does not support on ephemeral bucket ******\")\n return\n rest_src = RestConnection(self.backupset.cluster_host)\n if \"community\" in self.cb_version:\n rest_src.add_node(self.servers[1].rest_username, self.servers[1].rest_password,\n self.servers[1].cluster_ip, services=['kv', 'index', 'n1ql'])\n else:\n rest_src.add_node(self.servers[1].rest_username, self.servers[1].rest_password,\n self.servers[1].cluster_ip, services=['index', 'kv'])\n rebalance = self.cluster.async_rebalance(self.cluster_to_backup, [], [])\n rebalance.result()\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n default_map_func = \"function (doc) {\\n emit(doc._id, doc);\\n}\"\n default_view_name = \"test\"\n default_ddoc_name = \"ddoc_test\"\n prefix = \"dev_\"\n query = {\"full_set\": \"true\", \"stale\": \"false\", \"connection_timeout\": 60000}\n view = View(default_view_name, default_map_func)\n task = self.cluster.async_create_view(self.backupset.cluster_host,\n default_ddoc_name, view, \"default\")\n task.result()\n self.backup_cluster_validate()\n rest_target = RestConnection(self.backupset.restore_cluster_host)\n if self.input.clusters[0][1].ip != self.servers[1].ip:\n rest_target.add_node(self.input.clusters[0][1].rest_username,\n self.input.clusters[0][1].rest_password,\n self.input.clusters[0][1].cluster_ip, services=['kv', 'index'])\n rebalance = self.cluster.async_rebalance(self.cluster_to_restore, [], [])\n rebalance.result()\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\">=\")\n try:\n result = self.cluster.query_view(self.backupset.restore_cluster_host,\n prefix + default_ddoc_name,\n default_view_name, query, timeout=30)\n self.assertEqual(len(result['rows']), self.num_items,\n \"Querying view on restore cluster did not return expected number of items\")\n self.log.info(\"Querying view on restore cluster returned expected number of items\")\n except TimeoutError:\n self.fail(\"View could not be queried in restore cluster within timeout\")",
"def propagate_view_creation_over_non_existing_table(self):\n\n cluster = self.cluster\n cluster.populate(3)\n cluster.start()\n node1, node2, node3 = self.cluster.nodelist()\n session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)\n create_ks(session, 'ks', 3)\n\n session.execute('CREATE TABLE users (username varchar PRIMARY KEY, state varchar)')\n\n # create a materialized view only in nodes 1 and 2\n node3.stop(wait_other_notice=True)\n session.execute(('CREATE MATERIALIZED VIEW users_by_state AS '\n 'SELECT * FROM users WHERE state IS NOT NULL AND username IS NOT NULL '\n 'PRIMARY KEY (state, username)'))\n\n # drop the base table only in node 3\n node1.stop(wait_other_notice=True)\n node2.stop(wait_other_notice=True)\n node3.start(wait_for_binary_proto=True)\n session = self.patient_cql_connection(node3, consistency_level=ConsistencyLevel.QUORUM)\n session.execute('DROP TABLE ks.users')\n\n # restart the cluster\n cluster.stop()\n cluster.start()\n\n # node3 should have received and ignored the creation of the MV over the dropped table\n assert node3.grep_log('Not adding view users_by_state because the base table')",
"def testConsistency(self):",
"def test_add_node_after_wide_mv_with_range_deletions(self):\n\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int, v int, PRIMARY KEY (id, v)) WITH compaction = { 'class': 'SizeTieredCompactionStrategy', 'enabled': 'false' }\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n for i in range(10):\n for j in range(100):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=j))\n\n self.cluster.flush()\n\n for i in range(10):\n for j in range(100):\n assert_one(session, \"SELECT * FROM t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n for i in range(10):\n for j in range(100):\n if j % 10 == 0:\n session.execute(\"DELETE FROM t WHERE id = {} AND v >= {} and v < {}\".format(i, j, j + 2))\n\n self.cluster.flush()\n\n for i in range(10):\n for j in range(100):\n if j % 10 == 0 or (j - 1) % 10 == 0:\n assert_none(session, \"SELECT * FROM t WHERE id = {} and v = {}\".format(i, j))\n assert_none(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j))\n else:\n assert_one(session, \"SELECT * FROM t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n node4 = new_node(self.cluster, data_center=\"dc1\")\n node4.set_configuration_options(values={'max_mutation_size_in_kb': 25}) # CASSANDRA-11670\n logger.debug(\"Start join at {}\".format(time.strftime(\"%H:%M:%S\")))\n node4.start(wait_for_binary_proto=True, jvm_args=[\"-Dcassandra.migration_task_wait_in_seconds={}\".format(MIGRATION_WAIT)])\n\n session2 = self.patient_exclusive_cql_connection(node4)\n\n for i in range(10):\n for j in range(100):\n if j % 10 == 0 or (j - 1) % 10 == 0:\n assert_none(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j))\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j))\n else:\n assert_one(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n for i in range(10):\n for j in range(100, 110):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=j))\n\n for i in range(10):\n for j in range(110):\n if j < 100 and (j % 10 == 0 or (j - 1) % 10 == 0):\n assert_none(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j))\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j))\n else:\n assert_one(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])",
"def test_complex_mv_select_statements(self):\n cluster = self.cluster\n cluster.set_configuration_options({'enable_materialized_views': 'true'})\n cluster.populate(3).start()\n node1 = cluster.nodelist()[0]\n session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)\n\n logger.debug(\"Creating keyspace\")\n session.execute(\"CREATE KEYSPACE mvtest WITH replication = \"\n \"{'class': 'SimpleStrategy', 'replication_factor': '3'}\")\n session.execute('USE mvtest')\n\n mv_primary_keys = [\"((a, b), c)\",\n \"((b, a), c)\",\n \"(a, b, c)\",\n \"(c, b, a)\",\n \"((c, a), b)\"]\n\n for mv_primary_key in mv_primary_keys:\n\n session.execute(\"CREATE TABLE test (a int, b int, c int, d int, PRIMARY KEY (a, b, c))\")\n\n insert_stmt = session.prepare(\"INSERT INTO test (a, b, c, d) VALUES (?, ?, ?, ?)\")\n update_stmt = session.prepare(\"UPDATE test SET d = ? WHERE a = ? AND b = ? AND c = ?\")\n delete_stmt1 = session.prepare(\"DELETE FROM test WHERE a = ? AND b = ? AND c = ?\")\n delete_stmt2 = session.prepare(\"DELETE FROM test WHERE a = ?\")\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n rows = [(0, 0, 0, 0),\n (0, 0, 1, 0),\n (0, 1, 0, 0),\n (0, 1, 1, 0),\n (1, 0, 0, 0),\n (1, 0, 1, 0),\n (1, 1, -1, 0),\n (1, 1, 0, 0),\n (1, 1, 1, 0)]\n\n for row in rows:\n session.execute(insert_stmt, row)\n\n logger.debug(\"Testing MV primary key: {}\".format(mv_primary_key))\n\n session.execute(\"CREATE MATERIALIZED VIEW mv AS SELECT * FROM test WHERE \"\n \"a = 1 AND b IS NOT NULL AND c = 1 PRIMARY KEY {}\".format(mv_primary_key))\n time.sleep(3)\n\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # insert new rows that does not match the filter\n session.execute(insert_stmt, (0, 0, 1, 0))\n session.execute(insert_stmt, (1, 1, 0, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # insert new row that does match the filter\n session.execute(insert_stmt, (1, 2, 1, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # update rows that does not match the filter\n session.execute(update_stmt, (1, 1, -1, 0))\n session.execute(update_stmt, (0, 1, 1, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # update a row that does match the filter\n session.execute(update_stmt, (2, 1, 1, 1))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete rows that does not match the filter\n session.execute(delete_stmt1, (1, 1, -1))\n session.execute(delete_stmt1, (2, 0, 1))\n session.execute(delete_stmt2, (0,))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete a row that does match the filter\n session.execute(delete_stmt1, (1, 1, 1))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete a partition that matches the filter\n session.execute(delete_stmt2, (1,))\n assert_all(session, \"SELECT a, b, c, d FROM mv\", [], 
cl=ConsistencyLevel.QUORUM)\n\n # Cleanup\n session.execute(\"DROP MATERIALIZED VIEW mv\")\n session.execute(\"DROP TABLE test\")",
"def test_populate_mv_after_insert(self):\n session = self.prepare(consistency_level=ConsistencyLevel.QUORUM)\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int)\")\n\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({v}, {v})\".format(v=i))\n\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL \"\n \"AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"wait for view to build\")\n self._wait_for_view(\"ks\", \"t_by_v\")\n\n logger.debug(\"wait that all batchlogs are replayed\")\n self._replay_batchlogs()\n\n for i in range(1000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(i), [i, i])",
"def verify_no_snapshot_reingestion(c: Composition) -> None:\n c.run(\"testdrive\", \"wait-for-snapshot.td\", \"postgres-disable-select-permission.td\")\n\n restart_mz(c)\n\n c.run(\n \"testdrive\",\n \"delete-rows-t1.td\",\n \"delete-rows-t2.td\",\n \"alter-table.td\",\n \"alter-mz.td\",\n )",
"def test_add_node_after_mv(self):\n\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=-i))\n\n for i in range(1000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(-i), [-i, i])\n\n node4 = new_node(self.cluster, data_center=\"dc1\")\n node4.start(wait_for_binary_proto=True, jvm_args=[\"-Dcassandra.migration_task_wait_in_seconds={}\".format(MIGRATION_WAIT)])\n\n session2 = self.patient_exclusive_cql_connection(node4)\n\n \"\"\"\n @jira_ticket CASSANDRA-12984\n\n Assert that MVs are marked as build after bootstrap. Otherwise newly streamed MVs will be built again\n \"\"\"\n assert_one(session2, \"SELECT count(*) FROM system.built_views WHERE keyspace_name = 'ks' AND view_name = 't_by_v'\", [1])\n\n for i in range(1000):\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(-i), [-i, i])\n\n for i in range(1000, 1100):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=-i))\n\n for i in range(1000, 1100):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(-i), [-i, i])",
"def test_view_change_not_happen_if_ic_is_discarded(looper, txnPoolNodeSet,\n sdk_pool_handle,\n sdk_wallet_client,\n tconf, tdir, allPluginsPath):\n nodes_to_restart = txnPoolNodeSet[1:3]\n panic_node = txnPoolNodeSet[-1]\n view_no = txnPoolNodeSet[0].viewNo\n\n send_test_instance_change(panic_node)\n for n in nodes_to_restart:\n restart_node(looper, txnPoolNodeSet, n, tconf, tdir, allPluginsPath)\n nodes_to_restart = txnPoolNodeSet[1:3]\n\n # waiting to discard InstanceChange\n def check_old_ic_discarded():\n vct_services = [n.master_replica._view_change_trigger_service for n in txnPoolNodeSet]\n assert all(not vct_service._instance_changes.has_inst_chng_from(view_no + 1, panic_node.name)\n for vct_service in vct_services)\n\n looper.run(eventually(check_old_ic_discarded, timeout=tconf.OUTDATED_INSTANCE_CHANGES_CHECK_INTERVAL + 1))\n\n for n in nodes_to_restart:\n send_test_instance_change(n)\n\n def check_ic():\n for node in txnPoolNodeSet:\n vct_service = node.master_replica._view_change_trigger_service\n assert all(vct_service._instance_changes.has_inst_chng_from(view_no + 1, n.name)\n for n in nodes_to_restart)\n\n looper.run(eventually(check_ic))\n ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)\n ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet)\n\n for node in txnPoolNodeSet:\n assert node.viewNo == view_no",
"def test_populate_mv_after_insert_wide_rows(self):\n session = self.prepare(consistency_level=ConsistencyLevel.QUORUM)\n\n session.execute(\"CREATE TABLE t (id int, v int, PRIMARY KEY (id, v))\")\n session.cluster.control_connection.wait_for_schema_agreement()\n\n for i in range(5):\n for j in range(10000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({}, {})\".format(i, j))\n\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL \"\n \"AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug(\"wait for view to build\")\n self._wait_for_view(\"ks\", \"t_by_v\")\n\n logger.debug(\"wait that all batchlogs are replayed\")\n self._replay_batchlogs()\n for i in range(5):\n for j in range(10000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} AND v = {}\".format(i, j), [j, i])",
"def test_create(self):\n session = self.prepare(user_table=True)\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 1, \"Expecting 1 materialized view == got\" + str(result)",
"def test_cbrestoremgr_should_not_change_replica_count_in_restore_bucket(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=10000)\n if not self.new_replicas:\n self.fail(\"This test needs to pass param 'new-replicas' to run\")\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.log.info(\"Start backup cluster\")\n self.backup_cluster_validate()\n self.backup_restore_validate()\n\n self.log.info(\"replicas from backup bucket: {0}\".format(self.num_replicas))\n self.log.info(\"replica in restore bucket should be {0} after restore\"\\\n .format(self.new_replicas))\n rest_r = RestConnection(self.backupset.restore_cluster_host)\n for bucket in self.buckets:\n bucket_stats = rest_r.get_bucket_json(bucket.name)\n if self.new_replicas != bucket_stats[\"replicaNumber\"]:\n self.fail(\"replia number in bucket {0} did change after restore\"\\\n .format(bucket.name))\n self.log.info(\"Verified replica in bucket {0}: {1}\"\\\n .format(bucket.name,\n bucket_stats[\"replicaNumber\"]))",
"def test_sync_uses_correct_connection(self):\n # To test this a migration from new to old will expose the bug\n api.set_shard_at_rest('dummy', 1, \"dest2/test_sharding\")\n api.start_migration('dummy', 1, \"dest1/test_sharding\")\n\n # Mimic the state the shard would be in after a document was copied\n # from one location to another\n doc1 = {'x': 1, 'y': 1}\n doc1['_id'] = self.db1.dummy.insert(doc1)\n self.db2.dummy.insert(doc1)\n\n # Get the initial oplog position, do an update and then sync from the\n # initial position\n initial_oplog_pos = sharder._get_oplog_pos('dummy', 1)\n self.db2.dummy.update({'x': 1}, {'$inc': {'y': 1}})\n api.set_shard_to_migration_status(\n 'dummy', 1, api.ShardStatus.MIGRATING_SYNC)\n sharder._sync_from_oplog('dummy', 1, initial_oplog_pos)\n\n # The data on the first database should now reflect the update that\n # went through\n doc2, = self.db1.dummy.find({})\n self.assertEquals(2, doc2['y'])",
"def test_sync_uses_correct_connection(self):\n # To test this a migration from new to old will expose the bug\n api.set_shard_at_rest('dummy', 1, \"dest2/test_sharding\")\n api.start_migration('dummy', 1, \"dest1/test_sharding\")\n\n # Mimic the state the shard would be in after a document was copied\n # from one location to another\n doc1 = {'x': 1, 'y': 1}\n doc1['_id'] = self.db1.dummy.insert(doc1)\n self.db2.dummy.insert(doc1)\n\n # Get the initial oplog position, do an update and then sync from the\n # initial position\n initial_oplog_pos = sharder._get_oplog_pos('dummy', 1)\n self.db2.dummy.update({'x': 1}, {'$inc': {'y': 1}})\n api.set_shard_to_migration_status(\n 'dummy', 1, api.ShardStatus.MIGRATING_SYNC)\n sharder._sync_from_oplog('dummy', 1, initial_oplog_pos)\n\n # The data on the first database should now reflect the update that\n # went through\n doc2, = self.db1.dummy.find({})\n self.assertEquals(2, doc2['y'])",
"def test_sync_error(self):\n client = self.mock_client(\n [\n self.join_response(),\n defer.fail(RebalanceInProgress()),\n self.join_response(),\n self.sync_response(),\n ]\n )\n coord = self.make_coordinator(client)\n de = coord.join_and_sync()\n self.successResultOf(de)\n self.assertEqual(coord._rejoin_needed, True)\n assert_delayed_calls(1, client)\n de = coord.join_and_sync()\n self.successResultOf(de)\n self.assertEqual(coord._rejoin_needed, False)",
"def test_add_node_after_very_wide_mv(self):\n\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int, v int, PRIMARY KEY (id, v))\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n for i in range(5):\n for j in range(5000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=j))\n\n self.cluster.flush()\n\n for i in range(5):\n for j in range(5000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n node4 = new_node(self.cluster, data_center=\"dc1\")\n node4.set_configuration_options(values={'max_mutation_size_in_kb': 25}) # CASSANDRA-11670\n logger.debug(\"Start join at {}\".format(time.strftime(\"%H:%M:%S\")))\n node4.start(wait_for_binary_proto=True, jvm_args=[\"-Dcassandra.migration_task_wait_in_seconds={}\".format(MIGRATION_WAIT)])\n\n session2 = self.patient_exclusive_cql_connection(node4)\n\n for i in range(5):\n for j in range(5000):\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n for i in range(5):\n for j in range(5100):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=j))\n\n for i in range(5):\n for j in range(5100):\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])",
"def check_consistency(self):\n raise NotImplementedError()"
]
| [
"0.74890757",
"0.71741855",
"0.6980225",
"0.6973491",
"0.6903745",
"0.6803438",
"0.6549949",
"0.6438437",
"0.642581",
"0.62041116",
"0.616515",
"0.6136762",
"0.6135697",
"0.6135309",
"0.61278754",
"0.60847795",
"0.59118557",
"0.5904218",
"0.5855572",
"0.5855482",
"0.58467007",
"0.5836308",
"0.5699606",
"0.5690414",
"0.56638354",
"0.56621075",
"0.56621075",
"0.5659884",
"0.56547636",
"0.5653715"
]
| 0.71913344 | 1 |
Test that a materialized view is consistent after a more complex repair. | def test_really_complex_repair(self):
session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)
node1, node2, node3, node4, node5 = self.cluster.nodelist()
# we create the base table with gc_grace_seconds=1 so the batchlog will expire quickly
session.execute("CREATE TABLE ks.t (id int, v int, v2 text, v3 decimal, PRIMARY KEY(id, v, v2))"
"WITH gc_grace_seconds = 1")
session.execute(("CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL AND v IS NOT NULL AND "
"v2 IS NOT NULL PRIMARY KEY (v2, v, id)"))
session.cluster.control_connection.wait_for_schema_agreement()
logger.debug('Shutdown node2 and node3')
node2.stop(wait_other_notice=True)
node3.stop(wait_other_notice=True)
session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0)")
session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'a', 3.0)")
self._replay_batchlogs()
logger.debug('Verify the data in the MV on node1 with CL=ONE')
assert_all(session, "SELECT * FROM ks.t_by_v WHERE v2 = 'a'", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]])
session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'b', 3.0)")
session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'b', 3.0)")
self._replay_batchlogs()
logger.debug('Verify the data in the MV on node1 with CL=ONE')
assert_all(session, "SELECT * FROM ks.t_by_v WHERE v2 = 'b'", [['b', 1, 1, 3.0], ['b', 2, 2, 3.0]])
session.shutdown()
logger.debug('Shutdown node1, node4 and node5')
node1.stop()
node4.stop()
node5.stop()
logger.debug('Start nodes 2 and 3')
node2.start()
node3.start(wait_for_binary_proto=True)
session2 = self.patient_cql_connection(node2)
session2.execute('USE ks')
logger.debug('Verify the data in the MV on node2 with CL=ONE. No rows should be found.')
assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'a'")
logger.debug('Write new data in node2 that overlap those in node1')
session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'c', 3.0)")
session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'c', 3.0)")
self._replay_batchlogs()
assert_all(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'c'", [['c', 1, 1, 3.0], ['c', 2, 2, 3.0]])
session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'd', 3.0)")
session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'd', 3.0)")
self._replay_batchlogs()
assert_all(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'd'", [['d', 1, 1, 3.0], ['d', 2, 2, 3.0]])
logger.debug("Composite delete of everything")
session2.execute("DELETE FROM ks.t WHERE id = 1 and v = 1")
session2.execute("DELETE FROM ks.t WHERE id = 2 and v = 2")
self._replay_batchlogs()
assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'c'")
assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'd'")
logger.debug('Wait for batchlogs to expire from node2 and node3')
time.sleep(5)
logger.debug('Start remaining nodes')
node1.start(wait_for_binary_proto=True)
node4.start(wait_for_binary_proto=True)
node5.start(wait_for_binary_proto=True)
# at this point the data isn't repaired, so we have an inconsistency:
# the deleted 'a' rows are still returned; only after the repair below should this query return no rows
assert_all(
session2,
"SELECT * FROM ks.t_by_v WHERE v2 = 'a'", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]],
cl=ConsistencyLevel.QUORUM
)
logger.debug('Run global repair on node1')
node1.repair()
assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'a'", cl=ConsistencyLevel.QUORUM) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _simple_repair_test(self, repair_base=False, repair_view=False):\n\n session = self.prepare(rf=3, options={'hinted_handoff_enabled': False})\n node1, node2, node3 = self.cluster.nodelist()\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Shutdown node2')\n node2.stop(wait_other_notice=True)\n\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)\".format(v=i))\n\n self._replay_batchlogs()\n\n logger.debug('Verify the data in the MV with CL=ONE')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )\n\n logger.debug('Verify the data in the MV with CL=ALL. All should be unavailable.')\n for i in range(1000):\n statement = SimpleStatement(\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n consistency_level=ConsistencyLevel.ALL\n )\n\n assert_unavailable(\n session.execute,\n statement\n )\n\n logger.debug('Start node2, and repair')\n node2.start(wait_for_binary_proto=True)\n if repair_base:\n node1.nodetool(\"repair ks t\")\n if repair_view:\n node1.nodetool(\"repair ks t_by_v\")\n\n logger.debug('Verify the data in the MV with CL=ALL. All should be available now and no digest mismatch')\n for i in range(1000):\n query = SimpleStatement(\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n consistency_level=ConsistencyLevel.ALL\n )\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n assert self._rows_to_list(result.current_rows), [[i, i, 'a' == 3.0]]",
"def test_view_tombstone(self):\n\n self.prepare(rf=3, options={'hinted_handoff_enabled': False})\n node1, node2, node3 = self.cluster.nodelist()\n\n session = self.patient_exclusive_cql_connection(node1)\n session.max_trace_wait = 120\n session.execute('USE ks')\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n # Set initial values TS=0, verify\n session.execute(SimpleStatement(\"INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 0\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'a', 3.0]\n )\n\n session.execute(SimpleStatement(\"INSERT INTO t (id, v2) VALUES (1, 'b') USING TIMESTAMP 1\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0]\n )\n\n # change v's value and TS=3, tombstones v=1 and adds v=0 record\n session.execute(SimpleStatement(\"UPDATE t USING TIMESTAMP 3 SET v = 0 WHERE id = 1\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n\n assert_none(session, \"SELECT * FROM t_by_v WHERE v = 1\")\n\n logger.debug('Shutdown node2')\n node2.stop(wait_other_notice=True)\n\n session.execute(SimpleStatement(\"UPDATE t USING TIMESTAMP 4 SET v = 1 WHERE id = 1\",\n consistency_level=ConsistencyLevel.QUORUM))\n self._replay_batchlogs()\n\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0]\n )\n\n node2.start(wait_for_binary_proto=True)\n\n # We should get a digest mismatch\n query = SimpleStatement(\"SELECT * FROM t_by_v WHERE v = 1\",\n consistency_level=ConsistencyLevel.ALL)\n\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), True)\n\n # We should not get a digest mismatch the second time\n query = SimpleStatement(\"SELECT * FROM t_by_v WHERE v = 1\", consistency_level=ConsistencyLevel.ALL)\n\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n\n # Verify values one last time\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0],\n cl=ConsistencyLevel.ALL\n )",
"def test_complex_repair(self):\n session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)\n node1, node2, node3, node4, node5 = self.cluster.nodelist()\n\n # we create the base table with gc_grace_seconds=5 so batchlog will expire after 5 seconds\n session.execute(\"CREATE TABLE ks.t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\"\n \"WITH gc_grace_seconds = 5\")\n session.execute((\"CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Shutdown node2 and node3')\n node2.stop()\n node3.stop(wait_other_notice=True)\n\n logger.debug('Write initial data to node1 (will be replicated to node4 and node5)')\n for i in range(1000):\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)\".format(v=i))\n\n logger.debug('Verify the data in the MV on node1 with CL=ONE')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )\n\n logger.debug('Close connection to node1')\n session.cluster.shutdown()\n logger.debug('Shutdown node1, node4 and node5')\n node1.stop()\n node4.stop()\n node5.stop()\n\n logger.debug('Start nodes 2 and 3')\n node2.start()\n node3.start(wait_for_binary_proto=True)\n\n session2 = self.patient_cql_connection(node2)\n\n logger.debug('Verify the data in the MV on node2 with CL=ONE. No rows should be found.')\n for i in range(1000):\n assert_none(\n session2,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(i)\n )\n\n logger.debug('Write new data in node2 and node3 that overlap those in node1, node4 and node5')\n for i in range(1000):\n # we write i*2 as value, instead of i\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)\".format(v=i * 2))\n\n logger.debug('Verify the new data in the MV on node2 with CL=ONE')\n for i in range(1000):\n v = i * 2\n assert_one(\n session2,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(v),\n [v, v, 'a', 3.0]\n )\n\n logger.debug('Wait for batchlogs to expire from node2 and node3')\n time.sleep(5)\n\n logger.debug('Start remaining nodes')\n node1.start(wait_for_binary_proto=True)\n node4.start(wait_for_binary_proto=True)\n node5.start(wait_for_binary_proto=True)\n\n session = self.patient_cql_connection(node1)\n\n logger.debug('Read data from MV at QUORUM (old data should be returned)')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0],\n cl=ConsistencyLevel.QUORUM\n )\n\n logger.debug('Run global repair on node1')\n node1.repair()\n\n logger.debug('Read data from MV at quorum (new data should be returned after repair)')\n for i in range(1000):\n v = i * 2\n assert_one(\n session,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(v),\n [v, v, 'a', 3.0],\n cl=ConsistencyLevel.QUORUM\n )",
"def _test_base_view_consistency_on_crash(self, fail_phase):\n\n self.cluster.set_batch_commitlog(enabled=True, use_batch_window = self.cluster.version() < '5.0')\n self.fixture_dtest_setup.ignore_log_patterns = [r'Dummy failure', r\"Failed to force-recycle all segments\"]\n self.prepare(rf=1, install_byteman=True)\n node1, node2, node3 = self.cluster.nodelist()\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Make node1 fail {} view writes'.format(fail_phase))\n node1.byteman_submit([mk_bman_path('fail_{}_view_write.btm'.format(fail_phase))])\n\n logger.debug('Write 1000 rows - all node1 writes should fail')\n\n failed = False\n for i in range(1, 1000):\n try:\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) USING TIMESTAMP {v}\".format(v=i))\n except WriteFailure:\n failed = True\n\n assert failed, \"Should fail at least once.\"\n assert node1.grep_log(\"Dummy failure\"), \"Should throw Dummy failure\"\n\n missing_entries = 0\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n for i in range(1, 1000):\n view_entry = rows_to_list(session.execute(SimpleStatement(\"SELECT * FROM t_by_v WHERE id = {} AND v = {}\".format(i, i),\n consistency_level=ConsistencyLevel.ONE)))\n base_entry = rows_to_list(session.execute(SimpleStatement(\"SELECT * FROM t WHERE id = {}\".format(i),\n consistency_level=ConsistencyLevel.ONE)))\n\n if not base_entry:\n missing_entries += 1\n if not view_entry:\n missing_entries += 1\n\n logger.debug(\"Missing entries {}\".format(missing_entries))\n assert missing_entries > 0\n\n logger.debug('Restarting node1 to ensure commit log is replayed')\n node1.stop(wait_other_notice=True)\n # Set batchlog.replay_timeout_seconds=1 so we can ensure batchlog will be replayed below\n node1.start(jvm_args=[\"-Dcassandra.batchlog.replay_timeout_in_ms=1\"])\n\n logger.debug('Replay batchlogs')\n time.sleep(0.001) # Wait batchlog.replay_timeout_in_ms=1 (ms)\n self._replay_batchlogs()\n\n logger.debug('Verify that both the base table entry and view are present after commit and batchlog replay')\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n for i in range(1, 1000):\n view_entry = rows_to_list(session.execute(SimpleStatement(\"SELECT * FROM t_by_v WHERE id = {} AND v = {}\".format(i, i),\n consistency_level=ConsistencyLevel.ONE)))\n base_entry = rows_to_list(session.execute(SimpleStatement(\"SELECT * FROM t WHERE id = {}\".format(i),\n consistency_level=ConsistencyLevel.ONE)))\n\n assert base_entry, \"Both base {} and view entry {} should exist.\".format(base_entry, view_entry)\n assert view_entry, \"Both base {} and view entry {} should exist.\".format(base_entry, view_entry)",
"def _base_replica_repair_test(self, fail_mv_lock=False):\n\n self.prepare(rf=3)\n node1, node2, node3 = self.cluster.nodelist()\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Write initial data')\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)\".format(v=i))\n\n self._replay_batchlogs()\n\n logger.debug('Verify the data in the MV with CL=ALL')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0],\n cl=ConsistencyLevel.ALL\n )\n\n logger.debug('Shutdown node1')\n node1.stop(wait_other_notice=True)\n logger.debug('Delete node1 data')\n node1.clear(clear_all=True)\n\n jvm_args = []\n if fail_mv_lock:\n if self.cluster.version() >= LooseVersion('3.10'): # CASSANDRA-10134\n jvm_args = ['-Dcassandra.allow_unsafe_replace=true', '-Dcassandra.replace_address={}'.format(node1.address())]\n jvm_args.append(\"-Dcassandra.test.fail_mv_locks_count=1000\")\n # this should not make Keyspace.apply throw WTE on failure to acquire lock\n node1.set_configuration_options(values={'write_request_timeout_in_ms': 100})\n logger.debug('Restarting node1 with jvm_args={}'.format(jvm_args))\n node1.start(wait_for_binary_proto=True, jvm_args=jvm_args)\n logger.debug('Shutdown node2 and node3')\n node2.stop(wait_other_notice=True)\n node3.stop(wait_other_notice=True)\n\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n\n logger.debug('Verify that there is no data on node1')\n for i in range(1000):\n assert_none(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i)\n )\n\n logger.debug('Restarting node2 and node3')\n node2.start(wait_for_binary_proto=True)\n node3.start(wait_for_binary_proto=True)\n\n # Just repair the base replica\n logger.debug('Starting repair on node1')\n node1.nodetool(\"repair ks t\")\n\n logger.debug('Verify data with cl=ALL')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )",
"def test_view_metadata_cleanup(self):\n session = self.prepare(rf=2, nodes=2)\n\n def populate_data(session, rows):\n logger.debug(\"populate base data\")\n for v in range(rows):\n session.execute(\"INSERT INTO t(k,c,a,b,e,f) VALUES({v},{v},{v},{v},{v},{v})\".format(v=v))\n\n def verify_data(session, rows, views):\n logger.debug(\"verify view data\")\n for v in range(rows):\n for view in range(views):\n assert_one(session, \"SELECT * FROM mv{} WHERE k={v} AND c={v}\".format(view, v=v), [v, v, v, v, v, v])\n\n def create_keyspace(session, ks=\"ks1\", rf=2):\n create_ks(session, ks, rf)\n\n def create_table(session):\n logger.debug(\"create base table\")\n session.execute(\"CREATE TABLE t (k int, c int, a int, b int, e int, f int, primary key(k, c))\")\n\n def create_views(session, views, keyspace=\"ks1\"):\n logger.debug(\"create view\")\n for view in range(views):\n session.execute(\"CREATE MATERIALIZED VIEW mv{} AS SELECT * FROM t \"\n \"WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c,k)\".format(view),\n timeout=60)\n self._wait_for_view(keyspace, \"mv{}\".format(view))\n\n def drop_keyspace(session, keyspace=\"ks1\"):\n logger.debug(\"drop keyspace {}\".format(keyspace))\n session.execute(\"DROP KEYSPACE IF EXISTS {}\".format(keyspace),\n timeout=60)\n\n def drop_views(session, views):\n logger.debug(\"drop all views\")\n for view in range(views):\n session.execute(\"DROP MATERIALIZED VIEW IF EXISTS mv{}\".format(view))\n\n rows = 100\n views = 5\n\n create_keyspace(session)\n create_table(session)\n populate_data(session, rows)\n create_views(session, views)\n verify_data(session, rows, views)\n\n self._assert_view_meta(session, views)\n drop_keyspace(session)\n self._assert_view_meta(session, views, exists=False)\n\n create_keyspace(session)\n create_table(session)\n populate_data(session, rows)\n create_views(session, views)\n verify_data(session, rows, views)\n\n self._assert_view_meta(session, views)\n drop_views(session, views)\n self._assert_view_meta(session, views, exists=False)",
"def test_crc_check_chance(self):\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL \"\n \"AND id IS NOT NULL PRIMARY KEY (v, id) WITH crc_check_chance = 0.5\"))\n\n assert_crc_check_chance_equal(session, \"t_by_v\", 0.5, view=True)\n\n session.execute(\"ALTER MATERIALIZED VIEW t_by_v WITH crc_check_chance = 0.3\")\n\n assert_crc_check_chance_equal(session, \"t_by_v\", 0.3, view=True)",
"def _test_base_column_in_view_pk_commutative_tombstone_(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1 = self.cluster.nodelist()[0]\n\n session.execute('USE ks')\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n for node in self.cluster.nodelist():\n node.nodetool(\"disableautocompaction\")\n\n # sstable 1, Set initial values TS=1\n self.update_view(session, \"INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 1\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, 'a', 3.0])\n\n # sstable 2, change v's value and TS=2, tombstones v=1 and adds v=0 record\n self.update_view(session, \"DELETE FROM t USING TIMESTAMP 2 WHERE id = 1;\", flush)\n assert_none(session, \"SELECT * FROM t_by_v\")\n assert_none(session, \"SELECT * FROM t\")\n\n # sstable 3, tombstones of mv created by base deletion should remain.\n self.update_view(session, \"INSERT INTO t (id, v) VALUES (1, 1) USING TIMESTAMP 3\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None])\n\n # sstable 4, shadow view row (id=1, v=1), insert (id=1, v=2, ts=4)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 set v = 2 WHERE id = 1;\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [2, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 2, None, None])\n\n # sstable 5, shadow view row (id=1, v=2), insert (id=1, v=1 ts=5)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 5 set v = 1 WHERE id = 1;\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None]) # data deleted by row-tombstone@2 should not resurrect\n\n if flush:\n self.cluster.compact()\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None]) # data deleted by row-tombstone@2 should not resurrect\n\n # shadow view row (id=1, v=1)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 5 set v = null WHERE id = 1;\", flush)\n assert_none(session, \"SELECT * FROM t_by_v\")\n assert_one(session, \"SELECT * FROM t\", [1, None, None, None])",
"def _test_mv_with_default_ttl(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1, node2, node3 = self.cluster.nodelist()\n session.execute('USE ks')\n\n logger.debug(\"MV with same key and unselected columns\")\n session.execute(\"CREATE TABLE t2 (k int, a int, b int, c int, primary key(k, a)) with default_time_to_live=600\")\n session.execute((\"CREATE MATERIALIZED VIEW mv2 AS SELECT k,a,b FROM t2 \"\n \"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (a, k)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n self.update_view(session, \"UPDATE t2 SET c=1 WHERE k=1 AND a=1;\", flush)\n assert_one(session, \"SELECT k,a,b,c FROM t2\", [1, 1, None, 1])\n assert_one(session, \"SELECT k,a,b FROM mv2\", [1, 1, None])\n\n self.update_view(session, \"UPDATE t2 SET c=null WHERE k=1 AND a=1;\", flush)\n assert_none(session, \"SELECT k,a,b,c FROM t2\")\n assert_none(session, \"SELECT k,a,b FROM mv2\")\n\n self.update_view(session, \"UPDATE t2 SET c=2 WHERE k=1 AND a=1;\", flush)\n assert_one(session, \"SELECT k,a,b,c FROM t2\", [1, 1, None, 2])\n assert_one(session, \"SELECT k,a,b FROM mv2\", [1, 1, None])\n\n self.update_view(session, \"DELETE c FROM t2 WHERE k=1 AND a=1;\", flush)\n assert_none(session, \"SELECT k,a,b,c FROM t2\")\n assert_none(session, \"SELECT k,a,b FROM mv2\")\n\n if flush:\n self.cluster.compact()\n assert_none(session, \"SELECT * FROM t2\")\n assert_none(session, \"SELECT * FROM mv2\")\n\n # test with user-provided ttl\n self.update_view(session, \"INSERT INTO t2(k,a,b,c) VALUES(2,2,2,2) USING TTL 5\", flush)\n self.update_view(session, \"UPDATE t2 USING TTL 100 SET c=1 WHERE k=2 AND a=2;\", flush)\n self.update_view(session, \"UPDATE t2 USING TTL 50 SET c=2 WHERE k=2 AND a=2;\", flush)\n self.update_view(session, \"DELETE c FROM t2 WHERE k=2 AND a=2;\", flush)\n\n time.sleep(5)\n\n assert_none(session, \"SELECT k,a,b,c FROM t2\")\n assert_none(session, \"SELECT k,a,b FROM mv2\")\n\n if flush:\n self.cluster.compact()\n assert_none(session, \"SELECT * FROM t2\")\n assert_none(session, \"SELECT * FROM mv2\")\n\n logger.debug(\"MV with extra key\")\n session.execute(\"CREATE TABLE t (k int PRIMARY KEY, a int, b int) with default_time_to_live=600\")\n session.execute((\"CREATE MATERIALIZED VIEW mv AS SELECT * FROM t \"\n \"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 1, 1);\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1])\n\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 2, 1);\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 2, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 2, 1])\n\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 3, 1);\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 3, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 3, 1])\n\n if flush:\n self.cluster.compact()\n assert_one(session, \"SELECT * FROM t\", [1, 3, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 3, 1])\n\n # user provided ttl\n start = time.time()\n self.update_view(session, \"UPDATE t USING TTL 100 SET a = 4 WHERE k = 1\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 4, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 4, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 
100:\n pytest.fail(\"Please increase the 100 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n start = time.time()\n self.update_view(session, \"UPDATE t USING TTL 80 SET a = 5 WHERE k = 1\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 5, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 5, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 80:\n pytest.fail(\"Please increase the 80 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n start = time.time()\n self.update_view(session, \"UPDATE t USING TTL 60 SET a = 6 WHERE k = 1\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 6, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 6, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 60:\n pytest.fail(\"Please increase the 60 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n if flush:\n self.cluster.compact()\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 6, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 6, 1])\n except AssertionError as ae:\n if (time.time() - start) >= 60:\n pytest.fail(\"Please increase the 60 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae",
"def test_complex_mv_select_statements(self):\n cluster = self.cluster\n cluster.set_configuration_options({'enable_materialized_views': 'true'})\n cluster.populate(3).start()\n node1 = cluster.nodelist()[0]\n session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)\n\n logger.debug(\"Creating keyspace\")\n session.execute(\"CREATE KEYSPACE mvtest WITH replication = \"\n \"{'class': 'SimpleStrategy', 'replication_factor': '3'}\")\n session.execute('USE mvtest')\n\n mv_primary_keys = [\"((a, b), c)\",\n \"((b, a), c)\",\n \"(a, b, c)\",\n \"(c, b, a)\",\n \"((c, a), b)\"]\n\n for mv_primary_key in mv_primary_keys:\n\n session.execute(\"CREATE TABLE test (a int, b int, c int, d int, PRIMARY KEY (a, b, c))\")\n\n insert_stmt = session.prepare(\"INSERT INTO test (a, b, c, d) VALUES (?, ?, ?, ?)\")\n update_stmt = session.prepare(\"UPDATE test SET d = ? WHERE a = ? AND b = ? AND c = ?\")\n delete_stmt1 = session.prepare(\"DELETE FROM test WHERE a = ? AND b = ? AND c = ?\")\n delete_stmt2 = session.prepare(\"DELETE FROM test WHERE a = ?\")\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n rows = [(0, 0, 0, 0),\n (0, 0, 1, 0),\n (0, 1, 0, 0),\n (0, 1, 1, 0),\n (1, 0, 0, 0),\n (1, 0, 1, 0),\n (1, 1, -1, 0),\n (1, 1, 0, 0),\n (1, 1, 1, 0)]\n\n for row in rows:\n session.execute(insert_stmt, row)\n\n logger.debug(\"Testing MV primary key: {}\".format(mv_primary_key))\n\n session.execute(\"CREATE MATERIALIZED VIEW mv AS SELECT * FROM test WHERE \"\n \"a = 1 AND b IS NOT NULL AND c = 1 PRIMARY KEY {}\".format(mv_primary_key))\n time.sleep(3)\n\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # insert new rows that does not match the filter\n session.execute(insert_stmt, (0, 0, 1, 0))\n session.execute(insert_stmt, (1, 1, 0, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # insert new row that does match the filter\n session.execute(insert_stmt, (1, 2, 1, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # update rows that does not match the filter\n session.execute(update_stmt, (1, 1, -1, 0))\n session.execute(update_stmt, (0, 1, 1, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # update a row that does match the filter\n session.execute(update_stmt, (2, 1, 1, 1))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete rows that does not match the filter\n session.execute(delete_stmt1, (1, 1, -1))\n session.execute(delete_stmt1, (2, 0, 1))\n session.execute(delete_stmt2, (0,))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete a row that does match the filter\n session.execute(delete_stmt1, (1, 1, 1))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete a partition that matches the filter\n session.execute(delete_stmt2, (1,))\n assert_all(session, \"SELECT a, b, c, d FROM mv\", [], 
cl=ConsistencyLevel.QUORUM)\n\n # Cleanup\n session.execute(\"DROP MATERIALIZED VIEW mv\")\n session.execute(\"DROP TABLE test\")",
"def test_base_replica_repair_with_contention(self):\n self._base_replica_repair_test(fail_mv_lock=True)",
"def test_lwt(self):\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"Inserting initial data using IF NOT EXISTS\")\n for i in range(1000):\n session.execute(\n \"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS\".format(v=i)\n )\n self._replay_batchlogs()\n\n logger.debug(\"All rows should have been inserted\")\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )\n\n logger.debug(\"Tyring to UpInsert data with a different value using IF NOT EXISTS\")\n for i in range(1000):\n v = i * 2\n session.execute(\n \"INSERT INTO t (id, v, v2, v3) VALUES ({id}, {v}, 'a', 3.0) IF NOT EXISTS\".format(id=i, v=v)\n )\n self._replay_batchlogs()\n\n logger.debug(\"No rows should have changed\")\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )\n\n logger.debug(\"Update the 10 first rows with a different value\")\n for i in range(1000):\n v = i + 2000\n session.execute(\n \"UPDATE t SET v={v} WHERE id = {id} IF v < 10\".format(id=i, v=v)\n )\n self._replay_batchlogs()\n\n logger.debug(\"Verify that only the 10 first rows changed.\")\n results = list(session.execute(\"SELECT * FROM t_by_v;\"))\n assert len(results) == 1000\n for i in range(1000):\n v = i + 2000 if i < 10 else i\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(v),\n [v, i, 'a', 3.0]\n )\n\n logger.debug(\"Deleting the first 10 rows\")\n for i in range(1000):\n v = i + 2000\n session.execute(\n \"DELETE FROM t WHERE id = {id} IF v = {v} \".format(id=i, v=v)\n )\n self._replay_batchlogs()\n\n logger.debug(\"Verify that only the 10 first rows have been deleted.\")\n results = list(session.execute(\"SELECT * FROM t_by_v;\"))\n assert len(results) == 990\n for i in range(10, 1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )",
"def test_populate_mv_after_insert(self):\n session = self.prepare(consistency_level=ConsistencyLevel.QUORUM)\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int)\")\n\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({v}, {v})\".format(v=i))\n\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL \"\n \"AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"wait for view to build\")\n self._wait_for_view(\"ks\", \"t_by_v\")\n\n logger.debug(\"wait that all batchlogs are replayed\")\n self._replay_batchlogs()\n\n for i in range(1000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(i), [i, i])",
"def testConsistency(self):",
"def test_drop_while_building(self):\n\n session = self.prepare(options={'concurrent_materialized_view_builders': 4}, install_byteman=True)\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n\n logger.debug(\"Inserting initial data\")\n for i in range(5000):\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS\".format(v=i))\n\n logger.debug(\"Slowing down MV build with byteman\")\n for node in self.cluster.nodelist():\n node.byteman_submit([mk_bman_path('4.0/view_builder_task_sleep.btm')])\n\n logger.debug(\"Create a MV\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"Wait and ensure the MV build has started. Waiting up to 2 minutes.\")\n self._wait_for_view_build_start(session, 'ks', 't_by_v', wait_minutes=2)\n\n logger.debug(\"Drop the MV while it is still building\")\n session.execute(\"DROP MATERIALIZED VIEW t_by_v\")\n\n logger.debug(\"Verify that the build has been stopped before its finalization without errors\")\n for node in self.cluster.nodelist():\n self.check_logs_for_errors()\n assert not node.grep_log('Marking view', filename='debug.log')\n assert node.grep_log('Stopping current view builder due to schema change', filename='debug.log')\n\n logger.debug(\"Verify that the view has been removed\")\n failed = False\n try:\n session.execute(\"SELECT COUNT(*) FROM t_by_v\")\n except InvalidRequest:\n failed = True\n self.assertTrue(failed, \"The view shouldn't be queryable\")\n self._assert_view_meta(session, views=1, exists=False)\n\n logger.debug(\"Create the MV again\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"Verify that the MV has been successfully created\")\n self._wait_for_view('ks', 't_by_v')\n assert_one(session, \"SELECT COUNT(*) FROM t_by_v\", [5000])",
"def _test_no_base_column_in_view_pk_complex_timestamp(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1, node2, node3 = self.cluster.nodelist()\n\n session.execute('USE ks')\n session.execute(\"CREATE TABLE t (k int, c int, a int, b int, e int, f int, primary key(k, c))\")\n session.execute((\"CREATE MATERIALIZED VIEW mv AS SELECT k,c,a,b FROM t \"\n \"WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c, k)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n # update unselected, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 1 SET e=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, 1, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # remove unselected, add selected column, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 2 SET e=null, b=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, 1, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, 1])\n\n # remove selected column, view row is removed\n self.update_view(session, \"UPDATE t USING TIMESTAMP 2 SET e=null, b=null WHERE k=1 AND c=1;\", flush)\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")\n\n # update unselected with ts=3, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET f=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # insert livenesssInfo, view row should be alive\n self.update_view(session, \"INSERT INTO t(k,c) VALUES(1,1) USING TIMESTAMP 3\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # remove unselected, view row should be alive because of base livenessInfo alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET f=null WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # add selected column, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET a=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n\n # update unselected, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 SET f=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n\n # delete with ts=3, view row should be alive due to unselected@ts4\n self.update_view(session, \"DELETE FROM t USING TIMESTAMP 3 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # remove unselected, view row should be removed\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 SET f=null WHERE k=1 AND c=1;\", flush)\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")\n\n # add selected with ts=7, view row is alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 7 SET b=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, 
\"SELECT * FROM t\", [1, 1, None, 1, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, 1])\n\n # remove selected with ts=7, view row is dead\n self.update_view(session, \"UPDATE t USING TIMESTAMP 7 SET b=null WHERE k=1 AND c=1;\", flush)\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")\n\n # add selected with ts=5, view row is alive (selected column should not affects each other)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 5 SET a=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n\n start = time.time()\n # add selected with ttl=30 (we apparently need a long ttl because the flushing etc that self.update_view does can take a long time)\n update_time = self.update_view(session, \"UPDATE t USING TTL 30 SET a=1 WHERE k=1 AND c=1;\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n except AssertionError as ae:\n if (time.time() - start) >= 30:\n pytest.fail(\"Please increase the 30 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n wait_time = update_time + 30 - time.time()\n if wait_time > 0:\n time.sleep(wait_time)\n\n start = time.time()\n # update unselected with ttl=30, view row should be alive\n update_time = self.update_view(session, \"UPDATE t USING TTL 30 SET f=1 WHERE k=1 AND c=1;\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n except AssertionError as ae:\n if (time.time() - start) >= 30:\n pytest.fail(\"Please increase the 30 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n wait_time = update_time + 30 - time.time()\n if wait_time > 0:\n time.sleep(wait_time)\n\n # view row still alive due to base livenessInfo\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")",
"def test_immutable(self):\n session = self.prepare(user_table=True)\n\n # cannot insert\n assert_invalid(session, \"INSERT INTO users_by_state (state, username) VALUES ('TX', 'user1');\",\n \"Cannot directly modify a materialized view\")\n\n # cannot update\n assert_invalid(session, \"UPDATE users_by_state SET session_token='XYZ' WHERE username='user1' AND state = 'TX';\",\n \"Cannot directly modify a materialized view\")\n\n # cannot delete a row\n assert_invalid(session, \"DELETE from users_by_state where state='TX';\",\n \"Cannot directly modify a materialized view\")\n\n # cannot delete a cell\n assert_invalid(session, \"DELETE session_token from users_by_state where state='TX';\",\n \"Cannot directly modify a materialized view\")\n\n # cannot alter a table\n assert_invalid(session, \"ALTER TABLE users_by_state ADD first_name varchar\",\n \"Cannot use ALTER TABLE on Materialized View\")",
"def test_populate_mv_after_insert_wide_rows(self):\n session = self.prepare(consistency_level=ConsistencyLevel.QUORUM)\n\n session.execute(\"CREATE TABLE t (id int, v int, PRIMARY KEY (id, v))\")\n session.cluster.control_connection.wait_for_schema_agreement()\n\n for i in range(5):\n for j in range(10000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({}, {})\".format(i, j))\n\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL \"\n \"AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug(\"wait for view to build\")\n self._wait_for_view(\"ks\", \"t_by_v\")\n\n logger.debug(\"wait that all batchlogs are replayed\")\n self._replay_batchlogs()\n for i in range(5):\n for j in range(10000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} AND v = {}\".format(i, j), [j, i])",
"def test_comprehensive_system(self):\n\n with ROV_Validity_Table() as db:\n\n # Run MRT_Parser to fill mrt_announcements table which will\n # be used as the input table for RPKI_Validator.\n input_table = MRT_Announcements_Table.name\n MRT_Parser().run()\n\n RPKI_Validator_Parser().run(table=input_table)\n\n initial_count = db.get_count()\n initial_rows = db.get_all()\n \n # all prefix-origin pairs from input should be in val table\n sql = f\"\"\"SELECT * FROM {input_table} a\n LEFT JOIN {db.name} b\n USING (prefix, origin)\n WHERE b.prefix IS NULL;\"\"\"\n assert len(db.execute(sql)) == 0\n\n # clear validity table and run with a wait before getting data\n # should be the same with and without waiting\n db.clear_table()\n\n RPKI_Validator_Parser().run(table=input_table, wait=True)\n\n second_count = db.get_count()\n second_rows = db.get_all()\n\n assert initial_count == second_count\n assert initial_rows == second_rows",
"def test_migrate_view_fields(self):\n self.test_view = RecordView.create(\n self.testcoll, test_view_id, test_view_create_values\n )\n migrate_coll_data(self.testcoll)\n # Read field definition and check for inline field list\n view_data = self.check_entity_values(\n \"_view\", test_view_id, check_values=test_view_migrated_values\n )\n return",
"def test_create(self):\n session = self.prepare(user_table=True)\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 1, \"Expecting 1 materialized view == got\" + str(result)",
"def propagate_view_creation_over_non_existing_table(self):\n\n cluster = self.cluster\n cluster.populate(3)\n cluster.start()\n node1, node2, node3 = self.cluster.nodelist()\n session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)\n create_ks(session, 'ks', 3)\n\n session.execute('CREATE TABLE users (username varchar PRIMARY KEY, state varchar)')\n\n # create a materialized view only in nodes 1 and 2\n node3.stop(wait_other_notice=True)\n session.execute(('CREATE MATERIALIZED VIEW users_by_state AS '\n 'SELECT * FROM users WHERE state IS NOT NULL AND username IS NOT NULL '\n 'PRIMARY KEY (state, username)'))\n\n # drop the base table only in node 3\n node1.stop(wait_other_notice=True)\n node2.stop(wait_other_notice=True)\n node3.start(wait_for_binary_proto=True)\n session = self.patient_cql_connection(node3, consistency_level=ConsistencyLevel.QUORUM)\n session.execute('DROP TABLE ks.users')\n\n # restart the cluster\n cluster.stop()\n cluster.start()\n\n # node3 should have received and ignored the creation of the MV over the dropped table\n assert node3.grep_log('Not adding view users_by_state because the base table')",
"def test_change_provisioned_throughput_usual_case():",
"def test_resume_stopped_build(self):\n\n session = self.prepare(options={'concurrent_materialized_view_builders': 4}, install_byteman=True)\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n nodes = self.cluster.nodelist()\n self.fixture_dtest_setup.ignore_log_patterns = [r'Compaction interrupted: View build']\n\n logger.debug(\"Inserting initial data\")\n for i in range(5000):\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS\".format(v=i))\n\n logger.debug(\"Slowing down MV build with byteman\")\n for node in nodes:\n node.byteman_submit([mk_bman_path('4.0/view_builder_task_sleep.btm')])\n\n logger.debug(\"Create a MV\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"Wait and ensure the MV build has started. Waiting up to 2 minutes.\")\n self._wait_for_view_build_start(session, 'ks', 't_by_v', wait_minutes=2)\n\n logger.debug(\"Stopping all running view build tasks with nodetool\")\n for node in nodes:\n node.watch_log_for('Starting new view build for range', filename='debug.log', timeout=120)\n node.nodetool('stop VIEW_BUILD')\n\n logger.debug(\"Checking logs to verify that some view build tasks have been stopped\")\n for node in nodes:\n node.watch_log_for('Stopped build for view', filename='debug.log', timeout=120)\n node.watch_log_for('Compaction interrupted: View build', filename='system.log', timeout=120)\n node.watch_log_for('Interrupted build for view', filename='debug.log', timeout=120)\n assert not node.grep_log('Marking view', filename='debug.log')\n self.check_logs_for_errors()\n\n logger.debug(\"Check that MV shouldn't be built yet.\")\n assert len(list(session.execute(\"SELECT COUNT(*) FROM t_by_v\"))) != 5000\n\n logger.debug(\"Restart the cluster\")\n self.cluster.stop()\n marks = [node.mark_log() for node in nodes]\n self.cluster.start()\n session = self.patient_cql_connection(nodes[0])\n\n logger.debug(\"Verify that the MV has been successfully created\")\n self._wait_for_view('ks', 't_by_v')\n assert_one(session, \"SELECT COUNT(*) FROM ks.t_by_v\", [5000])\n\n logger.debug(\"Checking logs to verify that the view build has been resumed and completed after restart\")\n for node, mark in zip(nodes, marks):\n assert node.grep_log('Resuming view build', filename='debug.log', from_mark=mark)\n assert node.grep_log('Marking view', filename='debug.log', from_mark=mark)\n self.check_logs_for_errors()",
"def test_drop_mv(self):\n session = self.prepare(user_table=True)\n\n # create another materialized view\n session.execute((\"CREATE MATERIALIZED VIEW users_by_birth_year AS \"\n \"SELECT * FROM users WHERE birth_year IS NOT NULL AND \"\n \"username IS NOT NULL PRIMARY KEY (birth_year, username)\"))\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 2, \"Expecting {} materialized view, got {}\".format(2, len(result))\n\n session.execute(\"DROP MATERIALIZED VIEW ks.users_by_state;\")\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 1, \"Expecting {} materialized view, got {}\".format(1, len(result))",
"def test_backup_restore_with_views(self):\n if \"ephemeral\" in self.input.param(\"bucket_type\", 'membase'):\n self.log.info(\"\\n****** view does not support on ephemeral bucket ******\")\n return\n rest_src = RestConnection(self.backupset.cluster_host)\n if \"community\" in self.cb_version:\n rest_src.add_node(self.servers[1].rest_username, self.servers[1].rest_password,\n self.servers[1].cluster_ip, services=['kv', 'index', 'n1ql'])\n else:\n rest_src.add_node(self.servers[1].rest_username, self.servers[1].rest_password,\n self.servers[1].cluster_ip, services=['index', 'kv'])\n rebalance = self.cluster.async_rebalance(self.cluster_to_backup, [], [])\n rebalance.result()\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n default_map_func = \"function (doc) {\\n emit(doc._id, doc);\\n}\"\n default_view_name = \"test\"\n default_ddoc_name = \"ddoc_test\"\n prefix = \"dev_\"\n query = {\"full_set\": \"true\", \"stale\": \"false\", \"connection_timeout\": 60000}\n view = View(default_view_name, default_map_func)\n task = self.cluster.async_create_view(self.backupset.cluster_host,\n default_ddoc_name, view, \"default\")\n task.result()\n self.backup_cluster_validate()\n rest_target = RestConnection(self.backupset.restore_cluster_host)\n if self.input.clusters[0][1].ip != self.servers[1].ip:\n rest_target.add_node(self.input.clusters[0][1].rest_username,\n self.input.clusters[0][1].rest_password,\n self.input.clusters[0][1].cluster_ip, services=['kv', 'index'])\n rebalance = self.cluster.async_rebalance(self.cluster_to_restore, [], [])\n rebalance.result()\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\">=\")\n try:\n result = self.cluster.query_view(self.backupset.restore_cluster_host,\n prefix + default_ddoc_name,\n default_view_name, query, timeout=30)\n self.assertEqual(len(result['rows']), self.num_items,\n \"Querying view on restore cluster did not return expected number of items\")\n self.log.info(\"Querying view on restore cluster returned expected number of items\")\n except TimeoutError:\n self.fail(\"View could not be queried in restore cluster within timeout\")",
"def test_add_node_after_wide_mv_with_range_deletions(self):\n\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int, v int, PRIMARY KEY (id, v)) WITH compaction = { 'class': 'SizeTieredCompactionStrategy', 'enabled': 'false' }\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n for i in range(10):\n for j in range(100):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=j))\n\n self.cluster.flush()\n\n for i in range(10):\n for j in range(100):\n assert_one(session, \"SELECT * FROM t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n for i in range(10):\n for j in range(100):\n if j % 10 == 0:\n session.execute(\"DELETE FROM t WHERE id = {} AND v >= {} and v < {}\".format(i, j, j + 2))\n\n self.cluster.flush()\n\n for i in range(10):\n for j in range(100):\n if j % 10 == 0 or (j - 1) % 10 == 0:\n assert_none(session, \"SELECT * FROM t WHERE id = {} and v = {}\".format(i, j))\n assert_none(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j))\n else:\n assert_one(session, \"SELECT * FROM t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n node4 = new_node(self.cluster, data_center=\"dc1\")\n node4.set_configuration_options(values={'max_mutation_size_in_kb': 25}) # CASSANDRA-11670\n logger.debug(\"Start join at {}\".format(time.strftime(\"%H:%M:%S\")))\n node4.start(wait_for_binary_proto=True, jvm_args=[\"-Dcassandra.migration_task_wait_in_seconds={}\".format(MIGRATION_WAIT)])\n\n session2 = self.patient_exclusive_cql_connection(node4)\n\n for i in range(10):\n for j in range(100):\n if j % 10 == 0 or (j - 1) % 10 == 0:\n assert_none(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j))\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j))\n else:\n assert_one(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n for i in range(10):\n for j in range(100, 110):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=j))\n\n for i in range(10):\n for j in range(110):\n if j < 100 and (j % 10 == 0 or (j - 1) % 10 == 0):\n assert_none(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j))\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j))\n else:\n assert_one(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])",
"def test_add_node_after_mv(self):\n\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=-i))\n\n for i in range(1000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(-i), [-i, i])\n\n node4 = new_node(self.cluster, data_center=\"dc1\")\n node4.start(wait_for_binary_proto=True, jvm_args=[\"-Dcassandra.migration_task_wait_in_seconds={}\".format(MIGRATION_WAIT)])\n\n session2 = self.patient_exclusive_cql_connection(node4)\n\n \"\"\"\n @jira_ticket CASSANDRA-12984\n\n Assert that MVs are marked as build after bootstrap. Otherwise newly streamed MVs will be built again\n \"\"\"\n assert_one(session2, \"SELECT count(*) FROM system.built_views WHERE keyspace_name = 'ks' AND view_name = 't_by_v'\", [1])\n\n for i in range(1000):\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(-i), [-i, i])\n\n for i in range(1000, 1100):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=-i))\n\n for i in range(1000, 1100):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(-i), [-i, i])",
"def test_key_repair_lens(self):\n # Create new work trail and retrieve the HEAD workflow of the default\n # branch\n f_handle = self.filestore.upload_file(KEY_REPAIR_FILE)\n ds1 = self.datastore.load_dataset(f_handle=f_handle)\n # Missing Value Lens\n command = cmd.mimir_key_repair(DATASET_NAME, ds1.column_by_name('Empid').identifier)\n result = self.compute_lens_result(ds1, command)\n self.assertTrue(result.is_success)\n ds = self.datastore.get_dataset(result.provenance.write[DATASET_NAME].identifier)\n self.assertEqual(len(ds.columns), 4)\n self.assertEqual(ds.row_count, 2)\n names = set()\n empids = set()\n for row in ds.fetch_rows():\n empids.add(int(row.values[0]))\n names.add(row.values[1])\n self.assertTrue(1 in empids)\n self.assertTrue('Alice' in names or 'Bob' in names)\n self.assertFalse('Alice' in names and 'Bob' in names)\n self.assertTrue('Carla' in names)\n # Test error case and command text\n with self.assertRaises(ValueError):\n command = cmd.mimir_key_repair('MY DS', 'MY COL')\n result = self.compute_lens_result(ds, command)",
"def sstable_repairedset_test(self):\n cluster = self.cluster\n cluster.set_configuration_options(values={'hinted_handoff_enabled': False})\n cluster.populate(2).start()\n node1, node2 = cluster.nodelist()\n node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=2)', 'compaction(strategy=SizeTieredCompactionStrategy,enabled=false)', '-rate', 'threads=50'])\n\n node1.flush()\n node2.flush()\n\n node2.stop(gently=False)\n\n node2.run_sstablerepairedset(keyspace='keyspace1')\n node2.start(wait_for_binary_proto=True)\n\n initialOut1 = node1.run_sstablemetadata(keyspace='keyspace1').stdout\n initialOut2 = node2.run_sstablemetadata(keyspace='keyspace1').stdout\n\n matches = findall('(?<=Repaired at:).*', '\\n'.join([initialOut1, initialOut2]))\n debug(\"Repair timestamps are: {}\".format(matches))\n\n uniquematches = set(matches)\n matchcount = Counter(matches)\n\n self.assertGreaterEqual(len(uniquematches), 2, uniquematches)\n\n self.assertGreaterEqual(max(matchcount), 1, matchcount)\n\n self.assertIn('Repaired at: 0', '\\n'.join([initialOut1, initialOut2]))\n\n node1.stop()\n node2.stress(['write', 'n=15K', 'no-warmup', '-schema', 'replication(factor=2)'])\n node2.flush()\n node1.start(wait_for_binary_proto=True)\n\n if cluster.version() >= \"2.2\":\n node1.repair()\n else:\n node1.nodetool(\"repair -par -inc\")\n\n finalOut1 = node1.run_sstablemetadata(keyspace='keyspace1').stdout\n finalOut2 = node2.run_sstablemetadata(keyspace='keyspace1').stdout\n\n matches = findall('(?<=Repaired at:).*', '\\n'.join([finalOut1, finalOut2]))\n\n debug(matches)\n\n uniquematches = set(matches)\n matchcount = Counter(matches)\n\n self.assertGreaterEqual(len(uniquematches), 2)\n\n self.assertGreaterEqual(max(matchcount), 2)\n\n self.assertNotIn('Repaired at: 0', '\\n'.join([finalOut1, finalOut2]))"
]
| [
"0.7456203",
"0.70387155",
"0.6999308",
"0.69620454",
"0.68222475",
"0.679167",
"0.6605587",
"0.6563779",
"0.6553053",
"0.64597106",
"0.6333627",
"0.6204435",
"0.6145481",
"0.60931844",
"0.6082192",
"0.60116386",
"0.6002193",
"0.60000944",
"0.59826684",
"0.5974092",
"0.59694266",
"0.5923133",
"0.5849956",
"0.5849372",
"0.58317363",
"0.5816562",
"0.5733781",
"0.5725482",
"0.57149535",
"0.5692693"
]
| 0.7250699 | 1 |
Test complex MV select statements CASSANDRA-9664 | def test_complex_mv_select_statements(self):
cluster = self.cluster
cluster.set_configuration_options({'enable_materialized_views': 'true'})
cluster.populate(3).start()
node1 = cluster.nodelist()[0]
session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)
logger.debug("Creating keyspace")
session.execute("CREATE KEYSPACE mvtest WITH replication = "
"{'class': 'SimpleStrategy', 'replication_factor': '3'}")
session.execute('USE mvtest')
mv_primary_keys = ["((a, b), c)",
"((b, a), c)",
"(a, b, c)",
"(c, b, a)",
"((c, a), b)"]
for mv_primary_key in mv_primary_keys:
session.execute("CREATE TABLE test (a int, b int, c int, d int, PRIMARY KEY (a, b, c))")
insert_stmt = session.prepare("INSERT INTO test (a, b, c, d) VALUES (?, ?, ?, ?)")
update_stmt = session.prepare("UPDATE test SET d = ? WHERE a = ? AND b = ? AND c = ?")
delete_stmt1 = session.prepare("DELETE FROM test WHERE a = ? AND b = ? AND c = ?")
delete_stmt2 = session.prepare("DELETE FROM test WHERE a = ?")
session.cluster.control_connection.wait_for_schema_agreement()
rows = [(0, 0, 0, 0),
(0, 0, 1, 0),
(0, 1, 0, 0),
(0, 1, 1, 0),
(1, 0, 0, 0),
(1, 0, 1, 0),
(1, 1, -1, 0),
(1, 1, 0, 0),
(1, 1, 1, 0)]
for row in rows:
session.execute(insert_stmt, row)
logger.debug("Testing MV primary key: {}".format(mv_primary_key))
session.execute("CREATE MATERIALIZED VIEW mv AS SELECT * FROM test WHERE "
"a = 1 AND b IS NOT NULL AND c = 1 PRIMARY KEY {}".format(mv_primary_key))
time.sleep(3)
assert_all(
session, "SELECT a, b, c, d FROM mv",
[[1, 0, 1, 0], [1, 1, 1, 0]],
ignore_order=True,
cl=ConsistencyLevel.QUORUM
)
            # insert new rows that do not match the filter
session.execute(insert_stmt, (0, 0, 1, 0))
session.execute(insert_stmt, (1, 1, 0, 0))
assert_all(
session, "SELECT a, b, c, d FROM mv",
[[1, 0, 1, 0], [1, 1, 1, 0]],
ignore_order=True,
cl=ConsistencyLevel.QUORUM
)
# insert new row that does match the filter
session.execute(insert_stmt, (1, 2, 1, 0))
assert_all(
session, "SELECT a, b, c, d FROM mv",
[[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],
ignore_order=True,
cl=ConsistencyLevel.QUORUM
)
            # update rows that do not match the filter
session.execute(update_stmt, (1, 1, -1, 0))
session.execute(update_stmt, (0, 1, 1, 0))
assert_all(
session, "SELECT a, b, c, d FROM mv",
[[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],
ignore_order=True,
cl=ConsistencyLevel.QUORUM
)
# update a row that does match the filter
session.execute(update_stmt, (2, 1, 1, 1))
assert_all(
session, "SELECT a, b, c, d FROM mv",
[[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],
ignore_order=True,
cl=ConsistencyLevel.QUORUM
)
            # delete rows that do not match the filter
session.execute(delete_stmt1, (1, 1, -1))
session.execute(delete_stmt1, (2, 0, 1))
session.execute(delete_stmt2, (0,))
assert_all(
session, "SELECT a, b, c, d FROM mv",
[[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],
ignore_order=True,
cl=ConsistencyLevel.QUORUM
)
# delete a row that does match the filter
session.execute(delete_stmt1, (1, 1, 1))
assert_all(
session, "SELECT a, b, c, d FROM mv",
[[1, 0, 1, 0], [1, 2, 1, 0]],
ignore_order=True,
cl=ConsistencyLevel.QUORUM
)
# delete a partition that matches the filter
session.execute(delete_stmt2, (1,))
assert_all(session, "SELECT a, b, c, d FROM mv", [], cl=ConsistencyLevel.QUORUM)
# Cleanup
session.execute("DROP MATERIALIZED VIEW mv")
session.execute("DROP TABLE test") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_select(self):\n self.assertEqual(['SELECT',\n ['MAX(*)', 'AS', 'a'], ['(SELECT 1 FROM Q)', 'AS', 'b'], ['c', 'AS', 'c.a'],\n 'FROM', 'T', ''],\n grammar._SELECT_EXPR.parseString(\"SELECT MAX(*) AS a, (SELECT 1 FROM Q) AS b,\"\n \"c AS `c.a` FROM T;\").asList())\n self.assertEqual(['SELECT',\n ['MAX(*)', 'AS', 'a'], ['(SELECT 1 FROM Q)', 'AS', 'b'], ['c', 'AS', 'c.a'],\n ['INTO', 'a', 'b', 'c'], 'FROM', 'T', '', 'object'],\n grammar._SELECT_EXPR.parseString(\"SELECT MAX(*) AS a, (SELECT 1 FROM Q) AS b,\"\n \"c AS `c.a` INTO a,b,c FROM T; -- > object\").asList())",
"def test_query(rgd):\n data = rgd.query(\"test\")\n assert isinstance(data, pd.DataFrame)\n assert data.iloc[0][\"name\"] == \"vm1\"",
"def query(mdx_stmt):",
"def run_select_examples():\n table = \"actors\"\n select_fields = ['name', 'last_name', 'country']\n select_conds1 = {}\n select_conds2 = {'id': 3}\n select_conds3 = {'id': 3, 'name': \"Matt\"}\n print querify.select_from_dict(table, select_fields)\n print querify.select_from_dict(table, select_fields, select_conds1)\n print querify.select_from_dict(table, select_fields, select_conds2)\n print querify.select_from_dict(table, select_fields, select_conds3)",
"def test_query_expression_get_success_case(self):\r\n m = self.table.get(self.table.column('test_id') == 0, self.table.column('attempt_id') == 0)\r\n assert isinstance(m, ResultObject)\r\n assert m.test_id == 0\r\n assert m.attempt_id == 0\r\n\r\n q = self.table.objects(self.table.column('test_id') == 0, self.table.column('attempt_id') == 0)\r\n m = q.get()\r\n assert isinstance(m, ResultObject)\r\n assert m.test_id == 0\r\n assert m.attempt_id == 0\r\n\r\n q = self.table.objects(self.table.column('test_id') == 0)\r\n m = q.get(self.table.column('attempt_id') == 0)\r\n assert isinstance(m, ResultObject)\r\n assert m.test_id == 0\r\n assert m.attempt_id == 0",
"def test_mixed_select(self, document):\n assert document.select({\"_id\": 0, \"hello\": 1}) == {\"hello\": \"there\"}",
"def test_do_select_all(test_dao):\r\n DUT = dtmFunction(test_dao, test=True)\r\n\r\n assert DUT.do_select_all(revision_id=1) is None\r\n assert isinstance(DUT.tree, Tree)\r\n assert isinstance(DUT.tree.get_node(1).data, RAMSTKFunction)",
"def test_do_select(test_dao):\r\n DUT = dtmFunction(test_dao, test=True)\r\n DUT.do_select_all(revision_id=1)\r\n _function = DUT.do_select(1)\r\n\r\n assert isinstance(_function, RAMSTKFunction)\r\n assert _function.function_id == 1\r\n assert _function.availability_logistics == 1.0",
"def test_function(self):\n\n s = select([users,\n (users.c.user_id * 2).label('concat'),\n func.count(addresses.c.address_id).label('count')],\n users.c.user_id == addresses.c.user_id,\n group_by=[c for c in users.c]).alias('myselect')\n\n mapper(User, s)\n sess = create_session()\n l = sess.query(User).all()\n for u in l:\n print \"User\", u.user_id, u.user_name, u.concat, u.count\n assert l[0].concat == l[0].user_id * 2 == 14\n assert l[1].concat == l[1].user_id * 2 == 16",
"def test_select_column(self):\n self.assertEqual(\"(SELECT * FROM A WHERE id > 0)\",\n grammar._NESTED_SELECT.parseString(\"(SELECT * FROM A WHERE id > 0)\")[0])\n self.assertEqual(\"(SELECT COUNT(*) FROM A WHERE id > 0)\",\n grammar._NESTED_SELECT.parseString(\"(SELECT COUNT(*) FROM A WHERE id > 0)\")[0])\n self.assertEqual(\"EXISTS(SELECT * FROM A WHERE id > 0)\",\n grammar._NESTED_CALL.parseString(\"EXISTS(SELECT * FROM A WHERE id > 0)\")[0])\n self.assertEqual(\"MAX(*)\",\n grammar._NESTED_CALL.parseString(\"MAX(*)\")[0])\n self.assertEqual(\"count\",\n grammar._SELECT_COLUMN.parseString(\"`count`\").name[0])\n\n self.assertEqual([\"count\", \"max\", \"id\"],\n [x[-1] for x in\n grammar._SELECT_COLUMN_LIST.parseString(\n \"(SELECT COUNT(*) FROM A) AS `count`, MAX(*) AS `max`, id\").columns])",
"def test_select_columns(self):\n self.insert()\n data = self.tbl.select()\n assert (u'id',) + tuple(data.columns) == self.tbl.columns",
"def test_select(self):\n my_conn = MySQL(*self.conn_params)\n table_name = \"inf_schema\"\n inf_schema = my_conn.get_table(table_name)\n # SELECT * FROM inf_schema\n # WHERE table_name like 'INNO%' AND avg_row_length > 100\n results = my_conn.engine.execute(select('*')\n .where(inf_schema.c.table_name\n .like('INNO%'))\n .where(inf_schema.c.avg_row_length >\n 100)\n .select_from(inf_schema)).fetchall()\n table_df = pd.DataFrame(results)\n self.assertGreaterEqual(len(table_df), 6)",
"def query3() :",
"def test_select_two(self, document):\n assert document.select({\"hello\": 1, \"mighty\": 1}) == {\n \"_id\": 1,\n \"hello\": \"there\",\n \"mighty\": \"duck\",\n }",
"def test_boolean_complex_selection(self):\n\n # The residue selection loop.\n sel = list(mol_res_spin.residue_loop(\"#Ap4Aase:4 & :Pro | #RNA\"))\n\n # Residue names and numbers.\n names = ['Pro', None, None]\n numbers = [4, -5, -4]\n\n # The residues.\n self.assertEqual(len(sel), 3)\n for i in range(3):\n self.assertEqual(sel[i].name, names[i])\n self.assertEqual(sel[i].num, numbers[i])",
"def test_get_rows_with_sql(self):\n error = None\n try:\n statements = [\n \"select timestamp from cpu_idle\",\n \"select value from cpu_idle\",\n \"select host from cpu_idle\",\n \"select timestamp,field1 from cpu_idle\",\n \"select * from cpu_idle\",\n \"select timestamp, value from cpu_idle order by timestamp \",\n \"select timestamp, value from cpu_idle order by timestamp desc\",\n '''select timestamp, value from cpu_idle\n where value > 30 and timestamp >150937263000''',\n \"select host, count(1) from cpu_idle group by host\",\n '''select time_bucket(timestamp, '2 days') as DAY, sum(value) as SUM\n from cpu_idle group by time_bucket(timestamp, '2 days')\n order by time_bucket(timestamp, '2 days')''',\n \"select timestamp, ((field2 - field1) * 10) as RESULT, host from cpu_idle\",\n \"select timestamp from cpu_idle\",\n '''SELECT field1, CASE field1 WHEN 1 THEN 'one' WHEN 2 THEN 'two' ELSE 'many' END\n FROM cpu_idle''',\n \"SELECT field1, IF(field1>100,1,0) as result FROM cpu_idle\",\n \"SELECT field1, field2, COALESCE (field1, field2) as result FROM cpu_idle\",\n \"SELECT field1, abs (field1) as result FROM cpu_idle\",\n \"SELECT field1, sqrt (field1) as result FROM cpu_idle\",\n \"SELECT field1, cbrt (field1) as result FROM cpu_idle\",\n \"SELECT field1, ceil (field1) as result FROM cpu_idle\",\n \"SELECT field1, floor (field1) as result FROM cpu_idle\",\n \"SELECT 'str1' || 'str2' as result FROM cpu_idle\",\n '''SELECT time_bucket(timestamp, '2 days') as DAY, avg(field1) as result \n FROM cpu_idle group by time_bucket(timestamp, '2 days')\n order by time_bucket(timestamp, '2 days')''',\n ''' SELECT count(*) as result \n FROM cpu_idle where timestamp < 1525611901''',\n ''' SELECT time_bucket(timestamp, '2 days') as DAY, count(field1) as count \n FROM cpu_idle group by time_bucket(timestamp, '2 days')\n order by time_bucket(timestamp, '2 days')''',\n '''SELECT max_by(field1,field2) as result \n FROM cpu_idle where timestamp < 1525611901000 ''',\n '''SELECT min_by(field1,field2) as result \n FROM cpu_idle where timestamp < 1525611901000\t''',\n '''SELECT max(field1) as result \n FROM cpu_idle where timestamp < 1525611901000''',\n '''SELECT min(field1) as result \n FROM cpu_idle where timestamp < 1525611901000''',\n '''SELECT time_bucket(timestamp, '2 days') as DAY, sum(field1) as sum \n FROM cpu_idle group by time_bucket(timestamp, '2 days')\n order by time_bucket(timestamp, '2 days')'''\n ]\n for statement in statements:\n response = self.tsdb_client.get_rows_with_sql(statement)\n print(statement, response)\n except BaseException as e:\n error = e\n finally:\n self.assertIsNone(error)",
"def test_select_all(self, document):\n assert document.select({}) == {\"_id\": 1, \"hello\": \"there\", \"mighty\": \"duck\"}",
"def useful_test_function(db, query):\n print pd.read_sql_query(query, db)",
"def test_select_crawler_nested():\n sql = \"\"\"\nselect\n a.x, a.y, b.z\nfrom a\njoin (\n with d as (\n select x, z from b\n )\n select * from d\n) using (x)\n \"\"\"\n crawler, linter = _parse_and_crawl_outer(sql)\n sc = SelectCrawler(\n crawler.query_tree.selectables[0]\n .select_info.table_aliases[1]\n .from_expression_element,\n linter.dialect,\n )\n assert sc.query_tree.as_json() == {\n \"selectables\": [\n \"select * from d\",\n ],\n \"ctes\": {\"D\": {\"selectables\": [\"select x, z from b\"]}},\n \"query_type\": \"WithCompound\",\n }",
"def test_query_expression_parsing(self):\r\n query1 = self.table.filter(self.table.column('test_id') == 5)\r\n assert len(query1._where) == 1\r\n\r\n op = query1._where[0]\r\n assert isinstance(op.operator, operators.EqualsOperator)\r\n assert op.value == 5\r\n\r\n query2 = query1.filter(self.table.column('expected_result') >= 1)\r\n assert len(query2._where) == 2\r\n\r\n op = query2._where[1]\r\n assert isinstance(op.operator, operators.GreaterThanOrEqualOperator)\r\n assert op.value == 1",
"def test_query_expression_count(self):\r\n assert self.table.objects.count() == 12\r\n\r\n q = self.table.objects(self.table.column('test_id') == 0)\r\n assert q.count() == 4",
"def test_custom_query_basic(self):\n\n # Create a simple query statement a\n query = \"SELECT * FROM system.local\"\n statement = SimpleStatement(query)\n # Validate that various types of custom payloads are sent and received okay\n self.validate_various_custom_payloads(statement=statement)",
"def test_iterating_query_with_arguments(self):\n with Database(connstr) as db:\n for row in db.query(\"\"\"select i, dc from test where i = %s or i = %s\"\"\", 2, 3):\n\n drow = row.as_dict\n i, dc = drow['i'], drow['dc']\n assert len(row) == 2\n assert dc == Decimal('0.{}'.format(i))\n assert repr(row) == '<Row {\"dc\": \"%s\", \"i\": %s}>' % (dc, i)",
"def test_two_groups(self):\n q = big_query_query.Query()\n q.filter('q', 'test')\n\n s1_1 = q.new_subquery()\n s1_1.filter('s1_1', 'test')\n s1_2 = q.new_subquery()\n s1_2.filter('s1_2', 'test')\n\n s2_1 = q.new_subquery()\n s2_1.filter('s2_1', 'test')\n s2_2 = q.new_subquery()\n s2_2.filter('s2_2', 'test')\n\n q.union(s1_1, s1_2)\n q.union(s2_1, s2_2)\n\n self.assertEqual(\n ('(q = \"test\" AND ((s1_1 = \"test\") OR (s1_2 = \"test\")) AND '\n '((s2_1 = \"test\") OR (s2_2 = \"test\")))'), q.get_where_clause())",
"def testSQLString(self): \n val = selectAllSQLStr()\n self.assertEqual(val,\"SELECT * FROM bookStore\")",
"def testQueryColumns(self):\n scaffolder = plaso_sqlite.PlasoSQLiteScaffolder()\n test_string = (\n 'SELECT foobar as Foo, foobar.dot, random, reallylong AS long FROM '\n 'foobarengine WHERE foobar = 1')\n expected_columns = set(['foo', 'dot', 'random', 'long'])\n self._RunQueryTests(scaffolder, test_string, expected_columns)\n\n test_string = (\n 'select one, two as three, four as five, f.eight as EIGHTE FROM '\n 'foobar f, scode s WHERE f.id = s.id ORDER BY one')\n expected_columns = set(['one', 'three', 'five', 'eighte'])\n self._RunQueryTests(scaffolder, test_string, expected_columns)\n\n test_string = (\n 'this should not produce anything...')\n self._RunQueryTests(scaffolder, test_string, set())",
"def testQuery(self):\n # Clear anything first\n for i in range(10):\n row_name = \"aff4:/row:%s\" % i\n data_store.DB.Set(row_name, \"metadata:%s\" % i, str(i), timestamp=5,\n token=self.token)\n data_store.DB.Set(row_name, \"aff4:type\", \"test\", token=self.token)\n\n # Retrieve all subjects with metadata:5 set:\n rows = [row for row in data_store.DB.Query(\n [\"metadata:5\"], data_store.DB.filter.HasPredicateFilter(\"metadata:5\"),\n subject_prefix=\"aff4:/row:\", token=self.token)]\n\n self.assertEqual(len(rows), 1)\n self.assertEqual(rows[0][\"subject\"][0][0], \"aff4:/row:5\")\n self.assertEqual(rows[0][\"metadata:5\"][0][0], \"5\")\n self.assertEqual(rows[0][\"metadata:5\"][0][1], 5)",
"def test_complex_session() -> None:\n s = UnifiedAlchemyMagicMock(\n data=[\n (\n [mock.call.query(Data), mock.call.filter(Data.data_p1 < 13)],\n [\n Data(pk1=1, data_p1=11.4, data_p2=13.5, name=\"test1\"),\n Data(pk1=2, data_p1=9.4, data_p2=19.5, name=\"test2\"),\n Data(pk1=3, data_p1=4.7, data_p2=15.5, name=\"test3\"),\n Data(pk1=4, data_p1=3.4, data_p2=13.5, name=\"test4\"),\n ],\n ),\n (\n [mock.call.query(Data), mock.call.filter(Data.data_p1 >= 13)],\n [\n Data(pk1=5, data_p1=16.3, data_p2=3.5, name=\"test6\"),\n Data(pk1=6, data_p1=19.3, data_p2=10.5, name=\"test7\"),\n Data(pk1=7, data_p1=13.3, data_p2=33.7, name=\"test8\"),\n ],\n ),\n ]\n )\n new_data = [\n Data(pk1=8, data_p1=16.3, data_p2=38.15, name=\"test9\"),\n Data(pk1=9, data_p1=13.6, data_p2=33.5, name=\"test10\"),\n Data(pk1=10, data_p1=10.1, data_p2=331.35, name=\"test11\"),\n Data(pk1=1, data_p1=2.5, data_p2=67.1, name=\"test12\"),\n ]\n s.add_all(new_data)\n s.add(Data(pk1=11, data_p1=31.5, data_p2=67.1, name=\"test13\"))\n ret = s.query(Data).all()\n expected_data = [str(r) for r in ret]\n assert expected_data == [\"8test9\", \"9test10\", \"10test11\", \"1test12\", \"11test13\"]\n n_d = s.query(Data).filter(Data.data_p1 < 13).delete()\n assert n_d == 4\n # test arbitrary parameters to delete\n n_d = (\n s.query(Data)\n .filter(Data.data_p1 >= 13)\n .delete(synchronize_session=False, test1=1, test2=2)\n )\n assert n_d == 3\n ret = s.query(Data).filter(Data.data_p1 >= 13).all()\n assert ret == []\n ret = s.query(Data).all()\n expected_data = [str(r) for r in ret]\n assert expected_data == [\"8test9\", \"9test10\", \"10test11\", \"1test12\", \"11test13\"]\n ret = s.query(Data).filter(Data.data_p1 < 13).all()\n assert ret == []",
"def test_really_complex_repair(self):\n session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)\n node1, node2, node3, node4, node5 = self.cluster.nodelist()\n\n # we create the base table with gc_grace_seconds=5 so batchlog will expire after 5 seconds\n session.execute(\"CREATE TABLE ks.t (id int, v int, v2 text, v3 decimal, PRIMARY KEY(id, v, v2))\"\n \"WITH gc_grace_seconds = 1\")\n session.execute((\"CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL AND v IS NOT NULL AND \"\n \"v2 IS NOT NULL PRIMARY KEY (v2, v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Shutdown node2 and node3')\n node2.stop(wait_other_notice=True)\n node3.stop(wait_other_notice=True)\n\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0)\")\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'a', 3.0)\")\n self._replay_batchlogs()\n logger.debug('Verify the data in the MV on node1 with CL=ONE')\n assert_all(session, \"SELECT * FROM ks.t_by_v WHERE v2 = 'a'\", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]])\n\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'b', 3.0)\")\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'b', 3.0)\")\n self._replay_batchlogs()\n logger.debug('Verify the data in the MV on node1 with CL=ONE')\n assert_all(session, \"SELECT * FROM ks.t_by_v WHERE v2 = 'b'\", [['b', 1, 1, 3.0], ['b', 2, 2, 3.0]])\n\n session.shutdown()\n\n logger.debug('Shutdown node1, node4 and node5')\n node1.stop()\n node4.stop()\n node5.stop()\n\n logger.debug('Start nodes 2 and 3')\n node2.start()\n node3.start(wait_for_binary_proto=True)\n\n session2 = self.patient_cql_connection(node2)\n session2.execute('USE ks')\n\n logger.debug('Verify the data in the MV on node2 with CL=ONE. 
No rows should be found.')\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'a'\")\n\n logger.debug('Write new data in node2 that overlap those in node1')\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'c', 3.0)\")\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'c', 3.0)\")\n self._replay_batchlogs()\n assert_all(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'c'\", [['c', 1, 1, 3.0], ['c', 2, 2, 3.0]])\n\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'd', 3.0)\")\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'd', 3.0)\")\n self._replay_batchlogs()\n assert_all(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'd'\", [['d', 1, 1, 3.0], ['d', 2, 2, 3.0]])\n\n logger.debug(\"Composite delete of everything\")\n session2.execute(\"DELETE FROM ks.t WHERE id = 1 and v = 1\")\n session2.execute(\"DELETE FROM ks.t WHERE id = 2 and v = 2\")\n self._replay_batchlogs()\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'c'\")\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'd'\")\n\n logger.debug('Wait for batchlogs to expire from node2 and node3')\n time.sleep(5)\n\n logger.debug('Start remaining nodes')\n node1.start(wait_for_binary_proto=True)\n node4.start(wait_for_binary_proto=True)\n node5.start(wait_for_binary_proto=True)\n\n # at this point the data isn't repaired so we have an inconsistency.\n # this value should return None\n assert_all(\n session2,\n \"SELECT * FROM ks.t_by_v WHERE v2 = 'a'\", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]],\n cl=ConsistencyLevel.QUORUM\n )\n\n logger.debug('Run global repair on node1')\n node1.repair()\n\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'a'\", cl=ConsistencyLevel.QUORUM)",
"def test_disallowed_queries():\n strings = [\"select * from test times 10\",\n \"select * from test save clusters with threshold .5 as test.csv\",\n \"select * from test given a=5\",\n \"select * from test with confidence .4\",\n \"select a conf .4 from test\",\n \"select a conf .4, b from test\",\n \"simulate a conf .4 from test times 10\",\n \"simulate a conf .4, b from test times 10\",\n \"infer * from test times 10\",\n \"infer typicality from test\",\n \"infer * from test with confidence 1.5\",\n \"simulate typicality from test\",\n \"infer * from test save clusters with threshold .5 as test.csv\",\n \"infer * from test given a=5\",\n \"simulate * from test where a < 4\",\n \"simulate * from test save clusters with threshold .5 as test.csv\",\n \"simulate * from test with confidence .4\",\n \"simulate * from test with 4 samples\",\n \"simulate * from test\",\n \"estimate columns from test with confidence .4\",\n \"estimate columns from test given a=4\",\n \"estimate columns from test times 10\",\n \"summarize estimate columns from test\",\n \"plot estimate columns from test\",\n \"estimate columns from test save clusters with threshold .5 as test.csv\",\n \"estimate pairwise correlation from test where a = b\",\n \"estimate pairwise correlation from test times 10\",\n \"estimate pairwise correlation from test given a = 5\",\n \"estimate pairwise correlation from test with confidence .2\",\n \"estimate pairwise row similarity from test where a = b\",\n \"estimate pairwise row similarity from test times 10\",\n \"estimate pairwise row similarity from test given a = 5\",\n \"estimate pairwise row similarity from test with confidence .2\",\n \"estimate pairwise row similarity from test where a = b\"\n ]\n\n for query_string in strings:\n ast = bql_statement.parseString(query_string,parseAll=True)\n with pytest.raises(AssertionError):\n parser.parse_single_statement(ast)"
]
| [
"0.65079945",
"0.63243395",
"0.62658215",
"0.62626994",
"0.62251323",
"0.61912185",
"0.5928468",
"0.5857377",
"0.5831925",
"0.58248574",
"0.58164155",
"0.5789836",
"0.57417756",
"0.5724001",
"0.57180935",
"0.5706569",
"0.5698504",
"0.565796",
"0.56467897",
"0.56436086",
"0.5614914",
"0.5576445",
"0.55615014",
"0.554848",
"0.5535346",
"0.55308133",
"0.5493871",
"0.5487809",
"0.54858875",
"0.5477936"
]
| 0.7580564 | 0 |
The internal addition of a view over a non-existing table should be ignored CASSANDRA-13737 | def propagate_view_creation_over_non_existing_table(self):
cluster = self.cluster
cluster.populate(3)
cluster.start()
node1, node2, node3 = self.cluster.nodelist()
session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)
create_ks(session, 'ks', 3)
session.execute('CREATE TABLE users (username varchar PRIMARY KEY, state varchar)')
# create a materialized view only in nodes 1 and 2
node3.stop(wait_other_notice=True)
session.execute(('CREATE MATERIALIZED VIEW users_by_state AS '
'SELECT * FROM users WHERE state IS NOT NULL AND username IS NOT NULL '
'PRIMARY KEY (state, username)'))
# drop the base table only in node 3
node1.stop(wait_other_notice=True)
node2.stop(wait_other_notice=True)
node3.start(wait_for_binary_proto=True)
session = self.patient_cql_connection(node3, consistency_level=ConsistencyLevel.QUORUM)
session.execute('DROP TABLE ks.users')
# restart the cluster
cluster.stop()
cluster.start()
# node3 should have received and ignored the creation of the MV over the dropped table
assert node3.grep_log('Not adding view users_by_state because the base table') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _test_no_base_column_in_view_pk_complex_timestamp(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1, node2, node3 = self.cluster.nodelist()\n\n session.execute('USE ks')\n session.execute(\"CREATE TABLE t (k int, c int, a int, b int, e int, f int, primary key(k, c))\")\n session.execute((\"CREATE MATERIALIZED VIEW mv AS SELECT k,c,a,b FROM t \"\n \"WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c, k)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n # update unselected, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 1 SET e=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, 1, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # remove unselected, add selected column, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 2 SET e=null, b=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, 1, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, 1])\n\n # remove selected column, view row is removed\n self.update_view(session, \"UPDATE t USING TIMESTAMP 2 SET e=null, b=null WHERE k=1 AND c=1;\", flush)\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")\n\n # update unselected with ts=3, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET f=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # insert livenesssInfo, view row should be alive\n self.update_view(session, \"INSERT INTO t(k,c) VALUES(1,1) USING TIMESTAMP 3\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # remove unselected, view row should be alive because of base livenessInfo alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET f=null WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # add selected column, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET a=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n\n # update unselected, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 SET f=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n\n # delete with ts=3, view row should be alive due to unselected@ts4\n self.update_view(session, \"DELETE FROM t USING TIMESTAMP 3 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # remove unselected, view row should be removed\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 SET f=null WHERE k=1 AND c=1;\", flush)\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")\n\n # add selected with ts=7, view row is alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 7 SET b=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, 
\"SELECT * FROM t\", [1, 1, None, 1, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, 1])\n\n # remove selected with ts=7, view row is dead\n self.update_view(session, \"UPDATE t USING TIMESTAMP 7 SET b=null WHERE k=1 AND c=1;\", flush)\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")\n\n # add selected with ts=5, view row is alive (selected column should not affects each other)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 5 SET a=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n\n start = time.time()\n # add selected with ttl=30 (we apparently need a long ttl because the flushing etc that self.update_view does can take a long time)\n update_time = self.update_view(session, \"UPDATE t USING TTL 30 SET a=1 WHERE k=1 AND c=1;\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n except AssertionError as ae:\n if (time.time() - start) >= 30:\n pytest.fail(\"Please increase the 30 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n wait_time = update_time + 30 - time.time()\n if wait_time > 0:\n time.sleep(wait_time)\n\n start = time.time()\n # update unselected with ttl=30, view row should be alive\n update_time = self.update_view(session, \"UPDATE t USING TTL 30 SET f=1 WHERE k=1 AND c=1;\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n except AssertionError as ae:\n if (time.time() - start) >= 30:\n pytest.fail(\"Please increase the 30 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n wait_time = update_time + 30 - time.time()\n if wait_time > 0:\n time.sleep(wait_time)\n\n # view row still alive due to base livenessInfo\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")",
"def test_view_delete_with_scope(self):\n table = Table(\n {\"id\": int, \"msg\": str, \"val\": float},\n index=\"id\",\n )\n table.view(\n computed_columns=[\n {\n \"column\": \"inverted\",\n \"computed_function_name\": \"invert\",\n \"inputs\": [\"val\"],\n }\n ],\n columns=[\"inverted\"],\n )\n table.update(\n [\n {\n \"id\": 1,\n \"msg\": \"test\",\n \"val\": 1.0,\n }\n ]\n )",
"def is_db_view(db_table):\n if db_table in postgresql_views:\n return True\n return False",
"def _create_view(self, view, schema=None, config=None):\n viewname, vschema = view[\"__tablename__\"].split(' ')[0], view[\"__schema__\"].split(' ')[0]\n try:\n dve = SQL('NULL from {}.{}').format(Identifier(vschema),\n Identifier(viewname))\n veq = self.__session.query(self._sql_to_string(dve)).limit(1)\n self.__session.execute(veq)\n self._commit()\n except ProgrammingError:\n self._rollback()\n like = text(\"information_schema.routines.routine_name like 'crosstab%'\")\n count = self.__session.query('* FROM information_schema.routines')\n count = count.filter(like).count()\n if int(count) == 0:\n self._create_extension(config)\n self.exschema = 'public'\n else:\n like = text(\"information_schema.routines.routine_name like 'crosstab%'\")\n count = self.__session.query('routine_schema FROM'\n ' information_schema.routines')\n count = count.filter(like).limit(1)\n count = self.__session.execute(count).fetchone()[0]\n self._commit()\n self.exschema = count\n like = text(\"SELECT has_schema_privilege(:exschema, 'USAGE')\")\n like = self.__session.execute(like,\n {\"exschema\": self.exschema}).fetchone()[0]\n self._commit()\n if not like:\n self._grant_access(config)\n viewst, raw = self._sql_to_string(view[\"__statement__\"]), '{}.crosstab'\n defsch = self._sql_to_string(SQL(raw).format(Identifier(schema)))\n exsch = SQL(raw).format(Identifier(self.exschema))\n self.__session.execute(viewst.replace(defsch, self._sql_to_string(exsch)))\n self._commit()\n except Exception:\n self._rollback()\n self._reset_session()\n raise",
"def DEADcreate_v_fix_view():\n sql_view = \"\"\"create or replace view v_fix as\n SELECT \n fix.fix_ident, \n fix.fix_center,\n ST_Y(ST_Transform(fix.fix_center, 4326)) as fix_lat84,\n ST_X(ST_Transform(fix.fix_center, 4326)) as fix_lon84\n \n FROM \n fix\"\"\"\n conf.Cur.execute(sql_view)\n conf.Con.commit()",
"def test_create(self):\n session = self.prepare(user_table=True)\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 1, \"Expecting 1 materialized view == got\" + str(result)",
"def _test_base_column_in_view_pk_commutative_tombstone_(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1 = self.cluster.nodelist()[0]\n\n session.execute('USE ks')\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n for node in self.cluster.nodelist():\n node.nodetool(\"disableautocompaction\")\n\n # sstable 1, Set initial values TS=1\n self.update_view(session, \"INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 1\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, 'a', 3.0])\n\n # sstable 2, change v's value and TS=2, tombstones v=1 and adds v=0 record\n self.update_view(session, \"DELETE FROM t USING TIMESTAMP 2 WHERE id = 1;\", flush)\n assert_none(session, \"SELECT * FROM t_by_v\")\n assert_none(session, \"SELECT * FROM t\")\n\n # sstable 3, tombstones of mv created by base deletion should remain.\n self.update_view(session, \"INSERT INTO t (id, v) VALUES (1, 1) USING TIMESTAMP 3\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None])\n\n # sstable 4, shadow view row (id=1, v=1), insert (id=1, v=2, ts=4)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 set v = 2 WHERE id = 1;\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [2, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 2, None, None])\n\n # sstable 5, shadow view row (id=1, v=2), insert (id=1, v=1 ts=5)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 5 set v = 1 WHERE id = 1;\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None]) # data deleted by row-tombstone@2 should not resurrect\n\n if flush:\n self.cluster.compact()\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None]) # data deleted by row-tombstone@2 should not resurrect\n\n # shadow view row (id=1, v=1)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 5 set v = null WHERE id = 1;\", flush)\n assert_none(session, \"SELECT * FROM t_by_v\")\n assert_one(session, \"SELECT * FROM t\", [1, None, None, None])",
"def create_or_replace_view(self, relation) -> None:\n database = self.quoted(self._correct_case(relation.database))\n schema = self.quoted(self._correct_case(relation.schema))\n ddl_statement = f\"\"\"CREATE OR REPLACE VIEW\n{self.quoted_dot_notation(relation)}\nAS\n{relation.view_ddl}\n\"\"\"\n engine = self.get_connection(database_override=database,\n schema_override=schema)\n try:\n engine.execute(ddl_statement)\n except Exception as exc:\n logger.info(\"Failed to create %s %s:%s\", relation.materialization.name,\n self.quoted_dot_notation(relation),\n exc)\n raise exc\n logger.info('Created relation %s', self.quoted_dot_notation(relation))",
"def addViewToDb(self,name):\n\t\tsql = \"INSERT INTO hudson_views(viewname) VALUES (%s)\"\n\t\tcsr = self.db.cursor()\n\t\tcsr.execute(sql,[name])",
"def create_view(self, repo, view, sql):\n return self.user_con.create_view(\n repo=repo, view=view, sql=sql)",
"def add_views(apps, schema_editor):\n connection = schema_editor.connection\n with connection.cursor() as cur:\n for view in reversed(OCP_ALL_VIEWS):\n LOG.info(f\"\"\"Dropping materialized view \"{view}\" with cascade\"\"\")\n cur.execute(f\"\"\"DROP MATERIALIZED VIEW \"{view}\" CASCADE;\"\"\")\n\n for view in OCP_ALL_VIEWS:\n view_sql = pkgutil.get_data(\"reporting.provider.all.openshift\", f\"sql/views/{view}.sql\")\n view_sql = view_sql.decode(\"utf-8\")\n LOG.info(f\"\"\"Creating materialized view \"{view}\"...\"\"\")\n with connection.cursor() as cursor:\n cursor.execute(view_sql)",
"def view(name, selectable, *, clear: bool = False):\n log.debug('view(%r, clear=%r)', name, clear)\n\n if clear:\n DDL[name] = None, None\n return None\n\n DDL[name] = (CreateView(name, selectable),\n DropView(name))\n\n return make_table(selectable, name=name)",
"def test_add_new_no_dupl_w_optional(self):\n new_df = pd.DataFrame(np.eye(3) * 2, index=range(3, 6),\n columns=self.req_cols + self.opt_cols)\n self.table.add_new(new=new_df)\n self.assertEqual(len(self.table.index), 6)",
"def createview_bad_request():\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n # counting only status 404\n c.execute(\"create view bad_request as select cast(time as date), \"\n \"count(*) as num \"\n \"from log \"\n \"where status = '404 NOT FOUND' \"\n \"group by cast(time as date) \"\n \"order by cast(time as date)\")\n db.commit()\n db.close()",
"def creates_view(self):\n return self.statements[0].creates_view()",
"def test_view_tombstone(self):\n\n self.prepare(rf=3, options={'hinted_handoff_enabled': False})\n node1, node2, node3 = self.cluster.nodelist()\n\n session = self.patient_exclusive_cql_connection(node1)\n session.max_trace_wait = 120\n session.execute('USE ks')\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n # Set initial values TS=0, verify\n session.execute(SimpleStatement(\"INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 0\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'a', 3.0]\n )\n\n session.execute(SimpleStatement(\"INSERT INTO t (id, v2) VALUES (1, 'b') USING TIMESTAMP 1\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0]\n )\n\n # change v's value and TS=3, tombstones v=1 and adds v=0 record\n session.execute(SimpleStatement(\"UPDATE t USING TIMESTAMP 3 SET v = 0 WHERE id = 1\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n\n assert_none(session, \"SELECT * FROM t_by_v WHERE v = 1\")\n\n logger.debug('Shutdown node2')\n node2.stop(wait_other_notice=True)\n\n session.execute(SimpleStatement(\"UPDATE t USING TIMESTAMP 4 SET v = 1 WHERE id = 1\",\n consistency_level=ConsistencyLevel.QUORUM))\n self._replay_batchlogs()\n\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0]\n )\n\n node2.start(wait_for_binary_proto=True)\n\n # We should get a digest mismatch\n query = SimpleStatement(\"SELECT * FROM t_by_v WHERE v = 1\",\n consistency_level=ConsistencyLevel.ALL)\n\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), True)\n\n # We should not get a digest mismatch the second time\n query = SimpleStatement(\"SELECT * FROM t_by_v WHERE v = 1\", consistency_level=ConsistencyLevel.ALL)\n\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n\n # Verify values one last time\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0],\n cl=ConsistencyLevel.ALL\n )",
"def _test_base_column_in_view_pk_complex_timestamp(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1, node2, node3 = self.cluster.nodelist()\n\n session.execute('USE ks')\n session.execute(\"CREATE TABLE t (k int PRIMARY KEY, a int, b int)\")\n session.execute((\"CREATE MATERIALIZED VIEW mv AS SELECT * FROM t \"\n \"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n # Set initial values TS=1\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 1, 1) USING TIMESTAMP 1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1])\n\n # increase b ts to 10\n self.update_view(session, \"UPDATE t USING TIMESTAMP 10 SET b = 2 WHERE k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 1, 2, 10])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 1, 2, 10])\n\n # switch entries. shadow a = 1, insert a = 2\n self.update_view(session, \"UPDATE t USING TIMESTAMP 2 SET a = 2 WHERE k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 2, 2, 10])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 2, 2, 10])\n\n # switch entries. shadow a = 2, insert a = 1\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET a = 1 WHERE k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 1, 2, 10])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 1, 2, 10])\n\n # switch entries. shadow a = 1, insert a = 2\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 SET a = 2 WHERE k = 1;\", flush, compact=True)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 2, 2, 10])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 2, 2, 10])\n\n # able to shadow view row even if base-column in view pk's ts is smaller than row timestamp\n # set row TS = 20, a@6, b@20\n self.update_view(session, \"DELETE FROM t USING TIMESTAMP 5 where k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, None, 2, 10])\n assert_none(session, \"SELECT k,a,b,writetime(b) FROM mv\")\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 1, 1) USING TIMESTAMP 6;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 1, 2, 10])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 1, 2, 10])\n self.update_view(session, \"INSERT INTO t (k, b) VALUES (1, 1) USING TIMESTAMP 20;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 1, 1, 20])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 1, 1, 20])\n\n # switch entries. shadow a = 1, insert a = 2\n self.update_view(session, \"UPDATE t USING TIMESTAMP 7 SET a = 2 WHERE k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(a),writetime(b) FROM t\", [1, 2, 1, 7, 20])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 2, 1, 20])\n\n # switch entries. 
shadow a = 2, insert a = 1\n self.update_view(session, \"UPDATE t USING TIMESTAMP 8 SET a = 1 WHERE k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(a),writetime(b) FROM t\", [1, 1, 1, 8, 20])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 1, 1, 20])\n\n # create another view row\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (2, 2, 2);\", flush)\n assert_one(session, \"SELECT k,a,b FROM t WHERE k = 2\", [2, 2, 2])\n assert_one(session, \"SELECT k,a,b FROM mv WHERE k = 2\", [2, 2, 2])\n\n # stop node2, node3\n logger.debug('Shutdown node2')\n node2.stop(wait_other_notice=True)\n logger.debug('Shutdown node3')\n node3.stop(wait_other_notice=True)\n # shadow a = 1, create a = 2\n query = SimpleStatement(\"UPDATE t USING TIMESTAMP 9 SET a = 2 WHERE k = 1\", consistency_level=ConsistencyLevel.ONE)\n self.update_view(session, query, flush)\n # shadow (a=2, k=2) after 3 second\n query = SimpleStatement(\"UPDATE t USING TTL 3 SET a = 2 WHERE k = 2\", consistency_level=ConsistencyLevel.ONE)\n self.update_view(session, query, flush)\n\n logger.debug('Starting node2')\n node2.start(wait_for_binary_proto=True)\n logger.debug('Starting node3')\n node3.start(wait_for_binary_proto=True)\n\n # For k = 1 & a = 1, We should get a digest mismatch of tombstones and repaired\n query = SimpleStatement(\"SELECT * FROM mv WHERE k = 1 AND a = 1\", consistency_level=ConsistencyLevel.ALL)\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), True)\n assert 0 == len(result.current_rows)\n\n # For k = 1 & a = 1, second time no digest mismatch\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n assert_none(session, \"SELECT * FROM mv WHERE k = 1 AND a = 1\")\n assert 0 == len(result.current_rows)\n\n # For k = 1 & a = 2, We should get a digest mismatch of data and repaired for a = 2\n query = SimpleStatement(\"SELECT * FROM mv WHERE k = 1 AND a = 2\", consistency_level=ConsistencyLevel.ALL)\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), True)\n assert 1 == len(result.current_rows)\n\n # For k = 1 & a = 2, second time no digest mismatch\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n assert 1 == len(result.current_rows)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv WHERE k = 1\", [1, 2, 1, 20])\n\n time.sleep(3)\n # For k = 2 & a = 2, We should get a digest mismatch of expired and repaired\n query = SimpleStatement(\"SELECT * FROM mv WHERE k = 2 AND a = 2\", consistency_level=ConsistencyLevel.ALL)\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), True)\n logger.debug(result.current_rows)\n assert 0 == len(result.current_rows)\n\n # For k = 2 & a = 2, second time no digest mismatch\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n assert 0 == len(result.current_rows)",
"def createAuthorArticleView():\n query = \"\"\"\n CREATE TEMPORARY VIEW author_article_view AS\n SELECT authors.name AS author,\n articles.slug AS slug,\n articles.title AS title\n FROM articles LEFT JOIN authors\n ON articles.author=authors.id;\n \"\"\"\n connection.cursor().execute(query)",
"def _create_or_alter_view(self, survey_data):\n self.log.info(\"Creating or altering view vw_AllSurveyData \")\n edit_view = self._get_query('edit_view') + \"( \" + survey_data + \" )\"\n self.db.execute_query(edit_view)\n self.log.info(\"View was edited successfully\")",
"def add_view(self, schema, create=True):\n if not constants.NAME_RX.match(schema[\"name\"]):\n raise ValueError(\"invalid view name\")\n if utils.name_in_nocase(schema[\"name\"], self.db[\"tables\"]):\n raise ValueError(\"name is already in use for a table\")\n if utils.name_in_nocase(schema[\"name\"], self.db[\"views\"]):\n raise ValueError(\"name is already in use for a view\")\n if create:\n sql = 'CREATE VIEW \"%s\" AS %s' % (\n schema[\"name\"],\n dbshare.query.get_sql_statement(schema[\"query\"]),\n )\n self.dbcnx.execute(sql)\n cursor = self.dbcnx.cursor()\n try:\n sql = 'PRAGMA table_info(\"%s\")' % schema[\"name\"]\n cursor.execute(sql)\n except sqlite3.Error: # Invalid view\n sql = 'DROP VIEW \"%s\"' % schema[\"name\"]\n cursor.execute(sql)\n raise ValueError(\"invalid view; maybe non-existent column?\")\n # Source names considering quotes and disregarding AS part, if any.\n schema[\"sources\"] = dbshare.query.get_from_sources(schema[\"query\"][\"from\"])\n schema[\"columns\"] = [{\"name\": row[1], \"type\": row[2]} for row in cursor]\n sql = \"INSERT INTO %s (name, schema) VALUES (?,?)\" % constants.VIEWS\n with self.dbcnx:\n self.dbcnx.execute(sql, (schema[\"name\"], json.dumps(schema)))\n self.db[\"views\"][schema[\"name\"]] = schema",
"def test_secondary_index(self):\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n assert_invalid(session, \"CREATE INDEX ON t_by_v (v2)\",\n \"Secondary indexes are not supported on materialized views\")",
"def test_migrate_view_fields(self):\n self.test_view = RecordView.create(\n self.testcoll, test_view_id, test_view_create_values\n )\n migrate_coll_data(self.testcoll)\n # Read field definition and check for inline field list\n view_data = self.check_entity_values(\n \"_view\", test_view_id, check_values=test_view_migrated_values\n )\n return",
"def test_table_false_positives(self):\n pass",
"def test_duplicate_key_raises_error(empty_viewset):\n viewset = empty_viewset\n system1 = viewset.model.add_software_system(name=\"sys1\")\n\n viewset.create_container_view(\n key=\"container1\", description=\"container\", software_system=system1\n )\n with pytest.raises(ValueError, match=\"View already exists\"):\n viewset.create_container_view(\n key=\"container1\", description=\"container\", software_system=system1\n )",
"def example():\n joined_table = [[1900, 170, 10], [0, 120, 10], [0, 120, 100], [2010, 120, 10], [1650, 200, 10]]\n remove_columns = [2]\n example_table = [[1900, 170], [0, 120]]\n\n annotated_table = query.decorate_table(example_table, remove_columns, joined_table)\n\n joined_schema = [\"I SHOULD NOT BE VISIBLE\", \"birth\", \"height\"] # the decorator column should never be in the output\n tree = decision_tree.make_tree(annotated_table)\n\n print(tree)\n print(query.where_segment(joined_schema, tree))",
"def column_mapped_to_nonexistant_field():\r\n class FaultyPersonTable(PersonTable):\r\n missing = tables.Column()\r\n\r\n table = FaultyPersonTable(Person.objects.all())\r\n table.as_html() # the bug would cause this to raise FieldDoesNotExist\r",
"def _add_pk(self, conn, *, tblname, pk_columns):\n idx_metadatum = partition_utils.IndexMetadatum(idx_cols=pk_columns, is_unique=True)\n partition_utils.add_indices(conn, tbl_name=tblname, idx_metadata=[idx_metadatum])",
"def test_add_node_after_very_wide_mv(self):\n\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int, v int, PRIMARY KEY (id, v))\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n for i in range(5):\n for j in range(5000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=j))\n\n self.cluster.flush()\n\n for i in range(5):\n for j in range(5000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n node4 = new_node(self.cluster, data_center=\"dc1\")\n node4.set_configuration_options(values={'max_mutation_size_in_kb': 25}) # CASSANDRA-11670\n logger.debug(\"Start join at {}\".format(time.strftime(\"%H:%M:%S\")))\n node4.start(wait_for_binary_proto=True, jvm_args=[\"-Dcassandra.migration_task_wait_in_seconds={}\".format(MIGRATION_WAIT)])\n\n session2 = self.patient_exclusive_cql_connection(node4)\n\n for i in range(5):\n for j in range(5000):\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n for i in range(5):\n for j in range(5100):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=j))\n\n for i in range(5):\n for j in range(5100):\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])",
"def test_missing_foreign_key_indices(db_session):\n\n from sqlalchemy_utils.functions import non_indexed_foreign_keys\n from kotti import metadata\n\n assert non_indexed_foreign_keys(metadata) == {}",
"def create_view(self, start: int = 0, stop: int = 0):\n stmt = f\"\"\"create or replace view {self._view_name} as {self.qry}\"\"\"\n if start != 0 or stop != 0:\n sql = stmt + f\" limit {stop} offset {start}\"\n else:\n sql = stmt\n self.execquery(sql)"
]
| [
"0.643317",
"0.5908668",
"0.58885986",
"0.5843251",
"0.5790858",
"0.5722778",
"0.5717566",
"0.5585914",
"0.55839574",
"0.55772454",
"0.55742663",
"0.554627",
"0.55317867",
"0.5525174",
"0.55066884",
"0.54840314",
"0.5459816",
"0.5369781",
"0.5342012",
"0.5285785",
"0.5284631",
"0.52822316",
"0.5266634",
"0.5253267",
"0.5244213",
"0.51979876",
"0.51702744",
"0.5151588",
"0.5148561",
"0.51300687"
]
| 0.6953988 | 0 |
Fails base table write before or after applying views. Restart node and replay commit and batchlog. Check that base and views are present. CASSANDRA-13069 | def _test_base_view_consistency_on_crash(self, fail_phase):
        self.cluster.set_batch_commitlog(enabled=True, use_batch_window=self.cluster.version() < '5.0')
self.fixture_dtest_setup.ignore_log_patterns = [r'Dummy failure', r"Failed to force-recycle all segments"]
self.prepare(rf=1, install_byteman=True)
node1, node2, node3 = self.cluster.nodelist()
session = self.patient_exclusive_cql_connection(node1)
session.execute('USE ks')
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
session.cluster.control_connection.wait_for_schema_agreement()
logger.debug('Make node1 fail {} view writes'.format(fail_phase))
node1.byteman_submit([mk_bman_path('fail_{}_view_write.btm'.format(fail_phase))])
logger.debug('Write 1000 rows - all node1 writes should fail')
failed = False
for i in range(1, 1000):
try:
session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) USING TIMESTAMP {v}".format(v=i))
except WriteFailure:
failed = True
assert failed, "Should fail at least once."
assert node1.grep_log("Dummy failure"), "Should throw Dummy failure"
missing_entries = 0
session = self.patient_exclusive_cql_connection(node1)
session.execute('USE ks')
for i in range(1, 1000):
view_entry = rows_to_list(session.execute(SimpleStatement("SELECT * FROM t_by_v WHERE id = {} AND v = {}".format(i, i),
consistency_level=ConsistencyLevel.ONE)))
base_entry = rows_to_list(session.execute(SimpleStatement("SELECT * FROM t WHERE id = {}".format(i),
consistency_level=ConsistencyLevel.ONE)))
if not base_entry:
missing_entries += 1
if not view_entry:
missing_entries += 1
logger.debug("Missing entries {}".format(missing_entries))
assert missing_entries > 0
logger.debug('Restarting node1 to ensure commit log is replayed')
node1.stop(wait_other_notice=True)
        # Set batchlog replay_timeout_in_ms=1 so we can ensure the batchlog will be replayed below
node1.start(jvm_args=["-Dcassandra.batchlog.replay_timeout_in_ms=1"])
logger.debug('Replay batchlogs')
time.sleep(0.001) # Wait batchlog.replay_timeout_in_ms=1 (ms)
self._replay_batchlogs()
logger.debug('Verify that both the base table entry and view are present after commit and batchlog replay')
session = self.patient_exclusive_cql_connection(node1)
session.execute('USE ks')
for i in range(1, 1000):
view_entry = rows_to_list(session.execute(SimpleStatement("SELECT * FROM t_by_v WHERE id = {} AND v = {}".format(i, i),
consistency_level=ConsistencyLevel.ONE)))
base_entry = rows_to_list(session.execute(SimpleStatement("SELECT * FROM t WHERE id = {}".format(i),
consistency_level=ConsistencyLevel.ONE)))
assert base_entry, "Both base {} and view entry {} should exist.".format(base_entry, view_entry)
assert view_entry, "Both base {} and view entry {} should exist.".format(base_entry, view_entry) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def propagate_view_creation_over_non_existing_table(self):\n\n cluster = self.cluster\n cluster.populate(3)\n cluster.start()\n node1, node2, node3 = self.cluster.nodelist()\n session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)\n create_ks(session, 'ks', 3)\n\n session.execute('CREATE TABLE users (username varchar PRIMARY KEY, state varchar)')\n\n # create a materialized view only in nodes 1 and 2\n node3.stop(wait_other_notice=True)\n session.execute(('CREATE MATERIALIZED VIEW users_by_state AS '\n 'SELECT * FROM users WHERE state IS NOT NULL AND username IS NOT NULL '\n 'PRIMARY KEY (state, username)'))\n\n # drop the base table only in node 3\n node1.stop(wait_other_notice=True)\n node2.stop(wait_other_notice=True)\n node3.start(wait_for_binary_proto=True)\n session = self.patient_cql_connection(node3, consistency_level=ConsistencyLevel.QUORUM)\n session.execute('DROP TABLE ks.users')\n\n # restart the cluster\n cluster.stop()\n cluster.start()\n\n # node3 should have received and ignored the creation of the MV over the dropped table\n assert node3.grep_log('Not adding view users_by_state because the base table')",
"def test_really_complex_repair(self):\n session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)\n node1, node2, node3, node4, node5 = self.cluster.nodelist()\n\n # we create the base table with gc_grace_seconds=5 so batchlog will expire after 5 seconds\n session.execute(\"CREATE TABLE ks.t (id int, v int, v2 text, v3 decimal, PRIMARY KEY(id, v, v2))\"\n \"WITH gc_grace_seconds = 1\")\n session.execute((\"CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL AND v IS NOT NULL AND \"\n \"v2 IS NOT NULL PRIMARY KEY (v2, v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Shutdown node2 and node3')\n node2.stop(wait_other_notice=True)\n node3.stop(wait_other_notice=True)\n\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0)\")\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'a', 3.0)\")\n self._replay_batchlogs()\n logger.debug('Verify the data in the MV on node1 with CL=ONE')\n assert_all(session, \"SELECT * FROM ks.t_by_v WHERE v2 = 'a'\", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]])\n\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'b', 3.0)\")\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'b', 3.0)\")\n self._replay_batchlogs()\n logger.debug('Verify the data in the MV on node1 with CL=ONE')\n assert_all(session, \"SELECT * FROM ks.t_by_v WHERE v2 = 'b'\", [['b', 1, 1, 3.0], ['b', 2, 2, 3.0]])\n\n session.shutdown()\n\n logger.debug('Shutdown node1, node4 and node5')\n node1.stop()\n node4.stop()\n node5.stop()\n\n logger.debug('Start nodes 2 and 3')\n node2.start()\n node3.start(wait_for_binary_proto=True)\n\n session2 = self.patient_cql_connection(node2)\n session2.execute('USE ks')\n\n logger.debug('Verify the data in the MV on node2 with CL=ONE. 
No rows should be found.')\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'a'\")\n\n logger.debug('Write new data in node2 that overlap those in node1')\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'c', 3.0)\")\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'c', 3.0)\")\n self._replay_batchlogs()\n assert_all(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'c'\", [['c', 1, 1, 3.0], ['c', 2, 2, 3.0]])\n\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'd', 3.0)\")\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'd', 3.0)\")\n self._replay_batchlogs()\n assert_all(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'd'\", [['d', 1, 1, 3.0], ['d', 2, 2, 3.0]])\n\n logger.debug(\"Composite delete of everything\")\n session2.execute(\"DELETE FROM ks.t WHERE id = 1 and v = 1\")\n session2.execute(\"DELETE FROM ks.t WHERE id = 2 and v = 2\")\n self._replay_batchlogs()\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'c'\")\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'd'\")\n\n logger.debug('Wait for batchlogs to expire from node2 and node3')\n time.sleep(5)\n\n logger.debug('Start remaining nodes')\n node1.start(wait_for_binary_proto=True)\n node4.start(wait_for_binary_proto=True)\n node5.start(wait_for_binary_proto=True)\n\n # at this point the data isn't repaired so we have an inconsistency.\n # this value should return None\n assert_all(\n session2,\n \"SELECT * FROM ks.t_by_v WHERE v2 = 'a'\", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]],\n cl=ConsistencyLevel.QUORUM\n )\n\n logger.debug('Run global repair on node1')\n node1.repair()\n\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'a'\", cl=ConsistencyLevel.QUORUM)",
"def test_view_metadata_cleanup(self):\n session = self.prepare(rf=2, nodes=2)\n\n def populate_data(session, rows):\n logger.debug(\"populate base data\")\n for v in range(rows):\n session.execute(\"INSERT INTO t(k,c,a,b,e,f) VALUES({v},{v},{v},{v},{v},{v})\".format(v=v))\n\n def verify_data(session, rows, views):\n logger.debug(\"verify view data\")\n for v in range(rows):\n for view in range(views):\n assert_one(session, \"SELECT * FROM mv{} WHERE k={v} AND c={v}\".format(view, v=v), [v, v, v, v, v, v])\n\n def create_keyspace(session, ks=\"ks1\", rf=2):\n create_ks(session, ks, rf)\n\n def create_table(session):\n logger.debug(\"create base table\")\n session.execute(\"CREATE TABLE t (k int, c int, a int, b int, e int, f int, primary key(k, c))\")\n\n def create_views(session, views, keyspace=\"ks1\"):\n logger.debug(\"create view\")\n for view in range(views):\n session.execute(\"CREATE MATERIALIZED VIEW mv{} AS SELECT * FROM t \"\n \"WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c,k)\".format(view),\n timeout=60)\n self._wait_for_view(keyspace, \"mv{}\".format(view))\n\n def drop_keyspace(session, keyspace=\"ks1\"):\n logger.debug(\"drop keyspace {}\".format(keyspace))\n session.execute(\"DROP KEYSPACE IF EXISTS {}\".format(keyspace),\n timeout=60)\n\n def drop_views(session, views):\n logger.debug(\"drop all views\")\n for view in range(views):\n session.execute(\"DROP MATERIALIZED VIEW IF EXISTS mv{}\".format(view))\n\n rows = 100\n views = 5\n\n create_keyspace(session)\n create_table(session)\n populate_data(session, rows)\n create_views(session, views)\n verify_data(session, rows, views)\n\n self._assert_view_meta(session, views)\n drop_keyspace(session)\n self._assert_view_meta(session, views, exists=False)\n\n create_keyspace(session)\n create_table(session)\n populate_data(session, rows)\n create_views(session, views)\n verify_data(session, rows, views)\n\n self._assert_view_meta(session, views)\n drop_views(session, views)\n self._assert_view_meta(session, views, exists=False)",
"def _test_base_column_in_view_pk_commutative_tombstone_(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1 = self.cluster.nodelist()[0]\n\n session.execute('USE ks')\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n for node in self.cluster.nodelist():\n node.nodetool(\"disableautocompaction\")\n\n # sstable 1, Set initial values TS=1\n self.update_view(session, \"INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 1\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, 'a', 3.0])\n\n # sstable 2, change v's value and TS=2, tombstones v=1 and adds v=0 record\n self.update_view(session, \"DELETE FROM t USING TIMESTAMP 2 WHERE id = 1;\", flush)\n assert_none(session, \"SELECT * FROM t_by_v\")\n assert_none(session, \"SELECT * FROM t\")\n\n # sstable 3, tombstones of mv created by base deletion should remain.\n self.update_view(session, \"INSERT INTO t (id, v) VALUES (1, 1) USING TIMESTAMP 3\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None])\n\n # sstable 4, shadow view row (id=1, v=1), insert (id=1, v=2, ts=4)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 set v = 2 WHERE id = 1;\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [2, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 2, None, None])\n\n # sstable 5, shadow view row (id=1, v=2), insert (id=1, v=1 ts=5)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 5 set v = 1 WHERE id = 1;\", flush)\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None]) # data deleted by row-tombstone@2 should not resurrect\n\n if flush:\n self.cluster.compact()\n assert_one(session, \"SELECT * FROM t_by_v\", [1, 1, None, None])\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None]) # data deleted by row-tombstone@2 should not resurrect\n\n # shadow view row (id=1, v=1)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 5 set v = null WHERE id = 1;\", flush)\n assert_none(session, \"SELECT * FROM t_by_v\")\n assert_one(session, \"SELECT * FROM t\", [1, None, None, None])",
"def upgrade():\n op.create_table(\n \"dag_run_note\",\n sa.Column(\"user_id\", sa.Integer(), nullable=True),\n sa.Column(\"dag_run_id\", sa.Integer(), nullable=False),\n sa.Column(\n \"content\", sa.String(length=1000).with_variant(sa.Text(length=1000), \"mysql\"), nullable=True\n ),\n sa.Column(\"created_at\", UtcDateTime(timezone=True), nullable=False),\n sa.Column(\"updated_at\", UtcDateTime(timezone=True), nullable=False),\n sa.ForeignKeyConstraint(\n (\"dag_run_id\",), [\"dag_run.id\"], name=\"dag_run_note_dr_fkey\", ondelete=\"CASCADE\"\n ),\n sa.ForeignKeyConstraint((\"user_id\",), [\"ab_user.id\"], name=\"dag_run_note_user_fkey\"),\n sa.PrimaryKeyConstraint(\"dag_run_id\", name=op.f(\"dag_run_note_pkey\")),\n )\n\n op.create_table(\n \"task_instance_note\",\n sa.Column(\"user_id\", sa.Integer(), nullable=True),\n sa.Column(\"task_id\", StringID(), nullable=False),\n sa.Column(\"dag_id\", StringID(), nullable=False),\n sa.Column(\"run_id\", StringID(), nullable=False),\n sa.Column(\"map_index\", sa.Integer(), nullable=False),\n sa.Column(\n \"content\", sa.String(length=1000).with_variant(sa.Text(length=1000), \"mysql\"), nullable=True\n ),\n sa.Column(\"created_at\", UtcDateTime(timezone=True), nullable=False),\n sa.Column(\"updated_at\", UtcDateTime(timezone=True), nullable=False),\n sa.PrimaryKeyConstraint(\n \"task_id\", \"dag_id\", \"run_id\", \"map_index\", name=op.f(\"task_instance_note_pkey\")\n ),\n sa.ForeignKeyConstraint(\n (\"dag_id\", \"task_id\", \"run_id\", \"map_index\"),\n [\n \"task_instance.dag_id\",\n \"task_instance.task_id\",\n \"task_instance.run_id\",\n \"task_instance.map_index\",\n ],\n name=\"task_instance_note_ti_fkey\",\n ondelete=\"CASCADE\",\n ),\n sa.ForeignKeyConstraint((\"user_id\",), [\"ab_user.id\"], name=\"task_instance_note_user_fkey\"),\n )",
"def test_view_tombstone(self):\n\n self.prepare(rf=3, options={'hinted_handoff_enabled': False})\n node1, node2, node3 = self.cluster.nodelist()\n\n session = self.patient_exclusive_cql_connection(node1)\n session.max_trace_wait = 120\n session.execute('USE ks')\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n # Set initial values TS=0, verify\n session.execute(SimpleStatement(\"INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 0\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'a', 3.0]\n )\n\n session.execute(SimpleStatement(\"INSERT INTO t (id, v2) VALUES (1, 'b') USING TIMESTAMP 1\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0]\n )\n\n # change v's value and TS=3, tombstones v=1 and adds v=0 record\n session.execute(SimpleStatement(\"UPDATE t USING TIMESTAMP 3 SET v = 0 WHERE id = 1\",\n consistency_level=ConsistencyLevel.ALL))\n self._replay_batchlogs()\n\n assert_none(session, \"SELECT * FROM t_by_v WHERE v = 1\")\n\n logger.debug('Shutdown node2')\n node2.stop(wait_other_notice=True)\n\n session.execute(SimpleStatement(\"UPDATE t USING TIMESTAMP 4 SET v = 1 WHERE id = 1\",\n consistency_level=ConsistencyLevel.QUORUM))\n self._replay_batchlogs()\n\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0]\n )\n\n node2.start(wait_for_binary_proto=True)\n\n # We should get a digest mismatch\n query = SimpleStatement(\"SELECT * FROM t_by_v WHERE v = 1\",\n consistency_level=ConsistencyLevel.ALL)\n\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), True)\n\n # We should not get a digest mismatch the second time\n query = SimpleStatement(\"SELECT * FROM t_by_v WHERE v = 1\", consistency_level=ConsistencyLevel.ALL)\n\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n\n # Verify values one last time\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = 1\",\n [1, 1, 'b', 3.0],\n cl=ConsistencyLevel.ALL\n )",
"def test_complex_repair(self):\n session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)\n node1, node2, node3, node4, node5 = self.cluster.nodelist()\n\n # we create the base table with gc_grace_seconds=5 so batchlog will expire after 5 seconds\n session.execute(\"CREATE TABLE ks.t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\"\n \"WITH gc_grace_seconds = 5\")\n session.execute((\"CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Shutdown node2 and node3')\n node2.stop()\n node3.stop(wait_other_notice=True)\n\n logger.debug('Write initial data to node1 (will be replicated to node4 and node5)')\n for i in range(1000):\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)\".format(v=i))\n\n logger.debug('Verify the data in the MV on node1 with CL=ONE')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )\n\n logger.debug('Close connection to node1')\n session.cluster.shutdown()\n logger.debug('Shutdown node1, node4 and node5')\n node1.stop()\n node4.stop()\n node5.stop()\n\n logger.debug('Start nodes 2 and 3')\n node2.start()\n node3.start(wait_for_binary_proto=True)\n\n session2 = self.patient_cql_connection(node2)\n\n logger.debug('Verify the data in the MV on node2 with CL=ONE. No rows should be found.')\n for i in range(1000):\n assert_none(\n session2,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(i)\n )\n\n logger.debug('Write new data in node2 and node3 that overlap those in node1, node4 and node5')\n for i in range(1000):\n # we write i*2 as value, instead of i\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)\".format(v=i * 2))\n\n logger.debug('Verify the new data in the MV on node2 with CL=ONE')\n for i in range(1000):\n v = i * 2\n assert_one(\n session2,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(v),\n [v, v, 'a', 3.0]\n )\n\n logger.debug('Wait for batchlogs to expire from node2 and node3')\n time.sleep(5)\n\n logger.debug('Start remaining nodes')\n node1.start(wait_for_binary_proto=True)\n node4.start(wait_for_binary_proto=True)\n node5.start(wait_for_binary_proto=True)\n\n session = self.patient_cql_connection(node1)\n\n logger.debug('Read data from MV at QUORUM (old data should be returned)')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0],\n cl=ConsistencyLevel.QUORUM\n )\n\n logger.debug('Run global repair on node1')\n node1.repair()\n\n logger.debug('Read data from MV at quorum (new data should be returned after repair)')\n for i in range(1000):\n v = i * 2\n assert_one(\n session,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(v),\n [v, v, 'a', 3.0],\n cl=ConsistencyLevel.QUORUM\n )",
"def _base_replica_repair_test(self, fail_mv_lock=False):\n\n self.prepare(rf=3)\n node1, node2, node3 = self.cluster.nodelist()\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Write initial data')\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)\".format(v=i))\n\n self._replay_batchlogs()\n\n logger.debug('Verify the data in the MV with CL=ALL')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0],\n cl=ConsistencyLevel.ALL\n )\n\n logger.debug('Shutdown node1')\n node1.stop(wait_other_notice=True)\n logger.debug('Delete node1 data')\n node1.clear(clear_all=True)\n\n jvm_args = []\n if fail_mv_lock:\n if self.cluster.version() >= LooseVersion('3.10'): # CASSANDRA-10134\n jvm_args = ['-Dcassandra.allow_unsafe_replace=true', '-Dcassandra.replace_address={}'.format(node1.address())]\n jvm_args.append(\"-Dcassandra.test.fail_mv_locks_count=1000\")\n # this should not make Keyspace.apply throw WTE on failure to acquire lock\n node1.set_configuration_options(values={'write_request_timeout_in_ms': 100})\n logger.debug('Restarting node1 with jvm_args={}'.format(jvm_args))\n node1.start(wait_for_binary_proto=True, jvm_args=jvm_args)\n logger.debug('Shutdown node2 and node3')\n node2.stop(wait_other_notice=True)\n node3.stop(wait_other_notice=True)\n\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n\n logger.debug('Verify that there is no data on node1')\n for i in range(1000):\n assert_none(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i)\n )\n\n logger.debug('Restarting node2 and node3')\n node2.start(wait_for_binary_proto=True)\n node3.start(wait_for_binary_proto=True)\n\n # Just repair the base replica\n logger.debug('Starting repair on node1')\n node1.nodetool(\"repair ks t\")\n\n logger.debug('Verify data with cl=ALL')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )",
"def _simple_repair_test(self, repair_base=False, repair_view=False):\n\n session = self.prepare(rf=3, options={'hinted_handoff_enabled': False})\n node1, node2, node3 = self.cluster.nodelist()\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Shutdown node2')\n node2.stop(wait_other_notice=True)\n\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)\".format(v=i))\n\n self._replay_batchlogs()\n\n logger.debug('Verify the data in the MV with CL=ONE')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )\n\n logger.debug('Verify the data in the MV with CL=ALL. All should be unavailable.')\n for i in range(1000):\n statement = SimpleStatement(\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n consistency_level=ConsistencyLevel.ALL\n )\n\n assert_unavailable(\n session.execute,\n statement\n )\n\n logger.debug('Start node2, and repair')\n node2.start(wait_for_binary_proto=True)\n if repair_base:\n node1.nodetool(\"repair ks t\")\n if repair_view:\n node1.nodetool(\"repair ks t_by_v\")\n\n logger.debug('Verify the data in the MV with CL=ALL. All should be available now and no digest mismatch')\n for i in range(1000):\n query = SimpleStatement(\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n consistency_level=ConsistencyLevel.ALL\n )\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n assert self._rows_to_list(result.current_rows), [[i, i, 'a' == 3.0]]",
"def test_add_node_after_very_wide_mv(self):\n\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int, v int, PRIMARY KEY (id, v))\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n for i in range(5):\n for j in range(5000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=j))\n\n self.cluster.flush()\n\n for i in range(5):\n for j in range(5000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n node4 = new_node(self.cluster, data_center=\"dc1\")\n node4.set_configuration_options(values={'max_mutation_size_in_kb': 25}) # CASSANDRA-11670\n logger.debug(\"Start join at {}\".format(time.strftime(\"%H:%M:%S\")))\n node4.start(wait_for_binary_proto=True, jvm_args=[\"-Dcassandra.migration_task_wait_in_seconds={}\".format(MIGRATION_WAIT)])\n\n session2 = self.patient_exclusive_cql_connection(node4)\n\n for i in range(5):\n for j in range(5000):\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n for i in range(5):\n for j in range(5100):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=j))\n\n for i in range(5):\n for j in range(5100):\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])",
"def _test_no_base_column_in_view_pk_complex_timestamp(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1, node2, node3 = self.cluster.nodelist()\n\n session.execute('USE ks')\n session.execute(\"CREATE TABLE t (k int, c int, a int, b int, e int, f int, primary key(k, c))\")\n session.execute((\"CREATE MATERIALIZED VIEW mv AS SELECT k,c,a,b FROM t \"\n \"WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c, k)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n # update unselected, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 1 SET e=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, 1, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # remove unselected, add selected column, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 2 SET e=null, b=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, 1, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, 1])\n\n # remove selected column, view row is removed\n self.update_view(session, \"UPDATE t USING TIMESTAMP 2 SET e=null, b=null WHERE k=1 AND c=1;\", flush)\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")\n\n # update unselected with ts=3, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET f=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # insert livenesssInfo, view row should be alive\n self.update_view(session, \"INSERT INTO t(k,c) VALUES(1,1) USING TIMESTAMP 3\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # remove unselected, view row should be alive because of base livenessInfo alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET f=null WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # add selected column, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET a=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n\n # update unselected, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 SET f=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n\n # delete with ts=3, view row should be alive due to unselected@ts4\n self.update_view(session, \"DELETE FROM t USING TIMESTAMP 3 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # remove unselected, view row should be removed\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 SET f=null WHERE k=1 AND c=1;\", flush)\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")\n\n # add selected with ts=7, view row is alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 7 SET b=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, 
\"SELECT * FROM t\", [1, 1, None, 1, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, 1])\n\n # remove selected with ts=7, view row is dead\n self.update_view(session, \"UPDATE t USING TIMESTAMP 7 SET b=null WHERE k=1 AND c=1;\", flush)\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")\n\n # add selected with ts=5, view row is alive (selected column should not affects each other)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 5 SET a=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n\n start = time.time()\n # add selected with ttl=30 (we apparently need a long ttl because the flushing etc that self.update_view does can take a long time)\n update_time = self.update_view(session, \"UPDATE t USING TTL 30 SET a=1 WHERE k=1 AND c=1;\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n except AssertionError as ae:\n if (time.time() - start) >= 30:\n pytest.fail(\"Please increase the 30 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n wait_time = update_time + 30 - time.time()\n if wait_time > 0:\n time.sleep(wait_time)\n\n start = time.time()\n # update unselected with ttl=30, view row should be alive\n update_time = self.update_view(session, \"UPDATE t USING TTL 30 SET f=1 WHERE k=1 AND c=1;\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n except AssertionError as ae:\n if (time.time() - start) >= 30:\n pytest.fail(\"Please increase the 30 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n wait_time = update_time + 30 - time.time()\n if wait_time > 0:\n time.sleep(wait_time)\n\n # view row still alive due to base livenessInfo\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")",
"def migrate(self):\n\tpass",
"def test_add_node_after_mv(self):\n\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=-i))\n\n for i in range(1000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(-i), [-i, i])\n\n node4 = new_node(self.cluster, data_center=\"dc1\")\n node4.start(wait_for_binary_proto=True, jvm_args=[\"-Dcassandra.migration_task_wait_in_seconds={}\".format(MIGRATION_WAIT)])\n\n session2 = self.patient_exclusive_cql_connection(node4)\n\n \"\"\"\n @jira_ticket CASSANDRA-12984\n\n Assert that MVs are marked as build after bootstrap. Otherwise newly streamed MVs will be built again\n \"\"\"\n assert_one(session2, \"SELECT count(*) FROM system.built_views WHERE keyspace_name = 'ks' AND view_name = 't_by_v'\", [1])\n\n for i in range(1000):\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(-i), [-i, i])\n\n for i in range(1000, 1100):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=-i))\n\n for i in range(1000, 1100):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(-i), [-i, i])",
"def test_base_replica_repair_with_contention(self):\n self._base_replica_repair_test(fail_mv_lock=True)",
"def post_migrations(self):",
"def test_create(self):\n session = self.prepare(user_table=True)\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 1, \"Expecting 1 materialized view == got\" + str(result)",
"def test_exclude_unlogged_tables(self):\n fname = self.id().split('.')[3]\n backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')\n node = self.make_simple_node(base_dir=\"{0}/{1}/node\".format(module_name, fname),\n set_replication=True,\n initdb_params=['--data-checksums'],\n pg_options={'wal_level': 'replica', 'max_wal_senders': '2', \"shared_buffers\": \"1GB\", \"fsync\": \"off\", 'ptrack_enable': 'on'}\n )\n\n self.init_pb(backup_dir)\n self.add_instance(backup_dir, 'node', node)\n node.start()\n\n conn = node.connect()\n with node.connect(\"postgres\") as conn:\n\n conn.execute(\"create unlogged table test as select generate_series(0,50050000)::text\")\n conn.commit()\n\n conn.execute(\"create index test_idx on test (generate_series)\")\n conn.commit()\n\n heap_path = conn.execute(\"select pg_relation_filepath('test')\")[0][0]\n conn.commit()\n\n index_path = conn.execute(\"select pg_relation_filepath('test_idx')\")[0][0]\n conn.commit()\n index_init_path = index_path + \"_init\"\n\n heap_oid = conn.execute(\"select 'test'::regclass::oid\")[0][0]\n conn.commit()\n\n toast_path = conn.execute(\"select pg_relation_filepath('{0}.{1}')\".format(\"pg_toast\", \"pg_toast_\" + str(heap_oid)))[0][0]\n conn.commit()\n toast_init_path = toast_path + \"_init\"\n\n toast_idx_path = conn.execute(\"select pg_relation_filepath('{0}.{1}')\".format(\"pg_toast\", \"pg_toast_\" + str(heap_oid) + \"_index\"))[0][0]\n conn.commit()\n toast_index_idx_path = toast_idx_path + \"_init\"\n\n unlogged_heap_filename = os.path.basename(heap_path)\n unlogged_heap_init_filename = unlogged_heap_filename + \"_init\"\n\n unlogged_idx_filename = os.path.basename(index_path)\n unlogged_idx_init_filename = unlogged_idx_filename + \"_init\"\n\n unlogged_toast_filename = os.path.basename(toast_path)\n unlogged_toast_init_filename = unlogged_toast_filename + \"_init\"\n\n unlogged_idx_toast_filename = os.path.basename(toast_idx_path)\n unlogged_idx_toast_init_filename = unlogged_idx_toast_filename + \"_init\"\n\n self.backup_node(backup_dir, 'node', node, backup_type='full', options=['--stream'])\n\n found_unlogged_heap_init = False\n found_unlogged_idx_init = False\n found_unlogged_toast = False\n found_unlogged_idx_toast_init = False\n for root, dirs, files in os.walk(backup_dir):\n for file in files:\n if file in [unlogged_heap_filename, unlogged_heap_filename + \".1\",\n unlogged_idx_filename,\n unlogged_idx_filename + \".1\",\n unlogged_toast_filename,\n unlogged_toast_filename + \".1\",\n unlogged_idx_toast_filename,\n unlogged_idx_toast_filename + \".1\"]:\n self.assertTrue(False, \"Found unlogged table file in backup catalogue.\\n Filepath: {0}\".format(file))\n\n if file == unlogged_heap_init_filename:\n found_unlogged_heap_init = True\n\n if file == unlogged_idx_init_filename:\n found_unlogged_idx_init = True\n\n if file == unlogged_toast_init_filename:\n found_unlogged_toast = True\n\n if file == unlogged_idx_toast_init_filename:\n found_unlogged_idx_toast_init = True\n\n self.assertTrue(found_unlogged_heap_init, \"{0} is not found in backup catalogue\".format(unlogged_heap_init_filename));\n self.assertTrue(found_unlogged_idx_init, \"{0} is not found in backup catalogue\".format(unlogged_idx_init_filename));\n self.assertTrue(found_unlogged_toast, \"{0} is not found in backup catalogue\".format(unlogged_toast_filename));\n self.assertTrue(found_unlogged_idx_toast_init, \"{0} is not found in backup catalogue\".format(unlogged_idx_toast_init_filename));\n\n # Clean after yourself\n 
self.del_test_dir(module_name, fname)",
"def verify_no_snapshot_reingestion(c: Composition) -> None:\n c.run(\"testdrive\", \"wait-for-snapshot.td\", \"postgres-disable-select-permission.td\")\n\n restart_mz(c)\n\n c.run(\n \"testdrive\",\n \"delete-rows-t1.td\",\n \"delete-rows-t2.td\",\n \"alter-table.td\",\n \"alter-mz.td\",\n )",
"def test13(self):\n ###get a block to migrate from global dbs\n dest_datasets = set((dataset['dataset'] for dataset in self.api.listDatasets()))\n ###only dataset after last DBS2->3 because of the parentage issue in DBS 2 min_cdate=1368162000 =10May2013\n src_datasets = set((dataset['dataset'] for dataset in self.cmsweb_api.listDatasets(min_cdate=1368162000)))\n dataset_to_migrate = choice(list(src_datasets.difference(dest_datasets)))\n block_to_migrate = choice([block['block_name']\n for block in self.cmsweb_api.listBlocks(dataset=dataset_to_migrate)])\n\n ###submit migration request\n toMigrate = {'migration_url': self.source_url,\n 'migration_input': block_to_migrate}\n migration_request = self.migration_api.submitMigration(toMigrate)\n self.assertTrue('migration_request_id' in migration_request['migration_details'])\n migration_request_id = migration_request['migration_details']['migration_request_id']\n print(\"____toMigrate___\")\n print(toMigrate)\n print(\"----------migration_request -----------\")\n print(migration_request) \n\n ###check migration status for max. 300s (should be enough time to migrate the dataset)\n with Timeout(300):\n while True:\n request_status = self.migration_api.statusMigration(migration_rqst_id=migration_request_id)\n if request_status[0]['migration_status'] == 2:\n break\n\n ###validate block migration\n def check(input, output):\n non_comparable_keys = ('block_id', 'dataset_id', 'last_modification_date',\n 'parent_file_id', 'primary_ds_id')\n if isinstance(input, dict):\n for key, value in input.items():\n if key in non_comparable_keys:\n continue ###do not compare id's\n if key in ('processing_era',): ###do compare create_by, creation_date for re-used entries\n for key2remove in ('create_by', 'creation_date',):\n try:\n del input[key][key2remove]\n del output[key][key2remove]\n except KeyError:\n pass\n self.assertTrue(key in output)\n check(value, output[key])\n elif isinstance(input, list):\n for element_in, element_out in zip(sorted(remove_non_comparable_keys(input, non_comparable_keys)),\n sorted(remove_non_comparable_keys(output, non_comparable_keys))):\n check(element_in, element_out)\n else:\n self.assertEqual(str(input), str(output))\n\n block_dump_src = self.cmsweb_api.blockDump(block_name=block_to_migrate)\n block_dump_dest = self.api.blockDump(block_name=block_to_migrate)\n check(block_dump_src, block_dump_dest)\n\n ###try to delete successfully executed migration request\n toDelete = {'migration_rqst_id': migration_request_id}\n self.assertRaises(HTTPError, self.migration_api.removeMigration, toDelete)",
"def migrate_2(session, **kwargs):\n try:\n session.execute(\n \"ALTER TABLE {} \\\n ADD COLUMN baseid VARCHAR DEFAULT NULL, \\\n ADD COLUMN created_date TIMESTAMP DEFAULT NOW(), \\\n ADD COLUMN updated_date TIMESTAMP DEFAULT NOW()\".format(IndexRecord.__tablename__))\n except ProgrammingError:\n session.rollback()\n session.commit()\n\n count = session.execute(\n \"SELECT COUNT(*) FROM {};\"\n .format(IndexRecord.__tablename__)).fetchone()[0]\n\n # create tmp_index_record table for fast retrival\n try:\n session.execute(\n \"CREATE TABLE tmp_index_record AS SELECT did, ROW_NUMBER() OVER (ORDER BY did) AS RowNumber \\\n FROM {}\".format(IndexRecord.__tablename__))\n except ProgrammingError:\n session.rollback()\n\n for loop in range(count):\n baseid = str(uuid.uuid4())\n session.execute(\n \"UPDATE index_record SET baseid = '{}'\\\n WHERE did = (SELECT did FROM tmp_index_record WHERE RowNumber = {});\".format(baseid, loop + 1))\n session.execute(\n \"INSERT INTO {}(baseid) VALUES('{}');\".format(BaseVersion.__tablename__, baseid))\n\n session.execute(\n \"ALTER TABLE {} \\\n ADD CONSTRAINT baseid_FK FOREIGN KEY (baseid) references base_version(baseid);\"\n .format(IndexRecord.__tablename__))\n\n # drop tmp table\n session.execute(\n \"DROP TABLE IF EXISTS tmp_index_record;\"\n )",
"def index_tables(query_root, data_dir, host, port, db_name, user, password):\n try:\n conn = PGDB(host, port, db_name, user, password)\n try:\n conn.executeQueryFromFile(os.path.join(query_root, PREP_QUERY_DIR, \"create_idx.sql\"))\n conn.commit()\n except Exception as e:\n print(\"unable to run index tables. %s\" % e)\n return 1\n conn.close()\n return 0\n except Exception as e:\n print(\"unable to connect to the database. %s\" % e)\n return 1",
"def step060():\n logger.logMessage('Begin: updating database')\n update_sql = 'update weather_work set tsa=$1, esDocId = $2 where time = $3;'\n pgConn = pg.connect(host=host,user=user,password=password,database=database) \n c = pgConn.cursor()\n# c.execute('drop table weather_work')\n# c.execute('create table weather_work (like weather excluding constraints)')\n# c.execute('insert into weather_work select * from weather_dupes')\n# c.execute('create index weather_work_time on weather_work(time)')\n pgConn.commit()\n c.execute('prepare updtDocid as {0}'.format(update_sql))\n numUpdates = 0\n with open(renumFile,'r') as f:\n line = f.readline().rstrip()\n while line != '':\n fields = line.split(';')\n tsa = int(fields[0])\n time = fields[1].rstrip() \n docid = fields[2].rstrip()\n try:\n dic = { 'esDocId': docid, 'tsa': tsa , 'time': time+\"+00:00\" }\n c.execute('execute updtDocid (%(tsa)s,%(esDocId)s,%(time)s)',dic)\n numUpdates += 1\n if numUpdates % 250 == 0:\n pgConn.commit()\n logger.logMessage(level='DEBUG',message=\"{0:9d} commited updates\".format(numUpdates))\n except:\n logger.logException('Exception while updating database')\n pgConn.rollback()\n raise\n line = f.readline().rstrip()\n pgConn.commit()\n logger.logMessage(\"Total updates: {0:d}\".format(numUpdates))\n c.close()\n pgConn.close()\n logger.logMessage('End : updating database')",
"def test_invalid_table(self):\n self.execute_query_expect_failure(self.client, \"select * from functional.bad_serde\")\n # The table expires after 1 second. Sleeping for another logbufsecs=5 seconds to wait\n # for the log to be flushed. Wait 4 more seconds to reduce flakiness.\n time.sleep(10)\n assert \"Unexpected exception thrown while attempting to automatically invalidate \"\\\n \"tables\" not in open(os.path.join(self.impala_log_dir, \"catalogd.INFO\")).read()",
"def add_views(apps, schema_editor):\n connection = schema_editor.connection\n with connection.cursor() as cur:\n for view in reversed(OCP_ALL_VIEWS):\n LOG.info(f\"\"\"Dropping materialized view \"{view}\" with cascade\"\"\")\n cur.execute(f\"\"\"DROP MATERIALIZED VIEW \"{view}\" CASCADE;\"\"\")\n\n for view in OCP_ALL_VIEWS:\n view_sql = pkgutil.get_data(\"reporting.provider.all.openshift\", f\"sql/views/{view}.sql\")\n view_sql = view_sql.decode(\"utf-8\")\n LOG.info(f\"\"\"Creating materialized view \"{view}\"...\"\"\")\n with connection.cursor() as cursor:\n cursor.execute(view_sql)",
"def test12(self):\n ###get a dataset to migrate from global dbs\n dest_datasets = set((dataset['dataset'] for dataset in self.api.listDatasets()))\n ###only dataset after last DBS2->3 because of the parentage issue in DBS 2 min_cdate=1368162000 =10May2013\n src_datasets = set((dataset['dataset'] for dataset in self.cmsweb_api.listDatasets(min_cdate=1368162000)))\n dataset_to_migrate = choice(list(src_datasets.difference(dest_datasets)))\n\n ###submit migration request\n toMigrate = {'migration_url': self.source_url,\n 'migration_input': dataset_to_migrate}\n migration_request = self.migration_api.submitMigration(toMigrate)\n self.assertTrue('migration_request_id' in migration_request['migration_details'])\n migration_request_id = migration_request['migration_details']['migration_request_id']\n print(\"____toMigrate___\")\n print(toMigrate)\n print(\"----------migration_request -----------\")\n print(migration_request)\n ###check migration status for max. 300s (should be enough time to migrate the dataset)\n with Timeout(300):\n while True:\n request_status = self.migration_api.statusMigration(migration_rqst_id=migration_request_id)\n if request_status[0]['migration_status'] == 2:\n break\n\n ###validate dataset migration\n def check(input, output):\n non_comparable_keys = ('block_id', 'dataset_id', 'last_modification_date',\n 'parent_file_id', 'primary_ds_id')\n if isinstance(input, dict):\n for key, value in input.items():\n if key in non_comparable_keys:\n continue ###do not compare id's\n if key in ('processing_era',): ###do compare create_by, creation_date for re-used entries\n for key2remove in ('create_by', 'creation_date',):\n try:\n del input[key][key2remove]\n del output[key][key2remove]\n except KeyError:\n pass\n self.assertTrue(key in output)\n check(value, output[key])\n elif isinstance(input, list):\n for element_in, element_out in zip(sorted(remove_non_comparable_keys(input, non_comparable_keys)),\n sorted(remove_non_comparable_keys(output, non_comparable_keys))):\n check(element_in, element_out)\n else:\n self.assertEqual(str(input), str(output))\n\n for block_name in (block['block_name'] for block in self.cmsweb_api.listBlocks(dataset=dataset_to_migrate)):\n block_dump_src = self.cmsweb_api.blockDump(block_name=block_name)\n block_dump_dest = self.api.blockDump(block_name=block_name)\n check(block_dump_src, block_dump_dest)\n\n ###try to delete successfully executed migration request\n toDelete = {'migration_rqst_id': migration_request_id}\n self.assertRaises(HTTPError, self.migration_api.removeMigration, toDelete)",
"def _test_base_column_in_view_pk_complex_timestamp(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1, node2, node3 = self.cluster.nodelist()\n\n session.execute('USE ks')\n session.execute(\"CREATE TABLE t (k int PRIMARY KEY, a int, b int)\")\n session.execute((\"CREATE MATERIALIZED VIEW mv AS SELECT * FROM t \"\n \"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n # Set initial values TS=1\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 1, 1) USING TIMESTAMP 1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1])\n\n # increase b ts to 10\n self.update_view(session, \"UPDATE t USING TIMESTAMP 10 SET b = 2 WHERE k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 1, 2, 10])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 1, 2, 10])\n\n # switch entries. shadow a = 1, insert a = 2\n self.update_view(session, \"UPDATE t USING TIMESTAMP 2 SET a = 2 WHERE k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 2, 2, 10])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 2, 2, 10])\n\n # switch entries. shadow a = 2, insert a = 1\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET a = 1 WHERE k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 1, 2, 10])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 1, 2, 10])\n\n # switch entries. shadow a = 1, insert a = 2\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 SET a = 2 WHERE k = 1;\", flush, compact=True)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 2, 2, 10])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 2, 2, 10])\n\n # able to shadow view row even if base-column in view pk's ts is smaller than row timestamp\n # set row TS = 20, a@6, b@20\n self.update_view(session, \"DELETE FROM t USING TIMESTAMP 5 where k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, None, 2, 10])\n assert_none(session, \"SELECT k,a,b,writetime(b) FROM mv\")\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (1, 1, 1) USING TIMESTAMP 6;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 1, 2, 10])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 1, 2, 10])\n self.update_view(session, \"INSERT INTO t (k, b) VALUES (1, 1) USING TIMESTAMP 20;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM t\", [1, 1, 1, 20])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 1, 1, 20])\n\n # switch entries. shadow a = 1, insert a = 2\n self.update_view(session, \"UPDATE t USING TIMESTAMP 7 SET a = 2 WHERE k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(a),writetime(b) FROM t\", [1, 2, 1, 7, 20])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 2, 1, 20])\n\n # switch entries. 
shadow a = 2, insert a = 1\n self.update_view(session, \"UPDATE t USING TIMESTAMP 8 SET a = 1 WHERE k = 1;\", flush)\n assert_one(session, \"SELECT k,a,b,writetime(a),writetime(b) FROM t\", [1, 1, 1, 8, 20])\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv\", [1, 1, 1, 20])\n\n # create another view row\n self.update_view(session, \"INSERT INTO t (k, a, b) VALUES (2, 2, 2);\", flush)\n assert_one(session, \"SELECT k,a,b FROM t WHERE k = 2\", [2, 2, 2])\n assert_one(session, \"SELECT k,a,b FROM mv WHERE k = 2\", [2, 2, 2])\n\n # stop node2, node3\n logger.debug('Shutdown node2')\n node2.stop(wait_other_notice=True)\n logger.debug('Shutdown node3')\n node3.stop(wait_other_notice=True)\n # shadow a = 1, create a = 2\n query = SimpleStatement(\"UPDATE t USING TIMESTAMP 9 SET a = 2 WHERE k = 1\", consistency_level=ConsistencyLevel.ONE)\n self.update_view(session, query, flush)\n # shadow (a=2, k=2) after 3 second\n query = SimpleStatement(\"UPDATE t USING TTL 3 SET a = 2 WHERE k = 2\", consistency_level=ConsistencyLevel.ONE)\n self.update_view(session, query, flush)\n\n logger.debug('Starting node2')\n node2.start(wait_for_binary_proto=True)\n logger.debug('Starting node3')\n node3.start(wait_for_binary_proto=True)\n\n # For k = 1 & a = 1, We should get a digest mismatch of tombstones and repaired\n query = SimpleStatement(\"SELECT * FROM mv WHERE k = 1 AND a = 1\", consistency_level=ConsistencyLevel.ALL)\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), True)\n assert 0 == len(result.current_rows)\n\n # For k = 1 & a = 1, second time no digest mismatch\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n assert_none(session, \"SELECT * FROM mv WHERE k = 1 AND a = 1\")\n assert 0 == len(result.current_rows)\n\n # For k = 1 & a = 2, We should get a digest mismatch of data and repaired for a = 2\n query = SimpleStatement(\"SELECT * FROM mv WHERE k = 1 AND a = 2\", consistency_level=ConsistencyLevel.ALL)\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), True)\n assert 1 == len(result.current_rows)\n\n # For k = 1 & a = 2, second time no digest mismatch\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n assert 1 == len(result.current_rows)\n assert_one(session, \"SELECT k,a,b,writetime(b) FROM mv WHERE k = 1\", [1, 2, 1, 20])\n\n time.sleep(3)\n # For k = 2 & a = 2, We should get a digest mismatch of expired and repaired\n query = SimpleStatement(\"SELECT * FROM mv WHERE k = 2 AND a = 2\", consistency_level=ConsistencyLevel.ALL)\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), True)\n logger.debug(result.current_rows)\n assert 0 == len(result.current_rows)\n\n # For k = 2 & a = 2, second time no digest mismatch\n result = session.execute(query, trace=True)\n self.check_trace_events(result.get_query_trace(), False)\n assert 0 == len(result.current_rows)",
"def test_timeout_with_successful_crud(self):\n\n shell_conn = dict()\n cbstat_obj = dict()\n error_sim = dict()\n vb_info = dict()\n vb_info[\"init\"] = dict()\n vb_info[\"afterCrud\"] = dict()\n\n target_nodes = DurabilityHelper.getTargetNodes(self.cluster,\n self.nodes_init,\n self.num_nodes_affected)\n for node in target_nodes:\n shell_conn[node.ip] = RemoteMachineShellConnection(node)\n cbstat_obj[node.ip] = Cbstats(node)\n vb_info[\"init\"][node.ip] = cbstat_obj[node.ip].vbucket_seqno(\n self.bucket.name)\n error_sim[node.ip] = CouchbaseError(self.log, shell_conn[node.ip])\n\n doc_load_spec = dict()\n doc_load_spec[MetaCrudParams.SDK_TIMEOUT] = self.sdk_timeout\n doc_load_spec[MetaCrudParams.DURABILITY_LEVEL] = self.durability_level\n doc_load_spec[\"doc_crud\"] = dict()\n doc_load_spec[\"subdoc_crud\"] = dict()\n doc_load_spec[\"doc_crud\"][MetaCrudParams.DocCrud.COMMON_DOC_KEY] = \\\n \"test_collections\"\n doc_load_spec[\"doc_crud\"][\n MetaCrudParams.DocCrud.CREATE_PERCENTAGE_PER_COLLECTION] = 0\n doc_load_spec[\"doc_crud\"][\n MetaCrudParams.DocCrud.UPDATE_PERCENTAGE_PER_COLLECTION] = 0\n doc_load_spec[\"doc_crud\"][\n MetaCrudParams.DocCrud.DELETE_PERCENTAGE_PER_COLLECTION] = 0\n\n doc_load_spec[\"subdoc_crud\"][\n MetaCrudParams.SubDocCrud.INSERT_PER_COLLECTION] = 0\n doc_load_spec[\"subdoc_crud\"][\n MetaCrudParams.SubDocCrud.UPSERT_PER_COLLECTION] = 0\n doc_load_spec[\"subdoc_crud\"][\n MetaCrudParams.SubDocCrud.REMOVE_PER_COLLECTION] = 0\n\n ops_to_perform = [\"create\", \"update\", \"read\", \"replace\", \"delete\"]\n if self.subdoc_test:\n ops_to_perform = [\"insert\", \"upsert\", \"remove\"]\n\n for op_type in ops_to_perform:\n self.log.info(\"Performing '%s' with timeout=%s\"\n % (op_type, self.sdk_timeout))\n curr_spec = deepcopy(doc_load_spec)\n if op_type == \"create\":\n curr_spec[\"doc_crud\"][\n MetaCrudParams.DocCrud.CREATE_PERCENTAGE_PER_COLLECTION] \\\n = 5\n elif op_type == \"update\":\n curr_spec[\"doc_crud\"][\n MetaCrudParams.DocCrud.UPDATE_PERCENTAGE_PER_COLLECTION] \\\n = 5\n elif op_type == \"delete\":\n curr_spec[\"doc_crud\"][\n MetaCrudParams.DocCrud.DELETE_PERCENTAGE_PER_COLLECTION] \\\n = 5\n elif op_type == \"read\":\n curr_spec[\"doc_crud\"][\n MetaCrudParams.DocCrud.READ_PERCENTAGE_PER_COLLECTION] = 5\n curr_spec[MetaCrudParams.RETRY_EXCEPTIONS] = [\n SDKException.TimeoutException]\n elif op_type == \"insert\":\n curr_spec[\"subdoc_crud\"][\n MetaCrudParams.SubDocCrud.INSERT_PER_COLLECTION] = 5\n elif op_type == \"upsert\":\n curr_spec[\"subdoc_crud\"][\n MetaCrudParams.SubDocCrud.UPSERT_PER_COLLECTION] = 5\n elif op_type == \"remove\":\n curr_spec[\"subdoc_crud\"][\n MetaCrudParams.SubDocCrud.REMOVE_PER_COLLECTION] = 5\n\n doc_loading_task = \\\n self.bucket_util.run_scenario_from_spec(\n self.task,\n self.cluster,\n self.cluster.buckets,\n curr_spec,\n mutation_num=1,\n async_load=True,\n validate_task=False)\n\n # Perform specified action\n for node in target_nodes:\n error_sim[node.ip].create(self.simulate_error,\n bucket_name=self.bucket.name)\n\n self.sleep(10, \"Wait before reverting the error condition\")\n\n # Revert the specified error scenario\n for node in target_nodes:\n error_sim[node.ip].revert(self.simulate_error,\n bucket_name=self.bucket.name)\n\n self.task_manager.get_task_result(doc_loading_task)\n self.bucket_util.validate_doc_loading_results(doc_loading_task)\n if doc_loading_task.result is False:\n self.fail(\"Doc_loading for '%s' failed\" % op_type)\n\n # Fetch latest stats and validate the values are updated\n for node in 
target_nodes:\n curr_stat = cbstat_obj[node.ip].vbucket_seqno(self.bucket.name)\n if vb_info[\"init\"][node.ip] == curr_stat:\n self.log_failure(\"vbucket_seqno not updated. %s == %s\"\n % (vb_info[\"init\"][node.ip], curr_stat))\n\n # Disconnect the shell connection\n for node in target_nodes:\n shell_conn[node.ip].disconnect()\n\n # Verify initial doc load count\n self.bucket_util._wait_for_stats_all_buckets(self.cluster,\n self.cluster.buckets)\n self.bucket_util.validate_docs_per_collections_all_buckets(\n self.cluster)\n self.validate_test_failure()",
"def _base_test_insert_during_range_movement(self, rf):\n\n session = self.prepare(rf=rf)\n\n logger.debug(\"Creating table and view\")\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"Starting new node4 in write survey mode\")\n node4 = new_node(self.cluster, data_center=\"dc1\")\n # Set batchlog.replay_timeout_seconds=1 so we can ensure batchlog will be replayed below\n node4.start(wait_for_binary_proto=True, jvm_args=[\"-Dcassandra.write_survey=true\",\n \"-Dcassandra.batchlog.replay_timeout_in_ms=1\"])\n\n logger.debug(\"Insert data while node4 is joining\")\n\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=-i))\n\n logger.debug(\"Finish joining node4\")\n node4.nodetool(\"join\")\n\n logger.debug('Replay batchlogs')\n time.sleep(0.001) # Wait batchlog.replay_timeout_in_ms=1 (ms)\n self._replay_batchlogs()\n\n logger.debug(\"Verify data\")\n for i in range(1000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(-i), [-i, i])",
"def fix_incremental(meta, bind):\n meta.create_all(bind=bind, checkfirst=True)\n ref = inspect(bind)\n for table in meta.sorted_tables:\n orm_cols = set(col.name for col in table.c)\n ref_cols = set(col['name'] for col in ref.get_columns(table.name))\n col_to_create = orm_cols - ref_cols\n col_to_delete = ref_cols - orm_cols\n if col_to_create:\n print table.name, 'has diff to create', col_to_create\n with bind.begin() as conn:\n for col_name in col_to_create:\n col = table.c.get(col_name)\n column_sql = CreateColumn(col).compile(bind).string\n sql = 'ALTER TABLE {} ADD COLUMN {}'.format(table.name, column_sql)\n if col.default:\n sql += ' DEFAULT {!r}'.format(col.default.arg) # can break when a pickle type has callable default.\n if not col.nullable:\n sql += ' NOT NULL'\n print 'executing sql: ' + sql\n conn.execute(sql)\n\n # Workaround to ensure updated DBs start with \"False\" in ignore column\n if list(col_to_create)[0] == 'ignore':\n sessionmaker = get_sessionmaker(bind.url.database)\n session = sessionmaker()\n query_object = {'dttrialdff0s': DTTrialDff0, 'trials': Trial}[table.name]\n items = session.query(query_object).all()\n for item in items:\n item.ignore = False\n session.flush()\n\n if col_to_delete:\n print table.name, 'has diff to delete', col_to_delete, 'maybe later version.'\n \"\"\"\n BEGIN TRANSACTION;\n CREATE TEMPORARY TABLE t1_backup(a,b);\n INSERT INTO t1_backup SELECT a,b FROM t1;\n DROP TABLE t1;\n CREATE TABLE t1(a,b);\n INSERT INTO t1 SELECT a,b FROM t1_backup;\n DROP TABLE t1_backup;\n COMMIT;\n \"\"\"",
"def migration():"
]
| [
"0.6456797",
"0.5971283",
"0.5955767",
"0.5870181",
"0.58392113",
"0.57749593",
"0.57468843",
"0.5569703",
"0.5568404",
"0.553152",
"0.55259246",
"0.5524626",
"0.5524061",
"0.5493021",
"0.54891425",
"0.5397019",
"0.5376122",
"0.533465",
"0.53338456",
"0.5315216",
"0.5313221",
"0.52891624",
"0.5288324",
"0.5282482",
"0.52762496",
"0.5267969",
"0.52623546",
"0.52548945",
"0.52330095",
"0.5205334"
]
| 0.6547234 | 0 |
Tests consistency of multiple writes to a single partition CASSANDRA-10981 | def test_single_partition_consistent_reads_after_write(self):
self._consistent_reads_after_write_test(1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_multi_partition_consistent_reads_after_write(self):\n self._consistent_reads_after_write_test(5)",
"def _check_write_consistency(self):\n self.logger.warning('Not checking write consistency')",
"def testConsistency(self):",
"def check_consistency(self, es):",
"def test_redis_increase_replica_count_usual_case():",
"def _base_replica_repair_test(self, fail_mv_lock=False):\n\n self.prepare(rf=3)\n node1, node2, node3 = self.cluster.nodelist()\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Write initial data')\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)\".format(v=i))\n\n self._replay_batchlogs()\n\n logger.debug('Verify the data in the MV with CL=ALL')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0],\n cl=ConsistencyLevel.ALL\n )\n\n logger.debug('Shutdown node1')\n node1.stop(wait_other_notice=True)\n logger.debug('Delete node1 data')\n node1.clear(clear_all=True)\n\n jvm_args = []\n if fail_mv_lock:\n if self.cluster.version() >= LooseVersion('3.10'): # CASSANDRA-10134\n jvm_args = ['-Dcassandra.allow_unsafe_replace=true', '-Dcassandra.replace_address={}'.format(node1.address())]\n jvm_args.append(\"-Dcassandra.test.fail_mv_locks_count=1000\")\n # this should not make Keyspace.apply throw WTE on failure to acquire lock\n node1.set_configuration_options(values={'write_request_timeout_in_ms': 100})\n logger.debug('Restarting node1 with jvm_args={}'.format(jvm_args))\n node1.start(wait_for_binary_proto=True, jvm_args=jvm_args)\n logger.debug('Shutdown node2 and node3')\n node2.stop(wait_other_notice=True)\n node3.stop(wait_other_notice=True)\n\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n\n logger.debug('Verify that there is no data on node1')\n for i in range(1000):\n assert_none(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i)\n )\n\n logger.debug('Restarting node2 and node3')\n node2.start(wait_for_binary_proto=True)\n node3.start(wait_for_binary_proto=True)\n\n # Just repair the base replica\n logger.debug('Starting repair on node1')\n node1.nodetool(\"repair ks t\")\n\n logger.debug('Verify data with cl=ALL')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )",
"def test_eventualConsistencyRing(self) :\n\t\tringSize = self.RINGSIZE\n\t\tprint str(ringSize) + \" Nodes arranged in ring topology\"\n\n\t\tnodeList = self.createNodes(ringSize)\n\t\tself.addAppRecordDiff(nodeList)\n\n\t\t# i client , i+1 server \n\t\tsessIDlist = self.sessionsRing(nodeList)\n\n\t\tfor j in range (2) :\n \t\tfor i in range(ringSize) :\n\t\t\t\tself.fullDBReplication(nodeList[sessIDlist[i][0]], sessIDlist[i][2])\n\t\t\t\t\n\t\t\t\t# Print statements\n\t\t\t\tif i == ringSize -1 :\n\t\t\t\t\tprint \"Sync data between \" + nodeList[i].instanceID + \" and \" + nodeList[0].instanceID\n\t\t\t\telse :\n\t\t\t\t\tprint \"Sync data between \" + nodeList[i].instanceID + \" and \" + nodeList[i+1].instanceID\n\n\t\t# Asserts to show that all the nodes have the same data\n\t\tself.assertEqual(self.endConditionData(nodeList) , False)",
"def verify_batch_consumer():\n\n # Consumer config\n conf = {'bootstrap.servers': bootstrap_servers,\n 'group.id': 'test.py',\n 'session.timeout.ms': 6000,\n 'enable.auto.commit': False,\n 'api.version.request': api_version_request,\n 'on_commit': print_commit_result,\n 'error_cb': error_cb,\n 'default.topic.config': {\n 'auto.offset.reset': 'earliest'\n }}\n\n # Create consumer\n c = confluent_kafka.Consumer(**conf)\n\n # Subscribe to a list of topics\n c.subscribe([topic])\n\n max_msgcnt = 1000\n batch_cnt = 100\n msgcnt = 0\n\n while msgcnt < max_msgcnt:\n # Consume until we hit max_msgcnt\n\n # Consume messages (error()==0) or event (error()!=0)\n msglist = c.consume(batch_cnt, 10.0)\n assert len(msglist) == batch_cnt, 'expected %d messages, not %d' % (batch_cnt, len(msglist))\n\n for msg in msglist:\n if msg.error():\n print('Consumer error: %s: ignoring' % msg.error())\n continue\n\n tstype, timestamp = msg.timestamp()\n print('%s[%d]@%d: key=%s, value=%s, tstype=%d, timestamp=%s' %\n (msg.topic(), msg.partition(), msg.offset(),\n msg.key(), msg.value(), tstype, timestamp))\n\n if (msg.offset() % 5) == 0:\n # Async commit\n c.commit(msg, asynchronous=True)\n elif (msg.offset() % 4) == 0:\n offsets = c.commit(msg, asynchronous=False)\n assert len(offsets) == 1, 'expected 1 offset, not %s' % (offsets)\n assert offsets[0].offset == msg.offset()+1, \\\n 'expected offset %d to be committed, not %s' % \\\n (msg.offset(), offsets)\n print('Sync committed offset: %s' % offsets)\n\n msgcnt += 1\n\n print('max_msgcnt %d reached' % msgcnt)\n\n # Get current assignment\n assignment = c.assignment()\n\n # Get cached watermark offsets\n # Since we're not making use of statistics the low offset is not known so ignore it.\n lo, hi = c.get_watermark_offsets(assignment[0], cached=True)\n print('Cached offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Query broker for offsets\n lo, hi = c.get_watermark_offsets(assignment[0], timeout=1.0)\n print('Queried offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Close consumer\n c.close()\n\n # Start a new client and get the committed offsets\n c = confluent_kafka.Consumer(**conf)\n offsets = c.committed(list(map(lambda p: confluent_kafka.TopicPartition(topic, p), range(0, 3))))\n for tp in offsets:\n print(tp)\n\n c.close()",
"def test_sub_doc_with_persistence_issues(self):\n\n if self.durability_level.upper() in [\n Bucket.DurabilityLevel.MAJORITY_AND_PERSIST_TO_ACTIVE,\n Bucket.DurabilityLevel.PERSIST_TO_MAJORITY]:\n self.log.critical(\"Test not valid for persistence durability\")\n return\n\n error_sim = dict()\n shell_conn = dict()\n cbstat_obj = dict()\n failover_info = dict()\n vb_info_info = dict()\n active_vbs_in_target_nodes = list()\n failover_info[\"init\"] = dict()\n failover_info[\"afterCrud\"] = dict()\n vb_info_info[\"init\"] = dict()\n vb_info_info[\"afterCrud\"] = dict()\n def_bucket = self.cluster.buckets[0]\n\n load_spec = dict()\n load_spec[\"doc_crud\"] = dict()\n load_spec[\"subdoc_crud\"] = dict()\n load_spec[\"doc_crud\"][\n MetaCrudParams.DocCrud.COMMON_DOC_KEY] = \"test_collections\"\n load_spec[\"doc_crud\"][\n MetaCrudParams.DocCrud.READ_PERCENTAGE_PER_COLLECTION] = 50\n load_spec[\"subdoc_crud\"][\n MetaCrudParams.SubDocCrud.INSERT_PER_COLLECTION] = 20\n load_spec[\"subdoc_crud\"][\n MetaCrudParams.SubDocCrud.UPSERT_PER_COLLECTION] = 10\n load_spec[\"subdoc_crud\"][\n MetaCrudParams.SubDocCrud.REMOVE_PER_COLLECTION] = 10\n\n self.log.info(\"Selecting nodes to simulate error condition\")\n target_nodes = DurabilityHelper.getTargetNodes(self.cluster,\n self.nodes_init,\n self.num_nodes_affected)\n\n # Create new docs for sub-doc operations to run\n self.load_data_for_sub_doc_ops()\n\n self.log.info(\"Will simulate error condition on %s\" % target_nodes)\n for node in target_nodes:\n # Create shell_connections\n shell_conn[node.ip] = RemoteMachineShellConnection(node)\n cbstat_obj[node.ip] = Cbstats(node)\n active_vbs = cbstat_obj[node.ip] .vbucket_list(def_bucket.name,\n \"active\")\n active_vbs_in_target_nodes += active_vbs\n vb_info_info[\"init\"][node.ip] = cbstat_obj[node.ip].vbucket_seqno(\n def_bucket.name)\n failover_info[\"init\"][node.ip] = \\\n cbstat_obj[node.ip].failover_stats(def_bucket.name)\n\n for node in target_nodes:\n # Perform specified action\n error_sim[node.ip] = CouchbaseError(self.log,\n shell_conn[node.ip],\n node=node)\n error_sim[node.ip].create(self.simulate_error,\n bucket_name=def_bucket.name)\n\n # Perform CRUDs with induced error scenario is active\n self.log.info(\"Perform 'insert', 'upsert', 'remove' mutations\")\n doc_loading_task = \\\n self.bucket_util.run_scenario_from_spec(\n self.task,\n self.cluster,\n self.cluster.buckets,\n load_spec,\n mutation_num=0,\n async_load=True)\n\n # Perform new scope/collection creation during doc ops in parallel\n self.__perform_collection_crud(mutation_num=1)\n\n # Wait for doc_loading to complete and validate the doc ops\n self.task_manager.get_task_result(doc_loading_task)\n if doc_loading_task.result is False:\n self.log_failure(\"Doc CRUDs failed with persistence issue\")\n\n # Revert the induced error condition\n for node in target_nodes:\n error_sim[node.ip].revert(self.simulate_error,\n bucket_name=def_bucket.name)\n\n # Fetch latest failover stats and validate the values are updated\n self.log.info(\"Validating failover and seqno cbstats\")\n for node in target_nodes:\n vb_info_info[\"afterCrud\"][node.ip] = \\\n cbstat_obj[node.ip].vbucket_seqno(def_bucket.name)\n failover_info[\"afterCrud\"][node.ip] = \\\n cbstat_obj[node.ip].failover_stats(def_bucket.name)\n\n # Failover validation\n val = \\\n failover_info[\"init\"][node.ip] \\\n == failover_info[\"afterCrud\"][node.ip]\n self.assertTrue(val, msg=\"Failover stats not updated\")\n\n # Seq_no validation (High level)\n val = \\\n 
vb_info_info[\"init\"][node.ip] \\\n != vb_info_info[\"afterCrud\"][node.ip]\n self.assertTrue(val, msg=\"vbucket seq_no not updated after CRUDs\")\n\n # Disconnect the shell connection\n for node in target_nodes:\n shell_conn[node.ip].disconnect()\n\n self.validate_test_failure()\n self.bucket_util._wait_for_stats_all_buckets(self.cluster,\n self.cluster.buckets)\n self.bucket_util.validate_docs_per_collections_all_buckets(\n self.cluster)",
"def test_very_many_partitions_in_fsic(self):\n fsics = {\"super\": {}, \"sub\": {\"\": {self.data[\"group1_id\"].id: 1, self.data[\"group2_id\"].id: 1}}}\n for i in range(10000):\n fsics[\"sub\"][uuid.uuid4().hex] = {uuid.uuid4().hex: i}\n self.transfer_session.client_fsic = json.dumps(fsics)\n self.transfer_session.server_fsic = json.dumps({\"super\": {}, \"sub\": {}})\n _queue_into_buffer_v2(self.transfer_session)\n # ensure all store and buffer records are buffered\n assertRecordsBuffered(self.data[\"group1_c1\"])\n assertRecordsBuffered(self.data[\"group1_c2\"])\n assertRecordsBuffered(self.data[\"group2_c1\"])",
"def test_very_many_partitions_and_instances_in_fsic(self):\n fsics = {\"super\": {}, \"sub\": {\"\": {self.data[\"group1_id\"].id: 1, self.data[\"group2_id\"].id: 1}}}\n for i in range(99):\n fsics[\"sub\"][uuid.uuid4().hex] = {uuid.uuid4().hex: i for i in range(999)}\n self.transfer_session.client_fsic = json.dumps(fsics)\n self.transfer_session.server_fsic = json.dumps({\"super\": {}, \"sub\": {}})\n _queue_into_buffer_v2(self.transfer_session)\n # ensure all store and buffer records are buffered\n assertRecordsBuffered(self.data[\"group1_c1\"])\n assertRecordsBuffered(self.data[\"group1_c2\"])\n assertRecordsBuffered(self.data[\"group2_c1\"])",
"def aknowledged_by_batchlog_set_when_batchlog_write_succeeds_test(self):\n cursor = self.prepare(nodes=3)\n # kill one of the nodes so that batchlog will be written, but the write will fail.\n self.cluster.nodelist()[-1].stop(gently=False)\n self.assert_timedout(cursor, \"\"\"\n BEGIN BATCH\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n APPLY BATCH\n \"\"\", ConsistencyLevel.THREE, acknowledged_by_batchlog=True)",
"def test_bulk_round_trip_with_timeouts(self):\n self._test_bulk_round_trip(nodes=1, partitioner=\"murmur3\", num_operations=100000,\n configuration_options={'range_request_timeout_in_ms': '200',\n 'write_request_timeout_in_ms': '100'},\n copy_from_options={'MAXINSERTERRORS': -1},\n skip_count_checks=True)",
"def test_complex_repair(self):\n session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)\n node1, node2, node3, node4, node5 = self.cluster.nodelist()\n\n # we create the base table with gc_grace_seconds=5 so batchlog will expire after 5 seconds\n session.execute(\"CREATE TABLE ks.t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\"\n \"WITH gc_grace_seconds = 5\")\n session.execute((\"CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Shutdown node2 and node3')\n node2.stop()\n node3.stop(wait_other_notice=True)\n\n logger.debug('Write initial data to node1 (will be replicated to node4 and node5)')\n for i in range(1000):\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)\".format(v=i))\n\n logger.debug('Verify the data in the MV on node1 with CL=ONE')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )\n\n logger.debug('Close connection to node1')\n session.cluster.shutdown()\n logger.debug('Shutdown node1, node4 and node5')\n node1.stop()\n node4.stop()\n node5.stop()\n\n logger.debug('Start nodes 2 and 3')\n node2.start()\n node3.start(wait_for_binary_proto=True)\n\n session2 = self.patient_cql_connection(node2)\n\n logger.debug('Verify the data in the MV on node2 with CL=ONE. No rows should be found.')\n for i in range(1000):\n assert_none(\n session2,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(i)\n )\n\n logger.debug('Write new data in node2 and node3 that overlap those in node1, node4 and node5')\n for i in range(1000):\n # we write i*2 as value, instead of i\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)\".format(v=i * 2))\n\n logger.debug('Verify the new data in the MV on node2 with CL=ONE')\n for i in range(1000):\n v = i * 2\n assert_one(\n session2,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(v),\n [v, v, 'a', 3.0]\n )\n\n logger.debug('Wait for batchlogs to expire from node2 and node3')\n time.sleep(5)\n\n logger.debug('Start remaining nodes')\n node1.start(wait_for_binary_proto=True)\n node4.start(wait_for_binary_proto=True)\n node5.start(wait_for_binary_proto=True)\n\n session = self.patient_cql_connection(node1)\n\n logger.debug('Read data from MV at QUORUM (old data should be returned)')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0],\n cl=ConsistencyLevel.QUORUM\n )\n\n logger.debug('Run global repair on node1')\n node1.repair()\n\n logger.debug('Read data from MV at quorum (new data should be returned after repair)')\n for i in range(1000):\n v = i * 2\n assert_one(\n session,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(v),\n [v, v, 'a', 3.0],\n cl=ConsistencyLevel.QUORUM\n )",
"def test_update_values(self):\r\n partition = uuid4()\r\n for i in range(5):\r\n TestQueryUpdateModel.create(partition=partition, cluster=i, count=i, text=str(i))\r\n\r\n # sanity check\r\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\r\n assert row.cluster == i\r\n assert row.count == i\r\n assert row.text == str(i)\r\n\r\n # perform update\r\n TestQueryUpdateModel.objects(partition=partition, cluster=3).update(count=6)\r\n\r\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\r\n assert row.cluster == i\r\n assert row.count == (6 if i == 3 else i)\r\n assert row.text == str(i)",
"def test_concurrent_updates(self):\r\n instance = TestCounterModel.create()\r\n new1 = TestCounterModel.get(partition=instance.partition)\r\n new2 = TestCounterModel.get(partition=instance.partition)\r\n\r\n new1.counter += 5\r\n new1.save()\r\n new2.counter += 5\r\n new2.save()\r\n\r\n actual = TestCounterModel.get(partition=instance.partition)\r\n assert actual.counter == 10",
"def test_update_values(self):\n partition = uuid4()\n for i in range(5):\n TestQueryUpdateModel.create(partition=partition, cluster=i, count=i, text=str(i))\n\n # sanity check\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\n self.assertEqual(row.cluster, i)\n self.assertEqual(row.count, i)\n self.assertEqual(row.text, str(i))\n\n # perform update\n TestQueryUpdateModel.objects(partition=partition, cluster=3).update(count=6)\n\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\n self.assertEqual(row.cluster, i)\n self.assertEqual(row.count, 6 if i == 3 else i)\n self.assertEqual(row.text, str(i))",
"def test_bulk_round_trip_default(self):\n self._test_bulk_round_trip(nodes=3, partitioner=\"murmur3\", num_operations=100000)",
"def acknowledged_by_batchlog_set_when_batchlog_write_succeeds_test(self):\n session = self.prepare(nodes=3, compression=False)\n # kill one of the nodes so that batchlog will be written, but the write will fail.\n self.cluster.nodelist()[-1].stop(gently=False)\n self.assert_timedout(session, \"\"\"\n BEGIN BATCH\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n APPLY BATCH\n \"\"\", ConsistencyLevel.THREE, received_responses=2)",
"def test_write(self):\n data2 = self.data.copy()\n data2['a'] *= 2\n self.dset['a'] = data2\n self.assertTrue(np.all(self.dset[...] == data2))\n data2['b'] *= 4\n self.dset['b'] = data2\n self.assertTrue(np.all(self.dset[...] == data2))\n data2['a'] *= 3\n data2['c'] *= 3\n self.dset['a','c'] = data2\n self.assertTrue(np.all(self.dset[...] == data2))",
"def test_kafka_group_io_dataset_resume_primary_cg():\n import tensorflow_io.kafka as kafka_io\n\n # Write new messages to the topic\n for i in range(10, 100):\n message = f\"D{i}\"\n kafka_io.write_kafka(message=message, topic=\"key-partition-test\")\n # Read only the newly sent 90 messages\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\"session.timeout.ms=7000\", \"max.poll.interval.ms=8000\"],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(10, 100))\n )",
"def test_add_node_after_wide_mv_with_range_deletions(self):\n\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int, v int, PRIMARY KEY (id, v)) WITH compaction = { 'class': 'SizeTieredCompactionStrategy', 'enabled': 'false' }\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n for i in range(10):\n for j in range(100):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=j))\n\n self.cluster.flush()\n\n for i in range(10):\n for j in range(100):\n assert_one(session, \"SELECT * FROM t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n for i in range(10):\n for j in range(100):\n if j % 10 == 0:\n session.execute(\"DELETE FROM t WHERE id = {} AND v >= {} and v < {}\".format(i, j, j + 2))\n\n self.cluster.flush()\n\n for i in range(10):\n for j in range(100):\n if j % 10 == 0 or (j - 1) % 10 == 0:\n assert_none(session, \"SELECT * FROM t WHERE id = {} and v = {}\".format(i, j))\n assert_none(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j))\n else:\n assert_one(session, \"SELECT * FROM t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n node4 = new_node(self.cluster, data_center=\"dc1\")\n node4.set_configuration_options(values={'max_mutation_size_in_kb': 25}) # CASSANDRA-11670\n logger.debug(\"Start join at {}\".format(time.strftime(\"%H:%M:%S\")))\n node4.start(wait_for_binary_proto=True, jvm_args=[\"-Dcassandra.migration_task_wait_in_seconds={}\".format(MIGRATION_WAIT)])\n\n session2 = self.patient_exclusive_cql_connection(node4)\n\n for i in range(10):\n for j in range(100):\n if j % 10 == 0 or (j - 1) % 10 == 0:\n assert_none(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j))\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j))\n else:\n assert_one(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n for i in range(10):\n for j in range(100, 110):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=j))\n\n for i in range(10):\n for j in range(110):\n if j < 100 and (j % 10 == 0 or (j - 1) % 10 == 0):\n assert_none(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j))\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j))\n else:\n assert_one(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])",
"def test_full_house_flush_ind(self):",
"def _test_bulk_round_trip(self, nodes, partitioner,\n num_operations, profile=None,\n stress_table='keyspace1.standard1',\n configuration_options=None,\n skip_count_checks=False,\n copy_to_options=None,\n copy_from_options=None):\n if configuration_options is None:\n configuration_options = {}\n if copy_to_options is None:\n copy_to_options = {}\n\n # The default truncate timeout of 10 seconds that is set in init_default_config() is not\n # enough for truncating larger tables, see CASSANDRA-11157\n if 'truncate_request_timeout_in_ms' not in configuration_options:\n configuration_options['truncate_request_timeout_in_ms'] = 60000\n\n self.prepare(nodes=nodes, partitioner=partitioner, configuration_options=configuration_options)\n\n ret = []\n\n def create_records():\n if not profile:\n logger.debug('Running stress without any user profile')\n self.node1.stress(['write', 'n={} cl=ALL'.format(num_operations), 'no-warmup', '-rate', 'threads=50'])\n else:\n logger.debug('Running stress with user profile {}'.format(profile))\n self.node1.stress(['user', 'profile={}'.format(profile), 'ops(insert=1)',\n 'n={} cl=ALL'.format(num_operations), 'no-warmup', '-rate', 'threads=50'])\n\n if skip_count_checks:\n return num_operations\n else:\n count_statement = SimpleStatement(\"SELECT COUNT(*) FROM {}\".format(stress_table), consistency_level=ConsistencyLevel.ALL,\n retry_policy=FlakyRetryPolicy(max_retries=3))\n ret = rows_to_list(self.session.execute(count_statement))[0][0]\n logger.debug('Generated {} records'.format(ret))\n assert ret >= num_operations, 'cassandra-stress did not import enough records'\n return ret\n\n def run_copy_to(filename):\n logger.debug('Exporting to csv file: {}'.format(filename.name))\n start = datetime.datetime.now()\n copy_to_cmd = \"CONSISTENCY ALL; COPY {} TO '{}'\".format(stress_table, filename.name)\n if copy_to_options:\n copy_to_cmd += ' WITH ' + ' AND '.join('{} = {}'.format(k, v) for k, v in copy_to_options.items())\n logger.debug('Running {}'.format(copy_to_cmd))\n result = self.run_cqlsh(cmds=copy_to_cmd)\n ret.append(result)\n logger.debug(\"COPY TO took {} to export {} records\".format(datetime.datetime.now() - start, num_records))\n\n def run_copy_from(filename):\n logger.debug('Importing from csv file: {}'.format(filename.name))\n start = datetime.datetime.now()\n copy_from_cmd = \"COPY {} FROM '{}'\".format(stress_table, filename.name)\n if copy_from_options:\n copy_from_cmd += ' WITH ' + ' AND '.join('{} = {}'.format(k, v) for k, v in copy_from_options.items())\n logger.debug('Running {}'.format(copy_from_cmd))\n result = self.run_cqlsh(cmds=copy_from_cmd)\n ret.append(result)\n logger.debug(\"COPY FROM took {} to import {} records\".format(datetime.datetime.now() - start, num_records))\n\n num_records = create_records()\n\n # Copy to the first csv files\n tempfile1 = self.get_temp_file()\n run_copy_to(tempfile1)\n\n # check all records generated were exported\n with io.open(tempfile1.name, encoding=\"utf-8\", newline='') as csvfile:\n assert num_records == sum(1 for _ in csv.reader(csvfile, quotechar='\"', escapechar='\\\\'))\n\n # import records from the first csv file\n logger.debug('Truncating {}...'.format(stress_table))\n self.session.execute(\"TRUNCATE {}\".format(stress_table))\n run_copy_from(tempfile1)\n\n # export again to a second csv file\n tempfile2 = self.get_temp_file()\n run_copy_to(tempfile2)\n\n # check the length of both files is the same to ensure all exported records were imported\n assert sum(1 for _ in open(tempfile1.name)) == sum(1 
for _ in open(tempfile2.name))\n\n return ret",
"def test_update_values_validation(self):\r\n partition = uuid4()\r\n for i in range(5):\r\n TestQueryUpdateModel.create(partition=partition, cluster=i, count=i, text=str(i))\r\n\r\n # sanity check\r\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\r\n assert row.cluster == i\r\n assert row.count == i\r\n assert row.text == str(i)\r\n\r\n # perform update\r\n with self.assertRaises(ValidationError):\r\n TestQueryUpdateModel.objects(partition=partition, cluster=3).update(count='asdf')",
"def test_update_values_validation(self):\n partition = uuid4()\n for i in range(5):\n TestQueryUpdateModel.create(partition=partition, cluster=i, count=i, text=str(i))\n\n # sanity check\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\n self.assertEqual(row.cluster, i)\n self.assertEqual(row.count, i)\n self.assertEqual(row.text, str(i))\n\n # perform update\n with self.assertRaises(ValidationError):\n TestQueryUpdateModel.objects(partition=partition, cluster=3).update(count='asdf')",
"def test_spanner_indexer_implementation_bulk_insert_twice_gives_same_result(testing_indexer):\n record = {\"org_id\": 55555, \"string\": get_random_string(10)}\n record1_int = testing_indexer.record(\n use_case_id=UseCaseKey.PERFORMANCE, org_id=record[\"org_id\"], string=record[\"string\"]\n )\n\n # Insert the record again to validate that the returned id is the one we\n # got from the first insert.\n record2_int = testing_indexer.record(\n use_case_id=UseCaseKey.PERFORMANCE, org_id=record[\"org_id\"], string=record[\"string\"]\n )\n\n assert record1_int == record2_int",
"def test_custom_end_to_end(self):\n\n utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])\n\n # start the first shard only for now\n shard_0_master.init_tablet(\n 'replica',\n keyspace='test_keyspace',\n shard='0',\n tablet_index=0)\n shard_0_replica.init_tablet(\n 'replica',\n keyspace='test_keyspace',\n shard='0',\n tablet_index=1)\n shard_0_rdonly.init_tablet(\n 'rdonly',\n keyspace='test_keyspace',\n shard='0',\n tablet_index=2)\n\n for t in [shard_0_master, shard_0_replica, shard_0_rdonly]:\n t.create_db('vt_test_keyspace')\n t.start_vttablet(wait_for_state=None)\n\n for t in [shard_0_master, shard_0_replica, shard_0_rdonly]:\n t.wait_for_vttablet_state('NOT_SERVING')\n\n utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/0',\n shard_0_master.tablet_alias], auto_log=True)\n utils.wait_for_tablet_type(shard_0_replica.tablet_alias, 'replica')\n utils.wait_for_tablet_type(shard_0_rdonly.tablet_alias, 'rdonly')\n for t in [shard_0_master, shard_0_replica, shard_0_rdonly]:\n t.wait_for_vttablet_state('SERVING')\n\n self._check_shards_count_in_srv_keyspace(1)\n s = utils.run_vtctl_json(['GetShard', 'test_keyspace/0'])\n self.assertEqual(len(s['served_types']), 3)\n\n # create a table on shard 0\n sql = '''create table data(\nid bigint auto_increment,\nname varchar(64),\nprimary key (id)\n) Engine=InnoDB'''\n utils.run_vtctl(['ApplySchema', '-sql=' + sql, 'test_keyspace'],\n auto_log=True)\n\n # reload schema everywhere so the QueryService knows about the tables\n for t in [shard_0_master, shard_0_replica, shard_0_rdonly]:\n utils.run_vtctl(['ReloadSchema', t.tablet_alias], auto_log=True)\n\n # create shard 1\n shard_1_master.init_tablet(\n 'replica',\n keyspace='test_keyspace',\n shard='1',\n tablet_index=0)\n shard_1_replica.init_tablet(\n 'replica',\n keyspace='test_keyspace',\n shard='1',\n tablet_index=1)\n shard_1_rdonly.init_tablet(\n 'rdonly',\n keyspace='test_keyspace',\n shard='1',\n tablet_index=2)\n\n for t in [shard_1_master, shard_1_replica, shard_1_rdonly]:\n t.create_db('vt_test_keyspace')\n t.start_vttablet(wait_for_state=None)\n\n for t in [shard_1_master, shard_1_replica, shard_1_rdonly]:\n t.wait_for_vttablet_state('NOT_SERVING')\n\n s = utils.run_vtctl_json(['GetShard', 'test_keyspace/1'])\n self.assertEqual(len(s['served_types']), 3)\n\n utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/1',\n shard_1_master.tablet_alias], auto_log=True)\n utils.wait_for_tablet_type(shard_1_replica.tablet_alias, 'replica')\n utils.wait_for_tablet_type(shard_1_rdonly.tablet_alias, 'rdonly')\n for t in [shard_1_master, shard_1_replica, shard_1_rdonly]:\n t.wait_for_vttablet_state('SERVING')\n utils.run_vtctl(['CopySchemaShard', shard_0_rdonly.tablet_alias,\n 'test_keyspace/1'], auto_log=True)\n\n # we need to rebuild SrvKeyspace here to account for the new shards.\n utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)\n self._check_shards_count_in_srv_keyspace(2)\n\n # must start vtgate after tablets are up, or else wait until 1min refresh\n utils.VtGate().start(tablets=[\n shard_0_master, shard_0_replica, shard_0_rdonly,\n shard_1_master, shard_1_replica, shard_1_rdonly])\n utils.vtgate.wait_for_endpoints('test_keyspace.0.master', 1)\n utils.vtgate.wait_for_endpoints('test_keyspace.0.replica', 1)\n utils.vtgate.wait_for_endpoints('test_keyspace.0.rdonly', 1)\n utils.vtgate.wait_for_endpoints('test_keyspace.1.master', 1)\n utils.vtgate.wait_for_endpoints('test_keyspace.1.replica', 1)\n utils.vtgate.wait_for_endpoints('test_keyspace.1.rdonly', 
1)\n\n # insert and check data on shard 0\n self._insert_data('0', 100, 10)\n self._check_data('0', 100, 10)\n\n # insert and check data on shard 1\n self._insert_data('1', 200, 10)\n self._check_data('1', 200, 10)\n\n # create a second table on all shards\n sql = '''create table data2(\nid bigint auto_increment,\nname varchar(64),\nprimary key (id)\n) Engine=InnoDB'''\n utils.run_vtctl(['ApplySchema', '-sql=' + sql, 'test_keyspace'],\n auto_log=True)\n\n # reload schema everywhere so the QueryService knows about the tables\n for t in all_tablets:\n utils.run_vtctl(['ReloadSchema', t.tablet_alias], auto_log=True)\n\n # insert and read data on all shards\n self._insert_data('0', 300, 10, table='data2')\n self._insert_data('1', 400, 10, table='data2')\n self._check_data('0', 300, 10, table='data2')\n self._check_data('1', 400, 10, table='data2')\n\n # Now test SplitQuery API works (used in MapReduce usually, but bringing\n # up a full MR-capable cluster is too much for this test environment)\n sql = 'select id, name from data'\n s = utils.vtgate.split_query(sql, 'test_keyspace', 4)\n self.assertEqual(len(s), 4)\n shard0count = 0\n shard1count = 0\n for q in s:\n if q['shard_part']['shards'][0] == '0':\n shard0count += 1\n if q['shard_part']['shards'][0] == '1':\n shard1count += 1\n self.assertEqual(shard0count, 2)\n self.assertEqual(shard1count, 2)\n\n # run the queries, aggregate the results, make sure we have all rows\n rows = {}\n for q in s:\n bindvars = {}\n for name, value in q['query']['bind_variables'].iteritems():\n # vtctl encodes bytes as base64.\n bindvars[name] = int(base64.standard_b64decode(value['value']))\n qr = utils.vtgate.execute_shards(\n q['query']['sql'],\n 'test_keyspace', ','.join(q['shard_part']['shards']),\n tablet_type='master', bindvars=bindvars)\n for r in qr['rows']:\n rows[int(r[0])] = r[1]\n self.assertEqual(len(rows), 20)\n expected = {}\n for i in xrange(10):\n expected[100 + i] = 'row %d' % (100 + i)\n expected[200 + i] = 'row %d' % (200 + i)\n self.assertEqual(rows, expected)",
"def test_get_id_range_for_partition_with_evenly_divisible():\n min_id = 1\n max_id = 100\n partition_size = 20\n id_range_item_count = max_id - min_id + 1 # this many individual IDs should be processed for continuous ID range\n assert id_range_item_count % partition_size == 0 # evenly divisible\n etl_config = {\"partition_size\": partition_size}\n ctrl = PostgresElasticsearchIndexerController(etl_config)\n ctrl.min_id = min_id\n ctrl.max_id = max_id\n ctrl.record_count = id_range_item_count # assume records exist for each ID in range\n ctrl.config[\"partitions\"] = ctrl.determine_partitions()\n assert ctrl.config[\"partitions\"] == ceil(id_range_item_count / partition_size)\n partition_range = range(0, ctrl.config[\"partitions\"])\n # First batch\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_range[0])\n assert lower_bound == min_id\n assert upper_bound == lower_bound + (partition_size - 1)\n # Second batch\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_range[1])\n assert lower_bound == min_id + partition_size\n assert upper_bound == lower_bound + (partition_size - 1)\n # Last batch should go all the way up to max_id\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_range[-1])\n assert lower_bound == (max_id - partition_size + 1) == (min_id + (partition_size * partition_range[-1]))\n assert upper_bound == max_id\n id_set = set(range(min_id, max_id + 1))\n assert _remove_seen_ids(ctrl, id_set) == set({})",
"def testAllWrite(self):\n import time,copy\n time.sleep(2)\n to_config = self.config['vdevs']['slave']['icsifaces'][0]\n from_config = self.config['vdevs']['master']['clientifaces'][0]\n points = self.config['vdevs']['slave']['points']\n client = ModbusRTU(to_config, points, from_config)\n\n pts = copy.deepcopy(self.config['vdevs']['slave']['points'])\n pts = [pt for pt in pts if pt['name']!='pressure'] #Can't write to pres\n for i in xrange(50):\n ptnames = [ pt['name'] for pt in pts ]\n pointsvalues = dict(zip(ptnames, [0]*len(ptnames)))\n reply = client.writePoints(pointsvalues)\n assert reply is None, \"Write returned value other than None: \" + str(reply)\n reply = client.readPoints(ptnames)\n #print \"Reply: \", reply\n for pt in ptnames:\n #assert value == reply[ptnames.index(pt)]\n if not 0 == reply[ptnames.index(pt)]: \n print pt, ' was not read properly.'"
]
| [
"0.81448984",
"0.6523661",
"0.61439437",
"0.6118131",
"0.60696036",
"0.5982776",
"0.5967827",
"0.59595704",
"0.59334314",
"0.5880416",
"0.58677423",
"0.5786828",
"0.5780153",
"0.5724465",
"0.5712325",
"0.568453",
"0.5681303",
"0.56687766",
"0.5657813",
"0.5655897",
"0.56269157",
"0.5597894",
"0.55859715",
"0.55737156",
"0.5572417",
"0.5566049",
"0.5562614",
"0.55595833",
"0.5558627",
"0.5555294"
]
| 0.79836094 | 1 |
Tests consistency of multiple writes to multiple partitions CASSANDRA-10981 | def test_multi_partition_consistent_reads_after_write(self):
self._consistent_reads_after_write_test(5) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_single_partition_consistent_reads_after_write(self):\n self._consistent_reads_after_write_test(1)",
"def test_very_many_partitions_in_fsic(self):\n fsics = {\"super\": {}, \"sub\": {\"\": {self.data[\"group1_id\"].id: 1, self.data[\"group2_id\"].id: 1}}}\n for i in range(10000):\n fsics[\"sub\"][uuid.uuid4().hex] = {uuid.uuid4().hex: i}\n self.transfer_session.client_fsic = json.dumps(fsics)\n self.transfer_session.server_fsic = json.dumps({\"super\": {}, \"sub\": {}})\n _queue_into_buffer_v2(self.transfer_session)\n # ensure all store and buffer records are buffered\n assertRecordsBuffered(self.data[\"group1_c1\"])\n assertRecordsBuffered(self.data[\"group1_c2\"])\n assertRecordsBuffered(self.data[\"group2_c1\"])",
"def verify_batch_consumer():\n\n # Consumer config\n conf = {'bootstrap.servers': bootstrap_servers,\n 'group.id': 'test.py',\n 'session.timeout.ms': 6000,\n 'enable.auto.commit': False,\n 'api.version.request': api_version_request,\n 'on_commit': print_commit_result,\n 'error_cb': error_cb,\n 'default.topic.config': {\n 'auto.offset.reset': 'earliest'\n }}\n\n # Create consumer\n c = confluent_kafka.Consumer(**conf)\n\n # Subscribe to a list of topics\n c.subscribe([topic])\n\n max_msgcnt = 1000\n batch_cnt = 100\n msgcnt = 0\n\n while msgcnt < max_msgcnt:\n # Consume until we hit max_msgcnt\n\n # Consume messages (error()==0) or event (error()!=0)\n msglist = c.consume(batch_cnt, 10.0)\n assert len(msglist) == batch_cnt, 'expected %d messages, not %d' % (batch_cnt, len(msglist))\n\n for msg in msglist:\n if msg.error():\n print('Consumer error: %s: ignoring' % msg.error())\n continue\n\n tstype, timestamp = msg.timestamp()\n print('%s[%d]@%d: key=%s, value=%s, tstype=%d, timestamp=%s' %\n (msg.topic(), msg.partition(), msg.offset(),\n msg.key(), msg.value(), tstype, timestamp))\n\n if (msg.offset() % 5) == 0:\n # Async commit\n c.commit(msg, asynchronous=True)\n elif (msg.offset() % 4) == 0:\n offsets = c.commit(msg, asynchronous=False)\n assert len(offsets) == 1, 'expected 1 offset, not %s' % (offsets)\n assert offsets[0].offset == msg.offset()+1, \\\n 'expected offset %d to be committed, not %s' % \\\n (msg.offset(), offsets)\n print('Sync committed offset: %s' % offsets)\n\n msgcnt += 1\n\n print('max_msgcnt %d reached' % msgcnt)\n\n # Get current assignment\n assignment = c.assignment()\n\n # Get cached watermark offsets\n # Since we're not making use of statistics the low offset is not known so ignore it.\n lo, hi = c.get_watermark_offsets(assignment[0], cached=True)\n print('Cached offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Query broker for offsets\n lo, hi = c.get_watermark_offsets(assignment[0], timeout=1.0)\n print('Queried offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Close consumer\n c.close()\n\n # Start a new client and get the committed offsets\n c = confluent_kafka.Consumer(**conf)\n offsets = c.committed(list(map(lambda p: confluent_kafka.TopicPartition(topic, p), range(0, 3))))\n for tp in offsets:\n print(tp)\n\n c.close()",
"def test_very_many_partitions_and_instances_in_fsic(self):\n fsics = {\"super\": {}, \"sub\": {\"\": {self.data[\"group1_id\"].id: 1, self.data[\"group2_id\"].id: 1}}}\n for i in range(99):\n fsics[\"sub\"][uuid.uuid4().hex] = {uuid.uuid4().hex: i for i in range(999)}\n self.transfer_session.client_fsic = json.dumps(fsics)\n self.transfer_session.server_fsic = json.dumps({\"super\": {}, \"sub\": {}})\n _queue_into_buffer_v2(self.transfer_session)\n # ensure all store and buffer records are buffered\n assertRecordsBuffered(self.data[\"group1_c1\"])\n assertRecordsBuffered(self.data[\"group1_c2\"])\n assertRecordsBuffered(self.data[\"group2_c1\"])",
"def _check_write_consistency(self):\n self.logger.warning('Not checking write consistency')",
"def test_redis_increase_replica_count_usual_case():",
"def test_eventualConsistencyRing(self) :\n\t\tringSize = self.RINGSIZE\n\t\tprint str(ringSize) + \" Nodes arranged in ring topology\"\n\n\t\tnodeList = self.createNodes(ringSize)\n\t\tself.addAppRecordDiff(nodeList)\n\n\t\t# i client , i+1 server \n\t\tsessIDlist = self.sessionsRing(nodeList)\n\n\t\tfor j in range (2) :\n \t\tfor i in range(ringSize) :\n\t\t\t\tself.fullDBReplication(nodeList[sessIDlist[i][0]], sessIDlist[i][2])\n\t\t\t\t\n\t\t\t\t# Print statements\n\t\t\t\tif i == ringSize -1 :\n\t\t\t\t\tprint \"Sync data between \" + nodeList[i].instanceID + \" and \" + nodeList[0].instanceID\n\t\t\t\telse :\n\t\t\t\t\tprint \"Sync data between \" + nodeList[i].instanceID + \" and \" + nodeList[i+1].instanceID\n\n\t\t# Asserts to show that all the nodes have the same data\n\t\tself.assertEqual(self.endConditionData(nodeList) , False)",
"def test_sub_doc_with_persistence_issues(self):\n\n if self.durability_level.upper() in [\n Bucket.DurabilityLevel.MAJORITY_AND_PERSIST_TO_ACTIVE,\n Bucket.DurabilityLevel.PERSIST_TO_MAJORITY]:\n self.log.critical(\"Test not valid for persistence durability\")\n return\n\n error_sim = dict()\n shell_conn = dict()\n cbstat_obj = dict()\n failover_info = dict()\n vb_info_info = dict()\n active_vbs_in_target_nodes = list()\n failover_info[\"init\"] = dict()\n failover_info[\"afterCrud\"] = dict()\n vb_info_info[\"init\"] = dict()\n vb_info_info[\"afterCrud\"] = dict()\n def_bucket = self.cluster.buckets[0]\n\n load_spec = dict()\n load_spec[\"doc_crud\"] = dict()\n load_spec[\"subdoc_crud\"] = dict()\n load_spec[\"doc_crud\"][\n MetaCrudParams.DocCrud.COMMON_DOC_KEY] = \"test_collections\"\n load_spec[\"doc_crud\"][\n MetaCrudParams.DocCrud.READ_PERCENTAGE_PER_COLLECTION] = 50\n load_spec[\"subdoc_crud\"][\n MetaCrudParams.SubDocCrud.INSERT_PER_COLLECTION] = 20\n load_spec[\"subdoc_crud\"][\n MetaCrudParams.SubDocCrud.UPSERT_PER_COLLECTION] = 10\n load_spec[\"subdoc_crud\"][\n MetaCrudParams.SubDocCrud.REMOVE_PER_COLLECTION] = 10\n\n self.log.info(\"Selecting nodes to simulate error condition\")\n target_nodes = DurabilityHelper.getTargetNodes(self.cluster,\n self.nodes_init,\n self.num_nodes_affected)\n\n # Create new docs for sub-doc operations to run\n self.load_data_for_sub_doc_ops()\n\n self.log.info(\"Will simulate error condition on %s\" % target_nodes)\n for node in target_nodes:\n # Create shell_connections\n shell_conn[node.ip] = RemoteMachineShellConnection(node)\n cbstat_obj[node.ip] = Cbstats(node)\n active_vbs = cbstat_obj[node.ip] .vbucket_list(def_bucket.name,\n \"active\")\n active_vbs_in_target_nodes += active_vbs\n vb_info_info[\"init\"][node.ip] = cbstat_obj[node.ip].vbucket_seqno(\n def_bucket.name)\n failover_info[\"init\"][node.ip] = \\\n cbstat_obj[node.ip].failover_stats(def_bucket.name)\n\n for node in target_nodes:\n # Perform specified action\n error_sim[node.ip] = CouchbaseError(self.log,\n shell_conn[node.ip],\n node=node)\n error_sim[node.ip].create(self.simulate_error,\n bucket_name=def_bucket.name)\n\n # Perform CRUDs with induced error scenario is active\n self.log.info(\"Perform 'insert', 'upsert', 'remove' mutations\")\n doc_loading_task = \\\n self.bucket_util.run_scenario_from_spec(\n self.task,\n self.cluster,\n self.cluster.buckets,\n load_spec,\n mutation_num=0,\n async_load=True)\n\n # Perform new scope/collection creation during doc ops in parallel\n self.__perform_collection_crud(mutation_num=1)\n\n # Wait for doc_loading to complete and validate the doc ops\n self.task_manager.get_task_result(doc_loading_task)\n if doc_loading_task.result is False:\n self.log_failure(\"Doc CRUDs failed with persistence issue\")\n\n # Revert the induced error condition\n for node in target_nodes:\n error_sim[node.ip].revert(self.simulate_error,\n bucket_name=def_bucket.name)\n\n # Fetch latest failover stats and validate the values are updated\n self.log.info(\"Validating failover and seqno cbstats\")\n for node in target_nodes:\n vb_info_info[\"afterCrud\"][node.ip] = \\\n cbstat_obj[node.ip].vbucket_seqno(def_bucket.name)\n failover_info[\"afterCrud\"][node.ip] = \\\n cbstat_obj[node.ip].failover_stats(def_bucket.name)\n\n # Failover validation\n val = \\\n failover_info[\"init\"][node.ip] \\\n == failover_info[\"afterCrud\"][node.ip]\n self.assertTrue(val, msg=\"Failover stats not updated\")\n\n # Seq_no validation (High level)\n val = \\\n 
vb_info_info[\"init\"][node.ip] \\\n != vb_info_info[\"afterCrud\"][node.ip]\n self.assertTrue(val, msg=\"vbucket seq_no not updated after CRUDs\")\n\n # Disconnect the shell connection\n for node in target_nodes:\n shell_conn[node.ip].disconnect()\n\n self.validate_test_failure()\n self.bucket_util._wait_for_stats_all_buckets(self.cluster,\n self.cluster.buckets)\n self.bucket_util.validate_docs_per_collections_all_buckets(\n self.cluster)",
"def test_complex_repair(self):\n session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)\n node1, node2, node3, node4, node5 = self.cluster.nodelist()\n\n # we create the base table with gc_grace_seconds=5 so batchlog will expire after 5 seconds\n session.execute(\"CREATE TABLE ks.t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\"\n \"WITH gc_grace_seconds = 5\")\n session.execute((\"CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Shutdown node2 and node3')\n node2.stop()\n node3.stop(wait_other_notice=True)\n\n logger.debug('Write initial data to node1 (will be replicated to node4 and node5)')\n for i in range(1000):\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)\".format(v=i))\n\n logger.debug('Verify the data in the MV on node1 with CL=ONE')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )\n\n logger.debug('Close connection to node1')\n session.cluster.shutdown()\n logger.debug('Shutdown node1, node4 and node5')\n node1.stop()\n node4.stop()\n node5.stop()\n\n logger.debug('Start nodes 2 and 3')\n node2.start()\n node3.start(wait_for_binary_proto=True)\n\n session2 = self.patient_cql_connection(node2)\n\n logger.debug('Verify the data in the MV on node2 with CL=ONE. No rows should be found.')\n for i in range(1000):\n assert_none(\n session2,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(i)\n )\n\n logger.debug('Write new data in node2 and node3 that overlap those in node1, node4 and node5')\n for i in range(1000):\n # we write i*2 as value, instead of i\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)\".format(v=i * 2))\n\n logger.debug('Verify the new data in the MV on node2 with CL=ONE')\n for i in range(1000):\n v = i * 2\n assert_one(\n session2,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(v),\n [v, v, 'a', 3.0]\n )\n\n logger.debug('Wait for batchlogs to expire from node2 and node3')\n time.sleep(5)\n\n logger.debug('Start remaining nodes')\n node1.start(wait_for_binary_proto=True)\n node4.start(wait_for_binary_proto=True)\n node5.start(wait_for_binary_proto=True)\n\n session = self.patient_cql_connection(node1)\n\n logger.debug('Read data from MV at QUORUM (old data should be returned)')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0],\n cl=ConsistencyLevel.QUORUM\n )\n\n logger.debug('Run global repair on node1')\n node1.repair()\n\n logger.debug('Read data from MV at quorum (new data should be returned after repair)')\n for i in range(1000):\n v = i * 2\n assert_one(\n session,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(v),\n [v, v, 'a', 3.0],\n cl=ConsistencyLevel.QUORUM\n )",
"def testConsistency(self):",
"def _base_replica_repair_test(self, fail_mv_lock=False):\n\n self.prepare(rf=3)\n node1, node2, node3 = self.cluster.nodelist()\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Write initial data')\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)\".format(v=i))\n\n self._replay_batchlogs()\n\n logger.debug('Verify the data in the MV with CL=ALL')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0],\n cl=ConsistencyLevel.ALL\n )\n\n logger.debug('Shutdown node1')\n node1.stop(wait_other_notice=True)\n logger.debug('Delete node1 data')\n node1.clear(clear_all=True)\n\n jvm_args = []\n if fail_mv_lock:\n if self.cluster.version() >= LooseVersion('3.10'): # CASSANDRA-10134\n jvm_args = ['-Dcassandra.allow_unsafe_replace=true', '-Dcassandra.replace_address={}'.format(node1.address())]\n jvm_args.append(\"-Dcassandra.test.fail_mv_locks_count=1000\")\n # this should not make Keyspace.apply throw WTE on failure to acquire lock\n node1.set_configuration_options(values={'write_request_timeout_in_ms': 100})\n logger.debug('Restarting node1 with jvm_args={}'.format(jvm_args))\n node1.start(wait_for_binary_proto=True, jvm_args=jvm_args)\n logger.debug('Shutdown node2 and node3')\n node2.stop(wait_other_notice=True)\n node3.stop(wait_other_notice=True)\n\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n\n logger.debug('Verify that there is no data on node1')\n for i in range(1000):\n assert_none(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i)\n )\n\n logger.debug('Restarting node2 and node3')\n node2.start(wait_for_binary_proto=True)\n node3.start(wait_for_binary_proto=True)\n\n # Just repair the base replica\n logger.debug('Starting repair on node1')\n node1.nodetool(\"repair ks t\")\n\n logger.debug('Verify data with cl=ALL')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )",
"def check_consistency(self, es):",
"def test_bulk_round_trip_with_timeouts(self):\n self._test_bulk_round_trip(nodes=1, partitioner=\"murmur3\", num_operations=100000,\n configuration_options={'range_request_timeout_in_ms': '200',\n 'write_request_timeout_in_ms': '100'},\n copy_from_options={'MAXINSERTERRORS': -1},\n skip_count_checks=True)",
"def verify_batch_consumer_performance():\n\n conf = {'bootstrap.servers': bootstrap_servers,\n 'group.id': uuid.uuid1(),\n 'session.timeout.ms': 6000,\n 'error_cb': error_cb,\n 'default.topic.config': {\n 'auto.offset.reset': 'earliest'\n }}\n\n c = confluent_kafka.Consumer(**conf)\n\n def my_on_assign(consumer, partitions):\n print('on_assign:', len(partitions), 'partitions:')\n for p in partitions:\n print(' %s [%d] @ %d' % (p.topic, p.partition, p.offset))\n consumer.assign(partitions)\n\n def my_on_revoke(consumer, partitions):\n print('on_revoke:', len(partitions), 'partitions:')\n for p in partitions:\n print(' %s [%d] @ %d' % (p.topic, p.partition, p.offset))\n consumer.unassign()\n\n c.subscribe([topic], on_assign=my_on_assign, on_revoke=my_on_revoke)\n\n max_msgcnt = 1000000\n bytecnt = 0\n msgcnt = 0\n batch_size = 1000\n\n print('Will now consume %d messages' % max_msgcnt)\n\n if with_progress:\n bar = Bar('Consuming', max=max_msgcnt,\n suffix='%(index)d/%(max)d [%(eta_td)s]')\n else:\n bar = None\n\n while msgcnt < max_msgcnt:\n # Consume until we hit max_msgcnt\n\n msglist = c.consume(num_messages=batch_size, timeout=20.0)\n\n for msg in msglist:\n if msg.error():\n if msg.error().code() == confluent_kafka.KafkaError._PARTITION_EOF:\n # Reached EOF for a partition, ignore.\n continue\n else:\n raise confluent_kafka.KafkaException(msg.error())\n\n bytecnt += len(msg)\n msgcnt += 1\n\n if bar is not None and (msgcnt % 10000) == 0:\n bar.next(n=10000)\n\n if msgcnt == 1:\n t_first_msg = time.time()\n\n if bar is not None:\n bar.finish()\n\n if msgcnt > 0:\n t_spent = time.time() - t_first_msg\n print('%d messages (%.2fMb) consumed in %.3fs: %d msgs/s, %.2f Mb/s' %\n (msgcnt, bytecnt / (1024*1024), t_spent, msgcnt / t_spent,\n (bytecnt / t_spent) / (1024*1024)))\n\n print('closing consumer')\n c.close()",
"def test_bulk_round_trip_default(self):\n self._test_bulk_round_trip(nodes=3, partitioner=\"murmur3\", num_operations=100000)",
"def test_really_complex_repair(self):\n session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)\n node1, node2, node3, node4, node5 = self.cluster.nodelist()\n\n # we create the base table with gc_grace_seconds=5 so batchlog will expire after 5 seconds\n session.execute(\"CREATE TABLE ks.t (id int, v int, v2 text, v3 decimal, PRIMARY KEY(id, v, v2))\"\n \"WITH gc_grace_seconds = 1\")\n session.execute((\"CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL AND v IS NOT NULL AND \"\n \"v2 IS NOT NULL PRIMARY KEY (v2, v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Shutdown node2 and node3')\n node2.stop(wait_other_notice=True)\n node3.stop(wait_other_notice=True)\n\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0)\")\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'a', 3.0)\")\n self._replay_batchlogs()\n logger.debug('Verify the data in the MV on node1 with CL=ONE')\n assert_all(session, \"SELECT * FROM ks.t_by_v WHERE v2 = 'a'\", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]])\n\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'b', 3.0)\")\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'b', 3.0)\")\n self._replay_batchlogs()\n logger.debug('Verify the data in the MV on node1 with CL=ONE')\n assert_all(session, \"SELECT * FROM ks.t_by_v WHERE v2 = 'b'\", [['b', 1, 1, 3.0], ['b', 2, 2, 3.0]])\n\n session.shutdown()\n\n logger.debug('Shutdown node1, node4 and node5')\n node1.stop()\n node4.stop()\n node5.stop()\n\n logger.debug('Start nodes 2 and 3')\n node2.start()\n node3.start(wait_for_binary_proto=True)\n\n session2 = self.patient_cql_connection(node2)\n session2.execute('USE ks')\n\n logger.debug('Verify the data in the MV on node2 with CL=ONE. 
No rows should be found.')\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'a'\")\n\n logger.debug('Write new data in node2 that overlap those in node1')\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'c', 3.0)\")\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'c', 3.0)\")\n self._replay_batchlogs()\n assert_all(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'c'\", [['c', 1, 1, 3.0], ['c', 2, 2, 3.0]])\n\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'd', 3.0)\")\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'd', 3.0)\")\n self._replay_batchlogs()\n assert_all(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'd'\", [['d', 1, 1, 3.0], ['d', 2, 2, 3.0]])\n\n logger.debug(\"Composite delete of everything\")\n session2.execute(\"DELETE FROM ks.t WHERE id = 1 and v = 1\")\n session2.execute(\"DELETE FROM ks.t WHERE id = 2 and v = 2\")\n self._replay_batchlogs()\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'c'\")\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'd'\")\n\n logger.debug('Wait for batchlogs to expire from node2 and node3')\n time.sleep(5)\n\n logger.debug('Start remaining nodes')\n node1.start(wait_for_binary_proto=True)\n node4.start(wait_for_binary_proto=True)\n node5.start(wait_for_binary_proto=True)\n\n # at this point the data isn't repaired so we have an inconsistency.\n # this value should return None\n assert_all(\n session2,\n \"SELECT * FROM ks.t_by_v WHERE v2 = 'a'\", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]],\n cl=ConsistencyLevel.QUORUM\n )\n\n logger.debug('Run global repair on node1')\n node1.repair()\n\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'a'\", cl=ConsistencyLevel.QUORUM)",
"def test_producer_send_messages_keyed_same_partition(self):\n first_part = 43\n second_part = 55\n client = Mock(reactor=MemoryReactorClock())\n client._api_versions = 0\n ret1 = Deferred()\n client.send_produce_request.side_effect = [ret1]\n client.topic_partitions = {self.topic: [first_part, second_part]}\n client.metadata_error_for_topic.return_value = False\n msgs1 = [self.msg(\"one\"), self.msg(\"two\")]\n msgs2 = [self.msg(\"odd_man_out\")]\n msgs3 = [self.msg(\"three\"), self.msg(\"four\")]\n key1 = b\"99\"\n key3 = b\"foo\"\n ack_timeout = 5\n\n # Even though we're sending keyed messages, we use the default\n # round-robin partitioner, since the requests are easier to predict\n producer = Producer(client, ack_timeout=ack_timeout, batch_send=True, batch_every_n=4)\n d1 = producer.send_messages(self.topic, key=key1, msgs=msgs1)\n d2 = producer.send_messages(self.topic, msgs=msgs2)\n d3 = producer.send_messages(self.topic, key=key3, msgs=msgs3)\n # Check the expected request was sent\n msgSet1 = create_message_set(\n [\n make_send_requests(msgs1, key=key1)[0],\n make_send_requests(msgs3, key=key3)[0],\n ],\n producer.codec,\n )\n msgSet2 = create_message_set(make_send_requests(msgs2), producer.codec)\n req1 = ProduceRequest(self.topic, first_part, msgSet1)\n req2 = ProduceRequest(self.topic, second_part, msgSet2)\n # Annoying, but order of requests is indeterminate...\n client.send_produce_request.assert_called_once_with(\n ANY, acks=producer.req_acks, timeout=ack_timeout, fail_on_error=False\n )\n self.assertEqual(sorted([req1, req2]), sorted(client.send_produce_request.call_args[0][0]))\n # Check results when \"response\" fires\n self.assertNoResult(d1)\n self.assertNoResult(d2)\n self.assertNoResult(d3)\n resp = [\n ProduceResponse(self.topic, first_part, 0, 10),\n ProduceResponse(self.topic, second_part, 0, 23),\n ]\n ret1.callback(resp)\n result = self.successResultOf(d1)\n self.assertEqual(result, resp[0])\n result = self.successResultOf(d2)\n self.assertEqual(result, resp[1])\n result = self.successResultOf(d3)\n self.assertEqual(result, resp[0])\n producer.stop()",
"def test_kafka_group_io_dataset_resume_primary_cg():\n import tensorflow_io.kafka as kafka_io\n\n # Write new messages to the topic\n for i in range(10, 100):\n message = f\"D{i}\"\n kafka_io.write_kafka(message=message, topic=\"key-partition-test\")\n # Read only the newly sent 90 messages\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\"session.timeout.ms=7000\", \"max.poll.interval.ms=8000\"],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(10, 100))\n )",
"def test_get_id_range_for_partition_with_evenly_divisible():\n min_id = 1\n max_id = 100\n partition_size = 20\n id_range_item_count = max_id - min_id + 1 # this many individual IDs should be processed for continuous ID range\n assert id_range_item_count % partition_size == 0 # evenly divisible\n etl_config = {\"partition_size\": partition_size}\n ctrl = PostgresElasticsearchIndexerController(etl_config)\n ctrl.min_id = min_id\n ctrl.max_id = max_id\n ctrl.record_count = id_range_item_count # assume records exist for each ID in range\n ctrl.config[\"partitions\"] = ctrl.determine_partitions()\n assert ctrl.config[\"partitions\"] == ceil(id_range_item_count / partition_size)\n partition_range = range(0, ctrl.config[\"partitions\"])\n # First batch\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_range[0])\n assert lower_bound == min_id\n assert upper_bound == lower_bound + (partition_size - 1)\n # Second batch\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_range[1])\n assert lower_bound == min_id + partition_size\n assert upper_bound == lower_bound + (partition_size - 1)\n # Last batch should go all the way up to max_id\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_range[-1])\n assert lower_bound == (max_id - partition_size + 1) == (min_id + (partition_size * partition_range[-1]))\n assert upper_bound == max_id\n id_set = set(range(min_id, max_id + 1))\n assert _remove_seen_ids(ctrl, id_set) == set({})",
"def test_add_node_after_wide_mv_with_range_deletions(self):\n\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int, v int, PRIMARY KEY (id, v)) WITH compaction = { 'class': 'SizeTieredCompactionStrategy', 'enabled': 'false' }\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n for i in range(10):\n for j in range(100):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=j))\n\n self.cluster.flush()\n\n for i in range(10):\n for j in range(100):\n assert_one(session, \"SELECT * FROM t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n for i in range(10):\n for j in range(100):\n if j % 10 == 0:\n session.execute(\"DELETE FROM t WHERE id = {} AND v >= {} and v < {}\".format(i, j, j + 2))\n\n self.cluster.flush()\n\n for i in range(10):\n for j in range(100):\n if j % 10 == 0 or (j - 1) % 10 == 0:\n assert_none(session, \"SELECT * FROM t WHERE id = {} and v = {}\".format(i, j))\n assert_none(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j))\n else:\n assert_one(session, \"SELECT * FROM t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n node4 = new_node(self.cluster, data_center=\"dc1\")\n node4.set_configuration_options(values={'max_mutation_size_in_kb': 25}) # CASSANDRA-11670\n logger.debug(\"Start join at {}\".format(time.strftime(\"%H:%M:%S\")))\n node4.start(wait_for_binary_proto=True, jvm_args=[\"-Dcassandra.migration_task_wait_in_seconds={}\".format(MIGRATION_WAIT)])\n\n session2 = self.patient_exclusive_cql_connection(node4)\n\n for i in range(10):\n for j in range(100):\n if j % 10 == 0 or (j - 1) % 10 == 0:\n assert_none(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j))\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j))\n else:\n assert_one(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n for i in range(10):\n for j in range(100, 110):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=j))\n\n for i in range(10):\n for j in range(110):\n if j < 100 and (j % 10 == 0 or (j - 1) % 10 == 0):\n assert_none(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j))\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j))\n else:\n assert_one(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])",
"def _test_bulk_round_trip(self, nodes, partitioner,\n num_operations, profile=None,\n stress_table='keyspace1.standard1',\n configuration_options=None,\n skip_count_checks=False,\n copy_to_options=None,\n copy_from_options=None):\n if configuration_options is None:\n configuration_options = {}\n if copy_to_options is None:\n copy_to_options = {}\n\n # The default truncate timeout of 10 seconds that is set in init_default_config() is not\n # enough for truncating larger tables, see CASSANDRA-11157\n if 'truncate_request_timeout_in_ms' not in configuration_options:\n configuration_options['truncate_request_timeout_in_ms'] = 60000\n\n self.prepare(nodes=nodes, partitioner=partitioner, configuration_options=configuration_options)\n\n ret = []\n\n def create_records():\n if not profile:\n logger.debug('Running stress without any user profile')\n self.node1.stress(['write', 'n={} cl=ALL'.format(num_operations), 'no-warmup', '-rate', 'threads=50'])\n else:\n logger.debug('Running stress with user profile {}'.format(profile))\n self.node1.stress(['user', 'profile={}'.format(profile), 'ops(insert=1)',\n 'n={} cl=ALL'.format(num_operations), 'no-warmup', '-rate', 'threads=50'])\n\n if skip_count_checks:\n return num_operations\n else:\n count_statement = SimpleStatement(\"SELECT COUNT(*) FROM {}\".format(stress_table), consistency_level=ConsistencyLevel.ALL,\n retry_policy=FlakyRetryPolicy(max_retries=3))\n ret = rows_to_list(self.session.execute(count_statement))[0][0]\n logger.debug('Generated {} records'.format(ret))\n assert ret >= num_operations, 'cassandra-stress did not import enough records'\n return ret\n\n def run_copy_to(filename):\n logger.debug('Exporting to csv file: {}'.format(filename.name))\n start = datetime.datetime.now()\n copy_to_cmd = \"CONSISTENCY ALL; COPY {} TO '{}'\".format(stress_table, filename.name)\n if copy_to_options:\n copy_to_cmd += ' WITH ' + ' AND '.join('{} = {}'.format(k, v) for k, v in copy_to_options.items())\n logger.debug('Running {}'.format(copy_to_cmd))\n result = self.run_cqlsh(cmds=copy_to_cmd)\n ret.append(result)\n logger.debug(\"COPY TO took {} to export {} records\".format(datetime.datetime.now() - start, num_records))\n\n def run_copy_from(filename):\n logger.debug('Importing from csv file: {}'.format(filename.name))\n start = datetime.datetime.now()\n copy_from_cmd = \"COPY {} FROM '{}'\".format(stress_table, filename.name)\n if copy_from_options:\n copy_from_cmd += ' WITH ' + ' AND '.join('{} = {}'.format(k, v) for k, v in copy_from_options.items())\n logger.debug('Running {}'.format(copy_from_cmd))\n result = self.run_cqlsh(cmds=copy_from_cmd)\n ret.append(result)\n logger.debug(\"COPY FROM took {} to import {} records\".format(datetime.datetime.now() - start, num_records))\n\n num_records = create_records()\n\n # Copy to the first csv files\n tempfile1 = self.get_temp_file()\n run_copy_to(tempfile1)\n\n # check all records generated were exported\n with io.open(tempfile1.name, encoding=\"utf-8\", newline='') as csvfile:\n assert num_records == sum(1 for _ in csv.reader(csvfile, quotechar='\"', escapechar='\\\\'))\n\n # import records from the first csv file\n logger.debug('Truncating {}...'.format(stress_table))\n self.session.execute(\"TRUNCATE {}\".format(stress_table))\n run_copy_from(tempfile1)\n\n # export again to a second csv file\n tempfile2 = self.get_temp_file()\n run_copy_to(tempfile2)\n\n # check the length of both files is the same to ensure all exported records were imported\n assert sum(1 for _ in open(tempfile1.name)) == sum(1 
for _ in open(tempfile2.name))\n\n return ret",
"def test_get_id_range_for_partition_with_one_over():\n min_id = 1\n max_id = 101\n partition_size = 20\n id_range_item_count = max_id - min_id + 1 # this many individual IDs should be processed for continuous ID range\n assert id_range_item_count % partition_size == 1 # one over the partition size\n etl_config = {\"partition_size\": partition_size}\n ctrl = PostgresElasticsearchIndexerController(etl_config)\n ctrl.min_id = min_id\n ctrl.max_id = max_id\n ctrl.record_count = id_range_item_count # assume records exist for each ID in range\n ctrl.config[\"partitions\"] = ctrl.determine_partitions()\n assert ctrl.config[\"partitions\"] == ceil(id_range_item_count / partition_size)\n partition_range = range(0, ctrl.config[\"partitions\"])\n # First batch\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_range[0])\n assert lower_bound == min_id\n assert upper_bound == lower_bound + (partition_size - 1)\n # Second batch\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_range[1])\n assert lower_bound == min_id + partition_size\n assert upper_bound == lower_bound + (partition_size - 1)\n # Last batch should go all the way up to max_id\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_range[-1])\n assert lower_bound == (min_id + (partition_size * partition_range[-1])) == 101\n assert upper_bound == max_id == 101\n id_set = set(range(min_id, max_id + 1))\n assert _remove_seen_ids(ctrl, id_set) == set({})",
"def test_merge_backup_with_failover_logs(self):\n self.log.info(\"Load 1st batch docs\")\n create_gen1 = BlobGenerator(\"ent-backup1\", \"ent-backup-\", self.value_size,\n end=self.num_items)\n self._load_all_buckets(self.master, create_gen1, \"create\", 0)\n failed_persisted_bucket = []\n rest = RestConnection(self.master)\n cluster_nodes = rest.get_nodes()\n for bucket in self.buckets:\n ready = RebalanceHelper.wait_for_stats_on_all(self.backupset.cluster_host,\n bucket.name, 'ep_queue_size',\n 0, timeout_in_seconds=120)\n if not ready:\n failed_persisted_bucket.append(bucket.name)\n if failed_persisted_bucket:\n self.fail(\"Buckets %s did not persisted.\" % failed_persisted_bucket)\n self.log.info(\"Stop persistence at each node\")\n clusters = copy.deepcopy(cluster_nodes)\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n for bucket in self.buckets:\n for node in clusters:\n shell.execute_command(\"%scbepctl%s %s:11210 -b %s stop\" % \\\n (self.cli_command_location,\n self.cmd_ext,\n node.ip,\n bucket.name))\n shell.disconnect()\n self.log.info(\"Load 2nd batch docs\")\n create_gen2 = BlobGenerator(\"ent-backup2\", \"ent-backup-\", self.value_size,\n end=self.num_items)\n self._load_all_buckets(self.master, create_gen2, \"create\", 0)\n self.sleep(5)\n self.log.info(\"Crash cluster via kill memcached\")\n for node in clusters:\n for server in self.servers:\n if node.ip == server.ip:\n num_entries = 4\n reach_num_entries = False\n while not reach_num_entries:\n shell = RemoteMachineShellConnection(server)\n shell.kill_memcached()\n ready = False\n while not ready:\n if not RestHelper(RestConnection(server)).is_ns_server_running():\n self.sleep(10)\n else:\n ready = True\n cmd = \"%scbstats%s %s:11210 failovers -u %s -p %s | grep num_entries \" \\\n \"| gawk%s '{printf $2}' | grep -m 5 '4\\|5\\|6\\|7'\" \\\n % (self.cli_command_location, self.cmd_ext, server.ip,\n \"cbadminbucket\", \"password\", self.cmd_ext)\n output, error = shell.execute_command(cmd)\n shell.disconnect()\n if output:\n self.log.info(\"number failover logs entries reached. %s \" % output)\n reach_num_entries = True\n self.backup_create()\n self.log.info(\"Start backup data\")\n self.backup_cluster()\n status, output, message = self.backup_list()\n if not status:\n self.fail(message)\n self.log.info(\"Load 3rd batch docs\")\n create_gen3 = BlobGenerator(\"ent-backup3\", \"ent-backup-\", self.value_size,\n end=self.num_items)\n self._load_all_buckets(self.master, create_gen3, \"create\", 0)\n self.backup_cluster()\n status, output, message = self.backup_list()\n if not status:\n self.fail(message)",
"def test_get_id_range_for_partition_with_empty_partitions():\n min_id = 1\n max_id = 100\n partition_size = 20\n id_range_item_count = max_id - min_id + 1 # this many individual IDs should be processed for continuous ID range\n record_ids = {1, 5, 7, 15, 19, 20, 41, 100}\n etl_config = {\"partition_size\": partition_size}\n ctrl = PostgresElasticsearchIndexerController(etl_config)\n ctrl.min_id = min_id\n ctrl.max_id = max_id\n ctrl.record_count = len(record_ids)\n ctrl.config[\"partitions\"] = ctrl.determine_partitions()\n assert ctrl.config[\"partitions\"] == ceil(id_range_item_count / partition_size)\n partition_range = range(0, ctrl.config[\"partitions\"])\n # First batch\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_range[0])\n assert lower_bound == min_id\n assert upper_bound == lower_bound + (partition_size - 1)\n # Second batch\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_range[1])\n assert lower_bound == min_id + partition_size\n assert upper_bound == lower_bound + (partition_size - 1)\n # Last batch should go all the way up to max_id\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_range[-1])\n assert lower_bound == (min_id + (partition_size * partition_range[-1]))\n assert upper_bound == max_id\n assert _remove_seen_ids(ctrl, record_ids) == set({})",
"def test_update_values(self):\r\n partition = uuid4()\r\n for i in range(5):\r\n TestQueryUpdateModel.create(partition=partition, cluster=i, count=i, text=str(i))\r\n\r\n # sanity check\r\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\r\n assert row.cluster == i\r\n assert row.count == i\r\n assert row.text == str(i)\r\n\r\n # perform update\r\n TestQueryUpdateModel.objects(partition=partition, cluster=3).update(count=6)\r\n\r\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\r\n assert row.cluster == i\r\n assert row.count == (6 if i == 3 else i)\r\n assert row.text == str(i)",
"def test_update_values(self):\n partition = uuid4()\n for i in range(5):\n TestQueryUpdateModel.create(partition=partition, cluster=i, count=i, text=str(i))\n\n # sanity check\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\n self.assertEqual(row.cluster, i)\n self.assertEqual(row.count, i)\n self.assertEqual(row.text, str(i))\n\n # perform update\n TestQueryUpdateModel.objects(partition=partition, cluster=3).update(count=6)\n\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\n self.assertEqual(row.cluster, i)\n self.assertEqual(row.count, 6 if i == 3 else i)\n self.assertEqual(row.text, str(i))",
"def test_concurrent_updates(self):\r\n instance = TestCounterModel.create()\r\n new1 = TestCounterModel.get(partition=instance.partition)\r\n new2 = TestCounterModel.get(partition=instance.partition)\r\n\r\n new1.counter += 5\r\n new1.save()\r\n new2.counter += 5\r\n new2.save()\r\n\r\n actual = TestCounterModel.get(partition=instance.partition)\r\n assert actual.counter == 10",
"def aknowledged_by_batchlog_set_when_batchlog_write_succeeds_test(self):\n cursor = self.prepare(nodes=3)\n # kill one of the nodes so that batchlog will be written, but the write will fail.\n self.cluster.nodelist()[-1].stop(gently=False)\n self.assert_timedout(cursor, \"\"\"\n BEGIN BATCH\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n APPLY BATCH\n \"\"\", ConsistencyLevel.THREE, acknowledged_by_batchlog=True)",
"def test_sub_doc_non_overlapping_similar_crud(self):\n doc_ops = self.input.param(\"op_type\",\n DocLoading.Bucket.SubDocOps.INSERT)\n\n # Create new docs for sub-doc operations to run\n self.load_data_for_sub_doc_ops()\n\n # Create required doc_generators for CRUD ops\n doc_load_template = dict()\n doc_load_template[\"doc_crud\"] = dict()\n doc_load_template[\"subdoc_crud\"] = dict()\n doc_load_template[MetaCrudParams.DURABILITY_LEVEL] = \"\"\n doc_load_template[MetaCrudParams.COLLECTIONS_CONSIDERED_FOR_CRUD] = 3\n\n doc_load_template[\"doc_crud\"][\n MetaCrudParams.DocCrud.COMMON_DOC_KEY] = \"test_collections\"\n doc_load_template[\"doc_crud\"][\n MetaCrudParams.DocCrud.READ_PERCENTAGE_PER_COLLECTION] = 50\n\n if DocLoading.Bucket.SubDocOps.INSERT in doc_ops:\n doc_load_template[\"subdoc_crud\"][\n MetaCrudParams.SubDocCrud.INSERT_PER_COLLECTION] = 20\n elif DocLoading.Bucket.SubDocOps.UPSERT in doc_ops:\n doc_load_template[\"subdoc_crud\"][\n MetaCrudParams.SubDocCrud.UPSERT_PER_COLLECTION] = 10\n elif DocLoading.Bucket.SubDocOps.REMOVE in doc_ops:\n doc_load_template[\"subdoc_crud\"][\n MetaCrudParams.SubDocCrud.REMOVE_PER_COLLECTION] = 10\n\n async_write_crud_spec = deepcopy(doc_load_template)\n sync_write_crud_spec = deepcopy(doc_load_template)\n\n sync_write_crud_spec[MetaCrudParams.DURABILITY_LEVEL] = \\\n self.durability_level\n\n async_write_loading_task = \\\n self.bucket_util.run_scenario_from_spec(\n self.task,\n self.cluster,\n self.cluster.buckets,\n async_write_crud_spec,\n mutation_num=1,\n async_load=True)\n sync_write_loading_task = \\\n self.bucket_util.run_scenario_from_spec(\n self.task,\n self.cluster,\n self.cluster.buckets,\n sync_write_crud_spec,\n mutation_num=1,\n async_load=True)\n\n # Wait for all task to complete\n self.task.jython_task_manager.get_task_result(async_write_loading_task)\n self.task.jython_task_manager.get_task_result(sync_write_loading_task)\n\n # Validate CRUD loading results\n self.bucket_util.validate_doc_loading_results(async_write_loading_task)\n self.bucket_util.validate_doc_loading_results(sync_write_loading_task)\n\n if async_write_loading_task.result is False:\n self.log_failure(\"Doc_ops failed in async_write_task\")\n if sync_write_loading_task.result is False:\n self.log_failure(\"Doc_ops failed in sync_write_task\")\n\n # Verify doc count and other stats\n self.bucket_util._wait_for_stats_all_buckets(self.cluster,\n self.cluster.buckets)\n self.bucket_util.validate_docs_per_collections_all_buckets(\n self.cluster)",
"def test_add_node_after_very_wide_mv(self):\n\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int, v int, PRIMARY KEY (id, v))\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n for i in range(5):\n for j in range(5000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=j))\n\n self.cluster.flush()\n\n for i in range(5):\n for j in range(5000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n node4 = new_node(self.cluster, data_center=\"dc1\")\n node4.set_configuration_options(values={'max_mutation_size_in_kb': 25}) # CASSANDRA-11670\n logger.debug(\"Start join at {}\".format(time.strftime(\"%H:%M:%S\")))\n node4.start(wait_for_binary_proto=True, jvm_args=[\"-Dcassandra.migration_task_wait_in_seconds={}\".format(MIGRATION_WAIT)])\n\n session2 = self.patient_exclusive_cql_connection(node4)\n\n for i in range(5):\n for j in range(5000):\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n for i in range(5):\n for j in range(5100):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=j))\n\n for i in range(5):\n for j in range(5100):\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])"
]
| [
"0.75174904",
"0.6304416",
"0.6260457",
"0.6260355",
"0.60834306",
"0.6013536",
"0.5997093",
"0.59896755",
"0.5972248",
"0.595207",
"0.59135675",
"0.5900774",
"0.58920884",
"0.58596706",
"0.5805482",
"0.5766768",
"0.573955",
"0.5716366",
"0.5706905",
"0.5689456",
"0.5642045",
"0.56339043",
"0.5629975",
"0.5614578",
"0.5601033",
"0.5585046",
"0.55600643",
"0.55569154",
"0.5545162",
"0.5539703"
]
| 0.7950631 | 0 |
Validate (User | Group) payload from NEXT being added to AD. | def validate_create_entry(payload, data_type):
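    # Required fields depend on the entry type: users must supply cn and
    # userPrincipalName, groups must supply groupType; any other data_type is rejected.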
new_entry_required_fields = ["distinguishedName"]
new_entry_prohibited_fields = ["objectGUID", "whenChanged"]
if data_type == "user":
new_entry_required_fields.append("cn")
new_entry_required_fields.append("userPrincipalName")
elif data_type == "group":
new_entry_required_fields.append("groupType")
else:
raise ValidationException(
"Payload does not have the data_type of user or group."
)
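    # Reject the payload outright if any required attribute is missing.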
for required_field in new_entry_required_fields:
if required_field not in payload:
raise ValidationException(
"Required field: '{}' is missing".format(required_field)
)
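    # AD-managed attributes (objectGUID, whenChanged) are logged and dropped
    # from the payload instead of failing the whole request.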
for prohibited_field in new_entry_prohibited_fields:
if prohibited_field in payload:
LOGGER.info(
"Payload contains prohibited field %s. Removing prohibited field from payload",
prohibited_field,
)
payload.pop(prohibited_field, None)
return payload | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate(self, data):\n user_type = 3\n return validate_register_user(self, data, user_type)",
"def fusion_api_validate_group_and_roles(self, body, api=None, headers=None):\n return self.LoginDomainsGroupToRoleMapping.validate(body, api, headers)",
"def validate(self, data):\n groups = data['groups']\n if all(self.context['request'].user.has_perm(\"change_group\", group) for group in groups) or set(groups) <= set(\n self.context['request'].user.groups.all()):\n return data\n else:\n raise PermissionDenied()",
"def validate(self, data):\n request = self.context.get('request')\n data['poster'] = request.user\n\n return validate_complete_address(data)",
"def validate_payload(payload):\n\n if not isinstance(payload, dict):\n raise Exception(\"payload is a %s, not a dictionary\" % type(payload))\n\n if \"nmo\" not in payload:\n raise Exception(\"No nmo in payload\")\n\n if \"job\" not in payload[\"nmo\"]:\n raise Exception(\"No job in nmo \\nnmo is %s\" % payload[\"nmo\"])\n\n if \"task\" not in payload[\"nmo\"]:\n raise Exception(\"No task in nmo \\nnmo is %s\" % payload[\"nmo\"])\n\n try:\n isGroup = payload['nmo']['source']['misc']['isGroup']\n except:\n isGroup = False\n\n if \"jsonld\" not in payload and not isGroup:\n raise Exception(\"No jsonld in payload \\nPayload is:- %s\" % payload)",
"def validate(self, data):\n user_type = 3\n return validate_login_user(self, data, user_type)",
"def validate(cls, data, errors):",
"def check_insert_group_user(self, id_user:int, id_group:int) -> bool:\n try:\n value_list = self.cursor.execute(f\"SELECT * FROM {table_user_group_connect} WHERE id_user={id_user} AND id_group={id_group};\").fetchone()\n if value_list:\n return True\n return False\n except Exception as e:\n msg = f\"We faced problems with the check previous insertion on th. Mistake: {e} \"\n self.proceed_error(msg)\n return False",
"def _validatePayload(request):\n return {\n 'name': _validateField(request, 'name'),\n 'email': _validateField(request, 'email'),\n 'password': _validateField(request, 'password'),\n }",
"def test_add_user_invalid_payload(self):\n with self.client:\n auth_headers = login_test_user(self.client)\n response = self.client.post('/users',\n data = json.dumps(dict()),\n content_type='application/json',\n headers = auth_headers\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 400)\n self.assertIn('Invalid payload', data['message'])\n self.assertIn('fail', data['status'])",
"def validate_new_group(link_id_field, scope_path_field,\n group_logic):\n\n def wrapper(self):\n \"\"\"Decorator wrapper method.\n \"\"\"\n cleaned_data = self.cleaned_data\n\n fields = {}\n\n link_id = cleaned_data.get(link_id_field)\n\n if link_id:\n fields['link_id'] = link_id\n\n scope_path = cleaned_data.get(scope_path_field)\n if scope_path:\n fields['scope_path'] = scope_path\n\n # check if there is already a group for the given fields\n group_entity = group_logic.logic.getForFields(fields, unique=True)\n\n if group_entity:\n # add the error message to the link id field\n self._errors[link_id_field] = ErrorList([DEF_LINK_ID_IN_USE_MSG])\n del cleaned_data[link_id_field]\n # return the new cleaned_data\n return cleaned_data\n\n return cleaned_data\n return wrapper",
"def test_020_add_user_to_group(self):\n testflow.step(\"Adding user %s to group %s\", TEST_USER1, TEST_GROUP1)\n assert MANAGE_CLI.run(\n 'useradd',\n TEST_GROUP1,\n user=TEST_USER1\n )[0], \"Failed to add user to group '%s'\" % TEST_GROUP1\n\n testflow.step(\"Adding nonexisting user to group %s\", TEST_GROUP1)\n assert not MANAGE_CLI.run(\n 'useradd',\n TEST_GROUP1,\n user='nonsense'\n )[0], \"Possible to add nonexisting user to group\"\n\n testflow.step(\"Adding user %s to nonexisting group\", TEST_USER2)\n assert not MANAGE_CLI.run(\n 'useradd',\n 'nonsense',\n user=TEST_USER2\n )[0], \"Possible to add user to nonexisting group\"",
"def _validate_user(_):\n pass",
"def test_resource_user_resource_add_user_groups_for_user_post(self):\n pass",
"def validate_update_entry(payload, data_type):\n updated_required_fields = [\"distinguishedName\"]\n prohibited_updated_group_fields = [\"objectGUID\", \"whenCreated\"]\n\n if data_type == \"user\":\n prohibited_updated_group_fields.append(\"cn\")\n elif data_type == \"group\":\n prohibited_updated_group_fields.append(\"groupType\")\n prohibited_updated_group_fields.append(\"name\")\n else:\n raise ValidationException(\n \"Payload does not have the data_type of user or group.\"\n )\n\n for required_field in updated_required_fields:\n if required_field not in payload:\n raise ValidationException(\n \"Required field: '{}' is missing\".format(required_field)\n )\n\n for prohibited_field in prohibited_updated_group_fields:\n if prohibited_field in payload:\n LOGGER.info(\n \"Payload contains prohibited field %s. Removing prohibited field from payload\",\n prohibited_field,\n )\n payload.pop(prohibited_field, None)\n\n return payload",
"def validate(self, attrs):\n tag_name = attrs['tag_name']\n club = attrs['club']\n request = self.context['request']\n profile = UserProfile.objects.get(user=request.user)\n if (club not in profile.get_club_privileges() and\n club not in profile.get_workshop_privileges().values_list('club', flat=True)):\n raise serializers.ValidationError(\"You are not allowed to create tag for this club\")\n if Tag.objects.filter(tag_name=tag_name, club=club):\n raise serializers.ValidationError(\"The tag already exists for this club\")\n return attrs",
"def add_new_member(self, event):\n body = event['body']\n body = json.loads(body)\n\n required_fields = ['group_id', 'new_user_id']\n for f in required_fields:\n if f not in body:\n return get_bad_request('POST body missing field {}'.format(f))\n\n group_id = body['group_id']\n new_user_id = body['new_user_id']\n \n user = self.mealShareUsers.get_user_cognito_data(event)\n current_user = user['user_id']\n \n # Requesting user must already be a member\n if not self.mealShareGroups.is_user_in_group(current_user, str(group_id)):\n return {\n 'statusCode': 401,\n 'statusMessage': 'User {} is not a member of the group ID {} and can not add a person to it'.format(current_user, group_id),\n 'group_id': group_id,\n 'new_user_id': new_user_id\n }\n \n # Check if adding was successful\n success = self.mealShareGroups.add_user_to_group(new_user_id, group_id)\n if success:\n return {\n 'statusCode': 200,\n 'statusMessage': 'Successfully added {} to group {}'.format(new_user_id, group_id),\n 'group_id': group_id,\n 'new_user_id': new_user_id\n }\n else:\n return {\n 'statusCode': 500,\n 'statusMessage': 'FAILED to add user {} to group {} by {}'.format(new_user_id, group_id, current_user),\n 'group_id': group_id,\n 'new_user_id': new_user_id\n }",
"def validate(self, data):\n\n def getattr_patched(attr_name):\n \"\"\"\n This utility function retrieves 'attr_name' from data if it is present,\n otherwise it uses the value from self.instance. This is necessary because\n data will not have entries for all the fields in the Model\n if a partial update (PATCH) is performed.\n \"\"\"\n if attr_name in data:\n return data[attr_name]\n if self.instance and hasattr(self.instance, attr_name):\n return getattr(self.instance, attr_name)\n return None\n\n step = getattr_patched(\"step\")\n user_responses = getattr_patched(\"user_responses\")\n\n has_required_inputs = bool(\n WorkflowStepUserInput.objects.filter(workflow_step=step, required=True)\n )\n\n workflow_collection_engagement = getattr_patched(\n \"workflow_collection_engagement\"\n )\n\n workflow_collection: WorkflowCollection = (\n workflow_collection_engagement.workflow_collection\n )\n\n state: EngagementStateType = workflow_collection_engagement.state\n\n # CHECK 1: Does the specified step belong to a workflow in the specified collection?\n if not workflow_collection.workflowcollectionmember_set.filter(\n workflow__workflowstep=step\n ):\n raise serializers.ValidationError(\n \"Step must belong to a workflow in the collection\"\n )\n\n \"\"\"\n CHECK 2\n Usually, the UUID of the step being submitted must be either match \n state['next']['step_id'] or state['previous']['step_id'] to prevent the user \n from getting a sort of Frankenstein engagement with messed up data.\n\n However, there are a couple of cavaets to this if the collection is \n an unordered activity.\n\n The first is that a user can start such an engagement on any workflow.\n The second is that they can move to any other workflow after completing\n a workflow.\n\n In BOTH of these scenarios the state of the engagement will have a None \n value for both state[\"next\"][\"step_id\"] and state[\"previous\"][\"step_id\"] values.\n\n We will search for that condition, and if present, allow the user to submit\n data for any step that is the first step of a collection workflow.\n \"\"\"\n if (\n workflow_collection.category == \"ACTIVITY\"\n and not workflow_collection.ordered\n and state[\"next\"][\"step_id\"] == None\n and state[\"previous\"][\"step_id\"] == None\n ):\n if WorkflowStep.objects.filter(\n workflow=step.workflow, order__lt=step.order\n ):\n raise serializers.ValidationError(\n \"Posted step must be the first step in a workflow\"\n )\n\n else:\n if step.id not in (state[\"next\"][\"step_id\"], state[\"previous\"][\"step_id\"]):\n raise serializers.ValidationError(\n \"Posted step must be next step or previous step.\"\n )\n\n \"\"\"EXAMPLE JSON PAYLOAD\n\n {\n \"detail\": \"http://localhost:8000/api/workflow_system/users/self/workflows/engagements/6dfe24d5-9e2d-4308-9c33-e878a3d378b4/details/ad4e2263-d468-4adb-9c0a-b96740ccacd1/\",\n \"workflow_collection_engagement\": \"6dfe24d5-9e2d-4308-9c33-e878a3d378b4\",\n \"step\": \"353a1aba-57fd-4183-802e-083d53863601\",\n \"user_responses\": [\n {\n \"submittedTime\": \"2021-07-26 18:33:06.731050+00:00\",\n \"inputs\": [\n {\n \"stepInputID\": \"758f482d-3eb0-4779-bf2a-bad9e452ea0e\", \n \"stepInputUIIdentifier\": \"question_1\",\n \"userInput\": \"Red\"\n },\n {\n \"stepInputID\": \"96e7f658-7f08-4432-b3d1-f483f01aa19b\", \n \"stepInputUIIdentifier\": \"question_2\",\n \"userInput\": false\n },\n {\n \"stepInputID\": \"2312304f-ceb3-4fea-b93f-94420060b238\", \n \"stepInputUIIdentifier\": \"question_3\",\n \"userInput\": \"hi\"\n }\n ]\n },\n {\n \"submittedTime\": 
\"2021-07-26 18:33:06.731050+00:00\",\n \"inputs\": [\n {\n \"stepInputID\": \"758f482d-3eb0-4779-bf2a-bad9e452ea0e\", \n \"stepInputUIIdentifier\": \"question_1\",\n \"userInput\": \"Red\"\n },\n {\n \"stepInputID\": \"96e7f658-7f08-4432-b3d1-f483f01aa19b\", \n \"stepInputUIIdentifier\": \"question_2\",\n \"userInput\": true\n },\n {\n \"stepInputID\": \"2312304f-ceb3-4fea-b93f-94420060b238\", \n \"stepInputUIIdentifier\": \"question_3\",\n \"userInput\": \"hi\"\n }\n ]\n }\n ],\n \"started\": \"2021-07-26T08:00:28-05:00\",\n \"finished\": null\n }\n\n \"\"\"\n\n # CHECK 4\n # 1: Ensure all required attributes are present for each question in the payload.\n # 2: Ensure user input data in payload corresponds to actual, defined user inputs for the step.\n # 3: Sorted user inputs for further validation in CHECK 5.\n collected_user_inputs_by_step_input_id = {}\n\n # Outer Loop: User Response Sets\n for index, user_input_set in enumerate(user_responses):\n\n # Inner Loop: Each Input in the Response Set\n for user_input in user_input_set[\"inputs\"]:\n\n # Ensure required keys are present for each input.\n try:\n step_input_id = user_input[\"stepInputID\"]\n step_input_UI_identifier = user_input[\"stepInputUIIdentifier\"]\n response = user_input[\"userInput\"]\n except KeyError as e:\n raise serializers.ValidationError(\n \"Missing key in questions entry {}\".format(e.args[0])\n )\n\n if not WorkflowStepUserInput.objects.filter(\n id=step_input_id, ui_identifier=step_input_UI_identifier\n ):\n raise serializers.ValidationError(\n f\"No step with given stepInputID {step_input_id} and stepInputUIIdentifier {step_input_UI_identifier} exists.\"\n )\n\n # Add the user input to our sorted collection for further checks.\n if step_input_id not in collected_user_inputs_by_step_input_id.keys():\n collected_user_inputs_by_step_input_id[step_input_id] = {}\n collected_user_inputs_by_step_input_id[step_input_id][\n index\n ] = user_input\n\n # CHECK 5 - Final Checks\n # Evaluate each defined WorkflowStepUserInput object for the step\n # and make sure that required answers are present and conform\n # to the specification for the object.\n for step_input in WorkflowStepUserInput.objects.filter(workflow_step=step):\n step_input_id = str(step_input.id)\n\n # Determine if the user has one or more answers for the current WorkflowStepUserInput\n if step_input_id not in collected_user_inputs_by_step_input_id:\n # No answers. Now see if answers were required.\n if step_input.required:\n raise serializers.ValidationError(\n \"A response is required, but missing, for step_input id {}\".format(\n step_input_id\n )\n )\n\n else:\n # TODO: This checking process, in general, could probably benefit\n # from a little bit of clean-up. This is too broad in that it will\n # handle both \"incorrect\" answers and radical schema violations in the same way.\n responses_to_input = collected_user_inputs_by_step_input_id[\n step_input_id\n ]\n for index, response in responses_to_input.items():\n try:\n jsonschema.validate(\n instance=response, schema=step_input.response_schema\n )\n except jsonschema.ValidationError:\n # This answer is not valid\n for entry in user_responses[index][\"inputs\"]:\n if step_input_id == entry[\"stepInputID\"]:\n entry[\"is_valid\"] = False\n break\n else:\n # This is!\n for entry in user_responses[index][\"inputs\"]:\n if step_input_id == entry[\"stepInputID\"]:\n entry[\"is_valid\"] = True\n break\n\n return data",
"def validate_payload(cls, event):\n # TODO: Use invenio-jsonschemas/jsonresolver instead of this\n # Validate against Event JSONSchema\n # NOTE: raises `jsonschemas.ValidationError`\n cls._jsonschema_validator.validate(event)\n\n # Validate using marshmallow loader\n for payload in event:\n errors = RelationshipSchema(check_existing=True).validate(payload)\n if errors:\n raise MarshmallowValidationError(str(errors) + \"payload\" + str(payload))",
"def test_new_user(self):\n resp = self.app.post('/users', data=json.dumps(self.test_user1_data))\n assert resp.status_code == 200\n data = json.loads(resp.data)\n for key in ['first_name', 'last_name', 'userid', 'groups']:\n assert key in data\n assert data['first_name'] == self.test_user1_first\n assert data['last_name'] == self.test_user1_last\n assert data['userid'] == self.test_user1_userid\n for groupid in self.test_user1_groups:\n assert groupid in data['groups']",
"def validate():",
"def validate(self, attrs):\n data = super().validate(attrs)\n\n # Get the fields from the user profile serializer.\n serializer = UserSerializerWithToken(self.user).data\n\n for fields, values in serializer.items():\n data[fields] = values\n\n #print('token:', data)\n\n return data",
"def validate(self, data):\n # if data['is_private'] and data['contestants']:\n # raise serializers.ValidationError(\"Can not be private and compete for an award.\")\n return data",
"def validate(self, registration):\n self.l.info(\"Starting registration validation\")\n\n validation_errors = []\n\n # Check if registrant_id is a valid UUID\n if not utils.is_valid_uuid(registration.registrant_id):\n validation_errors += [\"Invalid UUID registrant_id\"]\n\n # Check that required fields are provided and valid\n data_fields = registration.data.keys()\n\n if registration.reg_type == \"pmtct_prebirth\":\n validation_errors += self.check_lang(data_fields, registration)\n validation_errors += self.check_mom_dob(data_fields, registration)\n validation_errors += self.check_edd(data_fields, registration)\n validation_errors += self.check_operator_id(\n data_fields, registration)\n\n elif registration.reg_type == \"pmtct_postbirth\":\n validation_errors += self.check_lang(data_fields, registration)\n validation_errors += self.check_mom_dob(data_fields, registration)\n validation_errors += self.check_baby_dob(data_fields, registration)\n validation_errors += self.check_operator_id(\n data_fields, registration)\n\n elif registration.reg_type == \"nurseconnect\":\n validation_errors += self.check_faccode(\n data_fields, registration)\n validation_errors += self.check_operator_id(\n data_fields, registration)\n validation_errors += self.check_msisdn_registrant(\n data_fields, registration)\n validation_errors += self.check_msisdn_device(\n data_fields, registration)\n validation_errors += self.check_lang(\n data_fields, registration)\n\n elif registration.reg_type == \"momconnect_prebirth\":\n # Checks that apply to clinic, chw, public\n validation_errors += self.check_operator_id(\n data_fields, registration)\n validation_errors += self.check_msisdn_registrant(\n data_fields, registration)\n validation_errors += self.check_msisdn_device(\n data_fields, registration)\n validation_errors += self.check_lang(\n data_fields, registration)\n validation_errors += self.check_consent(\n data_fields, registration)\n\n # Checks that apply to clinic, chw\n if registration.source.authority in [\"hw_full\", \"hw_partial\"]:\n validation_errors += self.check_id(\n data_fields, registration)\n\n # Checks that apply to clinic only\n if registration.source.authority == \"hw_full\":\n validation_errors += self.check_edd(\n data_fields, registration)\n validation_errors += self.check_faccode(\n data_fields, registration)\n\n elif registration.reg_type == \"momconnect_postbirth\":\n validation_errors.append(\"Momconnect postbirth not yet supported\")\n\n elif registration.reg_type == \"loss_general\":\n validation_errors.append(\"Loss general not yet supported\")\n\n # Evaluate if there were any problems, save and return\n if len(validation_errors) == 0:\n self.l.info(\"Registration validated successfully - updating \"\n \"registration object\")\n registration.validated = True\n registration.save()\n self.l.info(\"Registration object updated.\")\n return True\n else:\n self.l.info(\"Registration validation failed - updating \"\n \"registration object\")\n registration.data[\"invalid_fields\"] = validation_errors\n registration.save()\n self.l.info(\"Registration object updated.\")\n return False",
"def test_new_user_400(self):\n # Missing First Name\n user1_body = deepcopy(self.test_user1_data)\n del(user1_body['first_name'])\n resp = self.app.post('/users', data=json.dumps(user1_body))\n assert resp.status_code == 400\n\n # Missing Last Name\n user1_body = deepcopy(self.test_user1_data)\n del(user1_body['last_name'])\n resp = self.app.post('/users', data=json.dumps(user1_body))\n assert resp.status_code == 400\n\n # Missing UserID\n user1_body = deepcopy(self.test_user1_data)\n del(user1_body['userid'])\n resp = self.app.post('/users', data=json.dumps(user1_body))\n assert resp.status_code == 400\n\n # Bad data type for groups\n user1_body = deepcopy(self.test_user1_data)\n user1_body['groups'] = self.test_group1_groupid\n resp = self.app.post('/users', data=json.dumps(user1_body))\n assert resp.status_code == 400",
"def _validate_user(instance: typing.Dict[str, typing.Any], schema: typing.Dict[str, typing.Any], path: typing.List[str]) -> None:\n if not isinstance(instance, dict):\n raise ValidationError('instance must be dict', path)\n valid_keys = {'_type', 'user_id'}\n required_keys = valid_keys\n schema_keys = set(instance.keys())\n invalid_keys = schema_keys - valid_keys - opt_federation_keys\n if invalid_keys:\n raise ValidationError('unexpected keys in schema: {}'.format(invalid_keys), path)\n missing_keys = required_keys - schema_keys\n if missing_keys:\n raise ValidationError('missing keys in schema: {}'.format(missing_keys), path)\n if instance['_type'] != 'user':\n raise ValidationError('expected _type \"user\"', path)\n if not isinstance(instance['user_id'], int):\n raise ValidationError('user_id must be int', path)\n if 'component_uuid' in instance and instance['component_uuid'] != flask.current_app.config['FEDERATION_UUID']:\n pass # TODO\n else:\n try:\n users.check_user_exists(user_id=instance['user_id'])\n except UserDoesNotExistError:\n raise ValidationError('user does not exist', path)",
"def test_add_user_invalid_payload_keys(self):\n with self.client:\n auth_header = login_test_user(self.client)\n response = self.client.post('/users',\n data = json.dumps(dict(email=\"[email protected]\")),\n content_type='application/json',\n headers=auth_header\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 400)\n self.assertIn('Invalid payload keys', data['message'])\n self.assertIn('fail', data['status'])",
"def group_members_validator(field, presentation, context):\n\n field.default_validate(presentation, context)\n\n values = getattr(presentation, field.name)\n if values is not None:\n node_templates = \\\n context.presentation.get('service_template', 'topology_template', 'node_templates') \\\n or {}\n for value in values:\n if value not in node_templates:\n report_issue_for_unknown_type(context, presentation, 'node template', field.name,\n value)\n\n group_type = presentation._get_type(context)\n if group_type is None:\n break\n\n node_types = group_type._get_members(context)\n\n is_valid = False\n\n if value in node_templates:\n our_node_type = node_templates[value]._get_type(context)\n for node_type in node_types:\n if node_type._is_descendant(context, our_node_type):\n is_valid = True\n break\n\n if not is_valid:\n context.validation.report(\n u'group definition target does not match a node type'\n u' declared in the group type in \"{0}\": {1}'\n .format(presentation._name, safe_repr(value)),\n locator=presentation._locator, level=Issue.BETWEEN_TYPES)",
"def add_user(request):\n if request.method == 'POST':\n form = CreateUserForm(request.POST)\n if form.is_valid():\n new_user = User.objects.create_user(form.cleaned_data['username'], \"[email protected]\", form.cleaned_data['password1'])\n role = form.cleaned_data['role']\n group = Group.objects.get(name=role)\n new_user.groups.add(group)\n return redirect(index)\n else:\n form = CreateUserForm() \n return render(request, 'users/add_user.html', {\n 'form': form,\n })",
"def test_users_groups_post(self):\n pass"
]
| [
"0.60938716",
"0.56177956",
"0.5573164",
"0.5544951",
"0.5542647",
"0.551167",
"0.55002177",
"0.54839563",
"0.5442849",
"0.5365981",
"0.5358088",
"0.5331456",
"0.5325358",
"0.53190184",
"0.5303998",
"0.52889675",
"0.5275196",
"0.5269549",
"0.52496207",
"0.52422386",
"0.52317804",
"0.5199831",
"0.5181247",
"0.5171608",
"0.5170496",
"0.5164203",
"0.5149951",
"0.5126169",
"0.51139665",
"0.5076357"
]
| 0.5935128 | 1 |
Validate (User | Group) payload from NEXT being updated on AD. | def validate_update_entry(payload, data_type):
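    # Updates may not touch AD-managed or immutable attributes: objectGUID and
    # whenCreated are always dropped, plus cn for users or groupType/name for groups.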
updated_required_fields = ["distinguishedName"]
prohibited_updated_group_fields = ["objectGUID", "whenCreated"]
if data_type == "user":
prohibited_updated_group_fields.append("cn")
elif data_type == "group":
prohibited_updated_group_fields.append("groupType")
prohibited_updated_group_fields.append("name")
else:
raise ValidationException(
"Payload does not have the data_type of user or group."
)
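    # distinguishedName is the only attribute an update is required to carry.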
for required_field in updated_required_fields:
if required_field not in payload:
raise ValidationException(
"Required field: '{}' is missing".format(required_field)
)
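    # Prohibited attributes are logged and removed so the remaining update
    # can still be applied.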
for prohibited_field in prohibited_updated_group_fields:
if prohibited_field in payload:
LOGGER.info(
"Payload contains prohibited field %s. Removing prohibited field from payload",
prohibited_field,
)
payload.pop(prohibited_field, None)
return payload | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate(self, data):\n groups = data['groups']\n if all(self.context['request'].user.has_perm(\"change_group\", group) for group in groups) or set(groups) <= set(\n self.context['request'].user.groups.all()):\n return data\n else:\n raise PermissionDenied()",
"def _validate_update_data(self, data):\n return",
"def validate(self, data):\n\n def getattr_patched(attr_name):\n \"\"\"\n This utility function retrieves 'attr_name' from data if it is present,\n otherwise it uses the value from self.instance. This is necessary because\n data will not have entries for all the fields in the Model\n if a partial update (PATCH) is performed.\n \"\"\"\n if attr_name in data:\n return data[attr_name]\n if self.instance and hasattr(self.instance, attr_name):\n return getattr(self.instance, attr_name)\n return None\n\n step = getattr_patched(\"step\")\n user_responses = getattr_patched(\"user_responses\")\n\n has_required_inputs = bool(\n WorkflowStepUserInput.objects.filter(workflow_step=step, required=True)\n )\n\n workflow_collection_engagement = getattr_patched(\n \"workflow_collection_engagement\"\n )\n\n workflow_collection: WorkflowCollection = (\n workflow_collection_engagement.workflow_collection\n )\n\n state: EngagementStateType = workflow_collection_engagement.state\n\n # CHECK 1: Does the specified step belong to a workflow in the specified collection?\n if not workflow_collection.workflowcollectionmember_set.filter(\n workflow__workflowstep=step\n ):\n raise serializers.ValidationError(\n \"Step must belong to a workflow in the collection\"\n )\n\n \"\"\"\n CHECK 2\n Usually, the UUID of the step being submitted must be either match \n state['next']['step_id'] or state['previous']['step_id'] to prevent the user \n from getting a sort of Frankenstein engagement with messed up data.\n\n However, there are a couple of cavaets to this if the collection is \n an unordered activity.\n\n The first is that a user can start such an engagement on any workflow.\n The second is that they can move to any other workflow after completing\n a workflow.\n\n In BOTH of these scenarios the state of the engagement will have a None \n value for both state[\"next\"][\"step_id\"] and state[\"previous\"][\"step_id\"] values.\n\n We will search for that condition, and if present, allow the user to submit\n data for any step that is the first step of a collection workflow.\n \"\"\"\n if (\n workflow_collection.category == \"ACTIVITY\"\n and not workflow_collection.ordered\n and state[\"next\"][\"step_id\"] == None\n and state[\"previous\"][\"step_id\"] == None\n ):\n if WorkflowStep.objects.filter(\n workflow=step.workflow, order__lt=step.order\n ):\n raise serializers.ValidationError(\n \"Posted step must be the first step in a workflow\"\n )\n\n else:\n if step.id not in (state[\"next\"][\"step_id\"], state[\"previous\"][\"step_id\"]):\n raise serializers.ValidationError(\n \"Posted step must be next step or previous step.\"\n )\n\n \"\"\"EXAMPLE JSON PAYLOAD\n\n {\n \"detail\": \"http://localhost:8000/api/workflow_system/users/self/workflows/engagements/6dfe24d5-9e2d-4308-9c33-e878a3d378b4/details/ad4e2263-d468-4adb-9c0a-b96740ccacd1/\",\n \"workflow_collection_engagement\": \"6dfe24d5-9e2d-4308-9c33-e878a3d378b4\",\n \"step\": \"353a1aba-57fd-4183-802e-083d53863601\",\n \"user_responses\": [\n {\n \"submittedTime\": \"2021-07-26 18:33:06.731050+00:00\",\n \"inputs\": [\n {\n \"stepInputID\": \"758f482d-3eb0-4779-bf2a-bad9e452ea0e\", \n \"stepInputUIIdentifier\": \"question_1\",\n \"userInput\": \"Red\"\n },\n {\n \"stepInputID\": \"96e7f658-7f08-4432-b3d1-f483f01aa19b\", \n \"stepInputUIIdentifier\": \"question_2\",\n \"userInput\": false\n },\n {\n \"stepInputID\": \"2312304f-ceb3-4fea-b93f-94420060b238\", \n \"stepInputUIIdentifier\": \"question_3\",\n \"userInput\": \"hi\"\n }\n ]\n },\n {\n \"submittedTime\": 
\"2021-07-26 18:33:06.731050+00:00\",\n \"inputs\": [\n {\n \"stepInputID\": \"758f482d-3eb0-4779-bf2a-bad9e452ea0e\", \n \"stepInputUIIdentifier\": \"question_1\",\n \"userInput\": \"Red\"\n },\n {\n \"stepInputID\": \"96e7f658-7f08-4432-b3d1-f483f01aa19b\", \n \"stepInputUIIdentifier\": \"question_2\",\n \"userInput\": true\n },\n {\n \"stepInputID\": \"2312304f-ceb3-4fea-b93f-94420060b238\", \n \"stepInputUIIdentifier\": \"question_3\",\n \"userInput\": \"hi\"\n }\n ]\n }\n ],\n \"started\": \"2021-07-26T08:00:28-05:00\",\n \"finished\": null\n }\n\n \"\"\"\n\n # CHECK 4\n # 1: Ensure all required attributes are present for each question in the payload.\n # 2: Ensure user input data in payload corresponds to actual, defined user inputs for the step.\n # 3: Sorted user inputs for further validation in CHECK 5.\n collected_user_inputs_by_step_input_id = {}\n\n # Outer Loop: User Response Sets\n for index, user_input_set in enumerate(user_responses):\n\n # Inner Loop: Each Input in the Response Set\n for user_input in user_input_set[\"inputs\"]:\n\n # Ensure required keys are present for each input.\n try:\n step_input_id = user_input[\"stepInputID\"]\n step_input_UI_identifier = user_input[\"stepInputUIIdentifier\"]\n response = user_input[\"userInput\"]\n except KeyError as e:\n raise serializers.ValidationError(\n \"Missing key in questions entry {}\".format(e.args[0])\n )\n\n if not WorkflowStepUserInput.objects.filter(\n id=step_input_id, ui_identifier=step_input_UI_identifier\n ):\n raise serializers.ValidationError(\n f\"No step with given stepInputID {step_input_id} and stepInputUIIdentifier {step_input_UI_identifier} exists.\"\n )\n\n # Add the user input to our sorted collection for further checks.\n if step_input_id not in collected_user_inputs_by_step_input_id.keys():\n collected_user_inputs_by_step_input_id[step_input_id] = {}\n collected_user_inputs_by_step_input_id[step_input_id][\n index\n ] = user_input\n\n # CHECK 5 - Final Checks\n # Evaluate each defined WorkflowStepUserInput object for the step\n # and make sure that required answers are present and conform\n # to the specification for the object.\n for step_input in WorkflowStepUserInput.objects.filter(workflow_step=step):\n step_input_id = str(step_input.id)\n\n # Determine if the user has one or more answers for the current WorkflowStepUserInput\n if step_input_id not in collected_user_inputs_by_step_input_id:\n # No answers. Now see if answers were required.\n if step_input.required:\n raise serializers.ValidationError(\n \"A response is required, but missing, for step_input id {}\".format(\n step_input_id\n )\n )\n\n else:\n # TODO: This checking process, in general, could probably benefit\n # from a little bit of clean-up. This is too broad in that it will\n # handle both \"incorrect\" answers and radical schema violations in the same way.\n responses_to_input = collected_user_inputs_by_step_input_id[\n step_input_id\n ]\n for index, response in responses_to_input.items():\n try:\n jsonschema.validate(\n instance=response, schema=step_input.response_schema\n )\n except jsonschema.ValidationError:\n # This answer is not valid\n for entry in user_responses[index][\"inputs\"]:\n if step_input_id == entry[\"stepInputID\"]:\n entry[\"is_valid\"] = False\n break\n else:\n # This is!\n for entry in user_responses[index][\"inputs\"]:\n if step_input_id == entry[\"stepInputID\"]:\n entry[\"is_valid\"] = True\n break\n\n return data",
"def fusion_api_validate_group_and_roles(self, body, api=None, headers=None):\n return self.LoginDomainsGroupToRoleMapping.validate(body, api, headers)",
"def validate(self, data):\n request = self.context.get('request')\n data['poster'] = request.user\n\n return validate_complete_address(data)",
"def validate(self, data):\n user_type = 3\n return validate_register_user(self, data, user_type)",
"def validate(self, data):\n # if data['is_private'] and data['contestants']:\n # raise serializers.ValidationError(\"Can not be private and compete for an award.\")\n return data",
"def validate(self, data):\n user1 = self.context['request'].user\n \n if data['sinceWhen'] > data['tilWhen']:\n raise serializers.ValidationError(\"sinceWhen must precede after tilWhen\")\n\n if user1 == data['user2']:\n raise serializers.ValidationError(\"user2 must be a different user\")\n \n return data",
"def validate_payload(payload):\n\n if not isinstance(payload, dict):\n raise Exception(\"payload is a %s, not a dictionary\" % type(payload))\n\n if \"nmo\" not in payload:\n raise Exception(\"No nmo in payload\")\n\n if \"job\" not in payload[\"nmo\"]:\n raise Exception(\"No job in nmo \\nnmo is %s\" % payload[\"nmo\"])\n\n if \"task\" not in payload[\"nmo\"]:\n raise Exception(\"No task in nmo \\nnmo is %s\" % payload[\"nmo\"])\n\n try:\n isGroup = payload['nmo']['source']['misc']['isGroup']\n except:\n isGroup = False\n\n if \"jsonld\" not in payload and not isGroup:\n raise Exception(\"No jsonld in payload \\nPayload is:- %s\" % payload)",
"def validate(self, data):\n user_type = 3\n return validate_login_user(self, data, user_type)",
"def validate_create_entry(payload, data_type):\n new_entry_required_fields = [\"distinguishedName\"]\n new_entry_prohibited_fields = [\"objectGUID\", \"whenChanged\"]\n\n if data_type == \"user\":\n new_entry_required_fields.append(\"cn\")\n new_entry_required_fields.append(\"userPrincipalName\")\n elif data_type == \"group\":\n new_entry_required_fields.append(\"groupType\")\n else:\n raise ValidationException(\n \"Payload does not have the data_type of user or group.\"\n )\n\n for required_field in new_entry_required_fields:\n if required_field not in payload:\n raise ValidationException(\n \"Required field: '{}' is missing\".format(required_field)\n )\n\n for prohibited_field in new_entry_prohibited_fields:\n if prohibited_field in payload:\n LOGGER.info(\n \"Payload contains prohibited field %s. Removing prohibited field from payload\",\n prohibited_field,\n )\n payload.pop(prohibited_field, None)\n\n return payload",
"def validate():",
"def do_validate(self, request, _object):\n\n pass",
"def _validate_user(instance: typing.Dict[str, typing.Any], schema: typing.Dict[str, typing.Any], path: typing.List[str]) -> None:\n if not isinstance(instance, dict):\n raise ValidationError('instance must be dict', path)\n valid_keys = {'_type', 'user_id'}\n required_keys = valid_keys\n schema_keys = set(instance.keys())\n invalid_keys = schema_keys - valid_keys - opt_federation_keys\n if invalid_keys:\n raise ValidationError('unexpected keys in schema: {}'.format(invalid_keys), path)\n missing_keys = required_keys - schema_keys\n if missing_keys:\n raise ValidationError('missing keys in schema: {}'.format(missing_keys), path)\n if instance['_type'] != 'user':\n raise ValidationError('expected _type \"user\"', path)\n if not isinstance(instance['user_id'], int):\n raise ValidationError('user_id must be int', path)\n if 'component_uuid' in instance and instance['component_uuid'] != flask.current_app.config['FEDERATION_UUID']:\n pass # TODO\n else:\n try:\n users.check_user_exists(user_id=instance['user_id'])\n except UserDoesNotExistError:\n raise ValidationError('user does not exist', path)",
"def validate(self, data):\n draft_group_id = data['draft_group']\n if draft_group_id is None:\n raise serializers.ValidationError(\"invalid draft_group id\")\n try:\n draftgroup.models.DraftGroup.objects.get(pk=draft_group_id)\n except draftgroup.models.DraftGroup.DoesNotExist:\n raise serializers.ValidationError('invalid draft_group id')\n\n return data",
"def validate(cls, data, errors):",
"def _validate_user(_):\n pass",
"def check_insert_group_user(self, id_user:int, id_group:int) -> bool:\n try:\n value_list = self.cursor.execute(f\"SELECT * FROM {table_user_group_connect} WHERE id_user={id_user} AND id_group={id_group};\").fetchone()\n if value_list:\n return True\n return False\n except Exception as e:\n msg = f\"We faced problems with the check previous insertion on th. Mistake: {e} \"\n self.proceed_error(msg)\n return False",
"def _validatePayload(request):\n return {\n 'name': _validateField(request, 'name'),\n 'email': _validateField(request, 'email'),\n 'password': _validateField(request, 'password'),\n }",
"def validate_new_group(link_id_field, scope_path_field,\n group_logic):\n\n def wrapper(self):\n \"\"\"Decorator wrapper method.\n \"\"\"\n cleaned_data = self.cleaned_data\n\n fields = {}\n\n link_id = cleaned_data.get(link_id_field)\n\n if link_id:\n fields['link_id'] = link_id\n\n scope_path = cleaned_data.get(scope_path_field)\n if scope_path:\n fields['scope_path'] = scope_path\n\n # check if there is already a group for the given fields\n group_entity = group_logic.logic.getForFields(fields, unique=True)\n\n if group_entity:\n # add the error message to the link id field\n self._errors[link_id_field] = ErrorList([DEF_LINK_ID_IN_USE_MSG])\n del cleaned_data[link_id_field]\n # return the new cleaned_data\n return cleaned_data\n\n return cleaned_data\n return wrapper",
"def validate(self, data):\n request = self.context['request']\n data.setdefault('user', request.user)\n data.setdefault('device_user_token', None)\n\n if not request.user.is_authenticated():\n raise serializers.ValidationError('user is not logged in.')\n\n try:\n self.instance = DeviceUser.objects.get(**data)\n\n except DeviceUser.DoesNotExist:\n raise serializers.ValidationError('invalid device')\n\n return data",
"def _validate_post_fields(self, value, name, result):\n state = result.get(\"state\")\n persistent_state = result.get(\"persistent_state\")\n\n # minimal settings not related to runtime changes\n valid_fields = [\"actions\", \"ignore_errors\", \"name\", \"persistent_state\", \"state\"]\n\n # when type is present, a profile is completely specified (using\n # defaults or other settings)\n if \"type\" in result:\n valid_fields += list(self.nested.keys())\n\n # If there are no runtime changes, \"wait\" and \"force_state_change\" do\n # not make sense\n # FIXME: Maybe this restriction can be removed. Need to make sure that\n # defaults for wait or force_state_change do not interfer\n if not state:\n while \"wait\" in valid_fields:\n valid_fields.remove(\"wait\")\n while \"force_state_change\" in valid_fields:\n valid_fields.remove(\"force_state_change\")\n else:\n valid_fields += [\"force_state_change\", \"wait\"]\n\n # FIXME: Maybe just accept all values, even if they are not\n # needed/meaningful in the respective context\n valid_fields = set(valid_fields)\n for k in result:\n if k not in valid_fields:\n raise ValidationError(\n name + \".\" + k,\n \"property is not allowed for state '%s' and persistent_state '%s'\"\n % (state, persistent_state),\n )\n\n if \"name\" not in result:\n if persistent_state == \"absent\":\n result[\"name\"] = \"\" # set to empty string to mean *absent all others*\n else:\n raise ValidationError(name, \"missing 'name'\")\n\n # FIXME: Seems to be a duplicate check since \"wait\" will be removed from\n # valid_keys when state is considered to be not True\n if \"wait\" in result and not state:\n raise ValidationError(\n name + \".wait\",\n \"'wait' is not allowed for state '%s'\" % (result[\"state\"]),\n )\n\n result[\"state\"] = state\n result[\"persistent_state\"] = persistent_state\n\n self.VALID_FIELDS = valid_fields\n return result",
"def _validate_post_fields(self, value, name, result):\n state = result.get(\"state\")\n persistent_state = result.get(\"persistent_state\")\n\n # minimal settings not related to runtime changes\n valid_fields = [\"actions\", \"ignore_errors\", \"name\", \"persistent_state\", \"state\"]\n\n # when type is present, a profile is completely specified (using\n # defaults or other settings)\n if \"type\" in result:\n valid_fields += list(self.nested.keys())\n\n # If there are no runtime changes, \"wait\" and \"force_state_change\" do\n # not make sense\n # FIXME: Maybe this restriction can be removed. Need to make sure that\n # defaults for wait or force_state_change do not interfer\n if not state:\n while \"wait\" in valid_fields:\n valid_fields.remove(\"wait\")\n while \"force_state_change\" in valid_fields:\n valid_fields.remove(\"force_state_change\")\n else:\n valid_fields += [\"force_state_change\", \"wait\"]\n\n # FIXME: Maybe just accept all values, even if they are not\n # needed/meaningful in the respective context\n valid_fields = set(valid_fields)\n for k in result:\n if k not in valid_fields:\n raise ValidationError(\n name + \".\" + k,\n \"property is not allowed for state '%s' and persistent_state '%s'\"\n % (state, persistent_state),\n )\n\n if \"name\" not in result:\n if persistent_state == \"absent\":\n result[\"name\"] = \"\" # set to empty string to mean *absent all others*\n else:\n raise ValidationError(name, \"missing 'name'\")\n\n # FIXME: Seems to be a duplicate check since \"wait\" will be removed from\n # valid_keys when state is considered to be not True\n if \"wait\" in result and not state:\n raise ValidationError(\n name + \".wait\",\n \"'wait' is not allowed for state '%s'\" % (result[\"state\"]),\n )\n\n result[\"state\"] = state\n result[\"persistent_state\"] = persistent_state\n\n self.VALID_FIELDS = valid_fields\n return result",
"def _validate(self):\n pass",
"def _process_validator_results(ret, level, object_data, obj):\n\n # The first object in the tuple is the one being validated\n if isinstance(obj, tuple):\n real_obj = obj[0]\n else:\n real_obj = obj\n\n if not ret:\n is_valid = True\n return is_valid\n\n if isinstance(ret, string_types):\n ledger.add_message(ret, level, object_data)\n is_valid = False\n\n elif isinstance(ret, dict):\n for field_name, error in list(ret.items()):\n # verbose_field_name = ledger.map_field_name_to_verbose_name(obj, field_name)\n object_data_with_field = object_data.copy()\n object_data_with_field[\"field\"] = field_name\n if field_name_mapper is None:\n # raise RuntimeError(\"A field_name_mapper was not supplied to this validator.\")\n verbose_name = None\n else:\n verbose_name = field_name_mapper(real_obj, field_name)\n if verbose_name is None:\n from titlecase import titlecase\n\n verbose_name = titlecase(\" \".join(field_name.split(\"_\")))\n\n object_data_with_field[\"verbose_name\"] = verbose_name\n if include_field_name_in_message:\n error = \"{}: {}\".format(verbose_name, error)\n else:\n error = \"{}\".format(error)\n ledger.add_message(error, level, object_data_with_field)\n is_valid = False\n\n else:\n for validator_ret_item in ret:\n if isinstance(validator_ret_item, str):\n ledger.add_message(validator_ret_item, level, object_data)\n is_valid = False\n elif isinstance(validator_ret_item, dict):\n for field_name, error in list(validator_ret_item.items()):\n # verbose_field_name = ledger.map_field_name_to_verbose_name(obj, field_name)\n object_data_with_field = object_data.copy()\n object_data_with_field[\"field\"] = field_name\n verbose_name = field_name_mapper(real_obj, field_name)\n if verbose_name is None:\n from titlecase import titlecase\n\n verbose_name = titlecase(\" \".join(field_name.split(\"_\")))\n\n object_data_with_field[\"verbose_name\"] = verbose_name\n if include_field_name_in_message:\n error = \"{}: {}\".format(verbose_name, error)\n else:\n error = \"{}\".format(error)\n \n ledger.add_message(error, level, object_data_with_field)\n is_valid = False\n\n return is_valid",
"def check_fields_updated_correctly(old_user_data: user_models.User,\n updated_data_json: Dict[str, Any]) -> bool:\n\n updated_user_object = get_user_data_from_user_model(old_user_data)\n updated_user_data_dict = updated_user_object.dict()\n\n # basically just getting intersetion of keys from two dicts\n keys_to_compare = set(updated_user_data_dict).intersection(\n set(updated_data_json))\n\n try:\n is_password = lambda x: x == \"password\"\n for key in keys_to_compare:\n\n if is_password(key):\n password_str = updated_data_json[key]\n assert updated_user_object.check_password(password_str)\n continue\n\n assert updated_data_json[key] == updated_user_data_dict[key]\n return True\n except AssertionError as assert_error:\n logging.warning(f\"failed at: {assert_error}\")\n return False",
"def validate_user_data(self, expected, actual):\n self.log.debug('actual: {}'.format(repr(actual)))\n for e in expected:\n found = False\n for act in actual:\n a = {'enabled': act.enabled, 'name': act.name,\n 'email': act.email, 'tenantId': act.tenantId,\n 'id': act.id}\n if e['name'] == a['name']:\n found = True\n ret = self._validate_dict_data(e, a)\n if ret:\n return \"unexpected user data - {}\".format(ret)\n if not found:\n return \"user {} does not exist\".format(e['name'])\n return ret",
"def validate_user_request_dict(request_dict):\n if 'first_name' not in request_dict:\n return False\n if 'last_name' not in request_dict:\n return False\n if 'id' not in request_dict:\n return False\n if 'email' not in request_dict:\n return False\n return True",
"def validate(self, data):\n\t\tvalidated_data = super(BoxSerializer, self).validate(data)\n\t\tuser = self.context['request'].user\n\t\tcheck_constraint_util = CheckConstraintsUtil(user, validated_data, self.instance) \n\t\treturn check_constraint_util.check_constraints()",
"def validate(self, attrs):\n data = super().validate(attrs)\n\n # Get the fields from the user profile serializer.\n serializer = UserSerializerWithToken(self.user).data\n\n for fields, values in serializer.items():\n data[fields] = values\n\n #print('token:', data)\n\n return data"
]
| [
"0.6097056",
"0.5806911",
"0.5760532",
"0.56909925",
"0.56166065",
"0.5543161",
"0.55097544",
"0.54937786",
"0.5447769",
"0.5442769",
"0.54362345",
"0.5408871",
"0.5381777",
"0.5366057",
"0.5358948",
"0.5351018",
"0.5333003",
"0.531848",
"0.526143",
"0.52607524",
"0.5235242",
"0.5214466",
"0.5214466",
"0.52125454",
"0.52099514",
"0.5201197",
"0.519672",
"0.51823956",
"0.517916",
"0.5174885"
]
| 0.67155135 | 0 |
Returns the dimensions as a list, ordered according to the schema | def dims_list(self):
return [n for n in self.schema.names if n in self.dims] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def serialize_dimensions(md: Metadata) -> list:\n dimensions = []\n\n for dim in md.dimensions.all():\n dimension = OrderedDict()\n\n dimension[\"type\"] = dim.type\n dimension[\"custom_name\"] = dim.custom_name\n dimension[\"units\"] = dim.units\n dimension[\"extent\"] = dim.extent\n\n dimensions.append(dimension)\n\n return dimensions",
"def listdimension(self):\n return list(self.dimensions.keys())",
"def ordered_dims(self) -> Sequence[Tuple[str, int]]:\n return list(self.affine_state.all_dims.items())",
"def all_dimensions(self):\n\t\treturn [dimension for key, dimension in self._all_dimensions.items()]",
"def dimensions(self):\n return self.index.names",
"def shape_list(x):\n static = x.get_shape().as_list()\n shape = tf.shape(x)\n ret = []\n for i, static_dim in enumerate(static):\n dim = static_dim or shape[i]\n ret.append(dim)\n return ret",
"def ordering(self):\n if self.dim_ordering is None:\n return list(range(self.rank))\n\n orig = self.dim_ordering.dims_and_symbols.dims\n return [orig.index(sym) for sym in self.dim_ordering.map.dims]",
"def get_dimensions(self, fieldname=None):\n if fieldname is None:\n dims = self._dims.keys()\n else:\n dims = self.read_field(fieldname).dimensions.keys()\n return tuple(dims)",
"def queryDimensionNames(name):\n\n header, rows = querySciDB(\"dimensions(%s)\" % name)\n return [row[1].translate(None, \"\\\"\") for row in rows]",
"def subsystem_dims(self):\n return [subsystem.truncated_dim for subsystem in self]",
"def _sort_ds(self):\n d = []\n for layer in self.structure:\n if (layer.type == 'Layer' or layer.type == 'Substrate'):\n d.append(layer.thickness)\n d.insert(0, self.structure[0].thickness)\n d.append(self.structure[-1].thickness)\n d = np.asarray(d)\n return d",
"def getDimensions(unique_name=None):",
"def dims_list(tensor: tf.Tensor) -> List[Union[int, tf.Tensor]]:\n static = tensor.shape.as_list()\n if None not in static:\n return static\n dynamic = tf.unstack(tf.shape(tensor))\n return [(d if s is None else s) for s, d in zip(static, dynamic)]",
"def get_dimensions(js_dict, naming):\n\n dimensions = []\n dim_names = []\n if check_version_2(js_dict):\n dimension_dict = js_dict\n else:\n dimension_dict = js_dict['dimension']\n for dim in dimension_dict['id']:\n dim_name = js_dict['dimension'][dim]['label']\n if not dim_name:\n dim_name = dim\n if naming == 'label':\n dim_label = get_dim_label(js_dict, dim)\n dimensions.append(dim_label)\n dim_names.append(dim_name)\n else:\n dim_index = get_dim_index(js_dict, dim)\n dimensions.append(dim_index)\n dim_names.append(dim)\n return dimensions, dim_names",
"def shape_list(x):\n shape = list(x.shape)\n\n return shape",
"def getDimensions():",
"def queryDimensions(name):\n header, rows = querySciDB(\"dimensions(%s)\" % name)\n\n if len(rows) < 2:\n return 0, 0\n else:\n return [int(row[3]) + 1 for row in rows] #bfichter comment: i don't know why the + 1 was put here as the data is not 0 based, not going to change it though because of the large amount of compensating -1's now in the code",
"def dimensions():",
"def get_dimensions(self, obj):\n return str(obj.dimensions)",
"def process(data):\n items = data.get('items', [])\n logging.info('- processing %d items', len(items))\n return [_flatten_dimensions(t['properties']['dimensions']) for t in items]",
"def get_dim_attribute(self,attr):\n return [getattr(dim,attr) for dim in self.dimensions]",
"def _getDTypeList(self):\n return self._dtype",
"def _sort_ns(self):\n n = []\n for layer in self.structure:\n n.append(layer.get_index())\n n = np.asarray(n)\n return n",
"def shape_list(x):\n x = tf.convert_to_tensor(x)\n\n # If unknown rank, return dynamic shape\n if x.get_shape().dims is None:\n return tf.shape(x)\n\n static = x.get_shape().as_list()\n shape = tf.shape(x)\n\n ret = []\n for i in range(len(static)):\n dim = static[i]\n if dim is None:\n dim = shape[i]\n ret.append(dim)\n return ret",
"def shape_list(x):\n x = tf.convert_to_tensor(x)\n\n # If unknown rank, return dynamic shape\n if x.get_shape().dims is None:\n return tf.shape(x)\n\n static = x.get_shape().as_list()\n shape = tf.shape(x)\n\n ret = []\n for i in range(len(static)):\n dim = static[i]\n if dim is None:\n dim = shape[i]\n ret.append(dim)\n return ret",
"def get_dimension_props():\n props = eval(\"bpy.context.scene.\" + DIMENSION_PROPERTY_NAMESPACE)\n return props",
"def category_dimensions(self, include=True):\n strategy = self.advert.strategy\n\n manager = strategy.content_category_values if include else strategy.content_category_values_exclude\n for category in manager.all():\n for raw in category.represented.all():\n try:\n yield int(raw.name)\n except ValueError:\n yield raw.name",
"def get_dim_attribute(self,attr):\n return [getattr(getattr(self,name),attr) for name in self._dimensions]",
"def getAvailableDimensions(samweb):\n result = samweb.getURL('/files/list/dimensions?format=json&descriptions=1')\n return convert_from_unicode(result.json())",
"def dimensions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupMultipleMetricsMetricDimensionArgs']]]]:\n return pulumi.get(self, \"dimensions\")"
]
| [
"0.75346553",
"0.746462",
"0.69651073",
"0.68094385",
"0.645766",
"0.6450359",
"0.64345264",
"0.63140184",
"0.6211617",
"0.61613286",
"0.61577505",
"0.6104858",
"0.6082072",
"0.60458875",
"0.60123855",
"0.59878355",
"0.597456",
"0.59037936",
"0.5862507",
"0.5858748",
"0.58487684",
"0.5846844",
"0.58405066",
"0.58355594",
"0.58355594",
"0.581092",
"0.5774741",
"0.57746524",
"0.57721347",
"0.5769793"
]
| 0.7960348 | 0 |
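
A minimal usage sketch for the dims_list entry above, added for illustration only; the _Schema and _Flat stand-ins and the dimension names are assumptions, not part of the dataset row. It shows the docstring's point that the result follows the schema's ordering, not the order of self.dims.

class _Schema:
    """Illustrative stand-in exposing only the ordered dimension names."""
    def __init__(self, names):
        self.names = names

class _Flat:
    """Illustrative stand-in wrapping the dims_list method shown above."""
    def __init__(self, schema, dims):
        self.schema = schema
        self.dims = dims

    def dims_list(self):
        return [n for n in self.schema.names if n in self.dims]

flat = _Flat(_Schema(["year", "region", "product"]), dims={"product", "year"})
print(flat.dims_list())  # ['year', 'product'] -- schema order, not set order
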
Converts a DataFrame to a Flat by indicating the dimensions and value column | def from_dataframe(cls, df: pd.DataFrame, schema: Schema, dims: List[str], value_column: str) -> "Flat":
index = schema.encode_many(df[dims])
vals = df[value_column].values
dim_mask = schema.dims_to_mask(dims)
vec = grblas.Vector.from_values(index, vals, size=dim_mask + 1)
return cls(vec, schema, dims) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def flatten_df(df):\n flat_array = df.values.flatten()\n flat_df = pd.DataFrame(flat_array)\n flat_df.columns = [\"loan\"]\n flat_df[\"row_no\"] = flat_df.reset_index().index\n flat_df = flat_df[[\"row_no\", \"loan\"]]\n flat_df.row_no = flat_df.row_no // 100\n return flat_df",
"def flatten_df(df: DataFrame, df_engine: DataFrameEngine) -> Tuple[DataFrame, Dict[str, Tuple]]: # noqa: F821\n # Workaround for: https://issues.apache.org/jira/browse/ARROW-5645\n column_shapes = {}\n for c in df.columns:\n df = df_engine.persist(df)\n shape = df_engine.compute(\n df_engine.map_objects(\n df[c],\n lambda x: np.array(x).shape,\n ).max()\n )\n\n if len(shape) > 1:\n column_shapes[c] = shape\n df[c] = df_engine.map_objects(df[c], lambda x: np.array(x).reshape(-1))\n return df, column_shapes",
"def unflatten_df(df: DataFrame, column_shapes: Dict[str, Tuple], df_engine: DataFrameEngine) -> DataFrame: # noqa: F821\n for c in df.columns:\n shape = column_shapes.get(c)\n if shape:\n df[c] = df_engine.map_objects(df[c], lambda x: np.array(x).reshape(shape))\n return df",
"def _csmToFlat(self, value, rows, cols, numRow, numCol):\n if numRow == 1 and numCol == 1:\n # early exit for scalars\n valOut = value\n elif len(value) == len(rows) * len(cols):\n # early exit for non-sliced arrays\n valOut = value\n else:\n valOut = np.zeros(len(rows) * len(cols))\n irow = 0\n for rowInd in rows:\n icol = 0\n for colInd in cols:\n valOut[icol + irow * len(cols)] = value[(colInd - 1) + numCol * (rowInd - 1)]\n icol += 1\n irow += 1\n return valOut",
"def to_flat_index(self) -> Index: # type: ignore[override]\n return Index(self._values, tupleize_cols=False)",
"def to_dataframe(self, value_column=\"* values *\") -> pd.DataFrame:\n index, vals = self.vector.to_values()\n df = self.schema.decode_many(index, self.dims_list)\n df[value_column] = vals\n return df",
"def transform(self, dataframe: DataFrame) -> DataFrame:",
"def flatten(self):\n xv, yv = np.meshgrid(self.columns, self.index, indexing='xy')\n return np.array([xv.ravel(), yv.ravel(), self.values.ravel()])",
"def to_scalar_df(df: pd.DataFrame) -> pd.DataFrame:\n scalar_df = df\n column_ordering = []\n for c, s in df.items():\n if s.dtype == \"object\":\n s_list = s.to_list()\n try:\n ncols = s_list[0].shape[0]\n split_cols = [f\"{c}_{k}\" for k in range(ncols)]\n sdf = pd.DataFrame(s_list, columns=split_cols)\n scalar_df = pd.concat([scalar_df, sdf], axis=1)\n column_ordering += split_cols\n except AttributeError as e:\n raise ValueError(f\"Expected series of lists, but found {s_list[0]}\") from e\n else:\n column_ordering.append(c)\n return scalar_df[column_ordering]",
"def flatten(self):\n return DataArray([s for s in self.unstructured()])",
"def flat(self):\n if len(self.description) != 1:\n msg = \"Results set with %d cols cannot be treated as flat\"\n raise TypeError(msg % len(self.description))\n return [r[0] for r in self._rows]",
"def listify(array, valuename, colnames):\n multiindex = pd.MultiIndex.from_product([range(i) for i in array.shape])\n colmapping = {\"level_\"+str(i): colnames[i] for i in range(len(colnames))}\n\n return pd.DataFrame({valuename: pd.Series(index=multiindex, data=array.flatten())}).reset_index().rename(colmapping, axis=1)",
"def reshape_data(self):\n # Initialise empty dataframe\n od_data = pd.DataFrame(columns=['Origin','Destination','Cost','Flow','OriginSupply','DestinationDemand'])\n # Loop over origins and destinations to populate dataframe\n for i,orig in tqdm(enumerate(self.origins),total=len(self.origins)):\n for j,dest in enumerate(self.destinations):\n # Add row properties\n new_row = pd.Series({\"Origin\": orig,\n \"Destination\": dest,\n \"Cost\": self.cost_matrix[i,j],\n \"OriginSupply\": self.origin_supply[i],\n \"DestinationDemand\":self.destination_demand[j]})\n # Append row to dataframe\n od_data = od_data.append(new_row, ignore_index=True)\n\n # Get flatten data and et column types appropriately\n orig_supply_flat = od_data.OriginSupply.values.astype('float64')\n dest_demand_flat = od_data.DestinationDemand.values.astype('float64')\n cost_flat = od_data.Cost.values.astype('float64')\n\n return orig_supply_flat,dest_demand_flat,cost_flat",
"def normalize_to_flat(classifier, df, col):\n name = str(classifier) + '_df'\n new_df= json_normalize(df.loc[classifier][col])\n new_df['classifier'] = [classifier]\n return new_df",
"def transform(self, data: pd.DataFrame, columns: list, verbose: int=1) -> pd.DataFrame:",
"def test_01_sparse_to_dataframe(self):\n # Flatten it...\n _, err = _iquery(\"store(flatten(SPARSE, _fast:1), DF1)\")\n assert not err, err\n self._array_cleanups.append('DF1')\n check_v_sum('DF1')\n self._df1_chunks = chunk_count(vaid_of(\"DF1\"))\n prt(\"DF1 has\", self._df1_chunks, \"chunks\")",
"def prepareDataframeForPivot(self, result):\n df = result\n if isinstance(df, pd.Series):\n df = pd.DataFrame({\"values\": df})\n if self._isIndexedDataframe(df):\n if isinstance(df.columns, pd.MultiIndex):\n df.columns = df.columns.map(' | '.join)\n df = df.select_dtypes(include=['float64', 'int64'])\n if df.size == 0:\n df[\"values\"] = np.nan\n # try to keep group measures\n try:\n df.groupMeasures = result.groupMeasures\n except:\n pass\n # try to keep aggMeasures\n try:\n df.aggMeasures = result.aggMeasures\n except:\n pass\n\n return df",
"def prepareDataframeForTable(self, result):\n df = result\n if isinstance(df, pd.Series):\n df = pd.DataFrame({\"values\": df})\n\n if self._isIndexedDataframe(df):\n if df.size == 0:\n df[\"values\"] = np.nan\n elif len(df.columns) > 1:\n if isinstance(df.columns, pd.MultiIndex):\n df.columns = df.columns.map(' | '.join)\n df = df.stack()\n if isinstance(df, pd.Series):\n df = pd.DataFrame({\"values\": df})\n current_columns_name = list(df.index.names)\n current_columns_name[len(current_columns_name)-1] = \"Measures\"\n df.index.names = current_columns_name\n\n return df",
"def _flat(D):\n if issparse(D):\n raise ValueError(\"Cannot flatten sparse matrix.\")\n d_flat = np.array(D).flatten()\n return d_flat",
"def flatten(tbl, tpcol, key, val):\n tblflat = tbl[tbl[tpcol].isin(val)]\n tblflatnm = '%s_flat' %key\n return tblflat, tblflatnm",
"def flatten_data(X):\n\n return X.reshape((-1, X.shape[-1]))",
"def flatten(x):\n return reshape(x, (x.shape[0], -1))",
"def _dataframe_conversion(da, order):\n assert da.data.squeeze().ndim == 2, (\n \"Dataframe conversion only possible for connectivity arrays when \"\n \"time dimension is missing\")\n da = da.squeeze().to_dataframe('mi').reset_index()\n da = da.pivot('sources', 'targets', 'mi')\n if isinstance(order, (list, np.ndarray)):\n da = da.reindex(order, axis='index').reindex(order, axis='columns')\n\n return da",
"def flatten():",
"def to_flat_nz(self):\n return self.to_ddm().to_flat_nz()",
"def flatten(x):\n return x.view(x.size(0), -1)",
"def column_convertor(x):\n x.shape = (1, x.shape[0])\n return x",
"def _replace_ragged_with_flat_values(value, partition_lists, flat_values_nrows):\n # Base case\n if ragged_tensor.is_ragged(value):\n value = ragged_tensor.convert_to_tensor_or_ragged_tensor(value)\n partition_lists.append(value._nested_row_partitions) # pylint: disable=protected-access\n nrows = tensor_shape.dimension_at_index(value.flat_values.shape, 0).value\n if nrows is not None:\n flat_values_nrows.append(nrows)\n return value.flat_values\n\n # Recursion cases\n def recurse(v):\n return _replace_ragged_with_flat_values(v, partition_lists,\n flat_values_nrows)\n\n if isinstance(value, list):\n return [recurse(v) for v in value]\n elif isinstance(value, tuple):\n return tuple(recurse(v) for v in value)\n elif isinstance(value, dict):\n return dict((k, recurse(v)) for (k, v) in value.items())\n else:\n return value",
"def flatten_cols(self, arr: np.array) -> torch.Tensor:\n snake_ = []\n k = 1\n for i in range(arr.shape[1]):\n snake_ += list(arr[:, i][::k])\n k *= -1\n return torch.tensor(snake_).unsqueeze(-1)",
"def clean_and_transpose(df):\n print(\"Cleaning and Transposing\")\n df = df.apply(pd.Series)\n df = df[sorted(df.columns)]\n df = df.T\n df.index.name = 'date'\n return df"
]
| [
"0.6000981",
"0.5966032",
"0.5887755",
"0.56872046",
"0.5651386",
"0.5570109",
"0.55378705",
"0.55341214",
"0.5467069",
"0.54654133",
"0.53458506",
"0.5280397",
"0.52247965",
"0.52107245",
"0.5186249",
"0.51823395",
"0.51717144",
"0.51630336",
"0.51435846",
"0.5136841",
"0.5130054",
"0.51267046",
"0.51207185",
"0.5119221",
"0.511844",
"0.51031274",
"0.5102219",
"0.5088934",
"0.50576353",
"0.5051084"
]
| 0.65643346 | 0 |
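
The from_dataframe entry above reduces to a single grblas.Vector.from_values call over schema-encoded indices. A hedged sketch of that core step follows; the index, values, and dim_mask numbers are invented stand-ins for schema.encode_many and schema.dims_to_mask output, and the snippet assumes the grblas package is installed.

import numpy as np
import grblas

index = np.array([3, 11, 29], dtype=np.uint64)  # stand-in for schema.encode_many(df[dims])
vals = np.array([10.0, 7.5, 12.25])             # stand-in for df[value_column].values
dim_mask = 31                                   # stand-in for schema.dims_to_mask(dims)
vec = grblas.Vector.from_values(index, vals, size=dim_mask + 1)
print(vec.nvals)  # 3 stored entries in a sparse vector of length dim_mask + 1
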
Return a list of all feature classes in current_workspace, including those that exist within feature datasets. Parameters | def find_all_feature_classes(current_workspace):
paths_to_export = arcpy.ListFeatureClasses()
# search for additional feature classes in feature datasets
for fds in arcpy.ListDatasets():
        env.workspace = os.path.join(current_workspace, fds)  # rebase on the gdb root, not the previous dataset
for fc in arcpy.ListFeatureClasses():
paths_to_export.append(os.path.join(fds, fc))
env.workspace = current_workspace
if len(paths_to_export) == 0:
raise EmptyGeodatabaseError
else:
return paths_to_export | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getFeatureClassNames(self):\n return self.featureClasses.keys()",
"def find_class_names(self) -> [str]:\n if not self.faithful:\n raise AttributeError('The workspace is not faithful')\n\n result = list()\n\n list_possible_class = list_folders_into_directory(join(self.directory_path, \"train-images\"))\n\n for possible_class in list_possible_class:\n if possible_class != \"img\":\n result.append(possible_class)\n\n self.class_names = result\n return result",
"def get_feature_names(self):\n return [self.__class__.__name__]",
"def listFeatureClassesInGDB(self, inWorkspaceLocation):\n env.workspace = inWorkspaceLocation\n featureClassesList = list()\n for foundFeatureDataset in ListDatasets(\"\",\"\"):\n for foundFeatureClass in ListFeatureClasses(\"\",\"\",foundFeatureDataset):\n FeatureClassesHold1 = str(foundFeatureDataset)\n FeatureClassesHold2 = str(foundFeatureClass)\n FeatureClassesJoin1 = os.path.join(FeatureClassesHold1, FeatureClassesHold2)\n featureClassesList.append(FeatureClassesJoin1)\n for foundFeatureClass in ListFeatureClasses(\"\",\"\"): # Doesn't seem to be working. Troubleshoot. Should show road alias table.\n featureClassesList.append(str(foundFeatureClass))\n for foundTable in ListTables(\"\", \"\"): # Added ListTables to actually list tables, see if it fixes.\n featureClassesList.append(str(foundTable))\n \n itemsToRemove = list()\n itemsToRemove.append(str(\"FeatureClassLookup\").upper())\n itemsToRemove.append(str(\"FieldNameLookup\").upper())\n itemsToRemove.append(str(\"FeatureAndFieldLookup\").upper())\n \n # List comprehension to build a list that is a copy of featureClasses list, without the\n # entries in the itemsToRemove list. Changes the strings to uppercase for comparison, \n # but does not change them in the output list.\n cleanedFeatureClassesList = [feature for feature in featureClassesList if str(feature).upper() not in itemsToRemove]\n cleanedFeatureClassesList = sorted(cleanedFeatureClassesList)\n return cleanedFeatureClassesList",
"def findFeatures(self):\n\t\tpass",
"def return_classes(self):\n\n\t\t \n\t\t \n\t\treturn self.classes",
"def classes(self):\n if self.classname:\n return [self.classname]\n return []",
"def getClasses(self):\n self._process()\n return self._sets",
"def classes(self):\r\n return self._classes",
"def all(cls):\n results = []\n for name in FEATURES:\n feat = cls.get_by_name(name)\n if feat is None:\n feat = cls(name=name)\n cls.query.session.add(feat)\n results.append(feat)\n return results",
"def get_classes(self):\n return self._classes",
"def relevant_classifications(self):\n return self.relevant_classes",
"def classes(self):\n raise NotImplementedError(\"Please implement this yourself.\")",
"def get_classes(self):\n return",
"def get_classes(self, include_ref=True):\n defs = self._get_class_defs()\n ans = {}\n ans.update(defs)\n if include_ref:\n refs = self._get_class_refs()\n ans.update(refs)\n return list(ans.values())",
"def getFeatureNames(self, featureClassName):\n return self.featureClasses[featureClassName].getFeatureNames()",
"def get_classes(self):\n query = read_query('structure exploration/classes')\n response = self._submit_query(query)\n\n return [elem['c']['value'].split('/')[-1] for elem in response]",
"def get_features(self):\n return []",
"def _get_classifers(self):\n return self.__classifers",
"def _get_classifers(self):\n return self.__classifers",
"def _get_classifers(self):\n return self.__classifers",
"def _get_classifers(self):\n return self.__classifers",
"def _get_classifers(self):\n return self.__classifers",
"def _get_classifers(self):\n return self.__classifers",
"def get_all_features(self) :\n raise NotImplementedError",
"def find_classes(cls, cutoff_class=None):\n cutoff_class = cutoff_class or Interface\n module = sys.modules[__name__]\n for ni, vi in inspect.getmembers(module, inspect.isclass):\n if issubclass(vi, cutoff_class) and vi is not cutoff_class:\n yield vi",
"def get_loaded_features(self) -> list[FeatureModule]:\n return list(self._features.values())",
"def dataclasses(self):\n return self._dataclasses",
"def list_all_classes(self):\n classes = list(self.extended_class_only_graph.nodes())\n classes = [SchemaClass(_cls, self) for _cls in classes]\n return classes",
"def get_data_classes(self):\n return self._do_request(\"dataclasses\")"
]
| [
"0.6694056",
"0.639444",
"0.6393714",
"0.63899076",
"0.6104128",
"0.59974563",
"0.59970427",
"0.5965981",
"0.59317553",
"0.5907754",
"0.59022886",
"0.58818835",
"0.58594966",
"0.58223754",
"0.58205307",
"0.5818834",
"0.5802727",
"0.5772846",
"0.5761205",
"0.5761205",
"0.5761205",
"0.5761205",
"0.5761205",
"0.5761205",
"0.56594425",
"0.5642454",
"0.56278265",
"0.56205297",
"0.56030786",
"0.5591939"
]
| 0.7001144 | 0 |
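
A hedged usage sketch for the find_all_feature_classes entry above; the geodatabase path is a placeholder and the snippet assumes an ArcGIS Python session where arcpy is available and both find_all_feature_classes and EmptyGeodatabaseError are already defined in scope.

import arcpy
from arcpy import env

gdb = r"C:\data\example.gdb"   # placeholder path, not taken from the dataset row
env.workspace = gdb            # the function lists against the current workspace
try:
    for fc_path in find_all_feature_classes(gdb):
        print(fc_path)         # e.g. "Roads", or "Hydro\Rivers" for classes inside a dataset
except EmptyGeodatabaseError:
    print("No feature classes found in", gdb)
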
Ensures that path points to a file that (1) exists and (2) appears to be a file geodatabase (i.e. a file that has the extension .gdb). Parameters | def ensure_valid_gdb(path):
if not os.path.isfile(path):
raise IOError("geodatabase path does not appear to point to a file.\n")
elif path[-4:] != '.gdb':
raise IOError("path exists, but does not point to a geodatabase.\n")
else:
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_db_path_correct(self, path):\n\t\tif path is None:\n\t\t\treturn self.file_loc() is None\n\t\telse:\n\t\t\treturn self.file_loc() == os.path.abspath(path)",
"def check_path(f):\n if not os.path.exists(f):\n raise Path('%s does not exist' % f)",
"def check_path(path, diagnostic):\n if not os.path.exists(path):\n print(f\"Could not find {path}. {diagnostic}\")\n sys.exit(1)",
"def db_file_exist(path: str):\n if os.path.exists(path):\n logger.debug('>> DB-file found')\n else:\n open(path, 'a').close()\n logger.debug('>> DB-file not found. Creating...')",
"def detect_database(database_path):\n if database_path[-3:] != '.db':\n return False\n if not os.path.exists(database_path):\n return False\n return True",
"def checkPath(self, filename):\r\n if (not os.path.exists(filename)):\r\n filename = os.getenv('MDLROOT')+'/'+filename\r\n if (not os.path.exists(filename)):\r\n print \"[MDL] ERROR, FILE\", filename, \"DOES NOT EXIST.\"\r\n sys.exit(1)\r\n return filename",
"def existent_file(file_path):\n if not os.path.exists(file_path):\n raise argparse.ArgumentTypeError(\"Input file path does not exist\")\n return file_path",
"def verify_path(path):\r\n if not os.path.exists(path):\r\n raise FileNotFoundException(path)",
"def checkExistenceFile(path):\n path = os.path.abspath(path)\n return os.path.isfile(path)",
"def _is_valid_file(arg: str) -> str:\n if not os.path.exists(arg):\n raise FileNotFoundError(\"%s does not exist\" % arg)\n return arg",
"def checkPath(filename, projectSource):\n filePath = os.path.join(projectSource, filename)\n if os.path.exists(filePath):\n pass\n else:\n sys.stderr.write(\"Error: \" + filePath + \" not found\")\n sys.exit(1)\n return filePath",
"def _check_file(self, name):\n self.assertTrue(os.path.exists(name), \"Could not find table %s.\" % name)",
"def check_file(f):\n if not os.path.isfile(f):\n raise OSError(f + ' not found.')\n if f.startswith('~'):\n raise OSError(f + ' has \"~\" in path.')",
"def check_path(path):\n import os\n if not os.path.exists(path):\n print(\"Path does not exist\")\n print(\"\")\n sys.exit()",
"def test_initiate_tested_path_does_not_exists(self):\n\n expected = {self.file_to_test: {}}\n\n self.inactive_db.database = {}\n self.inactive_db.initiate()\n\n self.assertEqual(expected, self.inactive_db.database)",
"def test_get_filepath(self):\r\n filepath = self.profile.get_filepath('testing.db')\r\n self.assertTrue(filepath.startswith(self.profile_path))",
"def check_valid_path(path):\n\n path = os.path.normpath(path)\n if not os.path.exists(path):\n print(f\"{path} doesn't exist\")\n print('Code execution exit')\n sys.exit()",
"def isfile (self, path):\r\n pass",
"def get_valid_path(file_path: Path, prompt_title: str=\"PATH TO FILE\") -> Path:\n\n print(f\"{Color.EMPHASIS}{prompt_title}{Color.END}\")\n while True:\n if file_path.exists() and file_path.is_file():\n return file_path\n else:\n file_path = Path(input(f\"{Color.INFORMATION}Enter the file's path: {Color.END}\"))",
"def is_file(self, path: PathLike):",
"def check_path(filename):\n return not bool(checkPath(filename))",
"def verify_path(path):\n if path is None:\n sys.exit('Program terminated. You must specify a correct path.')\n path = Path(path)\n assert path.exists(), f'The specified path was not found: {path}.'\n return path",
"def search_existing_file(path):\n return os.path.isfile(path)",
"def is_declaring_file(self, address, file_path):",
"def check_if_file_exists(file: str) -> Union[str, None]:\n if not os.path.exists(file) and not os.path.exists(f'{file}_qforce'):\n sys.exit(f'ERROR: \"{file}\" does not exist.\\n')\n return file",
"def test_valid_pathname(self):\n self.assertTrue(Util.is_pathname_valid('./myrandomvalidfilename.dat'))\n self.assertTrue(Util.is_pathname_valid('myrandomvalidfilename.dat'))",
"def find_hash_db_r(path: Path) -> Path:\n abs_path = path.absolute()\n cur_path = abs_path / DB_FILENAME\n if cur_path.is_file():\n return cur_path\n parent = abs_path.parent\n if parent != abs_path:\n return find_hash_db_r(parent)",
"def db_file():\n return abspath('vmchecker.db')",
"def exists(self, path):",
"def fileCheck(file):\n if not os.path.isfile(file):\n print('File : ',file)\n print('E... '+'no file')\n sys.exit()"
]
| [
"0.6616764",
"0.64977694",
"0.6336023",
"0.6326243",
"0.6283675",
"0.6255006",
"0.62168324",
"0.6167874",
"0.6159731",
"0.61583686",
"0.614238",
"0.61239034",
"0.61029625",
"0.6100908",
"0.608942",
"0.6063484",
"0.6059719",
"0.60456586",
"0.60453606",
"0.60333216",
"0.6032077",
"0.6017587",
"0.60087675",
"0.6004884",
"0.600471",
"0.6003405",
"0.59287184",
"0.59135485",
"0.5901351",
"0.58728033"
]
| 0.7969565 | 0 |
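
A short usage sketch for the ensure_valid_gdb entry above; the path is a placeholder and the function is assumed to be in scope. It shows the validate-then-proceed pattern the docstring implies.

gdb = r"C:\data\example.gdb"   # placeholder path
try:
    if ensure_valid_gdb(gdb):
        print("Valid geodatabase, safe to continue:", gdb)
except IOError as err:
    print("Rejected path:", err)
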
Convert a feature class infc from its current projection to a new projection, and place the new output in folder. If infc does not have a valid spatial reference, then it cannot be projected. Parameters | def project_feature_class(infc, folder, projection = 4326):
dsc = arcpy.Describe(infc)
shortname = infc.split('\\')[1] if len(infc.split('\\')) == 2 else infc
if dsc.spatialReference.Name == "Unknown":
print 'Skipped {} - undefined coordinate system.'.format(shortname)
else:
print 'Projecting {}'.format(shortname)
outfc = os.path.join(folder, shortname + '.shp')
outcs = arcpy.SpatialReference(projection)
arcpy.Project_management(infc, outfc, outcs)
print arcpy.GetMessages() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reprojectAndSaveNewShapefile(inFilepath,outFilepath,to_EPSG):\r\n import geopandas as gpd\r\n from fiona.crs import from_epsg\r\n\r\n inFile = gpd.read_file(inFilepath)\r\n inFile_proj = inFile.copy()\r\n inFile_proj['geometry'] = inFile_proj['geometry'].to_crs(epsg=to_EPSG)\r\n inFile_proj.crs = from_epsg(to_EPSG)\r\n inFile_proj.to_file(outFilepath)",
"def transform(infile, output, insrs, format_name):\n\n logging.info('Transforming %s from %s to %s' % (infile, insrs, output)) \n in_srs = osr.SpatialReference()\n in_srs.ImportFromEPSG(insrs)\n out_srs = osr.SpatialReference()\n out_srs.ImportFromEPSG(4324)\n coordTrans = osr.CoordinateTransformation(in_srs, out_srs)\n\n in_dsn = ogr.Open(infile)\n in_layer = in_dsn.GetLayer()\n in_feature_definition = in_layer.GetLayerDefn()\n\n out_driver = ogr.GetDriverByName(format_name)\n out_dsn = out_driver.CreateDataSource(output)\n out_layer = out_dsn.CreateLayer(in_layer.GetName(),\n geom_type=in_layer.GetGeomType())\n\n # add fields\n for i in range(0, in_feature_definition.GetFieldCount()):\n fieldDefn = in_feature_definition.GetFieldDefn(i)\n out_layer.CreateField(fieldDefn)\n\n # get the output layer's feature definition\n out_feature_definition = out_layer.GetLayerDefn()\n\n # loop through the input features\n inFeature = in_layer.GetNextFeature()\n while inFeature:\n # get the input geometry\n geom = inFeature.GetGeometryRef().Clone()\n # reproject the geometry\n geom.Transform(coordTrans)\n # create a new feature\n outFeature = ogr.Feature(out_feature_definition)\n # set the geometry and attribute\n outFeature.SetGeometry(geom)\n for i in range(0, out_feature_definition.GetFieldCount()):\n outFeature.SetField(out_feature_definition.GetFieldDefn(i).GetNameRef(), inFeature.GetField(i))\n # add the feature to the shapefile\n out_layer.CreateFeature(outFeature)\n # destroy the features and get the next input feature\n outFeature.Destroy()\n inFeature.Destroy()\n inFeature = in_layer.GetNextFeature()\n\n # close the shapefiles\n in_dsn.Destroy()\n out_dsn.Destroy()",
"def reprojectAndSaveNewRaster(inFilepath,outFilepath,to_EPSG):\r\n from osgeo import gdal\r\n input_raster = gdal.Open(inFilepath)\r\n EPSG_string = \"EPSG:\"+str(to_EPSG)\r\n ras = gdal.Warp(outFilepath,input_raster,dstSRS=EPSG_string)\r\n del ras",
"def feature_transform(feature, crs_out, crs_in={'init': 'epsg:4326'}):\n p_in = Proj(crs_in)\n p_out = Proj(crs_out)\n feature_out = copy.deepcopy(feature)\n new_coords = []\n if feature['geometry']['type'] == 'Polygon':\n # Probably also work for multypolygons\n for ring in feature['geometry']['coordinates']:\n x2, y2 = transform(p_in, p_out, *zip(*ring))\n new_coords.append(zip(x2, y2))\n feature_out['geometry']['coordinates'] = new_coords\n elif feature['geometry']['type'] == 'Point':\n # Probably doesn't work for multipoints\n new_coords = transform(p_in, p_out, *feature['geometry']['coordinates'])\n feature_out['geometry']['coordinates'] = new_coords\n else:\n raise ValueError('Unsuported feature type')\n return feature_out",
"def reproject_vector( path, epsg_from=None, epsg_to=None):\n\n if not epsg_to: raise Exception(\"please, specify the output EPSG codes\")\n\n inDataSet = None\n outDataSet = None\n inFeature = None\n outFeature = None\n outLayer = None\n\n try:\n\n driver = ogr.GetDriverByName('ESRI Shapefile')\n inDataSet = driver.Open(path, 0) # 0 means read-only\n\n # define input SpatialReference\n if not epsg_from:\n layer = inDataSet.GetLayer()\n inSpatialRef = layer.GetSpatialRef()\n else:\n inSpatialRef = osr.SpatialReference()\n inSpatialRef.ImportFromEPSG(epsg_from)\n\n # define output SpatialReference\n outSpatialRef = osr.SpatialReference()\n outSpatialRef.ImportFromEPSG(epsg_to)\n\n # create the CoordinateTransformation\n coordTrans = osr.CoordinateTransformation(inSpatialRef, outSpatialRef)\n\n # get the first input layer and the geometry type\n inLayer = inDataSet.GetLayer()\n geotype = inLayer.GetGeomType()\n lname = inLayer.GetName()\n\n drv = ogr.GetDriverByName(\"ESRI Shapefile\")\n outDataSet = drv.CreateDataSource(\"/vsimem/memory.shp\")\n\n outLayer = outDataSet.CreateLayer(lname, srs=outSpatialRef, geom_type=geotype)\n\n # add fields\n inLayerDefn = inLayer.GetLayerDefn()\n\n for i in range(0, inLayerDefn.GetFieldCount()):\n fieldDefn = inLayerDefn.GetFieldDefn(i)\n outLayer.CreateField(fieldDefn)\n\n # get the output layer\"s feature definition\n outLayerDefn = outLayer.GetLayerDefn()\n\n counter = 1\n\n # loop through the input features\n inFeature = inLayer.GetNextFeature()\n while inFeature:\n # get the input geometry\n geom = inFeature.GetGeometryRef()\n # reproject the geometry\n geom.Transform(coordTrans)\n # create a new feature\n outFeature = ogr.Feature(outLayerDefn)\n # set the geometry and attribute\n outFeature.SetGeometry(geom)\n for i in range(0, outLayerDefn.GetFieldCount()):\n outFeature.SetField(outLayerDefn.GetFieldDefn(i).GetNameRef(), inFeature.GetField(i))\n # add the feature to the shapefile\n outLayer.CreateFeature(outFeature)\n\n # destroy the features and get the next input feature\n if outFeature: outFeature = None\n inFeature = inLayer.GetNextFeature()\n\n counter += 1\n #print(counter)\n\n return outDataSet\n\n except RuntimeError as err:\n raise err\n except Exception as e:\n raise e\n\n finally:\n if inDataSet: outDataSet == None # give back control to C++\n if outDataSet: outDataSet == None\n if outLayer: outLayer == None\n if inFeature: inFeature == None\n if outFeature: outFeature = None",
"def Reproject(cls,inRaster, outRaster,EPSG):\r\n try:\r\n import gdal\r\n except:\r\n raise ImportError(\"Can not import module GDAL\")\r\n try:\r\n dataset = gdal.Open(infile)\r\n out = dataset.GetRasterBand(1)\r\n print dataset.GetMetadata()\r\n return out\r\n except:\r\n raise ImportError(\"Can not read band\")\r\n #if not tmp:\r\n # raise Exception(\"Could not orthorectify the image \")\r\n\r\n com=\" \".join([\"gdalwarp -t_srs EPSG:\"+str(EPSG),InRaster,OutRaster])\r\n tmp=os.system(com)\r\n print tmp\r\n #if not tmp:\r\n # raise Exception(\"Could not reproject the image \")\r\n #os.remove(OutRaster)\r",
"def _transform_feature(self, contig, in_feature):\n def _aliases(feat):\n keys = ('locus_tag', 'old_locus_tag', 'protein_id',\n 'transcript_id', 'gene', 'ec_number', 'gene_synonym')\n alias_list = []\n for key in keys:\n if key in feat['attributes']:\n alias_list.extend([(key, val) for val in feat['attributes'][key]])\n return alias_list\n\n if in_feature['start'] < 1 or in_feature['end'] > len(contig):\n self.warn(f\"Feature with invalid location for specified contig: {in_feature}\")\n if self.strict:\n raise ValueError(\"Features must be completely contained within the Contig in the \"\n f\"Fasta file. Feature: in_feature\")\n return\n\n feat_seq = contig.seq[in_feature['start']-1:in_feature['end']].upper()\n if in_feature['strand'] in {'-', '-1'}:\n feat_seq = feat_seq.reverse_complement()\n\n # if the feature ID is duplicated (CDS or transpliced gene) we only\n # need to update the location and dna_sequence\n if in_feature.get('ID') in self.feature_dict:\n existing = self.feature_dict[in_feature['ID']]\n existing['location'].append(self._location(in_feature))\n existing['dna_sequence'] = existing.get('dna_sequence', '') + str(feat_seq)\n existing['dna_sequence_length'] = len(existing['dna_sequence'])\n return\n\n # The following is common to all the feature types\n out_feat = {\n \"id\": in_feature.get('ID'),\n \"type\": in_feature['type'],\n \"location\": [self._location(in_feature)],\n \"dna_sequence\": str(feat_seq),\n \"dna_sequence_length\": len(feat_seq),\n \"md5\": hashlib.md5(str(feat_seq).encode('utf8')).hexdigest(),\n \"warnings\": [],\n \"flags\": [],\n }\n\n # add optional fields\n if 'note' in in_feature['attributes']:\n out_feat['note'] = in_feature['attributes'][\"note\"][0]\n ont, db_xrefs = self._get_ontology_db_xrefs(in_feature['attributes'])\n if ont:\n out_feat['ontology_terms'] = ont\n aliases = _aliases(in_feature)\n if aliases:\n out_feat['aliases'] = aliases\n if db_xrefs:\n out_feat['db_xrefs'] = db_xrefs\n if 'product' in in_feature['attributes']:\n out_feat['functions'] = in_feature['attributes'][\"product\"]\n if 'product_name' in in_feature['attributes']:\n if \"functions\" in out_feat:\n out_feat['functions'].extend(in_feature['attributes'][\"product_name\"])\n else:\n out_feat['functions'] = in_feature['attributes'][\"product_name\"]\n if 'function' in in_feature['attributes']:\n out_feat['functional_descriptions'] = in_feature['attributes'][\"function\"]\n if 'inference' in in_feature['attributes']:\n GenomeUtils.parse_inferences(in_feature['attributes']['inference'])\n if 'trans-splicing' in in_feature['attributes'].get('exception', []):\n out_feat['flags'].append('trans_splicing')\n if 'pseudo' in in_feature['attributes'].get('exception', []):\n out_feat['flags'].append('pseudo')\n if 'ribosomal-slippage' in in_feature['attributes'].get('exception', []):\n out_feat['flags'].append('ribosomal_slippage')\n parent_id = in_feature.get('Parent', '')\n if parent_id and parent_id not in self.feature_dict:\n raise ValueError(f\"Parent ID: {parent_id} was not found in feature ID list.\")\n\n # if the feature is a exon or UTR, it will only be used to update the\n # location and sequence of it's parent, we add the info to it parent\n # feature but not the feature dict\n if in_feature['type'] in self.skip_types:\n if parent_id and in_feature['type'] in {'exon', 'five_prime_UTR', 'three_prime_UTR'}:\n parent = self.feature_dict[parent_id]\n if in_feature['type'] not in parent:\n parent[in_feature['type']] = []\n parent[in_feature['type']].append(out_feat)\n 
return\n\n # add type specific features\n elif 'gene' in in_feature['type']:\n out_feat['protein_translation_length'] = 0\n out_feat['cdss'] = []\n\n elif in_feature['type'] == 'CDS':\n if parent_id:\n parent = self.feature_dict[parent_id]\n if 'cdss' in parent: # parent must be a gene\n if not is_parent(parent, out_feat):\n parent[\"warnings\"] = parent.get('warnings', []) + [\n warnings[\"genes_CDS_child_fails_location_validation\"].format(out_feat[\"id\"])]\n out_feat[\"warnings\"].append(\n warnings[\"CDS_fail_child_of_gene_coordinate_validation\"].format(parent_id))\n parent['cdss'].append(in_feature['ID'])\n out_feat['parent_gene'] = parent_id\n else: # parent must be mRNA\n if not is_parent(parent, out_feat):\n parent[\"warnings\"] = parent.get('warnings', []) + [\n warnings[\"mRNA_fail_parent_coordinate_validation\"].format(out_feat[\"id\"])]\n out_feat[\"warnings\"].append(\n warnings[\"CDS_fail_child_of_mRNA_coordinate_validation\"].format(parent_id))\n parent['cds'] = in_feature['ID']\n out_feat['parent_mrna'] = parent_id\n parent_gene = self.feature_dict[parent['parent_gene']]\n parent_gene['cdss'].append(in_feature['ID'])\n out_feat['parent_gene'] = parent['parent_gene']\n # keep track of CDSs for post processing\n self.cdss.add(out_feat['id'])\n\n elif in_feature['type'] == 'mRNA':\n if parent_id:\n parent = self.feature_dict[parent_id]\n if 'mrnas' not in parent:\n parent['mrnas'] = []\n if 'cdss' in parent: # parent must be a gene\n parent['mrnas'].append(in_feature['ID'])\n out_feat['parent_gene'] = parent_id\n if not is_parent(parent, out_feat):\n parent[\"warnings\"] = parent.get('warnings', []) + [\n warnings[\"genes_mRNA_child_fails_location_validation\"].format(out_feat[\"id\"])]\n out_feat[\"warnings\"].append(\n warnings[\"mRNAs_parent_gene_fails_location_validation\"].format(parent_id))\n\n else:\n out_feat[\"type\"] = in_feature['type']\n # this prevents big misc_features from blowing up the genome size\n if out_feat['dna_sequence_length'] > MAX_MISC_FEATURE_SIZE:\n del out_feat['dna_sequence']\n if parent_id:\n parent = self.feature_dict[parent_id]\n if 'children' not in parent:\n parent['children'] = []\n parent['children'].append(out_feat['id'])\n out_feat['parent_gene'] = parent_id\n if not is_parent(parent, out_feat):\n parent[\"warnings\"] = parent.get('warnings', []) + [\n warnings[\"generic_parents_child_fails_location_validation\"].format(out_feat[\"id\"])]\n out_feat[\"warnings\"].append(\n warnings[\"generic_childs_parent_fails_location_validation\"].format(parent_id))\n\n # cleanup empty optional arrays\n for key in ['warnings', 'flags']:\n if not out_feat[key]:\n del out_feat[key]\n\n self.feature_dict[out_feat['id']] = out_feat",
"def convert_and_update_xyfield(workspace,fc,xfield,yfield,to_cs,transformationname = None):\n # http://desktop.arcgis.com/en/arcmap/10.4/analyze/arcpy-classes/pdf/geographic_coordinate_systems.pdf\n # http://desktop.arcgis.com/en/arcmap/latest/map/projections/pdf/geographic_transformations.pdf\n \n arcpy.env.workspace = workspace\n errorcount = 0\n to_cs = arcpy.SpatialReference(to_cs)\n with arcpy.da.UpdateCursor(fc,['SHAPE@',xfield,yfield]) as cursor:\n for row in cursor:\n try:\n if transformationname:\n new_cs = row[0].projectAs(to_cs,transformationname)\n else:\n new_cs = row[0].projectAs(to_cs)\n row[1] = new_cs.firstPoint.X # xfield = SHAPE@X\n row[2] = new_cs.firstPoint.Y # yfield = SHAPE@Y\n cursor.updateRow(row)\n except RuntimeError as e:\n errorcount += 1\n print(f'{e}')\n except AttributeError as e:\n errorcount += 1\n print(f'{e}')\n print(f'errorcount: {errorcount}')",
"def _post_process_route_fcs(self):\r\n # Create the final output feature class\r\n desc = arcpy.Describe(self.route_fcs[0])\r\n helpers.run_gp_tool(\r\n LOGGER,\r\n arcpy.management.CreateFeatureclass, [\r\n os.path.dirname(self.out_routes),\r\n os.path.basename(self.out_routes),\r\n \"POLYLINE\",\r\n self.route_fcs[0], # template feature class to transfer full schema\r\n \"SAME_AS_TEMPLATE\",\r\n \"SAME_AS_TEMPLATE\",\r\n desc.spatialReference\r\n ]\r\n )\r\n\r\n # Insert the rows from all the individual output feature classes into the final output\r\n fields = [\"SHAPE@\"] + [f.name for f in desc.fields]\r\n with arcpy.da.InsertCursor(self.out_routes, fields) as cur: # pylint: disable=no-member\r\n for fc in self.route_fcs:\r\n for row in arcpy.da.SearchCursor(fc, fields): # pylint: disable=no-member\r\n cur.insertRow(row)",
"def Projected(InputFilePath,OutputFilePath): # perform a reproject raster onto a DEM and return\r\n try:\r\n print(\"\"\"\r\nReprojecting Raster...\r\n \"\"\") \r\n \r\n arcpy.ProjectRaster_management(in_raster=InputFilePath,out_raster=OutputFilePath,out_coor_system=\"PROJCS['NAD_1983_StatePlane_California_III_FIPS_0403',GEOGCS['GCS_North_American_1983',DATUM['D_North_American_1983',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Lambert_Conformal_Conic'],PARAMETER['False_Easting',2000000.0],PARAMETER['False_Northing',500000.0],PARAMETER['Central_Meridian',-120.5],PARAMETER['Standard_Parallel_1',37.06666666666667],PARAMETER['Standard_Parallel_2',38.43333333333333],PARAMETER['Latitude_Of_Origin',36.5],UNIT['Meter',1.0]]\",resampling_type=\"NEAREST\",cell_size=\"27.8165597364916 27.8165597364914\",geographic_transform=\"WGS_1984_(ITRF00)_To_NAD_1983\",Registration_Point=\"#\",in_coor_system=\"GEOGCS['GCS_WGS_1984',DATUM['D_WGS_1984',SPHEROID['WGS_1984',6378137.0,298.257223563]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]]\")\r\n print(\"Complete\")\r\n \r\n except Exception, err: # an error occurred (probably in arcGIS)\r\n raise RuntimeError(\"** Error: Project Raster Failed (\"+str(err)+\")\")",
"def Point_to_FeatureClass(self, fc):\n\n\n feature_class = []\n for index, traectory in enumerate(self.__traectory_list):\n point_row = arcpy.Point(X=traectory[0], Y=traectory[1], Z=traectory[2], ID=index)\n feature_class.append(arcpy.PointGeometry(point_row, arcpy.SpatialReference(2436)))\n arcpy.CopyFeatures_management(feature_class, (self.workspace + '\\\\' + fc))\n print 'Complete Creating a Point Feature Class'\n\n return None",
"def updateParameters(self, parameters):\r\n\t\tin_wikiplace_IRI = parameters[0]\r\n\t\tin_stat_fields = parameters[1]\r\n\t\t# out_location = parameters[2]\r\n\t\t# out_points_name = parameters[3]\r\n\t\t\r\n\t\tif in_wikiplace_IRI.altered and not in_stat_fields.altered:\r\n\t\t\tinputFeatureClassName = in_wikiplace_IRI.valueAsText\r\n\t\t\tlastIndexOFGDB = inputFeatureClassName.rfind(\"\\\\\")\r\n\t\t\tfeatureClassName = inputFeatureClassName[(lastIndexOFGDB+1):]\r\n\t\t\tcurrentWorkspace = inputFeatureClassName[:lastIndexOFGDB]\r\n\r\n\t\t\tif currentWorkspace.endswith(\".gdb\") == False:\r\n\t\t\t\tmessages.addErrorMessage(\"Please enter a feature class in file geodatabase for the input feature class.\")\r\n\t\t\t\traise arcpy.ExecuteError\r\n\t\t\telse:\r\n\t\t\t\t# if in_related_table.value:\r\n\t\t\t\tarcpy.env.workspace = currentWorkspace\r\n\t\t\t\t# out_location.value = currentWorkspace\r\n\t\t\t\t# out_points_name.value = featureClassName + \"_noFunc_merge\"\r\n\t\t\t\t# # check whether the input table are in the same file geodatabase as the input feature class\r\n\t\t\t\t# inputTableName = in_related_table.valueAsText\r\n\t\t\t\t# lastIndexOFTable = inputTableName.rfind(\"\\\\\")\r\n\t\t\t\t# currentWorkspaceTable = inputTableName[:lastIndexOFTable]\r\n\t\t\t\t# if currentWorkspaceTable != currentWorkspace:\r\n\t\t\t\t# \tmessages.addErrorMessage(\"Please enter a table in the same file geodatabase as the input feature class.\")\r\n\t\t\t\t# \traise arcpy.ExecuteError\r\n\t\t\t\t# else:\r\n\t\t\t\t# \tif UTIL.detectRelationship(inputFeatureClassName, inputTableName):\r\n\t\t\t\t# \t\tarcpy.AddMessage(\"The feature class and table are related!\")\r\n\t\t\t\trelatedTableList = UTIL.getRelatedTableFromFeatureClass(inputFeatureClassName)\r\n\t\t\t\t# fieldmappings = arcpy.FieldMappings()\r\n\t\t\t\t# fieldmappings.addTable(inputFeatureClassName)\r\n\t\t\t\t\r\n\t\t\t\tnoFunctionalPropertyTable = []\r\n\r\n\t\t\t\tfor relatedTable in relatedTableList:\r\n\t\t\t\t\tfieldList = arcpy.ListFields(relatedTable)\r\n\t\t\t\t\tif \"origin\" not in fieldList and \"end\" not in fieldList:\r\n\t\t\t\t\t\tnoFunctionalFieldName = fieldList[2].name\r\n\t\t\t\t\t\tarcpy.AddMessage(\"noFunctionalFieldName: {0}\".format(noFunctionalFieldName))\r\n\t\t\t\t\t\tnoFunctionalPropertyTable.append([noFunctionalFieldName, 'COUNT', relatedTable])\r\n\t\t\t\t\t\t# MergeNoFunctionalProperty.relatedTableFieldList.append([noFunctionalFieldName, relatedTable, 'COUNT'])\r\n\t\t\t\t\t# fieldmappings.addTable(relatedTable)\r\n\t\t\t\t\t# fieldList = arcpy.ListFields(relatedTable)\r\n\t\t\t\t\t# noFunctionalFieldName = fieldList[len(fieldList)-1].name\r\n\t\t\t\t\t# arcpy.AddMessage(\"noFunctionalFieldName: {0}\".format(noFunctionalFieldName))\r\n\t\t\t\t\t# fieldmap = fieldmappings.getFieldMap(fieldmappings.findFieldMapIndex(noFunctionalFieldName))\r\n\t\t\t\t\t# fieldmap.addInputField(relatedTable, \"wikiURL\")\r\n\t\t\t\t\t# fieldmap.addInputField(inputFeatureClassName, \"URL\")\r\n\t\t\t\t\t# fieldmappings.replaceFieldMap(fieldmappings.findFieldMapIndex(noFunctionalFieldName), fieldmap)\r\n\r\n\t\t\t\tin_stat_fields.values = noFunctionalPropertyTable\r\n\r\n\r\n\r\n\t\t\t\t# fieldmappings.removeFieldMap(fieldmappings.findFieldMapIndex(\"wikiURL\"))\r\n\r\n\r\n\r\n\t\t\t\t# in_field_mapping.value = fieldmappings.exportToString()\r\n\r\n\t\t\t# if in_stat_fields.altered:\r\n\t\t\t# \tfieldMergeRuleTest = in_stat_fields.valueAsText\r\n\t\t\t# \tif fieldMergeRuleTest:\r\n\t\t\t# \tfieldSplitList = 
fieldMergeRuleTest.split(\";\")\r\n\t\t\t# \tfor fieldSplitItem in fieldSplitList:\r\n\t\t\t# \t\tfieldMergeList = fieldSplitList.split(\"\\t\")\r\n\t\t\t# \t\tfor item in MergeNoFunctionalProperty.relatedTableFieldList:\r\n\t\t\t# \t\t\tif item[]\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\r\n\r\n\t\treturn",
"def _project(self):\n ghosts_w = self.input_field.topology.ghosts()\n self.input_field.data[0], self.input_field.data[1], \\\n self.input_field.data[2] = \\\n fftw2py.projection_om_3d(self.input_field.data[0],\n self.input_field.data[1],\n self.input_field.data[2], ghosts_w)",
"def reprojectShapefile(in_shapefile, model_raster, out_dir, name_override=None) -> str:\n\tshapefile_path = in_shapefile # old variable name; standardized to match function parameters\n\t# get out_path from out_dir\n\tif name_override:\n\t\tout_path = os.path.join(out_dir,name_override)\n\telse:\n\t\tin_base,in_ext = os.path.splitext(os.path.basename(in_shapefile))\n\t\tout_path = os.path.join(out_dir,in_base+\"_REPROJ\"+in_ext)\n\t# get raster projection as wkt\n\twith rasterio.open(model_raster,'r') as img:\n\t\traster_wkt = img.profile['crs'].to_wkt()\n\t# get shapefile projection as wkt\n\twith open(shapefile_path.replace(\".shp\",\".prj\")) as rf:\n\t\tshapefile_wkt = rf.read()\n\n\t# if it's a match, nothing needs to be done\n\tif raster_wkt == shapefile_wkt:\n\t\tlog.warning(\"CRS already match\")\n\t\t# get input directory and filename\n\t\tin_dir = os.path.dirname(shapefile_path)\n\t\tin_name = os.path.splitext(os.path.basename(shapefile_path))[0]\n\t\t# list all elements of shapefile\n\t\tall_shape_files = glob.glob(os.path.join(in_dir,f\"{in_name}.*\"))\n\t\t# get output directory and filenames\n\t\tout_dir = os.path.dirname(out_path)\n\t\tout_name = os.path.splitext(os.path.basename(out_path))[0]\n\t\tfor f in all_shape_files:\n\t\t\tname, ext = os.path.splitext(os.path.basename(f))\n\t\t\tout_f = os.path.join(out_dir,f\"{out_name}{ext}\")\n\t\t\twith open(f,'rb') as rf:\n\t\t\t\twith open(out_f,'wb') as wf:\n\t\t\t\t\tshutil.copyfileobj(rf,wf)\n\telse:\n\t\t# get CRS objects\n\t\traster_crs = CRS.from_wkt(raster_wkt)\n\t\tshapefile_crs = CRS.from_wkt(shapefile_wkt)\n\t\t#transformer = Transformer.from_crs(raster_crs,shapefile_crs)\n\n\t\t# convert geometry and crs\n\t\tout_shapefile_path = out_path # os.path.join(temp_dir,os.path.basename(shapefile_path))\n\t\tdata = gpd.read_file(shapefile_path)\n\t\tdata_proj = data.copy()\n\t\tdata_proj['geometry'] = data_proj['geometry'].to_crs(raster_crs)\n\t\tdata_proj.crs = raster_crs\n\n\t\t# save output\n\t\tdata_proj.to_file(out_shapefile_path)\n\n\n\treturn out_shapefile_path",
"def coord_transform_from_wkt(proj_ref_wkt, new_cs_wkt):\n # Transform the features into WGS-84\n # What is the NITF/ophoto Referenced in?\n old_cs = osr.SpatialReference()\n old_cs.ImportFromWkt(proj_ref_wkt)\n\n # How about going to WGS-84?\n new_cs = osr.SpatialReference()\n new_cs.ImportFromWkt(new_cs_wkt)\n\n # The actual Tranfromation class/object\n transform = osr.CoordinateTransformation(old_cs, new_cs)\n return transform",
"def Reproject(x, y, in_grid = 4326, out_grid = 32737):\n \n inProj = Proj(init='epsg:'+str(in_grid))\n outProj = Proj(init='epsg:'+str(out_grid))\n \n \n x2,y2 = transform(inProj,outProj,x,y)\n \n return x2, y2",
"def reproject(dataset, epsg):\n dataclass = dataset.__class__.__name__\n # Run appropriate reprojection method\n if dataclass == 'GeoDataFrame':\n repro = geopandas.GeoDataFrame.copy(dataclass)\n repro[repro.geometry.name] = repro.geometry.to_crs(epsg=epsg)\n repro.crs = fiona.crs.from_epsg(epsg)\n elif dataclass == 'Dataset':\n repro = gdal_reproject(dataset, '', epsg=epsg)\n return repro",
"def project(self, feature):\n return feature",
"def create_new_feature_class(in_fc, out_fc, flds=None, where=None, shp_prefix=None):\n create_layer('__killme', in_fc, flds, where, shp_prefix)\n arcpy.CopyFeatures_management('__killme', out_fc)\n arcpy.Delete_management('__killme')\n\n # at 10.3 field aliases persist, so set these to match the field name\n for f in arcpy.ListFields(out_fc):\n if f.name != f.aliasName and f.type != 'Geometry':\n print f.name\n arcpy.AlterField_management(out_fc, f.name, new_field_alias=f.name)",
"def reproject(src_path, out_path, target_crs='EPSG:4326'):\n\n # load satdata\n satdata = load(src_path)\n\n # calculate a transform and new dimensions using our dataset's current CRS and dimensions\n transform, width, height = calculate_default_transform(satdata.crs,\n target_crs,\n satdata.width,\n satdata.height,\n *satdata.bounds)\n\n # Using a copy of the metadata from the clipped raster dataset and the transform we defined above,\n # we can write a new geoTIFF containing the reprojected and clipped raster data:\n metadata = satdata.meta.copy()\n\n # Change the CRS, transform, and dimensions in metadata to match our desired output dataset\n metadata.update({'crs':target_crs,\n 'transform':transform,\n 'width':width,\n 'height':height})\n\n # apply the transform & metadata to perform the reprojection\n with rasterio.open(out_path, 'w', **metadata) as reprojected:\n for band in range(1, satdata.count + 1):\n rasterio_reproject(\n source=rasterio.band(satdata, band),\n destination=rasterio.band(reprojected, band),\n src_transform=satdata.transform,\n src_crs=satdata.crs,\n dst_transform=transform,\n dst_crs=target_crs\n )",
"def reprojectQcew(overwrite=False):\n\n\tif exists(qcew_2913) and not overwrite:\n\t\tprint '\\nstate plane qcew already exists, if you wish to'\n\t\tprint 'overwrite the existing file use the \"overwrite\" flag\\n'\n\t\treturn\n\n\tgeom_type = 'POINT'\n\ttemplate = src_qcew\n\tospn = arcpy.SpatialReference(2913)\n\tmanagement.CreateFeatureclass(dirname(qcew_2913),\n\t\tbasename(qcew_2913), geom_type, template, spatial_reference=ospn)\n\n\ti_cursor = da.InsertCursor(qcew_2913, '*')\n\n\ts_fields = ['Shape@', '*']\n\twith da.SearchCursor(src_qcew, s_fields) as s_cursor:\n\t\t# replace point coordinates with geometry object in field\n\t\t# definition\n\t\tfields = list(s_cursor.fields)\n\t\tfields[1] = fields.pop(0)\n\n\t\tfor row in s_cursor:\n\t\t\tlist_row = list(row)\n\t\t\tlist_row[1] = list_row.pop(0)\n\t\t\td = OrderedDict(zip(fields, list_row))\n\n\t\t\tgeom = d['Shape@']\n\t\t\tgeom_2913 = geom.projectAs(ospn) \n\t\t\td['Shape@'] = geom_2913\n\t\t\td['POINT_X'] = geom_2913.firstPoint.X\n\t\t\td['POINT_Y'] = geom_2913.firstPoint.Y\n\n\t\t\twrite_row = [v for v in d.values()]\n\t\t\ti_cursor.insertRow(write_row)\n\n\tdel i_cursor",
"def reproject(self, lon, lat):\n if self.xform is None:\n # if the CRS hasn't been determined yet, we set it from the first image's lat/lon (take the UTM crs)\n utm_i = str(int(math.floor((self.images[0].lon + 180) / 6 ) % 60) + 1).zfill(2)\n epsg_code = int('326' + utm_i) if (self.images[0].lat >= 0) else int('327' + utm_i)\n self.crs_dest = QgsCoordinateReferenceSystem(epsg_code)\n self.xform = QgsCoordinateTransform(self.crs_src, self.crs_dest, QgsProject.instance())\n return self.xform.transform(QgsPointXY(lon, lat))",
"def spatialFilter(input_shp,aoi,output_shp):\n inDataSource = driver.Open(input_shp, 0)\n inlayer = inDataSource.GetLayer()\n\n # create the data source\n outdata_source = driver.CreateDataSource(output_shp)\n # create the spatial reference, WGS84\n srs = osr.SpatialReference()\n srs.ImportFromEPSG(4326)\n # create the layer\n outlayer = outdata_source.CreateLayer(\"outlayer\", srs, ogr.wkbPolygon)\n\n # Add input Layer Fields to the output Layer if it is the one we want\n inLayerDefn = inlayer.GetLayerDefn()\n for i in range(0, inLayerDefn.GetFieldCount()):\n fieldDefn = inLayerDefn.GetFieldDefn(i)\n fieldName = fieldDefn.GetName()\n outlayer.CreateField(fieldDefn)\n\n #load spatialfilter\n inspatialfilter = driver.Open(aoi, 0)\n inspatialfilterlayer = inspatialfilter.GetLayer()\n #get geometry for spatialfilter\n for inFeature in inspatialfilterlayer:\n spatialfiltergeom = inFeature.GetGeometryRef()\n\n inlayer.SetSpatialFilter(spatialfiltergeom)\n # Get the output Layer's Feature Definition\n outLayerDefn = outlayer.GetLayerDefn()\n for inFeature in inlayer:\n # Create output Feature\n outFeature = ogr.Feature(outLayerDefn)\n try:\n # Add field values from input Layer\n for i in range(0, outLayerDefn.GetFieldCount()):\n fieldDefn = outLayerDefn.GetFieldDefn(i)\n fieldName = fieldDefn.GetName()\n outFeature.SetField(outLayerDefn.GetFieldDefn(i).GetNameRef(),inFeature.GetField(i))\n # Set geometry\n geom = inFeature.GetGeometryRef()\n outFeature.SetGeometry(geom.Clone())\n # Add new feature to output Layer\n outlayer.CreateFeature(outFeature)\n except Exception:\n sys.exc_clear()\n inlayer.SetSpatialFilter(None)",
"def preprocess(input_dir, output_dir, crs, resolution, country, overwrite):\n # Set data directories if not provided and create them if necessary\n if not input_dir:\n input_dir = os.path.join(os.curdir, \"Data\", \"Input\")\n if not output_dir:\n output_dir = os.path.join(os.curdir, \"Data\", \"Intermediary\")\n input_dir, output_dir = Path(input_dir), Path(output_dir)\n for p in (input_dir, output_dir):\n p.mkdir(parents=True, exist_ok=True)\n\n # Create raster grid from CLI options\n geom = country_geometry(country)\n dst_crs = CRS.from_string(crs)\n transform, shape, bounds = create_grid(geom, dst_crs, resolution)\n args = {\n \"dst_crs\": dst_crs,\n \"dst_bounds\": bounds,\n \"dst_res\": resolution,\n \"overwrite\": overwrite,\n \"geom\": geom,\n }\n\n raw = Raw(input_dir)\n preprocess_land_cover(\n src_files=raw.land_cover,\n dst_raster=output_dir.joinpath(\"land_cover.tif\").as_posix(),\n **args,\n )\n preprocess_elevation(src_files=raw.elevation, dst_dir=output_dir, **args)\n preprocess_osm(\n src_file=raw.openstreetmap[0],\n dst_dir=output_dir,\n dst_crs=dst_crs,\n dst_shape=shape,\n dst_transform=transform,\n geom=geom,\n overwrite=overwrite,\n )\n preprocess_surface_water(\n src_files=raw.surface_water,\n dst_raster=output_dir.joinpath(\"surface_water.tif\").as_posix(),\n **args,\n )\n\n log.info(\"Writing area of interest to disk.\")\n with open(output_dir.joinpath(\"area_of_interest.geojson\"), \"w\") as f:\n json.dump(geom.__geo_interface__, f)",
"def transform_data(self, outformat=None, epsg=None):\n out_data = geopandas.GeoDataFrame.copy(self.data)\n if epsg and str(self.get_epsg()) != epsg:\n out_data[out_data.geometry.name] = \\\n self.data.geometry.to_crs(epsg=epsg)\n out_data.crs = fiona.crs.from_epsg(epsg)\n if outformat == formats.JSON and self.default_output in (\n formats.PANDAS, formats.JSON):\n out_json = out_data.to_json()\n if out_data.crs:\n gj = json.loads(out_json)\n gj[\"crs\"] = {\n \"type\": \"name\",\n \"properties\": {\n \"name\": out_data.crs[\"init\"].upper()\n }\n }\n return json.dumps(gj)\n else:\n return out_json\n elif outformat in [formats.PANDAS, None]:\n return out_data\n else:\n raise GaiaException(\"Format {} not supported\".format(outformat))",
"def createOutput(self, outputFC):\n\n #### Validate Output Workspace ####\n ERROR.checkOutputPath(outputFC)\n\n #### Shorthand Attributes ####\n ssdo = self.ssdo\n caseField = self.caseField\n\n #### Create Output Feature Class ####\n ARCPY.SetProgressor(\"default\", ARCPY.GetIDMessage(84003))\n tempCFLayer = \"tmpCFLayer\"\n\n try:\n DM.MakeFeatureLayer(ssdo.inputFC, tempCFLayer)\n first = True\n for key, value in self.cf.iteritems():\n oids = value[0]\n for oid in oids:\n sqlString = ssdo.oidName + '=' + str(oid)\n if first:\n DM.SelectLayerByAttribute(tempCFLayer, \n \"NEW_SELECTION\",\n sqlString)\n first = False\n else:\n DM.SelectLayerByAttribute(tempCFLayer,\n \"ADD_TO_SELECTION\", \n sqlString)\n\n UTILS.clearExtent(DM.CopyFeatures(tempCFLayer, outputFC))\n except:\n ARCPY.AddIDMessage(\"ERROR\", 210, outputFC)\n raise SystemExit()\n\n #### Set Attribute ####\n self.outputFC = outputFC",
"def geoPathToGPD(self, inFOV): \n points = []\n for geoPose in inFOV:\n points.append((geoPose.position.longitude, geoPose.position.latitude))\n poly = Polygon(points)\n # rospy.logerr(\"FOVPoly:\"+ str(poly))\n return gpd.GeoDataFrame({'geometry': [poly]})",
"def reproject_GeoGrid(geogrid_in, srs_string,\n out_xdim=None, out_ydim=None, out_geotransform=None,\n out_nodata_value=None, interp_method=None):\n src = geogrid_as_gdalInMem(geogrid_in)\n\n out_srs = osr.SpatialReference()\n assign_projection_to_srs(out_srs, srs_string)\n out_wkt = out_srs.ExportToWkt()\n\n dst_gdal_datatype = get_gdal_datatype(geogrid_in.data_array.dtype)\n\n try:\n dst = gdal.GetDriverByName('MEM').Create(\n '',\n out_xdim,\n out_ydim,\n 1,\n dst_gdal_datatype,\n )\n dst.SetGeoTransform(out_geotransform)\n dst.SetProjection(out_wkt)\n except ValueError:\n raise ValueError('Error creating dst in reproject_GeoGrid()')\n except AttributeError:\n raise ValueError('AttributeError in dst creation')\n\n\n gdal_interp_method = getGdalInterpMethod(interp_method)\n res = gdal.ReprojectImage(src,\n dst,\n src.GetProjection(),\n dst.GetProjection(),\n gdal_interp_method,\n )\n\n\n return geogrid_from_gdalInMem(dst)",
"def __convert(args):\n a, b, zone, ellipsoid, datum, inverse = args\n projection = Proj(\"+proj=utm +zone={}, +ellps={} +datum={} +units=m +no_defs\".format(zone, ellipsoid, datum))\n c, d = projection(a, b, inverse=inverse)\n\n return c, d",
"def _to_arcpy_featureset(self):\r\n if HAS_ARCPY:\r\n import uuid, string, random\r\n l = []\r\n for i in range(3):\r\n l.append(random.choice(string.ascii_letters))\r\n l = \"\".join(l)\r\n out_name = l\r\n res = self.to_featureclass(out_location='in_memory',\r\n out_name=out_name)\r\n\r\n feature_set = arcpy.FeatureSet()\r\n feature_set.load(res)\r\n return feature_set\r\n else:\r\n raise Exception(\"ArcPy must be present to convert to arcpy.FeatureSet object\")"
]
| [
"0.69849026",
"0.6524256",
"0.624949",
"0.61722314",
"0.61330724",
"0.5708693",
"0.5525221",
"0.5507964",
"0.54671025",
"0.5429234",
"0.5400744",
"0.5377069",
"0.53676003",
"0.5363078",
"0.5346215",
"0.52861166",
"0.5228367",
"0.52146995",
"0.52057016",
"0.5200616",
"0.5190036",
"0.51697904",
"0.51635325",
"0.51585615",
"0.5147367",
"0.5121971",
"0.5051834",
"0.5035013",
"0.49986708",
"0.49896953"
]
| 0.80769724 | 0 |
Project all feature classes in the geodatabase and save them as shapefiles in a folder | def dump_geodatabase_to_folder(path, folder='Worldmap Files'):
# make sure that path exists and is a geodatabase
    ensure_valid_gdb(path)
# set workspace and output folder
env.workspace = path
if os.path.isdir(folder):
os.removedirs(folder)
os.mkdir(folder)
# get complete list of FCs to project
feature_classes = find_all_feature_classes(env.workspace)
print 'Recovered {0} feature classes to project'.format(len(feature_classes))
# project feature classes - skipping any with unknown references
for infc in feature_classes:
project_feature_class(infc, folder) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_to_geojson(self, topology_map, filename):",
"def save_features_to_file(self):\n if not os.path.exists(self.features_save_path):\n os.makedirs(self.features_save_path)\n for s in self.sets:\n self.save_features_to_file_by_set(s)",
"def export_geojson(self):\n outputfile = tkinter.filedialog.asksaveasfilename(\n defaultextension=\".geojson\",\n filetypes=((\"geo json\", \"*.geojson\"),\n (\"All Files\", \"*.*\")))\n if outputfile:\n self.tabs.window.aistracker.create_geojson_map(outputfile)\n else:\n raise ExportAborted('Export cancelled by user.')",
"def convert_gpkg_to_geojson(self,shape_fname, destdirectory):\r\n\t\tfeatures = []\r\n\t\tcrs = None\r\n\t\tif not os.path.isfile(shape_fname):\r\n\t\t\tself.logger.error('File not found: %s' % shape_fname)\r\n\t\tself.opstatus.add_info(stage=6, msg = \"Rounding coordinates to six decimal precision\")\r\n\r\n\r\n\t\tout_fname = os.path.join(destdirectory,os.path.basename(shape_fname).replace('.gpkg', '.geojson'))\r\n\t\twith fiona.open(shape_fname, driver='GPKG') as source:\r\n\t\t\twith fiona.open(out_fname, \"w\",driver='GeoJSON',crs = fiona.crs.from_epsg(4326),schema=source.schema) as sink:\r\n\t\t\t\tfor rec in source:\r\n\t\t\t\t\tsink.write(rec)\r\n\r\n\t\tself.logger.info('file written: %s' % out_fname)\r\n\t\tself.opstatus.set_status(stage=6, status=1, statustext =\"File successfully converted to GeoJSON with six decimal precision\")\r\n\t\tself.opstatus.add_success(stage=6, msg = \"GeoJSON file successfully written\")\r\n\t\treturn out_fname",
"def project_feature_class(infc, folder, projection = 4326):\r\n dsc = arcpy.Describe(infc)\r\n shortname = infc.split('\\\\')[1] if len(infc.split('\\\\')) == 2 else infc\r\n if dsc.spatialReference.Name == \"Unknown\":\r\n print 'Skipped {} - undefined coordinate system.'.format(shortname)\r\n else:\r\n print 'Projecting {}'.format(shortname)\r\n outfc = os.path.join(folder, shortname + '.shp')\r\n outcs = arcpy.SpatialReference(projection)\r\n arcpy.Project_management(infc, outfc, outcs)\r\n print arcpy.GetMessages()",
"def reprojectAndSaveNewShapefile(inFilepath,outFilepath,to_EPSG):\r\n import geopandas as gpd\r\n from fiona.crs import from_epsg\r\n\r\n inFile = gpd.read_file(inFilepath)\r\n inFile_proj = inFile.copy()\r\n inFile_proj['geometry'] = inFile_proj['geometry'].to_crs(epsg=to_EPSG)\r\n inFile_proj.crs = from_epsg(to_EPSG)\r\n inFile_proj.to_file(outFilepath)",
"def find_all_feature_classes(current_workspace):\r\n paths_to_export = arcpy.ListFeatureClasses()\r\n \r\n # search for additional feature classes in feature datasets\r\n for fds in arcpy.ListDatasets():\r\n env.workspace = os.path.join(env.workspace, fds)\r\n for fc in arcpy.ListFeatureClasses():\r\n paths_to_export.append(os.path.join(fds, fc))\r\n env.workspace = current_workspace\r\n \r\n if len(paths_to_export) == 0:\r\n raise EmptyGeodatabaseError\r\n else:\r\n return paths_to_export",
"def _create_shape(self, queryset, model, columns, filename):\n geo_field = geo_field_from_model(model, app_settings['GEOM_FIELD_NAME'])\n get_geom, geom_type, srid = info_from_geo_field(geo_field)\n\n if geom_type.upper() in (GeometryField.geom_type, GeometryCollectionField.geom_type):\n\n by_points, by_linestrings, multipoints, multilinestrings = self.split_bygeom(queryset, geom_getter=get_geom)\n\n for split_qs, split_geom_field in ((by_points, PointField),\n (by_linestrings, LineStringField),\n (multipoints, MultiPointField),\n (multilinestrings, MultiLineStringField)):\n if len(split_qs) == 0:\n continue\n split_geom_type = split_geom_field.geom_type\n shp_filepath = shape_write(split_qs, model, columns, get_geom, split_geom_type, srid)\n filename = '%s_%s' % (filename, split_geom_type.lower())\n self.layers[filename] = shp_filepath\n\n else:\n shp_filepath = shape_write(queryset, model, columns, get_geom, geom_type, srid)\n self.layers[filename] = shp_filepath",
"def get_shapefile(self, shpname: str):\r\n self.get_geojson()\r\n self.geojson.to_shp(shpname)",
"def write_shapefile(data, directory, filename, crs):\n prop_schema = []\n for name, value in data[0]['properties'].items():\n fiona_prop_type = next((\n fiona_type for fiona_type, python_type in \\\n fiona.FIELD_TYPES_MAP.items() if \\\n python_type == type(value)), None\n )\n\n prop_schema.append((name, fiona_prop_type))\n\n sink_driver = 'ESRI Shapefile'\n sink_crs = {'init': crs}\n sink_schema = {\n 'geometry': data[0]['geometry']['type'],\n 'properties': OrderedDict(prop_schema)\n }\n\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n with fiona.open(\n os.path.join(directory, filename), 'w',\n driver=sink_driver, crs=sink_crs, schema=sink_schema) as sink:\n for datum in data:\n sink.write(datum)",
"def exportGIS(self, tabindex, curcycle):\n ubdata.exportGISShapeFile(self, tabindex, curcycle)\n ubdata.writeGeoJSONTempFiles(self, tabindex, curcycle)\n return True",
"def write_shapefile(data, directory, filename, crs):\n # Translate props to Fiona sink schema\n prop_schema = []\n for name, value in data[0]['properties'].items():\n fiona_prop_type = next((\n fiona_type for fiona_type, python_type in \\\n fiona.FIELD_TYPES_MAP.items() if \\\n python_type == type(value)), None\n )\n\n prop_schema.append((name, fiona_prop_type))\n\n sink_driver = 'ESRI Shapefile'\n sink_crs = {'init': crs}\n sink_schema = {\n 'geometry': data[0]['geometry']['type'],\n 'properties': OrderedDict(prop_schema)\n }\n\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n # Write all elements to output file\n with fiona.open(\n os.path.join(directory, filename), 'w',\n driver=sink_driver, crs=sink_crs, schema=sink_schema) as sink:\n for datum in data:\n sink.write(datum)",
"def test_to_geojson(self):\n fc = self.read_feature()\n dest_filename = str(self.datadir.join('test.geojson'))\n fc.to_geojson(dest_filename)\n fc_check = read_feature_collection(dest_filename)\n self.check_feature(fc_check.features[0])",
"def write_geojson(vec:gpd.GeoDataFrame, dest):\n\t\tdest = str(dest)\n\n\t\t# WGS 84\n\t\t#vec = vec.to_crs({'init': 'epsg:4326'})\n\n\t\tif os.path.isfile(dest):\n\t\t\tos.remove(dest)\n\t\t\t\n\t\tvec.to_file(dest, driver='GeoJSON', encoding='utf-8')",
"def load():\n\n # To run this command type: 'python manage.py shell'\n # 'from map.views import load; load()'\n\n mapping = {\"productivi\": \"productivi\", \"mpoly\": \"MULTIPOLYGON\"}\n map_path = os.path.abspath('gis_django/fields_test/test_fields.shp')\n lm = LayerMapping(Map, map_path, mapping, transform=False, encoding=\"iso-8859-1\")\n lm.save(verbose=True)",
"def _to_gisdb(self):\n self._ways.to_postgis(name=\"ways\", con=self._gisdb, if_exists=\"append\")\n self._nodes.to_sql(name=\"nodes\", con=self._gisdb, if_exists=\"append\")\n gdf_nodes, gdf_edges = osmnx.utils_graph.graph_to_gdfs(self._graph, node_geometry=False,\n fill_edge_geometry=False)\n gdf_edges[['id', 'length', 'u', 'v', 'key']].to_postgis(name=\"graph_edges\", con=self._gisdb, if_exists=\"append\")\n gdf_nodes[['id']].to_postgis(name=\"graph_nodes\", con=self._gisdb, if_exists=\"append\")\n self._nodes.to_sql(name=\"nodes\", con=self._gisdb, if_exists=\"append\")",
"def import_shapefile(self, shapefile, schema):\n logger.debug(\"Importing shapefile {}\".format(shapefile))\n layer = DataSource(shapefile)[0]\n for feature in layer:\n fields = schema.from_feature(feature)\n Region.objects.create(**fields)",
"def get_path_class_shapes(self):\n df = self.df_roi\n self.tumor_shape = MultiPolygon([self._get_shape(i) for i in df[df.class_ == \"Tumor\"]['geometry']])\n self.stroma_shape = MultiPolygon([self._get_shape(i) for i in df[df.class_ == \"Stroma\"]['geometry']])\n self.dcis_shape = MultiPolygon([self._get_shape(i) for i in df[df.class_ == \"Other\"]['geometry']]) \n\n # path_class_qupath_names = [\"Tumor\", \"Stroma\", \"Other\"]\n # for path_class in path_class_qupath_names:\n # mpolygon = MultiPolygon([self._get_shape(i) for i in df[df.class_ == path_class]['geometry']])\n\n # # replace name\n # if path_class == \"Other\":\n # path_class = \"dcis\"\n\n # attr_name = path_class.lower() + \"_shape\"\n # setattr(self, path_class, mpolygon)",
"def __make_geo(self):\n # gmsh freecad_part.iges -o out_iges.geo -0\n fname_list = self.__fname.split('.')\n geo_file = fname_list[0]+'.geo'\n runstr = \"%s %s -o %s -0\" % (environment.GMSH, self.__fname, geo_file)\n print(runstr)\n subprocess.call(runstr, shell=True)\n print('Wrote file: %s' % geo_file)",
"def write_features(self):\n num_features_per_file = math.ceil(len(self.features) / self.num_jobs)\n for idx in range(self.num_jobs):\n job_features = self.features[idx * num_features_per_file: (idx + 1) * num_features_per_file]\n features_filename = constants.INPUT_FEATURES_FILENAME.format(self.args.output_dir, idx)\n with open(features_filename, \"wb\") as features_file:\n cloudpickle.dump(job_features, features_file, protocol=pickle.DEFAULT_PROTOCOL)",
"def write_shp(G, outdir, filename):\n try:\n from osgeo import ogr\n except ImportError:\n raise ImportError(\"write_shp requires OGR: http://www.gdal.org/\")\n # easier to debug in python if ogr throws exceptions\n ogr.UseExceptions()\n\n def netgeometry(key, data):\n if 'Wkb' in data:\n geom = ogr.CreateGeometryFromWkb(data['Wkb'])\n elif 'Wkt' in data:\n geom = ogr.CreateGeometryFromWkt(data['Wkt'])\n elif type(key[0]).__name__ == 'tuple': # edge keys are packed tuples\n geom = ogr.Geometry(ogr.wkbLineString)\n _from, _to = key[0], key[1]\n try:\n geom.SetPoint(0, *_from)\n geom.SetPoint(1, *_to)\n except TypeError:\n # assume user used tuple of int and choked ogr\n _ffrom = [float(x) for x in _from]\n _fto = [float(x) for x in _to]\n geom.SetPoint(0, *_ffrom)\n geom.SetPoint(1, *_fto)\n else:\n geom = ogr.Geometry(ogr.wkbPoint)\n try:\n geom.SetPoint(0, *key)\n except TypeError:\n # assume user used tuple of int and choked ogr\n fkey = [float(x) for x in key]\n geom.SetPoint(0, *fkey)\n\n return geom\n\n # Create_feature with new optional attributes arg (should be dict type)\n def create_feature(geometry, lyr, attributes=None):\n feature = ogr.Feature(lyr.GetLayerDefn())\n feature.SetGeometry(g)\n if attributes != None:\n # Loop through attributes, assigning data to each field\n for field, data in attributes.items():\n feature.SetField(field, data)\n lyr.CreateFeature(feature)\n feature.Destroy()\n\n drv = ogr.GetDriverByName(\"ESRI Shapefile\")\n shpdir = drv.CreateDataSource(outdir)\n # delete pre-existing output first otherwise ogr chokes\n try:\n shpdir.DeleteLayer(filename + '_n')\n except:\n pass\n nodes = shpdir.CreateLayer(filename + '_n', None, ogr.wkbPoint)\n for n in G:\n data = G.node[n]\n g = netgeometry(n, data)\n create_feature(g, nodes)\n try:\n shpdir.DeleteLayer(filename + '_e')\n except:\n pass\n edges = shpdir.CreateLayer(filename + '_e', None, ogr.wkbLineString)\n\n # New edge attribute write support merged into edge loop\n fields = {} # storage for field names and their data types\n attributes = {} # storage for attribute data (indexed by field names)\n\n # Conversion dict between python and ogr types\n OGRTypes = {int: ogr.OFTInteger, str: ogr.OFTString, float: ogr.OFTReal}\n\n # Edge loop\n for e in G.edges(data=True):\n data = G.get_edge_data(*e)\n g = netgeometry(e, data)\n # Loop through attribute data in edges\n for key, data in e[2].items():\n # Reject spatial data not required for attribute table\n if (key != 'Json' and key != 'Wkt' and key != 'Wkb'\n and key != 'ShpName'):\n # For all edges check/add field and data type to fields dict\n if key not in fields:\n # Field not in previous edges so add to dict\n if type(data) in OGRTypes:\n fields[key] = OGRTypes[type(data)]\n else:\n # Data type not supported, default to string (char 80)\n fields[key] = ogr.OFTString\n # Create the new field\n newfield = ogr.FieldDefn(key, fields[key])\n edges.CreateField(newfield)\n # Store the data from new field to dict for CreateLayer()\n attributes[key] = data\n else:\n # Field already exists, add data to dict for CreateLayer()\n attributes[key] = data\n # Create the feature with, passing new attribute data\n create_feature(g, edges, attributes)\n\n nodes, edges = None, None",
"def main():\r\n\r\n #Create a list of all files that have the GPX file format\r\n fileList = glob.glob(os.path.join(inFolder,\"*.{0}\".format(inFormat)))\r\n\r\n #Create a connection to PostGIS database\r\n pgConn = createPostgisConnection(dbFormat, dbHost, dbName, dbSchema, dbUser, dbPWD)\r\n\r\n #Process each *listed* layer type from a GPS file\r\n for f in fileList:\r\n importGPX(f, gpxImportLayers, pgConn)",
"def PrepareWorkspace():\n \n # define expected file paths for file gdb folder, fgdb, taxi feature class \n fgdb_folder = constants.fgdb_folder\n fgdb_name = constants.taxi_fgdb_name\n file_gdb = os.path.join(fgdb_folder, fgdb_name)\n taxi_feature_class_name = \"TaxiLocations\"\n taxi_feature_class = os.path.join(file_gdb, taxi_feature_class_name)\n \n out_coordinate_system = arcpy.SpatialReference('WGS 1984') # define output spatial reference\n \n if not os.path.exists(fgdb_folder): # if file gdb folder has not been created\n os.mkdir(fgdb_folder) # create the folder\n if not arcpy.Exists(file_gdb): # if file gdb has not been created\n arcpy.CreateFileGDB_management(fgdb_folder, fgdb_name) # create the file gdb\n \n if not arcpy.Exists(taxi_feature_class): # if the taxi feature class does not exist\n # create the point feature class in WGS84 spatial reference\n arcpy.CreateFeatureclass_management(file_gdb, \n taxi_feature_class_name, \n \"Point\", \n spatial_reference=out_coordinate_system) # create a point feature class with defined coordinate system\n \n arcpy.TruncateTable_management(taxi_feature_class) # delete existing features in the feature class\n \n return file_gdb, taxi_feature_class # return fgdb and feature class path to main\n \n \n # %%",
"def shapefile_generator(filename):\n with fiona.open(filename) as collection:\n for item in collection:\n item['geometry'] = transform_geom(\n collection.meta['crs'], 'epsg:4326', item['geometry'])\n yield item",
"def run(self):\n if self.is_complete:\n LOG.debug(\"Skipping Geopackage, file exists\")\n return\n keys_points = self.feature_selection.key_union('points')\n keys_lines = self.feature_selection.key_union('lines')\n keys_polygons = self.feature_selection.key_union('polygons')\n osmconf = OSMConfig(self.stage_dir,points=keys_points,lines=keys_lines,polygons=keys_polygons)\n conf = osmconf.create_osm_conf()\n ogr_cmd = self.ogr_cmd.safe_substitute({'gpkg': self.output_gpkg,\n 'osm': self.input_pbf, 'osmconf': conf})\n LOG.debug('Running: %s' % ogr_cmd)\n subprocess.check_call(ogr_cmd, shell=True, executable='/bin/bash')\n\n \"\"\"\n Create the default osm gpkg schema\n \"\"\"\n conn = sqlite3.connect(self.output_gpkg)\n conn.enable_load_extension(True)\n cur = conn.cursor()\n cur.execute(\"select load_extension('mod_spatialite')\")\n cur.execute(\"CREATE TABLE boundary (id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, geom GEOMETRY)\");\n cur.execute(\"INSERT INTO boundary (geom) VALUES (GeomFromWKB(?,4326));\",(self.aoi_geom.wkb,))\n cur.executescript(SPATIAL_SQL)\n self.update_zindexes(cur,self.feature_selection)\n\n # add themes\n create_sqls, index_sqls = self.feature_selection.sqls\n for query in create_sqls:\n cur.executescript(query)\n for query in index_sqls:\n cur.executescript(query)\n conn.commit()\n conn.close()\n\n if self.per_theme:\n # this creates per-theme GPKGs\n for theme in self.feature_selection.themes:\n conn = sqlite3.connect(self.stage_dir + slugify(theme) + \".gpkg\")\n conn.enable_load_extension(True)\n cur = conn.cursor()\n cur.execute(\"attach database ? as 'geopackage'\",(self.output_gpkg,))\n cur.execute(\"create table gpkg_spatial_ref_sys as select * from geopackage.gpkg_spatial_ref_sys\")\n cur.execute(\"create table gpkg_contents as select * from geopackage.gpkg_contents where 0\")\n cur.execute(\"create table gpkg_geometry_columns as select * from geopackage.gpkg_geometry_columns where 0\")\n for geom_type in self.feature_selection.geom_types(theme):\n for stmt in self.feature_selection.create_sql(theme,geom_type):\n cur.executescript(stmt)\n conn.commit()\n conn.close()",
"def process(sources, output, force):\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s [%(levelname)s] - %(message)s', datefmt=\"%H:%M:%S\")\n\n logging.getLogger('shapely.geos').setLevel(logging.WARNING)\n logging.getLogger('Fiona').setLevel(logging.WARNING)\n logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.WARNING)\n requests.packages.urllib3.disable_warnings()\n # logging.getLogger('processing').setLevel(logging.DEBUG)\n\n catalog_features = []\n failures = []\n path_parts_to_skip = utils.get_path_parts(sources).index(\"sources\") + 1\n success = True\n for path in utils.get_files(sources):\n try:\n logging.info(\"Processing \" + path)\n pathparts = utils.get_path_parts(path)[path_parts_to_skip:]\n pathparts[-1] = pathparts[-1].replace('.json', '.geojson')\n\n outdir = os.path.join(output, *pathparts[:-1], pathparts[-1].replace('.geojson', ''))\n outfile = os.path.join(output, *pathparts)\n\n source = utils.read_json(path)\n urlfile = urlparse(source['url']).path.split('/')[-1]\n \n if not hasattr(adapters, source['filetype']):\n logging.error('Unknown filetype ' + source['filetype'])\n failures.append(path)\n continue\n \n read_existing = False\n if os.path.isfile(outfile):\n logging.info(\"Output file exists\")\n if os.path.getmtime(outfile) > os.path.getmtime(path):\n logging.info(\"Output file is up to date\")\n if not force:\n read_existing = True\n logging.warning('Skipping ' + path + ' since generated file exists. Use --force to regenerate.') \n else:\n logging.info(\"Output is outdated, {} < {}\".format(\n datetime.datetime.fromtimestamp(os.path.getmtime(outfile)),\n datetime.datetime.fromtimestamp(os.path.getmtime(path))))\n\n if read_existing:\n with open(outfile, \"rb\") as f:\n geojson = json.load(f)\n properties = geojson['properties']\n else:\n logging.info('Downloading ' + source['url'])\n \n try:\n fp = utils.download(source['url'])\n except IOError:\n logging.error('Failed to download ' + source['url'])\n failures.append(path)\n continue\n \n logging.info('Reading ' + urlfile)\n \n if 'filter' in source:\n filterer = BasicFilterer(source['filter'], source.get('filterOperator', 'and'))\n else:\n filterer = None\n \n try:\n geojson = getattr(adapters, source['filetype'])\\\n .read(fp, source['properties'],\n filterer=filterer,\n layer_name=source.get(\"layerName\", None),\n source_filename=source.get(\"filenameInZip\", None))\n except IOError as e:\n logging.error('Failed to read ' + urlfile + \" \" + str(e))\n failures.append(path)\n continue\n except zipfile.BadZipfile as e:\n logging.error('Unable to open zip file ' + source['url'])\n failures.append(path)\n continue\n finally:\n os.remove(fp.name)\n if(len(geojson['features'])) == 0:\n logging.error(\"Result contained no features for \" + path)\n continue\n excluded_keys = ['filetype', 'url', 'properties', 'filter', 'filenameInZip']\n properties = {k:v for k,v in list(source.items()) if k not in excluded_keys}\n properties['source_url'] = source['url']\n properties['feature_count'] = len(geojson['features'])\n logging.info(\"Generating demo point\")\n properties['demo'] = geoutils.get_demo_point(geojson)\n \n geojson['properties'] = properties\n \n utils.make_sure_path_exists(os.path.dirname(outfile))\n\n #cleanup existing generated files\n if os.path.exists(outdir):\n rmtree(outdir)\n filename_to_match, ext = os.path.splitext(pathparts[-1])\n output_file_dir = os.sep.join(utils.get_path_parts(outfile)[:-1])\n logging.info(\"looking for generated files to delete in \" + 
output_file_dir)\n for name in os.listdir(output_file_dir):\n base, ext = os.path.splitext(name)\n if base == filename_to_match:\n to_remove = os.path.join(output_file_dir, name)\n logging.info(\"Removing generated file \" + to_remove)\n os.remove(to_remove)\n\n utils.write_json(outfile, geojson)\n\n logging.info(\"Generating label points\")\n label_geojson = geoutils.get_label_points(geojson)\n label_path = outfile.replace('.geojson', '.labels.geojson')\n utils.write_json(label_path, label_geojson)\n\n logging.info('Done. Processed to ' + outfile)\n \n if not \"demo\" in properties:\n properties['demo'] = geoutils.get_demo_point(geojson)\n\n properties['path'] = \"/\".join(pathparts)\n catalog_entry = {\n 'type': 'Feature',\n 'properties': properties,\n 'geometry': geoutils.get_union(geojson)\n }\n catalog_features.append(catalog_entry)\n\n if not os.path.exists(outdir) or not os.path.exists(os.path.join(outdir, \"units.json\")):\n logging.info(\"Generated exploded GeoJSON to \" + outdir)\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n # .json instead of .geojson, incase there is a unit named \"source\"\n utils.write_json(os.path.join(outdir, \"source.json\"), catalog_entry) \n units = []\n for feature in geojson['features']:\n feature_id = str(feature['properties']['id'])\n feature_id = feature_id.replace('/', '')\n feature_filename = os.path.join(outdir, feature_id + \".geojson\")\n utils.write_json(feature_filename, feature)\n units.append(feature['properties'])\n utils.write_json(os.path.join(outdir, \"units.json\"), units)\n else:\n logging.debug(\"exploded GeoJSON already exists, not generating\")\n\n except Exception as e:\n logging.error(str(e))\n logging.exception(\"Error processing file \" + path)\n failures.append(path)\n success = False\n\n catalog = {\n 'type': 'FeatureCollection',\n 'features': catalog_features\n }\n utils.write_json(os.path.join(output,'catalog.geojson'), catalog)\n\n if not success:\n logging.error(\"Failed sources: \" + \", \".join(failures))\n sys.exit(-1)",
"def save_features(df, name, path=None):\n if path is not None :\n os.makedirs( f\"{path}/{name}\" , exist_ok=True)\n if isinstance(df, pd.Series):\n df0=df.to_frame()\n else:\n df0=df\n log( f\"{path}/{name}/features.parquet\" )\n log(df0, list(df0.columns))\n df0.to_parquet( f\"{path}/{name}/features.parquet\")\n else:\n log(\"No saved features, path is none\")",
"def convert_to_geojson(path):\n outdir = path.rstrip('.zip')\n basename = outdir.split('/')[-1]\n\n if os.path.exists(outdir): # Delete any existing outdir\n shutil.rmtree(outdir)\n os.makedirs(outdir, exist_ok=True)\n unzip(path, '-d', outdir)\n\n geojson_files = []\n\n for filename in os.listdir(outdir):\n if filename.endswith(\".shp\"):\n shpFile = os.path.join(outdir, filename)\n geojsonFile = shpFile.replace('.shp', '.geojson')\n print(shpFile, geojsonFile)\n\n ogr_command = 'ogr2ogr -f \"GeoJSON\" -t_srs crs:84 {outpath} {inpath}'.format(outpath=quote(geojsonFile), inpath=quote(shpFile))\n\n os.popen(ogr_command).read()\n geojson_files.append(geojsonFile)\n\n return geojson_files",
"def save(self):\r\n\r\n for video_name, video_data in self.data.items():\r\n save_path = os.path.join(\r\n self.features_dir, video_name + \".\" + self.file_type\r\n )\r\n write_df(\r\n df=video_data.fillna(0), file_type=self.file_type, save_path=save_path\r\n )\r\n print(\"Created additional ROI features for {}...\".format(video_name))\r\n self.timer.stop_timer()\r\n stdout_success(\r\n msg=\"Created additional ROI features for files within the project_folder/csv/features_extracted directory\",\r\n elapsed_time=self.timer.elapsed_time_str,\r\n )",
"def geojson2postgis(self, filepath, table_name, geo_type):\n map_data = gpd.GeoDataFrame.from_file(filepath)\n # Maybe you want to change link address\n link = \"postgresql://{0}:{1}@{3}:5432/{2}\".format(self.username, self.password, self.dbname, self.host)\n engine = create_engine(link, encoding='utf-8')\n map_data = self.dict_to_json(map_data)\n map_data['geometry'] = map_data['geometry'].apply(lambda x: WKTElement(x.wkt, 4326))\n # Maybe you want to change 'replace' to 'append' in the future\n map_data.to_sql(\n name=table_name,\n con=engine,\n if_exists='replace',\n dtype={'geometry': Geometry(geometry_type=geo_type, srid=4326)}\n )"
]
| [
"0.6630598",
"0.6579513",
"0.6309255",
"0.6297646",
"0.6271668",
"0.6196219",
"0.6082502",
"0.6075532",
"0.59326464",
"0.5927675",
"0.59165806",
"0.5888081",
"0.5879382",
"0.5877576",
"0.5852843",
"0.58418584",
"0.58341116",
"0.5779261",
"0.57326174",
"0.571467",
"0.5709001",
"0.5708797",
"0.5699491",
"0.5698353",
"0.56619054",
"0.5660756",
"0.5628677",
"0.5625713",
"0.56250566",
"0.5612128"
]
| 0.73370665 | 0 |
Parse label string from commandline. | def parse_cmdline(label_string):
if not label_string.startswith('//'):
label_string = '//' + label_string
return Label.parse(label_string, None) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_buildfile(label_string):\n if not isinstance(label_string, str):\n raise iga.fargparse.ParseError()\n return Label.parse(label_string, iga.context.current()['package'])",
"def parse(cls, label) -> Any:\n return label",
"def parse(label_string, current_package):\n if label_string.startswith('//'):\n package_start = 2\n package_end = (_find_or_none(label_string, ':', package_start) or\n len(label_string))\n package = label_string[package_start:package_end]\n if not package:\n raise IgaError('empty package of %r' % label_string)\n else:\n package_start = package_end = 0\n package = None\n\n if label_string[package_end:package_end+1] == ':':\n target_start = package_end + 1\n else:\n target_start = package_end\n target = label_string[target_start:]\n\n if not package and not target:\n raise IgaError('cannot parse %r' % label_string)\n package = package or current_package\n if not package:\n raise IgaError('cannot parse package part from %r' % label_string)\n target = target or _default_target(package)\n if not target:\n raise IgaError('cannot parse target part from %r' % label_string)\n\n return Label(\n package=PurePosixPath(package),\n target=PurePosixPath(target),\n )",
"def parse_label(label):\n res = {}\n clazz, instance_num, room_type, room_num, area_num = label.split(\"_\")\n res['instance_class'] = clazz\n res['instance_num'] = int(instance_num)\n res['room_type'] = room_type\n res['room_num'] = int(room_num)\n res['area_num'] = int(area_num)\n return res",
"def read_label(self):\r\n # label = str(self.parse_binary())#!!BAD\r\n label = ''\r\n while True:\r\n c = self.eat_char()\r\n if c=='n':\r\n #terminal char\r\n break\r\n else:\r\n label += c\r\n\r\n self.log += \"'\" + label + \"'\"\r\n return label",
"def parse_line(self, line):\n command, _, arg = line.strip().partition(\" \")\n return command, arg.strip()",
"def read_line(line):\n label = line[0:11]\n text = line[11:]\n y = 1 if label == '__label__2 ' else 0\n return text, y",
"def parseLine(self, line):\n # Removes surrounding whitespace\n line = self.separateElements(line)\n if len(line) == 0: return\n # Checks if the line is a label declaration\n if line[0].lower() == \"label\":\n # --Validates the line\n if len(line) != 2: raise Exception(\"Invalid Label\")\n if len(line[1]) < 2: raise Exception(\"Invalid Label\") \n if line[1][-1] != ':': raise Exception(\"Invalid Label\")\n # Gets the label name\n labelName = line[1][:-1]\n\n # Creates a new symbol entry for the label, the pointer refers to the memory location of the label\n # It defaults to the location of the label in the instruction sequence\n self.symbolTable.append({ \"type\": \"LABEL\", \"name\": labelName, \"pointer\": len(self.instructionList) * 4})\n # Checks if the line is data declaration\n elif line[0].lower() == \"data\" or (line[0].lower()[:4] == \"data\" and line[0][4] == \"[\"):\n # Removes the DATA tag from the data\n line[0] = line[0][4:]\n # --Validates the line\n if len(line) < 2: raise Exception(\"Invalid DATA\")\n # Gets the data name\n dataName = line[1]\n # Stores the data length\n dataLength = 4 # A word\n # Gets any default data\n defaultData = 0\n # Stores the data type\n dataType = \"int\"\n if len(line) == 3:\n if line[2][0] == \"\\\"\" and line[2][-1] == \"\\\"\":\n dataType = \"string\"\n defaultData = line[2][1:-1]\n dataLength = len(defaultData)\n elif line[2].isnumeric():\n defaultData = line[2]\n elif line[2][-1] == 'f' and line[2][:-1].isnumeric():\n dataType = \"float\"\n defaultData = line[2][0]\n # Checks if a data length was given\n if len(line[0]) > 2 and (line[0][0] == \"[\" and line[0][-1] == \"]\"):\n data = line[0][1:-1]\n if not data.isnumeric(): raise TypeError(\"Invalid data length type\")\n dataLength = int(data)\n\n # Creates a new symbol entry for the data\n self.symbolTable.append({ \"type\": \"DATA\", \"name\": dataName, \"default\": defaultData, \"dataType\": dataType, \"length\": dataLength})\n # The line is most likely an instruction\n else:\n # --Validates the line\n #Stores the control bits\n controlBits = 1 << 5 # Sets it to 0b100000\n # Checks if the first element is control bits\n if line[0][0] == \"{\" and line[0][-1] == \"}\": # First element is control bits\n # Separates the two sections of the control bits\n controlSections = line[0].split(':')\n #Goes through the characters and constructs the control bits for the instruction\n carryBits = controlSections[0].lower()\n carryFlag = int('c' in carryBits)\n zeroFlag = int('z' in carryBits)\n negativeFlag = int('n' in carryBits)\n signedOverflowFlag = int('s' in carryBits)\n #Gets the conditions bits\n if len(controlSections) == 2:\n conditionBits = controlSections[1].lower()\n isAnd = int('x' in conditionBits)\n isOne = int('1' in conditionBits)\n #Sets the last two bits on controlBits to the conditionBits\n controlBits ^= isAnd << 1\n controlBits ^= isOne\n # Constructs the control bits section\n controlBits ^= carryFlag << 5\n controlBits ^= zeroFlag << 4\n controlBits ^= negativeFlag << 3\n controlBits ^= signedOverflowFlag << 2\n # Removes the control bits section from the line\n line.pop(0)\n # Performs this check as the controlbits element gets removed (if it existed) and so the length of the elments could be zerp\n if len(line) == 0: raise Exception(\"Invalid Instruction\")\n # --The first element is the instruction\n # Identifies the instruction from the mnemonic using the lookup table\n if line[0] in self.InstructionLookupTable:\n ins = self.InstructionLookupTable[line[0]]\n insCode = 
ins[\"code\"]\n insControlBits = ins['controlBits'] if ins['controlBits'] else controlBits\n # Creates a representation of the instruction, this is stored in the instructionList and is assembled later\n instrucitonRepr = {\n \"code\": insCode,\n \"controlBits\": insControlBits,\n }\n # Parses the arguments given and stores the operandStruct returned in the instruciton representation\n if len(line) > 1: instrucitonRepr[\"operand\"] = self.parseArgs(line[1:], insCode)\n self.instructionList.append(instrucitonRepr)",
"def get_label_info(label):\n label_info = str(label)[2:-1].split(\"-\")\n return label_info",
"def _parse_option_name(line):\n return line.split('=')[0].strip()",
"def test_label_arg_env_key(self, dfparser, instruction):\n dfparser.content = dedent(\"\"\"\\\n FROM scratch\n {0} FOOBAR=\"foo bar\"\n LABEL \"$FOOBAR\"=\"baz\"\n \"\"\".format(instruction))\n assert dfparser.labels['foo bar'] == 'baz'",
"def get_label_name(self):\n command_type = self.get_type()\n if command_type == LABEL_COMMAND_TYPE:\n return self.command[1:-1] # ignores the () at the beginning and the end\n if command_type != EMPTY_COMMAND_TYPE: # increments the line number if it is not a blank line or a label\n self.__line_number += 1",
"def ex_label(self,label,argl):\n if len(label) > 0 and label[0] != '_':\n return label\n comment = ''\n for i in argl:\n phrase = ''\n if i == 'l':\n phrase = label\n elif i in self._labels.keys():\n phrase = self._labels[i]\n comment += phrase\n return comment",
"def extract_label(selector):\n return selector.split('=')[-1][:-1]",
"def extract_label(line):\n start = line.find('\\\\label{')\n for i in range(start, len(line)):\n if line[i] == '}':\n label = line[start + 7 : i]\n break\n return label",
"def haiku_string_parser():\n pass",
"def parse_command_line(self, argv):\n from optparse import OptionParser\n usage = \"usage: %prog [options]\"\n parser = OptionParser(usage)\n\n (options, args) = parser.parse_args(argv)",
"def infer_label(self, string):\n label_bits = []\n if string:\n label_bits.append(string)\n for k, v in self.table.items():\n # The automatic label includes any keys with multiple values\n if len(v) > 1:\n # If a key has multiple values, add both its name and its key.\n # That is, if @key1@ has multiple values, label_bits will have\n # 'key1' + '@key1@' appended. This means the label includes\n # both the key's name and the particular value it has for a\n # given job.\n label_bits.append(re.search(self.key_pattern, k).group(1) + k)\n label = '-'.join(label_bits)\n # Add the label as a key-values pair to the weird data structure\n # This is as if there were in the bp file the line,\n # label\n if not label:\n raise ValueError, \"The label is blank. No label was supplied \"\\\n \"and none can be inferred.\"\n self.table[self.label_key] = [label]",
"def parse_command_line():\r\n\r\n parser = argparse.ArgumentParser(description='User args')\r\n parser.add_argument(\"--action\", choices=['train', 'predict', 'demo', 'test'], required=True, help=\"Choose action.\")\r\n parser.add_argument(\"--model\", choices=['vgg', 'unet', 'fpn'], required=True, help=\"Choose model.\")\r\n parser.add_argument(\"--dataset\", choices=['full', 'small'], required=True, help=\"Choose dataset.\")\r\n\r\n return parser.parse_args()",
"def parse_command_line(com_string):\r\n\t#\tREMINDER: command line format is $ python relabel_clean.py [options] module (exp_file) file/dir/\r\n\topt_dict = {}\r\n\topts, args = getopt.getopt(com_string, \"c:df:hl:o:\", [\"col=\", \"dict\", \"form=\", \"help\", \"lang=\", \"old=\"])\r\n\t\r\n\t# display help screen if present\r\n\tfor option, value in opts:\r\n\t\tif option == \"-h\" or option == \"--help\":\r\n\t\t\tdisplay_help_screen()\r\n\t\r\n\t# determine module to be used\r\n\tif args[0] == \"relabel\":\r\n\t\topt_dict[\"module\"] = \"1\"\r\n\telif args[0] == \"clean\":\r\n\t\topt_dict[\"module\"] = \"2\"\r\n\telif args[0] == \"dictionary\":\r\n\t\topt_dict[\"module\"] = \"3\"\r\n\telse:\r\n\t\tsys.exit(\"Unrecognized module.\")\r\n\t\r\n\t# populate option dictionary for each module with defaults and arguments\r\n\tif opt_dict[\"module\"] == \"1\":\r\n\t\tif len(args) == 3:\r\n\t\t\topt_dict[\"text file\"] = args[1]\r\n\t\t\topt_dict[\"file dir\"] = name_check(args[2])\r\n\t\telse:\r\n\t\t\topt_dict[\"text file\"] = None\r\n\t\t\topt_dict[\"file dir\"] = None\r\n\t\t\t\r\n\t\topt_dict[\"columns\"] = \"experiment_item_condition\"\r\n\t\topt_dict[\"dict\"] = False\r\n\t\topt_dict[\"format\"] = \"experiment_participant_item_condition\"\r\n\t\topt_dict[\"lang\"] = None\r\n\t\topt_dict[\"old dir\"] = \"0_old_labfile_relabel/\"\r\n\telif opt_dict[\"module\"] == \"2\":\r\n\t\tif len(args) == 2:\r\n\t\t\topt_dict[\"file dir\"] = name_check(args[1])\r\n\t\telse:\r\n\t\t\topt_dict[\"file dir\"] = None\r\n\t\t\r\n\t\topt_dict[\"dict\"] = False\r\n\t\topt_dict[\"lang\"] = None\r\n\t\topt_dict[\"old dir\"] = \"0_old_labfile_clean/\"\r\n\telif opt_dict[\"module\"] == \"3\":\r\n\t\tif len(args) == 2:\r\n\t\t\topt_dict[\"file dir\"] = name_check(args[1])\r\n\t\telse:\r\n\t\t\topt_dict[\"file dir\"] = None\r\n\t\r\n\t# override defaults with options, if necessary\r\n\tfor option, value in opts:\r\n\t\tif option == \"-c\" or option == \"--col\":\r\n\t\t\topt_dict[\"columns\"] = value\r\n\t\telif option == \"-d\" or option == \"--dict\":\r\n\t\t\topt_dict[\"dict\"] = True\r\n\t\telif option == \"-f\" or option == \"--form\":\r\n\t\t\topt_dict[\"format\"] = value\r\n\t\telif option == \"-l\" or option == \"--lang\":\r\n\t\t\topt_dict[\"lang\"] = value\r\n\t\telif option == \"-o\" or option == \"--old\":\r\n\t\t\topt_dict[\"old dir\"] = name_check(value)\r\n\t\r\n\treturn opt_dict",
"def want_label(self, op):\n return self.want_line(r'\\s*\\S*(%s)\\S*\\:.*' % (op))",
"def parse_and_validate_cmd_line():\n if len(sys.argv) != 4:\n print USAGE_STR.format(sys.argv[0])\n sys.exit()\n # attempt to parse the parameters tell the user and exit if we can't\n num_segments = parse_and_validate_num_segs(sys.argv[1])\n # try to parse numThreads\n num_threads = parse_and_validate_num_threads(sys.argv[2])\n # try to parse and test the data directory\n data_dir = parse_and_validate_data_dir(sys.argv[3])\n return num_segments, num_threads, data_dir",
"def parse_command_line(self, argv):\n # required arguments\n self.arg_parser.add_argument(\"model\", help=\"name of the model\")\n self.arg_parser.add_argument(\"ipaddress\", help=\"ip address of a target device\")\n\n # options\n self.arg_parser.add_argument(\"--cluster\", default=self.cluster, help=\"http address of the cluster server that provides access to the target device\")\n self.arg_parser.add_argument(\"--labels\", default=self.labels, help=\"name of the labels file\")\n self.arg_parser.add_argument(\"--maxfiles\", type=int, default=self.maxfiles, help=\"max number of files to copy (up to the size of the validation set)\")\n self.arg_parser.add_argument(\"--target_dir\", default=self.target_dir, help=\"destination directory on the target device\")\n self.arg_parser.add_argument(\"--username\", default=self.username, help=\"username for the target device\")\n self.arg_parser.add_argument(\"--password\", default=self.password, help=\"password for the target device\")\n self.arg_parser.add_argument(\"--target\", default=self.target, choices=['pi3', 'pi3_64', 'aarch64'], help=\"type of target device\")\n self.arg_parser.add_argument(\"--images\", default=self.images, help=\"path to the validation images on the target device\")\n self.arg_parser.add_argument(\"--truth\", default=self.truth, help=\"path to a tsv file on the target device, each line contains two values, the file name of the image and the integer classification value\")\n\n args = self.arg_parser.parse_args(argv)\n\n self._init(args)",
"def test_id_from_fasta_label_line(self):\r\n label_line = \\\r\n \">hsa:8355 HIST1H3G; histone cluster 1, H3g ; K11253 histone H3\"\r\n self.assertEqual(id_from_fasta_label_line(label_line), \"hsa:8355\")",
"def parse_label(self):\n # TODO: make this work with attached labels as well as\n # stand alone labels.\n # Save the RAW full text of the label to self._raw\n input_stream = FileStream(self.infile)\n lexer = ODLv21Lexer(input_stream)\n tokens = CommonTokenStream(lexer)\n\n parser = ODLv21Parser(tokens)\n parse_tree = parser.label()\n self._parse_tree = parse_tree\n visitor = Pds3LabelVisitor()\n visitor.visit(parse_tree)\n return visitor.root_dict",
"def parse(self, command_line=sys.argv[1:]):\n return self._parser.parse_args(command_line)",
"def job(label):\n args = [\"/bin/launchctl\", \"list\", label]\n try:\n out = subprocess.check_output(args, stderr=subprocess.PIPE).decode()\n except subprocess.CalledProcessError:\n return {}\n result = dict()\n for l in out.splitlines():\n if '\" =' in l:\n key = l.split('\"')[1]\n if \";\" in l: # \"key\" = \"value\";\n result[key] = _py_value(_parse_value(l))\n if '=' not in l and '\";' in l: # \"value\";\n result[key] = result.get(key, []) + [l.split('\"')[1]]\n return result",
"def _parse_message(string):\n if not string:\n raise BadMessage('Empty string.')\n\n prefix, trailing = '', ''\n if string[0] == ':':\n prefix, string = string[1:].split(' ', 1)\n if ':' in string:\n string, trailing = string.split(':', 1)\n\n if not string or set(string).issubset(Message.whitespace):\n raise BadMessage('No command.')\n\n args = string.split()\n if trailing:\n args.append(trailing)\n\n if not set(args[0]).issubset(Message.alpha):\n raise BadMessage('Bad command.')\n\n return prefix, args[0], args[1:]",
"def parse():\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset', default='ml-1m', help='which dataset to use')\n args = parser.parse_args()\n main(args)",
"def parse_cmdline(args):\n usage = \"usage: %prog [options] <name> <snpfile> <human asm build No> \" +\\\n \"<database>\"\n parser = OptionParser(usage)\n parser.add_option(\"-v\", \"--verbose\", dest=\"verbose\",\n action=\"store_true\", default=False,\n help=\"Give verbose output\")\n return parser.parse_args()"
]
| [
"0.7009553",
"0.67117226",
"0.6306262",
"0.60545367",
"0.6012107",
"0.5779139",
"0.5755251",
"0.56505704",
"0.56484544",
"0.5567437",
"0.5561202",
"0.5474867",
"0.54550755",
"0.5448368",
"0.5440687",
"0.5430991",
"0.5427897",
"0.5417681",
"0.53850394",
"0.53717554",
"0.53521",
"0.5351692",
"0.5336008",
"0.532929",
"0.5323738",
"0.5315106",
"0.5271904",
"0.5268761",
"0.52641994",
"0.525967"
]
| 0.80780363 | 0 |
Parse label string within BUILD file evaluation environment. | def parse_buildfile(label_string):
if not isinstance(label_string, str):
raise iga.fargparse.ParseError()
return Label.parse(label_string, iga.context.current()['package']) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse(label_string, current_package):\n if label_string.startswith('//'):\n package_start = 2\n package_end = (_find_or_none(label_string, ':', package_start) or\n len(label_string))\n package = label_string[package_start:package_end]\n if not package:\n raise IgaError('empty package of %r' % label_string)\n else:\n package_start = package_end = 0\n package = None\n\n if label_string[package_end:package_end+1] == ':':\n target_start = package_end + 1\n else:\n target_start = package_end\n target = label_string[target_start:]\n\n if not package and not target:\n raise IgaError('cannot parse %r' % label_string)\n package = package or current_package\n if not package:\n raise IgaError('cannot parse package part from %r' % label_string)\n target = target or _default_target(package)\n if not target:\n raise IgaError('cannot parse target part from %r' % label_string)\n\n return Label(\n package=PurePosixPath(package),\n target=PurePosixPath(target),\n )",
"def test_label_arg_env_key(self, dfparser, instruction):\n dfparser.content = dedent(\"\"\"\\\n FROM scratch\n {0} FOOBAR=\"foo bar\"\n LABEL \"$FOOBAR\"=\"baz\"\n \"\"\".format(instruction))\n assert dfparser.labels['foo bar'] == 'baz'",
"def parse_cmdline(label_string):\n if not label_string.startswith('//'):\n label_string = '//' + label_string\n return Label.parse(label_string, None)",
"def parse(cls, label) -> Any:\n return label",
"def parse_label(label):\n res = {}\n clazz, instance_num, room_type, room_num, area_num = label.split(\"_\")\n res['instance_class'] = clazz\n res['instance_num'] = int(instance_num)\n res['room_type'] = room_type\n res['room_num'] = int(room_num)\n res['area_num'] = int(area_num)\n return res",
"def extract_label(selector):\n return selector.split('=')[-1][:-1]",
"def extract_label(line):\n start = line.find('\\\\label{')\n for i in range(start, len(line)):\n if line[i] == '}':\n label = line[start + 7 : i]\n break\n return label",
"def ex_label(self,label,argl):\n if len(label) > 0 and label[0] != '_':\n return label\n comment = ''\n for i in argl:\n phrase = ''\n if i == 'l':\n phrase = label\n elif i in self._labels.keys():\n phrase = self._labels[i]\n comment += phrase\n return comment",
"def _is_label(self, words):\n if words[0] == 'label':\n if len(words) != 2:\n raise SyntaxError(\"File line {}: Invalid number of arguments for C_LABEL command.\".format(self._file_line))\n return True\n else:\n return False",
"def parser(record):\n parsed = tf.parse_single_example(record, transformed_feature_spec)\n label = parsed.pop(LABEL_KEY)\n return parsed, label",
"def read_label(self):\r\n # label = str(self.parse_binary())#!!BAD\r\n label = ''\r\n while True:\r\n c = self.eat_char()\r\n if c=='n':\r\n #terminal char\r\n break\r\n else:\r\n label += c\r\n\r\n self.log += \"'\" + label + \"'\"\r\n return label",
"def test_arg_env_invalid(self, dfparser, instruction, label):\n dfparser.lines = [\"FROM fedora\\n\",\n \"{0} v=v\\n\".format(instruction),\n \"LABEL TEST={0}\\n\".format(label)]\n try:\n dfparser.labels['TEST']\n except KeyError:\n pass",
"def infer_label(self, string):\n label_bits = []\n if string:\n label_bits.append(string)\n for k, v in self.table.items():\n # The automatic label includes any keys with multiple values\n if len(v) > 1:\n # If a key has multiple values, add both its name and its key.\n # That is, if @key1@ has multiple values, label_bits will have\n # 'key1' + '@key1@' appended. This means the label includes\n # both the key's name and the particular value it has for a\n # given job.\n label_bits.append(re.search(self.key_pattern, k).group(1) + k)\n label = '-'.join(label_bits)\n # Add the label as a key-values pair to the weird data structure\n # This is as if there were in the bp file the line,\n # label\n if not label:\n raise ValueError, \"The label is blank. No label was supplied \"\\\n \"and none can be inferred.\"\n self.table[self.label_key] = [label]",
"def parse_label_column(label_string_tensor):\n # Build a Hash Table inside the graph\n table = tf.contrib.lookup.index_table_from_tensor(tf.constant(LABELS))\n\n # Use the hash table to convert string labels to ints and one-hot encode\n return table.lookup(label_string_tensor)",
"def parse_label(self):\n # TODO: make this work with attached labels as well as\n # stand alone labels.\n # Save the RAW full text of the label to self._raw\n input_stream = FileStream(self.infile)\n lexer = ODLv21Lexer(input_stream)\n tokens = CommonTokenStream(lexer)\n\n parser = ODLv21Parser(tokens)\n parse_tree = parser.label()\n self._parse_tree = parse_tree\n visitor = Pds3LabelVisitor()\n visitor.visit(parse_tree)\n return visitor.root_dict",
"def is_label_definition(line):\n\n return line.startswith(\"LABEL \")",
"def load_label(self, pr):\n return",
"def _parse(self, inval):\n regex = re.compile(r'^os\\.environ\\[(.*)\\]$')\n for val in inval:\n if val is None:\n continue\n # split on \\n\n cmd = val.split('\\n')\n for v2 in cmd:\n if not v2:\n continue\n dict_call, pth = v2.split(' = ')\n m = re.match(regex, dict_call)\n if m:\n key = m.groups()[0]\n self.env[key] = pth",
"def parse_labels(labels: [{}]) -> {str: str}:\n labels_dict = {}\n for label in labels:\n match = re.search(\"{([^=]+)=(.+)}\", label['name'])\n if match:\n key = match.group(1).strip().lower().title()\n value = match.group(2).strip()\n labels_dict[key] = value\n return labels_dict",
"def expr_label(runtime_addr, s):\n\n runtime_addr = memorymanager.RuntimeAddr(runtime_addr)\n # TODO: If this continues to just forward to label() perhaps make that behavuour\n # official and just provide both names for backwards compatibility/documenting the\n # difference for users who want to??\n return label(runtime_addr, s)",
"def _make_transition(token_namespace, reserved_syntax_strings, label):\n if label[0].isalpha():\n # A named token (e.g. NAME, NUMBER, STRING)\n return getattr(token_namespace, label)\n else:\n # Either a keyword or an operator\n assert label[0] in ('\"', \"'\"), label\n assert not label.startswith('\"\"\"') and not label.startswith(\"'''\")\n value = literal_eval(label)\n try:\n return reserved_syntax_strings[value]\n except KeyError:\n r = reserved_syntax_strings[value] = ReservedString(value)\n return r",
"def want_label(self, op):\n return self.want_line(r'\\s*\\S*(%s)\\S*\\:.*' % (op))",
"def _modify_instruction_label_env(self, instruction, instr_key, instr_value):\n if instruction == 'LABEL':\n instructions = self.labels\n elif instruction == 'ENV':\n instructions = self.envs\n elif instruction == 'ARG':\n instructions = self.args\n else:\n raise ValueError(\"Unknown instruction '%s'\" % instruction)\n\n if instr_key not in instructions:\n raise KeyError('%s not in %ss' % (instr_key, instruction))\n\n # extract target instructions from the final stage only\n candidates = []\n for insn in self.structure:\n if insn['instruction'] == 'FROM':\n candidates = []\n if insn['instruction'] == instruction:\n candidates.append(insn)\n\n # Find where in the file to put the changes\n content = startline = endline = None\n for candidate in candidates:\n words = list(WordSplitter(candidate['value']).split(dequote=False))\n\n # LABEL/ENV/ARG syntax is one of two types:\n if '=' not in words[0]: # LABEL/ENV/ARG name value\n # Remove quotes from key name and see if it's the one\n # we're looking for.\n if WordSplitter(words[0]).dequote() == instr_key:\n if instr_value is None:\n # Delete this line altogether\n content = None\n else:\n # Adjust label/env value\n words[1:] = [quote(instr_value)]\n\n # Now reconstruct the line\n content = \" \".join([instruction] + words) + '\\n'\n\n startline = candidate['startline']\n endline = candidate['endline']\n break\n else: # LABEL/ENV/ARG \"name\"=\"value\"\n for index, token in enumerate(words):\n key, _ = token.split(\"=\", 1)\n if WordSplitter(key).dequote() == instr_key:\n if instr_value is None:\n # Delete this label\n del words[index]\n else:\n # Adjust label/env value\n words[index] = \"{0}={1}\".format(key,\n quote(instr_value))\n\n if len(words) == 0:\n # We removed the last label/env, delete the whole line\n content = None\n else:\n # Now reconstruct the line\n content = \" \".join([instruction] + words) + '\\n'\n\n startline = candidate['startline']\n endline = candidate['endline']\n break\n\n # We know the label/env we're looking for is there\n assert startline and endline\n\n # Re-write the Dockerfile\n lines = self.lines\n del lines[startline:endline + 1]\n if content:\n lines.insert(startline, content)\n self.lines = lines",
"def parser(serialized_example):\n features = tf.parse_single_example(\n serialized_example,\n features={\n 'env': tf.FixedLenFeature([1, 4], tf.int64),\n # 'env_segment_number': tf.FixedLenFeature([], tf.int64),\n # 'env_segment_cpu': tf.FixedLenFeature([], tf.int64),\n # 'env_segment_mem': tf.FixedLenFeature([], tf.int64),\n # 'query_plan_ops': tf.VarLenFeature(tf.string),\n # 'query_table_size': tf.VarLenFeature(tf.float32),\n # 'segment_cpu_usage': tf.VarLenFeature(tf.float32),\n 'label': tf.FixedLenFeature([], tf.float32)\n })\n env = tf.cast(features['env'], tf.float32)\n # image.set_shape([DEPTH * HEIGHT * WIDTH])\n\n # # Reshape from [depth * height * width] to [depth, height, width].\n # image = tf.cast(\n # tf.transpose(tf.reshape(image, [DEPTH, HEIGHT, WIDTH]), [1, 2, 0]),\n # tf.float32)\n label = tf.cast(features['label'], tf.float32)\n reshape_label = tf.reshape(features['label'], (1,1))\n return env, reshape_label",
"def test_python_label(self):\n self.write_file('test.py', 'test file')\n config = testutil.AppInfoFake(\n runtime='python',\n entrypoint='run_me_some_python!',\n runtime_config=dict(python_version='3'))\n cfg_files = self.generate_config_data(appinfo=config, deploy=True)\n dockerfile = [f for f in cfg_files if f.filename == 'Dockerfile'][0]\n self.assertIn('LABEL python_version=python3.5\\n', dockerfile.contents)",
"def test_label_with_non_string():\n svl_string = \"\"\"\n DATASETS bigfoot \"bigfoot.csv\"\n LINE bigfoot\n Y classification COUNT\n SPLIT BY classification\n X date BY YEAR LABEL 1.2\n \"\"\"\n\n # TODO Make this exception more specific if possible.\n with pytest.raises(SvlSyntaxError):\n parse_svl(svl_string)",
"def normalisesym(self, label):\n return label",
"def parse_category_label(label: str) -> str:\n return number_first_regex.sub(\n '_',\n space_regex.sub(\n '_',\n label.strip().lower().replace('*', '').replace('(', '').replace(\n ')', '').replace('.', '')))",
"def test_tb12_strings():\n\n err = _do_test_raw(\"\"\"\n var f = \"editImageMapButton.label\";\n var x = \"haveSmtp1.suffix2\";\n \"\"\", versions=TB12_DEFINITION)\n assert err.failed()\n assert err.warnings\n assert err.notices\n assert err.compat_summary[\"errors\"]",
"def _make_vbenf_label(chain_parts):\n\n # toy label for development: run simple and dijet independently.\n # simple makes Et cuts on two jets. Independently (sharing possible)\n # of jets choosean by simple, the dijet\n # scenario requires a dijet of mass > 900, and opening angle in phi > 2.6\n\n assert len(chain_parts) == 1\n scenario = chain_parts[0]['hypoScenario']\n assert scenario.startswith('vbenf')\n args = _args_from_scenario(scenario)\n if not args:\n return 'and([]simple([(50et)(70et)])combgen([(2)] dijet([(900djmass, 26djdphi)])))' \n arg_res = [\n re.compile(r'(?P<lo>\\d*)(?P<key>fbet)(?P<hi>\\d*)'),\n re.compile(r'(?P<lo>\\d*)(?P<key>mass)(?P<hi>\\d*)'),\n re.compile(r'(?P<lo>\\d*)(?P<key>et)(?P<hi>\\d*)'),\n ]\n\n defaults = {\n 'et': ('101', 'inf'),\n 'mass': ('800', 'inf'),\n 'fbet': ('501', 'inf'),\n }\n\n argvals = {}\n while args:\n assert len(args) == len(arg_res)\n arg = args.pop()\n for r in arg_res:\n m = r.match(arg)\n if m is not None:\n arg_res.remove(r)\n gd = m.groupdict()\n key = gd['key']\n try:\n lo = float(gd['lo'])\n except ValueError:\n lo = defaults[key][0]\n argvals[key+'lo'] = lo \n try:\n hi = float(gd['hi'])\n except ValueError:\n hi = defaults[key][1]\n argvals[key+'hi'] = hi\n\n assert len(args) == len(arg_res)\n assert len(args) == 0\n\n return \"\"\"\n and\n (\n []\n simple\n (\n [(%(etlo).0fet, 500neta)(%(etlo).0fet, peta500)]\n )\n combgen\n (\n [(10et, 0eta320)]\n dijet\n (\n [(%(masslo).0fdjmass, 26djdphi)]\n ) \n simple\n (\n [(10et, 0eta320)(20et, 0eta320)]\n )\n )\n )\"\"\" % argvals"
]
| [
"0.65368384",
"0.6447805",
"0.63940996",
"0.63230217",
"0.56715554",
"0.55192065",
"0.5495118",
"0.54187506",
"0.54036874",
"0.53981274",
"0.5353999",
"0.53069943",
"0.5247078",
"0.517073",
"0.51227736",
"0.51175773",
"0.50712484",
"0.50627446",
"0.50543314",
"0.50518024",
"0.50277483",
"0.5008553",
"0.4984666",
"0.49353",
"0.4928316",
"0.4926786",
"0.48869607",
"0.48787653",
"0.48542735",
"0.48519132"
]
| 0.78581035 | 0 |
Find the start of the substring or return None. | def _find_or_none(string, substring, start):
index = string.find(substring, start)
return index if index != -1 else None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_start(self): # -> str | None:\n ...",
"def hgvs_start(self):\n try:\n return self.hp.parse(self.term).posedit.pos.start\n except hgvs.exceptions.HGVSParseError:\n # Log me\n # print(self.term)\n return None",
"def startswith(self, s):\n return self.peek((0, len(s))).startswith(s)",
"def startswith(self, s):\n return self.peek((0, len(s))).startswith(s)",
"def test_match_start_check_at_beginning_of_string(self):\n first_letter = \"a\"\n s = \"abcdef\"\n self.assertEqual(__, re.search(first_letter, s).group())",
"def startswith(value, s):\n\n if not value: return False\n return value.find(s) == 0",
"def findWithStart(astring, achar, start=0):\n ix = start\n found = False\n while ix < len(astring) and not found:\n if astring[ix] == achar:\n found = True\n else:\n ix = ix + 1\n if found:\n return ix\n else:\n return -1",
"def findWithStart(astring, achar, start=0):\n ix = start\n found = False\n while ix < len(astring) and not found:\n if astring[ix] == achar:\n found = True\n else:\n ix = ix + 1\n if found:\n return ix\n else:\n return -1",
"def findWithStart(astring, achar, start=0):\n ix = start\n found = False\n while ix < len(astring) and not found:\n if astring[ix] == achar:\n found = True\n else:\n ix = ix + 1\n if found:\n return ix\n else:\n return -1",
"def match_start(msg, options, fallback=None):\n # Check if actual option\n if not msg:\n return fallback\n if msg in options:\n return msg\n matches = [o for o in options if o.startswith(msg)]\n if len(matches) == 1:\n return matches[0]\n matches = [o for o in options if o.casefold().startswith(msg.casefold())]\n if len(matches) == 1:\n return matches[0]\n return match_start(msg[:-1], options, fallback)",
"def _match_start_get_remaining(self, start, text):\n if not text.startswith(start):\n return\n return text[len(start):]",
"def re_search(pattern, text: str, start_indx: int = 0) -> Optional[int]:\n # \n match = pattern.search(text, start_indx)\n if match is not None:\n indx = match.start()\n else:\n indx = None\n return indx",
"def start_tag_or_none(self, token):\n if self.patterns['start_tag'].match(token):\n return token[2:-6].upper()",
"def get_str_between_s_and_e(start_str, end_str, line):\n\tstart = line.find(start_str)\n\tif start >= 0:\n\t\tstart = start + len(start_str)\n\t\tend = line.find(end_str, start)\n\t\tif end >= 0:\n\t\t\treturn line[start:end].strip()\n\telse:\n\t\treturn None",
"def find_first_node_from_here_by_rule(start_node, select):\n try:\n return next(find_nodes_from_here_by_rule(start_node, select))\n except StopIteration:\n return None",
"def first_segment(self):\n\t\tseg_sort = sorted(self.segments, key=lambda x: stringutil.extract_numbers(x.filename))\n\t\tif seg_sort:\n\t\t\treturn seg_sort[0]\n\t\telse:\n\t\t\treturn None",
"def start_word(self):\n return self._start",
"def start_with_start_codon(rna: str):\n return rna.find(START_CODON)",
"def find_substring_offset(\n context: bytes,\n substring: bytes,\n):\n start = 0\n while True:\n start = context.find(substring, start)\n # use start += 1 to find overlapping matches\n if start == -1:\n return\n yield start\n start += len(substring)",
"def _visit_str_match(\n node,\n string,\n left_offset,\n result: ObjectParserResult\n) -> ParseDelegationReturnMetadata:\n look_for = node.value[1:-1]\n does_start_with = string.startswith(look_for)\n if does_start_with:\n result.remaining_start_i += len(look_for)\n return ParseDelegationReturnMetadata(does_start_with, string, left_offset,\n node, len(look_for))\n else:\n return ParseDelegationReturnMetadata(False, string, left_offset, node, None)",
"def test_match_must_start_at_the_beginning(self):\n third_letter = \"c\"\n s = \"abcdef\"\n self.assertEqual(__, re.match(third_letter, s))",
"def start_with_the_beggining(rna: str):\n return 0",
"def starts_with(text, substring):\n assert text.startswith(substring), \"%r doesn't start with %r\" % (text,\n substring)",
"def first(word):\n\treturn word[0]",
"def find_start(lines, start_str, reverse=True):\n start = -1\n # Iterate backwards until the last value is found\n if reverse:\n for i, line in reversed(list(enumerate(lines))):\n if start_str == line:\n return i\n else:\n for i, line in enumerate(lines):\n if start_str == line:\n return i\n return start",
"def test_search_must_not_start_at_the_beginning(self):\n pattern = \"cde\"\n s = \"abcdefabcdef\"\n self.assertEqual(__, re.search(pattern, s).group())",
"def parseStart(fp):\n\n try:\n ln = fp.readline()\n p = re.compile(r'^Inicial\\s*(?:#.*)?$')\n m = p.match(ln)\n if m == None:\n raise ParseError(ParseError.startMsg)\n\n # match and capture \"{ a }\" etc.\n ln = fp.readline()\n p = re.compile(r'^\\{\\s*(.*)\\s*\\}\\s*(?:#.*)?$')\n m = p.match(ln)\n if m == None:\n raise ParseError(ParseError.startMsg)\n\n return m.group(1).strip()\n\n except:\n raise",
"def next_section_start_pos(text, start):\n section_re = re.compile(\"^.*\\n-+$\", re.I|re.MULTILINE) \n next_section = section_re.search(text, start)\n return len(text) if next_section is None else next_section.start()",
"def starts_with(s, prefix):\n if prefix == '':\n return True\n elif s[0] != prefix[0]:\n return False\n else: # s[0] == prefix[0]\n return starts_with(s[1:], prefix[1:])",
"def _start_magic(line):\n return start(line)"
]
| [
"0.8028616",
"0.67470604",
"0.6622218",
"0.6622218",
"0.66153365",
"0.65290195",
"0.64821583",
"0.64821583",
"0.64821583",
"0.6412685",
"0.61959994",
"0.61613446",
"0.6096288",
"0.6087059",
"0.6080864",
"0.6071819",
"0.60441285",
"0.6015387",
"0.60138243",
"0.6008052",
"0.5991533",
"0.5985119",
"0.5985104",
"0.5980642",
"0.59709543",
"0.5970783",
"0.59682435",
"0.593283",
"0.59309983",
"0.593012"
]
| 0.7821502 | 1 |
Return the default target from a package string. | def _default_target(package):
return package[package.rfind('/')+1:] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def try_import(import_str, default=None):\r\n try:\r\n return import_module(import_str)\r\n except ImportError:\r\n return default",
"def get_module_from_package(package, module, default=None):\n full_module_path = str('{0}.{1}'.format(package.__name__, module))\n\n try:\n return __import__(full_module_path, {}, {}, [package.__name__])\n except ImportError:\n return default",
"def _get_source_type(version_string):\n # type: (str) -> Optional[str]\n if not version_string:\n return None\n\n if \"http://\" in version_string or \"https://\" in version_string:\n return \"url\"\n elif os.path.exists(version_string) and os.path.isfile(version_string):\n return \"file\"\n elif re.match(r\"\\d+\\.\\d+\\.\\d+\", version_string) or version_string == \"current\":\n return \"install_script\"\n else:\n raise ValueError(\n 'Invalid value \"%s\" for version_string. If it\\'s a path to a file, make'\n \"sure the file exists and if it's a URL, ensure URL exists.\"\n % (version_string)\n )",
"def parse(label_string, current_package):\n if label_string.startswith('//'):\n package_start = 2\n package_end = (_find_or_none(label_string, ':', package_start) or\n len(label_string))\n package = label_string[package_start:package_end]\n if not package:\n raise IgaError('empty package of %r' % label_string)\n else:\n package_start = package_end = 0\n package = None\n\n if label_string[package_end:package_end+1] == ':':\n target_start = package_end + 1\n else:\n target_start = package_end\n target = label_string[target_start:]\n\n if not package and not target:\n raise IgaError('cannot parse %r' % label_string)\n package = package or current_package\n if not package:\n raise IgaError('cannot parse package part from %r' % label_string)\n target = target or _default_target(package)\n if not target:\n raise IgaError('cannot parse target part from %r' % label_string)\n\n return Label(\n package=PurePosixPath(package),\n target=PurePosixPath(target),\n )",
"def package(self):\n if self.method == 'buildNotification':\n return self.params[1]['name']\n if self.method in ('createImage', 'image', 'livecd'):\n return self.params[0]\n if self.method == 'indirectionimage':\n return self.params[0]['name']\n # params[0] is the source URL for these tasks:\n if self.method not in ('build', 'buildArch', 'buildContainer',\n 'buildMaven', 'buildSRPMFromSCM', 'maven'):\n return None\n # (I wish there was a better way to do this.)\n source = self.params[0]\n o = urlparse(source)\n # build tasks can load an SRPM from a \"cli-build\" tmpdir:\n if source.endswith('.src.rpm'):\n srpm = os.path.basename(source)\n (name, version, release) = srpm.rsplit('-', 2)\n # Note we're throwing away version and release here. They could be\n # useful eventually, maybe in a \"Package\" class.\n return name\n # or an allowed SCM:\n elif o.scheme:\n package = os.path.basename(o.path)\n if package.endswith('.git'):\n package = package[:-4]\n if self.method == 'buildContainer':\n package += '-container'\n return package\n raise ValueError('could not parse source \"%s\"' % source)",
"def get_package_name():\n\tpackage = None\n\ttry:\n\t\tpackage = os.environ.get('LOCAL_PART', '') + os.environ.get('LOCAL_PART_SUFFIX', '') \n\t\tif not package and len(sys.argv) > 1:\n\t\t\tpackage = sys.argv[-1].lower()\n\texcept Exception,e:\n\t\tlog.error(str(e))\n\tfinally:\n\t\treturn package",
"def target_option(s):\n return s",
"def get_default(self, stmt, slist):\n for s in slist:\n if s.keyword == \"default\": return s.arg\n dst = stmt.search_one(\"default\")\n if dst: return dst.arg\n return None",
"def get_xpath_default_namespace(elem, default_namespace, target_namespace, default=None):\n value = elem.get('xpathDefaultNamespace')\n if value is None:\n return default\n\n value = value.strip()\n if value == '##local':\n return ''\n elif value == '##defaultNamespace':\n return default_namespace\n elif value == '##targetNamespace':\n return target_namespace\n elif len(value.split()) == 1:\n return value\n else:\n admitted_values = ('##defaultNamespace', '##targetNamespace', '##local')\n msg = \"wrong value %r for 'xpathDefaultNamespace' attribute, can be (anyURI | %s).\"\n raise XMLSchemaValueError(msg % (value, ' | '.join(admitted_values)))",
"def get_package_name(pkg, rem):\n flavor = rem.os.package_type\n\n try:\n return _PACKAGE_MAP[pkg][flavor]\n except KeyError:\n return None",
"def get_target_from_pkg_deps(self, matches):\n highest = None\n pkg_name = None\n\n pkgs=[]\n\n for match in matches:\n pkg_name = match['pkg']\n if match['slot'] and match['slot'] != '0':\n pkg_name += '-' + match['slot']\n try:\n pkg = self.manager.get_package(pkg_name)\n pkgs.append(pkg)\n except:\n pass\n\n deep_pkgs = self.get_needed_packages(*pkgs)\n\n for pkg in deep_pkgs:\n try:\n target = pkg.target()\n if not highest:\n highest = target\n pkg_name = pkg.name()\n if self.version_cmp(highest, target) < 0:\n highest = target\n pkg_name = pkg.name()\n except:\n pass\n\n return pkg_name, highest",
"def choose_package(file_type, file_name, exports, target):\n if not file_type:\n return None\n\n file_name = file_name.lower()\n\n if \"DLL\" in file_type:\n if file_name.endswith(\".cpl\"):\n return \"cpl\"\n elif has_com_exports(exports):\n return \"com\"\n else:\n return \"dll\"\n elif \"PE32\" in file_type or \"MS-DOS\" in file_type:\n return \"exe\"\n elif \"PDF\" in file_type or file_name.endswith(\".pdf\"):\n return \"pdf\"\n elif file_name.endswith(\".pub\"):\n return \"pub\"\n elif \"Rich Text Format\" in file_type or \\\n \"Microsoft Word\" in file_type or \\\n \"Microsoft Office Word\" in file_type or \\\n file_name.endswith((\".doc\", \".docx\", \".rtf\", \".docm\")):\n return \"doc\"\n elif \"Microsoft Office Excel\" in file_type or \\\n \"Microsoft Excel\" in file_type or \\\n file_name.endswith((\".xls\", \".xlsx\")):\n return \"xls\"\n elif \"Microsoft PowerPoint\" in file_type or \\\n file_name.endswith((\".ppt\", \".pptx\", \".pps\", \".ppsx\", \".pptm\", \".potm\", \".potx\", \".ppsm\")):\n return \"ppt\"\n elif \"OOXML\" in file_type:\n\treturn check_office_file(target)\n elif \"HTML\" in file_type or file_name.endswith((\".htm\", \".html\")) or file_type==\"data\":\n return \"ie\"\n elif file_name.endswith(\".jar\"):\n return \"jar\"\n elif \"Zip\" in file_type or \"zip\" in file_type:\n\treturn check_office_file(target)\n\n elif file_name.endswith((\".py\", \".pyc\")) or \"Python script\" in file_type:\n return \"python\"\n elif file_name.endswith(\".vbs\"):\n return \"vbs\"\n elif file_name.endswith((\".js\", \".jse\")):\n return \"js\"\n elif file_name.endswith(\".msi\"):\n return \"msi\"\n elif file_name.endswith(\".ps1\"):\n return \"ps1\"\n elif file_name.endswith(\".wsf\"):\n return \"wsf\"\n else:\n return \"generic\"",
"def locate_nuget():\n if NuGetRunner.valid_nuget_executable(\"nuget\"):\n return \"nuget\"\n return None",
"def get_package_name(x):\n return re.search(r\"^(\\w|-)*\", x).group()",
"def get_package_value(key, default_value=None):\r\n try:\r\n config = sublime.load_settings(S.FILE_PACKAGE_SETTINGS)\r\n if config and config.has(key):\r\n return config.get(key)\r\n except RuntimeError:\r\n sublime.set_timeout(lambda: load_package_values(), 0)\r\n if S.CONFIG_PACKAGE:\r\n if key in S.CONFIG_PACKAGE:\r\n return S.CONFIG_PACKAGE[key]\r\n\r\n return default_value",
"def target(cls, address):\r\n return Target.get(Address.parse(cls.build_root, address, is_relative=False))",
"def builder(string):\n return getattr(sys.modules[__name__], string)",
"def _package_root(name):\n return name.split('.', 1)[0]",
"def package_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"package_name\")",
"def get_string_version(name,\n default=DEFAULT_STRING_NOT_FOUND,\n allow_ambiguous=True):\n # get filename of callar\n callar = inspect.getouterframes(inspect.currentframe())[1][1]\n if callar.startswith('<doctest'):\n # called from doctest, find written script file\n callar = inspect.getouterframes(inspect.currentframe())[-1][1]\n # get version info from distribution\n try:\n di = get_distribution(name)\n installed_directory = os.path.join(di.location, name)\n if not callar.startswith(installed_directory) and not allow_ambiguous:\n # not installed, but there is another version that *is*\n raise DistributionNotFound\n except DistributionNotFound:\n return default\n else:\n return di.version",
"def default_java_namespace(self, target):\n self._check_target(target)\n return target.default_java_namespace or self._default_default_java_namespace",
"def _resolve_launcher(launcher):\n import six\n import os\n import importlib\n\n if isinstance(launcher, six.string_types):\n module_name, function_name = os.path.splitext(launcher)\n m = importlib.import_module(module_name)\n return m[function_name]\n\n return launcher",
"def get_default_path(name):\n name_ = name\n if isinstance(name, (DottedName, Symbol)):\n name_ = str(name)\n if name_ in pyccel_external_lib.keys():\n name = pyccel_external_lib[name_].split('.')\n if len(name)>1:\n return DottedName(*name)\n else:\n return name[0]\n return name",
"def get_package_name(self, default=-1):\n specs = glob.glob(f'{self.distgit_repo().distgit_dir}/*.spec')\n if len(specs) != 1:\n if default != -1:\n return default\n raise IOError('Unable to find .spec file in RPM distgit: ' + self.qualified_name)\n\n spec_path = specs[0]\n with open(spec_path, mode='r', encoding='utf-8') as f:\n for line in f.readlines():\n if line.lower().startswith('name:'):\n return line[5:].strip() # Exclude \"Name:\" and then remove whitespace\n\n if default != -1:\n return default\n\n raise IOError(f'Unable to find Name: field in rpm spec: {spec_path}')",
"def get_package_name():\n return try_get_project_property('packageName')",
"def translate(self, package):\r\n if not isinstance(package, self._package_type):\r\n return None\r\n if not package.compatible(identity=self._identity, platform=self._platform):\r\n return None\r\n try:\r\n bdist = package.fetch(location=self._install_cache, conn_timeout=self._conn_timeout)\r\n except package.UnreadableLink as e:\r\n TRACER.log('Failed to fetch %s: %s' % (package, e))\r\n return None\r\n return DistributionHelper.distribution_from_path(bdist)",
"def default_package_version(self, default_package_version):\n self._default_package_version = default_package_version",
"def test_get_component_defaultpackage(self):\n self._ucr({\n 'repository/online/component/b/defaultpackage': 'b',\n 'repository/online/component/c/defaultpackages': 'ca cb',\n 'repository/online/component/d/defaultpackages': 'da,db',\n })\n self.assertEqual(set(('b',)), self.u.get_component_defaultpackage('b'))\n self.assertEqual(set(('ca', 'cb')), self.u.get_component_defaultpackage('c'))\n self.assertEqual(set(('da', 'db')), self.u.get_component_defaultpackage('d'))",
"def get_package_name(item: str) -> Union[str, None]:\n return remove_prefix(item, PackageInfoPrefix.PACKAGE)",
"def get_target_type():\n is_cli = prompt_yn(\"is this a command-line application?\")\n if is_cli:\n return APP_TYPE_CLI\n else:\n return APP_TYPE_SERVER"
]
| [
"0.5908625",
"0.5827704",
"0.56605",
"0.564478",
"0.5603874",
"0.55811745",
"0.5539311",
"0.5500699",
"0.545742",
"0.54094994",
"0.53598744",
"0.531693",
"0.5301025",
"0.5278612",
"0.52658737",
"0.52570933",
"0.5249404",
"0.52488565",
"0.52444404",
"0.5243546",
"0.5239999",
"0.52366304",
"0.5227464",
"0.5222646",
"0.52030694",
"0.5178168",
"0.5176791",
"0.5176149",
"0.5144045",
"0.5110315"
]
| 0.8449904 | 0 |
Pad `data` in 'edge' mode, and get n nearest elements in the padded array and their indexes in the original array | def get_neighbor(x, n, data):
pad_width = np.ceil(n / 2).astype(np.int32)
padded = np.pad(data, pad_width, mode='edge')
x += pad_width
idxes = get_neighbor_idxes(x, n, len(padded))
ret = padded[idxes]
return idxes - pad_width, ret | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def broadcast_offset_param(data):\n return np.array([[data[int(i / 16)][(j + i * 32) % 128]\n for j in range(32)] \n for i in range(32)])",
"def pad_edges(self, pad):\n weights=[]\n for dim, xy in zip([0, 1], [self.x, self.y]):\n xy0 = np.mean(xy)\n W = xy[-1]-xy[0]\n dist = np.abs(xy-xy0)\n wt=np.ones_like(dist)\n wt[ dist >= W/2 - pad] = 0\n weights += [wt]\n self.weight *= weights[0][:,None].dot(weights[1][None,:])",
"def nearest_neighbor(data):\n features = set([i for i, x in enumerate(data[0][1])])\n return leave_one_out_cross_validation(data, features)",
"def get_neighbor_idxes(x, n, limit):\n idxes = sorted(range(limit), key=lambda idx: (abs(x - idx), idx))[:n]\n idxes = sorted(idxes)\n return np.array(idxes)",
"def fft_pad_data(data, mode='edge'):\n\n n_points=int(2**(np.ceil(np.log(np.max(data.shape))/np.log(2))))\n nx, ny = data.shape \n padx = int((n_points - nx)/2)\n pady = int((n_points - ny)/2)\n \n padded_data = np.pad(data, ((padx, padx), (pady, pady)),mode) \n \n mask = np.zeros_like(padded_data, dtype=bool)\n mask[padx:padx+data.shape[0], pady:pady+data.shape[1]] = True \n fpdat = np.fft.fft2(padded_data)\n\n return (fpdat,mask)",
"def AdjacentAveraging(data, nPoints=10):\r\n N = data.shape[0]\r\n avdata = np.zeros_like(data)\r\n for u, i in enumerate(data):\r\n if u<nPoints:\r\n avdata[u] = np.average(data[:(u*2+1)])\r\n elif ((N-u)<nPoints):\r\n avdata[u] = np.average(data[-(N*2-u*2-1):])\r\n else:\r\n temp = data[:(u+1+nPoints)]\r\n temp = temp[-(1+2*nPoints):]\r\n avdata[u] = np.average(temp)\r\n return data-avdata",
"def pad(data, maxPadLength=False):\n data_padded = data.copy()\n bad_indexes = np.isnan(data)\n good_indexes = np.logical_not(bad_indexes)\n good_data = data[good_indexes]\n interpolated = np.interp(\n bad_indexes.nonzero()[0], good_indexes.nonzero()[0], good_data\n )\n data_padded[bad_indexes] = interpolated\n if maxPadLength:\n blocks, n_blocks = ndimage.label(np.isnan(data))\n for bl in range(1, n_blocks + 1):\n # if greater than max pad length then keep as nan\n # i.e. don't interpolate over too large a range\n if (blocks == bl).sum() > maxPadLength:\n data_padded[blocks == bl] = np.nan\n\n return data_padded",
"def find_nearest_neighbors(p, points, k=5):\n dist = np.zeros(points.shape[0])\n for i in range(len(dist)):\n dist[i] = distance(p, points[i])\n ind = np.argsort(dist)\n return ind[0:k]",
"def __find_nearest_centroids(self, data):\n \n self.clusters = np.array([]) \n for i, d in enumerate(data):\n min_dist = np.inf\n self.clusters = np.concatenate((self.clusters, np.array([-1])))\n for j, c in enumerate(self.centroids):\n dist = self.__compute_distance(d, c)\n if min_dist > dist:\n min_dist = dist\n self.clusters[i] = j",
"def compact_neighb(self):\n order = np.argsort(self.edges[:, 0] * float(self.V) + self.edges[:, 1])\n neighb = self.edges[order, 1].astype(np.int_)\n weights = self.weights[order]\n degree, _ = self.degrees()\n idx = np.hstack((0, np.cumsum(degree))).astype(np.int_)\n return idx, neighb, weights",
"def _get_broundary(arr, n_max=16, n_skip=3):\n sub_arr = np.array(arr[n_skip:n_max])\n diffs = sub_arr[1:] - sub_arr[:-1]\n return np.argmin(diffs) + n_skip + 1",
"def find_inner_edge(wrap, dist=25, prom=0.08): # used to be named as find_cell_corner\n if len(wrap.shape) == 2:\n wrap_g = wrap\n elif len(wrap.shape) == 3:\n wrap_g = cv.cvtColor(wrap, cv.COLOR_BGR2GRAY)\n\n sum_x = np.sum(wrap_g, axis=0)\n sum_x = sum_x / np.max(sum_x)\n peak_x, _ = signal.find_peaks(-sum_x, distance=dist, prominence=prom)\n\n sum_y = np.sum(wrap_g, axis=1)\n sum_y = sum_y / np.max(sum_y)\n peak_y, _ = signal.find_peaks(-sum_y, distance=dist, prominence=prom)\n\n return peak_x, peak_y",
"def connect_nearby_runs(pileup_flat, allowed_gap_num):\n chunked = [(k, list(g)) for k, g in itertools.groupby(list(pileup_flat))]\n retval = []\n for i, (item, group) in enumerate(chunked):\n if not item and len(group) <= allowed_gap_num and 0 < i < len(chunked) - 1:\n retval.extend([1] * len(group))\n else:\n retval.extend(group)\n return np.array(retval, dtype=int)",
"def getNeighbours(seg,meta,inversedIndex):\n return np.unique(np.fromiter( (inversedIndex[x] for x in np.concatenate([meta.loc[seg]['ins'],meta.loc[seg]['outs']])),dtype=np.int))",
"def find_edge(point, offset, max_dist, hi, lo, bgArray):\n for i in range(1, max_dist):\n next = (point[0] + i * offset[0], point[1] + i * offset[1])\n if is_edge(next, hi, lo, bgArray):\n return (next, i)\n return None",
"def neighborhood(index, npoints, maxdist=1):\n return [index + i for i in range(-maxdist, maxdist + 1)\n if i != 0 and 0 <= index + i <= npoints - 1]",
"def get_n_neighbors(values, index, n): # TODO returning same tweets every time?\n neighbors = []\n diff = 0\n # check that n/2 lower neighbors exist\n if index >= (n/2):\n for i in range(int(n/2)):\n neighbors.append(values[index - i])\n # add as many as possible, add extra from end if possible\n else:\n diff = int(n/2) - index\n for i in range(int(n/2) - diff):\n neighbors.append(values[index - i])\n # check that n/2 + diff upper neighbors exist, add them\n if len(values) > (index + int(n/2) + diff):\n for i in range(int(n/2) + diff):\n neighbors.append(values[index - i])\n # not enough stuff, add as many as possible\n else:\n diffUpp = (len(values) - 1) - index\n for i in range(diffUpp):\n neighbors.append(values[index + i])\n return neighbors",
"def get_mask_with_stent_likely_positions(data, th, verbose=False):\n \n # NOTE: this pure-Python implementation is little over twice as slow\n # as the Cython implementation, which is a neglectable cost since\n # the other steps in stent segmentation take much longer. By using\n # pure-Python, installation and modification are much easier!\n # It has been tested that this algorithm produces the same results\n # as the Cython version.\n \n # Init mask\n mask = np.zeros_like(data, np.uint8)\n \n # Criterium 1A: voxel must be above th\n # Note that we omit the edges\n #mask[25:-25,25:-25,25:-25] = (data[25:-25,25:-25,25:-25] > th[0]) * 3\n mask[1:-1,1:-1,1:-1] = (data[1:-1,1:-1,1:-1] > th[0]) * 3\n \n cnt = 0\n seed = None\n seeds = []\n values = []\n for z, y, x in zip(*np.where(mask==3)):\n \n # Only proceed if this voxel is \"free\"\n if mask[z,y,x] == 3:\n \n # Set to 0 initially\n mask[z,y,x] = 0 \n \n # Get value\n val = data[z,y,x]\n \n # Get maximum of neighbours\n patch = data[z-1:z+2, y-1:y+2, x-1:x+2].copy()\n patch[1,1,1] = 0\n themax = patch.max()\n \n # # Criterium 2: must be local max\n # if themax > val:\n # continue\n # # Also ensure at least one neighbour to be *smaller*\n # if (val > patch).sum() == 0:\n # continue\n \n # Criterium 3: one neighbour must be above th\n if themax <= th[0]:\n continue\n \n # Criterium 1B: voxel must be below upper seed th, if given\n if len(th) ==2:\n if val > th[1]:\n if verbose:\n print('Seed removed by higher th: ',(z,y,x),'ctvalue=', val)\n continue\n \n # # Criterium 4: seed must be at least 5 voxels away from other seeds\n # if not seed is None:\n # newseed = np.asarray([z,y,x])\n # v = seeds - newseed\n # d = (v[:,0]**2 + v[:,1]**2 + v[:,2]**2)**0.5 # np.linalg.norm(v) # magnitude\n # if d.min() < 5:\n # cnt+=1\n # continue\n seed = np.asarray([z,y,x])\n seeds.append(seed)\n \n # Set, and suppress stent points at direct neighbours\n #mask[z-1:z+2, y-1:y+2, x-1:x+2] = 1 # do not suppress neighbours to have more points for centerline\n mask[z,y,x] = 2\n values.append(data[z,y,x])\n \n print()\n # print('Seed ctvalues: {}'.format(sorted(values)))\n print('-------')\n # print('Seeds removed by criterium 4: {}'.format(cnt))\n \n return mask",
"def _get_edges(padded, axis, width_pair):\n left_index = width_pair[0]\n left_slice = _slice_at_axis(slice(left_index, left_index + 1), axis)\n left_edge = padded[left_slice]\n\n right_index = padded.shape[axis] - width_pair[1]\n right_slice = _slice_at_axis(slice(right_index - 1, right_index), axis)\n right_edge = padded[right_slice]\n\n return left_edge, right_edge",
"def nan_interp(A):\n\tni,nj = np.shape(A)\n\t# extend edges of A by one\n\tA = np.concatenate((np.array([A[:,0]]).transpose(),A,np.array([A[:,-1]]).transpose()),axis=1)\n\tA = np.concatenate((np.array([A[0,:]]),A,np.array([A[-1,:]])),axis=0)\n\t\n\t#nit = 0\n\t#while np.sum(np.isnan(A)) != 0:\n\t#nit+=1\n\tnanp = np.isnan(A)\n\tfor i in range(1,ni+1):\n\t\tfor j in range(1,nj+1):\n\t\t\tif nanp[i,j]:\n\t\t\t\t#\t# edges\n\t\t\t\t#\tif (i==0) & (j!=0)& (j!=nj-1):\n\t\t\t\t#\t\tb = np.array([A[i+1,j],A[i,j-1],A[i,j+1]])\n\t\t\t\t#\tif (i==ni-1) & (j!=0)& (j!=nj-1):\n\t\t\t\t#\t\tb = np.array([A[i-1,j],A[i,j-1],A[i,j+1]])\n\t\t\t\t#\tif (j==0) & (i!=0)& (i!=ni-1):\n\t\t\t\t#\t\tb = np.array([A[i-1,j],A[i+1,j],A[i,j+1]])\n\t\t\t\t#\tif (j==nj-1) & (i!=0)& (i!=ni-1):\n\t\t\t\t#\t\tb = np.array([A[i-1,j],A[i+1,j],A[i,j-1]])\n\t\t\t\t#\t# corners\n\t\t\t\t#\tif (i==0) & (j==0):\n\t\t\t\t#\t\tb = np.array([A[i+1,j],A[i,j+1]])\n\t\t\t\t#\tif (i==ni-1) & (j==0):\n\t\t\t\t#\t\tb = np.array([A[i-1,j],A[i,j+1]])\n\t\t\t\t#\tif (i==0) & (j==nj-1):\n\t\t\t\t#\t\tb = np.array([A[i+1,j],A[i,j-1]])\n\t\t\t\t#\tif (i==ni-1) & (j==nj-1):\n\t\t\t\t#\t\tb = np.array([A[i-1,j],A[i,j-1]])\n\t\t\t\t#\t# core\n\t\t\t\t#\telse:\n\t\t\t\tb = np.array([A[i-1,j],A[i,j-1],A[i+1,j],A[i,j+1]])\n\t\t\t\tsnan = np.sum(np.isnan(b))\n\t\t\t\tsb = np.nansum(b)\n\t\t\t\tA[i,j] = sb/(len(b)-snan)\n\t\t\t\t#print(i,j)\n\t# only the core matters\n\tA = A[1:ni+1,1:nj+1]\n\treturn A",
"def get_n_nearest_neighbors(words: np.ndarray, E: np.ndarray, vocab: np.ndarray, n: int = 10):\n w_idx = np.where(np.in1d(vocab, np.array(words)))[0] # Words indices in Vocab and Embedding E\n C = cosine_distances(E)\n np.fill_diagonal(C, np.inf)\n w_C = C[:, w_idx] # Filter columns for words\n nNN = np.argpartition(w_C, range(n), axis=0)[:n] # Every column j contains the indices of NNs of word_j\n return np.vstack([words, vocab[nNN]]) # 1st row: words, rows 1...n: nearest neighbors",
"def ideal_spacing(data, npoints):\n dims = data.shape\n actual_npoints = (data >= 0).sum()\n spacing = np.ones(3, dtype='uint')\n\n while actual_npoints > npoints:\n\n # Subsample the direction with the highest number of samples\n ddims = dims / spacing\n if ddims[0] >= ddims[1] and ddims[0] >= ddims[2]:\n dir = 0\n elif ddims[1] > ddims[0] and ddims[1] >= ddims[2]:\n dir = 1\n else:\n dir = 2\n spacing[dir] += 1\n subdata = data[::spacing[0], ::spacing[1], ::spacing[2]]\n actual_npoints = (subdata >= 0).sum()\n\n return spacing",
"def neighbours(ar, cur_index, cnt_of_neiboors=3, exclude_from_neibors_index=[]):\n rmax = np.max([0, cur_index + cnt_of_neiboors - len(ar)])\n lmin = np.max([cur_index - (cnt_of_neiboors + rmax), 0])\n\n excl = set(exclude_from_neibors_index) | {cur_index}\n nbs = [i for i in range(lmin, len(ar)) if i not in excl]\n return ar[nbs[:cnt_of_neiboors * 2]]",
"def find_all_nearest_neighbours(point_cloud:np.ndarray) -> np.ndarray:\n pass",
"def extract_unpadded(self, data, ind):\n batch_range = torch.arange(0, data.shape[0], dtype=torch.int64).to(self.device)\n indices = torch.stack([batch_range, ind], dim=1)\n res = data[indices.transpose(0, 1).tolist()]\n return res",
"def _get_same_padding_conv_nd(\n image_size: list[int], kernel_size: tuple[int, ...], dilation: tuple[int, ...], stride: tuple[int, ...]\n) -> list[int]:\n # get number of spatial dimensions, corresponds to kernel size length\n num_dims = len(kernel_size)\n\n # additional checks to populate dilation and stride (in case they are single entry tuples)\n if len(dilation) == 1:\n dilation = dilation * num_dims\n\n if len(stride) == 1:\n stride = stride * num_dims\n\n # equation to calculate (pad^+ + pad^-) size\n _pad_size: list[int] = [\n max((math.ceil(_i_s / _s) - 1) * _s + (_k_s - 1) * _d + 1 - _i_s, 0)\n for _i_s, _k_s, _d, _s in zip(image_size, kernel_size, dilation, stride)\n ]\n # distribute paddings into pad^+ and pad^- following Tensorflow's same padding strategy\n _paddings: list[tuple[int, int]] = [(_p // 2, _p - _p // 2) for _p in _pad_size]\n\n # unroll list of tuples to tuples, and then to list\n # reversed as nn.ConstantPadNd expects paddings starting with last dimension\n _paddings_ret: list[int] = [outer for inner in reversed(_paddings) for outer in inner]\n return _paddings_ret",
"def _get_neighbors(x, y, data, distance=1):\n mask = numpy.zeros_like(data, dtype=numpy.bool)\n y_max, x_max = data.shape\n y_low = max(y - distance, 0)\n y_high = min(y + distance + 1, y_max)\n x_low = max(x - distance, 0)\n x_high = min(x + distance + 1, x_max)\n mask[y_low:y_high, x_low:x_high] = True\n mask[y, x] = False\n return mask",
"def pad_array_index(low,high,segment_length,reverse=False):\n \n remainder = (segment_length-(high-low)%segment_length)\n if not reverse:\n return high + remainder\n else:\n return low - remainder",
"def get_arr_edge_indices(arr, res='4x5', extra_points_point_on_edge=None,\n verbose=True, debug=False):\n if verbose:\n print(('get_arr_edge_indices for arr of shape: ', arr.shape))\n\n # initialise variables\n lon_c, lat_c, NIU = get_latlonalt4res(res=res, centre=True)\n lon_e, lat_e, NIU = get_latlonalt4res(res=res, centre=False)\n lon_diff = lon_e[-5]-lon_e[-6]\n lat_diff = lat_e[-5]-lat_e[-6]\n nn, n, = 0, 0\n last_lat_box = arr[nn, n]\n coords = []\n last_lon_box = arr[nn, n]\n need_lon_outer_edge, need_lat_outer_edge = False, False\n if debug:\n print((lon_e, lat_e))\n\n # ---- Loop X dimension ( lon )\n for nn, lon_ in enumerate(lon_c):\n\n # Loop Y dimension ( lat ) and store edges\n for n, lat_ in enumerate(lat_c):\n\n if debug:\n print((arr[nn, n], last_lat_box, last_lon_box,\n arr[nn, n] == last_lat_box, arr[nn, n] == last_lon_box))\n\n if arr[nn, n] != last_lat_box:\n\n # If 1st lat, selct bottom of box\n point_lon = lon_e[nn]+lon_diff/2\n if need_lat_outer_edge:\n point_lat = lat_e[n+1]\n else:\n point_lat = lat_e[n]\n need_lat_outer_edge = True\n need_lat_outer_edge = False\n\n # Add mid point to cordinates list\n if isinstance(extra_points_point_on_edge, type(None)):\n mid_point = [point_lon, point_lat]\n coords += [mid_point]\n\n # Add given number of points along edge\n else:\n coords += [[lon_e[nn]+(lon_diff*i), point_lat] for i in\n np.linspace(0, 1, extra_points_point_on_edge,\n endpoint=True)]\n\n # temporally save the previous box's value\n last_lat_box = arr[nn, n]\n\n # ---- Loop Y dimension ( lat )\n for n, lat_ in enumerate(lat_c):\n\n if debug:\n print((arr[nn, n], last_lat_box, last_lon_box,\n arr[nn, n] == last_lat_box, arr[nn, n] == last_lon_box))\n # Loop X dimension ( lon ) and store edges\n for nn, lon_ in enumerate(lon_c):\n\n # If change in value at to list\n if arr[nn, n] != last_lon_box:\n point_lat = lat_e[n]+lat_diff/2\n\n # Make sure we select the edge lon\n if need_lon_outer_edge:\n point_lon = lon_e[nn+1]\n else:\n point_lon = lon_e[nn]\n need_lon_outer_edge = True\n need_lon_outer_edge = False\n\n # Add mid point to coordinates list\n if isinstance(extra_points_point_on_edge, type(None)):\n mid_point = [point_lon, point_lat]\n coords += [mid_point]\n\n # Add given number of points along edge\n else:\n coords += [[point_lon, lat_e[n]+(lat_diff*i)] for i in\n np.linspace(0, 1, extra_points_point_on_edge,\n endpoint=True)]\n\n # temporally save the previous box's value\n last_lon_box = arr[nn, n]\n\n return coords",
"def pad_edge_to_shape(arr, nx, ny):\n\tnxa, nya = arr.shape\n\n\tfor n in [nx, ny, nxa, nya]:\n\t\tif not isodd(n):\n\t\t\traise Exception(\"[pad_edge_to_shape] only accept odd-number-length arrays\")\n\n\tif nx > nxa:\n\t\tdx = (nx - nxa)/2\n\t\tarr = np.pad(arr, ((int(dx), int(dx)), (0, 0)), 'constant', constant_values=0)\n\telif nx < nxa:\n\t\traise Exception(\"[pad_edge_to_shape] final dimension smaller than array\")\n\n\tif ny > nya:\n\t\tdy = (ny - nya)/2\n\t\tarr = np.pad(arr, ((0, 0), (int(dy), int(dy))), 'constant', constant_values=0)\n\telif ny < nya:\n\t\traise Exception(\"[pad_edge_to_shape] final dimension smaller than array\")\n\n\treturn arr"
]
| [
"0.60615134",
"0.590217",
"0.5884431",
"0.56500673",
"0.55898046",
"0.5451748",
"0.5354865",
"0.5353324",
"0.53041357",
"0.526425",
"0.5255124",
"0.5213514",
"0.51954556",
"0.51946807",
"0.5189732",
"0.5172915",
"0.51662403",
"0.515853",
"0.5158071",
"0.51450956",
"0.513981",
"0.5106935",
"0.50972724",
"0.50862044",
"0.5066594",
"0.5052345",
"0.50489455",
"0.50392544",
"0.502027",
"0.5013237"
]
| 0.68009865 | 0 |
Return value in array even if they are null. | def _func_null_leaf(self, arr: list, search: str) -> list:
return [a.get(search) for a in arr] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get(self, value):\n if self.array is not None:\n len_array = self._len()\n if len_array == 0:\n return False\n elif value < len_array and value >= 0:\n return int(self.array[value])\n print(\">>> List is None\")\n return None",
"def _always_array(value: str | list | None) -> list:\n if value is None:\n value = []\n elif not isinstance(value, list):\n value = [value]\n return value",
"def _get_none(self, x, y):\n try:\n return self[x, y]\n except ArrayError:\n return None",
"def isna(self):\n # type: () -> np.ndarray\n return extract_isnull_bytemap(self.data)",
"def isna(self):\n # type: () -> np.ndarray\n return extract_isnull_bytemap(self.data)",
"def nonull(val):\n return val if not pd.isnull(val) else None",
"def null_values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetBulletNullValueArgs']]]]:\n return pulumi.get(self, \"null_values\")",
"def in_array(val, obj):\n return (val in obj)",
"def get_arraysurdit(self,list_values):\n\n\t\tself.list_values = list_values\n\t\tset_values = set([x for x in self.list_values if self.list_values.count(x) > 1])\n\t\treturn list(set_values)[0]",
"def null_values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetHeatmapNullValueArgs']]]]:\n return pulumi.get(self, \"null_values\")",
"def null_values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetTableNullValueArgs']]]]:\n return pulumi.get(self, \"null_values\")",
"def filterNull(self, result):\n\t\treturn [_ for _ in result if _]",
"def _get_list_value(index, array):\r\n if len(array) == 0:\r\n return None\r\n elif index >= 0 and index < len(array):\r\n return array[index]\r\n return array[index % len(array)]",
"def null_values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetPyNullValueArgs']]]]:\n return pulumi.get(self, \"null_values\")",
"def _parse_array(node, key):\n element = node.get(key)\n if element is not None:\n return element\n else:\n return None",
"def null_values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetJsonNullValueArgs']]]]:\n return pulumi.get(self, \"null_values\")",
"def _single_array_element(data_obj, xj_path, array_path, create_dict_path):\n\n val_type, array_path = _clean_key_type(array_path)\n array_idx = _get_array_index(array_path)\n if data_obj and isinstance(data_obj, (list, tuple)):\n try:\n value = data_obj[array_idx]\n if val_type is not None and not isinstance(value, val_type):\n raise XJPathError('Index array \"%s\" of \"%s\" type does not '\n 'match expected type \"%s\"' %\n (array_idx, type(value).__name__,\n val_type.__name__))\n\n if xj_path:\n return path_lookup(value, xj_path, create_dict_path)\n else:\n return value, True\n except IndexError:\n return None, False\n else:\n if val_type is not None:\n raise XJPathError('Expected the list element type, but \"%s\" found' %\n type(data_obj).__name__)\n return None, False",
"def null_values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetLineNullValueArgs']]]]:\n return pulumi.get(self, \"null_values\")",
"def get_value(self) -> Union[Iterable[T], List[T]]:\n return self.array",
"def null_values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetAreaNullValueArgs']]]]:\n return pulumi.get(self, \"null_values\")",
"def null_values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetBillboardNullValueArgs']]]]:\n return pulumi.get(self, \"null_values\")",
"def null_values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetFunnelNullValueArgs']]]]:\n return pulumi.get(self, \"null_values\")",
"def null_values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetLogTableNullValueArgs']]]]:\n return pulumi.get(self, \"null_values\")",
"def possible_values(self, seq):\n for element in seq:\n if element:\n return element\n return False",
"def values(self):\n return [entry.value for entry in self.table if entry.value is not None]",
"def convert_null(values: Iterable) -> list:\n\n return [x\n if x is not None\n else NULL\n for x in values]",
"def null_values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetStackedBarNullValueArgs']]]]:\n return pulumi.get(self, \"null_values\")",
"def _get_value(self, i, j):\n m = len(self.data)\n n = len(self.data[0])\n if i >= m or j >= n:\n return None\n if self.data[i][j] is None:\n indices = [(i+di, j+dj) for di, dj in [[0, -1], [0,1], [-1,0], [1,0]]]\n values = [self.data[x][y] for x,y in indices if 0<=x<m and 0<=y<n]\n if any(v is None for v in values):\n raise Exception(\"ERROR: Input contains adjacent missing values\")\n return sum(values)/float(len(values))\n else:\n return self.data[i][j]",
"def null_values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetBarNullValueArgs']]]]:\n return pulumi.get(self, \"null_values\")",
"def get_existing_values(self): #DONE\n return (value.value for value in self.address.values() if value.value)"
]
| [
"0.592894",
"0.5727499",
"0.5704296",
"0.5620495",
"0.5620495",
"0.5583163",
"0.5563568",
"0.5524162",
"0.5510691",
"0.54853797",
"0.5483888",
"0.5475",
"0.54746735",
"0.5448026",
"0.54332584",
"0.5428005",
"0.54278314",
"0.5414081",
"0.5383123",
"0.538096",
"0.537916",
"0.5379147",
"0.5362966",
"0.53624713",
"0.5332161",
"0.52951616",
"0.5280454",
"0.5279443",
"0.52741855",
"0.527089"
]
| 0.6111107 | 0 |
Ensure value is always an array. | def _always_array(value: str | list | None) -> list:
if value is None:
value = []
elif not isinstance(value, list):
value = [value]
return value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def as_array(value):\n\tif not isinstance(value, list):\n\t\treturn [value]\n\treturn value",
"def is_array(val):\n return (\n isinstance(val, tuple) or \\\n isinstance(val, dict) or \\\n isinstance(val, list)\n )",
"def check_array(arr: Arrayable) -> np.ndarray:\n if isinstance(arr, np.ndarray):\n return arr\n return np.array(arr)",
"def is_array(self):\n return False",
"def _validate_array(t):\n basetype = t.type\n if is_array(basetype):\n raise ArrayOfArrayError(t)\n validate(basetype)",
"def is_arrayexpress_array(val):\n return arrayexpress_array_regexp.match(val)",
"def is_array(t):\n return isinstance(t, ast.Array)",
"def normalize_value(value: Any) -> Optional[Union[np.ndarray, List[Any], Tuple[Any]]]:\n if value is None:\n # Exclude None from falling through to blanket np.asarray conversion.\n return value\n\n if isinstance(value, (list, tuple, dict)):\n return value\n\n array = np.asarray(value)\n # TODO(#5359): Move into the function abi.\n if isinstance(value, (bool, int, float)):\n # Manually convert ints and floats to 32 bits.\n if array.dtype == np.float64:\n array = array.astype(np.float32)\n elif array.dtype == np.int64:\n array = array.astype(np.int32)\n\n return array",
"def IsArray(obj):\n return isinstance(obj, (list, tuple))",
"def _is_key_value_array(self, data):\n for d in data:\n if not self._is_key_value(d):\n return False\n return True",
"def validate(self, value):\n\n if value is None:\n return None\n\n if not isinstance(value, list):\n raise ValueError(\"%r is not a list\" % (value,))\n\n for i, choice in enumerate(value):\n if choice is None:\n raise ValueError(\"Array elements can not be null\")\n if choice not in self.choices:\n raise ValueError(\"%s is not a valid value for element %s\" %\n (choice, i))\n\n return value",
"def _as_list(value):\n if not isinstance(value, list):\n value = [value]\n return value",
"def is_array(self, arr):\n return isinstance(arr, np.ndarray)",
"def is_array_param(param):\n return param.get('tags') and param['tags']['type'] == 'array'",
"def is_array(self):\n return len(self.descriptor) > 1",
"def testscfvaluetype(self):\r\n assert isinstance(self.data.scfvalues, list)\r\n assert isinstance(self.data.scfvalues[0], numpy.ndarray)",
"def is_array(schema_obj):\n\n if isinstance(schema_obj, schema.Field):\n return schema_obj.is_array\n return False",
"def type_array():\n return []",
"def check_array(self, v, t):\n raise NotImplementedError('check_array')",
"def test_make_np_iterable_list(val):\n val_rec = uc._make_np_iterable(val)\n\n assert isinstance(val_rec, np.ndarray)\n assert len(val_rec) == len(val)",
"def test_make_np_iterable(val):\n val_rec = uc._make_np_iterable(val)\n\n assert isinstance(val_rec, np.ndarray)\n assert len(val_rec) == 1",
"def is_list(value):\n return isinstance(value, list) or None",
"def _is_list(val):\n\n return isinstance(val, list)",
"def NeedsArray(self, type_):\n return self._NameComponents(type_) in self._array_types",
"def is_list(value):\n return isinstance(value, list)",
"def _assert_valid_value_and_cast(self, value):\n if not hasattr(value, '__iter__'):\n raise AssertionError(f\"{value} is not a valid iterable type\")\n return value",
"def _is_iterable(value):\n return isinstance(value, list) or isinstance(value, tuple)",
"def is_string_array(self):\n return self.type == Property.PropertyType.stringArray",
"def arrayify(possible_array):\n if isinstance(possible_array, basestring):\n return [possible_array]\n return possible_array",
"def is_array(type):\n nake_type = remove_alias(type)\n nake_type = remove_reference(nake_type)\n nake_type = remove_cv(nake_type)\n return isinstance(nake_type, cpptypes.array_t)"
]
| [
"0.7712342",
"0.74892855",
"0.708963",
"0.7066387",
"0.7044346",
"0.70055664",
"0.69729424",
"0.66974515",
"0.66935796",
"0.66363895",
"0.6573458",
"0.6485523",
"0.6449914",
"0.64304817",
"0.6423054",
"0.64192736",
"0.6382778",
"0.63701683",
"0.6363354",
"0.634957",
"0.63191634",
"0.6280931",
"0.6261651",
"0.6244993",
"0.6210169",
"0.62072474",
"0.6204578",
"0.6161775",
"0.6159777",
"0.6156578"
]
| 0.7897924 | 0 |
Process fields that can be static values or a MetadataTransformModel. If value is not a MetadataTransformModel (i.e., it's a static value), and expected_length is given, "spread" the static value into an array of expected_length length. | def _process_metadata_transform_model(
self, value: bool | MetadataTransformModel | str | None, expected_length: int | None = None
) -> list:
if value is None:
if expected_length:
return [value] * expected_length
return []
if isinstance(value, MetadataTransformModel):
transformed_value = self._transform_values(value)
if expected_length is not None and len(transformed_value) != expected_length:
raise RuntimeError(
f'Expected transform value of length {expected_length} for {value}, '
f'but length was {len(transformed_value)}'
)
return transformed_value
if expected_length is not None:
transformed_value = [value] * expected_length
else:
transformed_value = [value]
return transformed_value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def init_value(self, value, strict: bool = True):\n value = super().init_value(value)\n if isinstance(self.data_type, MetaModel):\n t = self.data_type.get_class()\n elif issubclass(self.data_type, SimpleField):\n t = self.data_type.static_field_type\n else:\n raise MetaTypeException(f'The data_type must be either a '\n f'SimpleField or a MetaModel instance, not '\n f'a {type(self.data_type)}.')\n for el in value:\n # noinspection PyTypeHints\n if not isinstance(el, t):\n raise ModelInitException(f'The type of the {el} is not {t}')\n return value",
"def _transform_values(self, metadata: MetadataTransformModel | None) -> list[str]:\n\n def _default() -> list:\n \"\"\"Return default value (as list) if exists, else empty list.\"\"\"\n if metadata is None or metadata.default is None:\n return []\n\n return self._always_array(metadata.default)\n\n # not all items have all metadata fields\n if metadata is None:\n # self.log.trace('feature=transform, action=transform-values, metadata=None')\n return []\n\n # not all metadata fields have a path, but they must have a path or default\n if metadata.path is None:\n default = _default()\n # self.log.trace(\n # f'feature=transform, action=transform-values, metadata-path=None, value={default}'\n # )\n return default\n\n # path search can return multiple data types and single or multiple values\n value = self._path_search(metadata.path)\n\n # return default if value of None is returned from Path\n # IMPORTANT: a None value passed to the transform may cause a failure (lambda x: x.lower())\n if value in [None, []]:\n default = _default()\n # self.log.trace(\n # f'feature=transform, action=transform-values, metadata-path=None, value={default}'\n # )\n return default\n\n for t in metadata.transform or []:\n if t.filter_map is not None:\n # when path search returns an array of values, each value is mapped\n value = [\n self._transform_value_map(v, t.filter_map, True)\n for v in self._always_array(value)\n ]\n elif t.static_map is not None:\n # when path search returns an array of values, each value is mapped\n _values = []\n for v in self._always_array(value):\n v = self._transform_value_map(v, t.static_map)\n if v is not None:\n _values.append(v)\n value = _values\n # PYRIGHT-MISS - None check for value already performed above\n elif callable(t.method) and value is not None:\n value = self._transform_value_callable(value, t.method, t.kwargs)\n elif callable(t.for_each):\n value = [\n self._transform_value_callable(v, t.for_each, t.kwargs) if v is not None else v\n for v in self._always_array(value)\n ]\n\n # the output should be an array of strings or empty array\n _value = []\n for v in self._always_array(value):\n if v in [None, '']:\n if metadata.default is not None:\n _value.append(metadata.default)\n else:\n _value.append(v)\n else:\n _value.append(v)\n\n # self.log.trace(f'feature=transform, action=transform-values, value={_value}')\n return _value",
"def _check_values_len(self, data_batch: Dict[str, List[str]]):\n values_len = [len(v) for _, v in data_batch.items()]\n unique_len = len(set(values_len))\n assert unique_len == 1, \"Length of values are not consistent across\"",
"def test_SameNumberOfFields(self):\n pass",
"def __extract_fields(self):\n for name, stuff in self.data.items():\n if stuff == (): # Empty tuple == 1 bit, value of 0\n self.fields.append(Field(name=name, value=0, size=1))\n elif isinstance(stuff, int): # int == specified value, value of 0\n self.fields.append(Field(name=name, value=0, size=stuff))\n elif isinstance(stuff, str): # str == specified value, value of 0\n pattern = re.compile(\"[0-9]+[bB]\")\n if pattern.match(stuff):\n if \"b\" in stuff: # bits specified\n size = int(stuff[:stuff.lower().index(\"b\")])\n self.fields.append(Field(name=name, value=0, size=size))\n elif \"B\" in stuff: # Bytes specified\n size = int(stuff[:stuff.lower().index(\"b\")]) * 8\n self.fields.append(Field(name=name, value=0, size=size))\n else: # No other string option, so must have been one of the \"vary\" constants from above.\n self.fields.append(Field(name=name, value=stuff, size=\"vary\"))\n elif isinstance(stuff, tuple) or isinstance(stuff, list): # specified value and size.\n if isinstance(stuff[0], str):\n if \"b\" in stuff[0]: # Bits\n size = int(stuff[0][:stuff[0].lower().index(\"b\")])\n # if not self.__check_bit_size(stuff[1], size):\n # raise Exception(\"error. \" + str(stuff[1]) + \" cannot be fit in \" + str(size) + \" bits.\")\n self.fields.append(Field(name=name, value=stuff[1], size=size))\n elif \"B\" in stuff[0]: # Bytes\n size = int(stuff[0][:stuff[0].lower().index(\"b\")]) * 8\n # if not self.__check_bit_size(stuff[1], size):\n # raise Exception(\"error. \" + str(stuff[1]) + \" cannot be fit in \" + str(size) + \" bits.\")\n self.fields.append(Field(name=name, value=stuff[1], size=size))\n elif stuff[0].lower() == NULL_TERMINATE:\n self.fields.append(Field(name=name, value=stuff[1], size=NULL_TERMINATE))\n elif stuff[0].lower() == PREFIX_LENGTH:\n self.fields.append(Field(name=name, value=stuff[1], size=PREFIX_LENGTH))\n elif stuff[0].lower() == PREFIX_LEN_NULL_TERM:\n self.fields.append(Field(name=name, value=stuff[1], size=PREFIX_LEN_NULL_TERM))\n elif stuff[0].lower() == IPv4:\n self.fields.append(Field(name=name, value=stuff[1], size=IPv4))\n elif isinstance(stuff[0], int):\n # if not self.__check_bit_size(stuff[1], stuff[0]):\n # raise Exception(\"error. \" + str(stuff[1]) + \" cannot be fit in \" + str(stuff[0]) + \" bits.\")\n self.fields.append(Field(name=name, value=stuff[1], size=stuff[0]))",
"def _standardize_input(self, value):\n tuple_maker = lambda x: ((x,) \n if (any(isinstance(x, t) for t in (str, bytes, bytearray)) \n or not isinstance(x, collections.Iterable))\n else (x if hasattr(x, \"__len__\") else tuple(x)))\n \n if isinstance(value, Dta):\n value = value._varvals\n else: # force input into 2d structure\n if (any(isinstance(value, t) for t in (str,bytes,bytearray))\n or not isinstance(value, collections.Iterable)):\n value = ((value,),)\n else:\n value = tuple(tuple_maker(v) for v in value)\n \n return value",
"def test_field_length_matches_data_type_field_length(self):\n for known_message_type in KNOWN_MESSAGE_TYPES:\n for field in known_message_type.fields:\n if self.is_verbose:\n print 'Checking length setup of field {0} in message {1}'.format(field.name, known_message_type.name)\n self.assertEqual(field.length, field.data_type.length)",
"def _parse_fixed_length_data(self, original_record, column, null_table):\n column_name = column.col_name_str\n # Boolean fields are encoded in the null table\n if column.type == TYPE_BOOLEAN:\n if column.column_id > len(null_table):\n logging.error(f\"Failed to parse bool field, Column not found in null_table column: {column_name} ,\"\n f\" column id: {column.column_id} , null_table: {null_table}\")\n return\n\n parsed_type = null_table[column.column_id]\n else:\n\n if column.fixed_offset > len(original_record):\n logging.error(f\"Column offset is bigger than the length of the record {column.fixed_offset}\")\n return\n record = original_record[column.fixed_offset:]\n parsed_type = parse_type(column.type, record, version=self.version)\n\n if not null_table[column.column_id] and column.type != TYPE_BOOLEAN:\n self.parsed_table[column_name].append(\"\")\n else:\n self.parsed_table[column_name].append(parsed_type)",
"def test_construct_values_raises_missing_cleaned_no_error(self):\n constructor_fields = ('first', 'second', 'last', )\n values = ['FirstValue', 'SecondValue', 'LastValue']\n cleaned_data = getattr(self.form, 'cleaned_data', {})\n cleaned_data.update(dict(zip(constructor_fields[:-1], values[:-1])))\n self.form.cleaned_data = cleaned_data\n err = \"This computed value can only be evaluated after fields it depends on have been cleaned. \"\n err += \"The field order must have the computed field after fields used for its value. \"\n with self.assertRaisesMessage(ImproperlyConfigured, err):\n self.form.construct_value_from_values(constructor_fields)",
"def test_construct_values_raises_on_invalid_normalize(self):\n constructor_fields = ('first', 'second', 'last', )\n values = ['first_value', 'second_value', 'last_value']\n cleaned_data = getattr(self.form, 'cleaned_data', {})\n cleaned_data.update(dict(zip(constructor_fields, values)))\n self.form.cleaned_data = cleaned_data\n normalize = 'not a valid normalize function'\n message = \"The normalize parameter must be a callable or None. \"\n with self.assertRaisesMessage(ImproperlyConfigured, message):\n self.form.construct_value_from_values(constructor_fields, normalize=normalize)",
"def _pre_process_record(self, data):\n result = []\n symbolic_split = \",\"\n if isinstance(data, dict):\n if self.measure is None:\n logging.error(\"Missing the name of keys pointing to values\")\n raise UnSADException.data_format_exception()\n if self.timestamp is not None:\n if self.timestamp in data:\n try:\n result.append(float(data[self.timestamp]))\n [result.append(data[measure])\n for measure in self.measure]\n except RuntimeError:\n logging.error(\"Invalid input data type, should be a numerical type\")\n logging.error(\"Input data should contain all the fields \"\n \"that are specified when initialize the detector: \" + str(self.measure))\n raise UnSADException.data_type_exception()\n else:\n logging.error(\"Input data should contain a timestamp field:\" + str(self.timestamp))\n raise UnSADException.data_format_exception()\n else:\n try:\n [result.append(data[measure]) for measure in self.measure]\n except RuntimeError:\n logging.error(\"Input data should contain all the fields \"\n \"that are specified when initialize the detector: \" + str(self.measure))\n raise UnSADException.data_format_exception()\n elif isinstance(data, Iterable) and not isinstance(data, str):\n if self.timestamp is not None:\n if len(data) == len(self.measure) + 1:\n try:\n result = list(data)\n result[0] = float(result[0])\n except RuntimeError as e:\n logging.error(\"Invalid input data type, timestamp should be a numerical type\")\n raise UnSADException.data_type_exception()\n else:\n logging.error(\"The number of input parameters:\" + str(\n len(data)) + \" does not match with this detectors:\" + str(len(self.measure) + 1))\n raise UnSADException.input_number_exception()\n else:\n if self.measure is None or len(data) == len(self.measure):\n result = data\n else:\n logging.error(\"The number of input parameters:\" + str(\n len(data)) + \" does not match with this detectors:\" + str(len(self.measure)))\n raise UnSADException.input_number_exception()\n else:\n if (self.measure is None or len(self.measure) == 1) and self.timestamp is None:\n if self.symbolic:\n return str(data)\n else:\n try:\n return float(data)\n except RuntimeError as e:\n logging.error(\"Invalid input data type, should be a numerical type\")\n raise UnSADException.data_type_exception()\n else:\n logging.error(\"This detector is not initialized properly\")\n raise UnSADException.not_proper_initialize_exception()\n\n if not self.symbolic:\n try:\n processed_result = [float(result[i])\n for i in range(len(result))]\n except RuntimeError as e:\n logging.error(\"Invalid input data type, should be a numerical type\")\n raise UnSADException.data_type_exception()\n\n return processed_result[0] if len(processed_result) == 1 else processed_result\n\n else:\n if self.timestamp is not None:\n return [result[0], symbolic_split.join([str(s) for s in result[1:]])]\n else:\n return symbolic_split.join([str(s) for s in result])",
"def _parse_dynamic_length_data(self, original_record, relative_record_metadata,\n relative_records_column_map):\n relative_offsets = relative_record_metadata.variable_length_field_offsets\n jump_table_addition = 0\n for i, column_index in enumerate(relative_records_column_map):\n column = relative_records_column_map[column_index]\n col_name = column.col_name_str\n if self.version == 3:\n if i in relative_record_metadata.variable_length_jump_table:\n jump_table_addition += 0x100\n rel_start = relative_offsets[i]\n # If this is the last one use var_len_count as end offset\n if i + 1 == len(relative_offsets):\n rel_end = relative_record_metadata.var_len_count\n else:\n rel_end = relative_offsets[i + 1]\n\n\n\n # Not sure why\n # if self.version > 3:\n # if rel_end > len(original_record):\n # rel_end = rel_end & 0xff\n # if rel_start > len(original_record):\n # rel_start = rel_start & 0xff\n\n # if rel_start and rel_end are the same there is no data in this slot\n if rel_start == rel_end:\n self.parsed_table[col_name].append(\"\")\n continue\n\n if len(relative_offsets) > i + 2 and rel_end > relative_offsets[i + 2]:\n rel_end -= 256\n relative_offsets[i + 1] = rel_end\n\n if i > 0 and rel_start < relative_offsets[i - 1]:\n rel_start += 512\n\n if rel_end > len(original_record):\n rel_end = rel_end & 0xff\n\n if rel_start > len(original_record):\n rel_start = rel_start & 0xff\n\n if rel_start > rel_end and i + 1 == len(relative_offsets):\n rel_end += 256\n\n relative_obj_data = original_record[rel_start + jump_table_addition: rel_end + jump_table_addition]\n if column.type == TYPE_MEMO:\n try:\n parsed_type = self._parse_memo(relative_obj_data, column)\n except ConstructError:\n logging.warning(\"Failed to parse memo field. Using data as bytes\")\n parsed_type = relative_obj_data\n else:\n parsed_type = parse_type(column.type, relative_obj_data, column.length, version=self.version)\n dummy = 1\n self.parsed_table[col_name].append(parsed_type)",
"def test_construct_values_skips_already_caught_errors(self):\n constructor_fields = ('first', 'second', 'last', )\n values = ['FirstValue', 'SecondValue', 'LastValue']\n expected = None # Normal is: '_'.join(ea for ea in values if ea).casefold()\n cleaned_data = getattr(self.form, 'cleaned_data', {})\n cleaned_data.update(dict(zip(constructor_fields[:-1], values[:-1])))\n self.form.cleaned_data = cleaned_data\n original_errors = deepcopy(self.form._errors)\n if self.form._errors is None:\n self.form._errors = ErrorDict() # mimic full_clean: _error is an ErrorDict\n self.form.add_error('last', 'An error for testing')\n actual = self.form.construct_value_from_values(constructor_fields)\n\n self.assertIsNone(actual)\n self.assertEqual(expected, actual)\n\n self.form._errors = original_errors",
"def test_construct_values_as_expected(self):\n constructor_fields = ('first', 'second', 'last', )\n values = ['FirstValue', 'SecondValue', 'LastValue']\n expected = '_**_'.join(ea for ea in values if ea).casefold()\n cleaned_data = getattr(self.form, 'cleaned_data', {})\n cleaned_data.update(dict(zip(constructor_fields, values)))\n self.form.cleaned_data = cleaned_data\n actual = self.form.construct_value_from_values(constructor_fields, '_**_')\n simple = self.form.construct_value_from_values(constructor_fields)\n\n self.assertEqual(expected, actual)\n self.assertEqual('firstvalue_**_secondvalue_**_lastvalue', actual)\n self.assertEqual('_'.join(values).casefold(), simple)\n self.assertEqual('firstvalue_secondvalue_lastvalue', simple)",
"def _prep_values(\n self,\n size: Union[\n int, float, List[int], Tuple[int, int], List[float], Tuple[float, float]\n ],\n ) -> Union[List[int], List[float], Tuple[int, int], Tuple[float, float]]:\n size = [size] * 2 if not isinstance(size, (list, tuple)) else size\n assert len(size) == 2\n return size",
"def validate(self, value):\n if value is None:\n msg = message_factory.get_message(\n 'vapi.data.dynamicstruct.validate.mismatch',\n self._valid_types,\n 'None')\n return [msg]\n\n if value.type not in self._valid_types:\n msg = message_factory.get_message(\n 'vapi.data.dynamicstruct.validate.mismatch',\n self._valid_types,\n value.type)\n return [msg]\n\n return None",
"def _check_field_length(self, field, value, options=None):\n options = options if options else field.GetOptions()\n for (option, setting) in options.ListFields():\n if option.name == \"length\":\n if self.__gt_by_type(value, setting):\n if hasattr(field, \"name\"):\n raise FieldTooLongException(\"The field '\" + field.name +\n \"' is bigger than the allowed \" + str(setting) + \" bytes\")\n else:\n raise FieldTooLongException(\"List element '\" + str(value) +\n \"' is bigger than the allowed \" + str(setting) + \" bytes\")",
"def testscfvaluetype(self):\r\n assert isinstance(self.data.scfvalues, list)\r\n assert isinstance(self.data.scfvalues[0], numpy.ndarray)",
"def run_validation(self, data=fields.empty):\n (is_empty_value, data) = self.validate_empty_values(data)\n if is_empty_value:\n return data\n value = self.to_internal_value(data)\n self.run_validators(value)\n if isinstance(value, list):\n return value\n try:\n value = data.replace(\"u'\", \"\\\"\")\n value = value.replace(\"'\", \"\\\"\")\n json.loads(value)\n except:\n raise serializers.ValidationError('Invalid Array')\n return value",
"def test_clean_moves_computed_fields_to_fields(self):\n name = 'test_field'\n if isinstance(self.form.computed_fields, (list, tuple)):\n self.form.computed_fields = self.form.get_computed_fields([name])\n computed_names = list(self.form.computed_fields.keys())\n field_names = list(self.form.fields.keys())\n field_data = {f_name: f\"input_{f_name}_{i}\" for i, f_name in enumerate(field_names)}\n field_data.update({name: f\"value_{f_name}_{i}\" for i, f_name in enumerate(computed_names)})\n original_data = self.form.data\n original_fields = self.form.fields\n original_computed_fields = self.form.computed_fields\n original_errors = getattr(self.form, '_errors', None)\n original_cleaned_data = getattr(self.form, 'cleaned_data', None)\n self.form.data = original_data.copy()\n self.form.fields = original_fields.copy()\n self.form.computed_fields = original_computed_fields.copy()\n self.form._errors = ErrorDict() if original_errors is None else original_errors.copy() # mimic full_clean\n populated_cleaned_data = deepcopy(original_cleaned_data or {})\n populated_cleaned_data.update(field_data)\n self.form.cleaned_data = populated_cleaned_data.copy() # ensure cleaned_data is present (mimic full_clean)\n final_cleaned_data = self.form.clean()\n\n self.assertIn(name, computed_names)\n self.assertNotIn(name, field_names)\n self.assertEqual(1, len(computed_names))\n self.assertIn(name, self.form.fields)\n self.assertNotEqual(original_cleaned_data, final_cleaned_data)\n\n self.form.data = original_data\n self.form.fields = original_fields\n self.form.computed_fields = original_computed_fields\n self.form._errors = original_errors\n self.form.cleaned_data = original_cleaned_data\n if original_errors is None:\n del self.form._errors\n if original_cleaned_data is None:\n del self.form.cleaned_data",
"def _validate_internal_value_list(self, value: Any):\n # Make sure the value is a list\n if not isinstance(value, list):\n raise TypeError(f\"Value must be a list\")\n\n # Make sure the list has the correct number of values in it\n length = len(value)\n if isinstance(self._nargs, int):\n if self._action == \"store\":\n if length != self._nargs:\n raise ValueError(f\"Requires list of length {self._nargs}\")\n else:\n if length == 0 or length % self._nargs != 0:\n raise ValueError(f\"Requires list of length a multiple of {self._nargs}\")\n elif self._nargs == \"+\":\n if length == 0:\n raise ValueError(f\"Requires at least one item\")\n\n if any(not isinstance(element, self._type) for element in value):\n raise TypeError(f\"All values must be of the declared type {self._type.__name__}\")",
"def test_hk_getdata_field_array_type(self):\n fields, _ = load_data(self._file)\n assert isinstance(fields['position'], np.ndarray)",
"def _extend_run_record_data_field(self, run_idx, run_record_key,\n field_name, field_data):\n\n records_grp = self.h5['{}/{}/{}'.format(RUNS, run_idx, run_record_key)]\n field = records_grp[field_name]\n\n # make sure this is a feature vector\n assert len(field_data.shape) > 1, \\\n \"field_data must be a feature vector with the same number of dimensions as the number\"\n\n # of datase new frames\n n_new_frames = field_data.shape[0]\n\n # check whether it is a variable length record, by getting the\n # record dataset dtype and using the checker to see if it is\n # the vlen special type in h5py\n if h5py.check_dtype(vlen=field.dtype) is not None:\n\n # if it is we have to treat it differently, since it\n # cannot be multidimensional\n\n # if the dataset has no data in it we need to reshape it\n if all([i == 0 for i in field.shape]):\n # initialize this array\n # if it is empty resize it to make an array the size of\n # the new field_data with the maxshape for the feature\n # dimensions\n field.resize( (n_new_frames,) )\n\n # set the new data to this\n for i, row in enumerate(field_data):\n field[i] = row\n\n # otherwise just add the data\n else:\n\n # resize the array but it is only of rank because\n # of variable length data\n field.resize( (field.shape[0] + n_new_frames, ) )\n\n # add each row to the newly made space\n for i, row in enumerate(field_data):\n field[(field.shape[0] - 1) + i] = row\n\n # if it is not variable length we don't have to treat it\n # differently\n else:\n\n # if this is empty we need to reshape the dataset to accomodate data\n if all([i == 0 for i in field.shape]):\n\n # check the feature shape against the maxshape which gives\n # the feature dimensions for an empty dataset\n assert field_data.shape[1:] == field.maxshape[1:], \\\n \"field feature dimensions must be the same, i.e. all but the first dimension\"\n\n # if it is empty resize it to make an array the size of\n # the new field_data with the maxshape for the feature\n # dimensions\n feature_dims = field.maxshape[1:]\n field.resize( (n_new_frames, *feature_dims) )\n\n # set the new data to this\n field[0:, ...] = field_data\n\n # otherwise just add the data\n else:\n # append to the dataset on the first dimension, keeping the\n # others the same, these must be feature vectors and therefore\n # must exist\n field.resize( (field.shape[0] + n_new_frames, *field.shape[1:]) )\n # add the new data\n field[-n_new_frames:, ...] = field_data",
"def __set__(self, instance, value):\n # Run process for the nested field type for each value in list\n instance._values[self.name] = [self.field.process(v) for v in value]",
"def test_fieldValueTypes(self):\n # tests for \"method\" and \"datetime\" values follow later on ...\n # booleans are not tested yet\n\n factory = self.root.manage_addProduct['Formulator']\n factory.manage_add('form', 'ValueTest')\n factory.manage_add('form2', 'ValueTest')\n form = self.root.form\n form.manage_addField('int_field', 'Test Integer Field', 'IntegerField')\n form.manage_addField('float_field', 'Test Float Field', 'FloatField')\n form.manage_addField('date_field', 'Test Date Field', 'DateTimeField')\n form.manage_addField('list_field', 'Test List Field', 'ListField')\n form.manage_addField(\n 'multi_field',\n 'Test Checkbox Field',\n 'MultiCheckBoxField')\n form.manage_addField('link_field', 'Test Link Field', 'LinkField')\n form.manage_addField('empty_field', 'Test Empty Field', 'StringField')\n int_field = form.int_field\n float_field = form.float_field\n date_field = form.date_field\n list_field = form.list_field\n multi_field = form.multi_field\n link_field = form.link_field\n empty_field = form.empty_field\n\n # XXX editing fields by messing with a fake request\n # -- any better way to do this?\n # (could assign to \"values\" directly ...)\n\n default_values = {'field_title': 'Test Title',\n 'field_display_width': '92',\n 'field_required': 'checked',\n 'field_enabled': 'checked',\n }\n try:\n form_values = default_values.copy()\n form_values.update({'field_default': 'None',\n 'field_required': '',\n })\n empty_field.manage_edit(REQUEST=TestRequest(form=form_values))\n\n form_values = default_values.copy()\n form_values.update({'field_default': '42',\n 'field_enabled': 'checked'})\n int_field.manage_edit(REQUEST=TestRequest(form=form_values))\n\n form_values = default_values.copy()\n form_values.update({'field_default': '1.7'})\n float_field.manage_edit(REQUEST=TestRequest(form=form_values))\n\n # XXX cannot test \"defaults to now\", as this may fail randomly\n form_values = default_values.copy()\n form_values.update({'field_input_style': 'list',\n 'field_input_order': 'mdy',\n 'field_date_only': '',\n 'field_css_class': 'test_css',\n 'field_time_separator': '$'})\n date_field.manage_edit(REQUEST=TestRequest(form=form_values))\n\n form_values = default_values.copy()\n form_values.update({'field_default': 'foo',\n 'field_size': '1',\n 'field_items': 'Foo | foo\\n Bar | bar'})\n list_field.manage_edit(REQUEST=TestRequest(form=form_values))\n\n form_values = default_values.copy()\n form_values.update(\n {'field_default': 'foo',\n 'field_size': '3',\n 'field_items': 'Foo | foo\\n Bar | bar\\nBaz | baz',\n 'field_orientation': 'horizontal',\n 'field_view_separator': '<br />\\n'})\n multi_field.manage_edit(REQUEST=TestRequest(form=form_values))\n\n form_values = default_values.copy()\n form_values.update({'field_default': 'http://www.absurd.org',\n 'field_required': '1',\n 'field_check_timeout': '5.0',\n 'field_link_type': 'external',\n })\n link_field.manage_edit(REQUEST=TestRequest(form=form_values))\n\n except ValidationError as e:\n self.fail('error when editing field %s; error message: %s' %\n (e.field_id, e.error_text))\n\n form2 = self.root.form2\n\n xml = formToXML(form)\n XMLToForm(xml, form2)\n\n self.assertEqualForms(form, form2)\n\n request = TestRequest()\n request.form['field_int_field'] = '42'\n request.form['field_float_field'] = '2.71828'\n request.form['subfield_date_field_month'] = '11'\n request.form['subfield_date_field_day'] = '11'\n # This field only allows ten years in the future, today 2023-03-14\n request.form['subfield_date_field_year'] = '2033'\n 
request.form['subfield_date_field_hour'] = '09'\n request.form['subfield_date_field_minute'] = '59'\n request.form['field_list_field'] = 'bar'\n request.form['field_multi_field'] = ['bar', 'baz']\n request.form['field_link_field'] = 'http://www.zope.org'\n try:\n result1 = form.validate_all(request)\n except FormValidationError as e:\n # XXX only render first error ...\n self.fail('error when editing form1, field %s; error message: %s' %\n (e.errors[0].field_id, e.errors[0].error_text))\n\n try:\n result2 = form2.validate_all(request)\n except FormValidationError as e:\n # XXX only render first error ...\n self.fail('error when editing form1, field %s; error message: %s' %\n (e.errors[0].field_id, e.errors[0].error_text))\n self.assertEqual(result1, result2)\n self.assertEqual(42, result2['int_field'])\n self.assertEqual(2.71828, result2['float_field'])\n\n # check link field timeout value\n self.assertEqual(link_field.get_value('check_timeout'),\n form2.link_field.get_value('check_timeout'))\n\n # XXX not tested: equal form validation failure on invalid input",
"def validate_test_value(cls, value):\n for validator in cls._meta.get_field_by_name('value')[0].validators:\n validator(value)",
"def split(self,\n allow_nonseq_value: bool = False) -> Sequence['DataSample']:\n # 1. split\n data_sample_list = [DataSample() for _ in range(len(self))]\n for k in self.all_keys():\n stacked_value = self.get(k)\n if isinstance(stacked_value, torch.Tensor):\n # split tensor shape like (N, *shape) to N (*shape) tensors\n values = [v for v in stacked_value]\n elif isinstance(stacked_value, LabelData):\n # split tensor shape like (N, *shape) to N (*shape) tensors\n labels = [l_ for l_ in stacked_value.label]\n values = [LabelData(label=l_) for l_ in labels]\n elif isinstance(stacked_value, DataSample):\n values = stacked_value.split()\n else:\n if is_splitable_var(stacked_value):\n values = stacked_value\n elif allow_nonseq_value:\n values = [deepcopy(stacked_value)] * len(self)\n else:\n raise TypeError(\n f'\\'{k}\\' is non-sequential data and '\n '\\'allow_nonseq_value\\' is False. Please check your '\n 'data sample or set \\'allow_nonseq_value\\' as True '\n f'to copy field \\'{k}\\' for all split data sample.')\n\n field = 'metainfo' if k in self.metainfo_keys() else 'data'\n for data, v in zip(data_sample_list, values):\n data.set_field(v, k, field_type=field)\n\n return data_sample_list",
"def clean(self, value):\n return [f.clean(v) for v,f in zip(value, self.fields)]",
"def test_construct_values_no_join_artifact_if_empty_value(self):\n constructor_fields = ('first', 'second', 'last', )\n values = ['FirstValue', 'SecondValue', 'LastValue']\n values[1] = ''\n expected = '_'.join(ea for ea in values if ea).casefold()\n cleaned_data = getattr(self.form, 'cleaned_data', {})\n cleaned_data.update(dict(zip(constructor_fields, values)))\n self.form.cleaned_data = cleaned_data\n actual = self.form.construct_value_from_values(constructor_fields)\n\n self.assertEqual('', self.form.cleaned_data['second'])\n self.assertEqual(expected, actual)\n self.assertEqual('firstvalue_lastvalue', actual)",
"def _parse_dynamic_length_records_metadata(self, reverse_record, original_record, null_table_length):\n if self.version > 3:\n reverse_record = reverse_record[null_table_length + 1:]\n # Not sure why we sometimes get an extra 0\n if len(reverse_record) > 1 and reverse_record[0] == 0:\n reverse_record = reverse_record[1:]\n return parse_relative_object_metadata_struct(reverse_record, version=self.version)\n # Parse relative metadata.\n # Metadata is from the end of the record(reverse_record is used here)\n variable_length_jump_table_cnt = (len(original_record) - 1) // 256\n reverse_record = reverse_record[null_table_length:]\n try:\n relative_record_metadata = parse_relative_object_metadata_struct(reverse_record,\n variable_length_jump_table_cnt,\n self.version)\n # relative_record_metadata = RELATIVE_OBJS.parse(reverse_record)\n # we use this offset in original_record so we have to update the length with the null_tables\n relative_record_metadata.relative_metadata_end = relative_record_metadata.relative_metadata_end + null_table_length\n except ConstructError:\n relative_record_metadata = None\n logging.error(\"Failed parsing record\")\n\n if relative_record_metadata and \\\n relative_record_metadata.variable_length_field_count != self.table_header.variable_columns:\n\n # best effort - try to find variable column count in the record and parse from there\n # this is limited to the 10 first bytes to reduce false positives.\n # most of the time iv'e seen this there was an extra DWORD before the actual metadata\n metadata_start = reverse_record.find(bytes([self.table_header.variable_columns]))\n if metadata_start != -1 and metadata_start < 10:\n reverse_record = reverse_record[metadata_start:]\n try:\n relative_record_metadata = parse_relative_object_metadata_struct(reverse_record,\n variable_length_jump_table_cnt,\n self.version)\n except ConstructError:\n logging.error(f\"Failed to parse record metadata: {original_record}\")\n relative_record_metadata.relative_metadata_end = relative_record_metadata.relative_metadata_end + \\\n metadata_start\n else:\n logging.warning(\n f\"Record did not parse correctly. Number of columns: {self.table_header.variable_columns}\"\n f\" number of parsed columns: {relative_record_metadata.variable_length_field_count}\")\n return None\n return relative_record_metadata"
]
| [
"0.5317404",
"0.5179199",
"0.5044",
"0.5017944",
"0.4996385",
"0.4966759",
"0.4951833",
"0.48719847",
"0.4859972",
"0.47839037",
"0.4748695",
"0.47472686",
"0.47458175",
"0.4743307",
"0.46897227",
"0.46752998",
"0.46727523",
"0.46621963",
"0.45767164",
"0.4572485",
"0.45438367",
"0.4530042",
"0.45297077",
"0.45171058",
"0.4496502",
"0.44959804",
"0.44863805",
"0.44851595",
"0.4452514",
"0.44418517"
]
| 0.6757192 | 0 |
Process File Occurrences data. File Occurrences are a bit weird, in that none of the fields are required. Because of this, there may be results where all of the fields are None, in which case we'll skip that result and not call add_file_occurrence. | def _process_file_occurrences(self, file_occurrences: list[FileOccurrenceTransformModel]):
for file_occurrence in file_occurrences or []:
expected_length = max(
map(
lambda f: len(self._process_metadata_transform_model(f)),
[
field
for field in (
file_occurrence.file_name,
file_occurrence.path,
file_occurrence.date,
)
if field is not None
],
)
)
file_name = self._process_metadata_transform_model(
file_occurrence.file_name, expected_length
)
path = self._process_metadata_transform_model(file_occurrence.path, expected_length)
date = self._process_metadata_transform_model(file_occurrence.date, expected_length)
param_keys = ['file_name', 'path', 'date']
params = [dict(zip(param_keys, p)) for p in zip(file_name, path, date)]
for kwargs in filter(bool, params): # get rid of empty dicts
self.add_file_occurrence(**self.util.remove_none(kwargs)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tests_ti_file_get_occurrences(self):\n file = cast(File, self.ti_helper.create_indicator())\n occurrence_names = ['pytest_occurrence_1', 'pytest_occurrence_2']\n file.add_occurrence(\n occurrence_names[0], (datetime.now() - timedelta(days=2)).isoformat(), '.'\n )\n file.add_occurrence(\n occurrence_names[1], (datetime.now() - timedelta(days=2)).isoformat(), '.'\n )\n length = 0\n for occurrence in file.occurrences():\n length += 1\n assert occurrence.get('fileName') in occurrence_names\n occurrence_names.remove(occurrence.get('fileName'))\n assert length == 2",
"def tests_ti_file_get_occurrence(self):\n file = cast(File, self.ti_helper.create_indicator())\n response = file.add_occurrence(\n 'pytest_occurrence', (datetime.now() - timedelta(days=2)).isoformat(), '.'\n )\n assert response.ok\n occurrence_id = response.json().get('data').get('fileOccurrence').get('id')\n response = file.occurrence(occurrence_id)\n if response is None:\n assert False, 'Occurrence not found'\n assert response.ok\n data = response.json().get('data').get('fileOccurrence')\n assert data.get('fileName') == 'pytest_occurrence'\n assert data.get('id') == occurrence_id",
"def tests_ti_file_add_occurrence(self):\n file = cast(File, self.ti_helper.create_indicator())\n response = file.add_occurrence(\n 'pytest_occurrence', (datetime.now() - timedelta(days=2)).isoformat(), '.'\n )\n assert response.ok\n data = response.json().get('data').get('fileOccurrence')\n assert data.get('fileName') == 'pytest_occurrence'",
"def process_file(self, data, filename):\n\n for relation in data:\n if \"start_date\" in relation[1]:\n start = convert_to_date(relation[1][\"start_date\"])\n end = convert_to_date(relation[1][\"end_date\"])\n\n if start <= START_DATE and end >= START_DATE:\n first = relation[1][\"first\"]\n second = relation[1][\"second\"]\n if first not in self.relations:\n self.add_tag(first)\n if second not in self.relations:\n self.add_tag(second)\n\n if relation[0] == \"vassal\": \n self.relations[first][\"vassal\"].append(second)\n self.relations[second][\"overlord\"].append(first)\n elif relation[0] == \"alliance\":\n self.relations[first][\"alliance\"].append(second)\n self.relations[second][\"alliance\"].append(first)\n elif relation[0] == \"dependency\":\n self.relations[first][\"tributary\"].append(second)\n self.relations[second][\"hegemon\"].append(first)\n elif relation[0] == \"guarantee\":\n self.relations[first][\"guaranteeing\"].append(second)\n self.relations[second][\"guarantor\"].append(first)\n elif relation[0] == \"union\":\n self.relations[first][\"junior\"].append(second)\n self.relations[second][\"senior\"].append(first)\n elif relation[0] == \"royal_marriage\":\n self.relations[first][\"marriage\"].append(second)\n self.relations[second][\"marriage\"].append(first)",
"def process(self, terms):\n for entry in self.files:\n try:\n logger.info('file - {0}'.format(entry.path))\n\n # notional output file path\n path_sentences = self.path.joinpath('{0}.csv'.format(entry.path.stem))\n path_summary = self.path.joinpath('{0}-summary.csv'.format(entry.path.stem))\n logger.info('will save to - {0}'.format(path_sentences.resolve()))\n\n reports = self.inspect_doc(entry, terms)\n\n # receiving a list of dicts\n # therefore pandas can package into a useful outcome\n if len(reports) > 0:\n frame_sentences = pd.DataFrame(reports)\n\n frame_sentences = frame_sentences[['page', 'term', 'sentence']]\n logger.info('saving sentence file to - {0}'.format(path_sentences.resolve()))\n frame_sentences.to_csv(str(path_sentences.resolve()))\n \n frame_summary = frame_sentences.pivot_table(\n index='page',\n columns='term',\n aggfunc='size',\n fill_value=0\n )\n logger.info('saving summary file to - {0}'.format(path_sentences.resolve()))\n frame_summary.to_csv(str(path_summary.resolve()))\n\n\n except Exception as e:\n logger.error(e)",
"def process_files(self):\n matcher = self.choose_algorithm()\n # process one file at the time for better memory management\n for i, element in enumerate(self.input):\n filepath, _ = element\n\n try:\n with open(filepath, \"r\", encoding=\"utf-8\") as readfile:\n for line in readfile:\n matcher.find_match(line, self.case_insensitive)\n\n # collect unreadeable files for error log\n except Exception:\n self.errors.append(str(filepath))\n\n # copy results and reset matcher for next file\n self.__results = matcher.results\n\n if self.counter:\n self.__results = matcher.counts\n\n matcher.reset()\n\n # output - print or json\n if self.results:\n self.output(element)\n\n # if json print progress bar\n if self.json:\n self.progress_bar(i+1, len(self.input), prefix=\"Matching:\",\n fixed_len=True, length=40)",
"def reduce( self, vid=None ):\n if vid is None:\n vid = self._vid\n rows = self._filetable.find_files( self.variableid, time_range=self.timerange,\n lat_range=self.latrange, lon_range=self.lonrange,\n level_range=self.levelrange )\n if rows==None or len(rows)<=0:\n # this belongs in a log file:\n print \"ERROR no data found for reduced variable\",self.variableid\n print \"in\",self.timerange, self.latrange, self.lonrange, self.levelrange\n print \"filetable is\",self._filetable\n return None\n\n # To make it even easier on the first cut, I won't worry about missing data and\n # anything else inconvenient, and I'll assume CF compliance.\n files = list(set([r.fileid for r in rows]))\n if len(files)>1:\n # Piece together the data from multiple files. That's what cdscan is for...\n # One problem is there may be more than one file family in the same\n # directory! If we see more than one at this point, the user wasn't\n # careful in his specifications. We'll just have to choose one.\n famdict = { f:self.extract_filefamilyname(f) for f in files }\n families = list(set([ famdict[f] for f in files ]))\n families.sort(key=len) # a shorter name is more likely to be what we want\n if len(families)==0:\n print \"ERROR. No data to reduce. files[0]=:\",files[0]\n return None\n elif len(families)>1:\n print \"WARNING: \",len(families),\" file families found, will use the first one:\",families\n fam = families[0]\n\n # We'll run cdscan to combine the multiple files into one logical file.\n # To save (a lot of) time, we'll re-use an xml file if a suitable one already exists.\n # To do this safely, incorporate the file list (names,lengths,dates) into the xml file name.\n famfiles = [f for f in files if famdict[f]==fam]\n famfiles.sort() # improves consistency between runs\n file_list = '-'.join(\n [ f+'size'+str(os.path.getsize(f))+'mtime'+str(os.path.getmtime(f))\\\n for f in famfiles ] )\n csum = hashlib.md5(file_list).hexdigest()\n xml_name = fam+'_cs'+csum+'.xml'\n if os.path.isfile( xml_name ):\n files = [ xml_name ]\n\n if len(files)>1:\n famfiles = [f for f in files if famdict[f]==fam]\n # Normally when we get here, it's because data has been divided by time among\n # several files. So when cdscan puts it all back together, it needs the time\n # units. If the time variable is named 'time' and has a valid 'units'\n # attribute, we're fine; otherwise we're in trouble. But for some AMWG obs\n # data which I have, the time units may be found in the long_name attribute.\n # The -e option will normally be the way to fix it up, but maybe the -r option\n # could be made to work.\n \n # I know of no exception to the rule that all files in the file family keep their\n # units in the same place; so find where they are by checking the first file.\n f = cdms2.open( famfiles[0] )\n time_units = f['time'].units\n if type(time_units) is str and len(time_units)>3:\n # cdscan can get time units from the files; we're good.\n f.close()\n cdscan_line = 'cdscan -q '+'-x '+xml_name+' '+' '.join(famfiles)\n else:\n # cdscan needs to be told what the time units are. I'm betting that all files\n # use the same units. I know of cases where they all have different units (e.g.,\n # GISS) but in all those cases, the units attribute is used properly, so we don't\n # get here.\n # Another problem is that units stuck in the long_name sometimes are\n # nonstandard. 
So fix them!\n if hasattr(f['time'],'long_name'):\n time_units = f['time'].long_name\n else:\n time_units = 'days' # probably wrong but we can't go on without something\n # Usually when we get here it's a climatology file where time is meaningless.\n f.close()\n if type(time_units) is str and len(time_units)>1 and (\n time_units.find('months')==0 or time_units.find('days')==0 or\n time_units.find('hours')==0 ):\n time_units = fix_time_units( time_units )\n cdscan_line = 'cdscan -q '+'-x '+xml_name+' -e time.units=\"'+time_units+'\" '+\\\n ' '.join(famfiles)\n else:\n print \"WARNING, cannot find time units; will try to continue\",famfiles[0]\n cdscan_line = 'cdscan -q '+'-x '+xml_name+' -e time.units=\"'+time_units+'\" '+\\\n ' '.join(famfiles)\n print \"cdscan_line=\",cdscan_line\n proc = subprocess.Popen([cdscan_line],shell=True)\n proc_status = proc.wait()\n if proc_status!=0: print \"ERROR: cdscan terminated with\",proc_status\n f = cdms2.open( xml_name )\n else:\n # the easy case, just one file has all the data on this variable\n f = cdms2.open(files[0])\n fcf = get_datafile_filefmt(f)\n reduced_data = self._reduction_function( f(self.variableid), vid=vid )\n if reduced_data is not None:\n reduced_data._vid = vid\n f.close()\n return reduced_data",
"def process(self):\n level = self.parameter['level-of-operation']\n assert_file_grp_cardinality(self.input_file_grp, 1)\n assert_file_grp_cardinality(self.output_file_grp, 1)\n\n for (n, input_file) in enumerate(self.input_files):\n self.logger.info(\"INPUT FILE %i / %s\", n, input_file.pageId or input_file.ID)\n file_id = make_file_id(input_file, self.output_file_grp)\n\n pcgts = page_from_file(self.workspace.download_file(input_file))\n self.add_metadata(pcgts)\n page_id = pcgts.pcGtsId or input_file.pageId or input_file.ID # (PageType has no id)\n page = pcgts.get_Page()\n \n page_image, page_xywh, page_image_info = self.workspace.image_from_page(\n page, page_id, feature_filter='binarized')\n if self.parameter['dpi'] > 0:\n zoom = 300.0/self.parameter['dpi']\n elif page_image_info.resolution != 1:\n dpi = page_image_info.resolution\n if page_image_info.resolutionUnit == 'cm':\n dpi *= 2.54\n self.logger.info('Page \"%s\" uses %f DPI', page_id, dpi)\n zoom = 300.0/dpi\n else:\n zoom = 1\n \n if level == 'page':\n self.process_page(page, page_image, page_xywh, zoom,\n input_file.pageId, file_id)\n else:\n if level == 'table':\n regions = page.get_TableRegion()\n else: # region\n regions = page.get_AllRegions(classes=['Text'], order='reading-order')\n if not regions:\n self.logger.warning('Page \"%s\" contains no text regions', page_id)\n for region in regions:\n region_image, region_xywh = self.workspace.image_from_segment(\n region, page_image, page_xywh, feature_filter='binarized')\n if level == 'region':\n self.process_region(region, region_image, region_xywh, zoom,\n input_file.pageId, file_id + '_' + region.id)\n continue\n lines = region.get_TextLine()\n if not lines:\n self.logger.warning('Page \"%s\" region \"%s\" contains no text lines',\n page_id, region.id)\n for line in lines:\n line_image, line_xywh = self.workspace.image_from_segment(\n line, region_image, region_xywh, feature_filter='binarized')\n self.process_line(line, line_image, line_xywh, zoom,\n input_file.pageId, region.id,\n file_id + '_' + region.id + '_' + line.id)\n\n # update METS (add the PAGE file):\n file_path = os.path.join(self.output_file_grp, file_id + '.xml')\n pcgts.set_pcGtsId(file_id)\n out = self.workspace.add_file(\n ID=file_id,\n file_grp=self.output_file_grp,\n pageId=input_file.pageId,\n local_filename=file_path,\n mimetype=MIMETYPE_PAGE,\n content=to_xml(pcgts))\n self.logger.info('created file ID: %s, file_grp: %s, path: %s',\n file_id, self.output_file_grp, out.local_filename)",
"def finalize(param, input_files='count_files'):\n\n import csv\n HELPER.writeLog('Collecting featureCount raw counts ... \\n', param)\n\n #check which of these files are actually available\n working_files = [iFile for iFile in param[input_files] if iFile != '']\n\n if len(working_files) > 0:\n #get feature ID using the first column in the first file in the list of working files\n csv_file = open(working_files[0])\n csv_reader = csv.reader(csv_file, delimiter='\\t')\n\n #For featureCount output, we want to skip the first two lines as they\n #include the featureCount call and the headers which we don't want\n next(csv_reader, None)\n next(csv_reader, None)\n\n #Now start by taking the list of identifier,\n #which is the first column in the file\n counts = [row[0] for row in csv_reader]\n csv_file.close()\n\n #get all the expression values\n header = 'ID'\n for idx in range(param['num_samples']):\n if param[input_files] != '':\n header = header+'\\t'+param['stub'][idx]\n csv_file = open(param[input_files][idx])\n csv_reader = csv.reader(csv_file, delimiter='\\t')\n\n #Here too we want to skip the first two lines, before getting the counts\n next(csv_reader, None)\n next(csv_reader, None)\n #Now start getting the counts (row[6]) and add in the ID (counts[i]) before it\n idx = 0\n for row in csv_reader:\n counts[idx] = counts[idx]+'\\t'+row[6]\n idx += 1\n csv_file.close()\n\n #output the file\n out_file = param['working_dir']+'deliverables/featureCount_raw_counts.txt'\n out_handle = open(out_file, 'w')\n out_handle.write(header+'\\n')\n\n for i in range(len(counts)):\n out_handle.write(counts[i]+'\\n')\n out_handle.close()\n\n #output_phenotype_file\n HELPER.writeLog('Writing phenotype data ... \\n', param)\n MODULE_HELPER.output_sample_info(param)\n\n #write summary stats\n #featureCount does this on its own so we can just fetch each summary file\n #check which of these files are actually available\n working_files = [iFile+'.summary' for iFile in param[input_files] if iFile != '']\n\n if len(working_files) > 0:\n #get Status column from summary file using the first column in\n #the first file in the list of working files\n csv_file = open(working_files[0])\n csv_reader = csv.reader(csv_file, delimiter='\\t')\n #Here, we want to skip the first line, as it simply points to the\n #alignment file used when running featureCount\n next(csv_reader, None)\n\n #Now start by taking the list of identifier,\n #which is the first column in the file\n entry = [row[0] for row in csv_reader]\n csv_file.close()\n\n #get all the summary stats for each sample\n header = 'Status'\n\n for idx in range(param['num_samples']):\n if param[input_files] != '':\n header = header+'\\t'+param['stub'][idx]\n #Fetch the corresponding sample's summary file\n csv_file = open(param[input_files][idx]+'.summary')\n csv_reader = csv.reader(csv_file, delimiter='\\t')\n #Again, we want to skip the first line, as it simply points\n #to the alignment file used when running featureCount\n next(csv_reader, None)\n\n #Now start getting the stats (row[1]) and add in the Status\n # (counts[i]) before it\n i = 0\n for row in csv_reader:\n entry[i] = entry[i]+'\\t'+row[1]\n i += 1\n csv_file.close()\n #output the file\n out_handle = open(param['working_dir']+\n 'results/featureCount/featureCount_stats.txt',\n 'w')\n out_handle.write(header+'\\n')\n\n for i in range(len(entry)):\n out_handle.write(entry[i]+'\\n')\n out_handle.close()\n else:\n print 'featureCount was not run successfully on any of the files..\\n'",
"def _process_result_file_name_aggregated(\n self,\n test,\n dupes,\n vuln_ids_from_tool,\n findingdetail,\n query,\n result,\n find_date,\n ):\n name, cwe, categories, queryId = self.getQueryElements(query)\n titleStart = query.get(\"name\").replace(\"_\", \" \")\n description, lastPathnode = self.get_description_file_name_aggregated(\n query, result\n )\n sinkFilename = lastPathnode.find(\"FileName\").text\n if sinkFilename:\n title = \"{} ({})\".format(titleStart, sinkFilename.split(\"/\")[-1])\n else:\n title = titleStart\n false_p = result.get(\"FalsePositive\")\n sev = result.get(\"Severity\")\n aggregateKeys = \"{}{}{}\".format(cwe, sev, sinkFilename)\n state = result.get(\"state\")\n active = self.isActive(state)\n verified = self.isVerified(state)\n\n if not (aggregateKeys in dupes):\n find = Finding(\n title=title,\n cwe=int(cwe),\n test=test,\n # active, verified and false_p may be overwritten later by\n # another member of the aggregate, see \"else\" below\n active=active,\n verified=verified,\n false_p=(false_p == \"True\"),\n # Concatenates the query information with this specific finding\n # information\n description=findingdetail + description,\n severity=sev,\n file_path=sinkFilename,\n # No line number because we have aggregated different\n # vulnerabilities that may have different line numbers\n date=find_date,\n static_finding=True,\n nb_occurences=1,\n )\n dupes[aggregateKeys] = find\n # a list containing the vuln_id_from_tool values. They are\n # formatted once we have analysed all the findings\n vuln_ids_from_tool[aggregateKeys] = [queryId]\n else:\n # We have already created a finding for this aggregate: updates the\n # description and the nb_occurences\n find = dupes[aggregateKeys]\n find.nb_occurences = find.nb_occurences + 1\n if find.nb_occurences == 2:\n find.description = \"### 1. {}\\n{}\".format(\n find.title, find.description\n )\n find.description = \"{}\\n\\n-----\\n### {}. {}\\n{}\\n{}\".format(\n find.description,\n find.nb_occurences,\n title,\n findingdetail,\n description,\n )\n if queryId not in vuln_ids_from_tool[aggregateKeys]:\n vuln_ids_from_tool[aggregateKeys].append(queryId)\n # If at least one of the findings in the aggregate is exploitable,\n # the defectdojo finding should not be \"false positive\"\n if false_p == \"False\":\n dupes[aggregateKeys].false_p = False\n # If at least one of the findings in the aggregate is active, the\n # defectdojo finding should be active\n if active:\n dupes[aggregateKeys].active = True\n # If at least one of the findings in the aggregate is verified, the\n # defectdojo finding should be verified\n if verified:\n dupes[aggregateKeys].verified = True",
"def _process(self):\n self.kwargs[\"collect\"].process_scan_form_data(self.kwargs[\"data\"])",
"def process_all(fileinfos, args):\n # create overall figure\n count_and_draw(fileinfos,args)\n # create figures for all the files\n for key in fileinfos:\n count_and_draw(fileinfos,args,key)\n # create figures for all the elements\n els_processed = []\n for key in fileinfos:\n for key in fileinfos[key][\"usage_el\"]:\n if key not in els_processed:\n count_and_draw(fileinfos,args,key)\n els_processed.append(key)\n # create figures for all the attributes\n atts_processed = []\n for key in fileinfos:\n for key in fileinfos[key][\"usage_att\"]:\n if key not in atts_processed:\n count_and_draw(fileinfos,args,\"@\"+key)\n atts_processed.append(key)",
"def fileAgglomeration(self, dataset: list):\n result = dict()\n\n startTimeForAgglomeration = time.clock_gettime(time.CLOCK_THREAD_CPUTIME_ID)\n print(\"CPU Model,Index, Filename, Elapsed Time\")\n for idx, e in enumerate(dataset):\n # CPU TIME\n startTime = time.clock_gettime(time.CLOCK_THREAD_CPUTIME_ID)\n result[idx] = self._count_occurrences(filename=e)\n endTime = time.clock_gettime(time.CLOCK_THREAD_CPUTIME_ID)\n\n # CPU Model, Index, Filename, Time Taken Processing File\n fileName = e.split(\"/\")[-1]\n print(f\"{self.cpuModel},{idx + 1},{fileName},{endTime - startTime}\") # Logger ^ Markdown\n\n endTimeForAgglomeration = time.clock_gettime(time.CLOCK_THREAD_CPUTIME_ID)\n print(\n f\"Total Files Aggregated: {len(dataset)} and total {endTimeForAgglomeration - startTimeForAgglomeration} seconds elapsed.\")\n\n return result",
"def load_file_data_from_db(self):\n\n file_objs = self.file_queryset.filter(sip=self.sip, removedtime__isnull=True)\n for file_obj in self._batch_query(file_objs):\n self.file_events = get_file_events(file_obj)\n if not self.file_events:\n return\n try:\n # merge the map_file_data dict with the map_av_data\n mapped_file_info = merge_file_data_dicts(\n map_file_data(file_obj, self.file_events), map_av_data(file_obj)\n )\n self.md_info[\"files\"].append(mapped_file_info)\n self.md_info[\"premis:size\"] = create_package_size(\n mapped_file_info[\"premis:size\"]\n )\n self.md_info[\"amount_of_files\"] += 1\n failed_virus_checks = get_failed_virus_checks(self.file_events)\n if failed_virus_checks:\n self.md_info[\"virus_scan_info\"][\"failed_virus_checks\"].append(\n failed_virus_checks\n )\n passed_virus_checks = get_passed_virus_checks(self.file_events)\n # add info virus_scan_tools if they passed and respect\n # different tools and versions if needed.\n if (\n passed_virus_checks\n and passed_virus_checks\n not in self.md_info[\"virus_scan_info\"][\"virus_scan_tools\"]\n ):\n self.md_info[\"virus_scan_info\"][\"virus_scan_tools\"].append(\n passed_virus_checks\n )\n except KeyError:\n logger.info(\n \"File is no longer present on the filesystem: %s\",\n file_obj.currentlocation,\n )\n continue",
"def process(self, fields):\n if fields['size'] > self.size:\n # Add files to the warning set\n self.warning_files.add(\n (fields['file_requested'], fields['size'])\n )\n\n # We want to keep track of the longest file\n # name, for formatting later.\n fs = len(fields['file_requested'])\n if fs > self.name_size:\n self.name_size = fs",
"def count_aggregate(self):\n count = FileCount()\n if self._status == FileStatus.EMPTY:\n count.found_empty_file()\n else:\n for expd in self._expected_docstrings:\n if expd.ignore_reason:\n pass # Ignores will be counted in a future version\n elif expd.has_docstring:\n count.found_needed_docstr()\n else:\n count.missed_needed_docstring()\n return count",
"def process_all():\n\tconfilepath = check_args()\n\tif confilepath != \"\": #check arguments and sets some global variables \n\t\tconfig = read_conf(confilepath) #read config-file\n\t\tinst = get_hgf_institute(config) #check which hgf-institute\n\t\tbuild_or_remove_fielddesc(config) #create/delete fielddescriptors (fields + marctags)\n\t\tinsert_repnr_fielddesc(inst) #report number as hidden input in submit \n\t\tbuild_or_remove_doctypes(config,inst) #create/delete doctypes\n\t\tbuild_or_remove_schema(config) #create/delete collections for submit form\n\t\tgenerate_css(fieldlabels,inst) #create css_file \n\telse: pass",
"def tests_ti_file_delete_occurrence(self):\n file = cast(File, self.ti_helper.create_indicator())\n response = file.add_occurrence(\n 'pytest_occurrence', (datetime.now() - timedelta(days=2)).isoformat(), '.'\n )\n occurrence_id = response.json().get('data').get('fileOccurrence').get('id')\n response = file.occurrence(occurrence_id)\n if response is None:\n assert False, 'Occurrence not found'\n assert response.ok",
"def parse_documents():\n\n\tcount_before = control.find().count()\n\n\tprint \"There are currently %i unprocessed records.\" % count_before\n\n\t#dispatch\n\t# executor = concurrent.futures.ThreadPoolExecutor(10)\n\t# futures = [executor.submit(analyze_message, document) for document in control.find()]\n\t# concurrent.futures.wait(futures)\n\n\tfor document in control.find():\n\t\tanalyze_message(document)\n\n\tcount_after = control.count()\n\tprint \"There are now %i stored records.\" % control.count()",
"def refresh_record_from_data(record, exclude_file_nos = []):\n record.label = record.data.get(\"Defendant\", {}).get(\"Name\", \"\")\n record.file_no = record.data.get(\"General\", {}).get(\"File No\", \"\")\n record.county = record.data.get(\"General\", {}).get(\"County\", \"\")\n record.dob = record.data.get(\"Defendant\", {}).get(\n \"Date of Birth/Estimated Age\", None\n )\n record.sex = record.data.get(\"Defendant\", {}).get(\"Sex\", constants.NOT_AVAILABLE)\n record.race = record.data.get(\"Defendant\", {}).get(\"Race\", \"\")\n record.case_status = record.data.get(\"Case Information\", {}).get(\"Case Status\", \"\")\n record.offense_date = make_datetime_aware(\n record.data.get(\"Case Information\", {}).get(\"Offense Date\", None)\n )\n record.arrest_date = record.data.get(\"Case Information\", {}).get(\n \"Arrest Date\", dt_obj_to_date(record.offense_date)\n )\n record.jurisdiction = get_jurisdiction(record)\n record.has_additional_offenses = \"Additional offenses exist\" in record.data.get(\"_meta\", {}).get(\"source\", {})\n\n if exclude_file_nos and record.file_no in exclude_file_nos:\n logger.warning(f\"Not saving ciprs record {record.file_no} (most likely because it's a duplicate).\")\n return\n\n logger.info(f\"Saving ciprs record {record.file_no}\")\n record.save()\n refresh_offenses(record)",
"def process(self):\r\n\r\n index = cindex.Index.create()\r\n self.headers = {}\r\n\r\n for f in self.files:\r\n if f in self.processed:\r\n continue\r\n\r\n print \"Processing `%s'\" % (os.path.basename(f),)\r\n\r\n tu = index.parse(f, self.flags)\r\n\r\n if len(tu.diagnostics) != 0:\r\n fatal = False\r\n\r\n for d in tu.diagnostics:\r\n sys.stderr.write(d.format)\r\n sys.stderr.write(\"\\n\")\r\n\r\n if d.severity == cindex.Diagnostic.Fatal or \\\r\n d.severity == cindex.Diagnostic.Error:\r\n fatal = True\r\n\r\n if fatal:\r\n sys.stderr.write(\"\\nCould not generate documentation due to parser errors\\n\")\r\n sys.exit(1)\r\n\r\n if not tu:\r\n sys.stderr.write(\"Could not parse file %s...\\n\" % (f,))\r\n sys.exit(1)\r\n\r\n # Extract comments from files and included files that we are\r\n # supposed to inspect\r\n extractfiles = [f]\r\n\r\n for inc in tu.get_includes():\r\n filename = str(inc.include)\r\n self.headers[filename] = True\r\n\r\n if filename in self.processed or (not filename in self.files) or filename in extractfiles:\r\n continue\r\n\r\n extractfiles.append(filename)\r\n\r\n for e in extractfiles:\r\n db = comment.CommentsDatabase(e, tu)\r\n\r\n self.add_categories(db.category_names)\r\n self.commentsdbs[e] = db\r\n\r\n self.visit(tu.cursor.get_children())\r\n\r\n for f in self.processing:\r\n self.processed[f] = True\r\n\r\n self.processing = {}\r\n\r\n # Construct hierarchy of nodes.\r\n for node in self.all_nodes:\r\n q = node.qid\r\n\r\n if node.parent is None:\r\n par = self.find_parent(node)\r\n\r\n # Lookup categories for things in the root\r\n if (par is None or par == self.root) and (not node.cursor is None):\r\n location = node.cursor.extent.start\r\n db = self.commentsdbs[location.file.name]\r\n\r\n if db:\r\n par = self.category_to_node[db.lookup_category(location)]\r\n\r\n if par is None:\r\n par = self.root\r\n\r\n par.append(node)\r\n\r\n # Resolve comment\r\n cm = self.find_node_comment(node)\r\n\r\n if cm:\r\n node.merge_comment(cm)\r\n\r\n # Keep track of classes to resolve bases and subclasses\r\n classes = {}\r\n\r\n # Map final qid to node\r\n for node in self.all_nodes:\r\n q = node.qid\r\n self.qid_to_node[q] = node\r\n\r\n if isinstance(node, nodes.Class):\r\n classes[q] = node\r\n\r\n # Resolve bases and subclasses\r\n for qid in classes:\r\n classes[qid].resolve_bases(classes)\r\n\r\n self.markup_code(index)",
"def get_additional_data_from_files(df, file_description): # file description one of [\"video\", \"eaf\", \"seg\", \"gentle\"]\n if file_description == \"gentle\":\n file_folder = FILE_BASE + \"/gentle/\"\n is_gentle_file = True\n else:\n file_folder = FILE_BASE + \"/original/\"\n is_gentle_file = False\n\n file_df = None\n\n if file_description not in list(FILE_DESCRIPTIONS_TO_EXT.keys()):\n print(\"Unknown file description! Don't know what to do with %s files...\" % file_description)\n return None\n\n else:\n print(\"Load and extract information from %s files...\" % file_description)\n #pbar = tqdm.tqdm(total = len(np.unique(df[\"source_file\"])),desc='Files', position=0,leave=True,file=sys.stdout)\n #file_log = tqdm.tqdm(total=0, position=1, bar_format='{desc}',leave=True,file=sys.stdout)\n print(\"Total files to laod and preprocess: \", len(np.unique(df[\"source_file\"])))\n \n for i,file in enumerate(np.unique(df[\"source_file\"])):\n if i%100 == 0:\n print(\"File: \",i)\n \n filepath = file_folder + get_file_path(file,is_gentle_file=is_gentle_file) + FILE_DESCRIPTIONS_TO_EXT[file_description]\n\n if file_description == \"video\":\n file_i_df = mp4_file_processing.get_word_video_snippet_size(df, filepath)\n elif file_description == \"eaf\":\n speech_annotation_eaf_data, gesture_eaf_data = eaf_file_processing.read_eaf(filepath)\n file_i_df = eaf_file_processing.map_gestures_to_annotation(speech_annotation_eaf_data, gesture_eaf_data, remove_pauses=False)\n file_i_df = eaf_file_processing.binary_encode_gestures(file_i_df, gesture_column=\"gesture\")\n\n elif file_description == \"seg\":\n file_i_df = seg_file_processing.get_seg_file_pos_info(filepath)\n\n elif file_description == \"gentle\":\n file_i_df = gentle_file_processing.get_gentle_file_transcripts(filepath)\n \n else:\n print(\"Unknown file format!!!\")\n return \n\n if file_df is None:\n file_df = file_i_df\n else:\n file_df = pd.concat([file_df, file_i_df], ignore_index=True)\n\n #file_log.set_description_str(f'Processed file: {file}')\n #pbar.update(1)\n #sleep(0.02)\n #file_log.close()\n #pbar.close()\n return file_df",
"def data_process(self):\n logging.info('Processing the data and split files')\n lines = Utility.file_len(self.fname)\n self.lines_to_be, self.split_files = Utility.split_files(self.fname, lines,\n cpu_count().real)",
"def e_related_docs(self, files, storedfile=None):\n results = {}\n for f in files:\n with open(f) as df:\n daily_info = json.load(df)\n for country in daily_info:\n results.setdefault(country, {})\n for day in daily_info[country]:\n results[country].setdefault(day, 0)\n results[country][day] += daily_info[\n country][day][\"count\"]\n if storedfile:\n with open(storedfile, \"w\") as sf:\n json.dump(results, sf)\n return results",
"def process_stat_files(param):\n\n #get the files that are actually in the output directory\n call = ['cp', '-R']\n call.append(param['working_dir']+'results/featureCount/')\n call.append(param['working_dir']+'report/')\n _, _ = subprocess.Popen(call,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE).communicate()\n\n featurecount_file = (param['working_dir']+\n 'results/featureCount/featureCount_stats.txt')\n #extract table\n table = []\n filehandle = open(featurecount_file)\n #header\n table.append(filehandle.readlines()[0].rstrip().split('\\t'))\n table[0] = table[0][1:]\n filehandle.close()\n\n #total number of aligned reads\n tot_reads = param['bam_qc']['unique_aligned_reads']\n counter = [0] * len(param['bam_qc']['unique_aligned_reads'])\n \n filehandle = open(featurecount_file)\n for line in filehandle.readlines()[1:]:\n cur_line = line.rstrip().split('\\t')\n cur_line[0] = re.sub(r'_',' ',cur_line[0])\n if cur_line[0] not in ['Unassigned MultiMapping','Assigned']:\n counter = [ct + int(cr) for ct, cr in zip(counter, cur_line[1:])]\n perc = ([cur_line[0]]+\n MODULE_HELPER.get_percentage(cur_line[1:],\n tot_reads,\n len(cur_line)-1))\n table.append(perc)\n filehandle.close()\n assigned = [tot_reads[idx] - counter[idx] for idx in range(len(tot_reads))]\n perc = ['Assigned'] + MODULE_HELPER.get_percentage(assigned,\n tot_reads,\n len(counter))\n return table",
"def process_file(self):\n self._processing_logger.log_info('Start processing')\n self.parsing_start_time = datetime.datetime.now()\n if os.path.exists(self.tmp_stat_file_path) \\\n and not HcsParsingUtils.active_processing_exceed_timeout(self.tmp_stat_file_path):\n self._processing_logger.log_info('This file is processed by another parser, skipping...')\n return 2\n self.create_tmp_stat_file()\n hcs_index_file_path = self.hcs_root_dir + MEASUREMENT_INDEX_FILE_PATH\n time_series_details = self._extract_time_series_details(hcs_index_file_path)\n self.generate_ome_xml_info_file()\n xml_info_tree = ET.parse(self.ome_xml_info_file_path).getroot()\n plate_width, plate_height = self._get_plate_configuration(xml_info_tree)\n wells_tags = self.read_wells_tags()\n if wells_tags:\n self._processing_logger.log_info(\"Tags \" + str(wells_tags))\n if not TAGS_PROCESSING_ONLY and not EVAL_PROCESSING_ONLY:\n if not self._localize_related_files():\n self._processing_logger.log_info('Some errors occurred during copying files from the bucket, exiting...')\n return 1\n else:\n self._processing_logger.log_info('Localization is finished.')\n local_preview_dir = os.path.join(self.tmp_local_dir, 'preview')\n hcs_local_index_file_path = get_path_without_trailing_delimiter(self.tmp_local_dir) \\\n + MEASUREMENT_INDEX_FILE_PATH\n for sequence_id, timepoints in time_series_details.items():\n self._processing_logger.log_info('Processing sequence with id={}'.format(sequence_id))\n sequence_index_file_path = self.extract_sequence_data(sequence_id, hcs_local_index_file_path)\n conversion_result = os.system('bash \"{}\" \"{}\" \"{}\" {}'.format(\n OME_TIFF_SEQUENCE_CREATION_SCRIPT, sequence_index_file_path, local_preview_dir, sequence_id))\n if conversion_result != 0:\n self._processing_logger.log_info('File processing was not successful...')\n return 1\n sequence_overview_index_file_path, wells_grid_mapping = self.build_sequence_overview_index(sequence_index_file_path)\n conversion_result = os.system('bash \"{}\" \"{}\" \"{}\" {} \"{}\"'.format(\n OME_TIFF_SEQUENCE_CREATION_SCRIPT, sequence_overview_index_file_path, local_preview_dir,\n sequence_id, 'overview_data.ome.tiff'))\n if conversion_result != 0:\n self._processing_logger.log_info('File processing was not successful: well preview generation failure')\n return 1\n self.write_dict_to_file(os.path.join(local_preview_dir, sequence_id, 'wells_map.json'),\n self.build_wells_map(sequence_id, wells_grid_mapping, wells_tags))\n if LOCALIZE_USE_PIPE == \"true\":\n cloud_transfer_result = os.system('pipe storage cp -f -r \"{}\" \"{}\"'\n .format(local_preview_dir,\n HcsParsingUtils.extract_cloud_path(self.hcs_img_service_dir)))\n else:\n cloud_transfer_result = os.system('aws s3 sync \"{}\" \"{}\"'\n .format(local_preview_dir,\n HcsParsingUtils.extract_cloud_path(self.hcs_img_service_dir)))\n if cloud_transfer_result != 0:\n self._processing_logger.log_info('Results transfer was not successful...')\n return 1\n self._write_hcs_file(time_series_details, plate_width, plate_height)\n if not EVAL_PROCESSING_ONLY:\n tags_processing_result = self.try_process_tags(xml_info_tree, wells_tags)\n if TAGS_PROCESSING_ONLY:\n if wells_tags:\n for sequence_id, timepoints in time_series_details.items():\n path = os.path.join(self.hcs_img_service_dir, sequence_id, 'wells_map.json')\n self.write_dict_to_file(path, self.update_wells_json(path, wells_tags))\n return tags_processing_result\n if not TAGS_PROCESSING_ONLY:\n eval_processing_result = self.try_process_eval()\n if 
EVAL_PROCESSING_ONLY:\n return eval_processing_result\n self.create_stat_file()\n return 0",
"def add_to_db(self):\r\n for filename in self.new_data_files:\r\n unique_name = form_unique_name(filename)\r\n extracted_date = extract_date(filename)\r\n if extracted_date is not None:\r\n # If we can parse the date from the filename we parse the file\r\n file_ = File(filename, unique_name, extracted_date)\r\n content = file_.get_content()\r\n for element in content:\r\n # If each of the spectra in the file has data, we\r\n # add it to the data base\r\n if element[1] is not None:\r\n self.add_to_db_single(element)\r\n status_msg('Elements of file {0} added to db'.format(\r\n unique_name), True)\r\n else:\r\n status_msg('File {0} not added, unknown filename format'.\r\n format(unique_name), False)",
"def process_one_file(\n file_for_processing: FileForProcessing, survey_id_dict: dict, all_binified_data: DefaultDict,\n ftps_to_remove: set\n):\n \n if file_for_processing.exception:\n file_for_processing.raise_data_processing_error()\n \n # there are two cases: chunkable data that can be stuck into \"time bins\" for each hour, and\n # files that do not need to be \"binified\" and pretty much just go into the ChunkRegistry unmodified.\n if file_for_processing.chunkable:\n process_chunkable_file(file_for_processing, survey_id_dict, all_binified_data, ftps_to_remove)\n else:\n process_unchunkable_file(file_for_processing, ftps_to_remove)",
"def analyze_files(self):\n for file in os.listdir(self.directory):\n if file[-3:] == (\".py\"):\n fopen = open(os.path.join(self.directory, file), \"r\")\n try:\n if not (py_file := fopen):\n raise FileNotFoundError\n\n with py_file: # close file after opening\n class_count: int = 0\n fun_count: int = 0\n l_count: int = 0\n ch_count: int = 0\n for line in py_file: # calculate values for the file\n if line.strip().startswith(\"class \"):\n class_count = class_count+1\n elif line.strip().startswith(\"def \"):\n fun_count = fun_count+1\n\n l_count = l_count+1\n ch_count = ch_count+len(line)\n\n self.files_summary[str(os.path.join(self.directory, file))] = {\"class\": class_count, \"function\": fun_count, \"line\": l_count,\n \"char\": ch_count}\n except FileNotFoundError:\n print(f\"File {py_file} is not found or can not be opened\")\n fopen.close()",
"def _log_file_processing_stats(self, known_file_paths):\n # File Path: Path to the file containing the DAG definition\n # PID: PID associated with the process that's processing the file. May\n # be empty.\n # Runtime: If the process is currently running, how long it's been\n # running for in seconds.\n # Last Runtime: If the process ran before, how long did it take to\n # finish in seconds\n # Last Run: When the file finished processing in the previous run.\n headers = [\"File Path\", \"PID\", \"Runtime\", \"# DAGs\", \"# Errors\", \"Last Runtime\", \"Last Run\"]\n\n rows = []\n now = timezone.utcnow()\n for file_path in known_file_paths:\n last_runtime = self.get_last_runtime(file_path)\n num_dags = self.get_last_dag_count(file_path)\n num_errors = self.get_last_error_count(file_path)\n file_name = os.path.basename(file_path)\n file_name = os.path.splitext(file_name)[0].replace(os.sep, \".\")\n\n processor_pid = self.get_pid(file_path)\n processor_start_time = self.get_start_time(file_path)\n runtime = (now - processor_start_time) if processor_start_time else None\n last_run = self.get_last_finish_time(file_path)\n if last_run:\n seconds_ago = (now - last_run).total_seconds()\n Stats.gauge(f\"dag_processing.last_run.seconds_ago.{file_name}\", seconds_ago)\n\n rows.append((file_path, processor_pid, runtime, num_dags, num_errors, last_runtime, last_run))\n\n # Sort by longest last runtime. (Can't sort None values in python3)\n rows.sort(key=lambda x: x[3] or 0.0)\n\n formatted_rows = []\n for file_path, pid, runtime, num_dags, num_errors, last_runtime, last_run in rows:\n formatted_rows.append(\n (\n file_path,\n pid,\n f\"{runtime.total_seconds():.2f}s\" if runtime else None,\n num_dags,\n num_errors,\n f\"{last_runtime:.2f}s\" if last_runtime else None,\n last_run.strftime(\"%Y-%m-%dT%H:%M:%S\") if last_run else None,\n )\n )\n log_str = (\n \"\\n\"\n + \"=\" * 80\n + \"\\n\"\n + \"DAG File Processing Stats\\n\\n\"\n + tabulate(formatted_rows, headers=headers)\n + \"\\n\"\n + \"=\" * 80\n )\n\n self.log.info(log_str)"
]
| [
"0.6059152",
"0.5905004",
"0.5778753",
"0.5699282",
"0.5683771",
"0.5519713",
"0.5514828",
"0.5480031",
"0.54740256",
"0.54733485",
"0.5458618",
"0.5427449",
"0.5393411",
"0.5329494",
"0.530595",
"0.52048373",
"0.51568794",
"0.51409423",
"0.5137317",
"0.51298285",
"0.50797176",
"0.50720245",
"0.5070114",
"0.50525343",
"0.5052479",
"0.50460505",
"0.5002454",
"0.50000757",
"0.49973476",
"0.49346268"
]
| 0.7470923 | 0 |
Process Group Specific data. | def _process_group(self):
if not isinstance(self.transform, GroupTransformModel):
return
self._process_name()
if self.transformed_item['type'] == 'Campaign':
self._process_metadata_datetime('firstSeen', self.transform.first_seen)
if self.transformed_item['type'] == 'Document':
self._process_metadata('fileName', self.transform.file_name)
self._process_metadata('malware', self.transform.malware)
self._process_metadata('password', self.transform.password)
if self.transformed_item['type'] == 'Email':
self._process_metadata('body', self.transform.body)
self._process_metadata('from', self.transform.from_addr)
self._process_metadata('header', self.transform.header)
self._process_metadata('subject', self.transform.subject)
self._process_metadata('to', self.transform.to_addr)
if self.transformed_item['type'] in ('Event', 'Incident'):
self._process_metadata_datetime('eventDate', self.transform.event_date)
self._process_metadata('status', self.transform.status)
if self.transformed_item['type'] == 'Report':
self._process_metadata('fileName', self.transform.file_name)
self._process_metadata_datetime('publishDate', self.transform.publish_date)
# Handle sig specific fields here
if self.transformed_item['type'] == 'Signature':
self._process_metadata('fileName', self.transform.file_name)
self._process_metadata('fileType', self.transform.file_type)
self._process_metadata('fileText', self.transform.file_text) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def group_data_callback(self, action: EventType, group_id: str) -> None:\n self.process_item(group_id, {})",
"def data_group():\n ...",
"def _parse_groupped_data(self):\n for i, val in enumerate(self.values.keys()):\n xy = self.values[val]\n self._set_and_get(\"x_\", val, xy[:, 0])\n self._set_and_get(\"y_\", val, xy[:, 1])",
"async def process_group_write(self, telegram):\n # await self.datetime.process(telegram)\n await self.date.process(telegram)\n await self.time.process(telegram)",
"def _process_data(self, data: T) -> List[TestGroupReport]:\n raise NotImplementedError",
"def preprocess(self, data_group):\n\n input_data = data_group.preprocessed_case\n output_shape = list(input_data.shape)\n output_shape[-1] = len(self.label_splits)\n output_data = np.zeros(output_shape)\n\n # Merge Target Channels\n if self.split_method == 'integer_levels':\n for label_idx, label in enumerate(self.label_splits):\n if type(label) is list:\n # This is a little clunky\n single_label_data = np.zeros(output_shape[0:-1])[..., np.newaxis]\n for index in label:\n single_label_data += np.where(input_data == index, 1, 0)\n single_label_data = np.where(single_label_data > 0, 1, 0)\n else:\n single_label_data = np.where(input_data == label, 1, 0)\n\n output_data[..., label_idx] = single_label_data[..., 0]\n\n data_group.preprocessed_case = output_data\n self.output_data = output_data",
"def onGroupDataset(self, event):\n\t\tselections = self.tree.GetSelections()\n\t\tif not selections and self.selectedItem:\n\t\t\tselections = [self.selectedItem]\n\t\tif not self.groupedDataUnit:\n\t\t\tpluginLoader = Modules.DynamicLoader.getPluginLoader()\n\t\t\ttaskMod = pluginLoader.getPluginModule(\"Task\", \"Process\")\n\t\t\tunitType = taskMod.getDataUnit()\n\t\t\tmoduleType = pluginLoader.getPluginClass(\"Task\",\"Process\")\n\t\t\tself.groupedDataUnit = unitType()\n\t\t\tmodule = moduleType()\n\t\t\tself.groupedDataUnit.setModule(module)\n\t\t\n\t\tfor item in selections:\n\t\t\tif item in self.groupedItems: continue\n\t\t\tself.groupedItems.append(item)\n\t\t\tself.markBlue([item], \"}\")\n\t\t\tobj = self.tree.GetPyData(item)\n\t\t\tprint \"Adding source dataunit\",obj\n\t\t\tself.groupedDataUnit.addSourceDataUnit(obj)\n\t\tsetting = self.groupedDataUnit.getSettings()\n\t\tsetting.set(\"FilterList\",[])\n\t\t\n\t\tprint \"Now=\",self.groupedDataUnit.getSourceDataUnits()",
"def preprocess(self, data_group):\n\n input_data = data_group.preprocessed_case\n\n # Split Channels\n if self.channels is None:\n channel_subset = np.copy(input_data)\n else:\n all_channels = set(range(input_data.shape[-1]))\n remaining_channels = list(all_channels.difference(set(self.channels)))\n reminaing_channel_subset = np.take(input_data, remaining_channels, axis=-1)\n channel_subset = np.take(input_data, self.channels, axis=-1)\n\n # Merge Target Channels\n if self.merge_method == 'maximum':\n channel_subset = np.max(channel_subset, axis=-1)[..., np.newaxis]\n\n # Join Channels\n if self.channels is None:\n output_data = channel_subset\n else:\n output_data = np.concatenate((reminaing_channel_subset, channel_subset), axis=-1)\n\n data_group.preprocessed_case = output_data\n self.output_data = output_data",
"def process_group_message(self, d):\n dpid = int(d.get(\"dpid\", 0))\n dp = self.dpset.get(dpid)\n if not dp:\n return \"Datapath does not exist!\"\n\n ofproto = dp.ofproto\n parser = dp.ofproto_parser\n\n command = {\n 'add': ofproto.OFPGC_ADD,\n 'mod': ofproto.OFPGC_MODIFY,\n 'del': ofproto.OFPGC_DELETE,\n }\n\n cmd = command.get(d[\"operation\"], ofproto.OFPGC_ADD)\n\n type_convert = {'ALL': dp.ofproto.OFPGT_ALL,\n 'SELECT': dp.ofproto.OFPGT_SELECT,\n 'INDIRECT': dp.ofproto.OFPGT_INDIRECT,\n 'FF': dp.ofproto.OFPGT_FF}\n\n gtype = type_convert.get(d[\"type\"])\n\n group_id = d[\"group_id\"]\n\n buckets = []\n for bucket in d[\"buckets\"]:\n weight = bucket.get('weight', 0)\n watch_port = bucket.get('watch_port', ofproto.OFPP_ANY)\n watch_group = bucket.get('watch_group', dp.ofproto.OFPG_ANY)\n actions = []\n if bucket['actions']:\n actions_list = []\n if type(bucket['actions'][0]) is str or \\\n (not PYTHON3 and type(bucket['actions'][0]) is unicode):\n # Ryu's format\n for i in bucket['actions']:\n x = i.split(':', 1)\n y = x[1].replace('{', '').replace(\n '}', '').strip() if len(x) > 1 else ''\n y = y.replace(\n ':', '=', 1) if x[0] == 'SET_FIELD' else y\n actions_list.append({x[0]: y})\n else: # FlowManager's format\n actions_list = bucket['actions']\n actions = self.get_actions(parser, actions_list)\n buckets.append(dp.ofproto_parser.OFPBucket(\n weight, watch_port, watch_group, actions))\n\n #print(dp, cmd, gtype, group_id, buckets)\n group_mod = parser.OFPGroupMod(\n dp, cmd, gtype, group_id, buckets)\n\n try:\n dp.send_msg(group_mod) # ryu/ryu/controller/controller.py\n except KeyError as e:\n return e.__repr__()\n except Exception as e:\n return e.__repr__()\n\n return \"Message sent successfully.\"",
"def process(self, event):\n matchgroups = {}\n try:\n self.lock.acquire() # matchgroups are not thread safe, but we need to be reentrant here\n if not self.matcher.matches(event):\n return \"PASS\"\n matchgroups = self.matcher.get_match_groups()\n finally:\n self.lock.release()\n\n\n if self.autoclear:\n event[\"group_autoclear\"] = 1\n else:\n event[\"group_autoclear\"] = 0\n \n self.set_aggregation_group_id(event, matchgroups)\n (group, lastmod) = self.datasource.get_group_leader(event[\"group_id\"])\n\n if group and time.time()-lastmod >= self.config[\"maxdelay\"]:\n logging.debug(\"Cleared group %s \", event[\"group_id\"])\n self.datasource.deactivate_group(event[\"group_id\"])\n group = None\n\n if self.clear_matcher.matches(event):\n group_id = event[\"group_id\"]\n event[\"clear_group_leader\"] = group\n event[\"clear_group_id\"] = group_id\n event[\"group_id\"] = None\n self.datasource.deactivate_group(group_id)\n self.datasource.acknowledge_group(group_id, group)\n if self.auto_acknowledge:\n event[\"ack\"] = 1\n group = None\n return \"CLEAR\"\n\n \n if group: \n event[\"group_leader\"] = group\n event[\"group_active\"] = True\n return \"AGGR\"\n else:\n msg = self.create_aggregation_message(event, matchgroups)\n event[\"group_leader\"] = -1\n event[\"alternative_message\"] = msg\n event[\"group_active\"] = True\n return \"NEW\"",
"def data_for_grouping() -> NoReturn:\n raise NotImplementedError",
"def data_for_grouping() -> NoReturn:\n raise NotImplementedError",
"def data(self, *args, **kwargs):\n\n data = self.cached(NR_CACHE_NAME)\n if not data:\n raise core.InvalidState(\"No grouping loaded\")\n\n mapping = self.mapping(data['release'], data['groups'])\n data['groups'] = self.transform(data['groups'], mapping)\n self.cache(NR_CACHE_NAME, data)\n return None",
"def group_hook(self, accumulation, group):\n group_hook_directions(accumulation, group)\n group_hook_filter_directions(accumulation, group)\n if compute_gammas:\n group_hook_gammas(accumulation, group)\n if compute_lambdas:\n group_hook_lambdas(accumulation, group)\n group_hook_memory_cleanup(accumulation, group)",
"def _get_group_example_data(self, data_group_id: str) -> Dict[\n str, dict\n ]:\n return {\n e['example_id']: self._get_example_data(e['example_id'])\n for e in self.tasks['data_groups'][data_group_id]\n }",
"def process_read_group(self, data, x_field, v_field, g_field, accumulate=None):\n\n xnames = []\n gnames = []\n\n for d in data:\n for a, f in [(xnames, x_field), (gnames, g_field)]:\n # a - array\n # f - field name\n name = d[f]\n if name not in a:\n a.append(name)\n\n # matrix = {x_name: {g_name: value}}\n matrix = dict([(x_name, {}) for x_name in xnames])\n for d in data:\n matrix[d[x_field]][d[g_field]] = d[v_field]\n # processed = {g_name: {'values': [value]}}\n processed = dict([(g_name, {'values': []}) for g_name in gnames])\n for g_name, g_info in processed.items():\n cur_value = accumulate[g_name] if accumulate else 0\n for x_name in xnames:\n value = matrix[x_name].get(g_name, 0)\n if accumulate:\n value += cur_value\n cur_value = value\n g_info['values'].append(value)\n return xnames, processed",
"def process(self, data, output, processes, process):\n slice_list = du.get_grouped_slice_list(data, self.get_filter_frame_type(), self.get_max_frames())\n self._process_chunks(slice_list, data, output, len(processes), process)",
"def _read_group_format_result_custom(self, data, annotated_groupbys, groupby, domain):\n\n sections = []\n for gb in annotated_groupbys:\n ftype = gb['type']\n value = data[gb['groupby']]\n\n # full domain for this groupby spec\n d = None\n if value:\n if ftype == 'many2one':\n value = value[0]\n elif ftype in ('date', 'datetime'):\n locale = self._context.get('lang') or 'en_US'\n if locale == \"ar_SY\":\n locale = \"ar\"\n fmt = DEFAULT_SERVER_DATETIME_FORMAT if ftype == 'datetime' else DEFAULT_SERVER_DATE_FORMAT\n tzinfo = None\n range_start = value\n range_end = value + gb['interval']\n # value from postgres is in local tz (so range is\n # considered in local tz e.g. \"day\" is [00:00, 00:00[\n # local rather than UTC which could be [11:00, 11:00]\n # local) but domain and raw value should be in UTC\n if gb['tz_convert']:\n tzinfo = range_start.tzinfo\n range_start = range_start.astimezone(pytz.utc)\n range_end = range_end.astimezone(pytz.utc)\n\n range_start = range_start.strftime(fmt)\n range_end = range_end.strftime(fmt)\n if ftype == 'datetime':\n label = babel.dates.format_datetime(\n value, format=gb['display_format'],\n tzinfo=tzinfo, locale=locale\n )\n else:\n label = babel.dates.format_date(\n value, format=gb['display_format'],\n locale=locale\n )\n data[gb['groupby']] = ('%s/%s' % (range_start, range_end), label)\n d = [\n '&',\n (gb['field'], '>=', range_start),\n (gb['field'], '<', range_end),\n ]\n\n if d is None:\n d = [(gb['field'], '=', value)]\n sections.append(d)\n sections.append(domain)\n\n data['__domain'] = expression.AND(sections)\n if len(groupby) - len(annotated_groupbys) >= 1:\n data['__context'] = { 'group_by': groupby[len(annotated_groupbys):]}\n del data['id']\n return data",
"def process_groups(groups, logs):\n events = list()\n \n for group in groups:\n tag = group[2]\n target = group[3]\n msg_type = group[-1].lower()\n if tag == ACTIVITY_TAG or tag == DIALOG_TAG or tag == VIEW_TAG:\n\n if group[0] == group[1]:\n if msg_type == 'touchevent':\n events.append(touch_processor.create_touch_event(msg_type, target, logs[group[0]], group[0], tag))\n elif msg_type == 'keyevent':\n events.append(key_processor.create_key_event(msg_type, target, logs[group[0]], group[0]))\n continue\n\n # Activity & Dialig\n if msg_type == 'touchevent':\n event_logs = clear_logs(logs[group[0]:group[1]+1], [ACTIVITY_TAG, DIALOG_TAG, VIEW_TAG])\n ev = touch_processor.parse_touch_event(msg_type, target, event_logs, group[0], tag)\n elif msg_type == 'keyevent':\n event_logs = clear_logs(logs[group[0]:group[1]+1], [ACTIVITY_TAG, DIALOG_TAG])\n ev = key_processor.parse_key_event(msg_type, target, event_logs, group[0])\n events.append(ev)\n elif tag == POPUPWINDOW_TAG:\n # PopupWindow, process view onTouchEvent\n events.append(popup_window_processor.create_popup_window_event(msg_type, target, logs[group[0]], group[0]))\n view_groups = group[4]\n view_events = process_groups(view_groups, logs)\n if len(view_events) != 0:\n events += view_events\n events.append(popup_window_processor.create_popup_window_event(msg_type, target, logs[group[1]], group[1]))\n elif tag == EDITABLE_INPUT_CONNECTION_TAG:\n # Input Event\n nested_groups = group[4]\n # Process nested events\n nested_events = process_groups(nested_groups, logs)\n evs = input_processor.parse_input_event(msg_type, target, logs[group[0]:group[1]+1], nested_events, group[0])\n events += evs\n elif tag == TEXT_VIEW_KEY_TAG:\n # Keyboard event caught by TextView onKeyPreIme\n event_logs = clear_logs(logs[group[0]:group[1]+1], [TEXT_VIEW_KEY_TAG])\n ev = key_processor.parse_key_event(msg_type, target, event_logs, group[0])\n ev.intent = event.KeyEvent.HIDE_KEYBOARD_INTENT\n events.append(ev)\n elif tag == WEBVIEW_KEY_EVENT_TAG:\n # WebView KeyBoard event\n event_logs = logs[group[0]:group[1]+1]\n ev = wv_key_processor.parse_key_event(msg_type, target, event_logs, group[0])\n events.append(ev)\n elif tag == WEBVIEW_CLIENT_TAG:\n # WebView page loaded\n event_logs = logs[group[0]:group[1]+1]\n ev = wv_page_loaded_processor.parse_page_loaded(msg_type, target, event_logs, group[0])\n events.append(ev)\n elif tag == SENSOR_LISTENER_TAG:\n # Low level sensor\n event_logs = logs[group[0]:group[1]+1]\n ev = low_level_sensor_processor.parse_low_level_sensor_event(msg_type, target, event_logs, group[0])\n events.append(ev)\n elif tag == LOCATION_MANAGER_TAG or tag == LOCATION_LISTENER_TAG:\n event_logs = logs[group[0]:group[1]+1]\n ev = location_processor.parse_location_sensor_event(msg_type, target, event_logs, group[0])\n events.append(ev)\n\n return events",
"def parse_data( self ):\n super( PivotGroupGraph, self ).parse_data()\n new_parsed_data = {}\n parsed_data = getattr( self, 'parsed_data', self.results )\n for pivot, groups in parsed_data.items():\n new_pivot = self.parse_pivot(pivot)\n if new_pivot == None:\n continue\n new_groups = {}\n new_parsed_data[ new_pivot ] = new_groups\n for group, data in groups.items():\n new_group = self.parse_group(group)\n new_datum = self.parse_datum(data)\n if new_group == None:\n continue\n new_groups[ new_group ] = new_datum\n if len(new_groups) == 0:\n del new_parsed_data[new_pivot]\n self.parsed_data = new_parsed_data",
"def set_group(self, data, group, intg, dq=None):\n # TODO: Include a 2-D DQ array to be combined with the GROUPDQ array\n #\n # Copy the input data to a 2-D plane for this group/intg combination.\n # NOTE: This only works if data array is broadcastable so the shape\n # of the data array is checked.\n #\n data = np.asarray(data, dtype=self.data.dtype)\n detector_shape = (self.rows, self.columns)\n if data.shape == detector_shape:\n self.data[intg, group, :, :] = data \n # Invalidate the averaged data\n self._data_averaged = None\n # Update the group data quality array if necessary.\n if dq is not None:\n if self.include_groupdq:\n dq = np.asarray(dq, dtype=self.groupdq.dtype) # Convert to same data type.\n self.groupdq[intg, group, :, :] |= dq\n else:\n strg = \"Incompatible arguments. A groupdq array is \"\n strg += \"provided when include_groupdq=False. \"\n strg += \"The array is ignored.\"\n LOGGER.error(strg)\n else:\n strg = \"Group data array has the wrong shape \"\n strg += \"(%s instead of %s).\" % (str(data.shape),\n str(detector_shape))\n raise TypeError(strg)",
"def _proc_dataset(d):\n # merge 2dseq complex frame group if present\n if d.is_complex and d.type == '2dseq':\n d = FrameGroupMerger().merge(d, 'FG_COMPLEX')\n\n # prepare the data array\n if d.is_svs:\n data = _prep_data_svs(d)\n elif d.is_mrsi:\n data = _prep_data_mrsi(d)\n else:\n data = d.data\n\n # get properties\n properties = d.to_dict()\n\n # some Bruker datasets do not have affine property\n if d.type == 'fid': if not 'affine' in properties: properties.update({'affine':np.identity(4)})\n \n yield data, properties",
"def _process(self):\n # choose the correct transform model before processing TI data\n self._select_transform()\n\n # process type first, fail early\n self._process_type()\n\n # process type specific data\n if isinstance(self.transform, GroupTransformModel):\n self._process_group()\n elif isinstance(self.transform, IndicatorTransformModel):\n self._process_indicator()\n\n # self.process_associations(self.transform.associations)\n self._process_associated_group(self.transform.associated_groups)\n self._process_attributes(self.transform.attributes or [])\n self._process_security_labels(self.transform.security_labels or [])\n self._process_tags(self.transform.tags or [])\n\n # date added\n self._process_metadata_datetime('dateAdded', self.transform.date_added)\n\n # last modified\n self._process_metadata_datetime('lastModified', self.transform.last_modified)\n\n # xid\n self._process_metadata('xid', self.transform.xid)",
"def process_data(self, data):\n return data",
"def visit_group(self, group):\n for obj in self.event_json['events']:\n event_id = obj['id']\n event = self.world.events[event_id]\n group.add(event)",
"def _data_process(self, v):\n pass",
"def process_data_group(folder:Path, type:str, light:bool = False) -> dict:\n\n if type == dm.Delivery:\n data_folder = folder / 'data'\n else:\n data_folder = folder\n\n # check for non-existent or empty folder\n if not data_folder.exists():\n raise FileNotFoundError\n try:\n next((data_folder).glob(\"**/*\"))\n except StopIteration:\n # folder is empty can't process it\n raise FileNotFoundError\n\n # Get file sizes, last modified dates, and names to count,\n # sum size, and hash the file data provided\n file_sizes, file_modified_dates, file_metamodified_dates, file_names = zip(\n *[\n (f.stat().st_size, f.stat().st_mtime, f.stat().st_ctime, f)\n for f in (data_folder).glob(\"**/*\")\n if f.is_file() and f.name != 'receipt.rst'\n ]\n )\n\n last_modified = datetime.fromtimestamp(\n max(max(file_modified_dates),\n max(file_metamodified_dates)))\n\n # Hash the files in the delivery\n if light:\n folder_hash = 'skipped'\n else:\n folder_hash = hash_files(file_names)\n\n dg = {\n 'name' : folder.name,\n 'type' : type.__name__,\n 'last_update' : datetime.now(),\n 'size' : sum(file_sizes),\n 'num_files' : len(file_sizes),\n 'group_hash' : folder_hash,\n 'group_last_modified' : last_modified,\n }\n\n return dg",
"async def process_group_write(self, telegram):\n await self.switch.process(telegram)",
"def get_group_call_data(rawbody, addressbook, mid):\n\n if not rawbody:\n return None\n\n try:\n structured_call = StructuredGroupCall.loads(rawbody)\n except (ValueError, IndexError, TypeError) as e:\n logger.warn(\n f\"Failed to load group call data for message {mid}: {str(e)}\"\n )\n return []\n\n timestamp = dt.datetime.fromtimestamp(structured_call.when // 1000)\n timestamp = timestamp.replace(\n microsecond=(structured_call.when % 1000) * 1000\n )\n recipient = addressbook.get_recipient_by_uuid(structured_call.by)\n if recipient:\n initiator = recipient.name\n\n group_call_data = GroupCallData(\n initiator=initiator,\n timestamp=timestamp,\n )\n\n return group_call_data",
"def get_grouped_data(self, field_name):\n pass"
]
| [
"0.7243357",
"0.6760643",
"0.6536721",
"0.62792784",
"0.6244008",
"0.6113101",
"0.60894483",
"0.6086338",
"0.60303324",
"0.5993764",
"0.59914124",
"0.59914124",
"0.5969717",
"0.59248054",
"0.59004796",
"0.58814335",
"0.5875647",
"0.57523483",
"0.57357395",
"0.5716076",
"0.56948733",
"0.5688969",
"0.56766546",
"0.56722593",
"0.56613404",
"0.5643248",
"0.56274784",
"0.56199044",
"0.55990857",
"0.55985135"
]
| 0.73376715 | 0 |
Process Indicator Specific data. | def _process_indicator(self):
if not isinstance(self.transform, IndicatorTransformModel):
return
# handle the 3 possible indicator fields
self._process_indicator_values()
if self.transform.active:
self._process_metadata('active', self.transform.active)
self._process_confidence(self.transform.confidence)
self._process_rating(self.transform.rating)
if self.transformed_item['type'] == 'File':
self._process_metadata('size', self.transform.size)
self._process_file_occurrences(self.transform.file_occurrences or [])
if self.transformed_item['type'] == 'Host':
self._process_metadata('dnsActive', self.transform.dns_active)
self._process_metadata('whoisActive', self.transform.whois_active) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _data_process(self, v):\n pass",
"def process_indicators(actapi: act.api.Act, args: Config, falcon: Intel) -> None:\n\n indicator_marker = None\n\n for indicator in crowdstrike_intel.get_latest_indicators(\n falcon, get_last_indicator()\n ):\n handle_indicator(actapi, indicator, args.output_format)\n\n indicator_marker = indicator.marker\n\n if indicator_marker:\n update_last_indicator(indicator_marker)",
"def process_data(self, data):\n return data",
"def handle_data(self, data):\n if verbose(): print(\"TIParser.handle_data(self, '%s')\" % (data))\n pass",
"def handle_data(self, context, data):\n\n self.timer += 1\n\n if self.timer >= self.execution_time:\n if self.currently_trading >= len(self.assets):\n self.timer = len(self.assets)\n return\n else:\n return\n\n if self.currently_trading == 0:\n self.log_portfolio(context)\n\n self.load_history(data, self.assets[self.currently_trading])\n self.load_indicators(data, self.assets[self.currently_trading])\n self.execution_logic(context, data, self.assets[self.currently_trading])\n\n # Cycle to next trading pair\n self.currently_trading += 1",
"def _process(self, data: np.ndarray) -> np.ndarray:",
"def _process(self, data: np.ndarray) -> np.ndarray:",
"def process_heater_pump_data(self, data):\n\n # Digital outputs have the following syntax:\n # name=PinX,pin_num=X,value=val, where val is 0/1 for OFF/ON\n sensor_details = data.split(',')\n pin_num = int(sensor_details[1].split('=')[1])\n value = sensor_details[2].split('=')[1]\n status = \"OFF\"\n if int(value) == 1:\n status = \"ON\"\n\n # Store the status of the d pins in a dictionary\n self.digital_pin_status[pin_num] = status",
"def process_data(self, data):\n try:\n payload = self.extract(data)\n except (ValueError, InvalidToken) as doh:\n self.log.error('Error: {}, Data: {}'.format(doh, data))\n else:\n fields = {}\n tags = {}\n # writing strings to a field in Influx requires a double-quote\n tags['username'] = '\"{}\"'.format(payload['user'])\n # Dumbass Influx doesn't let you group by fields or aggregate tags...\n # I want to count the unique occurrences of a user over a period of time\n # to show current connected user counts, *and* be able to group by\n # those usernames over time to show specific user usage. Wish I\n # used TimescaleDB instead of InfluxDB\n fields['user'] = '\"{}\"'.format(payload.pop('user'))\n fields['source'] = '\"{}\"'.format(payload.pop('source'))\n fields['target'] = '\"{}\"'.format(payload.pop('target'))\n fields['packets'] = 1 # each event represents a single packet\n timestamp = payload.pop('time')\n self.influx.write(fields=fields, tags=tags, timestamp=timestamp)",
"def _handleIncomingDataAnalysis(self, msg: str):\n\t\tlogging.info(\"[CDA_CALLBACK]----->>>The _handleIncomingDataAnalysis method is being called\")\n\t\tad = DataUtil.jsonToActuatorData(self, msg)\n\t\tself.actuatorAdapterManager.sendActuatorCommand(ad)",
"def process_data(self, value):\n try:\n self.data = value.value\n except AttributeError:\n self.data = value",
"def extract(self, data):",
"def _extract_data(self, *args, **kwargs) -> None:\n raise NotImplementedError",
"def onAdaptorData(self, message):\n #logging.debug(\"%s onadaptorData, message: %s\", ModuleName, message)\n if message[\"characteristic\"] == \"acceleration\":\n for a in self.accel:\n if a.id == self.idToName[message[\"id\"]]: \n a.processAccel(message)\n break\n elif message[\"characteristic\"] == \"temperature\":\n for t in self.temp:\n if t.id == self.idToName[message[\"id\"]]:\n t.processTemp(message)\n break\n elif message[\"characteristic\"] == \"ir_temperature\":\n for t in self.irTemp:\n if t.id == self.idToName[message[\"id\"]]:\n t.processIrTemp(message)\n break\n elif message[\"characteristic\"] == \"gyro\":\n for g in self.gyro:\n if g.id == self.idToName[message[\"id\"]]:\n g.processGyro(message)\n break\n elif message[\"characteristic\"] == \"magnetometer\":\n for g in self.magnet:\n if g.id == self.idToName[message[\"id\"]]:\n g.processMagnet(message)\n break\n elif message[\"characteristic\"] == \"buttons\":\n for b in self.buttons:\n if b.id == self.idToName[message[\"id\"]]:\n b.processButtons(message)\n break\n elif message[\"characteristic\"] == \"humidity\":\n for b in self.humidity:\n if b.id == self.idToName[message[\"id\"]]:\n b.processHumidity(message)\n break\n elif message[\"characteristic\"] == \"binary_sensor\":\n for b in self.binary:\n if b.id == self.idToName[message[\"id\"]]:\n b.processBinary(message)\n break\n elif message[\"characteristic\"] == \"power\":\n for b in self.power:\n if b.id == self.idToName[message[\"id\"]]:\n b.processPower(message)\n break\n elif message[\"characteristic\"] == \"battery\":\n for b in self.battery:\n if b.id == self.idToName[message[\"id\"]]:\n b.processBattery(message)\n break\n elif message[\"characteristic\"] == \"connected\":\n for b in self.connected:\n if b.id == self.idToName[message[\"id\"]]:\n b.processConnected(message)\n break\n elif message[\"characteristic\"] == \"luminance\":\n for b in self.luminance:\n if b.id == self.idToName[message[\"id\"]]:\n b.processLuminance(message)\n break",
"def _process_data(self):\r\n # Rename columns to match final feature class\r\n self._rename_columns()\r\n # Add point ID column\r\n self._add_pointid()\r\n # Sort rows by transect id and timestamp\r\n self._sort_rows()\r\n # Fill Null records with a value\r\n self._fill_nulls()\r\n # Set site_code to lower case\r\n self._lower_site_code()\r\n # Create survey_id\r\n self._calc_survey_id()\r\n # Calculate nativesg column if at least one of the veg columns is a Native seagrass type\r\n if set(self.veg_columns).intersection(set(NATIVESG_CODES)) > 0:\r\n self.nativesg_columns = list(set(self.veg_columns).intersection(set(NATIVESG_CODES)))\r\n self._calc_nativesg()\r\n #\r",
"def processData(self):\n recordSet = AresChartsService.toMultiSeries(self.vals, self.chartKeys, self.selectedX , self.chartVals, extKeys=self.extKeys)\n self.aresObj.jsGlobal.add(\"data_%s = %s\" % (self.htmlId, json.dumps(recordSet)))",
"def preprocess(self, data, label):\n\t\traise NotImplementedError",
"def _handleSensorDataAnalysis(self, data: SensorData):\n\t\tlogging.info(\"[CDA_CALLBACK]----->>>The _handleSensorDataAnalysis method is being called\")\n\t\t\n\t\t\"\"\"\n\t\t\"\"\"\n\t\tif self.enableHandleTempChangeOnDevice and data.getSensorType() == SensorData.TEMP_SENSOR_TYPE:\n\n\t\t\tad = ActuatorData(actuatorType = ActuatorData.HVAC_ACTUATOR_TYPE)\n\t\t\tvalue = data.getValue()\n\t\t\tif value >= self.triggerHvacTempFloor and value <= self.triggerHvacTempCeiling:\n\t\t\t\tad.setCommand(ActuatorData.COMMAND_OFF)\n\t\t\telse:\n\t\t\t\tad.setCommand(ActuatorData.COMMAND_ON)\n\t\t\t\n\t\t\tself.actuatorAdapterManager.sendActuatorCommand(ad)\n\t\t\t\n\t\t\"\"\"\n\t\t\"\"\"\n\t\tif self.enableHandleSoilHumidityChangeOnDevice and data.getSensorType() == SensorData.SOIL_HUMIDITY_SENSOR_TYPE:\n\t\t\t\n\t\t\tad = ActuatorData(actuatorType = ActuatorData.SPRINKLER_ACTUATOR_TYPE)\n\t\t\tvalue = data.getValue()\n\t\t\tif value >= self.triggerWaterDeviceHumiCeiling: \n\t\t\t\tad.setCommand(ActuatorData.COMMAND_OFF)\n\t\t\t\tself.actuatorAdapterManager.sendActuatorCommand(ad)\n\t\t\telif value <= self.triggerWaterDeviceHumiFloor:\n\t\t\t\tad.setCommand(ActuatorData.COMMAND_ON)\n\t\t\t\tself.actuatorAdapterManager.sendActuatorCommand(ad)\n\t\t\t\tself.coapClient.sendGetRequest(ResourceNameEnum.CDA_ACTUATOR_CMD_RESOURCE, False, 5)\n\t\t\telse:\n\t\t\t\tself.coapClient.sendGetRequest(ResourceNameEnum.CDA_CLOUD_ACTUATOR_CMD_RESOURCE, False, 5)",
"def _process(proc_data):\n int_list = {'agrctlrssi', 'agrextrssi', 'agrctlnoise', 'agrextnoise',\n 'lasttxrate', 'maxrate', 'lastassocstatus', 'mcs'}\n\n for key in proc_data:\n if key in int_list:\n proc_data[key] = jc.utils.convert_to_int(proc_data[key])\n\n return proc_data",
"def parse_dataset(self, data):\n pass",
"def analyse(self, data=None):\n pass",
"def handle_indicator(\n actapi: act.api.Act,\n indicator: crowdstrike_intel.Indicator,\n output_format: str = \"json\",\n) -> None:\n\n obj = expand_objects(indicator)\n\n if not obj:\n # No extracted objects\n return\n\n if not (obj.uri and obj.content):\n # Do not handle indicators that do not have an URL (domain/fqdn is handled as URLs\n # using network://<ip> and network://<fqdn>)\n # We might extend this later, but there is a large amount indicators where we only\n # have a hash\n return\n\n handle_hash_content_uri(actapi, indicator, obj)\n\n for vuln in indicator.vulnerabilities:\n handle_fact(\n actapi.fact(\"exploits\")\n .source(\"content\", obj.content)\n .destination(\"vulnerability\", vuln),\n output_format=output_format,\n )\n\n for malware_family in indicator.malware_families:\n handle_fact(\n actapi.fact(\"classifiedAs\")\n .source(\"content\", obj.content)\n .destination(\"tool\", malware_family),\n output_format=output_format,\n )\n\n for actor in indicator.actors:\n fact_chain.handle_ta_uri(\n actapi,\n output_format,\n actor,\n obj.uri,\n )\n\n for label in indicator.labels:\n sector = TARGET_SECTOR_MAP.get(label.name)\n\n if sector:\n fact_chain.handle_ta_sectors(actapi, output_format, actor, [sector])",
"def classify(self, data):\n abstract",
"def process_deal(self, data):\n for i in data:\n if i.status == OrderStatus.PARTIALLY_FILLED or i.status == OrderStatus.FILLED:\n symbol, exchange = convert_symbol_tiger2vt(str(i.contract))\n self.tradeid += 1\n\n trade = TradeData(\n symbol=symbol,\n exchange=exchange,\n direction=Direction.NET,\n tradeid=self.tradeid,\n orderid=self.ID_TIGER2VT[str(i.order_id)],\n price=i.avg_fill_price,\n volume=i.filled,\n time=datetime.fromtimestamp(\n i.trade_time / 1000).strftime(\"%H:%M:%S\"),\n gateway_name=self.gateway_name,\n )\n\n self.on_trade(trade)",
"def preProcess(self, datum):\n pass",
"def run(self, data):\n\t\t# no processing here\n\t\treturn data",
"def processor(self, data):\n streaming_data = self.decoder.decodeData(data)\n # Add Your code here to process data and handle transport/storage",
"def processor(self, data):\n streaming_data = self.decoder.decodeData(data)\n # Add Your code here to process data and handle transport/storage",
"def processor(self, data):\n streaming_data = self.decoder.decodeData(data)\n # Add Your code here to process data and handle transport/storage",
"def processor(self, data):\n streaming_data = self.decoder.decodeData(data)\n # Add Your code here to process data and handle transport/storage"
]
| [
"0.66653603",
"0.63825715",
"0.63084006",
"0.62935305",
"0.61722577",
"0.6003158",
"0.6003158",
"0.6001415",
"0.5996518",
"0.5973672",
"0.5973634",
"0.5907465",
"0.59034735",
"0.59011394",
"0.58908945",
"0.58360183",
"0.5789101",
"0.5783111",
"0.57760155",
"0.5755044",
"0.57524717",
"0.5727775",
"0.5715659",
"0.570548",
"0.5699103",
"0.568352",
"0.56404126",
"0.56404126",
"0.56404126",
"0.56404126"
]
| 0.7404228 | 0 |
Process Group Name data. | def _process_name(self):
if not isinstance(self.transform, GroupTransformModel):
return
name = self._transform_value(self.transform.name)
if name is None:
self.log.error(
'feature=ti-transform, event=process-group-name, message=no-name-found, '
f'path={self.transform.name.path}'
)
raise RuntimeError('At least one indicator value must be provided.')
self.add_name(name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def group_add_name(org_id, data):\n if data.has_key('groupname'):\n groupname = data['groupname']\n add_group(org_id, groupname, False)",
"def _set_group_name(self):\n self._scene_gen.group_name = self._group_name_le.text()\n self._refresh_view()",
"def set_group_name(self, name):\n self.groupname = name",
"def bb_groupname(hit):\n try:\n group = hit.group(1)\n G = Group.objects.get(name=group)\n T = loader.get_template('webview/t/group.html')\n C = Context({'G' : G})\n return T.render(C)\n except:\n # This is normally thrown when the group is invalid. Return the original result,\n # Only we add an icon to indicate an invalid group.\n return '<img src=\"/static/user_error.png\" alt=\"user\" border=\"0\" /> %s' % (group)",
"def process_or_group_name(name):\r\n s = str(name).strip()\r\n if ' ' in s or ':' in s:\r\n raise ValueError(\"Invalid name: \" + repr(name))\r\n return s",
"def set_group_name(self, name):\n params = [('groupname', name, 'cdata')]\n\n self.get(COMMAND_UIC, 'SetGroupName', params)",
"def groupname():\n return jsonify(name=getRandomLine(groupNamesFile))",
"def test_api_v1_groups_names_get(self):\n pass",
"def add_group_data(self, group_name):\n self.sorted = False\n self.grouped = False\n self.labels_to_add = []\n for path in self.all_groups.get(group_name):\n io = NWBHDF5IO(path, 'r')\n nwb_file = io.read()\n # self.labels.append(nwb_file.identifier)\n self.nwb_path_list.update({nwb_file.identifier: path})\n self.labels_to_add.append(nwb_file.identifier)\n self.musketeers_widget.session_widget.populate(self.labels_to_add, 'add')\n self.musketeers_widget.session_widget.update_text_filter()\n self.groupMenu.setEnabled(True)\n self.sortMenu.setEnabled(True)",
"def _get_name(self, report):\n match = re.search(\"(.*)\\:\\s*\\(groupid\", report)\n if match:\n return match.group(1)",
"def validate_group_name(self, key, value):\n if len(value.split(':')) != 2:\n raise AssertionError('group_name {} is not prefixed'.format(value))\n return value",
"def group_data_callback(self, action: EventType, group_id: str) -> None:\n self.process_item(group_id, {})",
"def get_group_name(self):\n return self.groupname",
"def parseName(self,event=None):\r\n txt = event.widget.get()\r\n\r\n resPhone = self.cPhone.search(txt)\r\n resEmail = self.cEmail.search(txt)\r\n resName = self.cName.search(txt)\r\n \r\n if resPhone!=None:\r\n self.phone.setVal(resPhone.group())\r\n\r\n if resEmail!=None:\r\n self.email.setVal(resEmail.group())\r\n\r\n if resName!=None:\r\n if resName.group('title')==None:\r\n self.title.setVal(\"Mr.\")\r\n else:\r\n self.title.setVal(resName.group('title'))\r\n self.first.setVal(resName.group('first'))\r\n if resName.group('middle')==None:\r\n self.middle.setVal(\"\")\r\n else:\r\n self.middle.setVal(resName.group('middle'))\r\n self.last.setVal(resName.group('last'))",
"def get_group(group_data,path):\n path_name = path.label()\n group_name = re.sub(r'[0-9]+','',path_name)\n if group_name not in group_data:\n group_data[group_name] = len(group_data.keys())\n return group_data[group_name]",
"def group_name(self) -> str:\n return pulumi.get(self, \"group_name\")",
"def test_set_group_name(self, componentName='ocean', objectType='region',\n featureName='Celtic Sea',\n groupName='testGroupName'):\n # Authors\n # -------\n # Phillip J. Wolfram\n # Xylar Asay-Davis\n\n # verification that groupName is in file\n def verify_groupName(destfile, groupName):\n with open(destfile) as f:\n filevals = json.load(f)\n assert 'groupName' in filevals, \\\n 'groupName does not exist in {}'.format(destfile)\n assert filevals['groupName'] == groupName, \\\n 'Incorrect groupName of {} specified instead of ' \\\n '{}.'.format(filevals['groupName'], groupName)\n\n gf = GeometricFeatures()\n fc = gf.read(componentName, objectType, [featureName])\n fc.set_group_name(groupName)\n assert fc.otherProperties['groupName'] == groupName, \\\n 'groupName not assigned to FeatureCollection'\n destfile = str(self.datadir.join('test.geojson'))\n fc.to_geojson(destfile)\n verify_groupName(destfile, groupName)",
"def _fold_group_identifier(name: str) -> str:\n # whitespace to underscores\n identifier = re.sub(r\"\\s+\", \"_\", name)\n # remove non-alphanum\n identifier = re.sub(r\"[^A-Za-z\\d_]+\", \"\", identifier)\n # trim underscores\n identifier = identifier.strip(\"_\")\n # lowercase, shorten\n return identifier.lower()[:20]",
"def group_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"group_name\")",
"def group_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"group_name\")",
"def log_group_name(self) -> str:\n ...",
"def clean_group_name(self):\r\n group_name = self.cleaned_data['group_name']\r\n master_id = 0\r\n\r\n if self.instance:\r\n # master id is used to exclude current master so that it is not checked as duplicate\r\n master_id = self.instance.id\r\n\r\n if LedgerGroup.objects.filter(company=self.company, group_name__iexact=group_name).exclude(id=master_id).exists():\r\n raise forms.ValidationError(\"Group name already exists\")\r\n\r\n return group_name",
"def process_group(row):\n splitted_name = row.name.split(extreme_separator)\n return sorted(splitted_name) + [row[2]]",
"def data_group():\n ...",
"def getGroupName(Id):\r\n return \"Group name\"",
"def _get_group_data(self, group_name):\n if self.plotter.plot_hues is None:\n data = self._get_group_data_without_hue(group_name)\n else:\n data = self._get_group_data_with_hue(group_name)\n\n group_data = remove_null(data)\n\n return group_data",
"def _process_group(self):\n if not isinstance(self.transform, GroupTransformModel):\n return\n\n self._process_name()\n\n if self.transformed_item['type'] == 'Campaign':\n self._process_metadata_datetime('firstSeen', self.transform.first_seen)\n\n if self.transformed_item['type'] == 'Document':\n self._process_metadata('fileName', self.transform.file_name)\n self._process_metadata('malware', self.transform.malware)\n self._process_metadata('password', self.transform.password)\n\n if self.transformed_item['type'] == 'Email':\n self._process_metadata('body', self.transform.body)\n self._process_metadata('from', self.transform.from_addr)\n self._process_metadata('header', self.transform.header)\n self._process_metadata('subject', self.transform.subject)\n self._process_metadata('to', self.transform.to_addr)\n\n if self.transformed_item['type'] in ('Event', 'Incident'):\n self._process_metadata_datetime('eventDate', self.transform.event_date)\n self._process_metadata('status', self.transform.status)\n\n if self.transformed_item['type'] == 'Report':\n self._process_metadata('fileName', self.transform.file_name)\n self._process_metadata_datetime('publishDate', self.transform.publish_date)\n\n # Handle sig specific fields here\n if self.transformed_item['type'] == 'Signature':\n self._process_metadata('fileName', self.transform.file_name)\n self._process_metadata('fileType', self.transform.file_type)\n self._process_metadata('fileText', self.transform.file_text)",
"def get_data_name(name):\n if name.find('Data') == 0:\n name = name[4:]\n name_ = ''\n for i, char in enumerate(name):\n if char.isupper() and i > 0:\n name_ += '_'\n name_ += char.lower()\n return name_",
"def format_group_name(gn):\r\n\r\n return {\r\n \"Studies in language and literature\": \"Group 1 - Studies in Language and Literature\",\r\n \"Language acquisition\": \"Group 2 - Language Acquisition\",\r\n \"Individuals and societies\": \"Group 3 - Individuals and Societies\",\r\n \"Experimental sciences\": \"Group 4 - Sciences\",\r\n \"Mathematics\": \"Group 5 - Mathematics\",\r\n \"The arts\": \"Group 6 - The Arts\"\r\n }[gn]",
"def _get_node_group(self, node_name):\n\n pass"
]
| [
"0.6870541",
"0.647826",
"0.6424762",
"0.6213746",
"0.61461794",
"0.61408144",
"0.61103433",
"0.6044551",
"0.60382485",
"0.59623533",
"0.5926703",
"0.5880001",
"0.585873",
"0.58447593",
"0.57991725",
"0.5753259",
"0.5715129",
"0.5704183",
"0.56917757",
"0.56917757",
"0.56846094",
"0.5674622",
"0.56503946",
"0.56468534",
"0.5626158",
"0.5617182",
"0.5606521",
"0.5597834",
"0.5591489",
"0.5568004"
]
| 0.7495443 | 0 |
Process metadata fields that should be a TC datetime. | def _process_metadata_datetime(self, key: str, metadata: DatetimeTransformModel | None):
if metadata is not None and metadata.path is not None:
value = self._path_search(metadata.path)
if value is not None:
self.add_metadata(
key, self.util.any_to_datetime(value).strftime('%Y-%m-%dT%H:%M:%SZ')
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def verify_t(data):\n if 't_utc' not in data['properties']:\n return None\n data['properties']['DateTime'] = util.datestring(data['properties']['t_utc'], tz=config['local_tz']) \n return data",
"def prepare_data(self, data):\n for i, v in data.items():\n field_type = self.get_field_type(i)\n #log.info('i = %s, type = %s', i, field_type)\n if field_type == 'datetime' and isinstance(v, (str, unicode)):\n data[i] = datetime_from_string(v)\n return data",
"def test_datetime_field():",
"def fix_time_fields(self):\n time_fields = {\"Time of day\": lambda time: time.hour, \"Time of year (month)\": lambda time: time.month}\n for time_field in time_fields.keys():\n for i in range(self.df.shape[0]):\n value = self.df[time_field][i]\n if type(value) is datetime.time or type(value) is datetime.datetime:\n self.df[time_field].loc[i] = time_fields[time_field](value)",
"def preprocess_time(data, metadata):\n timestamp_name = metadata[\"timestamp_name\"]\n if timestamp_name == \"\":\n timestamp_name = \"fake_ts\"\n data[timestamp_name] = data.index\n\n data[timestamp_name] = pd.to_datetime(data[timestamp_name])\n data.sort_values(by=[timestamp_name], inplace=True)\n data.set_index([timestamp_name], inplace=True)\n\n return data",
"def __handleDateAttribute(self, timeString):\n try:\n if len(str(timeString)) == 13:\n return datetime.datetime.fromtimestamp(timeString / 1000)\n else:\n return datetime.datetime.fromtimestamp(timeString)\n except ValueError:\n return None\n except TypeError:\n return None",
"def test_convert_datetime():",
"def meta_data(date):\n return {'sourceDate': datetime.strptime(date, '%a, %d %b %Y %H:%M:%S %Z').replace(tzinfo=UTC).isoformat()}",
"def convert_generic_timestamps(result: ResponseObject) -> ResponseObject:\n # Format inner record if present, e.g. for search results\n if 'record' in result:\n result['record'] = convert_generic_timestamps(result['record'])\n return result\n\n for field in GENERIC_TIME_FIELDS:\n datetime_obj = try_datetime(result.get(field, ''))\n if datetime_obj:\n result[field] = datetime_obj\n return result",
"def test_transform_metadata(self):\n response = {\"createDateTime\": \"2018-10-20T20:33:40Z\", \"updateDateTime\": \"2018-10-20T20:33:40Z\",\n \"info\": {\"accessType\": \"PUBLIC\"}}\n record = Record(\"PUBLIC\", createDateTime=datetime.strptime(\"2018-10-20 20:33:40+00\", '%Y-%m-%d %H:%M:%S+00'),\n updateDateTime=datetime.strptime(\"2018-10-20 20:33:40+00\", '%Y-%m-%d %H:%M:%S+00'))\n result = transform_metadata(record)\n self.assertEqual(result, response)",
"def _update_metadata_date(self, properties):\n if \"core\" not in properties:\n properties.core = Struct()\n properties.core.metadata_date = self._backend.server_time_utc()",
"def test_created_at(self):\n self.assertIsInstance(self.obj.created_at, datetime)",
"def test_created_at(self):\n self.assertIsInstance(self.obj.created_at, datetime)",
"def test_created_at(self):\n self.assertIsInstance(self.obj.created_at, datetime)",
"def _get_field_data(self, _field, _line):\n if not _field:\n return None\n line_copy = _line\n for f in _field.split('.'):\n data_type = line_copy._fields[f].type\n line_copy = line_copy[f]\n if data_type == 'date':\n if line_copy:\n line_copy = dt.strptime(line_copy, '%Y-%m-%d')\n elif data_type == 'datetime':\n if line_copy:\n line_copy = dt.strptime(line_copy, '%Y-%m-%d %H:%M:%S')\n if isinstance(line_copy, basestring):\n line_copy = line_copy.encode('utf-8')\n return line_copy",
"def fix_dates(self, row):\r\n for field in self.date_fields:\r\n if field in row:\r\n if not type(row[field]) is datetime:\r\n try:\r\n row[field] = datetime.fromtimestamp(float(row[field]))\r\n except Exception as e:\r\n row[field] = None",
"def test_14_digit_datetime_detection(self):\n obj = awstats_reader.awstats_datetime('20091130165230')\n self.assertTrue(isinstance(obj, awstats_reader.AwstatsDateTime))",
"def _parse_datetime(self, data):\n d = data.find('./itdDate').attrib\n t = data.find('./itdTime').attrib\n\n # -1 means nope, there is no time known\n if d['weekday'] == '-1' or d['day'] == '-1' or t['minute'] == '-1':\n return None\n\n # convert time – the EFA API likes to talk about 24:00, so we have to correct that.\n result = datetime(int(d['year']), int(d['month']), int(d['day']), min(int(t['hour']), 23), int(t['minute']))\n if int(t['hour']) == 24:\n result += timedelta(hours=1)\n return result",
"def parse_date_time_pacific(object_key):\n return extract_date_info(object_key)",
"def _get_datetime_or_error(self) -> dt:\n return dt(\n *(self.date[key] for key in ['year', 'month', 'day'] if key in self.date)\n )",
"def _parse_dtypes(data, table_meta):\n for name, field in table_meta['fields'].items():\n field_type = field['type']\n if field_type == 'datetime':\n datetime_format = field.get('format')\n data[name] = pd.to_datetime(data[name], format=datetime_format, exact=False)\n elif field_type == 'numerical' and field.get('subtype') == 'integer':\n data[name] = data[name].dropna().astype(np.int64)\n elif field_type == 'id' and field.get('subtype', 'integer') == 'integer':\n data[name] = data[name].dropna().astype(np.int64)\n\n return data",
"def test_created_at(self):\n self.assertIsInstance(self.certificate_history.datetime, datetime.datetime)",
"def test_datetime_creation(self):\n self.assertIsInstance(self.user_1.created_at, datetime)\n self.assertIsInstance(self.user_1.updated_at, datetime)",
"def test_created_at(self):\n self.assertIsInstance(self.obj_ticket.created_at, datetime)",
"def compare_date_fields(self, details, encoded, context, field):\r\n if details[field] is not None:\r\n date = Date()\r\n if field in encoded and encoded[field] is not None:\r\n dt1 = date.from_json(encoded[field])\r\n dt2 = details[field]\r\n\r\n self.assertEqual(dt1, dt2, msg=\"{} != {} at {}\".format(dt1, dt2, context))\r\n else:\r\n self.fail(field + \" missing from encoded but in details at \" + context)\r\n elif field in encoded and encoded[field] is not None:\r\n self.fail(field + \" included in encoding but missing from details at \" + context)",
"def test_validate_datetime(self):\n self.datatrue = pd.read_csv(pkg_resources.resource_filename(resource_package, 'tests/testing_data/report_2_counts.csv'))\n\n self.datafalse = pd.read_csv(pkg_resources.resource_filename(resource_package, 'tests/testing_data/random_date1.csv'))\n\n self.test1 = utils.validate_datetime(self.datatrue)\n\n self.test2 = utils.validate_datetime(self.datafalse)\n\n self.assertTrue(isinstance(self.test1, pd.DataFrame))\n\n self.assertTrue(np.dtype('datetime64[ns]') in self.test1.dtypes.tolist())\n\n self.assertFalse(np.dtype('datetime64[ns]') in self.test2.dtypes.tolist())",
"def test_create_at(self):\n self.assertIsInstance(self.obj.created_at, datetime)",
"def parse_timestamps(tags, ignore_errors):\n\n if tags.get(\"StudyDate\"):\n tags[\"StudyDateTime\"] = mk_time(tags.get(\"StudyDate\"), tags.get(\"StudyTime\"))\n\n if tags.get(\"SeriesDate\"):\n tags[\"SeriesDateTime\"] = mk_time(tags.get(\"SeriesDate\"), tags.get(\"SeriesTime\"))\n\n if not tags.get(\"SeriesDateTime\"):\n tags[\"SeriesDateTime\"] = tags.get(\"StudyDateTime\")\n\n if not tags.get(\"SeriesDateTime\"):\n err = \"No series creation time identified\"\n handle_errors(err, ignore_errors)\n\n if tags.get(\"InstanceCreationDate\"):\n tags[\"InstanceCreationDateTime\"] = mk_time(tags.get(\"InstanceCreationDate\"),\n tags.get(\"InstanceCreationTime\"))\n\n if not tags.get(\"InstanceCreationDateTime\"):\n tags[\"InstanceCreationDateTime\"] = tags.get(\"SeriesDateTime\") or \\\n tags.get(\"StudyDateTime\")\n\n if not tags.get(\"InstanceCreationDateTime\"):\n err = \"No instance creation time identified\"\n handle_errors(err, ignore_errors)\n\n return tags",
"def test_time_field():",
"def test_basecreated(self):\n self.assertEqual(datetime, type(BaseModel().created_at))"
]
| [
"0.6487719",
"0.6416545",
"0.62630284",
"0.60930234",
"0.607387",
"0.59573936",
"0.5950122",
"0.5936687",
"0.5919602",
"0.5879977",
"0.58798766",
"0.5831652",
"0.5831652",
"0.5831652",
"0.5783733",
"0.57555133",
"0.5716862",
"0.5673746",
"0.56561273",
"0.564494",
"0.563797",
"0.5626059",
"0.55516464",
"0.5547925",
"0.5540319",
"0.5529857",
"0.54991055",
"0.54861933",
"0.54703814",
"0.54661834"
]
| 0.7287922 | 0 |
Select the correct transform based on the "applies" field. | def _select_transform(self):
for transform in self.transforms:
if transform.applies is None or transform.applies(self.ti_dict) is True:
self.transform = transform
break
else:
raise RuntimeError('No transform found for TI data') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _apply_transform(self):\n pass",
"def _apply_transform(self, w2w_transform):\n raise NotImplementedError",
"def get_transform_fn():",
"def getCurrentTransformSelection():\n node = cmds.ls(sl=True)\n if node:\n node = node[0]\n if cmds.nodeType(node) == 'transform':\n xform = node\n return xform\n else:\n relatives = cmds.listRelatives(node, shapes=True, f=1)\n if relatives:\n for i in relatives:\n if cmds.nodeType(i) == \"transform\":\n xform = i\n return xform\n return None",
"def transform(self):\n\n if not hasattr(self, \"_transform\"):\n self._transform = Doc.load_single_filter(self.session, self.FILTER)\n return self._transform",
"def _validate_transforms(self):\n if len(self.transforms) > 1:\n for transform in self.transforms:\n if transform.applies is None:\n raise ValueError(\n 'If more than one transform is provided, each '\n 'provided transform must provide an apply field.',\n )",
"def _validate_transforms(self):\n if len(self.transforms) > 1:\n for transform in self.transforms:\n if transform.applies is None:\n raise ValueError(\n 'If more than one transform is provided, each '\n 'provided transform must provide an apply field.',\n )",
"def _get_transform(self, transform):\n try:\n from cartopy.crs import CRS\n except ModuleNotFoundError:\n CRS = None\n cartopy = getattr(self, 'name', None) == 'proplot_cartopy'\n if (\n isinstance(transform, mtransforms.Transform)\n or CRS and isinstance(transform, CRS)\n ):\n return transform\n elif transform == 'figure':\n return self.figure.transFigure\n elif transform == 'axes':\n return self.transAxes\n elif transform == 'data':\n return PlateCarree() if cartopy else self.transData\n elif cartopy and transform == 'map':\n return self.transData\n else:\n raise ValueError(f'Unknown transform {transform!r}.')",
"def get_transformation_func(item, type_transformation):\n transformation = item.get(type_transformation)\n if transformation is None:\n return lambda x: x\n try:\n return registered_transformation[transformation]\n except KeyError:\n raise Invalid('{} is not registered as transformation'.format(transformation))",
"def get_transform(self):\n return self.transform",
"def _transform(self, document):\n pass",
"def do_transform(self):\r\n if not self.transform:\r\n return\r\n try:\r\n self.latest_value = utils.Transform(\r\n expr=self.transform, value=self.latest_value,\r\n timedelta=self.time_between_updates().total_seconds()).result()\r\n except (TypeError, ValueError):\r\n logger.warn(\"Invalid transformation '%s' for metric %s\",\r\n self.transfrom, self.pk)\r\n self.transform = ''",
"def transform():",
"def transform(self, node):\n return self.get_transform_func(node)(node)",
"def retrieve_transformer(\n self,\n transformation: str = None,\n param: dict = {},\n df=None,\n random_seed: int = 2020,\n ):\n\n if transformation in (trans_dict.keys()):\n return trans_dict[transformation]\n\n elif transformation in list(have_params.keys()):\n return have_params[transformation](**param)\n\n elif transformation == 'MinMaxScaler':\n from sklearn.preprocessing import MinMaxScaler\n\n return MinMaxScaler()\n\n elif transformation == 'PowerTransformer':\n from sklearn.preprocessing import PowerTransformer\n\n transformer = PowerTransformer(\n method='yeo-johnson', standardize=True, copy=True\n )\n return transformer\n\n elif transformation == 'QuantileTransformer':\n from sklearn.preprocessing import QuantileTransformer\n\n quants = param[\"n_quantiles\"]\n quants = quants if df.shape[0] > quants else int(df.shape[0] / 3)\n param[\"n_quantiles\"] = quants\n return QuantileTransformer(copy=True, **param)\n\n elif transformation == 'StandardScaler':\n from sklearn.preprocessing import StandardScaler\n\n return StandardScaler(copy=True)\n\n elif transformation == 'MaxAbsScaler':\n from sklearn.preprocessing import MaxAbsScaler\n\n return MaxAbsScaler(copy=True)\n\n elif transformation == 'RobustScaler':\n from sklearn.preprocessing import RobustScaler\n\n return RobustScaler(copy=True)\n\n elif transformation == 'PCA':\n from sklearn.decomposition import PCA\n\n # could probably may it work, but this is simpler\n if df.shape[1] > df.shape[0]:\n raise ValueError(\"PCA fails when n series > n observations\")\n transformer = PCA(\n n_components=min(df.shape), whiten=False, random_state=random_seed\n )\n return transformer\n\n elif transformation == 'FastICA':\n from sklearn.decomposition import FastICA\n\n if df.shape[1] > 500:\n raise ValueError(\"FastICA fails with > 500 series\")\n transformer = FastICA(\n n_components=df.shape[1],\n whiten=True,\n random_state=random_seed,\n **param,\n )\n return transformer\n\n elif transformation in ['RollingMean', 'FixedRollingMean']:\n param = 10 if param is None else param\n if not str(param).isdigit():\n window = int(''.join([s for s in str(param) if s.isdigit()]))\n window = int(df.shape[0] / window)\n else:\n window = int(param)\n window = 2 if window < 2 else window\n self.window = window\n if transformation == 'FixedRollingMean':\n transformer = RollingMeanTransformer(window=self.window, fixed=True)\n else:\n transformer = RollingMeanTransformer(window=self.window, fixed=False)\n return transformer\n\n elif transformation in ['SeasonalDifference', 'SeasonalDifferenceMean']:\n if transformation == 'SeasonalDifference':\n return SeasonalDifference(lag_1=param, method='LastValue')\n else:\n return SeasonalDifference(lag_1=param, method='Mean')\n\n elif transformation == 'RollingMean100thN':\n window = int(df.shape[0] / 100)\n window = 2 if window < 2 else window\n self.window = window\n return RollingMeanTransformer(window=self.window)\n\n elif transformation == 'RollingMean10thN':\n window = int(df.shape[0] / 10)\n window = 2 if window < 2 else window\n self.window = window\n return RollingMeanTransformer(window=self.window)\n\n else:\n print(\n f\"Transformation {transformation} not known or improperly entered, returning untransformed df\"\n )\n return EmptyTransformer()",
"def transform(self, query):\n query = preprocessing(query)\n return lookup(query, self.model.wv)",
"def apply_transform(key, data, transform_list):\n for transform in transform_list:\n method_name = transform[MethodKeys.METHOD]\n method_params = transform[MethodKeys.PARAMETERS]\n\n if method_name == 'compute_and_apply_vocabulary':\n method_params.update({'vocab_filename': key})\n\n data = TransformMethods.get_method(method_name)(data,\n **method_params)\n return data",
"def transform():\n pass",
"def apply(self):\n if self.applied:\n raise RuntimeError(\"Transform applied more than once\")\n \n self._apply()\n \n self.applied = True\n \n return self.template",
"def get_apply_scale(self, applyScaleFactor, scale_quality = 1.0):\n v = self.scale * self.scale_quality * scale_quality\n if applyScaleFactor:\n v *= self.scale_factor\n return v",
"def _try_to_match_transformation(graph: Union[SDFG, SDFGState], collapsed_graph: nx.DiGraph, subgraph: Dict[int, int],\n sdfg: SDFG, xform: Union[xf.PatternTransformation, Type[xf.PatternTransformation]],\n expr_idx: int, nxpattern: nx.DiGraph, state_id: int, permissive: bool,\n options: Dict[str, Any]) -> Optional[xf.PatternTransformation]:\n subgraph = {\n nxpattern.nodes[j]['node']: graph.node_id(collapsed_graph.nodes[i]['node'])\n for i, j in subgraph.items()\n }\n\n try:\n if isinstance(xform, xf.PatternTransformation):\n match = xform\n else: # Construct directly from type with options\n opts = options or {}\n try:\n match = xform(**opts)\n except TypeError:\n # Backwards compatibility, transformation does not support ctor arguments\n match = xform()\n # Set manually\n for oname, oval in opts.items():\n setattr(match, oname, oval)\n\n match.setup_match(sdfg, sdfg.sdfg_id, state_id, subgraph, expr_idx, options=options)\n match_found = match.can_be_applied(graph, expr_idx, sdfg, permissive=permissive)\n except Exception as e:\n if Config.get_bool('optimizer', 'match_exception'):\n raise\n if not isinstance(xform, type):\n xft = type(xform)\n else:\n xft = xform\n print('WARNING: {p}::can_be_applied triggered a {c} exception:'\n ' {e}'.format(p=xft.__name__, c=e.__class__.__name__, e=e))\n return None\n\n if match_found:\n return match\n\n return None",
"def get_transform(self):\n raise NotImplementedError",
"def get_transform(self, map_from='visual', map_to='render'):\n return self.transforms.get_transform(map_from, map_to)",
"def transform(self):\n return self._transform",
"def attrTransform(self, matrix, transform):\n for ttype, targs in self.reTransformFind.findall(transform):\n targs = list(map(lambda x: float(x), self.reNumberFind.findall(targs)))\n if ttype == 'matrix':\n newmatrix = [ targs[0], targs[1],\n targs[2], targs[3],\n targs[4], targs[5] ]\n self.matrixMul(matrix, newmatrix)\n elif ttype == 'translate':\n tx = targs[0]\n ty = targs[1] if len(targs) > 1 else 0\n newmatrix = [ 1, 0, 0, 1, tx, ty ]\n self.matrixMul(matrix, newmatrix)\n elif ttype == 'scale':\n sx = targs[0]\n sy = targs[1] if len(targs) > 1 else sx\n newmatrix = [ sx, 0, 0, sy, 0, 0 ]\n self.matrixMul(matrix, newmatrix)\n elif ttype == 'rotate':\n if len(targs) == 1:\n alpha = targs[0]\n newmatrix = [ math.cos(alpha), math.sin(alpha),\n -math.sin(alpha), math.cos(alpha),\n 0, 0]\n self.matrixMul(matrix, newmatrix)\n else:\n alpha = targs[0]\n newmatrix = [ 1, 0, 0, 1, targs[1], targs[2] ]\n self.matrixMul(matrix, newmatrix)\n newmatrix = [ math.cos(alpha), math.sin(alpha),\n -math.sin(alpha), math.cos(alpha),\n 0, 0]\n self.matrixMul(matrix, newmatrix)\n newmatrix = [ 1, 0, 0, 1, -targs[1], -targs[2] ]\n self.matrixMul(matrix, newmatrix)\n elif ttype == 'skewX' or ttype == 'skewY':\n self.alert(\"skewX and skewY transformations are not supported\", elem)\n else:\n print('unknown transform type: ', ttype)\n return matrix",
"def transform(self, results: Dict) -> Optional[Dict]:\n for t in self.transforms:\n results = t(results) # type: ignore\n if results is None:\n return None\n return results",
"def _convert_text_edits(meta_dict, text_key):\n text_func = DataSet._convert_edits\n args = ()\n kwargs = {'text_key': text_key}\n DataSet._apply_to_texts(text_func, meta_dict, args, kwargs)\n return None",
"def apply(args):\n html_doc = document.Document(get_code(args.file))\n with open(args.transform_file, 'r', encoding='UTF-8') as tfr_file:\n tfr_json = yaml.load(tfr_file)\n not_applied = html_doc.apply(tfr_json)\n\n if len(not_applied) == 0:\n print('All transforms applied.')\n else:\n print('The following transforms could not be applied:')\n print(yaml.dump(not_applied))\n set_code(args.file, html_doc)",
"def transform_define(transform):\n if transform == 'tanh':\n return np.tanh\n elif transform == 'exp':\n return np.exp\n elif transform == 'logit':\n return Family.ilogit\n elif transform is None:\n return np.array\n else:\n return None",
"def apply_transform(transform):\n vg.shape.check(locals(), \"transform\", (4, 4))\n\n def apply(points, discard_z_coord=False, treat_input_as_vector=False):\n points, is_columnized, maybe_decolumnize = columnize(\n points, (-1, 3), name=\"points\"\n )\n\n homogenous_coordinate_value = 0 if treat_input_as_vector else 1\n padded_points = np.pad(\n points,\n ((0, 0), (0, 1)),\n mode=\"constant\",\n constant_values=homogenous_coordinate_value,\n )\n transformed_padded_points = np.dot(transform, padded_points.T).T\n transformed_points = np.delete(transformed_padded_points, 3, axis=1)\n\n result = maybe_decolumnize(transformed_points)\n if discard_z_coord:\n return result[:, 0:2] if is_columnized else result[0:2]\n else:\n return result\n\n return apply"
]
| [
"0.599259",
"0.5802514",
"0.5593477",
"0.55098355",
"0.5502565",
"0.5366491",
"0.5366491",
"0.52897954",
"0.52482057",
"0.52445346",
"0.52439463",
"0.5236918",
"0.5220782",
"0.5141374",
"0.5130341",
"0.51176065",
"0.50397825",
"0.5034048",
"0.49685302",
"0.49674764",
"0.49308807",
"0.4929202",
"0.49192876",
"0.49016994",
"0.48912907",
"0.48797145",
"0.48739913",
"0.48551404",
"0.48548883",
"0.48494524"
]
| 0.6984942 | 0 |
Transform a value using a static map. | def _transform_value_map(self, value: str, map_: dict, passthrough: bool = False) -> str:
_default = value if passthrough is True else None
if isinstance(value, str):
# a static map is a dict of key/value pairs
value = map_.get(value.lower(), _default)
else:
self.log.warning(
f'''feature=ti-transform, action=transform-value, '''
f'''message='static-map-requires-str-value', value={value}'''
)
return value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def map_value(self) -> global___Expression.MapValue:",
"def constant_transform(input_dict, constant):\n return {\n \"data\": constant,\n }",
"def transform(self, name, value):\n raise NotImplementedError()",
"def remap_single_value(value, remapping):\n return remapping[value] if value in remapping else ''",
"def _transform_map_data(self):\n WARD_FMT = '%s-%s'\n self.map_data_trans = []\n lookup = {i.column: ''.join(filter(lambda x: x.isdigit(), i.value)) for i in self.sht[1]}\n\n #skip over header\n rs = iter(self.sht.rows)\n next(rs)\n next(rs)\n for r in rs:\n pka = r[0].value\n for c in r[1:]:\n if c.value is None:\n c.value = 0\n\n self.map_data_trans.append((WARD_FMT%(pka, lookup[c.column]), c.value))",
"def friendly_to_internal(self, value):\n return value * self.conversion",
"def clean_w_map(value, mapping):\n if value in mapping.keys():\n return mapping[value]\n return value",
"def map_value(field):\n\n if is_map(field):\n return field\n return None",
"def makeValMap(self,value = 'readcount'):\n self.valMap = np.zeros(len(self))\n self.valMap = self.valMap-1\n myTmp = []\n for x in range(0,len(self)):\n myTmp.append([])\n for i in self.children:\n for j in range(i.start,i.end+1):\n myTmp[j-self.start].append(i.__dict__[value])\n for nt in range(0,len(myTmp)):\n if len(myTmp[nt])>0:\n self.valMap[nt]=sum(myTmp[nt])/len(myTmp[nt])",
"def map_values(fun, a_dict):\n return dict((k, fun(v)) for (k, v) in a_dict.items())",
"def mapping_for_switch(mapping):\n return {key[0]: value for key, value in mapping.items()}",
"def transform():",
"def transform_val(self, properties, val):\n transformed = val\n\n # encode if needed\n encoding = properties.get(\"encoding\")\n if encoding is not None:\n transformed = self.encode(encoding, transformed)\n\n # hash if needed\n hashing = properties.get(\"hashing\")\n if hashing is not None:\n transformed = self.hash(hashing, transformed)\n\n return transformed",
"def map():",
"def set_provenance_map_entry(self, ksf_value: Any) -> Any:\n if isinstance(ksf_value, str):\n ksf_value = ksf_value.strip()\n if ksf_value.lower() == \"true\":\n mapping = self.processor()\n elif ksf_value.lower() == \"false\":\n mapping = self.default() # source suppressed\n else:\n mapping = self.default(ksf_value)\n elif isinstance(ksf_value, bool):\n if ksf_value:\n mapping = self.processor()\n else: # false, ignore this source?\n mapping = self.default() # source suppressed\n elif isinstance(ksf_value, (list, set, tuple)):\n mapping = self.processor(infores_rewrite_filter=ksf_value)\n else:\n mapping = ksf_value\n return mapping",
"def map_values(function, dictionary):\n return {k: function(dictionary[k]) for k in dictionary}",
"def _ConvertMapFieldValue(self, value, message, field, path):\n if not isinstance(value, dict):\n raise ParseError(\n 'Map field {0} must be in a dict which is {1} at {2}'.format(\n field.name, value, path))\n key_field = field.message_type.fields_by_name['key']\n value_field = field.message_type.fields_by_name['value']\n for key in value:\n key_value = _ConvertScalarFieldValue(key, key_field,\n '{0}.key'.format(path), True)\n if value_field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:\n self.ConvertMessage(value[key],\n getattr(message, field.name)[key_value],\n '{0}[{1}]'.format(path, key_value))\n else:\n getattr(message, field.name)[key_value] = _ConvertScalarFieldValue(\n value[key], value_field, path='{0}[{1}]'.format(path, key_value))",
"def map(s,dic):\n state=s.getstate()\n if not state in dic:raise Exception(\"the current state \"+str(state)+\" is not available to map to using the dictionary \"+str(dic))\n val=dic[state]\n if callable(val):\n return val()\n states=s.getstates()\n if val in states:\n return s.setstate(val)\n raise Exception(\"I dont know how to use this \"+str(state)+\" since it maps to a type of \"+str(type(val))+\" namely \"+str(val))",
"def from_value(cls, value: str):\n return cls._value2member_map_[value]",
"def map_dict(dictionary, transform):\n return dict(transform(k, v) for k, v in dictionary.items())",
"def cache_item(mapping, key, value):\n mapping[key] = value\n return value",
"def apply_remap_values(labels: np.ndarray, label_map: Dict[int, int]) -> np.ndarray:\n for l1, l2 in label_map.items():\n labels[labels == l1] = l2",
"def _StringToMapHelper(self, map_type, value_type):\n m = map_type()\n with self.assertRaises(AssertionError):\n m[1] = value_type()\n with self.assertRaises(AssertionError):\n m['1'] = 2\n m['1'] = value_type()\n self.assertEqual(m, {'1': value_type()})\n m[u'2'] = value_type()\n self.assertEqual(m, {'1': value_type(), u'2': value_type()})",
"def remap(self, value, from1, to1, from2, to2):\n return from2 + (value - from1) * (to2 - from2) / (to1 - from1)",
"def mapComponentValues(*args):\n return _libsbml.SBMLTransforms_mapComponentValues(*args)",
"def test_apply_scalar_map(self):\n super(TestObjDict, self).test_apply_scalar_map(_as_obj=True)",
"def identity_mapper(key, value):\n yield key, value",
"def map(keys, values) -> MapValue:\n return ops.Map(keys, values).to_expr()",
"def map_values_c(fun):\n return partial(map_values, fun)",
"def castInputToBuiltInType(key, value):\n\n try:\n if key in ['bind_npi', 'dst_npi', 'src_npi']:\n return addr_npi_value_map[value]\n elif key in ['bind_ton', 'dst_ton', 'src_ton']:\n return addr_ton_value_map[value]\n elif key == 'ripf':\n return replace_if_present_flap_value_map[value]\n elif key == 'priority':\n return priority_flag_value_map[value]\n elif key in ['con_fail_retry', 'con_loss_retry', 'ssl']:\n if value == 'yes':\n return True\n elif value == 'no':\n return False\n else:\n raise KeyError('Boolean value must be expressed by yes or no.')\n elif (key == 'loglevel' and\n value not in [logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL]):\n raise KeyError('loglevel must be numeric value of 10, 20, 30, 40 or 50.')\n elif isinstance(value, str) and value.lower() == 'none':\n value = None\n except KeyError:\n raise UnknownValue('Unknown value for key %s: %s' % (key, value))\n\n return value"
]
| [
"0.6872702",
"0.6598292",
"0.6037038",
"0.5979952",
"0.57496524",
"0.55890745",
"0.5582468",
"0.5575885",
"0.55652285",
"0.5521447",
"0.54932886",
"0.54850787",
"0.53919625",
"0.53560996",
"0.5353477",
"0.53521",
"0.534856",
"0.532428",
"0.5300235",
"0.52695125",
"0.5253864",
"0.52436835",
"0.52321887",
"0.5207374",
"0.5197375",
"0.51948136",
"0.51703197",
"0.51631296",
"0.51432663",
"0.51164305"
]
| 0.7314087 | 0 |
Passed the raw JSON data about a User from Twitter's API, it returns an HTMLified version of the User's description. Replaces t.co URLs with clickable, full links. Makes #hashtags into clickable links. Makes @usernames into clickable links. | def htmlify_description(json_data):
# I don't think users in the Twitter archive JSON have description
# elements:
try:
desc = json_data["description"]
except KeyError:
return ""
# Make t.co URLs into their original URLs, clickable.
if "entities" in json_data and "description" in json_data["entities"]:
entities = json_data["entities"]["description"]
if "urls" in entities:
for entity in entities["urls"]:
start, end = entity["indices"][0], entity["indices"][1]
shown_url = entity["display_url"]
link_url = entity["expanded_url"]
url_html = '<a href="%s" rel="external">%s</a>'
desc = desc.replace(
json_data["description"][start:end],
url_html % (link_url, shown_url),
)
# Make #hashtags and @usernames clickable.
parser = ttp.Parser()
parsed = parser.parse(desc)
return parsed.html | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clean_user_desc(self):\n desc = self.data['user']['description']\n if desc is not None:\n desc = ' '.join(re.sub(\"(RT : )|(@[\\S]+)|(&\\S+)|(http\\S+)\", \" \", desc).split())\n desc = \" \".join(re.sub(\"(#\\S+)\", ' ', desc).split())\n desc = ''.join(list(filter(lambda x: x.isalpha() or x is ' ',\n desc))).replace(' ', ' ').replace(' ', ' ').lower().strip()\n return {'plain_desc': desc}",
"def user2Link(user): \n # could also look up mail addrs via a table lookup, etc\n return '<a href=\"mailto:%(user)[email protected]\">%(user)s</a>' % {\"user\": user}",
"def html_ann_tweet(tweets):\r\n for tweet in tweets:\r\n\r\n # Fairly efficient way of dealing with the fact that these keys might not exist\r\n try:\r\n text = tweet['text']\r\n except:\r\n pass\r\n\r\n try:\r\n text = tweet['full_text']\r\n except:\r\n pass\r\n\r\n try:\r\n text = tweet['extended_tweet']['full_text']\r\n except:\r\n pass\r\n\r\n\r\n # Hashtags\r\n tweet['text_html_annotated'] = re.sub(r'\\B#\\w\\w+',\r\n '<span class=\"hashtag\">\\g<0></span>',\r\n text)\r\n\r\n # Usernames\r\n tweet['text_html_annotated'] = re.sub(r'(?<=^|(?<=[^a-zA-Z0-9-_\\.]))@'\r\n r'([A-Za-z]+[A-Za-z0-9]+)',\r\n '<span class=\"user\">\\g<0></span>',\r\n tweet['text_html_annotated'])\r\n\r\n # Links\r\n tweet['text_html_annotated'] = re.sub(\r\n r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|'\r\n r'(?:%[0-9a-fA-F][0-9a-fA-F]))+', '<a href=\"\\g<0>\">\\g<0></a>',\r\n tweet['text_html_annotated'])\r\n\r\n return tweets",
"def _make_links(tweet):\n for pattern, repl in (USER_SUB, KEYWORD_SUB):\n tweet = re.sub(pattern, repl, tweet)\n return tweet",
"def get_HTML_box(self, url):\n who = self.getShortAuthor()\n if self.comments is None:\n title = \"\"\n else:\n title = html.escape(self.comments)\n return '<a href=\"%s\" title=\"%s\">%s</a>' % (url,\n title,\n html.escape(who))",
"def htmlify_tweet(json_data):\n\n # Temporary, until Twython.html_for_tweet() can handle tweets with\n # 'full_text' attributes.\n if \"full_text\" in json_data:\n json_data[\"text\"] = json_data[\"full_text\"]\n\n # Some Tweets (eg from a downloaded archive) don't have entities['symbols']\n # which Twython.html_for_tweet() currently expects.\n # Not needed once github.com/ryanmcgrath/twython/pull/451 is in Twython.\n if \"entities\" in json_data and \"symbols\" not in json_data[\"entities\"]:\n json_data[\"entities\"][\"symbols\"] = []\n\n # Some Tweets (eg from a downloaded archive) have strings instead of ints\n # to define text ranges. [\"0\", \"140\"] rather than [0, 140].\n # We fix those here so that Twython doesn't complain.\n if \"display_text_range\" in json_data:\n json_data[\"display_text_range\"] = [\n int(n) for n in json_data[\"display_text_range\"]\n ]\n if \"entities\" in json_data:\n for key, value in json_data[\"entities\"].items():\n for count, entity in enumerate(value):\n if \"indices\" in entity:\n json_data[\"entities\"][key][count][\"indices\"] = [\n int(n) for n in entity[\"indices\"]\n ]\n\n # This does most of the work for us:\n # https://twython.readthedocs.org/en/latest/usage/special_functions.html#html-for-tweet\n html = Twython.html_for_tweet(\n json_data, use_display_url=True, use_expanded_url=False\n )\n\n # Need to do some tidying up:\n\n try:\n ents = json_data[\"entities\"]\n except KeyError:\n ents = {}\n\n urls_count = len(ents[\"urls\"]) if \"urls\" in ents else 0\n media_count = len(ents[\"media\"]) if \"media\" in ents else 0\n hashtags_count = len(ents[\"hashtags\"]) if \"hashtags\" in ents else 0\n symbols_count = len(ents[\"symbols\"]) if \"symbols\" in ents else 0\n user_mentions_count = len(ents[\"user_mentions\"]) if \"user_mentions\" in ents else 0\n\n # Replace the classes Twython adds with rel=\"external\".\n html = html.replace('class=\"twython-hashtag\"', 'rel=\"external\"')\n html = html.replace('class=\"twython-mention\"', 'rel=\"external\"')\n html = html.replace('class=\"twython-media\"', 'rel=\"external\"')\n html = html.replace('class=\"twython-symbol\"', 'rel=\"external\"')\n\n # Twython uses the t.co URLs in the anchor tags.\n # We want to replace those with the full original URLs.\n # And replace the class it adds with rel=\"external\".\n if (urls_count + media_count) > 0 and urls_count > 0:\n for url in ents[\"urls\"]:\n html = html.replace(\n '<a href=\"%s\" class=\"twython-url\">' % url[\"url\"],\n '<a href=\"%s\" rel=\"external\">' % url[\"expanded_url\"],\n )\n\n if media_count > 0:\n # Remove any media links, as we'll make the photos/movies visible in\n # the page. All being well.\n for item in ents[\"media\"]:\n html = html.replace(\n '<a href=\"%s\" rel=\"external\">%s</a>'\n % (item[\"url\"], item[\"display_url\"]),\n \"\",\n )\n\n if (\n urls_count + media_count + hashtags_count + symbols_count + user_mentions_count\n ) == 0:\n # Older Tweets might contain links but have no 'urls'/'media' entities.\n # So just make their links into clickable links:\n # But don't do this for newer Tweets which have an entities element,\n # or we'll end up trying to make links from, say user_mentions we\n # linked earlier.\n html = urlize(html)\n\n # Replace newlines with <br>s\n html = re.sub(r\"\\n\", \"<br>\", html.strip())\n\n return html",
"def user_link( username ):\n user = User.get_by_name(username)\n if user and user.last_seen:\n cls = \"\"\n delta = (datetime.now() - user.last_seen).seconds\n if delta < 300: # 5 minutes\n cls = 'class=recent'\n if delta < 60: # 1 minute\n cls = 'class=veryrecent'\n return '<a %s href=\"/user/%s\">%s</a>' % (cls, username, username)\n elif user:\n return '<a href=\"/user/%s\">%s</a>' % (username, username)\n else:\n return username",
"async def github_user_info(self, ctx: commands.Context, username: str) -> None:\n async with ctx.typing():\n user_data = await self.fetch_data(f\"{GITHUB_API_URL}/users/{quote_plus(username)}\")\n\n # User_data will not have a message key if the user exists\n if \"message\" in user_data:\n embed = discord.Embed(\n title=random.choice(NEGATIVE_REPLIES),\n description=f\"The profile for `{username}` was not found.\",\n colour=Colours.soft_red\n )\n\n await ctx.send(embed=embed)\n return\n\n org_data = await self.fetch_data(user_data[\"organizations_url\"])\n orgs = [f\"[{org['login']}](https://github.com/{org['login']})\" for org in org_data]\n orgs_to_add = \" | \".join(orgs)\n\n gists = user_data[\"public_gists\"]\n\n # Forming blog link\n if user_data[\"blog\"].startswith(\"http\"): # Blog link is complete\n blog = user_data[\"blog\"]\n elif user_data[\"blog\"]: # Blog exists but the link is not complete\n blog = f\"https://{user_data['blog']}\"\n else:\n blog = \"No website link available\"\n\n embed = discord.Embed(\n title=f\"`{user_data['login']}`'s GitHub profile info\",\n description=f\"```{user_data['bio']}```\\n\" if user_data[\"bio\"] else \"\",\n colour=discord.Colour.blurple(),\n url=user_data[\"html_url\"],\n timestamp=datetime.strptime(user_data[\"created_at\"], \"%Y-%m-%dT%H:%M:%SZ\")\n )\n embed.set_thumbnail(url=user_data[\"avatar_url\"])\n embed.set_footer(text=\"Account created at\")\n\n if user_data[\"type\"] == \"User\":\n\n embed.add_field(\n name=\"Followers\",\n value=f\"[{user_data['followers']}]({user_data['html_url']}?tab=followers)\"\n )\n embed.add_field(\n name=\"Following\",\n value=f\"[{user_data['following']}]({user_data['html_url']}?tab=following)\"\n )\n\n embed.add_field(\n name=\"Public repos\",\n value=f\"[{user_data['public_repos']}]({user_data['html_url']}?tab=repositories)\"\n )\n\n if user_data[\"type\"] == \"User\":\n embed.add_field(\n name=\"Gists\",\n value=f\"[{gists}](https://gist.github.com/{quote_plus(username, safe='')})\"\n )\n\n embed.add_field(\n name=f\"Organization{'s' if len(orgs)!=1 else ''}\",\n value=orgs_to_add if orgs else \"No organizations.\"\n )\n embed.add_field(name=\"Website\", value=blog)\n\n await ctx.send(embed=embed)",
"def content(self, uid, text):\n\n if uid and isinstance(uid, str) and uid.lower().startswith(\"http\"):\n return f\"<a href='{uid}' rel='noopener noreferrer' target='blank'>{text}</a>\"\n\n return text",
"def generate_user_link(user):\n return '[@{0}](https://github.com/{0})'.format(user)",
"def format_tweet(tweet):\n user = tweet['user']\n return {\n 'tweet_id': tweet['id'],\n 'hashtag': HASHTAG,\n 'text': tweet['text'],\n 'created_at': tweet['created_at'],\n 'user': {\n 'user_id': user['id'],\n 'name': user['name'],\n 'handle': user['screen_name'],\n 'profile_image_url': user['profile_image_url'],\n 'profile_url': f\"https://twitter.com/{user['screen_name']}\"\n }\n }",
"def _html(self, text):\r\n html = URL_REGEX.sub(self._parse_urls, text)\r\n html = USERNAME_REGEX.sub(self._parse_users, html)\r\n html = LIST_REGEX.sub(self._parse_lists, html)\r\n return HASHTAG_REGEX.sub(self._parse_tags, html)",
"def hashtag_and_username_test(self):\n text = 'test #hashtag and @username test'\n html = 'test <a href=\"https://www.instagram.com/explore/tags/hashtag\">#hashtag</a> and <a href=\"https://www.instagram.com/username/\">@username</a> test'\n self.assertEqual(linkify_text(text), html)",
"def crawl_user(username):\r\n url_to_parse = 'http://habrahabr.ru/users/' + username + '/' \r\n root = ut.doc4url(url_to_parse)\r\n\r\n def get_set(css_class_name, set_num=0):\r\n \"\"\"\r\n Find in the page list of some hyperlinked properties\r\n (such as friends, interests, etc)\r\n and return a set of them.\r\n \"\"\"\r\n if not root:\r\n return None\r\n item = root.xpath('//dl[@class=\"%s\"]/dd' % css_class_name)\r\n if len(item) <= set_num:\r\n return None\r\n sets_node = item[set_num]\r\n item_set = set([ut.unicodeanyway(node.text).replace('\\n', '')\r\n for node\r\n in sets_node.xpath('.//a') if node.text is not None])\r\n \r\n \r\n \r\n return item_set\r\n\r\n user = so.SmartObject({\r\n 'interests' : get_set('interests'),\r\n 'companies' : get_set('companies_list'),\r\n 'friends' : get_set('friends_list'),\r\n 'hubs' : get_set('hubs_list'),\r\n 'invitees': get_set('friends_list', 1)\r\n }) \r\n return user",
"def weblinks_view(request):\n\n\trecords = WebLinks.objects.filter(user_id=request.user.id)\n\tjson_response = [dict(title=record.title, url=record.url, description=record.description) for record in records]\n\treturn JsonResponse(json_response, safe=False)",
"def remake_user_url(self, share_url):\n signature_list = [\"ToWZhwAALto9c5Po75QH1k6FmZ\", \"OCp12QAAWHxL3H-2aPAunDgqdc\", \"U6o86AAAM.ogXDaHGe4txlOqPP\",\n \"P0BGzAAAXyNMtkyjOHBM2z9ARt\"]\n text_head = \"https://www.iesdouyin.com/web/api/v2/aweme/post/?\"\n text_mid = \"count=21&\"\n text_cursor = \"max_cursor={}\".format(self.cursor)\n text_sign = random.choice(signature_list)\n text_aid_sign = \"&aid=1128&_signature=\" + text_sign + \"&dytk=\"\n text_sec_num = get_sec_id(share_url)\n remake = text_head + text_sec_num + text_mid + text_cursor + text_aid_sign\n return remake",
"def bb_user(hit):\n try:\n user = hit.group(1)\n U = User.objects.get(username=user)\n T = loader.get_template('webview/t/user.html')\n C = Context({'U' : U})\n return T.render(C)\n\n except:\n # This is normally thrown when the user is invalid. Return the original result,\n # Only we add an icon to indicate an invalid user.\n return '<img src=\"/static/user_error.png\" alt=\"user\" border=\"0\" />%s' % (user)",
"def renderProfile(request, user, identities):\n sourcesResults = lifestream.models.Feed.objects.order_by('url').filter(user__username=user.username)\n sources = []\n for s in sourcesResults:\n if s.title:\n sources.append({'title': s.title, 'url': s.url})\n \n # avatar\n \n gravatarHash = hashlib.md5(user.email).hexdigest()\n avatar_url = \"http://www.gravatar.com/avatar/%s.jpg?d=monsterid&s=80\" % gravatarHash\n \n t = django.template.loader.select_template(('foo', 'lifestream/profile_blurb.html'))\n c = django.template.Context(\n {'avatar_src': avatar_url, 'avatar_width':'80', 'avatar_height':'80',\n 'user': user,\n 'username': user.username,\n 'preferences': json.loads(user.get_profile().properties),\n 'sources': sources,\n 'identities': identities})\n return t.render(c)",
"def format_url(self, url, text):\r\n return u'<a href=\"%s\">%s</a>' % (escape(url), text)",
"def expand_user_refs(body, user_pattern):\n def repl(m):\n attributes = {}\n for a in RE_ATTRIBUTE.finditer(m.group(1)):\n a, v = a.group(1), a.group(2)\n attributes[a.lower()] = html.unescape(v)\n # Some <user...> elements have anomalous attributes\n for alt in ['name', 'comm']:\n if alt in attributes:\n attributes['user'] = attributes[alt]\n # Users with an underscore in the name turn into dashes when in a URL\n attributes['label'] = RE_UNDERSCORE.sub(\"-\", attributes['user'])\n if 'site' in attributes:\n # If a site is specified follow that\n attributes['url'] = 'https://{label}.{site}/profile'.format(\n **attributes)\n else:\n # Otherwise assume it's on this site\n attributes['url'] = user_pattern.format(attributes[\"label\"])\n return \"<a class=user href=\\\"{}\\\">{}</a>\".format(html.escape(attributes['url']), html.escape(attributes['user']))\n return RE_USER_REF.sub(repl, body)",
"def twitter_text(\n self,\n text: str,\n urls: List[Dict[str, str]],\n user_mentions: List[Dict[str, Any]],\n media: List[Dict[str, Any]],\n ) -> Element:\n\n to_process: List[Dict[str, Any]] = []\n # Build dicts for URLs\n for url_data in urls:\n to_process.extend(\n {\n \"type\": \"url\",\n \"start\": match.start(),\n \"end\": match.end(),\n \"url\": url_data[\"url\"],\n \"text\": url_data[\"expanded_url\"],\n }\n for match in re.finditer(re.escape(url_data[\"url\"]), text, re.IGNORECASE)\n )\n # Build dicts for mentions\n for user_mention in user_mentions:\n screen_name = user_mention[\"screen_name\"]\n mention_string = \"@\" + screen_name\n to_process.extend(\n {\n \"type\": \"mention\",\n \"start\": match.start(),\n \"end\": match.end(),\n \"url\": \"https://twitter.com/\" + urllib.parse.quote(screen_name),\n \"text\": mention_string,\n }\n for match in re.finditer(re.escape(mention_string), text, re.IGNORECASE)\n )\n # Build dicts for media\n for media_item in media:\n short_url = media_item[\"url\"]\n expanded_url = media_item[\"expanded_url\"]\n to_process.extend(\n {\n \"type\": \"media\",\n \"start\": match.start(),\n \"end\": match.end(),\n \"url\": short_url,\n \"text\": expanded_url,\n }\n for match in re.finditer(re.escape(short_url), text, re.IGNORECASE)\n )\n # Build dicts for emojis\n for match in POSSIBLE_EMOJI_RE.finditer(text):\n orig_syntax = match.group(\"syntax\")\n codepoint = emoji_to_hex_codepoint(unqualify_emoji(orig_syntax))\n if codepoint in codepoint_to_name:\n display_string = \":\" + codepoint_to_name[codepoint] + \":\"\n to_process.append(\n {\n \"type\": \"emoji\",\n \"start\": match.start(),\n \"end\": match.end(),\n \"codepoint\": codepoint,\n \"title\": display_string,\n }\n )\n\n to_process.sort(key=lambda x: x[\"start\"])\n p = current_node = Element(\"p\")\n\n def set_text(text: str) -> None:\n \"\"\"\n Helper to set the text or the tail of the current_node\n \"\"\"\n if current_node == p:\n current_node.text = text\n else:\n current_node.tail = text\n\n db_data: Optional[DbData] = self.zmd.zulip_db_data\n current_index = 0\n for item in to_process:\n # The text we want to link starts in already linked text skip it\n if item[\"start\"] < current_index:\n continue\n # Add text from the end of last link to the start of the current\n # link\n set_text(text[current_index : item[\"start\"]])\n current_index = item[\"end\"]\n if item[\"type\"] != \"emoji\":\n elem = url_to_a(db_data, item[\"url\"], item[\"text\"])\n assert isinstance(elem, Element)\n else:\n elem = make_emoji(item[\"codepoint\"], item[\"title\"])\n current_node = elem\n p.append(elem)\n\n # Add any unused text\n set_text(text[current_index:])\n return p",
"def test_basic(self):\n tweet_object = self.load_tweet('basic')\n tweet_text = self.api.html_for_tweet(tweet_object)\n self.assertEqual(tweet_text,\n '<a href=\"http://t.co/FCmXyI6VHd\" class=\"twython-url\">google.com</a> is a <a href=\"https://twitter.com/search?q=%23cool\" class=\"twython-hashtag\">#cool</a> site, lol! <a href=\"https://twitter.com/mikehelmick\" class=\"twython-mention\">@mikehelmick</a> shd <a href=\"https://twitter.com/search?q=%23checkitout\" class=\"twython-hashtag\">#checkitout</a>. Love, <a href=\"https://twitter.com/__twython__\" class=\"twython-mention\">@__twython__</a> <a href=\"https://t.co/67pwRvY6z9\" class=\"twython-url\">github.com</a> <a href=\"http://t.co/N6InAO4B71\" class=\"twython-media\">pic.twitter.com/N6InAO4B71</a>')",
"def format_username(self, at_char, user):\r\n return u'<a href=\"http://{domain}/user/{user}\" data-user=\"{user}\">{char}{user}</a>'.format(\r\n **dict(domain=self.domain, user=user, char=at_char, text=user))\r\n\r\n #return u'<a href=\"http://%s/user/%s\" data-user=\"\">%s%s</a>' \\\r\n # % (self.domain, user, at_char, user)\r",
"def user(inp):\n user = inp.text.lower().replace(' ', '-')\n return 'http://www.wikidot.com/user:info/' + user",
"def username_test(self):\n text = 'test @username'\n html = 'test <a href=\"https://www.instagram.com/username/\">@username</a>'\n self.assertEqual(linkify_text(text), html)",
"def clean_tweet(tweet): \n #Remove URL\n tweet = re.sub('\\w+:\\/{2}[\\d\\w-]+(\\.[\\d\\w-]+)*(?:(?:\\/[^\\s/]*))*', ' ', tweet) \n #Remove user\n tweet = re.sub('@[^\\s]+','',tweet)\n #Replace #word with word\n tweet = re.sub(r'#([^\\s]+)', ' ', tweet) \n return tweet",
"def linkified_description(self):\n links = []\n def linkify(matchobj, links=links):\n if '|' in matchobj.group(1):\n url = matchobj.group(1).split('|')\n link = format_html('<a href=\"{0}\" target=\"_blank\">{1}</a>', url[0], url[1])\n else:\n link = format_html('<a href=\"{0}\" target=\"_blank\">{1}</a>', self.url, matchobj.group(1))\n links.append(link)\n return '{%d}' % (len(links) - 1)\n\n fmt = re.sub(r'\\[\\[([^\\]]+)\\]\\]', linkify, self.description)\n return format_html(fmt, *links)",
"def __full_tweet_text(cls, link_t):\n\n try:\n page = requests.get(link_t)\n tree = html.fromstring(page.content)\n text = tree.xpath('//div[contains(@class, \\\n \"permalink-tweet-container\")]//p[contains(@class,\\\n \"tweet-text\")]//text()')\n\n for i in range(len(text)):\n if (text[i][:4] == \"pic.\") or (\n text[i][:7] == \"http://\") or (\n text[i][:4] == \"www.\") or (text[i][:8] == \"https://\"):\n\n text[i] = \" \" + text[i]\n if text[i] == \"\\xa0\" or text[i] == \"…\":\n text[i] = \"\"\n\n return \"\".join(text)\n except Exception as e:\n print(e)\n return \"\"",
"def stalk(user):\n now = datetime.now()\n api = requests.get(get_profile(user))\n res = api.json()\n count_api_url = os.environ.get('CONTRI_API')\n profile = \"\"\n \n if res.get('error'):\n return \"Invalid GitHub username!\"\n \n if api.status_code == 200:\n pic = \"<a href='{}?a={}'>‍</a>\".format(\n res[\"avatar_url\"], datetime.now().isoformat())\n # The above line is hack of the year.\n profile += pic\n for data in res:\n url = data.endswith('url')\n ids = data.endswith('id')\n hireable = data.endswith('hireable')\n type_ = data.endswith('type')\n admin = data.endswith('admin')\n updated = data.endswith('updated_at')\n if url or ids or type_ or admin or updated:\n pass\n else:\n copy = data\n copy_res = res[data]\n if copy == \"created_at\":\n copy = \"Joined\"\n copy_res = copy_res.split('T')[0]\n if copy == \"hireable\":\n copy = \"Hireable?\"\n copy_res = \"Hell Yeah!\" if copy_res else \"Nay\"\n if copy_res != None:\n profile += \"<b>{}:</b> {}\\n\".format(\n str(copy.title().replace(\"_\", \" \")), escape(str(copy_res)))\n # Yeah I know that's too much of hacks\n if res['type'] == \"User\":\n streak, contri = streak_handler(user)\n include_today = \"Hope you commit today :p\" if contri == 0 else \"Glad you committed today! :)\"\n profile += \"<b>Today's Contribution:</b> {}\\n\".format(contri)\n profile += \"<b>Current Streak:</b> {} days ({})\".format(\n streak, include_today)\n\n else:\n # Serious shit\n error_messages = {\n 404: \"User with username {} does not exists, please check and try again\".format(user),\n 403: \"API rate limit exceeded for IP address\"\n }\n # Using Jio?\n fallback_error_message = (\n \"Something went wrong, please check your internet connection \\n\"\n \"Type help for Help\"\n )\n profile = error_messages.get(api.status_code, fallback_error_message)\n return profile",
"def get_content(self) -> str:\n content = StringIO()\n tweet = self.inner\n if self.is_retweet:\n content.write('<p>{name} Retweeted</p>\\n'.format(name=self.display_name))\n content.write(EnhancedTweet(tweet.retweeted_status).get_content())\n return content.getvalue()\n\n if self.is_reply:\n reply_url = _get_tweet_url(tweet.in_reply_to_screen_name, tweet.in_reply_to_status_id)\n content.write(\n '<p>Replying to <a href=\"{reply_url}\">@{username}</a></p>\\n'.format(\n reply_url=reply_url, username=tweet.in_reply_to_screen_name\n )\n )\n\n content.write('<blockquote>\\n')\n self._add_sanitized_text(content)\n self._add_media(content)\n\n content.write('</blockquote>\\n')\n content.write(\n '<p><img src=\"{img_url}\" width=\"32\" height=\"32\" class=\"alignleft\" /> '\n '-- {name} (@{username}) <a href=\"{url}\">{created_at}</a></p>\\n'.format(\n img_url=tweet.user.profile_image_url_https,\n name=self.display_name,\n username=self.username,\n url=self.url,\n created_at=tweet.created_at,\n )\n )\n\n if self.has_quoted:\n content.write('<p>{name} tweeted this while quoting the below tweet.</p>\\n'.format(name=self.display_name))\n content.write(EnhancedTweet(tweet.quoted_status).get_content())\n\n return content.getvalue()"
]
| [
"0.626007",
"0.61623025",
"0.5927306",
"0.5736171",
"0.56661755",
"0.56513816",
"0.5638651",
"0.56263965",
"0.56249464",
"0.55535585",
"0.54890394",
"0.5486235",
"0.543844",
"0.53785974",
"0.53651506",
"0.5362454",
"0.53605413",
"0.53557855",
"0.53542066",
"0.5318884",
"0.53144276",
"0.5313101",
"0.5303389",
"0.5297108",
"0.528245",
"0.52385336",
"0.5193145",
"0.51863354",
"0.5181567",
"0.5160814"
]
| 0.73408073 | 0 |
Returns a list of banned IPs | def getBanIps(self):
banned = []
q = """SELECT clients.ip as target_ip FROM penalties INNER JOIN clients ON penalties.client_id = clients.id
WHERE penalties.type = 'Ban' AND penalties.inactive = 0 AND penalties.time_expire = -1
GROUP BY clients.ip"""
cursor = self.query(q)
if cursor:
while not cursor.EOF:
banned.append(cursor.getValue('target_ip'))
cursor.moveNext()
cursor.close()
return banned | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def blacklist_ips(self):\r\n if self.blacklist == '':\r\n return []\r\n return self.IPFilterList([addr.strip() for addr in self.blacklist.split(',')]) # pylint: disable=no-member\r",
"def getTempBanIps(self):\n banned = []\n q = \"\"\"SELECT clients.ip AS target_ip FROM penalties INNER JOIN clients ON penalties.client_id = clients.id\n WHERE penalties.type = 'TempBan' AND penalties.inactive = 0 AND penalties.time_expire > %s\n GROUP BY clients.ip\"\"\" % int(time())\n cursor = self.query(q)\n if cursor:\n while not cursor.EOF:\n banned.append(cursor.getValue('target_ip'))\n cursor.moveNext()\n cursor.close()\n return banned",
"def get_banned(self):\n return self.execute(TABELLE['id_users']['select']['banned'])",
"def ip_whitelists(self) -> pulumi.Output[Sequence['outputs.InstanceIpWhitelist']]:\n return pulumi.get(self, \"ip_whitelists\")",
"def whitelist_ips(self):\r\n if self.whitelist == '':\r\n return []\r\n return self.IPFilterList([addr.strip() for addr in self.whitelist.split(',')]) # pylint: disable=no-member\r",
"def listBlockedIpAddresses(ip_addresses):\n for ip_address in ip_addresses[:500]:\n hostname = ip_address\n if resolve_ipaddress:\n try:\n hostname = str(resolver.query(reversename.from_address(ip_address), \"PTR\")[0])\n except:\n hostname = None\n logger.info('%s (%s)' % (ip_address, hostname))",
"def allowed_ips(self):\n\n return value_list_to_comma('AllowedIPs', self._peer.allowed_ips)",
"def getBaseIP(url: str) -> list:\n \n response = requests.get(url) #get data \n\n ip_sets = response.text\n ip_list = re.findall(r'(?:\\d{1,3}\\.)+(?:\\d{1,3})', ip_sets)\n \n return ip_list",
"def test_exclude_ip_ban(self):\n pass",
"def block_list(to_block_list, blocked_ips_list):\n to_be_blocked_list = []\n for host in to_block_list:\n found_ip = False\n host_ip = host['host']['ip_address']\n for blocked in blocked_ips_list:\n if blocked['ip'] == host_ip:\n found_ip = True\n # if we want to block already blocked IP, nothing happens,\n # but if the host IP was not found in blocked IPs, block it\n if not found_ip:\n to_be_blocked_list.append(host_ip)\n return to_be_blocked_list",
"def get_federation_iprange_blacklist(self):\n blacklist = self.charm_config[\"federation-ip-range-blacklist\"]\n return list(filter(None, blacklist.split(\",\")))",
"def user_ip_list(uid):\r\n session = tables.get_session()\r\n res = []\r\n if session is None:\r\n return res\r\n try:\r\n ip_table = IpAddrs()\r\n res.extend(ip_table.get_ips_by_uid(uid, session))\r\n except SQLAlchemyError as err:\r\n LOGGER.error('Get user ip list failed: %s', err)\r\n return []\r\n finally:\r\n session.close()\r\n return res",
"def ip_whitelists(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['InstanceIpWhitelistArgs']]]]:\n return pulumi.get(self, \"ip_whitelists\")",
"def ip_whitelists(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['InstanceIpWhitelistArgs']]]]:\n return pulumi.get(self, \"ip_whitelists\")",
"def unblock_list(blocked_ips_list, to_block_list):\n to_be_unblocked_list = []\n for blocked in blocked_ips_list:\n found_ip = False\n blocked_ip = blocked['ip']\n for host in to_block_list:\n if host['host']['ip_address'] == blocked_ip:\n found_ip = True\n # if the blocked_ip was not found in list of blockings, unblock it\n if not found_ip:\n to_be_unblocked_list.append(blocked_ip)\n return to_be_unblocked_list",
"def get_blocked_ips(logger, dashboard_log, firewall_ip_and_port):\n try:\n request = requests.get(f\"http://{firewall_ip_and_port}/firewall/blocked\")\n if request.ok:\n return request.json()\n else:\n logger.warning(f\"Getting blocked IPs on firewall failed with code {request.status_code}\")\n dashboard_log.append({\"message\": f\"Getting blocked IPs on firewall failed with code {request.status_code}\",\n \"time\": time.time()})\n except requests.exceptions.ConnectionError as e:\n logger.error(f\"Can't connect to firewall wrapper. {e}\")\n dashboard_log.append({\"message\": \"Can't connect to firewall wrapper.\",\n \"time\": time.time()})\n # error, continue program",
"def parse_ignore_cidr_option(cidrlist):\n l = list()\n for c in cidrlist.split(','):\n try:\n s = c.strip(' ')\n i = IP(s)\n l.append(i)\n except ValueError as e:\n logging.warning('Received invalid CIDR in ignore_cidr: {}'.format(e))\n return l",
"def grab_ips(self):\n parse_log = open(self.xmlrpc_log, 'r')\n for entry in parse_log:\n just_ip = entry.split()\n ip = just_ip[0]\n self.ip_list.append(ip)\n ip_set = set(self.ip_list)\n ips = list(ip_set)\n return ips",
"def discovered_ips(self) -> Sequence[str]:\n return pulumi.get(self, \"discovered_ips\")",
"async def global_unban(self, ctx: commands.Context, *, name: str):\n if re.match(r'^[\\d\\.-]*$', name) is None:\n query = 'SELECT ip FROM ddnet_bans WHERE name = $1;'\n ips = [r['ip'] for r in await self.bot.pool.fetch(query, name)]\n if not ips:\n return await ctx.send(f'`{escape_backticks(name)}` isn\\'t banned')\n else:\n ips = [name]\n\n for ip in ips:\n try:\n await self.ddnet_unban(ip)\n except RuntimeError as exc:\n await ctx.send(exc)\n else:\n await ctx.send(f'Successfully unbanned `{ip}`')",
"def get_blocked_usernames_list():\n return []",
"def _get_ip_addresses(ip_addresses):\n ret = []\n for item in ip_addresses:\n ret.append(item)\n\n return ret",
"def private_ip_addresses(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"private_ip_addresses\")",
"def IP_list(pwd):\r\n # Connect to the gmail server.\r\n mail = imaplib.IMAP4_SSL('imap.gmail.com')\r\n mail.login('[email protected]', pwd)\r\n\r\n # get mail IDs.\r\n mail.select('Inbox')\r\n typ, data = mail.search(None, '(From \"arc.pi.reg\")')\r\n mail_ids = data[0].decode()\r\n id_list = mail_ids.split()\r\n\r\n pi_ip = []\r\n\r\n # More description here.\r\n for id in id_list[::-1]:\r\n typ, msg_data = mail.fetch(id, '(BODY.PEEK[TEXT])')\r\n msg = msg_data[0][1].decode().strip()\r\n\r\n name, addr = msg.split(\" \")\r\n pi_ip.append((name, addr))\r\n\r\n return pi_ip",
"def get_ips():\r\n local_ips = []\r\n public_ips = []\r\n \r\n # list of iface names, 'lo0', 'eth0', etc.\r\n for iface in netifaces.interfaces():\r\n # list of ipv4 addrinfo dicts\r\n ipv4s = netifaces.ifaddresses(iface).get(netifaces.AF_INET, [])\r\n for entry in ipv4s:\r\n addr = entry.get('addr')\r\n #print(\"addr: \" + addr)\r\n if not addr:\r\n continue\r\n if not (iface.startswith('lo') or addr.startswith('127.')):\r\n public_ips.append(addr)\r\n else:\r\n local_ips.append(addr) \r\n return public_ips",
"def get_allowed_ipblocks(user):\n try:\n up = user.get_profile()\n except AttributeError:\n return []\n\n if user.has_perm(\"vnswww.ipblock_use_any\"):\n # Can use any blocks\n blocks = db.IPBlock.objects.filter()\n else:\n q_org = Q(org=up.org)\n q_childorg = Q(org=up.org.parentOrg, usable_by_child_orgs=True)\n print user.get_all_permissions()\n if user.has_perm(\"vnswww.ipblock_use_org\"):\n print \"Using blocks from own organization\"\n blocks = db.IPBlock.objects.filter(q_org | q_childorg)\n else:\n print \"Not using blocks from own organization\"\n blocks = []\n\n return blocks",
"def neighbors_ip(self):\n neighbors = self.neighbors()\n nei_list = []\n net_ip = self._rloc_ip_net_addr()\n if neighbors is not None:\n for nei_rec in neighbors:\n nei_ip = net_ip + hex(nei_rec.rloc16)[2:]\n nei_list.append(nei_ip)\n return nei_list",
"def get_permitted_ips():\n query = {\n \"type\": \"config\",\n \"action\": \"get\",\n \"xpath\": \"/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/permitted-ip\",\n }\n\n return __proxy__[\"panos.call\"](query)",
"def ban_all():\n sudo(\"varnishadm 'ban req.url ~ .'\")",
"def blacklist(self) -> List[str]:\n return self.raw_config.get(\"blacklist\", [])"
]
| [
"0.76528513",
"0.7535373",
"0.71156114",
"0.6923343",
"0.68603015",
"0.66740763",
"0.6577855",
"0.65607035",
"0.6518757",
"0.63871664",
"0.63259524",
"0.6294472",
"0.62581086",
"0.62581086",
"0.6232289",
"0.6191144",
"0.6184423",
"0.60499585",
"0.60441494",
"0.60439366",
"0.6037838",
"0.603244",
"0.60026723",
"0.59973013",
"0.5983854",
"0.5980765",
"0.59678674",
"0.5938387",
"0.5935892",
"0.5878379"
]
| 0.8300037 | 0 |
Returns a list of TempBanned IPs | def getTempBanIps(self):
banned = []
q = """SELECT clients.ip AS target_ip FROM penalties INNER JOIN clients ON penalties.client_id = clients.id
WHERE penalties.type = 'TempBan' AND penalties.inactive = 0 AND penalties.time_expire > %s
GROUP BY clients.ip""" % int(time())
cursor = self.query(q)
if cursor:
while not cursor.EOF:
banned.append(cursor.getValue('target_ip'))
cursor.moveNext()
cursor.close()
return banned | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getBanIps(self):\n banned = []\n q = \"\"\"SELECT clients.ip as target_ip FROM penalties INNER JOIN clients ON penalties.client_id = clients.id\n WHERE penalties.type = 'Ban' AND penalties.inactive = 0 AND penalties.time_expire = -1\n GROUP BY clients.ip\"\"\"\n cursor = self.query(q)\n if cursor:\n while not cursor.EOF:\n banned.append(cursor.getValue('target_ip'))\n cursor.moveNext()\n cursor.close()\n return banned",
"def blacklist_ips(self):\r\n if self.blacklist == '':\r\n return []\r\n return self.IPFilterList([addr.strip() for addr in self.blacklist.split(',')]) # pylint: disable=no-member\r",
"def get_banned(self):\n return self.execute(TABELLE['id_users']['select']['banned'])",
"def getBaseIP(url: str) -> list:\n \n response = requests.get(url) #get data \n\n ip_sets = response.text\n ip_list = re.findall(r'(?:\\d{1,3}\\.)+(?:\\d{1,3})', ip_sets)\n \n return ip_list",
"async def get_bans(self) -> 'typing.List[dt_user.User]':\n if not self.me.guild_permissions.ban_members:\n raise PermissionsError(\"ban_members\")\n\n bans = await self._bot.http.get_bans(self.id)\n users = []\n\n for user_data in bans:\n # TODO: Audit log stuff, if it ever comes out.\n user_data = user_data.get(\"user\", None)\n users.append(dt_user.User(self._bot, **user_data))\n\n return users",
"def whitelist_ips(self):\r\n if self.whitelist == '':\r\n return []\r\n return self.IPFilterList([addr.strip() for addr in self.whitelist.split(',')]) # pylint: disable=no-member\r",
"def block_list(to_block_list, blocked_ips_list):\n to_be_blocked_list = []\n for host in to_block_list:\n found_ip = False\n host_ip = host['host']['ip_address']\n for blocked in blocked_ips_list:\n if blocked['ip'] == host_ip:\n found_ip = True\n # if we want to block already blocked IP, nothing happens,\n # but if the host IP was not found in blocked IPs, block it\n if not found_ip:\n to_be_blocked_list.append(host_ip)\n return to_be_blocked_list",
"def ip_whitelists(self) -> pulumi.Output[Sequence['outputs.InstanceIpWhitelist']]:\n return pulumi.get(self, \"ip_whitelists\")",
"async def global_unban(self, ctx: commands.Context, *, name: str):\n if re.match(r'^[\\d\\.-]*$', name) is None:\n query = 'SELECT ip FROM ddnet_bans WHERE name = $1;'\n ips = [r['ip'] for r in await self.bot.pool.fetch(query, name)]\n if not ips:\n return await ctx.send(f'`{escape_backticks(name)}` isn\\'t banned')\n else:\n ips = [name]\n\n for ip in ips:\n try:\n await self.ddnet_unban(ip)\n except RuntimeError as exc:\n await ctx.send(exc)\n else:\n await ctx.send(f'Successfully unbanned `{ip}`')",
"def unblock_list(blocked_ips_list, to_block_list):\n to_be_unblocked_list = []\n for blocked in blocked_ips_list:\n found_ip = False\n blocked_ip = blocked['ip']\n for host in to_block_list:\n if host['host']['ip_address'] == blocked_ip:\n found_ip = True\n # if the blocked_ip was not found in list of blockings, unblock it\n if not found_ip:\n to_be_unblocked_list.append(blocked_ip)\n return to_be_unblocked_list",
"def get_blocked_usernames_list():\n return []",
"def grab_ips(self):\n parse_log = open(self.xmlrpc_log, 'r')\n for entry in parse_log:\n just_ip = entry.split()\n ip = just_ip[0]\n self.ip_list.append(ip)\n ip_set = set(self.ip_list)\n ips = list(ip_set)\n return ips",
"def test_exclude_ip_ban(self):\n pass",
"def get_existing_local_ips(self, count: int=1):\n\n if count <= 0:\n return []\n\n if count > len(self.remaining_local_ips):\n print(\"Warning: There are no more {} local IPs in the .pcap file. Returning all remaining local IPs.\".format(count))\n\n\n total = min(len(self.remaining_local_ips), count)\n\n retr_local_ips = []\n local_ips = self.remaining_local_ips\n for _ in range(0, total):\n random_local_ip = choice(sorted(local_ips))\n retr_local_ips.append(str(random_local_ip))\n local_ips.remove(random_local_ip)\n\n return retr_local_ips",
"def listBlockedIpAddresses(ip_addresses):\n for ip_address in ip_addresses[:500]:\n hostname = ip_address\n if resolve_ipaddress:\n try:\n hostname = str(resolver.query(reversename.from_address(ip_address), \"PTR\")[0])\n except:\n hostname = None\n logger.info('%s (%s)' % (ip_address, hostname))",
"def user_ip_list(uid):\r\n session = tables.get_session()\r\n res = []\r\n if session is None:\r\n return res\r\n try:\r\n ip_table = IpAddrs()\r\n res.extend(ip_table.get_ips_by_uid(uid, session))\r\n except SQLAlchemyError as err:\r\n LOGGER.error('Get user ip list failed: %s', err)\r\n return []\r\n finally:\r\n session.close()\r\n return res",
"def get_federation_iprange_blacklist(self):\n blacklist = self.charm_config[\"federation-ip-range-blacklist\"]\n return list(filter(None, blacklist.split(\",\")))",
"def get_existing_external_ips(self, count: int=1):\n\n if not (len(self.external_ips) > 0):\n print(\"Warning: .pcap does not contain any external ips.\")\n return []\n\n total = min(len(self.remaining_external_ips), count)\n retr_external_ips = []\n external_ips = self.remaining_external_ips\n\n for _ in range(0, total):\n random_external_ip = choice(sorted(external_ips))\n retr_external_ips.append(str(random_external_ip))\n external_ips.remove(random_external_ip)\n\n return retr_external_ips",
"def get_ips():\r\n local_ips = []\r\n public_ips = []\r\n \r\n # list of iface names, 'lo0', 'eth0', etc.\r\n for iface in netifaces.interfaces():\r\n # list of ipv4 addrinfo dicts\r\n ipv4s = netifaces.ifaddresses(iface).get(netifaces.AF_INET, [])\r\n for entry in ipv4s:\r\n addr = entry.get('addr')\r\n #print(\"addr: \" + addr)\r\n if not addr:\r\n continue\r\n if not (iface.startswith('lo') or addr.startswith('127.')):\r\n public_ips.append(addr)\r\n else:\r\n local_ips.append(addr) \r\n return public_ips",
"def _new_recovery_broadcast_arps(victim_ips: Iterable[str]) -> List[ARP]:\n victim_macs = _request_macs(victim_ips)\n return [ARP(psrc=v_ip, pdst=v_ip, hwsrc=v_mac)\n for v_ip, v_mac in victim_macs.items()]",
"def ip_get_free(net=\"a\"):\n tnet = net.upper()\n\n # start Requests session\n sc = requests.Session()\n\n # import cookies from Firefox\n sc.cookies.update(get_cookies('imhsc.imhadmin.net'))\n\n # send request\n vpx = sc.get('https://imhsc.imhadmin.net/index.php',\n params={'v': \"IPManager\", 'net': tnet, 'pool': \"12\"})\n\n # check if login failed\n check_sc_login(vpx.text)\n\n # parse with BS4\n bs = BeautifulSoup(vpx.text, \"xml\")\n\n iplist = []\n for tip in bs.table.tbody.find_all('tr'):\n # get IP id\n try:\n t_id = re.match(r'.+id=([0-9]+).+', tip.find_all('td')[8].a['href'], re.I).group(1)\n except:\n t_id = False\n\n # gather IP infos\n t_info = {\n 'id': t_id,\n 'ip': tip.find_all('td')[0].string,\n 'domain': tip.find_all('td')[1].string,\n 'server': tip.find_all('td')[2].string,\n 'net': tip.find_all('td')[3].string,\n 'user': tip.find_all('td')[5].string,\n 'assigned': tip.find_all('td')[6].string,\n 'edit_url': tip.find_all('td')[8].a['href']\n }\n iplist.append(t_info)\n\n return iplist",
"def list_uptime_check_ips() -> pagers.ListUptimeCheckIpsPager:\n client = monitoring_v3.UptimeCheckServiceClient()\n ips = client.list_uptime_check_ips(request={})\n print(\n tabulate.tabulate(\n [(ip.region, ip.location, ip.ip_address) for ip in ips],\n (\"region\", \"location\", \"ip_address\"),\n )\n )\n return ips",
"def ban_all():\n sudo(\"varnishadm 'ban req.url ~ .'\")",
"def get_permitted_ips():\n query = {\n \"type\": \"config\",\n \"action\": \"get\",\n \"xpath\": \"/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/permitted-ip\",\n }\n\n return __proxy__[\"panos.call\"](query)",
"def get_used_ips():\n\n all_ips = list()\n\n topologies = Topology.objects.all()\n\n ip_pattern = '\\d+\\.\\d+\\.\\d+\\.\\d+'\n for topology in topologies:\n try:\n json_data = json.loads(topology.json)\n except ValueError as ve:\n logger.error(ve)\n logger.error(\"Could not parse saved topology with id: %s\" % topology.id)\n continue\n\n for json_object in json_data:\n\n if \"userData\" in json_object and json_object[\"userData\"] is not None and \"ip\" in json_object[\"userData\"]:\n ud = json_object[\"userData\"]\n ip = ud[\"ip\"]\n if re.match(ip_pattern, ip) is None:\n logger.info('Found an invalid IP on topology: %s' % topology.id)\n logger.info(\"Invalid IP is %s\" % ip)\n logger.info(type(ip))\n continue\n\n last_octet = ip.split('.')[-1]\n # logger.debug(topology.id)\n # logger.info(\"'%s'\" % ip)\n # logger.info(last_octet)\n all_ips.append(int(last_octet))\n\n dhcp_leases = get_consumed_management_ips()\n all_ips.extend(dhcp_leases)\n\n logger.debug(\"sorting and returning all_ips\")\n all_ips.sort()\n return all_ips",
"def get_blocked_ips(logger, dashboard_log, firewall_ip_and_port):\n try:\n request = requests.get(f\"http://{firewall_ip_and_port}/firewall/blocked\")\n if request.ok:\n return request.json()\n else:\n logger.warning(f\"Getting blocked IPs on firewall failed with code {request.status_code}\")\n dashboard_log.append({\"message\": f\"Getting blocked IPs on firewall failed with code {request.status_code}\",\n \"time\": time.time()})\n except requests.exceptions.ConnectionError as e:\n logger.error(f\"Can't connect to firewall wrapper. {e}\")\n dashboard_log.append({\"message\": \"Can't connect to firewall wrapper.\",\n \"time\": time.time()})\n # error, continue program",
"async def get_blacklisted_users() -> list:\n async with aiosqlite.connect(DATABASE_PATH) as db:\n async with db.execute(\n \"SELECT user_id, strftime('%s', created_at) FROM blacklist\"\n ) as cursor:\n result = await cursor.fetchall()\n return result",
"def allowed_ips(self):\n\n return value_list_to_comma('AllowedIPs', self._peer.allowed_ips)",
"def IP_list(pwd):\r\n # Connect to the gmail server.\r\n mail = imaplib.IMAP4_SSL('imap.gmail.com')\r\n mail.login('[email protected]', pwd)\r\n\r\n # get mail IDs.\r\n mail.select('Inbox')\r\n typ, data = mail.search(None, '(From \"arc.pi.reg\")')\r\n mail_ids = data[0].decode()\r\n id_list = mail_ids.split()\r\n\r\n pi_ip = []\r\n\r\n # More description here.\r\n for id in id_list[::-1]:\r\n typ, msg_data = mail.fetch(id, '(BODY.PEEK[TEXT])')\r\n msg = msg_data[0][1].decode().strip()\r\n\r\n name, addr = msg.split(\" \")\r\n pi_ip.append((name, addr))\r\n\r\n return pi_ip",
"def get_vms(self, user=None, count=None):\n crit = dict()\n if count is not None:\n crit['count'] = count\n s = self._NDL_API('getvms', crit, user)\n if len(s) == 0:\n return []\n ips = s.split(',')\n # if the localhost's IP is in the list, move it to the front\n localips = getmyips()\n for i in range(len(ips)):\n if ips[i] in localips:\n x = ips[i]\n del ips[i]\n return [ x, ] + ips\n # otherwise order does not matter?\n return ips"
]
| [
"0.7872632",
"0.69478935",
"0.6512142",
"0.62523556",
"0.613556",
"0.61001235",
"0.6085994",
"0.60793865",
"0.6066895",
"0.602393",
"0.6016197",
"0.598317",
"0.59719723",
"0.59685814",
"0.5909207",
"0.5827688",
"0.57749563",
"0.5765984",
"0.57615256",
"0.5756254",
"0.57404757",
"0.57295144",
"0.57252544",
"0.5707975",
"0.5660473",
"0.56537795",
"0.5628622",
"0.56152487",
"0.5607297",
"0.5586252"
]
| 0.86281383 | 0 |
Will extract information for the "ip_address", "user_agent" and "locale" properties from the given HTTP header dictionary. | def from_headers(self, headers):
try:
# First IP address is the one of the client
ip = headers['X_FORWARDED_FOR'].split(',')[0].strip()
except KeyError:
ip = headers.get('REMOTE_ADDR')
if ip:
            # Double-check that the address has a valid IPv4 format
            if not re.match(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', ip):
                ip = None
            # Exclude private IP address ranges
            elif re.match(r'^(?:127\.0\.0\.1|10\.|192\.168\.|172\.(?:1[6-9]|2[0-9]|3[0-1])\.)', ip):
                ip = None
self.ip_address = ip
self.user_agent = headers.get('HTTP_USER_AGENT')
if 'HTTP_ACCEPT_LANGUAGE' in headers:
parsed_locales = []
res = re.findall(
r'(^|\s*,\s*)([a-zA-Z]{1,8}(-[a-zA-Z]{1,8})*)\s*(;\s*q\s*=\s*(1(\.0{0,3})?|0(\.[0-9]{0,3})))?',
headers['HTTP_ACCEPT_LANGUAGE'], re.I)
for r in res:
name = r[1].replace('-', '_')
value = 1 if not r[4] else r[4]
parsed_locales += [(name, value)]
self.locale = sorted(parsed_locales, key=lambda x: x[1],
reverse=True)[0][0]
return self | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_headers(request: Dict[str, str]) -> Tuple[str, Dict[str, str]]:\n host = None\n headers = {}\n for name, value in request['META'].items():\n if name == \"HTTP_HOST\":\n host = value\n continue # comment to preserve host header, but eventual output contains host twice.\n if name.startswith('HTTP_'):\n headers[convert_header_names(name[5:])] = value.replace('\"', r'\\\"')\n assert host is not None, \"HTTP_HOST not found in request headers.\"\n return host, headers",
"def _get_headers(self, request):\n headers = {}\n for key, value in request.META.items():\n if key.startswith('HTTP_') and key != 'HTTP_HOST':\n headers[key[5:].replace('_', '-')] = value\n elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH') and value:\n headers[key.replace('_', '-')] = value\n\n if request.user:\n headers['PARTNER-EMAIL'] = request.user.email\n lang = self._get_lang(request)\n if lang:\n headers['ACCEPT-LANGUAGE'] = lang\n return headers",
"def get_ip(request):\n # lower the\n key = settings.IP_HEADER_KEY\n meta = request.META\n\n # Lowercase keys\n simple_meta = {k.lower(): v for k, v in request.META.items()}\n\n ip = meta.get(key, simple_meta.get(key, '0.0.0.0'))\n\n return ip",
"def test_get_ip_from_headers(self):\n response = self.client.get(self.voter_location_url, REMOTE_ADDR='69.181.21.132')\n self.assertEqual(response.status_code, 200)\n json_data = json.loads(response.content.decode())\n self.assertEqual(json_data['success'], True)\n self.assertEqual(json_data['voter_location_found'], True)",
"def parse_header(header_lines):\n info = {}\n for line in header_lines:\n if line.startswith('Citation'):\n info['Citation'] = line.split()[-1].strip()\n elif ':' in line:\n try:\n field, value = map(strip,line.split(':',1))\n info[field] = value\n except ValueError:\n #no interesting header line\n continue\n else:\n continue\n return Info(info)",
"def get_headers(req):\n user = req.headers.get('X-User-ID', None)\n tenant = req.headers.get('X-Tenant-ID', None)\n return user, tenant",
"def parse_header(self):",
"def header_values(header, mail):\n\tif header not in mail.keys():\n\t\traise HeaderMissed(header)\n\tvalues = [header_decode(mail[header])]\n\tif header in HEADER_ADDRESS_FIELDS:\n\t\treturn [email.utils.formataddr(x) for x in email.utils.getaddresses(values)]\n\treturn values",
"def _unpack_headers(self, headers):\n return dict((k,v[0]) for (k,v) in headers.getAllRawHeaders())",
"def get_email_details(header: str) -> dict:\n # this is one way to solve the exercise\n # result_keys = [\"from\", \"to\", \"subject\", \"date\"]\n # search_strings = [\n # r\"From\\:\\s(.*)\",\n # r\"To\\:\\s(.*)\",\n # r\"Subject\\:\\s(.*)\",\n # r\"Date\\:\\s(.*)\\s[+-]\",\n # ]\n # result_values = [re.search(s, EMAIL_HEADER).group(1) for s in search_strings]\n # print(dict(zip(result_keys, result_values)))\n\n # or we could use groupdict as suggested\n m = re.search(\n r\"From\\:\\s(?P<from>.*)\\n.*To\\:\\s(?P<to>.*)\\n.*Subject\\:\\s(?P<subject>.+?)\\n.*Date\\:\\s(?P<date>.*)\\s[+-]\",\n header,\n re.MULTILINE | re.DOTALL,\n )\n return m.groupdict() if m else None",
"def unpack_header(header):\n header_values = {}\n for line in header.split('\\n'):\n tokens = line.split('=')\n if len(tokens) > 1:\n header_values[tokens[0].strip()] = tokens[1].split(';')[0].strip()\n return header_values",
"def get_data_for_header(req):\n try:\n user_id = req.user\n except KeyError as e:\n msg = req.get_error_msg(e)\n return send_error_response(msg)\n try:\n header_data = dict()\n lang = rt.get_state(user_id).language\n #TODO change on database access\n header_data['languages'] = common_getter.get_languages_list(pt, lang)\n header_data['user'] = common_getter.get_user_info(pt, user_id, lang)\n header_data['client'] = common_getter.get_client_info(pt, user_id, lang)\n return send_success_response(header_data)\n except Exception as e:\n msg = req.get_error_msg(e, lang=lang)\n return send_error_response(msg)",
"def __process_ipframe(self, ip_header: bytes) -> Dict[str, Any]:\n # Extract the 20 bytes IP header, ignoring the IP options\n iph = struct.unpack('!BBHHHBBH4s4s', ip_header)\n version = (iph[0] >> 4) & 0xF\n length = iph[2]\n ihl = (iph[0] & 0xF) * 4\n ttl = iph[5]\n proto = iph[6]\n\n if ihl < Sniffer.IP_HEADER_LENGTH:\n raise InvalidPacketException(\n 'Invalid IP length {}'.format(ihl),\n )\n\n if version != 4:\n raise UnknownPacketException(\n 'Unknown IP version {}'.format(version),\n )\n\n s_addr = socket.inet_ntoa(iph[8])\n d_addr = socket.inet_ntoa(iph[9])\n\n return {\n 'header_length': ihl,\n 'version': version,\n 'length': length,\n 'ttl': ttl,\n 'protocol': proto,\n 'source_address': s_addr,\n 'destination_address': d_addr,\n }",
"def get_email_details(header: str) -> dict:\r\n try:\r\n m = re.match(\r\n r\"\"\"\r\n ([\\w\\W]* # remove lines \r\n (\r\n ^Date: \\s*(?P<date>[\\w\\W]{25}) # obtain date (\"date\")\r\n |^From: \\s*(?P<from>[\\w\\W]*?$) # obtain sender (\"from\")\r\n |^To: \\s*(?P<to>[\\w\\W]*?$) # obtain receiver (\"to\")\r\n |^Subject: \\s*(?P<subject>[\\w\\W]*?$) # obtain subject (\"subject\")\r\n )){4}\r\n \"\"\",\r\n header,\r\n re.VERBOSE | re.MULTILINE,\r\n )\r\n\r\n return m.groupdict()\r\n\r\n except:\r\n return None",
"def process_header_request(self, request, http_s_obj):\n response_dict = {}\n data = request.split(\"\\r\\n\\r\\n\")\n header_info = data[0].split(\"\\r\\n\")\n headers = self.updateheader(header_info, http_s_obj)\n response_dict.update({'type': header_info[0].split()[0]})\n response_dict.update({'headers': headers})\n body = data[1]\n response_dict.update({'data': body})\n path = header_info[0].split()[1]\n if path.find('?') != -1:\n split_sym = '?'\n if path.find('&') != -1:\n split_sym = '&'\n try:\n req = path.split(split_sym)\n path = req[0]\n query = req[1]\n except Exception as e:\n query = ''\n response_dict.update({'path': path})\n response_dict.update({'query': query})\n\n return response_dict",
"def _get_client_info():\n if hasattr(request.authorization, 'username'):\n auth_user = request.authorization.username\n else:\n auth_user = 'Unknown'\n info = request.headers\n origin_string = info.get(\"User-Agent\", \"\")\n origin_props = {}\n if origin_string:\n try:\n origin_props = dict(\n [_.split(\"/\", 1) for _ in origin_string.split()]\n )\n except ValueError:\n pass\n prog_name = origin_props.get(\"prog_name\", \"Unknown\")\n uuid = origin_props.get(\"uuid\", uuid4())\n host = info.get(\"Host\", \"Unknown\")\n if info.get(\"From\") and \"@\" in info[\"From\"]:\n user = info[\"From\"].split(\"@\")[0]\n else:\n user = (\"Unknown\")\n return auth_user, prog_name, user, host, uuid",
"def _parse_wsgi_headers(wsgi_environ):\n prefix = 'HTTP_'\n p_len = len(prefix)\n # use .items() despite suspected memory pressure bc GC occasionally\n # collects wsgi_environ.iteritems() during iteration.\n headers = {\n key[p_len:].replace('_', '-').lower():\n val for (key, val) in wsgi_environ.items()\n if key.startswith(prefix)}\n return headers",
"def __parseHeaders(headers):\n global __all_headers\n if headers and len(headers) > 0:\n for header in headers:\n name = header.getElementsByTagName(\"name\")[0].childNodes[0].data\n value = header.getElementsByTagName(\"value\")[0].childNodes[0].data\n __addHeader(name, value)\n #print(__all_headers)",
"def _parse_headers(headers):\n try:\n return dict(header.split(\":\") for header in headers)\n except:\n raise ValueError(\"Invalid headers %s\" % headers)",
"def make_headers(self):\n return {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US;\\\n rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'}",
"def _extract_metadata(self, header, cleaner):\n metadata = []\n for k, v in header.items():\n key = str(cleaner(k)) # clean key and ensure it is a string\n val = str(cleaner(v)) # clean value and ensure it is a string\n if (key and val):\n metadata.append(Metadatum(key, val))\n return metadata",
"def parse_header(self, header):\n\n m = re.search(HEADER_REGEX, header)\n if m:\n type = m.group(1)\n version = m.group(2)\n return type, version\n else:\n return None",
"def parse_cookies( headers ):",
"def _headers(self) -> Mapping[str, str]:\n return self.auth.headers() if self.auth else {}",
"def get_fields_from_header (header, ignore=FITS_IGNORE_KEYS):\n hdrs = dict()\n filtered = [ card for card in header.items() if (card[0] not in ignore) ]\n hdrs.update(filtered)\n return hdrs",
"def __getHeaderInfo(self, decoded_data):\n\t\tip = decoded_data.child()\n\t\ttcp = ip.child()\n\t\t#src = (ip.get_ip_src(), tcp.get_th_sport())\n\t\ttry:\tsrc = ip.get_ip_src()\n\t\texcept:\tsrc = '?'\n\t\t#dst = (ip.get_ip_dst(), tcp.get_th_dport())\n\t\ttry:\tdst = ip.get_ip_dst()\n\t\texcept:\tdst = '?'\n\t\t#data = tcp.get_data_as_string()\n\t\tdata = tcp.get_packet()\n\t\treturn (src, dst, data)",
"def parseHeader(header):\n tokens = [t for t in header.split(' ') if t]\n result = {}\n for i in range(len(tokens)):\n result[tokens[i]] = i \n\n return result",
"def get_headers() -> dict:\n\n return {\"Connection\": \"keep-alive\",\n \"Cache-Control\": \"max-age=0\",\n \"Upgrade-Insecure-Requests\": 1,\n \"User-Agent\": (\"Mozilla/5.0 (X11; Linux x86_64)\"\n \" AppleWebKit/537.36 (KHTML, like Gecko) \"\n \"Chrome/73.0.3683.86 Safari/537.36\"),\n \"Accept\": (\"text/html,application/xhtml+xml,\"\n \"application/xml;q=0.9,image/webp,\"\n \"image/apng,*/*;q=0.8,\"\n \"application/signed-exchange;v=b3\"),\n \"Accept-Encoding\": \"gzip, deflate, br\",\n \"Accept-Language\": \"en-US,en;q=0.9\"}",
"def test_parse_header(self):\n data = parse_header(self.header)\n self.assertEqual(data.get(\"application\"), \"my Grandma\")\n self.assertEqual(data.get(\"version\"), \"has\")\n self.assertEqual(data.get(\"reference\"), \"furry\")\n self.assertEqual(data.get(\"query_letters\"), 27)\n self.assertEqual(data.get(\"database\"), \"Cats\")",
"def _parse_head(line):\n retval = {}\n m = re.match(\n '[0-9]+: (?P<if>\\w+\\d{1,3}): <(?P<flags>[^>]+)> mtu (?P<mtu>[0-9]+)',\n line\n )\n if m:\n retval['ifname'] = m.group('if')\n retval['mtu'] = int(m.group('mtu'))\n retval['flags'] = m.group('flags').split(',')\n return retval"
]
| [
"0.6395573",
"0.6370883",
"0.6120538",
"0.6111331",
"0.6002312",
"0.58925486",
"0.5866152",
"0.58608603",
"0.5837583",
"0.5790471",
"0.57902354",
"0.5771844",
"0.5757349",
"0.5755024",
"0.5725017",
"0.57227486",
"0.5641827",
"0.55207497",
"0.5515972",
"0.55122787",
"0.55082244",
"0.5495783",
"0.54872113",
"0.54778135",
"0.54729843",
"0.5464606",
"0.545886",
"0.5448211",
"0.5439855",
"0.540989"
]
| 0.6731328 | 0 |
Equality operator for Curcio1990Map. Compares two Curcio1990Map's based on attribute equality | def __eq__(self, other):
if not isinstance(other, Curcio1990Map):
return False
if id(self) == id(other):
return True
return self.__dict__ == other.__dict__ | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __eq__(self, other):\n if not isinstance(other, Watson2014Map):\n return False\n if id(self) == id(other):\n return True\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n eq = True\n for attr in ['geocode',\n 'geocodeDict',\n 'geolevel',\n 'parentGeocode',\n 'raw',\n 'raw_housing',\n 'dp',\n 'syn',\n 'syn_unrounded',\n # 'cons',\n # 'invar',\n # 'dp_queries',\n # 'congDistGeocode',\n # 'sldlGeocode',\n # 'slduGeocode',\n ]:\n\n eq = eq and self.__getattribute__(attr) == other.__getattribute__(attr)\n\n #eq = eq and (np.array_equal(self.raw.toDense(), other.raw.toDense()))\n return eq",
"def test_eq(self):\r\n self.assertTrue(self.empty_map == MetadataMap({}, []))\r\n self.assertTrue(self.overview_map == MetadataMap(\r\n self.overview_map._metadata, self.overview_map.Comments))",
"def test_eq(self):\n self.assertTrue(self.empty_map == MetadataMap({}, []))\n self.assertTrue(self.overview_map == MetadataMap(\n self.overview_map._metadata, self.overview_map.Comments))",
"def __eq__(self, other):\n\n if not isinstance(other, IPMap):\n return False\n\n return ipset.ipmap_is_equal(self.map, other.map)",
"def __eq__(self, other):\n return dict.__eq__(self, other)",
"def __eq__(self, other):\n return self.items() == other.items()",
"def __eq__(self, other):\n if isinstance(other, SON):\n return len(self) == len(other) and self.items() == other.items()\n return self.to_dict() == other",
"def __eq__(self, other):\n if not isinstance(other, DataDict):\n return False\n for key, item in self.items():\n if key not in other:\n return False\n if isinstance(item, pd.DataFrame):\n if not self[key].equals(other[key]):\n return False\n elif not self[key] == other[key]:\n return False\n return True",
"def __eq__(self, other):\r\n return self.id_map == other.id_map and self.matrix == other.matrix\\\r\n and self.size == other.size",
"def __eq__(self, other):\n return (self.name == other.name and\n self.attrs == other.attrs and\n self.range_key == other.range_key and\n self.global_indexes == other.global_indexes and\n self.read_throughput == other.read_throughput and\n self.write_throughput == other.write_throughput)",
"def __eq__(self, other):\n if not isinstance(other, AppleMappingRequest):\n return False\n\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n if isinstance(other, CAPDU):\n ours = self.__dict__.items()\n theirs = other.__dict__.items()\n return ours == theirs\n return NotImplemented",
"def __eq__(self, other):\n try:\n other_dict = other.to_dict()\n except AttributeError:\n return False\n return self.to_dict() == other_dict",
"def __eq__(self, other):\r\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n return self.__dict__ == other",
"def __eq__(self, other):\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n return self.__dict__ == other.__dict__"
]
| [
"0.7665283",
"0.69506687",
"0.686751",
"0.6837607",
"0.6741164",
"0.6676089",
"0.66535497",
"0.66449416",
"0.6511533",
"0.64939123",
"0.64839643",
"0.6445785",
"0.6424692",
"0.6396489",
"0.63746417",
"0.6373116",
"0.63492835",
"0.63492835",
"0.63492835",
"0.63492835",
"0.63492835",
"0.63492835",
"0.63492835",
"0.63492835",
"0.63492835",
"0.63492835",
"0.63492835",
"0.63492835",
"0.63492835",
"0.63492835"
]
| 0.8193522 | 0 |