Column            Type            Lengths / values
query             stringlengths   9 - 9.05k
document          stringlengths   10 - 222k
metadata          dict            -
negatives         listlengths     30 - 30
negative_scores   listlengths     30 - 30
document_score    stringlengths   4 - 10
document_rank     stringclasses   2 values
Test that we can decode the energy sums. These can be tricky because the baseline is encoded in IEEE 754 format.
def test_decode_energy_sums(self):
    self.assertEqual(td.esums(decoded=True),
                     decoder.decode_energy_sums(BytesIO(td.esums(True))))
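A minimal sketch of the IEEE 754 wrinkle the query mentions, independent of the decoder module in the sample above: it assumes a hypothetical four-word energy-sum block whose first three words are unsigned 32-bit sums and whose last word is the baseline packed as a little-endian IEEE 754 single.

import struct
from io import BytesIO

def decode_energy_sums_sketch(stream):
    # Three unsigned 32-bit sums (names here are illustrative only).
    trailing, leading, gap = struct.unpack('<3I', stream.read(12))
    # The baseline word: the same 4 bytes reinterpreted as a binary32 float.
    (baseline,) = struct.unpack('<f', stream.read(4))
    return [trailing, leading, gap, baseline]

# 19.625 is exactly representable in binary32, so the round trip is exact.
raw = struct.pack('<3I', 1, 2, 3) + struct.pack('<f', 19.625)
assert decode_energy_sums_sketch(BytesIO(raw)) == [1, 2, 3, 19.625]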
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_internal_energy(self):\n buff_score = self.pdb[0].get_internal_energy(ff=self.ff)\n self.assertAlmostEqual(buff_score.total_energy, -3722.49, places=2)", "def test_energy():\n # Test something\n \n from nose.tools import assert_equal\n assert_equal(energy([0.0,0.0],1.0), 0)\n assert_equal(energy([4.0,5.0],1.0), 10)", "def test_check_conformer_energy(self):\n v_list = [-272.2779012225, -272.2774933703, -272.2768397635, -272.2778432059, -272.278645477, -272.2789602654,\n -272.2788749196, -272.278496709, -272.2779350675, -272.2777008843, -272.2777167286, -272.2780937643,\n -272.2784838846, -272.2788050464, -272.2787865352, -272.2785091607, -272.2779977452, -272.2777957743,\n -272.2779134906, -272.2781827547, -272.278443339, -272.2788244214, -272.2787748749]\n v_list = np.array(v_list, np.float64)\n v_diff = (v_list[0] - np.min(v_list)) * constants.E_h * constants.Na / 1000\n self.assertAlmostEqual(v_diff / 2.7805169838282797, 1, 5)", "def test_ofe(self):\n df = dep.read_ofe(get_path('ofe.txt'))\n self.assertAlmostEquals(df['precip'].max(), 107.56, 2)\n\n df = dep.read_ofe(get_path('ofe2.txt'))\n print(df['sedleave'].sum())\n self.assertAlmostEquals(df['sedleave'].sum(), 400257.48, 2)", "def test_statistics_calculator_absval():\n from resistics.statistics.calculator import StatisticCalculator\n import numpy as np\n\n specData, evalfreq = get_spectrum_data()\n calculator = StatisticCalculator()\n calculator.winLen = 1\n assert calculator.winLen == 1\n calculator.setSpectra(specData.freqArray, specData, evalfreq)\n statData = calculator.getDataForStatName(\"absvalEqn\")\n testData = {\n 24: {\n \"absExHx\": 53.956000593075835,\n \"absEyHx\": 47.01063709417264,\n \"absHxHx\": 93.5,\n \"absHyHx\": 38.01315561749642,\n \"absExHy\": 28.609439001839934,\n \"absEyHy\": 28.635642126552707,\n \"absHxHy\": 38.01315561749642,\n \"absHyHy\": 105.0,\n \"absExEx\": 57.0,\n \"absEyEx\": 40.0,\n \"absHxEx\": 53.956000593075835,\n \"absHyEx\": 28.609439001839934,\n \"absExEy\": 40.0,\n \"absEyEy\": 40.0,\n \"absHxEy\": 47.01063709417264,\n \"absHyEy\": 28.635642126552707,\n },\n 40: {\n \"absExHx\": 34.60130055359191,\n \"absEyHx\": 31.622776601683793,\n \"absHxHx\": 49.5,\n \"absHyHx\": 24.73863375370596,\n \"absExHy\": 51.24451190127583,\n \"absEyHy\": 22.80350850198276,\n \"absHxHy\": 24.73863375370596,\n \"absHyHy\": 84.0,\n \"absExEx\": 49.0,\n \"absEyEx\": 33.83784863137726,\n \"absHxEx\": 34.60130055359191,\n \"absHyEx\": 51.24451190127583,\n \"absExEy\": 33.83784863137726,\n \"absEyEy\": 30.0,\n \"absHxEy\": 31.622776601683793,\n \"absHyEy\": 22.80350850198276,\n },\n }\n for efreq in evalfreq:\n for key, val in statData[efreq].items():\n np.testing.assert_almost_equal(val, testData[efreq][key])", "def test_energy_flux_conversion(self):\n init_wl = np.linspace(300, 500, num=10)\n init_spec = np.ones(init_wl.shape)\n\n test_spec_base = Spectrum(init_wl, init_spec, x_unit='nm', is_photon_flux=True)\n spectrum = test_spec_base.get_spectrum(to_x_unit='nm')\n\n # Prepare an expected spectrum for comparsion\n expect_spec = init_spec * sc.h * sc.c / (init_wl*1e-9)\n\n # Since the values of the spectrum are very small, causing the errors in np.isclose()\n # ( both are in the order of ~1e-19) Need renormalise them for proper comparison.\n assert np.all(np.isclose(spectrum[1, :] * 1e19, expect_spec * 1e19))", "def test_inode_energy_meter(self):\n data_string = \"043E2102010000473A6D6F1200150201060EFF90820400CFE40000DC05B0ED10020A08A5\"\n data = bytes(bytearray.fromhex(data_string))\n\n # pylint: 
disable=unused-variable\n ble_parser = BleParser()\n sensor_msg, tracker_msg = ble_parser.parse_data(data)\n\n assert sensor_msg[\"firmware\"] == \"iNode\"\n assert sensor_msg[\"type\"] == \"iNode Energy Meter\"\n assert sensor_msg[\"mac\"] == \"00126F6D3A47\"\n assert sensor_msg[\"packet\"] == \"0400cfe40000dc05b0ed10\"\n assert sensor_msg[\"data\"]\n assert sensor_msg[\"energy\"] == 39.05\n assert sensor_msg[\"energy unit\"] == \"kWh\"\n assert sensor_msg[\"power\"] == 160.0\n assert sensor_msg[\"power unit\"] == \"W\"\n assert sensor_msg[\"constant\"] == 1500\n assert sensor_msg[\"battery\"] == 100\n assert sensor_msg[\"voltage\"] == 2.88\n assert sensor_msg[\"light level\"] == 0.0\n assert sensor_msg[\"week day\"] == 0\n assert sensor_msg[\"week day total\"] == 4333\n assert sensor_msg[\"rssi\"] == -91", "def test_e0_prod(self):\n self.assertAlmostEqual(self.tunneling.E0_prod.value_si * 0.001, self.E0_prod, 4)", "def test_simple():\n B = 100\n H = 20\n E = 210000\n sections = ((B, H, 0, E),)\n EI, top, bot = bm.EI(sections, E)\n EIc = E * B * (H ** 3) / 12\n assert 0.99 < EI / EIc < 1.01\n assert top == H / 2\n assert bot == -H / 2", "def test_interaction_energy(self):\n buff_score = self.pdb.get_interaction_energy(ff=self.ff)\n self.assertAlmostEqual(buff_score.total_energy, -1005.41, places=2)", "def test_fluxes(self):\n\n t, x = self.t, self.x_edge\n np.testing.assert_array_almost_equal(self.N_e_hat(t, x[0]), 0, decimal=3)\n np.testing.assert_array_almost_equal(self.N_e_hat(t, x[-1]), 0, decimal=3)", "def parse_eplus_msg(self, msg):\n msg = msg.decode(\"utf-8\") \n msg = msg.rstrip()\n _log.info(f\"Received message from EnergyPlus: {msg}\")\n arry = msg.split()\n arry = [float(item) for item in arry]\n _log.info(f\"Received message from EnergyPlus: {arry}\")\n slot = 6\n self.sim_flag = arry[1]\n\n if self.sim_flag != 0.0:\n # Exit based on error status\n _log.debug(\"FLAG: {} - {}\".format(self.sim_flag, type(self.sim_flag)))\n self._check_sim_flag()\n elif arry[2] < self.eplus_outputs and len(arry) < self.eplus_outputs + 6:\n self.exit('Got message with ' + arry[2] + ' inputs. 
Expecting ' + str(self.eplus_outputs) + '.')\n else:\n if float(arry[5]):\n self.time = float(arry[5])\n for input in self.inputs:\n name_value = input.get('name', None)\n dynamic_default_value = input.get('dynamic_default', None)\n if name_value is not None and dynamic_default_value is not None:\n slot = 6\n for output in self.outputs:\n _log.debug(\"Output: {}\".format(output))\n default_value = output.get('default', None)\n if default_value is not None:\n if default_value.lower().find(name_value.lower()) != -1:\n input['default'] = float(arry[slot])\n slot += 1\n slot = 6\n for output in self.outputs:\n name_value = output.get('name', None)\n type_value = output.get('type', None)\n field_value = output.get('field', None)\n if name_value is not None and type_value is not None:\n try:\n output['value'] = float(arry[slot])\n except:\n _log.debug(slot)\n self.exit('Unable to convert received value to double.')\n if \"currentmonthv\" in type_value.lower():\n self.month = float(arry[slot])\n _log.debug(f\"month {self.month}\")\n elif \"currentdayofmonthv\" in type_value.lower():\n self.day = float(arry[slot])\n _log.debug(f\"day {self.day}\")\n elif \"currenthourv\" in type_value.lower():\n self.hour = float(arry[slot])\n _log.debug(f\"hour {self.hour}\")\n elif \"currentminutev\" in type_value.lower():\n self.minute = float(arry[slot])\n _log.debug(f\"minute: {self.minute}\")\n elif field_value is not None and 'operation' in field_value.lower():\n self.operation = float(arry[slot])\n _log.debug(f\"operation (1:on, 0: off) {self.operation}\")\n slot += 1", "def test_bar_free_energies(bar_and_test):\n\n bars, test = bar_and_test[\"bars\"], bar_and_test[\"test\"]\n\n fe0 = test.analytical_free_energies()\n fe0 = fe0[1:] - fe0[0]\n\n results_fp = bars[\"fp\"]\n fe_fp = results_fp[\"Delta_f\"]\n dfe_fp = results_fp[\"dDelta_f\"]\n z = (fe_fp - fe0) / dfe_fp\n assert_almost_equal(z / z_scale_factor, np.zeros(len(z)), decimal=0)\n\n results_sci = bars[\"sci\"]\n fe_sci = results_sci[\"Delta_f\"]\n dfe_sci = results_sci[\"dDelta_f\"]\n z = (fe_sci - fe0) / dfe_sci\n assert_almost_equal(z / z_scale_factor, np.zeros(len(z)), decimal=0)\n\n results_bis = bars[\"bis\"]\n fe_bis = results_bis[\"Delta_f\"]\n dfe_bis = results_bis[\"dDelta_f\"]\n z = (fe_bis - fe0) / dfe_bis\n assert_almost_equal(z / z_scale_factor, np.zeros(len(z)), decimal=0)\n\n # make sure the different methods are nearly equal.\n assert_almost_equal(fe_bis, fe_fp, decimal=precision)\n assert_almost_equal(fe_sci, fe_bis, decimal=precision)\n assert_almost_equal(fe_fp, fe_bis, decimal=precision)\n\n # Test uncertainty methods\n results_dBAR = bars[\"dBAR\"]\n dfe_bar = results_dBAR[\"dDelta_f\"]\n results_dMBAR = bars[\"dMBAR\"]\n dfe_mbar = results_dMBAR[\"dDelta_f\"]\n\n # not sure exactly how close they need to be for sample problems?\n assert_almost_equal(dfe_bar, dfe_mbar, decimal=3)", "def test_evi(self):\n scene = Landsat8Scene(self.filenames)\n geoimg = scene.evi()\n self.assertEquals(geoimg.nbands(), 1)\n self.assertTrue('evi' in geoimg.bandnames())", "def testmoenergies(self):\r\n assert len(self.data.moenergies) == 1\r\n if hasattr(self.data, \"mocoeffs\"):\r\n assert len(self.data.mocoeffs) == 1", "def test_str_energy(self):\n xknx = XKNX()\n sensor = Sensor(\n xknx, \"TestSensor\", group_address_state=\"1/2/3\", value_type=\"energy\"\n )\n sensor.sensor_value.payload = DPTArray(\n (\n 0x45,\n 0x4B,\n 0xB3,\n 0xF8,\n )\n )\n\n self.assertEqual(sensor.resolve_state(), 3259.248046875)\n 
self.assertEqual(sensor.unit_of_measurement(), \"J\")\n self.assertEqual(sensor.ha_device_class(), None)", "def test_decode_bits(self):\r\n for bitvec in ten_bitvecs:\r\n corr, num_errs = golay.decode_bits(bitvec)\r\n if corr is None:\r\n self.assertEqual(num_errs, 4)\r\n else:\r\n self.assertEqual(((corr + bitvec) % 2).sum(), num_errs)", "def test_str_activation_energy(self):\n xknx = XKNX()\n sensor = Sensor(\n xknx,\n \"TestSensor\",\n group_address_state=\"1/2/3\",\n value_type=\"activation_energy\",\n )\n sensor.sensor_value.payload = DPTArray(\n (\n 0x46,\n 0x0,\n 0x3E,\n 0xEE,\n )\n )\n\n self.assertEqual(sensor.resolve_state(), 8207.732421875)\n self.assertEqual(sensor.unit_of_measurement(), \"J/mol\")\n self.assertEqual(sensor.ha_device_class(), None)", "def test_hex_straining():\n\n for protocol in LEGACY_PROTOCOLS:\n p = protocol([])\n\n # single non-hex message\n r = p([\"12.8 Volts\"])\n assert len(r) == 1\n assert r[0].ecu == ECU.UNKNOWN\n assert len(r[0].frames) == 1\n\n\n # multiple non-hex message\n r = p([\"12.8 Volts\", \"NO DATA\"])\n assert len(r) == 2\n\n for m in r:\n assert m.ecu == ECU.UNKNOWN\n assert len(m.frames) == 1\n\n # mixed hex and non-hex\n r = p([\"NO DATA\", \"48 6B 10 41 00 00 01 02 03 FF\"])\n assert len(r) == 2\n\n # first message should be the valid, parsable hex message\n # NOTE: the parser happens to process the valid one's first\n check_message(r[0], 1, 0x10, [0x41, 0x00, 0x00, 0x01, 0x02, 0x03])\n\n # second message: invalid, non-parsable non-hex\n assert r[1].ecu == ECU.UNKNOWN\n assert len(r[1].frames) == 1\n assert len(r[1].data) == 0 # no data", "def evaluate_scheme(train_data, test_data):\n\n # x_raw: int data (0, 1,... , 16)\n # x: modulated data : (0.707+0.707j, ...)\n x_raw, x = train_data \n # train the receiver\n # TODO\n\n x_raw, x = test_data\n # put test data through receiver\n # TODO\n # provide bitdata in x_recon (0, 3, ...)\n x_recon = \n\n # count bit errors- this code is a bit messy \n diff = x_recon^x_raw # bitwise comparison\n bit_errors = np.sum(error_values[diff])\n ber = bit_errors/(NUM_SAMPLES*BITS_PER_SYMBOL)\n return ber", "def test_fuel_for_electricity(pudl_out_eia, live_dbs):\n if not live_dbs:\n pytest.skip(\"Data validation only works with a live PUDL DB.\")\n\n gf_eia923 = pudl_out_eia.gf_eia923()\n\n excess_fuel = (\n gf_eia923.fuel_consumed_for_electricity_mmbtu > gf_eia923.fuel_consumed_mmbtu\n )\n\n if excess_fuel.any():\n raise ValueError(\n \"Fuel consumed for electricity is greater than all fuel consumed!\"\n )", "def energy(data):\n return sum(pow(data, 2))", "def test_measurement(eit_map):\n assert eit_map.measurement.value in [195, 171]", "def test_exp_con():\n c=14\n assert {'diff':EF.exp(c).der, 'value': EF.exp(c).val}=={'diff':0, 'value': math.exp(c)}", "def evaluate_baseline(test_data):\n\n # create mapping xor diff -> biterrors \n error_values = np.array([bin(x).count('1') for x in range(CONST_POINTS)]) \n\n x_raw, x = test_data\n means = Data_generator().constellations[MODULATION][1]\n x_recon = np.argmin(np.abs(x[:, None] - means[None, :]), axis=1)\n\n diff = x_recon^x_raw # bitwise comparison\n bit_errors = np.sum(error_values[diff])\n ber = bit_errors/(NUM_SAMPLES*BITS_PER_SYMBOL)\n return ber", "def test_function_fuel_sum(data, mode_constrained, space_heating_enduses):\n fuel_in = 0\n fuel_in_solid_fuel = 0\n fuel_in_gas = 0\n fuel_in_elec = 0\n fuel_in_oil = 0\n fuel_in_heat = 0\n fuel_in_hydrogen = 0\n fuel_in_biomass = 0\n\n fuel_heating_all_fueltypes = 0\n fuel_heating_gas = 0\n tot_heating 
= 0\n #mode_constrained = True #SCRAP\n\n for region in data['rs_fuel_disagg']:\n for enduse in data['rs_fuel_disagg'][region]:\n fuel_in += np.sum(data['rs_fuel_disagg'][region][enduse])\n fuel_in_heat += np.sum(data['rs_fuel_disagg'][region][enduse][data['lookups']['fueltypes']['heat']])\n\n if mode_constrained == False and enduse in space_heating_enduses: #Exclude inputs for heating\n tot_heating += np.sum(data['rs_fuel_disagg'][region][enduse])\n #pass\n else:\n fuel_in_elec += np.sum(data['rs_fuel_disagg'][region][enduse][data['lookups']['fueltypes']['electricity']])\n fuel_in_gas += np.sum(data['rs_fuel_disagg'][region][enduse][data['lookups']['fueltypes']['gas']])\n fuel_in_hydrogen += np.sum(data['rs_fuel_disagg'][region][enduse][data['lookups']['fueltypes']['hydrogen']])\n fuel_in_oil += np.sum(data['rs_fuel_disagg'][region][enduse][data['lookups']['fueltypes']['oil']])\n fuel_in_solid_fuel += np.sum(data['rs_fuel_disagg'][region][enduse][data['lookups']['fueltypes']['solid_fuel']])\n fuel_in_biomass += np.sum(data['rs_fuel_disagg'][region][enduse][data['lookups']['fueltypes']['biomass']])\n \n for region in data['ss_fuel_disagg']:\n for enduse in data['ss_fuel_disagg'][region]:\n for sector in data['ss_fuel_disagg'][region][enduse]:\n fuel_in += np.sum(data['ss_fuel_disagg'][region][enduse][sector])\n fuel_in_heat += np.sum(data['ss_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['heat']])\n\n if mode_constrained == False and enduse in space_heating_enduses:\n tot_heating += np.sum(data['ss_fuel_disagg'][region][enduse][sector])\n else:\n fuel_in_elec += np.sum(data['ss_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['electricity']])\n fuel_in_gas += np.sum(data['ss_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['gas']])\n fuel_in_hydrogen += np.sum(data['ss_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['hydrogen']])\n fuel_in_oil += np.sum(data['ss_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['oil']])\n fuel_in_solid_fuel += np.sum(data['ss_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['solid_fuel']])\n fuel_in_biomass += np.sum(data['ss_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['biomass']])\n \n for region in data['is_fuel_disagg']:\n for enduse in data['is_fuel_disagg'][region]:\n for sector in data['is_fuel_disagg'][region][enduse]:\n fuel_in += np.sum(data['is_fuel_disagg'][region][enduse][sector])\n fuel_in_heat += np.sum(data['is_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['heat']])\n\n if mode_constrained == False and enduse in space_heating_enduses:\n tot_heating += np.sum(data['is_fuel_disagg'][region][enduse][sector])\n else:\n fuel_in_elec += np.sum(data['is_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['electricity']])\n fuel_in_gas += np.sum(data['is_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['gas']])\n fuel_in_hydrogen += np.sum(data['is_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['hydrogen']])\n fuel_in_oil += np.sum(data['is_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['oil']])\n fuel_in_solid_fuel += np.sum(data['is_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['solid_fuel']])\n fuel_in_biomass += np.sum(data['is_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['biomass']])\n \n return fuel_in, fuel_in_biomass, fuel_in_elec, fuel_in_gas, fuel_in_heat, fuel_in_hydrogen, 
fuel_in_solid_fuel, fuel_in_oil, tot_heating", "def test_compute_inventory_float():\n T = 1000\n c_max = 1e20\n time = 1e3\n with pytest.raises(TypeError):\n inv, sig = divHretention.compute_inventory(T, c_max, time)", "def test_mcintosh_e(self):\n c = array([1,2,3,1])\n num = sqrt(15)\n den = sqrt(19)\n exp = num/den\n self.assertEqual(mcintosh_e(c), exp)", "def extendedConvert(self):\r\n devId = str(self.deviceId)\r\n if(devId == '28' or devId == '29'):\r\n answers = []\r\n #just add the counter value\r\n answers.append(self.fields[1])\r\n #find the engineering units converter\r\n enum = self.fields[0] & 0x3F\r\n #look up the scale and offset for that eeu\r\n eeu = self._eeumaps[str(enum)]\r\n self.eeu1 = eeu\r\n print('eeu:' + str(eeu))\r\n #convert from twos complement and adjust by scale/offset\r\n val = (self.convertSigned16(self.fields[2]) * eeu[1]) + eeu[0]\r\n answers.append(val)\r\n #reset fields to hold the new answers\r\n self.fields = answers\r\n self.units = [self.UNITS_COUNT, eeu[2]]\r\n elif(devId == '53' or devId == '54'):\r\n #strip off the first part of the answer which is the last part of the\r\n #serial number\r\n answers = [self.fields[1]]\r\n self.fields = answers\r\n elif(devId == '75' or devId == '76'):\r\n answers = []\r\n #find out the number of I/O points\r\n pointCount = self.fields[0] & 3\r\n #find out engineering units for 1st I/O\r\n enum = self.fields[1] & 0x3F\r\n eeu = self._eeumaps[str(enum)]\r\n self.eeu1 = eeu\r\n #new value = old value * scale + offset\r\n val = (self.convertSigned16(self.fields[3]) * eeu[1]) + eeu[0]\r\n answers.append(val)\r\n self.units = [eeu[2]]\r\n #see if there's two\r\n if pointCount == 2:\r\n #find out engineering units for 2nd I/O\r\n #and off first two bits\r\n enum = self.fields[0] >> 2\r\n eeu = self._eeumaps[str(enum)]\r\n self.eeu2 = eeu\r\n val = (self.convertSigned16(self.fields[2]) * eeu[1]) + eeu[0]\r\n answers.append(val)\r\n self.units.append(eeu[2])\r\n else:\r\n self.eeu2 = []\r\n #reset fields to hold the new answers\r\n self.fields = answers\r\n\r\n return", "def test_em_sum_exception(self):\n z_matrix = np.array(\n [[0.000, 0.0, 0.333],\n [0.033, 0.2, 0.267],\n [0.067, 0.4, 0.200],\n [0.100, 0.7, 0.100],\n [0.200, 0.8, 0.067],\n [0.267, 0.9, 0.033],\n [0.333, 1.0, 0.000]],\n dtype=np.float64)\n self.assertRaises(ValueError, mcdm.weigh, z_matrix, \"EM\")" ]
[ "0.6065364", "0.59546584", "0.58342063", "0.5746904", "0.57446885", "0.5736451", "0.5714147", "0.57141185", "0.56794614", "0.565437", "0.5632818", "0.5597184", "0.55819803", "0.5568268", "0.5568061", "0.5558533", "0.55471855", "0.55132204", "0.55014616", "0.55003136", "0.549141", "0.54604965", "0.54505306", "0.5443459", "0.5441194", "0.5440728", "0.5437736", "0.5428958", "0.54228914", "0.54158044" ]
0.8079843
0
Tests that we can decode external timestamps appropriately.
def test_decode_external_timestamp(self):
    self.assertEqual(td.external_timestamp(),
                     decoder.decode_external_timestamp(
                         BytesIO(td.external_timestamp(True)), self.mask))
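As a hedged illustration of this kind of decode (the sample's decoder and its self.mask are not shown here), a 48-bit external timestamp split across two little-endian 32-bit words can be reassembled as below; the 16-bit high-word mask is an assumption made for the sketch.

import struct
from io import BytesIO

HIGH_BITS_MASK = 0xFFFF  # assumed: only 16 usable bits in the high word

def decode_external_timestamp_sketch(stream):
    # Low 32 bits first, then a word whose masked low bits hold bits 32-47.
    low, high = struct.unpack('<2I', stream.read(8))
    return ((high & HIGH_BITS_MASK) << 32) | low

raw = struct.pack('<2I', 0xDEADBEEF, 0x00012345)
assert decode_external_timestamp_sketch(BytesIO(raw)) == (0x2345 << 32) | 0xDEADBEEF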
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def test_process_timestamp() -> None:\n datetime_with_tzinfo = datetime(2016, 7, 9, 11, 0, 0, tzinfo=dt_util.UTC)\n datetime_without_tzinfo = datetime(2016, 7, 9, 11, 0, 0)\n est = dt_util.get_time_zone(\"US/Eastern\")\n datetime_est_timezone = datetime(2016, 7, 9, 11, 0, 0, tzinfo=est)\n nst = dt_util.get_time_zone(\"Canada/Newfoundland\")\n datetime_nst_timezone = datetime(2016, 7, 9, 11, 0, 0, tzinfo=nst)\n hst = dt_util.get_time_zone(\"US/Hawaii\")\n datetime_hst_timezone = datetime(2016, 7, 9, 11, 0, 0, tzinfo=hst)\n\n assert process_timestamp(datetime_with_tzinfo) == datetime(\n 2016, 7, 9, 11, 0, 0, tzinfo=dt_util.UTC\n )\n assert process_timestamp(datetime_without_tzinfo) == datetime(\n 2016, 7, 9, 11, 0, 0, tzinfo=dt_util.UTC\n )\n assert process_timestamp(datetime_est_timezone) == datetime(\n 2016, 7, 9, 15, 0, tzinfo=dt_util.UTC\n )\n assert process_timestamp(datetime_nst_timezone) == datetime(\n 2016, 7, 9, 13, 30, tzinfo=dt_util.UTC\n )\n assert process_timestamp(datetime_hst_timezone) == datetime(\n 2016, 7, 9, 21, 0, tzinfo=dt_util.UTC\n )\n assert process_timestamp(None) is None", "def test_parse_timestamp(\n test_input: int,\n expected: datetime.datetime,\n):\n assert tvmaze.parsers.parse_timestamp(test_input) == expected", "def test_interpret_datetime():\n timestamps = [\n \"2019-01-01 01:01:01\",\n \"2019-01-01 01:01:01.000001\",\n \"2019-01-01T01:01:01Z\",\n \"2019-01-01T01:01:01.000001Z\",\n \"2019-01-01_01:01:01.000001\",\n \"2019-01-01_01-01-01-000000\",\n ]\n\n for timestamp in timestamps:\n dt = interpret_datetime(timestamp)\n assert isinstance(dt, datetime)\n if \".\" in timestamp:\n assert dt == datetime(2019, 1, 1, 1, 1, 1, 1)\n else:\n assert dt == datetime(2019, 1, 1, 1, 1, 1)", "def test_timestamp_compat(value):\n\tts = Timestamp.convert(value, DEFAULT_POD)\n\tif isinstance(value, (float, int)):\n\t\tassert ts.value == value\n\telse:\n\t\tassert ts.datetime == value\n\n\tassert ts.hour == 4\n\tassert ts.year == 2020\n\n\tassert ts.strftime(\"%Y-%m-%d %H:%M %z\") == \"2020-01-02 04:06 +0000\"", "def test_timestamp():\n natural = timestamp(\"December 15, 2015\")\n assert natural == {\n \"unix\": 1450137600,\n \"natural\": \"December 15, 2015\"\n }\n unix = timestamp(\"1450137600\")\n assert unix == {\n \"unix\": 1450137600,\n \"natural\": \"December 15, 2015\"\n }", "def test_is_valid_timestamp_invalid(self):\n timestamps = (\n (\"B4Yffw\", \"DISCORD_EPOCH - TOKEN_EPOCH - 1\"),\n (\"ew\", \"123\"),\n (\"AoIKgA\", \"42076800\"),\n (\"{hello}[world]&(bye!)\", \"ASCII invalid Base64\"),\n (\"Þíß-ï§-ňøẗ-våłìÐ\", \"Unicode invalid Base64\"),\n )\n\n for timestamp, msg in timestamps:\n with self.subTest(msg=msg):\n result = TokenRemover.is_valid_timestamp(timestamp)\n self.assertFalse(result)", "def test_parse_date_from_string(self):\n\n dt_ = pytz.UTC.localize(dt.datetime(2014, 11, 23, 1, 2, 3))\n epoch = pytz.UTC.localize(dt.datetime(1970, 1, 1, 0, 0, 0))\n epoch_expected = (dt_ - epoch).total_seconds()\n\n pts = parse_date('20141123 01:02:03')\n self.assertEqual(pts.value / 1e9, epoch_expected)\n\n pts = parse_date('2014-11-23 01:02:03')\n self.assertEqual(pts.value / 1e9, epoch_expected)\n\n pts = parse_date('2014-11-23T010203')\n self.assertEqual(pts.value / 1e9, epoch_expected)", "def testGetNormalizedTimestamp(self):\n golang_timestamp = bytes.fromhex('010000000000000000000000000000')\n golang_time_object = golang_time.GolangTime(\n golang_timestamp=golang_timestamp)\n\n normalized_timestamp = golang_time_object._GetNormalizedTimestamp()\n 
self.assertIsNone(normalized_timestamp)\n\n golang_timestamp = struct.pack('>Bqih', 1, 63772480949, 711098348, 0)\n golang_time_object = golang_time.GolangTime(\n golang_timestamp=golang_timestamp)\n\n normalized_timestamp = golang_time_object._GetNormalizedTimestamp()\n self.assertEqual(\n normalized_timestamp, decimal.Decimal('1636884149.711098348'))\n\n golang_timestamp = struct.pack('>Bqih', 1, 63772480949, 711098348, 60)\n golang_time_object = golang_time.GolangTime(\n golang_timestamp=golang_timestamp)\n\n normalized_timestamp = golang_time_object._GetNormalizedTimestamp()\n self.assertEqual(\n normalized_timestamp, decimal.Decimal('1636880549.711098348'))\n\n golang_timestamp = struct.pack('>Bqih', 1, 63772480949, 711098348, 0)\n golang_time_object = golang_time.GolangTime(\n golang_timestamp=golang_timestamp)\n golang_time_object.time_zone_offset = 60\n\n normalized_timestamp = golang_time_object._GetNormalizedTimestamp()\n self.assertEqual(\n normalized_timestamp, decimal.Decimal('1636880549.711098348'))\n\n golang_timestamp = bytes.fromhex('010000000e7791f70000000000ffff')\n golang_time_object = golang_time.GolangTime(\n golang_timestamp=golang_timestamp)\n\n normalized_timestamp = golang_time_object._GetNormalizedTimestamp()\n self.assertEqual(normalized_timestamp, decimal.Decimal('0'))\n\n golang_timestamp = bytes.fromhex('010000000e7791f60000000000ffff')\n golang_time_object = golang_time.GolangTime(\n golang_timestamp=golang_timestamp)\n\n normalized_timestamp = golang_time_object._GetNormalizedTimestamp()\n self.assertIsNone(normalized_timestamp)", "def test_convert_datetime():", "def test_timestamp_and_datetime_extraction():\n test_datetime = datetime.datetime(2017, 1, 15)\n test_timestamp = (test_datetime - datetime.datetime(1970, 1, 1)).total_seconds() * 1000.0\n\n # valid timestamp in the form kafka would send it if SET\n mock_message.timestamp = Mock(return_value=(1, test_timestamp))\n\n timestamp = extract_timestamp_from_message(mock_message)\n assert timestamp == test_timestamp\n assert kafka_timestamp_to_datetime(timestamp) == test_datetime\n\n # valid timestamp in the form kafka would send it if NOT SET\n mock_message.timestamp = Mock(return_value=(1, -1))\n\n timestamp = extract_timestamp_from_message(mock_message)\n assert timestamp is None\n assert kafka_timestamp_to_datetime(timestamp) is None\n\n # no timestamp in the form kafka would send it if NOT AVAILABLE\n mock_message.timestamp = Mock(return_value=(0, 0))\n\n timestamp = extract_timestamp_from_message(mock_message)\n assert timestamp is None\n assert kafka_timestamp_to_datetime(timestamp) is None", "def test_parse_no_timezine_strict():\n iso8601.parse_datetime(\"2007-01-01T08:00:00\")", "def test_encode_decode(self):\n self.assertEquals(self.txt_when,\n self.TDTT.encode_when(self.dt_when))\n self.assertEquals(None,\n self.TDTT.encode_when(self.NOT_DATE_AND_TIME))\n\n expected = {'when': self.txt_when}\n encoded_dict = self.TDTT.encode(when=self.dt_when)\n self.assertEquals(expected, encoded_dict)\n\n decoded_dtt = self.TDTT.decode(expected)\n self.assertEquals(self.dt_when, decoded_dtt.when)\n self.assertEquals(expected, decoded_dtt.encoded)\n\n constructed_dtt = self.TDTT(when=self.txt_when)\n self.assertEquals(expected, constructed_dtt.encoded)\n decoded_dict = constructed_dtt.decoded\n self.assertEquals(self.dt_when, decoded_dict.get('when'))", "def test_parse_time_unix_timestamp(self):\n self.assertEqual(\n parse_time(\"1422748800\", None), datetime(2015, 2, 1, 0, 0, 0))\n 
self.assertEqual(parse_time(\"0\", None), datetime(1970, 1, 1, 0, 0, 0))\n # The following are treated as unix timestamps, not YYYYMMDD strings.\n self.assertEqual(\n parse_time(\"19000101\", None), datetime(1970, 8, 8, 21, 48, 21))\n self.assertEqual(\n parse_time(\"20150132\", None), datetime(1970, 8, 22, 5, 15, 32))\n self.assertEqual(\n parse_time(\"20151301\", None), datetime(1970, 8, 22, 5, 35, 1))", "async def test_process_datetime_to_timestamp_mirrors_utc_isoformat_behavior(\n time_zone, hass: HomeAssistant\n) -> None:\n hass.config.set_time_zone(time_zone)\n datetime_with_tzinfo = datetime(2016, 7, 9, 11, 0, 0, tzinfo=dt_util.UTC)\n datetime_without_tzinfo = datetime(2016, 7, 9, 11, 0, 0)\n est = dt_util.get_time_zone(\"US/Eastern\")\n datetime_est_timezone = datetime(2016, 7, 9, 11, 0, 0, tzinfo=est)\n est = dt_util.get_time_zone(\"US/Eastern\")\n datetime_est_timezone = datetime(2016, 7, 9, 11, 0, 0, tzinfo=est)\n nst = dt_util.get_time_zone(\"Canada/Newfoundland\")\n datetime_nst_timezone = datetime(2016, 7, 9, 11, 0, 0, tzinfo=nst)\n hst = dt_util.get_time_zone(\"US/Hawaii\")\n datetime_hst_timezone = datetime(2016, 7, 9, 11, 0, 0, tzinfo=hst)\n\n assert (\n process_datetime_to_timestamp(datetime_with_tzinfo)\n == dt_util.parse_datetime(\"2016-07-09T11:00:00+00:00\").timestamp()\n )\n assert (\n process_datetime_to_timestamp(datetime_without_tzinfo)\n == dt_util.parse_datetime(\"2016-07-09T11:00:00+00:00\").timestamp()\n )\n assert (\n process_datetime_to_timestamp(datetime_est_timezone)\n == dt_util.parse_datetime(\"2016-07-09T15:00:00+00:00\").timestamp()\n )\n assert (\n process_datetime_to_timestamp(datetime_nst_timezone)\n == dt_util.parse_datetime(\"2016-07-09T13:30:00+00:00\").timestamp()\n )\n assert (\n process_datetime_to_timestamp(datetime_hst_timezone)\n == dt_util.parse_datetime(\"2016-07-09T21:00:00+00:00\").timestamp()\n )", "def test_is_valid_timestamp_valid(self):\n timestamps = (\n \"XsyRkw\",\n \"Xrim9Q\",\n \"XsyR-w\",\n \"XsySD_\",\n \"Dn9r_A\",\n )\n\n for timestamp in timestamps:\n with self.subTest(timestamp=timestamp):\n result = TokenRemover.is_valid_timestamp(timestamp)\n self.assertTrue(result)", "def testSpecificTimestamps(self):\n predicate = \"metadata:predicate\"\n subject = \"aff4:/metadata:9\"\n\n # Check we can specify a timestamp\n data_store.DB.Set(subject, predicate, \"2\", timestamp=1000, token=self.token)\n (stored, ts) = data_store.DB.Resolve(subject, predicate, token=self.token)\n\n # Check the time is reasonable\n self.assertEqual(ts, 1000)\n self.assertEqual(stored, \"2\")", "def verify_t(data):\n if 't_utc' not in data['properties']:\n return None\n data['properties']['DateTime'] = util.datestring(data['properties']['t_utc'], tz=config['local_tz']) \n return data", "def test_170518_bad_dbtime(self):\n spc = parser(get_file('PTSDY1_baddbtime.txt'))\n answer = utc(2017, 5, 1, 12, 0)\n for _, outlook in spc.outlook_collections.items():\n self.assertEqual(outlook.expire, answer)", "def testTimestamps(self):\n predicate = \"metadata:predicate\"\n subject = \"aff4:/metadata:8\"\n\n # Extend the range of valid timestamps returned from the table to account\n # for potential clock skew.\n start = long(time.time() - 60) * 1e6\n data_store.DB.Set(subject, predicate, \"1\", token=self.token)\n\n (stored, ts) = data_store.DB.Resolve(subject, predicate, token=self.token)\n\n # Check the time is reasonable\n end = long(time.time() + 60) * 1e6\n\n self.assert_(ts >= start and ts <= end)\n self.assertEqual(stored, \"1\")", "def 
test_parseTimeInvalidFormat(self):\n self.assertRaises(ValueError, imap4.parseTime, u\"invalid\")", "def testProperties(self):\n golang_timestamp = struct.pack('>Bqih', 1, 0, 0, -1)\n golang_time_object = golang_time.GolangTime(\n golang_timestamp=golang_timestamp)\n self.assertEqual(golang_time_object._number_of_seconds, 0)\n self.assertEqual(golang_time_object._nanoseconds, 0)\n self.assertEqual(golang_time_object.is_local_time, False)\n self.assertEqual(golang_time_object._time_zone_offset, 0)\n\n golang_timestamp = struct.pack(\n '>Bqih', 1, golang_time.GolangTime._GOLANG_TO_POSIX_BASE, 0, 60)\n golang_time_object = golang_time.GolangTime(\n golang_timestamp=golang_timestamp)\n self.assertEqual(golang_time_object._number_of_seconds,\n golang_time.GolangTime._GOLANG_TO_POSIX_BASE)\n self.assertEqual(golang_time_object._nanoseconds, 0)\n self.assertEqual(golang_time_object.is_local_time, False)\n self.assertEqual(golang_time_object._time_zone_offset, 60)\n\n golang_timestamp = bytes.fromhex('010000000e7791f70000000000ffff')\n golang_time_object = golang_time.GolangTime(\n golang_timestamp=golang_timestamp)\n self.assertEqual(golang_time_object._number_of_seconds,\n golang_time.GolangTime._GOLANG_TO_POSIX_BASE)\n self.assertEqual(golang_time_object._nanoseconds, 0)\n self.assertEqual(golang_time_object.is_local_time, False)\n self.assertEqual(golang_time_object._time_zone_offset, 0)", "def test_process_datetime_to_timestamp(time_zone, hass: HomeAssistant) -> None:\n hass.config.set_time_zone(time_zone)\n utc_now = dt_util.utcnow()\n assert process_datetime_to_timestamp(utc_now) == utc_now.timestamp()\n now = dt_util.now()\n assert process_datetime_to_timestamp(now) == now.timestamp()", "def test_validate(self):\n # Instances of datetime.datetime simply pass through as-is.\n self.assertEquals(self.dt_when,\n self.TDTT.validate_when(self.dt_when))\n\n # Date/time in string form should be in ISO-8601 format.\n self.assertEquals(self.dt_when,\n self.TDTT.validate_when(self.txt_when))\n\n self.assertEquals(None,\n self.TDTT.validate_when(self.NOT_DATE_AND_TIME))\n\n encoded = {'when': self.txt_when, 'unused': 'ignored'}\n decoded_props = {'when': self.dt_when}\n self.check_validate(encoded, decoded_props, self.TDTT.validate)", "def test_raw_file_name_to_time_json_alternative(self):\n\n this_time_unix_sec = probsevere_io.raw_file_name_to_time(\n ALTERNATIVE_JSON_FILE_NAME)\n\n self.assertTrue(this_time_unix_sec == VALID_TIME_UNIX_SEC)", "def test_raw_file_name_to_time_json(self):\n\n this_time_unix_sec = probsevere_io.raw_file_name_to_time(JSON_FILE_NAME)\n self.assertTrue(this_time_unix_sec == VALID_TIME_UNIX_SEC)", "def _test_template(timezone_text):\n time_utils.set_timezone(timezone_text)\n expected_epoch = 63054001\n assert expected_epoch == time_utils.get_epoch_from_utc_text(\n '1971-12-31 19:00:01', '%Y-%m-%d %H:%M:%S')\n assert expected_epoch == time_utils.get_epoch_from_utc_text(\n '1971-12-31 19:00:01')\n assert expected_epoch == time_utils.get_epoch_from_utc_text(\n '1971/12/31T19:00:01Z', '%Y/%m/%dT%H:%M:%SZ')", "def test_datestring_to_timestamp(self):\n result = datestring_to_timestamp(\"01-JAN-1990\")\n self.assertEqual(result, 631148400.0)\n result = datestring_to_timestamp(\"01-DEC-2000\")\n self.assertEqual(result, 975625200.0)", "def test_parse_part_A_timestamp():\n\n result = parse_part_A(PART_A)\n\n assert result[0] == datetime.datetime(2008, 1, 9, 12, 27, 56)", "def test_download_date_tz_1A(temp_file):\n from osxmetadata import OSXMetaData\n from osxmetadata.datetime_utils 
import datetime_naive_to_local\n import datetime\n\n meta = OSXMetaData(temp_file, tz_aware=True)\n dt = datetime.datetime.now()\n meta.set_attribute(\"downloadeddate\", dt)\n dt_tz = datetime_naive_to_local(dt)\n assert meta.downloadeddate == [dt_tz]\n assert meta.get_attribute(\"downloadeddate\") == [dt_tz]", "def verify(timestamp):\n if not isinstance(timestamp, str):\n raise TypeError('\"{}\" is not str type'.format(type(timestamp)))\n elif match('^[0-9]{1,2}(:[0-9]{1,2}){1,2}(\\.[0-9]{1,9})?$', timestamp):\n return True\n return False" ]
[ "0.6826415", "0.678895", "0.6769374", "0.67243725", "0.6696744", "0.6450985", "0.6423673", "0.6392706", "0.63700205", "0.63413143", "0.6326594", "0.629541", "0.62936324", "0.62907755", "0.62751067", "0.62236845", "0.6223447", "0.6193005", "0.61763287", "0.6139348", "0.61373883", "0.61132336", "0.60929114", "0.6078306", "0.60735184", "0.60701174", "0.6067112", "0.60646534", "0.6052662", "0.60506135" ]
0.87496525
0
Tests that we can decode the QDC header into an array.
def test_decode_qdc(self):
    self.assertEqual(td.qdc(), decoder.decode_qdc(BytesIO(td.qdc(True))))
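For the QDC case the decode amounts to reading a fixed block of unsigned 32-bit integrals into a list; the eight-word block size below is an assumption for illustration, not taken from the decoder in the sample.

import struct
from io import BytesIO

QDC_WORDS = 8  # assumed block size for this sketch

def decode_qdc_sketch(stream):
    # Unpack QDC_WORDS little-endian unsigned 32-bit sums into a plain list.
    return list(struct.unpack('<%dI' % QDC_WORDS, stream.read(4 * QDC_WORDS)))

raw = struct.pack('<8I', *range(8))
assert decode_qdc_sketch(BytesIO(raw)) == list(range(8))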
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_check_header(self):\r\n\r\n # Default header, should not generate any errors/warnings\r\n header = ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence',\r\n 'Description']\r\n errors = []\r\n warnings = []\r\n\r\n errors, warnings = check_header(header,\r\n errors,\r\n warnings,\r\n sample_id_ix=0,\r\n desc_ix=3,\r\n bc_ix=1,\r\n linker_primer_ix=2,\r\n added_demultiplex_field=None)\r\n\r\n expected_errors = []\r\n expected_warnings = []\r\n\r\n self.assertEqual(errors, expected_errors)\r\n self.assertEqual(warnings, expected_warnings)", "def test_process_optional_header_data(self):\n self.assertDictEqual({'external_timestamp': td.external_timestamp()},\n decoder.process_optional_header_data(\n BytesIO(td.external_timestamp(True)),\n decoder.HeaderCodes.HEADER_W_ETS, self.mask))\n self.assertDictEqual({'esums': td.esums(False, True)},\n decoder.process_optional_header_data(BytesIO(td.esums(True)),\n decoder.HeaderCodes.HEADER_W_ESUM,\n self.mask))\n self.assertDictEqual(\n {'external_timestamp': td.external_timestamp(), 'esums': td.esums(False, True)},\n decoder.process_optional_header_data(\n BytesIO(td.esums(True) + td.external_timestamp(True)),\n decoder.HeaderCodes.HEADER_W_ESUM_ETS, self.mask))\n self.assertDictEqual({'qdc': td.qdc()},\n decoder.process_optional_header_data(BytesIO(td.qdc(True)),\n decoder.HeaderCodes.HEADER_W_QDC,\n self.mask))\n self.assertDictEqual({'external_timestamp': td.external_timestamp(), 'qdc': td.qdc()},\n decoder.process_optional_header_data(\n BytesIO(td.qdc(True) + td.external_timestamp(True)),\n decoder.HeaderCodes.HEADER_W_QDC_ETS, self.mask))\n self.assertDictEqual({'esums': td.esums(False, True), 'qdc': td.qdc()},\n decoder.process_optional_header_data(\n BytesIO(td.esums(True) + td.qdc(True)),\n decoder.HeaderCodes.HEADER_W_ESUM_QDC, self.mask))\n self.assertDictEqual({'external_timestamp': td.external_timestamp(), 'qdc': td.qdc(),\n 'esums': td.esums(False, True)}, decoder.process_optional_header_data(\n BytesIO(td.esums(True) + td.qdc(True) + td.external_timestamp(True)),\n decoder.HeaderCodes.HEADER_W_ESUM_QDC_ETS, self.mask))", "def test_decode(self):\n pass # TODO(tlarsen)", "def test_decode_listmode_data(self):\n self.assertEqual([td.header(decoded=True)],\n decoder.decode_listmode_data(BytesIO(td.header(as_bytes=True)), self.mask))\n self.assertEqual([{**td.header_with_trace(decoded=True), **{'trace': td.trace()}}],\n decoder.decode_listmode_data(\n BytesIO(td.header_with_trace(as_bytes=True) + td.trace(True)),\n self.mask))", "def test_decode():\n decoding = d.decode()\n assert type(decoding) == list\n assert len(decoding) == 7\n assert decoding[0] == '-12;-1\\n\\nESS'\n assert decoding[-1] == '2;-2\\n\\nWSWESNESSS'\n for x in decoding:\n assert \"\\n\" in x", "def test_decode_barcode_8_ok(self):\r\n self.assertEqual(decode_barcode_8(self.valid_bc_1),\r\n (self.valid_bc_1, 0))\r\n self.assertEqual(decode_barcode_8(self.valid_bc_2),\r\n (self.valid_bc_2, 0))\r\n self.assertEqual(decode_barcode_8(self.valid_bc_3),\r\n (self.valid_bc_3, 0))\r\n self.assertEqual(decode_barcode_8(self.valid_bc_4),\r\n (self.valid_bc_4, 0))\r\n self.assertEqual(decode_barcode_8(self.valid_bc_5),\r\n (self.valid_bc_5, 0))", "def test_process_optional_header_data_bad_header_length(self):\n with self.assertRaises(ValueError):\n decoder.process_optional_header_data(BytesIO(td.external_timestamp(True)), 3, self.mask)", "def test_utf8_bytes_in_an_array(self):\n # Python3 doesn't support bytestrings, don't run this test\n if str is unicode:\n return\n 
input = \"A r\\xc3\\xa9sum\\xc3\\xa9, also spelled resum\\xc3\\xa9 or resume\"\n output = input.split(\" \")\n output[1] = output[1][0:-1]\n input = array.array('c', input)\n output = [array.array('c', w) for w in output]\n for (itmO, itmV) in zip(output, tokenize_en(array.array('c', input))):\n self.assertEqual(itmO, itmV[0])\n self.assertEqual(input[itmV[1]:itmV[1] + len(itmV[0])], itmO)", "def test_iseq_to_qseq_fields(self):\r\n i = \"HWI-ST753_50:6:1101:15435:9071#0/1:ACCAGACGATGCTACGGAGGGAGCTAGCGTTGTTCGGAATTACTGGGCGTAAAGCGCACGTAGGCGGCTTTGTAAGTTAGAGGTGAAAGCCTGGAGCTCAAC:gggggggfggdegggggggggggggggggggegggggggggegggggggeggcccccFUZSU_]]^^ggggggdggdgeeeccYacadcbeddceegggeeg\"\r\n # barcode in sequence, barcode length = 12\r\n expected = (\r\n (\"HWI-ST753\", \"50\", \"6\", \"1101\", \"15435\", \"9071\", \"0\", \"1\"),\r\n \"TACGGAGGGAGCTAGCGTTGTTCGGAATTACTGGGCGTAAAGCGCACGTAGGCGGCTTTGTAAGTTAGAGGTGAAAGCCTGGAGCTCAAC\", \"gggggggggggggggggggegggggggggegggggggeggcccccFUZSU_]]^^ggggggdggdgeeeccYacadcbeddceegggeeg\", \"ACCAGACGATGC\", \"gggggggfggde\")\r\n self.assertEqual(\r\n iseq_to_qseq_fields(i, barcode_in_header=False, barcode_length=12),\r\n expected)\r\n # barcode in sequence, barcode length = 6\r\n expected = (\r\n (\"HWI-ST753\", \"50\", \"6\", \"1101\", \"15435\", \"9071\", \"0\", \"1\"),\r\n \"CGATGCTACGGAGGGAGCTAGCGTTGTTCGGAATTACTGGGCGTAAAGCGCACGTAGGCGGCTTTGTAAGTTAGAGGTGAAAGCCTGGAGCTCAAC\", \"gfggdegggggggggggggggggggegggggggggegggggggeggcccccFUZSU_]]^^ggggggdggdgeeeccYacadcbeddceegggeeg\", \"ACCAGA\", \"gggggg\")\r\n self.assertEqual(\r\n iseq_to_qseq_fields(i, barcode_in_header=False, barcode_length=6),\r\n expected)\r\n\r\n # barcode in header, barcode length = 6\r\n i = \"HWI-6X_9267:1:1:4:1699#ACCACCC/1:TACGGAGGGTGCGAGCGTTAATCGCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCGAAAAAAAAAAAAAAAAAAAAAAA:abbbbbbbbbb`_`bbbbbb`bb^aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaDaabbBBBBBBBBBBBBBBBBBBB\"\r\n expected = ((\"HWI-6X\", \"9267\", \"1\", \"1\", \"4\", \"1699\", \"ACCACCC\", \"1\"),\r\n \"TACGGAGGGTGCGAGCGTTAATCGCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCGAAAAAAAAAAAAAAAAAAAAAAA\", \"abbbbbbbbbb`_`bbbbbb`bb^aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaDaabbBBBBBBBBBBBBBBBBBBB\", \"ACCACC\", \"bbbbbb\")\r\n self.assertEqual(\r\n iseq_to_qseq_fields(i, barcode_in_header=True, barcode_length=6),\r\n expected)\r\n # barcode in header, barcode length = 3\r\n expected = ((\"HWI-6X\", \"9267\", \"1\", \"1\", \"4\", \"1699\", \"ACCACCC\", \"1\"),\r\n \"TACGGAGGGTGCGAGCGTTAATCGCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCGAAAAAAAAAAAAAAAAAAAAAAA\", \"abbbbbbbbbb`_`bbbbbb`bb^aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaDaabbBBBBBBBBBBBBBBBBBBB\", \"ACC\", \"bbb\")\r\n self.assertEqual(\r\n iseq_to_qseq_fields(i, barcode_in_header=True, barcode_length=3),\r\n expected)", "def test_header(self):\n header = mibheader(TEST_MIB)\n\n true_value = {\n \"ID\": \"MQ1\",\n \"seq_num\": 1,\n \"offset\": 384,\n \"nchips\": 1,\n \"shape\": (256, 256),\n \"dtype\": np.dtype(\">u2\"),\n \"timestamp\": datetime(2018, 1, 19, 20, 55, 10, 966026).timestamp(),\n }\n\n self.assertDictEqual(header, true_value)", "def test_parse_header(self):\n data = parse_header(self.header)\n self.assertEqual(data.get(\"application\"), \"my Grandma\")\n self.assertEqual(data.get(\"version\"), \"has\")\n self.assertEqual(data.get(\"reference\"), \"furry\")\n self.assertEqual(data.get(\"query_letters\"), 27)\n self.assertEqual(data.get(\"database\"), \"Cats\")", "def 
test_check_header_missing_fields(self):\r\n\r\n # Default header, should not generate any errors/warnings\r\n header = ['AAA', 'XXX', 'YYY',\r\n 'ZZZ']\r\n errors = []\r\n warnings = []\r\n\r\n errors, warnings = check_header(header,\r\n errors,\r\n warnings,\r\n sample_id_ix=0,\r\n desc_ix=3,\r\n bc_ix=1,\r\n linker_primer_ix=2,\r\n added_demultiplex_field=None)\r\n\r\n expected_errors = [\r\n 'Found header field AAA, expected field SampleID\\t0,0',\r\n 'Found header field XXX, expected field BarcodeSequence\\t0,1',\r\n 'Found header field YYY, expected field LinkerPrimerSequence\\t0,2',\r\n 'Found header field ZZZ, last field should be Description\\t0,3']\r\n expected_warnings = []\r\n\r\n self.assertEqual(errors, expected_errors)\r\n self.assertEqual(warnings, expected_warnings)", "def testDecode(self):\n test_cases = [\n ('1\\n', '\\n'),\n ('1 ', ' '),\n ('3a 3b', 'aaabbb'),\n ('1a 1 1b', 'a b'),\n ('3\\n', '\\n\\n\\n'),\n ('11 22 33', '122333'),\n ('10a', 'aaaaaaaaaa'),\n ('10a 11b', 'aaaaaaaaaabbbbbbbbbbb'),\n ('1001a', 'a'*1001),\n ('1001a 909b 65c 2d', ''.join(['a'*1001, 'b'*909, 'c'*65, 'd'*2])),\n ]\n for data, expected in test_cases:\n decoded_result = ASCIITransportFormat.decode_data(data)\n self.assertEqual(decoded_result, expected)", "def test_check_header_required_fields(self):\r\n\r\n # Default header, should not generate any errors/warnings\r\n header = [\r\n 'SampleID', 'BarcodeSequence', 'LinkerPrimerSequence', 'run_prefix',\r\n 'Description']\r\n errors = []\r\n\r\n errors = check_header_required_fields(header,\r\n errors,\r\n sample_id_ix=0,\r\n desc_ix=4,\r\n bc_ix=1,\r\n linker_primer_ix=2,\r\n added_demultiplex_field='run_prefix')\r\n\r\n expected_errors = []\r\n\r\n self.assertEqual(errors, expected_errors)\r\n\r\n # Should find all as errors if not named correctly\r\n header = ['AAA', 'BBB', 'CCC', 'DDD',\r\n 'EEE']\r\n errors = []\r\n\r\n errors = check_header_required_fields(header,\r\n errors,\r\n sample_id_ix=0,\r\n desc_ix=4,\r\n bc_ix=1,\r\n linker_primer_ix=2,\r\n added_demultiplex_field='run_prefix')\r\n\r\n expected_errors = [\r\n 'Found header field AAA, expected field SampleID\\t0,0',\r\n 'Found header field BBB, expected field BarcodeSequence\\t0,1',\r\n 'Found header field CCC, expected field LinkerPrimerSequence\\t0,2',\r\n 'Found header field EEE, last field should be Description\\t0,4',\r\n 'Missing added demultiplex field run_prefix\\t-1,-1']\r\n\r\n self.assertEqual(errors, expected_errors)", "def test05(self):\n a = np.array([u\"aŀle\", u\"eñe\", u\"açò\"], dtype=\"U4\")\n ac = bcolz.carray(a, dtype='U4')\n self.assertTrue(ac.dtype == np.dtype('U4'))\n self.assertTrue(a.dtype == ac.dtype)\n # print \"ac-->\", `ac`\n assert_array_equal(a, ac, \"Arrays are not equal\")", "def test_parse_rfh2_with_correct_encoding(self):\n\n rfh2 = pymqi.RFH2()\n try:\n rfh2.unpack(self.single_rfh2_message, 273)\n self.assertEqual(len(rfh2.get()), 14, \"Number of attributes incorrect. Should be 12? But is %s\" % str(len(rfh2.get())))\n self.assertEqual(rfh2[\"StrucId\"], CMQC.MQRFH_STRUC_ID, \"StrucId has incorrect value. Should be: %s But is: %s\" % (CMQC.MQRFH_STRUC_ID, str(rfh2[\"StrucId\"])))\n self.assertEqual(rfh2[\"Version\"], CMQC.MQRFH_VERSION_2, \"Version has incorrect value. Should be: %i But is: %s\" % (CMQC.MQRFH_VERSION_2, str(rfh2[\"Version\"])))\n self.assertEqual(rfh2[\"StrucLength\"], 284, \"StrucLength has incorrect value. 
Should be: %i But is: %s\" % (284, str(rfh2[\"StrucLength\"])))\n self.assertEqual(rfh2[\"Encoding\"], 273, \"Encoding has incorrect value. Should be: %i But is: %s\" % (273, str(rfh2[\"Encoding\"])))\n self.assertEqual(rfh2[\"CodedCharSetId\"], 1208, \"CodedCharSetId has incorrect value. Should be: %i But is: %s\" % (1208, str(rfh2[\"CodedCharSetId\"])))\n self.assertEqual(rfh2[\"Format\"], CMQC.MQFMT_STRING, \"Format has incorrect value. Should be: %s But is: %s\" % (CMQC.MQFMT_NONE, str(rfh2[\"Format\"])))\n self.assertEqual(rfh2[\"Flags\"], 0, \"Flags has incorrect value. Should be: %i But is: %s\" % (0, str(rfh2[\"Flags\"])))\n self.assertEqual(rfh2[\"NameValueCCSID\"], 1208, \"NameValueCCSID has incorrect value. Should be: %i But is: %s\" % (1208, str(rfh2[\"NameValueCCSID\"])))\n self.assertEqual(rfh2[\"pscLength\"], 152, \"pscLength has incorrect value. Should be: %i But is: %s\" % (152, str(rfh2[\"pscLength\"])))\n self.assertEqual(rfh2[\"psc\"], b\"<psc><Command>RegSub</Command><Topic>$topictree/topiccat/topic</Topic><QMgrName>DebugQM</QMgrName><QName>PUBOUT</QName><RegOpt>PersAsPub</RegOpt></psc> \", \"psc has incorrect value. Should be: %s But is: %s\" % (\"<psc><Command>RegSub</Command><Topic>$topictree/topiccat/topic</Topic><QMgrName>DebugQM</QMgrName><QName>PUBOUT</QName><RegOpt>PersAsPub</RegOpt></psc> \", \">\" + str(rfh2[\"psc\"]) + \"<\"))\n self.assertEqual(rfh2[\"testFolderLength\"], 56, \"testFolderLength has incorrect value. Should be: %i But is: %s\" % (56, str(rfh2[\"testFolderLength\"])))\n self.assertEqual(rfh2[\"testFolder\"], b\"<testFolder><testVar>testValue</testVar></testFolder> \", \"testFolder has incorrect value. Should be: %s But is: %s\" % (\"<testFolder><testVar>testValue</testVar></testFolder> \", str(rfh2[\"testFolder\"])))\n self.assertEqual(rfh2[\"mcdLength\"], 28, \"mcdLength has incorrect value. Should be: %i But is: %s\" % (28, str(rfh2[\"mcdLength\"])))\n self.assertEqual(rfh2[\"mcd\"], b\"<mcd><Msd>xmlnsc</Msd></mcd>\", \"mcd has incorrect value. 
Should be: %s But is: %s\" % (\"<mcd><Msd>xmlnsc</Msd></mcd>\", str(rfh2[\"mcd\"])))\n\n except Exception as e:\n self.fail(e)", "def test_parse_msg_header():\n header = IMFV283Parser()._parse_msg_header(IMFV283_EXAMPLE_VIC)\n assert_equals(header['obs'], 'VIC')", "def test_decode():", "def test_array_abc_sequence(parser):\n obj = parser.parse(b'[1, 2, 3, 4, 5]')\n assert isinstance(obj, simdjson.Array)\n\n # __iter__\n assert list(iter(obj)) == [1, 2, 3, 4, 5]\n # __len__\n assert len(obj) == 5\n # __contains__\n assert 3 in obj\n assert 7 not in obj\n # __getitem__\n assert obj[2] == 3\n with pytest.raises(IndexError):\n obj[99]\n # __reversed__, implemented via __len__ and __getitem__ for now.\n assert list(reversed(obj)) == [5, 4, 3, 2, 1]", "def test_run_a_scan_on_sdp_subarray_in_mid():", "def test_check_header_match_pre180(self):\r\n\r\n # match w illumina qual string\r\n self.assertTrue(check_header_match_pre180(\"@990:2:4:11272:5533#1/1\",\r\n \"@990:2:4:11272:5533#1/2\"))\r\n self.assertTrue(check_header_match_pre180(\"@990:2:4:11272:5533#1/1\",\r\n \"@990:2:4:11272:5533#1/3\"))\r\n # qual string differs (this is acceptable)\r\n self.assertTrue(check_header_match_pre180(\"@990:2:4:11272:5533#1/1\",\r\n \"@990:2:4:11272:5533#0/3\"))\r\n # match wo illumina qual string\r\n self.assertTrue(check_header_match_pre180(\"@990:2:4:11272:5533/1\",\r\n \"@990:2:4:11272:5533/2\"))\r\n self.assertTrue(check_header_match_pre180(\"@990:2:4:11272:5533/1\",\r\n \"@990:2:4:11272:5533/3\"))\r\n\r\n # mismatch w illumina qual string\r\n self.assertFalse(check_header_match_pre180(\"@990:2:4:11272:5533#1/1\",\r\n \"@990:2:4:11272:5532#1/2\"))\r\n self.assertFalse(check_header_match_pre180(\"@990:2:4:11272:5533#1/1\",\r\n \"@890:2:4:11272:5533#1/2\"))\r\n # mismatch wo illumina qual string\r\n self.assertFalse(check_header_match_pre180(\"@990:2:4:11272:5533/1\",\r\n \"@990:2:4:11272:5532/2\"))\r\n self.assertFalse(check_header_match_pre180(\"@990:2:4:11272:5533/1\",\r\n \"@890:2:4:11272:5533/2\"))", "def test_rawarray_edf(tmp_path):\n rng = np.random.RandomState(12345)\n format = \"edf\"\n ch_types = [\"eeg\", \"eeg\", \"stim\", \"ecog\", \"seeg\", \"eog\", \"ecg\", \"emg\", \"dbs\", \"bio\"]\n ch_names = np.arange(len(ch_types)).astype(str).tolist()\n info = create_info(ch_names, sfreq=1000, ch_types=ch_types)\n data = rng.random(size=(len(ch_names), 1000)) * 1e-5\n\n # include subject info and measurement date\n subject_info = dict(\n first_name=\"mne\", last_name=\"python\", birthday=(1992, 1, 20), sex=1, hand=3\n )\n info[\"subject_info\"] = subject_info\n raw = RawArray(data, info)\n time_now = datetime.now()\n meas_date = datetime(\n year=time_now.year,\n month=time_now.month,\n day=time_now.day,\n hour=time_now.hour,\n minute=time_now.minute,\n second=time_now.second,\n tzinfo=timezone.utc,\n )\n raw.set_meas_date(meas_date)\n temp_fname = tmp_path / f\"test.{format}\"\n\n raw.export(temp_fname, add_ch_type=True)\n raw_read = read_raw_edf(temp_fname, infer_types=True, preload=True)\n\n # stim channel should be dropped\n raw.drop_channels(\"2\")\n\n assert raw.ch_names == raw_read.ch_names\n # only compare the original length, since extra zeros are appended\n orig_raw_len = len(raw)\n assert_array_almost_equal(\n raw.get_data(), raw_read.get_data()[:, :orig_raw_len], decimal=4\n )\n assert_allclose(raw.times, raw_read.times[:orig_raw_len], rtol=0, atol=1e-5)\n\n # check channel types except for 'bio', which loses its type\n orig_ch_types = raw.get_channel_types()\n read_ch_types = 
raw_read.get_channel_types()\n assert_array_equal(orig_ch_types, read_ch_types)\n assert raw.info[\"meas_date\"] == raw_read.info[\"meas_date\"]\n\n # channel name can't be longer than 16 characters with the type added\n raw_bad = raw.copy()\n raw_bad.rename_channels({\"1\": \"abcdefghijklmnopqrstuvwxyz\"})\n with pytest.raises(RuntimeError, match=\"Signal label\"), pytest.warns(\n RuntimeWarning, match=\"Data has a non-integer\"\n ):\n raw_bad.export(temp_fname, overwrite=True)\n\n # include bad birthday that is non-EDF compliant\n bad_info = info.copy()\n bad_info[\"subject_info\"][\"birthday\"] = (1700, 1, 20)\n raw = RawArray(data, bad_info)\n with pytest.raises(RuntimeError, match=\"Setting patient birth date\"):\n raw.export(temp_fname, overwrite=True)\n\n # include bad measurement date that is non-EDF compliant\n raw = RawArray(data, info)\n meas_date = datetime(year=1984, month=1, day=1, tzinfo=timezone.utc)\n raw.set_meas_date(meas_date)\n with pytest.raises(RuntimeError, match=\"Setting start date time\"):\n raw.export(temp_fname, overwrite=True)\n\n # test that warning is raised if there are non-voltage based channels\n raw = RawArray(data, info)\n raw.set_channel_types({\"9\": \"hbr\"}, on_unit_change=\"ignore\")\n with pytest.warns(RuntimeWarning, match=\"Non-voltage channels\"):\n raw.export(temp_fname, overwrite=True)\n\n # data should match up to the non-accepted channel\n raw_read = read_raw_edf(temp_fname, preload=True)\n orig_raw_len = len(raw)\n assert_array_almost_equal(\n raw.get_data()[:-1, :], raw_read.get_data()[:, :orig_raw_len], decimal=4\n )\n assert_allclose(raw.times, raw_read.times[:orig_raw_len], rtol=0, atol=1e-5)\n\n # the data should still match though\n raw_read = read_raw_edf(temp_fname, preload=True)\n raw.drop_channels(\"2\")\n assert raw.ch_names == raw_read.ch_names\n orig_raw_len = len(raw)\n assert_array_almost_equal(\n raw.get_data(), raw_read.get_data()[:, :orig_raw_len], decimal=4\n )\n assert_allclose(raw.times, raw_read.times[:orig_raw_len], rtol=0, atol=1e-5)", "def test_load_quality_codes():\n assert len(code_reader.load_quality_codes()) > 0", "def test_check_header_chars(self):\r\n\r\n # Default header, should not generate any errors/warnings\r\n header = [\r\n 'SampleID', 'BarcodeSequence', 'LinkerPrimerSequence', 'run_p-%efix',\r\n 'Description']\r\n warnings = []\r\n\r\n warnings = check_header_chars(header, warnings)\r\n\r\n expected_warnings = [\r\n 'Found invalid character in run_p-%efix header field.\\t0,3']\r\n\r\n self.assertEqual(warnings, expected_warnings)", "def test_run_a_scan_on_sdp_subarray_in_low():", "def test04(self):\n a = np.array([\"ale\", \"e\", \"aco\"], dtype=\"S4\")\n ac = bcolz.carray(a, dtype='S4')\n self.assertTrue(ac.dtype == np.dtype('S4'))\n self.assertTrue(a.dtype == ac.dtype)\n # print \"ac-->\", `ac`\n assert_array_equal(a, ac, \"Arrays are not equal\")", "def test_transcoder(self, raw, value):\n assert DPTSceneNumber.to_knx(value) == DPTArray(raw)\n assert DPTSceneNumber.from_knx(DPTArray(raw)) == value", "def autodetect_endian_and_sanity_check_su(file):\n pos = file.tell()\n if isinstance(file, io.BytesIO):\n file.seek(0, 2)\n size = file.tell()\n file.seek(pos, 0)\n else:\n size = os.fstat(file.fileno())[6]\n if size < 244:\n return False\n # Also has to be a multiple of 4 in length because every header is 400 long\n # and every data value 4 byte long.\n elif (size % 4) != 0:\n return False\n # Jump to the number of samples field in the trace header.\n file.seek(114, 0)\n sample_count = file.read(2)\n 
interval = file.read(2)\n # Jump to the beginning of the year fields.\n file.seek(156, 0)\n year = file.read(2)\n jul_day = file.read(2)\n hour = file.read(2)\n minute = file.read(2)\n second = file.read(2)\n # Jump to previous position.\n file.seek(pos, 0)\n # Unpack in little and big endian.\n le_sample_count = unpack(b'<h', sample_count)[0]\n be_sample_count = unpack(b'>h', sample_count)[0]\n # Check if both work.\n working_byteorders = []\n if le_sample_count > 0:\n length = 240 + (le_sample_count * 4)\n if (size % length) == 0:\n working_byteorders.append('<')\n if be_sample_count > 0:\n length = 240 + (be_sample_count * 4)\n if (size % length) == 0:\n working_byteorders.append('>')\n # If None works return False.\n if len(working_byteorders) == 0:\n return False\n # Check if the other header values make sense.\n still_working_byteorders = []\n for bo in working_byteorders:\n fmt = (\"%sh\" % bo).encode('ascii', 'strict')\n this_interval = unpack(fmt, interval)[0]\n this_year = unpack(fmt, year)[0]\n this_julday = unpack(fmt, jul_day)[0]\n this_hour = unpack(fmt, hour)[0]\n this_minute = unpack(fmt, minute)[0]\n this_second = unpack(fmt, second)[0]\n # Make a sanity check for each.\n # XXX: The arbitrary maximum of the sample interval is 10 seconds.\n if this_interval <= 0 or this_interval > 10E7:\n continue\n # Some programs write two digit years.\n if this_year != 0 and (this_year < 1930 or this_year >= 2030) and \\\n (this_year < 0 or this_year >= 100):\n continue\n # 9999 is often used as a placeholder\n if (this_julday > 366 or this_julday < 0) and this_julday != 9999:\n continue\n if this_hour > 24 or this_hour < 0:\n continue\n if this_minute > 60 or this_minute < 0:\n continue\n if this_second > 60 or this_second < 0:\n continue\n still_working_byteorders.append(bo)\n length = len(still_working_byteorders)\n if not length:\n return False\n elif length == 1:\n return still_working_byteorders[0]\n else:\n # XXX: In the unlikely case both byte orders pass the sanity checks\n # something else should be checked. Currently it is not.\n msg = \"\"\"\n Both possible byte orders passed all sanity checks. 
Please contact\n the ObsPy developers so they can implement additional tests.\n \"\"\".strip()\n raise Exception(msg)", "def __check(self, msg):\n msg = bytearray(msg)\n # Check that header is correct\n if msg[:2] != b'\\xFB\\xBF':\n return False\n # Check that ending is correct\n elif msg[-1:] != b'\\xED':\n return False\n # Check that check byte is correct\n elif msg[-2:-1] != bytes([sum(msg[2:-2]) % 256]):\n return False\n else:\n return True", "def test_quality_filter_illumina_qual(self):\r\n # header with no qual data passes\r\n header = \"990:2:4:11271:5323/1\"\r\n sequence = \\\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\"\r\n quality = \\\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"\r\n actual = quality_filter_sequence(header,\r\n sequence,\r\n quality,\r\n max_bad_run_length=0,\r\n phred_quality_threshold=2,\r\n min_per_read_length=0.75,\r\n seq_max_N=0,\r\n filter_bad_illumina_qual_digit=True)\r\n self.assertEqual(actual, (0,\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\",\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"))\r\n\r\n # header with no qual data passes\r\n header = \"990:2:4:11271:5323/0\"\r\n sequence = \\\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\"\r\n quality = \\\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"\r\n actual = quality_filter_sequence(header,\r\n sequence,\r\n quality,\r\n max_bad_run_length=0,\r\n phred_quality_threshold=2,\r\n min_per_read_length=75,\r\n seq_max_N=0,\r\n filter_bad_illumina_qual_digit=True)\r\n self.assertEqual(actual, (0,\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\",\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"))\r\n\r\n # header with no qual data passes (old barcode in header format)\r\n header = \"HWI-6X_9267:1:1:4:1699#ACCACCC/1\"\r\n sequence = \\\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\"\r\n quality = \\\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"\r\n actual = quality_filter_sequence(header,\r\n sequence,\r\n quality,\r\n max_bad_run_length=0,\r\n phred_quality_threshold=2,\r\n min_per_read_length=75,\r\n seq_max_N=0,\r\n filter_bad_illumina_qual_digit=True)\r\n self.assertEqual(actual, (0,\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\",\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"))\r\n\r\n # bad qual fails filter\r\n header = \"@HWI-ST753_50:6:1101:1138:1965#0/1\"\r\n sequence = \\\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\"\r\n quality = \\\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"\r\n actual = quality_filter_sequence(header,\r\n sequence,\r\n quality,\r\n max_bad_run_length=0,\r\n phred_quality_threshold=2,\r\n min_per_read_length=75,\r\n seq_max_N=0,\r\n filter_bad_illumina_qual_digit=True)\r\n self.assertEqual(actual, (3,\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\",\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"))\r\n\r\n # bad qual passes filter if filter turned off\r\n header = \"@HWI-ST753_50:6:1101:1138:1965#0/1\"\r\n sequence = \\\r\n 
\"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\"\r\n quality = \\\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"\r\n actual = quality_filter_sequence(header,\r\n sequence,\r\n quality,\r\n max_bad_run_length=0,\r\n phred_quality_threshold=2,\r\n min_per_read_length=75,\r\n seq_max_N=0,\r\n filter_bad_illumina_qual_digit=False)\r\n self.assertEqual(actual, (0,\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\",\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"))\r\n\r\n # good qual passes filter\r\n header = \"@HWI-ST753_50:6:1101:1138:1965#1/1\"\r\n sequence = \\\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\"\r\n quality = \\\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"\r\n actual = quality_filter_sequence(header,\r\n sequence,\r\n quality,\r\n max_bad_run_length=0,\r\n phred_quality_threshold=2,\r\n min_per_read_length=75,\r\n seq_max_N=0,\r\n filter_bad_illumina_qual_digit=True)\r\n self.assertEqual(actual, (0,\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\",\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"))" ]
[ "0.5721757", "0.56352746", "0.56029373", "0.55741435", "0.5571948", "0.5553872", "0.5409489", "0.53822374", "0.5372538", "0.53498983", "0.53497857", "0.53476274", "0.534357", "0.53414613", "0.5300027", "0.5283559", "0.52749854", "0.52655655", "0.5253365", "0.5250469", "0.524431", "0.52348703", "0.5233096", "0.5232581", "0.5232142", "0.5223388", "0.5217768", "0.5201786", "0.51921403", "0.5188037" ]
0.7044205
0
Tests that we can decode a trace from the data stream.
def test_decode_trace(self):
    self.assertEqual(td.trace(), decoder.decode_trace(BytesIO(td.trace(True))))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_decode(self):\n pass # TODO(tlarsen)", "def testDecode(self):\n test_cases = [\n ('1\\n', '\\n'),\n ('1 ', ' '),\n ('3a 3b', 'aaabbb'),\n ('1a 1 1b', 'a b'),\n ('3\\n', '\\n\\n\\n'),\n ('11 22 33', '122333'),\n ('10a', 'aaaaaaaaaa'),\n ('10a 11b', 'aaaaaaaaaabbbbbbbbbbb'),\n ('1001a', 'a'*1001),\n ('1001a 909b 65c 2d', ''.join(['a'*1001, 'b'*909, 'c'*65, 'd'*2])),\n ]\n for data, expected in test_cases:\n decoded_result = ASCIITransportFormat.decode_data(data)\n self.assertEqual(decoded_result, expected)", "def test_decode():", "def test_decode():\n decoding = d.decode()\n assert type(decoding) == list\n assert len(decoding) == 7\n assert decoding[0] == '-12;-1\\n\\nESS'\n assert decoding[-1] == '2;-2\\n\\nWSWESNESSS'\n for x in decoding:\n assert \"\\n\" in x", "def test_decode_listmode_data(self):\n self.assertEqual([td.header(decoded=True)],\n decoder.decode_listmode_data(BytesIO(td.header(as_bytes=True)), self.mask))\n self.assertEqual([{**td.header_with_trace(decoded=True), **{'trace': td.trace()}}],\n decoder.decode_listmode_data(\n BytesIO(td.header_with_trace(as_bytes=True) + td.trace(True)),\n self.mask))", "def test_decode_external_timestamp(self):\n self.assertEqual(td.external_timestamp(), decoder.decode_external_timestamp(\n BytesIO(td.external_timestamp(True)), self.mask))", "def test_get_payload(self):\n payload = Payload()\n\n # No traces\n self.assertTrue(payload.empty)\n encoded_data = payload.get_payload()\n decoded_data = payload.encoder.decode(encoded_data)\n self.assertEqual(decoded_data, [])\n\n # Add traces to the payload\n for _ in range(5):\n trace = [Span(self.tracer, name='root.span'), Span(self.tracer, name='child.span')]\n payload.add_trace(trace)\n\n self.assertEqual(payload.length, 5)\n self.assertFalse(payload.empty)\n\n # Assert the payload generated from Payload\n encoded_data = payload.get_payload()\n decoded_data = payload.encoder.decode(encoded_data)\n self.assertEqual(len(decoded_data), 5)\n for trace in decoded_data:\n self.assertEqual(len(trace), 2)\n self.assertEqual(trace[0][b'name'], b'root.span')\n self.assertEqual(trace[1][b'name'], b'child.span')", "def test_decode_raises_when_format_unknown(thing):\n with pytest.raises(ValueError):\n decode(thing)", "def test_decode_qdc(self):\n self.assertEqual(td.qdc(), decoder.decode_qdc(BytesIO(td.qdc(True))))", "def decode(data): #@NoSelf", "def test_decode_invalid_B_record():\n\n invalid_b_records = [\n 'B1053175438931N0ÿÿÿøÈÐÀÀÜÐá\u0015\u0004ÀÄÈàÔÀÄÈÌØÀÀÜÀÀ',\n 'BÿÿÿøÄÀÈÌÄàÐäÐàààÁ8ÀÄÔÀäÈÌå��ÀÄàÔäÀ',\n 'B1140ÿÿÿøÌÈÔÐÌÌààÑ8ÀÈÐÈÌàÌÕ\u0015\u0004ÀÀääÈÀÀäÔ',\n 'B1309044931600N0153ÿÿÿøÐÀÄÍ\u0015\u0004ÀÄÔÌØÀÄÔÜØÀÀäÀ',\n 'B10470349ÿÿÿøÌÔäØÕ8ÀÄÔÄÈàÜÙ\u0015\u0004ÀÄàÐÐÀÄäÀÜÀÀØÀ',\n 'B11052249474ÿÿÿøÀÉ8ÀÄÔÀÜÜäÕ\u0015\u0004ÀÄÌÐÌÀÄÐÀÈÀÀÔÀ',\n 'B12ÿÿÿøÐØÀÌÐäÐÈØäÝ8ÀÄÔÄÜÌÐÑ\u0015\u0004ÀÄØÐàÀÄÜÐÀÀÀÜÀÀÀ4)\bÄÈ',\n 'B1124185148269N9833N00553309EA0084800873000068000000',\n 'B1245085122369N00614242Eÿÿÿù\u0004ÀÄÜØÄÀÄàÐäÀÀØÀ',\n ]\n\n for b_record in invalid_b_records:\n with pytest.raises(ValueError):\n LowLevelReader.decode_B_record(b_record)", "def test_decode_failure(self):\n\n def handle(event):\n ds = Dataset()\n ds.PatientName = \"Test^test\"\n return 0x0000, ds\n\n self.ae = ae = AE()\n ae.acse_timeout = 5\n ae.dimse_timeout = 5\n ae.network_timeout = 5\n ae.add_supported_context(DisplaySystem)\n scp = ae.start_server(\n (\"localhost\", 11112), block=False, evt_handlers=[(evt.EVT_N_GET, handle)]\n )\n\n ae.add_requested_context(DisplaySystem)\n assoc = ae.associate(\"localhost\", 11112)\n assert 
assoc.is_established\n\n class DummyReply:\n def getvalue(self):\n def test():\n pass\n\n return test\n\n class DummyMessage:\n is_valid_response = True\n AttributeList = DummyReply()\n Status = 0x0000\n STATUS_OPTIONAL_KEYWORDS = []\n\n class DummyDIMSE:\n msg_queue = queue.Queue()\n gotten = False\n\n def send_msg(*args, **kwargs):\n return\n\n def get_msg(self, *args, **kwargs):\n if not self.gotten:\n self.gotten = True\n return 1, DummyMessage()\n return None, None\n\n assoc._reactor_checkpoint.clear()\n while not assoc._is_paused:\n time.sleep(0.01)\n assoc.dimse = DummyDIMSE()\n assert assoc.is_established\n status, ds = assoc.send_n_get(\n [(0x7FE0, 0x0010)], DisplaySystem, \"1.2.840.10008.5.1.1.40.1\"\n )\n\n assert status.Status == 0x0110\n assert ds is None\n\n scp.shutdown()", "def test_decode_errors(self):\n if self._invalid_encoded:\n self.assert_raises((ValueError, jsonschema.exceptions.ValidationError),\n self.import_cls.decode,\n self._invalid_encoded[0], self.typedef)", "def test_decode(self):\r\n barcodes = ['AGCACGAGCCTA',\r\n 'AACTCGTCGATG',\r\n 'ACAGACCACTCA',\r\n 'ACCAGCGACTAG',\r\n 'AGCAGCACTTGT',\r\n 'AACTGTGCGTAC',\r\n 'ACAGAGTCGGCT',\r\n 'ACCGCAGAGTCA',\r\n 'ACGGTGAGTGTC', ]\r\n for bc in barcodes:\r\n self.assertEqual(golay.decode(bc), (bc, 0))\r\n for bc in barcodes:\r\n err_bc = 'C' + bc[1:]\r\n self.assertEqual(golay.decode(err_bc), (bc, 2))", "def parse_round_trip(self):\n parsed = self.test_proto.parse()\n round_trip = avro.protocol.parse(str(parsed))\n self.assertEqual(parsed, round_trip)", "def test_trace_parse_handling():\n\n print(\"Testing incorrect parsing:\")\n assert not actions.trace.TraceAction().parse(\"5:4\", logger)\n assert not actions.trace.TraceAction().parse(\"THISHOULDFAIL\", logger)\n assert not actions.trace.TraceAction().parse(\"\", logger)", "def test_deserialize(self):\r\n\r\n # test that from_json produces no exceptions\r\n self.assertDeserializeEqual('10:20:30', '\"10:20:30\"')", "def test_correct_deserialization(self):\n dataset = self._load_dataset()\n\n assert dataset.metadata.provider == Provider.STATSBOMB\n assert dataset.dataset_type == DatasetType.EVENT\n assert len(dataset.events) == 4022\n assert len(dataset.metadata.periods) == 2\n assert (\n dataset.metadata.orientation == Orientation.ACTION_EXECUTING_TEAM\n )\n assert dataset.metadata.teams[0].name == \"Barcelona\"\n assert dataset.metadata.teams[1].name == \"Deportivo Alavés\"\n\n player = dataset.metadata.teams[0].get_player_by_id(\"5503\")\n assert player.player_id == \"5503\"\n assert player.jersey_no == 10\n assert str(player) == \"Lionel Andrés Messi Cuccittini\"\n assert player.position is None # not set\n assert player.starting\n\n sub_player = dataset.metadata.teams[0].get_player_by_id(\"3501\")\n assert str(sub_player) == \"Philippe Coutinho Correia\"\n assert not sub_player.starting\n\n assert dataset.metadata.periods[0] == Period(\n id=1,\n start_timestamp=0.0,\n end_timestamp=2705.267,\n attacking_direction=AttackingDirection.NOT_SET,\n )\n assert dataset.metadata.periods[1] == Period(\n id=2,\n start_timestamp=2705.268,\n end_timestamp=5557.321,\n attacking_direction=AttackingDirection.NOT_SET,\n )", "def test_predict_probe_data_2(self):\n reader = StringIO('1380:\\n804004\\n2369086\\n')\n writer = StringIO()\n predict_probe_data(reader, writer)\n self.assertEqual(writer.getvalue(), '1380:\\n3.5\\n3.4\\n')", "def testGetDataStream(self):\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_CS, parent=self._gpt_path_spec,\n 
volume_index=0)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n data_stream_name = ''\n data_stream = file_entry.GetDataStream(data_stream_name)\n self.assertIsNotNone(data_stream)\n self.assertEqual(data_stream.name, data_stream_name)\n\n data_stream = file_entry.GetDataStream('bogus')\n self.assertIsNone(data_stream)", "def test_decode_messages():\n decoding1 = d.decode()\n decoding2 = s.decode_messages()\n assert decoding1 == decoding2\n decoding3 = SecretGarden(filename, \"HELLO, STUDENTS.\").decode_messages()\n assert decoding1 != decoding3", "def test_decodeWithErrors(self):\n bytes = b'Hello world'\n self.assertEqual(\n bytes.decode('imap4-utf-7', 'strict'),\n bytes.decode('imap4-utf-7'))", "def test_predict_probe_data_1(self):\n reader = StringIO('138:\\n1735266\\n1270280\\n')\n writer = StringIO()\n predict_probe_data(reader, writer)\n self.assertEqual(writer.getvalue(), '138:\\n3.3\\n3.2\\n')", "def test_basic_parser_trace():", "def decode(data):\n raise NotImplementedError", "def test_read_telescope_events_type(dl2_shower_geometry_file):\n\n from ctapipe.io.tableloader import TableLoader\n\n subarray = SubarrayDescription.from_hdf(dl2_shower_geometry_file)\n\n with TableLoader(\n dl2_shower_geometry_file,\n load_dl1_images=False,\n load_dl1_parameters=False,\n load_dl2=True,\n load_simulated=True,\n load_true_images=True,\n load_instrument=True,\n ) as table_loader:\n\n table = table_loader.read_telescope_events([\"MST_MST_FlashCam\"])\n\n assert \"HillasReconstructor_alt\" in table.colnames\n assert \"true_energy\" in table.colnames\n assert \"true_image\" in table.colnames\n expected_ids = subarray.get_tel_ids_for_type(\"MST_MST_FlashCam\")\n assert set(table[\"tel_id\"].data).issubset(expected_ids)\n assert \"equivalent_focal_length\" in table.colnames\n # regression test for #2051\n assert \"HillasReconstructor_tel_impact_distance\" in table.colnames", "def test_decode_failure(self):\n\n def handle(event):\n ds = Dataset()\n ds.PatientName = \"Test^test\"\n return 0x0000, ds\n\n self.ae = ae = AE()\n ae.acse_timeout = 5\n ae.dimse_timeout = 0.4\n ae.network_timeout = 5\n ae.add_supported_context(ModalityPerformedProcedureStep)\n scp = ae.start_server(\n (\"localhost\", 11112), block=False, evt_handlers=[(evt.EVT_N_SET, handle)]\n )\n\n ae.add_requested_context(ModalityPerformedProcedureStep, ExplicitVRLittleEndian)\n assoc = ae.associate(\"localhost\", 11112)\n assert assoc.is_established\n\n class DummyReply:\n def getvalue(self):\n def test():\n pass\n\n return test\n\n class DummyMessage:\n is_valid_response = True\n AttributeList = DummyReply()\n Status = 0x0000\n STATUS_OPTIONAL_KEYWORDS = []\n\n class DummyDIMSE:\n msg_queue = queue.Queue()\n gotten = False\n\n def send_msg(*args, **kwargs):\n return\n\n def get_msg(self, *args, **kwargs):\n if not self.gotten:\n self.gotten = True\n return 1, DummyMessage()\n return None, None\n\n assoc._reactor_checkpoint.clear()\n while not assoc._is_paused:\n time.sleep(0.01)\n assoc.dimse = DummyDIMSE()\n assert assoc.is_established\n mod_list = Dataset()\n mod_list.PatientName = \"Test^test\"\n status, ds = assoc.send_n_set(\n mod_list, ModalityPerformedProcedureStep, \"1.2.840.10008.5.1.1.40.1\"\n )\n\n assert status.Status == 0x0110\n assert ds is None\n\n scp.shutdown()", "def test_raw_data(self):\n self.assertEqual(self.tester.raw_data, 1)", "def _read_trace(self, unpack_headers=False, headonly=False):\n trace_header = self.file.read(240)\n # Check if it is smaller 
than 240 byte.\n if len(trace_header) != 240:\n msg = 'The trace header needs to be 240 bytes long'\n raise SEGYTraceHeaderTooSmallError(msg)\n self.header = SEGYTraceHeader(trace_header,\n endian=self.endian,\n unpack_headers=unpack_headers)\n # The number of samples in the current trace.\n npts = self.header.number_of_samples_in_this_trace\n self.npts = npts\n # Do a sanity check if there is enough data left.\n pos = self.file.tell()\n data_left = self.filesize - pos\n data_needed = DATA_SAMPLE_FORMAT_SAMPLE_SIZE[self.data_encoding] * \\\n npts\n if npts < 1 or data_needed > data_left:\n msg = \"\"\"\n Too little data left in the file to unpack it according to\n its trace header. This is most likely either due to a wrong\n byte order or a corrupt file.\n \"\"\".strip()\n raise SEGYTraceReadingError(msg)\n if headonly:\n # skip reading the data, but still advance the file\n self.file.seek(data_needed, 1)\n # build a function for reading data from the disk on the fly\n self.unpack_data = OnTheFlyDataUnpacker(\n DATA_SAMPLE_FORMAT_UNPACK_FUNCTIONS[self.data_encoding],\n self.file.name, self.file.mode, pos, npts, endian=self.endian)\n else:\n # Unpack the data.\n self.data = DATA_SAMPLE_FORMAT_UNPACK_FUNCTIONS[\n self.data_encoding](self.file, npts, endian=self.endian)", "def test_bad_data(self):\n # Bad checksum\n # If checksum is bad, skip the record and continue parsing.\n self.stream_handle = StringIO(AdcpsJlnStcParserUnitTestCase.BAD_CHECKSUM)\n self.parser = AdcpsJlnStcParser(self.config, self.start_state, self.stream_handle,\n self.state_callback, self.pub_callback, self.exception_callback)\n # Only the header and second record, particle_b should be returned.\n result = self.parser.get_records(3)\n self.assertEqual(self.publish_callback_value[0], self.particle_header_footer)\n self.assertEqual(self.publish_callback_value[1], self.particle_b)\n if len(result) != 2:\n self.fail(\"Expected two records and got %d. Record containing bad data should have been skipped.\", len(result))\n \n # Incorrect number of bytes\n # If numbytes is incorrect, skip the record and continue parsing.\n self.start_state = {StateKey.POSITION: 0}\n self.stream_handle = StringIO(AdcpsJlnStcParserUnitTestCase.BAD_NUM_BYTES)\n self.parser = AdcpsJlnStcParser(self.config, self.start_state, self.stream_handle,\n self.state_callback, self.pub_callback, self.exception_callback) \n result = self.parser.get_records(3)\n self.assertEqual(self.publish_callback_value[0], self.particle_header_footer)\n self.assertEqual(self.publish_callback_value[1], self.particle_b)\n if len(result) != 2:\n self.fail(\"Expected two records and got %d. Record containing bad data should have been skipped.\", len(result))" ]
[ "0.6836183", "0.6604363", "0.6448971", "0.6442332", "0.6401186", "0.6361264", "0.6157366", "0.6095047", "0.5986678", "0.5910711", "0.5854313", "0.583977", "0.58035004", "0.575414", "0.5735243", "0.5728554", "0.5705553", "0.5687723", "0.56629175", "0.5657399", "0.562537", "0.5617992", "0.5614218", "0.55966324", "0.5575074", "0.5572684", "0.55592483", "0.5547808", "0.554073", "0.5536533" ]
0.860621
0
Tests that we raise a ValueError when decoding a bad header length.
def test_process_optional_header_data_bad_header_length(self):
    with self.assertRaises(ValueError):
        decoder.process_optional_header_data(BytesIO(td.external_timestamp(True)), 3, self.mask)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_bad_control_packet_header(self, data):\n with pytest.raises(StreamError) as e:\n ControlHeaderStruct.parse(data)", "def test_bad_control_packet_header(self, data):\n with pytest.raises(StreamError) as e:\n ControlHeaderStruct.parse(data)", "def test_error_message_header_bad_request_codes(self):\n error_type = 1\n error_type_value = Error.ErrorType.OFPET_BAD_REQUEST\n\n error_code = 0\n\n iter_given_code = Error.ErrorType.get_class(error_type_value).__iter__()\n length = Error.ErrorType.get_class(error_type_value).__len__()\n\n while error_code < self.MAX_BAD_REQUEST_CODE_VALUE or length > 0:\n\n data = UBInt32(random.randint(2, 250)).pack()\n xid = random.randint(2, 250)\n\n test_value = b'\\x05\\x01\\x00\\x10' + UBInt32(xid).pack() + UBInt16(error_type).pack() + \\\n UBInt16(error_code).pack() + data\n\n if error_code < self.MAX_BAD_REQUEST_CODE_VALUE:\n error_code += 1\n\n length -= 1\n\n test_object_error_messages = Error.ErrorMsg(xid, error_type_value, iter_given_code.__next__(), data).pack()\n\n self.assertEqual(test_value, test_object_error_messages)", "def test_raw_bad_values(self):\n self.assertRawOK(['60'])\n self.assertRawOK(['1' * 10])\n self.assertRaisesHeaderError(['1' * 11])\n self.assertRaisesHeaderError(['60,60'])\n self.assertRaisesHeaderError(['60 60'])\n self.assertRaisesHeaderError(['60;60'])\n self.assertRaisesHeaderError(['60.60'])\n self.assertRaisesHeaderError(['60', '60'])\n self.assertRaisesHeaderError(['foo'])", "def test_error_message_header_bad_property_codes(self):\n\n error_type = 14\n error_type_value = Error.ErrorType.OFPET_BAD_PROPERTY\n\n error_code = 0\n\n iter_given_code = Error.ErrorType.get_class(error_type_value).__iter__()\n length = Error.ErrorType.get_class(error_type_value).__len__()\n\n while error_code < self.MAX_BAD_PROPERTY_CODE_VALUE or length > 0:\n data = UBInt32(random.randint(2, 250)).pack()\n xid = random.randint(2, 250)\n\n test_value = b'\\x05\\x01\\x00\\x10' + UBInt32(xid).pack() + UBInt16(error_type).pack() + \\\n UBInt16(error_code).pack() + data\n\n if error_code < self.MAX_BAD_PROPERTY_CODE_VALUE:\n error_code += 1\n length -= 1\n test_object_error_messages = Error.ErrorMsg(xid, error_type_value, iter_given_code.__next__(), data).pack()\n\n self.assertEqual(test_value, test_object_error_messages)", "def test_error_message_header_bad_instruction_codes(self):\n\n error_type = 3\n error_type_value = Error.ErrorType.OFPET_BAD_INSTRUCTION\n\n error_code = 0\n\n iter_given_code = Error.ErrorType.get_class(error_type_value).__iter__()\n length = Error.ErrorType.get_class(error_type_value).__len__()\n\n while error_code < self.MAX_BAD_INSTRUCTION_CODE_VALUE or length > 0:\n data = UBInt32(random.randint(2, 250)).pack()\n xid = random.randint(2, 250)\n\n test_value = b'\\x05\\x01\\x00\\x10' + UBInt32(xid).pack() + UBInt16(error_type).pack() + \\\n UBInt16(error_code).pack() + data\n\n if error_code < self.MAX_BAD_INSTRUCTION_CODE_VALUE:\n error_code += 1\n\n length -= 1\n test_object_error_messages = Error.ErrorMsg(xid, error_type_value, iter_given_code.__next__(), data).pack()\n\n self.assertEqual(test_value, test_object_error_messages)", "def test_parser_raises_decode_error(self):\n with self.assertRaises(ParseError):\n self.parser.parse(\n stream=BytesIO(b'{\"value\": NaN}'),\n media_type=\"application/json\",\n parser_context={},\n )", "def testReadHeaderFail(self):\n archive = archive_parser.Archive('Fail.')\n self.assertRaises(ValueError, archive.Parse)", "def test_check_response_length_invalid(input):\r\n cmd = 
ShdlcCmdGetErrorState(clear=False)\r\n with pytest.raises(ShdlcResponseError):\r\n cmd.check_response_length(input)", "def test_bad_ipbus_packet_header(self, data):\n with pytest.raises(StreamError) as e:\n PacketHeaderStruct.parse(data)", "def test_bad_ipbus_packet_header(self, data):\n with pytest.raises(StreamError) as e:\n PacketHeaderStruct.parse(data)", "def test_value_error(self):\n self._error_test(ValueError)", "def test_error_message_header_bad_match_codes(self):\n\n error_type = 4\n error_type_value = Error.ErrorType.OFPET_BAD_MATCH\n\n error_code = 0\n\n iter_given_code = Error.ErrorType.get_class(error_type_value).__iter__()\n length = Error.ErrorType.get_class(error_type_value).__len__()\n\n while error_code < self.MAX_BAD_MATCH_CODE_VALUE or length > 0:\n data = UBInt32(random.randint(2, 250)).pack()\n xid = random.randint(2, 250)\n\n test_value = b'\\x05\\x01\\x00\\x10' + UBInt32(xid).pack() + UBInt16(error_type).pack() + \\\n UBInt16(error_code).pack() + data\n\n if error_code < self.MAX_BAD_MATCH_CODE_VALUE:\n error_code += 1\n length -= 1\n\n test_object_error_messages = Error.ErrorMsg(xid, error_type_value, iter_given_code.__next__(), data).pack()\n\n self.assertEqual(test_value, test_object_error_messages)", "def test_old_data_format_error(self):\n assert_raises(ValueError, get_data, self.testv1)", "def test_parse_redis_data_error():\n with pytest.raises(ValueError):\n redis_data.parse_redis_data(b\"this is some data\")", "def test_error_message_header_bad_action_codes(self):\n\n error_type = 2\n error_type_value = Error.ErrorType.OFPET_BAD_ACTION\n\n error_code = 0\n\n iter_given_code = Error.ErrorType.get_class(error_type_value).__iter__()\n length = Error.ErrorType.get_class(error_type_value).__len__()\n\n while error_code < self.MAX_BAD_ACTION_CODE_VALUE or length > 0:\n\n data = UBInt32(random.randint(2, 250)).pack()\n xid = random.randint(2, 250)\n\n test_value = b'\\x05\\x01\\x00\\x10' + UBInt32(xid).pack() + UBInt16(error_type).pack() + \\\n UBInt16(error_code).pack() + data\n\n if error_code < self.MAX_BAD_ACTION_CODE_VALUE:\n error_code += 1\n\n length -= 1\n\n test_object_error_messages = Error.ErrorMsg(xid, error_type_value, iter_given_code.__next__(), data).pack()\n\n self.assertEqual(test_value, test_object_error_messages)", "def test_decode_raises_when_format_unknown(thing):\n with pytest.raises(ValueError):\n decode(thing)", "def test_error_message_header_flow_monitor_failed_codes(self):\n\n error_type = 16\n error_type_value = Error.ErrorType.OFPET_FLOW_MONITOR_FAILED\n\n error_code = 0\n\n iter_given_code = Error.ErrorType.get_class(error_type_value).__iter__()\n length = Error.ErrorType.get_class(error_type_value).__len__()\n\n while error_code < self.MAX_FLOW_MONITOR_FAILED_CODE_VALUE or length > 0:\n data = UBInt32(random.randint(2, 250)).pack()\n xid = random.randint(2, 250)\n\n test_value = b'\\x05\\x01\\x00\\x10' + UBInt32(xid).pack() + UBInt16(error_type).pack() + \\\n UBInt16(error_code).pack() + data\n\n if error_code < self.MAX_FLOW_MONITOR_FAILED_CODE_VALUE:\n error_code += 1\n length -= 1\n\n test_object_error_messages = Error.ErrorMsg(xid, error_type_value, iter_given_code.__next__(), data).pack()\n\n self.assertEqual(test_value, test_object_error_messages)", "def test_error_when_length_mismatch(self):\n self._assert_raise_error(\n probabilities=[0.5, 0.5],\n random_nums=[0],\n error=LengthMismatchError,\n code=1\n )", "def test_constructor_error():\n\n # word length too small\n try:\n BigEndianAscendingWordDeserializer(0, 0, [0])\n 
assert False, \"Should complain about too-short words.\"\n except ValueError as e:\n assert \"Word length must be\" in str(e)\n\n # word length too large\n try:\n BigEndianAscendingWordDeserializer(65, 0, [0])\n assert False, \"Should complain about too-long words.\"\n except ValueError as e:\n assert \"Word length must be\" in str(e)\n\n # byte padding negative\n try:\n BigEndianAscendingWordDeserializer(5, -1, [0])\n except ValueError as e:\n assert \"Byte padding must be\" in str(e)", "def test_check_header_bad_chars(self):\r\n\r\n # Default header, should not generate any errors/warnings\r\n header = [\r\n 'SampleID', 'BarcodeSequence', 'LinkerPrimerSequence', 'AAA.^^2',\r\n 'Description']\r\n errors = []\r\n warnings = []\r\n\r\n errors, warnings = check_header(header,\r\n errors,\r\n warnings,\r\n sample_id_ix=0,\r\n desc_ix=4,\r\n bc_ix=1,\r\n linker_primer_ix=2,\r\n added_demultiplex_field=None)\r\n\r\n expected_errors = []\r\n expected_warnings = [\r\n 'Found invalid character in AAA.^^2 header field.\\t0,3']\r\n\r\n self.assertEqual(errors, expected_errors)\r\n self.assertEqual(warnings, expected_warnings)", "def test_length_unknown_unit(self):\n with self.assertRaises(ValueError):\n METRIC_SYSTEM.length(5, 'fr')", "def test_error_message_header_hello_failed_codes(self):\n error_type = 0\n error_type_value = Error.ErrorType.OFPET_HELLO_FAILED\n\n error_code = 0\n\n iter_given_code = Error.ErrorType.get_class(error_type_value).__iter__()\n length = Error.ErrorType.get_class(error_type_value).__len__()\n\n while error_code < self.MAX_HELLO_FAILED_CODE_VALUE or length > 0:\n\n data = UBInt32(random.randint(2, 250)).pack()\n xid = random.randint(2, 250)\n\n test_value = b'\\x05\\x01\\x00\\x10' + UBInt32(xid).pack() + UBInt16(error_type).pack() + \\\n UBInt16(error_code).pack() + data\n\n if error_code < self.MAX_HELLO_FAILED_CODE_VALUE:\n error_code += 1\n\n length -= 1\n\n test_object_error_messages = Error.ErrorMsg(xid, error_type_value, iter_given_code.__next__(), data).pack()\n\n self.assertEqual(test_value, test_object_error_messages)", "def test_value_init7(self):\n with self.assertRaises(ValueError) as err:\n r1 = Rectangle(-4, 5)\n msg = \"width must be > 0\"\n self.assertEqual(str(err.exception), msg)", "def test_bad_values(self):\n self.assertOK([60])\n self.assertRaisesInternalError([59.9])\n self.assertRaisesInternalError([''])\n self.assertRaisesInternalError([';'])\n self.assertRaisesInternalError(['('])\n self.assertRaisesInternalError([None])", "def test_exception_case(file_with_exception_value):\n with pytest.raises(ValueError, match=\"It is not a magic number!\"):\n read_magic_number(file_with_exception_value)", "def test_decode_errors(self):\n if self._invalid_encoded:\n self.assert_raises((ValueError, jsonschema.exceptions.ValidationError),\n self.import_cls.decode,\n self._invalid_encoded[0], self.typedef)", "def testIncorrectContentLength(self):\n headers = Headers({'Content-Length': ['100'],\n 'Content-Type': ['application/json']})\n request = FakeRequest(headers=headers)\n resource = TestResource(None, None)\n result = yield resource.deferred_render_POST(request)\n response = loads(result)\n self.assertEqual(JSONRPC_PARSE_ERROR, response['error']['code'])\n message = 'Invalid payload: ContentLengthMismatch.'\n self.assertEqual(message, response['error']['message'])\n self.assertIn(message, self.log.getvalue())\n self.assertIn('<Payload empty or unparseable>', self.log.getvalue())", "def test_value_init18(self):\n with self.assertRaises(ValueError) as err:\n r1 
= Rectangle(0, 19)\n msg = \"width must be > 0\"\n self.assertEqual(str(err.exception), msg)", "def test_value_init15(self):\n with self.assertRaises(ValueError) as err:\n r1 = Rectangle(0, 0)\n msg = \"width must be > 0\"\n self.assertEqual(str(err.exception), msg)" ]
[ "0.69578516", "0.69578516", "0.69512135", "0.6904783", "0.6873048", "0.6832096", "0.6810115", "0.68085086", "0.67901564", "0.67745537", "0.67745537", "0.67485756", "0.67124915", "0.66635495", "0.66243625", "0.6623286", "0.6604393", "0.64992744", "0.64364654", "0.6372474", "0.63706607", "0.6347114", "0.6341318", "0.6338201", "0.6337301", "0.6335362", "0.63294643", "0.63132083", "0.6310044", "0.63052046" ]
0.75773674
0
Resolves a list of requirements for the same package. Given a list of package details in the form of `packaging.requirements.Requirement` objects, combine the specifier, extras, url and marker information to create a new requirement object.
def resolve_requirement_versions(package_versions):
    resolved = Requirement(str(package_versions[0]))
    for package_version in package_versions[1:]:
        resolved.specifier = resolved.specifier & package_version.specifier
        resolved.extras = resolved.extras.union(package_version.extras)
        resolved.url = resolved.url or package_version.url
        if resolved.marker and package_version.marker:
            resolved.marker = Marker(f"{resolved.marker} or {package_version.marker}")
        elif package_version.marker:
            resolved.marker = package_version.marker
    return resolved
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_dependencies(package=\"sunpy\", extras=None):\n requirements = get_requirements(package)\n installed_requirements = {}\n missing_requirements = defaultdict(list)\n extras = extras or [\"required\"]\n for group in requirements:\n if group not in extras:\n continue\n for package, package_details in requirements[group].items():\n try:\n package_version = version(package)\n installed_requirements[package] = package_version\n except PackageNotFoundError:\n missing_requirements[package].append(package_details)\n for package, package_versions in missing_requirements.items():\n missing_requirements[package] = format_requirement_string(\n resolve_requirement_versions(package_versions))\n return missing_requirements, installed_requirements", "def resolve(requirements, obtainer=None, interpreter=None, platform=None):\r\n cache = _DistributionCache()\r\n interpreter = interpreter or PythonInterpreter.get()\r\n platform = platform or Platform.current()\r\n obtainer = obtainer or Obtainer.default(platform=platform, interpreter=interpreter)\r\n\r\n requirements = maybe_requirement_list(requirements)\r\n distribution_set = defaultdict(list)\r\n requirement_set = defaultdict(list)\r\n processed_requirements = set()\r\n\r\n def packages(requirement, existing=None):\r\n if existing is None:\r\n existing = obtainer.iter(requirement)\r\n return [package for package in existing\r\n if package.satisfies(requirement)\r\n and package.compatible(interpreter.identity, platform)]\r\n\r\n def requires(package, requirement):\r\n if not cache.has(package):\r\n dist = obtainer.obtain(package)\r\n if dist is None:\r\n raise Untranslateable('Package %s is not translateable.' % package)\r\n if not distribution_compatible(dist, interpreter, platform):\r\n raise Untranslateable('Could not get distribution for %s on appropriate platform.' 
%\r\n package)\r\n cache.put(package, dist)\r\n dist = cache.get(package)\r\n return dist.requires(extras=requirement.extras)\r\n\r\n while True:\r\n while requirements:\r\n requirement = requirements.pop(0)\r\n requirement_set[requirement.key].append(requirement)\r\n # TODO(wickman) This is trivially parallelizable\r\n distribution_list = distribution_set[requirement.key] = packages(\r\n requirement,\r\n existing=distribution_set.get(requirement.key))\r\n if not distribution_list:\r\n raise Unsatisfiable('Cannot satisfy requirements: %s' % requirement_set[requirement.key])\r\n\r\n # get their dependencies\r\n for requirement_key, requirement_list in requirement_set.items():\r\n new_requirements = OrderedSet()\r\n highest_package = distribution_set[requirement_key][0]\r\n for requirement in requirement_list:\r\n if requirement in processed_requirements:\r\n continue\r\n new_requirements.update(requires(highest_package, requirement))\r\n processed_requirements.add(requirement)\r\n requirements.extend(list(new_requirements))\r\n\r\n if not requirements:\r\n break\r\n\r\n to_activate = set()\r\n for distributions in distribution_set.values():\r\n to_activate.add(cache.get(distributions[0]))\r\n return to_activate", "def resolve_dependencies(self, all_data):\n self.requires = []\n for dep in self.metadata[\"deps\"]:\n key = (self.package.key, dep)\n if key in self.provides:\n raise Exception(\"Package shouldn't depend on itself: %s\" % repr(key))\n self.requires.append( all_data.resolve_unqualified_component(dep, self.package.key) )", "def get_requirements(package):\n requirements: list = requires(package)\n requires_dict = defaultdict(dict)\n for requirement in requirements:\n req = Requirement(requirement)\n package_name, package_marker = req.name, req.marker\n if package_marker and \"extra ==\" in str(package_marker):\n group = str(package_marker).split(\"extra == \")[1].strip('\"').strip(\"'\").strip()\n else:\n group = \"required\"\n # De-duplicate (the same package could appear more than once in the extra == 'all' group)\n if package_name in requires_dict[group]:\n continue\n requires_dict[group][package_name] = req\n return requires_dict", "def parse_requirements(*filenames):\n requirements = []\n for f in filenames:\n for line in open(f, 'r').read().split('\\n'):\n # Comment lines. Skip.\n if re.match(r'(\\s*#)|(\\s*$)', line):\n continue\n # Editable matches. Put the egg name into our reqs list.\n if re.match(r'\\s*-e\\s+', line):\n pkg = re.sub(r'\\s*-e\\s+.*#egg=(.*)$', r'\\1', line)\n requirements.append(\"%s\" % pkg)\n # File-based installs not supported/needed. Skip.\n elif re.match(r'\\s*-f\\s+', line):\n pass\n else:\n requirements.append(line)\n return requirements", "def build_ireq_set(specifiers, # type: Iterable[str]\n index_urls=None, # type: Optional[Iterable[str]]\n prereleases=False, # type: bool\n resolve_canonical_names=True, # type: bool\n resolve_source_dir=None, # type: str\n resolve_versions=True, # type: bool\n sort_specifiers=True, # type: bool\n ):\n # type: (...) 
-> InstallReqSet\n install_requirements = ordered_set.OrderedSet()\n if index_urls is None:\n index_urls = []\n if sort_specifiers:\n specifiers = sorted(specifiers)\n for specifier in specifiers:\n if specifier.startswith('-e'):\n ireq = HashableInstallRequirement.from_line(specifier)\n else:\n args = []\n for index_url in index_urls:\n args.extend(['--extra-index-url', index_url])\n ireq = resolve_specifier(specifier, prereleases, resolve_versions,\n *args)\n if resolve_canonical_names and not ireq.editable:\n package_name = ireq.name\n canonical_name = get_canonical_name(\n package_name=package_name, index_urls=index_urls)\n update_ireq_name(\n install_requirement=ireq, package_name=canonical_name)\n elif resolve_source_dir is not None and ireq.source_dir:\n try:\n ireq.source_dir = str(\n pathlib.Path(ireq.source_dir)\n .relative_to(pathlib.Path(resolve_source_dir)))\n ireq.link = pip.index.Link('file://{}'.format(\n ireq.source_dir))\n except ValueError:\n pass\n install_requirements.add(ireq)\n return install_requirements", "def parse_requirements(filename, *args): # pragma: no cover\n # type: (str, str) -> Tuple[InstallReqSet, pip.index.PackageFinder]\n pip_options, session = build_pip_session(*args)\n repository = PyPiRepository(pip_options, session)\n requirements = pip.req.parse_requirements(\n filename,\n finder=repository.finder,\n session=repository.session,\n options=pip_options)\n return set(requirements), repository.finder", "def _resolve_depenency_map(\n requested_requirements, # type: t.Iterable[Requirement]\n galaxy_apis, # type: t.Iterable[GalaxyAPI]\n concrete_artifacts_manager, # type: ConcreteArtifactsManager\n preferred_candidates, # type: t.Iterable[Candidate] | None\n no_deps, # type: bool\n allow_pre_release, # type: bool\n upgrade, # type: bool\n include_signatures, # type: bool\n): # type: (...) 
-> dict[str, Candidate]\n if not HAS_RESOLVELIB:\n raise AnsibleError(\"Failed to import resolvelib, check that a supported version is installed\")\n if not HAS_PACKAGING:\n raise AnsibleError(\"Failed to import packaging, check that a supported version is installed\")\n try:\n dist = distribution('ansible-core')\n except Exception:\n req = None\n else:\n req = next((rr for r in (dist.requires or []) if (rr := PkgReq(r)).name == 'resolvelib'), None)\n finally:\n if req is None:\n # TODO: replace the hardcoded versions with a warning if the dist info is missing\n # display.warning(\"Unable to find 'ansible-core' distribution requirements to verify the resolvelib version is supported.\")\n if not RESOLVELIB_LOWERBOUND <= RESOLVELIB_VERSION < RESOLVELIB_UPPERBOUND:\n raise AnsibleError(\n f\"ansible-galaxy requires resolvelib<{RESOLVELIB_UPPERBOUND.vstring},>={RESOLVELIB_LOWERBOUND.vstring}\"\n )\n elif not req.specifier.contains(RESOLVELIB_VERSION.vstring):\n raise AnsibleError(f\"ansible-galaxy requires {req.name}{req.specifier}\")\n\n collection_dep_resolver = build_collection_dependency_resolver(\n galaxy_apis=galaxy_apis,\n concrete_artifacts_manager=concrete_artifacts_manager,\n user_requirements=requested_requirements,\n preferred_candidates=preferred_candidates,\n with_deps=not no_deps,\n with_pre_releases=allow_pre_release,\n upgrade=upgrade,\n include_signatures=include_signatures,\n )\n try:\n return collection_dep_resolver.resolve(\n requested_requirements,\n max_rounds=2000000, # NOTE: same constant pip uses\n ).mapping\n except CollectionDependencyResolutionImpossible as dep_exc:\n conflict_causes = (\n '* {req.fqcn!s}:{req.ver!s} ({dep_origin!s})'.format(\n req=req_inf.requirement,\n dep_origin='direct request'\n if req_inf.parent is None\n else 'dependency of {parent!s}'.\n format(parent=req_inf.parent),\n )\n for req_inf in dep_exc.causes\n )\n error_msg_lines = list(chain(\n (\n 'Failed to resolve the requested '\n 'dependencies map. Could not satisfy the following '\n 'requirements:',\n ),\n conflict_causes,\n ))\n raise raise_from( # NOTE: Leading \"raise\" is a hack for mypy bug #9717\n AnsibleError('\\n'.join(error_msg_lines)),\n dep_exc,\n )\n except CollectionDependencyInconsistentCandidate as dep_exc:\n parents = [\n \"%s.%s:%s\" % (p.namespace, p.name, p.ver)\n for p in dep_exc.criterion.iter_parent()\n if p is not None\n ]\n\n error_msg_lines = [\n (\n 'Failed to resolve the requested dependencies map. 
'\n 'Got the candidate {req.fqcn!s}:{req.ver!s} ({dep_origin!s}) '\n 'which didn\\'t satisfy all of the following requirements:'.\n format(\n req=dep_exc.candidate,\n dep_origin='direct request'\n if not parents else 'dependency of {parent!s}'.\n format(parent=', '.join(parents))\n )\n )\n ]\n\n for req in dep_exc.criterion.iter_requirement():\n error_msg_lines.append(\n '* {req.fqcn!s}:{req.ver!s}'.format(req=req)\n )\n\n raise raise_from( # NOTE: Leading \"raise\" is a hack for mypy bug #9717\n AnsibleError('\\n'.join(error_msg_lines)),\n dep_exc,\n )\n except ValueError as exc:\n raise AnsibleError(to_native(exc)) from exc", "def install_deps():\n default = open('requirements.txt', 'r').readlines()\n new_pkgs = []\n links = []\n for resource in default:\n if 'git+https' in resource:\n pkg = resource.split('#')[-1]\n links.append(resource.strip())\n new_pkgs.append(pkg.replace('egg=', '').rstrip())\n else:\n new_pkgs.append(resource.strip())\n return new_pkgs, links", "def dependencies(pkg, extra=None):\n ret = set()\n for dist in pkg.requires_dist:\n requirement = pkg_resources.Requirement.parse(dist)\n # we replace all underscores with dash, to make package names similiar in all cases\n name = requirement.name.replace(\"_\", \"-\")\n if extra:\n # for extras we don't grab dependencies for the main pkg,\n # those are already in the main plg rule\n if not requirement.marker or requirement.marker.evaluate({\"extra\": None}):\n continue\n\n if requirement.marker:\n if not requirement.marker.evaluate({\"extra\": extra}):\n continue\n\n if requirement.extras:\n ret = ret | set(\n [\"{}[{}]\".format(name, dist_extra) for dist_extra in requirement.extras]\n )\n else:\n ret.add(name)\n\n return sorted(list(ret))", "def resolve_ireqs(requirements, # type: InstallReqIterable\n prereleases=False, # type: bool\n intersect=False, # type: bool\n *args, # type: str\n **kwargs # type: Any\n ): # pragma: no cover\n # type: (...) 
-> InstallReqSet\n pip_options, session = build_pip_session(*args)\n repository = PyPiRepository(pip_options, session)\n resolver = piptools.resolver.Resolver(\n constraints=requirements, repository=repository, **kwargs)\n results = {HashableInstallRequirement.from_ireq(r)\n for r in resolver.resolve()}\n if intersect:\n results |= {HashableInstallRequirement.from_ireq(r)\n for r in requirements}\n return results", "def _resolve_multi(self, interpreter, requirements, find_links):\n python_setup = PythonSetup.global_instance()\n python_repos = PythonRepos.global_instance()\n distributions = {}\n fetchers = python_repos.get_fetchers()\n fetchers.extend(Fetcher([path]) for path in find_links)\n\n for platform in python_setup.platforms:\n requirements_cache_dir = os.path.join(python_setup.resolver_cache_dir,\n str(interpreter.identity))\n distributions[platform] = resolve(\n requirements=[req.requirement for req in requirements],\n interpreter=interpreter,\n fetchers=fetchers,\n platform=None if platform == 'current' else platform,\n context=python_repos.get_network_context(),\n cache=requirements_cache_dir,\n cache_ttl=python_setup.resolver_cache_ttl)\n\n return distributions", "def parse_requirements(requirements):\n for req in pyrequirements.parse(requirements):\n yield req", "def iter_dependencies(self, extras: Collection[str] = ()) -> Iterable[Requirement]:\n raise NotImplementedError()", "def get_required_packages(file_contents):\n # Make sure the only ``install_requires`` happens in the\n # call to setup()\n if file_contents.count(INST_REQS_KWARG) != 1:\n raise ValueError('Expected only one use of keyword',\n INST_REQS_KWARG, file_contents)\n # Make sure the only usage of ``install_requires`` is to set\n # install_requires=REQUIREMENTS.\n keyword_stmt = INST_REQS_KWARG + '=' + REQ_VAR\n if file_contents.count(keyword_stmt) != 1:\n raise ValueError('Expected keyword to be set with variable',\n INST_REQS_KWARG, REQ_VAR, file_contents)\n # Split file on ``REQUIREMENTS`` variable while asserting that\n # it only appear twice.\n _, reqs_section, _ = file_contents.split(REQ_VAR)\n # Find ``REQUIREMENTS`` list variable defined in ``reqs_section``.\n reqs_begin = reqs_section.index('[')\n reqs_end = reqs_section.index(']') + 1\n\n # Convert the text to an actual list, but make sure no\n # locals or globals can be used.\n reqs_list_text = reqs_section[reqs_begin:reqs_end]\n # We use literal_eval() because it limits to evaluating\n # strings that only consist of a few Python literals: strings,\n # numbers, tuples, lists, dicts, booleans, and None.\n requirements = ast.literal_eval(reqs_list_text)\n\n # Take the list of requirements and strip off the package name\n # from each requirement.\n result = []\n for required in requirements:\n parts = required.split()\n result.append(parts[0])\n return result", "def process_requirements(requirements, version=None):\n if requirements is None:\n return []\n\n if isinstance(requirements, list):\n return requirements\n\n if isinstance(requirements, dict):\n # The version \"dev\" should always compare as greater than any exisiting versions.\n dev_numeric = \"9999.9999.9999\"\n\n if version == DEV_VERSION:\n version = dev_numeric\n\n for ver_spec, packages in requirements.items():\n op_and_ver_pairs = map(get_operator_and_version, ver_spec.split(\",\"))\n match_all = all(\n comp_op(\n Version(version),\n Version(dev_numeric if req_ver == DEV_VERSION else req_ver),\n )\n for comp_op, req_ver in op_and_ver_pairs\n )\n if match_all:\n return packages\n return 
[]\n\n raise TypeError(\"Invalid object type for `requirements`: '{}'\".format(type(requirements)))", "def _get_requirements_and_latest(\n filename,\n force=False,\n minor=[],\n patch=[],\n pre=[],\n index_urls=[],\n verify=True):\n session = PipSession()\n if verify:\n session.verify = verify\n finder = PackageFinder(\n session=session,\n find_links=[],\n index_urls=index_urls or [PyPI.simple_url],\n )\n\n _, content = get_file_content(filename, session=session)\n for line_number, line, orig_line in yield_lines(content):\n line = req_file.COMMENT_RE.sub('', line)\n line = line.strip()\n req = parse_requirement_line(line, filename, line_number, session, finder)\n if req is None or req.name is None or req_file.SCHEME_RE.match(req.name):\n yield (orig_line, None, None, None)\n continue\n spec_ver = current_version(req)\n if spec_ver or force:\n latest_ver = latest_version(req, spec_ver, session, finder,\n minor=minor, patch=patch, pre=pre)\n yield (orig_line, req, spec_ver, latest_ver)", "def find_with_deps(self, package_names):", "def resolve_specifier(specifier, # type: str\n prereleases=False, # type: bool\n resolve_versions=True, # type: bool\n *args # type: str # noqa: C812\n ):\n # type: (...) -> HashableInstallRequirement\n ireq = HashableInstallRequirement.from_line(specifier)\n pip_options, session = build_pip_session(*args)\n repository = PyPiRepository(pip_options, session)\n if (ireq.editable or\n piptools.utils.is_pinned_requirement(ireq) or\n not resolve_versions):\n return ireq\n else:\n return HashableInstallRequirement.from_ireq(\n repository.find_best_match(ireq, prereleases=prereleases))", "def _resolve_dependencies(self):\n matching_versions = dict()\n\n # Initialization of the BFS\n bfs_stack = list()\n for requirement_name, spec_str in sorted(self.spec_requirements, key=lambda x: x[0].lower()):\n self._add_spec(requirement_name, spec_str)\n bfs_stack.append(requirement_name)\n\n # Main loop\n while bfs_stack:\n # Stack Unwind\n requirement_name = bfs_stack.pop(0)\n available_versions = self._get_available_versions(requirement_name)\n spec = self._get_spec(requirement_name)\n best_matching_version = spec.select(available_versions)\n if best_matching_version is None:\n msg = 'Unmatched dependency for {}\\nSpecification requirement: {}\\nAvailable versions: {}\\n' \\\n 'Use NPM semver calculator to resolve: https://semver.npmjs.com/'\n error = msg.format(requirement_name, spec, ', '.join(reversed(map(str, available_versions))))\n raise RequirementMatchError(error)\n\n matching_versions[requirement_name] = best_matching_version\n\n # BFS stack population with dependencies\n dependencies = self._get_dependencies(requirement_name, best_matching_version)\n for dependency_name, dependency_version in dependencies:\n self._add_spec(dependency_name, dependency_version)\n bfs_stack.append(dependency_name)\n\n return matching_versions", "def get_extras_require() -> Dict[str, List[str]]:\n extras = {\n \"testing\": [\n \"pytest==6.1.2\",\n \"pytest-cov==2.10.1\",\n ],\n \"linting\": [\n \"pylint==2.6.0\",\n \"flake8==3.8.4\",\n \"black>=20.8b1\",\n \"darglint==1.5.5\",\n \"mypy==0.790\",\n # \"data-science-types>=0.2.20\", # pandas, numpy, matplotlib\n ],\n }\n extras[\"all\"] = [item for group in extras.values() for item in group]\n return extras", "def merge_dependencies(deps_list, remove_builds=False):\n only_pips = []\n unified_deps = []\n for deps in deps_list:\n if deps is None: # not found in this environment definition\n continue\n for dep in deps:\n if isinstance(dep, dict) 
and dep['pip']:\n only_pips.append(dep['pip'])\n else:\n if remove_builds:\n dep = _remove_build(dep)\n if dep not in unified_deps:\n unified_deps.append(dep)\n unified_deps = sorted(unified_deps)\n if only_pips:\n unified_deps.append(merge_pips(only_pips))\n return unified_deps", "def install_deps():\n with open('requirements.txt', 'r') as f:\n packages = f.readlines()\n new_pkgs = []\n for resource in packages:\n new_pkgs.append(resource.strip())\n return new_pkgs", "def resolve_requirements(\n cls, featuresets: list[FeatureSet], reqs: set[str]\n ) -> set[str]:\n fsets = {f.name: f for f in featuresets}\n reqs_out = set[str]()\n for req in reqs:\n cls._resolve_requirements(fsets, reqs_out, req)\n return reqs_out", "def read_requirements(*parts):\n requirements = []\n for line in read(*parts).splitlines():\n line_2 = re.sub(\n \"(\\s*)?#(?!egg=).*$\", # the space immediately before the hash mark, the hash mark, and anything that follows it, but not \"#egg=\" fragments\n \"\", # replace with a blank string\n line,\n )\n line_3 = re.sub(\n \"(\\s*)?-r.*$\", # we also can't reference other requirement files\n \"\", # replace with a blank string\n line_2,\n )\n if line_3: # i.e. we have a non-zero-length string\n requirements.append(line_3)\n return requirements", "def parse_req_file(req_file, verbatim=False):\n req_list = []\n requirements = req_file.readlines()\n for requirement in requirements:\n requirement_no_comments = requirement.split(\"#\")[0].strip()\n\n # if matching requirement line (Thing==1.2.3), update dict, continue\n req_match = re.match(\n r\"\\s*(?P<package>[^\\s\\[\\]]+)(?P<extras>\\[\\S+\\])?==(?P<version>\\S+)\",\n requirement_no_comments,\n )\n req_ignore = requirement.strip().endswith(\" # norot\")\n\n if req_match:\n req_list.append(\n (req_match.group(\"package\"), req_match.group(\"version\"), req_ignore)\n )\n elif requirement_no_comments.startswith(\"-r\"):\n try:\n base_dir = os.path.dirname(os.path.abspath(req_file.name))\n except AttributeError:\n print(\n \"Recursive requirements are not supported in URL based \" \"lookups\"\n )\n continue\n\n # replace the -r and ensure there are no leading spaces\n file_name = requirement_no_comments.replace(\"-r\", \"\").strip()\n new_path = os.path.join(base_dir, file_name)\n try:\n if verbatim:\n req_list.append((None, requirement, req_ignore))\n req_list.extend(parse_req_file(open(new_path), verbatim=verbatim))\n except IOError:\n print(\"Failed to import {}\".format(file_name))\n elif verbatim:\n req_list.append((None, requirement, req_ignore))\n return req_list", "def merge_requirements(req1, req2):\n if req1 is not None and req2 is None:\n return req1\n if req2 is not None and req1 is None:\n return req2\n\n req1_name_norm = normalize_project_name(req1.name)\n if req1_name_norm != normalize_project_name(req2.name):\n raise ValueError(\"Reqs don't match: {} != {}\".format(req1, req2))\n all_specs = set(req1.specs or []) | set(req2.specs or [])\n\n # Handle markers\n if req1.marker and req2.marker:\n if str(req1.marker) != str(req2.marker):\n if str(req1.marker) in str(req2.marker):\n new_marker = \";\" + str(req1.marker)\n elif str(req2.marker) in str(req1.marker):\n new_marker = \";\" + str(req2.marker)\n else:\n new_marker = \"\"\n else:\n new_marker = \";\" + str(req1.marker)\n else:\n new_marker = \"\"\n\n extras = merge_extras(req1.extras, req2.extras)\n extras_str = \"\"\n if extras:\n extras_str = \"[\" + \",\".join(extras) + \"]\"\n req_str = (\n req1_name_norm\n + extras_str\n + \",\".join(\"\".join(parts) for 
parts in all_specs)\n + new_marker\n )\n return parse_requirement(req_str)", "def resolve( # noqa:C901\n ireq, # type: TInstallRequirement\n reqset_provider=None, # type: Optional[TShimmedFunc]\n req_tracker_provider=None, # type: Optional[TShimmedFunc]\n install_cmd_provider=None, # type: Optional[TShimmedFunc]\n install_command=None, # type: Optional[TCommand]\n finder_provider=None, # type: Optional[TShimmedFunc]\n resolver_provider=None, # type: Optional[TShimmedFunc]\n wheel_cache_provider=None, # type: Optional[TShimmedFunc]\n format_control_provider=None, # type: Optional[TShimmedFunc]\n make_preparer_provider=None, # type: Optional[TShimmedFunc]\n tempdir_manager_provider=None, # type: Optional[TShimmedFunc]\n options=None, # type: Optional[Values]\n session=None, # type: Optional[TSession]\n resolver=None, # type: Optional[TResolver]\n finder=None, # type: Optional[TFinder]\n upgrade_strategy=\"to-satisfy-only\", # type: str\n force_reinstall=None, # type: Optional[bool]\n ignore_dependencies=None, # type: Optional[bool]\n ignore_requires_python=None, # type: Optional[bool]\n ignore_installed=True, # type: bool\n use_user_site=False, # type: bool\n isolated=None, # type: Optional[bool]\n build_dir=None, # type: Optional[str]\n source_dir=None, # type: Optional[str]\n download_dir=None, # type: Optional[str]\n cache_dir=None, # type: Optional[str]\n wheel_download_dir=None, # type: Optional[str]\n wheel_cache=None, # type: Optional[TWheelCache]\n require_hashes=None, # type: bool\n check_supported_wheels=True, # type: bool\n):\n # (...) -> Set[TInstallRequirement]\n reqset_provider = resolve_possible_shim(reqset_provider)\n finder_provider = resolve_possible_shim(finder_provider)\n resolver_provider = resolve_possible_shim(resolver_provider)\n wheel_cache_provider = resolve_possible_shim(wheel_cache_provider)\n format_control_provider = resolve_possible_shim(format_control_provider)\n make_preparer_provider = resolve_possible_shim(make_preparer_provider)\n req_tracker_provider = resolve_possible_shim(req_tracker_provider)\n install_cmd_provider = resolve_possible_shim(install_cmd_provider)\n tempdir_manager_provider = resolve_possible_shim(tempdir_manager_provider)\n if install_command is None:\n assert isinstance(install_cmd_provider, (type, functools.partial))\n install_command = install_cmd_provider()\n kwarg_map = {\n \"upgrade_strategy\": upgrade_strategy,\n \"force_reinstall\": force_reinstall,\n \"ignore_dependencies\": ignore_dependencies,\n \"ignore_requires_python\": ignore_requires_python,\n \"ignore_installed\": ignore_installed,\n \"use_user_site\": use_user_site,\n \"isolated\": isolated,\n \"build_dir\": build_dir,\n \"src_dir\": source_dir,\n \"download_dir\": download_dir,\n \"require_hashes\": require_hashes,\n \"cache_dir\": cache_dir,\n }\n kwargs, options = populate_options(install_command, options, **kwarg_map)\n with contextlib.ExitStack() as ctx:\n ctx.enter_context(tempdir_manager_provider())\n kwargs = ctx.enter_context(\n ensure_resolution_dirs(wheel_download_dir=wheel_download_dir, **kwargs)\n )\n wheel_download_dir = kwargs.pop(\"wheel_download_dir\")\n if session is None:\n session = get_session(install_cmd=install_command, options=options)\n if finder is None:\n finder = finder_provider(\n install_command, options=options, session=session\n ) # type: ignore\n format_control = getattr(options, \"format_control\", None)\n if not format_control:\n format_control = format_control_provider(None, None) # type: ignore\n wheel_cache = ctx.enter_context(\n 
wheel_cache_provider(kwargs[\"cache_dir\"], format_control)\n ) # type: ignore\n ireq.is_direct = True # type: ignore\n build_location_kwargs = {\n \"build_dir\": kwargs[\"build_dir\"],\n \"autodelete\": True,\n \"parallel_builds\": False,\n }\n call_function_with_correct_args(ireq.build_location, **build_location_kwargs)\n if reqset_provider is None:\n raise TypeError(\n \"cannot resolve without a requirement set provider... failed!\"\n )\n reqset = reqset_provider(\n install_command,\n options=options,\n session=session,\n wheel_download_dir=wheel_download_dir,\n **kwargs,\n ) # type: ignore\n\n preparer_args = {\n \"build_dir\": kwargs[\"build_dir\"],\n \"src_dir\": kwargs[\"src_dir\"],\n \"download_dir\": kwargs[\"download_dir\"],\n \"wheel_download_dir\": wheel_download_dir,\n \"build_isolation\": kwargs[\"isolated\"],\n \"install_cmd\": install_command,\n \"options\": options,\n \"finder\": finder,\n \"session\": session,\n \"use_user_site\": use_user_site,\n \"require_hashes\": require_hashes,\n }\n if isinstance(req_tracker_provider, (types.FunctionType, functools.partial)):\n preparer_args[\"req_tracker\"] = ctx.enter_context(req_tracker_provider())\n resolver_keys = [\n \"upgrade_strategy\",\n \"force_reinstall\",\n \"ignore_dependencies\",\n \"ignore_installed\",\n \"use_user_site\",\n \"isolated\",\n \"use_user_site\",\n ]\n resolver_args = {key: kwargs[key] for key in resolver_keys if key in kwargs}\n if resolver_provider is None:\n raise TypeError(\"Cannot resolve without a resolver provider... failed!\")\n preparer = ctx.enter_context(make_preparer_provider(**preparer_args))\n resolver = resolver_provider(\n finder=finder,\n preparer=preparer,\n session=session,\n options=options,\n install_cmd=install_command,\n wheel_cache=wheel_cache,\n **resolver_args,\n ) # type: ignore\n resolver.require_hashes = kwargs.get(\"require_hashes\", False) # type: ignore\n _, required_resolver_args = get_method_args(resolver.resolve)\n resolver_args = []\n if \"requirement_set\" in required_resolver_args.args:\n if hasattr(reqset, \"add_requirement\"):\n reqset.add_requirement(ireq)\n else: # Pip >= 22.1.0\n resolver._add_requirement_to_set(reqset, ireq)\n resolver_args.append(reqset)\n elif \"root_reqs\" in required_resolver_args.args:\n resolver_args.append([ireq])\n if \"check_supported_wheels\" in required_resolver_args.args:\n resolver_args.append(check_supported_wheels)\n if getattr(reqset, \"prepare_files\", None):\n if hasattr(reqset, \"add_requirement\"):\n reqset.add_requirement(ireq)\n else: # Pip >= 22.1.0\n resolver._add_requirement_to_set(reqset, ireq)\n results = reqset.prepare_files(finder)\n result = reqset.requirements\n reqset.cleanup_files()\n return result\n if make_preparer_provider is None:\n raise TypeError(\"Cannot create requirement preparer, cannot resolve!\")\n result_reqset = resolver.resolve(*resolver_args) # type: ignore\n if result_reqset is None:\n result_reqset = reqset\n results = result_reqset.requirements\n cleanup_fn = getattr(reqset, \"cleanup_files\", None)\n if cleanup_fn is not None:\n cleanup_fn()\n return results", "def add_uppers():\n for filename, requirements in _sync():\n LOG.info(\"Obtaining latest versions of packages for %s.\", filename)\n for req in requirements:\n if isinstance(req, Requirement):\n if isinstance(req.version, dict) and not req.version[\"max\"]:\n req.sync_max_version_with_pypy()\n _write_requirements(filename, requirements)", "def resolve_multi(config,\n requirements,\n interpreter=None,\n platforms=None,\n 
conn_timeout=None,\n ttl=3600):\n distributions = dict()\n interpreter = interpreter or PythonInterpreter.get()\n if not isinstance(interpreter, PythonInterpreter):\n raise TypeError('Expected interpreter to be a PythonInterpreter, got %s' % type(interpreter))\n\n install_cache = PythonSetup(config).scratch_dir('install_cache', default_name='eggs')\n platforms = get_platforms(platforms or config.getlist('python-setup', 'platforms', ['current']))\n\n for platform in platforms:\n translator = Translator.default(\n install_cache=install_cache,\n interpreter=interpreter,\n platform=platform,\n conn_timeout=conn_timeout)\n\n obtainer = PantsObtainer(\n install_cache=install_cache,\n crawler=crawler_from_config(config, conn_timeout=conn_timeout),\n fetchers=fetchers_from_config(config) or [PyPIFetcher()],\n translators=translator)\n\n distributions[platform] = resolve(requirements=requirements,\n obtainer=obtainer,\n interpreter=interpreter,\n platform=platform)\n\n return distributions" ]
[ "0.6731303", "0.6638183", "0.64004624", "0.6214947", "0.60815656", "0.6050588", "0.60242367", "0.6014201", "0.5969914", "0.59190315", "0.58537954", "0.5825504", "0.578931", "0.5785204", "0.5728535", "0.57212466", "0.56810987", "0.56708723", "0.5663251", "0.5650628", "0.5630034", "0.5584793", "0.55734414", "0.556734", "0.5540338", "0.55349356", "0.5521335", "0.55011517", "0.5487159", "0.54807705" ]
0.7085873
0
List installed and missing dependencies. Given a package and, optionally, a tuple of extras, identify any packages which should be installed to match the requirements and return any which are missing.
def find_dependencies(package="sunpy", extras=None): requirements = get_requirements(package) installed_requirements = {} missing_requirements = defaultdict(list) extras = extras or ["required"] for group in requirements: if group not in extras: continue for package, package_details in requirements[group].items(): try: package_version = version(package) installed_requirements[package] = package_version except PackageNotFoundError: missing_requirements[package].append(package_details) for package, package_versions in missing_requirements.items(): missing_requirements[package] = format_requirement_string( resolve_requirement_versions(package_versions)) return missing_requirements, installed_requirements
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def missing_dependencies_by_extra(package=\"sunpy\", exclude_extras=None):\n exclude_extras = exclude_extras or []\n requirements = get_requirements(package)\n missing_dependencies = {}\n for group in requirements.keys():\n if group in exclude_extras:\n continue\n missing_dependencies[group] = find_dependencies(package, [group])[0]\n return missing_dependencies", "def dependencies(pkg, extra=None):\n ret = set()\n for dist in pkg.requires_dist:\n requirement = pkg_resources.Requirement.parse(dist)\n # we replace all underscores with dash, to make package names similiar in all cases\n name = requirement.name.replace(\"_\", \"-\")\n if extra:\n # for extras we don't grab dependencies for the main pkg,\n # those are already in the main plg rule\n if not requirement.marker or requirement.marker.evaluate({\"extra\": None}):\n continue\n\n if requirement.marker:\n if not requirement.marker.evaluate({\"extra\": extra}):\n continue\n\n if requirement.extras:\n ret = ret | set(\n [\"{}[{}]\".format(name, dist_extra) for dist_extra in requirement.extras]\n )\n else:\n ret.add(name)\n\n return sorted(list(ret))", "def list_installed_depends(\n installed_dists: InstalledDistributions,\n project_name: NormalizedName,\n extras: Optional[Sequence[NormalizedName]] = None,\n) -> Set[NormalizedName]:\n res = set()\n seen = set()\n\n def add(req: Requirement, deps_only: bool) -> None:\n req_name = canonicalize_name(req.name)\n seen_key = (req_name, tuple(sorted(req.extras)))\n if seen_key in seen:\n return\n seen.add(seen_key)\n try:\n dist = installed_dists[req_name]\n except KeyError:\n # not installed\n return\n else:\n if not deps_only:\n res.add(req_name)\n for dep_req in dist.requires:\n add(dep_req, deps_only=False)\n for extra in req.extras:\n extra = canonicalize_name(extra)\n if extra not in dist.extra_requires:\n # extra is not a known extra of installed dist,\n # so we can't report it's dependencies\n continue\n for dep_req in dist.extra_requires[extra]:\n add(dep_req, deps_only=False)\n\n add(\n Requirement(make_project_name_with_extras(project_name, extras)),\n deps_only=True,\n )\n\n return res", "def print_test_deps_not_in_package_deps(self):\n extras = []\n for key, rec_deps in self.recursive_pkg_deps.items():\n any = self.test_imports.get(key, set()).difference(rec_deps, set([key]))\n if any:\n extras.append((key, any))\n\n if extras:\n print(\"Packages whose tests have extra dependencies not listed in `go list -f {{.Deps}}`:\")\n for pkg, deps in extras:\n print(\"\\t{0}: {1}\".format(pkg, \", \".join(deps)))\n print(\"\\n\")", "def iter_dependencies(self, extras: Collection[str] = ()) -> Iterable[Requirement]:\n raise NotImplementedError()", "def check_missing_dep():\n global MISSING_PACKAGES, INSTALLED_PACKAGES, ENABLE_CUDA\n if ENABLE_CUDA and IS_MACOS:\n REQUIRED_PACKAGES.extend(MACOS_REQUIRED_PACKAGES)\n MISSING_PACKAGES = []\n for pkg in REQUIRED_PACKAGES:\n key = pkg.split(\"==\")[0]\n if key not in INSTALLED_PACKAGES:\n MISSING_PACKAGES.append(pkg)\n continue\n else:\n if len(pkg.split(\"==\")) > 1:\n if pkg.split(\"==\")[1] != INSTALLED_PACKAGES.get(key):\n MISSING_PACKAGES.append(pkg)\n continue", "def missing_requirements_command(args, packages=None, exit_on_failure=True):\n requirements_files = (\n args.requirements or Config.requirements_files or []\n )\n ignore_list = (\n args.ignore or Config.ignore_list or []\n )\n\n printer = Printer()\n if not validate_files(\n files=requirements_files,\n printer=printer,\n exit_on_failure=exit_on_failure):\n return False\n\n 
requirements = RequirementCollection()\n for requirements_file in requirements_files:\n requirements.extend(\n RequirementCollection.from_file(filepath=requirements_file)\n )\n\n packages = (\n packages or dependency_list(ignore_list=ignore_list)\n )\n\n missing = [\n (package, required_by)\n for package, required_by\n in missing_requirements(\n packages=packages,\n requirements=requirements,\n ignore_list=ignore_list\n )\n ]\n\n headers = [\n messages.PACKAGE,\n messages.REQUIRED,\n messages.REQUIRED_BY,\n ]\n\n tabular_data = []\n for package, requirers in missing:\n if requirers:\n for required_by, required_version in requirers:\n tabular_data.append([\n printer.colored_message(\n message=package.key,\n message_color=printer.color_package\n ),\n required_version,\n required_by.key,\n ])\n else:\n tabular_data.append([\n printer.colored_message(\n message=package.key,\n message_color=printer.color_package\n ),\n package.version.specifier,\n \"Requirements\",\n ])\n\n if tabular_data:\n printer.error(messages.MISSING_FOUND)\n printer.table(headers=headers, tabular_data=tabular_data)\n if exit_on_failure:\n sys.exit(1)\n return False\n\n printer.success(messages.MISSING_OK)\n return True", "def _list_dependencies_info(\n out: Callable, ljust: int, package: str, dependencies: List[Requirement]\n):\n unicode = sys.stdout.encoding.lower().startswith(\"utf\")\n if unicode:\n ljust += 1\n\n not_found: List[Requirement] = list()\n for dep in dependencies:\n if dep.name == package:\n continue\n try:\n version_ = version(dep.name)\n except Exception:\n not_found.append(dep)\n continue\n\n # build the output string step by step\n output = f\"✔︎ {dep.name}\" if unicode else dep.name\n # handle version specifiers\n if len(dep.specifier) != 0:\n output += f\" ({str(dep.specifier)})\"\n output += \":\"\n output = output.ljust(ljust) + version_\n\n # handle special dependencies with backends, C dep, ..\n if dep.name in (\"matplotlib\", \"seaborn\") and version_ != \"Not found.\":\n try:\n from matplotlib import pyplot as plt\n\n backend = plt.get_backend()\n except Exception:\n backend = \"Not found\"\n\n output += f\" (backend: {backend})\"\n out(output + \"\\n\")\n\n if len(not_found) != 0:\n not_found = [\n f\"{dep.name} ({str(dep.specifier)})\"\n if len(dep.specifier) != 0\n else dep.name\n for dep in not_found\n ]\n if unicode:\n out(f\"✘ Not installed: {', '.join(not_found)}\\n\")\n else:\n out(f\"Not installed: {', '.join(not_found)}\\n\")", "def compare_package_lists(manifest, installed):\n\n uninstalled = [x for x in manifest if x not in installed]\n\n # == comm -23\n also_installed = [x for x in installed if x not in manifest]\n\n # 'easiest' solution\n # print \"apt-get remove -y %s\" % (' '.join(uninstalled))\n # print \"apt-get install -y %s\" % (' '.join(also_installed))\n\n # >>> why isn't this good enough?\n # <<< why manually install dependencies that may change?\n # <<< better to select the minimal graph/set/covering\n # <<< though apt-get will just re-compute these dependencies again\n # <<< \"i swear i didn't manually install [...]\"\n\n # stack = collections.dequeue()\n def visit_graph(apt_cache, pkgname, depends, visited):\n try:\n pkg = apt_cache[pkgname]\n except KeyError as e:\n print(e) # TODO\n return\n\n for pkgset in pkg.installedDependencies:\n for pkg in pkgset:\n depends[pkg.name].append(pkgname)\n if pkgname not in visited:\n visited[pkgname] = True\n visit_graph(apt_cache, pkg.name, depends, visited)\n # stack.push( pkg['name'] )\n\n try:\n apt = import_apt()\n 
apt_cache = apt.Cache()\n\n depends = collections.defaultdict(list)\n visited = {}\n for pkgname in also_installed:\n visit_graph(apt_cache, pkgname, depends, visited)\n\n # TODO: more optimal covering\n minimal = [x for x in also_installed if x not in depends]\n finally:\n tmp_dir = getattr(apt, '_tmp_dirname')\n if tmp_dir and os.path.exists(tmp_dir):\n shutil.rmtree(apt._tmp_dirname)\n\n return PkgComparison(\n minimal,\n also_installed,\n uninstalled,\n manifest,\n installed)", "def unsatisfied_requirements(buildout, package, working_set):\n\n # read all lines from \"requirements.txt\"\n specs = [k.strip() for k in package_readlines(package, 'requirements.txt')]\n\n # discard empty lines and comments\n specs = [k for k in specs if k and k[0] not in ('#', '-')]\n\n # do not consider packages which are already installed, with a reasonable\n # version matching the user specification, either on the current working\n # set, the installed eggs or the system paths\n newest = bool_option(buildout, 'newest', 'true')\n\n left_over = []\n for k in specs:\n if requirement_is_satisfied(k, working_set, newest):\n dist = working_set.require(k)[0]\n logger.info(\"taking requirement `%s' (%s) from `%s'\", dist.key,\n dist.version, dist.location)\n else:\n left_over.append(k)\n specs = left_over\n\n return left_over", "def get_extras_require() -> Dict[str, List[str]]:\n extras = {\n \"testing\": [\n \"pytest==6.1.2\",\n \"pytest-cov==2.10.1\",\n ],\n \"linting\": [\n \"pylint==2.6.0\",\n \"flake8==3.8.4\",\n \"black>=20.8b1\",\n \"darglint==1.5.5\",\n \"mypy==0.790\",\n # \"data-science-types>=0.2.20\", # pandas, numpy, matplotlib\n ],\n }\n extras[\"all\"] = [item for group in extras.values() for item in group]\n return extras", "def list_installed_depends_by_extra(\n installed_dists: InstalledDistributions,\n project_name: NormalizedName,\n) -> Dict[Optional[NormalizedName], Set[NormalizedName]]:\n res = {} # type: Dict[Optional[NormalizedName], Set[NormalizedName]]\n base_depends = list_installed_depends(installed_dists, project_name)\n res[None] = base_depends\n for extra in installed_dists[project_name].extra_requires:\n extra_depends = list_installed_depends(installed_dists, project_name, [extra])\n res[extra] = extra_depends - base_depends\n return res", "def find_with_deps(self, package_names):", "def _getDepends(self, pkg):\r\n vals = self._rp.get_depends(pkg, implicit=True)\r\n return [v for v in vals if not self._rp.get_manifest(v).is_catkin]", "def sort_packages(self) -> None:\n self.recommended_packages = []\n self.required_packages = []\n for package in self.repository_packages:\n try:\n output = self.guest.execute(Command('rpm', '-q', package), silent=True)\n assert output.stdout\n self.debug(f\"Package '{output.stdout.strip()}' already installed.\")\n except tmt.utils.RunError:\n if self.skip_missing:\n self.recommended_packages.append(package)\n else:\n self.required_packages.append(package)", "def calculate_missing(base_pkg, missing, file_deps, use_test_depends=False):\n rospack = rospkg.RosPack()\n for launch_file in file_deps.keys():\n pkg = rospkg.get_package_name(os.path.dirname(os.path.abspath(launch_file)))\n\n if pkg is None: #cannot determine package\n print(\"ERROR: cannot determine package for [%s]\"%pkg, file=sys.stderr)\n continue\n m = rospack.get_manifest(pkg)\n d_pkgs = set([d.name for d in m.depends])\n if m.is_catkin:\n # for catkin packages consider the run dependencies instead\n # else not released packages will not appear in the dependency list\n # since rospkg does 
uses rosdep to decide which dependencies to return\n from catkin_pkg.package import parse_package\n p = parse_package(os.path.dirname(m.filename))\n d_pkgs = set([d.name for d in p.run_depends])\n if use_test_depends:\n for d in p.test_depends:\n d_pkgs.add(d.name)\n # make sure we don't count ourselves as a dep\n d_pkgs.add(pkg)\n\n diff = list(set(file_deps[launch_file].pkgs) - d_pkgs)\n if not pkg in missing:\n missing[pkg] = set()\n missing[pkg].update(diff)\n return missing", "def diff(requirements, installed):\n\n requirements = {r.req.key: r for r in requirements}\n\n to_be_installed = set()\n to_be_uninstalled = set()\n\n satisfied = set()\n\n for module in installed:\n key = module.key\n\n if key in EXCEPTIONS:\n pass\n elif key not in requirements:\n to_be_uninstalled.add(module.as_requirement())\n elif requirements[key].specifier.contains(module.version):\n satisfied.add(key)\n\n for key, requirement in requirements.items():\n if key not in satisfied:\n to_be_installed.add(requirement.req)\n\n return (to_be_installed, to_be_uninstalled)", "def requires_package(prerequisites):\n return check_prerequisites(prerequisites, checker=_check_py_package)", "def requires(self, package):\n return self.provides(package, \"requires\")", "def satisfy_requirements(buildout, package, working_set):\n\n requirements = unsatisfied_requirements(buildout, package, working_set)\n\n if not requirements: return\n\n # only installs if not on \"offline\" mode\n if offline(buildout):\n raise zc.buildout.UserError(\"We don't have a distribution for %s\\n\"\n \"and can't install one in offline (no-install) mode.\\n\"\n % ','.join(requirements))\n\n # installs all missing dependencies, if required, updates working set\n for req in requirements:\n logger.info(\"Installing `%s' for package `%s'...\", req, package)\n working_set = install_package(buildout, req, working_set)", "def packages_required(package_names):\n # info(\"packages_required(%s)\" % repr(package_names))\n # noinspection PyBroadException\n try:\n result = True\n\n # info(package_names)\n # info(__pip_list)\n for requirement in [Requirement(name) for name in package_names]:\n if requirement.supported_python():\n pkg_name = requirement.package\n if pkg_name.lower() not in __pip_list:\n try:\n # info('__import__(\"{name}\")'.format(name=pkg_name))\n __import__(pkg_name)\n except ImportError:\n info(pkg_name + \" not installed!\")\n missing_modules.append(pkg_name)\n result = False\n return result\n except Exception:\n return False", "def parse_depend_packages(self, atoms):\n\n matched_atoms = []\n atoms = self.filter_depend(atoms)\n matches = self.package_parser.findall(atoms)\n\n if len(matches) > 0:\n for match in matches:\n if not (match[0] == 'virtual' and (match[1] == 'jdk-1' or match[1] == 'jre-1' or match[1] == 'jdk' or match[1] == 'jre' )):\n matched_atoms.append({'equality':'=', 'cat':match[0], 'pkg':match[1], 'slot':match[2]})\n\n return matched_atoms", "def test_scan_and_find_dependencies_pypi():\n manifests = [{\n \"filename\": \"pylist.json\",\n \"filepath\": \"/bin/local\",\n \"content\": open(str(Path(__file__).parent / \"data/manifests/pylist.json\")).read()\n }]\n res = DependencyFinder().scan_and_find_dependencies(\"pypi\", manifests, \"false\")\n assert \"result\" in res\n assert res['result'][0]['details'][0]['_resolved'][0]['package'] == \"django\"\n assert len(res['result'][0]['details'][0]['_resolved'][0]['deps']) == 1", "def get_dependencies(apt_cache, package_name, pattern=None):\n dependencies = []\n for or_group in 
apt_cache[package_name].candidate.dependencies:\n for dep in or_group:\n if dep.rawtype in [\"Depends\", \"PreDepends\"]:\n dependencies.append(dep.name)\n if pattern:\n dependencies = [ x for x in dependencies if x.find(pattern) != -1 ]\n return dependencies", "def _print_missing(packages, verbose):\n if not packages:\n print(\"## No Rez packages were found.\")\n print(\"No data found\")\n\n return\n\n print(\"## Your command affects these Rez packages.\")\n\n template = \"{package.name}\"\n\n if verbose:\n template = \"{package.name}: {path}\"\n\n for line in sorted(\n template.format(package=package, path=finder.get_package_root(package))\n for package in packages\n ):\n print(line)", "def _remove_extra_packages(frozen_pkgs, ret, **kwargs):\n pkgs = __salt__[\"pkg.list_pkgs\"](**kwargs)\n extra_pkgs = set(pkgs) - set(frozen_pkgs)\n for pkg in extra_pkgs:\n try:\n __salt__[\"pkg.remove\"](name=pkg, **kwargs)\n ret[\"pkgs\"][\"remove\"].append(pkg)\n log.info(\"Removed extra package %s\", pkg)\n except Exception as e: # pylint: disable=broad-except\n msg = \"Error removing %s package: %s\"\n log.error(msg, pkg, e)\n ret[\"comment\"].append(msg % (pkg, e))", "def get_requirements(package):\n requirements: list = requires(package)\n requires_dict = defaultdict(dict)\n for requirement in requirements:\n req = Requirement(requirement)\n package_name, package_marker = req.name, req.marker\n if package_marker and \"extra ==\" in str(package_marker):\n group = str(package_marker).split(\"extra == \")[1].strip('\"').strip(\"'\").strip()\n else:\n group = \"required\"\n # De-duplicate (the same package could appear more than once in the extra == 'all' group)\n if package_name in requires_dict[group]:\n continue\n requires_dict[group][package_name] = req\n return requires_dict", "def show_missing():\n if missing_modules:\n info(\"The following modules are currently not installed and would enable additional tasks:\")\n for pkg_name in missing_modules:\n info(' ' + pkg_name)", "def test_scan_and_find_dependencies_pypi():\n manifests = [{\n \"filename\": \"pylist.json\",\n \"filepath\": \"/bin/local\",\n \"content\": open(str(Path(__file__).parent / \"data/pylist.json\")).read()\n }]\n res = DependencyFinder().scan_and_find_dependencies(\"pypi\", manifests)\n assert \"result\" in res\n assert res['result'][0]['details'][0]['_resolved'][0]['package'] == \"django\"\n assert len(res['result'][0]['details'][0]['_resolved'][0]['deps']) == 1", "def install_requires():\n skip_install_requires = environ.get('SKIP_INSTALL_REQUIRES')\n if not skip_install_requires:\n with open('requirements.pip') as r:\n return r.readlines()\n return []" ]
[ "0.76459414", "0.6849081", "0.67575186", "0.666805", "0.6547717", "0.654318", "0.6514373", "0.6467595", "0.6462847", "0.6381039", "0.6121573", "0.6098772", "0.6093863", "0.6074484", "0.60300785", "0.5925275", "0.59156656", "0.5831861", "0.582028", "0.58077604", "0.5804144", "0.5786391", "0.5785671", "0.5758435", "0.57471186", "0.57295126", "0.57161087", "0.5699716", "0.5694994", "0.568052" ]
0.818115
0
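A minimal usage sketch for the find_dependencies document above. It assumes the function (and its helpers get_requirements, format_requirement_string, resolve_requirement_versions) is importable from the module shown; the extras group names are assumptions and depend on the extras the package actually declares:

# Hypothetical module path; adjust to wherever find_dependencies lives.
# from sunpy.util.sysinfo import find_dependencies

# Report which of sunpy's "required" and "map" dependency groups are missing.
missing, installed = find_dependencies(package="sunpy", extras=["required", "map"])
for name, requirement_string in missing.items():
    print(f"missing: {name} -> {requirement_string}")
for name, installed_version in installed.items():
    print(f"installed: {name}=={installed_version}")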
Get all the specified extras for a package and report any missing dependencies. This function will also return a "required" item in the dict, which contains the dependencies associated with no extras.
def missing_dependencies_by_extra(package="sunpy", exclude_extras=None): exclude_extras = exclude_extras or [] requirements = get_requirements(package) missing_dependencies = {} for group in requirements.keys(): if group in exclude_extras: continue missing_dependencies[group] = find_dependencies(package, [group])[0] return missing_dependencies
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_dependencies(package=\"sunpy\", extras=None):\n requirements = get_requirements(package)\n installed_requirements = {}\n missing_requirements = defaultdict(list)\n extras = extras or [\"required\"]\n for group in requirements:\n if group not in extras:\n continue\n for package, package_details in requirements[group].items():\n try:\n package_version = version(package)\n installed_requirements[package] = package_version\n except PackageNotFoundError:\n missing_requirements[package].append(package_details)\n for package, package_versions in missing_requirements.items():\n missing_requirements[package] = format_requirement_string(\n resolve_requirement_versions(package_versions))\n return missing_requirements, installed_requirements", "def get_extras_require() -> Dict[str, List[str]]:\n extras = {\n \"testing\": [\n \"pytest==6.1.2\",\n \"pytest-cov==2.10.1\",\n ],\n \"linting\": [\n \"pylint==2.6.0\",\n \"flake8==3.8.4\",\n \"black>=20.8b1\",\n \"darglint==1.5.5\",\n \"mypy==0.790\",\n # \"data-science-types>=0.2.20\", # pandas, numpy, matplotlib\n ],\n }\n extras[\"all\"] = [item for group in extras.values() for item in group]\n return extras", "def read_extras():\n extras = dict()\n extra_requirements_dir = 'packaging/requirements'\n for extra_requirements_filename in os.listdir(extra_requirements_dir):\n filename_match = re.search(r'^requirements-(\\w*).txt$', extra_requirements_filename)\n if not filename_match:\n continue\n extra_req_file_path = os.path.join(extra_requirements_dir, extra_requirements_filename)\n try:\n extras[filename_match.group(1)] = read_file(extra_req_file_path).splitlines()\n except RuntimeError:\n pass\n return extras", "def dependencies(pkg, extra=None):\n ret = set()\n for dist in pkg.requires_dist:\n requirement = pkg_resources.Requirement.parse(dist)\n # we replace all underscores with dash, to make package names similiar in all cases\n name = requirement.name.replace(\"_\", \"-\")\n if extra:\n # for extras we don't grab dependencies for the main pkg,\n # those are already in the main plg rule\n if not requirement.marker or requirement.marker.evaluate({\"extra\": None}):\n continue\n\n if requirement.marker:\n if not requirement.marker.evaluate({\"extra\": extra}):\n continue\n\n if requirement.extras:\n ret = ret | set(\n [\"{}[{}]\".format(name, dist_extra) for dist_extra in requirement.extras]\n )\n else:\n ret.add(name)\n\n return sorted(list(ret))", "def iter_dependencies(self, extras: Collection[str] = ()) -> Iterable[Requirement]:\n raise NotImplementedError()", "def get_package_extras(provider_package_id: str) -> dict[str, list[str]]:\n if provider_package_id == \"providers\":\n return {}\n extras_dict: dict[str, list[str]] = {\n module: [get_pip_package_name(module)]\n for module in ALL_DEPENDENCIES[provider_package_id][CROSS_PROVIDERS_DEPS]\n }\n provider_yaml_dict = get_provider_yaml(provider_package_id)\n additional_extras = provider_yaml_dict.get(\"additional-extras\")\n if additional_extras:\n for entry in additional_extras:\n name = entry[\"name\"]\n dependencies = entry[\"dependencies\"]\n if name in extras_dict:\n # remove non-versioned dependencies if versioned ones are coming\n existing_dependencies = set(extras_dict[name])\n for new_dependency in dependencies:\n for dependency in existing_dependencies:\n # remove extra if exists as non-versioned one\n if new_dependency.startswith(dependency):\n extras_dict[name].remove(dependency)\n break\n extras_dict[name].append(new_dependency)\n else:\n extras_dict[name] = dependencies\n 
return extras_dict", "def get_requirements(package):\n requirements: list = requires(package)\n requires_dict = defaultdict(dict)\n for requirement in requirements:\n req = Requirement(requirement)\n package_name, package_marker = req.name, req.marker\n if package_marker and \"extra ==\" in str(package_marker):\n group = str(package_marker).split(\"extra == \")[1].strip('\"').strip(\"'\").strip()\n else:\n group = \"required\"\n # De-duplicate (the same package could appear more than once in the extra == 'all' group)\n if package_name in requires_dict[group]:\n continue\n requires_dict[group][package_name] = req\n return requires_dict", "def has_extras(self):\n return any(map(utils.assert_package_has_extras, self.pkg_arguments))", "def print_test_deps_not_in_package_deps(self):\n extras = []\n for key, rec_deps in self.recursive_pkg_deps.items():\n any = self.test_imports.get(key, set()).difference(rec_deps, set([key]))\n if any:\n extras.append((key, any))\n\n if extras:\n print(\"Packages whose tests have extra dependencies not listed in `go list -f {{.Deps}}`:\")\n for pkg, deps in extras:\n print(\"\\t{0}: {1}\".format(pkg, \", \".join(deps)))\n print(\"\\n\")", "def get_required_mods(self):\r\n mods = []\r\n unknowntags = []\r\n for key, value in self.dependencies.items():\r\n if value.required_by:\r\n if value.provided_by:\r\n mods.append(list(value.provided_by)[0]) #Pick random'ish if more than one.\r\n else:\r\n unknowntags.append((key, value))\r\n return {\"mods\":sorted(mods, key= lambda x: x.mod.name), \"unknown\": unknowntags}", "def _getDepends(self, pkg):\r\n vals = self._rp.get_depends(pkg, implicit=True)\r\n return [v for v in vals if not self._rp.get_manifest(v).is_catkin]", "def determine_possible_extras(whls):\n whl_map = {\n whl.name(): whl\n for whl in whls\n }\n\n # TODO(mattmoor): Consider memoizing if this recursion ever becomes\n # expensive enough to warrant it.\n def is_possible(name, extra):\n # If we don't have the .whl at all, then this isn't possible.\n if name not in whl_map:\n return False\n whl = whl_map[name]\n # If we have the .whl, and we don't need anything extra then\n # we can satisfy this dependency.\n if not extra:\n return True\n # If we do need something extra, then check the extra's\n # dependencies to make sure they are fully satisfied.\n for extra_dep in whl.dependencies(extra=extra):\n req = pkg_resources.Requirement.parse(extra_dep)\n # Check that the dep and any extras are all possible.\n if not is_possible(req.project_name, None):\n return False\n for e in req.extras:\n if not is_possible(req.project_name, e):\n return False\n # If all of the dependencies of the extra are satisfiable then\n # it is possible to construct this dependency.\n return True\n\n return {\n whl: [\n extra\n for extra in whl.extras()\n if is_possible(whl.name(), extra)\n ]\n for whl in whls\n }", "def requires(self, package):\n return self.provides(package, \"requires\")", "def list_installed_depends_by_extra(\n installed_dists: InstalledDistributions,\n project_name: NormalizedName,\n) -> Dict[Optional[NormalizedName], Set[NormalizedName]]:\n res = {} # type: Dict[Optional[NormalizedName], Set[NormalizedName]]\n base_depends = list_installed_depends(installed_dists, project_name)\n res[None] = base_depends\n for extra in installed_dists[project_name].extra_requires:\n extra_depends = list_installed_depends(installed_dists, project_name, [extra])\n res[extra] = extra_depends - base_depends\n return res", "def calculate_missing(base_pkg, missing, file_deps, 
use_test_depends=False):\n rospack = rospkg.RosPack()\n for launch_file in file_deps.keys():\n pkg = rospkg.get_package_name(os.path.dirname(os.path.abspath(launch_file)))\n\n if pkg is None: #cannot determine package\n print(\"ERROR: cannot determine package for [%s]\"%pkg, file=sys.stderr)\n continue\n m = rospack.get_manifest(pkg)\n d_pkgs = set([d.name for d in m.depends])\n if m.is_catkin:\n # for catkin packages consider the run dependencies instead\n # else not released packages will not appear in the dependency list\n # since rospkg does uses rosdep to decide which dependencies to return\n from catkin_pkg.package import parse_package\n p = parse_package(os.path.dirname(m.filename))\n d_pkgs = set([d.name for d in p.run_depends])\n if use_test_depends:\n for d in p.test_depends:\n d_pkgs.add(d.name)\n # make sure we don't count ourselves as a dep\n d_pkgs.add(pkg)\n\n diff = list(set(file_deps[launch_file].pkgs) - d_pkgs)\n if not pkg in missing:\n missing[pkg] = set()\n missing[pkg].update(diff)\n return missing", "def parse_depend_packages(self, atoms):\n\n matched_atoms = []\n atoms = self.filter_depend(atoms)\n matches = self.package_parser.findall(atoms)\n\n if len(matches) > 0:\n for match in matches:\n if not (match[0] == 'virtual' and (match[1] == 'jdk-1' or match[1] == 'jre-1' or match[1] == 'jdk' or match[1] == 'jre' )):\n matched_atoms.append({'equality':'=', 'cat':match[0], 'pkg':match[1], 'slot':match[2]})\n\n return matched_atoms", "def checkOptionalDependencies(self):\n \n # skip dependency check for downloading only\n if( self.downloadOnly ):\n return\n\n # soft dependencies\n failed = []\n for opt in self.optmodules:\n mod = self.parent.module(opt)\n if( mod == None ):\n failed.append(opt)\n \n # remove soft dependencies that were not found\n self.buildWithout(failed)", "def unsatisfied_requirements(buildout, package, working_set):\n\n # read all lines from \"requirements.txt\"\n specs = [k.strip() for k in package_readlines(package, 'requirements.txt')]\n\n # discard empty lines and comments\n specs = [k for k in specs if k and k[0] not in ('#', '-')]\n\n # do not consider packages which are already installed, with a reasonable\n # version matching the user specification, either on the current working\n # set, the installed eggs or the system paths\n newest = bool_option(buildout, 'newest', 'true')\n\n left_over = []\n for k in specs:\n if requirement_is_satisfied(k, working_set, newest):\n dist = working_set.require(k)[0]\n logger.info(\"taking requirement `%s' (%s) from `%s'\", dist.key,\n dist.version, dist.location)\n else:\n left_over.append(k)\n specs = left_over\n\n return left_over", "def _get_all_pkg_info(self):\n all_pkgs = self._filter_pkgs(self._go_list(\"./...\"))\n # for every package, list the deps, the test files, the test imports, and the external package test imports\n big_list = self._go_list(\n \"-f\", \"{{.ImportPath}}:{{.Deps}}:{{.TestImports}}:{{.XTestImports}}\", *all_pkgs)\n recursive_deps = {}\n test_imports = {}\n\n for line in big_list:\n tokens = [token.strip().lstrip('[').rstrip(']').strip() for token in line.split(\":\", 3)]\n pkg = tokens[0].strip()\n\n recursive_deps[pkg] = set(self._filter_pkgs(tokens[1].split() + [pkg]))\n if tokens[2] or tokens[3]:\n test_imports[pkg] = set(\n self._filter_pkgs(tokens[2].split()) + self._filter_pkgs(tokens[3].split()))\n\n return recursive_deps, test_imports", "def getPackageRequired(self, *args):\n return _libsbml.SBMLDocument_getPackageRequired(self, *args)", "def getPkgRequired(self, *args):\n 
return _libsbml.SBMLDocument_getPkgRequired(self, *args)", "def GetMissingRequires(self):\n external_dependencies = set(self._required_namespaces)\n\n # Assume goog namespace is always available.\n external_dependencies.add('goog')\n # goog.module is treated as a builtin, too (for goog.module.get).\n external_dependencies.add('goog.module')\n\n created_identifiers = set()\n for unused_namespace, identifier, unused_line_number in (\n self._created_namespaces):\n created_identifiers.add(identifier)\n\n missing_requires = dict()\n illegal_alias_statements = dict()\n\n def ShouldRequireNamespace(namespace, identifier):\n \"\"\"Checks if a namespace would normally be required.\"\"\"\n return (\n not self._IsPrivateIdentifier(identifier) and\n namespace not in external_dependencies and\n namespace not in self._provided_namespaces and\n identifier not in external_dependencies and\n identifier not in created_identifiers and\n namespace not in missing_requires)\n\n # First check all the used identifiers where we know that their namespace\n # needs to be provided (unless they are optional).\n for ns in self._used_namespaces:\n namespace = ns.namespace\n identifier = ns.identifier\n if (not ns.alias_definition and\n ShouldRequireNamespace(namespace, identifier)):\n missing_requires[namespace] = ns.GetLine()\n\n # Now that all required namespaces are known, we can check if the alias\n # definitions (that are likely being used for typeannotations that don't\n # need explicit goog.require statements) are already covered. If not\n # the user shouldn't use the alias.\n for ns in self._used_namespaces:\n if (not ns.alias_definition or\n not ShouldRequireNamespace(ns.namespace, ns.identifier)):\n continue\n if self._FindNamespace(ns.identifier, self._provided_namespaces,\n created_identifiers, external_dependencies,\n missing_requires):\n continue\n namespace = ns.identifier.rsplit('.', 1)[0]\n illegal_alias_statements[namespace] = ns.token\n\n return missing_requires, illegal_alias_statements", "def resolve_dependencies(self, all_data):\n self.requires = []\n for dep in self.metadata[\"deps\"]:\n key = (self.package.key, dep)\n if key in self.provides:\n raise Exception(\"Package shouldn't depend on itself: %s\" % repr(key))\n self.requires.append( all_data.resolve_unqualified_component(dep, self.package.key) )", "def __gather_package_data(arguments):\n ignore_patterns, packages_path, search_packages_path = _resolve_arguments(\n arguments.ignore_patterns,\n arguments.packages_path,\n arguments.search_packages_path,\n )\n rez_packages = set(arguments.rez_packages)\n\n package_finder = registry.get_package_finder(arguments.command)\n\n found_packages = []\n packages, invalid_packages, skips = package_finder(\n paths=packages_path + search_packages_path\n )\n\n for package in packages:\n if rez_packages and package.name not in rez_packages:\n skips.append(package)\n else:\n found_packages.append(package)\n\n ignored_packages, other_packages = _split_the_ignored_packages(\n found_packages, ignore_patterns\n )\n\n other_packages = sorted(other_packages, key=operator.attrgetter(\"name\"))\n\n return ignored_packages, other_packages, invalid_packages, skips", "def find_with_deps(self, package_names):", "def check_missing_dep():\n global MISSING_PACKAGES, INSTALLED_PACKAGES, ENABLE_CUDA\n if ENABLE_CUDA and IS_MACOS:\n REQUIRED_PACKAGES.extend(MACOS_REQUIRED_PACKAGES)\n MISSING_PACKAGES = []\n for pkg in REQUIRED_PACKAGES:\n key = pkg.split(\"==\")[0]\n if key not in INSTALLED_PACKAGES:\n 
MISSING_PACKAGES.append(pkg)\n continue\n else:\n if len(pkg.split(\"==\")) > 1:\n if pkg.split(\"==\")[1] != INSTALLED_PACKAGES.get(key):\n MISSING_PACKAGES.append(pkg)\n continue", "def list_installed_depends(\n installed_dists: InstalledDistributions,\n project_name: NormalizedName,\n extras: Optional[Sequence[NormalizedName]] = None,\n) -> Set[NormalizedName]:\n res = set()\n seen = set()\n\n def add(req: Requirement, deps_only: bool) -> None:\n req_name = canonicalize_name(req.name)\n seen_key = (req_name, tuple(sorted(req.extras)))\n if seen_key in seen:\n return\n seen.add(seen_key)\n try:\n dist = installed_dists[req_name]\n except KeyError:\n # not installed\n return\n else:\n if not deps_only:\n res.add(req_name)\n for dep_req in dist.requires:\n add(dep_req, deps_only=False)\n for extra in req.extras:\n extra = canonicalize_name(extra)\n if extra not in dist.extra_requires:\n # extra is not a known extra of installed dist,\n # so we can't report it's dependencies\n continue\n for dep_req in dist.extra_requires[extra]:\n add(dep_req, deps_only=False)\n\n add(\n Requirement(make_project_name_with_extras(project_name, extras)),\n deps_only=True,\n )\n\n return res", "def test_multiple_manifest_with_single_dep(self):\n collector = PypiCollector()\n collector.parse_and_collect(MANIFEST_START + DEP_1, True)\n collector.parse_and_collect(MANIFEST_START + DEP_1, True)\n collector.parse_and_collect(MANIFEST_START + DEP_1, True)\n packages = dict(collector.counter.most_common())\n assert packages == {\n 'daiquiri': 3\n }", "def missing_requirements_command(args, packages=None, exit_on_failure=True):\n requirements_files = (\n args.requirements or Config.requirements_files or []\n )\n ignore_list = (\n args.ignore or Config.ignore_list or []\n )\n\n printer = Printer()\n if not validate_files(\n files=requirements_files,\n printer=printer,\n exit_on_failure=exit_on_failure):\n return False\n\n requirements = RequirementCollection()\n for requirements_file in requirements_files:\n requirements.extend(\n RequirementCollection.from_file(filepath=requirements_file)\n )\n\n packages = (\n packages or dependency_list(ignore_list=ignore_list)\n )\n\n missing = [\n (package, required_by)\n for package, required_by\n in missing_requirements(\n packages=packages,\n requirements=requirements,\n ignore_list=ignore_list\n )\n ]\n\n headers = [\n messages.PACKAGE,\n messages.REQUIRED,\n messages.REQUIRED_BY,\n ]\n\n tabular_data = []\n for package, requirers in missing:\n if requirers:\n for required_by, required_version in requirers:\n tabular_data.append([\n printer.colored_message(\n message=package.key,\n message_color=printer.color_package\n ),\n required_version,\n required_by.key,\n ])\n else:\n tabular_data.append([\n printer.colored_message(\n message=package.key,\n message_color=printer.color_package\n ),\n package.version.specifier,\n \"Requirements\",\n ])\n\n if tabular_data:\n printer.error(messages.MISSING_FOUND)\n printer.table(headers=headers, tabular_data=tabular_data)\n if exit_on_failure:\n sys.exit(1)\n return False\n\n printer.success(messages.MISSING_OK)\n return True", "def iter_provided_extras(self) -> Iterable[str]:\n raise NotImplementedError()" ]
[ "0.750957", "0.72501034", "0.67800206", "0.66262317", "0.6523681", "0.6490982", "0.63194853", "0.62972224", "0.62286425", "0.60691607", "0.60094035", "0.59347016", "0.58882934", "0.5886922", "0.5747453", "0.57250464", "0.5711906", "0.5701838", "0.5681736", "0.5658716", "0.56246036", "0.5622307", "0.55765617", "0.5563604", "0.5563374", "0.55140036", "0.54567856", "0.54344183", "0.5431421", "0.54287446" ]
0.7773356
0
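A hedged sketch of how the missing_dependencies_by_extra document above might be called. The excluded group names below are illustrative, not taken from the source; each value in the returned dict is the missing-requirements mapping produced by find_dependencies for that extras group:

missing_by_extra = missing_dependencies_by_extra("sunpy", exclude_extras=["docs", "dev"])
for group, missing in missing_by_extra.items():
    if missing:
        print(f"extra '{group}' is missing: {', '.join(missing)}")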
Load zoning districts from the given shapefile.
def load_zoning_districts(source=ZONING_DATA_FILE): layer = DataSource(source)[0] for feature in layer: try: _save_base_district(feature) except: print ('Could not save base district for feature with OBJECTID=%s.' ' Skipping.') % feature['OBJECTID'] traceback.print_exc()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_graph(self, graph: nx.Graph) -> None:\n\n self.pos = dict()\n self.polygons = dict()\n districts = len(self.data[\"features\"])\n\n for i in range(districts):\n\n # Get shape information from file\n nodes = self.data[\"features\"][i][\"geometry\"][\"coordinates\"][0]\n area_id = self.data[\"features\"][i][\"properties\"][\"MOVEMENT_ID\"].zfill(4) \n\n # Convert list of lists to list of tuples (lng, lat)\n nodes = list(map(tuple, nodes)) \n\n # Prune nodes and remove last node (duplicate of first node)\n nodes = list(map(lambda x: (x[:2]), nodes)) \n nodes = nodes[:-1]\n\n # Add polygon with node-coordinates to dict\n self.polygons[area_id] = nodes\n\n for j,k in enumerate(nodes):\n self.pos[area_id+\"-\"+str(j)]=(k[0], k[1])\n\n for j in range(len(nodes)-1):\n graph.add_edge(\n u_of_edge=area_id+\"-\"+str(j), \n v_of_edge=area_id+\"-\"+str(j+1))\n\n graph.add_edge(\n u_of_edge=area_id+\"-\"+str(len(nodes)-1), \n v_of_edge=area_id+\"-\"+str(0))\n \n self.graph = graph", "def load_districts(self):\r\n\r\n response = requests.get(\"https://cdn-api.co-vin.in/api/v2/admin/location/districts/{}\".format(self.state_id))\r\n\r\n if response.ok:\r\n\r\n df = pd.DataFrame(json.loads(response.text)[\"districts\"])\r\n self.districts_df = df", "def import_national_boundaries(self, name):\n print \"\\n4.3- importa shape con confini nazionali ISTAT\"\n countrySHP = os.path.join(\"boundaries\", \"italy_2011_WGS84.shp\")\n countrySQL = os.path.join(\"boundaries\", \"italy_%s.sql\" % name)\n if os.path.isfile(countrySQL):\n call(\"rm %s\" % countrySQL, shell=True)\n cmd = \"shp2pgsql -s 4326 -W 'LATIN1' %s italy %s > %s\" % (countrySHP, name, countrySQL)\n print cmd\n call(cmd, shell=True)\n call(\"psql -h localhost -U %s -d %s -f %s\" % (self.user, name, countrySQL), shell=True)\n call(\"rm %s\" % countrySQL, shell=True)\n call(\"echo 'CREATE INDEX ON italy USING GIST (geom);'| psql -U %s -d %s\" % (self.user, name), shell=True)\n call(\"echo 'ANALYZE italy;'| psql -U %s -d %s\" % (self.user, name), shell=True)", "def import_shapefile(self, shapefile, schema):\n logger.debug(\"Importing shapefile {}\".format(shapefile))\n layer = DataSource(shapefile)[0]\n for feature in layer:\n fields = schema.from_feature(feature)\n Region.objects.create(**fields)", "def get_district_file(state=48, district=7, leg_body='US-REP'):\r\n\r\n district_file = get_district_geojson_filename(\r\n state=state, district=district, leg_body=leg_body)\r\n geojson_path = 'static/geojson/' \r\n state = \"{0:0>2}\".format(state)\r\n district = \"{0:0>2}\".format(district)\r\n \r\n if not os.path.isfile(district_file):\r\n print( \"Downloading district file\" )\r\n # TODO download the most recent districts file\r\n # currently it downloads the 2016 district\r\n # 'http://www2.census.gov/geo/tiger/GENZ2016/shp/cb_2016_us_cd115_500k.zip'\r\n \r\n if leg_body == 'US-REP':\r\n district_url = 'http://www2.census.gov/geo/tiger/GENZ2016/shp/cb_2016_us_cd115_500k.zip'\r\n if leg_body == 'STATE-REP':\r\n district_url = 'ftp://ftpgis1.tlc.state.tx.us/DistrictViewer/House/PlanH358.zip'\r\n if leg_body == 'STATE-SEN':\r\n district_url = 'ftp://ftpgis1.tlc.state.tx.us/DistrictViewer/Senate/PlanS172.zip'\r\n \r\n district_dl_file = geojson_path + 'district.zip'\r\n download_file(district_url, district_dl_file)\r\n extract_all(district_dl_file, geojson_path)\r\n \r\n if len(glob(geojson_path + '*shp')) > 0:\r\n districts_shapefile = glob(geojson_path + '*shp')[0]\r\n else:\r\n for p in glob(geojson_path + '*'):\r\n if os.path.isdir(p):\r\n 
shapefile_path = p\r\n districts_shapefile = glob(p + '/*shp')[0]\r\n \r\n print( \"Converting district file to GEOJSON\" )\r\n districts = gpd.read_file(districts_shapefile)\r\n \r\n if leg_body == 'US-REP':\r\n d_index = districts[districts.GEOID == (state + district) ].index\r\n if leg_body == 'STATE-REP' or leg_body == 'STATE-SEN':\r\n d_index = districts[districts.District == int(district) ].index\r\n\r\n district_shape = districts.loc[d_index]\r\n district_shape = district_shape.to_crs({'init': u'epsg:4326'})\r\n district_shape.to_file(district_file, driver='GeoJSON')\r\n\r\n # cleanup geojson dir\r\n if len(glob(geojson_path + '*shp')) > 0:\r\n shapefile_prefix = glob(geojson_path + '*shp')[0].split(\r\n geojson_path)[1].split('.')[0]\r\n shapefiles = glob(geojson_path + shapefile_prefix + '*')\r\n for f in shapefiles:\r\n os.remove(f)\r\n else:\r\n shapefile_prefix = glob(shapefile_path + '/*shp')[0].split(\r\n shapefile_path)[1].split('.')[0]\r\n shapefiles = glob(shapefile_path + shapefile_prefix + '*')\r\n for f in shapefiles:\r\n os.remove(f)\r\n os.rmdir(shapefile_path)\r\n os.remove(district_dl_file)", "def load_shapefile_neighborhood(area):\n if os.path.isfile(\"data/shp/Inzameling_huisvuil_080520.shp\"):\n source = gpd.read_file('data/shp/Inzameling_huisvuil_080520.shp')\n elif os.path.isfile(\"../data/shp/Inzameling_huisvuil_080520.shp\"):\n source = gpd.read_file('../data/shp/Inzameling_huisvuil_080520.shp')\n if area:\n source = source[source['sdcode'].isin(list(area))]\n return list(source.geometry)", "def load_from_geojson(self, filename_or_url):", "def get_shapes4country(country='South Africa'):\n # location of data\n URL = \"http://www.naturalearthdata.com/downloads/10m-cultural-vectors\"\n URL += \"/10m-admin-1-states-provinces/\"\n # Shapefiles locally?\n # TODO - update to download automatically and store in AC_tools' data directory\n shapefiles = 'ne_10m_admin_1_states_provinces_lakes'\n# shapefiles = 'ne_10m_admin_1_states_provinces'\n folder = '/mnt/lustre/users/ts551/labbook/Python_progs/'\n folder += '/AC_tools/data/shapefiles/{}'.format(shapefiles, shapefiles)\n states = geopandas.read_file(folder)\n # Just select state of interest\n choosen_states = states.query(\"admin == '{}'\".format(country))\n choosen_states = choosen_states.reset_index(drop=True)\n # Get the shapes\n shapes = zip(choosen_states.geometry, range(len(choosen_states)))\n return shapes", "def loadMap(self, filePath):\n try:\n f = h5py.File(filePath, 'r')\n except OSError as e:\n #File does not exist\n #As TDS-1 is run periodically, many time segment files are not populated\n print(\"Could not find coastal distance map \" + filePath)\n return\n\n self.coastalData = np.array(f['/array'])\n self.lats = np.array(f['/lats'])\n self.lons = np.array(f['/lons'])\n self.maxkm = np.array(f['/maxkm'])\n self.res = np.array(f['/res'])\n \n NaN = float('nan');", "def import_regional_boundaries(self, name):\n print \"\\n4.2- importa shape con confini regionali generalizzati ISTAT, per creare tabelle con errori per regione\"\n regionsSHP = os.path.join(\"boundaries\", \"regioni_2011_WGS84.shp\")\n regionsSQL = os.path.join(\"boundaries\", \"regioni_%s.sql\" % name)\n if os.path.isfile(regionsSQL):\n call(\"rm %s\" % regionsSQL, shell=True)\n cmd = \"shp2pgsql -s 4326 -W 'LATIN1' %s regioni %s > %s\" % (regionsSHP, name, regionsSQL)\n call(cmd, shell=True)\n call(\"psql -h localhost -U %s -d %s -f %s\" % (self.user, name, regionsSQL), shell=True)\n call(\"rm %s\" % regionsSQL, shell=True)\n call(\"echo 
'CREATE INDEX ON regioni USING GIST (geom);'| psql -U %s -d %s\" % (self.user, name), shell=True)\n call(\"echo 'ANALYZE regioni;'| psql -U %s -d %s\" % (self.user, name), shell=True)", "def load():\n\n # To run this command type: 'python manage.py shell'\n # 'from map.views import load; load()'\n\n mapping = {\"productivi\": \"productivi\", \"mpoly\": \"MULTIPOLYGON\"}\n map_path = os.path.abspath('gis_django/fields_test/test_fields.shp')\n lm = LayerMapping(Map, map_path, mapping, transform=False, encoding=\"iso-8859-1\")\n lm.save(verbose=True)", "def get_bgs_in_district_geojson_filename(state=48, district=7, leg_body='US-REP'):\r\n state = \"{0:0>2}\".format(state)\r\n district = \"{0:0>2}\".format(district)\r\n \r\n state_abbr = str(states.mapping('fips', 'abbr')[state])\r\n district_abbr = leg_body + '-' + state_abbr + district\r\n geojson_path = 'static/geojson/'\r\n data_path = 'static/data/'\r\n shapfile_path = None\r\n bgs_in_district_fn = district_abbr + '-blockgroups'\r\n bgs_in_district_GeoJSON = geojson_path + bgs_in_district_fn + '.geojson'\r\n\r\n return bgs_in_district_GeoJSON", "def loadCountryGroupMappingFromFile(file):\n\treturn \\\n\tcompose(\n\t\tdict\n\t , partial(map, lambda line: (line[0], line[2].strip()))\n\t , partial(takewhile, lambda line: len(line) > 2 and line[0] != '')\n\t , lambda t: t[1]\n\t , lambda lines: (pop(lines), lines)\n\t , fileToLines\n \t , partial(join, getDataDirectory())\n\t)(file)", "def load_cityscapes(self, dataset_dir, subset):\n self.class_labels = {\n 'unlabeled':0,\n 'ego vehicle':1, \n 'rectification border':2,\n 'out of roi':3, \n 'static':4, \n 'dynamic':5, \n 'ground':6, \n 'road':7, \n 'sidewalk':8, \n 'parking':9, \n 'rail track':10, \n 'building':11, \n 'wall':12, \n 'fence':13, \n 'guard rail':14, \n 'bridge':15, \n 'tunnel':16, \n 'pole':17, \n 'polegroup':18, \n 'traffic light':19, \n 'traffic sign':20, \n 'vegetation':21, \n 'terrain':22, \n 'sky':23, \n 'person':24, \n 'rider':25, \n 'car':26, \n 'truck':27, \n 'bus':28, \n 'caravan':29, \n 'trailer':30, \n 'train':31, \n 'motorcycle':32, \n 'bicycle':33, \n 'license plate':34}\n \n annotation_dir = dataset_dir + 'gtFine_trainvaltest/' + subset + '_all.json'\n self.image_info = json.load(open(annotation_dir, 'r'))\n \n # Add classes\n for i in range(len(self.class_labels)):\n self.add_class(\"cityscape\", i, list(self.class_labels.keys())[i])", "def __load_geo(self):\n pass\n # process any splines? 
and turn them into arcs\n # http://www.mathopenref.com/constcirclecenter.html\n # find max dist between points\n # double it\n # select two segments\n # draw normal lines\n # find intersections, that is the center", "def load_cities (filename):\n if not os.path.isfile(filename):\n return None\n # try to decode a plain file\n try:\n with open(filename) as input:\n return [ json.loads(line) for line in input if line ]\n except:\n pass\n # try to decode a gzipped file\n try:\n with gzip.open(filename) as input:\n return [ json.loads(line) for line in input if line ]\n except:\n pass\n return None", "def prepare_data(self, file):\n maps = np.load(file)\n pred, gt, not_care = maps[-3:]\n return self.get_polygon(pred), self.get_polygon(gt), self.get_polygon(not_care)", "def __load_dxf(self):\n print('Loading file: %s' % self.__fname)\n dwg = dxfgrabber.readfile(self.__fname)\n lines = [item for item in dwg.entities if item.dxftype == 'LINE']\n arcs = [item for item in dwg.entities if item.dxftype == 'ARC']\n if self.__layer > -1:\n lines = [item for item in lines if item.layer == self.__layer]\n arcs = [item for item in arcs if item.layer == self.__layer]\n print('File read.')\n print('Loaded %i lines' % len(lines))\n print('Loaded %i arcs' % len(arcs))\n print('Loaded %i line segments, lines or arcs' %\n (len(lines)+len(arcs)))\n # get all points and Line and Arc using pycalculix entities\n print('Converting to pycalculix lines arcs and points ...')\n all_points, all_lines = self.__get_pts_lines(lines, arcs)\n print('Loaded %i line segments, lines or arcs' % len(all_lines))\n print('Loaded %i points' % len(all_points))\n # for point in all_points:\n # print('%s %s' % (point, point.lines))\n # for line in all_lines:\n # print('%s %s' % (line, line.points))\n\n # remove all lines that are not part of areas\n dangling_points = self.__dangling_points(all_points)\n pruned_geometry = bool(dangling_points)\n while dangling_points:\n for point in dangling_points:\n all_points.remove(point)\n print('Removed point= %s' % point)\n dangling_line = list(point.lines)[0]\n point.unset_line(dangling_line)\n if dangling_line in all_lines:\n all_lines.remove(dangling_line)\n print('Removed line= %s' % dangling_line)\n dangling_points = self.__dangling_points(all_points)\n if pruned_geometry:\n print('Remaining line segments: %i' % len(all_lines))\n print('Remaining points: %i' % len(all_points))\n\n # make line all_loops now\n all_loops = []\n line = all_lines[0]\n this_loop = geometry.LineLoop()\n while len(all_lines) > 0:\n this_loop.append(line)\n all_lines.remove(line)\n if this_loop.closed == True:\n all_loops.append(this_loop)\n this_loop = geometry.LineLoop()\n if all_lines:\n line = all_lines[0]\n continue\n point = line.pt(1)\n other_lines = point.lines - set([line])\n if len(other_lines) > 1:\n # note: one could exclude connected segment nodes\n # make disconnected line all_loops, then have another\n # loop to connect those disconnected line all_loops\n print('One point was connected to > 2 lines.')\n print('Only import simple part all_loops, or surfaces.')\n raise Exception('Import geometry is too complex')\n next_line = list(other_lines)[0]\n if line.pt(1) != next_line.pt(0):\n next_line.reverse()\n line = next_line\n\n # find exterior loops\n exterior_loops = []\n for ind, loop in enumerate(all_loops):\n other_loops = all_loops[ind+1:]\n other_loops.extend(exterior_loops)\n is_exterior = True\n for other_loop in other_loops:\n if loop.inside(other_loop):\n is_exterior = False\n break\n if 
is_exterior:\n # exterior must be clockwise\n if loop.ccw:\n loop.reverse()\n exterior_loops.append(loop)\n # remove the found part exterior loops from all_loops\n for exterior_loop in exterior_loops:\n all_loops.remove(exterior_loop)\n # each part in parts is a list of line all_loops\n # [exterior, hole1, hole2]\n parts = [[exterior_loop] for exterior_loop in exterior_loops]\n # now place the child hole loops after the part exterior loop\n for part_loops in parts:\n exterior_loop = part_loops[0]\n # find child holes\n for hole_loop in all_loops:\n if hole_loop.inside(exterior_loop):\n hole_loop.hole = True\n # holes must be ccw\n if not hole_loop.ccw:\n hole_loop.reverse()\n part_loops.append(hole_loop)\n # remove child holes from loop list\n for hole_loop in part_loops[1:]:\n all_loops.remove(hole_loop)\n\n # make parts\n parts_list = []\n for part_loops in parts:\n this_part = partmodule.Part(self.__fea)\n for ind, loop in enumerate(part_loops):\n is_hole = loop.hole\n start = loop[0].pt(0)\n this_part.goto(start.x, start.y, is_hole)\n for item in loop:\n if isinstance(item, geometry.Line):\n end = item.pt(1)\n this_part.draw_line_to(end.x, end.y)\n elif isinstance(item, geometry.Arc):\n end = item.pt(1)\n center = item.actr\n this_part.draw_arc(end.x, end.y, center.x, center.y)\n parts_list.append(this_part)\n print('Parts created: %i' % len(parts_list))\n return parts_list", "def load_cityscapes(path, fdr):\n dataset = Dataset(path, split='val', mode=\"fine\", target_type=[\"semantic\", \"instance\"])\n\n from PATH import SCRI_PATH as spath\n\n for image, (sseg, inst), name in dataset:\n image = np.array(image)\n sseg = gt_covert(sseg)\n inst = np.array(inst)\n if os.path.exists(spath + \"/\" + fdr + \"/\" + name + \"_scri.png\"):\n scribbles = np.array(Image.open(spath + \"/\" + fdr + \"/\" + name + \"_scri.png\"))\n else:\n scribbles = None\n # scribbles = scribble_convert(scribbles)\n yield name, image, sseg, inst, scribbles", "def load_geodata_containers(subsectie=None):\n if os.path.isfile(\"data/shp/Inzameling_huisvuil_080520.shp\"):\n source = gpd.read_file('data/shp/Inzameling_huisvuil_080520.shp')\n elif os.path.isfile(\"../data/shp/Inzameling_huisvuil_080520.shp\"):\n source = gpd.read_file('../data/shp/Inzameling_huisvuil_080520.shp')\n source = source[source['aanbiedwij'] ==\n 'Breng uw restafval naar een container voor restafval.']\n if subsectie:\n source = source[source['sdcode'].isin(list(subsectie))]\n return list(source.geometry)", "def load_country_code_data():\n name_conversion = {\n 'East Timor': 'Timor-Leste',\n 'Republic of the Congo': 'Congo (Kinshasa)',\n 'Ivory Coast': 'Cote d\\'Ivoire',\n 'Macedonia': 'North Macedonia',\n 'Myanmar': 'Burma',\n 'Republic of Serbia': 'Serbia',\n 'Taiwan': 'Taiwan*',\n 'The Bahamas': 'Bahamas',\n 'United Republic of Tanzania': 'Tanzania',\n 'United States of America': 'US'\n }\n\n shapefile = os.path.join('data', 'ne_110m_admin_0_countries.shp')\n\n gdf = gpd.read_file(shapefile)[['ADMIN', 'ADM0_A3', 'geometry']]\n gdf.columns = ['country', 'country_code', 'geometry']\n\n gdf.loc[gdf['country'].isin(name_conversion.keys()), 'country'] = gdf['country'].map(name_conversion)\n\n return gdf", "def load_mask_from_shapefile(filename, shape, transform):\n multipolygon, _ = load_shapefile2multipolygon(filename)\n mask = multipolygon2mask(multipolygon, shape, transform)\n return mask", "def taxi_zones(path, storage_options=None):\n zdf = pd.read_csv(path, storage_options=storage_options)\n zdf = zdf.drop(\"OBJECTID\", 
axis=\"columns\")\n zdf = zdf.set_index(\"LocationID\")\n return zdf", "def _from_gisdb(self):\n self._ways = gpd.read_postgis(sql=\"ways\", con=self._gisdb, geom_col=\"geometry\")\n self._nodes = pd.read_sql(sql=\"nodes\", con=self._gisdb)\n self._edges = pd.read_sql(sql=\"graph_edges\", con=self._gisdb)\n # graph_nodes = gpd.read_postgis(sql=\"graph_nodes\", con=self._gisdb, geom_col=\"geometry\")", "def load_data_file(self):\n with open(self.files['data'], 'r') as infile:\n data = json.load(infile)\n self.boundary_nodes = data['boundary_nodes']\n self.nodes = {int(k): v for k, v in data['nodes'].items()}\n self.levels = data['levels']\n infile.close()", "def read_postcode_sectors(path):\n with fiona.open(path, 'r') as pcd_sector_shapes:\n return [pcd for pcd in pcd_sector_shapes]", "def read_postcode_sectors(path):\n with fiona.open(path, 'r') as pcd_sector_shapes:\n return [pcd for pcd in pcd_sector_shapes]", "def get_all_districts():\n with open(district_data_dir + 'district-data.json') as f:\n district_dict = json.load(f)\n districts = set([])\n\n for date, data in district_dict.items():\n if date == '03/02/2020':\n continue\n districts.update(data.keys())\n\n # Remove unnecessary points\n districts.remove('total-infected')\n districts.remove('max-legend-value')\n districts.remove('splitPoints')\n return districts", "def load_map(self, filename):\n with open(filename, 'rb') as file:\n self.current_obstacles = pickle.load(file)\n self.current_goal = pickle.load(file)\n try:\n setstate(pickle.load(file))\n except EOFError:\n print(\"No random state stored\")", "def open_shapefile(file_path):\n datasource = ogr.Open(file_path)\n layer = datasource.GetLayerByIndex(0)\n print(\"Opening {}\".format(file_path))\n print(\"Number of features: {}\".format(layer.GetFeatureCount()))\n return datasource" ]
[ "0.63071585", "0.62551874", "0.62253094", "0.60826", "0.6047353", "0.5942383", "0.5731001", "0.5591916", "0.5538651", "0.54869246", "0.53558475", "0.5330665", "0.529415", "0.5255251", "0.5226903", "0.51566815", "0.514738", "0.5143", "0.51270646", "0.512447", "0.5090616", "0.5088032", "0.5078602", "0.5072706", "0.5068404", "0.5066697", "0.5066697", "0.5051659", "0.5048627", "0.5044142" ]
0.72876954
0
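The truncated negative at the top of this record describes the classic compass construction for recovering a circle's center: take two chords between sample points, draw their normals (perpendicular bisectors), and intersect them. A minimal, self-contained sketch of the same computation done algebraically; the sample points below are illustrative, not from the dataset:

def circle_center(p1, p2, p3):
    # Each condition |C - p1| == |C - pk| reduces to a linear equation in
    # the center (cx, cy); solve the resulting 2x2 system by Cramer's rule.
    (x1, y1), (x2, y2), (x3, y3) = p1, p2, p3
    a1, b1 = x2 - x1, y2 - y1
    c1 = (x2 ** 2 - x1 ** 2 + y2 ** 2 - y1 ** 2) / 2.0
    a2, b2 = x3 - x1, y3 - y1
    c2 = (x3 ** 2 - x1 ** 2 + y3 ** 2 - y1 ** 2) / 2.0
    det = a1 * b2 - a2 * b1
    if det == 0:
        raise ValueError('points are collinear')
    return (c1 * b2 - c2 * b1) / det, (a1 * c2 - a2 * c1) / det

print(circle_center((0.0, 0.0), (2.0, 0.0), (0.0, 2.0)))  # (1.0, 1.0)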
Extract the hidden message from the given image. Authenticate the hidden message by validating the HMAC hash sliced from it.
def get_msg(img):
    i = Image.open('%s.ste' % img)
    secret = stg.extract_msg(i)
    mac = secret.split('--:--')[0]
    print 'HMAC hex is: \n%s\n' % mac.encode('hex')
    data = secret.split('--:--')[1]
    print 'The hidden message is: \n%s\n' % data
    check_hmac(mac)
    i.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hide(self, img, message):\r\n encoded = img.copy()\r\n width, height = img.size\r\n index = 0\r\n\r\n message = message + '~~~'\r\n message_bits = \"\".join(tools.a2bits_list(message))\r\n\r\n npixels = width * height\r\n if len(message_bits) > npixels * 3:\r\n return \"\"\"Too long message (%s > %s).\"\"\" \"\"\"%\"\"\"\r\n (len(message_bits), npixels * 3)\r\n\r\n for row in range(height):\r\n for col in range(width):\r\n if index + 3 <= len(message_bits) :\r\n\r\n # Get the colour component.\r\n (r, g, b) = img.getpixel((col, row))\r\n\r\n # Change the Least Significant Bit of each colour component.\r\n r = tools.setlsb(r, message_bits[index])\r\n g = tools.setlsb(g, message_bits[index+1])\r\n b = tools.setlsb(b, message_bits[index+2])\r\n\r\n # Save the new pixel\r\n encoded.putpixel((col, row), (r, g , b))\r\n\r\n index += 3\r\n\r\n return encoded\r\n self.resultLbl.SetLabel(\"Message successfully encoded.\")", "def decode_with_esponce(img):\n h = httplib2.Http()\n resp, content = h.request(ESPONCE_URL, \"POST\", img.read())\n content = json.loads(content)\n return content.get(\"content\")", "def decode(decryption=None):\n\n key_to_encrypt = {'a': 'q', 'b': 'v', 'c': 'x', 'd': 'z', 'e': 'y', 'f': 'w', 'g': 'u', 'h': 't', 'i': 's',\n 'j': 'r',\n 'k': 'p', 'l': 'o', 'm': 'n', 'n': 'm', 'o': 'l', 'p': 'k', 'r': 'j', 's': 'i', 't': 'h',\n 'u': 'g', 'w': 'f',\n 'y': 'e', 'z': 'd', 'x': 'c', 'v': 'b', 'q': 'a',\n 'A': 'Q', 'B': 'V', 'C': 'X', 'D': 'Z', 'E': 'Y', 'F': 'W', 'G': 'U', 'H': 'T', 'I': 'S',\n 'J': 'R', 'K': 'P',\n 'L': 'O', 'M': 'N', 'N': 'M', 'O': 'L', 'P': 'K', 'R': 'J', 'S': 'I', 'T': 'H', 'U': 'G',\n 'W': 'F', 'Y': 'E',\n 'Z': 'D', 'X': 'C', 'V': 'B', 'Q': 'S',\n '1': '5', '2': '9', '3': '8', '4': '7', '5': '6', '6': '4', '7': '3', '8': '2', '9': '1',\n '.': ',', ',': '.', ':': ';', ';': ':', '?': '!', '!': '?', '-': '_', '_': '-', '(': ')',\n ')': '(',\n '%': '$', '$': '%', ' ': '&', '&': ' ', '+': '*', '*': '+'}\n\n k1 = key.Key(key_to_encrypt)\n reversed_key = k1.createReverseKey()\n\n entered_image = input(\"Image name with extension: \")\n img = Image.open(entered_image, 'r')\n\n decoded_message = ''\n data_from_image = iter(img.getdata())\n\n while (True):\n pixels = [value for value in data_from_image.__next__()[:3] +\n data_from_image.__next__()[:3] +\n data_from_image.__next__()[:3]]\n\n binary = ''\n\n for i in pixels[:8]:\n if (i % 2 == 0):\n binary += '0'\n else:\n binary += '1'\n\n decoded_message += chr(int(binary, 2))\n d1 = monoalphabetic_decryption.Decryption(reversed_key, decoded_message)\n message = d1.decrypt()\n if (pixels[-1] % 2 != 0):\n return message", "def verifyImageVerification( imageVerification ):\n if \"hash-algorithm\" in imageVerification:\n assert imageVerification[ \"hash-algorithm\" ] == \\\n \"ietf-sztp-conveyed-info:sha-256\",\\\n \"Unsupported hash-algorithm\"\n assert \"hash-value\" in imageVerification, \\\n \"Expected hash-value not present\"\n hashValue = imageVerification[ \"hash-value\" ]\n # Verify hashValue appears to be a yang:hex-string\n assert len( hashValue ) == 32 * 3 - 1 and \\\n all( c == ':' or c in string.hexdigits for c in hashValue ), \\\n \"hash-value invalid\"", "def img(self):\n return self.img_decode(self.img_msg_)", "def decode (self, secret_key, random_seed, message_length=math.inf):\n # seed the random number generator with the seed used to embed\n random.seed(random_seed)\n bytes_visited = {} # a dictionary of the unique bytes already visited\n color_offset = StegImage.color_offset # the color plane 
where the message exists\n recent_bits = [] # an array. each element is a single bit\n message = \"\"\n message_over = False\n character_offset = 0\n while ((len(bytes_visited) < message_length * self.binary_size) and not message_over) and len(bytes_visited) < (len(self.bytes) - 54)/3: # will try to decode one letter at a time until an error is thrown or it reaches the end of the image. (the algo has no idea when the message stops)\n index_of_byte = None\n while (index_of_byte is None or index_of_byte in bytes_visited): # if the byte is visited twice, in the embed algo, it just skips it the second time and moves on, so do the same when decoding\n index_of_byte = random.randint(self.offset, self.number_of_pixels * 3)\n index_of_byte += color_offset\n bytes_visited[index_of_byte] = True\n byte = self.binary_array[index_of_byte]\n bit = data_manipulation.get_bit_from_byte(byte, self.binary_size - 1) # get the last bit of the byte\n recent_bits.append(bit)\n\n if len(recent_bits) == StegImage.binary_size: # if an entire byte is stored:\n # attempt to decrypt\n try:\n letter = EncryptString.decrypt(recent_bits, secret_key, character_offset = character_offset) # if this throws an error, assume the end of the message has been reached\n # a letter has been successfully decrypted if it reaches this point\n message += letter\n character_offset += 1 # another character in the message has been found\n recent_bits = []\n except:\n # print(\"The end of the message has been reached or the message was not encoded successfully/the wrong decode parameters were given\")\n message_over = True # assume the emssage is over if an error ahs been reached\n #traceback.print_exc() # since an error is expected (a utf-8 decode error), don't print it\n\n return message", "def test_hiddenpart(self):\n testfile='hiddenpart.eml'\n try:\n tmpfile = tempfile.NamedTemporaryFile(\n suffix='hidden', prefix='fuglu-unittest', dir='/tmp')\n shutil.copy(\"%s/%s\" % (TESTDATADIR, testfile), tmpfile.name)\n\n user = '[email protected]'\n conffile = self.tempdir + \"/%s-filetypes.conf\" % user\n # the largefile in the test message is just a bunch of zeroes\n open(conffile, 'w').write(\n \"deny application\\/zip no zips allowed\")\n self.rulescache._loadrules()\n suspect = Suspect(\n '[email protected]', user, tmpfile.name)\n\n result = self.candidate.examine(suspect)\n if type(result) is tuple:\n result, message = result\n self.assertEqual(\n result, DELETE, 'hidden message part was not detected')\n\n finally:\n tmpfile.close()\n os.remove(conffile)", "def get_frame_extracted_image(img):\n\n max_window_size = 0.1\n steps = 25\n offset = 4\n img = re.remove_border(img, steps, max_window_size, offset)\n return img", "def decrypter(img_file, image_opener):\n image = image_opener(img_file)\n\n message = []\n image_data_iter = iter(image.getdata())\n\n while True:\n jpgs = [val for val in image_data_iter.__next__()[:3] +\n image_data_iter.__next__()[:3] +\n image_data_iter.__next__()[:3]] # List comprehension\n\n def not_a_num(val):\n \"\"\"Filters out parameters that are not numbers\n\n This function returns false if the parameter not a number, which\n will be used in conjunction with the filter() function.\n\n Args:\n val: the parameter to check\n Returns:\n False if it is not a number, True otherwise\n \"\"\"\n if math.isnan(val):\n return False\n else:\n return True\n\n jpgs = list(filter(not_a_num, jpgs)) # filter() higher order function\n\n binary = ''\n\n for i in jpgs[:8]:\n if i % 2 == 0:\n binary = (lambda zero: zero + 
\"0\")(binary) # lambda\n else:\n binary = (lambda one: one + \"1\")(binary) # lambda\n\n letter = chr(int(binary, 2))\n message.append(letter)\n if jpgs[-1] % 2 != 0:\n text = reduce(lambda a, b: a + b, message) # reduce() and lambda\n return text", "def decrypt_message(encrypted_message):", "def decrypt_faces(msg, nkey=key):\n newmsg = msg[:-20]\n obj = DES.new(nkey, DES.MODE_ECB)\n return obj.decrypt(newmsg)", "def unpack(\n data: bytes,\n crypto: AuxiliaryStreamCrypto,\n client_data: bool = False\n) -> bytes:\n # Split header from rest of data\n header, payload, hmac = data[:4], data[4:-32], data[-32:]\n\n parsed = aux_header_struct.parse(header)\n\n if not crypto.verify(header + payload, hmac):\n raise AuxiliaryPackerException('Hash verification failed')\n\n if not client_data:\n plaintext = crypto.decrypt(payload)\n else:\n plaintext = crypto.decrypt_client(payload)\n\n # Cut off padding, before returning\n return plaintext[:parsed.payload_size]", "def decode(image):\n bitstream = ''\n for row in image:\n for pixel in row:\n for intensity in pixel:\n # Use get_bit function from bits.py library\n # to select the LSB of each intensity value\n bitstream += bits.get_bit(intensity,0)\n # Decode message using bits_to_message function\n message = bits.bits_to_message(bitstream)\n return message", "def verifyBootImage( template, sztpBootImage ):\n def verifyImageVerification( imageVerification ):\n \"\"\"Verify instance of image-verification is correct\"\"\"\n if \"hash-algorithm\" in imageVerification:\n assert imageVerification[ \"hash-algorithm\" ] == \\\n \"ietf-sztp-conveyed-info:sha-256\",\\\n \"Unsupported hash-algorithm\"\n assert \"hash-value\" in imageVerification, \\\n \"Expected hash-value not present\"\n hashValue = imageVerification[ \"hash-value\" ]\n # Verify hashValue appears to be a yang:hex-string\n assert len( hashValue ) == 32 * 3 - 1 and \\\n all( c == ':' or c in string.hexdigits for c in hashValue ), \\\n \"hash-value invalid\"\n\n def verifyImageVerificationList( template, sztpImageVerification ):\n \"\"\"Verify image-verification list is correct\"\"\"\n assert isinstance( sztpImageVerification, list ), \\\n \"Expected list\"\n for imageVer in sztpImageVerification:\n assert verifyDictTypes( template, imageVer ), \"Unexpected value types\"\n assert set( imageVer.keys() ).issubset( set( template.keys() ) ), \\\n \"Unexpected keys in dict\"\n verifyImageVerification( imageVer )\n\n mandatory = [ \"download-uri\" ]\n assert isinstance( sztpBootImage, dict ), \"Expected dict\"\n assert set( sztpBootImage.keys() ).issubset( template.keys() ), \\\n \"Unexpected keys in dict\"\n assert verifyDictTypes( template, sztpBootImage ), \\\n \"Unexpected value types\"\n assert set( mandatory ).issubset( sztpBootImage ), \\\n \"Mandatory keys not present\"\n if \"image-verification\" in sztpBootImage:\n verifyImageVerificationList( template[ \"image-verification\" ][ 0 ],\n sztpBootImage[ \"image-verification\" ] )", "def cover(img: Image, message: bitarray) -> Image:\n width, height = img.size\n check_image_width(width, RGB_PIXEL, message)\n pixels = img.load()\n\n row = random.randint(0, height) # Randomly chooses row.\n i = 0 # Tracks hidden bits\n\n # If Image consist of 8-bit pixels\n if img.mode == \"P\":\n offset = generate_offset(width, P_PIXEL, message)\n for x in range(offset, width):\n p = pixels[x,row]\n if i < len(message):\n p = modify_byte(p, message[i])\n i += 1\n pixels[x,row] = p\n generate_key(row, message, offset * P_PIXEL)\n # If Image consists of 3x8-bit 
pixels\n elif img.mode == \"RGB\":\n offset = generate_offset(width, RGB_PIXEL, message)\n for x in range(offset, width):\n r, g, b = pixels[x,row]\n if i < len(message):\n r = modify_byte(r, message[i])\n i += 1\n if i < len(message):\n g = modify_byte(g, message[i])\n i += 1\n if i < len(message):\n b = modify_byte(b, message[i])\n i += 1\n pixels[x,row] = (r, g, b)\n generate_key(row, message, offset * RGB_PIXEL)\n # If Image consists of 4x8-bits pixels\n elif img.mode == \"RGBA\":\n offset = generate_offset(width, RGBA_PIXEL, message)\n for x in range(offset, width):\n r, g, b, a = pixels[x,row]\n if i < len(message):\n r = modify_byte(r, message[i])\n i += 1\n if i < len(message):\n g = modify_byte(g, message[i])\n i += 1\n if i < len(message):\n b = modify_byte(b, message[i])\n i += 1\n if i < len(message):\n a = modify_byte(a, message[i])\n i += 1\n pixels[x,row] = (r, g, b, a)\n generate_key(row, message, offset * RGBA_PIXEL)\n\n return img", "def test_hiddenbinary(self):\n # copy file rules\n tmpfile = tempfile.NamedTemporaryFile(\n suffix='virus', prefix='fuglu-unittest', dir='/tmp')\n shutil.copy(TESTDATADIR + '/binaryattachment.eml', tmpfile.name)\n suspect = Suspect(\n '[email protected]', '[email protected]', tmpfile.name)\n\n result = self.candidate.examine(suspect)\n if type(result) is tuple:\n result, message = result\n tmpfile.close()\n self.assertEqual(result, DELETE)", "def serve_detached_gpg_signature_unknown_key():\n message = b\"I am a meaningless message detach-signed by a throwaway key :)\\n\"\n\n signature = detached_sign_data_with_throwaway_gpg_key(message)\n\n return Response(signature, mimetype=\"text/plain\")", "def decode(img):\r\n ints=[];#A list of ints that will contain all of our alpha values.\r\n width,height=img.size #Get the width and the height of my image.\r\n pixelData=ImageUtilities.getPixelList(img); #Get all of the pixels in the image and put them into a list.\r\n for y in range(height): #Iterate across the pixels from top to bottom.\r\n for x in range(width):#Iterate across out image from left to right.\r\n alpha=ImageUtilities.getAlphaFromList(img,pixelData,x,y); #Referenced the dumped contents\r\n if(alpha==255): #If the alpha of our pixel is 255....\r\n continue; #I don't want 255 values because that means that is not part of my message.\r\n ints.append(alpha); #Get the alpha value and append it to my list of ints.\r\n\r\n msg=\"\"; #Make an empty string to store our decoded message.\r\n for value in ints: #Iterate across my list of ints. 
(For each int in my list...)\r\n msg+=chr(value); #Convert my int to it's character value and add it back to my message.\r\n return msg; #Return my message string.\r", "def get_old_hash(img):\n try:\n old_hash = seals_data[img.split('.')[0]]['hash']\n except KeyError:\n old_hash = None\n return old_hash", "def decode(conf_dict, image):\n # FIXME\n msg_prosthesis = 'a'*(image.size[0]*image.size[1]//conf_dict['frequency'])\n msg_str = ''\n colors = ['red', 'green', 'blue']\n img_pixels = image.load()\n for pixel_info in PixelIter(conf_dict, msg_prosthesis):\n if pixel_info[0] == 'whatever':\n continue\n xy = (pixel_info[1], pixel_info[2])\n which_color = colors.index(pixel_info[0])\n letter_ord = img_pixels[xy][which_color]\n msg_str += chr(letter_ord)\n return msg_str", "def imageparts(msg):\n # Don't want a set here because we want to be able to process them in\n # order.\n return filter(lambda part:\n part.get_content_type().startswith('image/'),\n msg.walk())", "def traffic_sign_detection_challenge(img_in):\n img = img_in.copy()\n clean_picture = cv2.bilateralFilter(img, 9, 75, 75)\n return traffic_sign_detection(clean_picture, light_size=(10, 30), light_offset=10)", "def M12Nut(image):\n kernel = np.ones((5, 5), np.uint8)\n image = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel, iterations=4)\n\n parameters = cv2.SimpleBlobDetector_Params()\n detector = cv2.SimpleBlobDetector_create(parameters=parameters)\n keypoints = detector.detect(image)\n new_image = cv2.drawKeypoints(image, keypoints, np.array([]), (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n for i in range(len(keypoints)):\n print(\"Keypoint: \", keypoints[i].pt)\n cv2.imshow(\"Keypoints\", new_image)\n cv2.waitKey(1000)\n cv2.destroyAllWindows()\n x, y = keypoints[0].pt\n\n return x, y", "def splitImage(image, shareFile1=\"share1.tif\", shareFile2=\"share2.tif\"):\n\n _, expandedPad = makePad(Image.open(image).size, shareFile1)\n expandedCiphertext = makeCryptograph(str(image), shareFile2)\n print(expandedPad,expandedCiphertext)\n return expandedPad, expandedCiphertext", "async def unpack_message(\n auth_manager: AuthKeyManager,\n schema: Schema,\n encrypted_message: bytes\n) -> EncryptedMessage:\n auth_key = await get_auth_key(auth_manager, encrypted_message)\n\n msg_key = load_int128(encrypted_message[8:]).value\n\n key_pair = generate_key_iv(\n auth_key,\n msg_key,\n key_type='client'\n )\n\n message_bytes = ige256_decrypt(\n encrypted_message[24:],\n key_pair.key,\n key_pair.iv\n )\n\n return await load_message(schema, message_bytes)", "def recognize_text_from_image_bytes(image_bytes: str):\n analyze_endpoint_url = service_constants.VISION_SERVICE_URL + \"recognizeText\"\n\n headers = {\n # subscription key must accompany every call\n 'Ocp-Apim-Subscription-Key': service_constants.OCP_APIM_SUBSCRIPTION_KEY,\n # when sending image bytes, set this content type\n 'Content-Type': 'application/octet-stream'\n }\n\n # if the text is handwritten, toggle this flag\n params = {'handwriting': 'false'}\n\n # make the POST request\n response = requests.post(analyze_endpoint_url, headers=headers, params=params, data=image_bytes)\n\n # if an error occurred\n response.raise_for_status()\n\n # json object from the body\n analysis = response.json()\n\n # This is the structure of the result dict\n # result[\"language\"]\n # result[\"orientation\"]\n # result[\"textAngle\"]\n # result[\"regions\"][0][\"boundingBox\"]\n # result[\"regions\"][0][\"lines\"][0][\"boundingBox\"]\n # 
result[\"regions\"][0][\"lines\"][0][\"words\"][0][\"boundingBox\"]\n # result[\"regions\"][0][\"lines\"][0][\"words\"][0][\"text\"]\n\n return analysis", "def read_message(msg_cipher: bytes, crypto: object) -> Tuple[str, str]:\n\n ciph_in = msg_cipher[:-64]\n hmac = msg_cipher[-64:].decode('utf-8')\n plaintext = crypto.decrypt(ciph_in).decode('utf-8')\n plaintext = plaintext.strip('\\0')\n return plaintext, hmac", "def decode_message_part(message_part):\n return base64.urlsafe_b64decode(message_part['body']['data']).decode().strip()", "def extract(img):\n # perform selective search\n img_lbl, regions = selectivesearch.selective_search(\n img, scale=500, sigma=0.9, min_size=1)\n\n # delete the region which contains whole image\n regions = sorted(regions, key=lambda x: x['size'], reverse=True)\n\n candidates = []\n\n for r in regions:\n # excluding biggest retangle which contains whole image\n if r['rect'][0] == 0 and r['rect'][1] == 0:\n continue\n # excluding same rectangle (with different segments)\n if r['rect'] in candidates:\n continue\n # excluding parts that are too small\n x, y, w, h = r['rect']\n\n if w * h < 9:\n continue\n\n # ecludeing parts too sharp\n if w > 100 * h or h > 100 * w:\n continue\n\n candidates.append(r['rect'])\n\n # remove rectangles opverlap each other with nms technique\n candidates = nms.non_max_suppression_slow(candidates)\n\n return candidates", "def image_pull_secret(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"image_pull_secret\")" ]
[ "0.6304911", "0.5306204", "0.5240656", "0.5205463", "0.51036364", "0.5082488", "0.50332326", "0.50201464", "0.49953687", "0.4965542", "0.4962028", "0.49606332", "0.49157923", "0.49048188", "0.4883517", "0.48731127", "0.48006538", "0.47986904", "0.47594568", "0.47593972", "0.47311398", "0.47309035", "0.4730177", "0.47285545", "0.47007984", "0.46837762", "0.4669044", "0.46645394", "0.46549267", "0.46500993" ]
0.8221424
0
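For reference, the positive document in this record recovers a payload shaped like '<mac>--:--<data>' and then checks the MAC. A hedged Python 3 sketch of that split-and-verify step; the key and payload below are made up, and hmac.compare_digest is a hardening choice, not what the original code uses:

import hashlib
import hmac

def split_and_verify(secret, key):
    # Assumed payload layout from the document above: b'<mac>--:--<data>'.
    mac, data = secret.split(b'--:--', 1)
    expected = hmac.new(key, data, hashlib.sha256).digest()
    # compare_digest avoids the timing side channel of a plain == check.
    return hmac.compare_digest(expected, mac)

demo = hmac.new(b'key', b'hello', hashlib.sha256).digest() + b'--:--hello'
print(split_and_verify(demo, b'key'))  # True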
Check if the given HMAC is valid by creating a new HMAC with the supplied password and the data.
def check_hmac(mac, data):
    h_mac = hmac.new(args['m'], bytes(data), digestmod=hashlib.sha256).digest()
    print 'HMAC validation: \n%s\n' % str(h_mac == mac)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def verify_hmac(self, payload):\r\n \r\n new_hmac = hmac.new(bytes(self.passphrase), b'%s'%(payload['eiv']) , hashlib.sha224)\r\n new_hmac.update(b'%s'%(payload['enid']))\r\n new_hmac.update(b'%s'%(payload['ed']))\r\n new_hmac.update(self.sessionID)\r\n #print(new_hmac.digest())\r\n #print(b'%s'%(payload['hmac']))\r\n if b'%s'%(payload['hmac']) == new_hmac.digest():\r\n return \"Successful Decryption\"\r\n return \"Failed Authentication\"", "def hmac_valid(*, hmac_key: Optional[str] = None) -> bool:\n if hmac_key is None:\n hmac_key = HMAC_KEY\n sig = request.headers.get('X-FS-Signature')\n if not sig:\n return False\n # get_data could be huge, check content length?\n calc_sig = hmac.new(hmac_key, request.get_data(), 'sha256').hexdigest()\n return hmac.compare_digest(f'sha256={calc_sig}', sig)", "def check_hmac_signature(self, message):\n data = message[:-20]\n checksum = message[-20:]\n hmac_data = hmac.new(bytes(self.settings['hmac_key'].encode('utf-8')), bytes(data), hashlib.sha1)\n\n return True if hmac_data.digest() == checksum else False", "def check_webapp_signature(token: str, init_data: str) -> bool:\n try:\n parsed_data = dict(parse_qsl(init_data, strict_parsing=True))\n except ValueError: # pragma: no cover\n # Init data is not a valid query string\n return False\n if \"hash\" not in parsed_data:\n # Hash is not present in init data\n return False\n hash_ = parsed_data.pop(\"hash\")\n\n data_check_string = \"\\n\".join(\n f\"{k}={v}\" for k, v in sorted(parsed_data.items(), key=itemgetter(0))\n )\n secret_key = hmac.new(key=b\"WebAppData\", msg=token.encode(), digestmod=hashlib.sha256)\n calculated_hash = hmac.new(\n key=secret_key.digest(), msg=data_check_string.encode(), digestmod=hashlib.sha256\n ).hexdigest()\n return calculated_hash == hash_", "def check_signature(signature, data):\n if SIGNATURE_DISABLED:\n return True\n\n # check signature\n try:\n digest = hmac.new(\n SEGMENT_SHARED_SECRET.encode(), msg=data, digestmod=hashlib.sha1\n ).hexdigest()\n if digest == signature:\n return True\n else:\n print(f\"Invalid signature. 
Expected {digest} but got {signature}\")\n except KeyError:\n pass\n\n return False", "def validate_password(data):\n\n if \"password\" not in data:\n return False\n password = data[\"password\"]\n if len(password) < 4 or len(password) > 16:\n return False\n\n return True", "def test_add_hmac_signature_post_with_data(self):\n resp = self.client.post(\"/\", data=self.client_data)\n status_code = resp.status_code\n response = resp.data\n self.assertTrue(status_code == 200)\n self.assertTrue(\"Signature=\" in response.decode(self.encoding))", "def check_sign(secret_key, project_id, encoded_data, auth_sign):\n sign = hmac.new(six.b(str(secret_key)))\n sign.update(six.b(project_id))\n sign.update(six.b(encoded_data))\n return sign.hexdigest() == auth_sign", "def _validate_hash(data, shasum):\n from hashlib import sha1\n digest = sha1(data).hexdigest()\n if digest == shasum:\n return True\n else:\n print('Invalid shasum, got: {} , expected: {}'.format(digest, shasum))\n return False", "def verify_hmac_sha1(request):\n base_string = generate_signature_base_string(request)\n sig = hmac_sha1_signature(\n base_string, request.client_secret, request.token_secret)\n return hmac.compare_digest(sig, request.signature)", "def create_hmac(mac_pass, msg_bytes):\n return hmac.new(\n mac_pass, msg_bytes, digestmod=hashlib.sha256).digest()", "def _hmac_create(self, password, shared_key):\n hmac_value = base64.b64encode(hmac.new(\n smart_str(shared_key),\n smart_str(password),\n hashlib.sha512).digest())\n return hmac_value", "def _hmac_create(self, password, shared_key):\n hmac_value = base64.b64encode(hmac.new(\n smart_str(shared_key),\n smart_str(password),\n hashlib.sha512).digest())\n return hmac_value", "def verify(self, msgAndDigest, additionalData=b''):\n if msgAndDigest['alg'] != self._algorithm:\n raise ValueError(\"Currently only HMAC_SHA2 is supported as an algorithm\")\n expected = bytes(self.mac(msgAndDigest['msg'], additionalData=additionalData)['digest'], 'utf-8')\n received = bytes(msgAndDigest['digest'], 'utf-8')\n # we compare the hash instead of the direct value to avoid a timing attack\n return sha2(expected).digest() == sha2(received).digest()", "def check_signature(signature, *args, **kwargs):\n return hmac.compare_digest(signature, create_signature(*args, **kwargs))", "def check_signature(signature, key, data):\n if isinstance(key, type(u'')):\n key = key.encode()\n \n digest = 'sha1=' + hmac.new(key, data, hashlib.sha1).hexdigest()\n \n # Covert everything to byte sequences\n if isinstance(digest, type(u'')):\n digest = digest.encode()\n if isinstance(signature, type(u'')):\n signature = signature.encode()\n \n return werkzeug.security.safe_str_cmp(digest, signature)", "def hmac(key, data, algorithm):\n if algorithm == CryptographicMeta.SHA1:\n algorithm = hashlib.sha1\n else:\n raise NotImplementedError\n return hmac.new(key, data, algorithm).digest()", "def verify_signature(self, key, data):\n verify_signature(self, key, data)", "def valid_pw(name, password, h):\n salt = h.split(',')[0]\n return h == make_pw_hash(name, password, salt)", "def valid_pw(password, h):\n salt = h.split(',')[0]\n return h == make_pw_hash(password, salt)", "def test_add_hmac_signature_post_without_data(self):\n resp = self.client.post(\"/\")\n status_code = resp.status_code\n self.assertTrue(status_code == 403)", "def passwd_check(request, passphrase):\n import hashlib\n hashed_passphrase = request.registry.settings.get('phoenix.password', u'')\n \n try:\n algorithm, salt, pw_digest = 
hashed_passphrase.split(':', 2)\n except (ValueError, TypeError):\n return False\n\n try:\n h = hashlib.new(algorithm)\n except ValueError:\n return False\n\n if len(pw_digest) == 0:\n return False\n\n try:\n h.update(passphrase.encode('utf-8') + salt.encode('ascii'))\n except:\n return False\n\n return h.hexdigest() == pw_digest", "def check_password(pwhash, password):\n if not pwhash or not password:\n return False\n\n if isinstance(password, unicode):\n password = password.encode('utf-8')\n\n if pwhash.count('$') < 2:\n return False\n\n method, salt, hashval = pwhash.split('$', 2)\n\n if method == 'plain':\n return hashval == password\n elif method == 'md5':\n h = md5()\n elif method == 'sha1':\n h = sha1()\n else:\n return False\n\n h.update(salt)\n h.update(password)\n return h.hexdigest() == hashval", "def has_valid_signature(method, headers_dict, body_dict, access_key, secret_key):\r\n _, expected_signature, _ = generate_signed_message(\r\n method, headers_dict, body_dict, access_key, secret_key\r\n )\r\n\r\n authorization = headers_dict[\"Authorization\"]\r\n auth_token, post_signature = authorization.split(\":\")\r\n _, post_access_key = auth_token.split()\r\n\r\n if post_access_key != access_key:\r\n log.error(\"Posted access key does not match ours\")\r\n log.debug(\"Their access: %s; Our access: %s\", post_access_key, access_key)\r\n return False\r\n\r\n if post_signature != expected_signature:\r\n log.error(\"Posted signature does not match expected\")\r\n log.debug(\"Their sig: %s; Expected: %s\", post_signature, expected_signature)\r\n return False\r\n\r\n return True", "def validate(self, data):\n password = data['password']\n if data['password'] == data['password2'] and re.fullmatch(r'[A-Za-z0-9@#$%^&+=]{8,}', password):\n return data\n raise serializers.ValidationError(\"Password should be match and password must have number,special char,1-capital,1-small and min 8 char\")", "def digest_is_valid(cls, digest_received, message_received):\n digest_received = digest_received or ''\n message_digest = cls._get_hex_digest(message_received, cls.get())\n\n # hmac.compare_digest protects against timing attacks\n if not hmac.compare_digest(digest_received, message_digest):\n return False\n return True", "def check(cls, challenge, solution, secretKey, hmacKey):\n hmacIsValid = False\n\n if not solution:\n return hmacIsValid\n\n logging.debug(\"Checking CAPTCHA solution %r against challenge %r\"\n % (solution, challenge))\n try:\n decoded = urlsafe_b64decode(challenge)\n hmacFromBlob = decoded[:20]\n encBlob = decoded[20:]\n hmacNew = crypto.getHMAC(hmacKey, encBlob)\n hmacIsValid = hmacNew == hmacFromBlob\n except Exception:\n return False\n finally:\n if hmacIsValid:\n try:\n answerBlob = secretKey.decrypt(encBlob)\n\n timestamp = answerBlob[:12].lstrip('0')\n then = cls.sched.nextIntervalStarts(int(timestamp))\n now = int(time.time())\n answer = answerBlob[12:]\n except Exception as error:\n logging.warn(error.message)\n else:\n # If the beginning of the 'next' interval (the interval\n # after the one when the CAPTCHA timestamp was created)\n # has already passed, then the CAPTCHA is stale.\n if now >= then:\n exp = schedule.fromUnixSeconds(then).isoformat(sep=' ')\n raise CaptchaExpired(\"Solution %r was for a CAPTCHA \"\n \"which already expired at %s.\"\n % (solution, exp))\n if solution.lower() == answer.lower():\n return True\n return False", "def check_password(self, password: str) -> bool:\n\n return hmac.compare_digest(\n bytes.fromhex(self.hash),\n pbkdf2_hmac('sha1', 
password.encode(), bytes.fromhex(self.salt), 100000))", "def check_auth(_, http_password):\n return (password is not None) and (password == http_password)", "def create_hmac_sha_256_signature(api_key_secret, signing_data, timestamp, nonce):\n key_nonce = \\\n hmac.new(codecs.decode(api_key_secret, 'hex_codec'), codecs.decode(nonce, 'hex_codec'), sha256).digest()\n key_date = hmac.new(key_nonce, str(timestamp).encode(), sha256).digest()\n signature_key = hmac.new(key_date, u'vcode_request_version_1'.encode(), sha256).digest()\n return hmac.new(signature_key, signing_data.encode(), sha256).hexdigest()" ]
[ "0.67652047", "0.6743941", "0.64469117", "0.63084877", "0.6117735", "0.60767394", "0.6014057", "0.60025823", "0.5932569", "0.58754456", "0.5839325", "0.5813827", "0.5786236", "0.57454073", "0.5742142", "0.57346916", "0.57076555", "0.56693023", "0.5610851", "0.5534866", "0.552412", "0.5506381", "0.5462397", "0.5455215", "0.54004294", "0.5385295", "0.53810155", "0.53783494", "0.53781974", "0.53556335" ]
0.7753446
0
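The query in this record speaks of building the MAC from a supplied password, while the document feeds args['m'] straight into hmac.new as the key. A hedged sketch of the stronger variant, where the password is first stretched into a key with PBKDF2 — an assumption for illustration, not what the original code does:

import hashlib
import hmac
import os

def make_mac(password, data, salt):
    # Derive a fixed-length key from the password, then MAC the data with it.
    key = hashlib.pbkdf2_hmac('sha256', password, salt, 100000)
    return hmac.new(key, data, hashlib.sha256).digest()

salt = os.urandom(16)
tag = make_mac(b'hunter2', b'payload', salt)
print(hmac.compare_digest(tag, make_mac(b'hunter2', b'payload', salt)))  # True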
If s is the number of sides in a polygon, then the formula for the nth s-gonal number is n*((s-2)*n - (s-4))/2.
def polygonal_number(s, n):
    return (n*n*(s-2)-n*(s-4))/2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def area_polygon(n, s):\n area = ((float(1)/float(4)) * n * s ** 2) / (math.tan(math.pi / n))\n return area", "def polysum(n, s):\n area = 0\n \n #avoiding division by zero\n if n != 0: \n area = (0.25 * n * (s**2)) / math.tan(math.pi / n)\n perimeter = n * s\n \n return (round(area + perimeter**2, 4))", "def polysum(n, s):\n\n import math\n\n area = 0.25*n*s**2/math.tan(math.pi/n)\n peri = s*n\n summary = area + peri**2\n return round(summary, 4)", "def polygon_gen(s):\n f = functools.partial(polygonal_number, s)\n return itertools.imap(f, itertools.count(1))", "def pentagon(n) -> int:\n\n return (n * (3 * n - 1)) // 2", "def polygon(t, length, n):\n for i in range(n):\n t.fd(length)\n t.lt(360/n)", "def polygon(t, n):\n angle = 360/n\n for i in range(n):\n t.fd(100)\n t.lt(angle)", "def len_func(polygon):\n ret=[]\n N=len(polygon)\n for i in range(1,N):\n l = ((polygon[i][0]-polygon[i-1][0])**2 + (polygon[i][1]-polygon[i-1][1])**2 )**0.5\n ret.append(l)\n l = ((polygon[0][0]-polygon[N-1][0])**2 + (polygon[0][1]-polygon[N-1][1])**2 )**0.5\n ret.append(l)\n return ret", "def polygon(n,r):\n \n window = turtle.Screen()\n\n david = turtle.Turtle()\n david.pensize(2)\n\n a = float(360 / n) \t\t #this is the angle the turtle will turn each time\n l = 2 * (math.sin(math.radians(a / 2)) * r) #this is the length of the sides\n\n david.penup()\n david.speed(0)\n david.right(90)\n david.forward(r * math.cos(math.radians(a / 2)))\n david.right(90)\n david.forward(l / 2)\n david.left(180)\n david.pendown()\n david.speed(1/2)\n\n for x in range(n):\n david.forward(l)\n david.left(a)", "def calculate_S(func, a, b, N):\n # Trapezoid width\n h = (b - a)/N\n\n # Every even slice\n new_part = func(a) + func(b)\n for i in range(2, N, 2):\n new_part += 2 * func(a + i*h) \n \n return 1/3. 
* new_part", "def pentagonal(n):\n return (n * ((3 * n) - 1)) / 2", "def draw_poly(t, n, sz):\n\tfor side in range(n):\n\t\tangle = (360/n)\n\t\tt.pendown()\n\t\tt.forward(sz)\n\t\tt.right(angle)", "def pentagonal(n: int) -> int:\n return int(n * (3 * n - 1) / 2)", "def angle_calc(sides):\n return 360//sides", "def square_triangle(sides: list) -> float:\n h_per = (sides[0] + sides[1] + sides[2]) / 2 #half-perimetr\n square = math.sqrt (h_per * (h_per- sides[0]) * (h_per - sides[1]) * (h_per - sides[2]))\n return square", "def fs2ps2D(px, s):\n\t\tsfun = psarclength(px)\t\n\t\treturn sfun-s", "def draw_poly(t, n, sz):\r\n\r\n\tfor i in range(n):\r\n\t\tt.forward(sz)\r\n\t\tt.left(360/n)", "def special_pythagorean_triplet(s):\n\tfor a in xrange(1, s / 3):\n\t\tfor b in xrange(a + 1, s - a):\n\t\t\tc = s - a - b;\n\t\t\tif a ** 2 + b ** 2 == c ** 2:\n\t\t\t\treturn (a, b, c)", "def draw_poly(t, n, sz):\r\n angle = 180 - (n - 2) * 180 / n\r\n for i in range(n):\r\n t.forward(sz)\r\n t.left(angle)", "def poly_nth(f, n):\n if n < 0 or n > len(f)-1:\n raise IndexError\n else:\n return f[zzx_degree(f)-n]", "def sw(n):\n return 4*n*n + 2*n + 1", "def sgfxy2p(s, N):\n x = sgf_coord.index(s[0])\n y = sgf_coord.index(s[1])\n\n p = rc2p(y + 1, x + 1, N)\n #print('x:{} y:{} p:{}'.format(x, y,p))\n return p", "def _rectangles(m, n):\n return m * (m+1) * n * (n+1) // 4", "def polygonpts(nSides, radius=1.0):\n\treturn [[cos(theta)*radius, sin(theta)*radius] for theta in frange(0, twopi, nSides+1)[:-1] ]", "def shape_function(self, idx, x):\n if idx == 0:\n return (1-x)/2\n elif idx == 1:\n return (1+x)/2\n elif idx == 2:\n return (x**2-1)*sqrt(3./2)/2\n elif idx == 3:\n return (x**2-1)*x*sqrt(5./2)/2\n elif idx == 4:\n return (x**2-1)*(5*x**2-1)*sqrt(7./2)/8\n elif idx == 5:\n return (x**2-1)*(7*x**2-3)*sqrt(9./2)/8\n elif idx == 6:\n return (x**2-1)*(21*x**4-14*x**2+1)*sqrt(11./2)/16\n elif idx == 7:\n return (x**2-1)*(33*x**4-30*x**2+5)*sqrt(13./2)/16\n elif idx == 8:\n return (x**2-1)*(429*x**6-495*x**4+135*x**2-5)*sqrt(15./2)/128\n elif idx == 9:\n return (x**2-1)*(715*x**6-1001*x**4+385*x**2-35)*sqrt(17./2)/128\n elif idx == 10:\n return (x**2-1)*(2431*x**8-4004*x**6+2002*x**4-308*x**2+7)*sqrt(19./2)/256\n raise NotImplementedError(\"Such shape function is not implemented yet (i=%d)\" % i)", "def evaluate(s):\n # 5 is the only integer that can be placed in middle because all others\n # would result in excess ofthe magic number 15 in one ofthediagonals, cols,\n # or rows\n pre = [[[8, 1, 6], [3, 5, 7], [4, 9, 2]],\n [[6, 1, 8], [7, 5, 3], [2, 9, 4]],\n [[4, 9, 2], [3, 5, 7], [8, 1, 6]],\n [[2, 9, 4], [7, 5, 3], [6, 1, 8]],\n [[8, 3, 4], [1, 5, 9], [6, 7, 2]],\n [[4, 3, 8], [9, 5, 1], [2, 7, 6]],\n [[6, 7, 2], [1, 5, 9], [8, 3, 4]],\n [[2, 7, 6], [9, 5, 1], [4, 3, 8]]\n ]\n\n totals = []\n for p in pre:\n total = 0\n for p_row, s_row in zip(p, s):\n for i, j in zip(p_row, s_row):\n if not i == j:\n total += abs(i - j) # or += max([i, j]) - min([i, j])\n totals.append(total)\n return min(totals)", "def sumn(n):\n return n * (n + 1) // 2", "def fourth_poly(a, b, c, d, e):\n return lambda z: a*z**4 + b*z**3 + c*z**2 + d*z + e", "def area_triangle_sss(side1,side2,side3):\n semi_perim=(side1+side2+side3)/2.0\n return math.sqrt(semi_perim*\n (semi_perim - side1)*\n (semi_perim - side2)*\n (semi_perim - side3)\n )", "def sz_operator(n_spatial_orbitals):\n if not isinstance(n_spatial_orbitals, int) or n_spatial_orbitals < 0:\n raise TypeError(\"n_orbitals must be specified as an integer\")\n\n sz_up = 
FermionOperator()\n sz_down = FermionOperator()\n for orbit in range(n_spatial_orbitals):\n sz_up += number_operator(None, up_index(orbit), 0.5)\n sz_down += number_operator(None, down_index(orbit), 0.5)\n\n return sz_up - sz_down" ]
[ "0.7027547", "0.6960124", "0.6618643", "0.62399167", "0.6210337", "0.6153565", "0.60968953", "0.6084945", "0.6028545", "0.60198605", "0.59947395", "0.58944356", "0.58530086", "0.5812029", "0.5808305", "0.57551855", "0.5749107", "0.5730243", "0.57258356", "0.5722142", "0.57036203", "0.5686315", "0.56750786", "0.5673778", "0.5659677", "0.56408125", "0.56258094", "0.5594233", "0.5590602", "0.5577255" ]
0.8311902
0
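A quick sanity check of the closed form above: for s = 3, 4, 5 it should reproduce the triangular, square, and pentagonal numbers. (// keeps the result an int under Python 3; the original uses Python 2's /.)

def polygonal_number(s, n):
    return (n * n * (s - 2) - n * (s - 4)) // 2

print([polygonal_number(3, n) for n in range(1, 6)])  # [1, 3, 6, 10, 15]
print([polygonal_number(4, n) for n in range(1, 6)])  # [1, 4, 9, 16, 25]
print([polygonal_number(5, n) for n in range(1, 6)])  # [1, 5, 12, 22, 35]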
Create a list of figurate numbers for a given s, between 1000 and 9999, with the additional property that the third digit of each number will not be 0.
def figurate_list(s):
    f = polygon_gen(s)
    ans = []
    c = next(f)
    while c < 999:
        c = next(f)
    while c < 10000:
        c = str(c)
        if c[2] != '0':
            ans.append(FigurateNode(c[:2], c[-2:], s))
        c = next(f)
    return ans
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fnum(num, sf = 0):\n\n\ts = []\n\tnf = 0\n\tppos = -1\n\tfor x in str(num):\n#\t\tprint((x, s))\n\t\tif x == '.':\n\t\t\tppos = len(s)\n\t\t\tcontinue\n\t\tif nf == 0 and ppos < 0 and x == '0':\n\t\t\tcontinue\n\t\ts.append(x)\n\t\tif x != '-' and (x != '0' or nf > 0):\n\t\t\tnf += 1\n\t\tif ppos >= 0 and sf > 0 and nf > sf:\n\t\t\tif int(s[-1]) >= 5:\n\t\t\t\ts[-2] = str(int(s[-2]) + 1)\n\t\t\ts = s[:-1]\n\t\t\tbreak\n\tif len(s) == 0:\n\t\ts = ['0']\n\tif ppos >= 0:\n\t\ts.insert(ppos, '.')\n\t\tif s[0] == '.':\n\t\t\ts.insert(0, '0')\n\t\treturn(''.join(s).rstrip('0').rstrip('.'))\n\telse:\n\t\treturn(''.join(s))", "def fn(n):\n if not n: return []\n elif n < 20: return [mp[n]]\n elif n < 100: return [mp[n//10*10]] + fn(n%10)\n else: return [mp[n//100], \"Hundred\"] + fn(n%100)", "def digit_set(s):\n t = np.zeros((10,1))\n t[s] = 1.0\n return t", "def squares(s):\n\t\"*** YOUR CODE HERE ***\"\n\treturn [int(i**0.5) for i in s if round(i ** 0.5) ** 2 == i ]", "def thousand_first_primes() -> List[int]:\n primes = []\n i = 0\n while len(primes) != 1000:\n primes += [i] if premier(i) else []\n i += 1\n return primes", "def prepare_numbers(numbers):\n \n numb = []\n for item in numbers:\n numb.append(int(item))\n return numb", "def truncatable_primes():\n list_tp = []\n i = 8\n while len(list_tp) < 11:\n if is_truncatable(i):\n list_tp.append(i)\n i += 1\n if i % 100 == 0:\n print(\"i : \", i)\n return list_tp, sum(list_tp)", "def digits(x):\n return [int(d) for d in str(x)]", "def replace_count_primes(n):\n s = list(str(n))\n length = len(s)\n print(n)\n for c in combinations(digits[:length], mu):\n test_list = []\n for i in range(10):\n new_s = deepcopy(s)\n for place in c:\n new_s[int(place)] = str(i)\n new_n = int(''.join(new_s))\n if no_leading_zeroes and len(str(new_n)) < length:\n continue\n if is_prime(new_n):\n test_list.append(new_n)\n if(len(test_list) >= 8):\n print(\"FOUND!!!\")\n print(test_list)\n return True\n return False", "def squares(s):\n\n \"*** YOUR CODE HERE ***\"\n return [int(x**(1/2)) for x in s if x**(1/2) == round(x**(1/2))]", "def fractionify(n):\n i = 0\n while True:\n if not n * 10 ** i % 1:\n break\n i += 1\n return n * 10 ** i, 10 ** i", "def two_digits_into_list(nr: int) -> list:\n return [int(a) for a in list(str(nr))]\n pass", "def spelledout_numbers_to_numbers(self, s):\n\t\tnumbers_1to9 = 'one two three four five six seven eight nine'.split() \n\t\tmappings_1to9 = {t[0]: str(t[1]) \n\t\t\t\t\t\t\t for t in zip(numbers_1to9, range(1,10))}\n\t\t\n\t\tmappings_10to19 = {t[0]: str(t[1]) \n\t\t\t\t\t\t\t for t in zip(\"\"\"ten eleven twelve thirteen fourteen fifteen \n\t\t\t\t\t\t\t\t\t\t\t sixteen seventeen eighteen nineteen\"\"\".split(), range(10,20))}\n\t\t\n\t\tnumbers_20to90 = 'twenty thirty forty fifty sixty seventy eighty ninety'.split()\n\t\tmappings_20to90 = {t[0]: str(t[1]) \n\t\t\t\t\t\t\t for t in zip(numbers_20to90, range(20,100,10))}\n\t\t\n\t\t# produce numbers like twenty one, fifty seven, etc.\n\t\tnumbers_21to99 = [' '.join([s,p]) for s in numbers_20to90 for p in numbers_1to9]\n\t\t\n\t\t\"\"\"\n\t\tcreate an ordered dictionary mapping spelled numbers to numbers in\n\t\tdigits; note that the order is important because we want to search\n\t\tfor spelled numbers starting from the compound ones like twenty two,\n\t\tthen try to find the rest\n\t\t\"\"\"\n\t\t\n\t\tod = OrderedDict({t[0]:t[1] \n\t\t\t\t\t\t\tfor t in zip(numbers_21to99, \n\t\t\t\t\t\t\t\t\t\t # create a list [21,22,..,29,31,..,39,41,..,99]\n\t\t\t\t\t\t\t\t\t\t [_ for _ 
in chain.from_iterable([[str(_) for _ in range(int(d)*10 + 1,int(d+1)*10)] \n\t\t\t\t\t\t\t\t\t\t\t for d in range(2,10)])])})\n\t\tod.update(mappings_20to90)\n\t\tod.update(mappings_10to19)\n\t\tod.update(mappings_1to9)\n\t\t\n\t\tfor w_ in od:\n\t\t\t s = re.sub(r'\\b' + w_ + r'\\b', od[w_], s)\n\t\t\n\t\treturn s", "def init_numbers():\n return ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')", "def find_digits(a,b,n):\n res = []\n\n c = a//b\n res.append(c)\n i = 0 \n while i < n:\n a -= b*c \n if a == 0:\n return res\n if a < b:\n a *= 10\n c = a//b\n res.append(c)\n i += 1\n \n return res", "def generate_numbers():\n\n nums = []\n\n for i in range(0, 12):\n for j in range(1, 12):\n nums.append(i / j)\n\n return list(set(nums))", "def _pettifor_numbers():\n return { \"Li\": 0.45,\n \"Be\": 1.5,\n \"B\": 2.0,\n \"C\": 2.5,\n \"N\": 3.0, \n \"O\": 3.5,\n \"F\": 4.0,\n \n \"Na\": 0.4,\n \"Mg\": 1.28,\n \"Al\": 1.66,\n \"Si\": 1.92,\n \"P\": 2.18,\n \"S\": 2.44,\n \"Cl\": 2.70,\n \n \"K\": 0.35,\n \"Ca\": 0.60,\n \"Sc\": 0.74,\n \"Ti\": 0.79,\n \"V\": 0.84,\n \"Cr\": 0.89,\n \"Mn\": 0.94,\n \"Fe\": 0.99,\n \"Co\": 1.04,\n \"Ni\": 1.09,\n \"Cu\": 1.20,\n \"Zn\": 1.44,\n \"Ga\": 1.68,\n \"Ge\": 1.92,\n \"As\": 2.16,\n \"Se\": 2.40,\n \"Br\": 2.64,\n\n \"Rb\": 0.30,\n \"Sr\": 0.55,\n \"Y\": 0.70,\n \"Zr\": 0.76,\n \"Nb\": 0.82,\n \"Mo\": 0.88,\n \"Tc\": 0.94,\n \"Ru\": 1.00,\n \"Rh\": 1.06,\n \"Pd\": 1.12,\n \"Ag\": 1.18,\n \"Cd\": 1.36,\n \"In\": 1.60,\n \"Sn\": 1.84,\n \"Sb\": 2.08,\n \"Te\": 2.32,\n \"I\": 2.56,\n \n \"Cs\": 0.25,\n \"Ba\": 0.50,\n \"La\": 0.748,\n \"Hf\": 0.775,\n \"Ta\": 0.83,\n \"W\": 0.885,\n \"Re\": 0.94,\n \"Os\": 0.995,\n \"Ir\": 1.05,\n \"Pt\": 1.105,\n \"Au\": 1.16,\n \"Hg\": 1.32,\n \"Tl\": 1.56,\n \"Pb\": 1.80,\n \"Bi\": 2.04,\n \"Po\": 2.28, \n \"At\": 2.52 }", "def double_nums(num_list):", "def zeros(s, zero=0):\n\treturn [zeros(s[1:] ) for i in range(s[0] ) ] if not len(s) else zero", "def _init_numbers(self):\n self._largeNums = [25, 50, 75, 100]\n smallNums = []\n\n for i in xrange(1, 10):\n smallNums += 2*[i]\n\n self._smallNums = smallNums", "def h_d_n(x:int) -> tuple:\n return(x // 100, (x % 100) // 10, x % 10)", "def integer_to_digits(i):\r\n return [i] if i < 10 else integer_to_digits(i // 10) + [i % 10]", "def digits(x):\n \n if type(x) != int: \n print(\"ERROR <- x in factorial(x) is not type int\")\n return\n \n return [int(i) for i in list(str(x))]", "def numbers(num):\n r = []\n for i in range(num):\n d = len(r)\n r = [1 if i == 0 or i == d else r[i-1]+r[i] for i in range(d+1)]\n yield r", "def compact_number(value: int) -> str:\n value = float('{:.3g}'.format(value))\n magnitude = 0\n while abs(value) >= 1000:\n magnitude += 1\n value /= 1000.0\n return '{}{}'.format(\n '{:f}'.format(value).rstrip('0').rstrip('.'), ['', 'K', 'M', 'B', 'T'][magnitude]\n )", "def solve(number):\n if number == 0:\n return \"INSOMNIA\"\n else:\n total_digits = 10 # there are 10 digits [0-9]\n digits_seen = set()\n multiplier = 0\n while len(digits_seen) < total_digits:\n multiplier += 1\n digits_in_n = {int(i) for i in str(multiplier*number)}\n digits_seen = digits_seen.union(digits_in_n)\n return multiplier*number", "def number_list(l):\n return ['{i:>{s}}. 
{v}'.format(s=len(str(len(l))), i=i+1, v=l[i]) for i in range(len(l))]", "def selfDividingNumbers(left, right):\n ret = []\n bounds = list(range(left, right + 1))\n \n for num in bounds:\n div = True\n if '0' in str(num):\n pass\n elif num < 10:\n ret.append(num)\n else:\n for n in str(num): \n if num % int(n) !=0:\n div = False\n if div is True:\n ret.append(num) \n return ret", "def int_with_commas(number):\n try:\n number = int(number)\n if number < 0:\n return '-' + int_with_commas(-number)\n result = ''\n while number >= 1000:\n number, number2 = divmod(number, 1000)\n result = \",%03d%s\" % (number2, result)\n return \"%d%s\" % (number, result)\n except Exception:\n return \"\"", "def extract_numbers_safe(cls, s, decimals=False):\n if decimals:\n tmp = ''.join([i for i in cls.escape(s) if ((i >= '0') and (i <= '9') or i == '.')])\n\n parts = tmp.split('.')\n\n try:\n output = '{a}.{b}'.format(a=parts[0], b=parts[1])\n except IndexError:\n output = parts[0]\n\n else:\n output = ''.join([i for i in cls.escape(s) if (i >= '0') and (i <= '9')])\n\n try:\n if s[0] == '-':\n output = '-{s}'.format(s=output)\n except:\n pass\n\n return output" ]
[ "0.617936", "0.61459374", "0.5946138", "0.5857729", "0.5823544", "0.57899886", "0.57853043", "0.5770208", "0.5769795", "0.57615775", "0.5737207", "0.57371724", "0.5700131", "0.56913346", "0.56819487", "0.566019", "0.5638854", "0.56227523", "0.56187755", "0.5614714", "0.55782115", "0.5563785", "0.5539613", "0.5538638", "0.5535185", "0.55308425", "0.55096817", "0.55094826", "0.54957837", "0.54897255" ]
0.66711605
0
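A Python 3 rendering of the generate-and-filter idea in this record; polygon_gen in the source wraps Python 2's itertools.imap, and the FigurateNode class is not shown, so a plain (prefix, suffix) tuple stands in here:

import itertools

def polygonal_number(s, n):
    return (n * n * (s - 2) - n * (s - 4)) // 2

def four_digit_figurates(s):
    gen = (polygonal_number(s, n) for n in itertools.count(1))
    out = []
    for c in gen:
        if c > 9999:
            break
        if c >= 1000 and str(c)[2] != '0':
            out.append((str(c)[:2], str(c)[-2:]))
    return out

print(four_digit_figurates(3)[:3])  # [('10', '35'), ('10', '81'), ('11', '28')]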
Finds all complete figurate cycles for the given s values.
def figurate_cycles(*s_vals):
    assert len(s_vals) > 1  #incomplete sanity check
    # Since a DFS has to start SOMEWHERE and we're looking for cycles, we
    # arbitrarily take the first list of figurates and use them as the
    # roots of our search.
    roots = figurate_list(s_vals[0])
    # Make a big list of all the rest of the figurate numbers
    candidates = []
    for s in s_vals[1:]:
        candidates.extend(figurate_list(s))
    answer = []
    # Perform a cycle-detecting DFS for every root in our list
    for root in roots:
        for cycle in find_all_cycles(candidates, root):
            answer.append(cycle)
    return answer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_all_cycles(s,graph):\n\n grph = u.edge_to_list_dict(graph)\n node_cnt = len(grph)\n k = z.Int(\"k\")\n syms = [z.Int('node%s'%i) for i in range(node_cnt)]\n\n # s.add(syms[0] == 0) # start node is a 0\n s.add(k < node_cnt)\n s.add(k > 1)\n\n o = z.Optimize()\n\n # for source, sinks in sgraph.s_adj_list():\n for i in range(node_cnt):\n s.add(syms[i] >= 0)\n s.add(syms[i] <= k)\n s.add(z.Or([syms[j] == ((syms[i] + 1) % k) for j in grph[i]]) == (syms[i] == 0))\n\n\n r = []\n m = []\n\n # o.minimize(z.Sum([syms[i] for i in range(node_cnt)]))\n s.add(z.Product([syms[i] for i in range(node_cnt)]) == 0)\n done = False\n while not done:\n if s.check() == z.sat:\n m = s.model()\n r.append(m)\n s.add(k != m[k])\n else:\n done = True\n\n return r", "def get_all_cycles(l_values):\n lt_cycles = []\n i_num = len(l_values)\n for i in range(i_num):\n for j in range(i_num):\n for t_permutation in itertools.permutations(l_values[j:],i+1):\n if t_permutation[0] == l_values[j]:\n lt_cycles.append(t_permutation)\n \n return lt_cycles", "def get_elementary_cycles(self):\n tarjan = Tarjan(self.graph)\n for ssc in tarjan.ssc():\n for start_node in ssc:\n least_node = min(ssc) # Some kind of ordering\n self.find_cycles(least_node, least_node)\n # ssc changes at each iteration, since we remove the\n # least node to avoid unnecesary DFSs\n ssc = tarjan.remove_useless_edges(ssc, least_node)\n return self.cycles", "def _find_cycle(subtypes: Dict[str, List[str]]) -> None:\n\n found_cycles = []\n\n def iterate(current_id, find_id):\n for t_entry in subtypes.get(current_id, []):\n if t_entry == find_id:\n found_cycles.append((find_id, current_id))\n iterate(t_entry, find_id)\n\n for the_id in subtypes['']:\n iterate(the_id, the_id)\n if len(found_cycles) > 0:\n for entry in found_cycles:\n logger.error(\n 'Cycle found with ids {} and {}'.format(entry[0], entry[1]))\n raise ValueError('cycles found in graph information')", "def get_dfs(self, s):\n results = []\n # mark all the vertices as not visited\n visited = [False] * (len(self.graph))\n self._dfs_recursive(s, visited, results)\n return results", "def _getcycles (self, simplify=True):\n # Keep track of the numbers we haven't touched yet. I chose to\n # look at what we haven't touched, because it makes the while\n # condition nicer to look at.\n not_done = [True] * len(self)\n cycles = []\n while any(not_done):\n # We start with the lowest number that hasn't yet been\n # listed, and record that we've touched it. 
We then loop\n # through the permutation until we get back to the original\n # number, at which point we close that cycle and begin a\n # new one.\n start = not_done.index(True) + 1\n cycle = [start]\n not_done[start - 1] = False\n next_element = self(start)\n while next_element != start:\n cycle.append(next_element)\n not_done[next_element - 1] = False\n next_element = self(next_element)\n # We have the option of removing the one-cycles from the\n # list, except possibly for the last one, if it is needed\n # to determine the length of the permutation.\n #\n # For example,\n # The identity permutation in S3 can be written as (3),\n # but cannot be simplified further.\n if not simplify or len(cycle) > 1 or cycle[0] == len(self):\n cycles.append(cycle)\n return cycles", "def find_cycle(self):\n # from guido's blog :\n # http://neopythonic.blogspot.com/2009/01/detecting-cycles-in-directed-graph.html\n worklist = set(self.successors)\n while worklist:\n stack = [worklist.pop()]\n while stack:\n top = stack[-1]\n for node in self.successors.get(top, ()):\n try:\n # raises ValueError if node is not in stack.\n cycle = stack[stack.index(node) :]\n succs = dict(\n (source, [cycle[(i + 1) % len(cycle)]])\n for i, source in enumerate(cycle)\n )\n return Digraph(succs, self.get_score, self.get_label)\n except ValueError:\n pass\n if node in worklist:\n stack.append(node)\n worklist.remove(node)\n break\n else:\n stack.pop()\n return None", "def test_find_cycles_multiple_cycles(self):\n self._build_sample_graph()\n # Adding cycle a -> d -> a\n self.skill_graph.add_prerequisite(self.sa.id, self.sd.id)\n # Adding cycle g -> h -> g\n sg = self.skill_graph.add(Skill.build('g', ''))\n sh = self.skill_graph.add(Skill.build('h', ''))\n self.skill_graph.add_prerequisite(sg.id, sh.id)\n self.skill_graph.add_prerequisite(sh.id, sg.id)\n\n expected = [[self.sa.id, self.sd.id], [sg.id, sh.id]]\n skill_map = SkillMap.load(self.course)\n successors = skill_map.build_successors()\n result = SkillMapMetrics(skill_map).simple_cycles()\n self.assertEqual(len(result), len(expected))\n for cycle in result:\n self.assertIn(sorted(cycle), expected)", "def search(values):\n \"Using depth-first search and propagation, try all possible values.\"\n ## Used the provided solutions to be sure that my implementation of diagonals and\n ## Twins is ok\n\n # First, reduce the puzzle using the previous function\n values = reduce_puzzle(values)\n if values is False:\n return False ## Failed earlier\n if all(len(values[s]) == 1 for s in boxes):\n return values ## Solved!\n # Choose one of the unfilled squares with the fewest possibilities\n n,s = min((len(values[s]), s) for s in boxes if len(values[s]) > 1)\n # Now use recurrence to solve each one of the resulting sudokus, and\n for value in values[s]:\n new_sudoku = values.copy()\n new_sudoku[s] = value\n attempt = search(new_sudoku)\n if attempt:\n return attempt", "def resolve_references_as_possible(s):\n refs = []\n resolved = []\n\n # ask all graphs for REFs\n for graph in s.graphs.values():\n refs.extend( graph.list_of_all_unpointed_refs() )\n\n # try to resolve all REFs\n for ref in refs:\n if ref.try_to_point():\n resolved.append(ref)\n\n # for REFs that link up,\n for ref in resolved:\n s.resolve_single_ref( ref )", "def find_cycles(self,max_cycle_len=4,starting_edges=None,check_area=True):\n def traverse(a,b):\n cs=self.angle_sort_adjacent_nodes(b,ref_nbr=a)\n return b,cs[-1]\n\n visited=set() # directed tuple of nodes\n\n cycles=[]\n\n if starting_edges is None:\n 
starting_edges=self.valid_edge_iter()\n\n for j in starting_edges:\n if j % 10000==0:\n print(\"Edge %d/%d, %d cycles\"%(j,self.Nedges(),len(cycles)))\n # iterate over the two half-edges\n for A,B in (self.edges['nodes'][j], self.edges['nodes'][j,::-1]):\n cycle=[A]\n\n while (A,B) not in visited and len(cycle)<max_cycle_len:\n visited.add( (A,B) )\n cycle.append(B)\n A,B = traverse(A,B)\n if B==cycle[0]:\n if check_area:\n A=signed_area( self.nodes['x'][cycle] )\n if A>0:\n cycles.append(cycle)\n else:\n cycles.append(cycle)\n break\n return cycles", "def find_eulerian_cycle(adj_dict, edges):\n if not adj_dict:\n return []\n\n checked = [False] * len(edges)\n list_keys = list(adj_dict.keys())\n for i in list_keys: # the first time will return true anyway\n cycle = []\n if dfs(i, adj_dict, edges, checked, cycle, i):\n return cycle\n return cycle", "def run_dfs(self,s):\n if self.verbose: print('entering run_dfs with s = ',s)\n new_states = [self.succ(s,a) for a in self.actions(s)]\n results = []\n\n for ns in new_states:\n if self.verbose: print('considering new state = ',ns)\n end = self.is_end(ns)\n if end:\n result = self.result(ns)\n if result is not None:\n results.append(result)\n else:\n results += self.run_dfs(ns)\n return results", "def dfs(self, value):\n\t\treturn self.__dfs(self, value)", "def _dfs_iteration(self, v):\n stack1 = [v]\n self._visited[v] = True\n while stack1:\n curr = stack1.pop()\n for w in self._G.adj(curr):\n if not self._visited[w]:\n stack1.append(w)\n self._visited[w] = True\n self._pre[w] = curr\n elif self._pre[w] != curr:\n self.cycle = True", "def extract_cycles(series, left=False, right=False):\n points = deque()\n\n for x in reversals(series, left=left, right=right):\n points.append(x)\n while len(points) >= 3:\n # Form ranges X and Y from the three most recent points\n X = abs(points[-2] - points[-1])\n Y = abs(points[-3] - points[-2])\n\n if X < Y:\n # Read the next point\n break\n elif len(points) == 3:\n # Y contains the starting point\n # Count Y as one-half cycle and discard the first point\n yield points[0], points[1], 0.5\n points.popleft()\n else:\n # Count Y as one cycle and discard the peak and the valley of Y\n yield points[-3], points[-2], 1.0\n last = points.pop()\n points.pop()\n points.pop()\n points.append(last)\n else:\n # Count the remaining ranges as one-half cycles\n while len(points) > 1:\n yield points[0], points[1], 0.5\n points.popleft()", "def search(self, values):\n if values is False:\n return False\n if all(len(values[square]) == 1 for square in self.squares):\n return values\n n, square = min((len(values[square]), square)\n for square in self.squares if len(values[square]) > 1)\n\n return self.possible_values(self.search(self.assign(values.copy(), square, dig))\n for dig in values[square])", "def cycle(start, times):\n current_gen = start\n for _ in range(times):\n next_gen = defaultdict(int)\n all_locs = get_all_neighbors(current_gen.keys())\n all_locs.update(current_gen.keys())\n for loc in all_locs:\n neighbors = get_neighbors(loc)\n count = sum(current_gen[n] for n in neighbors)\n if count in (2, 3) and current_gen[loc] == 1:\n next_gen[loc] = 1\n elif count == 3 and current_gen[loc] == 0:\n next_gen[loc] = 1\n current_gen = next_gen\n return current_gen", "def all_cycles_iterator(self, starting_vertices=None, simple=False,\n rooted=False, max_length=None, trivial=False):\n if starting_vertices is None:\n starting_vertices = self\n # Since a cycle is always included in a given strongly connected\n # component, we may 
remove edges from the graph\n sccs = self.strongly_connected_components()\n d = {}\n for id, component in enumerate(sccs):\n for v in component:\n d[v] = id\n h = copy(self)\n h.delete_edges([ (u,v) for (u,v) in h.edge_iterator(labels=False)\n if d[u] != d[v] ])\n # We create one cycles iterator per vertex. This is necessary if we\n # want to iterate over cycles with increasing length.\n vertex_iterators = dict([(v, h._all_cycles_iterator_vertex( v\n , starting_vertices=starting_vertices\n , simple=simple\n , rooted=rooted\n , max_length=max_length\n , trivial=trivial\n , remove_acyclic_edges=False\n )) for v in starting_vertices])\n cycles = []\n for vi in vertex_iterators.values():\n try:\n cycle = next(vi)\n cycles.append((len(cycle), cycle))\n except(StopIteration):\n pass\n # Since we always extract a shortest path, using a heap\n # can speed up the algorithm\n from heapq import heapify, heappop, heappush\n heapify(cycles)\n while cycles:\n # We choose the shortest available cycle\n _, shortest_cycle = heappop(cycles)\n yield shortest_cycle\n # We update the cycle iterator to its next available cycle if it\n # exists\n try:\n cycle = next(vertex_iterators[shortest_cycle[0]])\n heappush(cycles, (len(cycle), cycle))\n except(StopIteration):\n pass", "def resolve_all_refs(s):\n refs = []\n # ask all graphs for REFs\n for graph in s.graphs.values():\n refs.extend( graph.list_of_all_unpointed_refs() )\n\n # resolve collected refs\n for ref in refs:\n ref.resolve()\n\n return len( refs )", "def getDirectFollowSets(self, FIRST):\n self.init_follow = {v:set() for v in self.v }\n self.containsFOLLOWOf = set()\n for v in self.v:\n if v == self.np[0][0]: # Starting Production\n self.init_follow[v] = set(['$']) # $ is in follow of 'S' applying rule 1\n for prod in self.g[v]:\n for i in range(len(prod)):\n if prod[i] in self.v and i+1 < len(prod):\n if prod[i+1] in self.t:\n self.init_follow[prod[i]] |= set([prod[i+1]])\n else:\n t = i + 1\n while t < len(prod) and prod[t] in self.nullables_map:\n if self.nullables_map[prod[t]] == True:\n self.init_follow[prod[i]] |= FIRST[prod[t]]-set(['epsilon'])\n else:\n self.init_follow[prod[i]] |= FIRST[prod[t]]\n break\n t += 1\n if t >= len(prod): # every thing on rhs of prod[i] could produce epsison, rule - 3\n self.containsFOLLOWOf |= set([(prod[i], v)])\n else: #prod[i+1] is a non nullable prod or prod[t] was a terminal\n if prod[t] in self.t:\n self.init_follow[prod[i]] |= set([prod[t]])\n else:\n self.init_follow[prod[i]] |= FIRST[prod[t]]-set(['epsilon'])\n\n elif prod[i] in self.v:\n self.containsFOLLOWOf |= set([(prod[i], v)]) # applying rule 2\n\n #self.containsFOLLOWOf = set([(a, b) for (a, b) in self.containsFOLLOWOf if a != b]) # remove the self loops\n return self.init_follow", "def locate_all_or_nothing_cycle(player):\n\n lasts = [player]\n seconds = []\n while True:\n second_best = player.prefs[1]\n their_worst = second_best.prefs[-1]\n\n seconds.append(second_best)\n lasts.append(their_worst)\n\n player = their_worst\n\n if lasts.count(player) > 1:\n break\n\n idx = lasts.index(player)\n cycle = list(zip(lasts[idx + 1 :], seconds[idx:]))\n\n return cycle", "def dfs_r(self, s):\n g = Graph(attr={DIRECTED: True})\n return self.dfs_rec(g, ('#', s))", "def compute_cyclepoints(sig, fs, f_range, **find_extrema_kwargs):\n\n # Ensure arguments are within valid range\n check_param_range(fs, 'fs', (0, np.inf))\n\n # Find extrema and zero-crossings locations in the signal\n peaks, troughs = find_extrema(sig, fs, f_range, **find_extrema_kwargs)\n 
rises, decays = find_zerox(sig, peaks, troughs)\n\n # For each cycle, identify the sample of each extrema and zero-crossing\n samples = {}\n samples['sample_peak'] = peaks[1:]\n samples['sample_last_zerox_decay'] = decays[:-1]\n samples['sample_zerox_decay'] = decays[1:]\n samples['sample_zerox_rise'] = rises\n samples['sample_last_trough'] = troughs[:-1]\n samples['sample_next_trough'] = troughs[1:]\n\n df_samples = pd.DataFrame.from_dict(samples)\n\n return df_samples", "def all_simple_cycles(self, starting_vertices=None, rooted=False,\n max_length=None, trivial=False):\n return list(self.all_cycles_iterator(starting_vertices=starting_vertices, simple=True, rooted=rooted, max_length=max_length, trivial=trivial))", "def search(values):\n \n values = reduce_schedule(values)\n \n if values is False:\n return False ## failed earlier\n \n if all(len(values[s]) == 1 for s in boxes):\n return values ## solved!\n \n #choose one of the unfilled squares with the fewest possibilities\n n, s = min((len(values[s]), s) for s in boxes if len(values[s]) > 1) \n \n #using reccurence, solve each one of the resulting schedules\n for value in values[s]:\n try:\n new_schedule = values.copy()\n new_schedule[s] = [value]\n attempt = search(new_schedule)\n if attempt:\n #print(\"Successfully assigned {} to {}\".format(value, s))\n return attempt\n except:\n #print(\"Failed assigning {} to {}\".format(value, s))\n pass", "def _find_cusps(self):\n N = self.level()\n s = []\n\n for d in arith.divisors(N):\n w = arith.gcd(d, N//d)\n if w == 1:\n if d == 1:\n s.append(Cusp(1,0))\n elif d == N:\n s.append(Cusp(0,1))\n else:\n s.append(Cusp(1,d))\n else:\n for a in range(1, w):\n if arith.gcd(a, w) == 1:\n while arith.gcd(a, d//w) != 1:\n a += w\n s.append(Cusp(a,d))\n return sorted(s)", "def find_already_eulerian_cycle(adj_dict, edges):\n if not adj_dict:\n return []\n\n checked = [False] * len(edges)\n list_keys = list(adj_dict.keys())\n for i in list_keys: # the first time will return true anyway\n cycle = [i]\n if dfs_eulerian(i, adj_dict, edges, checked, cycle, i):\n return cycle\n return cycle", "def find_all_cycles(candidates, new_elem, path=[]):\n \n def have_cycle(candidates, path):\n \"\"\" Checks that when we have no more candidates, that our path\n 'endpoints' are cyclical. \"\"\"\n return (not candidates and path[0].prefix == path[-1].suffix)\n \n def have_dead_end(candidates, new_elem):\n \"\"\" Checks that we have at least one candidate whose prefix is\n cyclical with the new element's suffix. \"\"\"\n return new_elem.suffix not in map(lambda x: x.prefix, candidates)\n \n def remove_sgons(s_value, candidates):\n \"\"\" Returns a new list where all s-gonal candidates have been\n removed. \"\"\"\n return list(filter(lambda x: x.s != s_value,\n candidates))\n # Append new_elem to our working path, and test for our two exit criteria:\n # 1. A complete cycle -- There are no more candidates to extend our path\n # with and our ends wrap around prefix-suffix-cyclically\n # 2. 
A dead end -- There are no new candidates whose prefix match our\n # new element's suffix\n path = path + [new_elem]\n if have_cycle(candidates, path):\n return [path]\n if have_dead_end(candidates, new_elem):\n return []\n # Now go through every candidate and find the handful of ones whose prefix\n # match our new element's suffix.\n cycles = []\n for candidate in candidates:\n if new_elem.suffix == candidate.prefix:\n # When we find a valid candidate, we remove all candidates of the\n # same figurate type as our valid candidate.\n new_candidates = remove_sgons(candidate.s, candidates)\n # We then go down the path of finding all cycles with our valid\n # candidate as the new last-element\n new_cycles = find_all_cycles(new_candidates, candidate, list(path))\n for new_cycle in new_cycles:\n cycles.append(new_cycle)\n return cycles", "def run(self, concentrations: dict, svd: bool = True) -> None:\n\n if type(concentrations) is list or type(concentrations) is tuple:\n raise NotImplementedError\n\n elif isinstance(concentrations, dict):\n names = []\n values = []\n\n for k, v in concentrations.items():\n if k == 'H+':\n names.append('pH')\n else:\n names.append(k)\n values.append(v)\n\n grid = list(product(*values))\n indices = list(product(*[list(range(len(i))) for i in values]))\n else:\n msg = \"Could not determined range selection scheme from `concentrations`\"\n raise ValueError(msg)\n\n self._validate_ranges(concentrations)\n\n N_states = len(self.c.states)\n # shape of [N_states, <values ligand 1>, <values ligand 2>, ..., <values ligand N>]\n res = np.zeros([N_states] + [len(i) for i in values])\n res_prob = np.zeros_like(res)\n res_std_error = np.zeros_like(res)\n res_covar = np.zeros([N_states, N_states] + [len(i) for i in values])\n res_deltas = np.zeros_like(res_covar)\n res_dGs = np.zeros_like(res_covar)\n\n pH = 0\n\n for g, c in zip(grid, indices):\n # using OrderedDict as a precaution mostly since\n # the shapes of the output can vary wildly\n conc = OrderedDict()\n coords = []\n\n for i, n in enumerate(names):\n if n == 'pH' or n.upper() == 'H+':\n pH = g[i]\n coords.append(i)\n continue\n conc[n] = g[i]\n coords.append(i)\n\n self.c.concentrations = conc\n self.c.build_cycle(pH=pH)\n self.c.MLE(svd=svd)\n _filter1D = (slice(0, None), *c)\n _filter2D = (slice(0, None), slice(0, None), *c)\n res[_filter1D] = self.c.g_mle\n weights = np.exp(-res[_filter1D])\n Z = weights.sum()\n res_prob[_filter1D] = weights / Z\n res_std_error[_filter1D] = self.c.std_errors\n res_covar[_filter2D] = self.c.covariance_matrix\n res_deltas[_filter2D] = self.c.deltas\n res_dGs[_filter2D] = self.c.dGs\n\n coords = OrderedDict()\n coords['state'] = self.c.states.values[:, 0]\n coords['state_i'] = self.c.states.values[:, 0]\n coords['state_j'] = self.c.states.values[:, 0]\n\n for k, v in zip(names, values):\n coords[k] = v\n\n self.results = Dataset(\n data_vars=dict(\n free_energy=(\n ['state', *names], res\n ),\n microstate_probs=(\n ['state', *names], res_prob\n ),\n std_errors=(\n ['state', *names], res_std_error\n ),\n covariance=(\n ['state_i', 'state_j', *names], res_covar\n ),\n deltas=(\n ['state_i', 'state_j', *names], res_deltas\n ),\n dGs=(\n ['state_i', 'state_j', *names], res_dGs\n ),\n ),\n coords=coords,\n )" ]
[ "0.6175425", "0.57670933", "0.5475402", "0.53936297", "0.5329402", "0.522589", "0.52258706", "0.5211429", "0.51233554", "0.5099929", "0.5068521", "0.5063437", "0.5032627", "0.5028338", "0.49907324", "0.4966183", "0.49148342", "0.49041694", "0.48915786", "0.4890928", "0.4883268", "0.48830852", "0.48827088", "0.48713014", "0.4870162", "0.48625618", "0.486201", "0.48313826", "0.4823267", "0.4820413" ]
0.7913429
0
Checks that when we have no more candidates, that our path 'endpoints' are cyclical.
def have_cycle(candidates, path): return (not candidates and path[0].prefix == path[-1].suffix)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_paths(self):\n for path in self.paths:\n # check that arc starts at s\n arc = path[0]\n arc_start = self.arc_info[arc][\"start\"]\n assert(arc_start == self.source()), \"Path does not start at s\"\n # check that internal arcs are valid\n for (i, arc) in enumerate(path[:-1]):\n next_arc = path[i + 1]\n arc_destin = self.arc_info[arc][\"destin\"]\n next_arc_start = self.arc_info[next_arc][\"start\"]\n assert (arc_destin == next_arc_start), \"Invalid path\"\n arc = path[-1]\n arc_end = self.arc_info[arc][\"destin\"]\n assert(arc_end == self.sink()), \"Path does not end at t\"", "def valid_connection(\n graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]\n) -> bool:\n\n # 1. Validate that path exists between current and next vertices\n if graph[path[curr_ind - 1]][next_ver] == 0:\n return False\n\n # 2. Validate that next vertex is not already in path\n return not any(vertex == next_ver for vertex in path)", "def cyclic(g):\n path = set()\n visited = set()\n\n def visit(vertex):\n if vertex in visited:\n return False\n visited.add(vertex)\n path.add(vertex)\n for neighbour in g.parents_of(vertex):\n if neighbour in path or visit(neighbour):\n return True\n path.remove(vertex)\n return False\n\n return any(visit(v) for v in g.indices)", "def find_all_cycles(candidates, new_elem, path=[]):\n \n def have_cycle(candidates, path):\n \"\"\" Checks that when we have no more candidates, that our path\n 'endpoints' are cyclical. \"\"\"\n return (not candidates and path[0].prefix == path[-1].suffix)\n \n def have_dead_end(candidates, new_elem):\n \"\"\" Checks that we have at least one candidate whose prefix is\n cyclical with the new element's suffix. \"\"\"\n return new_elem.suffix not in map(lambda x: x.prefix, candidates)\n \n def remove_sgons(s_value, candidates):\n \"\"\" Returns a new list where all s-gonal candidates have been\n removed. \"\"\"\n return list(filter(lambda x: x.s != s_value,\n candidates))\n # Append new_elem to our working path, and test for our two exit criteria:\n # 1. A complete cycle -- There are no more candidates to extend our path\n # with and our ends wrap around prefix-suffix-cyclically\n # 2. 
A dead end -- There are no new candidates whose prefix match our\n # new element's suffix\n path = path + [new_elem]\n if have_cycle(candidates, path):\n return [path]\n if have_dead_end(candidates, new_elem):\n return []\n # Now go through every candidate and find the handful of ones whose prefix\n # match our new element's suffix.\n cycles = []\n for candidate in candidates:\n if new_elem.suffix == candidate.prefix:\n # When we find a valid candidate, we remove all candidates of the\n # same figurate type as our valid candidate.\n new_candidates = remove_sgons(candidate.s, candidates)\n # We then go down the path of finding all cycles with our valid\n # candidate as the new last-element\n new_cycles = find_all_cycles(new_candidates, candidate, list(path))\n for new_cycle in new_cycles:\n cycles.append(new_cycle)\n return cycles", "def IsCyclic(self):\n\n visited = [False for i in range(self.NodesCount())]\n \n for idx in range(1, self.NodesCount()+1): \n if not visited[idx-1]: \n if self.IsCyclicRec(idx, visited, -1): \n return True\n return False", "def is_cyclic(self):\n \n visited = set()\n path = []\n \n for node in self.node_set:\n if node not in visited:\n if self.is_cyclic_helper(node, visited, path) is True:\n return True \n \n visited.clear()\n path.clear()\n return False", "def is_cyclic(self):\n return self._.b[0] == 2 and self._.c[-1] in [1, 2] and \\\n all(x == 1 for x in self._.b[1:-1] + self._.c[1:-1])", "def test_cyclical_and_end_disconnected_graph(self):\n path = Dijkstras().dijkstras(self.g6, 'a', 'b')\n self.assertTrue(\n 'The proclaimed end vertex \\'b\\' was not reached' in path,\n 'The end was found when it was not supposed to be. \\\n The return was \\'{}\\''.format(path))", "def hasPath(startVert, endVert, sequencer):\n visited = set()\n sequencer.push(startVert)\n while (not sequencer.empty()):\n current = sequencer.pop()\n visited.add(current)\n print(\"Visting \" + str(current.id))\n for n in current.getConnections():\n if n == endVert:\n return True\n if n not in visited:\n sequencer.push(n)\n return False", "def is_cyclic_helper(self, node, visited, path):\n \n visited.add(node)\n path.append(node)\n \n for i in self.suffix[node]:\n if i not in visited:\n if self.is_cyclic_helper(i, visited, path) is True: \n return True\n \n elif i in path:\n return True\n \n path.remove(node)\n return False", "def valid_path(self, source, sink, path):\n\n if source == sink:\n return path\n for edge in self.adjacents[source]:\n if edge not in path:\n if edge.capacity - self.edges[edge] > 0:\n return self.valid_path(edge.sink, sink, path + [edge])\n\n # In case there is no more possible path:\n return None", "def path_exists(graph, start, end):\n explored = set()\n q = deque([start])\n while q:\n current = q.pop()\n if current == end:\n return True\n if current in explored:\n continue\n explored.add(current)\n for adjacent in graph.get(current, set()):\n q.appendleft(adjacent)\n return False", "def is_polycyclic(self):\n return self.is_solvable", "def check_dag_acyclic(self, start, inputs):\n for binding in inputs.values():\n if start == binding.io_owner:\n return False\n for p in binding.parents:\n if not self.check_dag_acyclic(start, p.io_owner.input_bindings.bindings):\n return False\n\n return True", "def validate(self, fgraph):\r\n\r\n if self.destroyers:\r\n ords = self.orderings(fgraph)\r\n\r\n if _contains_cycle(fgraph, ords):\r\n raise InconsistencyError(\"Dependency graph contains cycles\")\r\n else:\r\n #James's Conjecture:\r\n #If there are no destructive ops, 
then there can be no cycles.\r\n pass\r\n return True", "def validate(self, fgraph):\r\n\r\n if self.destroyers:\r\n ords = self.orderings(fgraph)\r\n\r\n if _contains_cycle(fgraph, ords):\r\n raise InconsistencyError(\"Dependency graph contains cycles\")\r\n else:\r\n #James's Conjecture:\r\n #If there are no destructive ops, then there can be no cycles.\r\n pass\r\n return True", "def has_cycle(self):\n traversed = dict()\n for i in self.adj_list:\n traversed[i] = False\n\n for i in self.adj_list:\n if not traversed[i]:\n if self.has_cycle_helper(i, traversed, -1):\n return True\n return False", "def has_cyclic(self):\n return self._stub.List(self._message).cyc_info.has_cyclic", "def build_path(\n G: nx.Graph,\n node: int,\n endpoints: List[int],\n path: List[int]) -> List[int]:\n\n # For each successor in the passed-in node\n for successor in G.successors(node):\n if successor not in path:\n # If successor is already in path, ignore it, otherwise add to path\n path.append(successor)\n\n if successor not in endpoints:\n # If successor not endpoint, recursively call\n # build_path until endpoint found\n path = build_path(G, successor, endpoints, path)\n\n else:\n # If successor is endpoint, path is completed, so return\n return path\n\n if (path[-1] not in endpoints) and (path[0] in G.successors(path[-1])):\n # If end of the path is not actually an endpoint and the path's\n # first node is a successor of the path's final node, then this is\n # actually a self loop, so add path's first node to end of path to\n # close it\n path.append(path[0])\n\n return path", "def test_large_cyclical_graph_to_check_multiple_paths(self):\n self.assertEquals(\n Dijkstras().dijkstras(self.g9, 'a', 'b'),\n (6, ['a', 'c', 'd', 'g', 'b']))", "def is_complete_path(self, path):\n try:\n if len(path) > 30:\n raise TooLongPathError\n nodes = [node for (node, edge) in path]\n if nodes.count('DBranch') > 2 or nodes.count('DLoop') > 2 or nodes.count('DExcept') > 2:\n raise TooLongPathError\n calls = [call for (call, edge) in path if call not in ['DSubTree', 'DBranch', 'DLoop', 'DExcept', 'STOP']]\n for call in calls:\n if nodes.count(call) > 1:\n raise TooLongPathError\n self.consume_until_STOP(path, 1)\n return True\n except IncompletePathError:\n return False", "def _find_path(self, start, end, path, visited):\n path.append(start)\n visited.add(start)\n if start == end:\n return path\n for vertex in self.neighbors(start):\n if vertex not in visited:\n if not self._find_path(vertex, end, path, visited):\n path.remove(vertex)\n else:\n return True", "def has_cycles(graph):\n path = set()\n\n def visit(node):\n path.add(node)\n for neighbour in graph.edges[node]:\n if neighbour in path or visit(neighbour):\n return True\n path.remove(node)\n return False\n\n return any(visit(node) for node in graph.nodes)", "def has_cycle(link):\r\n # collect_list = [link]\r\n # while not link == Link.empty:\r\n # collect_list.append(link.first)\r\n # link = link.rest\r\n # if link.rest in collect_list:\r\n # return True\r\n # return False\r\n s = link\r\n while not link == Link.empty:\r\n if link.rest == s:\r\n return True\r\n else:\r\n link = link.rest\r\n return False", "def validate_edges(attack_surface_graph, admissible_path, starting_points):\n for i in range(len(admissible_path)-1):\n for edge in attack_surface_graph.edges(data=True):\n if edge[0] == admissible_path[i] and edge[1] == admissible_path[i+1]:\n descriptors = edge[2]\n if find_violation(descriptors) == [] and edge[0] not in starting_points:\n return False\n return 
True", "def canReachDFS(start, end):\n visited = set()\n visited.add(start)\n __canReachDFS(start, visited)\n # a path exists if the end node was visited, otherwise the graph is\n # disconnected and no path exists from start to end\n return end in visited", "def resolve_conflicts(self):\n neighbours = self.nodes\n new_chain = None\n\n # We are only looking for chains longer that ours\n max_length = len(self.chain)\n\n # Checking for the length of each chain in our network\n for node in neighbours:\n response = requests.get(f'http://{node}/chain')\n\n if response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n\n if length > max_length and self.validate_chain(chain):\n new_chain = chain\n max_length = length\n\n # Replace our chain with a new, longer, valid chain in our network (if present)\n if new_chain:\n self.chain = new_chain\n return True\n\n return False", "def resolve_conflicts(self):\n neighbours = self.nodes\n new_chain = None\n # Look only for chains longer than this\n max_length = len(self.chain)\n # Get and verify the chains from all the nodes in the network\n for node in neighbours:\n response = requests.get(f'http://{node}/chain')\n if response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n # Check if chain is longer and valid\n if length > max_length and self.valid_chain(chain):\n max_length = length\n new_chain = chain\n # Replace this chain if a longer valid chain is discovered\n if new_chain:\n self.chain = new_chain\n return True\n return False", "def is_valid_path(self, path: []) -> bool:\n if not path:\n return True\n\n if len(path) == 1:\n return self.contains_vertex(path[0])\n\n i = 0\n j = 1\n while j < len(path):\n if path[j] not in self.adj_list[path[i]]:\n return False\n else:\n i += 1\n j += 1\n\n return True", "def _check_cycles(self, graph):\n if list(nx.simple_cycles(graph)):\n raise AbstractGraphError('Cycles in graph')" ]
[ "0.64591026", "0.64028496", "0.62922645", "0.6263719", "0.62504596", "0.6250248", "0.62022024", "0.6092906", "0.60610884", "0.6022989", "0.6006082", "0.5956604", "0.595369", "0.5885716", "0.58702284", "0.58571595", "0.5837033", "0.5828162", "0.5810018", "0.5783266", "0.5762985", "0.5759325", "0.57577974", "0.573151", "0.5730676", "0.5728142", "0.5687513", "0.5687512", "0.5681832", "0.56766593" ]
0.6560611
0
Checks that we have at least one candidate whose prefix is cyclical with the new element's suffix.
def have_dead_end(candidates, new_elem): return new_elem.suffix not in map(lambda x: x.prefix, candidates)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def have_cycle(candidates, path):\n return (not candidates and path[0].prefix == path[-1].suffix)", "def find_all_cycles(candidates, new_elem, path=[]):\n \n def have_cycle(candidates, path):\n \"\"\" Checks that when we have no more candidates, that our path\n 'endpoints' are cyclical. \"\"\"\n return (not candidates and path[0].prefix == path[-1].suffix)\n \n def have_dead_end(candidates, new_elem):\n \"\"\" Checks that we have at least one candidate whose prefix is\n cyclical with the new element's suffix. \"\"\"\n return new_elem.suffix not in map(lambda x: x.prefix, candidates)\n \n def remove_sgons(s_value, candidates):\n \"\"\" Returns a new list where all s-gonal candidates have been\n removed. \"\"\"\n return list(filter(lambda x: x.s != s_value,\n candidates))\n # Append new_elem to our working path, and test for our two exit criteria:\n # 1. A complete cycle -- There are no more candidates to extend our path\n # with and our ends wrap around prefix-suffix-cyclically\n # 2. A dead end -- There are no new candidates whose prefix match our\n # new element's suffix\n path = path + [new_elem]\n if have_cycle(candidates, path):\n return [path]\n if have_dead_end(candidates, new_elem):\n return []\n # Now go through every candidate and find the handful of ones whose prefix\n # match our new element's suffix.\n cycles = []\n for candidate in candidates:\n if new_elem.suffix == candidate.prefix:\n # When we find a valid candidate, we remove all candidates of the\n # same figurate type as our valid candidate.\n new_candidates = remove_sgons(candidate.s, candidates)\n # We then go down the path of finding all cycles with our valid\n # candidate as the new last-element\n new_cycles = find_all_cycles(new_candidates, candidate, list(path))\n for new_cycle in new_cycles:\n cycles.append(new_cycle)\n return cycles", "def _causes_name_clash(candidate, path_list, allowed_occurences=1):\n duplicate_counter = -allowed_occurences\n for path in path_list:\n parts = tuple(reversed(path.parts))\n if len(parts) >= len(candidate) and parts[: len(candidate)] == candidate:\n duplicate_counter += 1\n return duplicate_counter > 0", "def generate_good_suffix(self, pattern: str, suffix, prefix):\n M = self.M\n # i is the index of scanning index in pattern, range from [0,M-2], to scan substring before pattern[M-1]\n for i in range(M - 1):\n # j is the index of suffix index\n j = i\n # k is the suffix array length, also index, should range from [1,M-1]\n k = 0\n while j >= 0 and pattern[j] == pattern[M - 1 - k]:\n j -= 1\n k += 1\n suffix[k] = j + 1\n if j == -1:\n prefix[k] = True", "def _is_component(words):\n init_word = words[0]\n words = set(words) # odstrani duplicity\n seen = {init_word, }\n first_ch = {init_word[0], }\n last_ch = {init_word[-1], }\n index = 0\n while index < max(len(first_ch), len(last_ch)):\n for word in words:\n if word[:1] in last_ch or word[-1:] in first_ch:\n first_ch.add(word[:1])\n last_ch.add(word[-1:])\n seen.add(word)\n index += 1\n return len(seen) == len(words)", "def _is_component(words):\n init_word = words[0]\n words = set(words) # odstrani duplicity\n seen = {init_word, }\n first_ch = {init_word[0], }\n last_ch = {init_word[-1], }\n index = 0\n while index < max(len(first_ch), len(last_ch)):\n for word in words:\n if word[:1] in last_ch or word[-1:] in first_ch:\n first_ch.add(word[:1])\n last_ch.add(word[-1:])\n seen.add(word)\n index += 1\n return len(seen) == len(words)", "def handle_one_off(self, shorter, longer):\n found = False\n for n, c in enumerate(shorter):\n 
if shorter[n] == longer[n]:\n continue\n elif shorter[n] == longer[n+1]:\n if not found:\n found = True\n else:\n return False\n return True", "def is_cyclic_helper(self, node, visited, path):\n \n visited.add(node)\n path.append(node)\n \n for i in self.suffix[node]:\n if i not in visited:\n if self.is_cyclic_helper(i, visited, path) is True: \n return True\n \n elif i in path:\n return True\n \n path.remove(node)\n return False", "def startsWith(self, prefix: str) -> bool:\n curr = self.root\n for c in prefix:\n if not c in curr.adj:\n return False\n curr = curr.adj[c]\n return True", "def test_phonebook_with_numbers_that_prefix_one_another_is_inconsistent(self):\n self.phonebook.add(\"Bob\", \"12345\")\n self.phonebook.add(\"Mary\", \"123\")\n self.assertFalse(self.phonebook.is_consistent())", "def transitive_reduction_helper(self, node, visited, path):\n\n visited.add(node)\n path.append(node)\n \n for i in self.suffix[node].copy(): \n self.transitive_reduction_helper(i, visited, path)\n\n # determine if the prefix can be reduced\n for i in self.prefix[path[-1]].copy():\n # if j is in the path and not the previous node\n if i in path and i is not path[-2]:\n self.remove_edge(i, path[-1]) \n \n \n path.remove(node)\n return visited", "def test_prefix(self):\n self.chck_triple('prefix')", "def is_cyclic_conjugate(self, w):\n l1 = len(self)\n l2 = len(w)\n if l1 != l2:\n return False\n w1 = self.identity_cyclic_reduction()\n w2 = w.identity_cyclic_reduction()\n letter1 = w1.letter_form\n letter2 = w2.letter_form\n str1 = ' '.join(map(str, letter1))\n str2 = ' '.join(map(str, letter2))\n if len(str1) != len(str2):\n return False\n\n return str1 in str2 + ' ' + str2", "def stillLookingForPrefix(self, prefix):\n return prefix in self._prefixToIdentifiers", "def _build_prefix(self):\r\n pattern = self.string2\r\n m = len(pattern)\r\n p = [None]*m\r\n p[0] = 0\r\n k = 0\r\n for i in range(1,m):\r\n while k > 0 and pattern[i] != pattern[k]:\r\n k = p[k-1]\r\n if pattern[k] == pattern[i]:\r\n k = k+1\r\n p[i] = k\r\n self._prefix = p", "def startsWith(self, prefix):\n tri = self.root.d\n \n \n if len(prefix) == 0: \n return True\n \n if len(tri) == 0:\n return False\n \n p = 0\n \n for i in xrange(len(prefix)):\n if tri != 0 and prefix[i] in tri:\n tri = tri[prefix[i]]\n else:\n return False\n \n return True", "def resolve_conflicts(self):\n neighbours = self.nodes\n new_chain = None\n # Look only for chains longer than this\n max_length = len(self.chain)\n # Get and verify the chains from all the nodes in the network\n for node in neighbours:\n response = requests.get(f'http://{node}/chain')\n if response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n # Check if chain is longer and valid\n if length > max_length and self.valid_chain(chain):\n max_length = length\n new_chain = chain\n # Replace this chain if a longer valid chain is discovered\n if new_chain:\n self.chain = new_chain\n return True\n return False", "def has_cycle(link):\r\n # collect_list = [link]\r\n # while not link == Link.empty:\r\n # collect_list.append(link.first)\r\n # link = link.rest\r\n # if link.rest in collect_list:\r\n # return True\r\n # return False\r\n s = link\r\n while not link == Link.empty:\r\n if link.rest == s:\r\n return True\r\n else:\r\n link = link.rest\r\n return False", "def resolve_conflicts(self):\n neighbours = self.nodes\n new_chain = None\n\n # We are only looking for chains longer that ours\n max_length = len(self.chain)\n\n # Checking for the length 
of each chain in our network\n for node in neighbours:\n response = requests.get(f'http://{node}/chain')\n\n if response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n\n if length > max_length and self.validate_chain(chain):\n new_chain = chain\n max_length = length\n\n # Replace our chain with a new, longer, valid chain in our network (if present)\n if new_chain:\n self.chain = new_chain\n return True\n\n return False", "def resolve_conflicts(self):\n\n neighbours = self.nodes\n new_chain = None\n\n # We're only looking for chains longer than ours\n max_length = len(self.chain)\n\n # Grab and verify the chains from all the nodes in our network\n for node in neighbours:\n response = requests.get(f'http://{node}/chain')\n\n if response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n\n # Check if the length is longer and the chain is valid\n if length > max_length and self.valid_chain(chain):\n max_length = length\n new_chain = chain\n\n # Replace our chain if we discovered a new, valid chain longer than ours\n if new_chain:\n self.rewrite_chain(new_chain)\n return True\n\n return False", "def _verify_prefix(prefix, files):\n for f in files:\n f = os.path.join(prefix, f)\n if not os.path.exists(f):\n return False\n else:\n return True", "def startsWith(self, prefix: str) -> bool:\n node = self.root\n for c in prefix:\n if c not in node.children:\n return False\n return True", "def startsWith(self, prefix):\n if prefix[0] not in self.trie:\n return False\n cur = self.trie[prefix[0]]\n for char in prefix[1:]:\n if char not in cur.nexts:\n return False\n cur = cur.nexts[char]\n return True", "def is_cyclically_reduced(self):\n if not self:\n return True\n return self[0] != self[-1]**-1", "def startsWith(self, prefix):\n now = self.tree\n for i in prefix:\n if i in now:\n now = now[i]\n else:\n return False\n return True", "def resolve_conflicts(self):\n\n neighbours = self.nodes\n new_chain = None\n\n # We're only looking for chains longer than ours\n max_length = len(self.chain)\n\n # Grab and verify the chains from all the nodes in our network\n for node in neighbours:\n response = requests.get(f'http://{node}:5000/chain')\n\n if response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n\n # Check if the length is longer and the chain is valid\n if length > max_length:\n max_length = length\n new_chain = chain\n\n # Replace our chain if we discovered a new, valid chain longer than ours\n if new_chain:\n self.chain = new_chain\n return True\n\n return False", "def test_check_bc_duplicates_added_demultiplex(self):\r\n\r\n # Should not find any duplicates\r\n header =\\\r\n ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence', 'run_prefix',\r\n 'Description']\r\n mapping_data = [['s-1', 'ACGT', 'AAAA', '1', 's1&data'],\r\n ['s2', 'CGTA', 'AAAA', '2', 's2_data']]\r\n errors = []\r\n\r\n errors = check_bc_duplicates(header,\r\n mapping_data,\r\n errors,\r\n has_barcodes=True,\r\n variable_len_barcodes=False,\r\n added_demultiplex_field='run_prefix')\r\n\r\n expected_errors = []\r\n\r\n self.assertEqual(errors, expected_errors)\r\n\r\n # Should not find any duplicates with var length turned on.\r\n header =\\\r\n ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence', 'run_prefix',\r\n 'Description']\r\n mapping_data = [['s-1', 'ACGT', 'AAAA', '1', 's1&data'],\r\n ['s2', 'CGTA', 'AAAA', '2', 's2_data']]\r\n errors = []\r\n\r\n errors = 
check_bc_duplicates(header,\r\n mapping_data,\r\n errors,\r\n has_barcodes=True,\r\n variable_len_barcodes=True,\r\n added_demultiplex_field='run_prefix')\r\n\r\n expected_errors = []\r\n\r\n self.assertEqual(errors, expected_errors)\r\n\r\n # Should not find errors when only looking at added field\r\n header =\\\r\n ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence', 'run_prefix',\r\n 'Description']\r\n mapping_data = [['s-1', 'ACGT', 'AAAA', '1', 's1&data'],\r\n ['s2', 'CGTA', 'AAAA', '2', 's2_data']]\r\n errors = []\r\n\r\n errors = check_bc_duplicates(header,\r\n mapping_data,\r\n errors,\r\n has_barcodes=False,\r\n variable_len_barcodes=False,\r\n added_demultiplex_field='run_prefix')\r\n\r\n expected_errors = []\r\n\r\n self.assertEqual(errors, expected_errors)", "def resolve_conflict(self):\n neighbours = self.nodes\n new_chain = None\n #We're only looking for chains Longer than ours\n max_length = len(self.chain)\n #Grab and verify the chains from all the other nodes in our netwrok\n for node in neighbours:\n response = requests.get(f'http://{node}/chain')\n if response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n #check if the lentgh is longer and the cain is valid\n if length > max_length and self.valid_chain(chain):\n max_length = length\n new_chain = chain\n\n #replace our chain if we're discovered a new valid chain, Longer than ours\n if new_chain:\n self.chain = new_chain\n return True\n\n return False", "def startsWith(self, prefix):\n currNode = self.root\n\n for c in prefix:\n if c not in currNode.children:\n return False\n currNode = currNode.children[c]\n return True", "def startsWith(self, prefix: str) -> bool:\n cur = self.root\n for letter in prefix:\n if letter not in cur:\n return False\n cur = cur[letter]\n return True" ]
[ "0.688956", "0.67497593", "0.6492469", "0.58144426", "0.57607526", "0.57607526", "0.57380426", "0.5663673", "0.56448567", "0.55977595", "0.5587944", "0.55387944", "0.55290985", "0.54872364", "0.5447168", "0.5441908", "0.5441635", "0.5411554", "0.5400539", "0.53535", "0.5329227", "0.5328217", "0.53248304", "0.5314708", "0.529402", "0.52910244", "0.5287264", "0.52854717", "0.52736425", "0.5268301" ]
0.69614404
0
Returns a new list where all s-gonal candidates have been removed.
def remove_sgons(s_value, candidates): return list(filter(lambda x: x.s != s_value, candidates))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cleanup():\n for s in [missiles, explosions, bonus]:\n\n set_to_remove = set([])\n for m in s:\n if m.isDead:\n set_to_remove.add(m)\n\n s.difference_update(set_to_remove)", "def rm(x, l):\n return [y for y in l if x != y]", "def removed_vms(self) -> List[qubesadmin.vm.QubesVM]:\n return list(set(self._initial_vms) - set(self.selected_vms))", "def _remove_initial_objects_from_list(self, all):\n\n new_list = []\n for obj in all:\n if obj not in self.initial_set_of_objects:\n new_list.append(obj)\n\n return new_list", "def remove_duplicates(possible_vulns):\n return list(set(possible_vulns))", "def remove_features(sets_x, unused_features):\n\n # initiate empty list for return variable\n significant_x = [] \n\n # iterate through subsets and their corresponding insignificant features\n for x, features in zip(sets_x, unused_features):\n # remove features from subset and store the result into list\n significant_x.append(np.delete(x,features,1))\n \n return significant_x", "def remove_empty_genes(self):\n to_remove = []\n for gene in self.genes:\n if not gene.mrnas:\n to_remove.append(gene)\n if to_remove:\n for gene in to_remove:\n self.genes.remove(gene)\n sys.stderr.write(\"Removed empty gene \" + gene.identifier + \"\\n\")\n self.removed_genes.extend(to_remove)\n return to_remove", "def delete_gkeeper(alist):\n\n res = [player for player in alist if player[2] != ['Por']]\n\n return res", "def remove_from_candidates(fit, candidates):\n if not candidates: return candidates\n row, col, n = fit\n del candidates[(row, col)]\n for k, v in candidates.items():\n if k[0] == row or k[1] == col:\n try:\n v.remove(n)\n except:\n continue\n return candidates", "def get_removed_sids(self, queryset, to_have, not_to_have):\n to_remove = set()\n if to_have == [] and not_to_have == []:\n return to_remove\n else:\n for result in queryset:\n mappings = ConceptMapping.objects.filter(section=result.pk)\n concepts = Concept.objects.filter(pk__in=mappings.values('concept')).distinct()\n related_labels = set()\n for concept in concepts:\n temp_set = set(concept.get_ancestors().values_list('label', flat=True))\n temp_set.add(concept.label)\n related_labels.update(temp_set)\n if self.is_out(related_labels, to_have, not_to_have):\n to_remove.add(result.pk)\n return to_remove", "def remove_elements(l, e):\n return [x for x in l if x != e]", "def _prune_candidates(self, beam_width=None):\n if beam_width is None:\n beam_width = self.beam_width\n if len(self.candidates) <= beam_width:\n return\n neg_scores = np.array([-cand.logp_total() for cand in self.candidates])\n parted_indices = np.argpartition(neg_scores, beam_width - 1)\n self.candidates = np.array(self.candidates)[parted_indices[:beam_width]].tolist()", "def deleteCandidates():\n return prepJSON(cs411_game.getProposedDeletes())", "def remove(self, *args):\n return _libsbml.ListOfPossibleSpeciesFeatureValues_remove(self, *args)", "def cleaned_list():\n ws_oc = catalog.srcs.copy() # write-safe read copy for\n # the GLEAM object catalog\n cat = catalog.srcs.copy()\n # we loop in reverse, to avoid concurrent mod. exceptions\n for i in range(len(ws_oc) - 1, 0, -1):\n # classic. 
The easiest way to check if a value is NaN:\n # it won't equal itself\n if ws_oc[i].alpha != ws_oc[i].alpha:\n cat = np.delete(cat, i)\n return cat", "def remove_pairings(pairings, mentors, candidates):\n for mentor, candidate in pairings:\n for row in candidates:\n if row[0] == candidate:\n candidates.remove(row)\n for row in mentors:\n if row[0] == mentor:\n mentors.remove(row)\n\n validate_data(mentors, candidates)", "def cull(self):\n # genetics.cpp:2716\n num_parents = int(self.pop.survival_thresh * len(self) + 1)\n self.sort_genomes()\n self.genomes = self.genomes[:num_parents]", "def clean_overlapping(overlapping):\n remove = []\n for square in overlapping:\n if len(overlapping[square]) == 1:\n remove.append(square)\n for square in remove:\n overlapping.pop(square)\n return overlapping", "def remove_dead(all_animals):\n dead = []\n for x in all_animals:\n if not x.is_alive:\n dead.append(x)\n\n for x in dead:\n all_animals.remove(x)", "def remove_possibles(self):\n for row in range(self.board_size):\n for col in range(self.board_size):\n self.remove_poss(row, col)", "def filter_cds(self):\n log.debug(\"Running {} filter.\".format(self.__class__.__name__))\n\n tmp_cds = copy.copy(self.cds)\n\n # Start the recursive depth-first search.\n self._remove_recursively(tmp_cds)\n\n log.debug(\"Done with {} items.\".format(len(tmp_cds)))\n\n return tmp_cds", "def cleanholdercandidates(lst):\n for sent in lst:\n for token in sent:\n if 'holder_candidate' in token:\n del token['holder_candidate']", "def generateCandidates(self):\n\t\tprint(\"Candidate list:\\n\")\n\t\tkeys = list(self.prune_list.keys())\n\t\ttuple_count = len(keys[0])\n\t\tprune_list = {}\n\t\ttup = []\n\t\tfor v in comb(keys, 2):\n\t\t\ta = set(v[0])\n\t\t\tb = set(v[1])\n\t\t\t\n\t\t\t# If there's as many common element in a & b as one less than tuple_count\n\t\t\tif((len(a & b) == (tuple_count - 1)) and (tuple(a | b) not in tup)):\n\t\t\t\ttup.append(tuple(a | b))\n\t\t\t\tprint(tup[-1])\n\t\t\t\t# Update prune list\n\t\t\t\tcount = self.getSupport(tup[-1])\n\t\t\t\tif(count >= self.support):\n\t\t\t\t\tprune_list[tup[-1]] = count\n\t\treturn prune_list", "def Remove(locList):\n final_list = []\n for loc in locList:\n if loc[1] not in final_list:\n final_list.append(loc[1])\n\n return final_list", "def prune(pybel_list, min_RMSD):\n #Set up OBAling object\n align = openbabel.OBAlign()\n #Loop\n i = 0\n total_removed = 0\n while i < len(pybel_list):\n referens = pybel_list[i].OBMol #reference\n align.SetRefMol(referens)\n j = i + 1\n while j < len(pybel_list):\n target = pybel_list[j].OBMol #target\n align.SetTargetMol(target)\n #Align and ret rmsd\n if align.Align():\n rmsd = align.GetRMSD()\n if rmsd < min_RMSD:\n pybel_list.pop(j) #remove from both lists\n total_removed += 1\n else:\n j = j + 1\n else:\n print \"Couldn't align\"\n raise Exception()\n #end of inner loop\n i = i + 1\n #end of outer loop\n print \"finished deleting, total number of \\\n removed conformers is\", total_removed\n return pybel_list", "def __cullArchive(self):\n if len(self.genomes) <= self.max_size:\n return\n\n n_delete = len(self.genomes) - self.max_size\n indices = sorted([(lf, i) for i,lf in enumerate(self.local_fitnesses)])\n to_delete = set( i for _, i in indices[:n_delete] )\n self.genomes = [g for i,g in enumerate(self.genomes) if i not in to_delete]\n self.fitnesses = [f for i,f in enumerate(self.fitnesses) if i not in to_delete]\n self.features = [f for i,f in enumerate(self.features) if i not in to_delete]\n self.local_fitnesses = [f 
for i,f in enumerate(self.local_fitnesses) if i not in to_delete]\n\n assert len(self.genomes) <= self.max_size\n assert len(self.genomes) == len(self.fitnesses)\n assert len(self.genomes) == len(self.features)\n assert len(self.genomes) == len(self.local_fitnesses)", "def prune_features(self):\r\n for i, features in enumerate(self.curr_features):\r\n # Continue if the number of features in this grid does\r\n # not exceed the upper bound.\r\n if len(features) <= self.config.grid_max_feature_num:\r\n continue\r\n self.curr_features[i] = sorted(features, key=lambda x:x.lifetime, \r\n reverse=True)[:self.config.grid_max_feature_num]", "def removeOutliers(self):\n #With the DSFPlate object, we can just use self.wells.pop() to remove outliers\n visited = []\n discard = []\n for well in self.wells:\n if well not in visited:\n reps = []\n reps += self.originalPlate.repDict[well]\n pairs = combinations(reps,2)\n distMatrix = [[0 for x in range(len(reps))] for y in range(len(reps))]\n for pair in pairs:\n dist = sqrDiffWellFluoro(self.wells[pair[0]].fluorescence,self.wells[pair[1]].fluorescence)\n distMatrix[reps.index(pair[0])][reps.index(pair[1])] = dist\n distMatrix[reps.index(pair[1])][reps.index(pair[0])] = dist\n keep = rh.discardBad(reps,distMatrix,SIMILARITY_THRESHOLD)\n for rep in reps:\n visited.append(rep)\n if rep not in keep:\n discard.append(rep)\n for well in discard:\n self.wells[well].fluorescence = None\n self.delCurves.append(well)\n return", "def remove_pedal(self):\n return Melody([n.remove_pedal() for n in self.notes], nb_bars=self.nb_bars, tags=set(self.tags))", "def _remove_points(self, points_to_remove, teams_population):\n for team in teams_population:\n for point in points_to_remove:\n if point.point_id_ in team.results_per_points_:\n team.results_per_points_.pop(point.point_id_)" ]
[ "0.62687725", "0.6201206", "0.61726826", "0.6111378", "0.60760725", "0.6075531", "0.6048856", "0.59979814", "0.59737307", "0.59074605", "0.59073585", "0.5861006", "0.58275676", "0.58228827", "0.5821122", "0.58096284", "0.57756793", "0.577101", "0.5767358", "0.57616895", "0.572907", "0.5717013", "0.5714127", "0.5712537", "0.5712404", "0.57037866", "0.5696503", "0.5692275", "0.56218034", "0.5616839" ]
0.718452
0
Prunes the obtained tree according to the minimal gain (entropy or Gini).
def prune(tree, minGain, evaluationFunction=entropy, notify=False): # recursive call for each branch if tree.trueBranch.results == None: prune(tree.trueBranch, minGain, evaluationFunction, notify) if tree.falseBranch.results == None: prune(tree.falseBranch, minGain, evaluationFunction, notify) # merge leaves (potentionally) if tree.trueBranch.results != None and tree.falseBranch.results != None: tb, fb = [], [] for v, c in tree.trueBranch.results.items(): tb += [[v]] * c for v, c in tree.falseBranch.results.items(): fb += [[v]] * c p = float(len(tb)) / len(tb + fb) delta = evaluationFunction(tb+fb) - p*evaluationFunction(tb) - (1-p)*evaluationFunction(fb) if delta < minGain: if notify: print('A branch was pruned: gain = %f' % delta) tree.trueBranch, tree.falseBranch = None, None tree.results = uniqueCounts(tb + fb)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _prune( tree, impurity_crit, dataSet, treeSeq ):\n\n\t\tsaved = {}\n\n\t\ttotal_leaf_impurity, num_leaves = DecisionTree._fetch(tree, impurity_crit, dataSet, saved)\n\n\t\tnodes, sets, G = saved['node'], saved['set'], saved['G']\n\n\t\t# choose TreeNode such that g is minimum to prune\n\t\tmin_g_ind = np.argmin(G)\n\t\tnode2Prune = nodes[min_g_ind]\n\t\tnode2Prune.value = DecisionTree._make_leaf(sets[min_g_ind], impurity_crit)\n\t\tnode2Prune.cut_off = None\n\n\t\t# get a new tree pruned\n\t\ttreeSeq['alpha'].append(G[min_g_ind])\n\t\ttreeSeq['tree'].append(tree)\n\t\ttreeSeq['num_leaves'].append(num_leaves-node2Prune.leaves()+1)\n\n\t\tif not (tree.left.cut_off is None and tree.right.cut_off is None):\n\n\t\t\tDecisionTree._prune(deepcopy(tree), impurity_crit, dataSet, treeSeq )\n\t\telse:\n\t\t\treturn", "def prune_tree(tree, cutoff, posteriors):\n new_tree = []\n for e in tree:\n try:\n if posteriors[e] > cutoff:\n new_tree.append(e)\n except KeyError:\n if posteriors[e[::-1]] > cutoff:\n new_tree.append(e)\n return new_tree", "def prune(self, n_leaves):\n self.tree_ = prune(self.tree_, n_leaves)\n return self", "def prune_tree ( self ):\n tree = copy.deepcopy ( self.tree )\n change_made = True\n # As long as changes are made, recursively prune from the root node.\n while change_made:\n change_made = self.prune_node ( tree, tree.root )\n return tree\n # End prune_tree()", "def prune(tree, testSet, res, technique):\n assert technique in [\"reduced_error\"]\n if technique == \"reduced_error\":\n tbSet = testSet[testSet[tree.col] >= tree.value] #find which test observations belong to this tree's true branch\n fbSet = testSet[testSet[tree.col] < tree.value] #find which test observations belong to this tree's false branch\n \n if tree.tb.results is None: #Check if the true branch of this sub-tree is a leaf\n ptb = prune(tree.tb, tbSet, res, technique) #If not, recursively travel down the true branch and prune it.\n else:\n ptb = tree.tb #If the true branch is a leaf, then the true branch has--in essence--already been pruned.\n if tree.fb.results is None: #Check if the false branch of this sub-tree is a leaf\n pfb = prune(tree.fb, fbSet, res, technique) #If not, recursively travel down the false branch and prune it.\n else:\n pfb = tree.fb #If the false branch is a leaf, then the false branch has--in essence--already been pruned.\n \n #Sum the number of misclassifications of the test data at each of the leaves of this node\n wrong_in_leaves = __deep_count_errors(ptb, tbSet, res) + __deep_count_errors(pfb, fbSet, res)\n \n #Count the number of misclassificationsof the test data that would occur if this node were treated as a leaf\n wrong_at_node = __count_errors(tree, testSet, res)\n \n #Assess whether or not treating the node as a leaf improves the accuracy on the test set\n if wrong_at_node <= wrong_in_leaves: \n #NOTE:The following line of code seems slightly redundant since count_errors(tree, testSet, res) had to call \n #__get_results(tree). I should set up some way to save the output of that function call instead of calling it twice.\n return decisionNode(results = __get_results(tree)) #If so, return a decisionNode where the node is a leaf\n else:\n #If not, return a decisionNode where the node splits on the same column and value as before, but the \n #true and false branches are the pruned-versions of the original true and false branches. 
See above for\n #definition of ptb and pfb\n return decisionNode(col = tree.col, value = tree.value, tb = ptb, fb = pfb)", "def prune_tree(self):\n tree = copy.deepcopy(self.tree)\n change_made = True\n # As long as changes are made, recursively prune from the root node.\n while change_made:\n change_made = self.prune_node(tree, tree.root)\n return tree", "def pruning_algorithm(self):\n # traverse tree to get conditional likelihood estimate at root.\n self.set_qmat()\n for node in self.tree.treenode.traverse(\"postorder\"):\n if not node.is_leaf(): \n self.node_conditional_likelihood(node)\n logger.debug(\n f\"node={node.idx}; likelihood=[{node.likelihood[0]:.6f}, {node.likelihood[1]:.6f}]\") \n\n # multiply root prior times the conditional likelihood at root\n root = self.tree.treenode\n lik = (\n (1 - self.prior_root_is_1) * root.likelihood[0] + \n self.prior_root_is_1 * root.likelihood[1]\n )\n return lik", "def prune():\n with tf.Graph().as_default() as g:\n # Input evaluation data\n images, labels = rn.inputs(eval_data=True)\n\n # inference model.\n logits = rn.inference(images, 15)\n\n # Calculate predictions.\n top_k_op = tf.nn.in_top_k(logits, labels, 1)\n\n # Create a saver\n saver = tf.train.Saver()\n\n # Create session to restore, and restore data\n sess = tf.InteractiveSession()\n\n # Queue runner\n tf.train.start_queue_runners()\n\n ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)\n if ckpt and ckpt.model_checkpoint_path:\n # Restores from checkpoint\n saver.restore(sess, ckpt.model_checkpoint_path)\n # extract global_step from it.\n global_step_num = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]\n else:\n print('No checkpoint file found')\n return\n\n precision = eval_once(sess, top_k_op)\n \n \"\"\"\n # Get all variables\n lst_variables = tf.global_variables()\n lst_values = sess.run(tf.global_variables())\n\n # Get the pruning information\n r = np.arange(0,0.2,0.01)\n p = []\n for reduce_factor in r:\n kernel_index, channel_to_delete_pack, pruning_number_pack = \\\n pru_cal(lst_variables, lst_values, reduce_factor=reduce_factor)\n print('reduce factor is %.3f' % reduce_factor)\n\n # Delete these variables\n counter = 0\n for i in kernel_index:\n for j in range(pruning_number_pack[counter]):\n sess.run(tf.assign(lst_variables[i][:, :, :, channel_to_delete_pack[counter][j]],\n tf.zeros(\n tf.shape(lst_variables[i][:, :, :, channel_to_delete_pack[counter][j]])),\n name=lst_variables[i][:, :, :, channel_to_delete_pack[counter][j]].name))\n counter = counter + 1\n\n # Real evaluation, after pruning\n p.append(eval_once(sess, top_k_op))\n\n return r, p\n \"\"\"", "def prune(self, x_val, y_val):\n\n # make sure that the classifier has been trained before predicting\n if not self.is_trained:\n raise Exception(\"DecisionTreeClassifier has not yet been trained.\")\n\n # get the maximum depth\n deepest_depth = get_max_depth(self.root)\n\n # explore the depth starting from (max_depth - 1) to half of the max_depth\n half_of_max_depth = deepest_depth // 2\n for depth in range(deepest_depth - 1, half_of_max_depth, -1):\n explore_nodes_to_prune(self, self.root, x_val, y_val, depth)\n\n print(\"Pruning completed\")", "def prune(self, root, X, Y):\n # calculate the gini index of this subtree if the children of root is trimmed\n pruned_gini = len(X) * gini(Counter(Y).values())\n pruned_loss = pruned_gini\n # if root is a leaf node, return loss directly\n if root.col is None:\n return pruned_loss, 1\n\n # cur_loss record the loss function when root is not trimmed\n 
cur_loss = 0.\n # size record the size of this subtree\n size = 1\n\n selected_ind = X[:, root.col] == root.val\n other_ind = X[:, root.col] != root.val\n selected_X = X[selected_ind]\n other_X = X[other_ind]\n selected_Y = Y[selected_ind]\n other_Y = Y[other_ind]\n\n # trim the left node recursively\n child_loss, child_size = self.prune(root.left, selected_X, selected_Y)\n cur_loss += child_loss\n size += child_size\n\n # trim the right node recursively\n child_loss, child_size = self.prune(root.right, other_X, other_Y)\n cur_loss += child_loss\n size += child_size\n\n # alpha means that\n # if the weight of size of tree in the loss function is larger than alpha,\n # this node will be trimmed\n alpha = (pruned_loss - cur_loss) / (size - 1)\n root.alpha = alpha\n # FIXME: why its length is always 1?\n self.possible_alpha.add(alpha)\n return cur_loss, size", "def on_prune(self, function_graph, node, reason):", "def prune_trie(trie, threshold):\n\tnode = trie.root\n\tpq = []\n\tfor i in node.children.keys():\n\t\tpq.append((node.children[i],node.children[i].char))\n\twhile len(pq) > 0:\n\t\tcur_node, char = pq.pop()\n\t\tif cur_node.isEnd == False:\n\t\t\tfor i in cur_node.children.keys():\n\t\t\t\tpq.append((cur_node.children[i],char + cur_node.children[i].char))\n\t\telse:\n\t\t\tif cur_node.weight < threshold:\n\t\t\t\tdelete(trie, char)\n\t\t\telse:\n\t\t\t\tcontinue\n\treturn trie", "def prune(self, n_leaves):\n true_node_count = self.node_count - sum(self.children_left == _tree.TREE_UNDEFINED)\n leaves = np.where(self.children_left == _tree.TREE_LEAF)[0]\n to_remove_count = true_node_count - 2*n_leaves + 1\n\n nodes_to_remove = pruning_order(self, max_to_prune = to_remove_count/2)\n\n # self._copy is gone, but this does the same thing\n out_tree = _tree.Tree(*self.__reduce__()[1])\n out_tree.__setstate__(self.__getstate__().copy())\n\n for node in nodes_to_remove:\n #TODO: Add a Tree method to remove a branch of a tree\n out_tree.children_left[out_tree.children_left[node]] = _tree.TREE_UNDEFINED\n out_tree.children_right[out_tree.children_left[node]] = _tree.TREE_UNDEFINED\n out_tree.children_left[out_tree.children_right[node]] = _tree.TREE_UNDEFINED\n out_tree.children_right[out_tree.children_right[node]] = _tree.TREE_UNDEFINED\n out_tree.children_left[node] = _tree.TREE_LEAF\n out_tree.children_right[node] = _tree.TREE_LEAF\n\n # FIXME: currently should not change node_count, after deletion\n # this is not number of nodes in the tree\n #out_tree.node_count -= 2*len(nodes_to_remove)\n\n return out_tree", "def pruning_order(self, max_to_prune=None):\n\n def _get_terminal_nodes(children):\n \"\"\"Lists the nodes that only have leaves as children\"\"\"\n leaves = np.where(children[:,0]==_tree.TREE_LEAF)[0]\n child_is_leaf = np.in1d(children, leaves).reshape(children.shape)\n return np.where(np.all(child_is_leaf, axis=1))[0]\n\n def _next_to_prune(tree, children=None):\n \"\"\"Weakest link pruning for the subtree defined by children\"\"\"\n\n if children is None:\n children = tree.children\n\n t_nodes = _get_terminal_nodes(children)\n g_i = tree.init_error[t_nodes] - tree.best_error[t_nodes]\n\n return t_nodes[np.argmin(g_i)]\n\n if max_to_prune is None:\n max_to_prune = self.node_count - sum(self.children_left == _tree.TREE_UNDEFINED)\n\n children = np.array([self.children_left.copy(), self.children_right.copy()]).T\n nodes = list()\n\n while True:\n node = _next_to_prune(self, children)\n nodes.append(node)\n\n if (len(nodes) == max_to_prune) or (node == 0):\n return np.array(nodes)\n\n 
#Remove the subtree from the children array\n children[children[node], :] = _tree.TREE_UNDEFINED\n children[node, :] = _tree.TREE_LEAF", "def copyAndCleanTree (self):\n\t\t# TODO: Need to do several things here:\n\t\t# - NoNames\n\t\t# - copy support scores to internal branch names\n\n\t\t## Main:\n\t\t# Copy the tree so as not to damage original\n\t\tete_tree = deepcopy (self.data)\n\n\t\t# set root branch to zero, make change later\n\t\tete_tree.dist = 0.0\n\n\t\t# find max / min branchlength for diagnostic purposes\n\t\t# doesn't use negative or zero branch lengths\n\t\t# Also clean names\n\t\tmax_bl = None\n\t\tmin_bl = None\n\t\tfor n in ete_tree.traverse (\"postorder\"):\n\t\t\tif (0.0 < n.dist):\n\t\t\t\tif (max_bl is None) or (max_bl < n.dist):\n\t\t\t\t\tmax_bl = n.dist\n\t\t\t\tif (min_bl is None) or (n.dist < min_bl):\n\t\t\t\t\tmin_bl = n.dist\n\t\t\tclean_name = n.name.strip()\n\t\t\tif (clean_name[0] == \"'\") and (clean_name[-1] == \"'\"):\n\t\t\t\tclean_name = clean_name[1:-1]\n\t\t\tn.name = clean_name\n\n\t\t# set all branches to be at least 1/100 of the largest or 1/10 the\n\t\t# smallest, whichever is larger\n\t\tdefault_bl = max (max_bl / 100, min_bl/10)\n\t\tfor n in ete_tree.traverse (\"postorder\"):\n\t\t\tif (n.dist <= 0.0):\n\t\t\t\tn.dist = default_bl\n\n\t\t# get support values on tree by setting supprt as name\n\t\tfor n in ete_tree.traverse (\"postorder\"):\n\t\t\t# if an internal node\n\t\t\tif (not n.is_leaf()):\n\t\t\t\tn.name = config.SUPPORT_FMT % n.support\t\n\n\t\t# very hacky - calc appropriate scale bar size and stick on root\n\t\tmagn = int (floor (log10 (max_bl)))\n\t\tscale_size = 10**magn\n\t\tete_tree.scale_size = scale_size\n\n\t\t## Postcondtions & return:int ( floor ( log10 (x)))\n\t\treturn ete_tree", "def _next_to_prune(tree, children=None):\n\n if children is None:\n children = tree.children\n\n t_nodes = _get_terminal_nodes(children)\n g_i = tree.init_error[t_nodes] - tree.best_error[t_nodes]\n\n return t_nodes[np.argmin(g_i)]", "def decision_tree(original_training_data,call_depth):\n\n ''' Checking the stopping criterion. If yes then it returns the majority class (Muffin or CupCake) '''\n if check_if_stopping_criterion_is_met(original_training_data.values) or call_depth > 10:\n majority = classification(original_training_data)\n return majority\n\n else:\n ''' Each time we split the data and go deeper, we increment the depth of the tree '''\n call_depth += 1\n\n ''' Finding the best attribute, best threshold to split data, best minimum entropy '''\n best_split_index, best_attribute, best_threshold, best_minimum_entropy = find_best_attribute_threshold_entropy(original_training_data)\n original_training_data_values = original_training_data.values\n\n best_split_values = original_training_data_values[:,best_split_index]\n\n less_than_threshold = original_training_data[best_split_values <= best_threshold]\n more_than_threshold = original_training_data[best_split_values > best_threshold]\n\n ''' Initializing a variable called as condition which stores the format of the key for the resulting decision tree dictionary '''\n condition = original_training_data.columns[best_split_index] + \" <= \" + str(best_threshold)\n\n ''' Initializing a dictionary where key is condition and value is a list. 
This is the basic data structure in which the\n resulting decision tree is stored '''\n sub_tree = {condition: []}\n\n ''' Calling the decision tree recursively '''\n left_tree = decision_tree(less_than_threshold, call_depth)\n right_tree = decision_tree(more_than_threshold, call_depth)\n\n ''' For removing edge cases where on either split, the resulting decision tree gives the same result '''\n if left_tree == right_tree:\n sub_tree = left_tree\n else:\n ''' Appending the smaller trees in the final decision tree '''\n sub_tree[condition].append(left_tree)\n sub_tree[condition].append(right_tree)\n\n return sub_tree", "def prune(self, x_val, y_val):\n\n # make sure that the classifier has been trained before predicting\n if not self.is_trained:\n raise Exception(\"DecisionTreeClassifier has not yet been trained.\")\n\n #######################################################################\n # ** TASK 4.1: COMPLETE THIS METHOD **\n #######################################################################\n\n self.prune_tree(self.decision_tree, x_val, y_val)\n\n return self.decision_tree", "def __prune_tree(self, accuracy, node, validation_data, depth):\n\n if node.is_leaf:\n return accuracy\n\n if node.depth == depth:\n if node.left_child.is_leaf and node.right_child.is_leaf:\n accuracy = self.__prune_node(accuracy, node, validation_data)\n return accuracy\n\n accuracy = self.__prune_tree(accuracy, node.left_child, validation_data, depth)\n accuracy = self.__prune_tree(accuracy, node.right_child, validation_data, depth)\n\n return accuracy", "def quantifier_lower(self):\n tree = deepcopy(self)\n all_DPs = [subtree for subtree in tree.subtree_dict.values() if (isinstance(subtree.label, FeatStructNonterminal) and (subtree.label[feature_type] == 'DP'))]\n trace_DPs = [DP for DP in all_DPs if DP.label['TRACE']]\n non_trace_DPs = diff(all_DPs, trace_DPs)\n for DP in trace_DPs:\n for other_DP in non_trace_DPs:\n if (DP.children[0].label.index == other_DP.ID):\n DP.label = deepcopy(other_DP.label)\n DP.children = deepcopy(other_DP.children)\n break\n tree = remove_QRed_DPs(tree)\n tree.label_nodes()\n tree.make_nx_tree()\n return tree", "def prune_trivial_subtrees(self):\n num_pruned = 0\n if not self.is_leaf:\n children_classes = set()\n num_trivial_children = 0\n for child_node in self.nodes:\n num_pruned += child_node.prune_trivial_subtrees()\n if child_node.is_leaf:\n num_trivial_children += 1\n children_classes.add(child_node.most_common_int_class)\n if num_trivial_children == len(self.nodes) and len(children_classes) == 1:\n self.is_leaf = True\n num_pruned += num_trivial_children\n self.nodes = []\n return num_pruned", "def main():\n\n # path of model that should be pruned\n model_path = ('saved_models/PATH_TO_MODEL/model.h5')\n\n # weights below this threshold will be set to zero\n # thresholds can be defined per layer\n thresholds = [0.03, 0.01, 0.01]\n\n # specify training epochs for retraining\n epochs = [1, 1, 1]\n # define the layer index that should be pruned\n # only feedforward layers can be pruned!!!\n layers = [3, 4, 5]\n\n # TrainingData section\n # specify input dimension of the sliding window using 'slice_len'\n slice_len = 30\n\n # output delay for AREUS data\n delay = 6\n\n td1 = TrainingData()\n training_data = td1.window_dim_1_sized_td(slice_len, delay)\n\n # Pruning runs for each layer\n p_run = PruningRun(model_path, training_data)\n for i, layer in enumerate(layers):\n p_run.prune_layer(layer, thresholds[i], epochs[i])\n\n # when no retraining is needed\n 
#p_run.prune_layer_no_retraining(layer, thresholds[i])", "def prune(self, rng, get_nodes, max_depth=1):\n if not self.children:\n return\n for i_c, child in enumerate(self.children):\n if child.min_depth >= max_depth:\n self.children[i_c] = Node(\n rng.choice(get_nodes(arity=0)),\n self.tree_type)\n self.children[i_c].parent = self\n elif max_depth > 1:\n child.prune(rng, get_nodes, max_depth - 1)", "def test_small_tree_treewidth(self):\n G = self.small_tree\n # the order of removal should be [1,2,4]3[5,6,7]\n # (with [] denoting any order of the containing nodes)\n # resulting in treewidth 2 for the heuristic\n treewidth, _ = treewidth_min_fill_in(G)\n assert_equals(treewidth, 2)", "def test_small_tree_treewidth(self):\n G = self.small_tree\n # the order of removal should be [1,2,4]3[5,6,7]\n # (with [] denoting any order of the containing nodes)\n # resulting in treewidth 2 for the heuristic\n treewidth, _ = treewidth_min_fill_in(G)\n assert_equals(treewidth, 2)", "def deleteLower(self, threshold, current=None, parent=None):\n if current is None:\n if self:\n current = self.root\n else:\n return self # break\n\n if current > threshold:\n if current.left:\n self.deleteLower(threshold, current.left, current)\n elif current < threshold:\n if current.right:\n current.data = current.right.data\n current.left = current.right.left\n current.right = current.right.right\n self.deleteLower(threshold, current, parent)\n else:\n if parent:\n parent.left = None # restart current\n else:\n self.clear() # restart root\n else: # equals\n current.left = None\n\n return self", "def minimal_subtree(tree):\n tree_copy = tree.copy()\n\n for n in tree_copy.traverse():\n if len(n.children) == 1:\n n.delete()\n\n new_root = tree_copy\n while len(new_root.children) == 1:\n new_root = new_root.children[0]\n\n new_tree = new_root.detach()\n return new_tree", "def __build_tree__(self, features, classes, depth=0):\n\n # TODO: finish this.\n root = None\n if (len(set(classes)) <= 1) and (len(classes) != 0) :\n return DecisionNode(None,None,None,classes[0])\n elif (len(classes) == 0):\n return DecisionNode(None,None,None,2)\n elif depth == self.depth_limit:\n return DecisionNode(None,None,None,max(set(classes), key=list(classes).count))\n else:\n# if depth == 0:\n features = np.array(features)\n classes = np.array(classes).reshape(-1,1)\n feat_shape = features.shape\n sample_list = range(feat_shape[0])\n gains = np.zeros((feat_shape[1]))\n indices = np.zeros((feat_shape[1]))\n for i in range(feat_shape[1]):\n attribute = features[:,i]\n for j in range(20):\n split_indx = int(np.random.choice(sample_list, replace=False))\n idx_above = np.where(attribute > attribute[split_indx])[0]\n idx_below = np.where(attribute < attribute[split_indx])[0]\n classes_below = classes[idx_below,:].reshape(1,-1)[0]\n classes_above = classes[idx_above,:].reshape(1,-1)[0]\n gain = gini_gain(list(classes.reshape(1,-1)[0]),[list(classes_below),list(classes_above)])\n if gain > gains[i]:\n gains[i] = gain\n indices[i] = split_indx\n indx = np.argmax(gains)\n split_indx = int(indices[indx])\n attribute = features[:,indx]\n idx_above = np.where(attribute > attribute[split_indx])[0]\n idx_below = np.where(attribute < attribute[split_indx])[0] \n features_below = features[idx_below,:]\n features_above = features[idx_above,:]\n classes_below = classes[idx_below,:].reshape(1,-1)[0]\n classes_above = classes[idx_above,:].reshape(1,-1)[0]\n if (len(classes_below) != 0) and (len(classes_above) != 0):\n root = DecisionNode(None,None,lambda 
feat:feat[indx] > features[split_indx,indx])\n root.left = self.__build_tree__(features_above, classes_above, depth+1)\n root.right = self.__build_tree__(features_below, classes_below, depth+1)\n return root\n elif (len(classes_below) == 0) and (len(classes_above) != 0):\n return DecisionNode(None,None,None,max(set(classes_above), key=list(classes_above).count))\n elif (len(classes_above) == 0) and (len(classes_below) !=0):\n return DecisionNode(None,None,None,max(set(classes_below), key=list(classes_below).count))\n else:\n return DecisionNode(None,None,None,2)", "def decision(grid):\n child = Maximize((grid,0),-999999999,999999999)[0]\n Child = child.map\n g = grid.clone()\n for M in range(4):\n if g.move(M):\n if g.map == Child:\n # global prune\n # global pruneLog\n # pruneLog.append(prune)\n # print(prune)\n # print(sum(pruneLog)/len(pruneLog))\n return M\n g = grid.clone()", "def improve_tree(tree, freq_dict):\n # todo" ]
[ "0.71780753", "0.6755928", "0.6264333", "0.6245853", "0.61987823", "0.61671454", "0.6129602", "0.6123983", "0.6105711", "0.60467637", "0.603748", "0.599225", "0.5975164", "0.5917185", "0.5786919", "0.5713195", "0.5691192", "0.5679927", "0.56610376", "0.5647163", "0.56247056", "0.56016415", "0.5600159", "0.5585429", "0.5585429", "0.5524571", "0.55187875", "0.551619", "0.5495746", "0.5491554" ]
0.7445959
0
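The snippets in the record above all center on cost-complexity pruning of decision trees, where the key per-node quantity is alpha = (pruned_loss - cur_loss) / (size - 1): the loss increase per node removed if the subtree is collapsed. A minimal, self-contained sketch of that computation (the names are illustrative, not taken from any one snippet):

def node_alpha(pruned_loss: float, subtree_loss: float, subtree_size: int) -> float:
    # alpha = (loss if this node became a leaf - loss of the intact subtree)
    # divided by the number of nodes saved by collapsing it (size - 1).
    # A node is pruned when the size-penalty weight exceeds its alpha.
    if subtree_size <= 1:
        raise ValueError("subtree must contain more than one node")
    return (pruned_loss - subtree_loss) / (subtree_size - 1)

# Collapsing a 5-node subtree raises the loss from 2.0 to 2.6, so its
# alpha is 0.15; any penalty weight above 0.15 would prune it.
print(node_alpha(pruned_loss=2.6, subtree_loss=2.0, subtree_size=5))  # 0.15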
Loads a CSV file and converts all floats and ints into basic datatypes.
def loadCSV(file): def convertTypes(s): s = s.strip() try: return float(s) if '.' in s else int(s) except ValueError: return s reader = csv.reader(open(file, 'rt')) return [[convertTypes(item) for item in row] for row in reader]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loadCSV(input_file):", "def load_csv():\n df = pd.read_csv(datafolder+filename, decimal=decimal).astype(\n {'min': 'float', 'max': 'float'})\n return df", "def place_types_read_csv(self, csv_input):\n csv_data = pd.read_csv(csv_input, encoding='UTF-8', sep=',', na_values=[''])\n self.table = csv_data.fillna('').applymap(lambda x: x.strip() if type(x) == str else x)\n self.log.info('Data read from CSV %s' % csv_input)", "def load_from_file_csv(cls):\n try:\n with open(cls.__name__ + \".csv\", \"r\") as f:\n ld = []\n reader = csv.DictReader(f)\n for row in reader:\n for key, val in row.items():\n row[key] = int(val)\n ld.append(row)\n return [cls.create(**item) for item in ld]\n except FileNotFoundError:\n return []", "def read_csv_file(csv_file):\n return cudf.read_csv(csv_file, delimiter=' ',\n dtype=['int32', 'int32', 'float32'], header=None)", "def load_csv(filename):\r\n dataset = list()\r\n with open(filename, 'r') as file:\r\n csv_reader = reader(file, delimiter='\\t')\r\n for row in csv_reader:\r\n if not row:\r\n continue\r\n dataset.append([float(i) for i in row])\r\n return dataset", "def read_csv():", "def parse_file(args):\n\n data_types = []\n headers = []\n\n with open(args.input, \"r\") as csvfile:\n reader = csv.reader(csvfile)\n have_columns = False\n\n for line in reader:\n if have_columns:\n index = 0\n for col in line:\n if col != \"\": \n if data_types[index] != TYPE_STRING and data_types[index] != TYPE_FLOAT:\n data_types[index] = get_data_type(col)\n # else:\n # data_types[index] = TYPE_STRING\n index += 1\n\n else:\n headers = line \n for col in line:\n data_types.append(\"\")\n have_columns = True \n\n return headers, data_types", "def read_csv(self, csv_input):\n # https://stackoverflow.com/a/45063514\n dtypes = {\n 'lat': 'U',\n 'long': 'U'\n }\n csv_data = pd.read_csv(csv_input, encoding='UTF-8', sep=',', na_values=[''], dtype=dtypes)\n\n self.table = csv_data.fillna('').applymap(lambda x: x.strip() if type(x) == str else x)\n self.log.info('Data read from CSV %s' % csv_input)\n #print('Data read from CSV %s' % csv_input)", "def from_csv(self,path):\n self.csv_path = path\n\n try:\n fh = open(self.csv_path, \"r\")\n except IOError:\n print(\"Error: no such file or directory\") \n\n self.csv_dataframe = pd.DataFrame(pd.read_csv(self.csv_path, header=0, keep_default_na=False)).dropna(axis=0, how='any')\n test = pd.DataFrame(pd.read_csv(self.csv_path)).dropna(axis=0, how='any')\n types = [0 for i in range(len(test.dtypes))]\n a = fh.readline()\n a = a[:-1] # remove '\\n'\n x = a.split(',') # x stores the name of each column\n fh.close()\n\n #type transformation\n for i in range(len(test.dtypes)):\n if test.dtypes[i].name[0:3] == 'int' or test.dtypes[i].name[0:5] == 'float':\n if (x[i][0] == \"'\" or x[i][0] == '\"'):\n x[i] = x[i].replace('\\'', '').replace('\"', '')\n for j in test[x[i]]:\n if not (j == 0 or (j > 1000 and j < 2100)):\n types[i] = test.dtypes[i].name[0:5]\n break\n else:\n types[i] = 'year'\n elif test.dtypes[i].name[0:6] == 'object':\n if (x[i][0] == \"'\" or x[i][0] == '\"'):\n x[i] = x[i].replace('\\'', '').replace('\"', '')\n for j in test[x[i]]:\n if j != 0 and not(re.search(r'\\d+[/-]\\d+[/-]\\d+', j)):\n types[i] = 'varchar'\n break\n else:\n types[i] = 'date'\n \n name = path.rsplit('/', 1)[-1][:-4]\n self.table_info(name, x, types)\n self.import_method = methods_of_import[2] # = 'csv'\n\n self.show_csv_info()", "def open_convert_and_clean_csv(csv_data_file):\n imported_data = tablib.Dataset().load(open(csv_data_file).read())\n 
dataset = []\n for row in imported_data:\n if float(row[1]) > 0 and float(row[2]) > 0:\n dataset.append((row[0], float(row[1]), float(row[2])))\n return dataset", "def _load(self):\n op_type_file_path = os.path.join(\n self._profiling_dir,\n self._csv_file_to_analyse.format(self._device_id)\n )\n op_type_file_path = validate_and_normalize_path(\n op_type_file_path, raise_key=\"Invalid op_type_file_path\")\n if not os.path.isfile(op_type_file_path):\n log.warning('The file <%s> does not exist.', op_type_file_path)\n return\n\n with open(op_type_file_path, 'r') as file:\n csv_reader = csv.reader(file)\n _ = next(csv_reader)\n for info in csv_reader:\n self._data.append(self._convert_field_type(info))", "def load_csv(fn):\n def iter_func():\n with open(fn, 'r') as infile:\n for line in infile:\n line = line.rstrip().split(',')\n for item in line:\n yield float(item)\n load_csv.rowlength = len(line)\n data = np.fromiter(iter_func(), dtype=float)\n data = data.reshape((-1, load_csv.rowlength))\n return data", "def csv_loader(csv_file):\n df = pd.read_csv(csv_file, sep=';', parse_dates=['Data_Alteraçao'])\n pd.set_option('display.float_format', '{:.0f}'.format)\n\n df = df.fillna(0)\n df = df.drop(columns=['Cod. Pareamento', 'Cod. UF', 'Sigla UF', 'Cod. Subarea',\n 'Nome Subarea', 'Cod. Municipio', 'Nome Municipio', 'Codigo Agencia',\n 'Nome Agencia', 'Cod. Setor', 'Cod. Logradouro CNEFE',\n 'Tipo Logradouro CNEFE', 'Titulo Logradouro CNEFE',\n 'Nome Logradouro CNEFE', 'Nome Tratado CNEFE', 'Tipo Logradouro DNE',\n 'Titulo Logradouro DNE', 'Nome Logradouro DNE', 'Nome Tratado DNE',\n 'Logradouro Completo DNE', 'Distancia', 'Cod. Match', 'Motivo Match',\n 'CEPs Face', 'Localidade Face',\n 'Alterar Logradouro para DNE?', 'Observaçao', 'SIAPE Alteração',\n 'Nome Alteraçao', 'Data_Alteraçao', 'Status', 'Unnamed: 33'])\n\n # df.astype({'CEP Logradouro CNEFE': 'int32'}).dtypes\n\n df['CEP'] = df['CEP'].str.replace(' ', '', regex=False)\n\n ceps_dne = []\n for index, row in df.iterrows():\n if type(row.CEP) == str:\n for cep in row.CEP.split(','):\n # print(index, cep)\n ceps_dne.append(int(cep))\n\n ceps_cnefe = df['CEP Logradouro CNEFE'].astype(int).tolist()\n ceps = ceps_dne + ceps_cnefe\n ceps = list(set(ceps))\n return pd.Series(ceps)", "def csv2columns(csvFile, columns):\n import csv\n names = []; types = []; cols = []\n for column in columns.split(','):\n if column.find(':') > 0:\n name, type = column.split(':')\n else:\n name = column; type = 'float'\n names.append(name.strip())\n types.append( eval(type.strip()) ) # get type conversion function from type string\n cols.append([])\n\n print csvFile\n for fields in csv.DictReader(urlopen(csvFile).readlines(), skipinitialspace=True):\n tmpColVals = []\n try:\n for i, type in enumerate(types): tmpColVals.append( type(fields[names[i]]) )\n except Exception, e:\n print \"Got exception coercing values: %s\" % e\n continue\n for i in range(len(types)): cols[i].append(tmpColVals[i])\n return [N.array(col) for col in cols]", "def load_simple_csv(filename, target_col = -1):\n #target_names = []\n #target = []\n #features = []\n n_samples = -1\n with open(filename) as csv_file:\n for line in csv_file:\n n_samples += 1\n\n with open(filename) as csv_file:\n data_file = csv.reader(csv_file)\n data_names = np.array(next(data_file))\n #print target_names.shape\n feature_names = np.delete(data_names,target_col) # 1 target , other cols are all features\n n_features = feature_names.shape[0]\n\n target = np.empty((n_samples,), dtype = np.dtype(float))\n features = 
np.empty((n_samples, n_features))\n type_list = [ (label, np.dtype(t)) for label,t in dtype_dict.items() ]\n type_list.pop(target_col)\n dt = np.dtype(type_list)\n # print len(dt)\n for i, item in enumerate(data_file):\n # print item,len(item)\n t = item.pop(target_col)\n target[i] = np.asarray(t, dtype = np.float64)\n features[i] = np.asarray(item, dtype = dt)\n\n return Bunch(data=features, target=target,\n target_names=None, # precit problem\n DESCR=None,\n feature_names=feature_names)", "def load(filename):\n with open(filename,'r') as fd:\n csv_in = csv.reader(fd, delimiter=',', quotechar='\"')\n keys = csv_in.next()\n data = {k:[] for k in keys}\n for row in csv_in:\n for k,v in zip(keys,row):\n data[k].append(float(v))\n return data", "def parse_csv(csv, as_ints=False):\n items = []\n for val in csv.split(\",\"):\n val = val.strip()\n if val:\n items.append(int(val) if as_ints else val)\n return items", "def parse(csvfilename):\r\n with open(csvfilename, 'r') as f:\r\n reader = csv.reader(f, delimiter=';')\r\n #reader = csv.reader(f, delimiter=';', quotechar=\"'\")\r\n data = list(reader)\r\n # transform data into numpy array\r\n data = np.array(data).astype(float)\r\n return data", "def __load_csv(filename):\n fp = open(Parser.DATA_FOLDER_PATH + filename + '.csv', 'r')\n records = []\n for line in fp:\n items = line.strip().split(',')\n x, y, z = '0', '0', '0'\n if len(items) > 1:\n x = items[1]\n if len(items) > 2:\n y = items[2]\n if len(items) > 3:\n z = items[3]\n\n values = [x, y, z]\n records.append(values)\n\n # Discard some beginning data which may be noisy\n # del records[:int(len(records) / 30)]\n n = len(records)\n\n for i in range(n):\n rec = []\n # Consider X, Y, Z axes\n for k in range(3):\n # If can convert string to float\n try:\n val = float(records[i][k])\n except ValueError:\n val = 0\n rec.append(val)\n\n # Replace it\n records[i] = rec\n return records", "def read(self, filename):\n lines = []\n rawData = []\n file = open(filename, \"rU\")\n csv_reader = csv.reader( file )\n for line in csv_reader:\n lines.append(line)\n for item in range(len(line)):\n line[item] = line[item].replace(\" \",\"\")\n self.headers = lines[0]\n self.types = lines[1]\n rawData = lines[2:]\n for row in rawData:\n newRow = []\n for i in range(len(row)):\n if self.types[i] != 'numeric':\n continue\n else:\n newRow.append(float((row[i].strip())))\n self.finalData.append(newRow)\n self.data = np.matrix(self.finalData)\n\n for i in range(len(self.types)):\n if self.types[i] == 'numeric':\n self.numHeadList.append(self.headers[i])\n i = 0\n for header in self.numHeadList:\n self.header2col[header] = i\n i += 1\n\n return self.data", "def read_from_csvfile(fname, types, header=True):\n num_columns = len(types)\n parsed = []\n with open(fname) as f:\n reader = csv.reader(f)\n if header:\n columns = next(reader)\n else:\n columns = ['col '+str(i) for i in range(num_columns)]\n\n for row in reader:\n parsed.append([types[i](row[i]) if row[i] else None for i in range(num_columns)])\n\n return [{k:v for k,v in zip(columns, row)} for row in parsed]", "def load_from_file_csv(cls):\n if path.exists(cls.__name__ + \".csv\") is False:\n return []\n with open(cls.__name__ + \".csv\", \"r\", newline='') as f:\n listofinstances = []\n reader = csv.DictReader(f)\n for row in reader:\n for key, value in row.items():\n row[key] = int(value)\n listofinstances.append(cls.create(**row))\n return listofinstances", "def loader(filename,sep=',',rowskip=[], colskip=[], axis=1,names=1,fromstring=0):\n\n #manages 
excpetions to the csv file incase of missing data\n if (type(filename)==str) and (fromstring==1):\n iterable=filename.strip('\\n').split('\\n')\n content=np.array([i for i in csv.reader(iterable,delimiter=sep)])\n elif type(filename)==np.ndarray:\n content=filename\n else:\n content=np.array([i for i in\\\n csv.reader(open(filename,'r'),delimiter=sep)])\n #content=np.genfromtxt(filename,delimiter=sep,dtype=str)\n\n if rowskip:\n #rowskip.sort(reverse=True)\n content=np.delete(content,rowskip,0)\n #for i in rowskip: content.pop(i)\n\n if colskip:\n #colskip.sort(reverse=True)\n content=np.delete(content,colskip,1)\n #for i in colskip: content.pop(i)\n\n if axis==0: # if the file oriented column-wise\n #content=list(map(list,zip(*content)))\n content=content.T\n\n\n\n if names is 0:\n variables=np.arange(content.shape[1]).tolist()\n offset=0\n else:\n variables=content[0].tolist()\n offset=1\n\n try:\n content=np.array([conv_col(col) for col in\n content[offset:].T],dtype='object')\n arity=np.array([np.unique(i).size for i in content])\n return dataset(variables,content.T,arity)\n except ValueError: \n print( 'Data could not be loaded, failed converting to float.')\n return content", "def __init__(self, csv_path, column_types=None, set_columns=False, file_headers=True, encoding=\"utf-8-sig\",\n missing_to_zero=False, print_warnings=True):\n\n self.file_path = Path(csv_path)\n self.file_name = self.file_path.stem\n\n self._file_headings = file_headers\n self._encoding = encoding\n\n self.headers = self._extract_headers()\n self.row_length = len(self.headers)\n\n self.missing_to_zero = missing_to_zero\n self.print_warnings = print_warnings\n self.invalid_typed = []\n\n self.column_types = self._determine_column_types(column_types)\n self.row_data, self.column_data, self.column_length = self._set_data(set_columns)\n\n # Old definitions kept for legacy, but new names added for clarity\n self.num_cols = self.row_length\n self.num_rows = self.column_length\n\n if len(self.invalid_typed) > 0 and self.print_warnings:\n print(f\"Warning: The following column-row-value-type where not correct so loaded as strings:\\n\"\n f\"{sorted(self.invalid_typed)}\")", "def load_from_file_csv(cls):\n list_rectangle = [\"id\", \"width\", \"height\", \"x\", \"y\"]\n list_square = [\"id\", \"size\", \"x\", \"y\"]\n filename = cls.__name__ + \".csv\"\n dictionary = []\n result = []\n\n try:\n with open(filename, encoding=\"utf-8\") as file:\n obj_list = csv.reader(file)\n # read obj_list <_csv.reader object at 0x7fbfe5614b38>\n if cls.__name__ == \"Rectangle\":\n for list in obj_list:\n # create dictionary\n dict = {}\n for key, value in zip(list_rectangle, list):\n dict[key] = int(value)\n # create an object and append to a list\n result.append(cls.create(**dict))\n if cls.__name__ == \"Square\":\n for list in obj_list:\n # create dictionary\n dict = {}\n for key, value in zip(list_square, list):\n dict[key] = int(value)\n # create an object and append to a list\n result.append(cls.create(**dict))\n return result\n except:\n return result", "def load(csvfile):\n return PsychoPyCSV(csvfile)", "def parse_file(file):\n\n def isfloat(value):\n try:\n float(value)\n return True\n except ValueError:\n return False\n\n rows = [row for row in csv.reader(file.splitlines(), delimiter=\",\", doublequote=True, escapechar=None, quotechar='\"', quoting=csv.QUOTE_MINIMAL, skipinitialspace=True)]\n if len(rows) < 2:\n raise Exception(\"File must contain at least two rows.\")\n\n attributes = []\n dimensions = [{\"name\":\"row\", 
\"type\":\"int64\", \"begin\":0, \"end\":len(rows[1:])}]\n data = []\n\n # go through the csv by column\n for column in zip(*rows):\n column_has_floats = False\n\n # start from 1 to avoid the column name\n for value in column[1:]:\n if isfloat(value):\n column_has_floats = True\n try:# note NaN's are floats\n output_list = ['NaN' if x=='' else x for x in column[1:]]\n data.append(numpy.array(output_list).astype(\"float64\"))\n attributes.append({\"name\":column[0], \"type\":\"float64\"})\n\n # could not convert something to a float defaulting to string\n except Exception as e:\n column_has_floats = False\n break\n\n if not column_has_floats:\n data.append(numpy.array(column[1:]))\n attributes.append({\"name\":column[0], \"type\":\"string\"})\n\n if len(attributes) < 1:\n raise Exception(\"File must contain at least one column.\")\n\n return attributes, dimensions, data", "def handle_csv(self):\n try:\n reader = csv.reader(open(self.options.datafile, 'r'))\n except IOError:\n errormsg(_('Cannot read \"{}\"'.format(self.options.datafile)))\n raise Exception(_('Cannot read \"{}\"'.format(self.options.datafile)))\n if self.options.var_type == 'name':\n try:\n self.header = reader.next()\n except StopIteration:\n errormsg(_('Data file \"{}\" contains no data'.format(\n self.options.datafile)))\n raise Exception(_('Data file \"{}\" contains no data'.format(\n self.options.datafile)))\n self.data = []\n for row in reader:\n self.data.append(row)", "def readCSVasFloat(filename):\n returnArray = []\n lines = open(filename).readlines()\n for line in lines:\n line = line.strip().split(\",\")\n if len(line) > 0:\n returnArray.append(np.array([np.float32(x) for x in line]))\n\n returnArray = np.array(returnArray)\n return returnArray" ]
[ "0.7278124", "0.7128511", "0.7073164", "0.6921024", "0.69140124", "0.6835938", "0.6834292", "0.68103707", "0.6777402", "0.6776886", "0.674985", "0.67280674", "0.6687896", "0.6687714", "0.66312677", "0.6607438", "0.65860206", "0.6575137", "0.65615463", "0.6559472", "0.6544173", "0.6443688", "0.64400417", "0.64385676", "0.6427096", "0.6412775", "0.63802916", "0.6363707", "0.635382", "0.63483685" ]
0.7568201
0
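The positive document above is easy to exercise end to end; a minimal usage sketch (the file name and contents are made up for illustration, and loadCSV is assumed importable in this scope):

import csv
import os
import tempfile

# Write a tiny CSV, then load it with loadCSV from the record above.
rows = [["name", "count", "ratio"], ["a", "3", "0.5"]]
path = os.path.join(tempfile.mkdtemp(), "demo.csv")
with open(path, "w", newline="") as f:
    csv.writer(f).writerows(rows)

data = loadCSV(path)
print(data)  # [['name', 'count', 'ratio'], ['a', 3, 0.5]]
assert isinstance(data[1][1], int) and isinstance(data[1][2], float)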
Ban an IP from all DDNet servers in the given region. Minutes must be greater than 0. Region must be the 3-character server code.
async def global_ban_region(self, ctx: commands.Context, region: str, ip: str, name: str, minutes: int, *, reason: clean_content): await self._global_ban(ctx, ip, name, minutes, reason, region)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_exclude_ip_ban(self):\n pass", "def ban_host(self, host, hard=False, duration=None):\n # TODO: Timed bans?\n logger.verbose(\"Banning IP {0}\".format(host))\n self.ip_bans.add(host, hard)", "async def global_unban(self, ctx: commands.Context, *, name: str):\n if re.match(r'^[\\d\\.-]*$', name) is None:\n query = 'SELECT ip FROM ddnet_bans WHERE name = $1;'\n ips = [r['ip'] for r in await self.bot.pool.fetch(query, name)]\n if not ips:\n return await ctx.send(f'`{escape_backticks(name)}` isn\\'t banned')\n else:\n ips = [name]\n\n for ip in ips:\n try:\n await self.ddnet_unban(ip)\n except RuntimeError as exc:\n await ctx.send(exc)\n else:\n await ctx.send(f'Successfully unbanned `{ip}`')", "def ban_all():\n sudo(\"varnishadm 'ban req.url ~ .'\")", "def getBanIps(self):\n banned = []\n q = \"\"\"SELECT clients.ip as target_ip FROM penalties INNER JOIN clients ON penalties.client_id = clients.id\n WHERE penalties.type = 'Ban' AND penalties.inactive = 0 AND penalties.time_expire = -1\n GROUP BY clients.ip\"\"\"\n cursor = self.query(q)\n if cursor:\n while not cursor.EOF:\n banned.append(cursor.getValue('target_ip'))\n cursor.moveNext()\n cursor.close()\n return banned", "async def global_ban(self, ctx: commands.Context, ip: str, name: str, minutes: int, *, reason: clean_content):\n await self._global_ban(ctx, ip, name, minutes, reason)", "def ban_command(server, output):\n for target in output.message.split()[1:]:\n if target in server.ops:\n server.tell(output.name, 'Operators cannot be banned')\n continue\n server.banip(target)\n server.ban(target)\n return", "def ban_ip(self, ip_id, length=BAN_TIME_IP):\n self.sql('UPDATE ip_addresses SET ban_until = UNIX_TIMESTAMP(NOW()) + %s, ban_count = ban_count + 1 WHERE id = %s', length, ip_id)\n \n if not PRODUCTION_SERVER:\n print 'Banned IP {} for {} seconds'.format(ip_id, length)\n \n return length", "def test_replace_host_subnet(self):\n pass", "def listBlockedIpAddresses(ip_addresses):\n for ip_address in ip_addresses[:500]:\n hostname = ip_address\n if resolve_ipaddress:\n try:\n hostname = str(resolver.query(reversename.from_address(ip_address), \"PTR\")[0])\n except:\n hostname = None\n logger.info('%s (%s)' % (ip_address, hostname))", "def remove_ban(self, vapor_id_or_ip):\n identity = vapor_id_or_ip if len(vapor_id_or_ip) == 36 else vapor_id_or_ip.split(\":\")[0] \\\n if ':' in vapor_id_or_ip else vapor_id_or_ip\n cmd = '{}removeBan {}'.format(self.console, identity)\n self.write_command(cmd)", "def test_list_host_subnet(self):\n pass", "def do_balance(self, hosts, args):\n try:\n # use hosts IDs to call the Rest API and make a decision\n # return the wanted vm and a list of underutilised hosts\n print(('33333333-3333-3333-3333-333333333333', ['11111111-1111-1111-1111-111111111111']))\n except Exception as ex:\n print(ex, file=sys.stderr)", "def block_ip_address(self, ip_address):\n\n rule = \"iptables -A INPUT -s \" + ip_address + \" -j DROP\\n\"\n rules = open('resources/rules.sh', 'r')\n regex = re.compile(ip_address, re.MULTILINE)\n match = regex.search(rules.read())\n rules.close()\n # check if a rule to block this ip has already been written, this can happen due to threading\n if not match:\n f = open('resources/rules.sh', 'r')\n rules = f.readlines()\n f.close()\n\n rules.insert(6, rule)\n\n f = open('resources/rules.sh', 'w')\n rules = \"\".join(rules)\n f.write(rules)\n f.close()\n subprocess.call([\"chmod\", \"755\", \"resources/rules.sh\"])\n subprocess.call(\"./resources/rules.sh\")\n print(\"IP address \" + 
ip_address + \" blocked\")", "def list_subnet(self):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/subnets\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while listing subnet.\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get subnet list Failed with status %s \" %\n response.status)\n return response.status\n output = json.loads(response.data)\n\n LOG_OBJ.info(\"subnet List : %s \" % output)\n return output[\"subnets\"]", "def block_ip(ip, logger, dashboard_log, firewall_ip_and_port):\n if not is_already_blocked(ip, firewall_ip_and_port):\n try:\n data = {\n \"ip\": ip,\n \"port\": 0,\n \"reason\": \"\"\n }\n request = requests.post(f\"http://{firewall_ip_and_port}/firewall/blocked\", json=data)\n if not request.ok:\n logger.error(f\"Blocking IP {ip} was unsuccessful. Code {request.status_code}\")\n dashboard_log.append({\"message\": f\"Blocking IP {ip} was unsuccessful. Code {request.status_code}\",\n \"time\": time.time()})\n return False\n return True\n except requests.exceptions.ConnectionError as e:\n logger.error(f\"Can't connect to firewall wrapper. {e}\")\n dashboard_log.append({\"message\": f\"Can't connect to firewall wrapper.\",\n \"time\": time.time()})\n return False\n # error, continue program", "def block_list(to_block_list, blocked_ips_list):\n to_be_blocked_list = []\n for host in to_block_list:\n found_ip = False\n host_ip = host['host']['ip_address']\n for blocked in blocked_ips_list:\n if blocked['ip'] == host_ip:\n found_ip = True\n # if we want to block already blocked IP, nothing happens,\n # but if the host IP was not found in blocked IPs, block it\n if not found_ip:\n to_be_blocked_list.append(host_ip)\n return to_be_blocked_list", "def blacklist_ips(self):\r\n if self.blacklist == '':\r\n return []\r\n return self.IPFilterList([addr.strip() for addr in self.blacklist.split(',')]) # pylint: disable=no-member\r", "def test_patch_host_subnet(self):\n pass", "def random_cidr(ip_pattern=None, mask=None, min_mask=0, max_mask=30):\n if mask is None:\n mask = random.randint(min_mask, max_mask)\n ip = random_ip(ip_pattern)\n return ''.join([ip, '/', str(mask)])", "def list_namespaced_host_subnet(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_namespaced_host_subnet\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/oapi/v1/hostsubnets'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP 
header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1HostSubnetList',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def unblock_list(blocked_ips_list, to_block_list):\n to_be_unblocked_list = []\n for blocked in blocked_ips_list:\n found_ip = False\n blocked_ip = blocked['ip']\n for host in to_block_list:\n if host['host']['ip_address'] == blocked_ip:\n found_ip = True\n # if the blocked_ip was not found in list of blockings, unblock it\n if not found_ip:\n to_be_unblocked_list.append(blocked_ip)\n return to_be_unblocked_list", "def getTempBanIps(self):\n banned = []\n q = \"\"\"SELECT clients.ip AS target_ip FROM penalties INNER JOIN clients ON penalties.client_id = clients.id\n WHERE penalties.type = 'TempBan' AND penalties.inactive = 0 AND penalties.time_expire > %s\n GROUP BY clients.ip\"\"\" % int(time())\n cursor = self.query(q)\n if cursor:\n while not cursor.EOF:\n banned.append(cursor.getValue('target_ip'))\n cursor.moveNext()\n cursor.close()\n return banned", "def parse_ignore_cidr_option(cidrlist):\n l = list()\n for c in cidrlist.split(','):\n try:\n s = c.strip(' ')\n i = IP(s)\n l.append(i)\n except ValueError as e:\n logging.warning('Received invalid CIDR in ignore_cidr: {}'.format(e))\n return l", "async def banAll(ctx):\r\n await ctx.message.delete()\r\n for member in ctx.guild.members:\r\n try:\r\n await member.ban()\r\n except Exception as e:\r\n print(\r\n f\"{Fore.RED}[-]banAll => {Fore.RESET}Failed to ban {member}\\n{e}\\n\"\r\n )", "def rule_40_extend_subnet_cidr(session):\n\n config, conn = session[\"config\"], session[\"conn\"]\n\n def append_cidr(config_side, conn_vpc):\n\n cidr = conn_vpc.get_all_subnets([\n config_side[\"res\"][\"subnet_id\"]\n ])[0].cidr_block\n\n for user_cidr in config_side[\"ipsec\"][\"subnets\"]:\n if cidr_overlaps(cidr, user_cidr):\n return\n\n config_side[\"ipsec\"][\"subnets\"].append(cidr)\n\n append_cidr(config[\"server\"], conn[\"server\"](\"vpc\"))\n append_cidr(config[\"client\"], conn[\"client\"](\"vpc\"))\n\n return True", "def get_static_ip(cidr_block, mask, ip_block):\n return cidr_block.replace(mask,ip_block)", "def ban(sock, user):\r\n chat(sock, \"/ban {}\".format(user))", "def get_blocked_ips(logger, dashboard_log, firewall_ip_and_port):\n try:\n request = requests.get(f\"http://{firewall_ip_and_port}/firewall/blocked\")\n if request.ok:\n return request.json()\n else:\n logger.warning(f\"Getting blocked IPs on firewall failed with code {request.status_code}\")\n dashboard_log.append({\"message\": f\"Getting blocked IPs on firewall failed with code {request.status_code}\",\n \"time\": time.time()})\n except requests.exceptions.ConnectionError as e:\n logger.error(f\"Can't connect to firewall wrapper. 
{e}\")\n dashboard_log.append({\"message\": \"Can't connect to firewall wrapper.\",\n \"time\": time.time()})\n # error, continue program", "def mass_arp_poison(victim_ips: Iterable[str],\n burst_delay: int,\n n_bursts: int,\n verbose: bool = False\n ) -> None:\n packets = [_new_unsolicited_reply_redirect(v1, v2)\n for v1, v2 in permutations(victim_ips, 2)]\n for _ in range(n_bursts):\n send(packets, verbose=verbose)\n time.sleep(burst_delay)" ]
[ "0.5752577", "0.5619576", "0.5450576", "0.54234755", "0.5233336", "0.5208012", "0.51359165", "0.51315814", "0.5063948", "0.4974001", "0.4927776", "0.48956954", "0.48733237", "0.48646176", "0.4827561", "0.48271024", "0.48046353", "0.4773166", "0.47669205", "0.47156936", "0.47109735", "0.4710787", "0.4702485", "0.46440008", "0.46254325", "0.46103665", "0.4606248", "0.4596624", "0.4585901", "0.4582957" ]
0.6312553
0
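The docstring imposes two preconditions (minutes greater than 0, region a 3-character server code) that the visible snippet does not enforce itself; a hedged sketch of what such validation could look like (the helper name and error type are assumptions, not taken from the bot's code):

def validate_ban_args(region: str, minutes: int) -> None:
    # Hypothetical pre-check mirroring the docstring's constraints;
    # the real bot may enforce these inside _global_ban instead.
    if len(region) != 3:
        raise ValueError("region must be the 3-character server code")
    if minutes <= 0:
        raise ValueError("minutes must be greater than 0")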
Unban an IP from all DDNet servers. If you pass a name, all currently globally banned IPs associated with that name will be unbanned.
async def global_unban(self, ctx: commands.Context, *, name: str): if re.match(r'^[\d\.-]*$', name) is None: query = 'SELECT ip FROM ddnet_bans WHERE name = $1;' ips = [r['ip'] for r in await self.bot.pool.fetch(query, name)] if not ips: return await ctx.send(f'`{escape_backticks(name)}` isn\'t banned') else: ips = [name] for ip in ips: try: await self.ddnet_unban(ip) except RuntimeError as exc: await ctx.send(exc) else: await ctx.send(f'Successfully unbanned `{ip}`')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def unban(self, ctx, name: str):\n try:\n bans = await self.bot.get_bans(ctx.message.server)\n user = discord.utils.get(bans, name=name)\n if user is not None:\n await self.bot.unban(ctx.message.server, user)\n except discord.Forbidden:\n await self.bot.say('I do not have the proper permissions')\n except discord.HTTPException:\n await self.bot.say('Unbanning failed')\n else:\n await self.bot.say('\\N{OK HAND SIGN}')", "def deny(ip):\n return __apf_cmd(\"-d {}\".format(ip))", "def ip_drop(self, ip=None):\n if ip is None:\n self.request('/v1.1/unregister', 'POST')\n else:\n self.request('/v1.1/unregister/%s' % ip, 'POST')", "def ban_all():\n sudo(\"varnishadm 'ban req.url ~ .'\")", "def unban (phenny, input):\n if not input.admin: return\n text = input.group().split()\n argc = len(text)\n if argc < 2: return\n opt = text[1]\n banmask = opt\n channel = input.sender\n if opt.startswith('#'):\n if argc < 3: return\n channel = opt\n banmask = text[2]\n banmask = configureHostMask(banmask)\n if banmask == '': return\n phenny.write(['MODE', channel, '-b', banmask])", "def unblockAll():\n result = subprocess.Popen(\"/sbin/iptables -F INPUT 2>&1\", shell=True, stdout=subprocess.PIPE).stdout.read()\n if result.strip() != \"\":\n logger.error(\"Could not flush INPUT chain. Error: %s.\" % (result))\n result = subprocess.Popen(\"/usr/sbin/ipset destroy 2>&1\", shell=True, stdout=subprocess.PIPE).stdout.read()\n if result.strip() != \"\":\n logger.error(\"Could not destroy all ipsets. Error: %s.\" % (result))\n sys.exit(255)", "def test_exclude_ip_ban(self):\n pass", "def block_iptables(ip):\n try:\n subprocess.check_call(['iptables', '-A', 'INPUT', '-s', ip, '-j', 'DROP'])\n except OSError as e:\n if (e[0] == errno.EPERM):\n print(\"Since this script modifies the firewall with iptables it must be run with root privileges.\", file=sys.stderr)\n sys.exit(1)\n print(\"Dropping all packets from \" + ip)\n return True", "def detach_public_ip(self, name=None, ip=None):\n raise NotImplementedError", "async def unban(self, ctx, *, member): # don't convert to discord.Member as it isn't a server member, just a string\n banned_users = await ctx.guild.bans() # pulls ban list\n member_name, member_discriminator = member.split('#') # split the member name from the numerical discriminator\n for ban_entry in banned_users:\n user = ban_entry.user\n if (user.name, user.discriminator) == (member_name, member_discriminator):\n await ctx.guild.unban(user)\n await ctx.send(f'Unbanned {user.name}#{user.discriminator}')\n return", "def unbind(self, name):\n remove = []\n for n in name:\n if not self.is_array(n): continue\n self.drop(n, ignore_items=True)\n remove.append(n)\n if remove and self._verbose_infos:\n print(\"Remove mask structure for: '{}'\".format(\"', '\".join(remove)))\n return None", "def stop_network_nat(self):\n\t\tcmd = [\"/sbin/iptables\",\"-t\",\"nat\",\"-F\"]\n\t\toutput = self.check_output_safe(cmd)\n\t\tself.log.info(\"iptalbes fllushed.\")", "def remove_ban(self, vapor_id_or_ip):\n identity = vapor_id_or_ip if len(vapor_id_or_ip) == 36 else vapor_id_or_ip.split(\":\")[0] \\\n if ':' in vapor_id_or_ip else vapor_id_or_ip\n cmd = '{}removeBan {}'.format(self.console, identity)\n self.write_command(cmd)", "async def unban(ctx, *, member):\n banned_users = await ctx.guild.bans()\n member_name, member_discriminator = member.split(\"#\")\n\n for ban_entry in banned_users:\n user = ban_entry.user\n\n if (user.name, user.discriminator) == (member_name, member_discriminator):\n await 
ctx.guild.unban(user)\n await ctx.send(f\"Unbanned {user.mention}\")\n return", "def unblock_ip(ip, logger, dashboard_log, firewall_ip_and_port):\n try:\n request = requests.delete(f\"http://{firewall_ip_and_port}/firewall/{ip}\")\n if not request.ok:\n logger.error(f\"Unblocking IP {ip} was unsuccessful. Code {request.status_code}\")\n dashboard_log.append({\"message\": f\"Unblocking IP {ip} was unsuccessful. Code {request.status_code}\",\n \"time\": time.time()})\n return False\n return True\n except requests.exceptions.ConnectionError as e:\n logger.error(f\"Can't connect to firewall wrapper. {e}\")\n dashboard_log.append({\"message\": \"Can't connect to firewall wrapper.\",\n \"time\": time.time()})\n return False", "def unmask_name(self, name):\r\n if not self.has_mask():\r\n _ = self.capa_system.i18n.ugettext\r\n # Translators: 'unmask_name' is a method name and should not be translated.\r\n msg = _(\"unmask_name called on response that is not masked\")\r\n raise LoncapaProblemError(msg)\r\n return self._mask_dict[name]", "def unban_member_post(self, groupId, membershipId, membershipType):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/GroupV2/{groupId}/Members/{membershipType}/{membershipId}/Unban/\"))", "def ban_command(server, output):\n for target in output.message.split()[1:]:\n if target in server.ops:\n server.tell(output.name, 'Operators cannot be banned')\n continue\n server.banip(target)\n server.ban(target)\n return", "def deregister_elastic_ip(ElasticIp=None):\n pass", "def unblock(self):\n data = {'container': self._reddit.user.me().fullname,\n 'name': str(self), 'type': 'enemy'}\n url = API_PATH['unfriend'].format(subreddit='all')\n # PRAW5 REMOVE (return statement)\n return self._reddit.post(url, data=data)", "def wan_address_unvote(self, voter):\n assert isinstance(voter, Candidate)\n for vote, voters in self._wan_address_votes.iteritems():\n if voter.sock_addr in voters:\n if __debug__: dprint(\"removing vote for \", vote, \" made by \", voter)\n voters.remove(voter.sock_addr)\n if len(voters) == 0:\n del self._wan_address_votes[vote]\n return vote", "def killCAN(mIface, bbid):\n for i in xrange(100):\n mIface.can_pass(bbid, ModuleIface.CAN_NONE)", "def ban_host(self, host, hard=False, duration=None):\n # TODO: Timed bans?\n logger.verbose(\"Banning IP {0}\".format(host))\n self.ip_bans.add(host, hard)", "async def global_ban(self, ctx: commands.Context, ip: str, name: str, minutes: int, *, reason: clean_content):\n await self._global_ban(ctx, ip, name, minutes, reason)", "def stopNAT( root ):\n # Flush any currently active rules\n root.cmd( 'iptables -F' )\n root.cmd( 'iptables -t nat -F' )\n \n # Instruct the kernel to stop forwarding\n root.cmd( 'sysctl net.ipv4.ip_forward=0' )\n \n # Restart network-manager\n root.cmd( 'service network-manager start' )", "def unban(self):\n\n if self.get_permissions()['banned']:\n member_group = Group.query.filter(\n Group.admin == False,\n Group.super_mod == False,\n Group.mod == False,\n Group.guest == False,\n Group.banned == False\n ).first()\n\n self.primary_group_id = member_group.id\n self.save()\n return True\n return False", "def unblock_ip_view(request, ip):\n if request.method == 'POST':\n unblock_ip(ip)\n return HttpResponseRedirect(reverse(\"defender_blocks_view\"))", "def remove_ip(enode, portlbl, addr, shell=None):\n assert portlbl\n assert ip_interface(addr)\n port = enode.ports[portlbl]\n\n 
cmd = 'ip addr del {addr} dev {port}'.format(addr=addr, port=port)\n response = enode(cmd, shell=shell)\n assert not response", "async def async_turn_off(self):\n path = \"/ip/firewall/nat\"\n param = \".id\"\n value = None\n for uid in self._ctrl.data[\"nat\"]:\n if (\n self._ctrl.data[\"nat\"][uid][\"name\"]\n == f\"{self._data['protocol']}:{self._data['dst-port']}\"\n ):\n value = self._ctrl.data[\"nat\"][uid][\".id\"]\n\n mod_param = \"disabled\"\n mod_value = True\n self._ctrl.set_value(path, param, value, mod_param, mod_value)\n await self._ctrl.async_update()", "def net_undefine(network, server, virt=\"Xen\"):\n\n cmd = \"virsh -c %s net-undefine %s 2>/dev/null\" % (virt2uri(virt), network)\n ret, out = run_remote(server, cmd)\n\n return ret" ]
[ "0.69601077", "0.62606454", "0.62166715", "0.5990166", "0.5937297", "0.59223866", "0.5848375", "0.5733963", "0.5713095", "0.5705937", "0.57039034", "0.5630587", "0.56287026", "0.5598197", "0.55778", "0.5549266", "0.55139863", "0.547942", "0.54626197", "0.5460259", "0.5442353", "0.5424516", "0.54220116", "0.53955775", "0.5384593", "0.5382061", "0.535696", "0.5333921", "0.5290798", "0.5276206" ]
0.8504455
0
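The unban command dispatches on whether its argument looks like an address: re.match(r'^[\d\.-]*$', name) treats any string made up only of digits, dots, and dashes as an IP (or IP range) and everything else as a player name to resolve via the ddnet_bans table. The check behaves like this in isolation:

import re

# Same pattern the command uses to tell player names from IP-like strings.
IP_LIKE = re.compile(r'^[\d\.-]*$')

for arg in ["192.168.0.1", "1.2.3.0-1.2.3.255", "nameless tee"]:
    kind = "ip" if IP_LIKE.match(arg) else "name"
    print(arg, "->", kind)
# 192.168.0.1 -> ip
# 1.2.3.0-1.2.3.255 -> ip
# nameless tee -> name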
Get the definition of this managed folder. The definition contains name, description, checklists, tags, connection and path parameters, and metrics and checks setup.
def get_definition(self): return self.client._perform_json( "GET", "/projects/%s/managedfolders/%s" % (self.project_key, self.odb_id))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_definition(self):\n return self.definition", "def get_definition(self):\n return self.client._perform_json(\n \"GET\", \"/admin/groups/%s\" % self.name)", "def definition(self):\n\n return self._definition", "def definition(self):\n\n return self._definition", "def definition(self):\n return self._definition", "def folder(self):\n return self._folder", "def definition(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"definition\")", "def definition_manager(self):\n try:\n return self._definition_object\n except AttributeError:\n def_node = self.node.find('.//df')\n if def_node is None:\n def_node = self.node.find('.//xrg')\n if def_node is None:\n def_node = etree.Element('df')\n self._definition_object = Definition(def_node)\n return self._definition_object", "def folder(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"folder\")", "def folder(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"folder\")", "def get_folder(self):\n name = \"%s_%s\" % (self.PREFIX, self.FOLDER_NAME)\n folders = self.mw.get_folders()\n for fldr in folders:\n if fldr[\"name\"] == name:\n self.folder_id = fldr[\"folder_id\"]\n return\n self.folder_id = self.mw.create_folder(name)", "def get(self) -> FoldersModel:\n root: FoldersModel = self._get()\n return root", "def getFolder(self, resource):\n res = self.getRequest(self.parseUrl(resource, 'folders'))\n return vsdModels.Folder(**res)", "def folder(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"folder\")", "def definition(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"definition\")", "def getDefinitionURL(self):\n return _libsbml.ASTNode_getDefinitionURL(self)", "def dashboard_definition(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"dashboard_definition\")", "def folder(self) -> pulumi.Output[Optional['outputs.ChangeDataCaptureResponseFolder']]:\n return pulumi.get(self, \"folder\")", "def get(self):\n return self.directory_name", "def folder(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"folder\")", "def get_definition(self, labware_id: str) -> LabwareDefinition:\n return self.get_definition_by_uri(\n LabwareUri(self.get(labware_id).definitionUri)\n )", "def fetch(self):\r\n return self.modulestore.db_connection.get_definition(self.definition_locator.definition_id)", "def definition(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"definition\")", "def get_definition(self):\n return self.client._perform_json(\n \"GET\", \"/admin/connections/%s\" % self.name)", "def get_definition(self) -> dict:\n # Refresh the definition from the PyFiguration object\n self._set_definition(self.pyfiguration.definition)\n\n # Return the cleaned definition\n return {\n key: (\n self.definition.get(key, None)\n if not isinstance(value, Configuration)\n else value.get_definition()\n )\n for key, value in self.definition.items()\n }", "def getPath(self):\n return self.__folder", "def set_definition(self, definition):\n return self.client._perform_json(\n \"PUT\", \"/projects/%s/managedfolders/%s\" % (self.project_key, self.odb_id),\n body=definition)", "def get_input_schema(cls):\n return dict(properties=dict(folder_path=\"string\"))", "def _get_folder(self):\n # type: () -> str\n headers = Headers({\"content-type\": \"application/json\", \"accept\": \"application/json\"})\n response = self.connection.api_call(\n \"GET\", [\"v1\", \"resources\", self.id, \"folderpath\"], headers=headers\n )\n\n return response.json().get(\"path\")", "def getDefinitionURLString(self):\n return 
_libsbml.ASTNode_getDefinitionURLString(self)" ]
[ "0.5817693", "0.57296735", "0.5583368", "0.5583368", "0.5496289", "0.5493704", "0.5425934", "0.5191441", "0.51887023", "0.51887023", "0.51505697", "0.5133599", "0.5114542", "0.511077", "0.510388", "0.50844294", "0.5081377", "0.5072677", "0.5038703", "0.5038015", "0.50089884", "0.500123", "0.49842656", "0.49610913", "0.49587145", "0.49519157", "0.49490583", "0.49361703", "0.49329168", "0.49027532" ]
0.65869796
0
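A minimal usage sketch for the handle above; how the folder handle is obtained is an assumption (suggested by its project_key and odb_id attributes, not shown in the record), and the key names come from the docstring:

# Hedged sketch: the accessor on the project object is assumed.
folder = project.get_managed_folder("myFolderId")
definition = folder.get_definition()
print(definition["name"], definition.get("tags"))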
Set the definition of this managed folder.
def set_definition(self, definition): return self.client._perform_json( "PUT", "/projects/%s/managedfolders/%s" % (self.project_key, self.odb_id), body=definition)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def definition(self, definition):\n\n self._definition = definition", "def set_definition(self, definition):\n return self.client._perform_json(\n \"PUT\", \"/admin/groups/%s\" % self.name,\n body = definition)", "def _set_definition(self, definition: Dict[str, Any]):\n # Loop the keys and values of the provided definition\n for key, value in definition.items():\n\n if key not in self.store and key not in self.parents:\n self.store[key] = Configuration(pyfiguration=self.pyfiguration, parents=[*self.parents, key])\n\n self.definition[key] = value", "def set_definition(self, definition):\n return self.client._perform_json(\n \"PUT\", \"/admin/users/%s\" % self.login,\n body = definition)", "def definition(self, definition: List[PipelineDefinition]):\r\n self._definition = definition", "def set_folder(self, folder):\n self.folder = folder\n self.templates.directories[0] = folder\n self.app.root_path = folder", "def save(self):\n self.folder.client._perform_empty(\n \"PUT\", \"/projects/%s/managedfolders/%s\" % (self.folder.project_key, self.folder.odb_id),\n body=self.settings)", "def _set_target_folder(self, folder_id, folder_name):\n print('target id: ' + folder_id)\n print('target name: ' + folder_name)\n self.config['target_folder_id'] = folder_id\n self.config['target_folder_name'] = folder_name\n with open('config.json', 'w', encoding='UTF-8') as json_data_file:\n json.dump(self.config, json_data_file)", "def subFolder(self, value):\r\n self.__folder = str(value)", "def set_folder_name(self, folder_name=None):\n\n if folder_name == None:\n folder_name = 'Abstract-OneDim/'\n self.params['folder_name'] = folder_name\n else:\n self.params['folder_name'] = folder_name\n print 'Folder name:', self.params['folder_name']", "def set_folders(self, folders):\n\n self.folders = folders", "def set_definition(self, description):\n return self.client._perform_json(\n \"PUT\", \"/admin/connections/%s\" % self.name,\n body = description)", "def setGraphFolder(self, p):\n return self._set(graphFolder=p)", "def set_dev_folder(self):\n self.lblDevFolder.setText(ConfigHandler.cfg.dev_dir)", "def entry_set_folder(self, entry):\r\n global folder_name\r\n folder_name = filedialog.askdirectory()\r\n entry.delete(0, 'end')\r\n entry.insert(tk.END, folder_name)", "def update_definition(self, course_key, definition):\n bulk_write_record = self._get_bulk_ops_record(course_key)\n if bulk_write_record.active:\n bulk_write_record.definitions[definition['_id']] = definition\n else:\n self.db_connection.insert_definition(definition, course_key)", "def home_folder(self, home_folder):\n\n self._home_folder = home_folder", "def set(self, name, path):\n self.yaml[IDK_YAML_GROUP][name] = path\n self.write()", "def add_definition(self, definition):\n self._definitions[definition.name] = definition", "def set_defined(self):\n self._defined = 1", "def set_directory(self, directory):\n\t\tself.edit.set_text(directory)", "def __init__(self, type=\"uri_folder\", path=None, mode=\"rw_mount\", description=None):\n pass", "def add(self, name, definition):\n self._storage[name] = definition", "def definition_manager(self):\n try:\n return self._definition_object\n except AttributeError:\n def_node = self.node.find('.//df')\n if def_node is None:\n def_node = self.node.find('.//xrg')\n if def_node is None:\n def_node = etree.Element('df')\n self._definition_object = Definition(def_node)\n return self._definition_object", "def get_folder(self):\n name = \"%s_%s\" % (self.PREFIX, self.FOLDER_NAME)\n folders = self.mw.get_folders()\n 
for fldr in folders:\n if fldr[\"name\"] == name:\n self.folder_id = fldr[\"folder_id\"]\n return\n self.folder_id = self.mw.create_folder(name)", "def set_bookmark(self, key, val=None):\n if val is None:\n val = self.thisdir\n else:\n val = Directory(val)\n self.bookmarks.update_if_outdated()\n self.bookmarks[str(key)] = val", "def setDefinitionURL(self, *args):\n return _libsbml.ASTNode_setDefinitionURL(self, *args)", "def createFolder(self):\n raise NotImplementedError", "def set_root(self, root):\n self.root_path = root", "def get_definition(self, definer: Definer):\n self.definition = definer.define(self.word)" ]
[ "0.6675459", "0.6660227", "0.63897157", "0.6099818", "0.60483444", "0.5964043", "0.58501714", "0.5540315", "0.55372936", "0.5477834", "0.5394551", "0.5355961", "0.5321546", "0.53073615", "0.5296438", "0.5226685", "0.52230346", "0.52160645", "0.5206463", "0.5166973", "0.51540715", "0.50969476", "0.5083715", "0.50657964", "0.50314474", "0.502698", "0.5014179", "0.5013045", "0.50110793", "0.5010878" ]
0.7764521
0
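set_definition pairs naturally with get_definition in a read-modify-write cycle, which avoids clobbering fields you did not intend to touch; a minimal sketch (the description edit is illustrative):

# Fetch the current definition, tweak one field, push it back.
definition = folder.get_definition()
definition["description"] = "Nightly exports"  # illustrative value
folder.set_definition(definition)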
Get a file from the managed folder
def get_file(self, path): return self.client._perform_raw( "GET", "/projects/%s/managedfolders/%s/contents/%s" % (self.project_key, self.odb_id, utils.quote(path)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _fs_get_file(url, working_dir):\n if not os.path.isabs(url) and working_dir:\n url = os.path.join(working_dir, url)\n\n try:\n with codecs.open(url, 'r', encoding='utf-8') as f:\n return f.read()\n except Exception as e:\n raise ScrBaseException(\"Could not load file from {0}: {1}\".format(url, e))", "def get_file(self, path):\n file = self.get('data_request?id=file&parameters=%s' % path)\n return file", "def get_file(self):\n return self.dir + self.file_name + self.extension", "def get_file(self, sys_id):\n url = \"{}/file\".format(self._target(sys_id))\n r = self._client.session.get(url, stream=True)\n return r", "def get_file(URI):\n return file_fabric.get_class(URI).get_content(URI)", "def get_file(service, file_id):\n return service.files().get(fileId=file_id).execute()", "def get(self, filepath):\n try:\n collname = '%s.files' % self.bucketname\n coll = Collection(self.db, collname)\n if coll:\n doc = coll.find_one({'filename': str(filepath)}, sort=[('uploadDate', -1)])\n if doc:\n id = doc['_id']\n gout = self.gridfs.get(ObjectId(id))\n if gout:\n content = gout.read()\n gout.close()\n return content\n except Exception, e:\n print e\n return None", "def get(self, filename, **kw):\n\n file_path = os.path.join(self.storage_path, filename)\n try:\n file_obj = open(file_path, \"r\")\n except IOError:\n return\n else:\n return file_obj.read()", "def get_file(self, path):\n _url = f\"{self.connector.base_url}/projects/{self.project_id}/files/{path}\"\n\n return self.connector.http_call(\"get\", _url).text", "def get_file(self, path):\n return self._files.get(self._get_rel_path(path))", "def get(self, id):\n file = (\n self.drive.files()\n .get(\n fileId=id,\n fields=\"id, name\",\n supportsAllDrives=self.shared_drive[0],\n )\n .execute()\n )\n return file", "def getFile(self, resource):\n resource = self.parseUrl(resource, 'files')\n\n res = self.getRequest(resource)\n fObj = vsdModels.File(**res)\n return fObj", "def get_file(self, c_path):\n raise NotImplementedError", "def get_file(self, name):\n return self.files[name]", "def get(self, resource_id, file_id):\n d = Deposition.get(resource_id, user=current_user)\n df = d.get_file(file_id)\n if df is None:\n abort(404, message=\"File does not exist\", status=404)\n return d.type.marshal_file(df)", "def get_file():\n fname = get_var(request, \"fname\")\n return open(fname).read()", "def get_file(self, file_id):\n LOG.debug(\"Getting a file from mattermost\")\n url = '%s/api/v4/files/%s' % (self.server_url, file_id)\n LOG.debug(\"Sending: %s\", url)\n response = self._request(self._session.get, url)\n\n if response.status_code != 200:\n raise RuntimeError(\"Server unhappy. 
(%s)\", response)\n\n return response.content", "def getFile(filename):\n filename = os.path.join(os.path.dirname(__file__), filename)\n return open(filename, 'r')", "def getFile(filename):\n filename = os.path.join(os.path.dirname(__file__), filename)\n return open(filename, 'r')", "def get_file(self):\n return self.theFile", "def read_file(self, entity):\n\n return self.cache.read_file(\n entity.objects['project'],\n entity.objects['ref'],\n entity.objects['file']['path']\n )", "def get_file(self, path):\n _url = (\n f\"{self.connector.base_url}/projects/{self.project_id}/nodes/{self.node_id}\"\n f\"/files/{path}\"\n )\n\n return self.connector.http_call(\"get\", _url).text", "def get_file(self, filename):\n log.debug('[%s]: reading: //%s/%s', self.name, self.name, filename)\n try:\n blob = self.repo.head.commit.tree/filename\n return blob.data_stream\n except KeyError as err:\n raise GitError(err)", "def retrieve(self, file_name):\n ret = os.path.join(self.path, file_name)\n temp = (self.path + file_name).find(self.path, 1, -1)\n if os.path.exists(ret):\n return os.path.join(self.path, file_name)\n elif temp != -1:\n if os.name == \"posix\":\n return os.getcwd() + '/' + file_name\n else:\n return os.getcwd() + '\\\\' + file_name\n else:\n return None", "def readFile(self, path):\n return self.session.request('diag/files/?q=%s'\n % (path))", "async def get_file(self, file_id: base.String) -> types.File:\n payload = generate_payload(**locals())\n result = await self.request(api.Methods.GET_FILE, payload)\n\n return types.File(**result)", "def open_local_file(file_path):\n directory_name = Path(\"../data/\" + file_path)\n return directory_name", "def get_file(filename):\n return os.path.join(TEST_DIR, filename)", "def get_file(self, path):\n path = os.path.normpath(path)\n cmd = [\"ls\", \"-lZ\", \"'\" + path + \"'\"]\n listing = subprocess.check_output(self.shell + cmd).decode().split('\\n')\n line = listing[0].strip(\"\\r\")\n # Parse ls -lZ output for a single file\n try:\n f = File(line, os.path.dirname(path), self.android_version)\n except ValueError as e:\n self.log.error(e)\n return None\n else:\n return {f.absname: f}", "def get_file(self, filename, handler=False):\n result = None\n if self.exists(filename):\n file_path = join_paths(self.path, filename)\n if handler:\n result = open(file_path, 'rb')\n else:\n result = file_path\n return result" ]
[ "0.6983217", "0.6982181", "0.67835194", "0.67477846", "0.6745828", "0.6731098", "0.66658807", "0.6599749", "0.6590214", "0.654747", "0.65451306", "0.65048134", "0.6465553", "0.64533436", "0.64236987", "0.6402294", "0.6359911", "0.6332184", "0.6332184", "0.63053685", "0.6299405", "0.62901044", "0.62874544", "0.62872154", "0.62502795", "0.6235667", "0.62045425", "0.6189723", "0.6187241", "0.61814755" ]
0.75442284
0
Delete a file from the managed folder
def delete_file(self, path):
        return self.client._perform_empty(
                "DELETE", "/projects/%s/managedfolders/%s/contents/%s" % (self.project_key, self.odb_id, utils.quote(path)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete(self, filename):\n pass", "def delete_file(file_id):\n file_obj = Data.objects.get(id=file_id)\n print(\"Removing file: \", file_obj.name)\n print(file_obj.file.path)\n file_dir = file_obj.file.path\n os.remove(file_dir)\n print(\"Done.\")", "def delete(self, filename, **kw):\n\n file_path = os.path.join(self.storage_path, filename)\n\n try:\n os.remove(file_path)\n except OSError:\n pass", "def delete_file(path):\n return files.delete_file(path)", "def delete(self, filename):\n raise NotImplementedError", "def delete(self, remote):\n self.target.ttbd_iface_call(\"store\", \"file\", method = \"DELETE\",\n file_path = remote)", "def delete_file(self, path):\n if not path_exists(path, self._store_folder):\n raise NotFoundException(\"\")\n os.remove(path)", "def delete_file(filename):\n\tprint client.file_delete(filename)", "def delete(self):\n if not pdbox._args.get(\"dryrun\"):\n result = execute(pdbox.dbx.files_delete_v2, self.path)\n pdbox.debug(\"Metadata response: %s\" % result.metadata)\n pdbox.info(\"Deleted %s\" % self.uri)", "async def delete_file(location_id: LocationID, file_id: StorageFileID, user_id: UserID):", "def remove_file(self, path):\n pass", "def delete(self):\n\n try:\n remove(self.file)\n except OSError:\n pass", "def file_delete(self, path):\n params = {'root': self.session.root, 'path': format_path(path)}\n\n url, params, headers = self.request(\"/fileops/delete\", params)\n\n return self.rest_client.POST(url, params, headers)", "def rm(self, path):\n try:\n basedir, item = os.path.split(path)\n postdata = codecs.encode(json.dumps({ 'baseDir': basedir, 'items': [ item ] }), 'utf-8')\n self._urlopen('/api/fileops/delete', postdata).read()\n except HTTPError as err:\n raise RuntimeError(\"Unable to delete '{}'\".format(path))", "def delete(self, *args, **kwargs):\n self.file.storage.delete(self.file.name)\n super().delete(*args, **kwargs)", "def delete(self, host, file):", "def delete_file(self, path):\n raise HTTPError(\n 501,\n \"Narrative deletion not implemented here. Deletion is handled elsewhere.\",\n )", "def delete_file(path):\n if os.path.isfile(path):\n os.remove(path)", "def delete_file(file: str) -> None:\n\tuux.show_info(\"Deleting \" + file)\n\n\tif not os.path.exists(file):\n\t\t# Files does not exist\n\t\treturn\n\n\tos.remove(file)", "def rm_file(file_):\n Path(file_).unlink(missing_ok=True)", "def _delete_file(path):\n if os.path.isfile(path):\n os.remove(path)", "def delete_file(sender, instance, *args, **kwargs):\n if instance.file:\n _delete_file(instance.file.path)", "def rm(path):\n abs_path = navigate.get_abs_path(path)\n parent, name = navigate.split_path(abs_path)\n access_token = db.get_access_to_file(parent, name)\n if access_token is not None:\n dbox_path = '/' + name\n client = dropbox.client.DropboxClient(access_token)\n client.file_delete(dbox_path)\n db.remove_file(access_token, parent, name)", "def delete_file(self, lfile):\n raise NotImplementedError('delete_file')", "def delete_file(self, filename: str, directory: str = 'gcodes') -> Dict:\n raise NotImplementedError", "def delete_file(self, filepath):\n self.ftp.delete(filepath)", "def _delete_file(path):\n if os.path.isfile(path):\n os.remove(path)", "def _delete_file(path):\n if os.path.isfile(path):\n os.remove(path)", "def delete_file(path):\n if os.path.isfile(path):\n os.remove(path)", "def delete_local_file(file_path):\r\n try:\r\n os.remove(file_path)\r\n except OSError as e:\r\n print(f\"Error deleting file {file_path}: {e}\")" ]
[ "0.77171427", "0.7711893", "0.7555799", "0.7529862", "0.7435636", "0.7412828", "0.7405208", "0.7381543", "0.7283083", "0.72761863", "0.72573847", "0.7255518", "0.7244785", "0.7224803", "0.721601", "0.7209082", "0.718607", "0.7183738", "0.7181639", "0.7163021", "0.7153897", "0.71477246", "0.7130032", "0.7125143", "0.71137893", "0.7109738", "0.71091706", "0.71091706", "0.71043676", "0.70814234" ]
0.7846849
0
Upload the content of a folder to a managed folder.
def upload_folder(self, path, folder):
        for root, _, files in os.walk(folder):
            for file in files:
                filename = os.path.join(root, file)
                with open(filename, "rb") as f:
                    rel_posix_path = "/".join(os.path.relpath(filename, folder).split(os.sep))
                    self.put_file("{}/{}".format(path, rel_posix_path), f)
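An illustrative call for upload_folder above: it walks the local tree and re-posts every file through put_file under a POSIX-style relative path, so the directory layout is preserved remotely. Paths are placeholders; folder is a DSSManagedFolder handle obtained as in the get_file sketch:

# Mirrors ./exports into the managed folder under /backups,
# e.g. ./exports/2024/jan.csv lands at /backups/2024/jan.csv.
folder.upload_folder("/backups", "./exports")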
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upload(self, folder, recursive=True, test=False):\n return self._gphotocli_image_tasks.upload(folder, recursive, test)", "def UploadFolderToGD(token_path, source_path, gd_folder): \n google_drive = ConnectGoogleDrive(token_path)\n file_cmd = spike.FileCMD()\n file_list = file_cmd.ListFiles(source_path)\n print('\\nUpload List:')\n print('\\n'.join(file_list))\n print('')\n \n item_list = google_drive.ListItems(gd_folder, None)\n folder_id = item_list[0]['id']\n \n for file_name in file_list:\n google_drive.UploadFile(source_path, folder_id, file_name)", "def upload_files(self, folder):\n\n # Load all blobs in the session to make sure only upload needed files\n blobs = GoogleStorage().list_blobs_with_prefix(self.bucket_name, folder)\n blobs = [blob.name for blob in blobs]\n\n project_home = os.environ['PROJ_HOME']\n root_folder = os.path.join(project_home, folder)\n\n for file in os.listdir(root_folder):\n file_name = \"{folder}/{file}\".format(folder=folder, file=file)\n if file_name not in blobs:\n source_file_name = os.path.join(project_home, file_name)\n GoogleStorage().upload_blob(\n self.bucket_name, source_file_name, file_name)\n print('Uploaded file {}'.format(source_file_name))", "def syncfolder():", "def upload(self, folder_list, files):\n current_folder_id = self.top_folder_id\n for fname in folder_list:\n current_folder_id = self._fetch_or_create_folder(fname, current_folder_id)\n for file in files:\n self._upload_detail(file, current_folder_id)", "def upload(self, dest, overwrite=False):\n dest = normpath(dest)\n remote_assert_empty(dest)\n\n remote = RemoteFolder.create(dest)\n for entry in self.contents():\n entry.upload(\"/\".join([remote.path, entry.name]))\n return remote", "def upload(self, content_local_path):\n self.method = \"PUT\"\n entity_response = self.send(path=content_local_path)\n entity = DriveItem(json.loads(entity_response.content))\n return entity", "def upload_file(client, folder_id, file_name):\n\n new_file = client.folder(folder_id).upload(file_name)\n print(f\"File {new_file.name} uploaded to Box with file ID {new_file.id}\")\n return new_file.id", "def upload_dir(self, dirpath, parents=''):\r\n dirname = dirpath.split('\\\\')[-1]\r\n status = self.create_folder(dirname, parents)\r\n if status == 201:\r\n print('[YaDisk]: Folder {0} is created!'\r\n .format(parents + '/' + dirname))\r\n elif status == 409:\r\n print('[YaDisk]: Folder {0} is already exists...'\r\n .format(parents + '/' + dirname))\r\n if len(parents) > 0:\r\n parents += '/'\r\n parents += dirname\r\n file_list = os.listdir(dirpath)\r\n for obj in file_list:\r\n name = dirpath + '\\\\' + obj\r\n if os.path.isfile(name):\r\n status = self.upload_file(name, parents)\r\n if status[0] == 201:\r\n print('[YaDisk]: File {0} is successfully created'\r\n .format(parents + '/' + status[1]))\r\n else:\r\n self.upload_dir(name, parents)", "def sync_files(self, folder):\n blobs = GoogleStorage().list_blobs_with_prefix(self.bucket_name, folder)\n\n # Create the session folder if not existing\n project_home = os.environ['PROJ_HOME']\n root_folder = os.path.join(project_home, folder)\n if not os.path.isdir(root_folder):\n os.makedirs(root_folder)\n\n # Start download files\n for blob in blobs:\n destination_file_name = os.path.join(project_home, blob.name)\n\n # Check if the local file exist before download file\n if not os.path.isfile(destination_file_name):\n\n # Create folder to avoid exception when download\n destination_file_folder = os.path.dirname(destination_file_name)\n if not 
os.path.isdir(destination_file_folder):\n os.makedirs(destination_file_folder)\n\n blob.download_to_filename(destination_file_name)\n print('Downloaded file {}'.format(destination_file_name))", "def moveTo(self, folder):\n parent = self.metaData.getLinkIndex('parent')\n moveUri = self.metaData.getLink(\"move\")\n\n assert parent != -1\n assert moveUri is not None\n if not hasattr(folder, \"metaData\"): raise TypeError(\"Your newFolder does not have a metaData property\")\n assert hasattr(folder, \"selfLink\")\n\n header = self._baseHeader.copy()\n header['Content-type'] = \"application/vnd.huddle.data+json\"\n jsonData = self.metaData.jsonObj\n jsonData['links'][parent] = {'href' : folder.selfLink, 'rel' : 'parent'}\n response = self._adapter.putRequest(moveUri, header, json.dumps(jsonData))\n\n return Document(self._client, self._client.getUrlFromHeaderLink(response['Headers']['link']))", "def putFolder(self, _dst):\n if not _dst.startswith(self.host + '/data'):\n if not _dst.startswith('/'):\n _dst = '/' + _dst\n _dst = self.host + '/data' + _dst\n #print(f\"\\n\\nXNAT 1 {_dst}\")\n _dst = str(Xnat.path.cleanUri(_dst)).encode('ascii', 'ignore')\n #print(f\"fXNAT 2 {_dst} \\n\\n\")\n response = self.__httpsRequest('PUT', _dst)\n return response", "def put_file(self, path, f):\n return self.client._perform_json_upload(\n \"POST\", \"/projects/%s/managedfolders/%s/contents/%s\" % (self.project_key, self.odb_id, utils.quote(path)),\n \"\", f).json()", "def _share_folder(self, nms, volume, folder):\n path = '%s/%s' % (volume, folder.lstrip('/'))\n share_opts = {\n 'read_write': '*',\n 'read_only': '',\n 'root': 'nobody',\n 'extra_options': 'anon=0',\n 'recursive': 'true',\n 'anonymous_rw': 'true',\n }\n LOG.debug('Sharing folder %s on Nexenta Store', folder)\n nms.netstorsvc.share_folder('svc:/network/nfs/server:default', path,\n share_opts)", "def upload_bam(bam_s3_path, local_folder_path):\n\n upload_folder(bam_s3_path, local_folder_path)", "def put(self, *args, **kwargs):\n return super(APIFolderView, self).put(*args, **kwargs)", "def save(self):\n self.folder.client._perform_empty(\n \"PUT\", \"/projects/%s/managedfolders/%s\" % (self.folder.project_key, self.folder.odb_id),\n body=self.settings)", "def putDirectory( self, path ):\n res = checkArgumentFormat( path )\n if not res['OK']:\n return res\n urls = res['Value']\n successful = {}\n failed = {}\n gLogger.debug( \"DIPStorage.putDirectory: Attemping to put %s directories to remote storage.\" % len( urls ) )\n transferClient = TransferClient( self.url )\n for destDir, sourceDir in urls.items():\n tmpList = os.listdir( sourceDir )\n sourceFiles = [ \"%s/%s\" % ( sourceDir, x ) for x in tmpList ]\n res = transferClient.sendBulk( sourceFiles, destDir )\n if res['OK']:\n successful[destDir] = {'Files':0, 'Size':0}\n else:\n failed[destDir] = res['Message']\n resDict = {'Failed':failed, 'Successful':successful}\n return S_OK( resDict )", "def push(api_client, folder, verbose):\n local_folder, remote_folder = _get_local_and_remote_folders(folder)\n workspace = WorkspaceApi(api_client)\n\n def work():\n workspace.import_workspace_dir(local_folder, remote_folder,\n True, False, verbose=verbose)\n if not verbose:\n with loadingbar(msg=\"Pushing to {}\".format(remote_folder), width=10,\n fill_char=\"o\", interval=.25):\n work()\n else:\n work()", "def test_upload_dir_contents_one_dir(self):\n local_src_dir = self._local_tempdir\n remote_dest_dir = 'remote_dest_dir'\n subdir = 'subdir'\n os.mkdir(os.path.join(local_src_dir, subdir))\n for 
filename in ['file1', 'file2']:\n self._expected_commands.append('%s cp -a public %s %s' % (\n GSUTIL_LOCATION,\n os.path.join(local_src_dir, subdir, filename),\n posixpath.join(remote_dest_dir, subdir, filename)))\n with open(os.path.join(local_src_dir, subdir, filename), 'w'):\n pass\n gs_utils.upload_dir_contents(\n local_src_dir=local_src_dir, remote_dest_dir=remote_dest_dir,\n gs_acl='public')", "def upload(self, source, dest):\n if os.path.isdir(source):\n self.upload_dir(source, dest)\n else:\n self.upload_file(source, dest)", "def upload_handler(self):\n \n for root, dirs, files in os.walk(self.path):\n\n current_dir = os.path.basename(root)\n \n if root == self.path:\n root_id = self.gapy.create_file(current_dir, path=root, isFolder=True)\n else:\n parents_id = self.filesystem[os.path.dirname(root)][\"id\"]\n root_id = self.gapy.create_file(current_dir, path=root, isFolder=True, parents_id=[parents_id])\n print(f\"\\033[94m The directory {current_dir} was uploaded \\033[0m\")\n\n self.filesystem[root.rstrip(\"/\")] = { \"id\": root_id, \"files\": [] }\n \n if files:\n for f in files:\n if f not in IGNORE_FILES and os.path.getsize(root+\"/\"+f) > 0:\n file_id = self.gapy.create_file(f, path=root, parents_id=[root_id])\n self.filesystem[root][\"files\"].append({ \"name\": f, \"id\": file_id})\n print(f\"\\033[94m The file {f} was uploaded \\033[0m\")\n \n self.update_fs()", "def put_object(self, account, container, object, content):#put a file to server\n \n pass", "def upload_folder_to_s3(folder_path, s3_uri, connection=None):\n\n if connection:\n run_out = connection.run(f\"aws s3 cp --recursive {folder_path}/ {s3_uri}/\")\n else:\n run_out = run(f\"aws s3 cp --recursive {folder_path}/ {s3_uri}/\")\n\n return run_out.return_code", "def post(self):\n source = 'uploaded by user'\n upload_files = self.get_uploads('file')\n blob_key = upload_files[0].key()\n name = self.request.get('name')\n\n user = users.get_current_user()\n\n username = 'admin'\n date = datetime.datetime.now()\n str_blob_key = str(blob_key)\n key = FileMetadata.get_key_name(username, date, str_blob_key)\n\n ctx = ndb.get_context()\n meta = FileMetadata(key_name=key, parent=_PARENT)\n meta.owner = user\n meta.filename = name\n meta.uploaded_on = date\n meta.source = source\n meta.blobkey = str_blob_key\n meta.put()\n ctx.clear_cache()\n self.redirect('/admin')", "def upload_files(self,\r\n request,\r\n resources,\r\n folder,\r\n quiet=False,\r\n dir_mode='skip'):\r\n for file_name in os.listdir(folder):\r\n if (file_name == self.DATASET_METADATA_FILE\r\n or file_name == self.OLD_DATASET_METADATA_FILE\r\n or file_name == self.KERNEL_METADATA_FILE):\r\n continue\r\n full_path = os.path.join(folder, file_name)\r\n\r\n if os.path.isfile(full_path):\r\n exitcode = self._upload_file(file_name, full_path, quiet,\r\n request, resources)\r\n if exitcode:\r\n return\r\n elif os.path.isdir(full_path):\r\n if dir_mode in ['zip', 'tar']:\r\n temp_dir = tempfile.mkdtemp()\r\n try:\r\n _, dir_name = os.path.split(full_path)\r\n archive_path = shutil.make_archive(\r\n os.path.join(temp_dir, dir_name), dir_mode,\r\n full_path)\r\n _, archive_name = os.path.split(archive_path)\r\n exitcode = self._upload_file(archive_name,\r\n archive_path, quiet,\r\n request, resources)\r\n finally:\r\n shutil.rmtree(temp_dir)\r\n if exitcode:\r\n return\r\n elif not quiet:\r\n print(\"Skipping folder: \" + file_name +\r\n \"; use '--dir-mode' to upload folders\")\r\n else:\r\n if not quiet:\r\n print('Skipping: ' + file_name)", "def upload(ctx: 
click.Context, **kwargs):\n root_commands.cmd_upload(ctx.obj, **kwargs)", "def handle_upload(self, req, folder_path):\n\t\tresult = UL_ACCESS_DENIED\n\t\t\n\t\tdata = req.data\n\t\tfileitem = data['NewFile']\n\t\t\n\t\tfilename = fileitem.filename\n\t\tdestination_path = os.path.join(self.get_selected_root(req), folder_path, filename)\n\t\tif(os.access(destination_path, os.F_OK)):\n\t\t\tparts = filename.split('.')\n\t\t\tif(len(parts) > 1):\n\t\t\t\tparts[len(parts) - 2] += '-%d' % int(time.time())\n\t\t\t\tfilename = '.'.join(parts)\n\t\t\t\tresult = UL_RENAME\n\t\t\telse:\n\t\t\t\tresult = UL_INVALID_TYPE\n\t\tif(result != UL_INVALID_TYPE):\n\t\t\ttry:\n\t\t\t\tuploaded_file = open(destination_path, 'w')\n\t\t\t\tbytes = fileitem.file.read(65536)\n\t\t\t\twhile(bytes):\n\t\t\t\t\tuploaded_file.write(bytes)\n\t\t\t\t\tbytes = fileitem.file.read(65536)\n\t\t\t\tuploaded_file.close()\n\t\t\t\tresult = SUCCESS\n\t\t\texcept:\n\t\t\t\timport traceback\n\t\t\t\tprint traceback.print_exc()\n\t\t\t\tresult = UL_ACCESS_DENIED\n\t\t\n\t\treturn result, filename", "def upload(self, dest, overwrite=False):\n dest = normpath(dest)\n try:\n remote = get_remote(dest)\n except ValueError: # Nothing exists at dest, nothing to worry about.\n pass\n else: # Something exists here.\n if isinstance(remote, RemoteFile) and self.hash() == remote.hash:\n # Nothing to update.\n pdbox.info(\"%s and %s are identical\" % (self.path, remote.uri))\n return\n if not overwrite:\n raise ValueError(\"%s exists\" % remote.uri)\n\n # Uploading can either happen all at once (with a 150 MB limit),\n # or in chunks. If the file is smaller than the selected chunk size,\n # then try to upload in one go.\n chunksize = min(pdbox._args.get(\"chunksize\", 149.0), 149.0)\n pdbox.debug(\"Chunk size: %.2f MB\" % chunksize)\n if pdbox._args.get(\"dryrun\"):\n pdbox.info(\"Uploaded %s to %s\" % (self.path, dbx_uri(dest)))\n return None\n\n # Set the write mode.\n if overwrite:\n mode = dropbox.files.WriteMode.overwrite\n else:\n mode = dropbox.files.WriteMode.add\n\n chunk = int(chunksize * 1024 * 1024) # Convert B to MB.\n\n with open(self.path, \"rb\") as f:\n data = f.read()\n sz = len(data)\n\n # TODO: Progress bars.\n if sz < chunk: # One-shot upload.\n meta = execute(pdbox.dbx.files_upload, data, dest, mode)\n else: # Multipart upload.\n nchunks = math.ceil(sz / chunk)\n # Initiate the upload with just the first byte.\n start = execute(pdbox.dbx.files_upload_session_start, f[0])\n cursor = dropbox.files.UploadSessionCursor(start.session_id, 1)\n\n # Now just add each chunk.\n while sz - cursor.offset > chunk:\n pdbox.debug(\n \"Uploading chunk %d/%d\" % (cursor.offset % chunk, nchunks),\n )\n execute(\n pdbox.dbx.files_upload_session_append_v2,\n data[cursor.offset:cursor.offset + chunk],\n cursor,\n )\n cursor.offset += chunk\n\n # Upload the remaining to finish the transaction.\n meta = execute(\n pdbox.dbx.files_upload_session_finish,\n data[cursor.offset:],\n dropbox.files.CommitInfo(dest, mode),\n )\n\n pdbox.info(\"Uploaded %s to %s\" % (self.path, dbx_uri(dest)))\n return RemoteFile(None, meta=meta)", "def postFolder(self, parent, name, check=True):\n\n folder = vsdModels.Folder()\n if parent is None:\n parent = self.getFolderByName('MyProjects', mode='exact')\n folder.parentFolder = vsdModels.APIBase(selfUrl=parent.selfUrl)\n folder.name = name\n\n exists = False\n\n if check:\n if parent.childFolders:\n for child in parent.childFolders:\n fold = self.getFolder(child.selfUrl)\n if fold is not None:\n if fold.name == 
name:\n print('folder {0} already exists, id: {1}'.format(name, fold.id))\n exists = True\n return fold\n else:\n print('unexpected error, folder exists but cannot be retrieved')\n exists = True\n\n # print(self.postRequest('folders', data = data))\n if not exists:\n data = folder.to_struct()\n # for name, field in folder:\n # if name not in data:\n # data[name] = None\n # print(data)\n res = self.postRequest('folders', data=data)\n folder.populate(**res)\n print('folder {0} created, has id {1}'.format(name, folder.id))\n assert folder.name == name\n return folder" ]
[ "0.6675317", "0.65023863", "0.65004486", "0.63931644", "0.6374531", "0.6330987", "0.62475437", "0.6244524", "0.61936384", "0.6177249", "0.61518073", "0.6149765", "0.6138441", "0.6122861", "0.6103507", "0.60236096", "0.6016508", "0.60020953", "0.59947294", "0.5948904", "0.59445006", "0.5909924", "0.5904174", "0.5876771", "0.5873272", "0.5870496", "0.5856388", "0.57831967", "0.577029", "0.57425386" ]
0.7008945
0
Get the last values of the metrics on this managed folder.
def get_last_metric_values(self):
        return ComputedMetrics(self.client._perform_json(
                "GET", "/projects/%s/managedfolders/%s/metrics/last" % (self.project_key, self.odb_id)))
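A sketch of consuming the returned ComputedMetrics handle; get_all_ids is the accessor that class exposes in recent dataikuapi releases, so treat the call (and the sample metric id) as assumptions:

metrics = folder.get_last_metric_values()   # folder: DSSManagedFolder handle as above
for metric_id in metrics.get_all_ids():     # e.g. "basic:COUNT_FILES" on a folder
    print(metric_id)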
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def last(self):\n data = self._http_get(\"last\")\n return data.json()", "def getLatestSpectrumMeasurements(self): \n return self.spectrum[len(self.spectrum)-1]", "def getLatestMeasurement(self): \n return self.measurement[len(self.measurement)-1]", "def last_value(self):\n return self.samples[-1]", "def get_metrics(self):\n return None", "def getCurentData(self):\n if not self.labExperiment:\n super().getCurentData()\n else:\n return np.array(self.connection.query('get_actuator_data'))", "def last_value(self):\n return self._last_value", "def get_value(self):\n return self.last_value", "def getSavedMetrics(self):\n # default metrics for a last resort\n self.default_metrics = {0: (0, 0, 1920, 1080), 1: (1920, 0, 3840, 1080), 2: (3840, 0, 5760, 1080), 3: (5760, 0, 7680, 1080)}\n # get saved metrics\n self.notifyPut('Obtaining Saved Display Metrics')\n try:\n gsdm_conf = wyXML.WyXML(os.path.join(self.gsdm_path, 'gsdm\\\\conf\\\\gsdm_cfg.xml'))\n if gsdm_conf.find('displayMetrics') != None:\n self.saved_metrics = literal_eval(gsdm_conf.find('displayMetrics'))\n #gsdm_conf.replace('displayMetrics', '')\n self.db = shelve.open(os.path.join(self.xlocal, 'Launch Manager Utils\\\\launch.data'))\n self.db['display_metrics'] = self.saved_metrics\n self.db.close()\n else:\n self.db = shelve.open(os.path.join(self.xlocal, 'Launch Manager Utils\\\\launch.data'))\n self.saved_metrics = self.db['display_metrics']\n self.db.close()\n \n except Exception, e:\n self.logQ.put('{0} - Unable to detect saved metrics from GSDM configuration'.format(e))\n self.saved_metrics = self.default_metrics", "def last_value(self):\n return self._waveforms[-1].last_value", "def last_value(self):\n return self._value", "def get_metrics(self):\n self.logger.debug(\"Fetching metrics.\")\n return self._api_query(\"metrics\")['metrics']", "def metrics(self):\n return self.__metrics", "def getLastData(self) -> ghidra.program.model.listing.Data:\n ...", "def metrics(self):\n return self._metrics", "def metrics(self):\n return self._metrics", "def _get_last_meas_time(self):\n\n #if flag for whole data regeneration is set\n if self._process_type == 'full_gen':\n return datetime.datetime(1900, 1, 1, 0, 0, 0)\n \n \n res = self._db.Query(\"\"\"SELECT last_measurement_time\n FROM last_dashboard_element_segment_value\n WHERE\n element_id = %s\n AND segment_value_id = %s\n \"\"\",(self._id, self._segment_value_id))\n if not res:\n return datetime.datetime(1900, 1, 1, 0, 0, 0)\n item = self._db.record[0]\n if item['last_measurement_time']:\n return item['last_measurement_time']\n return datetime.datetime(1900, 1, 1, 0, 0, 0)", "async def last_read(self):\n try:\n asyncio.set_event_loop(self.loop)\n asyncio.get_event_loop().create_task(self.browse())\n await self.browse()\n\n # parse the return reads and extract the most recent one\n # (i.e. 
last not None)\n jsonResponse = json.loads(self.raw_data)\n lastRead = None\n for read in jsonResponse['reads']:\n if read['value'] is None:\n break\n lastRead = read\n _LOGGER.debug(\"lastRead = %s\", lastRead)\n\n self.startTime = lastRead['startTime']\n self.endTime = lastRead['endTime']\n self.last_read_val = lastRead['value']\n self.unit_of_measurement = jsonResponse['unit']\n\n _LOGGER.debug(\"last read = %s %s %s %s\", self.startTime, self.endTime, self.last_read_val, self.unit_of_measurement)\n\n return self.startTime, self.endTime, self.last_read_val, self.unit_of_measurement\n except:\n raise MeterError(\"Error requesting meter data\")", "def metrics(self) -> typing.Optional[typing.List[\"BucketMetrics\"]]:\n return self._values.get('metrics')", "def getLast(self):\r\n return self._data[-1]", "def latestValue(self):\n if len(self.values) > 0:\n return self.values[-1]\n else:\n return 0", "def metrics(self) -> pulumi.Output['outputs.RuntimeMetricsResponse']:\n return pulumi.get(self, \"metrics\")", "def last_fmeasure(self):\n return self.get_fvalue(self.last_position())", "def get(self):\n return self._measurementController.getMeasurements(), 200", "def metrics(self):\n self.metrics = []\n \n self.clients()\n\n if len(self.metrics) > 0:\n return self.metrics\n else:\n return []", "def _fetch_gauge_metrics_and_clear(self):\n with self._gauge_rlock:\n gauge_metrics = self._gauge_metrics\n self._gauge_metrics = defaultdict(int)\n\n return gauge_metrics", "def get_metrics(self) -> Dict[str, base.Number]:\n return self._metrics", "def get_last(self):\n self.accumulated_time_last = pg.time.get_ticks() - self.start_time_last\n return self.accumulated_time_last", "def get_last_result(self):\n return self.last_result", "def get_last_measurement(self, param):\n return self.__buffer[param][-1]" ]
[ "0.64085966", "0.63444847", "0.63409215", "0.62631094", "0.62495613", "0.62179077", "0.61889714", "0.61730325", "0.6146713", "0.61448973", "0.6101925", "0.6096707", "0.6094985", "0.6079614", "0.60533327", "0.60533327", "0.6048", "0.6036222", "0.6022216", "0.594147", "0.5935419", "0.59338284", "0.59231824", "0.5904725", "0.5887276", "0.5870549", "0.58641344", "0.5860689", "0.5854131", "0.5847705" ]
0.80391484
0
Get the history of the values of a metric on this managed folder.
def get_metric_history(self, metric):
        return self.client._perform_json(
                "GET", "/projects/%s/managedfolders/%s/metrics/history" % (self.project_key, self.odb_id),
                params={'metricLookup' : metric if isinstance(metric, str) or isinstance(metric, unicode) else json.dumps(metric)})
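The method takes either a metric id string or a structured lookup (anything non-string is serialized to JSON). A sketch in which the metric id and the "values"/"time"/"value" keys of the returned JSON are assumptions:

history = folder.get_metric_history("basic:COUNT_FILES")  # assumed metric id
for point in history.get("values", []):                   # assumed response shape
    print(point.get("time"), point.get("value"))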
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_value_history(self):\n return self.value_history", "def get_history(self):\n return self.history", "def history(self):\n return self.info['history']", "def get_history(self):\n return self.__history[:]", "def get_history(self):\r\n\r\n return self.board_history", "def History(self):\n return self.historydict.get('history', [])", "def history(self):\n return self._history", "def history(self):\n return self._history", "def get_history(self, key=None):\n val = self.history.values.get(key, None)\n if val is None:\n return self.history.values\n else:\n return val", "def get_cache_history_items(self):\n #gdb.execute(\"p cache->history_items\")\n history_items = ZabbixHashset(gdb.parse_and_eval ('cache->history_items'))\n self.data = history_items.parse()", "def get_history(self, name):\n return self._scalar_history.get_history(name)", "def get_history(self, name):\n return self._scalar_history.get_history(name)", "def history(self):\n return self.board.history", "def history(self, key, _from='-', _to='+', _desc=True):\n return [self.klass.from_json(_object)\n for _object in self.storage.history(key, _from, _to, _desc)]", "def historystorage(self):\n return self._historystorage", "def history(self):\n return np.array(self._history)", "def get_metric_history(\n self,\n metric_key: str,\n time_window: duration_pb2.Duration,\n min_timestamp: timestamp_pb2.Timestamp\n ) -> typing.List[float]:\n if not self._metric_store:\n raise ValueError('Metric history requested for {}, but no metric store '\n 'was provided to Collector.'.format(metric_key))\n\n if time_window.ToTimedelta():\n min_time = max(\n min_timestamp.ToDatetime(),\n self._event.start_time.ToDatetime() - time_window.ToTimedelta())\n else:\n min_time = min_timestamp.ToDatetime()\n\n history_rows = self._metric_store.get_metric_history(\n benchmark_id=(\n self._event.metric_collection_config.compare_to_benchmark_id or\n self._event.benchmark_id),\n metric_key=metric_key,\n min_time=min_time,\n )\n \n return [row.metric_value for row in history_rows]", "def get_last_metric_values(self):\n return ComputedMetrics(self.client._perform_json(\n \"GET\", \"/projects/%s/managedfolders/%s/metrics/last\" % (self.project_key, self.odb_id)))", "def test_get_derived_metric_history(self):\n pass", "def history(self) -> List[SnapshotLogEntry]:\n return self.metadata.snapshot_log", "def orders_history(self): \n return(self._d_orders['history'])", "def get_metric_history(self, slugs, since=None, to=None, granularity='daily'):\n if not type(slugs) == list:\n slugs = [slugs]\n\n # Build the set of Redis keys that we need to get.\n keys = []\n for slug in slugs:\n for date in self._date_range(granularity, since, to):\n keys += self._build_keys(slug, date, granularity)\n keys = list(dedupe(keys))\n\n # Fetch our data, replacing any None-values with zeros\n results = [0 if v is None else v for v in self.r.mget(keys)]\n results = zip(keys, results)\n return sorted(results, key=lambda t: t[0])", "def history(self, maxresults=9999999, mindate=None):\n server = self._server.resource(self.name).connect()\n return server.history(maxresults=maxresults, mindate=mindate, accountID=self.accountID)", "def history():", "def get_app_history_metrics(self, cluster_id, app_alias):\n\n resp = self.http.get(url_maker(\"/clusters\", cluster_id, \"apps\",\n app_alias, \"monitor\"))\n\n return self.process_data(resp)", "def QueryHistory(self):\n return []", "def get(self):\n res = ''\n for hist in self.history:\n res += (str(hist) + '\\n')\n return res", "def 
get_order_history(self):\n return self.__call__('orders', 'getorderhistory')", "def history(self, maxresults=None, mindate=None):\n server = self._server._server.resource(self._server.name).connect()\n return server.history(maxresults=maxresults, mindate=mindate,\n accountID=self._server.accountID, librarySectionID=self.sectionKey)", "def get_action_history(self):\n\t\treturn self._action_history" ]
[ "0.7165081", "0.7030458", "0.6927986", "0.68705124", "0.6810044", "0.6718102", "0.6691567", "0.6691567", "0.6686829", "0.6673763", "0.6653554", "0.6653554", "0.65635055", "0.65367013", "0.65315855", "0.6509732", "0.64998275", "0.6481372", "0.64430577", "0.6438002", "0.6354089", "0.6338823", "0.62798494", "0.6214748", "0.61877865", "0.6168725", "0.6161823", "0.61542517", "0.6151802", "0.6147729" ]
0.77961314
0
Get the flow zone of this managed folder.
def get_zone(self):
        return self.project.get_flow().get_zone_of_object(self)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def zone(self):\n return self._zone", "def access_zone(self):\n return self._access_zone", "def zone(self) -> str:\n return self._zone", "def zone(self) -> str:\n return pulumi.get(self, \"zone\")", "def zone(self) -> str:\n return pulumi.get(self, \"zone\")", "def local_zone():\n return get_localzone()", "def getFlow(self):\n return self._flow", "def getTaskZoneId(self):\n return self.getZoneId()", "def folder(self):\n return self._folder", "def get_current_zone() -> Zone:\n return services.current_zone()", "def zone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"zone\")", "def zone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"zone\")", "def destination_zone_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"destination_zone_id\")", "def getZoneId(self):\n return self.zoneId", "def flow(self):\n return self._flow", "def get_current_zone_id() -> int:\n return services.current_zone_id()", "def sc_dns_zone(self):\n return self._sc_dns_zone", "def destination_zone_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"destination_zone_id\")", "def getPath(self):\n return self.__folder", "def get_edge_zone(self) -> Union[str, None]:\n # read the original value passed by the command\n edge_zone = self.raw_param.get(\"edge_zone\")\n # try to read the property value corresponding to the parameter from the `mc` object\n # Backward Compatibility: We also support api version v2020.11.01 in profile 2020-09-01-hybrid and there is\n # no such attribute.\n if (\n self.mc and\n hasattr(self.mc, \"extended_location\") and\n self.mc.extended_location and\n self.mc.extended_location.name is not None\n ):\n edge_zone = self.mc.extended_location.name\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return edge_zone", "def parent_folder(self):\n return self.properties.get(\"ParentFolder\",\n Folder(self.context, ResourcePath(\"ParentFolder\", self.resource_path)))", "def time_zone(self):\n # type: () -> string_types\n return self._time_zone", "def get_parent(self):\n parent_id = self.client._perform_json(\"GET\", \"/project-folders/%s\" % self.project_folder_id).get(\"parentId\", None)\n if parent_id is None:\n return None\n else:\n return DSSProjectFolder(self.client, parent_id)", "def source_zone_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"source_zone_id\")", "def destination_zone_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"destination_zone_id\")", "def get_timezone(self) -> str:\n return self.AD.time_zone", "def time_zone(self) -> str:\n return pulumi.get(self, \"time_zone\")", "def get_local_cache_folder(self):\n\n return self._cache_folder", "def get_default_vm_folder(self):\n return self.get_vm_folders()[0]['folder']", "def GetZoneOffset(self):\n if self.zDirection is None:\n return None\n else:\n return self.zDirection * self.zOffset" ]
[ "0.64082503", "0.6311886", "0.6157333", "0.6036838", "0.6036838", "0.59640765", "0.59602046", "0.5879423", "0.58616424", "0.58191", "0.5772731", "0.5772731", "0.5696067", "0.56114745", "0.55910367", "0.55511653", "0.55193275", "0.54935896", "0.5492465", "0.54857844", "0.5457531", "0.5448083", "0.5408327", "0.5405101", "0.5393684", "0.5368458", "0.5344173", "0.5335319", "0.53343856", "0.53235894" ]
0.75278246
0
Move this object to a flow zone.
def move_to_zone(self, zone):
        if isinstance(zone, basestring):
            zone = self.project.get_flow().get_zone(zone)
        zone.add_item(self)
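Usage sketch: a string argument is resolved to a zone handle through the project flow first, so the two calls below are equivalent; the zone id is a placeholder:

flow = folder.project.get_flow()           # folder: DSSManagedFolder handle as above
zone = flow.get_zone("staging_zone_id")    # or any DSSFlowZone handle you already hold
folder.move_to_zone(zone)                  # same effect as folder.move_to_zone("staging_zone_id")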
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move_stage_to_z(self, z):\n raise NotImplementedError", "def move(self):\n pass", "def move(self):\n raise NotImplementedError", "def move_to(self, mobject_or_point):\n layer_center = self.surrounding_rectangle.get_center()\n if isinstance(mobject_or_point, Mobject):\n target_center = mobject_or_point.get_center() \n else:\n target_center = mobject_or_point\n\n self.shift(target_center - layer_center)", "def run(self):\n # type: () -> None\n self.move_to(self.location)", "def move(self):\n \n self.position = self.explore()", "def move(self): #py:UR.move\n RUR._UR.move_(self.body)", "def make_move(self, move):\n self.board[int(move) - 1] = self.nplayer", "def visit_move(self, move):\n dest_id = self.event_json['destination']['id']\n destination = self.world.entities[dest_id]\n move.destination = destination", "def flow(self, flow):\n\n self._flow = flow", "def move(self):\n \n self.position = self.wander()", "def move(self, move):\n raise NotImplementedError()", "def move_to(self, dest, force_move=False):\n origin = self.location\n if self.fixed and force_move == False:\n if hasattr(self, 'is_liquid'):\n if not dest.liquid:\n return False\n elif not hasattr(dest, 'exits'):\n return False # cannot move an object that is fixed in place\n if origin:\n origin.extract(self)\n # if cannot insert into destination, return to where it came from\n # (dest.insert returns True if insertion fails)\n if not dest.insert(self, force_insert=force_move): \n return True\n else:\n if (origin):\n origin.insert(self, force_insert=True)\n return False", "def move(self):\n self.old_tail = self.body[-1][:] # save old position of last block\n self.head[0] += self.direction[0] # moves head\n self.head[1] += self.direction[1]\n \n self.head[0] = (self.head[0] + self.xMaxSize) % self.xMaxSize\n self.head[1] = (self.head[1] + self.yMaxSize) % self.yMaxSize\n \n if self.head in self.body[1:]: # if snakes hits himself\n self.alive = False\n self.body.insert(0, self.body.pop()) # each block is replace by predecessor\n self.body[0] = self.head[:] # first block is head", "def move(self, dst, src): # pragma: no cover\n raise NotImplementedError(\"Implement this\")", "def Z2Move(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def _move(self, pos):\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos", "def move_to(self, position, env=None):\n\n env = self._find_env(env)\n env.move_agent(self, position)", "def update_flow(self, flow):\r\n self.flow = flow", "def _add_zone( self, zone ):\n assert zone.space is None\n zone.space = self\n self.zones.add( zone )", "def move_to(self, position):\n raise NotImplementedError", "def zaberMoveToStoredLocation(self, stage, address):\n\t\tself.zaberSend(stage, self.cmd[\"moveToStoredPosition\"], address)", "def move(self, state):\n raise NotImplementedError(\"Need to implement this method\")", "def moveEntity(self):\n x = self.going_east - self.going_west\n y = self.going_north - self.going_south\n if x and y:\n x /= 2 ** 0.5\n y /= 2 ** 0.5\n direction = geometry.Vector(x, y)\n self.post(models.events.MoveEntityRequest(self._entity_id, direction))", "def moveTo(self, destination: Coordinates) -> None:\n if self.sprite is not None and self.sprite.rect is not None:\n current_position = 
self.sprite.rect.center\n if current_position != destination:\n x, y = vectorize(current_position, destination)\n self.sprite.rect.move_ip(x, y)", "def move(self):\n active_item = self.stack.pop()\n self.backlog.put(active_item)", "def move_to_stage_0(self, target):\n # type: (RoomPosition) -> None\n ordered_members = self.members_movement_order()\n\n self.log(\"Members {} moving - stage 0.\", _.pluck(ordered_members, 'name'))\n\n for i in range(len(ordered_members) - 1, -1, -1):\n if i == 0:\n ordered_members[i].follow_military_path(self.find_origin(), target, self.new_movement_opts())\n else:\n ordered_members[i].move_to(ordered_members[i - 1])", "def Z1Move(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def share_to_zone(self, zone):\n if isinstance(zone, basestring):\n zone = self.project.get_flow().get_zone(zone)\n zone.add_shared(self)", "def move_to(self, x, y):\n self.x = x\n self.y = y" ]
[ "0.6282164", "0.6204604", "0.5991178", "0.5983192", "0.59183925", "0.5749237", "0.5678627", "0.5613564", "0.557024", "0.5564908", "0.55558306", "0.5479993", "0.5476635", "0.54687375", "0.545523", "0.54463863", "0.54068005", "0.53963697", "0.53693956", "0.53560627", "0.5351554", "0.5345746", "0.53435445", "0.53308153", "0.5309711", "0.5309427", "0.5295723", "0.52760345", "0.5275501", "0.5269506" ]
0.72328943
0
Share this object to a flow zone.
def share_to_zone(self, zone):
        if isinstance(zone, basestring):
            zone = self.project.get_flow().get_zone(zone)
        zone.add_shared(self)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move_to_zone(self, zone):\n if isinstance(zone, basestring):\n zone = self.project.get_flow().get_zone(zone)\n zone.add_item(self)", "def flow(self, flow):\n\n self._flow = flow", "def update_flow(self, flow):\r\n self.flow = flow", "def transfer(self):\n pass", "def _add_zone( self, zone ):\n assert zone.space is None\n zone.space = self\n self.zones.add( zone )", "def unshare_from_zone(self, zone):\n if isinstance(zone, basestring):\n zone = self.project.get_flow().get_zone(zone)\n zone.remove_shared(self)", "def __copy__(self):\n logger.debug(\"Copying Flow() object.\")\n c = Flow()\n c.workingDir = self.workingDir \n c.cleanupTemp = self.cleanupTemp\n c.default_inputpaths = self.default_inputpaths\n c.default_outputpath = self.default_outputpath\n c.startNode = self.startNode\n c.lastNode = self.lastNode\n return c", "def attach(self, destination): \r\n self.destination= destination", "def share(self, value):\n self._tensor.share = value", "def copy_(self, other):\n self.share.copy_(other.share)\n self.encoder = other.encoder", "def WriteFlowObject(self, flow_obj, allow_update=True):\n if flow_obj.client_id not in self.metadatas:\n raise db.UnknownClientError(flow_obj.client_id)\n\n key = (flow_obj.client_id, flow_obj.flow_id)\n\n if not allow_update and key in self.flows:\n raise db.FlowExistsError(flow_obj.client_id, flow_obj.flow_id)\n\n now = rdfvalue.RDFDatetime.Now()\n\n clone = flow_obj.Copy()\n clone.last_update_time = now\n clone.create_time = now\n\n self.flows[key] = clone", "def attach(self, destination): \r\n self.destination=destination", "def zone(self, zone):\n if self._bundle:\n self._bundle.check_zone(zone)\n self._zone = zone", "def copy(self):\n pass", "def copy(self):\n pass", "def copy(self):\n pass", "def Assign(self, other):\r\n\r\n self.host = other.host\r\n self.dock_direction = other.dock_direction", "def share_task(self, observer_uid, tid):\n self.task_controller.share(observer_uid, tid)", "def copy(self):\n return ProvidePort(self)", "def addFlowLocal(self, flow=1):\n self.flow_local += flow", "def save(self, fl_ctx: FLContext, shareable: Shareable, record_origin: str):\n pass", "def sync_local(self, other):\n pass # TODO", "def add_flow(self, flow: FlowRoot):\n with self._lock:\n self.flow_roots[flow.name] = flow", "def sync_remote(self, other):\n pass # TODO", "def zone(self, zone: str):\n\n self._zone = zone", "def share(self):\n shared = super().share()\n shared['opt'] = self.opt\n shared['answers'] = self.answers\n shared['dict'] = self.dict\n shared['START_IDX'] = self.START_IDX\n shared['END_IDX'] = self.END_IDX\n shared['NULL_IDX'] = self.NULL_IDX\n shared['model'] = self.model\n shared['receiver'] = self.receiver\n shared['receiver_dict'] = self.receiver_dict\n if self.opt.get('numthreads', 1) > 1:\n # we're doing hogwild so share the model too\n if type(self.metrics) == dict:\n # move metrics and model to shared memory\n self.metrics = SharedTable(self.metrics)\n self.model.share_memory()\n shared['states'] = { # don't share optimizer states\n 'optimizer_type': self.opt['optimizer'],\n }\n shared['metrics'] = self.metrics # do after numthreads check\n return shared", "def shared(self, value):\n self._shared = value", "def transfer(self, request, *args, **kwargs):\n\t\tuser = request.user\n\t\ttransfer_from = self.get_object()\n\t\ttransfer_to = get_object_or_404(Container, pk=request.data['into'])\n\t\t\n\t\ttransfer_from.transfer_to(transfer_to, user=user)\n\t\n\t\treturn Response({\n\t\t\t'origin': 
self.get_serializer(transfer_from).data,\n\t\t\t'destination': self.get_serializer(transfer_to).data\n\t\t})", "def Share(self, *args):\n return _RWStepGeom.RWStepGeom_RWOrientedSurface_Share(self, *args)", "def share(self, value):\n self._tensor = value" ]
[ "0.5932415", "0.59072703", "0.5562259", "0.55534965", "0.54047054", "0.53833073", "0.536776", "0.5333624", "0.5330987", "0.5305724", "0.52923506", "0.5281795", "0.5245011", "0.52386606", "0.52386606", "0.52386606", "0.5230287", "0.5215504", "0.516134", "0.5156009", "0.51322436", "0.5103596", "0.50932044", "0.50827926", "0.50650734", "0.50480324", "0.50401264", "0.50223356", "0.5004756", "0.4976439" ]
0.7893951
0
Unshare this object from a flow zone.
def unshare_from_zone(self, zone):
        if isinstance(zone, basestring):
            zone = self.project.get_flow().get_zone(zone)
        zone.remove_shared(self)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unpossessed(self):\r\n self.owner = None", "def unblock(self, source):\n raise NotImplementedError", "def unShare(sharedItem):\n sharedItem.store.query(Share, Share.sharedItem == sharedItem).deleteFromStore()", "def unassign_instance(InstanceId=None):\n pass", "def __del__(self):\n \n _cantera.wall_del(self.__wall_id)", "def remove(self):\n self.__source_gate._unregister_outgoing(self)\n self.__target_slot._unregister_incoming(self)", "def clean(self, ref):\n # NOTE: This currently only works on the top-most frame\n f1 = self.frames[0]\n f2 = ref.frames[0]\n f1.subtract(f2)", "def unaway(self):\n self.away()", "async def async_unjoin_player(self):\n await self.coordinator.data.zone_unjoin(self.zone_master, self.zone_id)\n await self.coordinator.async_refresh()\n await self.sync_master()", "def unlink_obj(self, ref_frame, obj_name=None, delete=True):\n self.scene.remove_attached_object(ref_frame, obj_name)\n if delete:\n self.remove_obj(obj_name)", "def remove(self):\n\t\tcall_sdk_function('PrlShare_Remove', self.handle)", "def __delete__(self, obj):\n self._instances.pop(obj, None)", "def unlink(self):\r\n try:\r\n deleteSenderPublicKey(self)\r\n del self._privateKey\r\n del self._secondPrivateKey\r\n except Exception:\r\n pass", "def __sub__(self, this):\n return self.rm(this)", "def __del__(self) -> None:\n self.map.solid_id.discard(self.id)", "def destroy(self):\n # Remove the synapses from all data structures outside this Segment.\n for synapse in self.__synapses:\n self._connectionGroup._removeSynapseFromPresynapticMap(synapse)\n self._connectionGroup._numSynapses -= len(segment._synapses)\n\n # Remove the segment from the cell's list.\n segments = self.cell._segments\n i = segments.index(self)\n del segments[i]\n\n # Free the flatIdx and remove the final reference so the Segment can be\n # garbage-collected.\n self._connectionGroup._freeUIDs.append(self.UID)\n self._connectionGroup._segmentForUID[self.UID] = None", "def on_unassign(self):", "def detached(self, mind):\n self.remote = None\n players.remove(self)", "def remove( self, zone ):\n if zone.space is None:\n raise KeyError( \"zone not in space octree!\" )\n\n # remove zone from space node's contained set\n zone.space.zones.remove( zone )\n\n # set zone's containing space to none\n zone.space = None", "def _unselectObject(self, _object):\n sheet = self._logic._getSheet()\n sheet.unselect(_object)", "def unmakeMove(self, move):", "def drop(self):\n self.id = None", "def remove_from_hand(self):\n pass", "def clear_instance(self, name: str):\n if self._shared is not None:\n return self._shared.pop(name, None)", "def unpublish(self, location):\r\n self.convert_to_draft(location)\r\n super(DraftModuleStore, self).delete_item(location)", "def unassign(self) -> None:\n self._row.remove(self._value)\n self._column.remove(self._value)\n self._box.remove(self._value)\n self._value = 0", "def __del__(self) -> None:\n self.map.face_id.discard(self.id)", "def remove_object(self, obj):\n pass", "def expunge(self, obj):\n self.uow.expunge(obj)\n self.imap.expunge(obj)\n state(obj).session = None", "def delete_transition(self, t):\n transition = self.transition(t)\n transition.from_state.transitions.remove(transition)" ]
[ "0.601934", "0.59649634", "0.59522724", "0.57343334", "0.5674939", "0.56622416", "0.5578648", "0.55039036", "0.5499391", "0.54815376", "0.5437976", "0.5432895", "0.54314196", "0.542576", "0.53983927", "0.5378879", "0.5366127", "0.53503096", "0.53274274", "0.5304955", "0.52901095", "0.5282254", "0.52583265", "0.5247127", "0.52361256", "0.5232135", "0.5229624", "0.52251416", "0.52225214", "0.5220828" ]
0.8114609
0
Get a handle to manage discussions on the managed folder.
def get_object_discussions(self):
        return DSSObjectDiscussions(self.client, self.project_key, "MANAGED_FOLDER", self.odb_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_object_discussions(self):\n return DSSObjectDiscussions(self.client, self.project_key, \"RECIPE\", self.recipe_name)", "def discussion(cls, user, discussion):\n pass", "def discussion(cls, user, discussion):\r\n pass", "def get_discussion(course):\r\n\r\n # the discussion_link setting overrides everything else, even if there is a discussion tab in the course tabs\r\n if course.discussion_link:\r\n return ExternalDiscussionTab(link_value=course.discussion_link)\r\n\r\n # find one of the discussion tab types in the course tabs\r\n for tab in course.tabs:\r\n if isinstance(tab, DiscussionTab) or isinstance(tab, ExternalDiscussionTab):\r\n return tab\r\n return None", "def _create_divided_discussions(self):\n divided_inline_discussions = ['Topic A', ]\n divided_course_wide_discussions = ['Topic B', ]\n divided_discussions = divided_inline_discussions + divided_course_wide_discussions\n\n ItemFactory.create(\n parent_location=self.course.location,\n category='discussion',\n discussion_id=topic_name_to_id(self.course, 'Topic A'),\n discussion_category='Chapter',\n discussion_target='Discussion',\n start=datetime.now()\n )\n discussion_topics = {\n \"Topic B\": {\"id\": \"Topic B\"},\n }\n config_course_cohorts(self.course, is_cohorted=True)\n config_course_discussions(\n self.course,\n discussion_topics=discussion_topics,\n divided_discussions=divided_discussions\n )\n return divided_inline_discussions, divided_course_wide_discussions", "def get_managed_object(self):\n return self.key", "async def get_discussion(context, author:str, permlink:str, observer:str=''):\n db = context['db']\n\n author = valid_account(author)\n permlink = valid_permlink(permlink)\n observer = valid_account(observer, allow_empty=True)\n\n sql = \"SELECT * FROM bridge_get_discussion(:author,:permlink,:observer)\"\n rows = await db.query_all(sql, author=author, permlink=permlink, observer=observer)\n if not rows or len(rows) == 0:\n return {}\n root_id = rows[0]['id']\n all_posts = {}\n root_post = _bridge_post_object(rows[0])\n root_post['active_votes'] = await find_votes_impl(db, rows[0]['author'], rows[0]['permlink'], VotesPresentation.BridgeApi)\n root_post = append_statistics_to_post(root_post, rows[0], False)\n root_post['replies'] = []\n all_posts[root_id] = root_post\n\n parent_to_children_id_map = {}\n\n for index in range(1, len(rows)):\n parent_id = rows[index]['parent_id']\n if parent_id not in parent_to_children_id_map:\n parent_to_children_id_map[parent_id] = []\n parent_to_children_id_map[parent_id].append(rows[index]['id'])\n post = _bridge_post_object(rows[index])\n post['active_votes'] = await find_votes_impl(db, rows[index]['author'], rows[index]['permlink'], VotesPresentation.BridgeApi)\n post = append_statistics_to_post(post, rows[index], False)\n post['replies'] = []\n all_posts[post['post_id']] = post\n\n for key in parent_to_children_id_map:\n children = parent_to_children_id_map[key]\n post = all_posts[key]\n for child_id in children:\n post['replies'].append(_ref(all_posts[child_id]))\n\n #result has to be in form of dictionary of dictionaries {post_ref: post}\n results = {}\n for key in all_posts:\n post_ref = _ref(all_posts[key])\n results[post_ref] = all_posts[key]\n return results", "def create_discussion(self, discussion_type, anchor, channel):\n discussion = self.create(anchor=anchor, channel=channel)\n return discussion", "def getHandle(self):\n return entity", "def getHandle(self):\n return entity", "def get_thread(self):\n return 
Comment.objects.filter(path__startswith=self.get_root_path())", "async def fetch_dm_channel(self) -> channels.DMChannel:\n return await self.app.rest.create_dm_channel(self.id)", "def management(self):\r\n return management.Management(self)", "def get(self,id):\n adm = Administration()\n cm = adm.get_chatmessage_by_id(id)\n return cm", "def get(self, id):\n adm = Administration()\n cm = adm.get_chatmessage_by_id(id)\n return cm", "def expand_discussion(self):\r\n self._find_within(\".discussion-show\").first.click()\r\n EmptyPromise(\r\n self.is_discussion_expanded,\r\n \"Discussion expanded\"\r\n ).fulfill()", "def _handle(self):\n return self.__handle", "def _handle(self):\n return self.__handle", "def _handle(self):\n return self.__handle", "def manager(self):\n if \"manager\" in self._prop_dict:\n if isinstance(self._prop_dict[\"manager\"], OneDriveObjectBase):\n return self._prop_dict[\"manager\"]\n else :\n self._prop_dict[\"manager\"] = DirectoryObject(self._prop_dict[\"manager\"])\n return self._prop_dict[\"manager\"]\n\n return None", "def get_permisson_object(self):\n return self.blog", "def get_permisson_object(self):\n return self.blog", "def get_permisson_object(self):\n return self.blog", "async def managechannels(self, ctx:commands.Context):", "def forum_form_discussion(request, course_id):\r\n course_id = SlashSeparatedCourseKey.from_deprecated_string(course_id)\r\n nr_transaction = newrelic.agent.current_transaction()\r\n\r\n course = get_course_with_access(request.user, 'load_forum', course_id)\r\n with newrelic.agent.FunctionTrace(nr_transaction, \"get_discussion_category_map\"):\r\n category_map = utils.get_discussion_category_map(course)\r\n\r\n try:\r\n unsafethreads, query_params = get_threads(request, course_id) # This might process a search query\r\n threads = [utils.safe_content(thread) for thread in unsafethreads]\r\n except cc.utils.CommentClientMaintenanceError:\r\n log.warning(\"Forum is in maintenance mode\")\r\n return render_to_response('discussion/maintenance.html', {})\r\n\r\n user = cc.User.from_django_user(request.user)\r\n user_info = user.to_dict()\r\n\r\n with newrelic.agent.FunctionTrace(nr_transaction, \"get_metadata_for_threads\"):\r\n annotated_content_info = utils.get_metadata_for_threads(course_id, threads, request.user, user_info)\r\n\r\n with newrelic.agent.FunctionTrace(nr_transaction, \"add_courseware_context\"):\r\n add_courseware_context(threads, course)\r\n\r\n if request.is_ajax():\r\n return utils.JsonResponse({\r\n 'discussion_data': threads, # TODO: Standardize on 'discussion_data' vs 'threads'\r\n 'annotated_content_info': annotated_content_info,\r\n 'num_pages': query_params['num_pages'],\r\n 'page': query_params['page'],\r\n })\r\n else:\r\n with newrelic.agent.FunctionTrace(nr_transaction, \"get_cohort_info\"):\r\n cohorts = get_course_cohorts(course_id)\r\n cohorted_commentables = get_cohorted_commentables(course_id)\r\n\r\n user_cohort_id = get_cohort_id(request.user, course_id)\r\n\r\n context = {\r\n 'csrf': csrf(request)['csrf_token'],\r\n 'course': course,\r\n #'recent_active_threads': recent_active_threads,\r\n 'staff_access': has_access(request.user, 'staff', course),\r\n 'threads': saxutils.escape(json.dumps(threads), escapedict),\r\n 'thread_pages': query_params['num_pages'],\r\n 'user_info': saxutils.escape(json.dumps(user_info), escapedict),\r\n 'flag_moderator': cached_has_permission(request.user, 'openclose_thread', course.id) or has_access(request.user, 'staff', course),\r\n 'annotated_content_info': 
saxutils.escape(json.dumps(annotated_content_info), escapedict),\r\n 'course_id': course.id.to_deprecated_string(),\r\n 'category_map': category_map,\r\n 'roles': saxutils.escape(json.dumps(utils.get_role_ids(course_id)), escapedict),\r\n 'is_moderator': cached_has_permission(request.user, \"see_all_cohorts\", course_id),\r\n 'cohorts': cohorts,\r\n 'user_cohort': user_cohort_id,\r\n 'cohorted_commentables': cohorted_commentables,\r\n 'is_course_cohorted': is_course_cohorted(course_id)\r\n }\r\n # print \"start rendering..\"\r\n return render_to_response('discussion/index.html', context)", "def inline_discussion(request, course_id, discussion_id):\r\n nr_transaction = newrelic.agent.current_transaction()\r\n course_id = SlashSeparatedCourseKey.from_deprecated_string(course_id)\r\n\r\n course = get_course_with_access(request.user, 'load_forum', course_id)\r\n\r\n threads, query_params = get_threads(request, course_id, discussion_id, per_page=INLINE_THREADS_PER_PAGE)\r\n cc_user = cc.User.from_django_user(request.user)\r\n user_info = cc_user.to_dict()\r\n\r\n with newrelic.agent.FunctionTrace(nr_transaction, \"get_metadata_for_threads\"):\r\n annotated_content_info = utils.get_metadata_for_threads(course_id, threads, request.user, user_info)\r\n\r\n allow_anonymous = course.allow_anonymous\r\n allow_anonymous_to_peers = course.allow_anonymous_to_peers\r\n\r\n #since inline is all one commentable, only show or allow the choice of cohorts\r\n #if the commentable is cohorted, otherwise everything is not cohorted\r\n #and no one has the option of choosing a cohort\r\n is_cohorted = is_course_cohorted(course_id) and is_commentable_cohorted(course_id, discussion_id)\r\n is_moderator = cached_has_permission(request.user, \"see_all_cohorts\", course_id)\r\n\r\n cohorts_list = list()\r\n\r\n if is_cohorted:\r\n cohorts_list.append({'name': _('All Groups'), 'id': None})\r\n\r\n #if you're a mod, send all cohorts and let you pick\r\n\r\n if is_moderator:\r\n cohorts = get_course_cohorts(course_id)\r\n for cohort in cohorts:\r\n cohorts_list.append({'name': cohort.name, 'id': cohort.id})\r\n\r\n else:\r\n #students don't get to choose\r\n cohorts_list = None\r\n\r\n return utils.JsonResponse({\r\n 'discussion_data': map(utils.safe_content, threads),\r\n 'user_info': user_info,\r\n 'annotated_content_info': annotated_content_info,\r\n 'page': query_params['page'],\r\n 'num_pages': query_params['num_pages'],\r\n 'roles': utils.get_role_ids(course_id),\r\n 'allow_anonymous_to_peers': allow_anonymous_to_peers,\r\n 'allow_anonymous': allow_anonymous,\r\n 'cohorts': cohorts_list,\r\n 'is_moderator': is_moderator,\r\n 'is_cohorted': is_cohorted\r\n })", "def management_ref(self):\n\n return self._management_ref", "def GetManager(self):\r\n\r\n return self.manager", "def shared_collection(self):\n return self.manager.shared_collection_api", "def getManager(self):\n return self._manager" ]
[ "0.54230756", "0.5351559", "0.53328663", "0.5113142", "0.5088055", "0.5067406", "0.48653966", "0.4814929", "0.47993654", "0.47993654", "0.4738103", "0.47297895", "0.4671899", "0.4656283", "0.4654367", "0.46461034", "0.46381405", "0.46381405", "0.46381405", "0.46081758", "0.45936283", "0.45936283", "0.45936283", "0.45909366", "0.45374802", "0.4531689", "0.4523333", "0.44916666", "0.44769925", "0.44348693" ]
0.61799204
0
Save the changes to the settings on the managed folder.
def save(self): self.folder.client._perform_empty( "PUT", "/projects/%s/managedfolders/%s" % (self.folder.project_key, self.folder.odb_id), body=self.settings)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(self):\n self.client._perform_empty(\"PUT\", \"/project-folders/%s/settings\" % (self.project_folder_id), body = self.settings)", "def saveSettings(self):\n self.userFiles.applyData()\n self.userPersonal.applyData()", "def save(self):\n return self.client._perform_empty(\"PUT\", \"/admin/general-settings\", body = self.settings)", "def saveSettings(self):\n helpers.saveFile(self.dataDir, self.settingsFilename, json.dumps(self.settings))", "def saveConfig(self):\n newPath = self.newFolderPath.text()\n config.set(\"saveLocation\", str(newPath))\n config.save()\n self.reloadSettings()", "def save(self):\n\t\tself.CONFIG.save()\n\t\tself.temp_files.save()", "def save(self):\n sublime.save_settings(self.file_name)", "def save_settings(self):\n with open(self.settings_path, \"w\") as f:\n json.dump(self.settings, f, indent=4)", "def save(self) -> None:\n self._client.save_config()", "def save(self):\n self.__config.sync()\n self.__saved = True\n Logger().debug(\"Configuration saved\")", "def save():\n\n env.config.save(env.config_file)", "def save(self):\n file = open(self.path, 'w')\n self.config.write(file)\n file.close()", "def save_settings(self):\n logger.info(f'Saving settings: {self.settings_dict}')\n for k, section in self.settings_dict.items():\n for setting_name in section.keys():\n value = self.get_control_value(setting_name)\n if value is not None:\n section[setting_name] = value\n\n write_settings(self.settings_dict)", "def save(self):\n self.workspace.client._perform_empty(\n \"PUT\", \"/workspaces/%s\" % self.workspace.workspace_key,\n body=self.settings)", "def save(self):\n self.client._perform_empty(\n \"PUT\", \"/projects/%s/apiservices/%s/settings\" % (self.project_key, self.service_id),\n body = self.settings)", "def save_config(self):\n\n return self.perform_action('/mgmtd/db/save')", "def save(self):\n if self.location is None:\n logger.debug(\"Save requested but not saving settings, \"\n \"location is None\")\n return\n\n if self._saving or not self._dirty:\n return\n\n self._saving = True\n\n logger.debug(\"Saving settings...\")\n\n with open(self.location + \".new\", 'w') as f:\n self.write(f)\n\n try:\n # make it readable by current user only, to protect private data\n os.fchmod(f.fileno(), 384)\n except:\n pass # fail gracefully, eg if on windows\n\n f.flush()\n\n try:\n os.rename(self.location, self.location + \".old\")\n except:\n pass # if it doesn'texist we don't care\n\n os.rename(self.location + \".new\", self.location)\n\n try:\n os.remove(self.location + \".old\")\n except:\n pass\n\n self._saving = False\n self._dirty = False", "def update(self):\n self.save_config_file()", "def _save_changes(self):\n copy2(self._cfg_filename, self._cfg_filename + \".bak\")\n with open(self._cfg_filename, \"w\", encoding=\"utf-8\") as self._cfg_file:\n self.write(self._cfg_file)", "def saveExitConfig(self):\n newPath = self.newFolderPath.text()\n config.set(\"saveLocation\", str(newPath))\n config.save()\n self.reloadSettings()\n self.close()", "def _onSettings(self, event):\n dialog = sc.SettingsDialog(self)\n if dialog.ShowModal() == wx.ID_OK:\n dialog.saveSettings()\n dialog.Destroy()", "def save():\n log.info(\"Saving settings file\")\n with open(SETTINGS_FILE, \"w\") as file:\n json.dump(_names, file)", "def __saveSettings(self):\n\t\tthe_paths = []\n\t\tfor str in self.__allPaths():\n\t\t\tif str not in default_paths():\n\t\t\t\tthe_paths.append(str)\n\t\tQSettings().setValue(\"paths\", the_paths)", "def save_setting(self):\n if self.is_checked.get():\n if 
\"Email\" not in s.alert:\n s.updateAlert(\"Email\")\n s.updateEmail(self.email_addr_entry.get())\n if not self.is_checked.get():\n if \"Email\" in s.alert:\n s.deleteAlert(\"Email\")\n s.deleteEmail()\n # Check the refresh interval\n if self.is_minimize_to_system_tray.get():\n s.updateMinimize(\"True\")\n else:\n s.updateMinimize(\"False\")\n\n if self.is_launch_at_start_up.get():\n s.updateLaunchAtStartup(\"True\")\n become_persistent(__file__)\n else:\n s.updateLaunchAtStartup(\"False\")\n remove_startup()\n\n s.updateSetting(self.interval_entry.get())\n Tracker.save_state(Tracker.FILENAME, s)", "def saveSettings(self):\n self.genFiles.applyData()\n self.genGraph.applyData()", "def save(self):\n Registry.SetKey(self.CONFIG_NAME, self.config, True)\n self.load() # for validation", "def save(self):\n for p, c in self.configs_:\n c.write(p)", "def save(self):\n Preferences.setVCS(\n \"AutoClose\",\n self.vcsAutoCloseCheckBox.isChecked())\n Preferences.setVCS(\n \"AutoSaveFiles\",\n self.vcsAutoSaveCheckBox.isChecked())\n Preferences.setVCS(\n \"AutoSaveProject\",\n self.vcsAutoSaveProjectCheckBox.isChecked())\n Preferences.setVCS(\n \"StatusMonitorInterval\",\n self.vcsStatusMonitorIntervalSpinBox.value())\n Preferences.setVCS(\n \"MonitorLocalStatus\",\n self.vcsMonitorLocalStatusCheckBox.isChecked())\n Preferences.setVCS(\n \"AutoUpdate\",\n self.autoUpdateCheckBox.isChecked())\n \n self.saveColours(Preferences.setProjectBrowserColour)", "def save_switch_configs(self):", "def saveSettings():\t\n\tglobal settings\n\tfout = open(config_file,'w')\n\tfout.write(json.dumps(settings, sort_keys=True, indent=4))\n\tfout.close()" ]
[ "0.76129395", "0.72382766", "0.7231987", "0.7208549", "0.7072421", "0.70549095", "0.69852805", "0.6924624", "0.6886411", "0.68704027", "0.684195", "0.6815835", "0.6777165", "0.67606515", "0.6706621", "0.6703455", "0.66838694", "0.66620696", "0.66322386", "0.6589611", "0.65859824", "0.6542423", "0.6536462", "0.653643", "0.65302265", "0.6518586", "0.6512069", "0.6510652", "0.64515674", "0.6446406" ]
0.7415783
1
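A minimal usage sketch for the `save()` method in the record above. The host, API key, project key, and folder id are placeholder assumptions, and `dataikuapi.DSSClient` / `get_project()` / `get_managed_folder()` / `get_settings()` are the assumed client entry points leading to the settings object whose `save()` is shown; only the partitioning setter from a later record is otherwise exercised.

```python
# Sketch only: connection details and ids are placeholders, and the
# chain of accessors is an assumption about the dataikuapi client.
import dataikuapi

client = dataikuapi.DSSClient("http://dss.example.com:11200", "my-api-key")
folder = client.get_project("MYPROJECT").get_managed_folder("my_folder_id")

settings = folder.get_settings()
settings.add_discrete_partitioning_dimension("country")  # see the later record
settings.save()  # issues the PUT /projects/.../managedfolders/... call above
```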
Add a discrete partitioning dimension.
def add_discrete_partitioning_dimension(self, dim_name): self.settings["partitioning"]["dimensions"].append({"name": dim_name, "type": "value"})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_partition(self):\n\t\treturn handle_to_object(call_sdk_function('PrlVmDevHd_AddPartition', self.handle))", "def add_time_partitioning_dimension(self, dim_name, period=\"DAY\"):\n self.settings[\"partitioning\"][\"dimensions\"].append({\"name\": dim_name, \"type\": \"time\", \"params\":{\"period\": period}})", "def add_dimension(self, name):\n dimension = Dimension(name, self.metadata, self.specifications, self.language)\n self.dimensions.append(dimension)\n return", "def add_dimension(self,dimension):\n\n # Add dimension to name list and as an attribute\n if dimension.name in self._dimensions:\n raise Exception('Unable to add dimension. A dimension'\\\n +' of the same name: {name}, already exists.'\\\n .format(name=dimension.name))\n\n self._dimensions.append(dimension.name)\n setattr(self,dimension.name,dimension)", "def add_dimension(self,dimension):\n\n # Add dimension to name list and as an attribute\n if dimension.name in self._dimensions:\n raise Exception('Unable to add dimension. A dimension'\\\n +' of the same name: {name}, already exists.'\\\n .format(name=dimension.name))\n\n self._dimensions.append(dimension.name)\n setattr(self,dimension.name,dimension)", "def add_dimension(\n self, dim_id: int, name: str, dim_config=None\n ) -> mcpython.engine.world.AbstractInterface.IDimension:\n if dim_config is None:\n dim_config = {}\n dim = self.dimensions[dim_id] = mcpython.common.world.Dimension.Dimension(\n self, dim_id, name, gen_config=dim_config\n )\n self.dim_to_id[dim.name] = dim_id\n shared.world_generation_handler.setup_dimension(dim, dim_config)\n return dim", "def add_probability(self, partitioning, part, prob):\n self.prob.setdefault(partitioning, dict())\n self.prob[partitioning][part] = prob", "def create_dim(self, dimname, size=None):\n raise NotImplementedError", "def _partitioner(shape, dtype):\n if not isinstance(shape, tensor_shape.TensorShape):\n raise ValueError(f\"shape is not a TensorShape: {shape}\")\n if not shape.is_fully_defined():\n raise ValueError(f\"shape is not fully defined: {shape}\")\n if not isinstance(dtype, dtypes.DType):\n raise ValueError(f\"dtype is not a DType: {dtype}\")\n\n if dtype.base_dtype == dtypes.string:\n element_size = bytes_per_string_element\n else:\n element_size = dtype.size\n\n partitions = [1] * shape.ndims\n bytes_per_slice = 1.0 * (\n shape.num_elements() / shape.dims[axis].value) * element_size\n # How many slices can we fit on one shard of size at most max_shard_bytes?\n # At least one slice is required.\n slices_per_shard = max(1, math.floor(max_shard_bytes / bytes_per_slice))\n # How many shards do we need for axis given that each shard fits\n # slices_per_shard slices from a total of shape[axis] slices?\n axis_shards = int(math.ceil(\n 1.0 * shape.dims[axis].value / slices_per_shard))\n if max_shards:\n axis_shards = min(max_shards, axis_shards)\n\n partitions[axis] = axis_shards\n\n return partitions", "def partition(self, dimension, processes=None):\n if processes:\n q = (self._table.source.isin(processes) |\n self._table.target.isin(processes))\n values = self._table.loc[q, dimension].unique()\n else:\n values = self._table[dimension].unique()\n return Partition.Simple(dimension, values)", "def CalculateDynamicPartitionSize(self, image_size):\n raise NotImplementedError", "def AddDimOrdinate(self,definitionPnt,leaderPnt,axis):\n\t\treturn self.Space.AddDimOrdinate(definitionPnt,leaderPnt,axis)", "def _record_specific_partition(r_d, numnodes, cur):\n # No partitioning has been specified. 
Create the appropriate entries.\n if r_d['partmtd'] == 0:\n for i in range(1, numnodes + 1):\n Database.execute(cur, 'UPDATE dtables '\n 'SET partmtd = 0 '\n 'WHERE nodeid = ? AND tname = ?',\n ErrorHandle.raise_handler, (i, r_d['tname']))\n\n # Range partitioning has been specified. Create the appropriate entries.\n elif r_d['partmtd'] == 1:\n for i in range(1, numnodes + 1):\n Database.execute(cur, 'UPDATE dtables '\n 'SET partcol = ?, partparam1 = ?, '\n 'partparam2 = ?, partmtd = 1 '\n 'WHERE nodeid = ? AND tname = ?',\n ErrorHandle.raise_handler,\n (r_d['partcol'], r_d['param1'][i - 1], r_d['param2'][i - 1], i,\n r_d['tname']))\n\n # Hash partitioning has been specified. Create the appropriate entries.\n elif r_d['partmtd'] == 2:\n for i in range(1, numnodes + 1):\n Database.execute(cur, 'UPDATE dtables '\n 'SET partcol = ?, partparam1 = ?, partmtd = 2 '\n 'WHERE nodeid = ? AND tname = ?',\n ErrorHandle.raise_handler,\n (r_d['partcol'], r_d['param1'], i, r_d['tname']))", "def makeDimension(self, image):\n _p1 = self.getFirstPoint()\n _p2 = self.getSecondPoint()\n _x, _y = self.getDimPosition()\n if (_p1 is not None and\n _p2 is not None and\n _x is not None and\n _y is not None):\n _ds = image.getOption('DIM_STYLE')\n _hdim = dimension.HorizontalDimension(_p1, _p2, _x, _y, _ds)\n self._setDimension(_hdim)\n self.setDimPrefs(image)", "def draw_partition(x, alpha=1., d=0.):\n N = len(x)\n counts = draw_counts(N, alpha, d)\n return partition_from_counts(x, counts)", "def AddDimDiametric(self,chordPnt,farChordPnt,leaderLength):\n\t\treturn self.Space.AddDimDiametric(chordPnt,farChordPnt,leaderLength)", "def addPartition(self,partitionData):\n self.PCAs[partitionData.id] = partitionData\n self.pcaStatemachineLock[partitionData.id] = threading.Lock()\n self.StateMachineForPca[partitionData.id] = Statemachine(self.StateMachineFile,\"Unconfigured\")\n self.isPCAinTransition[partitionData.id] = False\n self.pcaSequenceNumber[partitionData.id] = 0", "def add_hive_partition(_):\n LOGGER.error('Add Hive Parition is not yet supported, exiting!')\n raise NotImplementedError", "def expand_dim(self, dim, extra_internal_domain=None, extra_user_domain=None):\r\n expand_dim = dim - self.multi_index.spatial_dimension\r\n\r\n self.multi_index.expand_dim(dim) # breaks if dim<spacial_dimension, i.e. 
expand_dim<0\r\n extra_internal_domain = verify_domain(extra_internal_domain, expand_dim)\r\n self.internal_domain = np.concatenate((self.internal_domain, extra_internal_domain))\r\n extra_user_domain = verify_domain(extra_user_domain, expand_dim)\r\n self.user_domain = np.concatenate((self.user_domain, extra_user_domain))", "def add_subdivision(self, parent, condition, client_id=None):\n\n biddable_ad_group_criterion=set_elements_to_none(campaign_service.factory.create('BiddableAdGroupCriterion'))\n product_partition=set_elements_to_none(campaign_service.factory.create('ProductPartition'))\n # If the root node is a unit, it would not have a parent\n product_partition.ParentCriterionId=parent.ad_group_criterion.Id if parent is not None and parent.ad_group_criterion is not None else None\n product_partition.Condition=condition\n product_partition.PartitionType='Subdivision'\n biddable_ad_group_criterion.Criterion=product_partition\n biddable_ad_group_criterion.CriterionBid=None\n biddable_ad_group_criterion.AdGroupId=self._ad_group_id\n biddable_ad_group_criterion.Status=None\n if hasattr(biddable_ad_group_criterion, 'EditorialStatus'):\n biddable_ad_group_criterion.EditorialStatus=None\n biddable_ad_group_criterion.Id=self._reference_id\n self._reference_id=self._reference_id\n self._reference_id-=1\n\n partition_action=BulkAdGroupProductPartition()\n partition_action.client_id=client_id\n partition_action.ad_group_criterion=biddable_ad_group_criterion\n self._partition_actions.append(partition_action)\n\n return partition_action", "def remove_partitioning(self):\n self.settings[\"partitioning\"] = {\"dimensions\" : []}", "def insert_dimension(self, position):\n return self.foreach(\n lambda k,v: (k[:position] + (0,) + k[position:], v),\n dimensions = self.dims+1,\n shape = self.shape,\n )", "def getDimension(unique_name):", "def getDimension(unique_name):", "def _add_(self, other):\n new_partition = list(self) + list(other)\n new_partition.sort(reverse=True)\n return BosonicPartition(BosonicPartitions(), new_partition)", "def nextDim(leaf, args):\n x = args['xsectionNum'] # number of subregions to partition for the leaf\n lb = leaf.lb # the lower bound of the leaf region\n ub = leaf.ub # the upper bound of the leaf region\n dimDiff = [] # store the diff value (e.g. 
max-min of dominantion count) for partition direction\n dimX = len(lb) # the number of dimension\n visitedPoints = leaf.visitedPoints() # all the visited points in the tree\n pool = leaf.pool # the visited points in this leaf\n #determine the deminsion of point's objective\n dim = len(leaf.problem.objectives) \n #recorganize all the visited points together into one sorted array\n _visitedPoints = utils.dictToSortedNumpyArray(visitedPoints,dim) \n # calculate the domination count for each point in this pool\n dominantionCount = {} \n for key in pool:\n _p = np.array([pool[key].mean])\n dominantionCount[key] = _cutils.calDominationCount(_p, _visitedPoints, len(_p))[1][0]\n # enumerate all the possible feasible next dimension to partition\n feasibleDim = feasible(leaf, x)\n for dimID in feasibleDim:\n # determine the partition unit distance \n unit = (ub[dimID] - lb[dimID]) / x\n # initialize the promisingIndex for each subregion based on xsection\n promisingIndex = [] \n for i in range(x):\n _lb, _ub = [np.array([]) for _ in range(2)]\n # change the lower and upper bound value at dimID for subRegion x\n for j in range(dimX):\n _lb = np.append(_lb, lb[j] + (unit * i) * (j == dimID))\n _ub = np.append(_ub, ub[j] - (unit * (x - i - 1)) * (j == dimID))\n # calculate the promisingIndex for each subregions\n poolDominantionCount = [np.nan] # in case no points in this subregion\n for key in pool:\n p = pool[key] \n if all(_lb <= p.x) and all(p.x < _ub):\n poolDominantionCount.append(dominantionCount[key])\n # calculate the promising index in this subregion \n promisingIndex.append(np.nanmin(poolDominantionCount))\n # calculate the dimDiff for the dimension dimID \n diff = np.nanmax(promisingIndex) - np.nanmin(promisingIndex)\n dimDiff.append(diff)\n # select the dimension with largest dimDiff value as next dimension to partition\n if dimDiff:\n maxDiff = np.nanmax(dimDiff)\n else:\n maxDiff = np.nan\n if not(np.isnan(maxDiff)):\n candidate = [feasibleDim[i] for i in range(len(feasibleDim)) if dimDiff[i] == maxDiff] \n dim = candidate[np.random.randint(0,len(candidate))]\n elif dimDiff:\n dim = feasibleDim[np.random.randint(0,len(feasibleDim))]\n else:\n dim = np.random.randint(0, dimX)\n #print('Select Dim %d with maxDiff %.2f, range %.2f at level %d' % (dim, maxDiff, ub[dim]-lb[dim],leaf.level))\n return dim", "def dimension(self, name: str):\n return BoundDim(self, name)", "def capacitygroup_add_partition(cmd_ctx, cpc, capacitygroup, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_capacitygroup_add_partition(\n cmd_ctx, cpc, capacitygroup, options))", "def create_net_partition(self, body=None):\r\n return self.post(self.net_partitions_path, body=body)", "def dim2():\n probs = (0.1, 0.2, 0.3, 0.4)\n categories = (\"asdfa\", \"2\", \"3\", \"4\")\n categories = OrderedDict(zip(categories, probs))\n dim2 = Categorical(\"yolo2\", categories, default_value=\"2\")\n return dim2" ]
[ "0.5996331", "0.59021497", "0.5741625", "0.563048", "0.563048", "0.55876046", "0.5564135", "0.55327845", "0.54887104", "0.5450582", "0.5427648", "0.54159683", "0.5277757", "0.5268855", "0.5246866", "0.5199661", "0.513178", "0.50552285", "0.5037322", "0.5004306", "0.49957278", "0.49520573", "0.49367177", "0.49367177", "0.49056774", "0.48761767", "0.4867201", "0.48375562", "0.48143658", "0.4788809" ]
0.8221045
0
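To show what the method above writes into the settings payload without a live DSS instance, here is a self-contained stand-in; the class name and the seed dict shape are assumptions mirrored from the method bodies in these records.

```python
# Stand-in mirroring the record's method; not the real dataikuapi class.
class FolderSettingsStub:
    def __init__(self):
        # dict shape assumed from the setter methods in these records
        self.settings = {"partitioning": {"dimensions": []}}

    def add_discrete_partitioning_dimension(self, dim_name):
        self.settings["partitioning"]["dimensions"].append(
            {"name": dim_name, "type": "value"})

stub = FolderSettingsStub()
stub.add_discrete_partitioning_dimension("country")
print(stub.settings["partitioning"]["dimensions"])
# -> [{'name': 'country', 'type': 'value'}]
```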
Add a time partitioning dimension.
def add_time_partitioning_dimension(self, dim_name, period="DAY"): self.settings["partitioning"]["dimensions"].append({"name": dim_name, "type": "time", "params":{"period": period}})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_discrete_partitioning_dimension(self, dim_name):\n self.settings[\"partitioning\"][\"dimensions\"].append({\"name\": dim_name, \"type\": \"value\"})", "def add_timedim(data, date=\"1970-01-01\"):\n if isinstance(data, xr.DataArray):\n if \"time\" in data.dims:\n raise ValueError(\n \"You trying to add time dimension to the DataArray that already have it. \\\nThe reason migh be that you trying to use 2d variable (e.g. `a_ice`) \\\nin a function that accepts only 3d variables (e.g. `hovm_data`)\"\n )\n timestamp = [np.array(np.datetime64(date, \"ns\"))]\n data = data.expand_dims({\"time\": timestamp}, axis=0)\n return data\n else:\n data = np.expand_dims(data, axis=0)\n return data", "def dim_time(immigration_data_df):\n time_df_a = (\n immigration_data_df\n .select(\n F.col(\"arrdate\").alias(\"date\"))\n .where(F.col(\"arrdate\").isNotNull())\n )\n\n time_df_d = (\n immigration_data_df\n .select(\n F.col(\"depdate\").alias(\"date\"))\n .where(F.col(\"depdate\").isNotNull())\n )\n\n\n time_df_stg = time_df_a.union(time_df_d).distinct()\n time_df_stg = time_df_stg.select(F.col(\"date\").cast(IntegerType()))\n time_df_stg = time_df_stg.withColumn(\"sas_date\", F.to_date(F.lit(\"01/01/1960\"), \"MM/dd/yyyy\"))\n time_df_stg = time_df_stg.withColumn(\"date\", F.expr(\"date_add(sas_date, date)\"))\n\n\n date_dim = (\n time_df_stg\n .select(\"date\")\n .withColumn(\"year\", F.year(\"date\"))\n .withColumn(\"month\", F.month(\"date\"))\n .withColumn(\"day\", F.dayofmonth(\"date\"))\n .withColumn(\"datekey\", F.date_format(F.col(\"date\"), \"yyyyMMdd\"))\n .withColumn(\"quarter\", F.quarter(\"date\"))\n .withColumn(\"day_of_month\", F.dayofmonth(\"date\"))\n .withColumn(\"day_of_week\", F.dayofweek(\"date\"))\n .withColumn(\"week_of_year\", F.weekofyear(\"date\"))\n .sort(\"datekey\")\n )\n\n date_dim.show()\n return date_dim", "def copy_and_append_time_dimension_to_netcdf_dataset(self,dataset_in,dataset_out):\n\n for dim_name,dim_obj in list(dataset_in.dimensions.items()):\n dataset_out.createDimension(dim_name,len(dim_obj)\n if not dim_obj.isunlimited() else None)\n dataset_out.createDimension('time',None)\n times = dataset_out.createVariable(\"time\",'f8',(\"time\",))\n times.units = \"years since 0001-01-01 00:00:00.0\"\n times.calendar = \"proleptic_gregorian\"\n times[0] = np.array([0.0])\n for var_name, var_obj in list(dataset_in.variables.items()):\n new_var = dataset_out.createVariable(var_name,var_obj.datatype,var_obj.dimensions\n if (len(var_obj.dimensions) <= 1\n or var_name == 'AREA') else\n [\"time\"] + list(var_obj.dimensions))\n if len(var_obj.dimensions) <= 1 or var_name == 'AREA':\n new_var[:] = var_obj[:]\n else:\n new_var[0,:] = var_obj[:]\n new_var.setncatts({attr_name: var_obj.getncattr(attr_name) for attr_name in var_obj.ncattrs()})", "def add_partition(self):\n\t\treturn handle_to_object(call_sdk_function('PrlVmDevHd_AddPartition', self.handle))", "def __create_partition(self,partition_dt):\n\n p_array = self.__partition_date_to_path_array(partition_dt)\n \n # For each component, fetch the group or create it\n # Year\n try:\n y_group = self.root_group._f_get_child(p_array[0])\n except tables.NoSuchNodeError:\n y_group = self.file.create_group(self.root_group,p_array[0])\n\n # Month\n try:\n m_group = y_group._f_get_child(p_array[1])\n except tables.NoSuchNodeError:\n m_group = self.file.create_group(y_group,p_array[1])\n\n # Day\n try:\n d_group = m_group._f_get_child(p_array[2])\n except tables.NoSuchNodeError:\n d_group = 
self.file.create_group(m_group,p_array[2])\n\n # We need to create the table in the day group\n ts_data = self.file.create_table(d_group,'ts_data',self.table_description,self.table_title,\n self.table_filters, self.table_expectedrows, self.table_chunkshape, self.table_byteorder)\n\n # Need to save this as an attribute because it doesn't seem to be saved anywhere\n ts_data.attrs._TS_TABLES_EXPECTEDROWS_PER_PARTITION = self.table_expectedrows\n\n return ts_data", "def add_datepart(df, fldname, drop=True, time=False):\n fld = df[fldname]\n fld_dtype = fld.dtype\n if isinstance(fld_dtype, pd.core.dtypes.dtypes.DatetimeTZDtype):\n fld_dtype = np.datetime64\n\n if not np.issubdtype(fld_dtype, np.datetime64):\n df[fldname] = fld = pd.to_datetime(fld, infer_datetime_format=True)\n targ_pre = re.sub('[Dd]ate$', '', fldname)\n attr = ['Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Dayofyear',\n 'Is_month_end', 'Is_month_start', 'Is_quarter_end', 'Is_quarter_start', 'Is_year_end', 'Is_year_start']\n if time: attr = attr + ['Hour', 'Minute', 'Second']\n for n in attr: df[targ_pre + n] = getattr(fld.dt, n.lower())\n df[targ_pre + 'Elapsed'] = fld.astype(np.int64) // 10 ** 9\n if drop: df.drop(fldname, axis=1, inplace=True)", "def add_hive_partition(_):\n LOGGER.error('Add Hive Parition is not yet supported, exiting!')\n raise NotImplementedError", "def add_part(self, begin, length, type, mntpnt):\n end = begin+length-1\n logging.debug(\"add_part - begin %d, length %d, end %d\" % (begin, length, end))\n for part in self.partitions:\n if (begin >= part.begin and begin <= part.end) or \\\n (end >= part.begin and end <= part.end):\n raise Exception('Partitions are overlapping')\n if begin > end:\n raise Exception('Partition\\'s last block is before its first')\n if begin < 0 or end > self.size:\n raise Exception('Partition is out of bounds. start=%d, end=%d, disksize=%d' % (begin,end,self.size))\n part = self.Partition(disk=self, begin=begin, end=end, type=str_to_type(type), mntpnt=mntpnt)\n self.partitions.append(part)\n\n # We always keep the partitions in order, so that the output from kpartx matches our understanding\n self.partitions.sort(cmp=lambda x,y: x.begin - y.begin)", "def add_dimension(self, name):\n dimension = Dimension(name, self.metadata, self.specifications, self.language)\n self.dimensions.append(dimension)\n return", "def add_dimension(self,dimension):\n\n # Add dimension to name list and as an attribute\n if dimension.name in self._dimensions:\n raise Exception('Unable to add dimension. A dimension'\\\n +' of the same name: {name}, already exists.'\\\n .format(name=dimension.name))\n\n self._dimensions.append(dimension.name)\n setattr(self,dimension.name,dimension)", "def add_dimension(self,dimension):\n\n # Add dimension to name list and as an attribute\n if dimension.name in self._dimensions:\n raise Exception('Unable to add dimension. 
A dimension'\\\n +' of the same name: {name}, already exists.'\\\n .format(name=dimension.name))\n\n self._dimensions.append(dimension.name)\n setattr(self,dimension.name,dimension)", "def addPartition(self,partitionData):\n self.PCAs[partitionData.id] = partitionData\n self.pcaStatemachineLock[partitionData.id] = threading.Lock()\n self.StateMachineForPca[partitionData.id] = Statemachine(self.StateMachineFile,\"Unconfigured\")\n self.isPCAinTransition[partitionData.id] = False\n self.pcaSequenceNumber[partitionData.id] = 0", "def add_clustering(self, clustering: object, time: object):\n\n named_clustering = {}\n for i, c in enumerate(clustering.communities):\n named_clustering[f\"{time}_{i}\"] = c\n\n self.clusterings[self.current_observation] = NamedClustering(\n named_clustering,\n clustering.graph,\n clustering.method_name,\n method_parameters=clustering.method_parameters,\n overlap=clustering.overlap,\n )\n self.time_to_obs[time] = self.current_observation\n self.obs_to_time[self.current_observation] = time\n self.current_observation += 1", "def time_partitioning(self) -> 'outputs.TimePartitioningResponse':\n return pulumi.get(self, \"time_partitioning\")", "def add_time_bounds(nc, varname):\n THREE_HOURS = 60*60*3 # in seconds\n bnds_name = 'time_bnds'\n bounds_dim = 'nv'\n\n # Create bounds dimension\n nc.createDimension(bounds_dim, 2)\n\n # Get variable matching varname\n\n time_var = nc.variables['time']\n time_var.setncattr('bounds', bnds_name)\n time_data = time_var[:]\n time_length = len(time_data)\n\n # reshape time data\n bounds_data = np.dstack((time_data,time_data)).reshape(time_length,2)\n for i in bounds_data:\n i[0] = i[0] - (THREE_HOURS)\n bounds_var = nc.createVariable(bnds_name, time_var.dtype, ('time', bounds_dim), fill_value=9999)\n bounds_var[:] = bounds_data", "def append_time_dim(arr, y_, time_stamps):\n time_arr = np.zeros([arr.shape[0]-time_stamps, int(time_stamps*arr.shape[1])])\n for time_idx, time_ in enumerate(np.arange(time_stamps, arr.shape[0])):\n for time_point in range(time_stamps):\n time_arr[time_idx, time_point*arr.shape[1]:(time_point+1)*arr.shape[1]] = arr[time_-time_point,:]\n return time_arr, y_[time_stamps:]", "def __append_rows_to_partition(self,partition_dt,rows):\n\n ts_data = self.__fetch_or_create_partition_table(partition_dt)\n ts_data.append(rows)", "def add_time_features(self, year=False, month=False, week=True, tod=True, dow=True):\n\n var_to_expand = []\n\n if self.preprocessed_data.empty:\n data = self.original_data\n else:\n data = self.preprocessed_data\n\n if year:\n data[\"year\"] = data.index.year\n var_to_expand.append(\"year\")\n if month:\n data[\"month\"] = data.index.month\n var_to_expand.append(\"month\")\n if week:\n data[\"week\"] = data.index.week\n var_to_expand.append(\"week\")\n if tod:\n data[\"tod\"] = data.index.hour\n var_to_expand.append(\"tod\")\n if dow:\n data[\"dow\"] = data.index.weekday\n var_to_expand.append(\"dow\")\n\n # One-hot encode the time features\n for var in var_to_expand:\n \n add_var = pd.get_dummies(data[var], prefix=var, drop_first=True)\n \n # Add all the columns to the model data\n data = data.join(add_var)\n\n # Drop the original column that was expanded\n data.drop(columns=[var], inplace=True)\n\n self.preprocessed_data = data", "def add_time_point(self,time, mdv_instance):\n\n self.mdvtc[time] = mdv_instance", "def add_datepart(df, fldname, drop=True, time=False, errors=\"raise\"):\n fld = df[fldname]\n fld_dtype = fld.dtype\n if isinstance(fld_dtype, 
pd.core.dtypes.dtypes.DatetimeTZDtype):\n fld_dtype = np.datetime64\n\n if not np.issubdtype(fld_dtype, np.datetime64):\n df[fldname] = fld = pd.to_datetime(fld, infer_datetime_format=True, errors=errors)\n targ_pre = re.sub('[Dd]ate$', '', fldname)\n attr = ['Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Dayofyear',\n 'Is_month_end', 'Is_month_start', 'Is_quarter_end', 'Is_quarter_start', 'Is_year_end', 'Is_year_start']\n if time: attr = attr + ['Hour', 'Minute', 'Second']\n for n in attr: df[targ_pre + n] = getattr(fld.dt, n.lower())\n df[targ_pre + 'Elapsed'] = fld.astype(np.int64) // 10 ** 9\n if drop: df.drop(fldname, axis=1, inplace=True)", "def create_timestructured(self, good, quantity):\n length = len(self._haves[good].time_structure)\n for i in range(length):\n qty = quantity[i] if type(quantity) == list else quantity / length\n self._haves[good].time_structure[i] += qty", "def test_dimension_size_override(self, nt=100):\n i, j, k = dimify('i j k')\n a = TimeData(name='a', dimensions=(i, j, k))\n one = symbol(name='one', dimensions=(i, j, k), value=1.)\n op = Operator(Eq(a.forward, a + one))\n\n # Test dimension override via the buffered dimenions\n a.data[0] = 0.\n op(a=a, t=6)\n assert(np.allclose(a.data[1], 5.))\n\n # Test dimension override via the parent dimenions\n a.data[0] = 0.\n op(a=a, time=5)\n assert(np.allclose(a.data[0], 4.))", "def add_entry(self, timestamp, data):\n if not isinstance(data, list):\n data = [data]\n\n if len(data) != self._dimensionCount:\n raise ValueError(\"data does contain %s instead of %s dimensions.\\n %s\" % (len(data), self._dimensionCount, data))\n\n self._normalized = self._predefinedNormalized\n self._sorted = self._predefinedSorted\n\n tsformat = self._timestampFormat\n if tsformat is not None:\n timestamp = TimeSeries.convert_timestamp_to_epoch(timestamp, tsformat)\n\n self._timeseriesData.append([float(timestamp)] + [float(dimensionValue) for dimensionValue in data])", "def add_time_period(df):\n\n # determine in which half hour period of the day the \n # predicted time of arrival falls\n\n interval = df.iloc[0].planned_arrival // 1800 \n\n # find string representation of period from dict. 
mapping (top)\n\n inverval_string = interval_map[interval]\n\n # add the feature\n\n df['TIME_PERIOD_ARRIVAL'] = inverval_string\n\n # set the dtype\n\n df.TIME_PERIOD_ARRIVAL = df.TIME_PERIOD_ARRIVAL.astype('category') \n\n return df", "def main(year, dim_medium, dim_article, tile_extent):\n first_day = year + \"-01-01 00:00:00\"\n last_day = year + \"-12-31 23:59:59\"\n\n # Calculate timestamps for dim_time dimension limits\n first_timestamp = storage.get_timestamp_from_text(first_day)\n last_timestamp = storage.get_timestamp_from_text(last_day)\n\n # Name of array\n array_name = \"newsroom_\" + year\n\n # Create the array\n storage.create_array(\n array_name,\n dim_medium,\n first_timestamp,\n last_timestamp,\n dim_article,\n tile_extent,\n )", "def add_probability(self, partitioning, part, prob):\n self.prob.setdefault(partitioning, dict())\n self.prob[partitioning][part] = prob", "def _add_time_field(self) -> None:\n self.data[\"time\"] = [datetime(int(yyyy), int(mm), int(dd)) + timedelta(hours=hh) for yyyy, mm, dd, hh in zip(self.data[\"year\"], self.data[\"month\"], self.data[\"day\"], self.data[\"hour\"])]\n for key in [\"year\", \"doy\", \"month\", \"day\", \"hour\"]:\n del self.data[key]", "def time_interval_sub(self, time_step, nsteps):\n world.subtime = TimeAxis(0.0, int(nsteps), float(time_step))\n print(\"Setting subtime\")", "def add_simulation_step(self, end_time, output_step_size=0.05, clear_existing=False):\n if clear_existing:\n self.parameters['DYNAMICS'] = [[],[]]\n self.parameters['DYNAMICS'][0].append(end_time)\n self.parameters['DYNAMICS'][1].append(output_step_size)\n self.parameters['_DYNAMICS'] = zip(*self.parameters['DYNAMICS'])" ]
[ "0.63332134", "0.6205126", "0.56875503", "0.5681772", "0.5438656", "0.5215479", "0.52103144", "0.52042055", "0.5199717", "0.5189618", "0.5118998", "0.5118998", "0.511268", "0.5108713", "0.50541395", "0.5043726", "0.50367475", "0.5027835", "0.5004906", "0.49656916", "0.49620217", "0.49508247", "0.4941275", "0.4937749", "0.49153277", "0.4895604", "0.4854656", "0.48454043", "0.48423037", "0.483241" ]
0.8382509
0
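The same stand-in idea applied to the time dimension above; the `"MONTH"` period value is an assumption for illustration, since the record only shows the `"DAY"` default.

```python
# Stand-in mirroring the record's time-dimension method.
class TimePartitionStub:
    def __init__(self):
        self.settings = {"partitioning": {"dimensions": []}}

    def add_time_partitioning_dimension(self, dim_name, period="DAY"):
        self.settings["partitioning"]["dimensions"].append(
            {"name": dim_name, "type": "time", "params": {"period": period}})

stub = TimePartitionStub()
stub.add_time_partitioning_dimension("odate", period="MONTH")  # period value assumed
print(stub.settings["partitioning"]["dimensions"])
# -> [{'name': 'odate', 'type': 'time', 'params': {'period': 'MONTH'}}]
```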
Set the partitioning pattern of the folder. The pattern indicates which paths inside the folder belong to
which partition.
def set_partitioning_file_pattern(self, pattern): self.settings["partitioning"]["filePathPattern"] = pattern
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setSplitPattern(self, value):\n return self._set(splitPattern=value)", "def setSplitPattern(self, value):\n return self._set(splitPattern=value)", "def pattern(self, pattern):\n if pattern is None:\n raise ValueError(\"Invalid value for `pattern`, must not be `None`\") # noqa: E501\n\n self._pattern = pattern", "def filepattern(self):\n return naming.filepattern_for_dataset_split(\n dataset_name=self.dataset_name,\n split=self.split,\n data_dir=self.data_dir,\n filetype_suffix=self.filetype_suffix)", "def setPattern(self, value):\n return self._set(pattern=value)", "def setPrefixPattern(self, value):\n return self._set(prefixPattern=value)", "def macro_files_pattern(self, val: Pattern):\n self[\"macro_files_pattern\"] = str(val)", "def save_pattern(self, pattern: Pattern):", "def save_pattern(self, pattern: Pattern):", "def subFolder(self, value):\r\n self.__folder = str(value)", "def glob(patterns: list[str]) -> Table:\n for val in _ensure_list(patterns):\n fol, _, pat = val.partition(\"/*\")\n folder = Path(fol)\n for file in folder.glob(\"*\" + pat):\n yield {\"file\": str(file)}", "def __init__(self, pattern):\r\n self.pattern = pattern", "def make_pattern_set(self):\n \n _pattern = []\n for x in range(1,9):\n _pattern.append(self.make_pattern())\n \n self.pattern = _pattern", "def set_folder_name(self, folder_name=None):\n\n if folder_name == None:\n folder_name = 'Abstract-OneDim/'\n self.params['folder_name'] = folder_name\n else:\n self.params['folder_name'] = folder_name\n print 'Folder name:', self.params['folder_name']", "def rsetup(self, pattern):\n slices = []\n ipa_regions = []\n for i, p in enumerate(pattern):\n if i % 2 == 0:\n slices.append(p)\n else:\n ipa_regions.append(p)\n self.region_setup(slices, ipa_regions)", "def glob_patterns(separator=os.path.sep, **kwargs):\n terms = [kwargs.pop(field, '*')\n for field in NormalizedSceneId.tuple_type._fields]\n assert not kwargs, 'Unrecognized field names: {}'.format(kwargs)\n\n # terms which are not str are assumed to contain choices (list of str)\n choice_inds = [i for i, val in enumerate(terms) if not isinstance(val, str)]\n val_lists = [terms[i][:] for i in choice_inds]\n\n patterns = []\n for values in it.product(*val_lists):\n for i, ind in enumerate(choice_inds):\n terms[ind] = values[i]\n patterns.append(separator.join(terms))\n return patterns", "def convertPattern(self,pattern):\n images.convertSameFldImages(pattern,self.pathDir,self.img_destDir)\n return True", "def test_glob_pattern(self):\n glob_pattern = GlobPattern()\n det_name = 'R22_S11'\n self.assertEqual(glob_pattern('fe55', det_name),\n 'fe55_fe55_*/*_{}.fits'.format(det_name))\n self.assertEqual(glob_pattern('cte_low', det_name),\n 'sflat_*_flat_*L*/*_{}.fits'.format(det_name))", "def partition_path(self):\n return \"/kiel/groups/%s/partitions\" % self.group_name", "def set_folder(self, folder):\n self.folder = folder\n self.templates.directories[0] = folder\n self.app.root_path = folder", "def setPattern(self,Apattern,Bpattern,Cpattern):\n self.coeffPattern = [Apattern,Bpattern,Cpattern]\n for i in range(self.m):\n self._updateEstimatorSize(i)", "def add_pattern(self, pattern):\n self.patterns.append(pattern)", "def __init__(self, partition, test=False, local_test_data_dir=_LOCAL_TEST_DATA_DIR):\n assert sum(partition) == 100, 'The sum of the partition list must be 100: {}'.format(partition)\n self._partition = partition\n self._test = test\n # Split the files up according to the self._partition list.\n self._partitioned_filenames = []\n filenames = 
data_filenames(shuffle=False, test=self._test,\n local_test_data_dir=local_test_data_dir)\n part_start = 0\n for i, part_size in enumerate(self._partition):\n part_end = part_start + int(len(filenames) * 0.01 * part_size)\n assert part_end - part_start > 0, 'The number of files in partition {} is zero.'.format(i)\n self._partitioned_filenames.append(filenames[part_start:part_end])", "def set_pattern(self, pattern):\n for ir, row in enumerate(pattern):\n for ic, col in enumerate(row):\n relay_n = ir*len(row) + ic\n self.relay.set(relay_n, bool(col))", "def setDestFolder(self, offset=0):\n while True:\n tempDest = input(\n offset * \" \" + \"Specify a sub-folder name to save the output files [%s]: \" % self.destFolder) or self.destFolder\n\n # If the folder does not exist, try to create it\n if not os.path.exists(self.currentPath + os.sep + tempDest):\n try:\n os.mkdir(tempDest)\n self.destFolder = tempDest\n self.destPath = self.currentPath + os.sep + self.destFolder\n break\n except OSError:\n print(\"Invalid folder name!\")\n\n # If it does exist set the destPath to it\n else:\n self.destFolder = tempDest\n self.destPath = self.currentPath + os.sep + self.destFolder\n break", "def setSuffixPattern(self, value):\n return self._set(suffixPattern=value)", "def settimepattern(self, pattern):\n self._apachetimepattern = pattern", "def set_pattern_definition(self, lines):\n pattern_lines = [PatternDefinitionLine(line[0], line[1], line[2], line[3]) for line in lines]\n with self.edit_pattern() as pattern_editor:\n pattern_editor.lines = pattern_lines", "def add_pattern(self, name, pattern=None):\n assert isinstance(name, str) and len(name) < 32 and name.find(' ') == -1, \"name must be a string with less than 32 characters and contain no spaces\"\n assert isinstance(pattern, (list, np.ndarray, Pattern)), \"pattern must be a list or Pattern\"\n \n if not isinstance(pattern, Pattern):\n pattern = Pattern(name, multipliers=pattern, time_options=self._options.time) \n else: #elif pattern.time_options is None:\n pattern.time_options = self._options.time\n if pattern.name in self._data.keys():\n raise ValueError('Pattern name already exists')\n self[name] = pattern", "def __init__(self, pattern='/graphics/projects/scratch/wieschol/sync-data/*/*_noaudio.low.mp4',\n samples=10, shuffle=True, max_pos_dist=10, min_neg_dist=20, min_pos_dist=5):\n self.files = glob.glob(pattern)\n self.shuffle = shuffle\n self.max_pos_dist = max_pos_dist\n self.min_pos_dist = min_pos_dist\n self.min_neg_dist = min_neg_dist\n self.samples = samples" ]
[ "0.6177902", "0.6177902", "0.5773725", "0.57375664", "0.5704174", "0.558471", "0.54004574", "0.522178", "0.522178", "0.5215508", "0.5176763", "0.51129144", "0.51109886", "0.5100194", "0.5096467", "0.5093923", "0.5066452", "0.5036985", "0.49579346", "0.4954718", "0.4954279", "0.49224684", "0.49202397", "0.49118686", "0.48944467", "0.48874575", "0.48864466", "0.48767114", "0.4873418", "0.4857549" ]
0.83221745
0
Get the predicted cost for each of the actions given the provided context.
def get_costs_per_action(self, context: np.ndarray) -> Dict[Action, Cost]: costs_per_action = {} for action in self._get_actions(): if self.categorize_actions: action_one_hot = self._get_actions_one_hot(action) x = np.append(action_one_hot, context) else: x = np.append(action, context) costs_per_action[action] = self.reg.predict(x.reshape(1, -1)).reshape(-1)[0] return costs_per_action
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predict(self, context: np.ndarray) -> np.ndarray:\n n_rounds_of_new_data = context.shape[0]\n ones_n_rounds_arr = np.ones(n_rounds_of_new_data, int)\n estimated_rewards_by_reg_model = np.zeros(\n (n_rounds_of_new_data, self.n_actions, self.len_list)\n )\n for action_ in np.arange(self.n_actions):\n for position_ in np.arange(self.len_list):\n X = self._pre_process_for_reg_model(\n context=context,\n action=action_ * ones_n_rounds_arr,\n action_context=self.action_context,\n )\n estimated_rewards_ = (\n self.base_model_list[position_].predict_proba(X)[:, 1]\n if is_classifier(self.base_model_list[position_])\n else self.base_model_list[position_].predict(X)\n )\n estimated_rewards_by_reg_model[\n np.arange(n_rounds_of_new_data),\n action_ * ones_n_rounds_arr,\n position_ * ones_n_rounds_arr,\n ] = estimated_rewards_\n return estimated_rewards_by_reg_model", "def predict(\n self,\n context: np.ndarray,\n epsilon: Prob = 0.05,\n exploration_width: int = 1,\n exploration_strategy: str = \"smart\",\n ) -> Tuple[Action, Prob]:\n\n def _get_direction(action_change: Action) -> Optional[str]:\n if action_change < 0:\n return \"left\"\n elif action_change > 0:\n return \"right\"\n\n assert exploration_strategy in [\n \"smart\",\n \"aggressive\",\n None,\n ], \"Exploration strategy must be 'smart', 'aggressive' or None\"\n\n if self.reg is None:\n self._init_regressor(context)\n if self.initial_action:\n closest_action = min(\n self._get_actions(), key=lambda x: abs(x - self.initial_action) # type: ignore\n )\n return closest_action, 1.0\n costs_per_action = self.get_costs_per_action(context)\n if exploration_strategy == \"smart\":\n explored, cost_change, action_change = self._get_previous_move(epsilon)\n if explored and cost_change < 0:\n direction = _get_direction(action_change)\n return self._explore(\n costs_per_action, 1.0, exploration_width, direction\n )\n if exploration_strategy == \"aggressive\":\n explored, cost_change, action_change = self._get_previous_move(epsilon)\n if cost_change < 0:\n direction = _get_direction(action_change)\n return self._explore(\n costs_per_action, 1.0, exploration_width, direction\n )\n if np.random.random() < epsilon:\n return self._explore(costs_per_action, epsilon, exploration_width)\n return self._exploit(costs_per_action, epsilon)", "def fit_predict(\n self,\n context: np.ndarray,\n action: np.ndarray,\n reward: np.ndarray,\n pscore: Optional[np.ndarray] = None,\n position: Optional[np.ndarray] = None,\n action_dist: Optional[np.ndarray] = None,\n n_folds: int = 1,\n ) -> None:\n assert n_folds > 1 and isinstance(\n n_folds, int\n ), f\"n_folds must be an integer larger than 1, but {n_folds} is given\"\n\n if n_folds == 1:\n self.fit(\n context=context,\n action=action,\n reward=reward,\n pscore=pscore,\n position=position,\n action_dist=action_dist,\n )\n return self.predict(context=context)\n else:\n estimated_rewards_by_reg_model = np.zeros(\n (context.shape[0], self.n_actions, self.len_list)\n )\n skf = StratifiedKFold(n_splits=n_folds)\n skf.get_n_splits(context, reward)\n for train_idx, test_idx in skf.split(context, reward):\n action_dist_tr = (\n action_dist[train_idx] if action_dist is not None else action_dist\n )\n self.fit(\n context=context[train_idx],\n action=action[train_idx],\n reward=reward[train_idx],\n pscore=pscore[train_idx],\n position=position[train_idx],\n action_dist=action_dist_tr,\n )\n estimated_rewards_by_reg_model[test_idx, :, :] = self.predict(\n context=context[test_idx]\n )\n return estimated_rewards_by_reg_model", 
"def get_action(agent, context, epsilon=0):\n\n num_contexts = context.shape[0]\n\n # Attach one-hot encoding of actions at the end of context vector\n no_eat_action = np.hstack([context, np.ones((num_contexts, 1)), np.zeros((num_contexts, 1))])\n eat_action = np.hstack([context, np.zeros((num_contexts, 1)), np.ones((num_contexts, 1))])\n no_eat_rewards = agent.predict(input_fn=lambda: tf.data.Dataset.from_tensor_slices(no_eat_action))\n no_eat_rewards = np.array(list(no_eat_rewards))\n\n eat_rewards = agent.predict(input_fn=lambda: tf.data.Dataset.from_tensor_slices(eat_action))\n eat_rewards = np.array(list(eat_rewards))\n\n rewards = np.hstack([no_eat_rewards, eat_rewards])\n\n # Epsilon-greedy policy\n # Start completely greedy\n action = np.argmax(rewards, axis=1)\n\n # Select indices to update\n rand_indices = np.random.uniform(low=0., high=1., size=num_contexts) < epsilon\n\n # Select random actions\n rand_actions = np.random.choice([0, 1], size=num_contexts)\n\n action[rand_indices] = rand_actions[rand_indices]\n\n return action", "def predict_fn(future_action, state):\n model = get_model()\n rewards = model((state, future_action))\n return {\"reward\": rewards}", "def _cost_function(self, y_pred, y, m):\n sumatory = 0\n for x in range(m):\n sumatory += (y_pred[0][x] -y[0][x])**2\n\n cost = 1/(2*m) * sumatory\n return cost", "def predict(\n self,\n context: np.ndarray,\n action_context: np.ndarray,\n selected_actions: np.ndarray,\n position: Optional[np.ndarray] = None,\n ) -> np.ndarray:\n # create context vector to make predictions\n selected_actions_at_positions = selected_actions[\n np.arange(position.shape[0]), position\n ]\n X = self._pre_process_for_reg_model(\n context=context,\n action=selected_actions_at_positions,\n action_context=action_context,\n )\n # make predictions\n if is_classifier(self.base_model):\n return self.base_model.predict_proba(X)[:, 1]\n else:\n return self.base_model.predict(X)", "def predict(self, session, data, labels=None):\n\n losses,results = [],[]\n for step, (x,y) in enumerate(self.enhancer_iterator(data, labels, \n self.config.batch_size,\n self.config.num_steps)):\n if y is not None:\n cost, preds = session.run([self.cost, self.predictions],\n {self.input_data: x,\n self.targets: y,\n self.dropout: 1.0,\n self.initial_state: self.initial_state.eval()})\n losses.append(cost)\n else:\n preds = session.run(self.predictions,\n {self.input_data: x,\n self.dropout: 1.0,\n self.initial_state: self.initial_state.eval()})\n \n results.extend(np.argmax(preds,1))\n return np.exp(np.mean(losses)), results", "def fit(\n self,\n context: np.ndarray,\n action: np.ndarray,\n reward: np.ndarray,\n pscore: np.ndarray,\n action_context: np.ndarray,\n ) -> None:\n # create context vector to make predictions\n X = self._pre_process_for_reg_model(\n context=context, action=action, action_context=action_context,\n )\n # train the base model according to the given `fitting method`\n if self.fitting_method == \"normal\":\n self.base_model.fit(X, reward)\n elif self.fitting_method == \"iw\":\n sample_weight = np.mean(pscore) / pscore\n self.base_model.fit(X, reward, sample_weight=sample_weight)\n elif self.fitting_method == \"mrdr\":\n sample_weight = (1.0 - pscore) / pscore ** 2\n self.base_model.fit(X, reward, sample_weight=sample_weight)", "def get_action_cost(state: tuple, action: int) -> float:\n assert len(state) == 3 and state[0] < 5 and state[1] < 4 and state[2] < 3 and action < 3\n if state[0] == 0:\n return 0.0\n if state[0] == 1 and action == 0:\n return 
-20.0 + 10.0 * 0.5\n return -20.0 # Penalty = 20 due to Team Number = 9", "def getCost(self, state, action):\n util.raiseNotDefined()", "def get_reward(self, observations, actions):\n\n #initialize and reshape as needed, for batch mode\n self.reward_dict = {}\n if(len(observations.shape)==1):\n observations = np.expand_dims(observations, axis = 0)\n actions = np.expand_dims(actions, axis = 0)\n batch_mode = False\n else:\n batch_mode = True\n\n #get vars\n xvel = observations[:, 9].copy()\n body_angle = observations[:, 2].copy()\n front_leg = observations[:, 6].copy()\n front_shin = observations[:, 7].copy()\n front_foot = observations[:, 8].copy()\n zeros = np.zeros((observations.shape[0],)).copy()\n\n # ranges\n leg_range = 0.2\n shin_range = 0\n foot_range = 0\n penalty_factor = 10\n\n #calc rew\n self.reward_dict['run'] = xvel\n\n front_leg_rew = zeros.copy()\n front_leg_rew[front_leg>leg_range] = -penalty_factor\n self.reward_dict['leg'] = front_leg_rew\n\n front_shin_rew = zeros.copy()\n front_shin_rew[front_shin>shin_range] = -penalty_factor\n self.reward_dict['shin'] = front_shin_rew\n\n front_foot_rew = zeros.copy()\n front_foot_rew[front_foot>foot_range] = -penalty_factor\n self.reward_dict['foot'] = front_foot_rew\n\n # total reward\n self.reward_dict['r_total'] = self.reward_dict['run'] + self.reward_dict['leg'] + self.reward_dict['shin'] + self.reward_dict['foot']\n\n #return\n dones = zeros.copy()\n if(not batch_mode):\n return self.reward_dict['r_total'][0], dones[0]\n return self.reward_dict['r_total'], dones", "def predict(self, context, filepath=None):\n\n if len(context.splitlines()) < 5:\n context = self.get_guide_context(filepath) + context\n\n context_ids = self.tokenizer.encode(context)\n if len(context_ids) <= 1:\n return None\n context_ids = context_ids[-self.max_context_size:]\n logger.info('Final context: \\n----\\n[{}]\\n'.format(self.tokenizer.decode(context_ids)))\n logger.info('The last 2 tokens are: {}'.format(self.tokenizer.convert_ids_to_tokens(context_ids[-2:])))\n\n # the last token may incomplete, we need to estimate it\n tokens, probs, past = self.estimate_first(context_ids)\n if len(tokens) == 0:\n return None\n\n past = tf.stack(past, axis=0)\n past = select(past, tf.zeros(len(tokens), dtype=tf.int32), axis=1)\n tokens = tf.constant(tokens, dtype=tf.int32)\n tf_context_ids = tf.constant(context_ids[:-1], dtype=tf.int32)[tf.newaxis, :]\n tf_context_ids = tf.tile(tf_context_ids, [len(tokens), 1])\n tf_context_ids = tf.concat([tf_context_ids, tokens[:, tf.newaxis]], axis=-1)\n y, probs = self._predict(tf_context_ids, past, tf.constant(probs))\n last_token_len = len(self.tokenizer.convert_ids_to_tokens(context_ids[-1]))\n\n ids = y.numpy()[:, -self.predict_len-1:]\n prefix_ids = find_common_prefix(list(ids), min_width=4, depth=3)\n prefix = len(prefix_ids or []) > 1 and self.tokenizer.decode(prefix_ids) or ''\n return last_token_len, prefix, [self.tokenizer.decode(i) for i in ids], probs.numpy()", "def get_cost(self, action: Action) -> N:\n pass", "def compute_intrinsic_reward(self, state, action, next_state, use_cuda, train=False):\n if use_cuda:\n fn = lambda x: x.cuda()\n device = \"gpu\"\n else:\n fn = lambda x: x.cpu()\n device = \"cpu\"\n if not self.predictor_dev == device:\n self.predictor_model = fn(self.predictor_model)\n self.predictor_dev = device\n if not self.target_dev == device:\n self.target_model = fn(self.target_model)\n self.target_dev = device\n\n target_feature = self.target_model(next_state)\n predict_feature = 
self.predictor_model(next_state)\n\n forward_loss = ((target_feature - predict_feature) ** 2).sum(-1).mean()\n self.loss = forward_loss\n\n if train:\n self.optimizer.zero_grad()\n self.loss.backward(retain_graph=True)\n torch.nn.utils.clip_grad_norm_(self.predictor_model.parameters(), 0.5)\n self.optimizer.step()\n\n return self.eta * forward_loss", "def calc_cost(self):\n \n correct_pred = tf.equal(self.predictions, tf.argmax(self.y,1))\n batchaccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) \n return self.cost, batchaccuracy, self.predictions", "def _get_reward(self, action):\n HIRE_COST = 1 # TODO 7/29/20 - Determine significance of this value\n\n # Lookup the state representation using the cur_state index. Then we\n # can get the candidate productivity score.\n obs = self.observation_function[self.cur_state]\n prod_score = obs[1]\n r = action*(prod_score - HIRE_COST)\n return r", "def step_cost(self, state, action, result=None):\n return 1 # Override this if actions have different costs", "def compute_cost(self,X, y):\n \n num_samples = len(X)\n # Do Forward propagation to calculate our predictions\n z1 = X.dot(self.W1) + self.b1\n a1 = np.tanh(z1)\n z2 = a1.dot(self.W2) + self.b2\n exp_z = np.exp(z2)\n a2 = exp_z / np.sum(exp_z, axis=1, keepdims=True)\n softmax_scores = a2\n # Calculate the cross-entropy loss\n cross_ent_err = -np.log(softmax_scores[range(num_samples), y])\n data_loss = np.sum(cross_ent_err)\n return 1./num_samples * data_loss", "def __compute_cost(self, x, y):\n\n predictions = self.__compute_prediction(x)\n cost = np.mean(-y * np.log(predictions) - (1 - y) * np.log(1 - predictions))\n\n return cost", "def predict(self, context_x, context_y, test_x, return_density=False):\n\n context_x, context_y = _handle_input_dimensionality(context_x, context_y)\n test_x = _handle_input_dimensionality(test_x)\n assert test_x.shape[1] == context_x.shape[1]\n\n # normalize data and convert to tensor\n context_x, context_y = self._prepare_data_per_task(context_x, context_y)\n\n test_x = self._normalize_data(X=test_x, Y=None)\n test_x = torch.from_numpy(test_x).float().to(device)\n\n with torch.no_grad():\n pred_dist = self.get_pred_dist(context_x, context_y, test_x)\n pred_dist = AffineTransformedDistribution(pred_dist, normalization_mean=self.y_mean,\n normalization_std=self.y_std)\n pred_dist = EqualWeightedMixtureDist(pred_dist, batched=True)\n\n if return_density:\n return pred_dist\n else:\n pred_mean = pred_dist.mean.cpu().numpy()\n pred_std = pred_dist.stddev.cpu().numpy()\n return pred_mean, pred_std", "def evaluate(self, gameState, action):\n features = self.getFeatures(gameState, action)\n weights = self.getWeights(gameState, action)\n return features * weights", "def evaluate(self, gameState, action):\n features = self.getFeatures(gameState, action)\n weights = self.getWeights(gameState, action)\n return features * weights", "def evaluate(self, gameState, action):\n features = self.getFeatures(gameState, action)\n weights = self.getWeights(gameState, action)\n return features * weights", "def evaluate(self, gameState, action):\n features = self.getFeatures(gameState, action)\n weights = self.getWeights(gameState, action)\n return features * weights", "def get_cost(self, Y, T):\n return - np.multiply(T, np.log(Y)).sum() / Y.shape[0]", "def compute_td_loss(states, actions, rewards, is_done,\r\n agent, target_network,\r\n gamma=0.99,\r\n device=device):\r\n states = torch.tensor(states, device=device, dtype=torch.float) # shape: [batch_size, *state_shape]\r\n\r\n 
# for some torch reason should not make actions a tensor\r\n actions = torch.tensor(actions, device=device, dtype=torch.long) # shape: [batch_size]\r\n rewards = torch.tensor(rewards, device=device, dtype=torch.float) # shape: [batch_size]\r\n # shape: [batch_size, *state_shape]\r\n is_done = torch.tensor(\r\n is_done.astype('float32'),\r\n device=device,\r\n dtype=torch.float\r\n ) # shape: [batch_size]\r\n is_not_done = 1 - is_done\r\n min_history_size = (len(actions) - 1) // 2\r\n\r\n agent_memories = agent.get_initial_state(1)\r\n target_memories = target_network.get_initial_state(1)\r\n\r\n agent_qvalues = []\r\n target_qvalues = []\r\n\r\n for t in range(len(actions)):\r\n\r\n agent_memories, predicted_agent_qvalues = agent(agent_memories,\r\n states[t].unsqueeze(0))\r\n target_memories, predicted_target_qvalues = target_network(target_memories,\r\n states[t].unsqueeze(0))\r\n\r\n if t >= min_history_size:\r\n agent_qvalues.append(predicted_agent_qvalues)\r\n target_qvalues.append(predicted_target_qvalues)\r\n\r\n if is_done[t]:\r\n agent_memories = agent.get_initial_state(1)\r\n target_memories = target_network.get_initial_state(1)\r\n\r\n agent_qvalues = torch.stack(agent_qvalues, dim=1).squeeze()\r\n target_qvalues = torch.stack(target_qvalues, dim=1).squeeze()[1:,:]\r\n\r\n agent_next_qvalues = agent_qvalues[1:,:]\r\n best_actions = torch.argmax(agent_next_qvalues, dim=1)\r\n\r\n predicted_qvalues_for_actions = agent_qvalues[range(\r\n len(actions[min_history_size:])), actions[min_history_size:]][:-1]\r\n next_state_values = target_qvalues[range(\r\n len(best_actions)), best_actions]\r\n\r\n target_qvalues_for_actions = rewards[min_history_size:-1] \\\r\n + gamma * is_not_done[min_history_size:-1] * next_state_values\r\n\r\n\r\n loss = torch.mean((predicted_qvalues_for_actions -\r\n target_qvalues_for_actions.detach()) ** 2)\r\n\r\n return loss", "def get_reward(self, dags, entropies,inputs,targets):\n if not isinstance(entropies, np.ndarray):\n entropies = entropies.data.cpu().numpy()\n\n score=self.get_score(inputs,targets,dags)\n #score=1-self.get_loss(inputs,targets,dags)\n print(score.item())\n R = utils.to_item(score.data)\n\n if self.args.entropy_mode == 'reward':\n rewards = R + self.args.entropy_coeff * entropies.mean()\n elif self.args.entropy_mode == 'regularizer':\n rewards = R * np.ones_like(entropies)\n else:\n raise NotImplementedError(f'Unkown entropy mode: {self.args.entropy_mode}')\n\n return rewards", "def cost(self, result: [float], label: int) -> float:\n desired_outputs = Network.digit_to_one_hot(label)\n self._desired_changes = [result[i] - desired_outputs[i] for i in range(num_outputs)]\n return sum((result[i] - desired_outputs[i]) ** 2 for i in range(num_outputs))", "def predict(self, context, question):\n log.info(\"----------------%s------------------\" % question)\n\n # when context given, detect entities\n slot_values = {}\n intent2entities = self._io.get_all_intent_entities()\n if context[\"intent\"] is not None:\n intent = context[\"intent\"]\n slots = intent2entities[intent][\"slots\"]\n if intent in self._filtered_intents:\n entities = []\n else:\n entities = self._entities_by_intent[intent][\"slots\"].values()\n target_slots = self._entities_by_intent[intent][\"slots\"].keys()\n d_entities = self._entity.recognize(question, entities)\n slots = {v: k for k, v in slots.items()}\n for entity, value in d_entities.items():\n slot_values[slots[entity]] = value\n if slot_values:\n return {\n \"question\": question,\n \"intent\": intent,\n 
\"confidence\": 1.0,\n \"entities\": slot_values,\n \"target_entities\": target_slots,\n \"node_id\": None\n }\n priority = context[\"agents\"]\n # detect intent and entities\n s_intent, confidence, node_id = self._intent_classify(priority,\n question)\n target_slots = []\n if s_intent and s_intent not in self._filtered_intents:\n slots = intent2entities[s_intent][\"slots\"]\n target_slots = list(slots.keys())\n assert len(set(slots.values())) == len(slots.values())\n d_entities = self._entity.recognize(question,\n slots.values())\n log.debug(\"ENTITIES DETECT to {0}\".format(d_entities))\n slots = {v: k for k, v in slots.items()}\n for entity, value in d_entities.items():\n slot_values[slots[entity]] = value\n\n return {\n \"question\": question,\n \"intent\": \"casual_talk\" if s_intent is None else s_intent,\n \"confidence\": confidence,\n \"entities\": slot_values,\n \"target_entities\": target_slots,\n \"node_id\": node_id\n }" ]
[ "0.6950889", "0.6194891", "0.6161221", "0.5940927", "0.5893304", "0.5862886", "0.5833096", "0.57626337", "0.57062304", "0.56541", "0.5639965", "0.5606558", "0.5593295", "0.5560387", "0.5553072", "0.55465263", "0.5540782", "0.55235296", "0.55216396", "0.54918563", "0.54878634", "0.547908", "0.547908", "0.547908", "0.547908", "0.5477058", "0.5465762", "0.54537606", "0.54513085", "0.5447261" ]
0.748326
0
Predict an action given a context.
def predict(
    self,
    context: np.ndarray,
    epsilon: Prob = 0.05,
    exploration_width: int = 1,
    exploration_strategy: str = "smart",
) -> Tuple[Action, Prob]:
    def _get_direction(action_change: Action) -> Optional[str]:
        if action_change < 0:
            return "left"
        elif action_change > 0:
            return "right"

    assert exploration_strategy in [
        "smart",
        "aggressive",
        None,
    ], "Exploration strategy must be 'smart', 'aggressive' or None"
    if self.reg is None:
        self._init_regressor(context)
    if self.initial_action:
        closest_action = min(
            self._get_actions(),
            key=lambda x: abs(x - self.initial_action)  # type: ignore
        )
        return closest_action, 1.0
    costs_per_action = self.get_costs_per_action(context)
    if exploration_strategy == "smart":
        explored, cost_change, action_change = self._get_previous_move(epsilon)
        if explored and cost_change < 0:
            direction = _get_direction(action_change)
            return self._explore(
                costs_per_action, 1.0, exploration_width, direction
            )
    if exploration_strategy == "aggressive":
        explored, cost_change, action_change = self._get_previous_move(epsilon)
        if cost_change < 0:
            direction = _get_direction(action_change)
            return self._explore(
                costs_per_action, 1.0, exploration_width, direction
            )
    if np.random.random() < epsilon:
        return self._explore(costs_per_action, epsilon, exploration_width)
    return self._exploit(costs_per_action, epsilon)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predict_fn(future_action, state):\n model = get_model()\n rewards = model((state, future_action))\n return {\"reward\": rewards}", "def fit_predict(\n self,\n context: np.ndarray,\n action: np.ndarray,\n reward: np.ndarray,\n pscore: Optional[np.ndarray] = None,\n position: Optional[np.ndarray] = None,\n action_dist: Optional[np.ndarray] = None,\n n_folds: int = 1,\n ) -> None:\n assert n_folds > 1 and isinstance(\n n_folds, int\n ), f\"n_folds must be an integer larger than 1, but {n_folds} is given\"\n\n if n_folds == 1:\n self.fit(\n context=context,\n action=action,\n reward=reward,\n pscore=pscore,\n position=position,\n action_dist=action_dist,\n )\n return self.predict(context=context)\n else:\n estimated_rewards_by_reg_model = np.zeros(\n (context.shape[0], self.n_actions, self.len_list)\n )\n skf = StratifiedKFold(n_splits=n_folds)\n skf.get_n_splits(context, reward)\n for train_idx, test_idx in skf.split(context, reward):\n action_dist_tr = (\n action_dist[train_idx] if action_dist is not None else action_dist\n )\n self.fit(\n context=context[train_idx],\n action=action[train_idx],\n reward=reward[train_idx],\n pscore=pscore[train_idx],\n position=position[train_idx],\n action_dist=action_dist_tr,\n )\n estimated_rewards_by_reg_model[test_idx, :, :] = self.predict(\n context=context[test_idx]\n )\n return estimated_rewards_by_reg_model", "def predict(self, model, context, data):\n pass", "def predict(\n self,\n context: np.ndarray,\n action_context: np.ndarray,\n selected_actions: np.ndarray,\n position: Optional[np.ndarray] = None,\n ) -> np.ndarray:\n # create context vector to make predictions\n selected_actions_at_positions = selected_actions[\n np.arange(position.shape[0]), position\n ]\n X = self._pre_process_for_reg_model(\n context=context,\n action=selected_actions_at_positions,\n action_context=action_context,\n )\n # make predictions\n if is_classifier(self.base_model):\n return self.base_model.predict_proba(X)[:, 1]\n else:\n return self.base_model.predict(X)", "def predict(self, observation, *args, **kwargs):\n if self.env is not None and np.random.rand() <= self.epsilon:\n action = random.randrange(self.action_size)\n else:\n act_values = self.policy.predict(observation)\n action = np.argmax(act_values[0])\n return action, None", "def predict(self, context: np.ndarray) -> np.ndarray:\n n_rounds_of_new_data = context.shape[0]\n ones_n_rounds_arr = np.ones(n_rounds_of_new_data, int)\n estimated_rewards_by_reg_model = np.zeros(\n (n_rounds_of_new_data, self.n_actions, self.len_list)\n )\n for action_ in np.arange(self.n_actions):\n for position_ in np.arange(self.len_list):\n X = self._pre_process_for_reg_model(\n context=context,\n action=action_ * ones_n_rounds_arr,\n action_context=self.action_context,\n )\n estimated_rewards_ = (\n self.base_model_list[position_].predict_proba(X)[:, 1]\n if is_classifier(self.base_model_list[position_])\n else self.base_model_list[position_].predict(X)\n )\n estimated_rewards_by_reg_model[\n np.arange(n_rounds_of_new_data),\n action_ * ones_n_rounds_arr,\n position_ * ones_n_rounds_arr,\n ] = estimated_rewards_\n return estimated_rewards_by_reg_model", "def predict(self, context, question):\n log.info(\"----------------%s------------------\" % question)\n\n # when context given, detect entities\n slot_values = {}\n intent2entities = self._io.get_all_intent_entities()\n if context[\"intent\"] is not None:\n intent = context[\"intent\"]\n slots = intent2entities[intent][\"slots\"]\n if intent in self._filtered_intents:\n 
entities = []\n else:\n entities = self._entities_by_intent[intent][\"slots\"].values()\n target_slots = self._entities_by_intent[intent][\"slots\"].keys()\n d_entities = self._entity.recognize(question, entities)\n slots = {v: k for k, v in slots.items()}\n for entity, value in d_entities.items():\n slot_values[slots[entity]] = value\n if slot_values:\n return {\n \"question\": question,\n \"intent\": intent,\n \"confidence\": 1.0,\n \"entities\": slot_values,\n \"target_entities\": target_slots,\n \"node_id\": None\n }\n priority = context[\"agents\"]\n # detect intent and entities\n s_intent, confidence, node_id = self._intent_classify(priority,\n question)\n target_slots = []\n if s_intent and s_intent not in self._filtered_intents:\n slots = intent2entities[s_intent][\"slots\"]\n target_slots = list(slots.keys())\n assert len(set(slots.values())) == len(slots.values())\n d_entities = self._entity.recognize(question,\n slots.values())\n log.debug(\"ENTITIES DETECT to {0}\".format(d_entities))\n slots = {v: k for k, v in slots.items()}\n for entity, value in d_entities.items():\n slot_values[slots[entity]] = value\n\n return {\n \"question\": question,\n \"intent\": \"casual_talk\" if s_intent is None else s_intent,\n \"confidence\": confidence,\n \"entities\": slot_values,\n \"target_entities\": target_slots,\n \"node_id\": node_id\n }", "def predict(self, state):\n self.model.eval()\n if len(self.history_states) < self.history_states.maxlen:\n action = self.baseline_agent.predict(state)\n else:\n history_state = np.array(self.history_states)\n history_action = np.array(self.history_actions)\n action = self.planner.predict(history_state, history_action, state)\n self.state_action_dataset.add(history_state=history_state,\n history_action=history_action,\n state=state,\n action=action)\n self.policy.eval()\n action = self.policy.predict(history_state, history_action, state)\n\n self.history_states.append(state)\n self.history_actions.append(action)\n return action", "def get_action(agent, context, epsilon=0):\n\n num_contexts = context.shape[0]\n\n # Attach one-hot encoding of actions at the end of context vector\n no_eat_action = np.hstack([context, np.ones((num_contexts, 1)), np.zeros((num_contexts, 1))])\n eat_action = np.hstack([context, np.zeros((num_contexts, 1)), np.ones((num_contexts, 1))])\n no_eat_rewards = agent.predict(input_fn=lambda: tf.data.Dataset.from_tensor_slices(no_eat_action))\n no_eat_rewards = np.array(list(no_eat_rewards))\n\n eat_rewards = agent.predict(input_fn=lambda: tf.data.Dataset.from_tensor_slices(eat_action))\n eat_rewards = np.array(list(eat_rewards))\n\n rewards = np.hstack([no_eat_rewards, eat_rewards])\n\n # Epsilon-greedy policy\n # Start completely greedy\n action = np.argmax(rewards, axis=1)\n\n # Select indices to update\n rand_indices = np.random.uniform(low=0., high=1., size=num_contexts) < epsilon\n\n # Select random actions\n rand_actions = np.random.choice([0, 1], size=num_contexts)\n\n action[rand_indices] = rand_actions[rand_indices]\n\n return action", "def predict_intent():\n\n start_time = time()\n request.json[\"request_id\"] = uuid.uuid4().hex\n app.logger.info(f\"Request: {request.json['request_id']}. Processing request '/recommend': {request.json}\")\n\n # Prime filters\n uniq_id = request.json.get('uniq_id')\n if not uniq_id:\n message = f'Request: {request.json[\"request_id\"]}. 
Missing uniq_id in request'\n delta = time() - start_time\n app.logger.error(f\"{message} Elapsed time: {delta} secs\")\n return jsonify(message=message), 404\n \n\n result, code = recommender.get_recommendation(uniq_id)\n\n delta = time() - start_time\n app.logger.info(f\"Request: {request.json['request_id']}. Endpoint response '/recommend': {result}. Elapsed time: {delta} secs\")\n return jsonify(result), code", "def predict(self, state, action):\n assert np.shape(state) == (self._state_dim,)\n assert np.shape(action) == (self._action_dim,)\n\n ### PROBLEM 1\n ### YOUR CODE HERE\n # raise NotImplementedError\n next_state_pred = self._sess.run(self._next_state_pred,\n feed_dict={self._state_ph: np.atleast_2d(state),\n self._action_ph: np.atleast_2d(action)})\n next_state_pred = next_state_pred[0]\n\n assert np.shape(next_state_pred) == (self._state_dim,)\n return next_state_pred", "def predict(self, state):\n s_vec = torch.Tensor(self.vector.state_vectorize(state))\n a = self.policy.select_action(s_vec.to(device=DEVICE), self.is_train).cpu()\n action = self.vector.action_devectorize(a.numpy())\n state['system_action'] = action\n\n return action", "def act(self, state):\n action = self.actor_model.predict(state)\n return action[0]", "def decide_actions(self, eval_states, *args):\n\n return self.nn.predict_exploration(eval_states, self.ed.epsilon)", "def fit(\n self,\n context: np.ndarray,\n action: np.ndarray,\n reward: np.ndarray,\n pscore: np.ndarray,\n action_context: np.ndarray,\n ) -> None:\n # create context vector to make predictions\n X = self._pre_process_for_reg_model(\n context=context, action=action, action_context=action_context,\n )\n # train the base model according to the given `fitting method`\n if self.fitting_method == \"normal\":\n self.base_model.fit(X, reward)\n elif self.fitting_method == \"iw\":\n sample_weight = np.mean(pscore) / pscore\n self.base_model.fit(X, reward, sample_weight=sample_weight)\n elif self.fitting_method == \"mrdr\":\n sample_weight = (1.0 - pscore) / pscore ** 2\n self.base_model.fit(X, reward, sample_weight=sample_weight)", "def get_action(self, obs=None):\n payload = {}\n payload[\"request_type\"] = \"observation\"\n payload[\"observation\"] = obs\n response = self._realtime_predictor.predict(payload)\n action = response[\"action\"]\n action_prob = response[\"action_prob\"]\n event_id = response[\"event_id\"]\n model_id = response[\"model_id\"]\n sample_prob = response[\"sample_prob\"]\n return action, event_id, model_id, action_prob, sample_prob", "def process_action(action, params, context):\n if action == 'define_word':\n word = params.get('word')\n if word is None:\n return make_simple_reply('I do not know this word')\n word_id = normalize_word(word)\n word_model = ndb.Key('Word', word_id).get()\n if word_model is not None:\n word_model.practice_count += 1\n word_model.learned = False\n word_model.put()\n return generate_definition_reply(word_model)\n \n word_model = Word()\n word_model.learned = False\n word_model.word = word\n word_model.key = ndb.Key('Word', word_id)\n if not get_word_definition(word_model):\n return make_simple_reply('I do not know this word')\n else:\n word_model.practice_count = 1\n word_model.put()\n return generate_definition_reply(word_model)\n \n elif action == 'practice':\n keys = Word.query().filter(Word.learned == False).fetch(keys_only=True)\n selected_word_key = random.sample(keys, 1)[0]\n reply = make_simple_reply(\n 'How about %s! Do you remember it?' 
% selected_word_key.get().word)\n reply['context'] = [{\n 'name': 'practice',\n 'lifespan': 2,\n 'parameters': {'word_id': selected_word_key.id()}\n }]\n return reply\n \n elif action == 'practice_known':\n # User knows this word. Mark it as learned\n word_id = context.get('practice', {}).get('word_id', None)\n reset_context = [{\n 'name': 'practice',\n 'lifespan': 0,\n 'parameters': {'word_id': word_id}\n }]\n\n if word_id is None:\n reply = make_simple_reply('I am afraid I do not know this word')\n reply['context'] = reset_context\n return reply\n\n word_model = ndb.Key('Word', word_id).get()\n if word_model is None:\n reply = make_simple_reply('I am afraid I do not know this word')\n reply['context'] = reset_context\n return reply\n\n word_model.learned = True\n word_model.put()\n reply = make_simple_reply('OK, I will not ask this word again')\n reply['context'] = reset_context\n return reply\n \n elif action == 'practice_unknown':\n # User does not know this word. Return its definition\n word_id = context.get('practice', {}).get('word_id', None)\n reset_context = [{\n 'name': 'practice',\n 'lifespan': 0,\n 'parameters': {'word_id': word_id}\n }]\n\n if word_id is None:\n reply = make_simple_reply('I do not know this word either, sorry')\n reply['context'] = reset_context\n return reply\n\n word_model = ndb.Key('Word', word_id).get()\n if word_model is None:\n reply = make_simple_reply('I do not know this word either, sorry')\n reply['context'] = reset_context\n return reply\n\n word_model.practice_count += 1\n word_model.learned = False\n word_model.put()\n reply = generate_definition_reply(word_model)\n reply['context'] = reset_context\n return reply\n \n return make_simple_reply('I did not get that')", "def _predict(self, observation: torch.Tensor, deterministic: bool = False) -> torch.Tensor:\n latent_pi, _ = self._get_latent(observation, pi=True)\n distribution = self._get_action_dist_from_latent(latent_pi)\n return distribution.get_actions(deterministic=deterministic)", "def act(\n self,\n obs: CBInput,\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n chosen_action, log_prob = self.choose_action(\n features_all_arms=obs.context_arm_features\n )\n return chosen_action, log_prob", "def response(self, action, mode='TRAIN'):\n if mode == 'TRAIN':\n dataset = self.train_c_dataset\n dataset_v = self.valid_c_dataset\n else:\n dataset = self.train_t_dataset\n dataset_v = self.valid_t_dataset\n\n data = dataset.next_batch(self.config.task.batch_size)\n sess = self.sess\n x = data['input']\n y = data['target']\n feed_dict = {self.x_plh: x, self.y_plh: y}\n fetch = [self.loss_mse, self.loss_l1]\n\n if action == 0:\n # ----Update mse loss.----\n sess.run(self.update_mse, feed_dict=feed_dict)\n elif action == 1:\n # ----Update l1 loss.----\n sess.run(self.update_l1, feed_dict=feed_dict)\n elif action == 2:\n # ----Update l2 loss.----\n sess.run(self.update_l2, feed_dict=feed_dict)\n\n loss_mse, loss_l1 = sess.run(fetch, feed_dict=feed_dict)\n valid_loss, _, _ = self.valid(dataset=dataset_v)\n train_loss, _, _ = self.valid(dataset=dataset)\n\n # ----Update state.----\n self.previous_mse_loss = self.previous_mse_loss[1:] + [loss_mse.tolist()]\n self.previous_l1_loss = self.previous_l1_loss[1:] + [loss_l1.tolist()]\n self.previous_action = action.tolist()\n self.update_steps += 1\n self.previous_valid_loss = self.previous_valid_loss[1:]\\\n + [valid_loss.tolist()]\n self.previous_train_loss = self.previous_train_loss[1:]\\\n + [train_loss.tolist()]\n\n reward = self.get_step_reward()\n # ----Early 
stop and record best result.----\n dead = self.check_terminate()\n state = self.get_state()\n return state, reward, dead", "def apply_action(self, action):\n return self.__environment.step(action)", "def predict(self, context, filepath=None):\n\n if len(context.splitlines()) < 5:\n context = self.get_guide_context(filepath) + context\n\n context_ids = self.tokenizer.encode(context)\n if len(context_ids) <= 1:\n return None\n context_ids = context_ids[-self.max_context_size:]\n logger.info('Final context: \\n----\\n[{}]\\n'.format(self.tokenizer.decode(context_ids)))\n logger.info('The last 2 tokens are: {}'.format(self.tokenizer.convert_ids_to_tokens(context_ids[-2:])))\n\n # the last token may incomplete, we need to estimate it\n tokens, probs, past = self.estimate_first(context_ids)\n if len(tokens) == 0:\n return None\n\n past = tf.stack(past, axis=0)\n past = select(past, tf.zeros(len(tokens), dtype=tf.int32), axis=1)\n tokens = tf.constant(tokens, dtype=tf.int32)\n tf_context_ids = tf.constant(context_ids[:-1], dtype=tf.int32)[tf.newaxis, :]\n tf_context_ids = tf.tile(tf_context_ids, [len(tokens), 1])\n tf_context_ids = tf.concat([tf_context_ids, tokens[:, tf.newaxis]], axis=-1)\n y, probs = self._predict(tf_context_ids, past, tf.constant(probs))\n last_token_len = len(self.tokenizer.convert_ids_to_tokens(context_ids[-1]))\n\n ids = y.numpy()[:, -self.predict_len-1:]\n prefix_ids = find_common_prefix(list(ids), min_width=4, depth=3)\n prefix = len(prefix_ids or []) > 1 and self.tokenizer.decode(prefix_ids) or ''\n return last_token_len, prefix, [self.tokenizer.decode(i) for i in ids], probs.numpy()", "def __do_predict(self, request, features):\n dmp_predictor.DmpPredictor().predict(request, features)\n\n return defines.ReturnCode.SUCC", "def action(self, observation, epsilon=0):\n if epsilon and epsilon>np.random.rand():\n return np.random.randint(self.action_shape)\n activations = super().predict(observation.observation)\n return np.argmax(activations)", "def predict():\n import trace\n trace.predict()", "def predict(self, review):\n raise NotImplementedError", "def predict_data(self, data, context = {}):\n datapoints = policy_model.policy2datapoint(data)\n result = self.predict_datapoint(datapoints, context)\n return result", "def predict(self, state, sys_action):\n sys_seq_turn = self.manager.sysda2seq(self.manager.ref_data2stand(sys_action), self.goal)\n self.sys_da_id_stack += self.manager.get_sysda_id([sys_seq_turn])\n sys_seq_len = torch.LongTensor([max(len(sen), 1) for sen in self.sys_da_id_stack])\n max_sen_len = sys_seq_len.max().item()\n sys_seq = torch.LongTensor(padding(self.sys_da_id_stack, max_sen_len))\n usr_a, terminal = self.user.select_action(self.goal_input, self.goal_len_input, sys_seq, sys_seq_len)\n usr_action = self.manager.usrseq2da(self.manager.id2sentence(usr_a), self.goal)\n \n return capital(usr_action), terminal", "def predict():\n to_predict = np.zeros(5).reshape(1, 5)\n features = ['is_male', 'num_interactions_with_cust_service', 'late_on_payment', 'age', 'years_in_contract']\n for i, feat in enumerate(features):\n if request.args.get(feat) is not None:\n to_predict[0][i] = request.args.get(feat)\n\n response = clf2.predict(to_predict)\n\n if response:\n return \"The customer is likely to churn\"\n else:\n return \"He is a loyal customer\"", "def evaluate_action_prediction(gt_actions, model_actions):\n\n gt_actions_pool = {ii[\"dialog_id\"]: ii for ii in gt_actions}\n matches = {\"action\": [], \"attributes\": [], \"perplexity\": []}\n confusion_dict = 
collections.defaultdict(list)\n\n for model_datum in model_actions:\n dialog_id = model_datum[\"dialog_id\"]\n for round_id, round_datum in enumerate(model_datum[\"predictions\"]):\n gt_datum = gt_actions_pool[dialog_id][\"actions\"][round_id]\n action_match = gt_datum[\"action\"] == round_datum[\"action\"]\n # Record matches and confusion.\n matches[\"action\"].append(action_match)\n '''\n matches[\"perplexity\"].append(\n round_datum[\"action_log_prob\"][gt_datum[\"action\"]]\n )\n '''\n confusion_dict[gt_datum[\"action\"]].append(round_datum[\"action\"])\n\n # Get supervision for action attributes.\n supervision = gt_datum[\"action_supervision\"]\n if supervision is not None and \"args\" in supervision:\n supervision = supervision[\"args\"]\n if supervision is None:\n continue\n # Case 1: Action mismatch -- record False for all attributes.\n if not action_match:\n for key in supervision.keys():\n if key in IGNORE_ATTRIBUTES:\n continue\n matches[\"attributes\"].append(False)\n # Case 2: Action matches -- use model predictions for attributes.\n else:\n for key in supervision.keys():\n if key in IGNORE_ATTRIBUTES:\n continue\n gt_key_vals = supervision[key]\n model_key_vals = round_datum[\"attributes\"][key]\n if not len(gt_key_vals):\n continue\n # For fashion, this is a list -- multi label prediction.\n if isinstance(gt_key_vals, list):\n assert isinstance(model_key_vals, list), (\n \"Model should also predict a list for attributes\"\n )\n \n recall = np.mean(\n [(ii in model_key_vals) for ii in gt_key_vals]\n )\n if len(model_key_vals):\n precision = np.mean(\n [(ii in gt_key_vals) for ii in model_key_vals]\n )\n else:\n precision = 0.\n f1_score = (2 * recall * precision) / (recall + precision + 1e-5)\n matches[\"attributes\"].append(f1_score)\n else:\n # For furniture, this is a string -- single label prediction.\n matches[\"attributes\"].append(gt_key_vals == model_key_vals)\n\n # Compute the confusion matrix.\n all_actions = sorted(\n set(confusion_dict.keys()).union(\n {jj for ii in confusion_dict.values() for jj in ii}\n )\n )\n matrix = np.zeros((len(all_actions), len(all_actions)))\n for index, action in enumerate(all_actions):\n labels, counts = np.unique(confusion_dict[action], return_counts=True)\n for label, count in zip(labels, counts):\n matrix[all_actions.index(label), index] += count\n print( \"action_accuracy\", np.mean(matches[\"action\"]))\n print(\"attribute_accuracy\", np.mean(matches[\"attributes\"]))\n return {\n \"action_accuracy\": np.mean(matches[\"action\"]),\n # \"action_perplexity\": np.exp(-1 * np.mean(matches[\"perplexity\"])),\n \"attribute_accuracy\": np.mean(matches[\"attributes\"]),\n \"confusion_matrix\": matrix\n }" ]
[ "0.68578327", "0.66418797", "0.66278553", "0.6600217", "0.6558365", "0.6513659", "0.6338011", "0.6164591", "0.6121328", "0.61089677", "0.61044675", "0.6058313", "0.6042822", "0.601579", "0.59879005", "0.59817743", "0.5955881", "0.5951787", "0.5791318", "0.5769036", "0.57538736", "0.57048", "0.5646358", "0.5619391", "0.5615014", "0.5613487", "0.56002516", "0.5599331", "0.5595761", "0.559193" ]
0.6694794
1
Tests na_element gets appended as child.
def test_setter_na_element(self):
    root = netapp_api.NaElement('root')
    root['e1'] = netapp_api.NaElement('nested')
    self.assertEqual(len(root.get_children()), 1)
    e1 = root.get_child_by_name('e1')
    self.assertIsInstance(e1, netapp_api.NaElement)
    self.assertIsInstance(e1.get_child_by_name('nested'), netapp_api.NaElement)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_setter_na_element(self):\n root = netapp_api.NaElement('root')\n root['e1'] = netapp_api.NaElement('nested')\n self.assertEqual(1, len(root.get_children()))\n e1 = root.get_child_by_name('e1')\n self.assertIsInstance(e1, netapp_api.NaElement)\n self.assertIsInstance(e1.get_child_by_name('nested'),\n netapp_api.NaElement)", "def _add_child(self, widget):\n # May be overloaded in layout widgets\n self.node.appendChild(widget.node)", "def test_add_new_child(self):\n root = netapp_api.NaElement('root')\n self.mock_object(netapp_api.NaElement,\n '_convert_entity_refs',\n return_value=zapi_fakes.FAKE_INVOKE_DATA)\n\n root.add_new_child('options', zapi_fakes.FAKE_INVOKE_DATA)\n\n self.assertEqual(zapi_fakes.FAKE_XML2, root.to_string())", "def has_child(self, locator):\n return self.find_element(locator) is not None", "def add_child(self, element, parent):\n parent_node = self._validate(parent)\n child_node = self._Node(element,parent_node)\n parent_node._children.append(child_node)\n self._size += 1", "def new_child(self, parent, *args, **kwargs):\n child = self.new_element(*args, **kwargs)\n parent.append(child)\n return child", "def testAppendChildBadType(self):\n self.assertRaises(\n TypeError,\n self.node.append_child,\n 'I ama a banana'\n )", "def append_element(self, element):\n\n pass", "def addChild(self, node):\n if IElement.providedBy(node):\n node.parent = self\n self.children.append(node)\n return node", "def addChild(self, element):\n self.children.append(element)", "def add_child_element(self, element):\n self._child_elements.append(element)", "def test_append():\n elem = hr.Element(\"this is some text\")\n elem.append(\"some more text\")", "def insert_element_before_similar(self, parent, new_child):\n new_tag = self.tag_base_name(new_child.tag)\n for i, child in enumerate(parent.getchildren()):\n if not self.tag_base_name_is(child, new_tag):\n parent.insert(i, new_child)\n break\n else:\n parent.append(new_child)", "def test_createElement():\n\n assert not _do_test_raw(\"\"\"\n var x = \"foo\";\n x.createElement();\n x.createElement(\"foo\");\n \"\"\").failed()\n\n assert _do_test_raw(\"\"\"\n var x = \"foo\";\n x.createElement(\"script\");\n \"\"\").failed()\n\n assert _do_test_raw(\"\"\"\n var x = \"foo\";\n x.createElement(bar);\n \"\"\").failed()", "def addChild(node):", "def has_element(parent, xpath):\n ele = parent.find('./' + xpath)\n if ele is not None:\n return ele\n ele = parent\n lpath = xpath.split('/')\n for p in lpath:\n e = parent.find('.//' + p)\n if e is None:\n e = ET.SubElement(ele, p)\n ele = e\n return ele", "def testAppendChildCorrection(self):\n self.node.append_child(self.color_corrections[0])\n\n self.assertEqual(\n [self.color_corrections[0]],\n self.node.color_corrections\n )", "def add_child(self, nodo):\n if nodo not in self.children:\n self.children.append(nodo)", "def test_add_node_with_children(self):\n root = netapp_api.NaElement('root')\n self.mock_object(netapp_api.NaElement,\n 'create_node_with_children',\n return_value=zapi_fakes.FAKE_INVOKE_DATA)\n mock_invoke = self.mock_object(root, 'add_child_elem')\n\n root.add_node_with_children('options')\n\n mock_invoke.assert_called_with(zapi_fakes.FAKE_INVOKE_DATA)", "def try_add_child(node, parent, child):\n\n if History.name(node) == parent:\n History.children(node).append(History.new_node(child))\n return True\n return any(History.try_add_child(child_node, parent, child) for child_node in History.children(node))", "def appendElement(document, parentEl, elementType, elementText):\n el = 
document.createElement(elementType)\n textEl = document.createTextNode(elementText)\n el.appendChild(textEl)\n parentEl.appendChild(el)", "def testAppendChildDecision(self):\n self.node.append_child(self.color_decisions[0])\n\n self.assertEqual(\n [self.color_decisions[0]],\n self.node.color_decisions\n )", "def has_child(self):\n return False", "def create_element( self, element, base_element = None, text = None ):\n if base_element is None:\n base_element = self.xml_root\n if etree.iselement( base_element ):\n if etree.iselement( element ):\n base_element.append( element )\n else:\n element = etree.SubElement( base_element, element )\n if text:\n element.text = force_str( text )\n return element\n return None", "def test_getter_child_attr(self):\n root = netapp_api.NaElement('root')\n root.add_attr('val', 'FAKE_VALUE')\n\n self.assertEqual('FAKE_VALUE',\n root.__getitem__('val'))", "def _extend_dommodel(self, child):\n self._dommodel.childNodes[0].appendChild(child)", "def is_etree_element(obj: Any) -> bool:\n return hasattr(obj, 'append') and hasattr(obj, 'tag') and hasattr(obj, 'attrib')", "def addnode(self, parent, tag, **kw):\n kw = {k: v for k, v in kw.items() if v is not None}\n return et.SubElement(parent, tag, **kw)", "def insertAfter( self, node ): \n if isinstance( self, HtmlDomNode ) and isinstance( node, HtmlDomNode ):\n node.parentNode.after( node, self )", "def insertChild(self):\n # insert at position 3 as first is heading and next two nodes have some info \n # from pos 3 the releaselog entry starts\n self.entry.content.html.get_elements('table')[0].get_elements('tbody')[0].get_elements('tr')[0].get_elements('td')[0].get_elements()[0].children.insert(3, self.new_xml)" ]
[ "0.67987376", "0.63512254", "0.6176601", "0.6144186", "0.60023457", "0.5950926", "0.59426093", "0.5894436", "0.5872535", "0.58458453", "0.58127284", "0.58080167", "0.5802401", "0.5756666", "0.57045996", "0.5687755", "0.5677121", "0.5653698", "0.5644743", "0.56394815", "0.5633012", "0.5628131", "0.5569129", "0.55283695", "0.54976207", "0.548651", "0.5481467", "0.54806083", "0.54487514", "0.54459006" ]
0.67678875
1
Tests dict is appended as child to root.
def test_setter_child_dict(self):
    root = netapp_api.NaElement('root')
    root['d'] = {'e1': 'v1', 'e2': 'v2'}
    e1 = root.get_child_by_name('d')
    self.assertIsInstance(e1, netapp_api.NaElement)
    sub_ch = e1.get_children()
    self.assertEqual(len(sub_ch), 2)
    for c in sub_ch:
        self.assertIn(c.get_name(), ['e1', 'e2'])
        if c.get_name() == 'e1':
            self.assertEqual(c.get_content(), 'v1')
        else:
            self.assertEqual(c.get_content(), 'v2')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_setter_child_dict(self):\n root = netapp_api.NaElement('root')\n root['d'] = {'e1': 'v1', 'e2': 'v2'}\n e1 = root.get_child_by_name('d')\n self.assertIsInstance(e1, netapp_api.NaElement)\n sub_ch = e1.get_children()\n self.assertEqual(2, len(sub_ch))\n for c in sub_ch:\n self.assertIn(c.get_name(), ['e1', 'e2'])\n if c.get_name() == 'e1':\n self.assertEqual('v1', c.get_content())\n else:\n self.assertEqual('v2', c.get_content())", "def test_append_to_root():\n result = parse_xml(\"<lol><first>text 1</first><first>text 2</first></lol>\")\n expected = {'lol': {'first': ['text 1', 'text 2']}}\n\n assert_equals(result, expected)", "def createStructure(self, root, dirDict):\n for x in dirDict:\n child = root.child(x)\n if isinstance(dirDict[x], dict):\n child.createDirectory()\n self.createStructure(child, dirDict[x])\n else:\n child.setContent(dirDict[x].replace('\\n', os.linesep))", "def createStructure(self, root, dirDict):\n for x in dirDict:\n child = root.child(x)\n if isinstance(dirDict[x], dict):\n child.createDirectory()\n self.createStructure(child, dirDict[x])\n else:\n child.setContent(dirDict[x].replace(\"\\n\", os.linesep).encode())", "def __init__(self):\n self.root = {}", "def __init__(self):\n self.root = {}", "def __init__(self):\n self.root = {}", "def __init__(self):\n self.root = {}", "def __init__(self):\n self.root = {}", "def _testKeySubNsAdd(self):\n if len(self._getKeyList()) == 0 and len(self._getSubNsList()) == 0:\n parent = self.parent()\n if parent:\n parent._newChild(self.path[-1])", "def __init__(self):\n self.child = {}\n self.isend = False", "def __init__(self, val=None):\r\n self.root = {}", "def assertStructure(self, root, dirDict):\n children = [x.basename() for x in root.children()]\n for x in dirDict:\n child = root.child(x)\n if isinstance(dirDict[x], dict):\n self.assertTrue(child.isdir(), \"%s is not a dir!\"\n % (child.path,))\n self.assertStructure(child, dirDict[x])\n else:\n a = child.getContent().replace(os.linesep, '\\n')\n self.assertEquals(a, dirDict[x], child.path)\n children.remove(x)\n if children:\n self.fail(\"There were extra children in %s: %s\"\n % (root.path, children))", "def test_translate_struct_dict_unique_key(self):\n root = netapp_api.NaElement('root')\n child = {'e1': 'v1', 'e2': 'v2', 'e3': 'v3'}\n root.translate_struct(child)\n self.assertEqual(len(root.get_children()), 3)\n self.assertEqual(root.get_child_content('e1'), 'v1')\n self.assertEqual(root.get_child_content('e2'), 'v2')\n self.assertEqual(root.get_child_content('e3'), 'v3')", "def traverse_dict_and_add(self, rootDir, dictH):\n origRootDir = rootDir\n for key, item in dictH.iteritems():\n if item is None or item == {} or item == []:\n attemptedJoin = os.path.normpath(os.path.join(rootDir, key))\n keyPath = None\n if not os.path.isabs(key) and (os.path.isdir(attemptedJoin) or\n os.path.isfile(attemptedJoin)):\n # copy the found file/folder to directory\n keyPath = attemptedJoin\n if os.path.isabs(key) and (os.path.isfile(key) or\n os.path.isdir(key)):\n # copy file/folder to the root location\n if not os.path.isdir(rootDir):\n paths.mkdir_p(rootDir)\n keyPath = paths.path_leaf(key)\n copyLoc = os.path.join(rootDir, keyPath)\n shutil.copy2(key, copyLoc)\n continue # skip the rest of this iteration\n\n if keyPath is not None and not os.path.isdir(keyPath):\n # the string was either not a file/folder or couldn't be\n # resolved from a relative path into a file/folder\n #\n copyLoc = paths.path_leaf(keyPath)\n copyLoc = os.path.join(rootDir, copyLoc)\n print 
copyLoc\n shutil.copy2(key, copyLoc)\n elif keyPath is None:\n # no directory exists at this location, create one\n dirToMake = os.path.normpath(os.path.join(rootDir, key))\n os.makedirs(dirToMake)\n # sys.exit('Got: \"{f}\", couldn\\'t resolve '\n # 'into file or folder'.format(f=key))\n\n elif isinstance(item, dict):\n newRootDir = os.path.join(rootDir, key)\n newRootDir = os.path.normpath(newRootDir)\n self.traverse_dict_and_add(rootDir=newRootDir,\n dictH=dictH[key])\n else:\n sys.exit('Got: \"{f}\", expected a dictionary, '\n '\\{\\} or None'.format(f=item))", "def __init__(self, child_type = None):\r\n super().__init__()\r\n self.__child_dict = collections.OrderedDict()\r\n self.__child_type = child_type\r\n self.__mykeys = ()\r\n self.__parent = None", "def test_translate_struct_dict_unique_key(self):\n root = netapp_api.NaElement('root')\n child = {'e1': 'v1', 'e2': 'v2', 'e3': 'v3'}\n root.translate_struct(child)\n self.assertEqual(3, len(root.get_children()))\n self.assertEqual('v1', root.get_child_content('e1'))\n self.assertEqual('v2', root.get_child_content('e2'))\n self.assertEqual('v3', root.get_child_content('e3'))", "def __setChildDict(self, child):\n \n d = self[self._name]\n d[child.getName()] = child.getDict()", "def test_add_new_child(self):\n root = netapp_api.NaElement('root')\n self.mock_object(netapp_api.NaElement,\n '_convert_entity_refs',\n return_value=zapi_fakes.FAKE_INVOKE_DATA)\n\n root.add_new_child('options', zapi_fakes.FAKE_INVOKE_DATA)\n\n self.assertEqual(zapi_fakes.FAKE_XML2, root.to_string())", "def dict_to_dom(root_node, xml_dict):\n\n if '_content' in list(xml_dict.keys()):\n\n root_node.appendChild(\n root_node.ownerDocument.createTextNode(\n convert_to_str(xml_dict['_content'])\n )\n )\n\n for key, value in xml_dict.items():\n\n if key == '_content':\n continue\n\n if type(value) == dict:\n\n # Root node\n\n tmp_node = root_node.ownerDocument.createElement(key)\n\n dict_to_dom(tmp_node, value)\n\n root_node.appendChild(tmp_node)\n\n elif type(value) == list:\n\n for multinode in value:\n\n tmp_node = root_node.ownerDocument.createElement(key)\n\n dict_to_dom(tmp_node, multinode)\n\n root_node.appendChild(tmp_node)\n\n else:\n\n # Attributes\n\n root_node.setAttribute(\n key,\n convert_to_str(value)\n )", "def addtree(self, dct) -> None:\n namelst = dct['name'].split('\\\\')\n # print('nlst {}'.format(namelst))\n n_n = self\n for curname in namelst:\n nextlevel = n_n.child_dct.get(curname, None)\n if nextlevel is None:\n nextlevel = n_n.child_dct[curname] = LocNode(curname)\n n_n = nextlevel\n n_n.setval(dct)", "def __init__(self): # 用dict模拟字典树即可\n self.root = {}", "def _AddMockSubTestToDataStore(parent_test_key, subtests_dict):\n for test_name in subtests_dict:\n test_key = graph_data.Test(id=test_name, parent=parent_test_key).put()\n _AddMockSubTestToDataStore(test_key, subtests_dict[test_name])", "def test_insert_string_has_correct_key_value_pairs(empty_trie):\n empty_trie.insert(\"hey\")\n start = empty_trie.root.children\n assert empty_trie.root.value is None\n assert list(start.keys()) == [\"h\"]\n assert list(start[\"h\"].children[\"e\"].children.keys()) == [\"y\"]", "def insert(self, pathlist):\n node = self.root\n for letter in pathlist:\n child = node.get(letter)\n if not child:\n node[letter] = {}\n node = node[letter]", "def add(self,root,key,value):\n node = root\n for digit in key:\n child = node.children[ord(digit)-ord('0')]\n if(child==None):\n node.children[ord(digit)-ord('0')] = TrieNode(digit)\n node = 
node.children[ord(digit)-ord('0')]\n \n node.value = ValueMetaDataNode(value)", "def _newChild(self, child):\n self._testKeySubNsAdd()\n self._getSubNsList().append(child)", "def test_getter_child_attr(self):\n root = netapp_api.NaElement('root')\n root.add_attr('val', 'FAKE_VALUE')\n\n self.assertEqual('FAKE_VALUE',\n root.__getitem__('val'))", "def add(self, key, child_config):\n self.__dict__[key] = child_config\n child_config.root = self", "def save(self, nodedict, root=''):\n setitem = super().__setitem__\n getitem = super().__getitem__\n tag = nodedict['tag']\n text = nodedict.get('text', None)\n if hasattr(text, 'strip'):\n text = text.strip()\n attrib = nodedict.get('attrib', {})\n path = '/'.join([root, tag])\n nodes = nodedict.get('nodes', [])\n if text not in ('', None): # text=0 is stored\n try:\n setitem(path, text)\n except Exception as exc:\n sys.stderr.write('%s: %s\\n' % (path, exc))\n raise\n elif attrib and not nodes:\n setitem(path, numpy.nan)\n for subdict in _resolve_duplicates(nodes):\n self.save(subdict, path)\n if attrib:\n dset = getitem(path)\n for k, v in attrib.items():\n dset.attrs[k] = v" ]
[ "0.692969", "0.62596947", "0.6051907", "0.6033617", "0.6024387", "0.6024387", "0.6024387", "0.6024387", "0.6024387", "0.5993601", "0.5961713", "0.5925168", "0.5859408", "0.58378726", "0.58262926", "0.58179736", "0.58094585", "0.5797076", "0.575363", "0.57313854", "0.57068044", "0.5705004", "0.56512815", "0.5638603", "0.5610679", "0.560135", "0.55962175", "0.5577291", "0.55756146", "0.55684954" ]
0.6954826
0
Tests list/tuple are appended as child to root.
def test_setter_child_list_tuple(self):
    root = netapp_api.NaElement('root')
    root['l'] = ['l1', 'l2']
    root['t'] = ('t1', 't2')
    l = root.get_child_by_name('l')
    self.assertIsInstance(l, netapp_api.NaElement)
    t = root.get_child_by_name('t')
    self.assertIsInstance(t, netapp_api.NaElement)
    for le in l.get_children():
        self.assertIn(le.get_name(), ['l1', 'l2'])
    for te in t.get_children():
        self.assertIn(te.get_name(), ['t1', 't2'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_setter_child_list_tuple(self):\n root = netapp_api.NaElement('root')\n root['l'] = ['l1', 'l2']\n root['t'] = ('t1', 't2')\n l_element = root.get_child_by_name('l')\n self.assertIsInstance(l_element, netapp_api.NaElement)\n t = root.get_child_by_name('t')\n self.assertIsInstance(t, netapp_api.NaElement)\n for le in l_element.get_children():\n self.assertIn(le.get_name(), ['l1', 'l2'])\n for te in t.get_children():\n self.assertIn(te.get_name(), ['t1', 't2'])", "def testAppendChildren(self):\n self.node.append_children(\n self.color_corrections + self.color_decisions\n )\n\n self.assertEqual(\n self.color_corrections,\n self.node.color_corrections\n )\n\n self.assertEqual(\n self.color_decisions,\n self.node.color_decisions\n )", "def testAppendChildBadType(self):\n self.assertRaises(\n TypeError,\n self.node.append_child,\n 'I ama a banana'\n )", "def test_iter_children():\n builder = TreeBuilder()\n builder.create_root(0)\n\n data = list(range(2, 15, 3))\n for datum in data:\n builder.add_child(datum)\n t = builder.build()\n\n for i, child in enumerate(t):\n assert child.data == data[i]", "def test_create_node_with_children(self):\n root = netapp_api.NaElement('root')\n self.mock_object(root, 'add_new_child', return_value='abc')\n\n result_xml = str(root.create_node_with_children(\n 'options', test1=zapi_fakes.FAKE_XML_STR,\n test2=zapi_fakes.FAKE_XML_STR))\n\n # No ordering is guaranteed for elements in this XML.\n self.assertTrue(result_xml.startswith(\"<options>\"), result_xml)\n self.assertIn(\"<test1>abc</test1>\", result_xml)\n self.assertIn(\"<test2>abc</test2>\", result_xml)\n self.assertTrue(result_xml.rstrip().endswith(\"</options>\"), result_xml)", "def test_add_new_child(self):\n root = netapp_api.NaElement('root')\n self.mock_object(netapp_api.NaElement,\n '_convert_entity_refs',\n return_value=zapi_fakes.FAKE_INVOKE_DATA)\n\n root.add_new_child('options', zapi_fakes.FAKE_INVOKE_DATA)\n\n self.assertEqual(zapi_fakes.FAKE_XML2, root.to_string())", "def add_child(self, value):\n assert type(value) != TreeNode\n self.children.append(TreeNode(value, self))", "def try_add_child(node, parent, child):\n\n if History.name(node) == parent:\n History.children(node).append(History.new_node(child))\n return True\n return any(History.try_add_child(child_node, parent, child) for child_node in History.children(node))", "def testAppendChildDecision(self):\n self.node.append_child(self.color_decisions[0])\n\n self.assertEqual(\n [self.color_decisions[0]],\n self.node.color_decisions\n )", "def add_child(self, child):\r\n self.children.append(child)", "def test_add_node_with_children(self):\n root = netapp_api.NaElement('root')\n self.mock_object(netapp_api.NaElement,\n 'create_node_with_children',\n return_value=zapi_fakes.FAKE_INVOKE_DATA)\n mock_invoke = self.mock_object(root, 'add_child_elem')\n\n root.add_node_with_children('options')\n\n mock_invoke.assert_called_with(zapi_fakes.FAKE_INVOKE_DATA)", "def test01(self):\n\n t = tree(\"a\", [tree(\"b\"), tree(\"c\")]);\n self.assertTrue(self.isTree(t))", "def hasChildren():", "def test_inset(depth_one_tree):\n depth_one_tree.insert(2, 3)\n print(depth_one_tree.root.children[0].children)\n print(depth_one_tree.root.children[1].children)\n print(depth_one_tree.root.children[2].children)\n print(depth_one_tree.root.children[3].children)\n assert str(depth_one_tree.root.children[2].children) == str([2])", "def addChild(node):", "def test_binarytree_insert_exists(empty_list):\n assert empty_list.insert(42)", "def 
test_len_children():\n builder = TreeBuilder()\n builder.create_root(0)\n\n data = list(range(2, 15, 3))\n subdata = [0, 1, 2, 3, 4]\n for datum in data:\n builder.add_child(datum, move=True)\n\n for subdatum in subdata:\n builder.add_child(subdatum)\n\n builder.move_to_parent()\n t = builder.build()\n\n assert len(t) == len(data)\n for child in t:\n assert len(child) == len(subdata)", "def testAppendChildCorrection(self):\n self.node.append_child(self.color_corrections[0])\n\n self.assertEqual(\n [self.color_corrections[0]],\n self.node.color_corrections\n )", "def appendChild(self, child):\n self.__initChild()\n self.__child.append(child)", "def __call__(self, node):\n if node.children:\n if len(node.children) == 1:\n if self.TagEqual(node.children[0], node):\n #print node.ToString()\n node.tag = self.Tag(node, node.children[0]);\n lst = node.children[0].children;\n node.children = lst;", "def append(self, tree):\n self.insert(len(self), tree)", "def __initChild(self):\n if self.__child is None:\n self.__child = []\n self._populateChild()", "def test_children_tree(depth_one_tree):\n assert str(depth_one_tree.root.children) == str([1, 2, 3, 4])", "def test_append_to_root():\n result = parse_xml(\"<lol><first>text 1</first><first>text 2</first></lol>\")\n expected = {'lol': {'first': ['text 1', 'text 2']}}\n\n assert_equals(result, expected)", "def test_append_children_category(self):\n category = Category(catname='olympic games')\n category1 = Category(catname='Tennis')\n category.parents.append(category1)\n category.save()\n assert category.parents", "def test_get_children():\n builder = TreeBuilder()\n builder.create_root(1)\n builder.add_child(7)\n builder.add_child(2, move=True)\n builder.add_child(13)\n t = builder.build()\n\n assert t[0].data == 7\n assert t[1].data == 2\n assert t[1][0].data == 13", "def append_child(self, child):\n\t\tself._children.append(child)", "def _add(self, item):\n if isinstance(item, Node):\n if item in self:\n return #already added\n elif item.name in self:\n if item.parent:\n #maintain consistency as we're replacing an existing item\n item.parent._remove(item)\n self._children[item.name] = item\n item._parent = self\n else:\n raise ValueError(\"Expected argument to be of type Node or one of \"\n \"its descendents\")", "def add_child(self, child):\n\n child_parent_scope = child.parent_scope\n child_parent_value = child.parent_value\n\n if all([\n child_parent_scope == self.scope,\n child_parent_value == self.value,\n self.user in (child.user, ANY),\n ]):\n self.children.append(child)\n child.parent = self\n return True\n\n else:\n return any([node.add_child(child) for node in self.children])", "def append(self, dpr):\r\n self.childlist.append(dpr)" ]
[ "0.70010734", "0.65542954", "0.63257587", "0.6279599", "0.62566125", "0.6221631", "0.6212001", "0.6183939", "0.6152893", "0.61014795", "0.60733724", "0.60378975", "0.59894735", "0.5981031", "0.5951861", "0.5948126", "0.5942114", "0.59398097", "0.5915376", "0.5901368", "0.58825415", "0.5847206", "0.5843626", "0.5836633", "0.58234596", "0.5809242", "0.5803973", "0.5800112", "0.5751976", "0.573671" ]
0.70634353
0
Get sentiment analysis immediately on document save.
def get_sentiment_analysis(sender, instance, **kwargs):
    text_analysis = TextAnalysis(instance.text)
    # Prevent sentiment_analysis API call every time the document is saved
    if instance.sentiment_analysis is None:
        instance.get_sentiment_analysis()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def text_analytics(self):\n\n headers = {\n # Request headers\n 'Content-Type': 'application/json',\n 'Ocp-Apim-Subscription-Key': self.keys['text_analytics'],\n }\n \n sentiment_url = 'https://westus.api.cognitive.microsoft.com/text/analytics/v2.0/sentiment'\n \n raw_text = self.article_params['text']\n\n # Build post for sentiment\n try:\n sentences = tokenize.sent_tokenize(str(raw_text))\n content = []\n for i, sentence in enumerate(sentences):\n content.append({'id': str(i), 'language': 'en', 'text': sentence})\n body = json.dumps({\"documents\": content}).encode('utf-8')\n\n request = urllib.request.Request(sentiment_url, body, headers)\n response = urllib.request.urlopen(request)\n json_response = json.loads(response.read().decode('utf-8'))\n \n # A list of dictionaries, with each dictionary containing a sentence\n # sentiment score\n sentiments_list = json_response['documents']\n\n # Calculate the articles average sentiment from all the sentences\n cumulative_sentiment_score = 0\n for sent in sentiments_list:\n cumulative_sentiment_score += sent['score']\n avg_article_sentiment = cumulative_sentiment_score/len(sentiments_list)\n\n # Put article sentiments in bucket from 1 to 5, with 1 being very\n # negative and 5 being very positive\n if avg_article_sentiment < 0.2:\n sentiment = 1\n elif 0.2 <= avg_article_sentiment < 0.4:\n sentiment = 2\n elif 0.4 <= avg_article_sentiment < 0.6:\n sentiment = 3\n elif 0.6 <= avg_article_sentiment < 0.8:\n sentiment = 4\n else:\n sentiment = 5\n\n except Exception as e:\n print('Unable to process sentiment for article. Assuming '\n 'sentiment is neutral.')\n sentiment = 3\n\n return sentiment", "def process_sentiment(self):\r\n\r\n\r\n print(\"Beginning sentiment analysis\")\r\n # textblob time\r\n #tweet_sentiment = [TextBlob(tweet['filtered_text']).sentiment for index, tweet in self.tweet_dataframe.iterrows()]\r\n #self.tweet_dataframe['polarity'] = [i.polarity for i in tweet_sentiment]\r\n #self.tweet_dataframe['subjectivity'] = [i.subjectivity for i in tweet_sentiment]\r\n\r\n #vader time\r\n #http://t-redactyl.io/blog/2017/04/applying-sentiment-analysis-with-vader-and-the-twitter-api.html\r\n sentiment = []\r\n\r\n analyzer = SentimentIntensityAnalyzer()\r\n\r\n for tweet in self.tweet_dataframe['filtered_text']:\r\n vs = analyzer.polarity_scores(tweet)\r\n sentiment.append(vs['compound'])\r\n\r\n self.tweet_dataframe['vader_polarity'] = pd.Series(sentiment)", "def add_sentiment(self):\n self.record = 0\n letter_series = self.dataframe.letter \n sentiment_call = lambda letter_text: self._evaluate_sentiment(letter_text)\n sentiment_data = letter_series.map(sentiment_call)\n self.dataframe['sentiment'] = sentiment_data\n self._unpack_sentiment_data()", "def analyze():\n content = request.get_json()\n if model is None:\n return\n max_seq_length = model.max_seq_length\n test_data = content['text']\n data, seq_lengths, targets = prepare_text(\n test_data, max_seq_length, vocab_mapping)\n input_feed = {}\n input_feed[model.seq_input.name] = data\n input_feed[model.target.name] = targets\n input_feed[model.seq_lengths.name] = seq_lengths\n output_feed = [model.y]\n outputs = sess.run(output_feed, input_feed)\n score = np.argmax(outputs[0])\n probability = outputs[0].max(axis=1)[0]\n message = 'Value of sentiment: '\n if score > 0:\n message = message + 'positive'\n else:\n message = message + 'negative'\n message = message + ' with probability: ' + str(probability)\n result = json.dumps({\n 'score': str(score),\n 'probability': 
str(probability)\n })\n\n resp = Response(response=result, status=200, mimetype='application/json')\n\n return resp", "def analyze_sentiment(self, lang: str = TARGET_LANG):\n if not self.translation and self.language != lang:\n self.translate()\n if not self.clean:\n return\n query = {\"documents\": [\n {\"id\": \"1\", \"language\": \"{}\".format(lang),\n \"text\": \"{}\".format(self.translation)}\n ]}\n response = requests.post(self.url_sentiment, headers=self.sentiment_headers, json=query)\n self.sentiment = response.json()['documents'][0]['sentiment']", "async def log_sentiment(self, event):\n\n sentiment = self.__model.sentiment_score(event.raw_text)\n\n logging.debug(\"Got the following message: \\\"\" + event.raw_text + \"\\\" with sentiment score \" + str(sentiment))\n\n if sentiment:\n\n sender = await event.get_sender()\n user = utils.get_display_name(sender)\n self.__sentiment_gauge.labels(user).set(sentiment)", "def sentiment_analysis(self):\n train_pos = pd.read_csv(\"data/train_Arabic_tweets_positive_20190413.tsv\", sep='\\t', names=[\"label\", \"tweet\"])\n train_neg = pd.read_csv(\"data/train_Arabic_tweets_negative_20190413.tsv\", sep='\\t', names=[\"label\", \"tweet\"])\n train = pd.concat([train_pos, train_neg])\n train.tweet = train.tweet.apply(self.preprocessor).apply(tokenization).apply(lambda x: x.tokens[0])\n le = LabelEncoder()\n le.fit(train.label)\n train.label = le.transform(train.label)\n\n sentence_inds, vocab, self.num_tokens, word_index, index_word = helper.encode_tokens(train.tweet.values)\n\n\n self.embeddings_matrix = helper.load_embedding_matrix(self.num_tokens, self.embedding_size, \n word_index, self.embeddings_index)\n\n\n train_padded = pad_sequences(sentence_inds, padding=\"post\", truncating=\"post\", maxlen=100)\n\n self.X_train, self.X_valid, self.y_train, self.y_valid = train_test_split(train_padded, train.label.values, test_size=0.5,random_state=0, stratify=train.label.values)\n\n model = self.train_model()\n y_pred = model.predict(self.X_valid)\n return (np.argmax(y_pred, axis=1) == self.y_valid).sum() / self.y_valid.shape[0]", "def analyze(text):\n client = language_service_client.LanguageServiceClient()\n\n # with open(movie_review_filename, 'r') as review_file:\n # Instantiates a plain text document.\n \n # content = text.read()\n content=text\n document = language_v1.types.Document(\n content=content,\n type=enums.Document.Type.PLAIN_TEXT,\n language='en'\n )\n # type='PLAIN_TEXT',\n # )\n \n try:\n response = client.analyze_sentiment(\n document=document,\n encoding_type='UTF32',\n )\n sentiment = response.document_sentiment\n return (sentiment.score)\n except InvalidArgument:\n sentiment=0.0\n return sentiment", "def on_text(self, event):\n self.get_counts()\n self.save()", "def analyze(content):\r\n client = language.LanguageServiceClient()\r\n\r\n document = types.Document(\r\n content=content,\r\n type=enums.Document.Type.PLAIN_TEXT)\r\n annotations = client.analyze_sentiment(document=document)\r\n\r\n # Write results to GCS \r\n return annotations.document_sentiment.score", "def sentiment_analysis(name, dictionary):\n\ttone_analyzer = ToneAnalyzerV3(\n\t\t username='2ed2f0c6-1722-472d-9126-224897b991af',\n\t\t password='UcuSde1YmeK6',\n\t\t version='2016-05-19')\n\tl = open(name + '.txt')\n\tlines = l.readlines()\n\tfeel_dict = {'Anger':1.0,'Fear':2.0, 'Sadness':3.0, 'Disgust':4.0,'Joy':5.0, 'Excitement':6.0}\n\tdictionary[name] = []\n\tfor i in lines:\n\t\t#print('-----------------')\n\t\t#print(i)\n\t\tmax_score = 0.0\n\t\tmax_feel = 
''\n\t\ttone = tone_analyzer.tone(i, 'emotion')\n\t\tfor feel in tone['document_tone']['tone_categories']:\n\t\t\tfor feeling in feel['tones']:\n\t\t\t\tif feeling['score'] > max_score:\n\t\t\t\t\tmax_score = feeling['score']\n\t\t\t\t\tmax_feel = feeling['tone_name']\n\t\t#print(max_score, max_feel)\n\t\t#blob1 = TextBlob(i, pos_tagger=PatternTagger(), analyzer=PatternAnalyzer())\n\t\tif max_feel != '':\n\t\t\ttweet_tbu = db.Tweet.objects(rating=feel_dict[max_feel]).first()\n\t\t\tdict_tbu = {}\n\t\t\tif tweet_tbu:\n\t\t\t\tdict_tbu = mongo_to_dict(tweet_tbu)\n\t\t\t\tprint('exists')\n\t\t\t\tprint(dict_tbu)\n\t\t\t\tif max_feel != '':\n\t\t\t\t\tnew_dict = {}\n\t\t\t\t\tnew_dict['tweet'] = dict_tbu['tweet']\n\t\t\t\t\tnew_dict['tweet'].append(i[0:-2])\n\t\t\t\t\ttweet_tbu.update(**new_dict)\n\t\t\t\t\ttweet_tbu.reload()\n\t\t\telse:\n\t\t\t\tprint('not exists - with max')\n\t\t\t\tnew_dict = {}\n\t\t\t\tnew_dict['tweet'] = [i[0:-1]]\n\t\t\t\tif max_feel != '':\n\t\t\t\t\tnew_dict['rating'] = feel_dict[max_feel]\n\t\t\t\telse:\n\t\t\t\t\tnew_dict['rating'] = 0.0\n\t\t\t\tprint(new_dict)\n\t\t\t\tnew_tweet = db.Tweet(**new_dict)\n\t\t\t\tnew_tweet.save()\n\t\telse:\n\t\t\tprint('not exists - without')\n\t\t\tnew_dict = {}\n\t\t\tnew_dict['tweet'] = [i[0:-1]]\n\t\t\tif max_feel != '':\n\t\t\t\tnew_dict['rating'] = feel_dict[max_feel]\n\t\t\telse:\n\t\t\t\tnew_dict['rating'] = 0.0\n\t\t\tprint(new_dict)\n\t\t\tnew_tweet = db.Tweet(**new_dict)\n\t\t\tnew_tweet.save()\n\tresult = db.Tweet.objects()\n\treturn(result)", "def sentiment_analysis(self, text):\n\n body = {'text': text}\n body = json.dumps(body)\n url = self.base_url + '/language-service/phoenix-language/nlp/sentiment'\n headers = {\"ApiKey\": self.api_key, \"Content-type\": \"application/json\"}\n response = requests.post(url=url, data=body, headers=headers).json()\n return response", "def sentiment(self, text, method = \"vocabulary\"):\n assert method == \"vocabulary\" or method == \"rnn\"\n endpoint = method == \"vocabulary\" and \"sentiment\" or \"sentimentRNN\"\n return self._er.jsonRequestAnalytics(\"/api/v1/\" + endpoint, { \"text\": text })", "def sentiment(text):\n\n sentiment_dict = TextBlob(text).sentiment._asdict()\n return sentiment_dict", "def getSentiment(s):\n headers = {\"Ocp-Apim-Subscription-Key\" : \"4c28d3a67a12442cad6666a3200c49f5\",\n \"Content-Type\" : \"application/json\", \"Accept\" : \"application/json\"}\n url = \"https://westus.api.cognitive.microsoft.com/text/analytics/v2.0/sentiment\"\n json = {\"documents\": [{\"language\": \"en\", \"id\" : \"1\"}]}\n json['documents'][0]['text'] = s\n sentiment = r.post(url, headers = headers, json = json)\n sentiment = j.loads(sentiment.text)\n return sentiment['documents'][0]['score']", "def get_overall_sentiment(text):\n return alchemy_language.sentiment(text=text)", "def analyze(self, text):\n tknzr = nltk.tokenize.casual.TweetTokenizer(preserve_case=True, reduce_len=False, strip_handles=False)\n tknTxt = tknzr.tokenize(text)\n sentiment = 0\n \n for i in range(len(tknTxt)):\n if tknTxt[i] in self.posTxt:\n #print(\"POS\")\n #print(tknTxt[i])\n sentiment += 1\n elif tknTxt[i] in self.negTxt:\n #print(\"NEG\")\n #print(tknTxt[i])\n sentiment -= 1\n \n return sentiment", "def get_sentiment(sentence):\n\tblob = tb.TextBlob(sentence.decode('utf-8','ignore'))\n\treturn blob.sentiment[0]", "def predictionSentiment(company):\n #change the key for the API in here. 
This is the AlchemyDataNews\n KEY = '2190f450728492113ce4e5b880a72eefbea73308'\n alchemy_data_news = AlchemyDataNewsV1(api_key=KEY)\n timeBegin ='now-2d'\n timeEnd = 'now'\n company_query = '|text=' + company + ',type=company|'\n results = alchemy_data_news.get_news_documents(\n start=timeBegin,\n end=timeEnd,\n return_fields=['enriched.url.title',\n 'enriched.url.entities.entity.sentiment.type',\n 'enriched.url.entities.entity.sentiment.score'\n ],\n query_fields={'q.enriched.url.enrichedTitle.entities.entity': company_query})\n r = json.dumps(results, indent=2)\n f = open(\"/home/kid/Github/Oracle/watson/jsonp2.json\", 'w')\n f.write(str(r))", "def record_sentiment(head_frame, session_id):\n from .wsgi_aux import app\n with app.app_context():\n sentiment = cssi.sentiment.generate_sentiment_score(frame=head_frame)\n session = Session.query.filter_by(id=session_id).first()\n if session is not None:\n if sentiment is not None:\n new_score = {'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'sentiment': sentiment}\n session.sentiment_scores.append(new_score)\n db.session.commit()", "def generateSentimentAnalysis(self, fs_db, cleaned_submissions, cleaned_tweets):\n all_posts = []\n\n for p in range(len(cleaned_submissions)):\n print('reddit', self.clean(cleaned_submissions[p][3]))\n all_posts.append(self.clean(cleaned_submissions[p][3]))\n\n for t in range(len(cleaned_tweets)):\n print('twitter', self.clean(cleaned_tweets[t][2]))\n all_posts.append(self.clean(cleaned_tweets[t][2]))\n \n if len(all_posts) == 0:\n raise Exception(\"No crawled data\")\n\n count = 0\n\n for c in all_posts:\n blob = TextBlob(c)\n\n polarity = blob.sentiment.polarity\n subjectivity = blob.sentiment.subjectivity\n\n doc_ref = fs_db.collection(u'sentimentAnalysis').document('first')\n if (polarity != 0 and subjectivity != 0):\n count += 1\n doc_ref.set({str(count): {'post': c, 'polarity': polarity, 'subjectivity':subjectivity}}, merge=True)\n\n with open('wc.txt', 'w') as output:\n for data in all_posts:\n output.write('%s\\n' % data)", "def AnalyzeSentiment(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "def sentiment(self) -> Dict[str, float]:", "def sample_analyze_sentiment(text):\n\n client = language_v1.LanguageServiceClient()\n\n # Available types: PLAIN_TEXT, HTML\n type_ = enums.Document.Type.PLAIN_TEXT\n\n document = {\"content\": text, \"type\": type_}\n\n # Available values: NONE, UTF8, UTF16, UTF32\n encoding_type = enums.EncodingType.UTF8\n\n response = client.analyze_sentiment(document, encoding_type=encoding_type)\n\n # Get sentiment for all sentences in the document\n sentences = []\n\n # Get sentiment for all sentences in the document\n for sentence in response.sentences:\n print(u\"Sentence text: {}\".format(sentence.text.content))\n print(u\"Sentence sentiment score: {}\".format(sentence.sentiment.score))\n print(u\"Sentence sentiment magnitude: {}\".format(sentence.sentiment.magnitude))\n sentences.append({\n \"content\": sentence.text.content,\n \"textSentimentScore\": sentence.sentiment.score,\n \"textSentimentMagnitude\": sentence.sentiment.magnitude\n })\n\n # Get the language of the text, which will be the same as\n # the language specified in the request or, if not specified,\n # the automatically-detected language.\n print(u\"Language of the text: {}\".format(response.language))\n\n result = {\n \"success\": True,\n \"sentimentScore\": 
response.document_sentiment.score,\n \"sentimentMagnitude\": response.document_sentiment.magnitude,\n \"sentences\": sentences,\n }\n return result", "def get_sentiment_data():\n params = request.args\n result = None\n\n def set_result(x):\n nonlocal result # This is ugly, ew, gotta fix this\n result = x\n\n pipeline_zoo.get_sentiment_analysis_pipeline(set_result).feed_data((params, None))\n return jsonify({\n 'sentiment_score': result\n })", "def do_sentiment_analysis(self):\n\n tweets_sentiment = []\n\n for tweet in self.tweets:\n parsed_tweet = {}\n parsed_tweet['text'] = tweet\n sentiment_data = self.tweet_sentiment_analysis(tweet)\n parsed_tweet['sentiment'] = sentiment_data[0]\n parsed_tweet['polarity'] = sentiment_data[1]\n parsed_tweet['subjectivity'] = sentiment_data[2]\n\n tweets_sentiment.append(parsed_tweet)\n\n self.sentiment_data = tweets_sentiment\n self.positive_tweets = [tweet for tweet in self.sentiment_data if tweet['sentiment'] == 'Positive']\n self.negative_tweets = [tweet for tweet in self.sentiment_data if tweet['sentiment'] == 'Negative']\n self.neutral_tweets = [tweet for tweet in self.sentiment_data if tweet['sentiment'] == 'Neutral']\n\n return tweets_sentiment", "def analyze(self, text): #takes the text to be analyzed for sentiment\n #initialize inicial score to 0\n score = 0\n #Create tokenizer instance\n tokenizer = nltk.tokenize.TweetTokenizer()\n #create list of words in a tweets\n tokens = tokenizer.tokenize(text)\n \n #iterate over tokens(list of words)\n for word in tokens:\n #check if word is positive or negative\n if word.lower() in self.positives_words:\n score+=1\n if word.lower() in self.negatives_words:\n score-=1\n #neutral if its neither, doesnt add anything, 0\n return score", "def AnalyzeSentiment(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def AnalyzeSentiment(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def sentiment_analysis(con, cur):\n # Retrieve data from DB\n description = np.array(select(cur,\"DESCRIPTION\", \"data11\"))\n description_trans = np.array(select(cur,\"DESCRIPTION_TRANSLATED\", \"data11\")) \n \n description_list = []\n sentimentscore_list=[]\n magnitude_list=[]\n sentences_score_list=[]\n sentences_magnitude_list=[]\n sum= 0\n \n # Create a Language client\n language_client = google.cloud.language.LanguageServiceClient()\n \n # Check whether to use original or translated description\n for i in range(len(description)):\n if description_trans[i] == '':\n descr = description[i]\n else:\n descr = description_trans[i]\n \n document = google.cloud.language.types.Document(\n content=descr,\n type=google.cloud.language.enums.Document.Type.PLAIN_TEXT)\n # Use Language to detect the sentiment of the text\n try:\n response = language_client.analyze_sentiment(document=document)\n except InvalidArgument as e:\n print(\"Invalid: \", i)\n sum += 1\n continue\n \n #SAVE SENTENCE ATTRIBUTES\n score_all=[]\n magnitude_all=[]\n for y in range(len(response.sentences)):\n score_all.append((response.sentences[y].sentiment.score))\n magnitude_all.append((response.sentences[y].sentiment.magnitude))\n \n sentences_score_list.append(repr(score_all))\n sentences_magnitude_list.append(repr(magnitude_all))\n # use eval() to turn it back into a list of floats\n \n 
description_list.append(descr)\n sentiment = response.document_sentiment\n sentimentscore_list.append(sentiment.score)\n magnitude_list.append(sentiment.magnitude)\n print ('Progress: {}/{} rows processed'.format(i, len(description)))\n \n # Save all scores to the DB\n print(\"Sum of skipped rows: \", sum)\n cur.execute(\"DROP TABLE IF EXISTS temp\")\n cur.execute(\"CREATE TABLE temp(DESCRIPTIONS text, SENTIMENTSCORE numeric, MAGNITUDE numeric, SENTENCESCORES text, SENTENCEMAGNITUDES text)\")\n \n def insert(d, ss, m, sens, senm):\n cur.execute(\"INSERT INTO temp (DESCRIPTIONS, SENTIMENTSCORE, MAGNITUDE, SENTENCESCORES, SENTENCEMAGNITUDES) VALUES (?, ?, ?, ?, ?)\", (d, ss, m, sens, senm))\n \n for d, ss, m, sens, senm in zip(description_list, sentimentscore_list, magnitude_list, sentences_score_list, sentences_magnitude_list):\n insert(d, ss, m, sens, senm)\n \n cur.execute(\"DROP TABLE IF EXISTS data22\")\n cur.execute(\"CREATE TABLE data22 AS SELECT success.*, temp.SENTIMENTSCORE, temp.MAGNITUDE, temp.SENTENCESCORES, temp.SENTENCEMAGNITUDES FROM success, temp WHERE temp.DESCRIPTIONS IN (success.DESCRIPTION, success.DESCRIPTION_TRANSLATED)\")\n con.commit()" ]
[ "0.66183805", "0.63964885", "0.63705045", "0.63385326", "0.63300335", "0.6323211", "0.62792253", "0.61452967", "0.61425865", "0.6107899", "0.608918", "0.60816747", "0.60478175", "0.6037236", "0.59819293", "0.59520787", "0.5950468", "0.5949469", "0.5940747", "0.5906748", "0.5905847", "0.590095", "0.5890229", "0.58749396", "0.5866536", "0.584907", "0.582727", "0.58225894", "0.58225894", "0.580213" ]
0.69361526
0
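Several of the negatives above score text with TextBlob's polarity; for reference, a minimal self-contained version of that check (assuming the textblob package is installed) looks like:

from textblob import TextBlob

def polarity(text):
    # TextBlob's sentiment is a (polarity, subjectivity) pair;
    # polarity ranges from -1.0 (negative) to 1.0 (positive).
    return TextBlob(text).sentiment.polarity

print(polarity("I love this product"))        # positive score
print(polarity("This was a waste of money"))  # typically negative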
This function is used in the property self.embeddings.
def set_embeddings(self):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_movie_embedding(self):\n raise NotImplementedError(\"has to be overwritten\")", "def add_embedding(self):\n ### YOUR CODE HERE (~4-6 lines)\n embeddingTensor = tf.Variable(self.pretrained_embeddings)\n embeddings = tf.nn.embedding_lookup(embeddingTensor, self.input_placeholder)\n embeddings = tf.reshape(embeddings, [-1, self.max_length, Config.n_features * Config.embed_size])\n ### END YOUR CODE\n return embeddings", "def get_embeddings(self, data):\n raise NotImplementedError()", "def _embed(self):\n with tf.variable_scope('word_embedding'):\n self.pretrained_word_mat = tf.get_variable(\"word_emb_mat\",\n [self.vocab.word_size() - 2, self.vocab.word_embed_dim],\n dtype=tf.float32,\n initializer=tf.constant_initializer(\n self.vocab.word_embeddings[2:],\n dtype=tf.float32),\n trainable=False)\n self.word_pad_unk_mat = tf.get_variable(\"word_unk_pad\",\n [2, self.pretrained_word_mat.get_shape()[1]],\n dtype=tf.float32,\n initializer=tf.constant_initializer(\n self.vocab.word_embeddings[:2],\n dtype=tf.float32),\n trainable=True)\n\n self.word_mat = tf.concat([self.word_pad_unk_mat, self.pretrained_word_mat], axis=0)\n self.p_emb = tf.nn.embedding_lookup(self.word_mat, self.p)\n self.q_emb = tf.nn.embedding_lookup(self.word_mat, self.q)", "def get_user_embedding(self):\n raise NotImplementedError(\"has to be overwritten\")", "def _embed(self):\n with tf.variable_scope('word_embedding'):\n self.word_embeddings = tf.get_variable(\n 'word_embeddings',\n shape=(self.term_vocab.size(), self.term_vocab.embed_dim),\n initializer=tf.constant_initializer(self.term_vocab.embeddings),\n trainable=True\n )\n self.p_word_emb = tf.nn.embedding_lookup(self.word_embeddings, self.p)\n self.q_word_emb = tf.nn.embedding_lookup(self.word_embeddings, self.q)\n\n with tf.variable_scope('char_embedding'):\n self.char_embeddings = tf.get_variable(\n 'char_embeddings',\n shape=(self.char_vocab.size(), self.char_vocab.embed_dim),\n initializer=tf.constant_initializer(self.char_vocab.embeddings),\n trainable=True\n )\n self.p_char_emb = tf.nn.embedding_lookup(self.char_embeddings, self.p_char) # [batch, seqlen, max_char_num, embedding_size]\n self.q_char_emb = tf.nn.embedding_lookup(self.char_embeddings, self.q_char)\n\n self.p_char_emb = self.cnn_emb(self.p_char_emb, \"p_emb\")\n self.q_char_emb = self.cnn_emb(self.q_char_emb, \"q_emb\")\n '''\n self.p_char_emb = tf.reshape(self.p_char_emb, [-1, self.max_char_num, self.emb_size])\n self.q_char_emb = tf.reshape(self.q_char_emb, [-1, self.max_char_num, self.emb_size])\n\n self.p_char_emb = cnn_layer.conv(self.p_char_emb, self.emb_size,\n bias=True, activation=tf.nn.relu, kernel_size=5, name=\"char_conv\", reuse=None)\n self.q_char_emb = cnn_layer.conv(self.q_char_emb, self.emb_size,\n bias=True, activation=tf.nn.relu, kernel_size=5, name=\"char_conv\", reuse=True)\n\n self.p_char_emb = tf.reduce_max(self.p_char_emb, axis=1) # [batch*seqlen, 1, emb_size]\n self.q_char_emb = tf.reduce_max(self.q_char_emb, axis=1)\n\n batch_size = tf.shape(self.p_word_emb)[0]\n self.p_char_emb = tf.reshape(self.p_char_emb, [batch_size, -1, self.emb_size])\n self.q_char_emb = tf.reshape(self.q_char_emb, [batch_size, -1, self.emb_size])\n\n self.p_char_emb = tf.nn.dropout(self.p_char_emb, 0.95)\n self.q_char_emb = tf.nn.dropout(self.q_char_emb, 0.95)\n '''\n self.p_emb = tf.concat([self.p_word_emb, self.p_char_emb], -1)\n self.q_emb = tf.concat([self.q_word_emb, self.q_char_emb], -1)", "def _use_embeddings(self, word):\n if word == \"@PAD@\":\n return 
torch.zeros(self.embeddings_dim)\n else:\n return self.embeddings[word]", "def get_embeddings_shape(self):\n raise NotImplementedError", "def get_embedding_output(self):\n return self.embedding_output", "def embeddings(self):\n self._ensure_is_connected()\n return self._embeddings", "def num_embeddings(self):\n return self[\"main\"].num_embeddings", "def conjecture_embedding(self, conjectures):\n raise NotImplementedError('Use a derived model')", "def add_embedding(self):\n #with tf.variable_scope(\"RNN\", reuse = tf.AUTO_REUSE):\n embeddings = tf.get_variable(\"embeddings\", initializer = self.pretrained_embeddings,trainable=True)\n inputs = self.input_placeholder\n inputs = tf.reshape(inputs, [self.config.batch_size, -1 , self.config.n_features])\n embeddings = tf.nn.embedding_lookup(embeddings, self.input_placeholder)\n embeddings = tf.reshape(embeddings, [self.config.batch_size, -1, self.config.n_features* self.config.embed_size])\n embeddings = tf.cast(embeddings, tf.float32)\n return embeddings", "def init_embedding(self):\n self.embedding.weight.data.uniform_(-1./self.num_embeddings, 1./self.num_embeddings)", "def _add_pre_trained_embedding(self):\n\n if self.embedding_type['type'] == 'glove':\n self.logging.info('use pre-trained glove word2vec')\n # a. load pre trained glove\n GLOVE_DIR = '../data/glove_pretrained/glove.6B'\n glove_suffix_name = 'glove.6B.' + str(self.embedding_size) + 'd.txt'\n import os\n import numpy as np\n\n embeddings_index = {}\n f = open(os.path.join(GLOVE_DIR, glove_suffix_name)) # 'glove.6B.100d.txt'))\n for line in f:\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\n f.close()\n\n self.logging.info('')\n self.logging.info('Found %s word vectors.' % len(embeddings_index))\n\n # b. compute embedding matrix\n embedding_matrix = np.zeros((len(self.word_index) + 1, self.embedding_size))\n cnt = 0\n for word, i in self.word_index.items():\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector # words not found in embedding index will be all-zeros.\n else:\n # self.logging.info('token in train missing in word2vec: ' + str(word))\n cnt += 1\n self.logging.info('total tokens missing: ' + str(cnt) + ' / ' + str(len(self.word_index)))\n\n # c. 
build embedding layer\n from keras.layers import Embedding\n embedding_layer = Embedding(len(self.word_index) + 1,\n self.embedding_size,\n weights=[embedding_matrix],\n input_length=self.maxlen,\n trainable=False)\n\n elif self.embedding_type['type'] == 'gensim':\n self.logging.info('use pre-trained gensim word2vec')\n\n import gzip\n import gensim\n from keras.layers import Embedding\n import numpy as np\n\n # fname = '../data/word2vec_pretrained/motors/d_300_k_712904_w_6_e_60_v_motors'\n # fname = '../data/word2vec_pretrained/fashion/d_300_k_1341062_w_6_e_70_v_fashion'\n\n self.logging.info('load word2vec path: ' + str(self.embedding_type['path']))\n model = gensim.models.Word2Vec.load(self.embedding_type['path'])\n pretrained_weights = model.wv.syn0\n vocab_size, vector_dim = pretrained_weights.shape\n\n method = 3\n if method == 1:\n self.logging.info('word2vec attempt to fit into embedding layer - middle complex')\n # convert the wv word vectors into a numpy matrix that is suitable for insertion\n # into our TensorFlow and Keras models\n\n embedding_matrix = np.zeros((len(model.wv.vocab), vector_dim))\n for i in range(len(model.wv.vocab)):\n embedding_vector = model.wv[model.wv.index2word[i]]\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\n\n embedding_layer = Embedding(input_dim=embedding_matrix.shape[0],\n output_dim=embedding_matrix.shape[1],\n # input_length=self.maxlen,\n weights=[embedding_matrix],\n trainable=False)\n elif method == 2:\n self.logging.info('word2vec simple embedding matching - simple complex')\n embedding_layer = Embedding(input_dim=vocab_size,\n output_dim=vector_dim,\n input_length=self.maxlen,\n weights=[pretrained_weights],\n trainable=False)\n elif method == 3:\n\n self.logging.info('word2vec match using word_index from keras tokenizer - as used in glove match above')\n # b. compute embedding matrix\n\n # sd = 1 / np.sqrt(len(self.word_index) + 1)\n # embedding_matrix = np.random.normal(0, scale=sd, size=(len(self.word_index) + 1, self.embedding_size))\n\n embedding_matrix = np.zeros((len(self.word_index) + 1, self.embedding_size))\n cnt = 0\n for word, i in self.word_index.items():\n if word in model.wv:\n embedding_vector = model.wv[word]\n embedding_matrix[i] = embedding_vector\n else:\n # self.logging.info('token in train missing in word2vec: ' + str(word))\n cnt += 1\n self.logging.info('total tokens missing: ' + str(cnt))\n\n\n # c. 
build embedding layer\n from keras.layers import Embedding\n embedding_layer = Embedding(len(self.word_index) + 1,\n self.embedding_size,\n weights=[embedding_matrix],\n input_length=self.maxlen,\n trainable=False)\n else:\n raise ValueError('unknown method value')\n\n else:\n raise ValueError('unknown embedding type')\n self.logging.info('create glove pre-trained embedding: ' + str(self.embedding_size))\n return embedding_layer", "def _get_embedding(self, data):\n embedding_list = [super()._get_embedding(data)]\n context = data['context']\n for i in range(context.shape[1]):\n embedding_list.append(getattr(self, f'context{i}')(context[:, i:i+1]))\n return torch.cat(embedding_list, dim=1)", "def __init__(self, num_words, embedding_size, use_cuda):\n super(StandardEmbedding, self).__init__()\n self.embedding_size = embedding_size\n self.num_hash_functions = 0\n self.embeddings = nn.Embedding(num_words, embedding_size)\n self.embeddings = self.embeddings.cuda() if use_cuda else self.embeddings", "def __init__(self):\n # super(MultiEmbedding,self).__init__()\n HyperParameters.__init__(self)", "def edge_embedding(self, type):\n raise Exception(\" not implemented in base model\")", "def get_embeddings_shape(self):\n return [self.max_sent_len + 2]", "def embed_word(self):\n return self.emb.get_keras_embedding(dropout = self.emb_dropout,\n trainable = self.trainable_emb,\n input_length = self.sent_maxlen)", "def get_embeddings_shape(self):\n return [self.max_sent_len * 2 + 2]", "def get_embeddings(self, in_data):\n context, da = in_data\n if self.fixed_divide:\n da_emb = super(PersonageContextDAEmbeddingSeq2SeqExtract, self).get_embeddings(da, pad=True)\n else:\n da_emb = super(PersonageContextDAEmbeddingSeq2SeqExtract, self).get_embeddings(da, pad=False)\n\n # Shubhangi: what this step essentially does is it replaces the context words by their token, with UNK as default.\n # again , we don't need this since our context data is essentially vectors therefore commenting this out\n # similary we don't need context embedding , that's exactly what context is already .\n\n # context_emb = []\n context_emb = [float(parameter[0]) for parameter in context]\n\n # for tok in context[-max_context_len:]:\n # context_emb.append(self.dict_token.get(tok, self.UNK_TOKEN))\n\n # Shubhangi: padding is needed because each context sentence could be of different length ,\n # we don't need to include context in padding as we're going to have a fixed size\n # (max_context_len - len(context)) = 0\n\n\n # padding = [self.UNK_TOKEN] * (max_context_len - len(context))\n\n # Shubhangi: padding might be harmless for now therefore not removing ,\n # essentially what this is doing is concatenating the arrays and sending\n if self.use_div_token:\n return context_emb + [self.DIV_TOKEN] + da_emb\n # return padding + context_emb + [self.DIV_TOKEN] + da_emb\n # return padding + context_emb + da_emb\n return context_emb + da_emb", "def __init__(self, embed_size, vocab):\n super(ModelEmbeddings, self).__init__()\n \n self.embed_size = embed_size\n self.char_embed_size = 50\n self.max_word_len = 21\n self.dropout_rate = 0.3\n self.vocab = vocab \n \n ## A4 code\n pad_token_idx = vocab.char2id['<pad>']\n self.embedding = nn.Embedding(num_embeddings =len(vocab.char2id),\n embedding_dim =self.char_embed_size,\n padding_idx =pad_token_idx,)\n \n self.CNN = CNN(char_embed_size=self.char_embed_size,\n num_filters=embed_size,\n max_word_length=self.max_word_len,)\n self.Highway = Highway(word_embed_size=self.embed_size)\n self.dropout = 
nn.Dropout(p=self.dropout_rate)\n ## End A4 code\n\n ### YOUR CODE HERE for part 1j\n\n\n ### END YOUR CODE", "def add_embeddings(self):\n\n with tf.device('/cpu:0'):\n with tf.variable_scope('Embedding_Layer'):\n embeddings = tf.Variable(self.initial_embeddings,name = 'Embeddings')\n self.input_embeddings = tf.nn.embedding_lookup(embeddings, self.inputs_placeholder) #(N,S,D)\n self.question_embeddings = tf.nn.embedding_lookup(embeddings, self.questions_placeholder) #(N,S,D)", "def get_embeddings(self, entities, type='entity'):\n return None", "def embed_word(self):\n return self.emb.get_keras_embedding(trainable = self.trainable_emb,\n input_length = self.sent_maxlen)", "def embedding_layer(self):\n with tf.name_scope(\"Embedding_Layer\"):\n V_size = len(self.vocab)\n embed_dim = len(self.embed[0]) \n W_embed_ = tf.get_variable(\"W_embed\",shape=[V_size, embed_dim],trainable=False).assign(np.asarray(self.embed))\n W_analogy_embed_ = tf.get_variable(\"W_analogy_embed\",shape=[V_size, embed_dim],trainable=True,initializer=tf.random_uniform_initializer(minval=-1,maxval=1))\n return W_embed_, W_analogy_embed_", "def pretrained_embedding_layer(model,model2,model3, word_to_index,emb_dim_max):\n words_ignored = []\n vocab_len = len(word_to_index) + 1 \n emb_matrix = np.zeros([vocab_len,emb_dim_max])\n \n print(' Total words would be processed : '+str(vocab_len))\n for word, idx in word_to_index.items():\n if word in model:\n emb_matrix[idx,:200] = model[word]\n emb_matrix[idx,200:] = 0\n if word in model2:\n emb_matrix[idx, :100] = model2[word]\n emb_matrix[idx, 100:] = 0\n if word in model3.keys():\n emb_matrix[idx,:] = model3[word]\n else:\n words_ignored.append(word)\n print(str(len(words_ignored))+\" words ignored\")\n print(emb_matrix.shape) \n \n \n embedding_layer = Embedding(vocab_len,emb_dim_max,trainable = True)\n \n # Build the embedding layer, it is required before setting the weights of the embedding layer. \n embedding_layer.build((None,)) # Do not modify the \"None\". This line of code is complete as-is.\n # Set the weights of the embedding layer to the embedding matrix. Your layer is now pretrained.\n embedding_layer.set_weights([emb_matrix])\n \n return embedding_layer,words_ignored", "def make_embeddings(self):\n\t\tprint(\"Presetting embedding weights\")\n\t\t\t\n\t\tnp.random.seed(0)\n\t\tweights = np.random.uniform(low = -0.05, high = 0.05, size = (self.FREQCAP, self.EMB_SIZE))\n\t\t\n\t\tcounter = 0\n\n\t\twords = []\n\t\tweights_tmp = []\n\n\t\twith open(self.embeddingpath) as handle:\n\t\t\tfor i, line in enumerate(handle):\n\t\t\t\ttmp = line.strip()\n\t\t\t\tif len(tmp) > 0:\n\t\t\t\t\tsplit = tmp.split(\" \")\n\t\t\t\t\tif split[0] in self.worddict and len(split[1:]) == 300:\n\t\t\t\t\t\twords.append(split[0])\n\t\t\t\t\t\tweights_tmp.append([float(a) for a in split[1:]])\n\t\t\n\t\tweights_tmp = np.array(weights_tmp)\n\n\t\tfor word, column in zip(words, weights_tmp):\n\t\t\tif self.worddict[word] < self.FREQCAP:\n\t\t\t\tcounter += 1\n\t\t\t\tweights[self.worddict[word],:] = column\n\t\t\n\t\tprint(\"Set\", counter, \"of\", weights.shape[0], \"columns\")\n\t\t\n\t\tif self.EMB_SIZE < weights.shape[-1]:\n\t\t\tprint(\"Reducing dimensionality to\", self.EMB_SIZE)\n\t\t\tpca = PCA(self.EMB_SIZE)\n\t\t\tweights = pca.fit_transform(weights)\n\t\t\n\t\tself.embeddings = [weights]" ]
[ "0.74282503", "0.702405", "0.68440545", "0.6790293", "0.6766997", "0.6700022", "0.6659412", "0.6578466", "0.6472033", "0.6452144", "0.6434908", "0.6428523", "0.6398044", "0.6394544", "0.6383916", "0.6372758", "0.63573927", "0.63538617", "0.63432187", "0.633421", "0.6262197", "0.62230915", "0.6194738", "0.6191908", "0.6179066", "0.6171057", "0.6154447", "0.61474246", "0.61465484", "0.6104093" ]
0.8120444
0
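The query above describes a setter that backs a self.embeddings property, but the document's body is elided. A hypothetical sketch of that pattern — the matrix shape and uniform initialization are assumptions borrowed from the make_embeddings negative — could look like:

import numpy as np

class Model:
    def __init__(self, vocab_size, emb_size):
        self.vocab_size = vocab_size
        self.emb_size = emb_size
        self._embeddings = None

    def set_embeddings(self):
        # build the weight matrix lazily; shape and init are assumptions
        self._embeddings = np.random.uniform(
            low=-0.05, high=0.05, size=(self.vocab_size, self.emb_size))

    @property
    def embeddings(self):
        # the property the query refers to: compute once, then reuse
        if self._embeddings is None:
            self.set_embeddings()
        return self._embeddings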
For each string, output 1 if the DFA accepts it, 0 otherwise. The input is guaranteed to be a DFA.
def task_4(parser):
    dfa = parser.parse_fa()
    test_strings = parser.parse_test_strings()
    # calculate and print acceptance for each string
    for string in test_strings:
        if follow_dfa(dfa["graph"][dfa["start"]], string):
            print("1")
        else:
            print("0")
    print("end")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self, s):\n state = self._initial\n try:\n for sym in s:\n state = self._trans_matrix[state][self._syms_to_indices[sym]]\n except KeyError:\n raise NotInAlphabetError(sym) from None\n return state in self._accepting", "def isogram():\n isoString = input(\"Please write a string. \")\n notIso = False\n\n for character in isoString:\n isoCount = 0\n\n for letter in isoString:\n if character == letter:\n isoCount += 1\n \n if isoCount > 1:\n notIso = True\n break\n \n if notIso is False:\n print(\"Match\")\n else:\n print(\"No match\")", "def min_dfa(D, state_name_mode='succinct', chatty=False): # Default state mode\n if (len(D[\"Q\"]) == 1): # Already minimal\n if(chatty):\n print(\"-> Your DFA is already minimal.\")\n return D\n else:\n # Build a dict of all state combinations of DFA.\n # Function state_combos also imparts a -1 for each state pair,\n # initializing the separation distance at -1. \n ht = dict(state_combos(list(D[\"Q\"])))\n \n # Mark final and non-final states to be 0-distinguishable.\n # This is achieved by putting a 0 against those state pairs.\n if (chatty):\n print(\"Separating final and non-final states (marking 0-distinguishable entries).\")\n \n sepFinNonFin(D, ht)\n \n if (chatty):\n print(\" The 0-distinguishable entries are:\")\n for k in ht.keys():\n if (ht[k]==0):\n print(\"States \", k[0],\" and \", k[1], \" are 0-distinguished.\")\n \n \n # Main fixpoint computation: Assigning distinguishability dist. \n #==============================================================\n ht = fixptDist(D, ht, chatty)\n \n if (chatty):\n print(\" \")\n print(\"Now, collecting equivalence-classes.\")\n \n # Pick out equivalent state-pairs, i.e. those that cannot be \n # distinguished. These are still with a \"-1\" in ht.\n ht_1 = [ stpair for (stpair, dist) in ht.items() if dist == -1 ]\n \n \n if (chatty):\n print(\" The equivalent pairs are:\")\n \n \n # Now form equivalence classes\n # what's returned is \n # [(rep_1, [all_eql_states_1]), (rep_2, [all_eql_states_2]),...]\n # which includes all equivalence classes of size 2 or more.\n rep_eqc = bash_eql_classes(ht_1)\n\n \n \n if (chatty):\n print(\" The merged equivalent classes and representative states are these:\")\n for eqc in rep_eqc:\n print(\"State \", eqc[0], \" represents the equivalent states \", eqc[1])\n \n \n \n # Now we have to deal with singleton equivalence classes. \n # These sit unmerged, OUTSIDE OF ALL (x,y) in ht_1\n # i.e. all the entries in ht_1 are PARTNERED STATE PAIRS. \n \n # If we now take D[\"Q\"] and subtract from it all those x and y\n # which are present in some pair in ht_1, we obtain completely\n # non-mergable states. These are states in their own eql. classes.\n \n # 1. Find all partnered states from ht_1\n Partnered_states = list({x for (x,y) in ht_1} |\n {y for (x,y) in ht_1})\n \n # 2. Now who is left un-partnered?\n List_of_self_only_eqlt_states = listminus(D[\"Q\"], Partnered_states) \n \n # 3. For these singletons, i.e. \"self-only equivalent states\", \n # they are self-representative. Form pairs that indicate this fact.\n rep_eqc_1 = [(x, [x]) for x in List_of_self_only_eqlt_states]\n \n # 4. OK now, we can combine the set of pairs where each pair is \n # (representative, [the list of equivalent states])\n # So finally we get the list of equivalence classes with \n # representatives which is of this form:\n # [(a0,[a0, a1, a2, a3, a4]), (b0,[b0, b1]), (c0,[c0]), ...] \n final_rep_eqc = rep_eqc + rep_eqc_1\n \n # We are now ready to build a DFA out of final_rep_eqc. 
\n # =====================================================\n \n # 1. First, form the set of minimized states, which are \n # state representatives.\n minQ = {x for (x,y) in final_rep_eqc}\n \n # 2. The Alpbahet remains the same.\n minSigma = D[\"Sigma\"]\n \n # 3. The starting state is the representative of D[\"q0\"]\n minq0 = q0_of(D[\"q0\"], final_rep_eqc)\n \n # 4. The final states are the representatives of the original\n # final states. This is computed by helper F_of.\n minF = F_of(D[\"F\"], final_rep_eqc)\n \n # 5. The transition relation of the minimized DFA is obtained\n # by the helper Delta_of\n minDelta = Delta_of(D[\"Delta\"], final_rep_eqc)\n \n # 6. We now need to rename the states if the user wants verbose \n # names (default is succinct). Verbose names are the name of \n # states in each equivalence class strung together sep by \"_\".\n if state_name_mode == 'verbose':\n # First build a state-renaming hash-table involving \n # mk_state_eqc_name\n state_rename_ht = { x : mk_state_eqc_name(y) \n for (x,y) in final_rep_eqc }\n \n minQ = { state_rename_ht[x] for x in minQ }\n minq0 = state_rename_ht[minq0]\n minF = { state_rename_ht[f] for f in minF }\n minDelta = { (state_rename_ht[x], y) : state_rename_ht[z] \n for ((x,y),z) in minDelta.items() }\n #\n # Return the finished (minimized) DFA!\n return mk_dfa(minQ, minSigma, minDelta, minq0, minF)", "def check(s1):\n chars = [0] * 128\n for c in s1:\n chars[ord(c)]+=1\n\n counter = 0\n for i in range(len(chars)):\n if chars[i] %2 != 0:\n counter+=1\n \n return counter <= 1", "def input_string_to_nfa(string: str, nfa: NFA):\n\n # ? is it possible to get a loop of epsilon transitions\n\n # we store a list of all current active states in the nfa\n # as each character is read, we follow all transition(including all series of epsilon transitions) to get a new set of active states\n\n # begin with the start state as the only active state\n active_states = [nfa.start_state]\n\n # mark all states as active that can be reached by following epsilon arrows from the start state\n i = 0\n while i < len(active_states):\n for transition_char, transition_state in active_states[i].transitions:\n if transition_char == 'eps':\n active_states.append(transition_state)\n i += 1\n\n string_index = 0\n while string_index < len(string) and len(active_states) > 0:\n character = string[string_index]\n new_active_states = []\n for active_state in active_states:\n # make active all states that can be reached from this state by reading [character]\n next_states = [transition_state for transition_char, transition_state in active_state.transitions if transition_char == character]\n\n # now make active all states that can be reached by epsilon arrows from these states\n i = 0\n while i < len(next_states):\n for transition_char, transition_state in next_states[i].transitions:\n if transition_char == 'eps':\n next_states.append(transition_state)\n i += 1\n \n new_active_states.extend(next_states)\n\n active_states = new_active_states\n string_index += 1\n\n for active_state in active_states:\n if active_state.is_accepting:\n return True\n\n return False", "def test(dfa, words):\n for word in words:\n try:\n dfa.test(word)\n except AssertionError as e:\n logging.error('ERROR: %s\\n' % e.message)", "def MinimizeDFA(self, ):\n\n def Split(S):\n \"\"\"This function split a given set according to their\n reaction to input characters.\"\"\"\n # for each char do\n # if c splits S into s1 and s2\n # then return {s1, s2}\n \n # return S\n\n # T <- {Da, {D - Da}}\n # P <- 
{}\n\n T = [[ID for ID in range(self.NumStates + 1) if ID not in self.AcceptStates],\n self.AcceptStates]\n Set1 = [ID for ID in range(self.NumStates + 1) if ID not in self.AcceptStates]\n if Set1:\n T = [Set1, self.AcceptStates]\n else:\n T = [self.AcceptStates]\n P = list()\n\n # Minimize DFA using the following algorithm:\n # \n # while P != T do\n # P <- T\n # T <- {}\n # for each set p in P do\n # T <- T | Split(p)\n __counter = 0\n while len(P) != len(T):\n if __counter > 10:\n print \"ERROR: loop forever\"\n exit()\n __counter += 1\n \n P = T[:]\n T = list()\n for p in P:\n if len(p) == 1:\n # p has only one member, nothing to split\n T.append(p)\n continue\n # p should not be empty\n assert p\n\n s1 = list()\n s2 = list()\n # main splitting function\n for idx, char in enumerate(rule.ForAllChar()):\n for state in p:\n # state should be a string\n key = str(state) + '_' + char\n if key in self.TransitionMap:\n if self.TransitionMap[key] not in p:\n s2.append(state)\n else:\n s1.append(state)\n else:\n s2.append(state)\n \n if s2 and s1:\n # set splitted. exit the loop to update the main list\n break\n elif idx < len(rule.ForAllChar()) - 1:\n # clear s1 and s2, enter the next round\n del s1[:]\n del s2[:]\n\n if not s2 or not s1:\n # the set is not splitted, so just append p\n T.append(p)\n else:\n # set is splitted into s1 and s2\n T.append(s1)\n T.append(s2)\n\n # Now, create a new Transition Map\n NewTransitionMap = dict()\n for States in T:\n for char in rule.ForAllChar():\n key = str(States[0]) + '_' + char\n if key in self.TransitionMap:\n # Cannot directly copy the destination state, because they\n # already have new ids. have to use the new state id here\n for states in T:\n if self.TransitionMap[key] in states:\n # doesn't matter which id in the set is used, since\n # they all have the same behavior\n # choose first state here\n NewTransitionMap[key] = states[0]\n \n self.TransitionMap = dict(NewTransitionMap.items())\n \n # Modify the accepting State\n NewAcceptStates = set()\n for States in T:\n for state in States:\n if state in self.AcceptStates:\n NewAcceptStates.add(States[0])\n break\n self.AcceptStates = list(NewAcceptStates)\n \n # Modify the starting State\n NewStartStates = set()\n for States in T:\n for state in States:\n if state in self.StartStates:\n NewStartStates.add(States[0])\n break\n self.StartStates = list(NewStartStates)\n\n # for key, value in self.TransitionMap.items():\n # print key, '=>', value\n # print 'Accept =', self.AcceptStates\n # print 'Start =', self.StartStates", "def test(s, approach):\n s_split = s.split()\n parsed_s = nlp(s)\n for i in xrange(len(parsed_s)):\n if parsed_s[i].tag_ == \"VBZ\":\n if approach(s, i) == 1:\n print str(1) + \":\", \" \".join(s_split[:i]), \\\n \"[{}=>{}]\".format(s_split[i], transform[s_split[i]]), \\\n \" \".join(s_split[i + 1:]) + \"\\t({} {})\".format(parsed_s[i], parsed_s[i].tag_)\n else:\n print str(0) + \":\", s + \"\\t({} {})\".format(parsed_s[i], parsed_s[i].tag_)", "def follow_dfa(state, string):\n if string == \"\":\n return state[\"final\"]\n\n # get first edge using symbol at beginning of string\n # next is a cool function ive just learned i hope this counts as readable code 🥺👉👈\n next_state = next(\n s[\"node\"] for s in state[\"edges\"]\n if s[\"symbol\"] == string[0]\n )\n\n return follow_dfa(next_state, string[1:])", "def is_valid_sequence(dna):\n num_char = 0\n \n for char in dna:\n if not char in 'ATCG':\n num_char += 1\n\n return num_char == 0", "def smarter_check_and_normalizer(in_str):\n 
counter1, counter2, counter3 = 0, 0, 0\n length, bool_val = len(input_str), False\n if length > 0: \n bool_val = True\n curr_index, next_index = 0, 1\n curr_word, next_word = \"\", \"\"\n while current_index < length:\n pass \n\n\n return [bool_val, input_str]", "def any_lowercase2(s):\n for c in s:\n if 'c'.islower():\n return 'True'\n else:\n return 'False'", "def any_lowercase2(s):\n for c in s:\n if 'c'.islower():\n return 'True'\n else:\n return 'False'", "def any_lowercase2(s):\n for c in s:\n if 'c'.islower():\n return 'True'\n else:\n return 'False'", "def any_lowercase2(s):\n for c in s:\n if 'c'.islower():\n return 'True'\n else:\n return 'False'", "def solution(s):", "def _testit(words):\n w_list = list(words)\n pairs = defaultdict(lambda: [0, 0])\n if not _is_component(w_list):\n return False\n for word in w_list:\n pairs[word[0].lower()][0] += 1\n pairs[word[-1].lower()][1] += 1\n lst = sorted([pair[0] - pair[1] for pair in pairs.values()])\n return all(i == 0 for i in lst[1:-1]) and \\\n lst[-1] <= 1 and sum(lst[::len(lst) - 1]) == 0", "def question1a(s,t):\n\n anagrams = permutations(t, len(t))\n for anagram in anagrams:\n if anagram:\n if ''.join(anagram) in s:\n return True\n return False", "def any_lowercase1(s):\n\tfor c in s:\n\t\tif c.islower():\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "def any_lowercase3(s):\n for c in s:\n flag = c.islower()\n return flag", "def question1b(s,t):\n\n # check if they are no empty strings\n if t and s:\n\n # count char frequency for t\n frequency_t = Counter()\n for char in t:\n frequency_t[char] += 1\n\n # get number of substrings in s with length same as t\n length_t = len(t)\n n_substrings_s = len(s) - length_t + 1\n\n # loop through substrings in s\n for i in range(n_substrings_s):\n # define substring\n substring_s = s[i:length_t+i]\n # count char frequency for s substring\n frequency_subs = Counter()\n for char in substring_s:\n frequency_subs[char]+=1\n # compare frequency. \n # break any time the frequency of a char does not match\n found = True\n for char, count in frequency_t.items():\n if frequency_subs[char] != count:\n found = False\n break\n # return True if all true\n if found:\n return True\n\n return False", "def letter_check(read):\n string=\"ACTG\"\n for line_number,line in enumerate(read):\n sequence=line.rstrip()\n if any(x not in string for x in sequence):\n return 0\n return 1", "def any_lowercase4(s):\n flag = False\n for c in s:\n flag = flag or c.islower()\n return flag", "def any_lowercase4(s):\n flag = False\n for c in s:\n flag = flag or c.islower()\n return flag", "def any_lowercase1(s):\n for c in s:\n if c.islower():\n return True\n else:\n return False", "def any_lowercase1(s):\n for c in s:\n if c.islower():\n return True\n else:\n return False", "def is_pandigital_str(s):\n tot = 0\n zer = ord('0')\n for c in [c for c in s if c.isdigit()]:\n tot |= (1<<(ord(c) - zer))\n return tot == (1<<10)-2", "def iso_dfa(D1,D2):\n assert(is_consistent_dfa(D1)), \"Inconsist. DFA1 in iso_dfa\"\n assert(is_consistent_dfa(D2)), \"Inconsist. 
DFA2 in iso_dfa\"\n return (len(D1[\"Q\"]) == len(D2[\"Q\"]) and\n langeq_dfa(D1, D2))", "def parentheses_are_uneven(input_string):\n pcounter = 0\n for char in input_string:\n if char == '(':\n pcounter += 1\n elif char == ')':\n pcounter -= 1\n if pcounter != 0:\n return False\n else:\n return True", "def prog_sent(text):\n\n patterns = [r'\\b(?i)'+'plan'+r'\\b',\n r'\\b(?i)'+'programme'+r'\\b',\n r'\\b(?i)'+'scheme'+r'\\b',\n r'\\b(?i)'+'campaign'+r'\\b',\n r'\\b(?i)'+'initiative'+r'\\b',\n r'\\b(?i)'+'conference'+r'\\b',\n r'\\b(?i)'+'agreement'+r'\\b',\n r'\\b(?i)'+'alliance'+r'\\b']\n\n output = []\n flag = 0\n\n for pat in patterns:\n if re.search(pat, text) != None:\n flag = 1\n\n break\n\n return flag" ]
[ "0.6033226", "0.5742265", "0.5733216", "0.56888366", "0.56711626", "0.56197566", "0.5588654", "0.5418547", "0.5407468", "0.5398522", "0.5388671", "0.5385906", "0.5385906", "0.5385906", "0.5385906", "0.53602785", "0.53272057", "0.52750105", "0.5264539", "0.5249218", "0.52439296", "0.5236709", "0.5228694", "0.5228694", "0.52264583", "0.52264583", "0.5200331", "0.51934016", "0.51800895", "0.5169792" ]
0.63068765
0
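The task_4 document and the follow_dfa negative imply a node shape of {"final": bool, "edges": [{"symbol": ..., "node": ...}]}. A tiny self-contained check in that shape (the two-state example DFA itself is hypothetical; it accepts strings over {a, b} ending in b) prints 1 for accepted strings and 0 otherwise:

# hypothetical two-state DFA encoded in the node shape follow_dfa() expects
q0 = {"final": False, "edges": []}
q1 = {"final": True, "edges": []}
q0["edges"] = [{"symbol": "a", "node": q0}, {"symbol": "b", "node": q1}]
q1["edges"] = [{"symbol": "a", "node": q0}, {"symbol": "b", "node": q1}]

def accepts(state, string):
    # iterative equivalent of the recursive follow_dfa negative above
    for ch in string:
        state = next(e["node"] for e in state["edges"] if e["symbol"] == ch)
    return state["final"]

for s in ["ab", "ba", ""]:
    print("1" if accepts(q0, s) else "0")  # -> 1, 0, 0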
get access token from wxapi. this is the second step to log in with wechat, after the client gets the code
def get_access_token(self, code):
    url = get_config("login.wechat.access_token_url") % code
    r = self._access_wxapi_or_raise(url)
    return (r["access_token"], r["openid"])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_token(self, code):\n\n # live need post a form to get token\n headers = {'Content-type': 'application/x-www-form-urlencoded'}\n data = {\n 'client_id': get_config('login.live.client_id'),\n 'client_secret': get_config('login.live.client_secret'),\n 'redirect_uri': get_config('login.live.redirect_uri'),\n 'grant_type': 'authorization_code',\n 'code': code\n }\n # Following is use urllib to post request\n url = get_config('login.live.access_token_url')\n r = requests.post(url, data=data, headers=headers)\n resp = r.json()\n\n if resp.get(\"error\") is not None:\n raise Exception(resp)\n\n return resp[\"access_token\"]", "def get_token(): \n \n # Token url\n token_endpoint = \"https://api.signicat.io/oauth/connect/token\"\n # Setting the grant type to client_credentials\n data = {'grant_type':'client_credentials', 'scope':'identify'}\n # Posting to token url with HTTP basic authentication\n token = requests.post(token_endpoint, data=data,allow_redirects=True, auth=(config.CLIENT_ID, config.CLIENT_SECRET))\n # Converting json string to json\n token_json = json.loads(token.text)\n \n # Returning the access_token\n return token_json['access_token']", "def getAccessToken():\n print(\"Getting access token...\")\n request = \"https://id.twitch.tv/oauth2/token?client_id=\" + client_id + \"&client_secret=\" + client_secret + \"&grant_type=client_credentials\"\n response = requests.post(request)\n try:\n response.raise_for_status() # Check status code\n jsonResponse = response.json()\n access_token = jsonResponse.get(\"access_token\")\n print(\"Got access token:\", access_token)\n return access_token\n except requests.exceptions.HTTPError as e:\n print(\"Failed on getAccessToken\")\n print(e)", "def get_token(self):\n response = self.client.post(\n url_for('auth.login'),\n data=json.dumps({'username': '[email protected]', 'password': 'denno'}),\n headers={'content_type': 'application/json'})\n return json.loads(response.data)['token']", "async def _fetch_access_token(session: ClientSession) -> dict:\n LOGGER.debug('fetching access token...')\n password = config.get('WFWX_SECRET')\n user = config.get('WFWX_USER')\n auth_url = config.get('WFWX_AUTH_URL')\n async with session.get(auth_url, auth=BasicAuth(login=user, password=password)) as response:\n return await response.json()", "def auth_token(self):", "def get_token(self, legs=2):\n if legs == 2:\n\n headers = {}\n\n headers.update({ 'Content-Type' : 'application/x-www-form-urlencoded' })\n\n data = {}\n\n data.update({'client_id' : self.clientId})\n data.update({'client_secret' : self.clientSecret})\n data.update({'grant_type' : 'client_credentials'})\n data.update({'scope' : self.scopes})\n\n resp = self.http.post(self.webAddress, headers=headers, data=data)\n\n if resp.status_code == 200:\n cont = resp.json()\n return (cont['access_token'], cont['expires_in'])\n\n raise ConnectionError(\"Request failed with code {}\".format(resp.status_code) +\n \" and message : {}\".format(resp.content) +\n \" during authentication.\")\n else:\n raise NotImplementedError(\"3-legged authentication has not been implemented.\")", "def get_token(self):\n client_auth = requests.auth.HTTPBasicAuth(self.client, self.secret)\n post_data = {'grant_type': 'password', 'username': self.user, 'password': self.password}\n headers = {'User-Agent': self.user_agent}\n response = requests.Session()\n response2 = response.post(self.token_url, auth=client_auth, data=post_data, headers=headers)\n self.token = response2.json()['access_token']\n self.t_type = 
response2.json()['token_type']", "def get_token(self):\n client_auth = requests.auth.HTTPBasicAuth(self.client, self.secret)\n post_data = {'grant_type': 'password', 'username': self.user, 'password': self.password}\n headers = {'User-Agent': self.user_agent}\n response = requests.Session()\n response2 = response.post(self.token_url, auth=client_auth, data=post_data, headers=headers)\n self.token = response2.json()['access_token']\n self.t_type = response2.json()['token_type']", "def getToken(self):\n \n data = '''\n {\n \"auth\": \n {\n \"username\" : \"%s\",\n \"password\" : \"%s\"\n }\n }\n ''' % (self.username, self.password)\n \n headers = {\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'Host': 'api.appnexus.com'\n }\n r = requests.post(self.auth_url, data=data, \n headers=headers)\n ac_data = r.json()\n \n if ac_data['response']['status'] != 'OK':\n self.stream_logger.error('Error while retrieving access token')\n self.stream_logger.error('Status code {0}'\\\n .format(ac_data['response']['status']))\n return False\n\n return ac_data['response']['token']", "def get_auth_token():\n headers = {\n 'Content-Type': 'text/plain;charset=UTF-8', }\n data = '{ \\\n \"auth\": { \\\n \"identity\": { \\\n \"methods\": [ \\\n \"password\" \\\n ], \\\n \"password\": { \\\n \"user\": { \\\n \"name\": \"zheng_zhao\", \\\n \"password\": \"ZhaoZheng0426\", \\\n \"domain\": { \\\n \"name\": \"hwstaff_y00465251\" \\\n } \\\n } \\\n } \\\n }, \\\n \"scope\": { \\\n \"project\": { \\\n \"id\": \"454add6b26d04f53ae5c593551acf1ff\" \\\n } \\\n } \\\n } \\\n }'\n\n r = requests.post('https://iam.cn-north-1.myhuaweicloud.com/v3/auth/tokens',\n headers=headers, data=data)\n\n # print(r.status_code)\n # print(r.headers)\n token = r.headers.get('X-Subject-Token')\n\n return token", "def get_token(self):\n message = {\n \"request\": \"access_token\",\n \"account\": self.account,\n \"min_valid_period\": self.validity,\n \"application_hint\": \"orpy\",\n }\n try:\n self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n self._sock.connect(self.socket_path)\n self._sock.sendall(json.dumps(message).encode())\n\n data = \"\"\n while True:\n recv = self._sock.recv(16).decode()\n if recv:\n data += recv\n else:\n break\n except socket.error as err:\n raise exceptions.AuthExceptiob(\n err=\"Cannot communicate with the \" \"oidc-agent: %s\" % err\n )\n finally:\n self._sock.close()\n\n token = json.loads(data)\n if token.get(\"status\") == \"failure\":\n raise exceptions.AuthError(err=token.get(\"error\"))\n return token", "def _GetAccessToken(self):\n\n # Encoding client authorization \n pair = \"{client_key}:{client_secret}\".format(client_key=self.client_key, client_secret=self.client_secret)\n authorization = 'MUthRmpVa1JUaVlxbDVUTElUYVFnOlRENmpYMTdGbmhPSzNodWdqWUZqVDU0YzVjWGNQeko3'\n\n # Getting the access token\n access_token_headers = { \"Authorization\": \"Basic {authorization}\".format(authorization=authorization) }\n request_endpoint = \"/oauth/token?grant_type=authorization_code&code={code}&redirect_uri=https://80a3bb863001.ngrok.io\".format(code=self.code)\n print(request_endpoint)\n self.conn.request(\"POST\", request_endpoint, headers=access_token_headers)\n res = self.conn.getresponse()\n response = json.loads(res.read().decode(\"utf-8\"))\n\n try:\n return response[\"access_token\"]\n except KeyError:\n print(\"Request for access token failed for the following reason: {reason}\".format(reason=response[\"reason\"]))", "def get_auth_token():\n \n form_fields = {\n \"client_id\": 
client_id,\n \"client_secret\":client_secret,\n \"code\": code,\n \"redirect_uri\": \"http://www.stackprinter.com\"\n }\n form_data = urllib.urlencode(form_fields)\n results = __gae_fetch(url = 'https://stackexchange.com/oauth/access_token',\n method = urlfetch.POST, \n payload = form_data,\n headers={'Content-Type': 'application/x-www-form-urlencoded'})\n response = results.content\n return response", "def get_token():\n return session.get('microsoft_token')", "def get_token():\n return session.get('microsoft_token')", "def GetToken(self):\n if self.auth_token_:\n return self.auth_token_\n raise RuntimeError('ClientLoginAuthPolicy is not logged in.')", "def login():\n tree = xml.parse('credentials.xml')\n root = tree.getroot()\n apikey = root.find('apikey').text\n userkey = root.find('userkey').text\n username = root.find('username').text\n url = 'https://api.thetvdb.com/login'\n headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}\n auth = {\"apikey\": apikey, \"userkey\": userkey, \"username\": username}\n r = requests.post(url, headers=headers, data=json.dumps(auth))\n json_data = json.loads(r.text)\n token = json_data.get('token')\n return token", "def get_token(self):\n\t\tself.client.post('/api/v1/auth/signup', data=json.dumps(self.signup_user), content_type='application/json')\n\t\tresponse = self.client.post('/api/v1/auth/login', data=json.dumps(self.login_user), content_type='application/json')\n\t\tresp = json.loads(response.data.decode())\n\t\treturn 'Bearer ' + resp['access_token']", "def get_token(self, request_data):\n data = {\n \"grant_type\": \"password\",\n \"client_id\": CLIENT_ID,\n \"client_secret\": CLIENT_SECRET,\n \"username\": request_data.get(\"username\"),\n \"password\": request_data.get(\"password\"),\n }\n\n # create keycloak uri for token login\n url = URI + REALM_PREFIX + REALM + AUTH_ENDPOINT\n\n response = requests.post(url, data=data)\n\n # handle error if its anything more than a 200 as a 200 response is the\n # only expected response\n if response.status_code != 200:\n raise AppException.KeyCloakAdminException(\n context={\"message\": \"Error in username or password\"},\n status_code=response.status_code,\n )\n\n tokens_data = response.json()\n result = {\n \"access_token\": tokens_data[\"access_token\"],\n \"refresh_token\": tokens_data[\"refresh_token\"],\n }\n\n return result", "def get_token(base_url, creds):\n client_id = creds[2].strip()\n client_secret = creds[3].strip()\n\n tok_post = {'client_id':client_id, 'client_secret': client_secret, 'grant_type':'client_credentials'}\n resp = requests.post(base_url + '/identity/connect/token', data=tok_post)\n return resp.json()['access_token']", "def _fetch_access_token(self, url, data):\n logger.info('Fetching component access token')\n res = self._http.post(\n url=url,\n data=data\n )\n try:\n res.raise_for_status()\n except requests.RequestException as reqe:\n raise WeChatClientException(\n errcode=None,\n errmsg=None,\n client=self,\n request=reqe.request,\n response=reqe.response\n )\n result = res.json()\n if 'errcode' in result and result['errcode'] != 0:\n raise WeChatClientException(\n result['errcode'],\n result['errmsg'],\n client=self,\n request=res.request,\n response=res\n )\n\n expires_in = 7200\n if 'expires_in' in result:\n expires_in = result['expires_in']\n self.session.set(\n 'component_access_token',\n result['component_access_token'],\n expires_in\n )\n self.expires_at = int(time.time()) + expires_in\n return result", "def getAccessToken(self):\r\n\r\n #lets 
see if we have an oauth code\r\n if self.oauthToken is None:\r\n self.oauthToken = self.createAccessToken\r\n\r\n if self.oauthToken.isExpired(): #check to see if its expired if so refresh it\r\n self.oauthToken = self.refreshAccessToken()\r\n\r\n return self.oauthToken #return out access token\r", "def apply_auth():\n\tclient = BaiduOpenApi()\n\tapi = client.device.code\n\tresp = client.device.code.get(response_type=\"device_code\", scope=\"netdisk\")\n\t# open grant page and wait for user confirm\n\twebbrowser.open_new_tab(r\"http://openapi.baidu.com/device?code=%s\"%resp[\"user_code\"])\n\t# yield to main\n\tyield\n\t# main will tell user to confirm and it will take a while\n\t# polling to wait server back\n\tpolling_tokens(resp[\"device_code\"], resp[\"interval\"], resp[\"expires_in\"])", "def _login_token(self):\n data = {\n 'cmd': 'login',\n 'login': self.username,\n 'password': self.password,\n }\n \n token = self.helper._post_request(\n self.basename,\n self.basic_auth,\n data, \n self.headers)\n\n if token.status_code == 200:\n xml_response = BeautifulSoup(token.content, 'lxml')\n self.token = xml_response.find('token').get_text()\n self.cookies = token.cookies.get_dict()\n else:\n raise Exception('[FAIL] Could not login to OpenVAS')", "def get_token(request):\n capability = TwilioCapability(\n settings.TWILIO_ACCOUNT_SID,\n settings.TWILIO_AUTH_TOKEN)\n \"\"\"Allow our users to make outgoing calls with Twilio Client\"\"\"\n capability.allow_client_outgoing(settings.TWIML_APPLICATION_SID)\n\n \"\"\"Allow our users to accept incoming calls from pyphon\"\"\"\n capability.allow_client_incoming('caller')\n\n \"\"\"Generate the capability token\"\"\"\n token = capability.generate()\n\n return JsonResponse({'token': token})", "async def token(request: Request):\n return get_token()", "async def solicit_token(url, scope):\n rc = RestClient(url, \"\")\n result = await rc.request(\"GET\", f\"/token?scope={scope}\")\n print(result[\"access\"])", "def step_impl(context):\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['auth']['client_id'],\n 'redirect_uri': context.vendor_config['auth']['redirect_uri'],\n }\n\n context.response = token_request(fields,\n context.vendor_config['auth'],\n context.conformance)", "def get_api_token(self, app, user, pwd):\n authorization = ('Basic ' + base64.b64encode(user + \":\" + pwd))\n api_token_resp = app.post('/v1/api_token', headers={'Authorization': authorization})\n if api_token_resp.status != '200 OK':\n raise ValueError(api_token_resp.status)\n api_token = json.loads(api_token_resp.data)['api_token']\n return api_token" ]
[ "0.6793707", "0.67241335", "0.6619643", "0.6482753", "0.64813423", "0.6452798", "0.6423836", "0.64193034", "0.64193034", "0.63922495", "0.63754576", "0.6291259", "0.6257184", "0.62232846", "0.6198705", "0.6198705", "0.6197053", "0.61913085", "0.6176826", "0.61711794", "0.61510247", "0.61336744", "0.6131928", "0.6131904", "0.61188257", "0.6110827", "0.61046726", "0.60964227", "0.6095353", "0.60842836" ]
0.7249641
0
Get user info from wxapi; this is the final step to log in with WeChat.
def get_user_info(self, access_token, openid):
    url = get_config("login.wechat.user_info_url") % (access_token, openid)
    return self._access_wxapi_or_raise(url)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def user_login():\n \n data = user_obj.user_login()\n return data", "def login(self):", "def login():", "def login():", "def log_in(self, ctx: Context):\n email = json.loads(ctx.users)['username']\n password = json.loads(ctx.users)['password']\n InputFunctions.send_keys_to_element_by_name(\n self.ctx, self.locators, \"email_input\", email\n )\n InputFunctions.send_keys_to_element_by_name(\n self.ctx, self.locators, \"password_input\", password\n )\n ClickFunctions.click_element_by_name(ctx, self.locators, \"login_button\")\n ClickFunctions.click_element_by_name(ctx, self.locators, \"back_to_content\")", "def do_login(self, backend, user):", "def login(self):\n # Weibo chinese mainland version\n login_url = 'https://weibo.com/cn'\n \n # Create a Selenium Webdriver for advanced scrapping\n # with headless option preconfigured during class instantiation.\n driver = create_webdriver(headless=self.headless)\n\n # Retrieve URL and render.\n driver.get(login_url)\n username_field_selector = '#loginname'\n password_field_selector = '#pl_login_form > div > div:nth-child(3) > div.info_list.password > div > input'\n login_btn_selector = '#pl_login_form > div > div:nth-child(3) > div.info_list.login_btn > a'\n self.render_elements(driver, [\n username_field_selector,\n password_field_selector,\n login_btn_selector])\n wait_between(2.0, 3.0)\n print('DEBUG: Web page successfully rendered:)')\n\n # Fill in the login form and submit.\n driver.find_element_by_css_selector(\n username_field_selector).send_keys(self.username)\n wait_between(1.5, 2.0)\n driver.find_element_by_css_selector(\n password_field_selector).send_keys(self.password)\n wait_between(2.0, 3.5)\n driver.find_element_by_css_selector(login_btn_selector).click()\n print('DEBUG: User info entered, proceeding log in.')\n\n # Perform user account validation.\n sms_code_btn_selector = '#message_sms_login'\n WebDriverWait(driver, self.DEFAULT_TIMEOUT).until(EC.presence_of_element_located(\n (By.CSS_SELECTOR, sms_code_btn_selector)\n ))\n wait_between(1.0, 2.0)\n driver.find_element_by_css_selector(sms_code_btn_selector).click()\n sms_code_confirm_btn_selector = '#message_confirm'\n WebDriverWait(driver, self.DEFAULT_TIMEOUT).until(EC.presence_of_element_located(\n (By.CSS_SELECTOR, sms_code_confirm_btn_selector)\n ))\n sms_code = input(\"Please enter a string:\\n\")\n \n for i in range(6):\n sms_code_block = driver.find_element_by_css_selector(f'#message_content > div > div.num.clearfix > input[type=text]:nth-child({i + 1})')\n sms_code_block.send_keys(sms_code[i])\n wait_between(0.0, 0.5)\n driver.find_element_by_css_selector(sms_code_confirm_btn_selector).click()", "def get_user_info(self, token, openid, client_id):\n\n url = get_config(\"login.qq.user_info_url\") % (token, client_id, openid)\n user_info_resp = get_remote(url)\n user_info = convert(json.loads(user_info_resp))\n\n if user_info.get(\"ret\") != 0:\n raise Exception(user_info)\n\n return user_info", "def user_info(self):\n return self.auth.get_user_by_session()", "def login_bot(self):\n pass", "def get_user_details(client):\n\n try:\n return client.user(user_id='me').get(fields=['login'])\n # print(f\"The email of the user is: {me['login']}\")\n\n except Exception as e:\n print(f\"Error has occurred: {e}\")\n return None", "def _logon(self):\n\n # Lazy operation\n if self.__token and self.__userid:\n return (self.__token, self.__userid)\n\n # Parameter checking\n if not self.__username or not self.__apikey:\n raise Exception(\"Invalid username or API key\")\n\n # Build 
request\n request = '<request><user><email>%s</email>\\\n <password>%s</password></user><api-key>%s</api-key>\\\n </request>' % (self.__username, self.__password, self.__apikey)\n\n headers = { \"Accept\":\"application/xml\",\n \"Content-Type\":\"application/xml\" }\n self.__conn.request(\"POST\", \"/users/token\", request, headers) \n response = self.__conn.getresponse()\n\n data = response.read()\n\n if response.status != 200:\n raise Exception(\"Server returned error: %s)\" % data)\n\n result = ET.fromstring(data)\n self.__token = result.findtext(\"access-token\")\n self.__userid = result.findtext(\"user-id\")\n\n return (self.__token, self.__userid)", "def get_remote_user(request):\n\n if settings.DEBUG:\n logger.debug(\"Getting Remote User\")\n me = {}\n\n me['url'] = settings.OAUTH_TEST_INFO['BASE']\n\n me['ask'] = \"/api/v1/me\" + \"?_format=json\"\n\n\n me = fhir_request(request, me)\n logger.debug(\"me...\", me)\n if 'errors' and 'code' in me:\n msg = build_message(request,me['errors'])\n return kickout(msg, me['code'])\n\n return me", "def on_login(self, username):", "def on_login(self, username):", "def login_user():\n pass", "def login():\n tree = xml.parse('credentials.xml')\n root = tree.getroot()\n apikey = root.find('apikey').text\n userkey = root.find('userkey').text\n username = root.find('username').text\n url = 'https://api.thetvdb.com/login'\n headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}\n auth = {\"apikey\": apikey, \"userkey\": userkey, \"username\": username}\n r = requests.post(url, headers=headers, data=json.dumps(auth))\n json_data = json.loads(r.text)\n token = json_data.get('token')\n return token", "def user_data(self, access_token, *args, **kwargs):\n data = {'method': 'users.getInfo', 'session_key': access_token}\n return mailru_api(data)[0]", "def user():\n\treturn request.authorization.username if zk.get_http_login() else zk.get_username()", "def logindsqw():\n data = request.get_json()\n email = data.get('login')\n password = data.get('pwrd')\n user = SQLModel.get_by_attrs(('login', 'pwrdHash', 'type', 'name'), 'users', 'login', login)\n try:\n user_login = user[0][0]\n user_pw = user[0][1]\n user_type = user[0][2]\n user_name = user[0][3]\n if sha256_crypt.verify(password, user_pw):\n return jsonify(result=user_login, type=user_type, name=user_name)\n else:\n return jsonify(result='fail')\n except:\n return jsonify(result='fail')", "def init_login():\n print(\"init_login\")\n # get one user\n users = uis.get_all()\n # print(user)\n for user in users:\n if user:\n mobile = Mobile(user[2])\n mobile.android_id = user[11]\n mobile.mac = user[10]\n mobile.brand = user[9]\n mobile.os = user[8]\n ktt = KTT(mobile)\n ktt.device_code = user[6]\n ktt.get_api_start()\n time.sleep(5)\n ktt.post_login()\n\n # balance (string), coin (int), token (string), device_code(string), uid (int)\n user_info = [(user[4], user[5], ktt.token, ktt.device_code, user[0])]\n # update user info\n print(user_info)\n uis.update(user_info)\n time.sleep(10)", "def hbtn_api_user(hbtn_auth_token):\n url = 'https://intranet.hbtn.io/users/me.json'\n resp = requests.get(url, params={'auth_token': hbtn_auth_token})\n return resp.json()", "def get_user_info(self, session, apikey):\n if self.is_login(session, apikey):\n session = sessions.first(session=session)\n if session is not None:\n users.find()\n user_info = users.get(session.user_id)\n del user_info.password\n return user_info\n return None", "def get_user():\n\treturn '1', 200", "def login(self):\n url = 
self.base_url + \"/api/login\"\n creds = {'username': credentials.api['username'],\n 'password': credentials.api['password']}\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n resp = self.session.post(url, creds, verify=False)\n\n return json.loads(resp.text)['_global_result']['UIDARUBA']", "def slack_login():\n if not slack.authorized:\n flash('Access denied to Slack', 'danger')\n return redirect(url_for(\"auth.login\", local=1))\n # Get remote user data\n resp = slack.get(\"https://slack.com/api/users.identity\")\n if not resp.ok:\n flash('Unable to access Slack data', 'danger')\n return redirect(url_for(\"auth.login\", local=1))\n resp_data = resp.json()\n if 'user' not in resp_data:\n flash('Invalid Slack data format', 'danger')\n # print(resp_data)\n return redirect(url_for(\"auth.login\", local=1))\n resp_user = resp_data['user']\n return get_or_create_sso_user(\n resp_user['id'],\n resp_user['name'],\n resp_user['email'],\n )", "def check_user_and_login(self) -> Response:\n pass", "def getUserInfo(data):\n\tusername = data[\"session_username\"]\n\tuser = Users.objects.filter(username=username).first()\n\n\tresponse = {}\n\n\tif not user:\n\t\treturn {\"Success\": False, \"Error\": \"Unable to retrieve the user information from database\"}\n\n\tresponse[\"Success\"] = True\n\tresponse[\"Username\"] = user.username\n\tresponse[\"Email\"] = user.email\n\tresponse[\"Verified\"] = user.verified\n\tresponse[\"Level\"] = user.level\n\tresponse[\"Experience\"] = user.experience\n\tresponse[\"Coins\"] = user.coins\n\tresponse[\"Preferences\"] = {\"Grid Opacity\": user.pref_grid}\n\n\treturn response", "async def get_api(request):\n data = await request.json()\n login = data['login'] # Unpack data\n async with request.app['db'].acquire() as conn: # Looking for the row in the table\n query = db.users.select().where(db.users.c.login.contains(login.title()))\n result = await conn.fetch(query)\n if len(result) > 0: # If row is found view returns error\n return web.json_response({'status':'error','message':'The login is busy'})\n else: # Else inserting new user to database\n token = md5(login.title().encode()).hexdigest()\n query = db.users.insert().values(\n login=login.title(), \n token=token,)\n await conn.fetch(query)\n return web.json_response({'status':'success','token':token})", "def GetUserInfo(self):\n user = users.get_current_user()\n user_info = GetInfoForUser(user)\n if user:\n # Check to see if the user has auxiliary info for Swyzl, and if not\n # then create it.\n if not user_info:\n user_info = models.UserInfo()\n user_info.user = user\n user_info.put()\n\n url = users.create_logout_url(self.request.uri)\n url_link_text = 'Logout'\n else:\n url = users.create_login_url(self.request.uri)\n url_link_text = 'Login'\n return (user, url, url_link_text)" ]
[ "0.6570302", "0.6333091", "0.62763906", "0.62763906", "0.6231804", "0.6212389", "0.620589", "0.6194151", "0.612593", "0.6112595", "0.61102265", "0.6109876", "0.60988086", "0.6087747", "0.6087747", "0.606966", "0.6067771", "0.6058907", "0.60572743", "0.6048634", "0.60352176", "0.6031352", "0.5981476", "0.5940422", "0.5924038", "0.5919712", "0.59187335", "0.5913864", "0.59035224", "0.58801305" ]
0.65810204
0
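For orientation, a minimal runnable sketch of the get_user_info document above — the helper body and the URL template are assumptions (the real template comes from get_config("login.wechat.user_info_url")); WeChat endpoints report failure via an errcode field:

import json
import urllib.request

# Assumed template; the dataset row reads it from configuration instead.
WECHAT_USER_INFO_URL = "https://api.weixin.qq.com/sns/userinfo?access_token=%s&openid=%s"

def _access_wxapi_or_raise(url):
    # Hypothetical reconstruction of the helper: fetch JSON, raise on errcode.
    resp = json.loads(urllib.request.urlopen(url).read().decode("utf-8"))
    if resp.get("errcode"):
        raise Exception(resp)
    return resp

def get_user_info(access_token, openid):
    return _access_wxapi_or_raise(WECHAT_USER_INFO_URL % (access_token, openid))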
Get QQ access token
def get_token(self, code, redirect_uri):
    token_resp = get_remote(get_config("login.qq.access_token_url") % (redirect_uri, code))
    if token_resp.find('callback') == 0:
        error = json.loads(token_resp[10:-4])
        raise Exception(error)
    query = qs_dict(token_resp)
    return query["access_token"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_access_token(self, request) -> str or Exception:\n pass", "def access_token(self):\n return self.access_token_str", "def getAccessToken(self):\r\n\r\n #lets see if we have an oauth code\r\n if self.oauthToken is None:\r\n self.oauthToken = self.createAccessToken\r\n\r\n if self.oauthToken.isExpired(): #check to see if its expired if so refresh it\r\n self.oauthToken = self.refreshAccessToken()\r\n\r\n return self.oauthToken #return out access token\r", "def auth_token(self):", "def get_token(self): # pragma: no cover\n\t\treturn (session.get(\"access_token\"), \"\")", "def get_token(self):\n oauth_provider = UserSocialAuth.objects.get(provider='drchrono')\n access_token = oauth_provider.extra_data['access_token']\n return access_token", "def _request_token(self):\n params = {\n 'grant_type': 'client_credentials',\n 'client_id': self.client_id,\n 'client_secret': self.client_secret\n }\n\n response = self._http_request(\n method='POST',\n headers={'Content-Type': 'application/x-www-form-urlencoded'},\n full_url=self.auth_url,\n data=params\n )\n access_token = response.get('access_token')\n auth_header = {'Authorization': f'Bearer {access_token}'}\n return auth_header", "def get_auth_token():\n headers = {\n 'Content-Type': 'text/plain;charset=UTF-8', }\n data = '{ \\\n \"auth\": { \\\n \"identity\": { \\\n \"methods\": [ \\\n \"password\" \\\n ], \\\n \"password\": { \\\n \"user\": { \\\n \"name\": \"zheng_zhao\", \\\n \"password\": \"ZhaoZheng0426\", \\\n \"domain\": { \\\n \"name\": \"hwstaff_y00465251\" \\\n } \\\n } \\\n } \\\n }, \\\n \"scope\": { \\\n \"project\": { \\\n \"id\": \"454add6b26d04f53ae5c593551acf1ff\" \\\n } \\\n } \\\n } \\\n }'\n\n r = requests.post('https://iam.cn-north-1.myhuaweicloud.com/v3/auth/tokens',\n headers=headers, data=data)\n\n # print(r.status_code)\n # print(r.headers)\n token = r.headers.get('X-Subject-Token')\n\n return token", "def getToken(self):\n \n data = '''\n {\n \"auth\": \n {\n \"username\" : \"%s\",\n \"password\" : \"%s\"\n }\n }\n ''' % (self.username, self.password)\n \n headers = {\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'Host': 'api.appnexus.com'\n }\n r = requests.post(self.auth_url, data=data, \n headers=headers)\n ac_data = r.json()\n \n if ac_data['response']['status'] != 'OK':\n self.stream_logger.error('Error while retrieving access token')\n self.stream_logger.error('Status code {0}'\\\n .format(ac_data['response']['status']))\n return False\n\n return ac_data['response']['token']", "def get_token(): \n \n # Token url\n token_endpoint = \"https://api.signicat.io/oauth/connect/token\"\n # Setting the grant type to client_credentials\n data = {'grant_type':'client_credentials', 'scope':'identify'}\n # Posting to token url with HTTP basic authentication\n token = requests.post(token_endpoint, data=data,allow_redirects=True, auth=(config.CLIENT_ID, config.CLIENT_SECRET))\n # Converting json string to json\n token_json = json.loads(token.text)\n \n # Returning the access_token\n return token_json['access_token']", "def get_token(self):\n message = {\n \"request\": \"access_token\",\n \"account\": self.account,\n \"min_valid_period\": self.validity,\n \"application_hint\": \"orpy\",\n }\n try:\n self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n self._sock.connect(self.socket_path)\n self._sock.sendall(json.dumps(message).encode())\n\n data = \"\"\n while True:\n recv = self._sock.recv(16).decode()\n if recv:\n data += recv\n else:\n break\n except socket.error as err:\n raise 
exceptions.AuthExceptiob(\n err=\"Cannot communicate with the \" \"oidc-agent: %s\" % err\n )\n finally:\n self._sock.close()\n\n token = json.loads(data)\n if token.get(\"status\") == \"failure\":\n raise exceptions.AuthError(err=token.get(\"error\"))\n return token", "def _get_token(self):\n if self._access_token is None or self._is_expired():\n self._refresh_token()\n return self._access_token", "def _request_access_token(self):\n resp = requests.get(self.TOKEN_URL_FORMAT.format(\n self.creds().consumer_key(), self.creds().app_secret())\n )\n status = resp.status_code\n\n # If the token request fails, try to use the configured app id\n # and secret. This probably won't work, but the docs say that it\n # should. for more info, see:\n # https://developers.facebook.com/docs/facebook-login/access-tokens\n token = \"%s|%s\" % (self.creds().consumer_key(),\n self.creds().app_secret())\n if status == 200:\n token = resp.text.split('access_token=')[1]\n else:\n self.logger.error(\n \"Facebook token request failed with status %d\" % status\n )\n return token", "def getAccessToken():\n print(\"Getting access token...\")\n request = \"https://id.twitch.tv/oauth2/token?client_id=\" + client_id + \"&client_secret=\" + client_secret + \"&grant_type=client_credentials\"\n response = requests.post(request)\n try:\n response.raise_for_status() # Check status code\n jsonResponse = response.json()\n access_token = jsonResponse.get(\"access_token\")\n print(\"Got access token:\", access_token)\n return access_token\n except requests.exceptions.HTTPError as e:\n print(\"Failed on getAccessToken\")\n print(e)", "def _GetAccessToken(self):\n\n # Encoding client authorization \n pair = \"{client_key}:{client_secret}\".format(client_key=self.client_key, client_secret=self.client_secret)\n authorization = 'MUthRmpVa1JUaVlxbDVUTElUYVFnOlRENmpYMTdGbmhPSzNodWdqWUZqVDU0YzVjWGNQeko3'\n\n # Getting the access token\n access_token_headers = { \"Authorization\": \"Basic {authorization}\".format(authorization=authorization) }\n request_endpoint = \"/oauth/token?grant_type=authorization_code&code={code}&redirect_uri=https://80a3bb863001.ngrok.io\".format(code=self.code)\n print(request_endpoint)\n self.conn.request(\"POST\", request_endpoint, headers=access_token_headers)\n res = self.conn.getresponse()\n response = json.loads(res.read().decode(\"utf-8\"))\n\n try:\n return response[\"access_token\"]\n except KeyError:\n print(\"Request for access token failed for the following reason: {reason}\".format(reason=response[\"reason\"]))", "def get_token(self, code):\n\n # live need post a form to get token\n headers = {'Content-type': 'application/x-www-form-urlencoded'}\n data = {\n 'client_id': get_config('login.live.client_id'),\n 'client_secret': get_config('login.live.client_secret'),\n 'redirect_uri': get_config('login.live.redirect_uri'),\n 'grant_type': 'authorization_code',\n 'code': code\n }\n # Following is use urllib to post request\n url = get_config('login.live.access_token_url')\n r = requests.post(url, data=data, headers=headers)\n resp = r.json()\n\n if resp.get(\"error\") is not None:\n raise Exception(resp)\n\n return resp[\"access_token\"]", "def get_token():\n\theaders = {\n\t\t'Authorization': 'Basic ' + (base64.b64encode((client_id + ':' + client_secret).encode(\"utf-8\"))).decode(\"utf-8\")}\n\toptions = {\n\t\t'grant_type': 'client_credentials',\n\t\t'json': True,\n\t}\n\n\tresponse = requests.post(\n\t\t'https://accounts.spotify.com/api/token',\n\t\theaders=headers,\n\t\tdata=options\n\t)\n\tif 
response.status_code == 200:\n\t\tcontent = json.loads(response.content.decode('utf-8'))\n\t\taccess_token = content.get('access_token', None)\n\t\treturn access_token\n\telse:\n\t\treturn None", "async def solicit_token(url, scope):\n rc = RestClient(url, \"\")\n result = await rc.request(\"GET\", f\"/token?scope={scope}\")\n print(result[\"access\"])", "def get_info(self, token):\n\n openid_resp = get_remote(get_config(\"login.qq.openid_url\") + token)\n self.log.debug(\"get access_token from qq:\" + token)\n info = json.loads(openid_resp[10:-4])\n\n if info.get(\"error\") is not None:\n raise Exception(info)\n\n return info", "def get_access_token(self):\n access_token = self._auth_provider._get_auth_value()\n return access_token", "def get_token():\n token = getpass.getpass('Paste in your RDR API token and press Enter:')\n return {'Authorization': 'token ' + token}", "def get_token(self):\n client_auth = requests.auth.HTTPBasicAuth(self.client, self.secret)\n post_data = {'grant_type': 'password', 'username': self.user, 'password': self.password}\n headers = {'User-Agent': self.user_agent}\n response = requests.Session()\n response2 = response.post(self.token_url, auth=client_auth, data=post_data, headers=headers)\n self.token = response2.json()['access_token']\n self.t_type = response2.json()['token_type']", "def get_token(self):\n client_auth = requests.auth.HTTPBasicAuth(self.client, self.secret)\n post_data = {'grant_type': 'password', 'username': self.user, 'password': self.password}\n headers = {'User-Agent': self.user_agent}\n response = requests.Session()\n response2 = response.post(self.token_url, auth=client_auth, data=post_data, headers=headers)\n self.token = response2.json()['access_token']\n self.t_type = response2.json()['token_type']", "def get_auth_token():\n \n form_fields = {\n \"client_id\": client_id,\n \"client_secret\":client_secret,\n \"code\": code,\n \"redirect_uri\": \"http://www.stackprinter.com\"\n }\n form_data = urllib.urlencode(form_fields)\n results = __gae_fetch(url = 'https://stackexchange.com/oauth/access_token',\n method = urlfetch.POST, \n payload = form_data,\n headers={'Content-Type': 'application/x-www-form-urlencoded'})\n response = results.content\n return response", "def get_token(self):\n self.register_user(self.user_data)\n result = self.login_user(self.login_data)\n header_access_token = json.loads(result.data.decode())['header_access_token']\n return header_access_token", "def get_access_token(self):\n\n token_work = time.time() < self.expires\n\n if token_work:\n # No need update token\n return self.access_token\n\n data = {\n 'client_id': self.client_id,\n 'grant_type': 'implicit'\n }\n\n response = requests.post('https://api.moltin.com/oauth/access_token', data=data)\n raise_response_errors(response)\n\n response_json = response.json()\n\n self.access_token = response_json['access_token']\n self.expires = response_json['expires']\n\n logger.debug('elasticpathh access token was updated')\n\n return self.access_token", "def get_access_token():\n\n account = get_account()\n\n account.EnsureCredentials(dbus_interface=GOA_ACCOUNT)\n access_token, _ = account.GetAccessToken(dbus_interface=GOA_ACCOUNT_OAUTH2)\n return str(access_token)", "def get_token():\n return session.get('microsoft_token')", "def get_token():\n return session.get('microsoft_token')", "def access_token(self):\n return self._authentication.access_token" ]
[ "0.7010284", "0.6989711", "0.6904486", "0.68345237", "0.6801059", "0.67715067", "0.6744162", "0.67331374", "0.67299163", "0.66792107", "0.6677348", "0.66704553", "0.66563576", "0.6632723", "0.66103107", "0.66000867", "0.65999943", "0.65748245", "0.6555147", "0.6550575", "0.65333575", "0.65307254", "0.65307254", "0.6529155", "0.65282416", "0.6527709", "0.6522898", "0.65131974", "0.65131974", "0.6496352" ]
0.7126429
0
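A small runnable sketch of the parsing the QQ document above relies on. qs_dict is reconstructed here as an assumption: QQ's token endpoint answers with a form-encoded body on success, and with a JSONP-style callback( {...} ); wrapper on error — which is what the token_resp[10:-4] slice peels off:

from urllib.parse import parse_qsl

def qs_dict(body):
    # "access_token=ABC&expires_in=7776000&refresh_token=XYZ" -> dict
    return dict(parse_qsl(body))

ok = "access_token=ABC&expires_in=7776000&refresh_token=XYZ"
assert qs_dict(ok)["access_token"] == "ABC"

# Error case: the 10 leading characters ("callback( ") and the trailing
# wrapper are stripped before json.loads; this sample payload is illustrative.
err = 'callback( {"error": 100019, "error_description": "code to access token error"} );'
assert err.find('callback') == 0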
Generate 2 random numbers to add and get input as the addition answer; check if it is correct. If right, count down to getting 3 in a row right to end the program; if wrong, keep adding and restart the 3-in-a-row countdown.
def main():
    min_random = 10  # keeping constant for the min random number range
    max_random = 99  # keeping constant for the max random number range
    count = 0  # counter variable to keep track of the user's correct answers in a row
    while count != 3:  # this loop keeps going until the user gets 3 answers correct in a row
        num1 = random.randint(min_random, max_random)  # generating a random number for each new equation
        num2 = random.randint(min_random, max_random)
        print("What is " + str(num1) + "+" + str(num2) + "?")
        user_input = int(input("Your answer is: "))  # taking the user's input and converting it into an integer
        total = num1 + num2  # keeping track of the actual answer to compare with the user's response
        if user_input == total:
            count += 1  # extend the streak on a correct answer
        else:
            count = 0  # a wrong answer restarts the 3-in-a-row countdown
    print("Congratulations! You mastered addition.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n correct = 0\n\n while correct < GOAL:\n #random.seed(1)\n ##set up addition problem:\n num1 = random.randint(RAND_MIN, RAND_MAX)\n num2 = random.randint(RAND_MIN, RAND_MAX)\n ans = num1 + num2\n\n ##print and solve addition problem:\n print(\"What is \" + str(num1) + \" + \" + str(num2) + \"?\")\n attempt = int(input(\"Your answer: \"))\n if attempt == ans:\n correct += 1\n print(\"Correct! You've gotten \" + str(correct) + \" correct in a row.\")\n else:\n correct = 0\n print(\"Incorrect. The expected answer is \" + str(ans))\n\n print(\"Congratulations! You've mastered addition!\")", "def addition_of_two_random_numbers():\r\n user_wants_to_play = True\r\n while user_wants_to_play == True:\r\n print(input(\"Press enter to generate a problem!\"))\r\n\r\n # Generate two random numbers, format the addition problem, and print to screen.\r\n randomly_generated_number_one = random.randint(1, 500)\r\n randomly_generated_number_two = random.randint(1, 500)\r\n answer_to_problem = randomly_generated_number_one + randomly_generated_number_two\r\n print(\" \",randomly_generated_number_one, \"\\n+\", randomly_generated_number_two)\r\n print(\"------\")\r\n\r\n # This if block only executes if the user provided the correct answer to the problem.\r\n # Print statement saying user answer is correct. Ask user if they want to play again,\r\n # perform logic based on if they want to play again.\r\n users_answer_to_problem = answer_input_validation()\r\n if users_answer_to_problem == answer_to_problem:\r\n print(\"That is correct!\\n\")\r\n user_wants_another_problem = continue_playing_validation() \r\n if user_wants_another_problem == \"n\":\r\n user_wants_to_play = False\r\n print(\"Thank you for playing!\")\r\n elif user_wants_another_problem == \"y\":\r\n print(\"Okay, let's play another round!\\n\")\r\n\r\n # This block executes if user doesn't provide the right answer. Scold them and prompt\r\n # them to see if they'd like to play again. Perform logic either way.\r\n else:\r\n print(f\"That is not the correct answer. The right answer is {answer_to_problem}.\\n\")\r\n user_wants_another_problem = continue_playing_validation() \r\n if user_wants_another_problem == \"n\":\r\n user_wants_to_play = False\r\n print(\"Thank you for playing!\")\r\n elif user_wants_another_problem == \"y\":\r\n print(\"Okay, let's play another round!\\n\")", "def main():\n i = 1\n while i < 4:\n num1 = random.randint(10, 99)\n num2 = random.randint(10, 99)\n print(\"What is \" + str(num1) + \"+\" + str(num2) + \"?\")\n total = num1 + num2\n enter = int(input(\"Your answer: \"))\n if total == enter:\n print(\"Correct! You have got \" + str(i) + \" corrected in a row\")\n i = i + 1\n else:\n print(\"Incorrect. The expected answer is \" + str(total) )\n print(\"Congratulations! You mastered addition\")\n pass", "def addition(self):\r\n global answer\r\n while True:\r\n try:\r\n easy_random1 = int(random.choice(string.digits))\r\n easy_random2 = int(random.choice(string.digits))\r\n easy_random3 = int(random.choice(string.digits))\r\n easy_random4 = int(random.choice(string.digits))\r\n print(f\"{easy_random1} + {easy_random2} + {easy_random3} + {easy_random4} = ?\")\r\n real_answer = easy_random1 + easy_random2 + easy_random3 + easy_random4\r\n answer = input(\"Enter answer: \")\r\n if answer.lower() == \"stop\":\r\n print(\"okay\")\r\n break\r\n if int(answer) == real_answer:\r\n print(\"CORRECT ANSWER\")\r\n else:\r\n print(\"WRONG ANSWER\")\r\n print(f\"the answer is {real_answer} sorry! 
try again\")\r\n except ValueError:\r\n return f'\"{answer}\" is not a valid number, only the string stop is allowed'", "def addition_subtraction(self):\r\n global answer\r\n while True:\r\n try:\r\n easy_random1 = int(random.choice(string.digits))\r\n easy_random2 = int(random.choice(string.digits))\r\n easy_random3 = int(random.choice(string.digits))\r\n easy_random4 = int(random.choice(string.digits))\r\n print(f\"{easy_random1} + {easy_random2} - {easy_random3} + {easy_random4} = ?\")\r\n real_answer = easy_random1 + easy_random2 - easy_random3 + easy_random4\r\n answer = input(\"Enter answer: \")\r\n if answer.lower() == \"stop\":\r\n print(\"okay\")\r\n break\r\n if int(answer) == real_answer:\r\n print(\"CORRECT ANSWER\")\r\n else:\r\n print(\"WRONG ANSWER\")\r\n print(f\"the answer is {real_answer} sorry! try again\")\r\n except ValueError:\r\n return f'\"{answer}\" is not a valid number, only the string stop is allowed'", "def subtraction(self):\r\n global answer\r\n while True:\r\n try:\r\n easy_random1 = int(random.choice(string.digits))\r\n easy_random2 = int(random.choice(string.digits))\r\n easy_random3 = int(random.choice(string.digits))\r\n easy_random4 = int(random.choice(string.digits))\r\n print(f\"{easy_random1} - {easy_random2} - {easy_random3} - {easy_random4} = ?\")\r\n real_answer = easy_random1 - easy_random2 - easy_random3 - easy_random4\r\n answer = input(\"Enter answer: \")\r\n if answer.lower() == \"stop\":\r\n print(\"okay\")\r\n break\r\n if int(answer) == real_answer:\r\n print(\"CORRECT ANSWER\")\r\n else:\r\n print(\"WRONG ANSWER\")\r\n print(f\"the answer is {real_answer} sorry! try again\")\r\n except ValueError:\r\n return f'\"{answer}\" is not a valid number, only the string stop is allowed'", "def escoge_numero(a,b,count_clue):\n count = 0\n numero_aleatorio = random.randint(a,b)\n while count == 0:\n user_input = int(input(f'Ingresa un numero entre {a} y {b}'))\n while user_input.type() != int:\n user_input = input('Ingresa un numero entero entre {a} y {b}:\\n==> ') \n if user_input == numero_aleatorio:\n count += 1\n else:\n print('Numero incorrecto')\n while True:\n try:\n user_pista = input('Desear utilizar una pista Si(s) o No(n): {}').lower()\n while user_pista != 's' and user_input != 'n':\n useruser_pista_pista = input('Ingresa Si(s) o No (n):\\n==> ') \n raise Exception\n break\n except:\n print('Ingreso un valor erroneo')\n if user_pista == 's':\n if count_clue > 0:\n if numero_aleatorio - user_input > 0 and numero_aleatorio - user_input < 2:\n print('Esta muy cerca por arriba')\n count_clue -=1\n elif numero_aleatorio - user_input > 2 and numero_aleatorio - user_input > 5:\n print('Estas cerca por arriba')\n count_clue -=1 \n elif numero_aleatorio - user_input > 5:\n print('Estas muy lejos por arriba')\n count_clue -=1\n elif numero_aleatorio - user_input < 0 and numero_aleatorio - user_input > -2:\n print('Estas muy cerca por abajo')\n count_clue -=1\n elif numero_aleatorio - user_input < -2 and numero_aleatorio - user_input > -5:\n print('Estas cerca por abajo')\n count_clue -=1\n else:\n print('Estas muy lejos por abajo') \n count_clue -=1\n else:\n print('No tienes mas pistas')\n else:\n print('Vuelve a intentarlo')\n \n return True, count_clue", "def random_conditional_quiz_selector():\n\n go_again = True\n while go_again:\n number = random.randint(1, 2)\n if number == 1:\n begin_conditionalpr_are_ere_quiz()\n if number == 2:\n begin_conditionalpr_ire_quiz()\n again = input(\"Continue? 
Y/N\\n\").lower()\n if again == \"n\":\n go_again = False", "def random_future_quiz_selector():\n\n go_again = True\n while go_again:\n number = random.randint(1, 2)\n if number == 1:\n begin_future_are_ere_quiz()\n if number == 2:\n begin_present_ire_quiz()\n again = input(\"Continue? Y/N\\n\").lower()\n if again == \"n\":\n go_again = False", "def first_challenge():\n print(colored(\"Manuk dice: \", \"magenta\"))\n time.sleep(3)\n print(colored(\"Estas en un calabozo y hay 2 puertas... en la puerta 1 hay una bestia mortifera\", \"magenta\"))\n time.sleep(3)\n print(colored(\"Bien... me decias que tu sabiduria era muy valorada en tu pueblo, veamos si tomas buenas decisiones.\", \"magenta\"))\n time.sleep(4)\n print(colored(\"En la puerta 2 está la salida\", \"magenta\"))\n time.sleep(3)\n print(colored(\"Vamos a lanzar un dado...si tu dado es par, acertarás a la salida...\", \"magenta\"))\n time.sleep(3)\n print(colored(\"De lo contrario debes enfrentar a la bestia mortal...\", \"magenta\"))\n global gonna_roll\n player_action = ask_dice()\n gonna_roll = player_action\n dice_result = \"\"\n while gonna_roll:\n if player_action == True:\n dice = roll_dice()\n dice_number = is_even(dice)\n if is_even == True:\n print(colored(\"Manuk dice: \", \"magenta\"))\n print(colored(\"Bien,... has elegido la salida...\", \"magenta\"))\n gonna_roll = False\n dice_result = \"exit\"\n return dice_result\n else:\n \n print(colored(\"Manuk dice: \", \"magenta\"))\n time.sleep(3)\n print(colored(\"Oh no... \", \"magenta\"))\n time.sleep(3)\n print(colored(\"Me temo que deberás... enfrentar a la...\", \"magenta\"))\n time.sleep(3)\n print(colored(\"BESTIA...\", 'red'))\n time.sleep(3)\n gonna_roll = False\n dice_result = \"beast\"\n return dice_result", "def main():\r\n global user_pick, pickno, total\r\n test_total()\r\n sleep(delay)\r\n print(\"It is your turn!\")\r\n pickno = int(4)\r\n #Repeats the process as many times as we need\r\n while total >= 4:\r\n user_pick = int(input(\"How many balls do you want to get? (Up to 4)\"))\r\n test_remain\r\n test_pick()\r\n remain()\r\n cmp_logic()\r\n sleep(delay)\r\n print(\"You should pick \" + str(total))\r\n user_pick = int(input(\"How many balls do you want to get? (Up to 4)\"))\r\n test_remain()\r\n test_pick()\r\n remain()\r\n # Only way that USER WINS!!\r\n if int(total) == 0:\r\n sleep(delay)\r\n print(\"User WINS!\")\r\n exit()", "def game_number():\n \n total_guesses = 0 # Initializes total number of guesses as 0 when game starts\n rand_number = randint(1,20) # Creates a random number between 1 and 20\n print(\"\\nThe number you shall guess is between 1 and 20.\" \n \" You have 3 guesses.\")\n\n while total_guesses < 3: # Ensures user only recieves 3 attempts\n\n print(\"Enter your guess below.\") # Prompts user to enter guess\n\n # Notifies user which attempt they are on\n if total_guesses == 0:\n print(\"This is your first attempt. \\t\") \n if total_guesses == 1:\n print(\"This is your second attempt. \\t\") \n if total_guesses == 2:\n print(\"This is your final attempt. 
\\t\") \n \n # Assigns guess to be the input as well as an \n # integer value for guessing the random number\n guess = input() \n guess = int(guess)\n \n total_guesses = total_guesses + 1 # Tracks number of total guesses used\n\n # Helps user confine their guesses based on clues given by the game\n if guess < rand_number:\n print(\"\\nYour guess is below the value of the random number!\")\n if guess > rand_number:\n print(\"\\nYour guess is above the value of the random number!\")\n if guess == rand_number:\n correct_guess(total_guesses)\n if guess != rand_number and total_guesses == 3:\n incorrect_guess(rand_number)", "def determine_attempts():\r\n #Inputs: # of attempts requested by user\r\n #Outputs: game gives number of attempts user selected before ending \r\n how_many_tries = int(raw_input(\"How many attempts do you want to answer a blank correctly before the answer is provided to you? Please provide a number, such as 2.\\n\"))\r\n attempts = how_many_tries\r\n number_of_tries = 5\r\n while how_many_tries < 1:\r\n print \"Please try again.\"\r\n determine_attempts\r\n attempts = attempts + 1\r\n if attempts == number_of_tries:\r\n break \r\n else:\r\n print \"Please read the paragraph below and provide the answers to fill in the numbered blanks.\\nYou will be given \" + str(attempts) + \" chances to enter the correct answer before it is provided to you.\\n\"\r\n return how_many_tries", "def generate_question_and_answer(): # noqa: WPS210\n start_number = random.randint(1, 100)\n progression_step = random.randint(1, 10)\n progression_length = random.randint(5, 10)\n progression = generate_progression(\n start_number, progression_step, progression_length,\n )\n return hide_number(progression)", "def random_present_quiz_selector():\n\n go_again = True\n while go_again:\n number = random.randint(1, 3)\n if number == 1:\n begin_present_are_quiz()\n if number == 2:\n begin_present_ere_quiz()\n if number == 3:\n begin_present_ire_quiz()\n again = input(\"Continue? Y/N\\n\").lower()\n if again == \"n\":\n go_again = False", "def random_imperfect_quiz_selector():\n\n go_again = True\n while go_again:\n number = random.randint(1, 3)\n if number == 1:\n begin_imperfect_are_quiz()\n if number == 2:\n begin_imperfect_ere_quiz()\n if number == 3:\n begin_imperfect_ire_quiz()\n again = input(\"Continue? 
Y/N\\n\").lower()\n if again == \"n\":\n go_again = False", "def game_2001():\n user_points = 0\n computer_points = 0\n while user_points < 2001 and computer_points < 2001:\n dice_y = [3, 4, 6, 8, 10, 12, 20, 100]\n throw_user_number = sum([game_dice_sum() for x in range(2)])\n user_points += throw_user_number\n throw_computer_number = sum([game_dice_sum_random() for x in range(2)])\n computer_points += throw_computer_number\n run_game = input(\"Press Enter to continue\")\n if throw_user_number == 7:\n user_points = user_points // 7\n elif throw_user_number == 11:\n user_points = user_points * 11\n elif throw_computer_number == 7:\n computer_points = computer_points // 7\n elif throw_computer_number == 11:\n computer_points = computer_points * 11\n print(f\"User: {user_points} Computer: {computer_points}\")\n if user_points > computer_points:\n print(\"You win!\")\n elif computer_points > user_points:\n print(\"Computer win!\")\n else:\n print(\"Draw!\")\n\n return f\"Your points: {user_points}, computer points: {computer_points} \"", "def random_subjunctiveimp_quiz_selector():\n\n go_again = True\n while go_again:\n number = random.randint(1, 3)\n if number == 1:\n begin_subjunctiveimp_are_quiz()\n if number == 2:\n begin_subjunctiveimp_ere_quiz()\n if number == 3:\n begin_subjunctiveimp_ire_quiz()\n again = input(\"Continue? Y/N\\n\").lower()\n if again == \"n\":\n go_again = False", "def main_questions(money, grain, people):\n quest_buy = [Q1, Q2, Q3, Q6, Q7]\n question = random.choice(quest_buy)\n print(question)\n answer = input()\n while answer.isdigit() is False:\n print(INPUT_INT_VALUE)\n answer = input()\n answer = int(answer)\n if question == Q1:\n money = money - answer * 12\n elif question == Q2:\n money -= answer * 14\n elif question == Q3:\n money -= answer * 13\n elif question == Q6:\n money -= answer * 10\n elif question == Q7:\n money -= answer * 15\n grain += answer\n\n quest_sell = [Q4, Q5, Q8, Q9, Q10]\n question_2 = random.choice(quest_sell)\n print(question_2)\n answer = input()\n while answer.isdigit() is False:\n print(INPUT_INT_VALUE)\n answer = input()\n answer = int(answer)\n if question == Q4:\n money += answer * 7\n elif question == Q5:\n money += answer * 5\n elif question == Q8:\n money += answer * 6\n elif question == Q9:\n money += answer * 9\n elif question == Q10:\n money += 8\n grain -= answer\n\n print(DISTRIBUTION_OF_GRAIN)\n answer_3 = input()\n while answer_3.isdigit() is False:\n print(INPUT_INT_VALUE)\n answer_3 = input()\n answer_3 = int(answer)\n grain -= answer_3\n if grain / people > 90:\n people *= 1.1\n elif grain / people < 40:\n people *= 0.9\n return int(money), int(grain), int(people)", "def go_again(self):\n num = random.randint(1, 2)\n if num == 1:\n return True\n else:\n return False", "def multiplication(self):\r\n global answer\r\n while True:\r\n try:\r\n easy_random1 = int(random.choice(string.digits))\r\n easy_random2 = int(random.choice(string.digits))\r\n easy_random3 = int(random.choice(string.digits))\r\n easy_random4 = int(random.choice(string.digits))\r\n print(f\"{easy_random1} * {easy_random2} * {easy_random3} * {easy_random4} = ?\")\r\n real_answer = easy_random1 * easy_random2 * easy_random3 * easy_random4\r\n answer = input(\"Enter answer: \")\r\n if answer.lower() == \"stop\":\r\n print(\"okay\")\r\n if int(answer) == real_answer:\r\n print(\"CORRECT ANSWER\")\r\n else:\r\n print(\"WRONG ANSWER\")\r\n print(f\"the answer is {real_answer} sorry! 
try again\")\r\n except ValueError:\r\n return f'\"{answer}\" is not a valid number, only the string stop is allowed'", "def throw_dice():\n return randint(1, 6) + randint(1, 6)", "def reset_problem(self):\n old_number1 = self.number1\n old_number2 = self.number2\n\n # Generate new numbers\n while old_number1 == self.number1 and old_number2 == self.number2:\n print(\"hey\")\n self.number1 = randint(0, 20)\n self.number2 = randint(0, 20)\n\n # Get the new answer\n self.answer = self.number1 + self.number2\n\n # Reset the problem label\n self.lbl_problem.config(text=str(self.number1) + \" + \" + str(self.number2))\n\n # Reset the entry box\n self.ent_answer.delete(0, END)\n\n # Reset the submit button\n self.btn_submit.config(text=\"Submit\", command=self.check_answer)\n\n # Reset the instructions label\n self.lbl_instructs = Label(self, text=\"Type your answer in the box and click \\\"Submit\\\"\")\n self.lbl_instructs.grid(column=1, row=11, padx=20, pady=10, columnspan=30, rowspan=5)", "def new_game(range):\n global secret_number\n global counter\n global n\n n = range\n \n secret_number = random.randrange(0, n)\n counter = int(math.ceil(math.log(n + 1)/math.log(2)))\n \n print \"New Game. Range is from 0 to\", n\n print \"Number of remaining guesses is\",counter\n print \"\"", "def test_remain():\r\n global pickno\r\n #Change pick number to the total amount of balls\r\n # Ex. If we have 3 balls remaining the user cannot pick 4\r\n if total <= 4:\r\n pickno = total", "def guest_num(max=20):\n rand_num = random.randint(1, 101)\n retries = 0\n while retries <= max:\n try:\n n = int(input('Input a number: '))\n if n == rand_num:\n print('YOU WIN!')\n break\n elif n > rand_num:\n print('Iputed number is great than result number. Just retry!')\n retries += 1\n else:\n print('Iputed number is less than result number. 
Just retry!')\n retries += 1\n except ValueError:\n print('Only can input a number!')\n except:\n print('Only can input a number!')\n else:\n print('YOU LOST!')", "def play_one_game():\n sum = roll_dice()\n print(\"You rolled \", sum)\n if (sum == 7 or sum == 11):\n return 1\n elif (sum == 2 or sum == 3 or sum == 12):\n return 0\n else:\n point = sum\n print(\"Your point is \", point)\n print(\" \")\n newsum = 0\n while (newsum != point and newsum != 7):\n newsum = roll_dice()\n print(\"You rolled\", newsum)\n if (newsum == point):\n return 1\n else:\n return 0", "def guessTheSecret():\n\tguess = int(input('Guess the number > '))\n\tglobal attempts\n\tcheck = False\n\twhile guess != secret_num:\n\t\tif guess < secret_num:\n\t\t\tprint('Your guess is too low')\n\t\telif guess > secret_num:\n\t\t\tprint('You guess to too high')\n\t\tguess = int(input('Guess again > '))\n\t\tattempts += 1\n\t\tif attempts >= 4:\n\t\t\tbreak\n\tif guess == secret_num:\n\t\treturn True", "def rand(jenni, input):\n if input.group(2) == \" \" or not input.group(2):\n jenni.reply(\"I'm sorry, but you must enter at least one number.\")\n else:\n random.seed()\n li_integers = input.group(2)\n li_integers_str = li_integers.split()\n if len(li_integers_str) == 1:\n li_integers_str = re.sub(r'\\D', '', str(li_integers_str))\n if len(li_integers_str) > 0:\n if int(li_integers_str[0]) <= 1:\n a = li_integers_str\n a = int(a)\n if a < 0:\n randinte = random.randint(a, 0)\n if a > 0:\n randinte = random.randint(0, a)\n else:\n a = li_integers_str\n a = int(a)\n randinte = random.randint(0, a)\n jenni.reply(\"your random integer is: \" + str(randinte))\n else:\n jenni.reply(\"lolwut\")\n else:\n ln = li_integers.split()\n if len(ln) == 2:\n a, b = ln\n a = re.sub(r'\\D', u'', a)\n b = re.sub(r'\\D', u'', b)\n if not a:\n a = 0\n if not b:\n b = 0\n a = int(a)\n b = int(b)\n if a <= b:\n randinte = random.randint(a, b)\n else:\n randinte = random.randint(b, a)\n jenni.reply(\"your random integer is: \" + str(randinte))\n else:\n jenni.reply(\"I'm not sure what you want me to do!\")", "def test_pick():\r\n global user_pick\r\n while user_pick > pickno or user_pick <= 0 or type(user_pick):\r\n user_pick = int(input(\"How many balls do you want to get? (Up to 4)\"))\r\n #Keeps the number of balls picked by user to be between 0 and 4\r" ]
[ "0.73614657", "0.70625", "0.6931597", "0.67691684", "0.65652305", "0.6021567", "0.5964148", "0.5895458", "0.58120376", "0.5797901", "0.5791938", "0.5759847", "0.57452613", "0.57439905", "0.573757", "0.56791544", "0.5662617", "0.56588566", "0.5658514", "0.5648855", "0.5645049", "0.56398183", "0.56011266", "0.5528765", "0.5522078", "0.5503686", "0.5476713", "0.5456755", "0.54494035", "0.5443142" ]
0.75160843
0
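The streak logic in the quiz document above can be factored into a testable form; this is a sketch under the assumption that an answer_fn callback stands in for interactive input:

import random

def addition_drill(answer_fn, lo=10, hi=99):
    # answer_fn(a, b) replaces input() so the loop can be exercised in tests.
    streak, asked = 0, 0
    while streak < 3:
        a, b = random.randint(lo, hi), random.randint(lo, hi)
        asked += 1
        streak = streak + 1 if answer_fn(a, b) == a + b else 0
    return asked

# A player who always answers correctly finishes in exactly three questions.
assert addition_drill(lambda a, b: a + b) == 3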
Loads acquisition data. Returns a pandas DataFrame.
def pd_load_acquisition_csv(acquisition_path, **kwargs):
    columns = [
        'loan_id', 'orig_channel', 'seller_name', 'orig_interest_rate', 'orig_upb',
        'orig_loan_term', 'orig_date', 'first_pay_date', 'orig_ltv', 'orig_cltv',
        'num_borrowers', 'dti', 'borrower_credit_score', 'first_home_buyer',
        'loan_purpose', 'property_type', 'num_units', 'occupancy_status',
        'property_state', 'zip', 'mortgage_insurance_percent', 'product_type',
        'coborrow_credit_score', 'mortgage_insurance_type',
        'relocation_mortgage_indicator', 'year_quarter'
    ]
    dtypes = {
        'loan_id': np.int64,
        'orig_channel': CategoricalDtype(['B', 'C', 'R']),
        'seller_name': str,
        'orig_interest_rate': np.float64,
        'orig_upb': np.int64,
        'orig_loan_term': np.int64,
        'orig_date': str,
        'first_pay_date': str,
        'orig_ltv': np.float64,
        'orig_cltv': np.float64,
        'num_borrowers': np.float64,
        'dti': np.float64,
        'borrower_credit_score': np.float64,
        'first_home_buyer': CategoricalDtype(['N', 'U', 'Y']),
        'loan_purpose': CategoricalDtype(['C', 'P', 'R', 'U']),
        'property_type': CategoricalDtype(['CO', 'CP', 'MH', 'PU', 'SF']),
        'num_units': np.int64,
        'occupancy_status': CategoricalDtype(['I', 'P', 'S']),
        'property_state': CategoricalDtype(
            ['AK', 'AL', 'AR', 'AZ', 'CA', 'CO', 'CT', 'DC', 'DE', 'FL', 'GA', 'HI',
             'IA', 'ID', 'IL', 'IN', 'KS', 'KY', 'LA', 'MA', 'MD', 'ME', 'MI', 'MN',
             'MO', 'MS', 'MT', 'NC', 'ND', 'NE', 'NH', 'NJ', 'NM', 'NV', 'NY', 'OH',
             'OK', 'OR', 'PA', 'PR', 'RI', 'SC', 'SD', 'TN', 'TX', 'UT', 'VA', 'VI',
             'VT', 'WA', 'WI', 'WV', 'WY']),
        'zip': np.int64,
        'mortgage_insurance_percent': np.float64,
        'product_type': CategoricalDtype(['FRM']),
        'coborrow_credit_score': np.float64,
        'mortgage_insurance_type': np.float64,
        'relocation_mortgage_indicator': CategoricalDtype(['N', 'Y']),
        'year_quarter': np.int64
    }

    a = pd.read_csv(acquisition_path, names=columns, delimiter='|', dtype=dtypes,
                    parse_dates=[6, 7], error_bad_lines=True, warn_bad_lines=True,
                    na_filter=True)
    return a
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pd_load_acquisition_csv(acquisition_path, **kwargs):\n\n cols = [\n 'loan_id', 'orig_channel', 'seller_name', 'orig_interest_rate', 'orig_upb', 'orig_loan_term',\n 'orig_date', 'first_pay_date', 'orig_ltv', 'orig_cltv', 'num_borrowers', 'dti', 'borrower_credit_score',\n 'first_home_buyer', 'loan_purpose', 'property_type', 'num_units', 'occupancy_status', 'property_state',\n 'zip', 'mortgage_insurance_percent', 'product_type', 'coborrow_credit_score', 'mortgage_insurance_type',\n 'relocation_mortgage_indicator'\n ]\n\n dtypes = {\n \"loan_id\": np.int64,\n \"monthly_reporting_period\": str,\n \"servicer\": str,\n \"interest_rate\": np.float64,\n \"current_actual_upb\": np.float64,\n \"loan_age\": np.float64,\n \"remaining_months_to_legal_maturity\": np.float64,\n \"adj_remaining_months_to_maturity\": np.float64,\n \"maturity_date\": str,\n \"msa\": np.float64,\n \"current_loan_delinquency_status\": np.int32,\n \"mod_flag\": CategoricalDtype(['N', 'Y']),\n \"zero_balance_code\": CategoricalDtype(['01', '02', '06', '09', '03', '15', '16']),\n \"zero_balance_effective_date\": str,\n \"last_paid_installment_date\": str,\n \"foreclosed_after\": str,\n \"disposition_date\": str,\n \"foreclosure_costs\": np.float64,\n \"prop_preservation_and_repair_costs\": np.float64,\n \"asset_recovery_costs\": np.float64,\n \"misc_holding_expenses\": np.float64,\n \"holding_taxes\": np.float64,\n \"net_sale_proceeds\": np.float64,\n \"credit_enhancement_proceeds\": np.float64,\n \"repurchase_make_whole_proceeds\": np.float64,\n \"other_foreclosure_proceeds\": np.float64,\n \"non_interest_bearing_upb\": np.float64,\n \"principal_forgiveness_upb\": np.float64,\n \"repurchase_make_whole_proceeds_flag\": CategoricalDtype(['N', 'Y']),\n \"foreclosure_principal_write_off_amount\": np.float64,\n \"servicing_activity_indicator\": CategoricalDtype(['N', 'Y']),\n }\n print(acquisition_path)\n\n #return pd.read_csv(acquisition_path, names=cols, delimiter='|', dtype=dtypes, parse_dates=[6,7])\n return pd.read_csv('acq.csv', names=cols, delimiter='|', dtype=dtypes, parse_dates=[6,7])", "def loader():\n bucket = data_load_variables[\"bucket\"]\n\n if data_load_variables[\"use_lite_dataset\"]:\n dataset_name = data_load_variables[\"lite_dataset_name\"]\n else:\n dataset_name = data_load_variables[\"dataset_name\"]\n\n s3 = boto3.client('s3')\n\n obj = s3.get_object(Bucket=bucket, Key=dataset_name)\n # get object and file (key) from bucket\n\n df = pd.read_csv(obj['Body'])\n return df", "def _loadData(self):\n self.d = read_ac_data.read_ac_data_wrapper(self.sc_id, self.date,\n dType='10Hz')\n return", "def _load_data(self, comp=None):\n\t\tif comp is None:\n\t\t\tcomp = self._compensation\n\t\tmatrix = self._fcsfile.read_data(fmt='matrix',\n\t\t\tcomp=comp)\n\t\treturn pd.DataFrame(matrix, columns=self._channels)", "def getDataframe(self):\n self._loadCSVFile()\n self._cleanProcessDf()\n return self._df", "def load_data(dataset_path: str):\n data = arff.loadarff(dataset_path)\n data_frame = pd.DataFrame(data[0])\n return data_frame", "def _get_data(*, from_web: bool) -> pd.DataFrame:\n\n df = read_in_data.SaveFormats.CSV.read(from_web=from_web)\n return df", "def fetch_data(self) -> pd.DataFrame:\r\n os.chdir(r'\\\\192.168.8.90\\投研部\\Jessica\\test_data')\r\n if self.tic in ['RB.CCRI', 'HC.CCRI', 'I.CCRI', 'J.CCRI', 'JM.CCRI', 'ZC.CCRI']:\r\n f = pd.read_hdf('data.h5', 'snc')\r\n if self.tic in ['CU.CCRI', 'ZN.CCRI', 'AL.CCRI', 'NI.CCRI']:\r\n f = pd.read_hdf('data.h5', 'met')\r\n data = f.loc[f.loc[:, 'sec_code'] == 
self.tic, :]\r\n # extract I.CCRI data\r\n table = pd.pivot_table(data, index=['date'], columns=['factor_code'], values='factor_value')\r\n table = table.sort_values(by='date')\r\n \r\n return table", "def get_controls_datafrmae(self) -> pd.DataFrame:\n return pd.read_csv(self.sequence_data_paths.controls_path / Path(\"controls_airr.csv.gz\"), index_col=0) # type: ignore", "def _get_data(self):\n \n print(\"Getting Data...\")\n self.data = sgs.dataframe(self.serie_name, \n start = self.start_date, \n end = self.end_date)\n\n print(f\"Done! {self.data.shape[0]} rows were collected\")\n \n self.data.reset_index(inplace=True)\n self.data.columns = ['date', 'cdi']\n\n return self.data", "def load():\n return load_pandas()", "def get_data(self)->pd.DataFrame:\n pass", "def read_data():\n data = pd.read_csv('input_data/Preply_tutor_views_datasaet.csv')\n return data", "def get_data(filename):\r\n return pd.read_csv(filename)", "def pd_data(self):\r\n data = pd.read_csv(self._data_path + self._data_dir)\r\n return data", "def _get_data(self):\n project_name, experiment_id = self.parent._get_parent_identifiers()\n\n self._data = self.repository.get_dataframe_data(\n project_name, self.id, experiment_id=experiment_id\n )", "def test_dataframe(self):\n\n url=\"http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data\"\n readerobject=requester.url_to_df(url)\n self.assertIsInstance(readerobject,pd.DataFrame)", "def get_training_data(db_conn):\n return pd.read_sql('''select * from churn_model.churn_data;''', db_conn)", "def _download_qc(self) -> pd.DataFrame:\n # No QC is given for variants data - return empty DataFrame\n return pd.DataFrame()", "def _pq2df(data_file):\n df = pd.read_parquet(data_file)\n return df", "def read(name, db):\n \n # Make connection with the database\n\tconn = sqlite3.connect(db)\n\tdf = pd.read_sql_query(\"select * from \" + name + ';', conn)\n \n # Print loaded data table name and return DataFrame\n\tprint(name + ': loaded')\n\treturn df", "def read_data(self):\n fpath = './data/surveys.csv'\n self.data = pd.read_csv(fpath, header=0, low_memory=False)\n #print(self.data.head(n=5))\n print(self.data.shape)", "def dataframe(self):\n\t\treturn self._dataframe", "def get_data(path):\n df = pd.read_csv(path)\n\n return df", "def create_dataframe():\r\n\r\n df = pd.read_csv('data/data.csv', header=0)\r\n return df", "def read(self):\n self._load_metadata()\n return self._df.compute()", "def dataframe(self):\n return self.generator.dataframe", "def get_dataframe(data_path: PathLike) -> pd.DataFrame:\n path = get_local_data_path(data_path, download_if_missing=True)\n df = pd.read_parquet(path)\n return df", "def get_df(config_summary_url):\n return pd.read_csv(urlretrieve(config_summary_url)[0])", "def read_data(self):\n try:\n return pd.read_csv(self.address, sep=\"\\t\",\n header=None,\n nrows=self.num_lines,\n names=['DNA_Id'])\n except FileNotFoundError:\n raise FileNotFoundError(f'check if the address: {self.address} contains the desired file')" ]
[ "0.6419686", "0.6402017", "0.63575786", "0.62864983", "0.6271366", "0.62537175", "0.6238884", "0.62184477", "0.61879724", "0.6175933", "0.6175492", "0.61639774", "0.61547565", "0.6142899", "0.6125022", "0.60660636", "0.60647833", "0.60528654", "0.6033451", "0.60246754", "0.6000434", "0.5994028", "0.5986929", "0.59591156", "0.59500515", "0.5949197", "0.5933828", "0.593042", "0.5917476", "0.5913121" ]
0.6446527
0
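The load above hinges on pinning dtypes at parse time; here is a trimmed sketch — the three-column subset and the sample rows are invented for illustration — showing the CategoricalDtype pattern with a pipe-delimited source:

import io
import pandas as pd
from pandas.api.types import CategoricalDtype

raw = io.StringIO("100001|R|735.0\n100002|B|680.0\n")
df = pd.read_csv(
    raw,
    names=['loan_id', 'orig_channel', 'borrower_credit_score'],
    delimiter='|',
    dtype={
        'loan_id': 'int64',
        'orig_channel': CategoricalDtype(['B', 'C', 'R']),
        'borrower_credit_score': 'float64',
    },
)
assert str(df['orig_channel'].dtype) == 'category'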
quote the elements of a dotted name
def quote_dotted(
    name: Union["quoted_name", str], quote: functools.partial
) -> Union["quoted_name", str]:
    if isinstance(name, quoted_name):
        return quote(name)

    result = ".".join([quote(x) for x in name.split(".")])
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dotted_name(s):\n forbidden = forbidden_chars.intersection(s)\n if forbidden:\n raise ValueError('%(s)s contains forbidden characters'\n ' (%(forbidden)s)'\n % locals())\n if not s:\n return ''\n elif s in reserved_names:\n raise ValueError('The name %(s)r is reserved!'\n % locals())\n # might result from tab completion:\n stripped = s.rstrip('/')\n if '/' in stripped:\n raise ValueError('dotted name %(stripped)r'\n ' must not contain slashes'\n % locals())\n chunks = stripped.split('.')\n if [chunk\n for chunk in chunks\n if not chunk\n ]:\n raise ValueError('badly dotted name: %(stripped)r'\n % locals())\n return stripped", "def dotted(self) -> str:\n return \".\".join(str(v) for v in self.value)", "def dotted_prefixes(dotted_name, reverse=False):\n name_parts = dotted_name.split(\".\")\n if reverse:\n idxes = range(len(name_parts), 0, -1)\n else:\n idxes = range(1, len(name_parts)+1)\n result = ['.'.join(name_parts[:i]) or '.' for i in idxes]\n return result", "def sanitize_dot(func):\n return str(func).replace(\"::\", \"\\\\\")", "def _dotted_path(segments):\n segments_without_separators = [s[:-1] for s in segments[:-1]]\n segments_without_separators.append(segments[-1])\n return '.'.join(segments_without_separators)", "def quote_path(path):\n return \"[%s]\" % \",\".join(\"'%s'\" % p for p in path)", "def embeded_triple_quotes():\n pass", "def test_resolve_dashed_name():\n pypi = XMLRPCPyPIAPI()\n assert pypi.resolve_dashed_name('foo') == 'foo'\n assert pypi.resolve_dashed_name('acme-data.foobar') == 'acme_data.foobar'\n assert pypi.resolve_dashed_name('pytest-cov') == 'pytest-cov'", "def test_dotted_named_entities():\n class TestEntity(Entity):\n foo = fields.EntityField('tests.dottedname.foo.bar.baz.Zap')\n\n e = TestEntity(foo={'name': 'baz'})\n assert e.foo.name == 'baz' # noqa\n\n # Avoid importing the class before the TestEntity above is instantiated\n # so that we know the `EntityField` import worked as expected.\n from tests.dottedname.foo.bar.baz import Zap\n assert isinstance(e.foo, Zap)", "def getquoted(self): # real signature unknown; restored from __doc__\n pass", "def test_dotted_named_entities_not_callable():\n class TestEntity(Entity):\n foo = fields.EntityField('tests.dottedname.foo.bar.baz.NotCallable')\n\n with pytest.raises(ValueError):\n TestEntity(foo={'name': 'baz'})", "def dotted_path(cls):\n return f\"{cls.__module__}.{cls.__qualname__}\"", "def _ns(self, *args):\n return \"%s.%s\" % (self.namespace, \".\".join([str(arg) for arg in args]))", "def as_package(names: List[str]) -> str:\n return '.'.join(names)", "def process_name(self, stack):\n dot_op = self._toks(stack)\n toks = [t.value for t in Stack.flatten(dot_op)]\n # always remove the final dot\n assert toks[-1] == \".\"\n expr = \"\".join(toks[:-1])\n yield from self.dot.complete(expr)", "def import_dotted_name(name):\r\n name = str(name)\r\n if ':' in name:\r\n module, obj = name.split(':', 1)\r\n elif '.' 
in name:\r\n module, obj = name.rsplit('.', 1)\r\n else:\r\n return __import__(name, level=0)\r\n mod = __import__(module, fromlist=[obj], level=0)\r\n return getattr(mod, obj)", "def dot_printname(self):\n return self.printname.split('/')[0].replace('-', '_')", "def load_dotted(name):\n components = name.split('.')\n path = [components.pop(0)]\n obj = __import__(path[0])\n while components:\n comp = components.pop(0)\n path.append(comp)\n try:\n obj = getattr(obj, comp)\n except AttributeError:\n __import__('.'.join(path))\n try:\n obj = getattr(obj, comp)\n except AttributeError:\n raise ImportError('.'.join(path))\n\n return obj", "def testParamNameDotted(self):\n prop = recipe_api.Property(param_name='good_name')\n bound = prop.bind('bad.name-time', RECIPE_PROPERTY,\n 'fake_repo::fake_recipe')\n\n self.assertEqual('good_name', bound.param_name)", "def dot_escape(s):\n s = re.sub(r'([^a-zA-Z0-9\" ])', r\"\\\\\\1\", s)\n return s", "def escape_dot(s):\n\treturn s. \\\n\t\treplace(\"{\", \"\\\\{\").\\\n\t\treplace(\"}\", \"\\\\}\").\\\n\t\treplace(\"\\n\", \"\").\\\n\t\treplace(\"\\r\", \"\")", "def test_dotted_named_entities_not_dotted():\n class NonDottedNameEntity(Entity):\n # `Property` is a real class, but this string is not a full\n # reference, so it can't be resolved and is therefore considered\n # invalid.\n foo = fields.EntityField('Property')\n\n with pytest.raises(ValueError):\n NonDottedNameEntity(foo={})\n\n class ExistingNonDottedNameEntity(Entity):\n # `FlexEntity` is a real class and it's likely in the local\n # import scope, but it's still not considered a supported\n # dotted-name class reference.\n foo = fields.EntityField('FlexEntity')\n\n with pytest.raises(ValueError):\n ExistingNonDottedNameEntity(foo={})\n\n class SelfNonDottedNameEntity(Entity):\n # 'self' is a special case and is the only non-dotted,\n # dotted-name class reference that we support.\n foo = fields.EntityField('self')\n name = fields.StringField()\n\n result = SelfNonDottedNameEntity(\n name='outer',\n foo={\n 'name': 'inner',\n 'foo': {\n 'name': 'deeper'\n }\n }\n )\n assert result\n assert result.name == 'outer'\n assert result.foo.name == 'inner'\n assert result.foo.foo.name == 'deeper'", "def test_dotted_named_entities_circular_references():\n from tests.dottedname.foo.bar.bop import Property\n\n p = Property(\n name='outer',\n nested={\n 'properties': [\n Property(name='inner')\n ]\n }\n )\n assert p\n assert isinstance(p.nested.properties, list)\n assert p.nested.properties[0].name == 'inner'", "def test_get_call_name2(self):\n tree = ast.parse(\"a.b.c.d(x,y)\").body[0].value\n\n name = b_utils.get_call_name(tree, {\"a\": \"alias.x.y\"})\n self.assertEqual(\"alias.x.y.b.c.d\", name)\n\n name = b_utils.get_call_name(tree, {\"a.b\": \"alias.x.y\"})\n self.assertEqual(\"alias.x.y.c.d\", name)\n\n name = b_utils.get_call_name(tree, {\"a.b.c.d\": \"alias.x.y\"})\n self.assertEqual(\"alias.x.y\", name)", "def quote_name(self, name):\n name = re.sub('-', '', name)\n if name.startswith('\"') and name.endswith('\"'):\n return name\n return '\"%s\"' % (name,)", "def autoprefix(prefix):\n pl = len(prefix)\n msg = '%%(s)r: expected some name after %(prefix)r!' 
% locals()\n def checker(s):\n if s.startswith(prefix):\n tail = s[pl:]\n if tail:\n return prefix + dotted_name(tail)\n else:\n raise ValueError(msg % locals())\n elif s:\n return prefix + dotted_name(s)\n else:\n return ''\n return checker", "def fqpn(thing):\n return \".\".join([thing.__module__, thing.__name__])", "def test_make_fname_js_safe_dot_dash():\n\n unsafe = \"a.b-c\"\n expected = \"a_dot_b_c\"\n\n assert expected == u.make_fname_js_safe(unsafe)", "def complete(self):\n dot = '.' if self.needs_dot else ''\n append = ''\n if settings.add_bracket_after_function \\\n and self.type == 'Function':\n append = '('\n\n if settings.add_dot_after_module:\n if isinstance(self.base, parsing.Module):\n append += '.'\n if isinstance(self.base, parsing.Param):\n append += '='\n return dot + self.name.names[-1][self.like_name_length:] + append", "def swapDotComa(line):\n\n\n # We start outside a doble quote\n quoted = False\n line = list(line)\n for i in range(len(line)-1):\n if line[i]== '\"':\n quoted = not quoted\n elif quoted and line[i]==',':\n line[i] = '.'\n elif quoted and line[i]=='.':\n line[i] = ','\n return ''.join(line)" ]
[ "0.6540108", "0.64451617", "0.6417363", "0.6304107", "0.58152014", "0.5777598", "0.5726563", "0.57090414", "0.5696863", "0.5691718", "0.56322443", "0.550168", "0.5483858", "0.5444921", "0.54370934", "0.5433207", "0.53710306", "0.53705055", "0.5365929", "0.5365884", "0.5363487", "0.5352939", "0.5315603", "0.5292653", "0.52843773", "0.52719826", "0.52542377", "0.5252025", "0.52458936", "0.5231951" ]
0.745443
0
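For reference, a minimal sketch exercising the dotted_name validator shown as the first negative above. It assumes dotted_name is importable and that its source module defines the forbidden_chars and reserved_names globals it references; the module path below is hypothetical:

from names import dotted_name  # hypothetical module path; adjust to the real source

assert dotted_name('pkg.sub.mod/') == 'pkg.sub.mod'   # trailing slash (tab completion) is stripped
assert dotted_name('') == ''                          # empty input passes through
try:
    dotted_name('pkg..mod')                           # empty chunk between dots
except ValueError as exc:
    print(exc)                                        # badly dotted name: 'pkg..mod'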
Convert text to float or 0.0 if invalid.
def convert_to_number(text):
    try:
        value = float(text)
        return value
    except ValueError:
        return 0.0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ffloat(string):\n try:\n return float(string.strip())\n except:\n return 0", "def _convert_to_float(s):\n try:\n return float(s)\n except:\n return s", "def safe_float(str):\n if not str:\n return None\n try:\n return float(str)\n except ValueError:\n return 0", "def convert_to_float(s):\n try:\n return float(s)\n except TypeError:\n return None", "def Float(val):\n try:\n return float(val)\n except ValueError:\n return ''", "def getFloat(string):\n return (0.0)", "def _string_to_float(s):\n try:\n f = float(s)\n return f\n except ValueError:\n return None", "def float_from_string(data):\n return float(maybe_number(data))", "def to_float(s):\n try:\n return float(s)\n except ValueError:\n return np.nan", "def get_valid_value(self, text_input):\n try:\n return float(text_input.text)\n except ValueError:\n return 0.0", "def convertFloat(s):\n try:\n float(s)\n return \"FLOAT\"\n except:\n return s", "def convert_str_float(x):\n\ttry:\n\t\treturn float(x)\n\texcept ValueError:\n\t\tprint(\"must be a number\")", "def _to_float(self, s: str) -> float:\n return int(s[:-1]) / 1e9 if s.endswith('n') else float(s[:-1])", "def convertStringToFloat(xmlNode):\n try:\n val = float(xmlNode.text)\n return val\n except (ValueError,TypeError):\n raise IOError('Real value is required for content of node %s, but got %s' %(node.tag, node.text))", "def float(s):\n if s is None or s == \"\":\n f = float(-maxsize)\n else:\n f = float(s)\n\n return f", "def extract_float(self, s: str) -> float:\n f = re.findall(r'([0-9]*[.]*[0-9]+)', s)\n return float(f[0]) if len(f) > 0 else None", "def convert_to_float(word: str) -> float:\n return round(float(word), 2)", "def try_float(data):\n try:\n return float(data)\n except (ValueError, TypeError ):\n return data", "def safe_float(float_string: str = \"0.0\") -> float:\n float_things = [None, \"\", \"-\", \"0\"]\n\n if float_string in float_things:\n return 0.0\n else:\n return float(float_string)", "def get_number(text):\n# if (isinstance(text, str) or isinstance(text, unicode)):\n if True:\n text.replace(\",\",\".\")\n text = re.sub(\"\\xa0\",\"\", text)\n rst = re.findall(\"[0-9]+\\.{0,1}[0-9]*\", text)\n if rst:\n rst = rst[0]\n else:\n rst = \"nan\"\n else:\n rst = text\n try:\n rst = float(rst)\n except:\n rst = float(\"nan\")\n return(rst)", "def tryFloat(value):\n try:\n return float(value)\n except:\n return value", "def txt2float(file: str) -> float:\n return float(get_first_line(file))", "def float_or_none(s):\n if s:\n return float(s)", "def read_float(v):\n if v.strip() == '':\n return 0.\n try:\n return float(v)\n except ValueError:\n # ENDF6 may omit the e for exponent\n return float(v[0] + v[1:].replace('+', 'e+').replace('-', 'e-')) # don't replace leading negative sign", "def parseFloat(s, ret=0.0):\n if not isinstance(s, str):\n return float(s)\n elif s:\n if s[0] in \"+-\":\n ts = s[1:]\n else:\n ts = s\n\n if ts and ts.count(\".\") <= 1 and all([_ in \".0123456789\" for _ in ts]):\n return float(s)\n\n return ret", "def str2floatTrap(self, someStr):\n\n tempStr = someStr\n\n if tempStr.startswith('('):\n tempStr = tempStr[1:]\n\n if tempStr.endswith(')'):\n tempStr = tempStr[:len(tempStr) - 1]\n\n return float(tempStr)", "def _float(data):\n try:\n return float(data)\n except ValueError as err:\n if data in ('None', 'NA', 'nan'):\n return nan\n else:\n raise ValueError(err)", "def to_float(x, key):\n x = x.strip()\n if not x or x in ('NA', 'n/a'):\n return None\n if '.' 
in x:\n # There are '.'s, so commas are placeholders\n x = x.replace(',', '') \n if x.endswith('ft'):\n scale = 0.3048\n x = x[:-2].strip()\n else:\n scale = 1 \n try:\n return scale * float(x)\n except:\n logging.warn('Could not convert %s value %s to float', key, x)\n return None", "def find_float(input: str) -> float:\n str_split = input.split('<@')\n if (len(str_split) == 0):\n raise AmountMissingException(\"amount_not_found\")\n input_text = str_split[0]\n regex = r'(?:^|\\s)(\\d*\\.?\\d+)(?=$|\\s)'\n matches = re.findall(regex, input_text, re.IGNORECASE)\n if len(matches) >= 1:\n return abs(float(matches[0].strip()))\n raise AmountMissingException(\"amount_not_found\")", "def _extract_num(self, text):\n try:\n if 'studio' in text.lower():\n return 0.0\n text = text.replace(',', '')\n pattern = r'[-+]?\\d*\\.\\d+|\\d+'\n result = re.findall(pattern, text)[0]\n return float(result)\n except:\n return np.nan" ]
[ "0.7549978", "0.7463989", "0.73810375", "0.73316866", "0.7268213", "0.7146606", "0.7116986", "0.7115148", "0.7079289", "0.7048062", "0.69954073", "0.6994393", "0.69855756", "0.69264966", "0.6895625", "0.68766963", "0.68540186", "0.68069357", "0.679633", "0.6766438", "0.6747904", "0.6735166", "0.6722772", "0.6703078", "0.66655195", "0.66641587", "0.66629565", "0.66485655", "0.66484606", "0.6627828" ]
0.7922715
0
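A quick usage sketch of the positive document above (convert_to_number assumed in scope). Note that only ValueError is caught, so a non-string argument such as None still raises TypeError:

print(convert_to_number('3.14'))   # 3.14
print(convert_to_number('abc'))    # 0.0 -- ValueError swallowed
print(convert_to_number(' 7 '))    # 7.0 -- float() tolerates surrounding whitespace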
persist tweet data into cassandra
def persist_data(tweet_data, cassandra_session):
    try:
        logger.debug('Start to persist data to cassandra %s \n', tweet_data)
        parsed = json.loads(tweet_data)
        unit_id = str(parsed.get('_unit_id'))
        gender = parsed.get('gender')
        tweet_text = str(parsed.get('text'))
        hashtags = str(parsed.get('hashtags'))
        tweet_count = parsed.get('tweet_count')
        tweet_location = parsed.get('tweet_location')
        normalized_location = parsed.get('normalized_location')
        user_timezone = parsed.get('user_timezone')
        # statement = "INSERT INTO %s (unit_id, gender, tweet_text, tweet_location, normalized_location) VALUES ('%s', '%s', '%s', '%s', '%s')" % (data_table, unit_id, gender, tweet_text, tweet_location, normalized_location)
        statement = cassandra_session.prepare("INSERT INTO %s (unit_id, gender, tweet_text, hashtags, tweet_count, tweet_location, normalized_location) VALUES (?, ?, ?, ?, ?, ?, ?)" % data_table)
        cassandra_session.execute(statement, (unit_id, gender, tweet_text, hashtags, tweet_count, tweet_location, normalized_location))
        logger.info('Persisted data to cassandra for unit_id: %s, gender: %s, tweet_text: %s, hashtags: %s, tweet_count: %s, tweet_location: %s, normalized_location: %s\n' % (unit_id, gender, tweet_text, hashtags, tweet_count, tweet_location, normalized_location))
    except Exception as e:
        logger.error('Failed to persist data to cassandra %s %s \n', tweet_data, e)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def persist_db(database, tweets):\n log.debug(\"{} tweets to db\".format(len(tweets)))\n\n for tweet in tweets:\n tweet['_id'] = tweet['id_str']\n database.update(tweets)", "def insert_tweets(conn: Connection, fetch_data: Iterable[Dict]) -> None:\n\n s = Session(bind=conn)\n meta = MetaData()\n meta.reflect(bind=conn)\n s.add_all([Tweet(**t) for t in fetch_data])\n s.commit()", "def store_tweet(tweet, topic):\n try:\n tweet = tweet.replace(\"'\", \"\\\\'\" )\n query = f\"insert into {db_schema}.{db_table_tweet} set tweet='{tweet}', topic='{topic}'\"\n logger.info(f'QUERY: {query}') \n with MysqlCursor() as cur:\n cur.execute(query)\n tweet_id = int(cur.lastrowid)\n logger.info(f'ID_TWEET: {tweet_id}') \n return tweet_id\n except Exception as ex:\n logger.exception(ex)", "def store_tweet(tweet, keyword):\n\tglobal _docs_to_store\n\tdoc = {'tweet': tweet, 'keyword': keyword, 'timestamp': int(time.time())}\n\t_docs_to_store.append(doc)\n\tif len(_docs_to_store) == UPDATE_CHUNK:\n\t\tcloudant.update(_docs_to_store)\n\t\t_docs_to_store = []", "def exportToDB(self, tweets):\n for t in range(len(tweets)):\n for x in range(len(tweets[t])):\n doc_ref = self.fs_db.collection(u'twitter').document(str(tweets[t][1]))\n doc_ref.set({\n u'created_date': str(tweets[t][0]),\n u'id': str(tweets[t][1]),\n u'tweet': tweets[t][2],\n u'screen_name': tweets[t][3],\n u'name': tweets[t][4],\n u'likes': tweets[t][5],\n u'retweets': tweets[t][6],\n u'location': tweets[t][7]\n })", "def fillTweetInDB(self):\n sqlInsertTweets = \"INSERT INTO tweet content VALUES %s\"\n mycursor.executemany(sqlInsertTweets,self.content)\n mydb.commit()", "def save_tweet(self, twitter) -> None:\n if isinstance(twitter, dict):\n json_data = twitter\n else:\n json_data = json.loads(twitter)\n\n try:\n breakpoint()\n self.db.tweets.find_one_and_update(\n {'id_str': json_data['id_str']},\n {'$inc': {'seq': 1}},\n projection={'seq': True, '_id': False},\n upsert=True,\n )\n except Exception as e:\n log.error(e)", "def insert_into_tweets(self, infos):\n query = \"insert into tweets(tweet_id, insert_date, created_at, hashtag) values(?, ?, ?, ?);\"\n with sql.connect('./{}.db'.format(self.name)) as conn:\n conn.executemany(query, infos)", "def insert_tweets(post):\n db_file = dbFile\n try:\n conn = sqlite3.connect(db_file)\n except Exception as e:\n print(e)\n for i in range(0,len(post['id_str'])):\n tweet={}\n tweet['user_id']=post['user_id']\n tweet['created_at'] = post['created_at'][i]\n tweet['id_str'] = post['id_str'][i]\n tweet['text'] = post['text'][i]\n tweet['source'] = post['source'][i]\n tweet['truncated'] = post['truncated'][i]\n tweet['in_reply_to_status_id_str'] = post['in_reply_to_status_id_str'][i]\n tweet['in_reply_to_screen_name'] = post['in_reply_to_screen_name'][i]\n tweet['coordinatesNumber'] = post['coordinatesNumber'][i]\n tweet['coordinates'] = post['coordinates'][i]\n tweet['coordinatesType'] = post['coordinatesType'][i]\n tweet['placeCountry'] = post['placeCountry'][i]\n tweet['placeCountryCode'] = post['placeCountryCode'][i]\n tweet['placeFullName'] = post['placeFullName'][i]\n tweet['placeID'] = post['placeID'][i]\n tweet['placeName'] = post['placeName'][i]\n tweet['placeType'] = post['placeType'][i]\n tweet['placeURL'] = post['placeURL'][i]\n tweet['quoted_status_id_str'] = post['quoted_status_id_str'][i]\n tweet['is_quote_status'] = post['is_quote_status'][i]\n tweet['retweeted_status'] = post['retweeted_status'][i]\n tweet['quote_count'] = post['quote_count'][i]\n tweet['reply_count'] = post['reply_count'][i]\n 
tweet['retweet_count'] = post['retweet_count'][i]\n tweet['favorite_count'] = post['favorite_count'][i]\n tweet['hashtagsNumber'] = post['hashtagsNumber'][i]\n tweet['hashtags'] = post['hashtags'][i]\n tweet['urls'] = post['urls'][i]\n tweet['urlsNumber'] = post['urlsNumber'][i]\n tweet['user_mentionsNumber'] = post['user_mentionsNumber'][i]\n tweet['user_mentions'] = post['user_mentions'][i]\n tweet['mediaNumber'] = post['mediaNumber'][i]\n tweet['mediaURLs'] = post['mediaURLs'][i]\n tweet['mediaType'] = post['mediaType'][i]\n tweet['symbolsNumber'] = post['symbolsNumber'][i]\n tweet['symbols'] = post['symbols'][i]\n tweet['pollsNumber'] = post['pollsNumber'][i]\n tweet['polls'] = post['polls'][i]\n tweet['possibly_sensitive'] = post['possibly_sensitive'][i]\n tweet['filter_level'] = post['filter_level'][i]\n tweet['lang'] = post['lang'][i]\n tweet['matching_rulesNumber'] = post['matching_rulesNumber'][i]\n tweet['matching_rulesTag'] = post['matching_rulesTag'][i]\n tweet['matching_rulesID'] = post['matching_rulesID'][i]\n tweet['collected_at'] = strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())\n sqlite_insert(conn, 'GTapp_tweets', tweet)", "def insert_tweet(value):\n execute(query=_query['ins_tweet'],\n value=value,\n single=False)\n\n id_value = [[element[0]]for element in value]\n\n execute(query=_query['ins_sentiment'],\n value=id_value, # Tweet ID value\n single=False\n )", "def persist_data(stock_data, cassandra_session, table):\r\n # noinspection PyBroadException\r\n try:\r\n logger.debug('Start to persist data to cassandra %s' % stock_data)\r\n parsed = json.loads(stock_data)[0]\r\n symbol = parsed.get('StockSymbol')\r\n trade_price = float(parsed.get('LastTradePrice'))\r\n trade_time = parsed.get('LastTradeDateTime')\r\n\r\n # - prepare to insert to cassandra\r\n statement = \"INSERT INTO %s (stock_symbol, trade_time, trade_price) VALUES ('%s', '%s', %f)\" \\\r\n % (table, symbol, trade_time, trade_price)\r\n cassandra_session.execute(statement)\r\n logger.info('Persisted data to cassandra for symbol %s, trade-price %f, trade-time %s'\r\n % (symbol, trade_price, trade_time))\r\n except Exception:\r\n logger.error('Faild to persist data to cassandra %s', stock_data)", "def add_tweet():\r\n tweet = models.Tweet(text_content=request.json['content'], username=request.json['username'],\r\n timestamp=datetime.datetime.now())\r\n db.session.add(tweet)\r\n db.session.commit()\r\n\r\n return {'id': tweet.id}", "def insert_tweet(status):\n status['replies'] = []\n return db.tweets.insert(status)", "def save_tweet_data(data):\n text_buffer = json.dumps(data)\n text_buffer = text_buffer[1:-1]\n text_buffer = '%s,' % text_buffer\n\n with open('public/data/tweets.spool', 'wt') as file_handle:\n file_handle.write(text_buffer)\n\n print('Updated.')", "def load_twitter_data_to_db(self, truncate_table=False, skip_loaded_files=False):\n\n\t\ttable_fields_names, table_fields_types = self.identify_table_mask('twitter_stream_table-mask.txt')\n\n\t\t# Truncating table\n\t\tif truncate_table:\n\t\t\tquery = 'TRUNCATE TABLE ' + TABLE_NAME;\n\t\t\ttry:\n\t\t\t\tself.execute_query(query)\n\t\t\texcept Exception, e:\n\t\t\t\tprint '[e] Exeption: %s' % (str(e))\n\n\t\ttotal_queries = 0\n\t\terror_queries = 0\n\t\tsuccess_queries = 0\n\n\t\tfetcher = TwitterFetcher()\n \t\tfetched_tweets = fetcher.fetchsamples(10)\n\n \t\t\n \t\tfor tweet in fetched_tweets:\n\n \t\t\ttweet_as_list = list()\n \t\t\ttweet_as_list.append('(\"uni.vlba.gdelt.data::seq_twitter_stream_id\".nextval)')\n \t\t\ttweet_as_list.append(tweet)\n 
\t\t\t#print tweet_as_list\n\n \t\t\tif self.insert_data(tweet_as_list, table_fields_names, table_fields_types):\n\t\t\t\tsuccess_queries = success_queries + 1\n\t\t\telse:\n\t\t\t\terror_queries = error_queries + 1\n\n\t\ttotal_queries = success_queries + error_queries\t\t\n\t\t\n\t\tprint '\\n[i] Queries processed in total: %d\\n' % (total_queries)\n\n\t\tif error_queries > 0:\n\t\t\tprint '[i] Queries processed in total with errors: %d' % (error_queries)", "def persist_record(conn,data,tb_name):\n\tquery_param\t\t= tuple(list(map(lambda k : data[k],col_list[tb_name])))\n\texecute_query(conn,query_strings[tb_name],query_param)\n\treturn", "def on_data(self, data):\n\n t = json.loads(data) \n tweet = {\n 'text': t['text'],\n 'username': t['user']['screen_name'],\n 'followers_count': t['user']['followers_count']\n }\n\n logging.critical(f'\\n\\n\\nTWEET INCOMING: {tweet[\"text\"]}\\n\\n\\n')\n tweet_collection.insert({'username' : tweet['username'],'followers_count' : tweet['followers_count'], 'text' : tweet['text']})", "def connect(created_at, username, tweet, location, followers_count, tweet_id):\n try:\n con = mysql.connector.connect(host = 'localhost',\n database='Twitter', user='root', password = db_password,\n auth_plugin='mysql_native_password', charset = 'utf8')\n\n if con.is_connected():\n\n #Insert twitter data\n\n cursor = con.cursor()\n \n query = \"INSERT INTO no_retweet (created_at, username, tweet, location, \\\n followers_count, tweet_id) \\\n VALUES (%s, %s, %s, %s, %s, %s)\"\n cursor.execute(query, (created_at, username, tweet, location, followers_count, tweet_id))\n\n con.commit()\n cursor.close()\n con.close()\n\n except Error as e:\n print(e)\n\n\n return", "def persist(data):\n conn = psycopg2.connect(host=\"localhost\", database=\"integration\", user=\"postgres\", password=\"postgres\")\n cursor = conn.cursor()\n cursor.execute(INSERT_SQL, (data[\"name\"], data[\"gender\"], data[\"age\"]))\n conn.commit()\n cursor.close()", "def get_tweets():\n if not Tweet.objects.all():\n # If the db is empty, don't get max_id.\n tweets = api.search(\n q='#python',\n count=100\n )\n else:\n # If the db is not empty, get max_id.\n subtask(clean_tweetdb)\n max_id = min([tweet.tweet_id for tweet in Tweet.objects.all()])\n tweets = api.search(\n q='#python',\n max_id=max_id,\n count=100\n )\n\n # Store the tweet data in lists.\n tweets_id = [tweet.id for tweet in tweets]\n tweets_date = [tweet.created_at for tweet in tweets]\n tweets_source = [tweet.source for tweet in tweets]\n tweets_favorite_cnt = [tweet.favorite_count for tweet in tweets]\n tweets_retweet_cnt = [tweet.retweet_count for tweet in tweets]\n tweets_text = [tweet.text for tweet in tweets]\n\n # Iterate over these lists and add data to db.\n for i, j, k, l, m, n in zip(\n tweets_id,\n tweets_date,\n tweets_source,\n tweets_favorite_cnt,\n tweets_retweet_cnt,\n tweets_text,\n ):\n try:\n # Check that they are valid.\n Tweet.objects.create(\n tweet_id=i,\n tweet_date=j,\n tweet_source=k,\n tweet_favorite_cnt=l,\n tweet_retweet_cnt=m,\n tweet_text=n,\n )\n except IntegrityError:\n pass", "def save_data(self):\n db.session.add(self)\n db.session.commit( )", "def __save_tweet(self, twitter_result):\n timestamp = twitter_result['timestamp']\n\n # Remove +0000 from timestamp\n timestamp_split = timestamp.split(' ')\n timestamp = ''\n for piece in timestamp_split:\n if piece[0] is not '+':\n timestamp += piece + ' '\n\n # Remove trailing space\n timestamp = timestamp[:-1]\n\n # Cast to iso format\n timestamp = 
datetime.strptime(timestamp, \"%a %b %d %H:%M:%S %Y\").isoformat()\n\n crawl = self.mongo_controller.add_crawl_twitter(\n twitter_result['keyword_id'],\n twitter_result['tweet_id'],\n twitter_result['text'],\n twitter_result['likes'],\n twitter_result['retweets'],\n timestamp,\n return_object=True,\n cast=True,\n )\n\n app.send_task('process-crawl', kwargs={ 'crawl_dict': crawl.to_json() }, queue=queues['processor'])\n\n return crawl", "def write_tweet(tweet):\n try:\n tweet_data = [tweet.date, tweet.content.encode('utf-8'), tweet.id, tweet.likeCount,\n tweet.replyCount,\n tweet.retweetCount, tweet.quoteCount,\n tweet.user.username, tweet.user.id, tweet.user.followersCount,\n tweet.user.friendsCount,\n tweet.user.statusesCount, tweet.user.verified, tweet.user.url, tweet.url]\n if tweet.mentionedUsers is not None:\n tweet_data.append([tweet.mentionedUsers])\n else:\n tweet_data.append(None)\n if tweet.quotedTweet is not None:\n tweet_data.append(tweet.quotedTweet.id)\n tweet_data.append(tweet.quotedTweet.content.encode('utf-8'))\n tweet_data.append(tweet.quotedTweet.user.username)\n tweet_data.append(tweet.quotedTweet.user.id)\n if tweet.quotedTweet.mentionedUsers is not None:\n tweet_data.append([tweet.quotedTweet.mentionedUsers])\n else:\n tweet_data.append(None)\n else:\n tweet_data.append(None)\n tweet_data.append(None)\n tweet_data.append(None)\n tweet_data.append(None)\n return tweet_data\n except UnicodeEncodeError:\n pass", "def example_data():\n\n # In case this is run more than once, empty out existing data\n\n FacebookPost.query.delete()\n TwitterPost.query.delete()\n FacebookInfo.query.delete()\n TwitterInfo.query.delete()\n User.query.delete()\n\n\n # Test user\n test_user = User(username='test_user1', password='$pbkdf2-sha256$29000$FALgPKfUWiuFkNK6NwZA6A$p.mRwWhJ8zs3cFNt7ygsb/HDF1EY5rYW3DdySpIm/NQ')\n\n db.session.add(test_user)\n db.session.commit()\n\n\n # Test login info\n test_fb_info = FacebookInfo(user_id=test_user.user_id, access_token='fake1234', facebook_user_id='1234567' )\n\n db.session.add(test_fb_info)\n db.session.commit()\n\n #Test login info\n test_twitter_info = TwitterInfo(user_id=test_user.user_id, oauth_token='fake1234', oauth_token_secret='1234567')\n\n db.session.add(test_twitter_info)\n db.session.commit()\n\n #Test Facebook post\n test_fb_post = FacebookPost(user_id=test_user.user_id, msg='Test for Facebook!', post_datetime='1480203960', facebookinfo_id=test_fb_info.facebookinfo_id)\n\n db.session.add(test_fb_post)\n db.session.commit()\n\n # Test Twitter post\n test_twitter_post = TwitterPost(user_id=test_user.user_id, msg='Test for Twitter!', post_datetime='1480203960', twitterinfo_id= test_twitter_info.twitterinfo_id)\n\n db.session.add(test_twitter_post)\n\n db.session.commit()", "def populate_table(\n user, created_at, tweet, retweet_count, id_str, my_database=DATABASE):\n\n dbconnect = connect_db(DATABASE)\n\n cursor = dbconnect.cursor()\n cursor.execute(\"USE airflowdb\")\n\n # add content here\n\n try:\n query=\"INSERT INTO tweets (user, created_at, tweet, retweet_count, id_str) VALUES (%s, %s, %s, %s, %s)\"\n \n cursor.execute(query, (user, created_at, tweet, retweet_count, id_str))\n \n dbconnect.commit()\n print(\"commited\")\n\n except mysql.Error as e:\n print(e)\n dbconnect.rollback()\n\n cursor.close()\n dbconnect.close()\n\n return", "async def add_tweet(self, tid=None): \n try:\n data=json.loads(self.request.body.decode('utf-8'))\n except: \n print(\"No data body!\")\n\n #print(\"Coordinates: {}\".format(data[\"coordinates\"]))\n if 
\"place\" in data:\n print(\"Place: {}\".format(data[\"place\"]))\n\n #print(\"User location: {}\".format(data[\"user\"][\"location\"]))\n #print(\"User lang: {}\".format(data[\"user\"][\"lang\"]))\n t=Tweet()\n t.tweet_id = tid\n t = self.fill_tweet(t, data)\n tweet_cache.append(t.to_dict())\n if \"retweeted_status\" in data:\n t.retweeted_status=data[\"retweeted_status\"]\n # \n # save the tweet\n #\n t.upsert()\n #\n # now handle the retweet\n #\n if \"retweeted_status\" in data:\n # this is a retweet so\n # do it once more for the original tweet\n tr=Tweet()\n tr.tweet_id = data[\"retweeted_status\"][\"id_str\"]\n tr = self.fill_tweet(tr, data[\"retweeted_status\"])\n tweet_cache.append(tr.to_dict())\n #tr.upsert()\n #r=requests.get(\"https://publish.twitter.com/oembed?url=https://twitter.com/Interior/status/\"+ t.tweet_id )\n #await self.fire_callbacks(r.json())\n #print(t.to_json(),file=ofile)\n #\n # get the embed html from twitter oembed API\n #\n r=requests.get(\"https://publish.twitter.com/oembed?url=https://twitter.com/Interior/status/\"+ t.tweet_id )\n #print(r.json())\n \n #print(self.__class__.callbacks)\n await self.fire_callbacks(r.json())\n #self.success(message=\"Added tweet id: {} \".format(str(id)), data=t.to_json(), format=\"json\", pure=True)", "def set_data(self, data):\r\n self.tweets = data", "def save( self, result ):\n # try:\n # self._is_valid(result)\n wordMap = WordMappingDeux()\n wordMap.word = result.text if is_result( result ) else result\n wordMap.sentence_index = result.sentence_index\n wordMap.word_index = result.word_index\n if result.type == 'tweet' and result.id is not None:\n wordMap.tweet_id = result.id\n if result.type == 'user' and result.id is not None:\n wordMap.user_id = result.id\n # stage for saving\n self.session.add( wordMap )\n # self.handle_flush()", "def persist_to_db(engine_string):\n\n engine = sql.create_engine(engine_string)\n Base.metadata.create_all(engine)\n Session = sessionmaker(bind=engine)\n session = Session()\n\n # Delete all existing records in the table\n if config.LOCAL_DB_FLAG:\n try:\n session.execute('''DELETE FROM msia_db.bean_attributes''')\n except:\n pass\n else:\n try:\n session.execute('''DELETE FROM bean_attributes''')\n except:\n pass\n\n # Read the data table and persist it into the database\n raw_data = pd.read_csv(config.DATA_TABLE_PATH)\n raw_data = raw_data.replace(np.nan, '', regex=True)\n\n try:\n for i in range(raw_data.shape[0]):\n bean_row = BeanAttributes(id=int(raw_data.iloc[i]['Unnamed: 0']),\n species=str(raw_data.iloc[i]['Species']),\n owner=str(raw_data.iloc[i]['Owner.1']),\n country=str(raw_data.iloc[i]['Country.of.Origin']),\n farm_name=str(raw_data.iloc[i]['Farm.Name']),\n company=str(raw_data.iloc[i]['Company']),\n region=str(raw_data.iloc[i]['Region']),\n producer=str(raw_data.iloc[i]['Producer']),\n grading_date=str(raw_data.iloc[i]['Grading.Date']),\n processing_method=str(raw_data.iloc[i]['Processing.Method']),\n aroma=float(raw_data.iloc[i]['Aroma']),\n flavor=float(raw_data.iloc[i]['Flavor']),\n aftertaste=float(raw_data.iloc[i]['Aftertaste']),\n acidity=float(raw_data.iloc[i]['Acidity']),\n body=float(raw_data.iloc[i]['Body']),\n balance=float(raw_data.iloc[i]['Balance']),\n uniformity=float(raw_data.iloc[i]['Uniformity']),\n cleancup=float(raw_data.iloc[i]['Clean.Cup']),\n sweetness=float(raw_data.iloc[i]['Sweetness']),\n total_cup_point=float(raw_data.iloc[i]['Total.Cup.Points']),\n moisture=float(raw_data.iloc[i]['Moisture']),\n color=str(raw_data.iloc[i]['Color']),\n 
cluster=int(raw_data.iloc[i]['cluster'])\n )\n session.add(bean_row)\n logger.debug('Row %d added to table ' % i)\n session.commit()\n except sql.exc.IntegrityError: # Check primary key duplication\n logger.error(\"Duplicated coffee bean\")\n except Exception as e:\n logger.error(\"Incorrect credentials, access denied\", e)\n finally:\n session.close()", "def save(self):\n self.session.commit()" ]
[ "0.7387163", "0.67616653", "0.66996026", "0.66227996", "0.66101587", "0.6543893", "0.6499494", "0.6354981", "0.623357", "0.60689884", "0.6036905", "0.6031254", "0.59903", "0.593716", "0.59077364", "0.5884541", "0.58744943", "0.5866427", "0.58403707", "0.5819588", "0.5809459", "0.5788491", "0.576895", "0.5761495", "0.5748086", "0.57042986", "0.5693142", "0.56853974", "0.56174874", "0.56104475" ]
0.78405553
0
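A hedged end-to-end sketch for the positive document above. The contact point, keyspace, and payload values are hypothetical, and persist_data additionally expects module-level logger and data_table names as in its source; note that user_timezone is parsed but never inserted:

import json
from cassandra.cluster import Cluster  # DataStax Python driver

session = Cluster(['127.0.0.1']).connect('tweets_ks')   # hypothetical keyspace
payload = json.dumps({
    '_unit_id': 815719226,        # keys mirror what persist_data reads
    'gender': 'male',
    'text': 'example tweet text',
    'hashtags': '[]',
    'tweet_count': 42,
    'tweet_location': 'Chennai',
    'normalized_location': 'Chennai, India',
    'user_timezone': 'Asia/Kolkata',
})
persist_data(payload, session)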
Create an Nbin discrete colormap from a specified input map
def discrete_cmap(N, base_cmap=None):
    base = plt.get_cmap(base_cmap)
    color_list = base(np.linspace(0, 1, N))
    cmap_name = base.name + str(N)
    return base.from_list(cmap_name, color_list, N)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def discrete_cmap(N, base_cmap=None):\n # see https://gist.github.com/jakevdp/91077b0cae40f8f8244a\n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)", "def discrete_cmap(N, base_cmap=None):\n\t# Note that if base_cmap is a string or None, you can simply do\n\t# return plt.cm.get_cmap(base_cmap, N)\n\t# The following works for string, None, or a colormap instance:\n\tbase = plt.cm.get_cmap(base_cmap)\n\tcolor_list = base(np.linspace(0, 1, N))\n\tcmap_name = base.name + str(N)\n\treturn base.from_list(cmap_name, color_list, N)", "def discrete_cmap(N, base_cmap=None):\n \n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n \n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)", "def discrete_cmap(N, base_cmap=None):\n \n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n \n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)", "def discrete_cmap(N, base_cmap=None):\n \n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n \n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)", "def discrete_cmap(N, base_cmap=None):\n\n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n\n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return matplotlib.colors.LinearSegmentedColormap \\\n .from_list(cmap_name, color_list, N)", "def discrete_cmap(N, base_cmap=None):\n\n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n\n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)", "def discrete_cmap(N, base_cmap=None):\n\n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n\n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)", "def discrete_cmap(N, base_cmap=None):\n\n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n\n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)", "def discrete_cmap(N, base_cmap=None):\n\n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap 
instance:\n\n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)", "def discrete_cmap(N, base_cmap=None):\n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)", "def discrete_cmap(N, base_cmap=None):\n\n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n # By Jake VanderPlas\n # License: BSD-style\n\n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)", "def create_colormap(seg_map):\n\tcolormap = np.zeros((256, 3), dtype=int)\n\tind = np.arange(256, dtype=int)\n\tfor shift in reversed(range(8)):\n\t\tfor channel in range(3):\n\t\t\tcolormap[:, channel] |= ((ind >> channel) & 1) << shift \n\t\tind >>= 3\n\treturn colormap[seg_map]", "def cmap_discretize(cmap, N):\n\n if type(cmap) == str:\n cmap = get_cmap(cmap)\n colors_i = np.concatenate((np.linspace(0, 1., N), (0.,0.,0.,0.)))\n colors_rgba = cmap(colors_i)\n indices = np.linspace(0, 1., N+1)\n cdict = {}\n for ki,key in enumerate(('red','green','blue')):\n cdict[key] = [ (indices[i], colors_rgba[i-1,ki], colors_rgba[i,ki]) for i in xrange(N+1) ]\n # Return colormap object.\n return mcolors.LinearSegmentedColormap(cmap.name + \"_%d\"%N, cdict, 1024)", "def color_map(n=256, normalized=False):\n def bitget(byteval, idx):\n return (byteval & (1 << idx)) != 0\n\n dtype = 'float32' if normalized else 'uint8'\n cmap = np.zeros((n, 3), dtype=dtype)\n for i in range(n):\n r = g = b = 0\n c = i + 1 # skip the first color (black)\n for j in range(8):\n r |= bitget(c, 0) << 7 - j\n g |= bitget(c, 1) << 7 - j\n b |= bitget(c, 2) << 7 - j\n c >>= 3\n\n cmap[i] = np.array([r, g, b])\n\n cmap = cmap / 255 if normalized else cmap\n return cmap", "def get_color_map(n):\n jet = plt.get_cmap('jet')\n cNorm = colors.Normalize(vmin=0, vmax=n-1)\n scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet)\n outmap = []\n for i in range(n):\n outmap.append( scalarMap.to_rgba(i) )\n return outmap", "def _cmap_discretize(cmap, N):\n\n if type(cmap) == str:\n cmap = plt.get_cmap(cmap)\n colors_i = np.concatenate((np.linspace(0, 1., N), (0.,0.,0.,0.)))\n colors_rgba = cmap(colors_i)\n indices = np.linspace(0, 1., N+1)\n cdict = {}\n for ki, key in enumerate(('red','green','blue')):\n cdict[key] = [(indices[i], colors_rgba[i-1,ki], colors_rgba[i,ki])\n for i in range(N+1)]\n # Return colormap object.\n return mcolors.LinearSegmentedColormap(cmap.name + \"_%d\"%N, cdict, 1024)", "def discrete_cmap(n_colors: int, base_cmap: str) -> Colormap:\r\n # https://gist.github.com/jakevdp/91077b0cae40f8f8244a\r\n base = plt.cm.get_cmap(base_cmap)\r\n color_list = base(np.linspace(0, 1, n_colors))\r\n cmap_name = base.name + str(n_colors)\r\n\r\n return base.from_list(cmap_name, color_list, n_colors)", "def cmap(num,cmap = plt.cm.gist_earth_r):\n return cmap(np.linspace(0, 1, num))", "def cmap_discretize(N):\n \n cmap = matplotlib.cm.jet;\n colors_i = np.concatenate((np.linspace(0, 1., N), (0.,0.,0.,0.)))\n colors_rgba = cmap(colors_i)\n indices = np.linspace(0, 1., N+1)\n 
cdict = {}\n for ki, key in enumerate(('red','green','blue')):\n cdict[key] = [(indices[i], colors_rgba[i-1,ki], colors_rgba[i,ki]) for i in range(N+1)]\n \n return matplotlib.colors.LinearSegmentedColormap(cmap.name + \"_%d\"%N, cdict, 1024);", "def cmap_discretize(cmap, N):\n\n cdict = cmap._segmentdata.copy()\n # N colors\n colors_i = linspace(0,1.,N)\n # N+1 indices\n indices = linspace(0,1.,N+1)\n for key in ('red','green','blue'):\n # Find the N colors\n D = array(cdict[key])\n I = interpolate.interp1d(D[:,0], D[:,1])\n colors = I(colors_i)\n # Place these colors at the correct indices.\n A = zeros((N+1,3), float)\n A[:,0] = indices\n A[1:,1] = colors\n A[:-1,2] = colors\n # Create a tuple for the dictionary.\n L = []\n for l in A:\n L.append(tuple(l))\n cdict[key] = tuple(L)\n # Return colormap object.\n return matplotlib.colors.LinearSegmentedColormap('colormap',cdict,1024)", "def terrain_cmap_256():\n C = np.array(\n [\n [0, 125, 255],\n [2, 97, 0], # Alternativley [0, 0, 255], for blue at sealevel\n [2, 97, 0],\n [3, 97, 0],\n [4, 97, 0],\n [5, 97, 0],\n [6, 98, 0],\n [7, 98, 0],\n [8, 98, 0],\n [9, 98, 0],\n [10, 98, 0],\n [11, 98, 0],\n [11, 99, 0],\n [12, 99, 0],\n [13, 99, 0],\n [14, 99, 0],\n [15, 99, 0],\n [16, 99, 0],\n [17, 100, 0],\n [18, 100, 0],\n [19, 100, 0],\n [19, 100, 0],\n [20, 100, 0],\n [21, 101, 0],\n [22, 101, 0],\n [23, 101, 0],\n [24, 101, 0],\n [25, 101, 0],\n [26, 102, 0],\n [27, 102, 0],\n [28, 102, 0],\n [28, 102, 0],\n [29, 102, 0],\n [30, 102, 0],\n [31, 103, 0],\n [32, 103, 0],\n [33, 103, 0],\n [34, 103, 0],\n [35, 103, 0],\n [36, 104, 0],\n [37, 104, 0],\n [37, 104, 0],\n [38, 104, 0],\n [39, 104, 0],\n [40, 104, 0],\n [41, 105, 0],\n [42, 105, 0],\n [43, 105, 0],\n [44, 105, 0],\n [45, 105, 0],\n [45, 106, 0],\n [46, 106, 0],\n [47, 106, 0],\n [48, 106, 0],\n [49, 106, 0],\n [50, 106, 0],\n [51, 107, 0],\n [52, 107, 0],\n [53, 107, 0],\n [54, 107, 0],\n [54, 107, 0],\n [55, 108, 0],\n [56, 108, 0],\n [57, 108, 0],\n [58, 108, 0],\n [59, 108, 0],\n [60, 108, 1],\n [61, 109, 1],\n [62, 109, 2],\n [63, 109, 2],\n [64, 109, 3],\n [65, 109, 3],\n [66, 110, 4],\n [67, 110, 4],\n [68, 110, 4],\n [69, 110, 5],\n [70, 110, 5],\n [71, 110, 6],\n [72, 111, 6],\n [73, 111, 7],\n [74, 111, 7],\n [75, 111, 8],\n [76, 111, 8],\n [77, 112, 9],\n [78, 112, 9],\n [79, 112, 10],\n [80, 112, 10],\n [81, 112, 11],\n [82, 112, 11],\n [83, 113, 12],\n [84, 113, 12],\n [85, 113, 13],\n [85, 113, 13],\n [86, 113, 14],\n [87, 114, 14],\n [88, 114, 15],\n [89, 114, 15],\n [90, 114, 16],\n [91, 114, 16],\n [92, 114, 17],\n [93, 115, 17],\n [94, 115, 18],\n [95, 115, 18],\n [96, 115, 19],\n [97, 115, 19],\n [98, 115, 20],\n [99, 116, 20],\n [100, 116, 20],\n [101, 116, 21],\n [102, 116, 21],\n [103, 116, 22],\n [104, 117, 22],\n [105, 117, 23],\n [106, 117, 23],\n [107, 117, 24],\n [108, 117, 24],\n [109, 118, 25],\n [110, 118, 25],\n [111, 118, 26],\n [112, 118, 26],\n [113, 118, 27],\n [114, 118, 27],\n [115, 119, 28],\n [116, 119, 28],\n [117, 119, 29],\n [118, 119, 29],\n [119, 119, 30],\n [120, 120, 30],\n [121, 120, 31],\n [122, 120, 31],\n [123, 120, 32],\n [124, 121, 32],\n [125, 121, 32],\n [126, 121, 33],\n [127, 122, 33],\n [128, 122, 34],\n [129, 122, 34],\n [130, 123, 35],\n [131, 123, 35],\n [132, 123, 36],\n [133, 124, 36],\n [134, 124, 37],\n [135, 124, 37],\n [136, 125, 37],\n [137, 125, 38],\n [138, 125, 38],\n [139, 126, 39],\n [139, 126, 39],\n [140, 126, 40],\n [141, 126, 40],\n [142, 127, 41],\n [143, 127, 41],\n [144, 127, 41],\n [145, 128, 42],\n [146, 
128, 42],\n [147, 128, 43],\n [148, 129, 43],\n [149, 129, 44],\n [150, 129, 44],\n [151, 130, 45],\n [152, 130, 45],\n [153, 130, 45],\n [154, 131, 46],\n [155, 131, 46],\n [156, 131, 47],\n [157, 132, 47],\n [158, 132, 48],\n [159, 132, 48],\n [160, 133, 49],\n [161, 133, 49],\n [162, 133, 50],\n [163, 134, 50],\n [164, 134, 50],\n [165, 134, 51],\n [166, 135, 51],\n [167, 135, 52],\n [168, 135, 52],\n [169, 136, 53],\n [170, 136, 53],\n [171, 136, 54],\n [172, 137, 54],\n [173, 137, 54],\n [174, 137, 55],\n [175, 138, 55],\n [176, 138, 56],\n [177, 138, 56],\n [178, 139, 57],\n [179, 139, 57],\n [180, 139, 58],\n [181, 140, 58],\n [182, 140, 58],\n [183, 140, 59],\n [184, 141, 59],\n [185, 142, 62],\n [186, 144, 65],\n [187, 146, 68],\n [188, 147, 71],\n [189, 149, 74],\n [190, 151, 77],\n [192, 153, 80],\n [193, 155, 83],\n [194, 156, 86],\n [195, 158, 90],\n [196, 160, 93],\n [197, 162, 96],\n [198, 164, 99],\n [199, 165, 102],\n [201, 167, 105],\n [202, 169, 108],\n [203, 171, 111],\n [204, 173, 114],\n [205, 174, 117],\n [206, 176, 120],\n [207, 178, 123],\n [208, 180, 126],\n [210, 182, 130],\n [211, 184, 133],\n [212, 185, 136],\n [213, 187, 139],\n [214, 189, 142],\n [215, 191, 145],\n [216, 193, 148],\n [217, 194, 151],\n [219, 196, 154],\n [220, 198, 157],\n [221, 200, 160],\n [222, 202, 163],\n [223, 203, 166],\n [224, 205, 170],\n [225, 207, 173],\n [226, 209, 176],\n [228, 211, 179],\n [229, 212, 182],\n [230, 214, 185],\n [231, 216, 188],\n [232, 218, 191],\n [233, 220, 194],\n [234, 221, 197],\n [235, 223, 200],\n [237, 225, 203],\n [238, 227, 207],\n [239, 229, 210],\n [240, 230, 213],\n [241, 232, 216],\n [242, 234, 219],\n [243, 236, 222],\n [245, 238, 225],\n [246, 240, 228],\n [247, 241, 231],\n [248, 243, 234],\n [249, 245, 237],\n [250, 247, 240],\n [251, 249, 243],\n [252, 250, 247],\n [254, 252, 250],\n [255, 254, 253],\n [255, 255, 255],\n ]\n )\n\n cm = ListedColormap(C / 255.0)\n return cm", "def LineColorCoding(N,cmap='jet'):\n colormap_name = cmap\n cm = plt.get_cmap(colormap_name)\n cNorm = colors.Normalize(vmin=0, vmax=N-1)\n scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cm)\n return scalarMap", "def create_label_colormap():\n colormap = np.array([\n [128, 64, 128],\n [244, 35, 232],\n [ 70, 70, 70],\n [102, 102, 156],\n [190, 153, 153],\n [153, 153, 153],\n [250, 170, 30],\n [220, 220, 0],\n [107, 142, 35],\n [152, 251, 152],\n [ 70, 130, 180],\n [220, 20, 60],\n [255, 0, 0],\n [ 0, 0, 142],\n [ 0, 0, 70],\n [ 0, 60, 100],\n [ 0, 80, 100],\n [ 0, 0, 230],\n [119, 11, 32],\n [ 0, 0, 0]], dtype=np.uint8)\n return colormap", "def generate_n_colors(n, cmap_name='tab20'):\n pt_region_colormap = plt.get_cmap(cmap_name)\n max_i = len(pt_region_colormap.colors)\n return [pt_region_colormap(i % max_i) for i in range(n)]", "def get_cmap(n):\n cmap_fn = plt.cm.get_cmap('hsv', n+1)\n colors = [cmap_fn(i + 1)[:3] for i in range(n)]\n random.shuffle(colors)\n cmap = (np.array(colors) * 255.0).astype(np.uint8)\n return cmap", "def terrain_cmap_50():\n C = np.array(\n [\n [2, 97, 0],\n [6, 98, 0],\n [11, 98, 0],\n [16, 99, 0],\n [20, 100, 0],\n [25, 101, 0],\n [30, 102, 0],\n [34, 103, 0],\n [39, 104, 0],\n [44, 105, 0],\n [48, 106, 0],\n [53, 107, 0],\n [58, 108, 0],\n [63, 109, 2],\n [68, 110, 4],\n [73, 111, 7],\n [78, 112, 9],\n [83, 113, 12],\n [88, 114, 15],\n [93, 115, 17],\n [98, 116, 20],\n [103, 116, 22],\n [109, 117, 25],\n [114, 118, 27],\n [119, 119, 30],\n [124, 121, 32],\n [129, 122, 34],\n [134, 124, 37],\n [139, 126, 39],\n [144, 127, 41],\n [149, 129, 
44],\n [155, 131, 46],\n [160, 133, 48],\n [165, 134, 51],\n [170, 136, 53],\n [175, 138, 55],\n [180, 139, 58],\n [185, 143, 64],\n [191, 152, 80],\n [197, 162, 96],\n [203, 171, 112],\n [209, 181, 128],\n [215, 190, 144],\n [221, 199, 160],\n [226, 209, 176],\n [232, 218, 192],\n [238, 228, 208],\n [244, 237, 224],\n [250, 246, 240],\n [255, 255, 255],\n ]\n )\n\n cm = ListedColormap(C / 255.0)\n return cm", "def test_colormap_discrete_nu():\n with TestingCanvas(size=size, bgcolor='w') as c:\n idata = np.linspace(255, 0, size[0]*size[1]).astype(np.ubyte)\n data = idata.reshape((size[0], size[1]))\n image = Image(cmap=Colormap(np.array([[0, .75, 0], [.75, .25, .5]]),\n [0., .25, 1.], interpolation='zero'),\n clim='auto', parent=c.scene)\n image.set_data(data)\n assert_image_approved(c.render(), \"visuals/colormap_nu.png\")", "def disp_map(disp):\n map = np.array([\n [0,0,0,114],\n [0,0,1,185],\n [1,0,0,114],\n [1,0,1,174],\n [0,1,0,114],\n [0,1,1,185],\n [1,1,0,114],\n [1,1,1,0]\n ])\n # grab the last element of each column and convert into float type, e.g. 114 -> 114.0\n # the final result: [114.0, 185.0, 114.0, 174.0, 114.0, 185.0, 114.0]\n bins = map[0:map.shape[0]-1,map.shape[1] - 1].astype(float)\n\n # reshape the bins from [7] into [7,1]\n bins = bins.reshape((bins.shape[0], 1))\n\n # accumulate element in bins, and get [114.0, 299.0, 413.0, 587.0, 701.0, 886.0, 1000.0]\n cbins = np.cumsum(bins)\n\n # divide the last element in cbins, e.g. 1000.0\n bins = bins / cbins[cbins.shape[0] -1]\n\n # divide the last element of cbins, e.g. 1000.0, and reshape it, final shape [6,1]\n cbins = cbins[0:cbins.shape[0]-1] / cbins[cbins.shape[0] -1]\n cbins = cbins.reshape((cbins.shape[0], 1))\n\n # transpose disp array, and repeat disp 6 times in axis-0, 1 times in axis-1, final shape=[6, Height*Width]\n ind = np.tile(disp.T, (6,1))\n tmp = np.tile(cbins, (1, disp.size))\n\n # get the number of disp's elements bigger than each value in cbins, and sum up the 6 numbers\n b = (ind > tmp).astype(int)\n s = np.sum(b, axis=0)\n\n bins = 1 / bins\n\n # add an element 0 ahead of cbins, [0, cbins]\n t = cbins\n cbins = np.zeros((cbins.size+1,1))\n cbins[1:] = t\n\n # get the ratio and interpolate it\n disp = (disp - cbins[s]) * bins[s]\n disp = map[s,0:3] * np.tile(1 - disp,(1,3)) + map[s + 1,0:3] * np.tile(disp,(1,3))\n\n return disp", "def get_cmap(n, name=\"hsv\"):\n return plt.cm.get_cmap(name, n)" ]
[ "0.72187954", "0.69491076", "0.6924096", "0.6924096", "0.6924096", "0.6920645", "0.69188344", "0.69188344", "0.69188344", "0.69188344", "0.69142735", "0.69021994", "0.67898613", "0.6699816", "0.6696647", "0.6689399", "0.6646159", "0.6633695", "0.65624946", "0.65356964", "0.6471811", "0.6455578", "0.6407984", "0.6370638", "0.63426495", "0.63035554", "0.6239817", "0.619795", "0.61961895", "0.618948" ]
0.73025054
0
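A usage sketch for the positive document above, assuming discrete_cmap is in scope and a matplotlib version where plt.get_cmap is still available. A segmented base map such as 'jet' is chosen because the from_list classmethod used inside exists on LinearSegmentedColormap:

import numpy as np
import matplotlib.pyplot as plt

x, y, c = np.random.rand(3, 50)
cmap = discrete_cmap(5, 'jet')    # 5-bin version of jet
sc = plt.scatter(x, y, c=(c * 5).astype(int), cmap=cmap, vmin=-0.5, vmax=4.5)
plt.colorbar(sc, ticks=range(5))  # one tick centered on each bin
plt.show()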
Plot the Fisher/mutual information after the linear layer in a network of unstructured weights, averaged over many repetitions, as a function of network size.
def unstruct_weight_plot_mu(
    Ns, mus, sigma, repetitions, plot, design='lognormal',
    sigmaP=1., sigmaS=1., sigmaC=1., fax=None
):
    # create plot
    if fax is None:
        fig, ax = plt.subplots(1, 1, figsize=(8, 8))
    else:
        fig, ax = fax
    # create data arrays
    data = np.zeros((Ns.size, mus.size, repetitions))
    # iterate over population sizes
    for N_idx, N in enumerate(Ns):
        # iterate over weight scales
        for mu_idx, mu in enumerate(mus):
            # iterate over repetitions
            for rep in range(repetitions):
                v = np.ones(N)
                w = 1. + LNN.unstruct_weight_maker(N, design, loc=mu, scale=sigma)
                lnn = LNN(v=v, w=w, sigmaP=sigmaP, sigmaS=sigmaS, sigmaC=sigmaC)
                if plot == 'FI_linear':
                    data[N_idx, mu_idx, rep] = lnn.FI_linear_stage()
                elif plot == 'MI_linear':
                    data[N_idx, mu_idx, rep] = lnn.MI_linear_stage()
                else:
                    raise ValueError('Plot version does not exist.')
        data_means = np.mean(data[N_idx, :, :], axis=1)
        data_stdevs = np.std(data[N_idx, :, :], axis=1)
        ax.plot(
            mus, data_means,
            color=colors[N_idx], linestyle='-', linewidth=4, zorder=10,
            label=r'$N = %s$' % N)
        ax.fill_between(
            mus,
            data_means - data_stdevs,
            data_means + data_stdevs,
            color=colors[N_idx], alpha=0.50)
    ax.set_facecolor('white')
    ax.set_xlabel(r'$\mu$', fontsize=30)
    ax.tick_params(labelsize=20)
    lgd = ax.legend(loc=4, facecolor='white', prop={'size': 18}, ncol=2,
                    handletextpad=0.4, handlelength=1., labelspacing=0.27,
                    columnspacing=0.5)
    lgd.get_frame().set_edgecolor('k')
    for spine in ax.spines.values():
        spine.set_edgecolor('k')
    return fig, ax
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def visualize(self, network, f):\n import matplotlib\n matplotlib.use('Agg',warn=False)\n import matplotlib.pyplot as plt\n # Convert to a network if it is not.\n if not isinstance(network, NeuralNetwork):\n network = NeuralNetwork(network)\n \n fig = plt.figure()\n steps, states, actions = self._loop(network, max_steps=1000)\n # TEMP STUFF\n actions = np.array(actions)\n print((actions.size, np.histogram(actions)[0]))\n ##\n x, dx, theta, dtheta = list(zip(*states))\n theta = np.vstack(theta).T\n dtheta = np.vstack(dtheta).T\n # The top plot (cart position)\n top = fig.add_subplot(211)\n top.fill_between(list(range(len(x))), -self.h, self.h, facecolor='green', alpha=0.3)\n top.plot(x, label=r'$x$') \n top.plot(dx, label=r'$\\delta x$')\n top.legend(loc='lower left', ncol=4, fancybox=True, bbox_to_anchor=(0, 0, 1, 1))\n # The bottom plot (pole angles)\n bottom = fig.add_subplot(212)\n bottom.fill_between(list(range(theta.shape[1])), -self.r, self.r, facecolor='green', alpha=0.3)\n for i, (t, dt) in enumerate(zip(theta, dtheta)):\n bottom.plot(t, label=r'$\\theta_%d$'%i)\n bottom.plot(dt, ls='--', label=r'$\\delta \\theta_%d$'%i)\n bottom.legend(loc='lower left', ncol=4, fancybox=True, bbox_to_anchor=(0, 0, 1, 1))\n fig.savefig(f)", "def visualize_train(train_data_full, train_labels, train_data, thetas, losses, niter):\n fig1, ax1 = plt.subplots()\n ax1.scatter(train_data_full[\"Weight\"], train_data_full[\"Height\"], color = 'blue')\n\n # De-standarize\n train_mean = train_data_full[\"Weight\"].mean()\n train_std = train_data_full[\"Weight\"].std()\n train_data_for_plot = train_mean + train_data[\"Weight\"] * train_std\n\n ax1.plot(train_data_for_plot, predict(train_data, thetas[niter - 1]), color = 'red', linewidth = 2)\n ax1.set_xlabel(\"Height\")\n ax1.set_ylabel(\"Weight\")\n\n fig2, ax2 = plt.subplots()\n ax2.plot(range(len(losses)), losses, color = 'blue', linewidth = 2)\n ax2.set_xlabel(\"Iteration\")\n ax2.set_ylabel(\"MSE\")\n\n fig3, ax3 = plt.subplots()\n np_gradient_ws = np.array(thetas)\n\n w = np.linspace(min(np_gradient_ws[:, 0]), max(np_gradient_ws[:, 0]), len(np_gradient_ws[:, 0]))\n b = np.linspace(min(np_gradient_ws[:, 1]), max(np_gradient_ws[:, 1]), len(np_gradient_ws[:, 1]))\n x, y = np.meshgrid(w, b)\n z = compute_z_loss(train_labels, train_data, np.stack((w,b)).T)\n cp = ax3.contourf(x, y, z, cmap = plt.cm.jet)\n fig3.colorbar(cp, ax = ax3)\n ax3.plot(3.54794951, 66.63949115837143, color = 'red', marker = '*', markersize = 20)\n if niter > 0:\n thetas_to_plot = np_gradient_ws[:niter]\n ax3.plot(thetas_to_plot[:, 0], thetas_to_plot[:, 1], marker = 'o', color = 'w', markersize = 10)\n ax3.set_xlabel(r'$w$')\n ax3.set_ylabel(r'$b$')\n return fig1, fig2, fig3", "def plot_reduce_dimension(model):\n\n outputs = []\n n = 8\n paths = 'data/grimace'\n dirs = np.random.choice(os.listdir(paths), n)\n\n for d in dirs:\n p = paths + '/' + str(d)\n files = os.listdir(p)\n if files:\n for f in files:\n img = os.path.join(p, f)\n image = cv2.imread(img)\n image = process_image(image)\n output = model.predict(image)[0]\n outputs.append(output)\n\n embedded = TSNE(2).fit_transform(outputs)\n\n colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w']\n\n for i in range(n):\n m, n = i * 20, (i + 1) * 20\n plt.scatter(embedded[m: n, 0], embedded[m: n, 1],\n c=colors[i], alpha=0.5)\n\n plt.title('T-SNE')\n plt.grid(True)\n plt.show()", "def plotGraph(self, title = \"Multi Layer Perceptron (MLP)\"):\n graph, pos, colorMap = self.getGraph()\n\n fig = plt.figure()\n 
fig.canvas.set_window_title(\"Neural Network\")\n plt.plot()\n nx.draw_networkx_nodes(graph,pos, node_color = colorMap)\n nx.draw_networkx_edges(graph,pos)\n plt.axis('off')\n plt.title(title)\n #plt.savefig(\"autoencoder.svg\", transparent = True)\n plt.show()", "def plotLoss():\n # ssr\n ssr = np.log(gradientDescent(X, y)[1])\n # number of iterations \n iterations = np.log(np.arange(1, len(ssr) + 1, 1))\n # plot reduction of ssr\n plt.plot(iterations, ssr)\n # xlabel\n plt.xlabel(\"Iteration\")\n # ylabel\n plt.ylabel(\"SSR\")\n # title\n plt.title(\"Reduction of SSR by number of Iterations\")\n # show plot \n plt.show()", "def plotNetwork(module_dict, arch):\n # Not a great way of doing it but it'll do for now\n min_val = 0\n max_val = 0\n for name, module in module_dict.items():\n if np.amin(module) < min_val:\n min_val = np.amin(module)\n if np.amax(module) > max_val:\n max_val = np.amax(module)\n\n print(min_val)\n print(max_val)\n list_keys = list(module_dict)\n num_layers = len(module_dict)\n num_cols = math.ceil(math.sqrt(num_layers))\n num_rows = math.ceil(num_layers/num_cols)\n fig, axes = plt.subplots(num_cols, num_rows, figsize=(num_cols*10, num_rows*10))\n\n for i, ax in zip(range(num_cols*num_rows), axes.flat):\n if i < num_layers:\n sub = sns.heatmap(module_dict[list_keys[i]], cmap=sns.diverging_palette(240, 10, s=100, as_cmap=True), \n center=0.00, cbar_kws={\"shrink\": 0.85}, xticklabels=False, yticklabels=False, square=True, ax=ax)\n ax.set_title(list_keys[i], fontsize=20)\n # make frame visible\n for _, spine in sub.spines.items():\n spine.set_visible(True)\n spine.set_linewidth(2) \n else:\n fig.delaxes(ax)\n\n\n if not os.path.exists('plots'):\n os.makedirs('plots')\n\n fig.savefig('plots/{architecture}full_network.png'.format(architecture=arch), transparent=True)", "def visualize_training(features, labels, pl):\n print(\"Visualizing training\")\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n # Take out each feature type, one at a time\n label_map = get_label_map(labels)\n\n for key in label_map.keys():\n like_ind = label_map[key]\n like_data = np.array([features[i] for i in like_ind])\n\n plt.scatter(like_data[:,0],like_data[:,1],label=key)\n\n # get limits\n xmin = features.column_min(0) - .5\n xmax = features.column_max(0) + .5\n ymin = features.column_min(1) - .5\n ymax = features.column_max(1) + .5\n\n plt.xlim(xmin,xmax)\n plt.ylim(ymin,ymax)\n\n # Track the current dividing line, as well as the number of epochs passed\n divider, = plt.plot([],[])\n epoch_tracker = plt.text(-1,.9, '', fontsize=15)\n\n def update(i):\n \"\"\"\n 1.) Get the next set of weights from the tracker\n 2.) Calculate and draw the new divider line\n 3.) Update the epoch counter\n 4.) 
If we are at the end of an epoch, plot a dashed divider line to track progress\n \"\"\"\n epoch = i//features.instance_count\n w = pl.weights_tracker[i]\n a = pl.accuracy_tracker[epoch]\n divider.set_data([xmin,xmax],[(-xmin * w[0] - w[2]) / w[1], (-xmax * w[0] - w[2]) / w[1]])\n epoch_tracker.set_text(\"{} {}\".format(epoch + 1, a))\n\n # Keep a shadow of the hyperplane at the end of each epoch\n if i % features.instance_count == 0:\n plot_hyperplane(w,xmin,xmax,iter = i, alpha = .3, color='black',linestyle='dashed')\n\n return divider\n\n ani = animation.FuncAnimation(fig, update, frames=range(len(pl.weights_tracker)), interval=250,repeat=False)\n plt.legend()\n\n # optional save file\n if len(sys.argv) >= 3 :\n ani.save(sys.argv[2], writer='imagemagick', fps=5)\n\n plt.show()", "def generate_plots(fixed, moving, warped, flows, train_loss, val_loss, reg_loss, epoch):\n moving = moving.detach().cpu().numpy()\n fixed = fixed.detach().cpu().numpy()\n warped = [w.detach().cpu().numpy() for w in warped]\n flows = [f.detach().cpu().numpy() for f in flows]\n\n fig = plt.figure(constrained_layout=True, figsize=(4 * 5, 4 * 3))\n ax_dict = fig.subplot_mosaic(\"\"\"\n FABCD\n LGHIE\n MKJWX\n \"\"\")\n\n ax_dict['F'].imshow(moving[0, 0, ...], cmap='gray')\n ax_dict['F'].set_title('Moving')\n\n ax_dict['W'].imshow(fixed[0, 0, ...], cmap='gray')\n ax_dict['W'].set_title('Fixed')\n\n for i, ax_name in enumerate(list(\"ABCDEX\")):\n ax_dict[ax_name].imshow(warped[i][0, 0, ...], cmap='gray')\n if ax_name == \"A\":\n ax_dict[ax_name].set_title(\"Affine\")\n else:\n ax_dict[ax_name].set_title(f\"Cascade {i}\")\n\n ax_dict['L'].plot(train_loss, color='red', label='train_loss')\n ax_dict['L'].plot(val_loss, label='val_loss', color='blue')\n ax_dict['L'].plot(reg_loss, label='train_reg_loss', color='green')\n ax_dict['L'].set_title(\"Losses\")\n ax_dict['L'].grid()\n ax_dict['L'].set_xlim(0, args.e)\n ax_dict['L'].legend(loc='upper right')\n ax_dict['L'].scatter(len(train_loss) - 1, train_loss[-1], s=20, color='red')\n ax_dict['L'].scatter(len(val_loss) - 1, val_loss[-1], s=20, color='blue')\n ax_dict['L'].scatter(len(reg_loss) - 1, reg_loss[-1], s=20, color='green')\n\n for i, ax_name in enumerate(list(\"GHIJKM\")):\n plot_grid(ax_dict[ax_name], flows[i][0, ...])\n if ax_name == \"G\":\n ax_dict[ax_name].set_title(\"Affine\")\n else:\n ax_dict[ax_name].set_title(f\"Cascade {i}\")\n\n plt.suptitle(f\"Epoch {epoch}\")\n plt.savefig(f'./ckp/visualization/epoch_{epoch}.png')", "def plot_visual_abstract():\n # Which generations to plot\n GENERATIONS = [100, 230, 350]\n\n # LunarLander CMA-ES\n experiment_path = glob(\"experiments/wann_LunarLander-v2_CMAES*\")\n assert len(experiment_path) == 1, \"There should be only one CMA-ES experiment with LunarLander-v2\"\n experiment_path = experiment_path[0]\n\n pivector_paths = glob(os.path.join(experiment_path, \"pivectors\", \"*\"))\n\n tsnes = []\n rewards = []\n for generation in GENERATIONS:\n # Find pivector files for specific generation, load them and store points\n generation_paths = [path for path in pivector_paths if \"gen_{}_\".format(generation) in path]\n\n population = [np.load(path) for path in generation_paths]\n population_tsnes = np.array([x[\"tsne\"] for x in population])\n population_rewards = np.array([x[\"average_episodic_reward\"] for x in population])\n tsnes.append(population_tsnes)\n rewards.append(population_rewards)\n\n figure, axs = pyplot.subplots(\n figsize=[2.5 * 3, 2.5],\n nrows=1,\n ncols=len(GENERATIONS),\n sharex=\"all\",\n 
sharey=\"all\"\n )\n\n min_reward = min(x.min() for x in rewards)\n max_reward = max(x.max() for x in rewards)\n scatter = None\n\n for idx in range(len(GENERATIONS)):\n population_tsne = tsnes[idx]\n population_rewards = rewards[idx]\n generation = GENERATIONS[idx]\n ax = axs[idx]\n\n scatter = ax.scatter(\n population_tsne[:, 0],\n population_tsne[:, 1],\n c=population_rewards,\n vmin=min_reward,\n vmax=max_reward,\n cmap=\"plasma\"\n )\n ax.set_title(\"Generation {}\".format(generation))\n ax.set_xticks([])\n ax.set_yticks([])\n ax.axis(\"off\")\n\n # Making room for colorbar\n # Stackoverflow #13784201\n figure.subplots_adjust(right=1.0)\n cbar = figure.colorbar(scatter)\n cbar.set_ticks([])\n cbar.ax.set_ylabel(\"Reward $\\\\rightarrow$\", rotation=90, fontsize=\"large\")\n\n figure.tight_layout()\n figure.savefig(\"figures/visual_abstract.pdf\", bbox_inches=\"tight\", pad_inches=0.05)", "def plot_loss(self):\n #x = [k for k in range(self.rep)]\n loss = self.min_list[:,0]//100 #For clarity\n #plt.plot(x,self.min_list[:,0])\n plt.hist(loss,density=True)\n plt.xlabel(self.list_name + '_loss//100')\n plt.ylabel('Frequency')\n #plt.xticks(range(8),[0,250,500,750,1000,1250,1500,1750])\n plt.title('Distribution of '+self.list_name+'_loss ('+str(self.rep)+' iterations)')\n plt.savefig('img/stats/'+self.list_name+'_lossFrequency_'+self.model_name+'.png')\n plt.show()", "def scree_plot(self, ev):\n plt.scatter(range(1,len(ev)+1), ev)\n plt.plot(range(1,len(ev)+1), ev)\n plt.title(\"Scree Plot\")\n plt.xlabel(\"Factors\")\n plt.ylabel(\"Eigenvalue\")\n plt.grid()\n plt.show()", "def graph_results(loss, acc):\n N = len(loss)\n x = np.linspace(0, N, N)\n plt.subplot(1,2,1)\n plt.plot(x, loss)\n plt.subplot(1,2,2)\n plt.plot(x,acc)\n plt.show()", "def visualize_implicit_dist(config, task_id, writer, train_iter, w_samples,\n figsize=(10, 6)):\n assert w_samples.ndim == 2\n\n num_weights = w_samples.shape[1]\n # Ensure that we always plot the same samples, independent of the simulation\n # its random seed.\n rand = np.random.RandomState(42)\n weight_inds = rand.choice(np.arange(num_weights), min(10, num_weights),\n replace=False)\n weight_inds = np.sort(weight_inds)\n\n weight_samples = dict(('Weight %d' % (weight_inds[i]),\n w_samples[:, weight_inds[i]].detach().cpu().numpy()) \\\n for i in range(len(weight_inds)))\n\n # FIXME Adapt our plotting guidelines.\n df = pd.DataFrame.from_dict(weight_samples)\n\n # correlation matrix.\n plt.rcParams['figure.figsize'] = figsize\n plt.matshow(df.corr(method='pearson'), vmin=-1, vmax=1)\n plt.xticks(range(len(df.columns)), df.columns)\n plt.xticks(rotation=70)\n plt.yticks(range(len(df.columns)), df.columns)\n plt.colorbar()\n\n writer.add_figure('eval/task_%d/correlation' % task_id, plt.gcf(),\n train_iter, close=True)\n\n n = 0\n for p in weight_inds:\n for q in weight_inds:\n if q >= p:\n break\n\n # Avoid that plots get corrupted due to mode collapse.\n if np.isclose(weight_samples['Weight %d' % p].std(), 0) or \\\n np.isclose(weight_samples['Weight %d' % q].std(), 0):\n n += 1\n warn('Could not create plot \"eval/task_%d/weight_%d_%d\" ' \\\n % (task_id, p, q) + 'due to mode collapsed posterior ' +\n 'variance.')\n continue\n\n try:\n sns.jointplot(x='Weight %d' % (p), y='Weight %d' % (q), data=df,\n kind=\"kde\")\n writer.add_figure('eval/task_%d/weight_%d_%d' % (task_id, p, q),\n plt.gcf(), train_iter, close=True)\n except:\n warn('Could not visualize joint weight density.')\n n += 1\n\n if n > 9:\n break\n\n if n > 9:\n break", "def plot_basis(self, 
layer, sublayer):\n for i in range(self.features_number[layer]):\n plt.figure(\"Base N: \"+str(i))\n sns.heatmap(self.basis[layer][sublayer][i])", "def plot_variables(self, n, show=False, diagnostics=False):\n\n if diagnostics:\n fig, ax = plt.subplots(5, 1, sharex = True, figsize = (10, 10))\n else:\n fig, ax = plt.subplots(2, 1, sharex = True, figsize = (10, 10))\n\n plt.subplots_adjust(hspace = 0)\n end = len(n.history[\"det F\"])\n epochs = np.arange(end)\n a, = ax[0].plot(epochs, n.history[\"det F\"], label = 'Training data')\n b, = ax[0].plot(epochs, n.history[\"det test F\"], label = 'Test data')\n # ax[0].axhline(y=5,ls='--',color='k')\n ax[0].legend(frameon = False)\n ax[0].set_ylabel(r'$|{\\bf F}_{\\alpha\\beta}|$')\n ax[0].set_title('Final Fisher info on test data: %.3f'%n.history[\"det test F\"][-1])\n ax[1].plot(epochs, n.history[\"loss\"])\n ax[1].plot(epochs, n.history[\"test loss\"])\n # ax[1].set_xlabel('Number of epochs')\n ax[1].set_ylabel(r'$\\Lambda$')\n ax[1].set_xlim([0, len(epochs)]);\n \n if diagnostics:\n ax[2].plot(epochs, n.history[\"det C\"])\n ax[2].plot(epochs, n.history[\"det test C\"])\n # ax[2].set_xlabel('Number of epochs')\n ax[2].set_ylabel(r'$|{\\bf C}|$')\n ax[2].set_xlim([0, len(epochs)]);\n \n # Derivative of first summary wrt to theta1 theta1 is 3rd dimension index 0\n ax[3].plot(epochs, np.array(n.history[\"dμdθ\"])[:,0,0]\n , color = 'C0', label=r'$\\theta_1$',alpha=0.5)\n \n \"\"\"\n # Derivative of first summary wrt to theta2 theta2 is 3rd dimension index 1\n ax[3].plot(epochs, np.array(n.history[\"dμdθ\"])[:,0,1]\n , color = 'C0', ls='dashed', label=r'$\\theta_2$',alpha=0.5)\n \"\"\"\n\n # Test Derivative of first summary wrt to theta1 theta1 is 3rd dimension index 0\n ax[3].plot(epochs, np.array(n.history[\"test dμdθ\"])[:,0,0]\n , color = 'C1', label=r'$\\theta_1$',alpha=0.5)\n \n \"\"\"\n # Test Derivative of first summary wrt to theta2 theta2 is 3rd dimension index 1\n ax[3].plot(epochs, np.array(n.history[\"test dμdθ\"])[:,0,1]\n , color = 'C1', ls='dashed', label=r'$\\theta_2$',alpha=0.5)\n ax[3].legend(frameon=False)\n \"\"\"\n\n ax[3].set_ylabel(r'$\\partial\\mu/\\partial\\theta$')\n # ax[3].set_xlabel('Number of epochs')\n ax[3].set_xlim([0, len(epochs)])\n\n # Mean of network output summary 1\n ax[4].plot(epochs, np.array(n.history[\"μ\"])[:,0],alpha=0.5)\n # Mean of test output network summary 1\n ax[4].plot(epochs, np.array(n.history[\"test μ\"])[:,0],alpha=0.5)\n ax[4].set_ylabel('μ')\n ax[4].set_xlabel('Number of epochs')\n ax[4].set_xlim([0, len(epochs)])\n \n\n print ('Maximum Fisher info on train data:',np.max(n.history[\"det F\"]))\n print ('Final Fisher info on train data:',(n.history[\"det F\"][-1]))\n \n print ('Maximum Fisher info on test data:',np.max(n.history[\"det test F\"]))\n print ('Final Fisher info on test data:',(n.history[\"det test F\"][-1]))\n\n if np.max(n.history[\"det test F\"]) == n.history[\"det test F\"][-1]:\n print ('Promising network found, possibly more epochs needed')\n\n plt.tight_layout()\n plt.savefig(f'{self.figuredir}variables_vs_epochs_{self.modelversion}.png')\n if show: plt.show()\n plt.close()", "def plot_attention(self, n_cols=4):\n from matplotlib import pyplot as plt\n from matplotlib.ticker import MaxNLocator\n save_path = mkdir_join(self.save_path, 'att_weights')\n if save_path is not None and os.path.isdir(save_path):\n shutil.rmtree(save_path)\n os.mkdir(save_path)\n for lth in range(self.n_layers):\n if not hasattr(self, 'yy_aws_layer%d' % lth):\n continue\n yy_aws = getattr(self, 
'yy_aws_layer%d' % lth)\n plt.clf()\n fig, axes = plt.subplots(self.n_heads // n_cols, n_cols, figsize=(20, 8))\n for h in range(self.n_heads):\n if self.n_heads > n_cols:\n ax = axes[h // n_cols, h % n_cols]\n else:\n ax = axes[h]\n ax.imshow(yy_aws[-1, h, :, :], aspect='auto')\n ax.grid(False)\n ax.set_xlabel('Input (head%d)' % h)\n ax.set_ylabel('Output (head%d)' % h)\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n ax.yaxis.set_major_locator(MaxNLocator(integer=True))\n fig.tight_layout()\n fig.savefig(os.path.join(save_path, 'layer%d.png' % lth))\n plt.close()", "def plot_attention(self, n_cols=4):\n from matplotlib import pyplot as plt\n from matplotlib.ticker import MaxNLocator\n save_path = mkdir_join(self.save_path, 'att_weights')\n if save_path is not None and os.path.isdir(save_path):\n shutil.rmtree(save_path)\n os.mkdir(save_path)\n for lth in range(self.n_layers):\n if not hasattr(self, 'yy_aws_layer%d' % lth):\n continue\n yy_aws = getattr(self, 'yy_aws_layer%d' % lth)\n plt.clf()\n fig, axes = plt.subplots(self.n_heads // n_cols, n_cols, figsize=(20, 8))\n for h in range(self.n_heads):\n if self.n_heads > n_cols:\n ax = axes[h // n_cols, h % n_cols]\n else:\n ax = axes[h]\n ax.imshow(yy_aws[-1, h, :, :], aspect='auto')\n ax.grid(False)\n ax.set_xlabel('Input (head%d)' % h)\n ax.set_ylabel('Output (head%d)' % h)\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n ax.yaxis.set_major_locator(MaxNLocator(integer=True))\n fig.tight_layout()\n fig.savefig(os.path.join(save_path, 'layer%d.png' % lth))\n plt.close()", "def plot_graph(self) -> None:", "def visualize(self):\n\n self.check_model()\n show(prepare(self.model, self.vectorized_data, self.vectorizer, mds='tsne'))", "def plot(model, pos=None, scale=1, figsize=(15, 8), verbose=3):\n out = {}\n G = nx.DiGraph() # Directed graph\n layout='fruchterman_reingold'\n\n # Extract model if in dict\n if 'dict' in str(type(model)):\n model = model.get('model', None)\n\n # Bayesian model\n if 'BayesianModel' in str(type(model)) or 'pgmpy' in str(type(model)):\n if verbose>=3: print('[bnlearn] >Plot based on BayesianModel')\n # positions for all nodes\n pos = network.graphlayout(model, pos=pos, scale=scale, layout=layout, verbose=verbose)\n # Add directed edge with weigth\n # edges=model.edges()\n edges=[*model.edges()]\n for i in range(len(edges)):\n G.add_edge(edges[i][0], edges[i][1], weight=1, color='k')\n elif 'networkx' in str(type(model)):\n if verbose>=3: print('[bnlearn] >Plot based on networkx model')\n G = model\n pos = network.graphlayout(G, pos=pos, scale=scale, layout=layout, verbose=verbose)\n else:\n if verbose>=3: print('[bnlearn] >Plot based on adjacency matrix')\n G = network.adjmat2graph(model)\n # Get positions\n pos = network.graphlayout(G, pos=pos, scale=scale, layout=layout, verbose=verbose)\n\n # Bootup figure\n plt.figure(figsize=figsize)\n # nodes\n nx.draw_networkx_nodes(G, pos, node_size=500, alpha=0.85)\n # edges\n colors = [G[u][v].get('color', 'k') for u, v in G.edges()]\n weights = [G[u][v].get('weight', 1) for u, v in G.edges()]\n nx.draw_networkx_edges(G, pos, arrowstyle='->', edge_color=colors, width=weights)\n # Labels\n nx.draw_networkx_labels(G, pos, font_size=20, font_family='sans-serif')\n # Get labels of weights\n # labels = nx.get_edge_attributes(G,'weight')\n # Plot weights\n nx.draw_networkx_edge_labels(G, pos, edge_labels=nx.get_edge_attributes(G, 'weight'))\n # Making figure nice\n ax = plt.gca()\n ax.set_axis_off()\n plt.show()\n\n # Store\n out['pos']=pos\n out['G']=G\n 
return(out)", "def test_plot_torch_activation_functions():\n x = np.arange(-2, 2, 0.1)\n x = torch.from_numpy(x)\n for name, f in torch_activation_functions_dict.items():\n plt.plot(x.numpy(), f(x).numpy(), label=name)\n plt.title('Torch activation functions')\n plt.legend()\n if show_plots:\n plt.show()", "def plot_weights(self,):\n \n weights_evolution = pd.DataFrame(self.predict[\"weights\"].values.tolist(), columns=[*self.models.keys()])\n\n plt.figure(figsize=(8, 5))\n\n for name in weights_evolution.columns:\n plt.plot(weights_evolution[name], label=name)\n\n plt.title(\"Weights evolution\")\n plt.legend()\n plt.grid(axis=\"y\", linestyle='--')\n plt.show()\n\n del weights_evolution", "def visulize_weights(W):\n fig, axes1 = plt.subplots(2,5,figsize=(3,3))\n i = 0\n for j in range(2):\n for k in range(5):\n im = W[i,:].reshape(32, 32, 3)\n im = (im - np.min(im)) / (np.max(im) - np.min(im))\n axes1[j][k].set_axis_off()\n axes1[j][k].imshow(im)\n i += 1\n plt.show()", "def plot(self):\n h = .02\n i=1\n bags_X = self.bags_X\n bags_y = self.bags_y\n fig1 = plt.figure(figsize=(45, 9))\n\n \n cm = plt.cm.RdBu\n cm_bright = ListedColormap(['#FF0000', '#0000FF'])\n \n for model in self.models:\n ax = plt.subplot(1, len(self.models) , i)\n X = pd.DataFrame(bags_X[i-1])\n y = pd.Series(bags_y[i-1])\n x_min, x_max = X[X.columns[0]].min() - .5, X[X.columns[0]].max() + .5\n y_min, y_max = X[X.columns[1]].min() - .5, X[X.columns[1]].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n Z = np.array(model.predict(pd.DataFrame(np.c_[xx.ravel(), yy.ravel()], columns=X.columns)))\n # print(Z[12])\n Z = Z.reshape(xx.shape)\n ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)\n ax.scatter(X[X.columns[0]], X[X.columns[1]], c=y, cmap=cm_bright, edgecolors='k')\n # size=[1000*w for w in self.weights[i-1]]\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n ax.set_xlabel(str(X.columns[0]))\n ax.set_ylabel(str(X.columns[1]))\n plt.title(\"Estimator \"+str(i))\n i+=1\n \n fig2 = plt.figure(figsize=(9,9))\n X = self.X\n y = self.y\n ax2 = plt.subplot(1,1,1)\n x_min, x_max = X[X.columns[0]].min() - .5, X[X.columns[0]].max() + .5\n y_min, y_max = X[X.columns[1]].min() - .5, X[X.columns[1]].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n Z = np.array(self.predict(pd.DataFrame(np.c_[xx.ravel(), yy.ravel()], columns=X.columns)))\n Z = Z.reshape(xx.shape)\n ax2.contourf(xx, yy, Z, cmap=cm, alpha=.8)\n # size=[1000*w for w in self.weights[i-2]]\n ax2.scatter(X[X.columns[0]], X[X.columns[1]], c=y, cmap=cm_bright, edgecolors='k')\n ax2.set_xlim(xx.min(), xx.max())\n ax2.set_ylim(yy.min(), yy.max())\n plt.title(\"Combined Decision Surface\")\n \n plt.tight_layout()\n plt.show()\n\n return [fig1,fig2]", "def plot_nodes_over_data_1d_components(fig, X, Y, mdl, e_nodes, p_nodes, e_nodes_cov, p_nodes_cov, saveplot = False):\n\n idim = X.shape[1]\n odim = Y.shape[1]\n numplots = idim + odim\n \n for i in range(idim):\n # ax = fig.add_subplot(gs[i,0])\n ax = fig.axes[i]\n ax.clear()\n ax.hist(X[:,i], bins=20)\n xlim = ax.get_xlim()\n ylim = ax.get_ylim()\n yran = ylim[1] - ylim[0]\n offset1 = yran * -0.1\n offset2 = yran * -0.25\n # print(\"offsets 1,2 = %f, %f\" % (offset1, offset2))\n ax.plot(X[:,i], np.ones_like(X[:,i]) * offset1, \"ko\", alpha=0.33)\n for j,node in enumerate(e_nodes[:,i]):\n myms = 2 + 30 * np.sqrt(e_nodes_cov[i,i,i])\n # print(\"node\", j, node, myms)\n ax.plot([node], [offset2], \"ro\", alpha=0.33, markersize=10)\n 
# ax.plot([node], [offset2], \"r.\", alpha=0.33, markersize = myms)\n # x1, x2 = gmm.\n ax.text(node, offset2, \"n%d\" % j, fontsize=6)\n # plt.plot(e_nodes[:,i], np.zeros_like(e_nodes[:,i]), \"ro\", alpha=0.33, markersize=10)\n \n for i in range(idim, numplots):\n # ax = fig.add_subplot(gs[i,0])\n ax = fig.axes[i]\n ax.clear()\n ax.hist(Y[:,i-idim], bins=20)\n xlim = ax.get_xlim()\n ylim = ax.get_ylim()\n yran = ylim[1] - ylim[0]\n offset1 = yran * -0.1\n offset2 = yran * -0.25\n # print(\"offsets 1,2 = %f, %f\" % (offset1, offset2))\n ax.plot(Y[:,i-idim], np.ones_like(Y[:,i-idim]) * offset1, \"ko\", alpha=0.33)\n for j,node in enumerate(p_nodes[:,i-idim]):\n myms = 2 + 30 * np.sqrt(p_nodes_cov[i-idim,i-idim,i-idim])\n # print(\"node\", j, node, myms)\n ax.plot([node], [offset2], \"ro\", alpha=0.33, markersize=10)\n # ax.plot([node], [offset2], \"r.\", alpha=0.33, markersize = myms)\n ax.text(node, offset2, \"n%d\" % j, fontsize=6)\n \n # plt.plot(p_nodes[:,i-idim], np.zeros_like(p_nodes[:,i-idim]), \"ro\", alpha=0.33, markersize=10)\n\n plt.draw()\n plt.pause(1e-9)\n \n if saveplot:\n filename = \"plot_nodes_over_data_1d_components_%s.jpg\" % (mdl.__class__.__name__,)\n savefig(fig, filename)\n \n fig.show()\n # plt.show()", "def struct_weight_plot_linear_N(\n Ns, ks, plot, version=1, sigmaP=1., sigmaS=1., sigmaC=1., fax=None\n):\n # create plot\n if fax is None:\n fig, ax = plt.subplots(1, 1, figsize=(8, 8))\n else:\n fig, ax = fax\n\n # create data arrays\n data = np.zeros((Ns.size, ks.size))\n\n # iterate over scales\n for k_idx, k in enumerate(ks):\n # iterate over population sizes\n for N_idx, N in enumerate(Ns):\n lnn = LNN(N=N, sigmaP=sigmaP, sigmaS=sigmaS, sigmaC=sigmaC)\n\n # calculate fisher information\n if plot == 'FI_linear':\n if version == 1:\n data[N_idx, k_idx] = lnn.FI_linear_struct(N, k, sigmaP, sigmaC)\n else:\n data[N_idx, k_idx] = lnn.FI_linear_struct(N, N / k, sigmaP, sigmaC)\n\n # calculate mutual information\n elif plot == 'MI_linear':\n if version == 1:\n data[N_idx, k_idx] = lnn.MI_linear_struct(N, k, sigmaP,\n sigmaC, sigmaS)\n else:\n data[N_idx, k_idx] = lnn.MI_linear_struct(N, N / k, sigmaP,\n sigmaC, sigmaS)\n\n else:\n raise ValueError('Plot version does not exist.')\n\n # plot the data, changing the label/colors if necessary\n if version == 1:\n ax.plot(\n Ns, data[:, k_idx],\n label=r'$k_{\\mathbf{w}}=%s$' % k,\n linewidth=4,\n color=colors[-k_idx])\n else:\n ax.plot(\n Ns, data[:, k_idx],\n label=r'$k_{\\mathbf{w}}=N/%s$' % k,\n linewidth=4,\n color=colors[k_idx])\n\n ax.set_facecolor('white')\n ax.set_xlabel(r'$N$', fontsize=30)\n ax.tick_params(labelsize=20)\n ax.set_xlim([np.min(Ns), np.max(Ns)])\n lgd = ax.legend(\n loc=2,\n facecolor='white',\n prop={'size': 18},\n handletextpad=0.6,\n handlelength=1.,\n labelspacing=0.27)\n lgd.get_frame().set_edgecolor('k')\n\n for spine in ax.spines.values():\n spine.set_edgecolor('k')\n\n return fig, ax", "def main():\n Nrep = 8 # number of repetition of EM steps\n nm = 3 # number of mixed gaussians.\n ns = 300 # number of samples.\n \n mu, sg, lm, lm_ind, smp, L_true = generate_synthetic_data(nm, ns)\n plt.figure(1, figsize=(5,4))\n plt.clf()\n plot_synthetic_data(smp, mu, sg, lm, lm_ind, nm, ns)\n \n mue, sge, lme = generate_initial_state(nm, ns)\n axi = 0 # subplot number\n plt.figure(2, figsize=(12,9))\n plt.clf()\n for rep in range(Nrep):\n # E-step\n r, L_infer = e_step(smp, mue, sge, lme, nm, ns)\n axi += 1 \n ax = plt.subplot(Nrep/2, 6, axi)\n plot_em_steps(smp, r, mue, sge, lme, nm, ns)\n 
ax.set_title('E-step : %d' % (rep + 1))\n ax.set_yticklabels([])\n ax.set_xticklabels([])\n ax.set_ylim((-0.1, 0.3))\n\n # M-step\n mue, sge, lme = m_step(smp, r, nm, ns)\n axi += 1 \n ax = plt.subplot(Nrep/2, 6, axi)\n plot_em_steps(smp, r, mue, sge, lme, nm, ns)\n ax.set_title('M-step : %d' % (rep + 1))\n ax.set_yticklabels([])\n ax.set_xticklabels([])\n ax.set_ylim((-0.1, 0.3))\n\n # plot the ground truth for comparison\n axi += 1 \n ax = plt.subplot(Nrep/2, 6, axi)\n plot_synthetic_data(smp, mu, sg, lm, lm_ind, nm, ns)\n ax.set_title('grn_truth')\n ax.set_yticklabels([])\n ax.set_xticklabels([])\n ax.set_ylim((-0.1, 0.3))\n\n print('L_infer = %2.6f , L_true = %2.6f' % (L_infer, L_true))", "def plot_expected_net_mag(L, temp, runs):\n\n colors = ['rosybrown','lightcoral','indianred','firebrick','darkred','red']\n spin_matrix = np.ones((L, L), np.int8)\n\n plt.figure(figsize=(10, 6))\n\n N = 30 # number of times to run n_cycles\n count = 0\n\n for n_cycles in runs:\n\n c = colors[count]\n count += 1\n for i in range(N):\n\n E, Mag, MagAbs, SH, Suscept, Naccept = numerical_solution(spin_matrix, int(n_cycles), temp, L)\n plt.semilogx(int(n_cycles), Mag, 'o', color=c)\n\n plt.title('Spread of Expected Magnetic Field of Matrix', fontsize=15)\n plt.xlabel('Number of Monte-Carlo Cycles', fontsize=15)\n plt.ylabel(r'\\langle M \\rangle', fontsize=15)\n plt.xticks(fontsize=13);plt.yticks(fontsize=13)\n plt.savefig(f'results/plots/4c/SpreadOfExpectedMagneticField')\n plt.show()", "def network_graph(net_dict=None):\n if net_dict == None:\n net_dict = {}\n else:\n G = nx.from_dict_of_lists(net_dict)\n plt.figure(num=None, figsize=(30, 30), dpi=80, facecolor='w', edgecolor='c')\n nx.draw_networkx(G, with_labels=True, alpha=0.5, edge_color='c', cmap=plt.cm.GnBu)\n plt.savefig(\"metabolism_5years.png\", bbox_inches='tight')", "def montage(W):\n import matplotlib.pyplot as plt\n fig, ax = plt.subplots(2, 5)\n for i in range(2):\n for j in range(5):\n im = W[i * 5 + j, :].reshape(32, 32, 3, order='F')\n sim = (im - np.min(im[:])) / (np.max(im[:]) - np.min(im[:]))\n sim = sim.transpose(1, 0, 2)\n ax[i][j].imshow(sim, interpolation='nearest')\n ax[i][j].set_title(\"y=\" + str(5 * i + j))\n ax[i][j].axis('off')\n #plt.savefig(\"plots/ \"+fname +\".png\")\n plt.show()" ]
[ "0.6225357", "0.6089174", "0.59853345", "0.598065", "0.5882383", "0.58234656", "0.58226746", "0.5817316", "0.57622045", "0.57604235", "0.57364094", "0.5720234", "0.56863946", "0.5683019", "0.5672461", "0.5671478", "0.5671478", "0.5671162", "0.5670977", "0.56598586", "0.56555593", "0.56547153", "0.56490153", "0.56481606", "0.564706", "0.56470144", "0.56437874", "0.56312835", "0.5624889", "0.5618708" ]
0.6197951
1
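The row closing above pairs a plotting query with a t-SNE reward-scatter grid; a minimal sketch of its shared-colorbar idiom, with all data and names invented for illustration:

import numpy as np
import matplotlib.pyplot as plt

# Fake embeddings and rewards standing in for the tsnes/rewards arrays above.
tsnes = [np.random.randn(50, 2) for _ in range(3)]
rewards = [np.random.rand(50) for _ in range(3)]
vmin = min(r.min() for r in rewards)
vmax = max(r.max() for r in rewards)

fig, axs = plt.subplots(1, 3, sharex="all", sharey="all")
for ax, xy, c in zip(axs, tsnes, rewards):
    # Shared vmin/vmax make a single colorbar valid for every panel.
    scatter = ax.scatter(xy[:, 0], xy[:, 1], c=c, vmin=vmin, vmax=vmax, cmap="plasma")
    ax.axis("off")
fig.colorbar(scatter)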
Plots the asymptotic coefficients for the given file.
def plot_asymptotic_coefficients(filename, fax=None): # create plot labels = [1, 2, 3] # create plot if fax is None: fig, ax = plt.subplots(1, 1, figsize=(8, 8)) else: fig, ax = fax coef_file = h5py.File(filename, 'r') sigmaP_vals = list(coef_file) ks = np.arange(1, 26) for idx, sigmaP in enumerate(sigmaP_vals): coefs = coef_file[sigmaP] ax.plot( ks, coefs, linewidth=4, label=r'$\sigma_P=%s$' % labels[idx], color=colors[-idx - 3]) lgd = ax.legend( facecolor='white', prop={'size': 25}, handletextpad=0.4, handlelength=1.2, labelspacing=0.27, columnspacing=0.50) lgd.get_frame().set_edgecolor('k')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_fitting_coefficients(self):\n from matplotlib import pyplot as plt\n coeff = self.linear_fit[\"coeff\"]\n order = self.linear_fit[\"order\"]\n\n data = {}\n annotations = {}\n for c, o in zip(coeff, order):\n if len(o) == 0:\n continue\n n = len(o)\n if n not in data.keys():\n data[n] = [c]\n annotations[n] = [WulffConstruction.order2string(o)]\n else:\n data[n].append(c)\n annotations[n].append(WulffConstruction.order2string(o))\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n start = 0\n keys = list(data.keys())\n keys.sort()\n for k in keys:\n x = list(range(start, start+len(data[k])))\n ax.bar(x, data[k], label=str(k))\n start += len(data[k]) + 1\n for i in range(len(data[k])):\n ax.annotate(annotations[k][i], xy=(x[i], data[k][i]))\n ax.set_ylabel(\"Fitting coefficient\")\n ax.set_xticklabels([])\n ax.spines[\"right\"].set_visible(False)\n ax.spines[\"top\"].set_visible(False)\n ax.legend(frameon=False)\n return fig", "def plot_polynomial(self):\n plt.scatter(self.x_values, self.y_values)\n plt.title(f\"Graph of polynomial between {np.floor(min(self.x_values))} and {np.ceil(max(self.x_values))}\")\n plt.xlabel('x-axis')\n plt.ylabel('y-axis')\n plt.show()", "def plot_costs(j_history):\n plt.figure(figsize=(14, 8))\n plt.plot(range(len(j_history)), j_history)\n plt.grid(True)\n plt.title('J (Cost)')\n plt.xlabel('Iteration')\n plt.ylabel('Cost function')\n plt.xlim([0, 1.05 * ITERATIONS])\n plt.ylim([4, 7])\n plt.show()\n plt.close()", "def plot():\n xvals = np.arange(-50, 250, step=0.1)\n\n fig = plt.figure()\n plt.suptitle(\"Gaussian with smooth transition to power law\")\n\n A0vals = [10, 11]\n avals = [5*10**-3, 10**-3, 5*10**-4]\n ttvals = [10., 50., 100.]\n cvals = [-0.1, -0.9, -5./3., -4.]\n offset = [-30, 0.0, 30]\n\n paramvals = [A0vals, avals, ttvals,cvals, offset]\n titles, labels = return_parameter_names()\n\n nplots = len(paramvals)\n\n for i in range(nplots):\n plt.subplot(nplots, 1, i+1)\n vals = paramvals[i]\n for j in range(len(vals)):\n pset = list(default())\n pset[i] = vals[j]\n yvals=[]\n ypower=[]\n ypeak=[]\n for x in xvals:\n yvals.append(fitfunc(x, pset))\n ypeak.append(logpeak(x,pset))\n if x > 0:\n ypower.append(logpowerlaw(x,pset))\n label = labels[i] + \"=\"+str(vals[j])\n plt.plot(xvals, yvals, label = label)\n\n plt.title(titles[i])\n plt.legend()\n\n fig.set_size_inches(15, 30)\n plt.savefig(\"graphs/misc/lightcurve_models.pdf\")\n plt.close()", "def plot(self):\n\t\tself.plotOfCos1().plot()", "def plot(self):\n\t\tself.plotOfSpect().plot()", "def plot(self):\n\t\tself.plotOfSpect()", "def plot_coeffs(self, *links):\n\n coeffs_dict = dict(self.noisemodel.coeffs)\n groups = self._model_terms(links)\n fig, ax = plt.subplots()\n colcy = cycle(COLORS)\n for group in groups:\n c = next(colcy)\n coeffs = [coeffs_dict[term] for term in group]\n ax.bar([term.to_label() for term in group], coeffs, color=c)", "def plot_cf(self, **options):\n n = len(self.hs)\n xs = np.arange(-n//2, n//2)\n hs = np.roll(self.hs, len(self.hs) // 2)\n plt.plot(xs, hs.real, label='real', **options)\n plt.plot(xs, hs.imag, label='imag', **options)\n plt.legend()", "def plot(self):\n self.plotsite()\n self.plotbond()\n plt.show()", "def plot(self):\n pass", "def plotSVMCoefficients(self, **kwargs):\n ser_X = pd.Series(np.repeat(1, len(self.features)))\n ser_X.index = self.features\n new_kwargs = dict(kwargs)\n new_kwargs[\"is_plot\"] = False\n ax = self._plotFeatureBars(ser_X, **new_kwargs)\n ax.set_ylabel(\"Coefficient\")\n self._showPlot(kwargs)", "def 
printfunc(self):\n zero1=self.Newton(True)\n print \"Using initial porition %0.2f ,%0.2f\" %(self.x_init,self.y_0)\n print \"extremum calculated witn Newton-Rapson: %0.2f ,%0.2f.\"%(zero1[0],zero1[1])\n zero2=self.Newton(False)\n print \"extremum calculated witn Secant: %0.2f ,%0.2f.\" %(zero2[0],zero2[1])\n xlist=np.arange(self.x_0-10,self.x_0+10,0.01)\n ylist=np.arange(self.y_0-10,self.y_0+10,0.01)\n X,Y=np.meshgrid(xlist,ylist)\n Z=self.sfunc(X,Y)\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n \n ax.plot(xlist, ylist, self.sfunc(xlist,ylist), 'g-',label='function $e^{(-(x-%0.2f)^2-(y-%0.2f)^2)}$' %(self.x_0,self.y_0))\n ax.contour(X, Y, Z)# colors = 'k', linestyles = 'solid')\n ax.plot([zero1[0]], [zero1[0]], self.sfunc(zero1[0],zero1[1]),'bo',label='extrema using Newton-Rapson (%0.2f; %0.2f)'%(zero1[0],zero1[1]))\n ax.plot([zero2[0]], [zero2[0]], self.sfunc(zero2[0],zero2[1]),'ro',label='extrema using Seacent (%0.2f; %0.2f)'%(zero2[0],zero2[1]))\n ax.legend()\n plt.show()", "def plot(self):\n\t\tself.plotOfTF().plot()", "def plot_energies(self):\n plt.plot(self.energies[0], self.energies[1])\n plt.xlabel('Time (s)')\n plt.ylabel('Energy (J)')\n plt.show()", "def plot_curves():\n lm = np.arange(0, 1.8, .01)\n vm = np.arange(-1.2, 1.2, .01)\n lt = np.arange(0, 1.07, .01)\n plt.subplot(2,1,1)\n plt.plot(lm, force_length_muscle(lm), 'r')\n plt.plot(lm, force_length_parallel(lm), 'g')\n plt.plot(lt, force_length_tendon(lt), 'b')\n plt.legend(('CE', 'PE', 'SE'))\n plt.xlabel('Normalized length')\n plt.ylabel('Force scale factor')\n plt.subplot(2, 1, 2)\n plt.plot(vm, force_velocity_muscle(vm), 'k')\n plt.xlabel('Normalized muscle velocity')\n plt.ylabel('Force scale factor')\n plt.tight_layout()\n plt.show()", "def showCl(ell,temps,title='CAMB ISWout power spectrum'):\n plt.plot(ell,temps*ell*(ell+1)/(2*np.pi) *1e12) #1e12 to convert to microK**2\n plt.xlabel('multipole moment l')\n plt.ylabel('l(l+1)C_l/(2pi) [microK**2]')\n plt.title(title)\n plt.show()", "def equationPlot(self):\n clf()\n x = np.arange(0,9.9,0.1)\n plot(x,1/(10-x))\n xlabel('X')\n ylabel('1/(10-x)')\n savefig('equation.png')", "def plotCaliCurve(constants, data, outName):\n x=np.linspace(min(data[:,0]),max(data[:,0]),1000)\n plt.figure()\n plt.rcParams.update({'font.size' : 16})\n plt.scatter(data[:,0],data[:,1])\n plt.plot(x,LangmuirCurve(x,constants[0],constants[1],constants[2],constants[3]))\n #plt.xlabel(\"MG Concentration (nM)\")\n #plt.ylabel(\"Relative SHS signal (Arb. Units)\")\n plt.savefig(outName + \"_cali_model_plot.png\")\n plt.show()", "def show():\n\tplt.show()", "def plotEq(self):\n Bsum = 0\n\n if self.plotBr: Bsum += self.magneticfield.Br**2\n if self.plotBphi: Bsum += self.magneticfield.Bphi**2\n if self.plotBz: Bsum += self.magneticfield.Bz**2\n\n B = np.sqrt(Bsum)\n if not hasattr(B, \"__len__\"): return\n\n self.axes.contour(self.magneticfield.meshR, self.magneticfield.meshZ, B)", "def make_plot(self):\n self.ax[0].set_ylabel(r'$C_{{\\ell}}^{{\\kappa\\kappa}}$')\n self.ax[1].set_ylabel('$\\mathrm{rel. dev. 
[\\%]$}')\n self.ax[1].set_xlabel(r'$\\ell$')", "def show_plot(self):\n # Tight layout\n plt.tight_layout()\n # Remove whitespace between upper and lower plots\n plt.subplots_adjust(hspace=0, wspace=0.3) \n # Tick marks on all sides of each plot and show legend\n for j in range(2):\n axes=self.ax[j]\n axes.tick_params(axis='both', which='both', direction='in',\n top=True, right=True)\n legend=axes.legend(framealpha=0)\n # Save and show\n plt.savefig('CMB_lensing_potential_LCDM_MG.pdf', format='pdf')\n plt.show()", "def plot(self):\n plot_spectrum(self.data, self.fig, self.ax_e, self.ax_s, title = \"Solar spectrum\")", "def plotCoefficients(model):\n\n coefs = pd.DataFrame(model.coef_, X_train.columns)\n coefs.columns = [\"coef\"]\n coefs[\"abs\"] = coefs.coef.apply(np.abs)\n coefs = coefs.sort_values(by=\"abs\", ascending=False).drop([\"abs\"], axis=1)\n\n plt.figure(figsize=(15, 7))\n plt.title('sorted coefficient values of the model')\n coefs.coef.plot(kind='bar')\n plt.grid(True, axis='y')\n plt.hlines(y=0, xmin=0, xmax=len(coefs), linestyles='dashed');\n plt.draw()", "def plot(self, num_levels=10):\n if num_levels == -1:\n num_levels = len(self.energies())\n print(self.energies(num_levels))\n figure(figsize=(20, 5))\n subplot(1, num_levels + 1, 1)\n self.plot_potential()\n #xlabel('$\\phi$')\n for ii, psi2D in enumerate(self.get_2Dpsis(num_levels)):\n subplot(1, num_levels + 1, ii + 2)\n #imshow(psi2D.real,extent=(self.x[0],self.x[-1],self.y[0],self.y[-1]),interpolation=\"None\",aspect='auto')\n imshow(psi2D.real, interpolation=\"None\", aspect='auto')\n xlabel(ii)", "def show_plot(self):\r\n\t\tself.generate_plot()\r\n\t\tplt.show()", "def plot_cost(self):\n steps = np.arange(len(self.cost_values))\n plt.plot(steps, self.cost_values, '-o')\n plt.xlabel(\"Steps\")\n plt.ylabel(\"Cost value\")\n plt.title(\"Cost value per step using Gradient Descent\")\n plt.show()", "def plot_history(self, filename):\r\n plt.figure(figsize=(12, 9))\r\n plt.plot(self.Objective_value)\r\n plt.xlabel('Iteration')\r\n plt.ylabel('Value')\r\n plt.title('Objective Function Values')\r\n # plt.savefig(filename)\r\n plt.show()\r\n return", "def plot(self):\n\t\tself.plotOfLoopVoltage()" ]
[ "0.7514957", "0.675888", "0.66593856", "0.66397125", "0.65040374", "0.6465431", "0.64535564", "0.64405936", "0.6402966", "0.63325644", "0.63293165", "0.63066345", "0.6284987", "0.62760645", "0.62508875", "0.62294936", "0.6181468", "0.61633366", "0.6143691", "0.6139784", "0.6110327", "0.6076618", "0.60640436", "0.6062591", "0.60568", "0.6045035", "0.6039942", "0.6038681", "0.6012713", "0.6012309" ]
0.68084884
1
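The positive document in this row iterates the top-level keys of an HDF5 file and plots one curve per key; a hedged, self-contained version of that loop (filename and file layout are assumptions):

import h5py
import numpy as np
import matplotlib.pyplot as plt

with h5py.File("asymptotic_coefs.h5", "r") as coef_file:  # hypothetical file
    fig, ax = plt.subplots()
    for name in coef_file:            # one dataset of coefficients per sigma_P
        vals = coef_file[name][...]   # read the dataset into a NumPy array
        ax.plot(np.arange(1, len(vals) + 1), vals, label=name)
    ax.legend()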
Sets the y attribute, raising exceptions for invalid values
def y(self, value): self.data_validator("y", value) self.__y = value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setY(self, y):\n self.y = y\n pass", "def setY(self, y):\r\n\t\tself._y=y", "def set_y(self, y: float):\n self.y = y", "def y(self, value):\n self.validate_input(y=value)\n self.__y = value", "def set_y(self, new_y):\r\n self.y = new_y", "def y(self, value):\n if isinstance(value, int) is False:\n raise TypeError(\"y must be an integer\")\n if value < 0:\n raise ValueError(\"y must be >= 0\")\n self.__y = value", "def set_y(self, y):\n self._y = y", "def SetY(self, y):\r\n\r\n self._y = y", "def y(self, value):\n if not isinstance(value, int):\n raise TypeError(\"y must be an integer\")\n if value < 0:\n raise ValueError(\"y must be >= 0\")\n self.__y = value", "def y(self, y):\n if type(y) is not int:\n raise TypeError(\"y must be an integer\")\n elif y < 0:\n raise ValueError(\"y must be >= 0\")\n else:\n self.__y = y", "def y(self, y):\n if type(y) is not int:\n raise TypeError(\"y must be an integer\")\n if y < 0:\n raise ValueError(\"y must be >= 0\")\n self.__y = y", "def y(self, y):\n if type(y) is not int:\n raise TypeError(\"y must be an integer\")\n if y < 0:\n raise ValueError(\"y must be >= 0\")\n self.__y = y", "def Y(self, value):\n self._Y = value", "def setY(self, value):\n self.components[1] = value", "def setY(self, value):\n self.components[1] = value", "def setY(self, y, index=np.nan):\n if len(self.shape()) > 1:\n if np.isnan(index):\n self.data[1, :] = y\n else:\n self.data[1, index] = y\n else:\n self.data[1] = y", "def y(self, number):\n self.validate_int(\"y\", number)\n if number < 0:\n raise ValueError(\"y must be >= 0\")\n self.__y = number", "def setY(self, *args):\n return _libsbml.Point_setY(self, *args)", "def set_y(self, value: int) -> None:\n assert -self.__max_value <= value and value <= self.__max_value\n\n should_sync = self.__y != value\n self.__y = value\n if should_sync:\n self.__sync_y()", "def setYUnits(self, units): \n self.__y_units__ = units", "def setY(self, value):\n self.position[1] = value", "def y(self, y):\n if y is None:\n raise ValueError(\"Invalid value for `y`, must not be `None`\") # noqa: E501\n\n self._y = y", "def y(self, y):\n if y is None:\n raise ValueError(\"Invalid value for `y`, must not be `None`\") # noqa: E501\n\n self._y = y", "def set_delta_y(self, *args: str, delta_y: Sequence[float] | float = 0.0) -> None:\n self.set_delta('y', *args, delta=delta_y)", "def setY(self, *args):\n return _libsbml.BoundingBox_setY(self, *args)", "def __set_y__(self,y):\n\n # Input vaidation\n try:\n y = int(y)\n except:\n raise ValueError('H Bridge direction is not valid')\n \n if(y != 0 and y != 1 and y != -1):\n raise ValueError('H Bridge direction is not valid')\n \n self.direction['y'] = y\n self.HBridges['y'].SetDirection(y)", "def y(self, value=None):\n if isinstance(value, (int, float)):\n self[1] = value\n else:\n if value is not None:\n raise TypeError(\"Cannot be set to {}\".format(type(value)))\n return self[1]", "def set_y(self, state_value):\n val = state_value / self.space_subdivisions + self.unit\n epsilon = 1e-6\n if not self.unit <= val <= 1.0 - self.unit + epsilon:\n raise AttributeError(\"Value out of bounds\")\n self.pos_y = val", "def test_cast_y_axis_extrema_invalid_input(self):\r\n self.assertRaises(ValueError, _cast_y_axis_extrema, 'foo')", "def y(self, y=None):\n\n if y is None:\n return self._y\n else:\n if not isinstance(y, int) and not isinstance(y, float):\n raise TypeError(\"y must be numeric, not '%s'\" % y)\n self._y = y" ]
[ "0.73010266", "0.7284235", "0.7217349", "0.7144914", "0.7122965", "0.71036816", "0.7090068", "0.707471", "0.70422107", "0.70308393", "0.7030658", "0.7030658", "0.68635017", "0.68199414", "0.68199414", "0.67532015", "0.6728125", "0.6710675", "0.6618399", "0.6606974", "0.65337294", "0.65319127", "0.65319127", "0.65248674", "0.6429431", "0.64259964", "0.6418633", "0.64138925", "0.63883126", "0.63688207" ]
0.740591
0
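This row and most of its negatives share one idiom: a property setter that type- and range-checks before assigning. A self-contained version (the class name is a placeholder):

class Point:
    def __init__(self, y=0):
        self.y = y                    # routes through the validating setter

    @property
    def y(self):
        return self.__y

    @y.setter
    def y(self, value):
        if not isinstance(value, int):
            raise TypeError("y must be an integer")
        if value < 0:
            raise ValueError("y must be >= 0")
        self.__y = value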
Regenerate key for a topic. Regenerate a shared access key for a topic.
def regenerate_key( self, resource_group_name, topic_name, key_name, custom_headers=None, raw=False, **operation_config): regenerate_key_request = models.TopicRegenerateKeyRequest(key_name=key_name) # Construct URL url = self.regenerate_key.metadata['url'] path_format_arguments = { 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'topicName': self._serialize.url("topic_name", topic_name, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct body body_content = self._serialize.body(regenerate_key_request, 'TopicRegenerateKeyRequest') # Construct and send request request = self._client.post(url, query_parameters, header_parameters, body_content) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp deserialized = None if response.status_code == 200: deserialized = self._deserialize('TopicSharedAccessKeys', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def generate_new_refesh_key(payload: dict = Depends(get_jwt_payload)):\n if payload[\"type\"] != \"refresh\":\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"You gave the access key, but we need the refresh key\",\n headers={\"WWW-Authenticate\": \"Bearer\"},\n )\n\n # <- Your token revocation code should be here!\n\n access_token_data = jwt_claims.copy()\n access_token_data[\"sub\"] = payload[\"sub\"]\n access_token_data[\"exp\"] = datetime.utcnow() + timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)\n access_token_data[\"jti\"] = str(uuid.uuid4())\n\n return AccessToken(access_token=jwt.encode(access_token_data, SECRET_KEY, algorithm=ALGORITHM))", "def genKey(self, otherKey):\n self.sharedSecret = self.genSecret(self.privateKey, otherKey)\n #print(\"Shared secret:\")\n #print(self.sharedSecret)\n s = hashlib.sha256()\n s.update(bytes(str(self.sharedSecret).encode()))\n self.key = s.digest()", "def shared_key(private_key,public_key):\n\treturn private_key.exchange(public_key)", "def generate_new_token(self):\n self.access_token = random_auth_key()", "def genKey(self, privateKey,otherKey):\n\t\tself.sharedSecret = self.genSecret(privateKey, otherKey)\n\n\t\t# Convert the shared secret (int) to an array of bytes in network order\n\t\t# Otherwise hashlib can't hash it.\n\t\ttry:\n\t\t\t_sharedSecretBytes = self.sharedSecret.to_bytes(\n\t\t\t\tself.sharedSecret.bit_length() // 8 + 1, byteorder=\"big\")\n\t\texcept AttributeError:\n\t\t\t_sharedSecretBytes = str(self.sharedSecret)\n\n\t\ts = hashlib.sha256()\n\t\ts.update(bytes(_sharedSecretBytes))\n\t\tself.key = s.digest()", "def renewKey():\n while True:\n try:\n sleep(RENEW_KEY)\n mutex.acquire()\n key_dict.clear()\n mutex.release()\n except:\n print(\"error in renew key\")\n finally:\n if mutex.locked():\n mutex.release()", "def regenerate_API_key(self) -> None:\n session = create_session()\n new_key = generate_random_string(24)\n # Check if there is any user with exact same API key as just generated\n if new_key not in session.query(User.API_KEY).all():\n self.API_KEY = new_key\n session.merge(self)\n session.commit()\n else:\n while new_key in session.query(User.API_KEY).all():\n new_key = generate_random_string(24)\n self.API_KEY = new_key\n session.merge(self)\n session.commit()", "def gen_key(self):\n\n if not self.private_key:\n self._gen_key()\n else:\n raise CryptoError(\"Private Key already existing\")", "def test_rekey_defaults(self, settings):\n old_key = b'0' * 32\n new_key = b'1' * 32\n\n settings.CHITON_ENCRYPTION_KEY = new_key\n settings.CHITON_PREVIOUS_ENCRYPTION_KEY = old_key\n\n encrypted = encrypt('message', key=old_key)\n rekeyed = rekey(encrypted)\n\n assert decrypt(rekeyed) == 'message'", "def test_rekey(self):\n old_key = b'0' * 32\n new_key = b'1' * 32\n\n old_encrypted = encrypt('message', key=old_key)\n new_encrypted = rekey(old_encrypted, old_key=old_key, new_key=new_key)\n\n assert decrypt(new_encrypted, key=new_key) == 'message'", "def create_key(cls, topic):\n\t\treturn datastore_types.Key.from_path(cls.kind(), utils.get_hash_key_name(topic))", "def gen_keys_old(name):\n d = 'keys'\n if not os.path.isdir(d):\n os.mkdir(d)\n if not os.path.isfile('%s/%s.pem'%(d,name)):\n open('%s/%s.pem'%(d,name),'w').write(Crypto.PublicKey.RSA.generate(1024,os.urandom).exportKey('PEM'))", "def _newKey(self, key):\n pass", "def generate_key():\n return get_token_generator().generate_token()", "def delkey(confirm, pub):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n 
mph.rpc.rpcconnect()\n if not unlock_wallet(stm):\n return\n mph.wallet.removePrivateKeyFromPublicKey(pub)\n set_shared_morphene_instance(stm)", "def create_key_name(topic):\n\t\treturn utils.get_hash_key_name(topic)", "def newKeyGenerate():\n generate()\n return '', 204", "def keygen():\n pk, pub = generate_signing_key()\n t = PrettyTable([\"Private (install on your witness node)\",\n \"Public (publish with 'conductor enable' command)\"])\n t.align = \"l\"\n t.add_row([pk, pub])\n\n output(t, '')", "def create_key ():", "def genKeys():\r\n (pub, priv) = rsa.newkeys(256)\r\n context = {\r\n 'pub': pub,\r\n 'priv': priv\r\n }\r\n return context", "def generate_refresh_token(self):\n return gen_api_key(length=self.token_length)", "def refresh_access_token(self):\n self._access_token = self.generate_access_token()", "def test_rekey_key_format(self):\n old_key = b'0' * 32\n encrypted = encrypt('message', key=old_key)\n\n with pytest.raises(EncryptionError):\n rekey(encrypted, old_key=old_key, new_key=b'1' * 31)", "def update_key(self):\n self.__prev_key = self.__new_key", "def generate_new_key(self, index):\n new_key = self.chain_key.subkey(index)\n self._key_generated(new_key, index)", "def reset_api_key(request):\r\n user = request.user\r\n # Generate new api key and assign it to user's api key\r\n user.api_key = User.gen_api_key()\r\n return _api_response(request, {\r\n 'api_key': user.api_key,\r\n 'message': 'Api Key was successfully changed',\r\n })", "def renew_access_token(self):\n self._access_token = self._get_access_token()", "def generate_keystream(self):", "def expand_key(master_key):\n #s_box = bytes2matrix(s_box1)\n # Round constants https://en.wikipedia.org/wiki/AES_key_schedule#Round_constants\n r_con = (\n 0x00, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40,\n 0x80, 0x1B, 0x36, 0x6C, 0xD8, 0xAB, 0x4D, 0x9A,\n 0x2F, 0x5E, 0xBC, 0x63, 0xC6, 0x97, 0x35, 0x6A,\n 0xD4, 0xB3, 0x7D, 0xFA, 0xEF, 0xC5, 0x91, 0x39,\n )\n\n # Initialize round keys with raw key material.\n key_columns = bytes2matrix(master_key, 4)\n #print(key_columns)\n iteration_size = len(master_key) // 4\n\n\n # Each iteration has exactly as many columns as the key material.\n columns_per_iteration = len(key_columns)\n i = 1\n while len(key_columns) < (N_ROUNDS + 1) * 4:\n # Copy previous word.\n word = list(key_columns[-1])\n\n # Perform schedule_core once every \"row\".\n if len(key_columns) % iteration_size == 0:\n # Circular shift.\n word.append(word.pop(0))\n # Map to S-BOX.\n word = [s_box[b-1] for b in word]\n\n # XOR with first byte of R-CON, since the others bytes of R-CON are 0.\n word[0] ^= r_con[i]\n i += 1\n elif len(master_key) == 32 and len(key_columns) % iteration_size == 4:\n # Run word through S-box in the fourth iteration when using a\n # 256-bit key.\n word = [s_box[b] for b in word]\n\n # XOR with equivalent word from previous iteration.\n word = bytes(i^j for i, j in zip(word, key_columns[-iteration_size]))\n key_columns.append(word)\n\n # Group key words in 4x4 byte matrices.\n return [key_columns[4*i : 4*(i+1)] for i in range(len(key_columns) // 4)]", "def invalidate_key_group(self, prefix):\r\n self.add(prefix, 0)\r\n self.incr(prefix)" ]
[ "0.6181185", "0.5912194", "0.57736045", "0.5653259", "0.5649086", "0.5508733", "0.5508396", "0.5505565", "0.547415", "0.53989816", "0.5356546", "0.5315606", "0.5276495", "0.52314806", "0.52309495", "0.52278566", "0.51943773", "0.5139199", "0.51312166", "0.51285356", "0.5123908", "0.51230085", "0.5065923", "0.5043705", "0.5042799", "0.5035932", "0.50213355", "0.5019259", "0.5015788", "0.50116736" ]
0.66592574
0
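The positive document above is the service-side implementation of an Azure Event Grid operation; the matching caller-side sketch, with resource names as placeholders and the client assumed to be an EventGridManagementClient:

keys = client.topics.regenerate_key(
    resource_group_name="my-rg",      # placeholder resource group
    topic_name="my-topic",            # placeholder topic
    key_name="key1",                  # which shared access key to roll
)
print(keys.key1, keys.key2)           # TopicSharedAccessKeys fields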
Place a sell limit order with given quantity and price.
def limit_sell(self, order_id, quantity, price): Library.functions.limit_sell(self._book, order_id, quantity, price)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sell_limit_order(self, price=0, volume=0):\n auth = CoinbaseExchangeAuth(self.api_key, self.secret_key, self.passphrase)\n data = {\n \"size\": volume,\n \"price\": price,\n \"side\": \"sell\",\n \"product_id\": self.rate,\n \"type\": \"limit\"\n }\n\n sell = requests.post(self.url + 'orders',\n data=json.dumps(data),\n auth=auth).json()\n\n sell['txid'] = sell['id']\n\n logging.debug(sell)\n return sell", "def limit_sell(self, symbol: Symbol, price: str, quantity: str,\n receive_window: Optional[int]):\n\n return self.new_order(symbol,\n OrderSide.SELL,\n OrderType.LIMIT,\n quantity,\n price,\n receive_window)", "def place_limit_order(self, side, symbol, size, price, **kwargs):\n pass", "def limit_buy(self, order_id, quantity, price):\n Library.functions.limit_buy(self._book, order_id, quantity, price)", "def limit_buy(self, symbol: Symbol, price: str, quantity: str,\n receive_window: Optional[int]):\n\n return self.new_order(symbol,\n OrderSide.BUY,\n OrderType.LIMIT,\n quantity,\n price,\n receive_window)", "def market_sell_limit(self, market_symbol, quantity, rate, time_in_force='GOOD_TIL_CANCELLED'):\n return self.post('orders', {\n 'marketSymbol': market_symbol,\n 'direction': 'SELL',\n 'type': 'LIMIT',\n 'quantity': quantity,\n 'limit': rate,\n 'timeInForce': time_in_force\n }, auth=True)", "def sell_limit(self, market, quantity, rate):\n return self.api_query('Trade', {'type':'sell', 'pair': market, 'amount': quantity, 'rate':'%.8f'%rate})", "def sell(self, price, volume):\r\n self.order(\"ask\", price, volume)", "def limit(self, side, order_id, quantity, price):\n Library.functions.limit(self._book, side, order_id, quantity, price)", "def buy_limit_order(self, price=0, volume=0):\n auth = CoinbaseExchangeAuth(self.api_key, self.secret_key, self.passphrase)\n data = {\n \"size\": volume,\n \"price\": price,\n \"side\": \"buy\",\n \"product_id\": self.rate,\n \"type\": \"limit\"\n # \"time_in_force\": 'GTC',\n # \"cancel_after\": (datetime.now() + timedelta(minutes=10)).strftime('%M,%H,%d')\n }\n\n buy = requests.post(self.url + 'orders',\n data=json.dumps(data),\n auth=auth).json()\n buy['txid'] = buy['id']\n\n logging.debug(buy)\n return buy", "def limit_order(self, account: str, route: str, symbol: str, limit_price: float, quantity: int):\n return self._call_txtrader_api(\n 'limit_order', {\n 'account': account,\n 'route': route,\n 'symbol': symbol,\n 'limit_price': float(limit_price),\n 'quantity': quantity\n }\n )", "def create_sell_order(price: float, amount_crypto: float):\n try:\n if CONF.exchange == 'bitmex':\n price = round(price * 2) / 2\n order_size = round(price * amount_crypto)\n new_order = EXCHANGE.create_limit_sell_order(CONF.pair, order_size, price)\n elif CONF.exchange == 'kraken':\n if CONF.apply_leverage:\n new_order = EXCHANGE.create_limit_sell_order(CONF.pair, amount_crypto, price,\n {'leverage': CONF.leverage_default})\n else:\n new_order = EXCHANGE.create_limit_sell_order(CONF.pair, amount_crypto, price)\n elif CONF.exchange == 'liquid':\n new_order = EXCHANGE.create_limit_sell_order(CONF.pair, amount_crypto, price,\n {'funding_currency': CONF.base})\n norder = Order(new_order)\n LOG.info('Created %s', str(norder))\n return norder\n\n except (ccxt.ExchangeError, ccxt.NetworkError) as error:\n if any(e in str(error.args) for e in STOP_ERRORS):\n if CONF.exchange == 'bitmex':\n LOG.warning('Order submission not possible - not selling %s', order_size)\n else:\n LOG.warning('Order submission not possible - not selling %s', amount_crypto)\n return None\n 
LOG.error(RETRY_MESSAGE, type(error).__name__, str(error.args))\n sleep_for(4, 6)\n create_sell_order(price, amount_crypto)", "def place_sell_order(self):\n price = request.form[\"price\"]\n stocks = request.form[\"stocks\"]\n trader_id = request.form[\"trader_id\"]\n self.market.place_sell_order(trader_id, price, stocks)\n return \"\"", "def _sell(self, amount, price):\n params = {\"pair\": self.pair, \"type\" : \"sell\", \"rate\" : price, \"amount\" : amount}\n response = self._send_request(\"Trade\", params)\n if \"error\" in response:\n raise TradeException(response[\"error\"])", "def sell(self,\n amount,\n quote_symbol,\n rate,\n expiration=7 * 24 * 60 * 60,\n killfill=False,\n account=None,\n orderid=None):\n if not account:\n if \"default_account\" in config:\n account = config[\"default_account\"]\n if not account:\n raise ValueError(\"You need to provide an account\")\n # We buy quote and pay with base\n quote, base = self._get_assets(quote=quote_symbol)\n op = transactions.Limit_order_create(**{\n \"owner\": account,\n \"orderid\": orderid or random.getrandbits(32),\n \"amount_to_sell\": '{:.{prec}f} {asset}'.format(\n amount,\n prec=quote[\"precision\"],\n asset=quote[\"symbol\"]),\n \"min_to_receive\": '{:.{prec}f} {asset}'.format(\n amount * rate,\n prec=base[\"precision\"],\n asset=base[\"symbol\"]),\n \"fill_or_kill\": killfill,\n \"expiration\": transactions.formatTimeFromNow(expiration)\n })\n return self.dpay.finalizeOp(op, account, \"active\")", "async def sell(self, ctx, quantity: int, symbol: str):\r\n symbol = symbol.upper()\r\n author = ctx.author\r\n with DB() as db:\r\n company = await self.get_active_company(ctx, db, author)\r\n await self.market_open_check(ctx)\r\n await self.stock_symbol_check(ctx, db, symbol)\r\n \r\n inventory = self.iex.get_held_stock_quantity(db, company.id, symbol)\r\n if inventory < quantity:\r\n await ctx.send(f\"``{company.name}\\n{inventory} {symbol}``\")\r\n raise StonksError()\r\n\r\n price = self.iex.price(symbol)\r\n value = price * quantity\r\n self.iex.sell(db, company.id, symbol, quantity, price)\r\n await ctx.send(f\"``+{value} {company.name} ⯬ {quantity} {symbol} @ {price}``\")", "def _create_limit(self, price_limit):\n if self.price_limit is not None:\n return(order(self.symbol, -self.volume, style=LimitOrder(self.price_limit)))\n else:\n return", "def market_sell(self, symbol: Symbol, quantity: str,\n receive_window: Optional[int]):\n return self.new_order(symbol,\n OrderSide.SELL,\n OrderType.MARKET,\n quantity,\n price=None,\n receive_window=receive_window)", "def add_order(self, order):\n if order.is_bid:\n if order.price in self.buy_levels:\n limit = self.buy_levels[order.price]\n if limit.size == 0:\n self.buy_tree.size += 1\n limit.add(order)\n self.buy_map[order.uid] = order\n order.parent_limit = limit\n else:\n limit = Limit(order.price)\n limit.add(order)\n self.buy_map[order.uid] = order\n self.buy_tree.insert(limit)\n self.buy_tree.size += 1\n self.buy_levels[order.price] = limit\n order.parent_limit = self.buy_levels[order.price]\n if self.highest_buy is None or order.price > self.highest_buy:\n self.highest_buy = order.price\n else:\n if order.price in self.sell_levels:\n limit = self.sell_levels[order.price]\n if limit.size == 0:\n self.sell_tree.size += 1\n limit.add(order)\n self.sell_map[order.uid] = order\n order.parent_limit = self.sell_levels[order.price]\n else:\n limit = Limit(order.price)\n limit.add(order)\n self.sell_map[order.uid] = order\n self.sell_tree.insert(limit)\n self.sell_tree.size += 1\n 
self.sell_levels[order.price] = limit\n order.parent_limit = self.sell_levels[order.price]\n if self.lowest_sell is None or order.price < self.lowest_sell:\n self.lowest_sell = order.price\n self.update_book()", "async def new_limit_order(side):\n symbol = App.config[\"symbol\"]\n now_ts = now_timestamp()\n\n #\n # Find limit price (from signal, last kline and adjustment parameters)\n #\n last_kline = App.analyzer.get_last_kline(symbol)\n last_close_price = to_decimal(last_kline[4]) # Close price of kline has index 4 in the list\n if not last_close_price:\n log.error(f\"Cannot determine last close price in order to create a market buy order.\")\n return None\n\n price_adjustment = App.config[\"trader\"][\"limit_price_adjustment\"]\n if side == SIDE_BUY:\n price = last_close_price * Decimal(1.0 - price_adjustment) # Adjust price slightly lower\n elif side == SIDE_SELL:\n price = last_close_price * Decimal(1.0 + price_adjustment) # Adjust price slightly higher\n\n price_str = round_str(price, 2)\n price = Decimal(price_str) # We will use the adjusted price for computing quantity\n\n #\n # Find quantity\n #\n if side == SIDE_BUY:\n # Find how much quantity we can buy for all available USD using the computed price\n quantity = App.quote_quantity # USD\n percentage_used_for_trade = App.config[\"trader\"][\"percentage_used_for_trade\"]\n quantity = (quantity * percentage_used_for_trade) / Decimal(100.0) # Available for trade\n quantity = quantity / price # BTC to buy\n # Alternatively, we can pass quoteOrderQty in USDT (how much I want to spend)\n elif side == SIDE_SELL:\n # All available BTCs\n quantity = App.base_quantity # BTC\n\n quantity_str = round_down_str(quantity, 6)\n\n #\n # Execute order\n #\n order_spec = dict(\n symbol=symbol,\n side=side,\n type=ORDER_TYPE_LIMIT, # Alternatively, ORDER_TYPE_LIMIT_MAKER\n timeInForce=TIME_IN_FORCE_GTC,\n quantity=quantity_str,\n price=price_str,\n )\n\n if App.config[\"trader\"][\"no_trades_only_data_processing\"]:\n print(f\"NOT executed order spec: {order_spec}\")\n else:\n order = execute_order(order_spec)\n\n #\n # Store/log order object in our records (only after confirmation of success)\n #\n App.order = order\n App.order_time = now_ts\n\n return order", "def market_sell(self, order_id, quantity):\n Library.functions.market_sell(self._book, order_id, quantity)", "def orderSell(self, rate = None, amount = None):\r\n\t\treturn OrderSell(self, rate, amount)", "def sell(symbol: str,\n quantity: Any,\n order_type: str = \"market\",\n price: Any = None,\n exchange: str = CRYPTO_EXCHANGE,\n api_key: str = CRYPTO_API_KEY,\n api_secret: str = CRYPTO_API_SECRET,\n exchange_password: Any = CRYPTO_API_PASSWORD,\n exchange_uid: Any = CRYPTO_API_UID,\n test_mode: bool = False) -> Any:\n try:\n if test_mode == True:\n url = CRYPTO_URL_TEST\n else:\n url = CRYPTO_URL_LIVE\n payload = {\n 'symbol': symbol.upper(),\n 'quantity': quantity,\n 'order_type': order_type,\n 'limitPrice': price\n }\n response = requests.post('{}/sell/{}'.format(url, exchange),\n headers=crypto_get_headers(\n api_key, api_secret, exchange_password,\n exchange_uid),\n json=payload)\n if response:\n return response.json()\n if response.status_code == 400:\n logger.error('Oops! An error Occurred ⚠️')\n raise BadRequest(response.text)\n if response.status_code == 401:\n logger.error('Oops! An error Occurred ⚠️')\n raise InvalidCredentials(response.text)\n except Exception as exception:\n logger.error('Oops! 
An error Occurred ⚠️')\n raise exception", "def sell(self, irc, msg, args, optlist, amount, thing, price, otherthing, notes):\n self.db.deleteExpired(self.registryValue('orderExpiry'))\n gpgauth = self._checkGPGAuth(irc, msg.prefix)\n if gpgauth is None:\n irc.error(\"For identification purposes, you must be identified via GPG \"\n \"to use the order book.\")\n return\n results = self.db.getByNick(gpgauth['nick'])\n if len(results) >= self.registryValue('maxUserOpenOrders'):\n irc.error(\"You may not have more than %s outstanding open orders.\" % \\\n self.registryValue('maxUserOpenOrders'))\n return\n extratime = 0\n if dict(optlist).has_key('long'):\n extratime = self.registryValue('longOrderDuration')\n trust = self._getTrust(irc, 'nanotube', gpgauth['nick'])\n sumtrust = sum([t for t,n in trust])\n if sumtrust < self.registryValue('minTrustForLongOrders'):\n irc.error(\"You must have a minimum of %s cumulative trust at \"\n \"level 1 and level 2 from nanotube to \"\n \"to place long orders.\" % (self.registryValue('minTrustForLongOrders'),))\n return\n orderid = self.db.sell(gpgauth['nick'], msg.host, amount, thing, price, otherthing, notes, extratime)\n irc.reply(\"Order id %s created.\" % (orderid,))\n if not world.testing:\n irc.queueMsg(ircmsgs.privmsg(\"#bitcoin-otc-ticker\",\n \"#%s || %s || SELL %s %s @ %s %s || %s\" % (orderid,\n gpgauth['nick'],\n amount,\n thing,\n self._getIndexedValue(price),\n otherthing,\n notes,)))", "def market_buy_limit(self, market_symbol, quantity, rate, time_in_force='GOOD_TIL_CANCELLED'):\n return self.post('orders', {\n 'marketSymbol': market_symbol,\n 'direction': 'BUY',\n 'type': 'LIMIT',\n 'quantity': quantity,\n 'limit': rate,\n 'timeInForce': time_in_force\n }, auth=True)", "def sell(self, bar, volume):\n self.place(Order(symbol=bar.symbol,\n volume=volume,\n price=bar.close,\n transaction=TransactionType.SELL,\n timestamp=bar.timestamp))", "def stoplimit_order(\n self, account: str, route: str, symbol: str, stop_price: float, limit_price: float, quantity: int\n ):\n return self._call_txtrader_api(\n 'stoplimit_order', {\n 'account': account,\n 'route': route,\n 'symbol': symbol,\n 'stop_price': float(stop_price),\n 'limit_price': float(limit_price),\n 'quantity': int(quantity)\n }\n )", "async def test_create_limit_sell_order(self):\n trade_result = {\n 'error': 10009,\n 'description': 'TRADE_RETCODE_DONE',\n 'orderId': 46870472\n }\n client.trade = AsyncMock(return_value=trade_result)\n actual = await api.create_limit_sell_order('GBPUSD', 0.07, 1.0, 0.9, 2.0, {'comment': 'comment',\n 'clientId': 'TE_GBPUSD_7hyINWqAlE'})\n assert actual == trade_result\n client.trade.assert_called_with('accountId', {'actionType': 'ORDER_TYPE_SELL_LIMIT', 'symbol': 'GBPUSD',\n 'volume': 0.07, 'openPrice': 1.0, 'stopLoss': 0.9,\n 'takeProfit': 2.0, 'comment': 'comment',\n 'clientId': 'TE_GBPUSD_7hyINWqAlE'}, 'RPC')", "def sell(self, symbol: str=None, quantity: int=0, in_force: str='gtc', extended: bool=False):\n return self.trader.sell(symbol, quantity, in_force, extended)", "def buy(self, price, volume):\r\n self.order(\"bid\", price, volume)" ]
[ "0.7925371", "0.78168416", "0.75428194", "0.7417188", "0.7071134", "0.70346624", "0.6954648", "0.69130063", "0.69096357", "0.6874446", "0.6859875", "0.6753681", "0.66568744", "0.6572149", "0.6491404", "0.6474029", "0.64717853", "0.6457523", "0.6403897", "0.64003015", "0.639665", "0.63638526", "0.63411975", "0.6334398", "0.63131523", "0.63075775", "0.6262086", "0.62342066", "0.6179413", "0.61679375" ]
0.7997252
0
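The limit_sell document above is a thin wrapper that forwards to a native library through a module-level binding; the same pattern in miniature, with the shared-library name and symbol signature as explicit assumptions:

import ctypes

lib = ctypes.CDLL("liblob.so")        # assumed native order-book library
lib.limit_sell.argtypes = [ctypes.c_void_p, ctypes.c_uint64,
                           ctypes.c_uint32, ctypes.c_uint64]

class Book:
    def __init__(self, handle):
        self._book = handle           # opaque pointer owned by the native side

    def limit_sell(self, order_id, quantity, price):
        lib.limit_sell(self._book, order_id, quantity, price)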
Return true if the order with given ID is in the book, false otherwise.
def has(self, order_id): return Library.functions.has(self._book, order_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_bookid(self,book_id):\r\n if int(book_id) in [i.book_id for i in self.issued_books]:\r\n return True\r\n else:\r\n return False", "def has_book(self, book):\n return self.books.filter(lists_books.c.book_id == book.id).count() > 0", "def check_if_exists(self, bookID):\n query = f\"\"\"SELECT * from {TABLE} WHERE bookID = '{bookID}';\"\"\"\n res = self.cursor.execute(query)\n\n if self.cursor.fetchall():\n return True\n else:\n return False", "def contained_in_order(cls, order, course_id):\r\n return course_id in [item.paidcourseregistration.course_id\r\n for item in order.orderitem_set.all().select_subclasses(\"paidcourseregistration\")]", "def has_book(self, book):\n return self.books.filter(users_books.c.book_id == book.id).count() > 0", "def item_exists(item_id):\n return item_id in all_items", "def k(self, id):\n return id in self._m", "def cardExists(self, id):\n return id in self.cards", "def is_in_stock(self, bookID):\n query = f\"\"\"SELECT quantity from {TABLE} where bookID = '{bookID}';\"\"\"\n self.cursor.execute(query)\n\n q = self.cursor.fetchone()\n\n if q[0] > 0:\n return True\n else:\n return False", "def has_id(self, check_id: str) -> bool:\n return check_id in self.by_id or check_id in self.conflicts", "def is_ancestor_of_book(self, id_, book_id):\n # Implemented from template for\n # osid.resource.BinHierarchySession.is_ancestor_of_bin\n if self._catalog_session is not None:\n return self._catalog_session.is_ancestor_of_catalog(id_=id_, catalog_id=book_id)\n return self._hierarchy_session.is_ancestor(id_=id_, ancestor_id=book_id)", "def is_descendant_of_book(self, id_, book_id):\n # Implemented from template for\n # osid.resource.BinHierarchySession.is_descendant_of_bin\n if self._catalog_session is not None:\n return self._catalog_session.is_descendant_of_catalog(id_=id_, catalog_id=book_id)\n return self._hierarchy_session.is_descendant(id_=id_, descendant_id=book_id)", "def contains(self, object_id):\n return libplasma.contains(self.conn, object_id)", "def is_in(self, entry):\n return entry in self.__entries", "def have_own_oid(self, oid):\r\n for order in self.owns:\r\n if order.oid == oid:\r\n return True\r\n return False", "def __contains__(self, item):\n return item in self._fetch()", "def is_in(batch, data):\n _id = batch[-1]\n for d in data:\n if d[-1] == _id:\n return True\n return False", "def exists(self, Search_ID):\n if self.get_id(Search_ID) is None:\n return False\n else:\n return True", "def is_child_of_book(self, id_, book_id):\n # Implemented from template for\n # osid.resource.BinHierarchySession.is_child_of_bin\n if self._catalog_session is not None:\n return self._catalog_session.is_child_of_catalog(id_=id_, catalog_id=book_id)\n return self._hierarchy_session.is_child(id_=book_id, child_id=id_)", "def is_book_available(self, book):\n request_url = \"%s?q=%s\" % (self.API_URL, book)\n json_data = self.make_request(request_url)\n if json_data and len(json_data['docs']) >= 1:\n return True\n return False", "def is_in_bag(self, item):\n return item in self._bag", "def test_in_list(self):\n\n # get available ids\n ids = list(DQ(\"(b.id) Book b\").tuples())\n ids = [id[0] for id in ids]\n\n # take just three of them\n c = {\"ids\": ids[:3]}\n dq = DQ(\"(b.id, b.name) Book{b.id in '$(ids)'} b\")\n r = list(dq.context(c).dicts())\n\n # make sure we got three of them\n self.assertEqual(len(r), 3)", "def __contains__(self, steamid):\r\n return bool( steamid in self.ranks )", "def id_in_list(obj_list, sb_object):\n if __debug__:\n 
print(\"Checking if sb_object in list...\")\n for sb_objects in obj_list:\n if sb_object.ID == sb_objects.ID:\n if __debug__:\n print(\"Object in list.\")\n return True\n if __debug__:\n print(\"Object not in list\")\n return False", "def alreay_in_group(self,uid,group_id):\n uid = str(uid)\n user_group_list = self.get_group_list_via_uid(uid)\n return True if group_id in user_group_list else False", "def has_item(self, item: Inventory) -> bool:\n return (item.pk,) in self.orderitem_set.values_list('item')", "def is_booked(self):\n return self.booking_set.filter(confirmed=True).count() > 0", "def book_exist(author, title, edition):\n book = Book.query.filter_by(\n author=author,\n book_title=title,\n edition=edition).first()\n if book:\n return True\n return False", "def __contains__(self, val):\n return val in self.ids or super().__contains__(val)", "def exists(cls, ko):\n if isinstance(ko, BagDocument):\n return ko._key in cls._dbag\n else:\n return ko in cls._dbag" ]
[ "0.7184754", "0.67059475", "0.66672266", "0.6560992", "0.6473398", "0.6266325", "0.6255005", "0.62520516", "0.6207968", "0.60897744", "0.59901696", "0.5943703", "0.5933619", "0.5922701", "0.5920018", "0.5893061", "0.5883331", "0.5859028", "0.5831498", "0.5804595", "0.58011097", "0.5793702", "0.57823807", "0.57535", "0.57432795", "0.57369787", "0.5711271", "0.5709922", "0.57047063", "0.57045984" ]
0.8160919
0
Return the best sell price in the book.
def best_sell(self): return Library.functions.best_sell(self._book)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def best_buy(self):\n return Library.functions.best_buy(self._book)", "def get_best_offer(self,way):\n if way==\"BUY\":\n return self.book[Trade.WAY_BUY][0].get_price()\n elif way==\"SELL\":\n return self.book[Trade.WAY_SELL][len(self.book[Trade.WAY_SELL])-1].get_price()", "def best_price(self):\n # TODO rename this to \"display_lowest_price\" or something...\n price = self.lowest_price\n if price:\n return Decimal.quantize(price.per_kg, settings.DECIMAL_CENTS) # round to cents", "def best_bid_price(orders: pandas.DataFrame):\n return best_bid_order(orders).price", "def get_greatest_stock_price():\n greatest_stock_price = 0\n // your code here", "def calc_market_order_sell(self, time, order_volume):\n rec = self.select_order_book_price_with_retry(time)\n if rec is None:\n return None\n\n sell_min, sell_volume, buy_max, buy_volume = rec\n\n if order_volume * 1.5 < buy_volume: # 2 means enough margin\n return buy_max\n else:\n return buy_max - PRICE_UNIT", "def highest_bid(self):\n (price_eur, volume, _) = self._order_book['bids'][0]\n price_usd = Decimal(price_eur) * self._eurusd_rate\n return {\n 'price': Decimal(price_usd),\n 'volume': Decimal(volume),\n }", "def highest_rated_book(self):\n rating_max = 0\n best_rated_book = \"\"\n for book in self.books.keys():\n rating = book.get_average_rating()\n if rating > rating_max:\n rating_max = rating\n best_rated_book = book\n else:\n continue\n return best_rated_book", "def selling_price(self):\n # If a system can't produce something, its price is zero.\n _good = self.tradeitem\n if self.planet.tech_level < _good.tp and _good.name not in 'fuel':\n sell_price = 0\n else:\n sell_price = self.standard_init_price()\n # raise a bit, randomized\n sell_price = sell_price + random.randrange(self.tradeitem.var)\n\n return int(sell_price)", "def best(self, side):\n return Library.functions.best(self._book, side)", "def get_current_price(self):\n highest_bid = sorted([bid.amount for bid in self.bids])[-1] if self.bids else 0\n return max(self.starting_price, highest_bid)", "def __find_max_price(self):\n prices_map = map(\n lambda iceberg: utils.get_actual_penguin_amount(\n self.__game, iceberg),\n self.__game.get_all_icebergs()\n )\n return max(prices_map)", "def best_ask_price(orders: pandas.DataFrame):\n return best_ask_order(orders).price", "def calculate_sell_price(price: float):\n return round(price * (1 + CONF.trade_advantage_in_percent / 100), 1)", "def best_value(stock):\n best_sell = sell = stock.pop()\n buy = stock.pop()\n\n while stock:\n num = stock.pop()\n if num < buy:\n buy = num\n sell = best_sell\n elif best_sell - num > sell - buy:\n sell, buy = best_sell, num\n elif num > best_sell:\n best_sell = num\n\n return (buy, sell)", "def sell_cost(self):\n return self._manager.get_sell_price(self.name)", "def max_price(self):\n return self._max_price", "def priceGetMost(soup):\n main = soup.find('span', class_='price-large')\n main = main.text\n main = main.strip()\n main = float(main)\n # Extract Cents\n centsList = soup.findAll('span', class_='a-size-small price-info-superscript')\n cents = centsList[1]\n cents = cents.text\n cents = cents.strip()\n cents = '.' 
+ cents\n cents = float(cents)\n price = main + cents\n\n return price", "def get_sell_cost(self):\n return round(0.75 * self.sell_price[self.level - 1])", "def get_cheapest_price(self, movie_title):\n self.get_all_movies()\n movie_list = self.title_map.get(movie_title.strip().lower(), [])\n\n if movie_list is None:\n return None\n\n pick_list = []\n for movie_info in movie_list:\n try:\n movie_id = movie_info['ID']\n movie_world = movie_info['world']\n except KeyError as e:\n print(\"Price is not available for {}\".format(movie_title))\n continue\n pick_list.append({'id': movie_id, 'world': movie_world})\n\n if pick_list is None:\n return None\n\n pool = Pool(2)\n movies_list = pool.map(self.get_movie_from_id, pick_list)\n pool.close()\n pool.join()\n\n # Set price as maximum float value in start to find minimum value\n price = sys.float_info.max\n print(\"\\nMovie info from different worlds:\\n\")\n for movie in movies_list:\n if movie is None:\n continue\n print(\"[{}]\".format(movie['world']))\n for key, value in movie.items():\n print(\" {} = {}\".format(key, value))\n print(\"\\n\")\n try:\n movie_price = float(movie['Price'])\n except KeyError as e:\n print(\"Price is not available for {}\".format(movie_title))\n continue\n if movie_price < price:\n price = movie_price\n\n if price == sys.float_info.max:\n return None\n\n return str(price)", "def get_good_price_by_deal(self):\n if self.deal == SALE:\n return self.new_price\n elif self.deal == AUCTION:\n return self.max_price\n else:\n return self.price", "def maxProfit(self, prices):\r\n\t\tprofit = 0", "def max_profit(prices: List[int]) -> int:", "def getprice():\n\n print(\"Get price\")\n latest_price = get_latest_price(item_code)\n return latest_price", "def find_max_profit(prices):\n profit = []\n for index, price in enumerate(prices):\n buy = prices[index]\n sell_list = prices[index + 1:]\n if sell_list != []:\n for sell_price in sell_list:\n profit.append(sell_price - buy)\n return sorted(profit)[-1]", "def mid(self):\n if self.bid and self.offer:\n return (self.bid[-1].price + self.offer[0].price) / 2.0\n\n raise Exception(\"No bids / offers!\")", "def highest_rated_book(self):\n highest_rated = 0.0\n top_rated_book = \"\"\n\n for book in self.books.keys():\n book_rating = book.get_average_rating()\n if book_rating > highest_rated:\n highest_rated = book_rating\n top_rated_book = book.title\n return f\"'{top_rated_book}' is highest rated book with a rating of {highest_rated}.\"", "def maxProfit(self, prices):\n l = len(prices)\n if l <= 1:\n return 0\n dp = [0] * len(prices)\n r = prices[1] - prices[0]\n m = prices[0]\n for i in range(2, l):\n m = min(prices[i - 1], m)\n r = max(r, prices[i] - m)\n \n return r if r >= 0 else 0", "def maxProfit(prices, k):\n buys, sells = [-sys.maxsize] * k, [-sys.maxsize] * k\n for price in prices:\n for i, (buy, sell) in enumerate(zip(buys, sells)):\n if i == 0:\n buys[i] = max(buy, -price)\n sells[i] = max(sell, buy+price)\n else:\n buys[i] = max(buy, sells[i-1]-price)\n sells[i] = max(sell, buy+price)\n return max(sells)", "def SellingPrice(self):\n return self.selling_price" ]
[ "0.7876577", "0.77371395", "0.7511024", "0.7234025", "0.7008871", "0.69746155", "0.694191", "0.6918493", "0.6890469", "0.685904", "0.68445", "0.68308765", "0.6789738", "0.6782889", "0.67679006", "0.67323667", "0.669388", "0.66653705", "0.6644225", "0.6625419", "0.66128826", "0.66102684", "0.65985405", "0.65913117", "0.65739125", "0.65044683", "0.64417946", "0.6414332", "0.64125377", "0.6396933" ]
0.8430281
0
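For context, a minimal pure-Python sketch of the semantics behind the best_sell entry above. The dict layout and numbers are illustrative assumptions; the real method simply delegates to the compiled Library binding.

# Hypothetical sell side: price -> open volume at that level.
sell_orders = {101.5: 200, 101.0: 50, 102.25: 75}

def best_sell_sketch(side):
    # The best sell (ask) price is the lowest price among resting sell orders.
    return min(side) if side else None

assert best_sell_sketch(sell_orders) == 101.0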
Return the best buy price in the book.
def best_buy(self): return Library.functions.best_buy(self._book)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def best_sell(self):\n return Library.functions.best_sell(self._book)", "def get_best_offer(self,way):\n if way==\"BUY\":\n return self.book[Trade.WAY_BUY][0].get_price()\n elif way==\"SELL\":\n return self.book[Trade.WAY_SELL][len(self.book[Trade.WAY_SELL])-1].get_price()", "def best_price(self):\n # TODO rename this to \"display_lowest_price\" or something...\n price = self.lowest_price\n if price:\n return Decimal.quantize(price.per_kg, settings.DECIMAL_CENTS) # round to cents", "def best_bid_price(orders: pandas.DataFrame):\n return best_bid_order(orders).price", "def best_ask_price(orders: pandas.DataFrame):\n return best_ask_order(orders).price", "def highest_bid(self):\n (price_eur, volume, _) = self._order_book['bids'][0]\n price_usd = Decimal(price_eur) * self._eurusd_rate\n return {\n 'price': Decimal(price_usd),\n 'volume': Decimal(volume),\n }", "def get_greatest_stock_price():\n greatest_stock_price = 0\n // your code here", "def calc_market_order_sell(self, time, order_volume):\n rec = self.select_order_book_price_with_retry(time)\n if rec is None:\n return None\n\n sell_min, sell_volume, buy_max, buy_volume = rec\n\n if order_volume * 1.5 < buy_volume: # 2 means enough margin\n return buy_max\n else:\n return buy_max - PRICE_UNIT", "def get_good_price_by_deal(self):\n if self.deal == SALE:\n return self.new_price\n elif self.deal == AUCTION:\n return self.max_price\n else:\n return self.price", "def get_current_price(self):\n highest_bid = sorted([bid.amount for bid in self.bids])[-1] if self.bids else 0\n return max(self.starting_price, highest_bid)", "def priceGetMost(soup):\n main = soup.find('span', class_='price-large')\n main = main.text\n main = main.strip()\n main = float(main)\n # Extract Cents\n centsList = soup.findAll('span', class_='a-size-small price-info-superscript')\n cents = centsList[1]\n cents = cents.text\n cents = cents.strip()\n cents = '.' 
+ cents\n cents = float(cents)\n price = main + cents\n\n return price", "def getprice():\n\n print(\"Get price\")\n latest_price = get_latest_price(item_code)\n return latest_price", "def best(self, side):\n return Library.functions.best(self._book, side)", "def get_cheapest_price(self, movie_title):\n self.get_all_movies()\n movie_list = self.title_map.get(movie_title.strip().lower(), [])\n\n if movie_list is None:\n return None\n\n pick_list = []\n for movie_info in movie_list:\n try:\n movie_id = movie_info['ID']\n movie_world = movie_info['world']\n except KeyError as e:\n print(\"Price is not available for {}\".format(movie_title))\n continue\n pick_list.append({'id': movie_id, 'world': movie_world})\n\n if pick_list is None:\n return None\n\n pool = Pool(2)\n movies_list = pool.map(self.get_movie_from_id, pick_list)\n pool.close()\n pool.join()\n\n # Set price as maximum float value in start to find minimum value\n price = sys.float_info.max\n print(\"\\nMovie info from different worlds:\\n\")\n for movie in movies_list:\n if movie is None:\n continue\n print(\"[{}]\".format(movie['world']))\n for key, value in movie.items():\n print(\" {} = {}\".format(key, value))\n print(\"\\n\")\n try:\n movie_price = float(movie['Price'])\n except KeyError as e:\n print(\"Price is not available for {}\".format(movie_title))\n continue\n if movie_price < price:\n price = movie_price\n\n if price == sys.float_info.max:\n return None\n\n return str(price)", "def get_order_price(self):\r\n if self.price is not None:\r\n return self.price #typical limit price order\r\n else:\r\n #Check the orderbook\r\n logger.info(\"floating price\")\r\n self.get_orderbook()\r\n logger.info(self.orderbook_snapshot)\r\n\r\n pass", "def highest_rated_book(self):\n rating_max = 0\n best_rated_book = \"\"\n for book in self.books.keys():\n rating = book.get_average_rating()\n if rating > rating_max:\n rating_max = rating\n best_rated_book = book\n else:\n continue\n return best_rated_book", "def __find_max_price(self):\n prices_map = map(\n lambda iceberg: utils.get_actual_penguin_amount(\n self.__game, iceberg),\n self.__game.get_all_icebergs()\n )\n return max(prices_map)", "def max_profit(prices: List[int]) -> int:", "def max_price(self):\n return self._max_price", "def selling_price(self):\n # If a system can't produce something, its price is zero.\n _good = self.tradeitem\n if self.planet.tech_level < _good.tp and _good.name not in 'fuel':\n sell_price = 0\n else:\n sell_price = self.standard_init_price()\n # raise a bit, randomized\n sell_price = sell_price + random.randrange(self.tradeitem.var)\n\n return int(sell_price)", "def maxProfit(self, prices):\r\n\t\tprofit = 0", "def calc_market_order_buy(self, time, order_volume):\n rec = self.select_order_book_price_with_retry(time)\n if rec is None:\n return None\n\n sell_min, sell_volume, buy_max, buy_volume = rec\n\n if order_volume * 1.5 < sell_volume: # 1.5 means enough margin\n return sell_min\n else:\n return sell_min + PRICE_UNIT", "def calculate_sell_price(price: float):\n return round(price * (1 + CONF.trade_advantage_in_percent / 100), 1)", "def get(self, price, way):\n for offer in self.book[way]:\n if offer.get_price() == price:\n return offer\n return None", "def get_higest_bid(self):\n orders = self.returnOrderBook(1)\n return orders[\"bids\"][0]", "def buy_and_pay(self):\n return self.price", "def get_best_bid_ask(self, ticker):\n if symbol in self.symbol:\n bid = self.symbol[symbol][\"bid\"]\n ask = self.symbol[symbol][\"ask\"]\n return bid, ask\n 
else:\n print(\n \"Bid/ask values for ticker %s are not \"\n \"available from the PriceHandler.\" % symbol\n )\n return None, None", "def best_bid_order(orders: pandas.DataFrame) -> pandas.Series:\n bds = bids(orders)\n index = bds[bds.price == bds.price.max()]['amount'].idxmin()\n return bds.loc[index]", "def calculate_buy_price(price: float):\n return round(price / (1 + CONF.trade_advantage_in_percent / 100), 1)", "def __call__(self, auctioneer):\n curr_bid = auctioneer.current_bid\n bid_price = curr_bid * self._bid_increase_perc\n if bid_price <= self._budget and self.get_bid_probability() > 0.3:\n self._highest_bid = bid_price\n return bid_price\n return 0" ]
[ "0.78773767", "0.78276116", "0.7595051", "0.7370348", "0.7110304", "0.68662673", "0.682843", "0.68111056", "0.6778916", "0.67588013", "0.6744359", "0.673743", "0.66320306", "0.66038054", "0.6591113", "0.65841454", "0.6548237", "0.653549", "0.6532241", "0.65042275", "0.6503913", "0.64833885", "0.64814943", "0.6453596", "0.64090025", "0.63860184", "0.6378042", "0.6377529", "0.6375909", "0.6372155" ]
0.8379767
0
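A matching sketch for the best_buy entry: the best buy (bid) is the highest resting bid price, and together with the best ask it yields the spread. Names and numbers are again assumptions for illustration only.

buy_orders = {100.75: 120, 100.5: 300}   # price -> open volume
sell_orders = {101.0: 50, 101.5: 200}

best_bid = max(buy_orders)               # highest bid: 100.75
best_ask = min(sell_orders)              # lowest ask: 101.0
spread = best_ask - best_bid
assert spread == 0.25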
Return the best price for the given side.
def best(self, side): return Library.functions.best(self._book, side)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_safe_price(side: str):\n return (\n const.MIN_SUM if side == const.BUY\n else const.MAX_SUM\n )", "def get_best_offer(self,way):\n if way==\"BUY\":\n return self.book[Trade.WAY_BUY][0].get_price()\n elif way==\"SELL\":\n return self.book[Trade.WAY_SELL][len(self.book[Trade.WAY_SELL])-1].get_price()", "def best_price(self):\n # TODO rename this to \"display_lowest_price\" or something...\n price = self.lowest_price\n if price:\n return Decimal.quantize(price.per_kg, settings.DECIMAL_CENTS) # round to cents", "def best_bid_price(orders: pandas.DataFrame):\n return best_bid_order(orders).price", "def best_ask_price(orders: pandas.DataFrame):\n return best_ask_order(orders).price", "def get(self, price, way):\n for offer in self.book[way]:\n if offer.get_price() == price:\n return offer\n return None", "def best_promo(order: Order) -> Decimal:\n return max(promo(order) for promo in promos) # <3>", "def best_sell(self):\n return Library.functions.best_sell(self._book)", "def getprice():\n\n print(\"Get price\")\n latest_price = get_latest_price(item_code)\n return latest_price", "def get_good_price_by_deal(self):\n if self.deal == SALE:\n return self.new_price\n elif self.deal == AUCTION:\n return self.max_price\n else:\n return self.price", "def getBestOption(self):\n if len(self.Data) < 1:\n return None\n else:\n bestR = max(self.Data.items(), key=lambda x: x[1]['SPat'].I)\n return bestR[1]", "def get_greatest_stock_price():\n greatest_stock_price = 0\n // your code here", "def get_cheapest_price(self, movie_title):\n self.get_all_movies()\n movie_list = self.title_map.get(movie_title.strip().lower(), [])\n\n if movie_list is None:\n return None\n\n pick_list = []\n for movie_info in movie_list:\n try:\n movie_id = movie_info['ID']\n movie_world = movie_info['world']\n except KeyError as e:\n print(\"Price is not available for {}\".format(movie_title))\n continue\n pick_list.append({'id': movie_id, 'world': movie_world})\n\n if pick_list is None:\n return None\n\n pool = Pool(2)\n movies_list = pool.map(self.get_movie_from_id, pick_list)\n pool.close()\n pool.join()\n\n # Set price as maximum float value in start to find minimum value\n price = sys.float_info.max\n print(\"\\nMovie info from different worlds:\\n\")\n for movie in movies_list:\n if movie is None:\n continue\n print(\"[{}]\".format(movie['world']))\n for key, value in movie.items():\n print(\" {} = {}\".format(key, value))\n print(\"\\n\")\n try:\n movie_price = float(movie['Price'])\n except KeyError as e:\n print(\"Price is not available for {}\".format(movie_title))\n continue\n if movie_price < price:\n price = movie_price\n\n if price == sys.float_info.max:\n return None\n\n return str(price)", "def best_promo(order):\n return max(promo(order) for promo in promos)", "def best_bid_order(orders: pandas.DataFrame) -> pandas.Series:\n bds = bids(orders)\n index = bds[bds.price == bds.price.max()]['amount'].idxmin()\n return bds.loc[index]", "def best_ask_order(orders: pandas.DataFrame) -> pandas.Series:\n # DataFrames are mutable, thus not hashable. 
For this reason we cannot make use\n # of memoization but resort to such a hacky and stupid local-scoped cache.\n sks = asks(orders)\n index = sks[sks.price == sks.price.min()]['amount'].idxmax()\n return sks.loc[index]", "def _get_lip_best(self) -> float:\n pass", "def calculate_sell_price(price: float):\n return round(price * (1 + CONF.trade_advantage_in_percent / 100), 1)", "def best_buy(self):\n return Library.functions.best_buy(self._book)", "def calculate_stop_loss_price(market_price: float, order_price: float, stop_loss_price: float, side: str):\n if side == 'LONG':\n if not stop_loss_price:\n stop_loss_price = order_price - (order_price / 100) * CONF.stop_loss_in_percent\n if market_price - (market_price / 100) * CONF.stop_loss_in_percent > stop_loss_price:\n stop_loss_price = market_price - (market_price / 100) * CONF.stop_loss_in_percent\n if not CONF.no_action_at_loss or stop_loss_price > order_price:\n return stop_loss_price\n return None\n if not stop_loss_price:\n stop_loss_price = order_price + (order_price / 100) * CONF.stop_loss_in_percent\n if market_price + (market_price / 100) * CONF.stop_loss_in_percent < stop_loss_price:\n stop_loss_price = market_price + (market_price / 100) * CONF.stop_loss_in_percent\n if not CONF.no_action_at_loss or stop_loss_price < order_price:\n return stop_loss_price\n return None", "def priceGetMost(soup):\n main = soup.find('span', class_='price-large')\n main = main.text\n main = main.strip()\n main = float(main)\n # Extract Cents\n centsList = soup.findAll('span', class_='a-size-small price-info-superscript')\n cents = centsList[1]\n cents = cents.text\n cents = cents.strip()\n cents = '.' + cents\n cents = float(cents)\n price = main + cents\n\n return price", "def get_player_best_score(self, player):\n return self.get_highscores().filter(player=player).first()", "def get_current_price(self):\n highest_bid = sorted([bid.amount for bid in self.bids])[-1] if self.bids else 0\n return max(self.starting_price, highest_bid)", "def get_prices(paths, side):\n prices = {}\n\n for path in paths:\n data = get_data(path)\n\n for pair in data.keys():\n\n if pair not in prices:\n prices[pair] = []\n\n if side == 'bids': # Best bid\n\n bids = data[pair]['bids'].keys()\n float_bids = [float(b) for b in bids]\n sorted_bids = sorted(float_bids)\n best_bid = sorted_bids[-1]\n prices[pair].append(best_bid)\n\n elif side == 'asks': # Best ask\n\n asks = data[pair]['asks'].keys()\n float_asks = [float(a) for a in asks]\n sorted_asks = sorted(float_asks)\n best_ask = sorted_asks[0]\n prices[pair].append(best_ask)\n\n else:\n raise Exception('Side must be either \"asks\" or \"bids\".')\n\n return prices", "def latest_price(self, symbol: str, state: Series, is_backtest: bool, crypto: bool, side: str = 'buy') -> float:\n if is_backtest:\n if crypto:\n if side == 'buy': return state['close']\n else: return state['close']\n else: return state['close']\n else:\n if crypto:\n try: ask, bid = self.cb_client.latest_symbol_price(symbol)\n except Exception as e:\n print(e)\n raise Exception(f'Latest crypto buy-side tick data for {symbol} not available: ', e)\n\n print(f'latest_price:spread: {ask-bid:.5f} ({(ask-bid)/ask*100:.3f})')\n if side == 'buy': return ask\n else: return bid\n else:\n if side == 'buy':\n if symbol in self.ib_client.latest:\n try: return float(self.ib_client.latest[symbol].ask)\n except: return self.ib_client.latest[symbol].ask\n else:\n print(f'{symbol} buy-side not in {self.ib_client.latest}')\n raise 'Latest market buy-side tick data for {} not 
available'.format(symbol)\n else:\n if symbol in self.ib_client.latest:\n try: return float(self.ib_client.latest[symbol].bid)\n except: return self.ib_client.latest[symbol].bid\n else:\n print(f'{symbol} sell-side not in {self.ib_client.latest}')\n raise 'Latest market sell-side tick data for {} not available'.format(symbol)", "def get_best(self):\n scores, ids = self.sort_best()\n return scores[1], ids[1]", "def open_exec_price(self, direction):\n if direction > 0:\n return self._market_ofr\n elif direction < 0:\n return self._market_bid\n else:\n return self._market_ofr", "def vol_from_price( self, price ):\n def target_func( price, vol ):\n return self.price_from_vol( vol ) - price \n \n return brentq( partial( target_func, price ), 1e-8, 10 )", "def get_order_price(self):\r\n if self.price is not None:\r\n return self.price #typical limit price order\r\n else:\r\n #Check the orderbook\r\n logger.info(\"floating price\")\r\n self.get_orderbook()\r\n logger.info(self.orderbook_snapshot)\r\n\r\n pass", "def get_price(self):\n return self.sale_price if self.sale_price else self.price" ]
[ "0.73120564", "0.6958201", "0.67411786", "0.6623038", "0.6302165", "0.6228178", "0.6109067", "0.60349244", "0.60328555", "0.59425896", "0.5927218", "0.58721644", "0.5837019", "0.58329946", "0.57939583", "0.57860625", "0.57763773", "0.5764865", "0.5751211", "0.57504004", "0.57464314", "0.57462937", "0.57035327", "0.5695298", "0.56765205", "0.5648273", "0.5644852", "0.5616136", "0.5607592", "0.56068456" ]
0.80683696
0
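Since best() takes a side argument, a sketch can make the dispatch explicit: the buy side resolves to the maximum bid, the sell side to the minimum ask. The Side enum and book layout are hypothetical; the underlying Library encodes sides in its own way.

from enum import Enum

class Side(Enum):
    BUY = 0
    SELL = 1

def best_sketch(book, side):
    # max for bids, min for asks; raises ValueError on an empty side.
    prices = book[side]
    return max(prices) if side is Side.BUY else min(prices)

book = {Side.BUY: [100.5, 100.75], Side.SELL: [101.0, 101.5]}
assert best_sketch(book, Side.BUY) == 100.75
assert best_sketch(book, Side.SELL) == 101.0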
Return the volume of the sell side of the book at the given price.
def volume_sell(self, price=None): if price is None: return Library.functions.volume_sell(self._book) return Library.functions.volume_sell_price(self._book, price)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def volume(self, price=None):\n if price is None:\n return Library.functions.volume(self._book)\n return Library.functions.volume_price(self._book, price)", "def get_own_volume_at(self, price, typ=None):\r\n volume = 0\r\n for order in self.owns:\r\n if order.price == price and (not typ or typ == order.typ):\r\n volume += order.volume\r\n return volume", "def volume_buy(self, price=None):\n if price is None:\n return Library.functions.volume_buy(self._book)\n return Library.functions.volume_buy_price(self._book, price)", "def vol_from_price( self, price ):\n def target_func( price, vol ):\n return self.price_from_vol( vol ) - price \n \n return brentq( partial( target_func, price ), 1e-8, 10 )", "def calculate_sell_price(price: float):\n return round(price * (1 + CONF.trade_advantage_in_percent / 100), 1)", "def get_volume(self, ticker):\n return self.trading_client.ticker(ticker, \"usd\")['volume']", "def sell(self, price, volume):\r\n self.order(\"ask\", price, volume)", "def calc_market_order_sell(self, time, order_volume):\n rec = self.select_order_book_price_with_retry(time)\n if rec is None:\n return None\n\n sell_min, sell_volume, buy_max, buy_volume = rec\n\n if order_volume * 1.5 < buy_volume: # 2 means enough margin\n return buy_max\n else:\n return buy_max - PRICE_UNIT", "def volume (self):\n volume = self.sideLength**3\n return volume", "def sell(self, ticker, volume):\n if volume <= 0: \n raise errs.VolumeLessThanZeroError\n\n sell_trade = Trade(ticker=ticker, volume=volume, account_id=self.id)\n if trade.get_current_price(ticker) is None:\n raise errs.NoSuchTickerError\n else:\n sell_trade.unit_price = trade.get_current_price(ticker)\n \n decrease_position = Position.from_account_id_and_ticker(account_id=sell_trade.account_id, ticker=sell_trade.ticker)\n if decrease_position.shares < sell_trade.volume:\n raise errs.InsufficientSharesError\n decrease_position.shares -= sell_trade.volume\n decrease_position.save()\n\n sell_trade.volume *= -1 # Differentiates buys/sells with pos/negative volume\n sell_trade.save()", "def volume(self):\n vol = self.daily['Volume']\n sma = vol.rolling(20).mean()\n std = vol.rolling(20).std()\n upper = sma + std\n lower = sma - std\n\n if vol[-1] > upper[-1]:\n self.debug += '\\nVolume > 1 STD above sma: buys + 1 and sells + 1'\n self.sells += 1\n self.buys += 1\n else:\n self.debug += '\\nVolume in normal levels'", "def volume(self) -> float:\n volume = self.relay(\"volume\")\n if 0.0 <= volume <= 100.0:\n return volume\n raise exceptions.ProtocolError(f\"volume {volume} is out of range\")", "def sell_limit_order(self, price=0, volume=0):\n auth = CoinbaseExchangeAuth(self.api_key, self.secret_key, self.passphrase)\n data = {\n \"size\": volume,\n \"price\": price,\n \"side\": \"sell\",\n \"product_id\": self.rate,\n \"type\": \"limit\"\n }\n\n sell = requests.post(self.url + 'orders',\n data=json.dumps(data),\n auth=auth).json()\n\n sell['txid'] = sell['id']\n\n logging.debug(sell)\n return sell", "def sales_price(book):\n book = copy(book)\n book.price = round(book.price-book.price*.2, 2)\n return book", "def get_price_for_volume_series(conn, sticker, limit_price, volume, is_back):\n ticks = get_sticker_odds(conn, sticker)\n rets = get_volume_at_price(ticks, limit_price, volume, is_back)\n return rets", "def get_volume(cls) -> float:\n raise NotImplementedError", "def get_price_for_volume_at(conn, sticker, limit_price, volume, is_back, timestamp):\n tick = get_last_tick_before(conn, sticker, timestamp)\n rets = get_volume_at_price([tick], 
limit_price, volume, is_back)\n return rets[0]", "def sales_price(book):\n book = copy(book)\n book.price = round(book.price - book.price * .2, 2)\n return book", "def get_margin(self, selling_price):\n selling_price = ( selling_price - self.product_price ) / 2\n return selling_price", "def _update_total_bid(self, volume, price):\r\n self.total_bid += \\\r\n self.gox.base2float(volume) * self.gox.quote2float(price)", "def get_price():\n \n #Teacher's code. Could not get it working.\n #price = db(db.product.name == productName).select(db.product.price)[0].price\n \n \n return (200)", "def total_volume(self):", "def get_volume_at_price(ticks, limit_price, volume, is_back):\n ret_ticks = list()\n\n for tick in ticks:\n o, v = _game_avail_volume(tick, limit_price, volume, is_back)\n ret_ticks.append({'o': o, 'v': v, 't': tick[MarketTick.timestamp.db_field]})\n\n return ret_ticks", "def vol_from_price(self, price, f, K, T_expiry, payoff='Call'):\n def target_func( price, vol ):\n return self.price_from_vol(vol, f, K, T_expiry, payoff=payoff) - price \n \n try:\n return brentq(partial(target_func, price), 1e-8, 1e2, full_output=False)\n except Exception as e:\n print(\"Error: {}\".format(str(e)))\n print('Price: {}, strike: {}, payoff: {}'.format(price, K, payoff))", "def get_risk_per_unit(price, sl_price):\n return abs(price - sl_price)", "def block2_price(self):\n return self._safe_value(VAR_BLOCK2PRICE, float)", "def update(self, price, volume):\r\n if price > self.hig:\r\n self.hig = price\r\n if price < self.low:\r\n self.low = price\r\n self.cls = price\r\n self.vol += volume", "def volume_left(self):\n total = 0\n for i in self.orders:\n total += i.total_volume\n return self.volume - total", "def volume_level(self):\n volume = self._state.get(\"volume\", None)\n if volume is not None and volume != \"\":\n volume = int(volume) / 100\n return volume", "def volume_level(self):\n return self._volumeLevel/100" ]
[ "0.82540333", "0.73508203", "0.7293334", "0.7108912", "0.67721134", "0.6653987", "0.6506303", "0.6498677", "0.6331713", "0.62851894", "0.6253708", "0.6248566", "0.6227219", "0.6194709", "0.619005", "0.618927", "0.61563796", "0.61169934", "0.61134404", "0.6086485", "0.60847485", "0.6072713", "0.59723693", "0.5970766", "0.59233767", "0.5904812", "0.5884003", "0.5875594", "0.5857418", "0.58498025" ]
0.81967163
1
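The optional-price pattern in the volume_sell entry above (whole side when price is None, a single level otherwise) can be mirrored in a small reference sketch; the data is made up.

sell_side = {101.0: 50, 101.5: 200, 102.25: 75}  # price -> resting volume

def volume_sell_sketch(side, price=None):
    if price is None:
        return sum(side.values())      # total sell-side volume
    return side.get(price, 0)          # volume at one limit price

assert volume_sell_sketch(sell_side) == 325
assert volume_sell_sketch(sell_side, 101.5) == 200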
Return the volume of the buy side of the book at the given price.
def volume_buy(self, price=None): if price is None: return Library.functions.volume_buy(self._book) return Library.functions.volume_buy_price(self._book, price)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def volume(self, price=None):\n if price is None:\n return Library.functions.volume(self._book)\n return Library.functions.volume_price(self._book, price)", "def volume_sell(self, price=None):\n if price is None:\n return Library.functions.volume_sell(self._book)\n return Library.functions.volume_sell_price(self._book, price)", "def get_own_volume_at(self, price, typ=None):\r\n volume = 0\r\n for order in self.owns:\r\n if order.price == price and (not typ or typ == order.typ):\r\n volume += order.volume\r\n return volume", "def vol_from_price( self, price ):\n def target_func( price, vol ):\n return self.price_from_vol( vol ) - price \n \n return brentq( partial( target_func, price ), 1e-8, 10 )", "def get_volume(self, ticker):\n return self.trading_client.ticker(ticker, \"usd\")['volume']", "def buy(self, price, volume):\r\n self.order(\"bid\", price, volume)", "def calculate_buy_price(price: float):\n return round(price / (1 + CONF.trade_advantage_in_percent / 100), 1)", "def calculate_sell_price(price: float):\n return round(price * (1 + CONF.trade_advantage_in_percent / 100), 1)", "def volume (self):\n volume = self.sideLength**3\n return volume", "def volume(self) -> float:\n volume = self.relay(\"volume\")\n if 0.0 <= volume <= 100.0:\n return volume\n raise exceptions.ProtocolError(f\"volume {volume} is out of range\")", "def sell(self, price, volume):\r\n self.order(\"ask\", price, volume)", "def _update_total_bid(self, volume, price):\r\n self.total_bid += \\\r\n self.gox.base2float(volume) * self.gox.quote2float(price)", "def get_volume(cls) -> float:\n raise NotImplementedError", "def total_volume(self):", "def calc_market_order_sell(self, time, order_volume):\n rec = self.select_order_book_price_with_retry(time)\n if rec is None:\n return None\n\n sell_min, sell_volume, buy_max, buy_volume = rec\n\n if order_volume * 1.5 < buy_volume: # 2 means enough margin\n return buy_max\n else:\n return buy_max - PRICE_UNIT", "def volume(self):\n vol = self.daily['Volume']\n sma = vol.rolling(20).mean()\n std = vol.rolling(20).std()\n upper = sma + std\n lower = sma - std\n\n if vol[-1] > upper[-1]:\n self.debug += '\\nVolume > 1 STD above sma: buys + 1 and sells + 1'\n self.sells += 1\n self.buys += 1\n else:\n self.debug += '\\nVolume in normal levels'", "def calc_market_order_buy(self, time, order_volume):\n rec = self.select_order_book_price_with_retry(time)\n if rec is None:\n return None\n\n sell_min, sell_volume, buy_max, buy_volume = rec\n\n if order_volume * 1.5 < sell_volume: # 1.5 means enough margin\n return sell_min\n else:\n return sell_min + PRICE_UNIT", "def get_price():\n \n #Teacher's code. 
Could not get it working.\n #price = db(db.product.name == productName).select(db.product.price)[0].price\n \n \n return (200)", "def sales_price(book):\n book = copy(book)\n book.price = round(book.price-book.price*.2, 2)\n return book", "def vwap(prices: pd.Series, volume: pd.Series) -> pd.Series:\n if isinstance(prices.index, pd.MultiIndex):\n return (volume * prices).groupby(level=1).cumsum() / volume.groupby(level=1).cumsum()\n else:\n return (volume * prices).cumsum() / volume.cumsum()", "def vol_from_price(self, price, f, K, T_expiry, payoff='Call'):\n def target_func( price, vol ):\n return self.price_from_vol(vol, f, K, T_expiry, payoff=payoff) - price \n \n try:\n return brentq(partial(target_func, price), 1e-8, 1e2, full_output=False)\n except Exception as e:\n print(\"Error: {}\".format(str(e)))\n print('Price: {}, strike: {}, payoff: {}'.format(price, K, payoff))", "def update(self, price, volume):\r\n if price > self.hig:\r\n self.hig = price\r\n if price < self.low:\r\n self.low = price\r\n self.cls = price\r\n self.vol += volume", "def total_volume(bottle_size, pressure=DEFAULT_BOTTLE_PRESSURE):\n return bottle_size * pressure", "def total_volume(self) -> int:\n return self.quantity * self.one_item_volume", "def volume_left(self):\n total = 0\n for i in self.orders:\n total += i.total_volume\n return self.volume - total", "def price(self) -> float:\n return self.close", "def sales_price(book):\n book = copy(book)\n book.price = round(book.price - book.price * .2, 2)\n return book", "def volume(self) -> float:\n return self._volume", "def get_price_for_volume_at(conn, sticker, limit_price, volume, is_back, timestamp):\n tick = get_last_tick_before(conn, sticker, timestamp)\n rets = get_volume_at_price([tick], limit_price, volume, is_back)\n return rets[0]", "def volume_per_100_households(volume, num_households):\n if num_households:\n return volume * 100.0 / num_households\n else:\n return 0" ]
[ "0.82664984", "0.7516078", "0.7488547", "0.7051576", "0.68015915", "0.67189145", "0.65990806", "0.6527134", "0.63496965", "0.6312829", "0.6272654", "0.626387", "0.62559175", "0.6233882", "0.6233526", "0.6188062", "0.6141007", "0.6114274", "0.60632724", "0.6055284", "0.605316", "0.6040525", "0.6009387", "0.60092527", "0.60038364", "0.5989403", "0.59718204", "0.595235", "0.5941754", "0.5920378" ]
0.80440706
1
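volume_buy follows the same optional-price contract on the bid side; one assert per branch keeps the sketch honest. Illustrative data only.

buy_side = {100.75: 120, 100.5: 300}  # price -> resting volume

def volume_buy_sketch(side, price=None):
    return sum(side.values()) if price is None else side.get(price, 0)

assert volume_buy_sketch(buy_side) == 420
assert volume_buy_sketch(buy_side, 100.5) == 300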
Return the volume of the book at the given price.
def volume(self, price=None): if price is None: return Library.functions.volume(self._book) return Library.functions.volume_price(self._book, price)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_own_volume_at(self, price, typ=None):\r\n volume = 0\r\n for order in self.owns:\r\n if order.price == price and (not typ or typ == order.typ):\r\n volume += order.volume\r\n return volume", "def get_volume(self, ticker):\n return self.trading_client.ticker(ticker, \"usd\")['volume']", "def vol_from_price( self, price ):\n def target_func( price, vol ):\n return self.price_from_vol( vol ) - price \n \n return brentq( partial( target_func, price ), 1e-8, 10 )", "def volume_buy(self, price=None):\n if price is None:\n return Library.functions.volume_buy(self._book)\n return Library.functions.volume_buy_price(self._book, price)", "def volume_sell(self, price=None):\n if price is None:\n return Library.functions.volume_sell(self._book)\n return Library.functions.volume_sell_price(self._book, price)", "def volume(self) -> float:\n volume = self.relay(\"volume\")\n if 0.0 <= volume <= 100.0:\n return volume\n raise exceptions.ProtocolError(f\"volume {volume} is out of range\")", "def get_volume(cls) -> float:\n raise NotImplementedError", "def get_price_for_volume_at(conn, sticker, limit_price, volume, is_back, timestamp):\n tick = get_last_tick_before(conn, sticker, timestamp)\n rets = get_volume_at_price([tick], limit_price, volume, is_back)\n return rets[0]", "def get_volume(self, volume):\n return self._get(_volume.Volume, volume)", "def volume(self) -> float:\n return self._volume", "def total_volume(self):", "async def volume(\n self, ctx: commands.Context, volume: int = None\n ) -> Optional[float]:\n\n if volume is None:\n return ctx.voice_client.source.volume * 100\n\n ctx.voice_client.source.volume = volume / 100\n self.queue[ctx.guild.id].volume = volume / 100\n return ctx.voice_client.source.volume * 100", "def volume_level(self):\n volume = self._state.get(\"volume\", None)\n if volume is not None and volume != \"\":\n volume = int(volume) / 100\n return volume", "def volume (self):\n volume = self.sideLength**3\n return volume", "def get_volume_at_price(ticks, limit_price, volume, is_back):\n ret_ticks = list()\n\n for tick in ticks:\n o, v = _game_avail_volume(tick, limit_price, volume, is_back)\n ret_ticks.append({'o': o, 'v': v, 't': tick[MarketTick.timestamp.db_field]})\n\n return ret_ticks", "def get_price():\n \n #Teacher's code. 
Could not get it working.\n #price = db(db.product.name == productName).select(db.product.price)[0].price\n \n \n return (200)", "def volume(self):\n return self.structure.volume", "def get_price_for_volume_series(conn, sticker, limit_price, volume, is_back):\n ticks = get_sticker_odds(conn, sticker)\n rets = get_volume_at_price(ticks, limit_price, volume, is_back)\n return rets", "def vol_from_price(self, price, f, K, T_expiry, payoff='Call'):\n def target_func( price, vol ):\n return self.price_from_vol(vol, f, K, T_expiry, payoff=payoff) - price \n \n try:\n return brentq(partial(target_func, price), 1e-8, 1e2, full_output=False)\n except Exception as e:\n print(\"Error: {}\".format(str(e)))\n print('Price: {}, strike: {}, payoff: {}'.format(price, K, payoff))", "def get_volume(self):\n return int(self.get(COMMAND_UIC, 'GetVolume')['volume'])", "def get_volume(self):\n return self.__volume", "def update(self, price, volume):\r\n if price > self.hig:\r\n self.hig = price\r\n if price < self.low:\r\n self.low = price\r\n self.cls = price\r\n self.vol += volume", "def volume():\n vol = sonos.volume\n return vol", "def volume_level(self):\n return self._volumeLevel/100", "def volume(self):\n return sum([x[\"counter_volume\"] for x in self.usage])", "def price_from_vol( self, vol ):\n if self._vol_type == \"LogNormal\":\n S = self._deal_terms[ \"underlyer\" ].spot_value\n K = self._deal_terms[ \"payoff\" ].payoff_terms[ \"strike\" ]\n time_to_mat = self._deal_terms[ \"maturity\" ] - self._pricing_date\n r = CSA_map[ self._deal_terms[ \"CSA\" ] ].short_rate\n d1 = 1 / ( vol * np.sqrt( time_to_mat ) ) * ( np.log( S / K ) + ( r + 0.5 * vol ** 2 ) * time_to_mat )\n d2 = d1 - vol * np.sqrt( time_to_mat ) \n CallPrice = S * norm.cdf( d1 ) - K * np.exp( -r * time_to_mat ) * norm.cdf( d2 ) \n\n if self._deal_terms[ \"payoff\" ].payoff_name == \"European Call\":\n return CallPrice\n elif self._deal_terms[ \"payoff\" ].payoff_name == \"European Put\":\n return CallPrice + K * np.exp( -r * time_to_mat ) - S \n else:\n raise NameError( \"Unsupported vol type : \" + self._deal_terms[ \"Payoff\" ].payoff_name )\n else:\n raise NameError( \"Unsupported vol type : \" + self._vol_type )", "def get_volume_from_name(item_name):\n item_id = get_id_from_name(item_name)\n return get_volume_from_id(item_id)", "def volume(self):\n vol = self.daily['Volume']\n sma = vol.rolling(20).mean()\n std = vol.rolling(20).std()\n upper = sma + std\n lower = sma - std\n\n if vol[-1] > upper[-1]:\n self.debug += '\\nVolume > 1 STD above sma: buys + 1 and sells + 1'\n self.sells += 1\n self.buys += 1\n else:\n self.debug += '\\nVolume in normal levels'", "def volume(self):\n v = {'art': self._vart, 'ven': self._vven}\n if self._lvad is not None:\n v['lvad'] = self._lvad.volume['lvad']\n return v", "def volume(self):\n v = {'art': self._vart, 'ven': self._vven}\n if self._lvad is not None:\n v['lvad'] = self._lvad.volume['lvad']\n return v" ]
[ "0.74667495", "0.7274619", "0.7216883", "0.7213233", "0.7152009", "0.67414206", "0.6649474", "0.644604", "0.64349836", "0.63823044", "0.62451017", "0.6215957", "0.62002635", "0.6196766", "0.6136535", "0.6116863", "0.6111729", "0.6077984", "0.6072128", "0.60609454", "0.6050351", "0.60389763", "0.60340816", "0.6033226", "0.60253775", "0.6015216", "0.6012477", "0.59936005", "0.59897536", "0.59897536" ]
0.8870716
0
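The aggregate volume() spans both sides; at a given price it reports whatever rests there, since a well-formed book has at most one side per price level. The two-dict layout below is an assumption for illustration.

book = {"buy": {100.75: 120}, "sell": {101.0: 50, 101.5: 200}}

def volume_sketch(book, price=None):
    levels = {**book["buy"], **book["sell"]}  # merged price -> volume map
    return sum(levels.values()) if price is None else levels.get(price, 0)

assert volume_sketch(book) == 370
assert volume_sketch(book, 101.0) == 50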
Return the count at the given limit price.
def count_at(self, price): return Library.functions.count_at(self._book, price)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def price_count(self):\n return self.price_set.count()", "def Count(self, limit=None):\n if limit is None:\n count = 0\n for i in self.Run():\n count += 1\n return count\n else:\n return len(self.Get(limit))", "def limit(self, limit):\n\n # Return between 1 and 250 results, defaults to 10\n return max(1, min(250, int(limit) if limit else 10))", "def _determine_limit(self, limit):\n\n # Note: +1 is allowed here because it allows\n # the user to fetch one beyond to see if they\n # are at the end of the list\n if not limit:\n res = conf.api_configuration.max_returned_num + 1\n else:\n res = min(conf.api_configuration.max_returned_num + 1, limit)\n\n return res", "def charge_limit(self, limit=None):\n if limit is None:\n done, data = self._request('GH')\n if done:\n return int(data[0])\n else:\n if self._request('SH', str(int(limit)))[0]:\n return limit\n\n raise EvseError", "def Count(self, limit=None):\n self.__compile = False\n if self.__cached_count:\n return self.__cached_count\n\n resp = api_base_pb.Integer64Proto()\n try:\n apiproxy_stub_map.MakeSyncCall('datastore_v3', 'Count',\n self._ToPb(limit=limit), resp)\n except apiproxy_errors.ApplicationError, err:\n raise _ToDatastoreError(err)\n else:\n self.__cached_count = resp.value()\n\n return self.__cached_count", "def maximumToys(moneyAvailable, priceList):\n priceList.sort()\n count = 0\n for toyPrice in priceList:\n if toyPrice <= moneyAvailable:\n count += 1\n moneyAvailable -= toyPrice\n else:\n return count", "def calculate(self, limit):\r\n pass", "def calculate(self, limit):\n pass", "def count(self) -> float:\n return pulumi.get(self, \"count\")", "def lower_bound(stock):\n counter=0\n for i in stock_price(stock):\n if i <= support(stock):\n counter+=1\n return counter", "def numberCounts(limit):\n\n sum = 0\n for number in range(1,limit+1):\n word = number2text(number)\n amount = countLetters(word)\n sum = sum + amount\n return sum", "def get_count(owner, repo_slug, auth_tokens, endpoint):\n count_url = make_req_url(owner, repo_slug, endpoint, 0)\n response = send_bitbucket_request(count_url, auth_tokens)\n if response and 'count' in response:\n return response['count']-1\n return 0", "def upper_bound(stock):\n counter=0\n for i in stock_price(stock):\n if i >= resistance(stock):\n counter+=1\n return counter", "def _create_limit(self, price_limit):\n if self.price_limit is not None:\n return(order(self.symbol, -self.volume, style=LimitOrder(self.price_limit)))\n else:\n return", "def get_limit(self):\n return self.limit", "def getMostUsedCount( self, limit ):\n cur = self.__conn.cursor()\n cur.execute( \"\"\"SELECT Data, COUNT(Data) AS UseCount\n FROM PrivilegeUse\n GROUP BY Data\n ORDER BY UseCount DESC\n LIMIT %d\"\"\", limit )\n class Use:\n def __init__( self, faq, count ):\n self.faq = faq\n self.count = count\n \n return [ Use(row[0], row[1]) for row in cur.fetchall() ]", "def count(self) -> int:\n return pulumi.get(self, \"count\")", "def get_plan_limit(self, source):\n commitment = getattr(self.get_subscription(), 'commitment', {})\n return self.get_plan().get_price_data(source, commitment)[1]", "def count(self) -> Optional[float]:\n return pulumi.get(self, \"count\")", "def get_num_of_shares(stock, investment):\n return int(investment // float(stock['Price']))", "def count_buy(self):\n return Library.functions.count_buy(self._book)", "def numRescueBoats(self, people, limit):\n queue = collections.deque(sorted(people))\n count = 0\n while queue:\n count += 1\n last = queue.pop()\n if len(queue) >= 1:\n first = 
queue[0]\n if first + last <= limit:\n queue.popleft()\n return count", "def calculate(self, limit: int) -> None:\n raise NotImplementedError()", "def ticket_range(self):\n response = self.http_call(\"{0}/tickets.json\".format(self.uri))\n return math.ceil(response.json()[\"count\"] / 100) + 1", "def pages(self):\n if not self.limit:\n return 0 # pragma: no cover\n else:\n return int(ceil(self.total / float(self.limit)))", "def findLimit(name):\n return Limit(Cuebot.getStub('limit').Find(\n limit_pb2.LimitFindRequest(name=name), timeout=Cuebot.Timeout).limit)", "def count_sell(self):\n return Library.functions.count_sell(self._book)", "def get_limit(self):\n return self._limit", "def get_limit(self):\n return self._limit" ]
[ "0.68486047", "0.6711644", "0.62374175", "0.6154216", "0.60643214", "0.60539377", "0.5963375", "0.5950641", "0.59431195", "0.5908711", "0.58560854", "0.58281624", "0.5774528", "0.57603157", "0.5749395", "0.574189", "0.5730391", "0.5728614", "0.5705169", "0.5687857", "0.5653949", "0.56453747", "0.56208646", "0.5577761", "0.5571972", "0.5563988", "0.5543701", "0.55176204", "0.55154157", "0.55154157" ]
0.73425525
0
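count_at counts orders rather than shares, so this sketch keeps a list of order ids per level instead of a single volume figure. Ids and prices are hypothetical.

levels = {101.0: ["o1", "o7"], 101.5: ["o3"]}  # price -> resting order ids

def count_at_sketch(levels, price):
    return len(levels.get(price, []))

assert count_at_sketch(levels, 101.0) == 2
assert count_at_sketch(levels, 99.0) == 0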
Return the count of the book on the sell side.
def count_sell(self): return Library.functions.count_sell(self._book)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_buy(self):\n return Library.functions.count_buy(self._book)", "def count(self):\n return Library.functions.count(self._book)", "def count_at(self, price):\n return Library.functions.count_at(self._book, price)", "def book_count(self):\n\n try:\n cur = self._db.cursor()\n cur.execute('SELECT COUNT(*) FROM books')\n return cur.fetchone()[0]\n except sqlite3.Error as e:\n raise BookError(f'Error searching for books with search term {term}') from e", "def price_count(self):\n return self.price_set.count()", "def lists_with_book_count(self, book):\n return self.lists.filter(List.books.contains(book)).count()", "def count_available_goods(offer_id):\n offer_id = int(offer_id) # sanitize input\n results = db.engine.execute(\"select count(sid) from good where good.offer_id=\\'%s\\' and good.order_id is NULL;\" % str(offer_id)) # safe\n return(results.fetchone()[0])", "def get_books_read(self):\n return len(self.books)", "def calculate_overbook_num(self):\n\n overbook_level_decimal = self.overbook_level / float(100.0)\n return self.num_rooms + math.ceil(overbook_level_decimal * self.num_rooms)", "def read_library_count(self):\n\t\tprint(\"You have \" + str(self.library_count) + \" books in your kindle library.\")", "def count(self):\n return self.get_count()", "def get_count(self):\r\n return self.count", "def count(self) -> int:\n return pulumi.get(self, \"count\")", "def total_bids(self):\n return Bid.objects.filter(bid_busket=self).count()", "def get_count(self):\n return self.count", "def get_count(self):\n return self.count", "def shared_nb(self):\n return self.bbsitting_set.count() + self.booked.count()", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def getCount(self):\n return self.count", "def get_count(cls):\n total = 0\n for counter in SimpleCounterShard.objects.all():\n total += counter.count\n return total", "def product_count(self) -> int:\n return self._product_count", "def get_product_count(self):\n return self.products.count()", "def consumer_count(self, obj):\n return obj.get_or_set_consumer_count()", "def Count(self):\r\n\t\treturn self._get_attribute('count')", "def Count(self):\r\n\t\treturn self._get_attribute('count')", "def total_sold(album):\n return album.total_sold", "def count(self) -> float:\n return pulumi.get(self, \"count\")" ]
[ "0.78120375", "0.77192485", "0.70914364", "0.68656915", "0.6624515", "0.65457547", "0.64217335", "0.6259992", "0.62470585", "0.62161124", "0.61799794", "0.6173208", "0.616138", "0.6150478", "0.61196935", "0.61196935", "0.61010724", "0.6085189", "0.6085189", "0.6085189", "0.6085189", "0.60620826", "0.60541296", "0.6035154", "0.6029467", "0.59837204", "0.5972313", "0.5972313", "0.5967662", "0.5954802" ]
0.8766824
0
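count_sell then tallies open orders across every ask level, as in this sketch with made-up ids.

sell_levels = {101.0: ["o1", "o7"], 101.5: ["o3"]}

def count_sell_sketch(levels):
    return sum(len(orders) for orders in levels.values())

assert count_sell_sketch(sell_levels) == 3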
Return the count of the book on the buy side.
def count_buy(self): return Library.functions.count_buy(self._book)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_sell(self):\n return Library.functions.count_sell(self._book)", "def count(self):\n return Library.functions.count(self._book)", "def book_count(self):\n\n try:\n cur = self._db.cursor()\n cur.execute('SELECT COUNT(*) FROM books')\n return cur.fetchone()[0]\n except sqlite3.Error as e:\n raise BookError(f'Error searching for books with search term {term}') from e", "def count_at(self, price):\n return Library.functions.count_at(self._book, price)", "def price_count(self):\n return self.price_set.count()", "def get_count(self):\r\n return self.count", "def count_available_goods(offer_id):\n offer_id = int(offer_id) # sanitize input\n results = db.engine.execute(\"select count(sid) from good where good.offer_id=\\'%s\\' and good.order_id is NULL;\" % str(offer_id)) # safe\n return(results.fetchone()[0])", "def read_library_count(self):\n\t\tprint(\"You have \" + str(self.library_count) + \" books in your kindle library.\")", "def count(self):\n return self.get_count()", "def total_bids(self):\n return Bid.objects.filter(bid_busket=self).count()", "def getCount(self):\n return self.count", "def count(self) -> int:\n return pulumi.get(self, \"count\")", "def get_count(self):\n return self.count", "def get_count(self):\n return self.count", "def lists_with_book_count(self, book):\n return self.lists.filter(List.books.contains(book)).count()", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def count(self) -> float:\n return pulumi.get(self, \"count\")", "def get_books_read(self):\n return len(self.books)", "def GetCount(self):\n return(self.count)", "def get_count(self):\n return self._count", "def count_items(self):\n count = 0\n for o in self.order_lst:\n count += o.count()\n \n return count", "def get_count(self):\n\n\t\treturn self.__count", "def get_count(cls):\n total = 0\n for counter in SimpleCounterShard.objects.all():\n total += counter.count\n return total", "def tally(self):\n return self.count", "def getNumPurchased(self):\n return self.numberPurchased", "def Count(self):\r\n\t\treturn self._get_attribute('count')", "def Count(self):\r\n\t\treturn self._get_attribute('count')" ]
[ "0.80874604", "0.791642", "0.71182454", "0.70671946", "0.6729253", "0.6499483", "0.6482776", "0.64741236", "0.64674014", "0.6455501", "0.64497244", "0.6448028", "0.6434358", "0.6434358", "0.64228356", "0.63472587", "0.63472587", "0.63472587", "0.63472587", "0.6326487", "0.62864214", "0.62759066", "0.62450165", "0.62446946", "0.62275296", "0.620237", "0.61938274", "0.61843336", "0.6182959", "0.6182959" ]
0.8772569
0
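count_buy is the bid-side mirror of the previous sketch, shown only to keep the pair symmetric; the data is illustrative.

buy_levels = {100.75: ["o2"], 100.5: ["o4", "o5", "o6"]}

def count_buy_sketch(levels):
    return sum(len(orders) for orders in levels.values())

assert count_buy_sketch(buy_levels) == 4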
Return the total count of the book (number of orders).
def count(self): return Library.functions.count(self._book)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_items(self):\n count = 0\n for o in self.order_lst:\n count += o.count()\n \n return count", "def get_count(cls):\n total = 0\n for counter in SimpleCounterShard.objects.all():\n total += counter.count\n return total", "def total_count(self) -> int:\n return self.__total_count", "def get_total_count(self):\n return self.total_count", "def book_count(self):\n\n try:\n cur = self._db.cursor()\n cur.execute('SELECT COUNT(*) FROM books')\n return cur.fetchone()[0]\n except sqlite3.Error as e:\n raise BookError(f'Error searching for books with search term {term}') from e", "def count_total():\r\n trans = transaction.begin()\r\n StatBookmarkMgr.count_total_bookmarks()\r\n trans.commit()", "def count_buy(self):\n return Library.functions.count_buy(self._book)", "def orders_total(self):\n return(len(self._d_orders['trades']))", "def total_count(self):\n res = self.con.execute('select sum(count) from cc').fetchone();\n if res == None:\n return 0\n return res[0]", "def get_total_number_of_documents(self):\n return self.total_number_of_documents", "def _grand_total(self):\n count = 0\n for product in self.products:\n count += product.price\n return count", "def count(self):\n return len(self.order_lst)", "def total_count(count):\n return sum(count.values())", "def count(self):\n return len(self.order_items)", "def total(self) -> int:\n if self._total is None:\n self._total = self.counts.sum()\n return self._total", "def lists_with_book_count(self, book):\n return self.lists.filter(List.books.contains(book)).count()", "def totalCount(self):\n return sum(self.values())", "def totalCount(self):\n return sum(self.values())", "def totalCount(self):\n return sum(self.values())", "def get_TotalCount(self):\n return self._output.get('TotalCount', None)", "def count_total(self):\n\t\twith self._c_lock: # I can't believe I implemented a lock for a counter. Safety first, I guess...\n\t\t\treturn self._total_count", "def count_total(self):\n\t\twith self._c_lock: # I can't believe I implemented a lock for a counter. Safety first, I guess...\n\t\t\treturn self._total_count", "def get_order_count(self):\n resp = self.app.get('/orders')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n return len(data)", "def get_all_orders_count(): \n data = order_obj.get_all_orders(\"1\")\n return data", "def total(**metafilter):\n metafilter = _clean(metafilter)\n search = _build(metafilter)\n return search.count()", "def get_binmodule_total_count(self):\n count = 0\n for binmodule in self.binmodule_list:\n count += binmodule.get_count()\n return count", "def totalcounts(self):\n return self.datacounts + self.bkgdcounts", "def count(self):\n return self.get_count()", "def count_at(self, price):\n return Library.functions.count_at(self._book, price)", "def total_count(self):\n return self.applied_count + self.error_count" ]
[ "0.7326504", "0.70894116", "0.7065717", "0.7048008", "0.70059586", "0.69761986", "0.6889632", "0.67862225", "0.6757659", "0.664989", "0.6642463", "0.6580156", "0.6578201", "0.65763205", "0.65512884", "0.65499413", "0.6543359", "0.6543359", "0.6543359", "0.6520974", "0.6512532", "0.6509261", "0.64978373", "0.6479025", "0.6469381", "0.6432442", "0.64181566", "0.6393112", "0.6365678", "0.63647276" ]
0.78544945
0
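A useful property of the aggregate count() is that it should equal count_buy() plus count_sell(); the sketch below checks that invariant on hypothetical data.

buy_levels = {100.75: ["o2"], 100.5: ["o4", "o5", "o6"]}
sell_levels = {101.0: ["o1", "o7"], 101.5: ["o3"]}

def count_sketch(buy, sell):
    return sum(len(v) for v in buy.values()) + sum(len(v) for v in sell.values())

assert count_sketch(buy_levels, sell_levels) == 7  # 4 bids + 3 asks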
Return the price of the promotion for the given call time (in seconds).
def promotion(time, sum_price): time = second_to_minute(time) for (pro, price) in [(24*60, 150), (12*60, 100), (8*60, 80), (3*60, 40), (60, 15), (20, 10)]: sum_price = sum_price + (time//pro)*price time = time % pro oneminute = time - 3 return sum_price + oneminute if oneminute > 0 else sum_price
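The greedy tier arithmetic above is easier to trust with a worked check. second_to_minute is not defined in this entry, so the sketch assumes a plain integer seconds-to-minutes conversion; the tier table is copied from the function itself.

def second_to_minute(seconds):
    return seconds // 60  # assumption: simple integer conversion

def promotion_sketch(time, sum_price):
    time = second_to_minute(time)
    for (pro, price) in [(24*60, 150), (12*60, 100), (8*60, 80),
                         (3*60, 40), (60, 15), (20, 10)]:
        sum_price = sum_price + (time // pro) * price
        time = time % pro
    oneminute = time - 3  # up to 3 leftover minutes are free
    return sum_price + oneminute if oneminute > 0 else sum_price

# 25 hours = one 24-hour block (150) + one 60-minute block (15) -> 165
assert promotion_sketch(25 * 60 * 60, 0) == 165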
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_time_price(supplier_with_transaction):\n supplier_item = supplier_with_transaction.get('supplier_detail')\n transaction_item = supplier_with_transaction.get('supplier_transaction')\n # Check if there is time prices or not\n if supplier_with_transaction.get('time_price'):\n # Check if we will compute in complex or simple\n if not supplier_item.get('has_complex_minute_price'):\n # start to calculate the simple version for time price\n charging_start = transaction_item.get('charging_start')\n charging_end = transaction_item.get('charging_end')\n if charging_start and charging_end:\n charging_start_obj = datetime.strptime(charging_start, '%Y-%m-%dT%H:%M:%S')\n charging_end_obj = datetime.strptime(charging_end, '%Y-%m-%dT%H:%M:%S')\n duration_in_minutes = (charging_end_obj - charging_start_obj).total_seconds() / 60\n # Check for min duration\n if supplier_item.get('min_duration') and duration_in_minutes < supplier_item.get('min_duration'):\n duration_in_minutes = supplier_item.get('min_duration')\n price = supplier_item.get('simple_minute_price')\n total_price = price * duration_in_minutes\n return total_price\n else:\n # start calculate the complex version for time price\n total_price = 0\n if supplier_item.get('interval') == 'start':\n for start_rec in supplier_item.get('time_price'):\n timeframe = start_rec.get('billing_each_timeframe') * 60\n if start_rec.get('hour_from', 0) > start_rec.get('hour_to', 0):\n duration = (start_rec.get('hour_to') - start_rec.get('hour_from')) * 60\n else:\n duration = (start_rec.get('hour_to') - (24 - start_rec.get('hour_from'))) * 60\n duration_after_timeframe = duration % timeframe\n total_duration = duration + duration_after_timeframe\n total_price += total_duration * start_rec.get('minute_price')\n else:\n for end_rec in supplier_item.get('time_price'):\n timeframe = end_rec.get('billing_each_timeframe') * 60\n if end_rec.get('hour_from', 0) > end_rec.get('hour_to', 0):\n duration = (end_rec.get('hour_to') - end_rec.get('hour_from')) * 60\n else:\n duration = (end_rec.get('hour_to') - (24 - end_rec.get('hour_from'))) * 60\n duration_after_timeframe = duration % timeframe\n total_duration = duration - (timeframe - duration_after_timeframe)\n total_price += total_duration * end_rec.get('minute_price')\n\n return total_price\n else:\n total_price = 0\n return total_price", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n jsonResponse = self.getJson(\"https://poloniex.com/public?command=returnTicker\")\n currentPrice = jsonResponse[pair][\"last\"]\n return currentPrice", "def getprice():\n\n print(\"Get price\")\n latest_price = get_latest_price(item_code)\n return latest_price", "def get_coin_price(asset, time=None):\n url = 'https://rest.coinapi.io/v1/exchangerate/{}/USD'.format(asset)\n if time is not None:\n url = url + '?time={}'.format(time)\n headers = {'X-CoinAPI-Key': os.environ.get('COIN_API_KEY', '')}\n r = requests.get(url, headers=headers)\n if r.status_code / 100 == 2:\n price = {\"price\": r.json()['rate']}\n return price\n else:\n return {\"error\": r.content.decode('utf-8')}", "def get_price():\n \n #Teacher's code. 
Could not get it working.\n #price = db(db.product.name == productName).select(db.product.price)[0].price\n \n \n return (200)", "def evaluate(self, time) -> float:\n ...", "def calculate_price(from_date_time, to_date_time, parking_spot_id):\n try:\n seconds_in_day = 24 * 60 * 60\n hour_difference = ((to_date_time - from_date_time).days * seconds_in_day + (to_date_time - from_date_time).seconds) / (60 * 60)\n hour_round_off = math.ceil(hour_difference)\n\n parking_obj = ParkingSpot.objects.get(id=parking_spot_id)\n\n total_price = hour_round_off * parking_obj.price\n return total_price\n except Exception as e:\n print(str(e))\n return None", "def getPrice(coin,cur):\n price = 'https://api.coinmarketcap.com/v1/ticker/' + coin\n json = requests.get(price).json()\n value = json[0]['price_' + str(cur)]\n return value", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n uri = \"https://www.bitfinex.com/v2/ticker/t\"\n requestUrl = uri + pair\n jsonResponse = self.getJson(requestUrl)\n currentPrice = jsonResponse[0]\n return currentPrice", "def poll_price_data():\n resp = requests.get(COINDESK_ENDPOINT) # Powered by CoinDesk\n if resp.status_code == 200:\n logging.info(\"GET request succeeded\")\n data = resp.json()\n data_dict = {\n \"id\": str(uuid.uuid1()),\n \"time\": data['time']['updated'],\n \"currency\": data['bpi']['USD']['code'],\n \"price\": data['bpi']['USD']['rate']\n }\n return data_dict\n else:\n logging.error(\"GET request failed\")", "def get_price(self, request, code=None, minutes=None):\n start_datetime = datetime.now()\n try:\n minutes = int(minutes)\n price = calc_price_parking(code,minutes,start_datetime)\n ret={'msg':\"\",'data':price, 'status':'OK'}\n except Exception as e:\n ret = {'msg':'Error calculating price.','data':{}, 'status':'ERR'}\n return Response(ret)", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n uri = \"https://www.bitstamp.net/api/v2/ticker/\"\n requestUrl = uri + pair\n jsonResponse = self.getJson(requestUrl)\n currentPrice = jsonResponse[\"last\"]\n return currentPrice", "def sample_consumption():\n product = Product(\"NameA\", 15, 17.85, 0.07, \"oak\", 0.08, \"ENplusA1\",\n \"Pelletics.cz\", date(2020, 12, 20))\n delivery = Delivery(product, \"Pellets2Home\", 7350, 42500,\n date(2020, 12, 20))\n some_datetime = datetime(2020, 11, 20, 14, 22, 46, 0)\n consumption = Consumption(some_datetime, delivery, 30, \"30 kgs\")\n return consumption", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n uri = \"https://api.kraken.com/0/public/Ticker\"\n requestUrl = uri + \"?pair=\" + pair\n jsonResponse = self.getJson(requestUrl)\n currentPrice = jsonResponse[\"result\"][pair][\"c\"]\n return currentPrice", "def get_price(data):\n return data[\"summaryDetail\"][\"regularMarketPreviousClose\"][\"raw\"]", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n uri = \"https://bittrex.com/api/v1.1/public/getticker?market=\"+pair\n jsonResponse = self.getJson(uri)\n currentPrice = jsonResponse[\"result\"][\"Last\"]\n return currentPrice", "def price(temp):\n now = datetime.datetime.now()\n r = requests.get(\"https://bitcoin.co.th/\")\n soup = BeautifulSoup(r.content, \"html.parser\")\n data = soup.find_all(\"div\", {\"class\": \"price\"})\n print(\"[%02i:%02i:%02i] Now BTC Price : \" % (now.hour, now.minute, now.second), end=\"\")\n for i in range(len(data)):\n price = (data[i].text)\n 
print(price)\n if price != temp: # Price Change\n line_sent(price)\n temp = price\n time.sleep(30) # Delay 30 second\n main(temp) # call function main for loop", "def margin_timed(self):\n sp = self.sale_price_timed or zero\n if u.isempty(sp):\n return zero\n cp = self.cost_price or zero\n return u.decimal((um-(cp/sp))*cem, True)", "def get_price(hours):\n price = round(hours * 5, 2)\n print(\"Total Price is $\", price)", "def get_price():\n return uniform(1.0, 350.0)", "def get_stock_price(stock):\n pass", "def get_price(res_obj):\n selector = '.price-current'\n price = res_obj.html.find(selector, first=True)\n return price.text", "def compute_kwh_price(supplier_with_transaction):\n\n supplier_item = supplier_with_transaction.get('supplier_detail')\n total_kwh_price = 0\n if supplier_item.get('has_time_based_kwh') and supplier_item.get('time_price'):\n # start to compute as complex\n for rec in supplier_item.get('time_price'):\n if rec.get('hour_from') and rec.get('hour_to'):\n if rec.get('hour_from') > rec.get('hour_to'):\n duration = (rec.get('hour_to') - rec.get('hour_from')) * 60\n else:\n duration = (rec.get('hour_to') - (24 - rec.get('hour_from'))) * 60\n else:\n duration = 0\n total_kwh_price += duration * rec.get('kwh_price', 0)\n else:\n # start to calculate the simple version for kwh price\n total_kwh_price = 24 * supplier_item.get('kwh_price', 0)\n return total_kwh_price", "def evaluate (self, time):\n return self._response.evaluate0(time)", "def getProductPrice(productID):\n return \"http://api.tcgplayer.com/pricing/product/\" + str(productID)", "def __call__(self, rate:'kW'):\n self.rate = rate\n self.cost = self.price * rate", "def buy_one_get_one(products):\n if 'p1' in products and products['p1'] >= 2:\n return -20\n else:\n return 0", "def compute_price(self, date = None):\n\t\tif date is None:\n\t\t\tdate = datetime.now()\n\t\tself.price = 0\n\t\t# Getting list of product in cart\n\t\tcontent = self.cart.cart_content_set.all()\n\t\t# Dictionnary in order to compute minimum state of multi promotion\n\t\tstate = {\n\t\t\t'products':{},\n\t\t\t'promotions':{}\n\t\t}\n\t\trequirements = {}\n\n\t\tfor element in content:\n\t\t\tproduct = element.product\n\t\t\tquantity = element.quantity\n\n\t\t\t# First look for promotion\n\t\t\tsimple_promotions = product.promotion_set.filter(end__gte = date, type = 's').distinct('reference', 'end').order_by('-end', 'reference')\n\t\t\tmulti_promotions = product.promotion_set.filter(end__gte = date, type = 'm').distinct('reference', 'end').order_by('-end', 'reference')\n\t\t\tif len(simple_promotions)>0:\n\t\t\t\tpromotion = simple_promotions[0]\n\t\t\t\tself.price = self.price + quantity*promotion.after\n\t\t\t\n\t\t\telif len(multi_promotions)>0:\n\t\t\t\tfor promotion in multi_promotions:\n\t\t\t\t\tprice_before = promotion.before\n\t\t\t\t\tprice_after = promotion.after\n\t\t\t\t\tcontent = [ (p, 1) for p in promotion.content.all()]\n\t\t\t\t\tfound, requirement = self.get_promotion_requirement(content, price_before)\n\t\t\t\t\tif found and requirement is not None:\n\t\t\t\t\t\trequirements[promotion.id] = { p.id:q for p, q in requirement} # updating promotion multi requirements\n\n\t\t\t\t\t# Updating promotion multi state\n\t\t\t\t\tprod, price = self.get_simple_price([{'product':product, 'quantity':1}], date)[0]\n\t\t\t\t\t# print quantity\n\t\t\t\t\tstate['products'][product.id] = {'price': price, 'qte':quantity}\n\t\t\t\t\t# print state['products'][product.id]\n\t\t\t\t\tstate['promotions'][promotion.id] = {'price': price_after, 
'qte':0}\n\t\t\telse:\n\t\t\t\thistory = product.history_set.filter(created__gte = date-timedelta(hours = 24)).order_by('-created')\n\t\t\t\tif len(history)>0:\n\t\t\t\t\tself.price = self.price + quantity*history[0].price\n\t\t\t\telse:\n\t\t\t\t\thistory = product.history_set.all().order_by('-created')\n\t\t\t\t\tif len(history)>0:\n\t\t\t\t\t\tself.price = self.price + quantity*history[0].price\n\n\t\t# Dealing with multi promotion:\n\t\tmin_state, min_price = self.get_min_state(state, requirements)\n\t\tself.price = self.price + min_price\n\n\t\tself.save()\n\n\t\treturn self.price", "def exptime(self):\n exptime = float(self.get('TRUITIME')) * int(self.get('COADDONE'))\n return exptime", "def _dynamic_price(self):\n adjust = PriceAdjustmentCalc(self)\n signals.satchmo_price_query.send(self, adjustment=adjust,\n slug=self.product.slug, discountable=self.product.is_discountable)\n return adjust.final_price()" ]
[ "0.67537755", "0.6225738", "0.61765724", "0.61343247", "0.6133116", "0.60193425", "0.60140777", "0.60004115", "0.5960098", "0.58534", "0.583223", "0.583061", "0.5796537", "0.57868993", "0.5755165", "0.57375234", "0.5699371", "0.56611836", "0.56481266", "0.5638357", "0.5602969", "0.5598063", "0.5566043", "0.5515676", "0.5503677", "0.5488461", "0.5464348", "0.5462425", "0.54567593", "0.5454701" ]
0.7114384
0
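A quick worked example of the tiered pricing above (values hypothetical; assumes `second_to_minute` rounds a second count up to whole minutes, as the next record shows):

# 5400 seconds -> 90 minutes after second_to_minute()
# 90 // 60 = 1 one-hour block     -> +15, 30 minutes left
# 30 // 20 = 1 twenty-minute block -> +10, 10 minutes left
# 10 - 3 free minutes = 7          -> +7 at one unit per minute
assert promotion(5400, 0) == 32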
Convert seconds to minutes, rounding up
def second_to_minute(time):
    if time % 60 != 0:
        time = time + 60
    return time // 60
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def int_convert_to_minute(value):\n min = int(int(value) / 60)\n sec = int(int(value) % 60)\n return \"%02d\" % min + \":\" + \"%02d\" % sec", "def get_minute(self):\n\n # First we get the first 8 bits stored in the minute register\n # and translate it to an integer\n minute_bcd = self.__read_register(_REGISTER_MINUTE)\n\n # We separate the tens from the digits\n\n tens = (minute_bcd & 0x70) >> 4 # 0x70 = 0b01110000\n digit = (minute_bcd & 0x0F) # 0x0F = 0b00001111\n\n return 10 * (tens) + digit", "def _to_minutes(seconds):\n return '%d:%d' % divmod(seconds, 60)", "def convert_time(time_passed):\n\n minutes = time_passed.seconds // 60\n\n return minutes", "def MINUTE(time):\n return _make_datetime(time).minute", "def get_minute(time):\n m = time[4] + (time[3]*60) + (time[2]*60*24) * time[1] * time[0]\n return m", "def convert_to_minutes(s):\r\n m = math.floor(s / 60)\r\n s -= m * 60\r\n return '%dm %ds' % (m, s)", "def calculate_minutes(time):\n return int(time / 60)", "def convert_to_minutes(s):\n m = math.floor(s / 60)\n s -= m * 60\n return '%dm %ds' % (m, s)", "def minutes_in(sec):\r\n return int((sec - (hours_in(sec)*3600))//60)", "def seconds_to_minutes(seconds: int, round: Optional[bool] = True) -> Union[int, float]:\n return int(seconds / 60) if round else seconds / 60", "def convert_time(min, sec):\n # Updated 11/19/16 \n total_time = min*60\n total_time = total_time + sec\n \n return str(total_time)+'.0' # string because being passed to GUI", "def convert_time(t):\n minutes = int(t/60)\n seconds = int(t-60*minutes)\n return minutes, seconds", "def minutes_to_seconds( minutes: str ) -> int:\r\n return int(minutes)*60", "def calculate_seconds_in_minutes(minutes):\n return int(minutes * 60)", "def date_minute(date):\n return date.minute", "def minutes_to_seconds(minutes):\n return minutes * 60", "def minutes_to_seconds(minutes) -> int:\n return int(minutes) * 60", "def timeToMinutes(timestamp):\n if len(timestamp) == 5: \n return int(timestamp[0])*600 + int(timestamp[1])*60 + int(timestamp[3])*10 + int(timestamp[4])\n return None", "def int_to_time(seconds):\n time1 = time()\n minutes, time1.second = divmod(seconds, 60)\n time1.hour, time1.minute = divmod(minutes, 60)\n return time1", "def int_to_time(seconds):\n time1 = time()\n minutes, time1.second = divmod(seconds, 60)\n time1.hour, time1.minute = divmod(minutes, 60)\n return time1", "def minutes_in_day_to_time(minutes):\n return seconds_in_day_to_time(minutes*60)", "def task11_time_converter(num):\n if num < 0:\n raise ValueError\n hour = num // 60\n minute = num % 60\n return f'{hour}:{minute}'", "def minutesToTime(minutes):\n h10, h1, m10, m1 = 0, 0, 0, 0\n h10 = int(minutes/600)\n h1 = int(minutes%600 / 60)\n m10 = int((minutes - (h10*600 + h1*60)) / 10)\n m1 = int((minutes - (h10*600 + h1*60)) % 10)\n return f\"{h10}{h1}:{m10}{m1}\"", "def multMinuteAlign(ts, min):\n\tintv = secInMinute * min\n\treturn int((ts / intv)) * intv", "def minute_and_hour_to_time(minute, hour):\n return hour * 60 + minute", "def convert_to_minute(arrival):\n time = arrival.split(' ')\n dd = datetime.datetime(int(time[0]), int(time[1]), int(time[2]), int(time[3]), int(time[4]))\n age = dd - datetime.datetime(2014, 1, 1, 0, 0)\n return int(age.total_seconds() / 60)", "def sec_to_min_pretty(time_secs: int) -> str:\n if time_secs % 60 == 0:\n return f'{time_secs // 60}'\n m = time_secs / 60\n return f'{m:.2g}'", "def set_minute(self, minute):\n if minute not in range(60):\n raise ValueError(\"Second value must be in range [0..59] but is 
{}\".format(minute))\n\n # First we separate the tens and the digit\n tens, digit = divmod(int(minute), 10)\n\n # Then we add them in a single int\n reg_value = (tens << 4) | digit\n\n # The we add it to the register\n self.__write_register(_REGISTER_MINUTE, reg_value)", "def minutes(self):\n return int(int(self) / 60)" ]
[ "0.81158906", "0.7529537", "0.7427662", "0.7400211", "0.7394658", "0.73557305", "0.725303", "0.7242647", "0.7215991", "0.7102178", "0.70426196", "0.69982487", "0.6993329", "0.69814235", "0.6980721", "0.6963728", "0.6873445", "0.6735053", "0.6705921", "0.66999775", "0.66999775", "0.66780734", "0.66681385", "0.66364187", "0.66302127", "0.6627321", "0.65615726", "0.654881", "0.6514648", "0.6484352" ]
0.8488949
0
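The conversion is effectively ceiling division: any partial minute is billed as a full one. A minimal sanity check (illustrative only):

assert second_to_minute(60) == 1   # exact minute
assert second_to_minute(61) == 2   # 61 % 60 != 0, so (61 + 60) // 60
assert second_to_minute(119) == 2  # 119 seconds still round up to 2 minutes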
The default path for auth files. auth is imported by common, so not all functions from common are available yet; we therefore have to duplicate common.get_etc().
def default_path():
    return os.path.join(os.environ.get('OVERRIDE_ETC', '/etc'), 'auth')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_default_path(self):\n\n raise NotImplementedError()", "def getuserbase():\n\tpass", "def _get_config_path():\n return os.path.join(os.path.expanduser('~'))", "def _get_default_path(self):\n return os.path.join(cfg.ROOT_DIR, 'data', 'KITTI')", "def get_default_secrets_basedir():\n default_basedir = Path.home() / BASEDIR_BASENAME\n return Path(\n os.getenv('D2_SECRETS_BASEDIR', default_basedir)\n )", "def DefaultPath(self) -> str:\n return self.m_def_path", "def base_dir(self):\n pass", "def default_config_file(self):\n return DEFAULT_CONFIG_FILEPATH", "def getDefaultDataSearchPath():\n return FileSearchPath(os.path.dirname(__file__))", "def _get_default_path(self):\n return os.path.join(cfg.DATA_DIR, 'visual_genome')", "def _get_default_cache_dir(self):\n default_cache_dir = os.path.join(os.path.expanduser(\"~\"), 'dbcollection')\n return default_cache_dir", "def default_module_dir(self):\n return os.path.dirname(self._modules['default'].path)", "def auth_file(self):\n return self._auth_file", "def get_default_config_path():\n if os.name == 'posix':\n config_path = os.path.join(os.path.expanduser(\"~\"), '.fpdb')\n elif os.name == 'nt':\n config_path = os.path.join(os.environ[\"APPDATA\"], 'fpdb')\n else: config_path = False\n return config_path", "def _load_name_root(self):\n if self._pypath:\n return self._pypath[0]\n elif self._dirs:\n return secrets.token_hex()", "def get_default_config_file() -> Path:\n return get_path_to_pyflow() / \"pyflow\" / \"conf\" / CONFIG_FILE", "def get_local_default_file(cwd=None):\n # TODO(dittrich): May need to do this differently to support\n # Windows file systems.\n if cwd is None:\n cwd = os.getcwd()\n return Path(cwd) / '.python_secrets_environment'", "def find_default(self, fs_path):\n if os.path.isdir(fs_path):\n default = None\n for name in self.defaults:\n _path = os.path.join(fs_path, name)\n if os.path.isfile(_path):\n default = _path\n break\n if default is None:\n raise Response(403)\n fs_path = default\n return fs_path", "def get_default_paths():\n DATA_ROOT = os.environ.get(\"DATA_ROOT\", \"data\")\n defaults = {\n \"TOKENIZE_DATA_DIR\": DATA_ROOT + \"/tokenize\",\n \"MWT_DATA_DIR\": DATA_ROOT + \"/mwt\",\n \"LEMMA_DATA_DIR\": DATA_ROOT + \"/lemma\",\n \"POS_DATA_DIR\": DATA_ROOT + \"/pos\",\n \"DEPPARSE_DATA_DIR\": DATA_ROOT + \"/depparse\",\n \"ETE_DATA_DIR\": DATA_ROOT + \"/ete\",\n \"NER_DATA_DIR\": DATA_ROOT + \"/ner\",\n \"CHARLM_DATA_DIR\": DATA_ROOT + \"/charlm\",\n \"SENTIMENT_DATA_DIR\": DATA_ROOT + \"/sentiment\",\n \"CONSTITUENCY_DATA_DIR\": DATA_ROOT + \"/constituency\",\n\n # Set directories to store external word vector data\n \"WORDVEC_DIR\": \"extern_data/wordvec\",\n\n # TODO: not sure what other people actually have\n # TODO: also, could make this automatically update to the latest\n \"UDBASE\": \"extern_data/ud2/ud-treebanks-v2.11\",\n \"UDBASE_GIT\": \"extern_data/ud2/git\",\n\n \"NERBASE\": \"extern_data/ner\",\n \"CONSTITUENCY_BASE\": \"extern_data/constituency\",\n \"SENTIMENT_BASE\": \"extern_data/sentiment\",\n\n # there's a stanford github, stanfordnlp/handparsed-treebank,\n # with some data for different languages\n \"HANDPARSED_DIR\": \"extern_data/handparsed-treebank\",\n\n # directory with the contents of https://nlp.stanford.edu/projects/stanza/bio/\n # on the cluster, for example, /u/nlp/software/stanza/bio_ud\n \"BIO_UD_DIR\": \"extern_data/bio\",\n\n # data root for other general input files, such as VI_VLSP\n \"EXTERN_DIR\": \"extern_data\",\n }\n\n paths = { \"DATA_ROOT\" : DATA_ROOT }\n for k, 
v in defaults.items():\n paths[k] = os.environ.get(k, v)\n\n return paths", "def get_testcases_default_config_dir():\n global_conf_dir = '/etc/testcases'\n user_global_path = os.path.join(os.path.expanduser('~'), '.testcases/etc')\n if os.path.isdir(global_conf_dir):\n return global_conf_dir\n elif os.path.isdir(user_global_path):\n return user_global_path\n else:\n os.makedirs(user_global_path)\n return user_global_path", "def test_get_default_settings_path():\n\n root_path = application_services.get_pyrin_main_package_path()\n default_settings_path = os.path.abspath(os.path.join(root_path, 'settings', 'default'))\n assert application_services.get_default_settings_path() == default_settings_path", "def getDefaultFileLocation(self):\n\n label_env = os.getenv('DISPASS_LABELFILE')\n std_env = os.getenv('XDG_DATA_HOME') or os.getenv('APPDATA')\n home_file = '~/.dispass/labels'\n\n if label_env:\n return label_env\n if not exists(home_file) and std_env:\n return std_env + '/dispass/labels'\n else:\n return home_file", "def get_user_config_dir(options):\n return '/root/.spinnaker'", "def _get_path(): # THIS IS JUST FOR GETTING THE FILE\n return os.path.dirname(os.path.abspath(__file__)) + '/'", "def file_root(self):\n return os.path.join(CredentialApplication.FILE_ROOT, self.slug)", "def get_default_cookiejar_path():\n cache_dir = xdg.BaseDirectory.save_cache_path('AUR')\n return os.path.join(cache_dir, 'cookiejar.txt')", "def getRootPath():\n return '/'.join(__file__.split('/')[:-4]) # Path of this file with pagebot/__init__.py(c) removed.", "def get_default_path(name):\n name_ = name\n if isinstance(name, (DottedName, Symbol)):\n name_ = str(name)\n if name_ in pyccel_external_lib.keys():\n name = pyccel_external_lib[name_].split('.')\n if len(name)>1:\n return DottedName(*name)\n else:\n return name[0]\n return name", "def base_path(self):\n return self.setup.base_path", "def default_configfile():\n dirname=None\n if os.getenv(\"HOME\"):\n dirname=os.getenv(\"HOME\")\n elif os.getenv(\"USERPROFILE\"):\n dirname=os.getenv(\"USERPROFILE\")\n\n else:\n raise FattyException(\"No HOME or USERPROFILE variable set, unable to determine default config file\")\n\n return os.path.join(dirname,\".fattybugs\")" ]
[ "0.6578735", "0.6543579", "0.6311381", "0.6221308", "0.6197567", "0.59966385", "0.59602493", "0.5944631", "0.59310985", "0.5916046", "0.58995533", "0.588715", "0.5886533", "0.58850485", "0.58659554", "0.5845714", "0.58420026", "0.5840689", "0.5808696", "0.58013254", "0.5758225", "0.5747733", "0.57450074", "0.5724758", "0.57142955", "0.5714088", "0.57107157", "0.5699795", "0.5691403", "0.56776464" ]
0.7999646
0
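A minimal sketch of the OVERRIDE_ETC fallback, with hypothetical paths:

import os

os.environ.pop('OVERRIDE_ETC', None)
assert default_path() == os.path.join('/etc', 'auth')            # '/etc/auth'
os.environ['OVERRIDE_ETC'] = '/tmp/test-etc'
assert default_path() == os.path.join('/tmp/test-etc', 'auth')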
Return True iff a == b, and do it in constant time.
def constant_time_equals(a, b):
    a = bytearray(a)
    b = bytearray(b)
    if len(a) != len(b):
        return False
    result = 0
    for x, y in zip(a, b):
        result |= x ^ y
    return result == 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_equal(self, a, b):\n return a is b", "def _eq(a, b):\n return (a - b) % 2 == 0", "def is_equal(self, a, b):\n return a == b", "def equals(x, y):\n return x == y", "def values_eq(self, a, b):\r\n return a == b", "def _isImmediatelyConcurrentWithHelper(self, other):\n self._mergeKeys(other)\n self._binaryOperationCheck(other)\n offsetsOfPlusOne = 0\n offsetsOfMinusOne = 0\n equalities = 0\n for id in self.clock.keys():\n if (self.clock[id] + 1) == other.clock[id]:\n offsetsOfPlusOne += 1\n if (self.clock[id] - 1) == other.clock[id]:\n offsetsOfMinusOne += 1\n elif self.clock[id] == other.clock[id]:\n equalities += 1\n if offsetsOfPlusOne == 1 and offsetsOfMinusOne == 1 and equalities == len(self.clock.keys()) - 2:\n return True\n else:\n return False", "def equals(a, b, **kwargs):\n return lib.equals(a, b, **kwargs)", "def exact(cls, lhs, rhs):\n return lhs == rhs", "def testEqual(a, b):\n if a == b:\n print('Pass')\n else:\n print('Fail')", "def is_equal(self, a, b):\n return a.X[0] == b.X[0]", "def _is_equal(x, y):\n return x[0] == y", "def is_converged(self,a,b):\n return np.array_equal(a,b)", "def c_equals(a, b):\n alpha = library.PixelGetAlpha\n return bool(library.IsPixelWandSimilar(a, b, 0) and\n alpha(a) == alpha(b))", "def sim(a, b):\n ratio = SequenceMatcher(None, a, b).ratio()\n return ratio > 0.5", "def equal(lhs, rhs):\n return _make.equal(lhs, rhs)", "def checkSame(self, other):\n checkVector(self, other)\n futures = self.client.map(_call_checkSame, self.vecDask, other.vecDask, pure=False)\n results = self.client.gather(futures)\n return all(results)", "def _almost_equal(x, y):\n pass", "def test_equal(self):\n self.assertTrue(self.a == self.a)\n self.assertFalse(self.a != self.a)", "def eq_inplace(a,b):", "def _almost_coincident(a,b, rtol=RTOL, atol=ATOL):\n return (np.allclose(a, b, rtol=RTOL, atol=ATOL)\n or np.allclose(np.flipud(a),b, rtol=RTOL, atol=ATOL))", "def compare_equality(a, b):\n # Work around for https://github.com/python-quantities/python-quantities/issues/146\n try:\n a + b\n except TypeError:\n # We might be dealing with e.g. 
None (None + None raises TypeError)\n try:\n len(a)\n except TypeError:\n # Assumed scalar\n return a == b\n else:\n if len(a) != len(b):\n return False\n return all(compare_equality(_a, _b) for _a, _b in zip(a, b))\n except ValueError:\n return False\n else:\n return a == b", "def _coincident(a,b):\n return np.array_equal(a, b) or np.array_equal(np.flipud(a),b)", "def is_equal(a: list[int], b: list[int]) -> bool:\n i: int = 0\n if len(a) != len(b):\n return False\n while i < len(a):\n if a[i] != b[i]:\n return False\n else:\n i = i + 1\n return True", "def same(self, x, y):\n return self.find(x) == self.find(y)", "def _aresame(a, b):\n from .numbers import Number\n from .function import AppliedUndef, UndefinedFunction as UndefFunc\n if isinstance(a, Number) and isinstance(b, Number):\n return a == b and a.__class__ == b.__class__\n for i, j in zip_longest(_preorder_traversal(a), _preorder_traversal(b)):\n if i != j or type(i) != type(j):\n if ((isinstance(i, UndefFunc) and isinstance(j, UndefFunc)) or\n (isinstance(i, AppliedUndef) and isinstance(j, AppliedUndef))):\n if i.class_key() != j.class_key():\n return False\n else:\n return False\n return True", "def __eq__(self, other: t.Any) -> bool:\n return self._op_bool('__eq__', other)", "def point_to_same_memory(a, b):\n return a.data == b.data", "def eq(a, b):\n return abs(a - b) < .05", "def check_equivalent(self, a, b):\n assert len(a) == len(b)\n for x, y in zip(a, b):\n assert self.is_equal(x, y)", "def is_equal(self, state1, state2):\n return self._replace_unks(state1) == self._replace_unks(state2)" ]
[ "0.6991648", "0.6987159", "0.68904316", "0.6511493", "0.6491735", "0.6393032", "0.6370423", "0.62094575", "0.6165589", "0.61569965", "0.6101428", "0.6083719", "0.60202104", "0.6011132", "0.5964354", "0.5960481", "0.59527063", "0.5886779", "0.5871184", "0.5863785", "0.5838165", "0.58288145", "0.58204436", "0.58054537", "0.5790798", "0.5787722", "0.57850325", "0.5764285", "0.5762209", "0.57575345" ]
0.7278765
0
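Usage sketch: the OR-accumulator guarantees the loop always scans the full length, so timing does not leak the position of the first mismatch. The stdlib offers the same guarantee as hmac.compare_digest (Python 2.7.7+/3.3+):

import hmac

assert constant_time_equals(b'secret', b'secret')
assert not constant_time_equals(b'secret', b'secreT')
assert hmac.compare_digest(b'secret', b'secret')  # stdlib equivalent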
Check that the authentication data directory is owned by the current user, with safe permissions. Throw an exception if not.
def check_sane(self):
    st = os.stat(self.path)
    if st.st_uid != os.getuid():
        raise Exception('Auth dir %s not owned by user %d.' % (
            self.path, os.getuid()))
    # Mode 16832 is equal to (stat.S_IFDIR | stat.S_IRWXU)
    # In other words, a directory with mode bits rwx------
    if st.st_mode != 16832:
        raise Exception('Auth dir %s not a dir or wrong permissions.' % self.path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_owner(data=None, **kw):\n if data and 'owner_id' in data and not data['owner_id'] == current_user.id:\n raise ProcessingException(description=\"No write privileges\",\n code=401)", "def is_admin():\n if os.name == 'nt':\n try:\n # Only Windows users with admin privileges can read \n # the C:\\windows\\temp directory.\n os.listdir(os.sep.join([os.environ.get('SystemRoot','C:\\\\windows'),'temp']))\n except:\n return False\n else:\n return True\n else:\n # Root has UID 0 on Unix systems.\n if 'SUDO_USER' in os.environ and os.geteuid() == 0:\n return True\n else:\n return False", "def test_lock_checks_user(tmpdir):\n uid = getuid()\n if uid not in group_ids():\n pytest.skip(\"user has no group with gid == uid\")\n\n # self-owned, own group\n tmpdir.chown(uid, uid)\n\n # safe\n path = str(tmpdir)\n tmpdir.chmod(0o744)\n lk.check_lock_safety(path)\n\n # safe\n tmpdir.chmod(0o774)\n lk.check_lock_safety(path)\n\n # unsafe\n tmpdir.chmod(0o777)\n with pytest.raises(spack.error.SpackError):\n lk.check_lock_safety(path)\n\n # safe\n tmpdir.chmod(0o474)\n lk.check_lock_safety(path)\n\n # safe\n tmpdir.chmod(0o477)\n lk.check_lock_safety(path)", "def _check_app_dir(self, app_dir):\n try:\n if self._app_dir_in_oasis(app_dir):\n self.log('OSG_APP is an OASIS repository, skipping tests',\n level=logging.DEBUG)\n return True\n\n # Added for SOFTWARE-1567\n if utilities.blank(app_dir) or app_dir == 'UNSET':\n self.log('OSG_APP is UNSET or unavailable, skipping tests',\n level=logging.DEBUG)\n return True\n\n if not validation.valid_location(app_dir) or not os.path.isdir(app_dir):\n self.log(\"Directory not present: %s\" % app_dir,\n section=self.config_section,\n option='app_dir',\n level=logging.WARNING)\n return False\n\n etc_dir = os.path.join(app_dir, \"etc\")\n if not validation.valid_location(etc_dir) or not os.path.isdir(etc_dir):\n self.log(\"$OSG_APP/etc directory not present: %s\" % etc_dir,\n section=self.config_section,\n option='app_dir',\n level=logging.WARNING)\n return False\n\n permissions = stat.S_IMODE(os.stat(etc_dir).st_mode)\n # check to make sure permissions are 777, 1777 2777 775 1775 2775 755 1755 2755\n all_rwx = stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO\n og_rwx = stat.S_IRWXU | stat.S_IRWXG | stat.S_IROTH | stat.S_IXOTH\n o_rwx = stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP\n o_rwx |= stat.S_IROTH | stat.S_IXOTH\n allowed = [all_rwx | stat.S_ISVTX, # 1777\n all_rwx, # 777\n all_rwx | stat.S_ISGID, # 2777\n og_rwx, # 775\n og_rwx | stat.S_ISVTX, # 2775\n og_rwx | stat.S_ISGID, # 2775\n o_rwx, # 755\n o_rwx | stat.S_ISVTX, # 1755\n o_rwx | stat.S_ISGID] # 2755\n if permissions not in allowed:\n self.log(\"Permissions on $OSG_APP/etc should be 777, 1777, \" \\\n \"2777, 775, 1775, 2775, 755, 1755, 2755 \" \\\n \"for sites: %s\" % etc_dir,\n section=self.config_section,\n option='app_dir',\n level=logging.WARNING)\n # pylint: disable-msg=W0703\n except Exception:\n self.log(\"Can't check $OSG_APP, got an exception\",\n level=logging.ERROR,\n exception=True)\n return False\n\n return True", "def verify_user(self):\n if self.username == \"root\":\n print \"Error: Please do not run this script as root.\"\n sys.exit(1)\n\n members = grp.getgrnam(self.groupowner)[3]\n if not self.username in members:\n print \"Error: The user who runs this script must belong to the group: \" + self.groupowner\n sys.exit(1)", "def check_writable ( self,\n fspath, mkdir_chown=False, mkdir_chmod=False, mkdir_p=True\n ):\n success = False\n\n ERRNOS_IGNORE = { errno.EACCES, }\n\n try:\n if 
self.do_touch ( fspath ):\n success = True\n\n except IOError as ioerr:\n if ioerr.errno == errno.EPERM:\n pass\n elif ioerr.errno == errno.ENOENT:\n try:\n if self.dodir (\n os.path.dirname ( fspath ),\n chown=mkdir_chown, chmod=mkdir_chmod, mkdir_p=mkdir_p\n ) and self.do_touch ( fspath ):\n success = True\n\n except ( OSError, IOError ) as err:\n if err.errno == errno.EPERM:\n pass\n elif err.errno in ERRNOS_IGNORE:\n self.error (\n 'Got {name} with unexpected '\n 'errno={code:d} ({code_name})\\n'.format (\n name = err.__class__.__name__,\n code = err.errno,\n code_name = errno.errorcode [err.errno],\n )\n )\n else:\n raise\n # -- end <try again>\n elif ioerr.errno in ERRNOS_IGNORE:\n self.error (\n 'Got {name} with unexpected '\n 'errno={code:d} ({code_name})\\n'.format (\n name = ioerr.__class__.__name__,\n code = ioerr.errno,\n code_name = errno.errorcode [ioerr.errno],\n )\n )\n else:\n raise\n return success", "def test_directory_world_accessible(self):\n if os.name == \"nt\":\n self.skipTest(\"Windows does not use POSIX-style permissions.\")\n os.rmdir(self.info_dir)\n # The default umask is typically 0o022, in which case this test is\n # nontrivial. In the unlikely case that the umask is 0o000, we'll\n # still be covered by the \"restrictive umask\" test case below.\n manager.write_info_file(_make_info())\n self.assertMode(self.info_dir, 0o777)\n self.assertEqual(self._list_info_dir(), [\"pid-76540.info\"])", "def assert_same_owner(path):\n try:\n assert find_owner(path) == getuser(), f\"{path} must be owned by {getuser()}\"\n except AssertionError as error:\n raise click.UsageError(str(error))\n except FileNotFoundError:\n pass", "def _verify_keystore(self):\n keystore_uid = FileUtil(self.keystore_file).uid()\n if keystore_uid not in (-1, HostInfo.uid):\n raise IOError(\"not owner of keystore: %s\" % self.keystore_file)\n keystore_dir = os.path.dirname(self.keystore_file)\n if FileUtil(keystore_dir).uid() != HostInfo.uid:\n raise IOError(\"keystore dir not found or not owner: %s\" % keystore_dir)\n if (keystore_uid != -1 and (os.stat(self.keystore_file).st_mode & 0o077)):\n raise IOError(\"keystore is accessible to group or others: %s\" % self.keystore_file)", "def _check_permissions(server, priv):\n # Check user permissions\n user_pass_host = server.user\n if server.passwd is not None and len(server.passwd) > 0:\n user_pass_host += \":\" + server.passwd\n user_pass_host += \"@\" + server.host\n user = User(server, user_pass_host, False)\n if not user.has_privilege(\"*\", \"*\", priv):\n raise UtilError(\"Not enough permissions. The user must have the \"\n \"%s privilege.\" % priv)", "def user_data_folder_exists(username):\n repo_dir = os.path.abspath(\n os.path.join(os.sep, 'user_data', username))\n return os.path.exists(repo_dir)", "def check_permission():\n if IS_ADMIN:\n out_info(\"Running as Root/Admin\")\n else:\n out_warning(\"Running without root/admin privileges\")", "def test_util_has_perm_or_owns_sanity(self):\n me = User.objects.get(pk=118533)\n my_t = Thread.objects.filter(creator=me)[0]\n other_t = Thread.objects.exclude(creator=me)[0]\n perm = 'forums_forum.thread_edit_forum'\n allowed = access.has_perm_or_owns(me, perm, my_t, self.forum_1)\n eq_(allowed, True)\n allowed = access.has_perm_or_owns(me, perm, other_t, self.forum_1)\n eq_(allowed, False)", "def root_user_check():\n\n if not os.getuid() == 0:\n print(\"This program requires ROOT privileges. 
Exiting.\")\n sys.exit()", "def check_perms(resource):\r\n stmode = os.stat(resource).st_mode\r\n return (getattr(stat, 'S_IROTH') & stmode) > 0", "def check_perms(resource):\r\n stmode = os.stat(resource).st_mode\r\n return (getattr(stat, 'S_IROTH') & stmode) > 0", "def test_permissions(self):\n self.assertEqual(dir_perm, 0o2750)\n self.assertEqual(file_perm, 0o0440)", "def _have_permissions(self, location):\n if not os.path.isfile(location):\n return True\n \n stats = os.stat(location)\n # check specifically for write permission\n return bool(stats.st_mode & stat.S_IWUSR)", "def ownercheck(self, userhost):\n if self.cfg and self.cfg.owner:\n if userhost in self.cfg.owner: return True\n return False", "def _check_namespace_access(self, namespace, user):\n if not namespace.owners.filter(id=user.id).count():\n raise exceptions.PermissionDenied(\n 'The namespace listed on your filename must match one of '\n 'the namespaces you have access to.'\n )", "def test_permissions(self):\n exist = os.access('models/amenity.py', os.F_OK)\n self.assertTrue(exist)\n read = os.access('models/amenity.py', os.R_OK)\n self.assertTrue(read)\n write = os.access('models/amenity.py', os.W_OK)\n self.assertTrue(write)\n exe = os.access('models/amenity.py', os.X_OK)\n self.assertTrue(exe)", "def has_repo_file_privilege(login, repo_base, repo, privilege):\n repo = repo.lower()\n repo_base = repo_base.lower()\n\n # Users always have privileges over their own files.\n if login == repo_base:\n return\n\n # Check if the current user or the public user has the privilege on\n # this repo.\n # The anonymous user is never explicitly shared with, so we don't need\n # to check for that.\n permitted_collaborators = Collaborator.objects.filter(\n repo_base=repo_base,\n repo_name=repo,\n file_permission__contains=privilege,\n user__username__in=[settings.PUBLIC_ROLE, login])\n if not next((c for c in permitted_collaborators), None):\n raise PermissionDenied()", "def is_user_root():\n return (True if os.getuid() == 0 else False)", "def verify_blob_permissions(self, blob):\n path = self.csum_to_path(blob)\n return is_readonly(path)", "def _check_owner(user, study):\n if not user.id == study.owner:\n raise HTTPError(403, \"User %s does not own study %d\" %\n (user.id, study.id))", "def check_root():\n if os.getuid():\n logging.critical(\"Please run as root.\")\n sys.exit(ExitCode.ROOT_REQUIRED)", "def user_is_root():\n return os.geteuid() == 0", "def _enforce_authorization(self, **kwargs):\n # Get the env\n env_dict = kwargs.get('env')\n\n # Although it may already be set in the env, just override in case it was only set via command line or config\n # Convert to string since execve() (called by Popen in base classes) wants string values.\n env_dict['EG_IMPERSONATION_ENABLED'] = str(self.impersonation_enabled) # TODO - Leave EG_ for kernelspec?\n\n # Now perform authorization checks\n if self.kernel_username in self.unauthorized_users:\n self._raise_authorization_error(\"not authorized\")\n\n # If authorized users are non-empty, ensure user is in that set.\n if self.authorized_users.__len__() > 0:\n if self.kernel_username not in self.authorized_users:\n self._raise_authorization_error(\"not in the set of users authorized\")", "def validate(self):\n if not self.path.is_dir() or not self.path.exists():\n raise NotADirectoryError", "def can_edit_or_403(self, user):\n if self.get_permission_level(user) < self.OWNER_PERMISSION:\n raise PermissionDenied\n return True" ]
[ "0.69111305", "0.63244414", "0.618211", "0.61762416", "0.60865843", "0.6056135", "0.60496074", "0.60474795", "0.60096633", "0.5915627", "0.5915164", "0.5910356", "0.5902933", "0.5817912", "0.58168834", "0.58168834", "0.5816142", "0.57881606", "0.5775639", "0.57755697", "0.57643783", "0.575667", "0.57519674", "0.57433975", "0.57253486", "0.5717081", "0.5704112", "0.570092", "0.5695418", "0.5638493" ]
0.7962697
0
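For reference, 16832 == 0o40700, i.e. stat.S_IFDIR | stat.S_IRWXU. A sketch of creating a directory that would pass the check (path hypothetical, umask permitting):

import os
import stat

assert (stat.S_IFDIR | stat.S_IRWXU) == 16832  # directory, rwx------
os.makedirs('/tmp/auth-dir', mode=0o700)       # caller-owned by construction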
Save data into a file, with owner-only rw mode bits (0600).
def write(self, filename, data):
    owner_rw = 0600
    fd = os.open(filename, os.O_WRONLY | os.O_CREAT, owner_rw)
    # In case file existed already with wrong permissions, fix them.
    os.chmod(filename, owner_rw)
    os.write(fd, data)
    os.close(fd)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def saveOnFile(self, path, data):\n with open(path, \"w\") as f:\n f.write(data)", "def save(self, data):\n self.write(data)", "def saveIntoFile(self, fname, data, mode='a'):\n\t\tg = open(fname, mode)\n\t\tg.write(data)\n\t\tg.close()", "def write(self, data):\n return self._write(self.wfile, data)", "def save_to_file(self, data):\n\t\tif self.data_file.write(data):\n\t\t\tprint(\"Data successfully added to file\")\n\t\telse:\n\t\t\tPrint(\"Problem occured during adding to file\")", "def filewrite(self, filename, data):\n try:\n filedata = data.decode(\"utf-8\")\n except Exception:\n filedata = data\n lock = FileLock(filename)\n lock.acquire()\n with open(filename, 'w+') as f:\n f.write(filedata)\n lock.release()", "def write(self, filename, data):\n raise NotImplementedError", "def enablewrite(self):\n if self.mode == 'write':\n return\n self.file.close()\n self.mode = 'write'\n self._load_file()", "def write(self, data, mode=\"w\", ensure=False):\n if ensure:\n self.dirpath().ensure(dir=1)\n if \"b\" in mode:\n if not isinstance(data, bytes):\n raise ValueError(\"can only process bytes\")\n else:\n if not isinstance(data, str):\n if not isinstance(data, bytes):\n data = str(data)\n else:\n data = data.decode(sys.getdefaultencoding())\n f = self.open(mode)\n try:\n f.write(data)\n finally:\n f.close()", "def write(cls, file, data):\n file.write(data)", "def write_data():", "def saveFile(self, data, filelocation):\n with open(filelocation, 'w+') as f:\n f.write(data)", "def save_data(data, file_name):\r\n file = open(file_name, \"w\")\r\n file.write(data + \"\\n\")\r\n file.close()", "def write_binary(self, data, ensure=False):\n if ensure:\n self.dirpath().ensure(dir=1)\n with self.open(\"wb\") as f:\n f.write(data)", "def write(data):", "def _save_file(self, file_path, data):\n self._ensure_directory(os.path.dirname(file_path))\n with open(file_path, \"wb\") as f:\n f.write(data)", "def write(self, data_to_write):\n self.single_file.write(data_to_write)\n self.single_file.flush()", "def rewrite_all_file(self, data):\r\n with open(self.file_name, 'w', encoding='utf-8') as self.file:\r\n self.file.write(data)", "def write_to_file(filename, data):\n with open(filename, \"a\") as file:\n file.writelines(data)", "def write_to_file(filename, data):\n with open(filename, \"a\") as file:\n file.writelines(data)", "def write( data ):", "def write_to_file(filepath, data):\n\n with open(filepath, 'w') as f:\n f.write(str(data))", "def save_to_file(self, name, data):\n if os.path.isdir(\"saved_data\"):\n with open(f'saved_data/{name}.txt', 'wb') as file:\n pickle.dump(data, file)\n else:\n os.mkdir(\"saved_data\")\n self.save_to_file(name, data)", "def write(self, content, mode='wb'):\r\n self.localpath.write(content, mode)", "def write_file(path, data):\n # opens file\n try:\n os.makedirs(os.path.dirname(path), exist_ok=True)\n f = open(str(path), \"w\")\n f.write(data)\n f.close()\n except Exception as e:\n print(\"Error writing file: \", e)\n sys.exit(1)", "def save(fname, data):\r\n with open(fname, 'wb') as f:\r\n pickle.dump(data, f)", "def writable(path):", "def _write_to_file(dir_path: Text,\n filename: Text,\n content: Text,\n executable: bool = False):\n path = os.path.join(dir_path, filename)\n with open(path, 'w') as f:\n f.write(content)\n if executable:\n st = os.stat(path)\n os.chmod(path, st.st_mode | stat.S_IXUSR)", "def _write_to_file(dir_path: Text,\n filename: Text,\n content: Text,\n executable: bool = False):\n path = os.path.join(dir_path, filename)\n with open(path, 'w') 
as f:\n f.write(content)\n if executable:\n st = os.stat(path)\n os.chmod(path, st.st_mode | stat.S_IXUSR)", "def write(path, data):\r\n path = encode(path)\r\n if path.lower().startswith(\"smb://\"):\r\n from sambatools.smb.smb_structs import OperationFailure\r\n try:\r\n samba.store_file(os.path.basename(path), data, os.path.dirname(path))\r\n except OperationFailure:\r\n logger.info(\"deportesalacarta.core.filetools write: Error al guardar el archivo: {0}\".format(path))\r\n return False\r\n else:\r\n return True\r\n\r\n else:\r\n try:\r\n f = open(path, \"wb\")\r\n f.write(data)\r\n f.close()\r\n\r\n # except EnvironmentError:\r\n except Exception, ex:\r\n logger.info(\"filetools.write: Error al guardar el archivo: \")\r\n template = \"An exception of type {0} occured. Arguments:\\n{1!r}\"\r\n message = template.format(type(ex).__name__, ex.args)\r\n logger.info(message)\r\n # logger.info(\"deportesalacarta.core.filetools write: Error al guardar el archivo: {0}\".format(path))\r\n return False\r\n else:\r\n return True" ]
[ "0.7226291", "0.70650244", "0.68625534", "0.68410575", "0.6730982", "0.67121845", "0.66840976", "0.6669404", "0.66520023", "0.65721416", "0.6454757", "0.64442486", "0.64206964", "0.6347395", "0.63168377", "0.6304097", "0.6299632", "0.62755454", "0.62742877", "0.62730885", "0.6258053", "0.62494755", "0.6223573", "0.62155545", "0.61847025", "0.61808187", "0.61708766", "0.6166608", "0.6166608", "0.6153468" ]
0.7425339
0
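Note that 0600 is a Python 2 octal literal (0o600 in modern syntax): owner read/write only. A hedged usage sketch; the instance and path names are hypothetical:

auth_store.write('/etc/auth/password', 'hunter2\n')  # hypothetical instance and path
# The resulting file is -rw------- even if it already existed with looser
# bits, because os.chmod() runs unconditionally after os.open().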
Return the number of failed passwords that can be entered before login attempts are disabled for a day. The rate limit information is stored as a count of failed attempts so far. If there have been no failed attempts, or they were more than a day ago, treat that as zero failed attempts.
def rate_limit_remaining(self):
    if os.path.isfile(self.rate_limit_filename):
        st = os.stat(self.rate_limit_filename)
        if time.time() - st.st_ctime > self.RATE_LIMIT_DURATION:
            return self.RATE_LIMIT_COUNT
        else:
            with open(self.rate_limit_filename, 'r') as f:
                failed_login_attempts = int(f.read())
            return max(0, self.RATE_LIMIT_COUNT - failed_login_attempts)
    else:
        return self.RATE_LIMIT_COUNT
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def allowed_failed_attempts(self) -> int:\n return pulumi.get(self, \"allowed_failed_attempts\")", "def get_retry_count(self):\r\n return self.retried_nomax + self.retried_withmax", "def rate_limit_check():\n\n data = api.rate_limit_status()\n\n user_timeline_remaining = data['resources']['statuses'] \\\n ['/statuses/user_timeline'] \\\n ['remaining']\n\n followers_list_remaining = data['resources']['followers'] \\\n ['/followers/list']['remaining']\n\n rate_limit_remaining = data['resources']['application'] \\\n ['/application/rate_limit_status']['remaining']\n\n verify_credentials_remaining = data['resources']['account'] \\\n ['/account/verify_credentials'] \\\n ['remaining']\n\n user_timeline_reset = data['resources']['statuses'] \\\n ['/statuses/user_timeline'] \\\n ['reset']\n\n followers_list_reset = data['resources']['followers'] \\\n ['/followers/list']['reset']\n\n rate_limit_reset = data['resources']['application'] \\\n ['/application/rate_limit_status']['reset']\n\n verify_credentials_reset = data['resources']['account'] \\\n ['/account/verify_credentials'] \\\n ['reset']\n\n return {'utrem': user_timeline_remaining,\n 'ftrem': followers_list_remaining,\n 'rlrem': rate_limit_remaining,\n 'vcrem': verify_credentials_remaining,\n 'utres': user_timeline_reset,\n 'ftres': followers_list_reset,\n 'rlres': rate_limit_reset,\n 'vcres': verify_credentials_reset}", "def attempt_limit(self) -> int:\n return self._attempt_limit", "def checkRls():\n return api.rate_limit_status()['resources']['search']['/search/tweets']['remaining']", "def change_password_attempts(request):\n username = request.user.username\n password_attempts = f\"{CHANGE_PASSWORD_ATTEMPTS}{username}\"\n attempts = cache.get(password_attempts)\n\n if attempts:\n cache.incr(password_attempts)\n attempts = cache.get(password_attempts)\n if attempts >= MAX_CHANGE_PASSWORD_ATTEMPTS:\n cache.set(\n f\"{LOCKOUT_CHANGE_PASSWORD_USER}{username}\",\n datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S\"),\n LOCKOUT_TIME,\n )\n if check_user_lockout(request):\n return check_user_lockout(request)\n\n return attempts\n\n cache.set(password_attempts, 1)\n\n return 1", "def failed_logins_ip(self, ip_id):\n #Get how many logins\n login_attempts = self.sql('SELECT count(*) FROM login_attempts WHERE success >= 0 AND attempt_time > UNIX_TIMESTAMP(NOW()) - %s AND ip_id = %s', BAN_TIME_IP, ip_id)\n remaining_attempts = MAX_LOGIN_ATTEMPTS_IP - login_attempts\n \n #Ban IP if not enough remaining attempts\n if remaining_attempts <= 0:\n self.ban_ip(ip_id)\n \n if not PRODUCTION_SERVER:\n print 'IP {} attempted to login to an account. 
Remaining attempts: {}'.format(ip_id, remaining_attempts)\n \n return remaining_attempts", "def password_count(self) -> int:\n return pulumi.get(self, \"password_count\")", "def check_attempts(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"check_attempts\")", "def test_password_reset_ratelimited(self):\r\n cache.clear()\r\n\r\n for i in xrange(30):\r\n good_req = self.request_factory.post('/password_reset/', {\r\n 'email': 'thisdoesnotexist{0}@foo.com'.format(i)\r\n })\r\n good_resp = password_reset(good_req)\r\n self.assertEquals(good_resp.status_code, 200)\r\n\r\n # then the rate limiter should kick in and give a HttpForbidden response\r\n bad_req = self.request_factory.post('/password_reset/', {'email': '[email protected]'})\r\n bad_resp = password_reset(bad_req)\r\n self.assertEquals(bad_resp.status_code, 403)\r\n\r\n cache.clear()", "def AddPasswordPolicyAllowedFailedAttempts(parser):\n parser.add_argument(\n '--password-policy-allowed-failed-attempts',\n type=int,\n required=False,\n default=None,\n help=(\n 'Number of failed login attempts allowed before a user is locked out.'\n ' This flag is available only for MySQL.'\n ),\n )", "def get_login_attempts(self):\n print(f\"User {self.last_name} tried login attempt(s) on {self.login_attempts} occasions\")", "def auditportallocfailrate(self) :\n\t\ttry :\n\t\t\treturn self._auditportallocfailrate\n\t\texcept Exception as e:\n\t\t\traise e", "def maximum_retry_attempts(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"maximum_retry_attempts\")", "def failed_per_hour(self):\r\n return (3600.*(self.circ_failed+self.strm_failed))/self.current_uptime()", "def password_validity(user):\n # password change information:\n delay = constants.USER_PASS_SPAN # default users\n\n if user.is_staff: # staff/admins\n delay = constants.ADMIN_PASS_SPAN\n\n # default last pass update, join date of the user\n date_joined = user.date_joined\n\n # last change log\n last_change = Password_Change_Log.objects.filter(user=user).aggregate(\n date_max=Max('timestamp'))['date_max']\n\n # if there is record of last password change, use it\n if last_change:\n last_date = last_change\n # or take the join date as default\n else:\n last_date = date_joined\n\n difference = (timezone.now()-last_date).days\n return max(0, delay-difference)", "def pass_attempts(self):\n return self._pass_attempts", "def failed_logins_account(self, account_id, field_data):\n \n hash = quick_hash(field_data)\n \n #Check if banned\n if account_id:\n try:\n ban_remaining = self.sql('SELECT GREATEST(ban_until, UNIX_TIMESTAMP(NOW())) - UNIX_TIMESTAMP(NOW()) FROM accounts WHERE id = %s', account_id)[0][0]\n except IndexError:\n ban_remaining = 0\n else:\n ban_remaining = 0\n \n #Check login attempts if not banned\n if ban_remaining:\n remaining_attempts = 0\n else:\n try:\n last_login = self.sql('SELECT attempt_time FROM login_attempts WHERE success = 1 AND BINARY field_data = %s ORDER BY attempt_time DESC LIMIT 1', hash)[0][0]\n except IndexError:\n last_login = 0\n \n #Get how many failed logins\n failed_logins = self.sql('SELECT count(*) FROM login_attempts WHERE attempt_time > GREATEST(%s, UNIX_TIMESTAMP(NOW()) - %s) AND BINARY field_data = %s', last_login, BAN_TIME_ACCOUNT, hash)\n remaining_attempts = MAX_LOGIN_ATTEMPTS_ACCOUNT - failed_logins\n \n #Ban account if not enough remaining attempts\n if remaining_attempts <= 0:\n ban_remaining = self.ban_account(account_id)\n \n #Workaround to get psuedo-ban for account that don't exist\n if not account_id:\n try:\n 
ban_offset = self.sql('SELECT UNIX_TIMESTAMP(NOW()) - attempt_time FROM login_attempts WHERE success < 1 AND BINARY field_data = %s ORDER BY attempt_time DESC LIMIT 1 OFFSET {}'.format(-remaining_attempts), hash)[0][0]\n print ban_offset\n except IndexError:\n ban_offset = 0\n ban_remaining -= ban_offset\n \n if not PRODUCTION_SERVER:\n print 'Account \"{}\" attempted to login. Remaining attempts: {}. Ban time remaining: {}'.format(field_data, remaining_attempts, ban_remaining)\n \n return remaining_attempts, ban_remaining", "def maximum_retry_attempts(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"maximum_retry_attempts\")", "def reste_login_attempts(self):\n self.login_attempts = 0", "def remaining_requests(self):\n try:\n return self._get_limit('Remaining')\n except ValueError:\n logging.error(\n \"Unable to gather limit statistics until log() has been called. Returning -1\")\n return -1", "def get_num_attempts(self, username, descriptor):\r\n module = self.get_student_module(username, descriptor)\r\n state = json.loads(module.state)\r\n return state['attempts']", "def is_password_reset_frequency_restricted(cls):\r\n return settings.FEATURES['ADVANCED_SECURITY'] and \\\r\n settings.ADVANCED_SECURITY_CONFIG.get(\r\n 'MIN_TIME_IN_DAYS_BETWEEN_ALLOWED_RESETS', None\r\n )", "def attempts(difficulty):\n if difficulty == 'easy':\n attempts = 5\n else:\n attempts = 10\n \n return attempts", "def verify_is_allowed(self):\n if (\n self.throttling_enabled\n and self.throttling_failure_count > 0\n and self.throttling_failure_timestamp is not None\n ):\n now = timezone.now()\n delay = (now - self.throttling_failure_timestamp).total_seconds()\n # Required delays should be 1, 2, 4, 8 ...\n delay_required = self.get_throttle_factor() * (\n 2 ** (self.throttling_failure_count - 1)\n )\n if delay < delay_required:\n return (\n False,\n {\n 'reason': VerifyNotAllowed.N_FAILED_ATTEMPTS,\n 'failure_count': self.throttling_failure_count,\n 'locked_until': self.throttling_failure_timestamp\n + timedelta(seconds=delay_required),\n },\n )\n\n return super().verify_is_allowed()", "def maximum_retry_attempts(self) -> Optional[int]:\n return pulumi.get(self, \"maximum_retry_attempts\")", "def fourth_down_attempts(self):\n return self._fourth_down_attempts", "def check_attempts(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"check_attempts\")", "def check_attempts(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"check_attempts\")", "def password_count(self) -> Optional[int]:\n return pulumi.get(self, \"password_count\")" ]
[ "0.72841084", "0.6442284", "0.63025856", "0.62957674", "0.6294218", "0.6247011", "0.6221691", "0.6105994", "0.6071681", "0.6069109", "0.60401917", "0.6027968", "0.6010637", "0.6010314", "0.59940743", "0.59916854", "0.5940475", "0.5892049", "0.5857288", "0.5837647", "0.5832163", "0.5820413", "0.5794784", "0.5790159", "0.57866585", "0.5751859", "0.5739065", "0.57080674", "0.57080674", "0.570752" ]
0.7286241
0
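The method never mutates state; the on-disk counter and its ctime drive the one-day window. A sketch of the companion "record a failure" side such a class presumably needs (assumed, not shown in the source):

def record_failed_login(self):
    # Assumed helper: bump the persisted failure count. Rewriting the
    # file also refreshes st_ctime, restarting the one-day window.
    failed = self.RATE_LIMIT_COUNT - self.rate_limit_remaining() + 1
    with open(self.rate_limit_filename, 'w') as f:
        f.write(str(failed))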
Return whether a password file exists.
def password_exists(self):
    return os.path.isfile(self.password_filename)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def file_exists(file):\n try:\n Cryptography.read(file)\n return True\n except (FileNotFoundError, FileExistsError):\n return False", "def has_credentials(credentials_file=CREDENTIALS_FILE):\n return os.path.exists(credentials_file)", "def file_exist() -> bool:\n pass", "def exists_file(f):\n if os.path.exists(f):\n return True\n return False", "def _existFile(f):\n\treturn os.path.isfile(f)", "def hexists(file_path: str) -> bool:\n return os.path.exists(file_path)", "def file_exist(file_path):\n return os.path.isfile(file_path)", "def is_file_exists(self):\n pass", "def file_exists(path):\n return os.path.exists(path)", "def FileExists(file):\n return os.path.exists(file)", "def _search_for_key_file(path_to_key_file):\n\n return True if os.path.exists(path_to_key_file) else False", "def file_exists(filename):\n return os.path.isfile(filename)", "def test_6_1_2_etc_passwd_exists(host):\n assert host.file(ETC_PASSWD).exists", "def file_exists(filename):\n return os.path.exists(filename)", "def test_6_1_2_etc_passwd_isfile(host):\n assert host.file(ETC_PASSWD).is_file", "def file_exists(self):\r\n if os.path.exists(self.file_path):\r\n return True\r\n else:\r\n return False", "def file_exists(path: str) -> bool:\n\treturn os.path.isfile(path)", "def file_exists(path: str) -> bool:\n return os.path.isfile(path)", "def has_file(path):\n return os.path.exists(path)", "def fileExist(file):\r\n return os.path.exists(file) and os.path.isfile(file)", "def file_exists(self):\n if os.path.isfile(self.file_name):\n return True\n else:\n return False", "def file_exists(filename: str) -> bool:\n\n return os.path.exists(filename)", "def fileExist(file):\n return os.path.exists(file) and os.path.isfile(file)", "def fileExist(file):\n return os.path.exists(file) and os.path.isfile(file)", "def fileExist(file):\n return os.path.exists(file) and os.path.isfile(file)", "def fileExist(file):\n return os.path.exists(file) and os.path.isfile(file)", "def file_exists(file_path):\r\n return exists(file_path) and isfile(file_path)", "def notExist(file_name):\n\twhile True:\n\t\tmessage(\"warning\", \"[+] The password file cannot be found. Do you want to create it? y/n.\")\n\t\tanswer = raw_input(\"> \")\n\t\tif answer == \"y\" or answer == \"Y\":\n\t\t\trandomKeyFile(randomKey)\n\t\t\tkeys = getpass.getpass()\n\t\t\tencryptor(file_name, randomKey, keys)\n\t\t\tmessage(\"succes\", \"[+] Succes! The password file has been created.\")\n\t\t\tbreak\n\t\telif answer == \"n\" or answer == \"N\":\n\t\t\tmessage(\"warning\", \"[+] Manual pass selected.\")\n\t\t\tkeys = getpass.getpass()\n\t\t\treturn keys\n\t\t\tbreak\n\t\telse:\n\t\t\tmessage(\"warning\", \"[+] Wrong input. Please enter y/n.\")", "def file_exists(path):\n\n try:\n with open(path):\n return True\n except IOError:\n return False", "def password_exists_in_keyring(username):\n try:\n get_password_from_keyring(username)\n except PyiCloudNoStoredPasswordAvailableException:\n return False\n\n return True" ]
[ "0.73339146", "0.72148883", "0.711415", "0.704762", "0.6940705", "0.6932751", "0.68602616", "0.68285614", "0.68142194", "0.6799496", "0.6797534", "0.6792931", "0.67914045", "0.6779414", "0.67747366", "0.6768138", "0.67567444", "0.6748446", "0.67089844", "0.67058", "0.6699733", "0.66957694", "0.66820264", "0.66820264", "0.66820264", "0.66820264", "0.6679577", "0.6637221", "0.6635567", "0.6619973" ]
0.84719884
0
Generate a CSRF prevention token. We derive this token as the SHA256 hash of the auth token, which ensures the two are bound together, preventing cookie forcing attacks. Returns a valid CSRF prevention token.
def get_csrf_token(self):
    h = hashlib.new('sha256')
    h.update(self.__current_authentication_token())
    return h.hexdigest()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_csrf_token() -> int:\r\n ...", "def generate_csrf_token():\n if '_csrf_token' not in login_session:\n login_session['_csrf_token'] = b64encode(urandom(64)).decode() # Cryptographically secure random key\n print(\"_csrf_token:\" + login_session['_csrf_token'])\n return login_session['_csrf_token']", "def create_csrf_token(salt=''):\n\tif not salt:\n\t\tsalt = Random.new().read(csrf_salt_len).encode('hex')\n\th = SHA256.new()\n\th.update(get_csrf_secret() + salt)\n\treturn h.hexdigest() + salt", "def gen_csrf_secret():\n\treturn Random.new().read(csrf_secret_len)", "def get_csrf_token(self):\n return get_csrf_token(self.REQUEST)", "def generate_csrf_token(app_key, app_secret, user_key, user_secret):\n # We authenticate the user using the keys\n auth = OAuth1(app_key, app_secret, user_key, user_secret)\n\n # Get token\n token_request = requests.get('https://commons.wikimedia.org/w/api.php', params={\n 'action': 'query',\n 'meta': 'tokens',\n 'format': 'json',\n }, auth=auth)\n token_request.raise_for_status()\n\n # We get the CSRF token from the result to be used in editing\n CSRF_TOKEN = token_request.json()['query']['tokens']['csrftoken']\n return CSRF_TOKEN, auth", "def generate_token(self):\n self.__get_auth_token_and_secret()\n return self.get_token()", "def get_csrf_secret():\n\tsess = managers.request_manager.get_request().session()\n\tsecret = sess.get(csrf_secret_sess_var_name, None)\n\tif not secret:\n\t\tsecret = gen_csrf_secret()\n\t\tsess[csrf_secret_sess_var_name] = secret\n\treturn secret", "def csrf_token():\n return api_util.jsonify({\n 'token': view_helpers.generate_csrf_token()\n })", "def generate_token():\n chars = ('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')\n rand = random.SystemRandom()\n random_string = ''.join(rand.choice(chars) for _ in range(40))\n return hmac.new(\n config.SECRET_KEY,\n random_string,\n hashlib.sha256\n ).hexdigest()", "def _generate_token(self):\n return sha1(\"%s#%s\" % (time(),\n self.app.cfg['sessions/secret'])).hexdigest()", "def retain_csrf_token(req):\n session = req.environ.get('rex.session', {})\n csrf_token = session.get('_csrf_token')\n if not csrf_token:\n csrf_token = session['_csrf_token'] = b2a(os.urandom(16))\n return csrf_token", "def get_token(request: http.Request) -> str:\n if hasattr(request, '_csrf_hook'):\n return request._csrf_hook.get_token()", "def make_token():\n return secrets.token_urlsafe(36)", "def csrf_protect():\n if request.method == \"POST\" and request.path[0:5] != \"/api/\":\n token = login_session.pop('_csrf_token', None)\n request_token = request.form.get('_csrf_token')\n print(\"Comparing server token [\" + token + \"]\")\n print(\"with client token [\" + request_token + \"]\")\n if not token or token != request_token:\n print(\"Tokens do not match! 
Aborting..\")\n abort(403)\n print(\"Tokens match - accepted\")", "def _generate_token_value():\n return secrets.token_urlsafe()", "def get_csrf(self):\n rv = self.app.get('/')\n soup = BeautifulSoup(rv.data, 'html.parser')\n tag = soup.body.find('input', attrs = { 'name' : '_csrf_token'})\n return tag['value']", "def create_token(self):\n ts_datetime = self.logged_at or self.created_at\n ts = int(mktime(ts_datetime.timetuple()))\n key = base64.encodestring(self.email)\n base = \"{}{}\".format(key, ts)\n salt, hsh = self.password.split('$')\n return \"{}$${}\".format(key, get_hexdigest(salt, base))", "def _get_form_token(self, req):\n if req.incookie.has_key('trac_form_token'):\n return req.incookie['trac_form_token'].value\n else:\n req.outcookie['trac_form_token'] = hex_entropy(24)\n req.outcookie['trac_form_token']['path'] = req.base_path or '/'\n if self.env.secure_cookies:\n req.outcookie['trac_form_token']['secure'] = True\n if sys.version_info >= (2, 6):\n req.outcookie['trac_form_token']['httponly'] = True\n return req.outcookie['trac_form_token'].value", "def get_csrf_token():\n\tresponse = session.get('https://www.udemy.com/join/login-popup')\n\tmatch = re.search(\"name=\\'csrfmiddlewaretoken\\' value=\\'(.*)\\'\", response.text)\n\treturn match.group(1)", "def get_token(self, res):\n token = res.xpath('//*[@name=\"_csrf-app\"]')[0].attrs['value']\n return token", "def test_gen_and_verify_good_token(self):\n config.set(xsrf_token_key='abcdef')\n tool = utils.XsrfTool()\n token = tool.generate_token(12345, 'test_action')\n self.assertTrue(tool.verify_token(token, 12345, 'test_action'))", "def rotate_token(request: http.Request):\n if hasattr(request, '_csrf_hook'):\n request._csrf_hook.rotate_token()", "def get_csrf_token_from_response(self, response):\n return re.search(CSRF_REGEX, response.body).group(1)", "def get_csrf_token(self) -> str:\n url_csrf = 'https://www.instagram.com/accounts/login/'\n\n res = self.session.get(url_csrf, headers={\n 'user-agent': \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:55.0) Gecko/20100101 Firefox/55.0\"#'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36'\n })\n csrf = re.findall(r\"csrf_token\\\":\\\"(.*?)\\\"\", res.text)[0]\n return csrf", "def verify_csrf_token(token=''):\n\tif not token:\n\t\ttoken = managers.request_manager.get_request().arguments().arguments().get(csrf_token_arg_name, \"\")\n\t\tif token:\n\t\t\ttoken = token[0]\n\tif len(token) != 2 * digest_size + 2 * csrf_salt_len:\n\t\tdebug('Incorrect csrf token length')\n\t\traise VDOM_csrf_exception()\n\tsalt = token[2*digest_size:]\n\tif token != create_csrf_token(salt):\n\t\tdebug('Incorrect csrf token value')\n\t\traise VDOM_csrf_exception()", "def get_xsrf_token(self, offset=0):\n if not self.xsrf_secret:\n self.xsrf_secret = os.urandom(8)\n self.put()\n m = md5.new(self.xsrf_secret)\n email_str = self.lower_email\n if isinstance(email_str, unicode):\n email_str = email_str.encode('utf-8')\n m.update(self.lower_email)\n when = int(time.time()) // 3600 + offset\n m.update(str(when))\n return m.hexdigest()", "def gen_csrfkey(force, randomness):\n\n def gen_randomkey(length):\n \"\"\"Generate random key, given a number of characters\"\"\"\n chars = string.letters + string.digits + string.punctuation\n return ''.join([choice(chars) for _ in xrange(int(str(length)))])\n\n csrf_key = gen_randomkey(randomness)\n session_key = gen_randomkey(randomness)\n\n file_name = '%s/secret_keys.py' % app4name\n file_template = 
Template('''# CSRF and Session keys\n\nCSRF_SECRET_KEY = '$csrf_key'\nSESSION_KEY = '$session_key'\n''')\n\n output = file_template.safe_substitute(dict(\n csrf_key=csrf_key, session_key=session_key\n ))\n\n if (os.path.exists(file_name)) and (force is False):\n print \"Warning: secret_keys.py file exists. Use '-f' flag to force overwrite.\"\n else:\n f = open(file_name, 'wb')\n f.write(output)\n f.close()", "def generate(self):\n return self.rpc.call(MsfRpcMethod.AuthTokenGenerate)['token']", "def get_initial_token():\n cj = CookieJar()\n opener = build_opener(HTTPCookieProcessor(cj))\n install_opener(opener)\n opener.open(EDX_HOMEPAGE)\n\n for cookie in cj:\n if cookie.name == 'csrftoken':\n return cookie.value\n\n return ''" ]
[ "0.7412592", "0.7199123", "0.7178343", "0.71217203", "0.69856286", "0.68552816", "0.651522", "0.6510274", "0.65009046", "0.63964945", "0.63690513", "0.6342274", "0.6288062", "0.6235347", "0.6221905", "0.62115526", "0.61450565", "0.61078674", "0.60888135", "0.6081029", "0.6066629", "0.60513777", "0.60259104", "0.60247636", "0.6019048", "0.6016622", "0.6016069", "0.6011641", "0.5996468", "0.59538823" ]
0.7876994
0
Return the HTTP headers required to log the user in. Specifically, set the auth cookie, the CSRF token cookie, and an unsecured cookie logged_in=true, indicating the user is logged in even if the current request context doesn't have the auth cookies. The server should redirect users with the logged_in cookie to the HTTPS version of the site. Calling this method immediately regenerates the stored auth token, invalidating other active sessions.
def login_headers(self):
    auth_token = self.regenerate_authentication_token()
    csrf_token = self.get_csrf_token()
    # Set the secure flag on the cookie if the login occurred over HTTPS.
    secure = ''
    if 'HTTPS' in os.environ:
        secure = ' secure;'
    return ('Set-Cookie: %s=true; path=/\n'
            'Set-Cookie: %s=%s; path=/; HttpOnly;%s\n'
            'Set-Cookie: %s=%s; path=/;%s\n' % (
                self.LOGGED_IN_COOKIE_NAME,
                self.AUTH_COOKIE_NAME, auth_token, secure,
                self.CSRF_COOKIE_NAME, csrf_token, secure))
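For illustration only, with hypothetical cookie names standing in for the class constants, a successful HTTPS login would emit a header block roughly like the following; the token values shown are placeholders, not real output:

Set-Cookie: logged_in=true; path=/
Set-Cookie: auth=9f86d081884c7d65...; path=/; HttpOnly; secure;
Set-Cookie: csrf=2c26b46b68ffc68f...; path=/; secure;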
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logout_headers(self):\n self.regenerate_authentication_token()\n return ('Set-Cookie: %s=; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT\\n'\n 'Set-Cookie: %s=; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT\\n'\n 'Set-Cookie: %s=; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT\\n' % (\n self.LOGGED_IN_COOKIE_NAME, self.AUTH_COOKIE_NAME,\n self.CSRF_COOKIE_NAME))", "def default_login_auth_header(self):\n return self.get_auth_header(self.default_login['login'], self.default_login['password'])", "def check_http_auth(request):\n from user.models import User\n\n if 'HTTP_AUTHORIZATION' in request.META:\n # If an Authorization header is supplied, but this request is\n # not allowed to use HTTP authentication, ignore the header.\n if not http_auth_allowed(request):\n return\n\n # If the user is already authenticated, ignore the header.\n if request.user.is_authenticated:\n return\n\n try:\n uid = request.session['pn_httpauth_uid']\n authhash = request.session['pn_httpauth_hash']\n user = User.objects.get(id=uid)\n except (KeyError, User.DoesNotExist):\n pass\n else:\n # Existing session is valid only if the password has not\n # changed.\n if constant_time_compare(user.get_session_auth_hash(),\n authhash) and user.is_active:\n request.user = user\n return\n\n tokens = request.META['HTTP_AUTHORIZATION'].split()\n if len(tokens) == 2 and tokens[0].lower() == 'basic':\n try:\n data = base64.b64decode(tokens[1], validate=True).decode()\n username, password = data.split(':', 1)\n except Exception:\n return\n\n user = auth.authenticate(request=request,\n username=username,\n password=password)\n if user and user.is_active:\n request.user = user\n\n # If the client supports cookies, save the state so\n # that we don't have to verify the password on\n # subsequent requests. If the client doesn't support\n # cookies, don't bother.\n if request.COOKIES:\n # We don't invoke auth.login() here, specifically\n # so that this session ID cannot be reused to\n # access URLs that don't permit HTTP\n # authentication.\n request.session['pn_httpauth_uid'] = user.id\n request.session['pn_httpauth_hash'] \\\n = user.get_session_auth_hash()", "def auth_user():\n\n logging.info(request.headers)\n validate(request)", "def set_logged_in_cookies(request, response, user):\n # Note: The user may not yet be set on the request object by this time,\n # especially during third party authentication. 
So use the user object\n # that is passed in when needed.\n\n if user.is_authenticated and not user.is_anonymous:\n\n # JWT cookies expire at the same time as other login-related cookies\n # so that cookie-based login determination remains consistent.\n cookie_settings = standard_cookie_settings(request)\n\n _set_deprecated_logged_in_cookie(response, cookie_settings)\n _set_deprecated_user_info_cookie(response, request, user, cookie_settings)\n _create_and_set_jwt_cookies(response, request, cookie_settings, user=user)\n CREATE_LOGON_COOKIE.send(sender=None, user=user, response=response)\n\n return response", "def get_auth_headers():\n\n auth_type = \"Basic\"\n if request.headers.get('UseXBasic'):\n auth_type = \"XBasic\"\n\n return {\n 'WWW-Authenticate': '%s realm=\"Login Required\"' % auth_type\n }", "def get_headers(self, session, **kwargs):\n token = self.get_token(session)\n\n if not token:\n return None\n\n return {IDENTITY_AUTH_HEADER_NAME: token}", "def get_authenticate_header(self):\n pass", "def get_headers(self):\r\n return {\r\n 'authenticate': {\r\n 'complexType': 'PortalLoginToken',\r\n 'userId': self.user_id,\r\n 'authToken': self.auth_token,\r\n }\r\n }", "def obtain_auth_cookies(self):\n\n try:\n r = requests.get(self.url, auth=(self.username, self.password))\n r.raise_for_status()\n except requests.exceptions.HTTPError as e:\n raise OpendaylightAuthError(msg=\"Failed to authenticate with \"\n \"OpenDaylight: %s\" % e)\n except requests.exceptions.Timeout as e:\n raise OpendaylightAuthError(msg=\"Authentication Timed Out: %s\" % e)\n\n jsessionid = r.cookies.get('JSESSIONID')\n jsessionidsso = r.cookies.get('JSESSIONIDSSO')\n if jsessionid and jsessionidsso:\n self.auth_cookies = dict(JSESSIONID=jsessionid,\n JSESSIONIDSSO=jsessionidsso)", "def authenticate_header(self, request):\n return '{0} realm=\"{1}\"'.format(settings.JWT_AUTH_HEADER_PREFIX,\n self.www_authenticate_realm)", "def on_before(self, controller):\n session_id = controller.get_cookie(self.session_config.cookie_id)\n cookie_id = str(self.session_config.auth_cookie)\n userid = controller.get_secure_cookie(cookie_id)\n user = None\n if userid:\n sname = self.session_config.auth_service\n logger.debug(self.application.models)\n auth_service = self.application.models[sname]\n user = auth_service.auth(userid)\n if user:\n if not session_id:\n session_id = self.gen_session_id(controller)\n setattr(user, 'just_signin', True)\n setattr(user, 'session_id', session_id)\n threadlocal.set_user(user)\n \n if not session_id:\n session_id = self.gen_session_id(controller)\n threadlocal.set_sessionid(session_id)\n threadlocal.set_ip(controller.request.remote_ip)\n if session_id:\n controller.set_cookie(self.session_config.cookie_id, session_id)\n\n if not user and controller.require_auth:\n h = controller.request.headers.get('X-Requested-With', None)\n if h and h == 'XMLHttpRequest':\n raise tornado.web.HTTPError(403, self.__class__.__name__)\n else:\n if controller.request.method in (\"GET\", \"HEAD\"):\n url = controller.get_login_url()\n if \"?\" not in url:\n if urlparse.urlsplit(url).scheme:\n # if login url is absolute, make next absolute too\n next_url = controller.request.full_url()\n else:\n next_url = controller.request.uri\n url += \"?\" + urllib.urlencode(dict(next=next_url))\n controller.redirect(url)\n else:\n raise tornado.web.HTTPError(403, self.__class__.__name__)", "def set_auth_header(self):\n self.auth_header = self.get_auth_header(self.login, self.password)\n return True if self.auth_header else False", 
"def __http_build_headers(self, with_authentication):\n\n dynamic_headers = {\n 'timestamp': str(self.__current_milli_time())\n }\n if with_authentication and self.__login_token:\n dynamic_headers['Authorization'] = 'Bearer ' + self.__login_token\n \n dynamic_headers.update(self.__http_default_headers)\n return dynamic_headers", "def auth_header(self):\n return self._auth_header", "def do_login(self):\n url = self.get_url('/accounts/login')\n cookies = None\n\n client = requests.session()\n csrf = None\n try:\n csrf = client.get(url).cookies.get('csrftoken')\n except RequestException as e:\n logger.warning('Unable to retrieve csrf: {}'.format(e))\n\n data = {\n 'username': self.auth[0],\n 'password': self.auth[1],\n 'csrfmiddlewaretoken': csrf,\n 'next': '/'\n }\n try:\n response = client.post(url, data=data, headers=dict(Referer=url))\n except RequestException as e:\n logger.warning('Unable to login to {} ({})'.format(self.name, e))\n else:\n if response.status_code == 200:\n cookies = {}\n for cookie in response.request.headers.get('Cookie').split(';'):\n cookie = cookie.strip()\n session = cookie.split('sessionid=')\n if len(session) == 2:\n sessionid = session[-1]\n cookies = dict(sessionid=sessionid)\n break\n return cookies", "def get_request_headers(self):\n return {\n 'Authorization': 'JWT ' + self.get_authorization_token()\n }", "def logged_in(request):\n ctx = {\n 'version': version,\n 'last_login': request.session.get('social_auth_last_login_backend')\n }\n return render_to_response('content/logged_in.html', ctx, RequestContext(request))", "def headers(self):\r\n return {\r\n 'Content-type': 'application/json',\r\n 'Accept': 'application/json',\r\n 'X-CSRFToken': self.session_cookies.get('csrftoken', '')\r\n }", "def get_auth_headers(self):\n # type: () -> AnyHeadersContainer\n headers = {}\n if self.request and self.request.auth_headers:\n headers = self.request.auth_headers.copy()\n return CaseInsensitiveDict(headers)", "def get_auth_cookies(self):\n # type: () -> CookiesTupleType\n cookies = []\n if self.request and self.request.http_request:\n for name in [\"Cookie\", \"Set-Cookie\"]:\n headers = get_cookie_headers(self.request.http_request, name)\n cookies.extend([(key, value) for key, value in headers.items()])\n return cookies", "def get_auth_header(self, login, password):\n json = self.request('post',\n '/auth/login',\n json={'uid': login, 'password': password},\n msg='authenticating at {} with user {}'.format(self.admin_url, login),\n errorfatal=False,\n retfmt='json',\n autoauth=False\n )\n if json:\n return {'Authorization': 'token=%s' % json['token']}\n else:\n return None", "def authenticate(self, request):\n\n # Get the underlying HttpRequest object\n request = request._request\n user = getattr(request, 'user', None)\n\n # Unauthenticated, CSRF validation not required\n if not user or not user.is_active:\n return None\n\n #self.enforce_csrf(request)\n\n # CSRF passed with authenticated user\n return (user, None)", "def add_auth_to_headers(self):\n if not hasattr(self, \"headers\"):\n self.headers = {\"Content-Type\": \"application/json\"}\n\n login = {\"account_number\": self.account[\"account_number\"],\n \"pin\": self.account[\"pin\"]}\n token = json.loads(self.client.post(\n \"/accounts/login\",\n data=json.dumps(login),\n headers=self.headers).get_data())[\"token\"]\n self.headers[\"Authorization\"] = \"Bearer \" + token", "def _headers(self) -> Mapping[str, str]:\n return self.auth.headers() if self.auth else {}", "def check_authentication(self):\n try:\n 
cookies = os.environ['HTTP_COOKIE'].split('; ')\n except KeyError:\n cookies = []\n for c in cookies:\n prefix = Auth.AUTH_COOKIE_NAME + '='\n if (c.startswith(prefix) and\n self.is_authentication_token(c[len(prefix):])):\n return True\n print 'Status: 403 Forbidden'\n print 'Content-Type: application/json'\n print self.logout_headers()\n print json.JSONEncoder().encode({'error': 'Not authenticated.'})\n sys.exit(1)", "def login(request, template_name='registration/login.html',\n redirect_field_name=REDIRECT_FIELD_NAME,\n authentication_form=AuthenticationForm,\n current_app=None, extra_context=None): \n is_ajax = False\n \n if request.is_ajax():\n is_ajax = True\n \n if request.user.is_authenticated():\n return HttpResponseRedirect( \"/\" )\n \n redirect_to = request.REQUEST.get(redirect_field_name, '')\n\n if request.method == \"POST\":\n form = authentication_form(data=request.POST)\n if form.is_valid():\n netloc = urlparse.urlparse(redirect_to)[1]\n\n # Use default setting if redirect_to is empty\n if not redirect_to:\n redirect_to = settings.LOGIN_REDIRECT_URL\n\n # Security check -- don't allow redirection to a different\n # host.\n elif netloc and netloc != request.get_host():\n redirect_to = settings.LOGIN_REDIRECT_URL\n \n # Okay, security checks complete. Log the user in.\n auth_login(request, form.get_user())\n\n if request.session.test_cookie_worked():\n request.session.delete_test_cookie()\n\t\t\t#COMMENT-11142013: I have to comment out the next two lines because of error\n # revision.user = form.get_user()\n # revision_meta( request, 'Logging-in', \"Session\" )\n \n if not request.POST.has_key( \"stay_signed\" ):\n request.session.set_expiry( 0 )\n \n else:\n request.session[ \"stay_signed\" ] = True\n\n return HttpResponseRedirect(redirect_to)\n else:\n form = authentication_form(request)\n\n request.session.set_test_cookie()\n\n current_site = get_current_site(request)\n\n context = {\n 'form': form,\n redirect_field_name: redirect_to,\n 'site': current_site,\n 'site_name': current_site.name,\n\t'is_ajax': is_ajax,\n }\n context.update(extra_context or {})\n return render_to_response(template_name, context,\n context_instance=RequestContext(request, current_app=current_app))", "def before_request():\n # If request is for static content then skip\n if '/static/' in request.path:\n return\n # Sets the transaction trace id into the global object if it has been provided in the HTTP header from the caller.\n # Generate a new one if it has not. We will use this in log messages.\n g.trace_id = request.headers.get('X-Trace-ID', uuid.uuid4().hex)\n g.application_permissions = Permissions\n # We also create a session-level requests object for the app to use with the header pre-set, so other APIs will\n # receive it. 
These lines can be removed if the app will not make requests to other LR APIs!\n g.requests = requests.Session()\n g.requests.headers.update({'X-Trace-ID': g.trace_id})\n\n if '/health' in request.path:\n return\n\n session_key = None\n if Session.session_cookie_name in request.cookies:\n session_key = request.cookies[Session.session_cookie_name]\n\n if session_key is None:\n return build_no_session_response('/sign-in')\n\n sess = Session(session_key)\n\n if not sess.valid():\n # Redirect to logout to clear session as invalid\n return build_no_session_response('/logout')\n\n # Shouldn't be possible to not have the JWT, but redirect to /login instead of\n # 'Something went wrong' if the JWT is missing\n if sess.user is not None and sess.user.jwt is not None:\n jwt = sess.user.jwt\n else:\n return build_no_session_response('/sign-in')\n\n g.session = sess\n g.requests.headers.update({'Authorization': 'Bearer ' + jwt})", "def auth_headers(current_user_token: str) -> Dict[str, str]:\n return {\"Authorization\": f\"Bearer {current_user_token}\"}", "def session(self, request):\n if request.method != 'GET':\n return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n data = {'valid': request.user.is_authenticated()}\n return Response(data, status=status.HTTP_200_OK)" ]
[ "0.616512", "0.58029056", "0.5774238", "0.57243794", "0.572148", "0.5703384", "0.5694575", "0.56380635", "0.55908483", "0.55858535", "0.5576539", "0.556314", "0.55448025", "0.54460526", "0.54270995", "0.54115784", "0.54071826", "0.5374451", "0.53601944", "0.53586346", "0.53347373", "0.5317584", "0.5317043", "0.53040475", "0.52932334", "0.52784866", "0.52522516", "0.5241957", "0.52247864", "0.52229506" ]
0.71679306
0
Return the HTTP headers required to log the user out. Specifically, delete and invalidate the auth token and CSRF token.
def logout_headers(self):
    self.regenerate_authentication_token()
    return ('Set-Cookie: %s=; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT\n'
            'Set-Cookie: %s=; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT\n'
            'Set-Cookie: %s=; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT\n' % (
                self.LOGGED_IN_COOKIE_NAME, self.AUTH_COOKIE_NAME,
                self.CSRF_COOKIE_NAME))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forget(self, environ, identity):\n challenge = \"MAC+BrowserID url=\\\"%s\\\"\" % (self.token_url,)\n return [(\"WWW-Authenticate\", challenge)]", "def logout(request):\n if request.method == 'POST':\n request.token.delete()\n return json_response({\n 'status': 'success'\n })\n elif request.method == 'OPTIONS':\n return json_response({})\n else:\n return json_response({\n 'error': 'Invalid Method'\n }, status=405)", "def logout(request):\n request.user.auth_token.delete()\n return Response({}, status=status.HTTP_200_OK)", "def logout(self, request):\n request.auth[1].delete()\n return Response(None, status=status.HTTP_204_NO_CONTENT)", "def logout(self, request):\n pass", "def logout_other(self, request):\n tokens_to_delete = request.user.auth_token_set.exclude(\n pk=request.auth[1].pk)\n num = tokens_to_delete.delete()\n return Response({\"deleted_sessions\": num[0]})", "def logout(request):\n # user_name == user_id\n required_fields = ['user_id', 'token']\n\n # Check if the post request contain the required fields\n if set(required_fields) != set(list(request.data.keys())):\n return Response({'error': str('Missing required fields!')}, status=status.HTTP_400_BAD_REQUEST)\n\n # POST Request content\n data = request.data\n\n # check for not allowed characters\n if check_special_characters(str(data['user_id'])) or check_special_characters(str(data['token'])):\n return Response({'error': str('Unaccepted character passed!')},\n status=status.HTTP_400_BAD_REQUEST)\n\n # Here check if user_id matches the token with the database\n if not db.check_user(data['user_id'], data['token']):\n return Response({'error': str('UNAUTHORIZED')}, status=status.HTTP_401_UNAUTHORIZED)\n\n # Here let db know we are logging out by removing user's token\n if not db.remove_token(data['user_id']):\n return Response({'error': str('Error when logging out!')}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n return Response({'status': 'success'})", "def logout():\n session.pop('user', None)\n return jsonify(csrf_token = generate_csrf_token())", "def logout():\n # TODO: handle this logout properly, very weird implementation.\n identity = get_jwt_identity()\n if not identity:\n print(\"Session Expired\")\n return jsonify({\"msg\": \"Token invalid\"}), Status.HTTP_BAD_UNAUTHORIZED\n logger.info('Logged out user !!')\n return 'logged out successfully', Status.HTTP_OK_BASIC", "def logout():\n resp = Response(render_template('admin/login.html',\n message='Your session has been canceled.'))\n unset_jwt_cookies(resp)\n return resp", "def logout():\n resp = Response(render_template('admin/login.html',\n message='Your session has been canceled.'))\n unset_jwt_cookies(resp)\n return resp", "def logout(self):\n\n self.response.headers.add_header('Set-Cookie', 'user_id=; Path=/')", "def logout(self):\n self.response.headers.add_header('Set-Cookie', 'user_id=; Path=/')", "def logout(self):\n self.response.headers.add_header('Set-Cookie', 'user_id=; Path=/')", "def logout():\n body = request.json\n user_id = body.get('user_id')\n user = User.get(User.id == user_id).username\n clear_token(user)\n return HTTPResponse(status=200, body={\"message\":\"Log out succesful.\"})", "def logout(self, request, *args, **kwargs):\n token = get_object_or_404(Token, key=request.auth)\n token.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)", "def logout_all(self, request):\n request.user.auth_token_set.all().delete()\n return Response(None, status=status.HTTP_204_NO_CONTENT)", "def Logout(request):\n logout(request)\n return 
Response({\"success\": \"Successfully logged out.\"}, status=status.HTTP_200_OK)", "def logout():\n logout_user()\n\n return {\"status\": 200, \"message\": \"Logged Out\"}, 200", "def forget(self, request):\n return [('WWW-Authenticate', 'Bearer realm=\"%s\"' % self.realm)]", "def forget(self, request):\n return self._get_challenge_headers(request, check_stale=False)", "def logout(request):\n auth_logout(request)\n return Response({'message': 'Logged out'})", "def auth_logout(request):\n\n \"\"\"\n user = getattr(request, 'user', None)\n if hasattr(user, 'is_authenticated') and not user.is_authenticated():\n user = None\n user_logged_out.send(sender=user.__class__, request=request, user=user)\n \"\"\"\n request.session.flush()\n \"\"\"\n if hasattr(request, 'user'):\n from django.contrib.auth.models import AnonymousUser\n request.user = AnonymousUser()\n \"\"\"\n ri = rest_interface(opensso_url=OPEN_AM_SERVER_URL)\n\n if OPENAM_COOKIE_NAME_FOR_TOKEN in request.COOKIES:\n unsigned_token = request.COOKIES[OPENAM_COOKIE_NAME_FOR_TOKEN]\n print('logout: token ='+request.COOKIES[OPENAM_COOKIE_NAME_FOR_TOKEN])\n print('logout: unsigned_token ='+unsigned_token)\n ri.do_logout(subject_id=unsigned_token)\n #del request.COOKIES[OPENAM_COOKIE_NAME_FOR_TOKEN]\n #request.COOKIES[OPENAM_COOKIE_NAME_FOR_TOKEN] = 'logged_out'\n ##ssouser = SSOUser(False)\n ##request.ssouser = ssouser", "def logout():\n logout_user()\n return {'message': 'User logged out'}", "def logout(client):\n\n return client.post('/v1/auth/revoke')", "def logout():\n session['logged_in'] = False\n return '', 204", "def logout():\n return jsonify(result=logout_user())", "def logout(request):\n\n headers = forget(request)\n url = request.route_url('auth_logout', _app_url=get_app_url(request))\n return HTTPFound(location=url, headers=headers)", "def delete(self, request):\n serializer = UserLogoutSerializer(data=request.data)\n if serializer.is_valid(raise_exception=True):\n token = RefreshToken(serializer.validated_data[\"refresh\"])\n token.blacklist()\n return Response(status=status.HTTP_204_NO_CONTENT)", "def deauthorize():\n\tPAYLOAD_HEADERS.pop('Authorization', None)" ]
[ "0.663149", "0.6626839", "0.64436823", "0.6353375", "0.62939614", "0.6245303", "0.6228222", "0.6208567", "0.61947745", "0.6191976", "0.6191976", "0.6157748", "0.6107373", "0.6107373", "0.6101193", "0.6085265", "0.6074504", "0.60680485", "0.6061079", "0.60475695", "0.6046746", "0.6013527", "0.60062474", "0.59887993", "0.5967005", "0.5942667", "0.5889338", "0.5887865", "0.58769935", "0.5860972" ]
0.7579529
0
Return the current authentication token if it is still valid, else None.
def __current_authentication_token(self):
    if os.path.isfile(self.token_filename):
        with open(self.token_filename, 'r') as f:
            (stored_token, expires) = f.read().split(' ')
        t = time.time()
        if int(expires) > t:
            return stored_token
    return None
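The on-disk format this reader parses matches what regenerate_authentication_token (in a later record of this section) writes: a hex token and a Unix expiry timestamp separated by one space. A minimal round-trip sketch, written in Python 2 style to match the surrounding snippets (the 3600-second lifetime is an arbitrary example value):

# Hypothetical round-trip of the "<hex-token> <expiry>" file format.
token = os.urandom(16).encode('hex')              # Python 2 hex encoding
record = '%s %d' % (token, int(time.time()) + 3600)
stored_token, expires = record.split(' ')
assert int(expires) > time.time()                 # token still valid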
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def auth_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"auth_token\")", "def token(self):\n if self.is_auth_needed():\n self.authorize()\n\n return self.get_from_cache('token')", "def get_auth_token():\n auth_token_value = memcache.get('authtoken')\n if not auth_token_value:\n entity = Token.get_by_key_name(key_names = 'authtoken')\n if entity:\n auth_token_value= entity.value\n memcache.set('authtoken', auth_token_value)\n else:\n auth_token_value = None\n return auth_token_value", "def token(self):\n if not self._token or self._expires <= datetime.now():\n self._request_token()\n return self._token", "def token(self):\n if not self._token:\n self._token = self.authenicate().token\n\n return self._token", "def _get_token(self):\n if self._access_token is None or self._is_expired():\n self._refresh_token()\n return self._access_token", "def _get_token(self):\n return user.get_token()", "def GetToken(self):\n if self.auth_token_:\n return self.auth_token_\n raise RuntimeError('ClientLoginAuthPolicy is not logged in.')", "def get_auth_token():\n if CFG.auth_enabled:\n auth_token = get_keystone_token()\n else:\n auth_token = 'notrealtoken'\n\n return auth_token", "def _get_auth_token(self):\n\n __logger__.debug(\"Getting auth Token\")\n return self.keystone_client.auth_ref['token']['id']", "def __get_authentication_token(self):\n cache = load_json(self._tokenPath)\n return cache[\"authentication_token\"]", "def get_token(self):\n if not self.is_valid():\n logger.warn(\"TokenWall form data is not valid.\")\n return None\n \n tt = self.cleaned_data['token']\n logger.debug(\"Looking for token '%s'\"%tt)\n return Token.objects.get(value=tt)", "def token(self):\n\n if not self.requests:\n return None\n return self.requests[0].token", "def get_token(self): # pragma: no cover\n\t\treturn (session.get(\"access_token\"), \"\")", "def retrieve_token():\n try:\n deserialized_message = json.loads(peek_app_token())\n\n expires_at = deserialized_message.get('expires_at')\n # Token is good, return it\n if expires_at and check_expired_time(expires_at):\n return deserialized_message.get('token')\n else: # Token expired, refresh it\n refresh_token()\n\n deserialized_message = peek_app_token()\n expires_at = deserialized_message.get('expires_at')\n # Token is good, return it\n try:\n assert(expires_at and check_expired_time(expires_at))\n return deserialized_message.get('token')\n except:\n raise # When all else fails\n\n except Exception as exc:\n log.error(f'Could not refresh token.\\n{exc}')\n traceback.print_exc(file=sys.stderr)\n\n return None", "def authenticationToken(self):\n return self.authToken", "def current_token() -> object:\n return get_async_backend().current_token()", "def get_token(self):\n token_model = TokenModel.find_by_user_id(self.id)\n return token_model.token if token_model else None", "def get_token():\n if g.current_user.is_anonymous or g.token_used:\n return unauthorized('Invalid credentials')\n return jsonify({'token': g.current_user.generate_auth_token(\n expiration=3600), 'expiration': 3600})", "def get_token():\n if g.current_user.is_anonymous or g.token_used:\n return unauthorized('Invalid credentials')\n return jsonify({'token': g.current_user.generate_auth_token(\n expiration=3600), 'expiration': 3600})", "def _get_token(self): # pragma: no cover\n\n tokenCookie = None\n for cookie in self._session.cookies:\n if \"mast_token\" in cookie.name:\n tokenCookie = cookie\n break\n\n if not tokenCookie:\n warnings.warn(\"No auth token found.\", 
AuthenticationWarning)\n\n return tokenCookie", "def auth0_token():\n redis_conn = token_redis_connection()\n token = redis_conn.get('auth0_token')\n token_valid = check_if_token_is_valid(token)\n if token is None or not token_valid:\n try:\n token = get_fresh_auth0_management_token()\n except (ValueError, requests.HTTPError) as e:\n logger.error('Failed to retrieve Auth0 token: %r', e)\n return\n redis_conn.set('auth0_token', token)\n return token", "def get_token(self):\n logging.debug(\"In the Token get_token() class method.\")\n\n if datetime.datetime.now() > self.token_expiry:\n logging.info(\"Token Expired.\")\n self.generate_tokens()\n return self.access_token", "def get_token(self):\n token = self._session.token\n return token", "def validate_token(self, token):\n\n try:\n if not token:\n raise AuthException(\"Needed a token or Authorization HTTP header\", http_code=HTTPStatus.UNAUTHORIZED)\n\n # try to get from cache first\n now = time()\n token_info = self.token_cache.get(token)\n if token_info and token_info[\"expires\"] < now:\n # delete token. MUST be done with care, as another thread maybe already delete it. Do not use del\n self.token_cache.pop(token, None)\n token_info = None\n\n # get from database if not in cache\n if not token_info:\n token_info = self.db.get_one(\"tokens\", {\"_id\": token})\n if token_info[\"expires\"] < now:\n raise AuthException(\"Expired Token or Authorization HTTP header\", http_code=HTTPStatus.UNAUTHORIZED)\n\n return token_info\n\n except DbException as e:\n if e.http_code == HTTPStatus.NOT_FOUND:\n raise AuthException(\"Invalid Token or Authorization HTTP header\", http_code=HTTPStatus.UNAUTHORIZED)\n else:\n raise\n except AuthException:\n if self.config[\"global\"].get(\"test.user_not_authorized\"):\n return {\"id\": \"fake-token-id-for-test\",\n \"project_id\": self.config[\"global\"].get(\"test.project_not_authorized\", \"admin\"),\n \"username\": self.config[\"global\"][\"test.user_not_authorized\"], \"admin\": True}\n else:\n raise\n except Exception:\n self.logger.exception(\"Error during token validation using internal backend\")\n raise AuthException(\"Error during token validation using internal backend\",\n http_code=HTTPStatus.UNAUTHORIZED)", "def get_token(self, refresh_if_expired=False):\n if refresh_if_expired and self.test_token() is False:\n return self.refresh_token()\n\n if self.API_TOKEN is None:\n # try and get one\n return self.refresh_token()\n else:\n return self.API_TOKEN", "def get_token(self):\n if time.time() > self.expiration:\n # need to re-authenticate and get a new token and catalog\n self._authenticate()\n \n return self.token, self.catalog", "def access_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_token\")", "def access_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_token\")", "def access_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_token\")" ]
[ "0.75732124", "0.7552719", "0.7514238", "0.74762905", "0.7412203", "0.7278422", "0.72320783", "0.7199977", "0.7183309", "0.7131585", "0.71267015", "0.7103127", "0.7067211", "0.7020245", "0.6927801", "0.6918491", "0.6909502", "0.68631387", "0.6821521", "0.6821521", "0.6819494", "0.67495495", "0.6723382", "0.671273", "0.6705731", "0.6704044", "0.6703555", "0.66731155", "0.66731155", "0.66731155" ]
0.78239816
0
Return True iff the candidate authentication token matches the stored one.
def is_authentication_token(self, candidate):
    current_token = self.__current_authentication_token()
    # TODO: Add expiry checking
    if (current_token and
            self.__valid_token_format(current_token) and
            self.__valid_token_format(candidate) and
            constant_time_equals(current_token, candidate)):
        return True
    else:
        return False
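constant_time_equals is referenced here but not defined in this record. A common hand-rolled implementation (an assumption, not the source's code; hmac.compare_digest is the standard-library equivalent from Python 2.7.7 onward) XORs corresponding bytes so the comparison time does not depend on where the inputs first differ:

def constant_time_equals(a, b):
    # Examine every byte instead of returning at the first mismatch,
    # so timing reveals only whether the lengths matched.
    if len(a) != len(b):
        return False
    result = 0
    for x, y in zip(bytearray(a), bytearray(b)):
        result |= x ^ y
    return result == 0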
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def authenticated(self):\n return self.token is not None", "def check_token(self, user, token):\n try:\n data = signing.loads(token, max_age=properties.TOKEN_LOGIN_TIMEOUT)\n except signing.BadSignature:\n return False\n\n return (\n (\n (data['last_login'] is None and user.last_login is None) or\n data['last_login'] == user.last_login.strftime('%s')\n ) and\n data['user_id'] == user.pk\n )", "def verify_local_token(self, token):\n return token == self.master_local_token.get_token()", "def authenticated(self):\n client_token = self.get_cookie(\"PA-client-token\")\n if not client_token:\n print(\"no cookie\")\n return False\n\n headers = cherrypy.request.headers\n if \"Remote-Addr\" not in headers:\n print(\"no IP\")\n return False\n\n to_hash = \"Python-Aboard \" + headers.get(\"Remote-Addr\", \"none\")\n to_hash += \" \" + headers.get(\"User-Agent\", \"unknown\")\n to_hash = to_hash.encode()\n token = hashlib.sha256(to_hash).digest()\n return client == client_token", "def check_token(self, token):\n if not token or not self.verification_token:\n return False\n if not constant_time_compare(token, self.verification_token):\n return False\n if self.is_verified:\n return False\n age = timezone.now() - self.added_date\n if age >= timedelta(days=AssociatedEmail.VERIFICATION_TIMEOUT_DAYS):\n return False\n return True", "def authenticated():\n if 'user_id' in session and 'access_token' in session:\n user = db_session.query(User).filter_by(id=session['user_id']).first()\n\n if user:\n return user.access_token == session['access_token']\n return False", "def is_authenticated(self):\n if self._token is None:\n self.authenticate()\n\n return self._token is not None", "def has_token(self):\n user_id = getattr(self, '_id', None)\n user_token = getattr(self, 'token', None)\n if user_id is not None and user_token is not None:\n return True\n return False", "def is_authenticated(self):\n if not self.token:\n return False\n\n try:\n self.lookup_token()\n return True\n except Forbidden:\n return False\n except InvalidPath:\n return False\n except InvalidRequest:\n return False", "def authenticated(self, user_token, **validation_context):\n token = self.token_storage.get(user_token)\n if token and token.validate(user_token, **validation_context):\n return True\n\n return False", "def is_authenticated(self):\n return bool(get_auth_token())", "def is_csrf_token(self, candidate_csrf_token):\n valid_token = bytearray(self.get_csrf_token())\n candidate = bytearray(candidate_csrf_token)\n return constant_time_equals(valid_token, candidate)", "def verify_token(self, token):\n return False", "def authenticate_user(data):\n \n try:\n auth_token = data[\"auth_token\"]\n user_token = Token.objects.get(username=data[\"username\"])\n if user_token.token == auth_token:\n return True\n except:\n return False\n return False", "def verify_token(self, token):\n _now = timezone.now()\n\n if (\n (self.token is not None)\n and (token == self.token)\n and (_now < self.valid_until)\n ):\n self.token = None\n self.valid_until = _now\n self.save()\n\n return True\n else:\n return False", "def correct_token(name, token):\n if not User.created(name):\n return False\n user = User.get_user(name)\n return user.info['token'] == token", "def check_auth(self, token, allowed_roles, resource, method):\n if resource is None:\n resource = ''\n\n # Remove integer from end of string\n try:\n resource = resource.rstrip(string.digits)\n except:\n pass\n # print('Token: ', token, resource, method)\n # print(users.keys())\n try:\n if token in 
users.keys() and method in users[token]['resources'][resource]['methods']:\n self.resource_lookup = users[token]['resources'][resource]['lookup']\n\n self.user_id = users[token]['id']\n return True\n except: # Keyerror\n pass\n\n \"\"\"\n for app in apps:\n\n if token == app['token']:\n current_app = app\n return True\n \"\"\"\n\n return False", "def verify_token(auth_token):\n blacklisted_token = TokenBlacklisting.query.filter_by(\n token=str(auth_token)).first()\n if blacklisted_token:\n return True\n return False", "async def authenticate(self, token) -> bool:\n return True", "def authenticateUser(self, postedSecretKey):\n return (not self.deleted) and self.setup_confirmed and self.secret_key == postedSecretKey", "def compare_token(compare, token):\n algorithm, srounds, salt, _ = compare.split(':')\n hashed = hash_token(token, salt=salt, rounds=int(srounds), algorithm=algorithm).encode('utf8')\n compare = compare.encode('utf8')\n if compare_digest(compare, hashed):\n return True\n return False", "def check(self):\n\n us = ServiceLocator.resolve(ServiceLocator.USERS)\n\n user_session = self.get()\n user = self.get_user()\n\n return user is not None and us.verify_auth_token(user_session.token, config.SESSION_EXPIRES)", "def check_token(self, user, token):\n\n # Parse the token\n try:\n ts_b36, hash = token.split(\"-\")\n except ValueError:\n return False\n\n try:\n ts = base36_to_int(ts_b36)\n except ValueError:\n return False\n\n # Check that the timestamp/uid has not been tampered with\n recomputed_token = self._make_token_with_timestamp(user, ts)\n\n log.debug(\"Ricalcolo re_token=%s token=%s\" % (recomputed_token, token))\n if not constant_time_compare(recomputed_token, token):\n return False\n\n # Check the timestamp is within limit\n if (self._num_days(self._today()) - ts) > settings.REFERRAL_TOKEN_RESET_TIMEOUT_DAYS:\n return False\n\n return True", "def check_auth(uid, token, ts_check):\n if token is None:\n token_row = TokenAuth.objects.filter(user_id=uid).order_by(\"-created_at\")[:1]\n else:\n token_row = TokenAuth.objects.filter(user_id=uid, token=token).order_by(\"-created_at\")[:1]\n\n if not token_row:\n return False, None\n\n difference = ts_check - timezone.now()\n\n if difference.days > 90:\n return False, token_row[0].token\n return True, token_row[0].token", "def checkToken( self ):\n\n if ( self.token == None ):\n return False\n else :\n d = {\n \"auth_token\" : str(self.token) ,\n \"method\" : \"flickr.auth.checkToken\",\n \"format\" : \"json\",\n \"nojsoncallback\" : \"1\"\n }\n sig = self.signCall( d )\n\n url = self.urlGen( api.rest, d, sig )\n try:\n res = self.getResponse( url )\n if ( self.isGood( res ) ):\n self.token = res['auth']['token']['_content']\n self.perms = res['auth']['perms']['_content']\n return True\n else :\n self.reportError( res )\n except:\n print(str(sys.exc_info()))\n return False", "def authenticate(self, request=None):\r\n try:\r\n token = request.META.get('HTTP_AUTHORIZATION') or request.REQUEST['key']\r\n accesskey = AccessKey.objects.select_related('user').get(key=token)\r\n request.user = accesskey.user\r\n return request.user and request.user.is_active\r\n\r\n except(KeyError, AccessKey.DoesNotExist):\r\n return False", "def verify_token(*token): # pragma: no cover\n\n if current_app.config.get('IGNORE_AUTH') is True:\n return True\n\n g.user = APITokenModel.verify_token(token[0])\n\n if g.user is None:\n return False\n\n return g.user", "def validate_token():\n global vault_token\n global vault_token_time\n\n if vault_token is 
None:\n return False\n\n return datetime.datetime.now() < vault_token_time", "def __check_token(self) -> bool:\r\n\r\n now = datetime.now(self.__tz)\r\n\r\n if (self.__token_expiration_date - now).total_seconds() < 0:\r\n log.debug('Token needs update!')\r\n return self.__update_token()\r\n return False", "def _verify_token(self, token, request):\n # First check if this request was already verified.\n # `request.bound_data` is an attribute provided by Kinto to store\n # some data that is shared among sub-requests (e.g. default bucket\n # or batch requests)\n if REIFY_KEY not in request.bound_data:\n user_id = None\n client_name = None\n auth_client = self._get_auth_client(request)\n\n for scope, client in request.registry._fxa_oauth_scope_routing.items():\n try:\n profile = auth_client.verify_token(token=token, scope=aslist(scope))\n user_id = profile['user']\n scope = profile['scope']\n client_name = client\n\n # Make sure the bearer token scopes don't match multiple configs.\n routing_scopes = request.registry._fxa_oauth_scope_routing\n intersecting_scopes = [x for x in routing_scopes.keys()\n if x and set(x.split()).issubset(set(scope))]\n if len(intersecting_scopes) > 1:\n logger.warn(\"Invalid FxA token: {} matches multiple config\" % scope)\n return None, None\n\n break\n except fxa_errors.OutOfProtocolError:\n logger.exception(\"Protocol error\")\n raise httpexceptions.HTTPServiceUnavailable()\n except (fxa_errors.InProtocolError, fxa_errors.TrustError) as e:\n logger.debug(\"Invalid FxA token: %s\" % e)\n\n # Save for next call.\n request.bound_data[REIFY_KEY] = (user_id, client_name)\n\n return request.bound_data[REIFY_KEY]" ]
[ "0.6938202", "0.68884546", "0.6865633", "0.68002206", "0.6707966", "0.66578394", "0.6657788", "0.6632563", "0.6583302", "0.6578111", "0.65751725", "0.65728736", "0.6560326", "0.6539685", "0.6510684", "0.649782", "0.649383", "0.64898247", "0.64779943", "0.64613837", "0.6455331", "0.64522225", "0.64264804", "0.64087623", "0.64045954", "0.6373444", "0.6356162", "0.63318324", "0.6315625", "0.6307976" ]
0.84338975
0
Create and store a new random authentication token. Expires old sessions.
def regenerate_authentication_token(self):
    new_token = os.urandom(self.TOKEN_LENGTH).encode('hex')
    expires = int(time.time()) + Auth.SESSION_DURATION
    self.write(self.token_filename, ('%s %d' % (new_token, expires)))
    return new_token
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_new_token(self):\n self.access_token = random_auth_key()", "def generate_new_token(uid):\n random_token = uuid.uuid4()\n token = TokenAuth(user_id=uid, token=random_token)\n token.save()\n return random_token", "def _generate_token(self):\n return sha1(\"%s#%s\" % (time(),\n self.app.cfg['sessions/secret'])).hexdigest()", "def generateAuthToken(self):\n try:\n payload = {\n 'exp': datetime.utcnow() + timedelta(days=0, minutes=30),\n 'iat': datetime.utcnow(),\n 'sub': self.id\n }\n return jwt.encode(payload, current_app.config['SECRET_KEY'], algorithm='HS256').decode()\n except Exception as error:\n print(error)\n return error", "def generate_auth_token(self, expires_in=600):\n return jwt.encode(\n {'STULOGINID': self.STULOGINID, 'exp': time.time() + expires_in},\n app.config['SECRET_KEY'], algorithm='HS256')", "def generate_token(self):\n self.__get_auth_token_and_secret()\n return self.get_token()", "def generate_auth_token(self, expires_in=600):\n return jwt.encode(\n {'loginid': self.loginid, 'exp': time.time() + expires_in},\n app.config['SECRET_KEY'], algorithm='HS256')", "def create_token(self):\n ts_datetime = self.logged_at or self.created_at\n ts = int(mktime(ts_datetime.timetuple()))\n key = base64.encodestring(self.email)\n base = \"{}{}\".format(key, ts)\n salt, hsh = self.password.split('$')\n return \"{}$${}\".format(key, get_hexdigest(salt, base))", "def generate_auth_token(self, expiration):\n ser = Serializer(current_app.config['SECRET_KEY'],\n expires_in=expiration)\n return ser.dumps({'id': self.id}).decode('utf-8')", "def generate_token(self, length=6, valid_secs=300, commit=True):\n self.token = random_number_token(length)\n self.valid_until = timezone.now() + timedelta(seconds=valid_secs)\n if commit:\n self.save()", "def __generate_session_token(self):\n\n return get_random_string(length=32)", "def refresh_auth_token(self):\n self._auth_token = self.generate_auth_token()", "def generate_token():\n return uuid4()", "def generate_token():\n chars = ('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')\n rand = random.SystemRandom()\n random_string = ''.join(rand.choice(chars) for _ in range(40))\n return hmac.new(\n config.SECRET_KEY,\n random_string,\n hashlib.sha256\n ).hexdigest()", "def _generate_jwt_token(self):\n import jwt\n from datetime import datetime, timedelta\n from django.conf import settings\n\n dt = datetime.now() + timedelta(days=60)\n\n token = jwt.encode({\n 'id': self.pk,\n 'username': self.username,\n 'exp': int(dt.strftime('%s')),\n }, settings.SECRET_KEY, algorithm='HS256')\n # print(token)\n return token", "def auth_token_generate(identity_param_val, expires_delta=False):\n access_token = ''\n try:\n if expires_delta is not False:\n expires_delta = timedelta(minutes=expires_delta)\n access_token = create_access_token(identity=identity_param_val, expires_delta=expires_delta)\n except Exception as e:\n print(e)\n\n return access_token", "def new_token(*args, **kwargs):\n return uuid.uuid4().hex", "def _generate_jwt_token(self):\n dt = datetime.now() + timedelta(days=60)\n\n token = jwt.encode({\n 'id': self.pk,\n 'exp': int(dt.strftime('%s'))\n }, settings.SECRET_KEY, algorithm='HS256')\n\n return token.decode('utf-8')", "def generate_jwt(self):\n\n # Generate a random token\n random_token = secrets.token_hex(12)\n\n # Update database\n self.user_in_db.update({'token': random_token})\n User.users_db.put(self.user_in_db)\n\n # Create timestamps for the token\n generated = time.time()\n expires = generated + 
TWO_WEEKS\n\n # Return the generated jwt\n return manage_tokens.encode({\n 'email': self.email,\n 'token': random_token,\n 'generated': generated,\n 'expires': expires,\n })", "def generate_refresh_token(self):\n return gen_api_key(length=self.token_length)", "def generate_auth_token(self):\n token = Serializer(\n app.config['API_SECRET_KEY'],\n expires_in=app.config['JWT_TOKEN_EXPIRATION']\n )\n return token.dumps({'id': self.id})", "def __get_new_token(self):\n keystone = {\n 'username': self.username,\n 'password': self.password,\n 'project_name': self.project,\n 'auth_url': self.auth_uri\n }\n\n ks_client = ksclient.KSClient(**keystone)\n convert_time = ciso8601.parse_datetime(str(ks_client._keystone.auth_ref.expires))\n token_exp = time.mktime(convert_time.timetuple())\n #tmp_str = str(convert_time).split('.')\n #token_exp = time.mktime(time.strptime(tmp_str[0], '%Y-%m-%d %H:%M:%S'))\n factor = self.__correct_token_time()\n\n print (\"Get new Token: {}\".format(ks_client.token))\n print (\"Expiration time in UTC: {}\".format(ks_client._keystone.auth_ref.expires))\n print (\"Expiration time in seconds since beginning of time: {}\".format(token_exp))\n print (\"The FACTOR: {}\".format(factor))\n return ks_client.token, (token_exp + factor)", "def password_token_oracle():\n past_time = int(time.time()) - random.randint(1, 3600)\n return generate_password_reset_token(past_time), past_time", "def create_auth_token():\n data = get_request_data(request)\n address = data.get(\"address\")\n expiration = int(data.get(\"expiration\"))\n\n pk = get_provider_private_key(use_universal_key=True)\n token = jwt.encode({\"exp\": expiration, \"address\": address}, pk, algorithm=\"HS256\")\n token = token.decode(\"utf-8\") if isinstance(token, bytes) else token\n\n valid, message = is_token_valid(token, address)\n if not valid:\n if message == \"Token is deleted.\":\n force_restore_token(token)\n else:\n return jsonify(error=message), 400\n\n return jsonify(token=token)", "def refresh_token():\n current_user = get_jwt_identity()\n if current_user is None:\n return abort(401)\n response = deepcopy(AUTH_OKAY)\n response['payload']['access_token'] = create_access_token(\n identity=current_user,\n expires_delta=EXPIRY_DURATION\n )\n response['payload']['expires_in'] = EXPIRY_DURATION.seconds\n response['payload']['not_before'] = int(time() + EXPIRY_DURATION.seconds)\n return jsonify(response['payload']), response['status_code']", "def create_token(self,uid):\n token_str = self.get_random(5) + str(uid) + str(int(time.time()))\n m = hashlib.md5()\n m.update(token_str)\n return m.hexdigest()", "def newToken(self, description):\n self.__require_privilaged_access()\n with DBSession(self.__config_db) as session:\n token = generate_session_token()\n user = self.getLoggedInUser()\n groups = ';'.join(self.__auth_session.groups)\n session_token = Session(token, user, groups, description, False)\n\n session.add(session_token)\n session.commit()\n\n LOG.info(\"New personal access token '%s...' 
has been generated \"\n \"by '%s'.\", token[:5], self.getLoggedInUser())\n\n return SessionTokenData(token,\n description,\n str(session_token.last_access))", "def refresh_token(self):\n token = json.loads(get_metadata(\n 'instance/service-accounts/%s/token' % self.service_account,\n ))\n seconds = token['expires_in'] - 60\n self._expiration_time = (\n datetime.datetime.now() + datetime.timedelta(seconds=seconds)\n )\n self._token = token['access_token']", "def _create_new_session_token(self):\n session_token = self.__generate_session_token()\n payload = {\n 'token' : session_token\n }\n self.encoded_token = jwt.encode(payload, 'secret', algorithm='HS256')\n Token.objects.create(session_token=session_token)", "def generate_new_token(cls):\n token = proquint.generate()\n\n # Try 100 times to generate a unique token.\n TRIALS = 100\n for __ in range(TRIALS):\n token = proquint.generate()\n if SecretToken.exists(token):\n continue\n break\n # after TRIALS attempts and we didn't get a unique token,\n # just raise an error.\n # See https://stackoverflow.com/a/9980160 on what for-else loop does.\n else:\n raise ValueError(\"Cannot generate new token\")\n\n # We found a unique token! Save it\n return token" ]
[ "0.8077118", "0.7244117", "0.69622195", "0.69050765", "0.689591", "0.6889207", "0.6816747", "0.6809456", "0.6781932", "0.675099", "0.6749914", "0.67392135", "0.67329174", "0.6729028", "0.66792405", "0.6672653", "0.6654001", "0.66211134", "0.650831", "0.6499091", "0.6493759", "0.6482438", "0.6458738", "0.64555806", "0.64463985", "0.64167076", "0.6416105", "0.6402014", "0.6366937", "0.63149804" ]
0.8331127
0
In the context of a CGI request, check whether an authentication cookie is present and valid. If not, render an error.
def check_authentication(self):
    try:
        cookies = os.environ['HTTP_COOKIE'].split('; ')
    except KeyError:
        cookies = []
    for c in cookies:
        prefix = Auth.AUTH_COOKIE_NAME + '='
        if (c.startswith(prefix) and
                self.is_authentication_token(c[len(prefix):])):
            return True
    print 'Status: 403 Forbidden'
    print 'Content-Type: application/json'
    print self.logout_headers()
    print json.JSONEncoder().encode({'error': 'Not authenticated.'})
    sys.exit(1)
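A CGI script would call this as a guard before emitting its normal response; on failure the method itself prints the 403 status, logout headers, and a JSON error, then exits. A minimal sketch, assuming the class is named Auth and needs no constructor arguments (both assumptions), in the same Python 2 style as the source:

# Hypothetical CGI entry point guarded by check_authentication().
auth = Auth()
auth.check_authentication()  # exits with a 403 JSON error if unauthenticated
print 'Content-Type: application/json'
print ''
print json.JSONEncoder().encode({'status': 'ok'})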
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def user_must_authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def check_auth(*args, **kwargs):\n r = cherrypy.request\n s = cherrypy.session\n\n username = s.get(USERNAME_SESSION_KEY, None)\n course = s.get(CUR_CRS_SESSION_KEY, None)\n # require a course to be selected\n if username and not course and r.path_info != '/auth/course':\n raise cherrypy.HTTPRedirect(\"/auth/course\")\n\n conditions = r.config.get('auth.restrict.require', None)\n if conditions is not None:\n if username:\n r.login = username\n for condition in conditions:\n # A condition is just a callable that returns true or false\n if not condition():\n raise cherrypy.HTTPRedirect(\"/auth/not-authorized\")\n else:\n s[FROMPATH_SESSION_KEY] = r.path_info\n raise cherrypy.HTTPRedirect(\"/auth/login\")", "def check_auth(*args, **kwargs):\n\tconditions = cherrypy.request.config.get('auth.require', None)\n\tif conditions is not None:\n\t\tusername = cherrypy.session.get(SESSION_KEY)\n\t\tif username:\n\t\t\tcherrypy.request.login = username\n\t\t\tfor condition in conditions:\n\t\t\t\t# A condition is just a callable that returns true or false\n\t\t\t\tif not condition():\n\t\t\t\t\traise cherrypy.HTTPRedirect(\"/auth/login\")\n\t\telse:\n\t\t\traise cherrypy.HTTPRedirect(\"/auth/login\")", "def check_authentication(self, request):\n if not self.request.user.is_authenticated:\n raise NotAuthenticated()", "def authenticate():\n\treturn Response(\n\t'Could not verify your access level for that URL.\\n'\n\t'You have to login with proper credentials', 401,\n\t{'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\r\n return Response(\r\n 'Could not verify your access level for that URL.\\n'\r\n 'You have to login with proper credentials', 401,\r\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with 
proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})" ]
[ "0.657821", "0.65143234", "0.63110507", "0.62617683", "0.6211139", "0.6205653", "0.6156784", "0.6156784", "0.6156784", "0.6156784", "0.6156784", "0.6156784", "0.6156784", "0.6156784", "0.6156784", "0.6156784", "0.6156784", "0.6156784", "0.6156784", "0.6156784", "0.6156784", "0.6156784", "0.6156784", "0.6156784", "0.6156784", "0.6156784", "0.6156784", "0.6138285", "0.6138285", "0.6138285" ]
0.67513925
0
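Each record above pairs a query and a positive document with parallel `negatives` and `negative_scores` lists. A minimal sketch of reading that alignment (field names follow the schema; `hardest_negative` is an illustrative helper, not part of the dataset):

def hardest_negative(record):
    # negatives[i] is scored by negative_scores[i]; scores are stored as strings
    pairs = zip(record["negatives"], record["negative_scores"])
    return max(pairs, key=lambda p: float(p[1]))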
Test that the request() wrapper passes along expected headers
def test_request_headers(mock_send, mock_format):
    ClientSession().request('GET', 'https://url', access_token='token')
    request_obj = mock_send.call_args[0][0]
    assert request_obj.headers['Authorization'] == 'Bearer token'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_specific_headers_sent_with_request(self):\n req = self.httpbin.get_my_headers(dry_run=True)\n self.assertIn('All-Request-Headers', req.prepared_request.headers)\n request_data_headers = self.httpbin.client['get_my_headers']['headers']['All-Request-Headers']\n self.assertEqual(req.prepared_request.headers['All-Request-Headers'], request_data_headers)", "def test_make_request_headers(self, m_requests, m_sleep):\r\n request = testing.DummyRequest({mut.URL_KEY: SAMPLE_URL, \r\n mut.HEADERS_KEY: json.dumps(SAMPLE_REQUEST_HEADERS)})\r\n m_response, response_dict = self.mock_response()\r\n m_requests.get.return_value = m_response\r\n self.assertEqual(response_dict, mut.make_request(request))\r\n m_requests.get.assert_called_with(url=SAMPLE_URL, \r\n headers=SAMPLE_REQUEST_HEADERS)\r\n m_sleep.assert_called_with(mut.SECURITY_SLEEP)", "def test_headers(self):\n token = 'abc123'\n requests.get(self.url, auth=BearerAuth(token))\n self.assertEqual(httpretty.last_request().headers['Authorization'], 'Bearer {}'.format(token))", "def test_user_headers_sent_with_request(self):\n user_header = {'All-Request-Headers': 'Headers from user code'}\n req = self.httpbin.get_my_headers(headers=user_header, dry_run=True)\n self.assertIn('All-Request-Headers', req.prepared_request.headers)\n self.assertEqual(req.prepared_request.headers['All-Request-Headers'], user_header['All-Request-Headers'])", "def test_client_can_do_get_request(self):\n response = self.httpbin.get_my_headers(headers={'User-agent': 'Fake user agent'})\n self.assertEqual(response.request.method, 'GET')\n self.assertEqual(response.status_code, 200)", "def test_build_headers(self):\n\n headers = self_signed.build_headers()\n assert 'Content-Length' in headers\n assert 'X-Amz-Date' in headers\n assert 'Host' in headers\n assert 'X-Amz-Security-Token' in headers\n assert 'Content-Type' in headers\n assert 'Authorization' in headers", "def test_headers(self):\n self.assert_expected_token_value()", "def mock_request(auth_header):\n request = HttpRequest()\n request.META['HTTP_AUTHORIZATION'] = auth_header\n return request", "def test_specific_url_is_used_for_request(self):\n req = self.httpbin.get_my_headers(dry_run=True)\n\n url = self.httpbin.client[\"get_my_headers\"][\"url\"]\n self.assertIn(url, req.prepared_request.url)", "def test_from_request_works_with_wsgi(self):\n url = \"http://sp.example.com/\"\n\n params = {\n 'oauth_version': \"1.0\",\n 'oauth_nonce': \"4572616e48616d6d65724c61686176\",\n 'oauth_timestamp': \"137131200\",\n 'oauth_consumer_key': \"0685bd9184jfhq22\",\n 'oauth_signature_method': \"HMAC-SHA1\",\n 'oauth_token': \"ad180jjd733klru7\",\n 'oauth_signature': \"wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D\",\n }\n\n req = oauth.Request(\"GET\", url, params)\n headers = req.to_header()\n\n # Munge the headers\n headers['HTTP_AUTHORIZATION'] = headers['Authorization']\n del headers['Authorization'] \n\n # Test from the headers\n req = oauth.Request.from_request(\"GET\", url, headers)\n self.assertEqual(req.method, \"GET\")\n self.assertEqual(req.url, url)\n self.assertEqual(params, req.copy())", "def test_lti20_rest_good_headers(self):\r\n self.xmodule.verify_oauth_body_sign = Mock(return_value=True)\r\n\r\n request = Mock(headers={u'Content-Type': u'application/vnd.ims.lis.v2.result+json'})\r\n self.xmodule.verify_lti_2_0_result_rest_headers(request)\r\n # We just want the above call to complete without exceptions, and to have called verify_oauth_body_sign\r\n self.assertTrue(self.xmodule.verify_oauth_body_sign.called)", "def 
test_request_kwargs(m):\n headers = {\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:68.0) Gecko/20100101 Firefox/68.0'\n }\n m.get('http://mock.com/', request_headers=headers, text='body')\n\n favicon.get('http://mock.com/', headers=headers)\n\n # Test deprecated header argument\n with pytest.warns(DeprecationWarning):\n favicon.get('http://mock.com/', headers)", "def test_basic(self):\n request = fake_twisted_request(request_headers={\n b'x-foo': [b'bar'],\n })\n self.assertThat(\n _nevow_request_to_request_map(request),\n ContainsDict({\n 'content_type': Equals(b'application/octet-stream'),\n 'content_length': Equals(0),\n 'character_encoding': Is(None),\n 'headers': Equals({b'Content-Length': [0],\n b'X-Foo': [b'bar'],\n b'Host': [b'example.com']}),\n 'remote_addr': Equals(b'192.168.1.1'),\n 'request_method': Equals(b'GET'),\n 'server_name': Equals(b'example.com'),\n 'server_port': Equals(80),\n 'scheme': Equals(b'http'),\n 'uri': Equals(URL.from_text(u'/one'))}))", "def test_default_headers_sent_with_request(self):\n req = self.httpbin.get_my_ip(dry_run=True)\n self.assertIn('All-Request-Headers', req.prepared_request.headers)\n self.assertEqual(req.prepared_request.headers['All-Request-Headers'],\n self.httpbin.client[\"default_headers\"]['All-Request-Headers'])", "def test_authenticated_request(self):\n http = FakeHttp([(FakeResponse(200), {})])\n self.mock.Http.return_value = http\n self.mock.read_data_from_file.return_value = 'cached auth token'\n response, _ = http_utils.request('https://url/', configuration=self.config)\n\n # Ensure that all expected requests were made.\n self.assertEqual(http.replies, [])\n\n self.assertEqual(http.last_headers, {\n 'Authorization': 'cached auth token',\n 'User-Agent': 'clusterfuzz-reproduce'\n })\n self.assertEqual(response.status, 200)", "def test_headers(self):\n req = requests.options(\"http://{}:{}\".format(self.config.options.get(\"Server\", \"ListenAddress\"),\n self.config.options.get(\"Server\", \"Port\")))\n\n self.assertEqual(req.headers, {'Server': 'quick-serve', 'Allow': 'GET, PUT, HEAD, POST, DELETE, OPTIONS'})", "def test_header_parser_vanilla(self):\n lines = [\"Content-Type: application/json\", \"Accept: application/json\"]\n h = {\"Content-Type\": \"application/json\", \"Accept\": \"application/json\"}\n headers = parser._parse_headers(lines)\n self.assertEqual(h, headers)", "def testContentFromHeaderOnly(self):\n request = Request1()\n request.integer_field = 1\n request.string_field = 'a'\n request.enum_field = Enum1.VAL1\n self.rpc_mapper1.build_request(self.handler,\n Request1).AndReturn(self.request)\n\n def build_response(handler, response):\n output = '%s %s %s' % (response.integer_field,\n response.string_field,\n response.enum_field)\n handler.response.headers['Content-Type'] = (\n 'application/x-www-form-urlencoded')\n handler.response.out.write(output)\n self.rpc_mapper1.build_response(\n self.handler, mox.IsA(Response1)).WithSideEffects(build_response)\n\n self.mox.ReplayAll()\n\n self.handler.request.headers['Content-Type'] = None\n self.handler.request.environ['HTTP_CONTENT_TYPE'] = (\n 'application/x-www-form-urlencoded')\n\n self.handler.handle('POST', '/my_service', 'method1')\n\n self.VerifyResponse('200', 'OK', '1 a VAL1',\n 'application/x-www-form-urlencoded')\n\n self.mox.VerifyAll()", "def test_request_authorization(self):\n httpretty.register_uri(httpretty.POST, 'http://somewhere.com/test')\n r = CkanResource('http://somewhere.com/test', 'somekey', {'offset': None, 'limit': None})\n 
r._get_response(200, 20)\n headers = dict(httpretty.last_request().headers)\n assert_equals(headers['authorization'], 'somekey')", "def test_request_fetch(response, mocker):\n mocker.patch(\"requests.get\", autospec=True)\n requests.get.return_value = response\n request.fetch(\"http://localhost\")\n requests.get.assert_called_with(\"http://localhost\", request.headers)", "def test_request(self):\n client = RestClient(host=self.host, username='')\n rest_url = 'some/url/'\n\n # Mock good get response\n with responses.RequestsMock() as rsps:\n rsps.add(responses.GET, f'{self.host}/{rest_url}', status=200,\n json={'value':\"good!\"})\n r = client.request('get', rest_url)\n assert r.status_code == 200\n assert r.json()['value'] == 'good!'\n \n # Mock bad get response\n with responses.RequestsMock() as rsps:\n rsps.add(responses.GET, f'{self.host}/{rest_url}', status=401,\n json={'value':\"bad!\"})\n with raises(requests.HTTPError):\n r = client.request('get', rest_url)\n r = client.request('get', rest_url, checkstatus=False)\n assert r.status_code == 401\n assert r.json()['value'] == 'bad!'", "def test_headers_for_upload_service(self, mocker):\n payload = dict(id=\"stub_id\", data={\"some\": \"data\"})\n headers = {'x-rh-identity': 'ABC'}\n self.client.post(self.url, json=payload, headers=headers)\n\n headers = {\n 'x-rh-insights-request-id': 'stub_id',\n 'x-rh-identity': 'ABC'\n }\n self._retryable.assert_called_once_with(\n 'post',\n 'http://upload:8080/api/ingress/v1/upload',\n files=mocker.ANY,\n headers=headers\n )", "def requestheaders(self, flow: mitmproxy.http.HTTPFlow):", "def test_create_headers(headers, request_content_type_header, response_content_type_header, expected_headers):\n from HttpV2 import create_headers\n\n output = create_headers(headers, request_content_type_header, response_content_type_header)\n assert output == expected_headers", "def test_response_header(BASE_URL, COUNTRY_CODE):\n # make request\n result = requests.get(f'{BASE_URL}{COUNTRY_CODE}')\n assert result.headers['Content-Type'] == 'application/json'", "def test_from_request_is_case_insensitive_checking_for_auth(self):\n url = \"http://sp.example.com/\"\n\n params = {\n 'oauth_version': \"1.0\",\n 'oauth_nonce': \"4572616e48616d6d65724c61686176\",\n 'oauth_timestamp': \"137131200\",\n 'oauth_consumer_key': \"0685bd9184jfhq22\",\n 'oauth_signature_method': \"HMAC-SHA1\",\n 'oauth_token': \"ad180jjd733klru7\",\n 'oauth_signature': \"wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D\",\n }\n\n req = oauth.Request(\"GET\", url, params)\n headers = req.to_header()\n\n # Munge the headers\n headers['authorization'] = headers['Authorization']\n del headers['Authorization'] \n\n # Test from the headers\n req = oauth.Request.from_request(\"GET\", url, headers)\n self.assertEqual(req.method, \"GET\")\n self.assertEqual(req.url, url)\n self.assertEqual(params, req.copy())", "def test_specific_default_body_sent_with_request(self):\n req = self.httpbin.get_my_headers(dry_run=True)\n def_body = self.httpbin.client[\"get_my_headers\"][\"data\"]\n self.assertIn(urlencode(def_body), req.prepared_request.body)", "def test_unauthenticated_request(self):\n http = FakeHttp([(FakeResponse(200), {})])\n self.mock.Http.return_value = http\n response, _ = http_utils.request('https://url/', body='test body')\n\n # Ensure that all expected requests were made.\n self.assertEqual(http.replies, [])\n\n self.assertEqual(http.last_body, '\"test body\"')\n self.assertEqual(http.last_headers, {})\n self.assertEqual(response.status, 200)", "def 
test_headers(self):\n msg = self.shortDescription()\n self.assertTrue(False, msg=msg)\n pass", "def test_search(self):\n req = http.make_request('http://xxx', 'GET', None, None)\n self.assertIsNone(req.data)\n\n req = http.make_request('http://xxx', 'GET', 'ignored', None)\n self.assertIsNone(req.data)\n\n req = http.make_request('http://xxx', 'DELETE', None, None)\n self.assertIsNone(req.data)\n\n req = http.make_request('http://xxx', 'DELETE', 'ignored', None)\n self.assertIsNone(req.data)\n\n req = http.make_request('http://xxx', 'POST', '', None)\n self.assertEqual(0, len(req.data))\n\n req = http.make_request('http://xxx', 'POST', 'abc', None)\n self.assertEqual(3, len(req.data))\n\n req = http.make_request('http://xxx', 'POST', '', [('xxx', 'yyy'),\n ('foo',)])\n\n self.assertEqual('yyy', req.get_header('Xxx'))\n self.assertEqual('1', req.get_header('Foo'))" ]
[ "0.7787896", "0.77121085", "0.75712365", "0.7443091", "0.7166433", "0.71009004", "0.70632696", "0.7058741", "0.7019937", "0.7008756", "0.69866556", "0.69617903", "0.6924459", "0.687052", "0.68337685", "0.681641", "0.6786742", "0.6786234", "0.6743163", "0.6663947", "0.6661827", "0.66551435", "0.66532516", "0.66424304", "0.6633138", "0.6608079", "0.654091", "0.6499332", "0.6449022", "0.6413661" ]
0.80046374
0
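The test above receives `mock_send` and `mock_format` as pre-built mocks. A minimal sketch, assuming pytest-mock and a hypothetical `client` module, of how such fixtures could be wired (the patch targets are assumptions, not part of the record):

import pytest

@pytest.fixture
def mock_send(mocker):
    # patch the low-level send so request() never touches the network
    return mocker.patch('client.ClientSession._send')

@pytest.fixture
def mock_format(mocker):
    return mocker.patch('client.ClientSession._format')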
Wrapper to make async calls using gevent; concurrent, not parallel.
def asynchronous(urls, batch_size, delay=0, verbose=False):
    # batch_size is accepted but unused here; kept for API compatibility
    try:
        count = 1
        threads = []
        url_list = urls.strip(" '").split(",")  # split once, consistently
        print(url_list)
        for url in url_list:
            print('On batch {}'.format(count))
            threads.append(gevent.spawn(fetch, url, verbose))
            count += 1  # advance the counter (was never incremented)
        responses = gevent.joinall(threads)
        time.sleep(delay)
        return responses
    except Exception as e:
        print(e)
        return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def call_async(self, name, *args, **kwargs):", "async def run():\n sem = asyncio.Semaphore(DEFAULT_SEMAPHORE_LIMIT)\n tasks = []\n\n async with ClientSession() as session:\n for u in [ROOT_URL.format(jid) for jid in DEFAULT_RANGE_IDS]:\n task = asyncio.ensure_future(bound_fetch(sem, u, session))\n tasks.append(task)\n responses = asyncio.gather(*tasks)\n await responses", "def sync(async_func):\n\n def wrapFunc(self: User, *args, **kwargs):\n futures = []\n\n for sub_user in self.sub_users:\n futures.append(asyncio.run_coroutine_threadsafe(async_func(sub_user, *args, **kwargs), loop))\n gevent.sleep(2)\n\n while True:\n for f in futures:\n if not f.done():\n gevent.sleep(0.1)\n break\n else:\n e = f.exception()\n if e:\n raise e\n else:\n break\n\n return wrapFunc", "def spawn_greenlet(func, *args, **kwargs):\n\n g = greenlet.greenlet(func)\n result = g.switch(*args, **kwargs)\n while True:\n if isinstance(result, asyncio.Future):\n result = yield from result\n else:\n break\n return result", "def testBaseCase(self):\n r = []\n async_fn = utils.make_async()(lambda: r.append(\"a\"))\n async_fn()\n time.sleep(1)\n self.assertListEqual(r, [\"a\"])", "def spawn(self, func, *args, **kwargs):\n return gevent.spawn(func, *args, **kwargs)", "async def main(task, *args, **kwargs):\n\n aws = []\n for _ in range(NUM_TASKS):\n aws.append(task(*args, **kwargs))\n await asyncio.gather(*aws)", "async def _executor(self, func):\n return await asyncio.coroutine(func)()", "def test_sync_event_for_getter():\n injector.get(EnvironmentService).cache = {}\n handler = service(PgConnectionHandlerService)()\n\n handler.max_conn = (\n 2 # possible, because connection_pool will be created on first get_connection\n )\n block_event = threading.Event()\n block_event.clear()\n conn = handler.get_connection()\n thread_blocking_conn = Thread(\n target=thread_method_block,\n kwargs={\"handler\": handler, \"block_event\": block_event},\n )\n thread_blocking_conn.start()\n handler.sync_event.clear()\n\n threads: Thread = []\n for i in range(handler.max_conn):\n thread = Thread(target=thread_method, kwargs={\"handler\": handler, \"secs\": 0.1})\n thread.start()\n threads.append(thread)\n handler.sync_event.set()\n sleep(0.1)\n assert not handler.sync_event.is_set()\n handler.put_connection(conn)\n block_event.set()\n for i in range(handler.max_conn):\n threads[i].join()\n thread_blocking_conn.join()", "def async(fnc, *args, **kwargs):\n gen = fnc(*args, **kwargs)\n\n def perform(result):\n if (\n type(result) is tuple and len(result) and\n issubclass(result[0], Exception)\n ):\n gen.throw(result[0](result[1]))\n return\n\n try:\n actor, msg, data = gen.send(result)\n actor.send(msg, perform, **data)\n except StopIteration:\n return\n\n perform(None)", "def run_task(func):\n\n def _wrapped(*a, **k):\n loop = asyncio.get_event_loop()\n return loop.run_until_complete(func(*a, **k))\n\n return _wrapped", "async def run(self):\n main_loop = asyncio.get_event_loop()\n # so many threads, name this so it's identifiable\n pfx = 'ThreadPoolExecutor-GPSEventConsumer'\n # NOTE: there should only be one thread pool executor worker\n # from here since this method is only called once from\n # gordon core, so there _should_ be no need to limit\n # workers\n executor = concurrent.futures.ThreadPoolExecutor(thread_name_prefix=pfx)\n coro = main_loop.run_in_executor(executor, self._manage_subs)\n await coro", "async def async_wrapper(*args: Any) -> None:\n with trace_path(suffix):\n await func(*args)", "async def run_service(loop):\n curr = 
time.time()\n results = []\n\n while True:\n # First get the list of pending tasks, if there exists any\n results = get_pending(results)\n\n # Now poll the endpoints\n for url in API_URLS:\n future = loop.create_task(poll_endpoint(url))\n results.append(future)\n \n await asyncio.gather(*results)\n \n delta = time.time() - curr\n diff = max(0, POLL_INTERVAL - delta)\n await asyncio.sleep(diff)\n curr = time.time()", "def _future_work_():\n pass", "def wrapper(*args, **kwargs):\n loop = asyncio.get_event_loop()\n return loop.run_until_complete(method(*args, **kwargs))", "def asynchronous(method):\n def wrapper(*a, **kw):\n web.ctx.response._auto_finish = False\n return method(*a, **kw)\n\n return wrapper", "def verify_async(case_list,coroutine):\n from gevent import monkey\n monkey.patch_all()\n result = []\n geventPool = pool.Pool(coroutine)\n tasks = [geventPool.spawn(Verify.request_and_verify, case) for case in case_list]\n gevent.joinall(tasks)\n for i in tasks:\n if i.value is not None:\n result.append(i.value)\n print_info('Total Verify-Case is: %s, %s error happened.' % (len(case_list), Verify.ERROR_COUNT))\n return result", "async def async_event(self, event: str, *args, **kwargs):\n for cb in self.event_handlers[event]:\n asyncio.ensure_future(cb(*args, **kwargs), loop=self.loop)", "async def run_async(self, func, *args):\n return await self.eventloop.run_in_executor(None, func, *args)", "async def main():\n futures = [fetch_ip(service) for service in SERVICES]\n # Schedule tasks with Wait\n # Retrieve results from the coroutine. Use done, pending. \n done, pending = await asyncio.wait( \n futures, return_when=FIRST_COMPLETED\n )\n print(done.pop().result())", "def async_adapter(wrapped_func):\n\n @functools.wraps(wrapped_func)\n def run_sync(*args, **kwargs):\n loop = asyncio.get_event_loop()\n task = wrapped_func(*args, **kwargs)\n return loop.run_until_complete(task)\n\n return run_sync", "def __call__(self, **kwargs):\n kwargs.setdefault('timeout', self.timeout)\n kwargs.setdefault('send_line', self.send_line)\n kwargs['process_results'] = self.process_results\n return async_events(self.context, self.events, **kwargs)", "def run_async(method):\n Thread(target=method, args=[]).start()", "async def call(fn: Callable, *args, **kwargs) -> Any:\n async with websockets.connect(WS_SERVER_URI) as websocket:\n\n task = serialize((fn, args, kwargs))\n\n await websocket.send(task)\n message = await websocket.recv()\n\n results = deserialize(message)\n\n if isinstance(results, TaskExecutionError):\n raise results\n\n return results", "def _request(self, method, url, payload=None, **params):\n kwargs = dict(params=params)\n kwargs[\"timeout\"] = self._timeout\n if not url.startswith('http'):\n url = self.prefix + url\n headers = self._auth_headers()\n headers['Content-Type'] = 'application/json'\n\n if payload:\n kwargs[\"data\"] = json.dumps(payload)\n gs = self._gpool.spawn if self._gpool else gevent.spawn\n r = gs(self.session.request, method, url, headers=headers, **kwargs)\n r.fetch = partial(self.join, r)\n update_wrapper(r.fetch, self.join)\n #gevent.sleep(0.05)\n return r", "def async_manager(self):\n while True:\n (request, args, kwargs) = self.pool.get()\n if request is None:\n break\n request(*args, **kwargs)", "def async_fetch(self):\n args = (async_get_pipe, self.zargs, self.connections)\n mapped = yield ait.async_map(*args)\n return_value(multiplex(mapped))", "def noblock(f):\n\n async def wrapper(*args, **kwargs):\n with concurrent.futures.ThreadPoolExecutor(max_workers = 20) 
as executor:\n loop = asyncio.get_event_loop()\n response = await loop.run_in_executor(executor,\n lambda: f(*args, **kwargs))\n return response\n\n return wrapper", "async def handle_async(req):\n return await logic_async(req)" ]
[ "0.6448774", "0.6373844", "0.6306433", "0.61689025", "0.6099593", "0.6085664", "0.60487", "0.59556615", "0.59057474", "0.58697575", "0.58590627", "0.5844879", "0.58389485", "0.5828767", "0.5826276", "0.5818216", "0.58082384", "0.58055556", "0.577954", "0.57730323", "0.576001", "0.5743427", "0.57402265", "0.57390356", "0.57201624", "0.57071537", "0.5701377", "0.56798273", "0.5671956", "0.56565714" ]
0.6791043
0
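The `asynchronous` helper above spawns a `fetch` worker it does not define. A minimal sketch of one plausible `fetch`, assuming gevent with monkey-patching and the requests library (both assumptions, not from the record):

from gevent import monkey
monkey.patch_all()  # make blocking I/O cooperative so greenlets overlap

import requests

def fetch(url, verbose=False):
    resp = requests.get(url)
    if verbose:
        print(url, resp.status_code)
    return resp

Without the monkey-patching step, the greenlets would still be concurrent in structure, but each blocking HTTP call would serialize them, which is the "concurrent, not parallel" caveat in the query.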
Main function to call. This function should obtain results from the generators and plot the image and image intensity. Create a for loop to iterate the generator functions.
def display_images():
    vc = cv2.VideoCapture(0)  # Open webcam
    figure, ax = plt.subplots(1, 2, figsize=(10, 5))  # Initialise plot
    count = 0  # Counter for number of acquired frames
    intensity = []  # Append intensity across time
    # For loop over generator here; `image_generator` is an assumed helper
    # that yields one image-intensity value per captured frame
    generator = image_generator(vc)
    for imageintensity in generator:
        intensity.append(imageintensity)
        plot_image_and_brightness()  # Call plot function
        count += 1
        # This triggers exit sequences when user presses q
        if cv2.waitKey(1) & 0xFF == ord('q'):
            # Clean up here
            plt.close('all')  # close plots
            generator.close()  # Use generator exit for clean up
            break  # break loop
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n \n # for inserting other images, add tem to /input folder and list them here\n images = (\n 'image-0',\n 'image-1',\n 'image-2'\n )\n\n for image_name in images:\n print(image_name, \"image:\")\n\n image = open_image(image_name)\n display_image(image, \"Original input \" + image_name)\n\n grayscale_v = transform_colors(image)\n display_image(grayscale_v[:,:,0], \"Grayscale \" + image_name)\n save_image(image_name + \"-grayscale\", grayscale_v[:,:,0])\n\n contours_v, contours = get_contours(grayscale_v)\n display_image(contours_v, \"Contours \" + image_name)\n save_image(image_name + \"-contours\", contours_v)\n\n labeled_img, areas = get_measures(image, contours[1:])\n display_image(labeled_img, \"Labeled \" + image_name)\n save_image(image_name + \"-labeled\", labeled_img)\n\n areas_histogram(areas, image_name)", "def execute(self):\n train_full = self.load_df('./data/train.csv')\n train_split, test = train_test_split(train_full,shuffle=False,test_size=.2)\n train,val = train_test_split(train_split,shuffle=False,test_size=.2)\n \n train_generator = self.get_img_gen(train,'image_name','benign_malignant','data/jpeg/train/' )\n val_generator = self.get_img_gen(val,'image_name','benign_malignant','data/jpeg/train/')\n test_generator = self.get_img_gen(test,'image_name','benign_malignant','data/jpeg/train/')\n\n return train_generator,val_generator,test_generator", "def run(self):\n self.simulate_test_data()\n self.pipeline_test_data()\n self.plot_jump_flags_image()\n self.plot_groupdq_flags(pixel=[884, 550])\n self.plot_ramps_pre_post_correction(pixel=[884, 550])", "def plot(self):\n \n \n x_ibs=[] \n x_gss=[]\n y_ibs=[] \n y_gss=[]\n x_pso=[]\n x_bgd=[]\n y_bgd=[]\n y_pso=[]\n x_gd=[]\n y_gd=[]\n \n i=0.0000001\n \n # for k in range(1,51):\n # i= random.uniform(0.00000001, 1)\n # t_avg_ibs=[]\n # t_avg_gss=[]\n # for j in range(1,51):\n #L=random.randint(-100, 0)\n #U=random.randint(0, 100)\n max_iter=self.Max_iter \n L=self.Lower_bound\n U=self.Upper_bound\n \n minima=self.gss(L,U,i,1000)\n #print(\"minima at X = \",minima[1])\n x_ibs.append(self.I_bisection(L,U,minima[1],max_iter)[0])\n x_gss.append(self.gss(L,U,i,max_iter)[0])\n x_pso.append(self.particle_Swarm(self.func, L, U, 2, max_iter)[0])\n x_gd.append(self.gradient_descent(X=U ,eta=0.01, tol=minima[1],iter= max_iter)[0])\n x_bgd.append(self.b_gradient_descent(LB=L,UB=U ,eta=0.01, tol=minima[1],iter=max_iter)[0])\n #print(x_pso)\n for i in x_ibs[0]:\n #print(self.Func(i)) \n y_ibs.append(self.Func(i))\n for i in x_gss[0]:\n y_gss.append(self.Func(i)) \n for i in x_pso[0]:\n y_pso.append(self.Func(i)) \n for i in x_gd[0]:\n y_gd.append(self.Func(i)) \n for i in x_bgd[0]:\n y_bgd.append(self.Func(i)) \n #print(y_gss)\n\n plt.plot(x_ibs[0], y_ibs, 'r.')\n plt.plot(x_gss[0], y_gss, '.')\n plt.plot(x_pso[0], y_pso, 'y.')\n #plt.plot(x_gd[0], y_gd, 'y.')\n #plt.plot(x_bgd[0], y_bgd, 'k.')\n plt.xlabel('x')\n plt.ylabel('y')\n \n plt.suptitle('Interval Bisection Search (Red) vs Golden Section Search (Blue) vs Particle swarm optimization (Green)')\n #plt.axis([0, 100, 0.00000001, 1]) \n plt.show()\n plt.plot(x_gd[0], y_gd, 'r.')\n plt.plot(x_bgd[0], y_bgd, 'k.')\n plt.xlabel('x')\n plt.ylabel('y') \n plt.suptitle('Gradient Descent (Red) vs Batch Gradient Descent (Black) ')\n \n plt.show()\n \n start_time = timeit.default_timer()\n ibs=self.I_bisection(L,U,minima[1],max_iter)\n print(\" Execution time for Interval bisection Method is\", timeit.default_timer() - start_time,\"s\")\n start_time = timeit.default_timer()\n 
gss=self.gss(L,U,i,max_iter)\n print(\" Execution time for Golden Section Search is\", timeit.default_timer() - start_time,\"s\")\n start_time = timeit.default_timer()\n pso=self.particle_Swarm(self.func, L, U, 2, max_iter)\n print(\" Execution time for Particle swarm optimization is\", timeit.default_timer() - start_time,\"s\")\n start_time = timeit.default_timer()\n gd=self.gradient_descent(X=U ,eta=0.01, tol=minima[1],iter= max_iter)\n print(\" Execution time for Gradient Descent is\", timeit.default_timer() - start_time,\"s\")\n start_time = timeit.default_timer()\n bgd=self.b_gradient_descent(LB=L,UB=U ,eta=0.01, tol=minima[1],iter=max_iter)\n print(\" Execution time for Batch Gradient Descent is\", timeit.default_timer() - start_time,\"s\")\n plt.plot(ibs[1], ibs[2], 'r.')\n plt.text(ibs[1], ibs[2],\"IB\")\n plt.plot(gss[1], gss[2], '.')\n plt.text(gss[1], gss[2],\" GSS\")\n plt.plot(pso[1], pso[2], 'y.')\n plt.text(pso[1], pso[2],\" PSO\")\n plt.plot(gd[1], gd[2], 'g.')\n plt.text(gd[1], gd[2],\" GD \")\n plt.plot(bgd[1],bgd[2], 'k.')\n plt.text(bgd[1], bgd[2],\" Batch_GD\")\n \n plt.xlabel('Value of X')\n plt.ylabel('NUmber of iteration') \n plt.suptitle('Number of iterations vs minimum value of x')\n \n plt.show()", "def main(image_path):\n temp_dir = tempfile.mkdtemp()\n print('Saving output to {}'.format(temp_dir))\n estimator = run_image(image_path)\n visualize(estimator, image_path, temp_dir)", "def generator(data_dir, image_paths, steering_angles, batch_size, b_istraining):\n\n images = np.empty([batch_size, glob_image_height, glob_image_width, glob_image_channels])\n steers = np.empty(batch_size)\n nb_images=image_paths.shape[0]\n while True:\n for i in range(batch_size):\n index = random.randint(0, nb_images-1)\n center, left, right = image_paths[index]\n steering_angle = steering_angles[index]\n # argumentation\n if b_istraining:\n image, steering_angle = augument_data(data_dir, center, left, right, steering_angle)\n else:\n image = load_image(data_dir, center) \n \n image_height_orig =image.shape[0]\n # cropping out irrelevant part of the picture\n image = image[60:image_height_orig-30, :, :]\n # resize the image for the nvidia model\n image = cv2.resize(image, (glob_image_width, glob_image_height), cv2.INTER_AREA)\n # convert to yuv space for nvidia model\n image = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)\n # add image and steering angle to the batch\n images[i] = image\n steers[i] = steering_angle\n yield images, steers", "def main(self, args):\n for plot in args.plots:\n if plot == 'no_plot':\n break\n print \"plotting\", plot\n\n fig = self.plot_figure(plot)\n\n fformat = '{plot}_{index}.{ext}'\n fname = fformat.format(plot=plot, index=self.index, ext='png')\n fpath = os.path.join(plot_dir, fname)\n fig.savefig(fpath)\n\n if args.distributions == 'all':\n distributions = ['Uf', 'Wf', 'uf_abs',\n 'vorticity', 'vertical_shear']\n else:\n distributions = args.distributions\n for dist in distributions:\n range = self.properties[dist]['range']\n name = self.properties[dist]['name']\n print \"plotting distribution\", dist, name\n fig = self.plot_distribution(getattr(self, dist), range, name)\n\n fformat = 'distribution_{q}_{index}.{ext}'\n fname = fformat.format(q=dist, index=self.index, ext='png')\n fpath = os.path.join(plot_dir, fname)\n fig.savefig(fpath)\n\n if args.funcs:\n for func in args.funcs:\n print \"multiprocessing\", func\n f = getattr(self, 'plot_' + func)\n f()", "def main(showSamples=True, showConfusion=True):\n ndigit = 10\n elambda = [0.4, 0.6, 0.8]\n for i 
in elambda:\n test(ndigit, i, showSamples, showConfusion)\n if showSamples:\n pltmulti('graphs.pdf')", "def run_observation(self):\n\n self._generate_direct_image() # to calibrate x_ref and y_ref\n\n num_frames = len(self.exp_start_times)\n progress = Progress(num_frames)\n self.progess = progress\n\n progress_line = 'Generating frames 0/{} done'.format(num_frames)\n progress.print_status_line(progress_line)\n progress.progress_line = progress_line\n\n for i, start_time in enumerate(self.exp_start_times):\n filenum = i + 1\n self._generate_exposure(start_time, filenum)\n\n progress.increment()\n progress_line = 'Generating frames {}/{} done'.format(filenum,\n num_frames)\n progress.print_status_line(progress_line)\n\n # so it can be retreived by exposure_generator\n progress.progress_line = progress_line", "def _get_test_generator(self):\n for data_element in self.test:\n image, heatmap = self._generate_input_tuple(data_element)\n \n yield (image, heatmap)", "def generating(\n self,\n prompt,\n width=512,\n height=512,\n guidance_scale=7.5,\n num_images_per_prompt=1,\n num_inference_steps=50,\n generator=None,\n **kwargs,\n ):\n pipe = self.get_pipe(\"generate\")\n images = pipe(\n prompt=prompt,\n width=width,\n height=height,\n guidance_scale=guidance_scale,\n num_images_per_prompt=num_images_per_prompt,\n num_inference_steps=num_inference_steps,\n generator=generator,\n **kwargs,\n ).images\n return images", "def my_generator(gen_args, b_size=64, im_size = (224,224)): \n\n data_aug_gen = ImageDataGenerator(**gen_args)\n train_it = data_aug_gen.flow_from_directory('/home/ubuntu/Notebooks/Datasets/RAF_occ/train/', class_mode='categorical',\n batch_size=b_size, target_size=im_size)\n val_it = data_aug_gen.flow_from_directory('/home/ubuntu/Notebooks/Datasets/RAF_occ/validation/', class_mode='categorical',\n batch_size=b_size, target_size=im_size)\n \n\n classes = np.unique(trn_lbls['target'])\n class_weights = class_weight.compute_class_weight('balanced',classes, trn_lbls['target'])\n class_weights_dict = dict(enumerate(class_weights))\n \n return train_it, val_it, test_it, class_weights_dict", "def evaluate(self):\n eval_save_dir = os.path.join(self.imsave_dir, \"test\")\n samples = self.sample(np.random.uniform(-1, 1, (self.batch_size, self.z_dim)))\n if not os.path.exists(eval_save_dir):\n os.makedirs(eval_save_dir)\n # save images\n for index, sample in enumerate(samples):\n if self.C == 1:\n imsave(os.path.join(eval_save_dir, \"%s.png\" % index), samples[index].reshape(self.H, self.W))\n else:\n imsave(os.path.join(eval_save_dir, \"%s.png\" % index),\n recover_img(samples[index].reshape(self.H, self.W, self.C)))\n\n # display some images\n row, col = 4, 4\n random_index = np.random.randint(0, self.batch_size, size=row * col)\n for i in range(row*col):\n plt.subplot(row, col, i+1)\n plt.imshow(recover_img(samples[random_index[i]].reshape(self.H, self.W, self.C))\n , cmap=\"gray\" if self.C==1 else None)\n plt.gca().axis(\"off\")\n plt.show()", "def genImage(self, img_num=1, mode=\"stabilization\"):\n self.Gmodel.eval()\n with torch.no_grad():\n for i in range(img_num):\n latent_z = torch.randn(1, 512, 1, 1).normal_().to(self.device)\n output = self.Gmodel(latent_z, mode)\n print(\"output size: \", output.size())\n output = torch.clamp(output, min=0, max=1)\n output = output.cpu().squeeze().numpy()\n fake_img = output.transpose(1, 2, 0)\n print(\"fake image size: \", fake_img.shape)\n plt.imshow(fake_img)\n plt.show()\n save_file = os.path.join(self.save_dir, str(self.load_resl), 
\"%05d.jpg\" % i)\n os.makedirs(os.path.dirname(save_file), exist_ok=True)\n plt.imsave(save_file, fake_img)", "def test_generator(self, test_path):\n\n img_list = os.scandir(test_path)\n for img_entry in img_list:\n\n img = cv2.imread(img_entry.path, COLOR_TO_OPENCV[self.color_mode])\n if img.shape[-1] == 3:\n orig_shape = img.shape[-2::-1]\n else:\n orig_shape = img.shape[::-1]\n\n\n img = cv2.resize(img, tuple(self.target_size))\n img = img / 255\n if self.color_mode == \"grayscale\":\n img = np.reshape(img, img.shape + (1,))\n img = np.reshape(img, (1,) + img.shape)\n yield img, img_entry, orig_shape", "def iter_fun(self):\n\n run_id = self._run_id\n etopo_dir = driver_home\n topodir = driver_home\n\n # load input info\n if self._input_info == None:\n scn_fname = os.path.join(self._run_home,'scenario_pts.txt') \n scn = np.loadtxt(scn_fname)\n scn_list = scn.tolist()\n else:\n scn_list = self._input_info\n \n # total number of runs\n M = len(scn_list)\n N = 8*M + 2 # 8*M runs plus two empty bathymetry runs\n\n if run_id == N:\n raise StopIteration()\n\n else:\n \n #=========================\n # set coarse and fine grids\n #\n t_shelf = 0. # time approaching continental slope\n t_harbor = 0. # time approaching harbor\n\n if ((run_id >= 0) and (run_id < 4*M)) or (run_id == 8*M):\n #------------------\n # setrun for coarse\n #\n grid = 'coarse'\n \n self._rundata.amrdata.amr_levels_max = 4\n # coarse grid run = 10\"\n # dx = 30', 5', 1', 10\"\n self._rundata.amrdata.refinement_ratios_x = [6, 5, 6]\n self._rundata.amrdata.refinement_ratios_y = [6, 5, 6]\n self._rundata.amrdata.refinement_ratios_t = [6, 5, 6]\n\n\n # add topography (coarse)\n topofiles = self._rundata.topo_data.topofiles\n # for topography, append lines of the form\n # [topotype, minlevel, maxlevel, t1, t2, fname]\n topofiles = []\n\n topofiles.append([3, 1, 4, 0., 1.e10, \\\n os.path.join(etopo_dir, 'etopo1_-130_-124_38_45_1min.asc')])\n topofiles.append([-3, 3, 4, 0., 1.e10, \\\n os.path.join(topodir, 'cc-1sec.asc')])\n\n # add regions\n regions = self._rundata.regiondata.regions \n # between shelf and CC \n regions = []\n regions.append(\\\n [2, 3, t_shelf, 1e9, -125, -124.05, 40.5, 43]) \n regions.append(\\\n [3, 4, t_harbor, 1e9, -124.26, -124.14, 41.67, 41.79])\n regions.append(\\\n [4, 4, t_harbor, 1e9, -124.218,-124.17, 41.7345, 41.77])\n\n # == fgmax.data values ==\n fgmax_files = self._rundata.fgmax_data.fgmax_files\n fgmax_files = []\n \n # for fixed grids append to this list names of any fgmax input files\n fgmax1_fname = os.path.join(driver_home,'fgmax1_coarse.txt')\n fgmax2_fname = os.path.join(driver_home,'fgmax2_coarse.txt')\n fgmax3_fname = os.path.join(driver_home,'fgmax3_coarse.txt')\n\n fgmax_files.append(fgmax1_fname) \n fgmax_files.append(fgmax2_fname) \n fgmax_files.append(fgmax3_fname) \n \n self._rundata.fgmax_data.num_fgmax_val = 2\n \n \n elif ((run_id >= 4*M) and (run_id < 8*M)) or (run_id == 8*M+1):\n #----------------\n # setrun for fine\n #\n grid = 'fine'\n \n self._rundata.amrdata.amr_levels_max = 6\n\n ## fine grid run = 2/3\"\n ## dx = 30', 5', 1', 10\", 2\", 2/3\"\n self._rundata.amrdata.refinement_ratios_x = [6, 5, 6, 5, 3]\n self._rundata.amrdata.refinement_ratios_y = [6, 5, 6, 5, 3]\n self._rundata.amrdata.refinement_ratios_t = [6, 5, 6, 5, 3]\n\n regions = self._rundata.regiondata.regions \n regions = []\n # between shelf and CC\n regions.append(\\\n [2, 4, t_shelf, 1e9, -125, -124.05, 40.5, 43]) \n regions.append(\\\n [4, 5, t_harbor, 1e9, -124.26, -124.14, 41.67, 41.79])\n 
regions.append(\\\n [6, 6, t_harbor, 1e9, -124.218,-124.17, 41.7345, 41.77])\n\n # add topography (fine)\n topofiles = self._rundata.topo_data.topofiles\n # for topography, append lines of the form\n # [topotype, minlevel, maxlevel, t1, t2, fname]\n topofiles = []\n\n topofiles.append([3, 1, 6, 0., 1.e10, \\\n os.path.join(etopo_dir, 'etopo1_-130_-124_38_45_1min.asc')])\n topofiles.append([-3, 4, 6, 0., 1.e10, \\\n os.path.join(topodir, 'cc-1sec.asc')])\n topofiles.append([3, 6, 6, 0., 1.e10, \\\n os.path.join(topodir,'cc-1_3sec-c_pierless.asc')])\n \n # == fgmax.data values ==\n fgmax_files = self._rundata.fgmax_data.fgmax_files\n fgmax_files = []\n \n # for fixed grids append to this list names of any fgmax input files\n fgmax1_fname = os.path.join(driver_home,'fgmax1_fine.txt')\n fgmax2_fname = os.path.join(driver_home,'fgmax2_fine.txt')\n fgmax3_fname = os.path.join(driver_home,'fgmax3_fine.txt')\n\n fgmax_files.append(fgmax1_fname) \n fgmax_files.append(fgmax2_fname) \n fgmax_files.append(fgmax3_fname) \n \n self._rundata.fgmax_data.num_fgmax_val = 2\n \n\n\n #\n # set desired magnitude\n #\n if ((run_id >= 0) and (run_id < M)) \\\n or ((run_id >= 4*M) and (run_id < 5*M)):\n self.KL_Mw_desired = 8.6\n elif ((run_id >= M) and (run_id < 2*M)) \\\n or ((run_id >= 5*M) and (run_id < 6*M)):\n self.KL_Mw_desired = 8.8\n elif ((run_id >= 2*M) and (run_id < 3*M)) \\\n or ((run_id >= 6*M) and (run_id < 7*M)):\n self.KL_Mw_desired = 9.0\n elif ((run_id >= 3*M) and (run_id < 4*M)) \\\n or ((run_id >= 7*M) and (run_id < 8*M)):\n self.KL_Mw_desired = 9.2\n \n #\n # set slip distribution\n #\n run_id_mod = run_id - 100*(run_id/100)\n m = scn_list[run_id_mod]\n self.set_KL_slip(m)\n \n if run_id < 8*M:\n dir_grid_Mw = '../geoclaw_output/' + str(grid) + '_' + str(self.KL_Mw_desired)\n self._rundir = os.path.join(dir_grid_Mw, 'run_' + str(run_id_mod))\n else:\n # empty runs to obtain bathymetry\n \n dir_grid_Mw = '../geoclaw_output/' + str(grid) + '_B0'\n self._rundir = dir_grid_Mw\n self.KL_Mw_desired = 0.0\n self.set_KL_slip([0.]*len(m)) # set output\n self._rundata.clawdata.output_times = [1.0, 3.0]\n \n self._run_id += 1\n \n return self", "def main():\n\n os.system(\"rm -rf images; mkdir images\")\n\n if (len(sys.argv) > 1):\n N = int(sys.argv[1])\n else:\n N = 10\n\n x_test = np.load(\"../../../../data/mnist/mnist_test_images.npy\")\n\n for i in range(N):\n r,c = random.randint(6,12), random.randint(6,12)\n g = np.zeros(r*c)\n for j in range(r*c):\n if (random.random() < 0.15):\n g[j] = 1\n g = g.reshape((r,c))\n g[:,0] = g[0,:] = g[:,-1] = g[-1,:] = 0\n\n img = np.zeros((28*r,28*c), dtype=\"uint8\")\n for x in range(r):\n for y in range(c):\n if (g[x,y] == 1):\n n = random.randint(0, x_test.shape[0])\n im = x_test[n]\n img[28*x:(28*x+28), 28*y:(28*y+28)] = im\n \n Image.fromarray(img).save(\"images/image_%04d.png\" % i)", "def main():\n\t#print(scipy.__version__)\n\t#image()\n\t#heat_capacity2()\n\t#hist()\n\t#single_plot()\n\n\t#heat_capacity2()\n\t#single_plot()\n\t#plt.show()\n\t#u0_tc()\n\t#multi_heat_capacity(\"HL_DM_flux5\",True)\n\t#multi_heat_capacity2()\n\t#plot_spin()\n\t#plt.show()\n\theat_capacity2(1,2)\n\t#hist()\n\tplt.show()\n\t#potential()\n\t#plt.show()\n\t#heat_capacity(3,4)\n\t#heat_capacity(5,6)\n\t#heat_capacity(7,8)\n\t#final_spins()\n\t#plot_spin()\n\t#plot_from_csv()\n\t#difference_plot()", "def main(filename, iterations, save_diagnostics, output_dir, burnin):\n #data = []\n #with open(filename,'rb') as json_data:\n #skip header\n #jsondata = json.load(json_data)\n 
#j=0\n #while j<271:\n #eruption_time = jsondata[j]['FIELD1']\n #waiting_time = jsondata[j]['FIELD2']\n #data.append([float(eruption_time), float(waiting_time)])\n #j=j+1\n\n #generate ida images\n data = np.array([[131,3,1],[49,1,1],[17,7,1],[55,7,19],[80,5,1],[40,2,2],[91,21,6],[19,16,1],[27,7,1],[15,50,2],[37,1,7],[17,3,1],[22,32,2],[68,2,1],[26,2,3],[15,2,3],[246,2,1],[25,2,1],[19,1,1],[98,1,2],[54,13,1],[168,2,4],[20,102,5],[40,2,1],[41,1,1],[44,19,16],[17,6,1],[92,12,1],[17,2,1],[16,5,3],[45,11,1],[20,10,1],[26,1,2],[21,9,9],[26,10,1],[187,4,2],[65,28,4],[17,9,33],[23,39,1],[58,4,4],[41,107,3],[28,3,1],[16,1,1],[17,16,4],[17,16,1],[17,5,1],[83,2,2],[17,1,2],[26,4,2],[22,7,2],[16,1,1],[15,2,1],[15,2,1],[111,8,1],[25,6,1],[112,4,1],[19,10,2],[38,25,4],[29,1,5],[17,2,1],[111,9,8],[53,5,4],[29,7,1],[25,8,2],[23,2,134],[32,6,1],[27,1,1],[61,4,2],[41,163,4],[57,11,2],[24,2,1],[16,18,1],[81,7,14],[169,5,1],[19,4,1],[412,5,1],[32,2,7],[19,28,3],[17,11,1],[44,4,5],[27,2,2],[18,1,7],[15,3,3],[18,10,1],[19,6,10],[46,2,5],[20,12,3],[25,6,4],[18,4,1],[15,40,8],[16,11,16],[237,1,1],[26,13,2],[26,4,1],[101,5,5],[50,2,1],[22,45,5],[16,7,2],[17,4,2],[19,2,3],[22,1,1],[260,6,1],[20,15,1],[24,5,1],[33,2,1],[16,1,5],[21,18,1],[22,1,1],[18,13,2],[124,3,1],[16,6,1],[19,6,2],[71,2,1],[232,2,2],[21,2,1],[231,11,1],[201,49,2],[28,12,1],[68,5,1],[56,26,7],[17,1,8],[19,10,2],[120,13,2],[218,3,1],[46,5,6],[57,4,1],[30,5,2],[17,8,4],[17,22,1],[15,5,1],[16,7,1],[26,13,1],[28,22,2],[100,1,2],[58,12,2],[52,9,11],[21,4,2],[18,4,1],[699,1,1],[401,6,3],[20,7,1],[20,3,13],[27,1,1],[35,2,2],[27,6,1],[15,13,1],[17,6,1],[26,28,4],[89,2,3],[36,11,2],[17,11,2],[15,1,1],[59,3,1],[15,3,1],[20,11,1],[49,1,1],[24,3,1],[25,7,1],[29,1,1],[61,2,2],[28,3,13],[82,2,8],[22,2,1],[21,25,3],[73,3,2],[22,8,1],[51,3,12],[16,6,1],[64,2,4],[22,2,2],[19,7,1],[69,2,1],[17,8,9],[19,1,13],[28,35,3],[134,2,1],[19,12,1],[27,13,1],[17,10,1],[16,17,4],[46,2,3],[15,1,2],[35,15,2],[20,6,1],[16,10,3],[33,11,1],[20,8,4],[15,5,1],[33,5,2],[460,6,1],[132,2,1],[73,14,3],[34,5,1],[123,1,2],[15,8,1],[30,1,1],[16,1,1],[73,3,1],[54,4,1],[17,1,9],[17,17,3],[22,1,3],[46,16,8],[18,1,1],[22,3,2],[21,4,1],[40,5,1],[19,2,1],[16,11,1],[19,4,1],[26,4,1],[87,1,3],[75,1,8],[25,1,1],[16,1,1],[17,10,3],[15,44,2],[79,3,1],[21,19,1],[292,5,13],[27,4,1],[25,2,1],[23,34,1],[36,2,1],[15,2,7],[18,3,3],[62,1,7],[16,61,5],[15,5,1],[36,5,1],[67,8,3],[18,4,1],[23,2,1],[16,21,3],[32,7,1],[22,6,1],[88,5,1],[19,2,4],[38,2,1],[47,6,28],[18,35,3],[159,15,1],[25,3,5],[295,9,4],[26,2,1],[27,8,3],[86,6,1],[24,25,4],[18,1,2],[16,6,1],[64,16,1],[39,1,2],[30,1,4],[44,1,3],[82,11,4],[28,13,2],[46,19,1],[15,26,1],[30,6,11],[51,3,6],[19,20,1],[940,6,4],[21,6,1],[29,2,1],[20,2,1],[31,2,1],[21,2,3],[25,27,1],[26,2,1],[17,4,1],[64,7,1],[126,7,15],[18,8,1],[20,13,2],[16,7,2],[18,2,1],[19,4,5],[29,1,1],[80,12,2],[42,14,6],[107,2,1],[15,4,1],[48,16,1],[62,3,2],[15,13,1],[29,48,7],[25,4,1],[17,5,20],[19,7,3],[22,10,3],[58,15,3],[17,14,1],[121,2,2],[33,64,11],[16,15,2],[39,6,2],[25,69,7],[69,2,1],[41,6,2],[20,5,1],[42,22,4],[18,17,4],[16,14,3],[27,14,1],[20,1,1],[44,1,101],[33,9,1],[26,2,8],[30,24,3],[27,24,2],[34,7,1],[39,6,3],[20,2,3],[55,5,1],[22,22,2],[17,2,1],[55,3,1],[29,10,5],[60,12,2],[18,13,3],[93,3,2],[15,3,1],[26,5,5],[18,1,1],[17,16,2],[15,13,3],[22,12,1],[256,19,27],[18,7,8],[22,3,1],[35,3,4],[16,2,1],[19,6,2],[24,1,1],[29,3,2],[36,21,8],[24,1,1],[18,6,2],[26,24,11],[19,15,2],[16,1,1],[28,4,1],[60,11,1],[62,4,2],[70,2,1],[75,1,2],[125,3,1],[21,6,1],[165,23,2],[108,1,1],[35,5,1],[251,19,12],
[137,4,1],[81,11,4],[104,19,4],[18,18,3],[19,13,1],[18,112,5],[19,6,2],[28,7,2],[23,9,1],[20,15,7],[34,1,1],[24,12,3],[15,5,1],[40,9,4],[24,41,6],[35,1,1],[17,3,1],[17,3,4],[46,7,2],[21,8,10],[17,7,4],[36,6,1],[32,6,2],[31,1,1],[17,32,5],[26,3,4],[16,4,1],[21,2,1],[19,4,1],[33,4,1],[46,7,1],[28,9,1],[169,9,24],[24,18,2],[103,6,1],[93,1,1],[156,2,1],[58,7,1],[55,30,3],[15,5,1],[20,9,1],[19,20,1],[44,1,3],[16,2,1],[23,4,1],[22,10,1],[16,138,5],[17,2,1],[17,1,2],[70,8,5],[15,3,6],[22,6,1],[20,1,1],[35,2,4],[15,3,1],[26,119,46],[390,18,2],[22,4,1],[175,5,2],[23,4,1],[26,2,21],[17,1,2],[112,4,1],[18,22,5],[22,2,1],[122,13,1],[18,1,1],[27,7,1],[26,18,5],[18,1,3],[28,1,15],[35,11,1],[15,2,1],[55,6,5],[67,3,1],[30,5,7],[31,12,1],[16,9,12],[43,7,1],[23,21,1],[43,2,7],[53,40,1],[58,6,1],[29,27,11],[65,6,2],[27,4,2],[15,7,2],[17,26,13],[48,4,79],[30,2,6],[25,1,1],[20,20,6],[59,2,5],[15,14,4],[18,7,1],[18,2,1],[28,7,1],[35,1,1],[15,12,4],[52,2,2],[16,25,1],[91,1,1],[27,7,3],[62,4,1],[29,11,1],[25,4,3],[15,1,1],[40,6,2],[19,2,2],[24,14,2],[33,5,1],[58,3,3],[23,1,4],[15,2,2],[92,5,1],[17,2,1],[16,10,1],[50,8,1],[24,2,1],[73,1,1],[30,33,55],[18,15,1],[15,9,4],[23,1,3],[17,5,1],[43,3,1],[15,9,2],[19,4,2],[20,20,4],[31,1,2],[21,3,1],[79,9,13],[20,3,24],[56,2,1],[26,1,2],[15,3,1],[30,12,1],[64,6,1],[327,8,47],[39,2,1],[22,17,5],[18,6,3],[74,14,2],[17,4,1],[39,1,3],[520,9,3],[65,9,1],[36,1,4],[264,3,3],[16,1,1],[18,5,3],[22,16,3],[21,2,1],[15,3,3],[49,5,1],[37,19,2],[19,13,2],[30,1,1],[44,4,1],[19,9,31],[22,4,2],[21,4,5],[16,4,1],[40,17,1],[15,12,4],[43,4,3],[21,30,1],[60,16,3],[28,2,1],[38,16,2],[19,3,1],[68,18,4],[1,4,3],[1,9,1],[1,2,2],[1,1,4],[1,148,4],[1,6,1],[1,16,1],[1,4,1],[1,19,3],[1,7,3],[1,2,2],[1,4,2],[1,47,5],[1,2,2],[1,1,4],[1,1,2],[1,1,2],[1,1,1],[1,4,2],[1,7,1],[1,4,6],[1,2,1],[1,5,4],[1,9,3],[1,9,2],[1,7,1],[1,4,1],[1,10,2],[1,1,1],[1,5,1],[1,5,1],[1,2,16],[1,2,1],[1,1,1],[1,3,2],[1,8,3],[1,1,18],[1,5,1],[1,14,3],[1,6,6],[1,7,1],[1,1,1],[1,16,1],[1,2,1],[1,2,1],[1,1,2],[1,4,4],[1,4,1],[1,9,1],[1,25,7],[1,1,1],[1,8,2],[1,1,4],[1,77,8],[1,1,3],[1,6,3],[1,4,2],[1,2,2],[1,2,1],[1,40,1],[1,26,3],[1,1,4],[1,1,1],[1,2,2],[1,1,2],[1,15,1],[1,35,86],[1,3,2],[1,4,1],[1,2,1],[1,4,3],[1,30,1],[1,2,1],[1,4,2],[1,2,1],[1,1,1],[1,2,1],[1,3,1],[1,2,3],[1,3,1],[1,14,1],[1,3,2],[1,7,4],[1,6,2],[1,2,1],[1,23,2],[1,4,1],[1,4,3],[1,26,3],[1,47,15],[1,3,5],[1,5,1],[1,3,1],[1,2,1],[1,2,1],[1,3,1],[1,36,1],[1,2,1],[1,1,9],[1,6,1],[1,2,1],[1,8,3],[1,7,1],[1,33,2],[1,14,4],[1,13,3],[1,2,1],[1,5,1],[1,7,2],[1,9,3],[1,6,1],[1,3,1],[1,9,1],[1,2,2],[1,2,1],[1,6,3],[1,4,2],[1,2,1],[1,1,1],[1,13,4],[1,9,2],[1,4,2],[1,7,14],[1,8,1],[1,3,1],[1,25,2],[1,2,1],[1,11,1],[1,2,1],[1,1,1],[1,3,3],[1,3,2],[1,2,1],[1,2,1],[1,2,8],[1,9,1],[1,13,9],[1,3,1],[1,8,1],[1,102,71],[1,22,1],[1,2,3],[1,22,2],[1,1,1],[1,3,1],[1,12,1],[1,3,2],[1,1,1],[1,5,2],[1,30,6],[1,14,1],[1,2,1],[1,1,1],[1,5,1],[1,8,1],[1,4,2],[1,3,1],[1,2,1],[1,1,1],[1,1,1],[1,12,1],[1,14,1],[1,10,2],[1,22,3],[1,15,2],[1,4,2],[1,5,1],[1,10,2],[1,10,26],[1,1,2],[1,1,2],[1,17,1],[1,1,1],[1,7,1],[1,1,1],[1,8,2],[1,5,2],[1,15,1],[1,16,2],[1,7,1],[1,26,1],[1,16,2],[1,13,6],[1,3,3],[1,2,1],[1,2,1],[1,5,3],[1,1,1],[1,4,1],[1,1,1],[1,2,2],[1,13,4],[1,50,2],[1,12,3],[1,2,1],[1,16,5],[1,2,8],[1,3,5],[1,1,1],[1,25,1],[1,5,1],[1,13,2],[1,1,2],[1,8,1],[1,13,1],[1,4,4],[1,2,3],[1,7,2],[1,2,4],[1,2,1],[1,1,2],[1,4,1],[1,3,2],[1,8,4],[1,4,1],[1,2,2],[1,2,1],[1,3,1],[1,7,1],[1,8,5],[1,34,4],[1,2,3],[1,1,1],[1,8,3],[1,3,1],[1,26,2],[1,3,1],[1,1,6],[1,2,4],[1,7,1],[1,9,2],[1,3,93],[1,2,1],[1,
3,2],[1,3,3],[1,15,3],[1,12,1],[1,1,1],[1,1,5],[1,4,1],[1,1,4],[1,2,1],[1,6,4],[1,9,1],[1,1,9],[1,11,1],[1,68,2],[1,7,1],[1,11,1],[1,6,1],[1,5,2],[1,2,1],[1,19,1],[1,3,1],[1,1,2],[1,37,1],[1,19,1],[1,4,5],[1,8,1],[1,1,1],[1,7,1],[1,3,1],[1,4,1],[1,6,7],[1,2,1],[1,14,3],[1,4,1],[1,6,5],[1,1,1],[1,1,1],[1,2,1],[1,1,2],[1,7,2],[1,8,1],[1,17,136],[1,6,1],[1,3,2],[1,9,12],[1,7,2],[1,2,9],[1,1,4],[1,3,1],[1,10,1],[1,6,16],[1,8,1],[1,2,2],[1,2,2],[1,4,3],[1,3,3],[1,24,3],[1,68,28],[1,16,1],[1,9,2],[1,1,2],[1,18,7],[1,3,1],[1,5,2],[1,1,3],[1,3,1],[1,3,8],[1,73,5],[1,6,3],[1,5,1],[1,2,1],[1,15,7],[1,80,2],[1,3,1],[1,12,3],[1,8,1],[1,2,1],[1,9,5],[1,3,2],[1,319,20],[1,2,1],[1,4,6],[1,5,4],[1,25,1],[1,8,1],[1,6,5],[1,18,1],[1,2,2],[1,5,2],[1,10,1],[1,10,1],[1,2,1],[1,6,2],[1,7,2],[1,39,1],[1,7,79],[1,28,4],[1,2,1],[1,4,1],[1,25,5],[1,23,3],[1,10,3],[1,2,1],[1,13,1],[1,2,2],[1,6,1],[1,6,4],[1,12,1],[1,4,1],[1,3,1],[1,10,1],[1,4,2],[1,7,1],[1,11,1],[1,6,1],[1,4,2],[1,3,3],[1,1,1],[1,1,1],[1,3,3],[1,3,2],[1,15,1],[1,1,1],[1,1,4],[1,26,2],[1,1,1],[1,7,1],[1,4,63],[1,1,19],[1,96,7],[1,7,2],[1,6,1],[1,4,1],[1,18,2],[1,1,2],[1,4,1],[1,3,3],[1,18,1],[1,3,1],[1,14,1],[1,6,2],[1,13,1],[1,1,5],[1,13,2],[1,1,1],[1,4,4],[1,10,1],[1,2,1],[1,12,3],[1,7,1],[1,8,1],[1,3,1],[1,2,2],[1,4,5],[1,9,1],[1,2,1],[1,2,1],[1,6,8],[1,32,3],[1,3,2],[1,6,1],[1,5,1],[1,7,1],[1,4,2],[1,2,1],[1,5,4],[1,1,2],[1,9,1],[1,2,1],[1,11,1],[1,5,2],[1,2,1],[1,1,1],[1,3,1],[1,7,13],[1,4,4],[1,1,1],[1,6,1],[1,1,3],[1,6,6],[1,6,1],[1,4,4],[1,10,1],[1,15,1],[1,3,7],[1,6,1],[1,9,1],[1,14,23],[1,14,2],[1,6,3],[1,2,1],[1,9,1],[1,1,3],[1,6,4],[1,15,2],[1,8,1],[1,6,6],[1,16,10],[1,5,4],[1,30,3],[1,7,1],[1,4,1],[1,3,1],[1,6,6],[1,1,2],[1,3,2],[1,1,1],[1,1,1],[1,1,1],[1,2,5],[1,2,1],[1,2,5],[1,24,1],[1,3,1],[1,6,1],[1,2,1],[1,4,1],[1,2,2],[1,4,1],[1,1,1],[1,3,1],[1,8,2],[1,4,2],[1,2,2],[1,2,1],[1,12,6],[1,2,1],[1,32,42],[1,7,1],[1,7,1],[1,12,1],[1,2,1],[1,6,1],[1,42,1],[1,2,1],[1,1,2],[1,2,1],[1,6,1],[1,2,2],[1,8,1],[1,22,4],[1,1,1],[1,11,20],[1,6,2],[1,2,1],[1,4,2],[1,9,1],[1,10,1],[1,16,5],[1,3,2],[1,8,1],[1,6,3],[1,1,2],[1,6,1],[1,2,1],[1,28,1],[1,18,1],[1,17,8],[1,4,1],[1,2,2],[1,13,1],[1,25,3],[1,7,4],[1,3,1],[1,1,1],[1,3,3],[1,4,1],[1,7,5],[1,2,2],[1,5,1],[1,2,2],[1,2,2],[1,14,1],[1,3,3],[1,4,1],[1,1,2],[1,11,1],[1,2,1],[1,6,1],[1,7,6],[1,7,1],[1,2,2],[1,2,1],[1,31,4],[1,4,3],[1,14,6],[1,4,4],[1,1,1],[1,2,1],[1,12,5],[1,4,1],[1,7,1],[1,3,1],[1,4,1],[1,11,1],[1,12,1],[1,3,2],[1,9,1],[1,17,2],[1,9,5],[1,6,1],[1,13,2],[1,5,1],[1,4,3],[1,3,1],[1,1,4],[1,7,1],[1,4,1],[1,3,1],[1,56,3],[1,1,1],[1,9,1],[1,4,1],[1,15,1],[1,2,1],[1,12,1],[1,4,2],[1,1,1],[1,1,1],[1,149,2],[1,56,1],[1,4,5],[1,2,2],[1,11,3],[1,2,3],[1,1,2],[1,2,1],[1,15,4],[1,2,2],[1,4,1],[1,17,2],[1,10,5],[1,14,2],[1,8,2],[1,4,2],[1,4,1],[1,6,1],[1,5,1],[1,7,2],[1,20,5],[1,3,1],[1,4,1],[1,11,1],[1,2,1],[1,1,3],[1,5,2],[1,6,1],[1,4,3],[1,4,3],[1,4,2],[1,7,3],[1,5,1],[1,1,1],[1,2,1],[1,8,1],[1,7,1],[1,2,1],[1,1,1],[1,1,1],[1,4,3],[1,11,1],[1,43,1],[1,7,8],[1,8,1],[1,1,1],[1,8,6],[1,9,3],[1,19,1],[1,2,1],[1,43,3],[1,4,5],[1,2,3],[1,4,1],[1,17,1],[1,9,1],[1,8,72],[1,2,1],[1,4,2],[1,16,1],[1,15,1],[1,8,1],[1,3,1],[1,7,8],[1,4,1],[1,23,2],[1,1,2],[1,1,1],[1,15,7],[1,7,4],[1,3,4],[1,5,1],[1,1,1],[1,6,83],[1,1,1],[1,4,3],[1,2,1],[1,3,2],[1,9,2],[1,5,1],[1,22,1],[1,3,6],[1,6,4],[1,4,1],[1,1,4],[1,1,1],[1,5,3],[1,1,2],[1,15,2],[1,8,1],[1,5,2],[1,1,1],[1,4,10],[1,63,1],[1,2,2],[1,2,1],[1,9,1],[1,4,3],[1,2,1],[1,24,1],[1,2,2],[1,2,2],[1,6,2],[1,13,5],[1,34,5],[1,10,1],[1,3,1],[1,22,9],[1,41,1],[1,1,4],[1
,13,2],[1,18,1],[1,4,4],[1,7,1],[1,4,3],[1,14,4],[1,3,2],[1,2,1],[1,7,10],[1,15,3],[1,6,1],[1,1,1],[1,2,5],[1,4,10],[1,5,2],[1,12,6],[1,6,1],[1,19,134],[1,11,1],[1,233,9],[1,4,2],[1,40,1],[1,2,1],[1,10,1],[1,3,1],[1,3,1],[1,3,1],[1,35,1],[1,2,7],[1,1,3],[1,3,1],[1,14,2],[1,1,1],[1,7,1],[1,6,5],[1,10,1],[1,5,3],[1,8,1],[1,11,1],[1,13,1],[1,8,9],[1,5,1],[1,3,1],[1,11,1],[1,2,1],[1,5,1],[1,7,1],[1,9,3],[1,2,3],[1,2,2],[1,29,2],[1,2,1],[1,4,3],[1,1,2],[1,2,2],[1,3,6],[1,11,1],[1,1,1],[1,11,1],[1,4,1],[1,6,1],[1,3,5],[1,4,1],[1,4,3],[1,34,1],[1,4,2],[1,1,9],[1,18,1],[1,9,3],[1,15,1],[1,4,4],[1,4,2],[1,9,1],[1,4,1],[1,10,1],[1,2,1],[1,2,4],[1,4,1],[1,1,2],[1,3,3],[1,2,1],[1,47,14],[1,3,1],[1,2,1],[1,3,1],[1,1,1],[1,20,1],[1,14,6],[1,2,2],[1,16,2],[1,2,1],[1,1,31],[1,5,9],[1,10,2],[1,10,3],[1,19,1],[1,1,1],[1,13,2],[1,5,1],[1,1,2],[1,1,2],[1,24,1],[1,9,2],[1,4,1],[1,10,3],[1,35,6],[1,1,1],[1,2,1],[1,1,1],[1,3,1],[1,4,5],[1,4,1],[1,1,1],[1,4,1],[1,10,2],[1,55,6],[1,3,22],[1,28,4],[1,6,3],[1,10,1],[1,6,187],[1,3,2],[1,12,5],[1,7,1],[1,4,1],[1,2,2],[1,2,1],[1,31,9],[1,2,8],[1,20,2],[1,36,2],[1,2,2],[1,15,5],[1,5,2],[1,3,2],[1,8,1],[1,1,1],[1,2,1],[1,37,1],[1,17,4],[1,8,1],[1,19,2],[1,7,1],[1,1,1],[1,1,1],[1,2,1],[1,9,1],[1,2,1],[1,2,1],[1,2,1],[1,19,1],[1,33,3],[1,4,1],[1,7,1],[1,3,1],[1,46,4],[1,2,1],[1,3,2],[1,1,2],[1,2,2],[1,14,1],[1,3,1],[1,11,2],[1,2,2],[1,21,2],[1,34,2],[1,4,1],[1,1,1],[1,2,1],[1,22,1],[1,64,9],[1,21,10],[1,3,3],[1,6,1],[1,16,2],[1,3,1],[1,31,4],[1,1,1],[1,1,2],[1,1,1],[1,3,1],[1,5,4],[1,27,1],[1,1,1],[1,2,2],[1,17,10],[1,4,1],[1,25,1],[1,41,1],[1,18,4],[1,17,40],[1,9,1],[1,2,1],[1,7,1],[1,21,2],[1,2,3],[1,3,1],[1,14,1],[1,8,2],[1,2,1],[1,2,2],[1,5,1],[1,1,2],[1,4,1],[1,6,5],[1,9,17],[1,5,1],[1,6,1],[1,4,1],[1,1,1],[1,3,1],[1,61,9],[1,6,1],[1,9,2],[1,2,2],[1,9,1],[1,7,4],[1,12,1],[1,2,2],[1,40,1],[1,17,13],[1,1,7],[1,11,2],[1,20,2],[1,2,1],[1,1,1],[1,12,10],[1,5,3],[1,2,1],[1,1,1],[1,23,2],[1,9,3],[1,4,1],[1,5,2],[1,4,1],[1,19,5],[1,5,1],[1,1,4],[1,5,1],[1,8,1],[1,9,1],[1,5,3],[1,43,3],[1,1,2],[1,3,1],[1,2,2],[1,15,38],[1,3,1],[1,25,1],[1,1,4],[1,5,6],[1,2,1],[1,4,3],[1,4,2],[1,3,1],[1,9,1],[1,4,1],[1,13,2],[1,7,4],[1,2,6],[1,12,1],[1,8,3],[1,1,4],[1,13,1],[1,3,4],[1,3,2],[1,2,2],[1,4,1],[1,6,1],[1,14,3],[1,7,1],[1,8,1],[1,8,1],[1,3,1],[1,32,5],[1,16,2],[1,2,3],[1,38,1],[1,5,4],[1,10,2],[1,2,7],[1,3,1],[1,8,1],[1,3,2],[1,1,3],[1,4,2],[1,71,12],[1,8,4],[1,2,12],[1,3,1],[1,12,2],[1,2,1],[1,5,1],[1,2,28],[1,19,5],[1,10,1],[1,9,2],[1,3,1],[1,7,6],[1,11,1],[1,2,1],[1,27,2],[1,7,4],[1,4,2],[1,12,8],[1,8,96],[1,12,1],[1,2,4],[1,7,5],[1,15,3],[1,3,2],[1,18,2],[1,25,3],[1,7,2],[1,18,2],[1,6,1],[1,10,2],[1,4,1],[1,1,3],[1,5,1],[1,19,2],[1,8,1],[1,50,4],[1,8,1],[1,11,1],[1,9,1],[1,2,1],[1,2,5],[1,3,1],[1,6,2],[1,1,1],[1,13,5],[1,19,1],[1,7,2],[1,17,1],[1,6,1],[1,4,1],[1,7,3],[1,13,3],[1,7,4],[1,5,2],[1,4,1],[1,11,16],[1,7,1],[1,1,1],[1,2,1],[1,2,1],[1,14,3],[1,30,1],[1,2,6],[1,6,2],[1,3,1],[1,4,1],[1,9,11],[1,6,1],[1,35,1],[1,2,8],[1,1,2],[1,3,2],[1,1,1],[1,9,1],[1,2,57],[1,2,1],[1,5,1],[1,4,2],[1,15,1],[1,12,3],[1,4,3],[1,17,1],[1,12,2],[1,21,12],[1,2,1],[1,9,1],[1,9,47],[1,49,4],[1,5,1],[1,4,1],[1,24,1],[1,2,2],[1,64,2],[1,48,7],[1,2,2],[1,10,2],[1,3,1],[1,11,1],[1,5,1],[1,1,2],[1,2,4],[1,6,1],[1,19,6],[1,6,2],[1,3,2],[1,1,1],[1,22,2],[1,3,2],[1,5,14],[1,2,1],[1,11,1],[1,4,2],[1,6,1],[1,24,10],[1,7,1],[1,2,74],[1,6,1],[1,28,1],[1,1,1],[1,1,1],[1,10,1],[1,88,4],[1,9,4],[1,26,1],[1,3,1],[1,4,1],[1,4,1],[1,6,1],[1,23,1],[1,2,7],[1,1,3],[1,7,1],[1,1,1],[1,5,2],[1,4,1],[1,2,1],[1,1,1],[1,15,
5],[1,22,1],[1,6,3],[1,12,2],[1,48,14],[1,7,1],[1,5,1],[1,10,5],[1,5,1],[1,6,5],[1,2,3],[1,14,3],[1,3,1],[1,8,4],[1,2,5],[1,34,3],[1,2,1],[1,4,1],[1,6,7],[1,3,1],[1,3,3],[1,32,2],[1,3,1],[1,3,1],[1,2,1],[1,3,1],[1,39,8],[1,1,1],[1,15,8],[1,3,4],[1,2,3],[1,1,3],[1,38,18],[1,6,1],[1,25,4],[1,2,1],[1,8,1],[1,3,1],[1,24,1],[1,5,5],[1,5,4],[1,2,3],[1,2,1],[1,5,4],[1,51,1],[1,23,3],[1,2,1],[1,2,1],[1,1,2],[1,7,2],[1,3,1],[1,1,1],[1,4,1],[1,2,1],[1,7,6],[1,8,1],[1,11,1],[1,2,6],[1,2,1],[1,2,1],[1,1,1],[1,26,1],[1,3,1],[1,2,1],[1,2,1],[1,2,1],[1,12,2],[1,1,3],[1,3,1],[1,2,4],[1,19,3],[1,3,1],[1,3,2],[1,49,3],[1,2,1],[1,21,3],[1,1,1],[1,5,1],[1,4,1],[1,2,2],[1,2,1],[1,1,1],[1,7,4],[1,2,1],[1,2,1],[1,2,1],[1,3,2],[1,26,2],[1,9,1],[1,2,2],[1,12,1],[1,4,32],[1,4,1],[1,17,1],[1,1,2],[1,77,4],[1,2,1],[1,12,1],[1,2,1],[1,2,4],[1,5,2],[1,10,3],[1,4,3],[1,2,1],[1,1,3],[1,16,4],[1,3,1],[1,40,2],[1,13,1],[1,2,1],[1,6,2],[1,12,2],[1,6,11],[1,6,1],[1,1,1],[1,10,6],[1,1,1],[1,6,5],[1,38,4],[1,2,7],[1,9,1],[1,5,2],[1,3,1],[1,2,1],[1,5,2],[1,4,1],[1,1,1],[1,1,1],[1,4,2],[1,4,3],[1,5,2],[1,1,4],[1,11,4],[1,14,4],[1,4,1],[1,17,2],[1,2,2],[1,39,1],[1,9,21],[1,14,2],[1,4,4],[1,4,3],[1,9,2],[1,1,1],[1,3,2],[1,1,1],[1,1,7],[1,16,4],[1,5,1],[1,2,1],[1,2,1],[1,2,1],[1,98,19],[1,4,1],[1,1,1],[1,5,1],[1,7,1],[1,1,3],[1,9,1],[1,4,2],[1,2,1],[1,7,2],[1,2,1],[1,1,2],[1,1,1],[1,5,2],[1,6,1],[1,11,6],[1,5,4],[1,40,5],[1,1,2],[1,9,1],[1,2,1],[1,6,1],[1,5,1],[1,11,2],[1,4,1],[1,3,17],[1,1,1],[1,1,5],[1,9,5],[1,60,1],[1,3,7],[1,3,4],[1,5,1],[1,3,10],[1,5,2],[1,7,1],[1,2,1],[1,14,14],[1,4,3],[1,1,2],[1,2,4],[1,5,1],[1,11,7],[1,3,1],[1,29,3],[1,2,4],[1,8,1],[1,53,1],[1,10,1],[1,7,2],[1,2,13],[1,58,1],[1,5,6],[1,2,1],[1,4,2],[1,4,2],[1,4,2],[1,5,2],[1,2,3],[1,12,2],[1,4,6],[1,34,1],[1,1,1],[1,8,1],[1,4,1],[1,2,1],[1,2,2],[1,16,1],[1,4,2],[1,3,13],[1,2,2],[1,46,2],[1,4,1],[1,6,1],[1,1,2],[1,2,1],[1,3,6],[1,3,1],[1,19,1],[1,2,1],[1,23,1],[1,3,1],[1,1,1],[1,7,2],[1,4,4],[1,18,3],[1,1,1],[1,7,2],[1,2,2],[1,7,1],[1,2,1],[1,2,1],[1,6,1],[1,9,4],[1,3,1],[1,5,1],[1,13,1],[1,2,2],[1,33,1],[1,12,1],[1,9,3],[1,2,1],[1,1,1],[1,18,1],[1,1,3],[1,3,15],[1,2,4],[1,17,1],[1,1,1],[1,1,1],[1,4,8],[1,1,2],[1,31,19],[1,1,5],[1,7,6],[1,12,4],[1,2,4],[1,7,8],[1,4,2],[1,13,2],[1,19,18],[1,42,4],[1,3,1],[1,17,1],[1,3,3],[1,4,2],[1,12,1],[1,1,6],[1,23,2],[1,3,1],[1,20,1],[1,21,4],[1,1,1],[1,3,2],[1,10,1],[1,9,1],[1,8,6],[1,21,3],[1,5,1],[1,7,6],[1,2,1],[1,5,1],[1,1,2],[1,11,1],[1,8,212],[1,9,3],[1,6,1],[1,1,2],[1,25,12],[1,4,1],[1,14,15],[1,4,1],[1,13,1],[1,2,2],[1,3,1],[1,4,1],[1,3,1],[1,1,1],[1,3,1],[1,9,7],[1,1,1],[1,6,1],[1,8,2],[1,8,1],[1,2,3],[1,3,1],[1,2,3],[1,1,2],[1,10,1],[1,6,1],[1,12,3],[1,12,1],[1,1,1],[1,2,1],[1,2,4],[1,4,1],[1,2,1],[1,1,1],[1,4,1],[1,23,2],[1,4,2],[1,20,1],[1,17,4],[1,8,2],[1,4,6],[1,4,1],[1,6,1],[1,10,1],[1,6,2],[1,1,1],[1,3,1],[1,4,1],[1,4,1],[1,16,143],[1,7,1],[1,10,1],[1,7,2],[1,3,3],[1,8,3],[1,2,1],[1,49,1],[1,2,7],[1,14,4],[1,31,3],[1,29,1],[1,31,8],[1,5,2],[1,7,1],[1,1,1],[1,4,5],[1,1,1],[1,7,3],[1,1,2],[1,5,3],[1,3,1],[1,7,4],[1,129,9],[1,13,1],[1,11,4],[1,6,28],[1,6,1],[1,6,1],[1,20,1],[1,2,1],[1,16,3],[1,3,3],[1,5,1],[1,64,1],[1,4,2],[1,7,1],[1,21,3],[1,2,2],[1,9,1],[1,2,1],[1,5,6],[1,6,6],[1,3,1],[1,5,1],[1,3,1],[1,3,1],[1,6,2],[1,2,3],[1,4,1],[1,1,1],[1,12,37],[1,6,1],[1,1,1],[1,4,2],[1,4,8],[1,6,2],[1,2,2],[1,19,1],[1,1,1],[1,1,3],[1,3,1],[1,4,5],[1,15,2],[1,8,3],[1,1,1],[1,2,2],[1,3,1],[1,10,1],[1,4,1],[1,1,2],[1,19,1],[1,5,2],[1,4,4],[1,3,2],[1,3,17],[1,1,1],[1,1,1],[1,2,1],[1,18,3],[1,3,1],[1,16,4],[1,5,1],[1,11,2
],[1,19,8],[1,2,1],[1,2,1],[1,1,6],[1,3,1],[1,2,1],[1,1,1],[1,2,1],[1,11,3],[1,17,4],[1,4,1],[1,4,4],[1,5,2],[1,1,1],[1,1,2],[1,10,12],[1,2,2],[1,8,1],[1,1,2],[1,8,1],[1,17,2],[1,2,1],[1,4,1],[1,6,1],[1,20,21],[1,5,7],[1,3,1],[1,13,2],[1,3,6],[1,8,3],[1,12,1],[1,12,2],[1,3,2],[1,15,2],[1,6,1],[1,9,5],[1,5,3],[1,4,1],[1,7,4],[1,4,4],[1,9,4],[1,11,1],[1,3,1],[1,17,1],[1,71,5],[1,7,1],[1,3,1],[1,5,1],[1,1,1],[1,1,2],[1,2,1],[1,1,2],[1,10,2],[1,3,1],[1,2,2],[1,5,1],[1,28,4],[1,2,1],[1,1,1],[1,9,1],[1,3,2],[1,8,2],[1,13,1],[1,2,1],[1,6,1],[1,25,79],[1,30,24],[1,10,31],[1,5,1],[1,9,1],[1,1,1],[1,4,1],[1,118,14],[1,18,3],[1,30,1],[1,10,3],[1,5,1],[1,5,1],[1,1,1],[1,6,1],[1,9,3],[1,6,2],[1,5,1],[1,2,2],[1,3,1],[1,7,4],[1,8,2],[1,10,2],[1,1,8],[1,41,1],[1,21,4],[1,6,1],[1,13,3],[1,5,1],[1,34,7],[1,22,1],[1,9,8],[1,5,3],[1,11,1],[1,2,1],[1,6,1],[1,4,1],[1,72,1],[1,44,3],[1,2,1],[1,1,1],[1,3,1],[1,8,2],[1,1,3],[1,14,1],[1,3,2],[1,1,1],[1,9,2],[1,17,1],[1,9,35],[1,3,1],[1,6,1],[1,2,11],[1,5,3],[1,1,1],[1,2,1],[1,14,7],[1,51,44],[1,3,6],[1,1,1],[1,6,2],[1,2,1],[1,11,2],[1,8,3],[1,3,2],[1,3,3],[1,4,1],[1,2,1],[1,5,1],[1,8,5],[1,60,1],[1,6,3],[1,36,2],[1,1,1],[1,2,1],[1,10,2],[1,26,2],[1,7,3],[1,6,1],[1,6,2],[1,3,3],[1,2,3],[1,6,2],[1,2,2],[1,2,2],[1,5,2],[1,2,1],[1,15,5],[1,1,2],[1,1,3],[1,37,24],[1,8,2],[1,17,2],[1,31,1],[1,14,2],[1,2,1],[1,16,2],[1,3,1],[1,2,2],[1,1,2],[1,2,3],[1,4,2],[1,1,1],[1,9,5],[1,1,2],[1,1,4],[1,4,18],[1,6,1],[1,12,1],[1,3,85],[1,17,2],[1,4,1],[1,7,1],[1,4,1],[1,3,1],[1,22,2],[1,1,1],[1,15,27],[1,4,1],[1,1,1],[1,1,3],[1,3,1],[1,35,2],[1,1,1],[1,33,4],[1,2,1],[1,3,3],[1,6,1],[1,9,1],[1,8,1],[1,6,1],[1,16,2],[1,20,2],[1,5,1],[1,1,5],[1,2,2],[1,12,25],[1,6,1],[1,13,1],[1,2,1],[1,2,1],[1,10,1],[1,2,1],[1,37,3],[1,2,1],[1,58,11],[1,14,3],[1,6,1],[1,6,1],[1,1,3],[1,1,1],[1,9,2],[1,1,502],[1,45,5],[1,5,1],[1,4,1],[1,2,8],[1,5,1],[1,1,1],[1,7,1],[1,4,1],[1,3,4],[1,1,1],[1,10,1],[1,9,1],[1,13,1],[1,10,8],[1,4,4],[1,7,1],[1,1,2],[1,2,2],[1,9,2],[1,13,2],[1,8,1],[1,1,1],[1,2,4],[1,29,1],[1,8,2],[1,7,3],[1,30,7],[1,1,1],[1,10,10],[1,3,1],[1,1,1],[1,5,1],[1,4,3],[1,7,1],[1,43,8],[1,1,2],[1,9,1],[1,1,1],[1,3,6],[1,9,1],[1,1,1],[1,7,1],[1,6,1],[1,2,2],[1,13,4],[1,13,3],[1,2,3],[1,8,1],[1,11,2],[1,9,53],[1,2,1],[1,16,1],[1,6,3],[1,48,3],[1,4,1],[1,7,3],[1,2,2],[1,8,1],[1,8,1],[1,26,2],[1,3,1],[1,8,2],[1,121,2],[1,2,2],[1,8,1],[1,2,2],[1,4,2],[1,8,1],[1,1,1],[1,4,1],[1,3,3],[1,7,1],[1,7,2],[1,2,1],[1,8,2],[1,34,28],[1,3,2],[1,3,1],[1,5,1],[1,9,1],[1,7,1],[1,14,4],[1,1,1],[1,34,4],[1,1,1],[1,6,1],[1,3,1],[1,2,1],[1,4,1],[1,5,2],[1,10,1],[1,41,5],[1,7,2],[1,19,4],[1,3,3],[1,12,3],[1,7,1],[1,4,2],[1,16,1],[1,3,1],[1,8,4],[1,9,2],[1,8,2],[1,2,1],[1,10,2],[1,8,1],[1,16,2],[1,7,2],[1,5,1],[1,2,3],[1,15,4],[1,3,5],[1,4,4],[1,1,1],[1,3,2],[1,5,1],[1,8,4],[1,4,1],[1,41,7],[1,2,1],[1,1,3],[1,1,6],[1,2,1],[1,10,2],[1,10,2],[1,3,3],[1,39,4],[1,1,2],[1,5,7],[1,12,2],[1,15,5],[1,4,1],[1,13,1],[1,3,1],[1,44,3],[1,1,2],[1,1,1],[1,6,1],[1,3,1],[1,3,2],[1,7,15],[1,1,1],[1,11,4],[1,3,1],[1,1,3],[1,1,1],[1,2,1],[1,9,4],[1,22,1],[1,46,2],[1,3,18],[1,22,8],[1,3,1],[1,4,10],[1,12,16],[1,2,1],[1,8,3],[1,1,1],[1,2,4],[1,1,1],[1,6,4],[1,7,1],[1,7,4],[1,14,4],[1,1,1],[1,13,2],[1,61,1],[1,6,2],[1,16,1],[1,14,7],[1,9,2],[1,18,2],[1,9,3],[1,1,2],[1,4,1],[1,6,1],[1,6,4],[1,10,1],[1,5,2],[1,7,1],[1,3,1],[1,11,2],[1,53,1],[1,10,2],[1,17,1],[1,2,2],[1,5,14],[1,17,1],[1,2,1],[1,5,1],[1,28,2],[1,8,2],[1,4,1],[1,4,2],[1,21,1],[1,3,1],[1,3,2],[1,5,2],[1,5,1],[1,3,13],[1,13,2],[1,124,753],[1,2,2],[1,43,1],[1,6,1],[1,2,2],[1,11,1],[1,
22,1],[1,5,2],[1,5,1],[1,8,1],[1,2,4],[1,2,2],[1,9,1],[1,6,1],[1,2,1],[1,6,1],[1,14,3],[1,21,1],[1,3,4],[1,3,3],[1,3,1],[1,2,2],[1,2,2],[1,5,2],[1,11,1],[1,6,1],[1,3,1],[1,64,1],[1,6,1],[1,2,12],[1,5,1],[1,6,4],[1,10,1],[1,14,1],[1,14,1],[1,2,1],[1,2,1],[1,8,4],[1,17,2],[1,5,3],[1,64,1],[1,33,3],[1,18,2],[1,1,1],[1,42,9],[1,20,2],[1,10,2],[1,2,2],[1,3,1],[1,13,1],[1,5,1],[1,39,5],[1,8,2],[1,6,1],[1,3,2],[1,12,1],[1,2,4],[1,8,1],[1,2,1],[1,4,5],[1,7,1],[1,2,1],[1,2,1],[1,5,2],[1,15,3],[1,6,1],[1,1,1],[1,11,2],[1,4,2],[1,1,1],[1,7,3],[1,7,2],[1,3,1],[1,3,1],[1,2,1],[1,8,3],[1,3,1],[1,7,12],[1,8,1],[1,4,2],[1,6,2],[1,9,1],[1,3,30],[1,8,3],[1,8,2],[1,8,1],[1,11,1],[1,13,1],[1,2,1],[1,16,1],[1,10,1],[1,3,1],[1,6,4],[1,29,2],[1,4,2],[1,4,1],[1,1,1],[1,7,1],[1,1,1],[1,4,11],[1,1,1],[1,6,1],[1,26,1],[1,3,1],[1,2,1],[1,10,1],[1,4,1],[1,14,2],[1,10,1],[1,5,2],[1,5,1],[1,2,1],[1,26,33],[1,1,1],[1,11,2],[1,8,5],[1,18,1],[1,2,1],[1,5,1],[1,4,2],[1,5,1],[1,11,2],[1,1,2],[1,2,2],[1,6,6],[1,10,1],[1,14,1],[1,2,1],[1,13,1],[1,14,1],[1,8,2],[1,21,2],[1,1,2],[1,1,1],[1,14,1],[1,2,1],[1,15,2],[1,4,1],[1,3,1],[1,10,2],[1,4,2],[1,5,1],[1,11,22],[1,8,3],[1,4,1],[1,3,2],[1,1,2],[1,25,3],[1,2,1],[1,11,2],[1,5,2],[1,39,1],[1,1,1],[1,415,128],[1,6,1],[1,5,1],[1,8,5],[1,2,3],[1,1,1],[1,1,1],[1,4,1],[1,2,4],[1,4,1],[1,2,9],[1,4,2],[1,23,3],[1,6,9],[1,5,4],[1,2,5],[1,1,1],[1,7,1],[1,3,7],[1,1,2],[1,2,16],[1,5,2],[1,1,3],[1,4,1],[1,11,1],[1,2,2],[1,2,1],[1,10,1],[1,6,2],[1,11,1],[1,28,1],[1,21,3],[1,3,2],[1,3,1],[1,4,1],[1,1,2],[1,7,1],[1,11,4],[1,4,2],[1,22,4],[1,1,1],[1,1,1],[1,12,7],[1,1,1],[1,4,2],[1,2,1],[1,6,4],[1,14,3],[1,8,2],[1,1,11],[1,13,2],[1,4,1],[1,3,2],[1,95,10],[1,1,2],[1,4,2],[1,27,2],[1,2,1],[1,19,1],[1,13,4],[1,1,1],[1,37,1],[1,4,1],[1,5,1],[1,7,5],[1,1,1],[1,4,5],[1,5,1],[1,1,1],[1,16,2],[1,22,1],[1,4,2],[1,24,4],[1,10,1],[1,77,6],[1,21,1],[1,11,1],[1,2,1],[1,1,1],[1,4,5],[1,2,4],[1,55,4],[1,17,1],[1,1,3],[1,2,2],[1,7,1],[1,17,1],[1,34,2],[1,4,1],[1,2,2],[1,1,2],[1,100,1],[1,17,2],[1,8,6],[1,11,2],[1,11,2],[1,3,1],[1,5,2],[1,1,1],[1,6,7],[1,15,5],[1,7,1],[1,4,1],[1,5,1],[1,6,2],[1,7,1],[1,2,2],[1,10,2],[1,17,1],[1,10,2],[1,6,3],[1,21,1],[1,2,1],[1,78,4],[1,6,1],[1,1,2],[1,5,1],[1,186,9],[1,16,3],[1,15,13],[1,30,4],[1,2,1],[1,15,3],[1,13,1],[1,3,1],[1,1,1],[1,2,2],[1,5,5],[1,7,1],[1,16,1],[1,2,1],[1,14,2],[1,11,5],[1,9,1],[1,13,2],[1,2,1],[1,4,64],[1,4,1],[1,18,4],[1,3,1],[1,1,1],[1,16,2],[1,4,1],[1,11,4],[1,9,3],[1,3,1],[1,4,1],[1,1,1],[1,10,3],[1,7,1],[1,13,1],[1,16,4],[1,1,16],[1,2,2],[1,18,6],[1,42,2],[1,1,3],[1,15,1],[1,3,1],[1,43,1],[1,1,1],[1,27,2],[1,1,3],[1,1,5],[1,13,1],[1,1,1],[1,10,11],[1,8,1],[1,9,1],[1,13,1],[1,1,2],[1,13,3],[1,1,1],[1,5,1],[1,14,2],[1,14,1],[1,13,1],[1,4,3],[1,25,1],[1,1,3],[1,3,3],[1,4,1],[1,1,1],[1,4,4],[1,15,1],[1,2,1],[1,1,1],[1,7,12],[1,68,2],[1,13,2],[1,2,1],[1,6,4],[1,46,6],[1,1,1],[1,2,2],[1,4,1],[1,2,1],[1,11,5],[1,1,1],[1,9,1],[1,9,1],[1,13,1],[1,4,1],[1,14,1],[1,42,9],[1,5,1],[1,4,1],[1,24,7],[1,7,1],[1,17,1],[1,2,1],[1,2,5],[1,3,6],[1,2,1],[1,15,4],[1,3,2],[1,33,2],[1,30,4],[1,27,4],[1,1,1],[1,14,4],[1,2,3],[1,26,7],[1,22,1],[1,2,2],[1,2,2],[1,166,3],[1,4,4],[1,9,1],[1,12,15],[1,2,6],[1,13,2],[1,4,3],[1,9,2],[1,2,3],[1,3,3],[1,9,2],[1,22,1],[1,5,3],[1,3,4],[1,2,3],[1,3,1],[1,23,1],[1,18,1],[1,6,1],[1,4,1],[1,9,3],[1,35,1],[1,73,2],[1,1,3],[1,31,5],[1,25,1],[1,3,4],[1,11,1],[1,9,4],[1,2,1],[1,27,36],[1,23,5],[1,4,2],[1,1,2],[1,29,2],[1,3,2],[1,1,1],[1,4,1],[1,12,1],[1,36,16],[1,5,14],[1,19,1],[1,6,1],[1,6,1],[1,4,1],[1,6,1],[1,4,2],[1,9,7],[1,7,1],[1,30,4],[1,
4,1],[1,18,3],[1,2,2],[1,3,1],[1,9,2],[1,2,2],[1,1,2],[1,1,2],[1,14,1],[1,3,1],[1,5,2],[1,10,1],[1,9,1],[1,10,3],[1,4,1],[1,2,1],[1,4,4],[1,2,1],[1,3,3],[1,39,2],[1,3,1],[1,1,3],[1,14,1],[1,2,4],[1,13,1],[1,4,6],[1,3,5],[1,5,4],[1,8,1],[1,131,1],[1,28,1],[1,5,1],[1,8,5],[1,2,9],[1,4,2],[1,5,1],[1,46,3],[1,7,3],[1,1,1],[1,7,3],[1,2,1],[1,4,1],[1,2,1],[1,2,1],[1,2,1],[1,4,6],[1,5,1],[1,9,3],[1,2,2],[1,9,1],[1,42,3],[1,11,3],[1,5,1],[1,1,2],[1,6,1],[1,37,51],[1,2,1],[1,4,3],[1,23,2],[1,1,15],[1,5,4],[1,1,4],[1,18,3],[1,12,3],[1,4,2],[1,4,1],[1,2,7],[1,2,6],[1,3,6],[1,6,1],[1,10,3],[1,4,2],[1,1,2],[1,4,1],[1,4,3],[1,1,3],[1,3,1],[1,6,2],[1,10,2],[1,6,4],[1,4,3],[1,7,2],[1,2,2],[1,4,1],[1,1,1],[1,4,5],[1,14,1],[1,20,4],[1,7,15],[1,18,2],[1,6,1],[1,1,1],[1,7,1],[1,5,2],[1,6,2],[1,4,1],[1,6,3],[1,2,1],[1,6,1],[1,4,1],[1,7,1],[1,7,4],[1,7,1],[1,1,1],[1,24,4],[1,2,2],[1,3,5],[1,8,1],[1,15,2],[1,5,1],[1,2,3],[1,2,2],[1,4,1],[1,6,1],[1,2,3],[1,11,1],[1,23,5],[1,2,2],[1,1,1],[1,8,1],[1,17,6],[1,1,1],[1,9,2],[1,1,1],[1,10,1],[1,5,1],[1,6,1],[1,6,1],[1,5,1],[1,2,6],[1,2,1],[1,9,1],[1,14,1],[1,18,8],[1,39,2],[1,13,1],[1,6,1],[1,6,2],[1,9,1],[1,14,1],[1,5,4],[1,26,2],[1,4,1],[1,7,2],[1,5,5],[1,2,1],[1,20,2],[1,14,1],[1,10,1],[1,4,1],[1,3,1],[1,10,2],[1,9,12],[1,4,4],[1,2,1],[1,4,1],[1,4,1],[1,2,1],[1,8,1],[1,2,4],[1,1,1],[1,33,2],[1,4,1],[1,5,1],[1,205,1],[1,2,1],[1,15,3],[1,5,1],[1,1,1],[1,1,1],[1,1,1],[1,13,1],[1,14,5],[1,6,4],[1,3,1],[1,7,5],[1,42,2],[1,11,1],[1,24,2],[1,11,2],[1,11,2],[1,12,1],[1,7,1],[1,1,1],[1,3,2],[1,21,1],[1,13,1],[1,2,1],[1,37,6],[1,8,4],[1,2,2],[1,2,2],[1,36,1],[1,8,1],[1,19,11],[1,19,7],[1,8,1],[1,18,2],[1,7,2],[1,8,1],[1,1,1],[1,4,1],[1,3,3],[1,10,1],[1,6,1],[1,4,1],[1,10,1],[1,25,1],[1,14,1],[1,14,3],[1,4,1],[1,2,1],[1,2,2],[1,4,2],[1,3,4],[1,62,11],[1,4,1],[1,39,3],[1,65,2],[1,3,1],[1,11,2],[1,4,1],[1,2,2],[1,1,1],[1,2,3],[1,2,1],[1,17,7],[1,7,4],[1,1,4],[1,62,3],[1,17,3],[1,26,3],[1,15,1],[1,2,1],[1,4,6],[1,1,2],[1,8,2],[1,16,2],[1,1,1],[1,7,2],[1,4,1],[1,1,1],[1,7,2],[1,8,2],[1,12,1],[1,1,2],[1,2,1],[1,2,1],[1,26,7],[1,2,1],[1,5,1],[1,5,1],[1,5,1],[1,1,1],[1,6,27],[1,5,4],[1,6,1],[1,8,1],[1,38,2],[1,26,2],[1,13,1],[1,20,2],[1,6,6],[1,2,2],[1,2,1],[1,16,2],[1,88,1],[1,4,1],[1,5,3],[1,1,4],[1,1,4],[1,12,2],[1,3,1],[1,3,1],[1,3,1],[1,2,3],[1,6,1],[1,2,4],[1,28,2],[1,17,3],[1,10,1],[1,51,3],[1,1,1],[1,15,4],[1,10,14],[1,1,3],[1,3,3],[1,1,1],[1,5,1],[1,3,1],[1,23,3],[1,10,1],[1,1,1],[1,21,6],[1,11,1],[1,8,1],[1,1,1],[1,2,1],[1,1,3],[1,26,1],[1,1,2],[1,4,1],[1,4,1],[1,6,1],[1,6,1],[1,2,2],[1,11,5],[1,15,2],[1,13,1],[1,2,2],[1,4,1],[1,4,1],[1,2,6],[1,13,3],[1,23,2],[1,18,2],[1,8,2],[1,1,1],[1,4,1],[1,7,1],[1,2,1],[1,8,6],[1,12,1],[1,23,4],[1,9,4],[1,2,2],[1,8,1],[1,7,2],[1,2,2],[1,2,4],[1,8,16],[1,22,3],[1,2,1],[1,2,4],[1,2,1],[1,9,2],[1,3,3],[1,4,1],[1,3,9],[1,3,1],[1,2,2],[1,2,3],[1,11,1],[1,5,1],[1,5,1],[1,2,2],[1,10,20],[1,2,2],[1,2,1],[1,3,3],[1,10,1],[1,2,3],[1,2,1],[1,5,1],[1,4,2],[1,8,1],[1,2,2],[1,6,1],[1,5,1],[1,9,1],[1,3,2],[1,1,1],[1,2,6],[1,1,1],[1,5,1],[1,2,1],[1,16,1],[1,6,1],[1,2,1],[1,2,1],[1,5,1],[1,9,1],[1,10,16],[1,4,1],[1,4,2],[1,5,2],[1,8,1],[1,16,2],[1,2,1],[1,5,1],[1,1,2],[1,55,2],[1,20,1],[1,11,1],[1,5,2],[1,13,1],[1,1,1],[1,10,6],[1,5,2],[1,21,1],[1,7,3],[1,5,1],[1,7,1],[1,3,1],[1,6,1],[1,46,3],[1,8,5],[1,5,1],[1,2,1],[1,2,6],[1,22,1],[1,42,1],[1,1,1],[1,4,2],[1,13,1],[1,3,3],[1,2,2],[1,4,2],[1,1,3],[1,88,1],[1,24,4],[1,4,1],[1,3,1],[1,5,1],[1,17,6],[1,6,2],[1,20,3],[1,47,2],[1,2,7],[1,13,1],[1,1,3],[1,1,2],[1,2,2],[1,2,2],[1,4,3],[1,7,1],[1,3,1],[1,10,1]
,[1,2,1],[1,2,5],[1,1,2],[1,17,2],[1,12,4],[1,24,1],[1,3,1],[1,1,3],[1,6,1],[1,2,5],[1,3,1],[1,1,1],[1,13,2],[1,6,1],[1,2,1],[1,10,2],[1,4,1],[1,1,1],[1,18,7],[1,7,2],[1,8,1],[1,5,1],[1,2,1],[1,4,1],[1,2,2],[1,14,1],[1,13,1],[1,10,4],[1,4,4],[1,6,4],[1,4,1],[1,16,2],[1,8,2],[1,3,3],[1,3,1],[1,21,2],[1,7,1],[1,2,1],[1,2,1],[1,2,3],[1,4,1],[1,6,1],[1,28,1],[1,2,7],[1,3,1],[1,23,4],[1,2,1],[1,6,1],[1,2,1],[1,4,1],[1,3,2],[1,1,1],[1,9,2],[1,9,2],[1,2,1],[1,4,2],[1,10,1],[1,12,1],[1,4,2],[1,7,1],[1,2,2],[1,9,1],[1,16,5],[1,31,2],[1,16,2],[1,22,3],[1,2,1],[1,6,1],[1,1,1],[1,6,3],[1,14,2],[1,5,3],[1,81,3],[1,8,2],[1,1,1],[1,61,9],[1,1,4],[1,2,1],[1,11,3],[1,3,5],[1,3,6],[1,4,7],[1,1,2],[1,5,2],[1,2,1],[1,3,2],[1,9,5],[1,9,1],[1,1,3],[1,3,2],[1,13,3],[1,14,1],[1,15,6],[1,6,1],[1,2,1],[1,7,1],[1,2,1],[1,10,2],[1,2,2],[1,14,1],[1,2,2],[1,3,3],[1,3,1],[1,4,1],[1,59,2],[1,5,2],[1,4,2],[1,1,1],[1,2,1],[1,4,1],[1,2,2],[1,5,4],[1,4,1],[1,4,1],[1,10,3],[1,2,2],[1,2,3],[1,8,1],[1,2,1],[1,1,1],[1,18,1],[1,6,1],[1,12,3],[1,5,3],[1,3,1],[1,7,3],[1,10,2],[1,2,23],[1,1,12],[1,1,1],[1,32,3],[1,2,1],[1,4,1],[1,12,2],[1,4,1],[1,3,1],[1,5,1],[1,4,2],[1,4,1],[1,16,2],[1,1,1],[1,4,1],[1,7,1],[1,2,4],[1,8,1],[1,4,4],[1,1,1],[1,1,2],[1,6,3],[1,8,2],[1,23,15],[1,2,2],[1,2,1],[1,2,1],[1,11,1],[1,3,2],[1,9,2],[1,4,2],[1,2,3],[1,34,1],[1,7,1],[1,2,4],[1,65,2],[1,41,3],[1,1,2],[1,1,1],[1,6,1],[1,6,1],[1,7,1],[1,3,1],[1,14,9],[1,6,1],[1,6,5],[1,2,13],[1,5,2],[1,2,1],[1,4,1],[1,17,1],[1,5,1],[1,1,1],[1,3,2],[1,9,1],[1,1,4],[1,48,2],[1,7,1],[1,4,1],[1,3,1],[1,4,2],[1,118,3],[1,2,1],[1,2,4],[1,2,1],[1,12,13],[1,2,1],[1,4,2],[1,4,1],[1,6,1],[1,1,1],[1,7,2],[1,10,1],[1,21,5],[1,5,2],[1,9,1],[1,2,2],[1,1,1],[1,1,1],[1,1,1],[1,3,1],[1,1,1],[1,7,1],[1,83,9],[1,6,2],[1,7,2],[1,13,1],[1,4,2],[1,3,1],[1,8,2],[1,2,1],[1,10,3],[1,2,1],[1,2,1],[1,9,11],[1,2,1],[1,3,1],[1,17,1],[1,7,2],[1,8,2],[1,20,1],[1,2,1],[1,1,2],[1,8,1],[1,2,1],[1,6,1],[1,21,3],[1,1,2],[1,5,5],[1,2,1],[1,2,3],[1,2,1],[1,2,2],[1,16,1],[1,2,1],[1,2,1],[1,3,1],[1,17,1],[1,6,1],[1,4,15],[1,1,1],[1,11,1],[1,84,15],[1,31,3],[1,2,2],[1,8,1],[1,9,1],[1,2,3],[1,15,2],[1,4,1],[1,18,1],[1,3,1],[1,1,1],[1,2,4],[1,2,2],[1,2,1],[1,2,1],[1,25,1],[1,3,1],[1,141,13],[1,4,2],[1,2,2],[1,14,2],[1,7,1],[1,30,9],[1,17,1],[1,1,2],[1,6,1],[1,2,1],[1,2,1],[1,8,1],[1,2,1],[1,10,1],[1,6,3],[1,12,1],[1,68,1],[1,2,1],[1,10,2],[1,14,2],[1,26,9],[1,7,3],[1,3,3],[1,6,6],[1,3,1],[1,18,4],[1,3,1],[1,4,4],[1,2,1],[1,1,1],[1,37,8],[1,8,6],[1,2,1],[1,9,6],[1,5,2],[1,3,1],[1,3,2],[1,2,1],[1,3,1],[1,13,7],[1,9,1],[1,122,2],[1,2,1],[1,22,6],[1,11,2],[1,16,2],[1,28,46],[1,2,4],[1,7,1],[1,2,3],[1,2,6],[1,2,2],[1,1,2],[1,1,1],[1,5,1],[1,1,2],[1,3,2],[1,7,6],[1,11,1],[1,21,1],[1,40,6],[1,14,2],[1,21,1],[1,1,1],[1,14,2],[1,21,1],[1,2,1],[1,1,1],[1,1,2],[1,40,2],[1,4,2],[1,1,3],[1,1,1],[1,107,2],[1,4,6],[1,136,6],[1,5,1],[1,9,1],[1,24,3],[1,7,1],[1,10,5],[1,29,3],[1,12,2],[1,10,3],[1,5,3],[1,2,1],[1,59,1],[1,5,2],[1,13,2],[1,1,2],[1,50,2],[1,1,3],[1,2,3],[1,6,1],[1,4,2],[1,5,4],[1,3,2],[1,8,1],[1,4,2],[1,1,1],[1,17,1],[1,13,3],[1,2,1],[1,7,1],[1,3,1],[1,8,1],[1,1,1],[1,20,1],[1,4,4],[1,1,2],[1,2,1],[1,2,1],[1,2,2],[1,1,2],[1,13,2],[1,4,1],[1,4,1],[1,3,1],[1,2,1],[1,4,4],[1,13,5],[1,9,1],[1,8,1],[1,12,1],[1,15,3],[1,2,1],[1,2,2],[1,4,1],[1,2,2],[1,1,1],[1,3,1],[1,13,1],[1,4,1],[1,9,4],[1,3,2],[1,2,1],[1,4,4],[1,1,3],[1,15,1],[1,4,1],[1,2,1],[1,3,1],[1,2,1],[1,3,6],[1,5,1],[1,7,10],[1,1,2],[1,6,2],[1,7,2],[1,3,1],[1,3,3],[1,6,1],[1,13,1],[1,22,3],[1,6,5],[1,6,1],[1,3,1],[1,3,1],[1,21,5],[1,11,2],[1,6,3],[1,38,4],[1
,6,4],[1,4,1],[1,2,1],[1,5,5],[1,5,3],[1,40,1],[1,4,3],[1,8,1],[1,13,2],[1,4,2],[1,1,1],[1,9,9],[1,1,1],[1,12,2],[1,36,1],[1,2,1],[1,18,3],[1,28,1],[1,5,1],[1,20,4],[1,40,3],[1,3,1],[1,5,3],[1,2,1],[1,31,3],[1,6,1],[1,3,1],[1,1,5],[1,3,3],[1,36,1],[1,1,1],[1,22,2],[1,9,2],[1,2,4],[1,2,2],[1,4,4],[1,2,1],[1,6,1],[1,3,3],[1,5,1],[1,13,2],[1,4,1],[1,1,3],[1,1,1],[1,11,5],[1,4,1],[1,2,3],[1,26,1],[1,9,1],[1,6,1],[1,15,1],[1,23,5],[1,3,5],[1,4,3],[1,8,1],[1,9,4],[1,2,1],[1,7,1],[1,1,6],[1,4,1],[1,43,1],[1,2,3],[1,1,1],[1,15,4],[1,3,1],[1,1,1],[1,10,1],[1,79,1],[1,1,14],[1,2,1],[1,6,1],[1,1,1],[1,24,1],[1,2,3],[1,9,2],[1,2,3],[1,8,1],[1,115,15],[1,1,1],[1,1,2],[1,3,1],[1,9,24],[1,6,1],[1,3,6],[1,10,3],[1,3,1],[1,1,1],[1,3,2],[1,2,1],[1,11,1],[1,5,1],[1,1,1],[1,2,1],[1,3,1],[1,5,1],[1,11,1],[1,2,1],[1,7,7],[1,15,1],[1,6,2],[1,51,7],[1,2,1],[1,54,1],[1,5,1],[1,1,1],[1,7,5],[1,1,1],[1,4,1],[1,3,1],[1,22,4],[1,5,3],[1,5,1],[1,64,9],[1,6,1],[1,28,6],[1,5,1],[1,11,1],[1,2,2],[1,4,2],[1,1,4],[1,8,1],[1,1,5],[1,7,1],[1,2,1],[1,2,2],[1,8,1],[1,11,3],[1,8,3],[1,7,1],[1,10,5],[1,5,1],[1,98,5],[1,18,1],[1,1,1],[1,5,1],[1,2,2],[1,14,2],[1,3,1],[1,1,1],[1,11,3],[1,7,9],[1,5,3],[1,3,1],[1,3,3],[1,125,34],[1,1,1],[1,2,1],[1,6,2],[1,2,2],[1,11,7],[1,5,2],[1,5,5],[1,6,1],[1,10,2],[1,14,2],[1,4,3],[1,8,7],[1,2,3],[1,2,2],[1,13,1],[1,6,1],[1,10,5],[1,11,1],[1,4,2],[1,14,1],[1,1,6],[1,15,1],[1,1,3],[1,5,3],[1,7,1],[1,2,1],[1,1,3],[1,2,4],[1,3,1],[1,8,3],[1,2,3],[1,2,1],[1,2,2],[1,2,1],[1,4,1],[1,16,2],[1,1,2],[1,1,5],[1,7,1],[1,3,1],[1,2,1],[1,16,3],[1,4,1],[1,8,2],[1,16,6],[1,12,2],[1,84,26],[1,10,2],[1,2,2],[1,5,1],[1,1,1],[1,8,1],[1,4,1],[1,4,1],[1,4,2],[1,4,1],[1,4,10],[1,14,2],[1,4,2],[1,5,2],[1,19,1],[1,4,3],[1,8,2],[1,6,1],[1,2,5],[1,2,1],[1,16,4],[1,4,1],[1,2,2],[1,7,1],[1,4,2],[1,4,1],[1,8,1],[1,10,2],[1,3,2],[1,3,1],[1,10,2],[1,1,1],[1,12,3],[1,37,1],[1,10,1],[1,16,4],[1,1,1],[1,11,1],[1,4,1],[1,8,6],[1,3,2],[1,66,2],[1,14,1],[1,2,4],[1,2,2],[1,7,2],[1,24,2],[1,5,1],[1,1,1],[1,1,1],[1,3,1],[1,31,2],[1,24,1],[1,8,5],[1,8,2],[1,3,4],[1,64,1],[1,1,4],[1,4,47],[1,8,4],[1,25,1],[1,19,2],[1,4,1],[1,33,4],[1,16,2],[1,4,1],[1,1,1],[1,2,3],[1,27,1],[1,20,1],[1,10,3],[1,2,1],[1,2,1],[1,76,1],[1,2,1],[1,5,1],[1,2,2],[1,15,3],[1,40,2],[1,4,22],[1,2,2],[1,2,2],[1,10,1],[1,3,1],[1,55,4],[1,2,7],[1,7,1],[1,4,6],[1,2,1],[1,2,1],[1,28,1],[1,2,2],[1,6,2],[1,6,2],[1,4,15],[1,3,2],[1,1,1],[1,29,1],[1,13,1],[1,16,1],[1,4,1],[1,7,7],[1,3,3],[1,16,4],[1,12,11],[1,1,1],[1,2,4],[1,54,2],[1,1,2],[1,6,2],[1,1,3],[1,2,2],[1,1,1],[1,2,1],[1,11,4],[1,9,1],[1,20,1],[1,1,1],[1,17,3],[1,1,1],[1,9,2],[1,2,2],[1,3,1],[1,29,19],[1,28,1],[1,8,3],[1,21,8],[1,7,3],[1,6,2],[1,5,2],[1,11,1],[1,1,2],[1,7,1],[1,22,1],[1,9,1],[1,3,3],[1,8,2],[1,5,1],[1,23,2],[1,11,5],[1,17,2],[1,5,5],[1,4,3],[1,33,1],[1,2,3],[1,6,1],[1,32,1],[1,6,2],[1,64,2],[1,3,1],[1,7,1],[1,3,6],[1,12,1],[1,1,1],[1,9,1],[1,38,3],[1,1,1],[1,3,1],[1,3,5],[1,78,16],[1,3,1],[1,7,1],[1,26,1],[1,9,2],[1,113,2],[1,9,1],[1,5,9],[1,3,2],[1,4,1],[1,2,1],[1,5,1],[1,24,3],[1,11,4],[1,38,2],[1,13,3],[1,7,3],[1,1,1],[1,1,2],[1,3,3],[1,5,3],[1,6,1],[1,7,1],[1,3,1],[1,4,2],[1,3,1],[1,3,1],[1,1,2],[1,2,1],[1,18,8],[1,1,3],[1,1,1],[1,2,5],[1,13,9],[1,2,2],[1,6,1],[1,5,1],[1,13,3],[1,7,1],[1,3,2],[1,2,1],[1,4,1],[1,2,2],[1,6,2],[1,4,3],[1,1,3],[1,3,2],[1,12,8],[1,6,1],[1,7,1],[1,6,3],[1,9,4],[1,16,17],[1,1,2],[1,4,1],[1,2,1],[1,2,1],[1,2,1],[1,1,1],[1,4,2],[1,4,1],[1,8,1],[1,14,17],[1,7,1],[1,7,6],[1,5,1],[1,4,2],[1,80,2],[1,13,1],[1,11,1],[1,9,1],[1,2,4],[1,3,1],[1,2,1],[1,5,2],[1,3,1],[1,1,2],[1,1
2,1],[1,8,5],[1,6,3],[1,17,1],[1,3,4],[1,1,2],[1,5,2],[1,1,3],[1,2,2],[1,2,3],[1,2,1],[1,4,1],[1,1,1],[1,14,1],[1,2,1],[1,16,4],[1,15,2],[1,3,3],[1,8,8],[1,6,1],[1,25,4],[1,6,1],[1,7,3],[1,36,2],[1,2,1],[1,32,2],[1,1,1],[1,7,1],[1,14,2],[1,21,1],[1,3,1],[1,27,7],[1,6,3],[1,1,5],[1,5,4],[1,12,2],[1,2,1],[1,2,1],[1,8,7],[1,8,8],[1,7,1],[1,2,1],[1,4,1],[1,1,7],[1,10,3],[1,17,1],[1,1,1],[1,8,6],[1,29,5],[1,12,2],[1,7,2],[1,7,1],[1,2,2],[1,2,1],[1,2,1],[1,54,9],[1,1,1],[1,12,2],[1,8,1],[1,8,4],[1,39,1],[1,3,3],[1,9,4],[1,6,5],[1,2,1],[1,15,2],[1,18,1],[1,2,2],[1,1,1],[1,1,1],[1,2,4],[1,3,1],[1,6,1],[1,3,3],[1,4,3],[1,3,2],[1,1,1],[1,2,2],[1,16,12],[1,4,2],[1,15,2],[1,6,1],[1,7,1],[1,9,8],[1,70,2],[1,5,1],[1,4,3],[1,24,4],[1,8,6],[1,18,43],[1,23,3],[1,10,1],[1,14,8],[1,6,4],[1,2,1],[1,2,1],[1,1,1],[1,2,1],[1,9,3],[1,6,4],[1,5,3],[1,43,2],[1,5,1],[1,11,1],[1,1,2],[1,5,3],[1,4,2],[1,16,2],[1,16,10],[1,5,1],[1,2,2],[1,2,1],[1,2,3],[1,4,6],[1,3,12],[1,6,1],[1,10,1],[1,1,2],[1,13,1],[1,3,1],[1,5,2],[1,6,1],[1,3,1],[1,2,1],[1,1,1],[1,13,1],[1,20,1],[1,20,2],[1,8,1],[1,5,2],[1,2,2],[1,10,5],[1,1,3],[1,7,2],[1,4,1],[1,15,18],[1,1,4],[1,5,2],[1,4,1],[1,1,11],[1,1,3],[1,4,1],[1,1,1],[1,2,1],[1,2,12],[1,5,1],[1,3,1],[1,25,2],[1,16,1],[1,10,1],[1,18,1],[1,28,3],[1,5,6],[1,4,2],[1,2,2],[1,51,124],[1,4,2],[1,5,1],[1,28,1],[1,4,5],[1,6,2],[1,20,1],[1,7,1],[1,5,3],[1,11,1],[1,4,3],[1,1,1],[1,6,3],[1,5,1],[1,3,1],[1,10,2],[1,64,5],[1,12,12],[1,5,2],[1,6,1],[1,8,2],[1,28,8],[1,19,1],[1,2,1],[1,1,1],[2,6,1],[2,2,2],[2,4,5],[2,11,1],[2,4,1],[2,4,1],[2,14,1],[2,19,2],[2,2,1],[2,6,4],[2,2,1],[2,6,2],[2,4,1],[2,12,2],[2,15,2],[2,5,1],[2,11,1],[2,11,1],[2,2,2],[2,3,3],[2,5,9],[2,2,1],[2,1,1],[2,1,4],[2,2,1],[2,4,1],[2,11,1],[2,6,1],[2,2,2],[2,8,1],[2,81,7],[2,8,1],[2,5,1],[2,6,3],[2,2,2],[2,39,1],[2,5,2],[2,5,2],[2,2,4],[2,10,2],[2,4,2],[2,2,1],[2,6,6],[2,8,2],[2,56,1],[2,9,1],[2,1,1],[2,16,3],[2,5,2],[2,3,2],[2,12,25],[2,4,4],[2,6,2],[2,7,1],[2,30,11],[2,4,1],[2,16,5],[2,8,2],[2,7,2],[2,11,1],[2,7,1],[2,2,1],[2,1,1],[2,2,9],[2,39,6],[2,2,1],[2,2,1],[2,7,1],[2,19,1],[2,11,2],[2,8,2],[2,4,7],[2,2,1],[2,7,1],[2,1,1],[2,4,1],[2,6,1],[2,6,1],[2,2,4],[2,26,37],[2,2,1],[2,13,2],[2,35,10],[2,13,1],[2,6,1],[2,10,2],[2,19,9],[2,7,1],[2,7,1],[2,2,2],[2,1,1],[2,5,2],[2,10,2],[2,6,1],[2,6,1],[2,6,1],[2,2,2],[2,1,1],[2,6,60],[2,8,1],[2,18,1],[2,4,2],[2,1,1],[2,1,1],[2,2,3],[2,21,2],[2,7,2],[2,11,3],[2,14,2],[2,3,2],[2,12,1],[2,1,2],[2,34,1],[2,1,1],[2,16,1],[2,1,1],[2,11,1],[2,14,1],[2,8,1],[2,9,1],[2,8,1],[2,3,1],[2,4,4],[2,4,1],[2,44,3],[2,4,1],[2,19,6],[2,19,2],[2,3,2],[2,17,2],[2,17,4],[2,1,6],[2,5,3],[2,27,6],[2,5,3],[2,6,3],[2,22,2],[2,22,3],[2,13,19],[2,8,1],[2,2,2],[2,7,1],[2,9,3],[2,2,1],[2,11,1],[2,8,1],[2,4,1],[2,8,2],[2,4,1],[2,1,1],[2,16,1],[2,2,1],[2,4,1],[2,9,11],[2,3,3],[2,3,1],[2,1,2],[2,3,1],[2,28,1],[2,8,5],[2,6,2],[2,8,1],[2,1,1],[2,10,1],[2,6,1],[2,55,1],[2,1,1],[2,4,2],[2,3,2],[2,16,4],[2,11,1],[2,2,3],[2,15,1],[2,1,10],[2,8,2],[2,15,1],[2,1,1],[2,7,114],[2,10,3],[2,1,1],[2,5,1],[2,3,3],[2,2,1],[2,1,1],[2,8,1],[2,96,1],[2,10,3],[2,3,2],[2,2,1],[2,1,1],[2,3,1],[2,25,2],[2,3,1],[2,12,4],[2,2,9],[2,3,1],[2,2,1],[2,9,1],[2,12,1],[2,18,1],[2,23,6],[2,9,85],[2,2,8],[2,1,2],[2,26,1],[2,8,2],[2,6,3],[2,1,4],[2,6,1],[2,8,3],[2,9,2],[2,1,1],[2,7,1],[2,1,3],[2,7,1],[2,3,2],[2,10,1],[2,2,2],[2,8,2],[2,4,4],[2,23,2],[2,8,5],[2,1,1],[2,3,3],[2,7,2],[2,1,1],[2,2,1],[2,1,7],[2,10,1],[2,18,1],[2,39,5],[2,13,2],[2,7,2],[2,6,2],[2,9,1],[2,5,1],[2,7,1],[2,35,2],[2,2,2],[2,5,2],[2,1,1],[2,9,2],[2,18,1],[2,2,3],[2,35,1],[2,6,5],[2,2
,2],[2,2,1],[2,12,2],[2,1,1],[2,10,1],[2,6,1],[2,2,1],[2,15,2],[2,7,1],[2,5,4],[2,4,1],[2,2,14],[2,2,1],[2,5,3],[2,21,2],[2,10,1],[2,2,1],[2,8,1],[2,16,1],[2,9,2],[2,11,2],[2,1,6],[2,12,2],[2,18,2],[2,2,4],[2,4,3],[2,7,11],[2,3,1],[2,28,5],[2,1,4],[2,8,1],[2,2,5],[2,2,1],[2,3,1],[2,10,2],[2,3,3],[2,2,1],[2,17,1],[2,6,1],[2,16,1],[2,10,16],[2,17,1],[2,4,2],[2,1,1],[2,3,3],[2,7,3],[2,5,1],[2,11,1],[2,13,1],[2,3,1],[2,6,1],[2,5,2],[2,17,2],[2,33,13],[2,2,10],[2,3,5],[2,4,3],[2,5,1],[2,2,4],[2,8,2],[2,14,1],[2,16,1],[2,2,3],[2,19,6],[2,5,1],[2,8,2],[2,7,1],[2,1,1],[2,11,1],[2,2,2],[2,11,10],[2,10,1],[2,14,1],[2,1,7],[2,10,1],[2,34,1],[2,2,1],[2,2,4],[2,9,2],[2,16,1],[2,2,4],[2,8,3],[2,1,2],[2,3,5],[2,13,5],[2,20,1],[2,25,8],[2,9,1],[2,1,1],[2,15,3],[2,6,2],[2,394,278],[2,11,2],[2,1,1],[2,3,15],[2,4,2],[2,3,6],[2,6,3],[2,1,12],[2,2,1],[2,1,3],[2,11,2],[2,20,3],[2,31,9],[2,25,7],[2,15,2],[2,11,31],[2,17,2],[2,5,1],[2,2,2],[2,4,1],[2,6,2],[2,27,2],[2,10,2],[2,1,2],[2,26,5],[2,5,14],[2,12,2],[2,5,2],[2,2,1],[2,2,3],[2,6,1],[2,1,3],[2,9,3],[2,18,1],[2,5,5],[2,29,13],[2,14,1],[2,1,4],[2,3,1],[2,5,1],[2,19,4],[2,11,7],[2,8,3],[2,18,1],[2,3,5],[2,11,1],[2,4,1],[2,10,4],[2,19,2],[2,10,3],[2,12,2],[2,19,9],[2,73,3],[2,13,3],[2,12,1],[2,4,5],[2,55,1],[2,6,6],[2,27,2],[2,2,1],[2,20,1],[2,8,1],[2,1,1],[2,29,2],[2,10,8],[2,5,2],[2,10,2],[2,14,1],[2,10,1],[2,1,1],[2,4,2],[2,5,1],[2,1,4],[2,4,2],[2,9,1],[2,9,4],[2,2,1],[2,4,1],[2,6,2],[2,2,2],[2,10,15],[2,17,1],[2,9,1],[2,9,1],[2,8,2],[2,4,1],[2,4,1],[2,243,2],[2,9,3],[2,12,2],[2,4,3],[2,2,1],[2,1,2],[2,57,4],[2,7,2],[2,8,2],[2,14,2],[2,2,1],[2,6,1],[2,7,2],[2,8,1],[2,4,3],[2,36,5],[2,3,1],[2,1,1],[2,45,8],[2,1,1],[2,2,3],[2,9,1],[2,1,1],[2,13,2],[2,44,6],[2,2,1],[2,36,1],[2,4,1],[2,5,1],[2,3,2],[2,1,1],[2,28,2],[2,9,1],[2,3,3],[2,10,2],[2,16,1],[2,1,1],[2,1,1],[2,13,1],[2,14,3],[2,65,1],[2,7,1],[2,2,1],[2,11,8],[2,4,1],[2,17,1],[2,6,1],[2,15,5],[2,15,1],[2,17,2],[2,8,1],[2,8,1],[2,1,2],[2,5,7],[2,1,1],[2,3,2],[2,2,1],[2,4,1],[2,32,1],[2,3,1],[2,1,1],[2,1,1],[2,2,2],[2,2,1],[2,8,2],[2,11,3],[2,2,3],[2,42,3],[2,5,1],[2,6,2],[2,1,1],[2,9,1],[2,2,2],[2,5,1],[2,2,1],[2,7,1],[2,7,6],[2,6,2],[2,3,1],[2,1,3],[2,15,1],[2,23,1],[2,1,1],[2,3,1],[2,4,2],[2,8,1],[2,2,7],[2,3,4],[2,6,5],[2,4,1],[2,5,3],[2,16,5],[2,11,1],[2,13,1],[2,22,3],[2,10,5],[2,2,2],[2,2,2],[2,6,1],[2,7,1],[2,4,2],[2,4,3],[2,7,3],[2,7,4],[2,1,1],[2,71,9],[2,4,8],[2,33,4],[2,16,2],[2,1,18],[2,15,1],[2,3,1],[2,8,1],[2,6,3],[2,4,2],[2,1,1],[2,7,2],[2,2,8],[2,2,1],[2,8,1],[2,1,3],[2,5,1],[2,2,2],[2,11,1],[2,17,3],[2,118,1],[2,8,4],[2,14,1],[2,3,4],[2,14,1],[2,2,2],[2,4,3],[2,2,1],[2,11,1],[2,8,10],[2,1,2],[2,3,3],[2,2,2],[2,12,1],[2,2,2],[2,26,3],[2,3,2],[2,3,3],[2,19,1],[2,1,13],[2,23,2],[2,3,1],[2,7,4],[2,10,4],[2,2,3],[2,71,3],[2,3,3],[2,23,1],[2,1,1],[2,34,3],[2,62,1],[2,4,1],[2,7,2],[2,2,8],[2,6,1],[2,20,3],[2,26,2],[2,5,2],[2,2,1],[2,7,1],[2,1,1],[2,7,2],[2,28,7],[2,4,1],[2,2,2],[2,4,1],[2,7,1],[2,2,3],[2,3,1],[2,8,3],[2,43,1],[2,2,1],[2,1,4],[2,2,1],[2,13,3],[2,4,2],[2,6,1],[2,17,1],[2,2,8],[2,32,1],[2,11,2],[2,5,2],[2,45,3],[2,9,1],[2,14,2],[2,9,1],[2,2,1],[2,10,5],[2,2,1],[2,13,1],[2,2,2],[2,3,5],[2,2,1],[2,17,3],[2,11,1],[2,15,1],[2,13,4],[2,7,7],[2,10,2],[2,6,4],[2,2,3],[2,1,3],[2,27,2],[2,2,3],[2,2,1],[2,3,1],[2,3,9],[2,3,46],[2,11,1],[2,30,1],[2,5,1],[2,8,8],[2,2,1],[2,1,1],[2,2,1],[2,6,7],[2,1,1],[2,4,1],[2,4,2],[2,15,2],[2,6,7],[2,4,2],[2,5,1],[2,1,4],[2,2,3],[2,1,2],[2,2,2],[2,1,7],[2,15,2],[2,18,3],[2,2,1],[2,6,1],[2,8,1],[2,134,20],[2,26,1],[2,2,2],[2,8,4],[2,1,1],[2,3,1],[2,14,1],[2,3,1
],[2,26,1],[2,19,1],[2,1,1],[2,1,1],[2,7,1],[2,5,2],[2,5,8],[2,3,4],[2,1,1],[2,2,2],[2,16,1],[2,7,2],[2,6,1],[2,1,6],[2,4,3],[2,2,2],[2,2,2],[2,2,1],[2,2,1],[2,1,2],[2,8,3],[2,4,1],[2,9,1],[2,18,33],[2,14,1],[2,1,1],[2,3,2],[2,7,1],[2,14,4],[2,4,2],[2,31,7],[2,19,2],[2,11,4],[2,2,1],[2,7,2],[2,2,1],[2,2,3],[2,52,4],[2,4,1],[2,1,1],[2,4,3],[2,11,1],[2,3,2],[2,6,1],[2,10,3],[2,6,1],[2,12,1],[2,10,2],[2,4,2],[2,23,2],[2,3,3],[2,8,1],[2,21,6],[2,2,2],[2,1,1],[2,1,1],[2,16,3],[2,9,2],[2,5,1],[2,2,2],[2,1,4],[2,4,1],[2,1,25],[2,24,2],[2,6,1],[2,3,4],[2,10,4],[2,6,2],[2,35,2],[2,2,2],[2,1,1],[2,25,10],[2,8,1],[2,1,2],[2,1,1],[2,2,1],[2,3,8],[2,2,1],[2,2,1],[2,5,2],[2,4,3],[2,2,8],[2,1,1],[2,4,2],[2,3,3],[2,12,1],[2,3,2],[2,4,1],[2,2,4],[2,7,2],[2,1,1],[2,73,14],[2,90,1],[2,4,1],[2,2,1],[2,1,1],[2,6,3],[2,1,1],[2,4,1],[2,10,3],[2,2,3],[2,1,1],[2,6,1],[2,37,2],[2,10,1],[2,2,2],[2,60,2],[2,16,3],[2,6,1],[2,1,1],[2,3,4],[2,38,5],[2,6,2],[2,2,1],[2,2,1],[2,9,2],[2,11,1],[2,6,1],[2,9,1],[2,2,2],[2,4,3],[2,8,1],[2,3,2],[2,1,9],[2,14,2],[2,8,1],[2,30,4],[2,2,1],[2,31,2],[2,31,1],[2,21,23],[2,1,5],[2,4,1],[2,2,1],[2,5,3],[2,4,2],[2,10,2],[2,2,2],[2,18,1],[2,15,1],[2,2,1],[2,1,2],[2,5,1],[2,13,1],[2,14,4],[2,1,4],[2,5,1],[2,109,3],[2,18,2],[2,1,2],[2,164,114],[2,8,1],[2,2,3],[2,4,1],[2,1,1],[2,10,1],[2,9,2],[2,4,3],[2,1,75],[2,6,1],[2,17,2],[2,3,1],[2,9,1],[2,2,1],[2,21,1],[2,30,3],[2,7,2],[2,2,2],[2,63,5],[2,16,3],[2,6,1],[2,2,8],[2,25,2],[2,31,3],[2,126,21],[2,10,1],[2,2,2],[2,14,7],[2,6,10],[2,4,3],[2,7,1],[2,12,1],[2,2,1],[2,3,2],[2,2,15],[2,1,4],[2,4,1],[2,3,1],[2,4,1],[2,6,2],[2,7,3],[2,2,3],[2,9,2],[2,6,1],[2,2,1],[2,16,1],[2,22,2],[2,10,1],[2,10,4],[2,7,2],[2,13,1],[2,3,1],[2,7,2],[2,23,12],[2,3,1],[2,6,1],[2,4,2],[2,29,2],[2,5,3],[2,8,1],[2,1,1],[2,6,1],[2,3,1],[2,17,2],[2,15,1],[2,2,1],[2,6,1],[2,2,2],[2,30,1],[2,3,1],[2,2,2],[2,2,5],[2,2,1],[2,37,5],[2,6,2],[2,7,6],[2,2,3],[2,3,3],[2,2,5],[2,75,6],[2,2,3],[2,10,1],[2,2,3],[2,7,2],[2,30,1],[2,12,33],[2,1,1],[2,3,4],[2,14,1],[2,9,2],[2,8,1],[2,1,1],[2,9,1],[2,4,1],[2,2,1],[2,7,1],[2,4,1],[2,3,1],[2,4,3],[2,1,1],[2,5,2],[2,3,4],[2,4,2],[2,6,3],[2,13,5],[2,4,2],[2,6,1],[2,2,5],[2,2,3],[2,1,1],[2,14,1],[2,5,1],[2,4,2],[2,9,1],[2,7,6],[2,4,1],[2,19,2],[2,23,1],[2,20,7],[2,9,1],[2,4,1],[2,12,2],[2,9,4],[2,3,2],[2,3,7],[2,3,1],[2,10,2],[2,6,1],[2,7,1],[2,1,1],[2,9,1],[2,6,1],[2,1,1],[2,17,2],[2,9,1],[2,5,2],[2,1,1],[2,11,2],[2,9,1],[2,1,1],[2,3,6],[2,2,1],[2,5,9],[2,12,2],[2,2,1],[2,6,2],[2,17,4],[2,2,2],[2,7,1],[2,596,5],[2,6,1],[2,2,1],[2,58,125],[2,6,1],[2,8,1],[2,2,1],[2,3,1],[2,1,2],[2,11,4],[2,1,1],[2,9,6],[2,2,8],[2,1,1],[2,6,2],[2,1,1],[2,2,1],[2,7,2],[2,7,3],[2,14,2],[2,1,1],[2,18,9],[2,2,5],[2,2,12],[2,8,4],[2,6,4],[2,3,1],[2,19,2],[2,4,1],[2,2,1],[2,4,3],[2,3,1],[2,13,1],[2,1,1],[2,7,1],[2,1,1],[2,8,1],[2,13,14],[2,11,1],[2,31,1],[2,4,1],[2,6,1],[2,3,2],[2,26,1],[2,4,2],[2,1,1],[2,2,2],[2,1,2],[2,1,1],[2,7,1],[2,8,1],[2,6,2],[2,19,13],[2,2,3],[2,8,3],[2,1,6],[2,5,1],[2,1,1],[2,6,1],[2,9,1],[2,2,2],[2,35,1],[2,1,1],[2,27,2],[2,54,2],[2,6,2],[2,5,1],[2,2,1],[2,2,4],[2,2,1],[2,2,1],[2,14,1],[2,9,1],[2,53,17],[2,2,1],[2,10,1],[2,9,1],[2,23,1],[2,7,1],[2,12,4],[2,1,2],[2,8,1],[2,7,4],[2,2,1],[2,2,1],[2,3,1],[2,11,1],[2,2,2],[2,6,1],[2,2,1],[2,18,4],[2,3,4],[2,8,2],[2,13,1],[2,2,1],[2,1,2],[2,14,4],[2,8,11],[2,1,1],[2,8,3],[2,7,3],[2,90,1],[2,20,2],[2,16,1],[2,20,2],[2,3,1],[2,8,10],[2,10,1],[2,10,1],[2,1,1],[2,3,1],[2,5,1],[2,37,3],[2,24,3],[2,10,1],[2,3,1],[2,2,4],[2,4,1],[2,19,2],[2,1,1],[2,5,1],[2,8,1],[2,3,1],[2,1,1],[2,2,1],[2,2,32],[2,2,1],[2
,4,1],[2,1,1],[2,2,2],[2,5,1],[2,2,3],[2,25,9],[2,2,1],[2,4,4],[2,2,1],[2,15,1],[2,59,1],[2,3,2],[2,4,1],[2,9,2],[2,3,10],[2,6,1],[2,5,5],[2,8,2],[2,2,2],[2,4,2],[2,10,1],[2,126,1],[2,3,1],[2,8,1],[2,9,2],[2,1,30],[2,25,1],[2,7,3],[2,2,2],[2,1,3],[2,21,1],[2,38,1],[2,48,1],[2,22,1],[2,4,2],[2,55,2],[2,5,1],[2,15,1],[2,14,44],[2,4,1],[2,1,2],[2,2,3],[2,2,1],[2,3,3],[2,6,1],[2,2,1],[2,26,7],[2,4,1],[2,1,2],[2,3,2],[2,6,2],[2,10,1],[2,18,3],[2,2,1],[2,38,2],[2,1,1],[2,8,1],[2,8,1],[2,3,1],[2,4,1],[2,1,1],[2,1,2],[2,4,1],[2,26,2],[2,3,3],[2,2,1],[2,6,1],[2,19,1],[2,3,4],[2,2,1],[2,4,1],[2,11,1],[2,9,1],[2,9,1],[2,9,1],[2,1,1],[2,1,1],[2,7,1],[2,2,1],[2,11,4],[2,10,2],[2,4,1],[2,6,1],[2,4,1],[2,8,1],[2,11,1],[2,1,1],[2,7,1],[2,8,2],[2,9,1],[2,8,1],[2,41,2],[2,2,4],[2,1,6],[2,2,1],[2,6,3],[2,128,5],[2,2,1],[2,13,13],[2,6,1],[2,1,3],[2,3,3],[2,7,2],[2,10,12],[2,2,1],[2,8,1],[2,1,1],[2,7,1],[2,2,1],[2,10,2],[2,11,10],[2,1,1],[2,8,3],[2,4,5],[2,2,1],[2,14,2],[2,4,1],[2,4,1],[2,7,1],[2,6,1],[2,7,3],[2,1,1],[2,2,1],[2,7,2],[2,2,1],[2,6,1],[2,8,1],[2,2,4],[2,6,1],[2,43,1],[2,108,3],[2,8,1],[2,13,1],[2,4,1],[2,10,3],[2,2,1],[2,24,2],[2,1,2],[2,4,2],[2,2,2],[2,40,6],[2,6,2],[2,6,2],[2,4,3],[2,28,5],[2,4,1],[2,15,1],[2,12,1],[2,1,1],[2,27,1],[3,1,1],[3,5,2],[3,16,2],[3,16,3],[3,1,2],[3,98,2],[3,91,7],[3,6,37],[3,4,1],[3,9,1],[3,97,2],[3,6,1],[3,23,3],[3,115,1],[3,2,1],[3,1,1],[3,1,1],[3,14,4],[3,1,1],[3,28,1],[3,1,1],[3,6,1],[3,15,5],[3,3,1],[3,52,1],[3,2,3],[3,3,1],[3,4,5],[3,13,1],[3,16,3],[3,13,1],[3,17,1],[3,4,4],[3,6,7],[3,14,1],[3,32,1],[3,3,3],[3,11,4],[3,1,1],[3,8,6],[3,9,7],[3,2,1],[3,9,2],[3,5,2],[3,26,12],[3,11,3],[3,12,2],[3,4,2],[3,6,2],[3,30,6],[3,1,2],[3,10,1],[3,1,1],[3,4,1],[3,7,1],[3,30,29],[3,2,3],[3,2,2],[3,2,1],[3,11,1],[3,2,3],[3,3,1],[3,9,1],[3,2,2],[3,5,1],[3,1,2],[3,1,13],[3,6,9],[3,1,1],[3,6,2],[3,1,3],[3,4,1],[3,6,1],[3,9,3],[3,1,1],[3,9,2],[3,19,45],[3,2,1],[3,7,8],[3,21,3],[3,6,2],[3,2,1],[3,6,1],[3,5,1],[3,2,1],[3,15,7],[3,2,1],[3,9,3],[3,11,1],[3,4,1],[3,7,1],[3,2,1],[3,19,1],[3,5,1],[3,2,1],[3,1,1],[3,22,3],[3,21,5],[3,13,1],[3,2,1],[3,4,1],[3,23,1],[3,8,1],[3,3,2],[3,2,2],[3,4,1],[3,12,2],[3,5,2],[3,16,8],[3,6,1],[3,1,2],[3,2,1],[3,7,1],[3,6,1],[3,6,3],[3,45,1],[3,4,5],[3,1,2],[3,3,1],[3,2,1],[3,1,1],[3,12,1],[3,8,1],[3,3,1],[3,6,1],[3,2,2],[3,9,2],[3,5,2],[3,2,1],[3,3,1],[3,15,1],[3,11,1],[3,4,1],[3,9,2],[3,3,1],[3,4,1],[3,1,3],[3,6,15],[3,6,3],[3,2,6],[3,1,3],[3,3,2],[3,15,1],[3,6,1],[3,7,1],[3,5,1],[3,9,1],[3,49,2],[3,5,2],[3,9,4],[3,39,1],[3,4,3],[3,1,5],[3,1,2],[3,2,1],[3,14,2],[3,4,3],[3,18,1],[3,5,4],[3,19,3],[3,3,1],[3,2,1],[3,3,2],[3,48,10],[3,1,1],[3,5,6],[3,12,3],[3,1,2],[3,5,4],[3,4,1],[3,4,1],[3,5,1],[3,1,1],[3,10,1],[3,10,2],[3,6,3],[3,2,7],[3,4,1],[3,9,2],[3,1,1],[3,2,1],[3,4,6],[3,1,1],[3,25,9],[3,11,1],[3,2,1],[3,8,2],[3,1,1],[3,9,3],[3,4,6],[3,1,7],[3,1,1],[3,4,1],[3,11,2],[3,14,1],[3,65,2],[3,6,1],[3,5,2],[3,2,2],[3,13,1],[3,2,5],[3,2,1],[3,4,2],[3,25,1],[3,2,1],[3,2,3],[3,9,1],[3,5,5],[3,46,1],[3,6,2],[3,12,9],[3,4,4],[3,2,3],[3,13,5],[3,39,16],[3,3,1],[3,1,2],[3,68,14],[3,5,1],[3,11,1],[3,7,1],[3,4,1],[3,53,11],[3,4,3],[3,4,1],[3,2,1],[3,4,1],[3,1,1],[3,1,2],[3,8,4],[3,5,1],[3,6,5],[3,6,13],[3,403,3],[3,23,1],[3,3,3],[3,14,1],[3,10,1],[3,3,2],[3,46,11],[3,4,3],[3,29,1],[3,41,2],[3,11,1],[3,15,3],[3,11,2],[3,6,1],[3,3,1],[3,17,2],[3,14,3],[3,5,4],[3,2,1],[3,2,1],[3,5,6],[3,6,1],[3,54,2],[3,2,1],[3,4,2],[3,1,1],[3,7,1],[3,8,34],[3,7,1],[3,1,2],[3,3,2],[3,2,5],[3,1,1],[3,15,12],[3,13,1],[3,5,1],[3,1,1],[3,5,1],[3,39,1],[3,26,9],[3,11,1],[3,6,1],[3,2,1],[3
,19,4],[3,4,5],[3,10,1],[3,11,6],[3,4,1],[3,38,1],[3,1,1],[3,1,3],[3,2,1],[3,5,10],[3,4,1],[3,18,2],[3,4,1],[3,19,1],[3,1,1],[3,8,6],[3,1,1],[3,9,1],[3,8,3],[3,15,4],[3,9,3],[3,13,1],[3,10,1],[3,1,2],[3,5,4],[3,4,2],[3,4,1],[3,28,1],[3,6,2],[3,9,1],[3,1,2],[3,2,2],[3,25,1],[3,5,8],[3,5,3],[3,8,2],[3,2,1],[3,14,5],[3,2,1],[3,11,3],[3,10,1],[3,2,2],[3,1,1],[3,3,1],[3,9,1],[3,39,9],[3,27,2],[3,1,1],[3,1,3],[3,12,3],[3,6,1],[3,14,2],[3,17,3],[3,198,1],[3,3,1],[3,5,1],[3,1,1],[3,2,4],[3,12,1],[3,31,1],[3,8,14],[3,25,2],[3,16,2],[3,18,2],[3,2,3],[3,2,3],[3,6,28],[3,22,3],[3,6,1],[3,8,2],[3,4,3],[3,3,3],[3,8,1],[3,1,1],[3,1,2],[3,1,1],[3,1,1],[3,1,2],[3,6,2],[3,2,3],[3,4,1],[3,3,1],[3,1,1],[3,3,2],[3,8,10],[3,6,1],[3,2,1],[3,2,1],[3,5,1],[3,29,6],[3,10,1],[3,3,8],[3,1,3],[3,2,2],[3,3,1],[3,3,4],[3,5,19],[3,15,1],[3,65,1],[3,2,2],[3,60,3],[3,52,1],[3,1,1],[3,4,2],[3,4,1],[3,6,1],[3,7,4],[3,1,1],[3,13,1],[3,8,3],[3,13,1],[3,6,1],[3,3,2],[3,14,1],[3,2,2],[3,4,1],[3,1,1],[3,11,29],[3,7,1],[3,21,6],[3,4,1],[3,1,1],[3,2,1],[3,9,1],[3,2,4],[3,3,1],[3,2,3],[3,1,2],[3,3,2],[3,3,4],[3,16,2],[3,9,2],[3,2,1],[3,17,8],[3,9,4],[3,7,1],[3,6,4],[3,1,2],[3,2,1],[3,4,4],[3,2,1],[3,3,1],[3,3,1],[3,11,1],[3,2,2],[3,2,1],[3,2,3],[3,2,2],[3,10,6],[3,10,4],[3,1,1],[3,8,3],[3,29,2],[3,7,1],[3,2,1],[3,4,1],[3,11,1],[3,2,1],[3,2,2],[3,13,3],[3,4,1],[3,3,1],[3,2,4],[3,18,1],[3,12,1],[3,6,3],[3,3,1],[3,5,1],[3,3,2],[3,9,2],[3,5,1],[3,5,1],[3,11,1],[3,1,1],[3,39,18],[3,3,2],[3,4,1],[3,17,2],[3,14,2],[3,10,6],[3,1,1],[3,4,5],[3,2,1],[3,4,6],[3,12,1],[3,106,80],[3,32,1],[3,7,1],[3,8,1],[3,2,1],[3,33,2],[3,33,7],[3,10,1],[3,3,2],[3,4,3],[3,16,3],[3,7,1],[3,8,1],[3,16,1],[3,8,1],[3,8,1],[3,30,1],[3,7,1],[3,2,1],[3,3,10],[3,27,1],[3,2,1],[3,1,3],[3,2,1],[3,23,1],[3,1,1],[3,5,2],[3,6,1],[3,2,1],[3,2,13],[3,1,3],[3,6,2],[3,5,1],[3,26,1],[3,4,5],[3,2,1],[3,9,1],[3,6,1],[3,2,1],[3,21,2],[3,15,1],[3,4,2],[3,2,1],[3,30,1],[3,4,2],[3,2,1],[3,2,58],[3,8,2],[3,13,1],[3,16,2],[3,10,6],[3,6,1],[3,6,1],[3,2,6],[3,1,1],[3,2,4],[3,11,9],[3,25,2],[3,4,2],[3,1,1],[3,9,9],[3,1,9],[3,3,3],[3,4,1],[3,2,3],[3,5,2],[3,2,7],[3,2,1],[3,2,1],[3,6,3],[3,3,4],[3,1,2],[3,4,3],[3,7,118],[3,7,1],[3,6,1],[3,3,1],[3,1,15],[3,1,2],[3,4,2],[3,2,1],[3,4,1],[3,6,1],[3,23,1],[3,1,1],[3,3,1],[3,4,1],[3,10,3],[3,2,2],[3,6,5],[3,8,1],[3,3,1],[3,4,1],[3,20,2],[3,14,2],[3,7,1],[3,21,29],[3,10,2],[3,10,2],[3,3,3],[3,2,1],[3,3,2],[3,24,3],[3,3,1],[3,9,1],[3,6,1],[3,22,1],[3,13,1],[3,5,2],[3,1,1],[3,9,1],[3,10,2],[3,4,1],[3,7,1],[3,2,1],[3,12,4],[3,48,2],[3,43,1],[3,6,1],[3,1,1],[3,4,1],[3,14,10],[3,2,1],[3,1,1],[3,1,1],[3,3,1],[3,11,5],[3,36,1],[3,4,49],[3,11,1],[3,8,1],[3,2,2],[3,3,1],[3,3,1],[3,8,3],[3,15,8],[3,30,9],[3,23,5],[3,10,1],[3,7,6],[3,1,1],[3,9,2],[3,6,1],[3,3,1],[3,3,1],[3,2,1],[3,21,1],[3,13,2],[3,4,2],[3,9,2],[3,8,1],[3,2,2],[3,4,2],[3,1,1],[3,9,2],[3,32,2],[3,2,2],[3,10,1],[3,1,4],[3,4,3],[3,14,3],[3,5,2],[3,2,1],[3,3,1],[3,5,3],[3,14,3],[3,2,3],[3,6,1],[3,4,1],[3,1,1],[3,16,1],[3,3,1],[3,2,1],[3,5,1],[3,33,1],[3,3,1],[3,14,4],[3,8,3],[3,12,2],[3,14,1],[3,2,1],[3,1,1],[3,13,2],[3,8,1],[3,9,1],[3,17,1],[3,14,2],[3,16,1],[3,12,4],[3,2,1],[3,2,2],[3,20,1],[3,2,2],[3,8,4],[3,7,3],[3,8,1],[3,1,2],[3,5,5],[3,29,1],[3,1,1],[3,2,1],[3,8,2],[3,2,1],[3,7,9],[3,3,2],[3,7,1],[3,6,1],[3,6,2],[3,1,26],[3,3,3],[3,7,1],[3,2,2],[3,8,2],[3,7,1],[3,3,1],[3,4,4],[3,11,1],[3,5,15],[3,28,1],[3,3,8],[3,3,3],[3,2,4],[3,6,4],[3,3,2],[3,2,2],[3,5,1],[3,12,2],[3,10,2],[3,1,1],[3,6,1],[3,2,1],[3,3,2],[4,8,1],[4,3,1],[4,23,1],[4,4,9],[4,6,2],[4,9,1],[4,9,6],[4,5,9],[4,8,1],[4,2,1],[4,
2,3],[4,8,1],[4,1,1],[4,4,1],[4,8,1],[4,2,1],[4,16,1],[4,1,8],[4,4,1],[4,1,3],[4,18,1],[4,2,1],[4,4,9],[4,2,1],[4,3,1],[4,9,2],[4,2,1],[4,7,3],[4,5,4],[4,27,2],[4,1,1],[4,8,2],[4,7,1],[4,8,1],[4,9,4],[4,3,2],[4,6,4],[4,2,2],[4,13,5],[4,8,1],[4,10,2],[4,1,1],[4,2,1],[4,1,2],[4,6,2],[4,5,2],[4,8,2],[4,16,2],[4,7,2],[4,102,5],[4,2,2],[4,1,1],[4,2,1],[4,1,2],[4,2,1],[4,29,4],[4,2,1],[4,1,1],[4,1,4],[4,3,2],[4,6,1],[4,19,2],[4,4,3],[4,1,12],[4,1,1],[4,62,3],[4,14,1],[4,1,1],[4,1,1],[4,7,4],[4,9,1],[4,15,1],[4,16,15],[4,2,2],[4,2,1],[4,41,3],[4,7,8],[4,7,3],[4,5,1],[4,9,1],[4,6,1],[4,1,3],[4,15,1],[4,5,4],[4,28,2],[4,11,3],[4,15,1],[4,1,1],[4,1,1],[4,12,1],[4,16,4],[4,12,5],[4,5,2],[4,8,4],[4,124,115],[4,11,3],[4,46,10],[4,4,1],[4,3,1],[4,2,1],[4,27,1],[4,1,1],[4,20,1],[4,2,1],[4,4,1],[4,53,1],[4,18,1],[4,1,1],[4,8,2],[4,3,1],[4,2,1],[4,5,1],[4,2,3],[4,2,5],[4,3,1],[4,8,1],[4,2,5],[4,8,2],[4,9,2],[4,48,1],[4,9,1],[4,20,2],[4,4,4],[4,3,2],[4,8,2],[4,6,2],[4,12,6],[4,9,1],[4,3,1],[4,4,1],[4,5,3],[4,5,1],[4,8,4],[4,3,1],[4,7,1],[4,6,2],[4,15,16],[4,6,1],[4,50,4],[4,23,4],[4,9,7],[4,8,2],[4,1,1],[4,2,1],[4,9,1],[4,12,1],[4,4,3],[4,2,2],[4,42,4],[4,1,1],[4,6,1],[4,11,10],[4,6,11],[4,7,1],[4,4,2],[4,4,2],[4,6,1],[4,59,4],[4,1,1],[4,2,7],[4,12,20],[4,11,3],[4,4,1],[4,12,3],[4,6,3],[4,7,2],[4,17,4],[4,106,8],[4,6,2],[4,7,1],[4,1,1],[4,8,1],[4,4,6],[4,3,1],[4,4,3],[4,14,3],[4,15,2],[4,4,1],[4,44,91],[4,7,2],[4,3,2],[4,2,1],[4,23,2],[4,30,1],[4,2,2],[4,10,1],[4,6,9],[4,6,2],[4,3,2],[4,3,2],[4,20,1],[4,4,1],[4,18,2],[4,12,1],[4,20,14],[4,10,1],[4,3,1],[4,2,1],[4,3,2],[4,3,3],[4,6,3],[4,2,4],[4,8,1],[4,8,5],[4,3,1],[4,10,2],[4,2,1],[4,1,1],[4,10,1],[4,25,2],[4,1,1],[4,4,1],[4,63,2],[4,1,1],[4,4,1],[4,6,7],[4,2,3],[4,8,1],[4,19,2],[4,11,1],[4,30,10],[4,4,4],[4,2,3],[4,2,1],[4,43,29],[4,2,1],[4,1,1],[4,17,1],[4,14,1],[4,13,1],[4,6,4],[4,2,2],[4,1,2],[4,3,1],[4,7,3],[4,4,1],[4,4,1],[4,1,1],[4,13,5],[4,2,1],[4,1,1],[4,5,1],[4,4,2],[4,13,2],[4,10,4],[4,8,1],[4,3,1],[4,2,2],[4,8,3],[4,4,2],[4,6,1],[4,7,1],[4,14,29],[4,19,1],[4,7,1],[4,19,1],[4,24,2],[4,2,1],[4,1,1],[4,28,1],[4,1,1],[4,2,1],[4,3,1],[4,2,1],[4,1,7],[4,2,4],[4,3,1],[4,29,1],[4,2,1],[4,14,1],[4,2,1],[4,28,3],[4,11,3],[4,1,2],[4,21,2],[4,1,1],[4,15,1],[4,17,1],[4,16,1],[4,13,1],[4,2,1],[4,15,5],[4,19,1],[4,17,1],[4,5,3],[4,12,2],[4,33,1],[4,8,1],[4,15,4],[4,2,11],[4,4,1],[4,1,10],[4,39,1],[4,28,1],[4,25,2],[4,1,1],[4,14,2],[4,8,32],[4,9,1],[4,7,1],[4,6,2],[4,1,2],[4,3,1],[4,6,2],[4,12,2],[4,2,2],[4,5,2],[4,18,1],[4,5,3],[4,6,2],[4,25,1],[4,3,16],[4,14,4],[4,2,6],[4,14,2],[4,3,1],[4,4,1],[4,9,3],[4,28,2],[4,9,1],[4,2,1],[4,7,1],[4,2,1],[4,1,4],[4,4,3],[4,1,1],[4,16,6],[4,3,1],[4,10,1],[4,12,3],[4,8,1],[4,4,1],[4,15,2],[4,4,1],[4,2,3],[4,2,9],[4,4,1],[4,7,2],[4,14,1],[4,31,3],[4,13,1],[4,19,2],[4,8,3],[4,2,1],[4,12,1],[4,5,1],[4,45,3],[4,6,1],[4,1,1],[4,12,6],[4,4,3],[4,3,1],[4,5,2],[4,4,4],[4,19,2],[4,8,1],[4,2,1],[4,27,2],[4,73,3],[4,22,2],[4,1,2],[4,7,46],[4,9,2],[4,2,1],[4,524,305],[4,7,1],[4,26,1],[4,2,1],[4,6,1],[4,30,2],[4,6,1],[4,25,92],[4,2,1],[4,13,1],[4,1,4],[4,1,7],[4,6,1],[4,8,2],[4,6,1],[4,4,2],[4,2,6],[4,12,2],[4,2,2],[4,5,2],[4,3,2],[4,13,1],[4,4,1],[4,6,3],[4,14,1],[4,15,1],[4,25,1],[4,3,1],[4,9,4],[4,94,3],[4,11,2],[4,12,4],[4,7,3],[4,3,1],[4,9,2],[4,3,1],[4,2,1],[4,8,3],[4,7,5],[4,2,45],[4,10,1],[4,10,4],[4,5,3],[4,6,6],[5,5,1],[5,2,1],[5,3,3],[5,11,2],[5,28,1],[5,8,1],[5,4,1],[5,4,1],[5,12,1],[5,7,1],[5,1,1],[5,38,7],[5,6,2],[5,4,2],[5,5,1],[5,2,2],[5,2,7],[5,1,4],[5,4,1],[5,4,1],[5,1,2],[5,3,1],[5,7,1],[5,2,1],[5,10,2],[5,4,1],[5,2,1
],[5,2,2],[5,3,1],[5,15,78],[5,2,1],[5,1,5],[5,10,1],[5,6,4],[5,10,2],[5,5,1],[5,1,1],[5,1,1],[5,2,2],[5,6,1],[5,2,2],[5,6,2],[5,10,2],[5,3,1],[5,6,2],[5,4,3],[5,16,5],[5,47,48],[5,2,5],[5,6,7],[5,4,2],[5,3,1],[5,2,1],[5,8,1],[5,7,1],[5,2,2],[5,2,1],[5,3,1],[5,7,4],[5,1,1],[5,1,1],[5,8,6],[5,1,4],[5,9,3],[5,11,4],[5,6,1],[5,6,1],[5,2,1],[5,5,1],[5,84,1],[5,2,33],[5,8,1],[5,6,3],[5,5,3],[5,2,1],[5,10,2],[5,3,1],[5,68,9],[5,6,2],[5,21,11],[5,3,4],[5,3,1],[5,16,3],[5,2,2],[5,2,1],[5,14,2],[5,24,2],[5,19,1],[5,1,4],[5,1,1],[5,3,1],[5,6,1],[5,2,1],[5,5,2],[5,4,3],[5,26,3],[5,2,1],[5,6,4],[5,2,1],[5,6,3],[5,5,1],[5,8,3],[5,1,3],[5,9,1],[5,1,2],[5,11,2],[5,23,1],[5,7,1],[5,2,2],[5,3,2],[5,2,1],[5,11,2],[5,8,2],[5,1,1],[5,4,1],[5,2,1],[5,7,1],[5,11,1],[5,1,1],[5,33,1],[5,4,1],[5,5,1],[5,17,3],[5,1,2],[5,18,2],[5,1,2],[5,1,1],[5,2,3],[5,4,2],[5,2,1],[5,13,7],[5,5,1],[5,19,4],[5,23,9],[5,11,6],[5,7,2],[5,10,1],[5,2,1],[5,26,1],[5,3,3],[5,3,2],[5,3,2],[5,15,3],[5,2,1],[5,3,1],[5,4,1],[5,8,1],[5,4,1],[5,23,1],[5,6,1],[5,1,3],[5,124,17],[5,1,1],[5,1,1],[5,15,1],[5,11,2],[5,2,1],[5,2,2],[5,3,2],[5,1,1],[5,6,4],[5,6,1],[5,3,3],[5,6,5],[5,17,1],[5,7,2],[5,5,1],[5,11,1],[5,3,2],[5,36,2],[5,17,7],[5,4,1],[5,7,2],[5,2,1],[5,2,1],[5,2,1],[5,7,10],[5,4,1],[5,1,3],[5,19,2],[5,2,2],[5,3,1],[5,8,3],[5,4,1],[5,15,1],[5,2,3],[5,13,2],[5,1,3],[5,7,1],[5,23,48],[5,9,1],[5,12,10],[5,16,1],[5,10,1],[5,7,5],[5,2,1],[5,3,1],[5,23,2],[5,4,1],[5,18,1],[5,13,2],[5,54,136],[5,6,2],[5,2,2],[5,5,1],[5,6,1],[5,15,8],[5,14,9],[5,4,1],[5,7,2],[5,3,3],[5,117,5],[5,25,8],[5,14,4],[5,25,3],[5,7,1],[5,7,1],[5,15,3],[5,3,2],[5,4,1],[5,6,4],[5,14,4],[5,7,1],[5,20,1],[5,6,5],[5,12,1],[5,9,3],[5,2,1],[5,4,20],[5,4,3],[5,1,1],[5,1,1],[5,8,1],[5,4,1],[5,1,1],[5,6,3],[5,19,1],[5,14,1],[5,22,2],[5,2,1],[5,11,2],[5,1,1],[5,10,1],[5,4,1],[5,23,3],[5,3,1],[5,15,1],[5,8,4],[5,11,4],[5,4,1],[5,2,1],[5,8,6],[5,2,4],[5,2,7],[5,3,2],[5,2,1],[5,1,1],[5,1,1],[5,11,2],[5,4,10],[5,11,4],[5,110,4],[5,6,1],[5,2,1],[5,96,34],[6,4,1],[6,7,3],[6,2,1],[6,6,2],[6,10,1],[6,2,1],[6,10,1],[6,59,2],[6,7,4],[6,4,2],[6,3,1],[6,6,1],[6,1,4],[6,7,3],[6,2,3],[6,1,1],[6,12,1],[6,1,39],[6,28,1],[6,3,4],[6,8,3],[6,4,4],[6,9,2],[6,15,1],[6,10,1],[6,1,1],[6,2,1],[6,7,1],[6,2,1],[6,93,1],[6,14,6],[6,2,2],[6,55,39],[6,15,2],[6,23,3],[6,3,3],[6,35,2],[6,5,15],[6,1,7],[6,8,19],[6,10,10],[6,3,2],[6,6,3],[6,1,2],[6,6,1],[6,2,1],[6,4,1],[6,127,20],[6,20,18],[6,3,1],[6,9,2],[6,2,3],[6,10,1],[6,27,1],[6,9,1],[6,9,1],[6,28,1],[6,1,1],[6,10,1],[6,11,1],[6,5,1],[6,4,1],[6,82,35],[6,2,1],[6,1,1],[6,3,1],[6,2,1],[6,2,11],[6,2,8],[6,3,2],[6,12,3],[6,5,6],[6,42,4],[6,8,1],[6,2,1],[6,2,2],[6,10,3],[6,6,2],[6,48,2],[6,2,3],[6,2,2],[6,2,1],[6,4,1],[6,10,1],[6,1,1],[6,7,1],[6,35,1],[6,17,1],[6,21,2],[6,1,1],[6,4,2],[6,25,1],[6,7,2],[6,12,4],[6,2,6],[6,24,4],[6,2,1],[6,5,1],[6,2,1],[6,2,1],[6,3,2],[6,4,2],[6,2,1],[6,2,1],[6,2,9],[6,2,2],[6,5,1],[6,8,10],[6,1,1],[6,12,2],[6,10,1],[6,4,2],[6,12,4],[6,1,3],[6,3,2],[6,8,1],[6,4,4],[6,12,5],[6,4,2],[6,10,1],[6,1,1],[6,12,1],[6,6,4],[6,2,1],[6,3,2],[6,1,1],[6,3,5],[6,6,1],[6,32,1],[6,10,1],[6,6,5],[6,27,2],[6,7,1],[6,2,1],[6,10,2],[6,5,1],[6,8,2],[6,3,2],[6,9,2],[6,22,1],[6,2,2],[6,10,1],[6,3,4],[6,1,1],[6,3,6],[6,8,2],[6,44,1],[6,1,1],[6,9,7],[6,9,5],[6,19,4],[6,7,1],[6,1,1],[6,10,1],[6,14,2],[6,4,3],[6,4,1],[6,6,1],[6,3,1],[6,4,1],[6,6,3],[6,6,2],[6,6,1],[6,1,3],[6,12,13],[6,3,2],[6,1,4],[6,15,1],[6,39,4],[6,5,1],[6,1,5],[6,11,3],[6,5,7],[6,9,2],[6,1,1],[6,12,1],[6,12,1],[6,1,4],[6,11,1],[6,3,1],[6,6,2],[6,5,2],[6,2,1],[6,1,2],[6,2,1],[6,41,23],[6,3
,1],[6,15,1],[6,1,1],[6,1,1],[6,2,2],[6,3,1],[6,10,1],[6,17,6],[6,5,2],[6,30,1],[7,2,2],[7,10,2],[7,8,3],[7,9,4],[7,4,1],[7,8,1],[7,2,1],[7,7,134],[7,16,1],[7,5,3],[7,3,1],[7,6,2],[7,1,1],[7,5,1],[7,5,1],[7,2,1],[7,24,1],[7,8,4],[7,9,2],[7,1,1],[7,6,2],[7,9,2],[7,1,1],[7,5,28],[7,1,1],[7,2,2],[7,7,2],[7,11,1],[7,2,1],[7,17,32],[7,5,1],[7,2,1],[7,3,2],[7,7,4],[7,15,3],[7,3,1],[7,6,2],[7,1,1],[7,2,1],[7,1,1],[7,1,11],[7,2,1],[7,8,1],[7,6,1],[7,2,1],[7,57,1],[7,20,46],[7,6,2],[7,6,1],[7,1,2],[7,28,7],[7,3,5],[7,4,1],[7,4,6],[7,2,2],[7,3,3],[7,2,3],[7,2,1],[7,1,1],[7,2,6],[7,4,1],[7,3,1],[7,23,1],[7,7,2],[7,7,1],[7,4,3],[7,2,1],[7,1,1],[7,4,2],[7,15,2],[7,6,1],[7,2,1],[7,14,1],[7,1,1],[7,1,1],[7,4,2],[7,2,1],[7,4,1],[7,2,1],[7,4,3],[7,22,1],[7,10,1],[7,2,1],[7,1,2],[7,7,2],[7,1,2],[7,12,1],[7,3,1],[7,2,4],[7,3,8],[7,2,1],[7,6,1],[7,5,3],[7,8,2],[7,5,1],[7,6,1],[7,6,1],[7,5,1],[7,9,5],[7,3,1],[7,3,2],[7,3,19],[7,28,3],[7,2,2],[7,3,1],[7,51,4],[7,2,1],[7,2,1],[7,22,2],[7,5,1],[7,2,1],[7,4,2],[7,2,1],[7,6,2],[7,6,1],[7,3,1],[7,37,1],[7,9,1],[7,8,2],[7,2,1],[7,4,1],[7,2,1],[7,18,1],[7,9,2],[7,1,1],[7,5,1],[7,2,1],[7,13,1],[7,45,1],[7,1,3],[7,7,5],[7,16,1],[7,7,1],[7,1,1],[7,3,1],[7,8,1],[7,1,1],[7,1,4],[7,2,2],[7,6,1],[7,6,1],[7,2,1],[7,16,1],[7,11,1],[7,1,1],[7,2,1],[7,3,2],[7,8,8],[7,33,1],[7,2,8],[7,4,1],[7,6,7],[7,12,3],[7,17,1],[7,9,5],[7,3,2],[7,3,2],[7,4,1],[7,1,1],[7,2,2],[7,6,1],[8,9,1],[8,79,3],[8,3,1],[8,14,4],[8,2,4],[8,10,5],[8,7,3],[8,8,1],[8,6,1],[8,7,1],[8,8,2],[8,9,1],[8,30,2],[8,1,1],[8,1,5],[8,15,2],[8,10,3],[8,5,3],[8,1,2],[8,3,1],[8,16,1],[8,3,1],[8,3,3],[8,3,4],[8,2,1],[8,6,2],[8,4,4],[8,5,3],[8,8,4],[8,8,3],[8,4,3],[8,13,7],[8,2,1],[8,2,1],[8,1,1],[8,4,1],[8,10,3],[8,16,9],[8,3,2],[8,1,2],[8,2,5],[8,5,2],[8,156,14],[8,1,1],[8,5,1],[8,252,690],[8,5,1],[8,25,21],[8,1,1],[8,39,12],[8,1,4],[8,6,1],[8,25,7],[8,1,1],[8,7,1],[8,46,11],[8,3,1],[8,1,1],[8,14,1],[8,24,1],[8,16,3],[8,6,3],[8,5,1],[8,1,2],[8,12,2],[8,2,1],[8,2,5],[8,6,1],[8,6,1],[8,14,1],[8,7,1],[8,6,1],[8,4,6],[8,1,2],[8,3,1],[8,2,14],[8,7,12],[8,2,2],[8,25,15],[8,8,3],[8,6,6],[8,5,1],[8,1,1],[8,2,3],[8,18,3],[8,2,2],[8,3,1],[8,4,1],[8,3,3],[8,4,2],[8,12,2],[8,1,1],[8,4,1],[8,18,1],[8,2,2],[8,11,3],[8,5,1],[8,6,1],[8,13,1],[8,6,1],[8,23,1],[8,18,3],[8,13,2],[8,4,1],[8,38,4],[8,1,1],[8,6,1],[8,10,2],[8,2,7],[8,10,7],[8,1,1],[8,4,7],[8,2,1],[8,2,2],[8,7,1],[8,17,1],[8,10,5],[8,4,4],[8,8,4],[8,3,2],[8,2,1],[8,33,1],[8,8,6],[8,15,1],[8,2,1],[8,7,4],[8,6,3],[8,2,1],[8,1,2],[8,3,1],[8,4,1],[8,4,2],[8,27,1],[8,10,1],[9,8,2],[9,2,2],[9,7,1],[9,11,1],[9,35,5],[9,3,1],[9,2,2],[9,6,7],[9,16,2],[9,7,15],[9,3,1],[9,9,1],[9,5,1],[9,3,1],[9,3,1],[9,4,1],[9,2,5],[9,1,1],[9,5,4],[9,1,1],[9,13,1],[9,14,4],[9,3,1],[9,35,3],[9,41,1],[9,8,3],[9,2,5],[9,8,2],[9,13,3],[9,10,1],[9,4,1],[9,35,12],[9,9,1],[9,12,1],[9,4,1],[9,2,4],[9,1,2],[9,6,4],[9,1,4],[9,20,3],[9,4,3],[9,3,3],[9,1,4],[9,2,11],[9,11,2],[9,19,1],[9,5,1],[9,6,2],[9,1,1],[9,3,1],[9,15,3],[9,2,1],[9,6,1],[9,13,1],[9,2,1],[9,11,2],[9,3,5],[9,6,1],[9,16,1],[9,4,1],[9,3,2],[9,3,1],[9,2,5],[9,13,1],[9,3,1],[9,2,2],[9,7,1],[9,2,3],[9,3,4],[9,5,1],[9,4,1],[9,10,2],[9,36,1],[9,7,2],[9,3,1],[9,4,2],[9,5,5],[9,12,1],[9,4,1],[9,2,2],[9,12,1],[9,13,1],[9,12,1],[9,2,4],[9,1,1],[9,1,2],[9,6,6],[9,1,2],[9,8,4],[9,7,2],[9,15,4],[10,3,25],[10,2,1],[10,4,2],[10,8,1],[10,2,1],[10,1,1],[10,21,1],[10,21,19],[10,4,4],[10,4,8],[10,2,1],[10,1,3],[10,3,5],[10,6,1],[10,8,5],[10,4,1],[10,24,5],[10,2,2],[10,24,1],[10,6,4],[10,1,2],[10,25,1],[10,14,1],[10,6,3],[10,2,3],[10,6,1],[10,15,2],[10,54,3],[10,12,1],[
10,21,1],[10,7,1],[10,4,4],[10,5,1],[10,10,3],[10,37,1],[10,8,3],[10,11,1],[10,2,4],[10,6,1],[10,30,1],[10,35,1],[10,4,2],[10,2,1],[10,5,2],[10,6,1],[10,4,4],[10,12,1],[10,12,1],[10,44,4],[10,16,3],[10,1,64],[10,27,1],[10,9,3],[10,17,2],[10,25,2],[10,2,2],[10,7,3],[10,89,1],[10,7,30],[10,2,4],[10,2,3],[10,2,1],[10,3,3],[10,11,1],[10,7,1],[10,2,1],[10,4,2],[10,1,1],[10,1,1],[10,6,2],[10,7,3],[10,4,1],[10,2,2],[10,18,1],[10,4,1],[10,19,1],[10,14,6],[10,5,1],[10,5,6],[10,12,1],[11,5,6],[11,15,8],[11,9,1],[11,3,2],[11,6,3],[11,24,4],[11,27,3],[11,2,2],[11,5,9],[11,13,1],[11,3,1],[11,2,25],[11,10,1],[11,4,11],[11,7,2],[11,49,1],[11,4,1],[11,12,1],[11,7,1],[11,1,2],[11,10,6],[11,2,1],[11,4,2],[11,1,2],[11,2,1],[11,5,1],[11,4,3],[11,1,1],[11,6,1],[11,4,3],[11,95,2],[11,8,1],[11,18,1],[11,5,1],[11,16,12],[11,13,2],[11,7,6],[11,56,1],[11,6,1],[11,8,1],[11,21,14],[11,2,7],[11,5,1],[11,1,1],[11,5,2],[11,2,1],[11,15,1],[11,3,3],[11,26,1],[11,6,6],[11,1,1],[11,10,7],[11,6,3],[11,6,1],[11,8,2],[11,1,2],[11,35,2],[11,19,2],[11,8,2],[11,4,1],[11,7,2],[11,4,5],[11,3,5],[11,17,1],[11,3,3],[11,2,1],[11,12,1],[11,2,8],[11,85,1],[11,4,1],[11,9,1],[11,2,2],[11,2,1],[11,6,2],[11,6,3],[11,18,3],[11,1,1],[11,8,1],[11,22,1],[11,7,1],[11,4,2],[11,4,1],[11,8,3],[11,10,4],[11,24,1],[11,10,19],[11,12,8],[12,5,1],[12,1,7],[12,4,1],[12,21,6],[12,12,2],[12,16,1],[12,1,1],[12,2,1],[12,3,1],[12,8,9],[12,1,1],[12,17,2],[12,16,6],[12,14,1],[12,3,3],[12,27,3],[12,2,1],[12,3,3],[12,14,4],[12,1,3],[12,10,1],[12,5,7],[12,7,3],[12,13,5],[12,4,1],[12,47,4],[12,18,1],[12,31,2],[12,8,1],[12,5,4],[12,1,1],[12,26,1],[12,13,2],[12,5,2],[12,4,3],[12,15,5],[12,2,1],[12,2,1],[12,3,1],[12,5,1],[12,11,1],[12,4,3],[12,1,1],[12,7,2],[12,6,1],[12,14,6],[12,32,4],[12,14,1],[12,31,1],[12,7,3],[12,9,7],[12,5,1],[12,6,1],[12,6,6],[12,7,8],[12,2,1],[12,3,1],[12,4,3],[12,1,1],[12,19,2],[12,11,1],[12,7,2],[12,8,1],[12,15,4],[12,5,1],[12,9,3],[12,2,1],[12,1,1],[12,8,9],[12,3,6],[12,15,1],[13,1,11],[13,7,2],[13,10,1],[13,13,4],[13,3,2],[13,1,2],[13,2,1],[13,3,4],[13,3,1],[13,4,3],[13,5,1],[13,10,13],[13,5,4],[13,2,3],[13,3,2],[13,72,2],[13,7,3],[13,19,2],[13,4,1],[13,5,6],[13,4,2],[13,2,1],[13,2,1],[13,34,11],[13,5,2],[13,9,5],[13,6,2],[13,5,5],[13,9,5],[13,9,1],[13,19,3],[13,4,1],[13,3,1],[13,7,2],[13,1,1],[13,11,7],[13,4,7],[13,6,1],[13,2,1],[13,1,1],[13,21,1],[13,6,15],[13,5,2],[13,1,1],[13,1,2],[14,2,1],[14,18,1],[14,8,2],[14,5,1],[14,2,2],[14,5,2],[14,2,1],[14,8,2],[14,4,1],[14,8,5],[14,14,1],[14,9,6],[14,18,2],[14,4,1],[14,6,1],[14,18,1],[14,6,6],[14,4,1],[14,6,2],[14,6,8],[14,3,1],[14,2,3],[14,1,1],[14,17,4],[14,4,3],[14,15,3],[14,4,8],[14,15,2],[14,6,1],[14,9,22],[14,7,3],[14,7,6],[14,2,2],[14,1,1],[14,7,4],[14,10,1],[14,1,1]])
 #data = 
np.array([[131,3,1],[49,1,1],[17,7,1],[55,7,19],[80,5,1],[40,2,2],[91,21,6],[19,16,1],[27,7,1],[15,50,2],[37,1,7],[17,3,1],[22,32,2],[68,2,1],[26,2,3],[15,2,3],[246,2,1],[25,2,1],[19,1,1],[98,1,2],[54,13,1],[168,2,4],[20,102,5],[40,2,1],[41,1,1],[44,19,16],[17,6,1],[92,12,1],[17,2,1],[16,5,3],[45,11,1],[20,10,1],[26,1,2],[21,9,9],[26,10,1],[187,4,2],[65,28,4],[17,9,33],[23,39,1],[58,4,4],[41,107,3],[28,3,1],[16,1,1],[17,16,4],[17,16,1],[17,5,1],[83,2,2],[17,1,2],[26,4,2],[22,7,2],[16,1,1],[15,2,1],[15,2,1],[111,8,1],[25,6,1],[112,4,1],[19,10,2],[38,25,4],[29,1,5],[17,2,1],[111,9,8],[53,5,4],[29,7,1],[25,8,2],[23,2,134],[32,6,1],[27,1,1],[61,4,2],[41,163,4],[57,11,2],[24,2,1],[16,18,1],[81,7,14],[169,5,1],[19,4,1],[412,5,1],[32,2,7],[19,28,3],[17,11,1],[44,4,5],[27,2,2],[18,1,7],[15,3,3],[18,10,1],[19,6,10],[46,2,5],[20,12,3],[25,6,4],[18,4,1],[15,40,8],[16,11,16],[237,1,1],[26,13,2],[26,4,1],[101,5,5],[50,2,1],[22,45,5],[16,7,2],[17,4,2],[19,2,3],[22,1,1],[260,6,1],[20,15,1],[24,5,1],[33,2,1],[16,1,5],[21,18,1],[22,1,1],[18,13,2],[124,3,1],[16,6,1],[19,6,2],[71,2,1],[232,2,2],[21,2,1],[231,11,1],[201,49,2],[28,12,1],[68,5,1],[56,26,7],[17,1,8],[19,10,2],[120,13,2],[218,3,1],[46,5,6],[57,4,1],[30,5,2],[17,8,4],[17,22,1],[15,5,1],[16,7,1],[26,13,1],[28,22,2],[100,1,2],[58,12,2],[52,9,11],[21,4,2],[18,4,1],[699,1,1],[401,6,3],[20,7,1],[20,3,13],[27,1,1],[35,2,2],[27,6,1],[15,13,1],[17,6,1],[26,28,4],[89,2,3],[36,11,2],[17,11,2],[15,1,1],[59,3,1],[15,3,1],[20,11,1],[49,1,1],[24,3,1],[25,7,1],[29,1,1],[61,2,2],[28,3,13],[82,2,8],[22,2,1],[21,25,3],[73,3,2],[22,8,1],[51,3,12],[16,6,1],[64,2,4],[22,2,2],[19,7,1],[69,2,1],[17,8,9],[19,1,13],[28,35,3],[134,2,1],[19,12,1],[27,13,1],[17,10,1],[16,17,4],[46,2,3],[15,1,2],[35,15,2],[20,6,1],[16,10,3],[33,11,1],[20,8,4],[15,5,1],[33,5,2],[460,6,1],[132,2,1],[73,14,3],[34,5,1],[123,1,2],[15,8,1],[30,1,1],[16,1,1],[73,3,1],[54,4,1],[17,1,9],[17,17,3],[22,1,3],[46,16,8],[18,1,1],[22,3,2],[21,4,1],[40,5,1],[19,2,1],[16,11,1],[19,4,1],[26,4,1],[87,1,3],[75,1,8],[25,1,1],[2230,5,1],[16,1,1],[17,10,3],[15,44,2],[79,3,1],[21,19,1],[292,5,13],[27,4,1],[25,2,1],[23,34,1],[36,2,1],[15,2,7],[18,3,3],[62,1,7],[16,61,5],[15,5,1],[36,5,1],[67,8,3],[18,4,1],[23,2,1],[16,21,3],[32,7,1],[22,6,1],[88,5,1],[19,2,4],[38,2,1],[47,6,28],[18,35,3],[159,15,1],[25,3,5],[295,9,4],[26,2,1],[27,8,3],[86,6,1],[24,25,4],[18,1,2],[16,6,1],[64,16,1],[39,1,2],[30,1,4],[44,1,3],[82,11,4],[28,13,2],[46,19,1],[15,26,1],[30,6,11],[51,3,6],[19,20,1],[940,6,4],[21,6,1],[29,2,1],[20,2,1],[31,2,1],[21,2,3],[25,27,1],[26,2,1],[17,4,1],[64,7,1],[126,7,15],[18,8,1],[20,13,2],[16,7,2],[18,2,1],[19,4,5],[29,1,1],[80,12,2],[42,14,6],[107,2,1],[15,4,1],[48,16,1],[62,3,2],[15,13,1],[29,48,7],[25,4,1],[17,5,20],[19,7,3],[22,10,3],[58,15,3],[17,14,1],[121,2,2],[33,64,11],[16,15,2],[39,6,2],[25,69,7],[69,2,1],[41,6,2],[20,5,1],[42,22,4],[18,17,4],[16,14,3],[27,14,1],[20,1,1],[44,1,101],[33,9,1],[26,2,8],[30,24,3],[27,24,2],[34,7,1],[39,6,3],[20,2,3],[55,5,1],[22,22,2],[17,2,1],[55,3,1],[29,10,5],[60,12,2],[18,13,3],[93,3,2],[15,3,1],[26,5,5],[18,1,1],[17,16,2],[15,13,3],[22,12,1],[256,19,27],[18,7,8],[22,3,1],[35,3,4],[16,2,1],[19,6,2],[24,1,1],[29,3,2],[36,21,8],[24,1,1],[18,6,2],[26,24,11],[19,15,2],[16,1,1],[28,4,1],[60,11,1],[62,4,2],[70,2,1],[75,1,2],[125,3,1],[21,6,1],[165,23,2],[108,1,1],[35,5,1],[251,19,12],[137,4,1],[81,11,4],[104,19,4],[18,18,3],[19,13,1],[18,112,5],[19,6,2],[28,7,2],[23,9,1],[20,15,7],[34,1,1],[24,12,3],[15,5,1],[40,9,4],[24,41,6],[35,1,1],[17,3,1],[17,3,4],[46,7,2],[21,8,10],[1
7,7,4],[36,6,1],[32,6,2],[31,1,1],[17,32,5],[26,3,4],[16,4,1],[21,2,1],[19,4,1],[33,4,1],[46,7,1],[28,9,1],[169,9,24],[24,18,2],[103,6,1],[93,1,1],[156,2,1],[58,7,1],[55,30,3],[15,5,1],[20,9,1],[19,20,1],[44,1,3],[16,2,1],[23,4,1],[22,10,1],[16,138,5],[17,2,1],[17,1,2],[70,8,5],[15,3,6],[22,6,1],[20,1,1],[35,2,4],[15,3,1],[26,119,46],[390,18,2],[22,4,1],[175,5,2],[23,4,1],[26,2,21],[17,1,2],[112,4,1],[18,22,5],[22,2,1],[122,13,1],[18,1,1],[27,7,1],[26,18,5],[18,1,3],[28,1,15],[35,11,1],[15,2,1],[55,6,5],[67,3,1],[30,5,7],[31,12,1],[16,9,12],[43,7,1],[23,21,1],[43,2,7],[53,40,1],[58,6,1],[29,27,11],[65,6,2],[27,4,2],[15,7,2],[17,26,13],[48,4,79],[30,2,6],[25,1,1],[20,20,6],[59,2,5],[15,14,4],[18,7,1],[18,2,1],[28,7,1],[35,1,1],[15,12,4],[52,2,2],[16,25,1],[91,1,1],[27,7,3],[62,4,1],[29,11,1],[25,4,3],[15,1,1],[40,6,2],[19,2,2],[24,14,2],[33,5,1],[58,3,3],[23,1,4],[15,2,2],[1263,4,1],[92,5,1],[17,2,1],[16,10,1],[50,8,1],[24,2,1],[73,1,1],[30,33,55],[18,15,1],[15,9,4],[23,1,3],[17,5,1],[43,3,1],[15,9,2],[19,4,2],[20,20,4],[31,1,2],[21,3,1],[79,9,13],[20,3,24],[56,2,1],[26,1,2],[15,3,1],[30,12,1],[64,6,1],[327,8,47],[39,2,1],[22,17,5],[18,6,3],[74,14,2],[17,4,1],[39,1,3],[520,9,3],[65,9,1],[36,1,4],[264,3,3],[16,1,1],[18,5,3],[22,16,3],[21,2,1],[15,3,3],[49,5,1],[37,19,2],[19,13,2],[30,1,1],[44,4,1],[19,9,31],[22,4,2],[21,4,5],[16,4,1],[40,17,1],[15,12,4],[43,4,3],[21,30,1],[60,16,3],[28,2,1],[38,16,2],[19,3,1],[68,18,4],[1,4,3],[1,9,1],[1,2,2],[1,1,4],[1,148,4],[1,6,1],[1,16,1],[1,4,1],[1,19,3],[1,7,3],[1,2,2],[1,4,2],[1,47,5],[1,2,2],[1,1,4],[1,1,2],[1,1,2],[1,1,1],[1,4,2],[1,7,1],[1,4,6],[1,2,1],[1,5,4],[1,9,3],[1,9,2],[1,7,1],[1,4,1],[1,10,2],[1,1,1],[1,5,1],[1,5,1],[1,2,16],[1,2,1],[1,1,1],[1,3,2],[1,8,3],[1,1,18],[1,5,1],[1,14,3],[1,6,6],[1,7,1],[1,1,1],[1,16,1],[1,2,1],[1,2,1],[1,1,2],[1,4,4],[1,4,1],[1,9,1],[1,25,7],[1,1,1],[1,8,2],[1,1,4],[1,77,8],[1,1,3],[1,6,3],[1,4,2],[1,2,2],[1,2,1],[1,40,1],[1,26,3],[1,1,4],[1,1,1],[1,2,2],[1,1,2],[1,15,1],[1,35,86],[1,3,2],[1,4,1],[1,2,1],[1,4,3],[1,30,1],[1,2,1],[1,4,2],[1,2,1],[1,1,1],[1,2,1],[1,3,1],[1,2,3],[1,3,1],[1,14,1],[1,3,2],[1,7,4],[1,6,2],[1,2,1],[1,23,2],[1,4,1],[1,4,3],[1,26,3],[1,47,15],[1,3,5],[1,5,1],[1,3,1],[1,2,1],[1,2,1],[1,3,1],[1,36,1],[1,2,1],[1,1,9],[1,6,1],[1,2,1],[1,8,3],[1,7,1],[1,33,2],[1,14,4],[1,13,3],[1,2,1],[1,5,1],[1,7,2],[1,9,3],[1,6,1],[1,3,1],[1,9,1],[1,2,2],[1,2,1],[1,6,3],[1,4,2],[1,2,1],[1,1,1],[1,13,4],[1,9,2],[1,4,2],[1,7,14],[1,8,1],[1,3,1],[1,25,2],[1,2,1],[1,11,1],[1,2,1],[1,1,1],[1,3,3],[1,3,2],[1,2,1],[1,2,1],[1,2,8],[1,9,1],[1,13,9],[1,3,1],[1,8,1],[1,102,71],[1,22,1],[1,2,3],[1,22,2],[1,1,1],[1,3,1],[1,12,1],[1,3,2],[1,1,1],[1,5,2],[1,30,6],[1,14,1],[1,2,1],[1,1,1],[1,5,1],[1,8,1],[1,4,2],[1,3,1],[1,2,1],[1,1,1],[1,1,1],[1,12,1],[1,14,1],[1,10,2],[1,22,3],[1,15,2],[1,4,2],[1,5,1],[1,10,2],[1,10,26],[1,1,2],[1,1,2],[1,17,1],[1,1,1],[1,7,1],[1,1,1],[1,8,2],[1,5,2],[1,15,1],[1,16,2],[1,7,1],[1,26,1],[1,16,2],[1,13,6],[1,3,3],[1,2,1],[1,2,1],[1,5,3],[1,1,1],[1,4,1],[1,1,1],[1,2,2],[1,13,4],[1,50,2],[1,12,3],[1,2,1],[1,16,5],[1,2,8],[1,3,5],[1,1,1],[1,25,1],[1,5,1],[1,13,2],[1,1,2],[1,8,1],[1,13,1],[1,4,4],[1,2,3],[1,7,2],[1,2,4],[1,2,1],[1,1,2],[1,4,1],[1,3,2],[1,8,4],[1,4,1],[1,2,2],[1,2,1],[1,3,1],[1,7,1],[1,8,5],[1,34,4],[1,2,3],[1,1,1],[1,8,3],[1,3,1],[1,26,2],[1,3,1],[1,1,6],[1,2,4],[1,7,1],[1,9,2],[1,3,93],[1,2,1],[1,3,2],[1,3,3],[1,15,3],[1,12,1],[1,1,1],[1,1,5],[1,4,1],[1,1,4],[1,2,1],[1,6,4],[1,9,1],[1,1,9],[1,11,1],[1,68,2],[1,7,1],[1,11,1],[1,6,1],[1,5,2],[1,2,1],[1,19,1],[1,3,1],[1,1,2],[1,3
7,1],[1,19,1],[1,4,5],[1,8,1],[1,1,1],[1,7,1],[1,3,1],[1,4,1],[1,6,7],[1,2,1],[1,14,3],[1,4,1],[1,6,5],[1,1,1],[1,1,1],[1,2,1],[1,1,2],[1,7,2],[1,8,1],[1,17,136],[1,6,1],[1,3,2],[1,9,12],[1,7,2],[1,2,9],[1,1,4],[1,3,1],[1,10,1],[1,6,16],[1,8,1],[1,2,2],[1,2,2],[1,4,3],[1,3,3],[1,24,3],[1,68,28],[1,16,1],[1,9,2],[1,1,2],[1,18,7],[1,3,1],[1,5,2],[1,1,3],[1,3,1],[1,3,8],[1,73,5],[1,6,3],[1,5,1],[1,2,1],[1,15,7],[1,80,2],[1,3,1],[1,12,3],[1,8,1],[1,2,1],[1,9,5],[1,3,2],[1,319,20],[1,2,1],[1,4,6],[1,5,4],[1,25,1],[1,8,1],[1,6,5],[1,18,1],[1,2,2],[1,5,2],[1,10,1],[1,10,1],[1,2,1],[1,6,2],[1,7,2],[1,39,1],[1,7,79],[1,28,4],[1,2,1],[1,4,1],[1,25,5],[1,23,3],[1,10,3],[1,2,1],[1,13,1],[1,2,2],[1,6,1],[1,6,4],[1,12,1],[1,4,1],[1,3,1],[1,10,1],[1,4,2],[1,7,1],[1,11,1],[1,6,1],[1,4,2],[1,3,3],[1,1,1],[1,1,1],[1,3,3],[1,3,2],[1,15,1],[1,1,1],[1,1,4],[1,26,2],[1,1,1],[1,7,1],[1,4,63],[1,1,19],[1,96,7],[1,7,2],[1,6,1],[1,4,1],[1,18,2],[1,1,2],[1,4,1],[1,3,3],[1,18,1],[1,3,1],[1,14,1],[1,6,2],[1,13,1],[1,1,5],[1,13,2],[1,1,1],[1,4,4],[1,10,1],[1,2,1],[1,12,3],[1,7,1],[1,8,1],[1,3,1],[1,2,2],[1,4,5],[1,9,1],[1,2,1],[1,2,1],[1,6,8],[1,32,3],[1,3,2],[1,6,1],[1,5,1],[1,7,1],[1,4,2],[1,2,1],[1,5,4],[1,1,2],[1,9,1],[1,2,1],[1,11,1],[1,5,2],[1,2,1],[1,1,1],[1,3,1],[1,7,13],[1,4,4],[1,1,1],[1,6,1],[1,1,3],[1,6,6],[1,6,1],[1,4,4],[1,10,1],[1,15,1],[1,3,7],[1,6,1],[1,9,1],[1,14,23],[1,14,2],[1,6,3],[1,2,1],[1,9,1],[1,1,3],[1,6,4],[1,15,2],[1,8,1],[1,6,6],[1,16,10],[1,5,4],[1,30,3],[1,7,1],[1,4,1],[1,3,1],[1,6,6],[1,1,2],[1,3,2],[1,1,1],[1,1,1],[1,1,1],[1,2,5],[1,2,1],[1,2,5],[1,24,1],[1,3,1],[1,6,1],[1,2,1],[1,4,1],[1,2,2],[1,4,1],[1,1,1],[1,3,1],[1,8,2],[1,4,2],[1,2,2],[1,2,1],[1,12,6],[1,2,1],[1,32,42],[1,7,1],[1,7,1],[1,12,1],[1,2,1],[1,6,1],[1,42,1],[1,2,1],[1,1,2],[1,2,1],[1,6,1],[1,2,2],[1,8,1],[1,22,4],[1,1,1],[1,11,20],[1,6,2],[1,2,1],[1,4,2],[1,9,1],[1,10,1],[1,16,5],[1,3,2],[1,8,1],[1,6,3],[1,1,2],[1,6,1],[1,2,1],[1,28,1],[1,18,1],[1,17,8],[1,4,1],[1,2,2],[1,13,1],[1,25,3],[1,7,4],[1,3,1],[1,1,1],[1,3,3],[1,4,1],[1,7,5],[1,2,2],[1,5,1],[1,2,2],[1,2,2],[1,14,1],[1,3,3],[1,4,1],[1,1,2],[1,11,1],[1,2,1],[1,6,1],[1,7,6],[1,7,1],[1,2,2],[1,2,1],[1,31,4],[1,4,3],[1,14,6],[1,4,4],[1,1,1],[1,2,1],[1,12,5],[1,4,1],[1,7,1],[1,3,1],[1,4,1],[1,11,1],[1,12,1],[1,3,2],[1,9,1],[1,17,2],[1,9,5],[1,6,1],[1,13,2],[1,5,1],[1,4,3],[1,3,1],[1,1,4],[1,7,1],[1,4,1],[1,3,1],[1,56,3],[1,1,1],[1,9,1],[1,4,1],[1,15,1],[1,2,1],[1,12,1],[1,4,2],[1,1,1],[1,1,1],[1,149,2],[1,56,1],[1,4,5],[1,2,2],[1,11,3],[1,2,3],[1,1,2],[1,2,1],[1,15,4],[1,2,2],[1,4,1],[1,17,2],[1,10,5],[1,14,2],[1,8,2],[1,4,2],[1,4,1],[1,6,1],[1,5,1],[1,7,2],[1,20,5],[1,3,1],[1,4,1],[1,11,1],[1,2,1],[1,1,3],[1,5,2],[1,6,1],[1,4,3],[1,4,3],[1,4,2],[1,7,3],[1,5,1],[1,1,1],[1,2,1],[1,8,1],[1,7,1],[1,2,1],[1,1,1],[1,1,1],[1,4,3],[1,11,1],[1,43,1],[1,7,8],[1,8,1],[1,1,1],[1,8,6],[1,9,3],[1,19,1],[1,2,1],[1,43,3],[1,4,5],[1,2,3],[1,4,1],[1,17,1],[1,9,1],[1,8,72],[1,2,1],[1,4,2],[1,16,1],[1,15,1],[1,8,1],[1,3,1],[1,7,8],[1,4,1],[1,23,2],[1,1,2],[1,1,1],[1,15,7],[1,7,4],[1,3,4],[1,5,1],[1,1,1],[1,6,83],[1,1,1],[1,4,3],[1,2,1],[1,3,2],[1,9,2],[1,5,1],[1,22,1],[1,3,6],[1,6,4],[1,4,1],[1,1,4],[1,1,1],[1,5,3],[1,1,2],[1,15,2],[1,8,1],[1,5,2],[1,1,1],[1,4,10],[1,63,1],[1,2,2],[1,2,1],[1,9,1],[1,4,3],[1,2,1],[1,24,1],[1,2,2],[1,2,2],[1,6,2],[1,13,5],[1,34,5],[1,10,1],[1,3,1],[1,22,9],[1,41,1],[1,1,4],[1,13,2],[1,18,1],[1,4,4],[1,7,1],[1,4,3],[1,14,4],[1,3,2],[1,2,1],[1,7,10],[1,15,3],[1,6,1],[1,1,1],[1,2,5],[1,4,10],[1,5,2],[1,12,6],[1,6,1],[1,19,134],[1,11,1],[1,233,9],[1,4,2],[1,4
0,1],[1,2,1],[1,10,1],[1,3,1],[1,3,1],[1,3,1],[1,35,1],[1,2,7],[1,1,3],[1,3,1],[1,14,2],[1,1,1],[1,7,1],[1,6,5],[1,10,1],[1,5,3],[1,8,1],[1,11,1],[1,13,1],[1,8,9],[1,5,1],[1,3,1],[1,11,1],[1,2,1],[1,5,1],[1,7,1],[1,9,3],[1,2,3],[1,2,2],[1,29,2],[1,2,1],[1,4,3],[1,1,2],[1,2,2],[1,3,6],[1,11,1],[1,1,1],[1,11,1],[1,4,1],[1,6,1],[1,3,5],[1,4,1],[1,4,3],[1,34,1],[1,4,2],[1,1,9],[1,18,1],[1,9,3],[1,15,1],[1,4,4],[1,4,2],[1,9,1],[1,4,1],[1,10,1],[1,2,1],[1,2,4],[1,4,1],[1,1,2],[1,3,3],[1,2,1],[1,47,14],[1,3,1],[1,2,1],[1,3,1],[1,1,1],[1,20,1],[1,14,6],[1,2,2],[1,16,2],[1,2,1],[1,1,31],[1,5,9],[1,10,2],[1,10,3],[1,19,1],[1,1,1],[1,13,2],[1,5,1],[1,1,2],[1,1,2],[1,24,1],[1,9,2],[1,4,1],[1,10,3],[1,35,6],[1,1,1],[1,2,1],[1,1,1],[1,3,1],[1,4,5],[1,4,1],[1,1,1],[1,4,1],[1,10,2],[1,55,6],[1,3,22],[1,28,4],[1,6,3],[1,10,1],[1,6,187],[1,3,2],[1,12,5],[1,7,1],[1,4,1],[1,2,2],[1,2,1],[1,31,9],[1,2,8],[1,20,2],[1,36,2],[1,2,2],[1,15,5],[1,5,2],[1,3,2],[1,8,1],[1,1,1],[1,2,1],[1,37,1],[1,17,4],[1,8,1],[1,19,2],[1,7,1],[1,1,1],[1,1,1],[1,2,1],[1,9,1],[1,2,1],[1,2,1],[1,2,1],[1,19,1],[1,33,3],[1,4,1],[1,7,1],[1,3,1],[1,46,4],[1,2,1],[1,3,2],[1,1,2],[1,2,2],[1,14,1],[1,3,1],[1,11,2],[1,2,2],[1,21,2],[1,34,2],[1,4,1],[1,1,1],[1,2,1],[1,22,1],[1,64,9],[1,21,10],[1,3,3],[1,6,1],[1,16,2],[1,3,1],[1,31,4],[1,1,1],[1,1,2],[1,1,1],[1,3,1],[1,5,4],[1,27,1],[1,1,1],[1,2,2],[1,17,10],[1,4,1],[1,25,1],[1,41,1],[1,18,4],[1,17,40],[1,9,1],[1,2,1],[1,7,1],[1,21,2],[1,2,3],[1,3,1],[1,14,1],[1,8,2],[1,2,1],[1,2,2],[1,5,1],[1,1,2],[1,4,1],[1,6,5],[1,9,17],[1,5,1],[1,6,1],[1,4,1],[1,1,1],[1,3,1],[1,61,9],[1,6,1],[1,9,2],[1,2,2],[1,9,1],[1,7,4],[1,12,1],[1,2,2],[1,40,1],[1,17,13],[1,1,7],[1,11,2],[1,20,2],[1,2,1],[1,1,1],[1,12,10],[1,5,3],[1,2,1],[1,1,1],[1,23,2],[1,9,3],[1,4,1],[1,5,2],[1,4,1],[1,19,5],[1,5,1],[1,1,4],[1,5,1],[1,8,1],[1,9,1],[1,5,3],[1,43,3],[1,1,2],[1,3,1],[1,2,2],[1,15,38],[1,3,1],[1,25,1],[1,1,4],[1,5,6],[1,2,1],[1,4,3],[1,4,2],[1,3,1],[1,9,1],[1,4,1],[1,13,2],[1,7,4],[1,2,6],[1,12,1],[1,8,3],[1,1,4],[1,13,1],[1,3,4],[1,3,2],[1,2,2],[1,4,1],[1,6,1],[1,14,3],[1,7,1],[1,8,1],[1,8,1],[1,3,1],[1,32,5],[1,16,2],[1,2,3],[1,38,1],[1,5,4],[1,10,2],[1,2,7],[1,3,1],[1,8,1],[1,3,2],[1,1,3],[1,4,2],[1,71,12],[1,8,4],[1,2,12],[1,3,1],[1,12,2],[1,2,1],[1,5,1],[1,2,28],[1,19,5],[1,10,1],[1,9,2],[1,3,1],[1,7,6],[1,11,1],[1,2,1],[1,27,2],[1,7,4],[1,4,2],[1,12,8],[1,8,96],[1,12,1],[1,2,4],[1,965,1303],[1,7,5],[1,15,3],[1,3,2],[1,18,2],[1,25,3],[1,7,2],[1,18,2],[1,6,1],[1,10,2],[1,4,1],[1,1,3],[1,5,1],[1,19,2],[1,8,1],[1,50,4],[1,8,1],[1,11,1],[1,9,1],[1,2,1],[1,2,5],[1,3,1],[1,6,2],[1,1,1],[1,13,5],[1,19,1],[1,7,2],[1,17,1],[1,6,1],[1,4,1],[1,7,3],[1,13,3],[1,7,4],[1,5,2],[1,4,1],[1,11,16],[1,7,1],[1,1,1],[1,2,1],[1,2,1],[1,14,3],[1,30,1],[1,2,6],[1,6,2],[1,3,1],[1,4,1],[1,9,11],[1,6,1],[1,35,1],[1,2,8],[1,1,2],[1,3,2],[1,1,1],[1,9,1],[1,2,57],[1,2,1],[1,5,1],[1,4,2],[1,15,1],[1,12,3],[1,4,3],[1,17,1],[1,12,2],[1,21,12],[1,2,1],[1,9,1],[1,9,47],[1,49,4],[1,5,1],[1,4,1],[1,24,1],[1,2,2],[1,64,2],[1,48,7],[1,2,2],[1,10,2],[1,3,1],[1,11,1],[1,5,1],[1,1,2],[1,2,4],[1,6,1],[1,19,6],[1,6,2],[1,3,2],[1,1,1],[1,22,2],[1,3,2],[1,5,14],[1,2,1],[1,11,1],[1,4,2],[1,6,1],[1,24,10],[1,7,1],[1,2,74],[1,6,1],[1,28,1],[1,1,1],[1,1,1],[1,10,1],[1,88,4],[1,9,4],[1,26,1],[1,3,1],[1,4,1],[1,4,1],[1,6,1],[1,23,1],[1,2,7],[1,1,3],[1,7,1],[1,1,1],[1,5,2],[1,4,1],[1,2,1],[1,1,1],[1,15,5],[1,22,1],[1,6,3],[1,12,2],[1,48,14],[1,7,1],[1,5,1],[1,10,5],[1,5,1],[1,6,5],[1,2,3],[1,14,3],[1,3,1],[1,8,4],[1,2,5],[1,34,3],[1,2,1],[1,4,1],[1,6,7],[1,3,1],[1,3,3],
[1,32,2],[1,3,1],[1,3,1],[1,2,1],[1,3,1],[1,39,8],[1,1,1],[1,15,8],[1,3,4],[1,2,3],[1,1,3],[1,38,18],[1,6,1],[1,25,4],[1,2,1],[1,8,1],[1,3,1],[1,24,1],[1,5,5],[1,5,4],[1,2,3],[1,2,1],[1,5,4],[1,51,1],[1,23,3],[1,2,1],[1,2,1],[1,1,2],[1,7,2],[1,3,1],[1,1,1],[1,4,1],[1,2,1],[1,7,6],[1,8,1],[1,11,1],[1,2,6],[1,2,1],[1,2,1],[1,1,1],[1,26,1],[1,3,1],[1,2,1],[1,2,1],[1,2,1],[1,12,2],[1,1,3],[1,3,1],[1,2,4],[1,19,3],[1,3,1],[1,3,2],[1,49,3],[1,2,1],[1,21,3],[1,1,1],[1,5,1],[1,4,1],[1,2,2],[1,2,1],[1,1,1],[1,7,4],[1,2,1],[1,2,1],[1,2,1],[1,3,2],[1,26,2],[1,9,1],[1,2,2],[1,12,1],[1,4,32],[1,4,1],[1,17,1],[1,1,2],[1,77,4],[1,2,1],[1,12,1],[1,2,1],[1,2,4],[1,5,2],[1,10,3],[1,4,3],[1,2,1],[1,1,3],[1,16,4],[1,3,1],[1,40,2],[1,13,1],[1,2,1],[1,6,2],[1,12,2],[1,6,11],[1,6,1],[1,1,1],[1,10,6],[1,1,1],[1,6,5],[1,38,4],[1,2,7],[1,9,1],[1,5,2],[1,3,1],[1,2,1],[1,5,2],[1,4,1],[1,1,1],[1,1,1],[1,4,2],[1,4,3],[1,5,2],[1,1,4],[1,11,4],[1,14,4],[1,4,1],[1,17,2],[1,2,2],[1,39,1],[1,9,21],[1,14,2],[1,4,4],[1,4,3],[1,9,2],[1,1,1],[1,3,2],[1,1,1],[1,1,7],[1,16,4],[1,5,1],[1,2,1],[1,2,1],[1,2,1],[1,98,19],[1,4,1],[1,1,1],[1,5,1],[1,7,1],[1,1,3],[1,9,1],[1,4,2],[1,2,1],[1,7,2],[1,2,1],[1,1,2],[1,1,1],[1,5,2],[1,6,1],[1,11,6],[1,5,4],[1,40,5],[1,1,2],[1,9,1],[1,2,1],[1,6,1],[1,5,1],[1,11,2],[1,4,1],[1,3,17],[1,1,1],[1,1,5],[1,9,5],[1,60,1],[1,3,7],[1,3,4],[1,5,1],[1,3,10],[1,5,2],[1,7,1],[1,2,1],[1,14,14],[1,4,3],[1,1,2],[1,2,4],[1,5,1],[1,11,7],[1,3,1],[1,29,3],[1,2,4],[1,8,1],[1,53,1],[1,10,1],[1,7,2],[1,2,13],[1,58,1],[1,5,6],[1,2,1],[1,4,2],[1,4,2],[1,4,2],[1,5,2],[1,2,3],[1,12,2],[1,4,6],[1,34,1],[1,1,1],[1,8,1],[1,4,1],[1,2,1],[1,2,2],[1,16,1],[1,4,2],[1,3,13],[1,2,2],[1,46,2],[1,4,1],[1,6,1],[1,1,2],[1,2,1],[1,3,6],[1,3,1],[1,19,1],[1,2,1],[1,23,1],[1,3,1],[1,1,1],[1,7,2],[1,4,4],[1,18,3],[1,1,1],[1,7,2],[1,2,2],[1,7,1],[1,2,1],[1,2,1],[1,6,1],[1,9,4],[1,3,1],[1,5,1],[1,13,1],[1,2,2],[1,33,1],[1,12,1],[1,9,3],[1,2,1],[1,1,1],[1,18,1],[1,1,3],[1,3,15],[1,2,4],[1,17,1],[1,1,1],[1,1,1],[1,4,8],[1,1,2],[1,31,19],[1,1,5],[1,7,6],[1,12,4],[1,2,4],[1,7,8],[1,4,2],[1,13,2],[1,19,18],[1,42,4],[1,3,1],[1,17,1],[1,3,3],[1,4,2],[1,12,1],[1,1,6],[1,23,2],[1,3,1],[1,20,1],[1,21,4],[1,1,1],[1,3,2],[1,10,1],[1,9,1],[1,8,6],[1,21,3],[1,5,1],[1,7,6],[1,2,1],[1,5,1],[1,1,2],[1,11,1],[1,8,212],[1,9,3],[1,6,1],[1,1,2],[1,25,12],[1,4,1],[1,14,15],[1,4,1],[1,13,1],[1,2,2],[1,3,1],[1,4,1],[1,3,1],[1,1,1],[1,3,1],[1,9,7],[1,1,1],[1,6,1],[1,8,2],[1,8,1],[1,2,3],[1,3,1],[1,2,3],[1,1,2],[1,10,1],[1,6,1],[1,12,3],[1,12,1],[1,1,1],[1,2,1],[1,2,4],[1,4,1],[1,2,1],[1,1,1],[1,4,1],[1,23,2],[1,4,2],[1,20,1],[1,17,4],[1,8,2],[1,4,6],[1,4,1],[1,6,1],[1,10,1],[1,6,2],[1,1,1],[1,3,1],[1,4,1],[1,4,1],[1,16,143],[1,7,1],[1,10,1],[1,7,2],[1,3,3],[1,8,3],[1,2,1],[1,49,1],[1,2,7],[1,14,4],[1,31,3],[1,29,1],[1,31,8],[1,5,2],[1,7,1],[1,1,1],[1,4,5],[1,1,1],[1,7,3],[1,1,2],[1,5,3],[1,3,1],[1,7,4],[1,129,9],[1,13,1],[1,11,4],[1,6,28],[1,6,1],[1,6,1],[1,20,1],[1,2,1],[1,16,3],[1,3,3],[1,5,1],[1,64,1],[1,4,2],[1,7,1],[1,21,3],[1,2,2],[1,9,1],[1,2,1],[1,5,6],[1,6,6],[1,3,1],[1,5,1],[1,3,1],[1,3,1],[1,6,2],[1,2,3],[1,4,1],[1,1,1],[1,12,37],[1,6,1],[1,1,1],[1,4,2],[1,4,8],[1,6,2],[1,2,2],[1,19,1],[1,1,1],[1,1,3],[1,3,1],[1,4,5],[1,15,2],[1,8,3],[1,1,1],[1,2,2],[1,3,1],[1,10,1],[1,4,1],[1,1,2],[1,19,1],[1,5,2],[1,4,4],[1,3,2],[1,3,17],[1,1,1],[1,1,1],[1,2,1],[1,18,3],[1,3,1],[1,16,4],[1,5,1],[1,11,2],[1,19,8],[1,2,1],[1,2,1],[1,1,6],[1,3,1],[1,2,1],[1,1,1],[1,2,1],[1,11,3],[1,17,4],[1,4,1],[1,4,4],[1,5,2],[1,1,1],[1,1,2],[1,10,12],[1,2,2],[1,8,1],[1,1,2],[1,8,1],[1,
17,2],[1,2,1],[1,4,1],[1,6,1],[1,20,21],[1,5,7],[1,3,1],[1,13,2],[1,3,6],[1,8,3],[1,12,1],[1,12,2],[1,3,2],[1,15,2],[1,6,1],[1,9,5],[1,5,3],[1,4,1],[1,7,4],[1,4,4],[1,9,4],[1,11,1],[1,3,1],[1,17,1],[1,71,5],[1,7,1],[1,3,1],[1,5,1],[1,1,1],[1,1,2],[1,2,1],[1,1,2],[1,10,2],[1,3,1],[1,2,2],[1,5,1],[1,28,4],[1,2,1],[1,1,1],[1,9,1],[1,3,2],[1,8,2],[1,13,1],[1,2,1],[1,6,1],[1,25,79],[1,30,24],[1,10,31],[1,5,1],[1,9,1],[1,1,1],[1,4,1],[1,118,14],[1,18,3],[1,30,1],[1,10,3],[1,5,1],[1,5,1],[1,1,1],[1,6,1],[1,9,3],[1,6,2],[1,5,1],[1,2,2],[1,3,1],[1,7,4],[1,8,2],[1,10,2],[1,1,8],[1,41,1],[1,21,4],[1,6,1],[1,13,3],[1,5,1],[1,34,7],[1,22,1],[1,9,8],[1,5,3],[1,11,1],[1,2,1],[1,6,1],[1,4,1],[1,72,1],[1,44,3],[1,2,1],[1,1,1],[1,3,1],[1,8,2],[1,1,3],[1,14,1],[1,3,2],[1,1,1],[1,9,2],[1,17,1],[1,9,35],[1,3,1],[1,6,1],[1,2,11],[1,5,3],[1,1257,55],[1,1,1],[1,2,1],[1,14,7],[1,51,44],[1,3,6],[1,1,1],[1,6,2],[1,2,1],[1,11,2],[1,8,3],[1,3,2],[1,3,3],[1,4,1],[1,2,1],[1,5,1],[1,8,5],[1,60,1],[1,6,3],[1,36,2],[1,1,1],[1,2,1],[1,10,2],[1,26,2],[1,7,3],[1,6,1],[1,6,2],[1,3,3],[1,2,3],[1,6,2],[1,2,2],[1,2,2],[1,5,2],[1,2,1],[1,15,5],[1,1,2],[1,1,3],[1,37,24],[1,8,2],[1,17,2],[1,31,1],[1,14,2],[1,2,1],[1,16,2],[1,3,1],[1,2,2],[1,1,2],[1,2,3],[1,4,2],[1,1,1],[1,9,5],[1,1,2],[1,1,4],[1,4,18],[1,6,1],[1,12,1],[1,3,85],[1,17,2],[1,4,1],[1,7,1],[1,4,1],[1,3,1],[1,22,2],[1,1,1],[1,15,27],[1,4,1],[1,1,1],[1,1,3],[1,3,1],[1,35,2],[1,1,1],[1,33,4],[1,2,1],[1,3,3],[1,6,1],[1,9,1],[1,8,1],[1,6,1],[1,16,2],[1,20,2],[1,5,1],[1,1,5],[1,2,2],[1,12,25],[1,6,1],[1,13,1],[1,2,1],[1,2,1],[1,10,1],[1,2,1],[1,37,3],[1,2,1],[1,58,11],[1,14,3],[1,6,1],[1,6,1],[1,1,3],[1,1,1],[1,9,2],[1,1,502],[1,45,5],[1,5,1],[1,4,1],[1,2,8],[1,5,1],[1,1,1],[1,7,1],[1,4,1],[1,3,4],[1,1,1],[1,10,1],[1,9,1],[1,13,1],[1,10,8],[1,4,4],[1,7,1],[1,1,2],[1,2,2],[1,9,2],[1,13,2],[1,8,1],[1,1,1],[1,2,4],[1,29,1],[1,8,2],[1,7,3],[1,30,7],[1,1,1],[1,10,10],[1,3,1],[1,1,1],[1,5,1],[1,4,3],[1,7,1],[1,43,8],[1,1,2],[1,9,1],[1,1,1],[1,3,6],[1,9,1],[1,1,1],[1,7,1],[1,6,1],[1,2,2],[1,13,4],[1,13,3],[1,2,3],[1,8,1],[1,11,2],[1,9,53],[1,2,1],[1,16,1],[1,6,3],[1,48,3],[1,4,1],[1,7,3],[1,2,2],[1,8,1],[1,8,1],[1,26,2],[1,3,1],[1,8,2],[1,121,2],[1,2,2],[1,8,1],[1,2,2],[1,4,2],[1,8,1],[1,1,1],[1,4,1],[1,3,3],[1,7,1],[1,7,2],[1,2,1],[1,8,2],[1,34,28],[1,3,2],[1,3,1],[1,5,1],[1,9,1],[1,7,1],[1,14,4],[1,1,1],[1,34,4],[1,1,1],[1,6,1],[1,3,1],[1,2,1],[1,4,1],[1,5,2],[1,10,1],[1,41,5],[1,7,2],[1,19,4],[1,3,3],[1,12,3],[1,7,1],[1,4,2],[1,16,1],[1,3,1],[1,8,4],[1,9,2],[1,8,2],[1,2,1],[1,10,2],[1,8,1],[1,16,2],[1,7,2],[1,5,1],[1,2,3],[1,15,4],[1,3,5],[1,4,4],[1,1,1],[1,3,2],[1,5,1],[1,8,4],[1,4,1],[1,41,7],[1,2,1],[1,1,3],[1,1,6],[1,2,1],[1,10,2],[1,10,2],[1,3,3],[1,39,4],[1,1,2],[1,5,7],[1,12,2],[1,15,5],[1,4,1],[1,13,1],[1,3,1],[1,44,3],[1,1,2],[1,1,1],[1,6,1],[1,3,1],[1,3,2],[1,7,15],[1,1,1],[1,11,4],[1,3,1],[1,1,3],[1,1,1],[1,2,1],[1,9,4],[1,22,1],[1,46,2],[1,3,18],[1,22,8],[1,3,1],[1,4,10],[1,12,16],[1,2,1],[1,8,3],[1,1,1],[1,2,4],[1,1,1],[1,6,4],[1,7,1],[1,7,4],[1,14,4],[1,1,1],[1,13,2],[1,61,1],[1,6,2],[1,16,1],[1,14,7],[1,9,2],[1,18,2],[1,9,3],[1,1,2],[1,4,1],[1,6,1],[1,6,4],[1,10,1],[1,5,2],[1,7,1],[1,3,1],[1,11,2],[1,53,1],[1,10,2],[1,17,1],[1,2,2],[1,5,14],[1,17,1],[1,2,1],[1,5,1],[1,28,2],[1,8,2],[1,4,1],[1,4,2],[1,21,1],[1,3,1],[1,3,2],[1,5,2],[1,5,1],[1,3,13],[1,13,2],[1,124,753],[1,2,2],[1,43,1],[1,6,1],[1,2,2],[1,11,1],[1,22,1],[1,5,2],[1,5,1],[1,8,1],[1,2,4],[1,2,2],[1,9,1],[1,6,1],[1,2,1],[1,6,1],[1,14,3],[1,21,1],[1,3,4],[1,3,3],[1,3,1],[1,2,2],[1,2,2],[1,5,2],[1,11,1],[1,6,
1],[1,3,1],[1,64,1],[1,6,1],[1,2,12],[1,5,1],[1,6,4],[1,10,1],[1,14,1],[1,14,1],[1,2,1],[1,2,1],[1,8,4],[1,17,2],[1,5,3],[1,64,1],[1,33,3],[1,18,2],[1,1,1],[1,42,9],[1,20,2],[1,10,2],[1,2,2],[1,3,1],[1,13,1],[1,5,1],[1,39,5],[1,8,2],[1,6,1],[1,3,2],[1,12,1],[1,2,4],[1,8,1],[1,2,1],[1,4,5],[1,7,1],[1,2,1],[1,2,1],[1,5,2],[1,15,3],[1,6,1],[1,1,1],[1,11,2],[1,4,2],[1,1,1],[1,7,3],[1,7,2],[1,3,1],[1,3,1],[1,2,1],[1,8,3],[1,3,1],[1,7,12],[1,8,1],[1,4,2],[1,6,2],[1,9,1],[1,3,30],[1,8,3],[1,8,2],[1,8,1],[1,11,1],[1,13,1],[1,2,1],[1,16,1],[1,10,1],[1,3,1],[1,6,4],[1,29,2],[1,4,2],[1,4,1],[1,1,1],[1,7,1],[1,1,1],[1,4,11],[1,1,1],[1,6,1],[1,26,1],[1,3,1],[1,2,1],[1,10,1],[1,4,1],[1,14,2],[1,10,1],[1,5,2],[1,5,1],[1,2,1],[1,26,33],[1,1,1],[1,11,2],[1,8,5],[1,18,1],[1,2,1],[1,5,1],[1,4,2],[1,5,1],[1,11,2],[1,1,2],[1,2,2],[1,6,6],[1,10,1],[1,14,1],[1,2,1],[1,13,1],[1,14,1],[1,8,2],[1,21,2],[1,1,2],[1,1,1],[1,14,1],[1,2,1],[1,15,2],[1,4,1],[1,3,1],[1,10,2],[1,4,2],[1,5,1],[1,11,22],[1,8,3],[1,4,1],[1,3,2],[1,1,2],[1,25,3],[1,2,1],[1,11,2],[1,5,2],[1,39,1],[1,1,1],[1,415,128],[1,6,1],[1,5,1],[1,8,5],[1,2,3],[1,1,1],[1,1,1],[1,4,1],[1,2,4],[1,4,1],[1,2,9],[1,4,2],[1,23,3],[1,6,9],[1,5,4],[1,2,5],[1,1,1],[1,7,1],[1,3,7],[1,1,2],[1,2,16],[1,5,2],[1,1,3],[1,4,1],[1,11,1],[1,2,2],[1,2,1],[1,10,1],[1,6,2],[1,11,1],[1,28,1],[1,21,3],[1,3,2],[1,3,1],[1,4,1],[1,1,2],[1,7,1],[1,11,4],[1,4,2],[1,22,4],[1,1,1],[1,1,1],[1,12,7],[1,1,1],[1,4,2],[1,2,1],[1,6,4],[1,14,3],[1,8,2],[1,1,11],[1,13,2],[1,4,1],[1,3,2],[1,95,10],[1,1,2],[1,4,2],[1,27,2],[1,2,1],[1,19,1],[1,13,4],[1,1,1],[1,37,1],[1,4,1],[1,5,1],[1,7,5],[1,1,1],[1,4,5],[1,5,1],[1,1,1],[1,16,2],[1,22,1],[1,4,2],[1,24,4],[1,10,1],[1,77,6],[1,21,1],[1,11,1],[1,2,1],[1,1,1],[1,4,5],[1,2,4],[1,55,4],[1,17,1],[1,1,3],[1,2,2],[1,7,1],[1,17,1],[1,34,2],[1,4,1],[1,2,2],[1,1,2],[1,100,1],[1,17,2],[1,8,6],[1,11,2],[1,11,2],[1,3,1],[1,5,2],[1,1,1],[1,6,7],[1,15,5],[1,7,1],[1,4,1],[1,5,1],[1,6,2],[1,7,1],[1,2,2],[1,10,2],[1,17,1],[1,10,2],[1,6,3],[1,21,1],[1,2,1],[1,78,4],[1,6,1],[1,1,2],[1,5,1],[1,186,9],[1,16,3],[1,15,13],[1,30,4],[1,2,1],[1,15,3],[1,13,1],[1,3,1],[1,1,1],[1,2,2],[1,5,5],[1,7,1],[1,16,1],[1,2,1],[1,14,2],[1,11,5],[1,9,1],[1,13,2],[1,2,1],[1,4,64],[1,4,1],[1,18,4],[1,3,1],[1,1,1],[1,16,2],[1,4,1],[1,11,4],[1,9,3],[1,3,1],[1,4,1],[1,1,1],[1,10,3],[1,7,1],[1,13,1],[1,16,4],[1,1,16],[1,2,2],[1,18,6],[1,42,2],[1,1,3],[1,15,1],[1,3,1],[1,43,1],[1,1,1],[1,27,2],[1,1,3],[1,1,5],[1,13,1],[1,1,1],[1,10,11],[1,8,1],[1,9,1],[1,13,1],[1,1,2],[1,13,3],[1,1,1],[1,5,1],[1,14,2],[1,14,1],[1,13,1],[1,4,3],[1,25,1],[1,1,3],[1,3,3],[1,4,1],[1,1,1],[1,4,4],[1,15,1],[1,2,1],[1,1,1],[1,7,12],[1,68,2],[1,13,2],[1,2,1],[1,6,4],[1,46,6],[1,1,1],[1,2,2],[1,4,1],[1,2,1],[1,11,5],[1,1,1],[1,9,1],[1,9,1],[1,13,1],[1,4,1],[1,14,1],[1,42,9],[1,5,1],[1,4,1],[1,24,7],[1,7,1],[1,17,1],[1,2,1],[1,2,5],[1,3,6],[1,2,1],[1,15,4],[1,3,2],[1,33,2],[1,30,4],[1,27,4],[1,1,1],[1,14,4],[1,2,3],[1,26,7],[1,22,1],[1,2,2],[1,2,2],[1,166,3],[1,4,4],[1,9,1],[1,12,15],[1,2,6],[1,13,2],[1,4,3],[1,9,2],[1,2,3],[1,3,3],[1,9,2],[1,22,1],[1,5,3],[1,3,4],[1,2,3],[1,3,1],[1,23,1],[1,18,1],[1,6,1],[1,4,1],[1,9,3],[1,35,1],[1,73,2],[1,1,3],[1,31,5],[1,25,1],[1,3,4],[1,11,1],[1,9,4],[1,2,1],[1,27,36],[1,23,5],[1,4,2],[1,1,2],[1,29,2],[1,3,2],[1,1,1],[1,4,1],[1,12,1],[1,36,16],[1,5,14],[1,19,1],[1,6,1],[1,6,1],[1,4,1],[1,6,1],[1,4,2],[1,9,7],[1,7,1],[1,30,4],[1,4,1],[1,18,3],[1,2,2],[1,3,1],[1,9,2],[1,2,2],[1,1,2],[1,1,2],[1,14,1],[1,3,1],[1,5,2],[1,10,1],[1,9,1],[1,10,3],[1,4,1],[1,2,1],[1,4,4],[1,2,1],[1,3,3],[1,39
,2],[1,3,1],[1,1,3],[1,14,1],[1,2,4],[1,13,1],[1,4,6],[1,3,5],[1,5,4],[1,8,1],[1,131,1],[1,28,1],[1,5,1],[1,965,1303],[1,8,5],[1,2,9],[1,4,2],[1,5,1],[1,46,3],[1,7,3],[1,1,1],[1,7,3],[1,2,1],[1,4,1],[1,2,1],[1,2,1],[1,2,1],[1,4,6],[1,5,1],[1,9,3],[1,2,2],[1,9,1],[1,42,3],[1,11,3],[1,5,1],[1,1,2],[1,6,1],[1,37,51],[1,2,1],[1,4,3],[1,23,2],[1,1,15],[1,5,4],[1,1,4],[1,18,3],[1,12,3],[1,4,2],[1,4,1],[1,2,7],[1,2,6],[1,3,6],[1,6,1],[1,10,3],[1,4,2],[1,1,2],[1,4,1],[1,4,3],[1,1,3],[1,3,1],[1,6,2],[1,10,2],[1,6,4],[1,4,3],[1,7,2],[1,2,2],[1,4,1],[1,1,1],[1,4,5],[1,14,1],[1,20,4],[1,7,15],[1,18,2],[1,6,1],[1,1,1],[1,7,1],[1,5,2],[1,6,2],[1,4,1],[1,6,3],[1,2,1],[1,6,1],[1,4,1],[1,7,1],[1,7,4],[1,7,1],[1,1,1],[1,24,4],[1,2,2],[1,3,5],[1,8,1],[1,15,2],[1,5,1],[1,2,3],[1,2,2],[1,4,1],[1,6,1],[1,2,3],[1,11,1],[1,23,5],[1,2,2],[1,1,1],[1,8,1],[1,17,6],[1,1,1],[1,9,2],[1,1,1],[1,10,1],[1,5,1],[1,6,1],[1,6,1],[1,5,1],[1,2,6],[1,2,1],[1,9,1],[1,14,1],[1,18,8],[1,39,2],[1,13,1],[1,6,1],[1,6,2],[1,9,1],[1,14,1],[1,5,4],[1,26,2],[1,4,1],[1,7,2],[1,5,5],[1,2,1],[1,20,2],[1,14,1],[1,10,1],[1,4,1],[1,3,1],[1,10,2],[1,9,12],[1,4,4],[1,2,1],[1,4,1],[1,4,1],[1,2,1],[1,8,1],[1,2,4],[1,1,1],[1,33,2],[1,4,1],[1,5,1],[1,205,1],[1,2,1],[1,15,3],[1,5,1],[1,1,1],[1,1,1],[1,1,1],[1,13,1],[1,14,5],[1,6,4],[1,3,1],[1,7,5],[1,42,2],[1,11,1],[1,24,2],[1,11,2],[1,11,2],[1,12,1],[1,7,1],[1,1,1],[1,3,2],[1,21,1],[1,13,1],[1,2,1],[1,37,6],[1,8,4],[1,2,2],[1,2,2],[1,36,1],[1,8,1],[1,19,11],[1,19,7],[1,8,1],[1,18,2],[1,7,2],[1,8,1],[1,1,1],[1,4,1],[1,3,3],[1,10,1],[1,6,1],[1,4,1],[1,10,1],[1,25,1],[1,14,1],[1,14,3],[1,4,1],[1,2,1],[1,2,2],[1,4,2],[1,3,4],[1,62,11],[1,4,1],[1,39,3],[1,65,2],[1,3,1],[1,11,2],[1,4,1],[1,2,2],[1,1,1],[1,2,3],[1,2,1],[1,17,7],[1,7,4],[1,1,4],[1,62,3],[1,17,3],[1,26,3],[1,15,1],[1,2,1],[1,4,6],[1,1,2],[1,8,2],[1,16,2],[1,1,1],[1,7,2],[1,4,1],[1,1,1],[1,7,2],[1,8,2],[1,12,1],[1,1,2],[1,2,1],[1,2,1],[1,26,7],[1,2,1],[1,5,1],[1,5,1],[1,5,1],[1,1,1],[1,6,27],[1,5,4],[1,6,1],[1,8,1],[1,38,2],[1,26,2],[1,13,1],[1,20,2],[1,6,6],[1,2,2],[1,2,1],[1,16,2],[1,88,1],[1,4,1],[1,5,3],[1,1,4],[1,1,4],[1,12,2],[1,3,1],[1,3,1],[1,3,1],[1,2,3],[1,6,1],[1,2,4],[1,28,2],[1,17,3],[1,10,1],[1,51,3],[1,1,1],[1,15,4],[1,10,14],[1,1,3],[1,3,3],[1,1,1],[1,5,1],[1,3,1],[1,23,3],[1,10,1],[1,1,1],[1,21,6],[1,11,1],[1,8,1],[1,1,1],[1,2,1],[1,1,3],[1,26,1],[1,1,2],[1,4,1],[1,4,1],[1,6,1],[1,6,1],[1,2,2],[1,11,5],[1,15,2],[1,13,1],[1,2,2],[1,4,1],[1,4,1],[1,2,6],[1,13,3],[1,23,2],[1,18,2],[1,8,2],[1,1,1],[1,4,1],[1,7,1],[1,2,1],[1,8,6],[1,12,1],[1,23,4],[1,9,4],[1,2,2],[1,8,1],[1,7,2],[1,2,2],[1,2,4],[1,8,16],[1,22,3],[1,2,1],[1,2,4],[1,2,1],[1,9,2],[1,3,3],[1,4,1],[1,3,9],[1,3,1],[1,2,2],[1,2,3],[1,11,1],[1,5,1],[1,5,1],[1,2,2],[1,10,20],[1,2,2],[1,2,1],[1,3,3],[1,10,1],[1,2,3],[1,2,1],[1,5,1],[1,4,2],[1,8,1],[1,2,2],[1,6,1],[1,5,1],[1,9,1],[1,3,2],[1,1,1],[1,2,6],[1,1,1],[1,5,1],[1,2,1],[1,16,1],[1,6,1],[1,2,1],[1,2,1],[1,5,1],[1,9,1],[1,10,16],[1,4,1],[1,4,2],[1,5,2],[1,8,1],[1,16,2],[1,2,1],[1,5,1],[1,1,2],[1,55,2],[1,20,1],[1,11,1],[1,5,2],[1,13,1],[1,1,1],[1,10,6],[1,5,2],[1,21,1],[1,7,3],[1,5,1],[1,7,1],[1,3,1],[1,6,1],[1,46,3],[1,8,5],[1,5,1],[1,2,1],[1,2,6],[1,22,1],[1,42,1],[1,1,1],[1,4,2],[1,13,1],[1,3,3],[1,2,2],[1,4,2],[1,1,3],[1,88,1],[1,24,4],[1,4,1],[1,3,1],[1,5,1],[1,17,6],[1,6,2],[1,20,3],[1,47,2],[1,2,7],[1,13,1],[1,1,3],[1,1,2],[1,2,2],[1,2,2],[1,4,3],[1,7,1],[1,3,1],[1,10,1],[1,2,1],[1,2,5],[1,1,2],[1,17,2],[1,12,4],[1,24,1],[1,3,1],[1,1,3],[1,6,1],[1,2,5],[1,3,1],[1,1,1],[1,13,2],[1,6,1],[1,2,1],[1,10,2],[1,4,1],[1,
1,1],[1,18,7],[1,7,2],[1,8,1],[1,5,1],[1,2,1],[1,4,1],[1,2,2],[1,14,1],[1,13,1],[1,10,4],[1,4,4],[1,6,4],[1,4,1],[1,16,2],[1,8,2],[1,3,3],[1,3,1],[1,21,2],[1,7,1],[1,2,1],[1,2,1],[1,2,3],[1,4,1],[1,6,1],[1,28,1],[1,2,7],[1,3,1],[1,23,4],[1,2,1],[1,6,1],[1,2,1],[1,4,1],[1,3,2],[1,1,1],[1,9,2],[1,9,2],[1,2,1],[1,4,2],[1,10,1],[1,12,1],[1,4,2],[1,7,1],[1,2,2],[1,9,1],[1,16,5],[1,31,2],[1,16,2],[1,22,3],[1,2,1],[1,6,1],[1,1,1],[1,6,3],[1,14,2],[1,5,3],[1,81,3],[1,8,2],[1,1,1],[1,61,9],[1,1,4],[1,2,1],[1,11,3],[1,3,5],[1,3,6],[1,4,7],[1,1,2],[1,5,2],[1,2,1],[1,3,2],[1,9,5],[1,9,1],[1,1,3],[1,3,2],[1,13,3],[1,14,1],[1,15,6],[1,6,1],[1,2,1],[1,7,1],[1,2,1],[1,10,2],[1,2,2],[1,14,1],[1,2,2],[1,3,3],[1,3,1],[1,4,1],[1,59,2],[1,5,2],[1,4,2],[1,1,1],[1,2,1],[1,4,1],[1,2,2],[1,5,4],[1,4,1],[1,4,1],[1,10,3],[1,2,2],[1,2,3],[1,8,1],[1,2,1],[1,1,1],[1,18,1],[1,6,1],[1,12,3],[1,5,3],[1,3,1],[1,7,3],[1,10,2],[1,2,23],[1,1,12],[1,1,1],[1,32,3],[1,2,1],[1,4,1],[1,12,2],[1,4,1],[1,3,1],[1,5,1],[1,4,2],[1,4,1],[1,16,2],[1,1,1],[1,4,1],[1,7,1],[1,2,4],[1,8,1],[1,4,4],[1,1,1],[1,1,2],[1,6,3],[1,8,2],[1,23,15],[1,2,2],[1,2,1],[1,2,1],[1,11,1],[1,3,2],[1,9,2],[1,4,2],[1,2,3],[1,34,1],[1,7,1],[1,2,4],[1,65,2],[1,41,3],[1,1,2],[1,1,1],[1,6,1],[1,6,1],[1,7,1],[1,3,1],[1,14,9],[1,6,1],[1,6,5],[1,2,13],[1,5,2],[1,2,1],[1,4,1],[1,17,1],[1,5,1],[1,1,1],[1,3,2],[1,9,1],[1,1,4],[1,48,2],[1,7,1],[1,4,1],[1,3,1],[1,4,2],[1,118,3],[1,2,1],[1,2,4],[1,2,1],[1,12,13],[1,2,1],[1,4,2],[1,4,1],[1,6,1],[1,1,1],[1,7,2],[1,10,1],[1,21,5],[1,5,2],[1,9,1],[1,2,2],[1,1,1],[1,1,1],[1,1,1],[1,3,1],[1,1,1],[1,7,1],[1,83,9],[1,6,2],[1,7,2],[1,13,1],[1,4,2],[1,3,1],[1,8,2],[1,2,1],[1,10,3],[1,2,1],[1,2,1],[1,9,11],[1,2,1],[1,3,1],[1,17,1],[1,7,2],[1,8,2],[1,20,1],[1,2,1],[1,1,2],[1,8,1],[1,2,1],[1,6,1],[1,21,3],[1,1,2],[1,5,5],[1,2,1],[1,2,3],[1,2,1],[1,2,2],[1,16,1],[1,2,1],[1,2,1],[1,3,1],[1,17,1],[1,6,1],[1,4,15],[1,1,1],[1,11,1],[1,84,15],[1,31,3],[1,2,2],[1,8,1],[1,9,1],[1,2,3],[1,15,2],[1,4,1],[1,18,1],[1,3,1],[1,1,1],[1,2,4],[1,2,2],[1,2,1],[1,2,1],[1,25,1],[1,3,1],[1,141,13],[1,4,2],[1,2,2],[1,14,2],[1,7,1],[1,30,9],[1,17,1],[1,1,2],[1,6,1],[1,2,1],[1,2,1],[1,8,1],[1,2,1],[1,10,1],[1,6,3],[1,12,1],[1,68,1],[1,2,1],[1,10,2],[1,14,2],[1,26,9],[1,7,3],[1,3,3],[1,6,6],[1,3,1],[1,18,4],[1,3,1],[1,4,4],[1,2,1],[1,1,1],[1,37,8],[1,8,6],[1,2,1],[1,9,6],[1,5,2],[1,3,1],[1,3,2],[1,2,1],[1,3,1],[1,13,7],[1,9,1],[1,122,2],[1,2,1],[1,22,6],[1,11,2],[1,16,2],[1,28,46],[1,2,4],[1,7,1],[1,2,3],[1,2,6],[1,2,2],[1,1,2],[1,1,1],[1,5,1],[1,1,2],[1,3,2],[1,7,6],[1,11,1],[1,21,1],[1,40,6],[1,14,2],[1,21,1],[1,1,1],[1,14,2],[1,21,1],[1,2,1],[1,1,1],[1,1,2],[1,40,2],[1,4,2],[1,1,3],[1,1,1],[1,107,2],[1,4,6],[1,136,6],[1,5,1],[1,9,1],[1,24,3],[1,7,1],[1,10,5],[1,29,3],[1,12,2],[1,10,3],[1,5,3],[1,2,1],[1,59,1],[1,5,2],[1,13,2],[1,1,2],[1,50,2],[1,1,3],[1,2,3],[1,6,1],[1,4,2],[1,5,4],[1,3,2],[1,8,1],[1,4,2],[1,1,1],[1,17,1],[1,13,3],[1,2,1],[1,7,1],[1,3,1],[1,8,1],[1,1,1],[1,20,1],[1,4,4],[1,1,2],[1,2,1],[1,2,1],[1,2,2],[1,1,2],[1,13,2],[1,4,1],[1,4,1],[1,3,1],[1,2,1],[1,4,4],[1,13,5],[1,9,1],[1,8,1],[1,12,1],[1,15,3],[1,2,1],[1,2,2],[1,4,1],[1,2,2],[1,1,1],[1,3,1],[1,13,1],[1,4,1],[1,9,4],[1,3,2],[1,2,1],[1,4,4],[1,1,3],[1,15,1],[1,4,1],[1,2,1],[1,3,1],[1,2,1],[1,3,6],[1,5,1],[1,7,10],[1,1,2],[1,6,2],[1,7,2],[1,3,1],[1,3,3],[1,6,1],[1,13,1],[1,22,3],[1,6,5],[1,6,1],[1,3,1],[1,3,1],[1,21,5],[1,11,2],[1,6,3],[1,38,4],[1,6,4],[1,4,1],[1,2,1],[1,5,5],[1,5,3],[1,40,1],[1,4,3],[1,8,1],[1,13,2],[1,4,2],[1,1,1],[1,9,9],[1,1,1],[1,12,2],[1,36,1],[1,2,1],[1,18,3],[1,28,
1],[1,5,1],[1,20,4],[1,40,3],[1,3,1],[1,5,3],[1,2,1],[1,31,3],[1,6,1],[1,3,1],[1,1,5],[1,3,3],[1,36,1],[1,1,1],[1,22,2],[1,9,2],[1,2,4],[1,2,2],[1,4,4],[1,2,1],[1,6,1],[1,3,3],[1,5,1],[1,13,2],[1,4,1],[1,1,3],[1,1,1],[1,11,5],[1,4,1],[1,2,3],[1,26,1],[1,9,1],[1,6,1],[1,15,1],[1,23,5],[1,3,5],[1,4,3],[1,8,1],[1,9,4],[1,2,1],[1,7,1],[1,1,6],[1,4,1],[1,43,1],[1,2,3],[1,1,1],[1,15,4],[1,3,1],[1,1,1],[1,10,1],[1,79,1],[1,1,14],[1,2,1],[1,6,1],[1,1,1],[1,24,1],[1,2,3],[1,9,2],[1,2,3],[1,8,1],[1,115,15],[1,1,1],[1,1,2],[1,3,1],[1,9,24],[1,6,1],[1,3,6],[1,10,3],[1,3,1],[1,1,1],[1,3,2],[1,2,1],[1,11,1],[1,5,1],[1,1,1],[1,2,1],[1,3,1],[1,5,1],[1,11,1],[1,2,1],[1,7,7],[1,15,1],[1,6,2],[1,51,7],[1,2,1],[1,54,1],[1,5,1],[1,1,1],[1,7,5],[1,1,1],[1,4,1],[1,3,1],[1,22,4],[1,5,3],[1,5,1],[1,64,9],[1,6,1],[1,28,6],[1,5,1],[1,11,1],[1,2,2],[1,4,2],[1,1,4],[1,8,1],[1,1,5],[1,7,1],[1,2,1],[1,2,2],[1,8,1],[1,11,3],[1,8,3],[1,7,1],[1,10,5],[1,5,1],[1,98,5],[1,18,1],[1,1,1],[1,5,1],[1,2,2],[1,14,2],[1,3,1],[1,1,1],[1,11,3],[1,7,9],[1,5,3],[1,3,1],[1,3,3],[1,125,34],[1,1,1],[1,2,1],[1,6,2],[1,2,2],[1,11,7],[1,5,2],[1,5,5],[1,6,1],[1,10,2],[1,14,2],[1,4,3],[1,8,7],[1,2,3],[1,2,2],[1,13,1],[1,6,1],[1,10,5],[1,11,1],[1,4,2],[1,14,1],[1,1,6],[1,15,1],[1,1,3],[1,5,3],[1,7,1],[1,2,1],[1,1,3],[1,2,4],[1,3,1],[1,8,3],[1,2,3],[1,2,1],[1,2,2],[1,2,1],[1,4,1],[1,16,2],[1,1,2],[1,1,5],[1,7,1],[1,3,1],[1,2,1],[1,16,3],[1,4,1],[1,8,2],[1,16,6],[1,12,2],[1,84,26],[1,10,2],[1,2,2],[1,5,1],[1,1,1],[1,8,1],[1,4,1],[1,4,1],[1,4,2],[1,4,1],[1,4,10],[1,14,2],[1,4,2],[1,5,2],[1,19,1],[1,4,3],[1,8,2],[1,6,1],[1,2,5],[1,2,1],[1,16,4],[1,4,1],[1,2,2],[1,7,1],[1,4,2],[1,4,1],[1,8,1],[1,10,2],[1,3,2],[1,3,1],[1,10,2],[1,1,1],[1,12,3],[1,37,1],[1,10,1],[1,16,4],[1,1,1],[1,11,1],[1,4,1],[1,8,6],[1,3,2],[1,66,2],[1,14,1],[1,2,4],[1,2,2],[1,7,2],[1,24,2],[1,5,1],[1,1,1],[1,1,1],[1,3,1],[1,31,2],[1,24,1],[1,8,5],[1,8,2],[1,3,4],[1,64,1],[1,1,4],[1,4,47],[1,8,4],[1,25,1],[1,19,2],[1,4,1],[1,33,4],[1,16,2],[1,4,1],[1,1,1],[1,2,3],[1,27,1],[1,20,1],[1,10,3],[1,2,1],[1,2,1],[1,76,1],[1,2,1],[1,5,1],[1,2,2],[1,15,3],[1,40,2],[1,4,22],[1,2,2],[1,2,2],[1,10,1],[1,3,1],[1,55,4],[1,2,7],[1,7,1],[1,4,6],[1,2,1],[1,2,1],[1,28,1],[1,2,2],[1,6,2],[1,6,2],[1,4,15],[1,3,2],[1,1,1],[1,29,1],[1,13,1],[1,16,1],[1,4,1],[1,7,7],[1,3,3],[1,16,4],[1,12,11],[1,1,1],[1,2,4],[1,54,2],[1,1,2],[1,6,2],[1,1,3],[1,2,2],[1,1,1],[1,2,1],[1,11,4],[1,9,1],[1,20,1],[1,1,1],[1,17,3],[1,1,1],[1,9,2],[1,2,2],[1,3,1],[1,29,19],[1,28,1],[1,8,3],[1,21,8],[1,7,3],[1,6,2],[1,5,2],[1,11,1],[1,1,2],[1,7,1],[1,22,1],[1,9,1],[1,3,3],[1,8,2],[1,5,1],[1,23,2],[1,11,5],[1,17,2],[1,5,5],[1,4,3],[1,33,1],[1,2,3],[1,6,1],[1,32,1],[1,6,2],[1,64,2],[1,3,1],[1,7,1],[1,3,6],[1,12,1],[1,1,1],[1,9,1],[1,38,3],[1,1,1],[1,3,1],[1,3,5],[1,78,16],[1,3,1],[1,7,1],[1,26,1],[1,9,2],[1,113,2],[1,9,1],[1,5,9],[1,3,2],[1,4,1],[1,2,1],[1,5,1],[1,24,3],[1,11,4],[1,38,2],[1,13,3],[1,7,3],[1,1,1],[1,1,2],[1,3,3],[1,5,3],[1,6,1],[1,7,1],[1,3,1],[1,4,2],[1,3,1],[1,3,1],[1,1,2],[1,2,1],[1,18,8],[1,1,3],[1,1,1],[1,2,5],[1,13,9],[1,2,2],[1,6,1],[1,5,1],[1,13,3],[1,7,1],[1,3,2],[1,2,1],[1,4,1],[1,2,2],[1,6,2],[1,4,3],[1,1,3],[1,3,2],[1,12,8],[1,6,1],[1,7,1],[1,6,3],[1,9,4],[1,16,17],[1,1,2],[1,4,1],[1,2,1],[1,2,1],[1,2,1],[1,1,1],[1,4,2],[1,4,1],[1,8,1],[1,14,17],[1,7,1],[1,7,6],[1,5,1],[1,4,2],[1,80,2],[1,13,1],[1,11,1],[1,9,1],[1,2,4],[1,3,1],[1,2,1],[1,5,2],[1,3,1],[1,1,2],[1,12,1],[1,8,5],[1,6,3],[1,17,1],[1,3,4],[1,1,2],[1,5,2],[1,1,3],[1,2,2],[1,2,3],[1,2,1],[1,4,1],[1,1,1],[1,14,1],[1,2,1],[1,16,4],[1,15,2],[1,3,3],
[1,8,8],[1,6,1],[1,25,4],[1,6,1],[1,7,3],[1,36,2],[1,2,1],[1,32,2],[1,1,1],[1,7,1],[1,14,2],[1,21,1],[1,3,1],[1,27,7],[1,6,3],[1,1,5],[1,5,4],[1,12,2],[1,2,1],[1,2,1],[1,8,7],[1,8,8],[1,7,1],[1,2,1],[1,4,1],[1,1,7],[1,10,3],[1,17,1],[1,1,1],[1,8,6],[1,29,5],[1,12,2],[1,7,2],[1,7,1],[1,2,2],[1,2,1],[1,2,1],[1,54,9],[1,1,1],[1,12,2],[1,8,1],[1,8,4],[1,39,1],[1,3,3],[1,9,4],[1,6,5],[1,2,1],[1,15,2],[1,18,1],[1,2,2],[1,1,1],[1,1,1],[1,2,4],[1,3,1],[1,6,1],[1,3,3],[1,4,3],[1,3,2],[1,1,1],[1,2,2],[1,16,12],[1,4,2],[1,15,2],[1,6,1],[1,7,1],[1,9,8],[1,70,2],[1,5,1],[1,4,3],[1,24,4],[1,8,6],[1,18,43],[1,23,3],[1,10,1],[1,14,8],[1,6,4],[1,2,1],[1,2,1],[1,1,1],[1,2,1],[1,9,3],[1,6,4],[1,5,3],[1,43,2],[1,5,1],[1,11,1],[1,1,2],[1,5,3],[1,4,2],[1,16,2],[1,16,10],[1,5,1],[1,2,2],[1,2,1],[1,2,3],[1,4,6],[1,3,12],[1,6,1],[1,10,1],[1,1,2],[1,13,1],[1,3,1],[1,5,2],[1,6,1],[1,3,1],[1,2,1],[1,1,1],[1,13,1],[1,20,1],[1,20,2],[1,8,1],[1,5,2],[1,2,2],[1,10,5],[1,1,3],[1,7,2],[1,4,1],[1,15,18],[1,1,4],[1,5,2],[1,4,1],[1,1,11],[1,1,3],[1,4,1],[1,1,1],[1,2,1],[1,2,12],[1,5,1],[1,3,1],[1,25,2],[1,16,1],[1,10,1],[1,18,1],[1,28,3],[1,5,6],[1,4,2],[1,2,2],[1,51,124],[1,4,2],[1,5,1],[1,28,1],[1,4,5],[1,6,2],[1,20,1],[1,7,1],[1,5,3],[1,11,1],[1,4,3],[1,1,1],[1,6,3],[1,5,1],[1,3,1],[1,10,2],[1,64,5],[1,12,12],[1,5,2],[1,6,1],[1,8,2],[1,28,8],[1,19,1],[1,2,1],[1,1,1],[2,6,1],[2,2,2],[2,4,5],[2,11,1],[2,4,1],[2,4,1],[2,14,1],[2,19,2],[2,2,1],[2,6,4],[2,2,1],[2,6,2],[2,4,1],[2,12,2],[2,15,2],[2,5,1],[2,11,1],[2,11,1],[2,2,2],[2,3,3],[2,5,9],[2,2,1],[2,1,1],[2,1,4],[2,2,1],[2,4,1],[2,11,1],[2,6,1],[2,2,2],[2,8,1],[2,81,7],[2,8,1],[2,5,1],[2,6,3],[2,2,2],[2,39,1],[2,5,2],[2,5,2],[2,2,4],[2,10,2],[2,4,2],[2,2,1],[2,6,6],[2,8,2],[2,56,1],[2,9,1],[2,1,1],[2,16,3],[2,5,2],[2,3,2],[2,12,25],[2,4,4],[2,6,2],[2,7,1],[2,30,11],[2,4,1],[2,16,5],[2,8,2],[2,7,2],[2,11,1],[2,7,1],[2,2,1],[2,1,1],[2,2,9],[2,39,6],[2,2,1],[2,2,1],[2,7,1],[2,19,1],[2,11,2],[2,8,2],[2,4,7],[2,2,1],[2,7,1],[2,1,1],[2,4,1],[2,6,1],[2,6,1],[2,2,4],[2,26,37],[2,2,1],[2,13,2],[2,35,10],[2,13,1],[2,6,1],[2,10,2],[2,19,9],[2,7,1],[2,7,1],[2,2,2],[2,1,1],[2,5,2],[2,10,2],[2,6,1],[2,6,1],[2,6,1],[2,2,2],[2,1,1],[2,6,60],[2,8,1],[2,18,1],[2,4,2],[2,1,1],[2,1,1],[2,2,3],[2,21,2],[2,7,2],[2,11,3],[2,14,2],[2,3,2],[2,12,1],[2,1,2],[2,34,1],[2,1,1],[2,16,1],[2,1,1],[2,11,1],[2,14,1],[2,8,1],[2,9,1],[2,8,1],[2,3,1],[2,4,4],[2,4,1],[2,44,3],[2,4,1],[2,19,6],[2,19,2],[2,3,2],[2,17,2],[2,17,4],[2,1,6],[2,5,3],[2,27,6],[2,5,3],[2,6,3],[2,22,2],[2,22,3],[2,13,19],[2,8,1],[2,2,2],[2,7,1],[2,9,3],[2,2,1],[2,11,1],[2,8,1],[2,4,1],[2,8,2],[2,4,1],[2,1,1],[2,16,1],[2,2,1],[2,4,1],[2,9,11],[2,3,3],[2,3,1],[2,1,2],[2,3,1],[2,28,1],[2,8,5],[2,6,2],[2,8,1],[2,1,1],[2,10,1],[2,6,1],[2,55,1],[2,1,1],[2,4,2],[2,3,2],[2,16,4],[2,11,1],[2,2,3],[2,15,1],[2,1,10],[2,8,2],[2,15,1],[2,1,1],[2,7,114],[2,10,3],[2,1,1],[2,5,1],[2,3,3],[2,2,1],[2,1,1],[2,8,1],[2,96,1],[2,10,3],[2,3,2],[2,2,1],[2,1,1],[2,3,1],[2,25,2],[2,3,1],[2,12,4],[2,2,9],[2,3,1],[2,2,1],[2,9,1],[2,12,1],[2,18,1],[2,23,6],[2,9,85],[2,2,8],[2,1,2],[2,26,1],[2,8,2],[2,6,3],[2,1,4],[2,6,1],[2,8,3],[2,9,2],[2,1,1],[2,7,1],[2,1,3],[2,7,1],[2,3,2],[2,10,1],[2,2,2],[2,8,2],[2,4,4],[2,23,2],[2,8,5],[2,1,1],[2,3,3],[2,7,2],[2,1,1],[2,2,1],[2,1,7],[2,10,1],[2,18,1],[2,39,5],[2,13,2],[2,7,2],[2,6,2],[2,9,1],[2,5,1],[2,7,1],[2,35,2],[2,2,2],[2,5,2],[2,1,1],[2,9,2],[2,18,1],[2,2,3],[2,35,1],[2,6,5],[2,2,2],[2,2,1],[2,12,2],[2,1,1],[2,10,1],[2,6,1],[2,2,1],[2,15,2],[2,7,1],[2,5,4],[2,4,1],[2,2,14],[2,2,1],[2,5,3],[2,21,2],[2,10,1],[2,2,1],[2,8,1]
,[2,16,1],[2,9,2],[2,11,2],[2,1,6],[2,12,2],[2,18,2],[2,2,4],[2,4,3],[2,7,11],[2,3,1],[2,28,5],[2,1,4],[2,8,1],[2,2,5],[2,2,1],[2,3,1],[2,10,2],[2,3,3],[2,2,1],[2,17,1],[2,6,1],[2,16,1],[2,10,16],[2,17,1],[2,4,2],[2,1,1],[2,3,3],[2,7,3],[2,5,1],[2,11,1],[2,13,1],[2,3,1],[2,6,1],[2,5,2],[2,17,2],[2,33,13],[2,2,10],[2,3,5],[2,4,3],[2,5,1],[2,2,4],[2,8,2],[2,14,1],[2,16,1],[2,2,3],[2,19,6],[2,5,1],[2,8,2],[2,7,1],[2,1,1],[2,11,1],[2,2,2],[2,11,10],[2,10,1],[2,14,1],[2,1,7],[2,10,1],[2,34,1],[2,2,1],[2,2,4],[2,9,2],[2,16,1],[2,2,4],[2,8,3],[2,1,2],[2,3,5],[2,13,5],[2,20,1],[2,25,8],[2,9,1],[2,1,1],[2,15,3],[2,6,2],[2,394,278],[2,11,2],[2,1,1],[2,3,15],[2,4,2],[2,3,6],[2,6,3],[2,1,12],[2,2,1],[2,1,3],[2,11,2],[2,20,3],[2,31,9],[2,25,7],[2,15,2],[2,11,31],[2,17,2],[2,5,1],[2,2,2],[2,4,1],[2,6,2],[2,27,2],[2,10,2],[2,1,2],[2,26,5],[2,5,14],[2,12,2],[2,5,2],[2,2,1],[2,2,3],[2,6,1],[2,1,3],[2,9,3],[2,18,1],[2,5,5],[2,29,13],[2,14,1],[2,1,4],[2,3,1],[2,5,1],[2,19,4],[2,11,7],[2,8,3],[2,18,1],[2,3,5],[2,11,1],[2,4,1],[2,10,4],[2,19,2],[2,10,3],[2,12,2],[2,19,9],[2,73,3],[2,13,3],[2,12,1],[2,4,5],[2,55,1],[2,6,6],[2,27,2],[2,2,1],[2,20,1],[2,8,1],[2,1,1],[2,29,2],[2,10,8],[2,5,2],[2,10,2],[2,14,1],[2,10,1],[2,1,1],[2,4,2],[2,5,1],[2,1,4],[2,4,2],[2,9,1],[2,9,4],[2,2,1],[2,4,1],[2,6,2],[2,2,2],[2,10,15],[2,17,1],[2,9,1],[2,9,1],[2,8,2],[2,4,1],[2,4,1],[2,243,2],[2,9,3],[2,12,2],[2,4,3],[2,2,1],[2,1,2],[2,57,4],[2,7,2],[2,8,2],[2,14,2],[2,2,1],[2,6,1],[2,7,2],[2,8,1],[2,4,3],[2,36,5],[2,3,1],[2,1,1],[2,45,8],[2,1,1],[2,2,3],[2,9,1],[2,1,1],[2,13,2],[2,44,6],[2,2,1],[2,36,1],[2,4,1],[2,5,1],[2,3,2],[2,1,1],[2,28,2],[2,9,1],[2,3,3],[2,10,2],[2,16,1],[2,1,1],[2,1,1],[2,13,1],[2,14,3],[2,65,1],[2,7,1],[2,2,1],[2,11,8],[2,4,1],[2,17,1],[2,6,1],[2,15,5],[2,15,1],[2,17,2],[2,8,1],[2,8,1],[2,1,2],[2,5,7],[2,1,1],[2,3,2],[2,2,1],[2,4,1],[2,32,1],[2,3,1],[2,1,1],[2,1,1],[2,2,2],[2,2,1],[2,8,2],[2,11,3],[2,2,3],[2,42,3],[2,5,1],[2,6,2],[2,1,1],[2,9,1],[2,2,2],[2,5,1],[2,2,1],[2,7,1],[2,7,6],[2,6,2],[2,3,1],[2,1,3],[2,15,1],[2,23,1],[2,1,1],[2,3,1],[2,4,2],[2,8,1],[2,2,7],[2,3,4],[2,6,5],[2,4,1],[2,5,3],[2,16,5],[2,11,1],[2,13,1],[2,22,3],[2,10,5],[2,2,2],[2,2,2],[2,6,1],[2,7,1],[2,4,2],[2,4,3],[2,7,3],[2,7,4],[2,1,1],[2,71,9],[2,4,8],[2,33,4],[2,16,2],[2,1,18],[2,15,1],[2,3,1],[2,8,1],[2,6,3],[2,4,2],[2,1,1],[2,7,2],[2,2,8],[2,2,1],[2,8,1],[2,1,3],[2,5,1],[2,2,2],[2,11,1],[2,17,3],[2,118,1],[2,8,4],[2,14,1],[2,3,4],[2,14,1],[2,2,2],[2,4,3],[2,2,1],[2,11,1],[2,8,10],[2,1,2],[2,3,3],[2,2,2],[2,12,1],[2,2,2],[2,26,3],[2,3,2],[2,3,3],[2,19,1],[2,1,13],[2,23,2],[2,3,1],[2,7,4],[2,10,4],[2,2,3],[2,71,3],[2,3,3],[2,23,1],[2,1,1],[2,34,3],[2,62,1],[2,4,1],[2,7,2],[2,2,8],[2,6,1],[2,20,3],[2,26,2],[2,5,2],[2,2,1],[2,7,1],[2,1,1],[2,7,2],[2,28,7],[2,4,1],[2,2,2],[2,4,1],[2,7,1],[2,2,3],[2,3,1],[2,8,3],[2,43,1],[2,2,1],[2,1,4],[2,2,1],[2,13,3],[2,4,2],[2,6,1],[2,17,1],[2,2,8],[2,32,1],[2,11,2],[2,5,2],[2,45,3],[2,9,1],[2,14,2],[2,9,1],[2,2,1],[2,10,5],[2,2,1],[2,13,1],[2,2,2],[2,3,5],[2,2,1],[2,17,3],[2,11,1],[2,15,1],[2,13,4],[2,7,7],[2,10,2],[2,6,4],[2,2,3],[2,1,3],[2,27,2],[2,2,3],[2,2,1],[2,3,1],[2,3,9],[2,3,46],[2,11,1],[2,30,1],[2,5,1],[2,8,8],[2,2,1],[2,1,1],[2,2,1],[2,6,7],[2,1,1],[2,4,1],[2,4,2],[2,15,2],[2,6,7],[2,4,2],[2,5,1],[2,1,4],[2,2,3],[2,1,2],[2,2,2],[2,1,7],[2,15,2],[2,18,3],[2,2,1],[2,6,1],[2,8,1],[2,134,20],[2,26,1],[2,2,2],[2,8,4],[2,1,1],[2,3,1],[2,14,1],[2,3,1],[2,26,1],[2,19,1],[2,1,1],[2,1,1],[2,7,1],[2,5,2],[2,5,8],[2,3,4],[2,1,1],[2,2,2],[2,16,1],[2,7,2],[2,6,1],[2,1,6],[2,4,3],[2,2,2],[2,2,2],[2,2
,1],[2,2,1],[2,1,2],[2,8,3],[2,4,1],[2,9,1],[2,18,33],[2,14,1],[2,1,1],[2,3,2],[2,7,1],[2,14,4],[2,4,2],[2,31,7],[2,19,2],[2,11,4],[2,2,1],[2,7,2],[2,2,1],[2,2,3],[2,52,4],[2,4,1],[2,1,1],[2,4,3],[2,11,1],[2,3,2],[2,6,1],[2,10,3],[2,6,1],[2,12,1],[2,10,2],[2,4,2],[2,23,2],[2,3,3],[2,8,1],[2,21,6],[2,2,2],[2,1,1],[2,1,1],[2,16,3],[2,9,2],[2,5,1],[2,2,2],[2,1,4],[2,4,1],[2,1,25],[2,24,2],[2,6,1],[2,3,4],[2,10,4],[2,6,2],[2,35,2],[2,2,2],[2,1,1],[2,25,10],[2,8,1],[2,1,2],[2,1,1],[2,2,1],[2,3,8],[2,2,1],[2,2,1],[2,5,2],[2,4,3],[2,2,8],[2,1,1],[2,4,2],[2,3,3],[2,12,1],[2,3,2],[2,4,1],[2,2,4],[2,7,2],[2,1,1],[2,73,14],[2,90,1],[2,4,1],[2,2,1],[2,1,1],[2,6,3],[2,1,1],[2,4,1],[2,10,3],[2,2,3],[2,1,1],[2,6,1],[2,37,2],[2,10,1],[2,2,2],[2,60,2],[2,16,3],[2,6,1],[2,1,1],[2,3,4],[2,38,5],[2,6,2],[2,2,1],[2,2,1],[2,9,2],[2,11,1],[2,6,1],[2,9,1],[2,2,2],[2,4,3],[2,8,1],[2,3,2],[2,1,9],[2,14,2],[2,8,1],[2,30,4],[2,2,1],[2,31,2],[2,31,1],[2,21,23],[2,1,5],[2,4,1],[2,2,1],[2,5,3],[2,4,2],[2,10,2],[2,2,2],[2,18,1],[2,15,1],[2,2,1],[2,1,2],[2,5,1],[2,13,1],[2,14,4],[2,1,4],[2,5,1],[2,109,3],[2,18,2],[2,1,2],[2,164,114],[2,8,1],[2,2,3],[2,4,1],[2,1,1],[2,10,1],[2,9,2],[2,4,3],[2,1,75],[2,6,1],[2,17,2],[2,3,1],[2,9,1],[2,2,1],[2,21,1],[2,30,3],[2,7,2],[2,2,2],[2,63,5],[2,16,3],[2,6,1],[2,2,8],[2,25,2],[2,31,3],[2,126,21],[2,10,1],[2,2,2],[2,14,7],[2,6,10],[2,4,3],[2,7,1],[2,12,1],[2,2,1],[2,3,2],[2,2,15],[2,1,4],[2,4,1],[2,3,1],[2,4,1],[2,6,2],[2,7,3],[2,2,3],[2,9,2],[2,6,1],[2,2,1],[2,16,1],[2,22,2],[2,10,1],[2,10,4],[2,7,2],[2,13,1],[2,3,1],[2,7,2],[2,23,12],[2,3,1],[2,6,1],[2,4,2],[2,29,2],[2,5,3],[2,8,1],[2,1,1],[2,6,1],[2,3,1],[2,17,2],[2,15,1],[2,2,1],[2,6,1],[2,2,2],[2,30,1],[2,3,1],[2,2,2],[2,2,5],[2,2,1],[2,37,5],[2,6,2],[2,7,6],[2,2,3],[2,3,3],[2,2,5],[2,75,6],[2,2,3],[2,10,1],[2,2,3],[2,7,2],[2,30,1],[2,12,33],[2,1,1],[2,3,4],[2,14,1],[2,9,2],[2,8,1],[2,1,1],[2,9,1],[2,4,1],[2,2,1],[2,7,1],[2,4,1],[2,3,1],[2,4,3],[2,1,1],[2,5,2],[2,3,4],[2,4,2],[2,6,3],[2,13,5],[2,4,2],[2,6,1],[2,2,5],[2,2,3],[2,1,1],[2,14,1],[2,5,1],[2,4,2],[2,9,1],[2,7,6],[2,4,1],[2,19,2],[2,23,1],[2,20,7],[2,9,1],[2,4,1],[2,12,2],[2,9,4],[2,3,2],[2,3,7],[2,3,1],[2,10,2],[2,6,1],[2,7,1],[2,1,1],[2,9,1],[2,6,1],[2,1,1],[2,17,2],[2,9,1],[2,5,2],[2,1,1],[2,11,2],[2,9,1],[2,1,1],[2,3,6],[2,2,1],[2,5,9],[2,12,2],[2,2,1],[2,6,2],[2,17,4],[2,2,2],[2,7,1],[2,596,5],[2,6,1],[2,2,1],[2,58,125],[2,6,1],[2,8,1],[2,2,1],[2,3,1],[2,1,2],[2,11,4],[2,1,1],[2,9,6],[2,2,8],[2,1,1],[2,6,2],[2,1,1],[2,2,1],[2,7,2],[2,7,3],[2,14,2],[2,1,1],[2,18,9],[2,2,5],[2,2,12],[2,8,4],[2,6,4],[2,3,1],[2,19,2],[2,4,1],[2,2,1],[2,4,3],[2,3,1],[2,13,1],[2,1,1],[2,7,1],[2,1,1],[2,8,1],[2,13,14],[2,11,1],[2,31,1],[2,4,1],[2,6,1],[2,3,2],[2,26,1],[2,4,2],[2,1,1],[2,2,2],[2,1,2],[2,1,1],[2,7,1],[2,8,1],[2,6,2],[2,19,13],[2,2,3],[2,8,3],[2,1,6],[2,5,1],[2,1,1],[2,6,1],[2,9,1],[2,2,2],[2,35,1],[2,1,1],[2,27,2],[2,54,2],[2,6,2],[2,5,1],[2,2,1],[2,2,4],[2,2,1],[2,2,1],[2,14,1],[2,9,1],[2,53,17],[2,2,1],[2,10,1],[2,9,1],[2,23,1],[2,7,1],[2,12,4],[2,1,2],[2,8,1],[2,7,4],[2,2,1],[2,2,1],[2,3,1],[2,11,1],[2,2,2],[2,6,1],[2,2,1],[2,18,4],[2,3,4],[2,8,2],[2,13,1],[2,2,1],[2,1,2],[2,14,4],[2,8,11],[2,1,1],[2,8,3],[2,7,3],[2,90,1],[2,20,2],[2,16,1],[2,20,2],[2,3,1],[2,8,10],[2,10,1],[2,10,1],[2,1,1],[2,3,1],[2,5,1],[2,37,3],[2,24,3],[2,10,1],[2,3,1],[2,2,4],[2,4,1],[2,19,2],[2,1,1],[2,5,1],[2,8,1],[2,3,1],[2,1,1],[2,2,1],[2,2,32],[2,2,1],[2,4,1],[2,1,1],[2,2,2],[2,5,1],[2,2,3],[2,25,9],[2,2,1],[2,4,4],[2,2,1],[2,15,1],[2,59,1],[2,3,2],[2,4,1],[2,9,2],[2,3,10],[2,6,1],[2,5,5],[2,8,2]
,[2,2,2],[2,4,2],[2,10,1],[2,126,1],[2,3,1],[2,8,1],[2,9,2],[2,1,30],[2,25,1],[2,7,3],[2,2,2],[2,1,3],[2,21,1],[2,38,1],[2,48,1],[2,22,1],[2,4,2],[2,55,2],[2,5,1],[2,15,1],[2,14,44],[2,4,1],[2,1,2],[2,2,3],[2,2,1],[2,3,3],[2,6,1],[2,2,1],[2,26,7],[2,4,1],[2,1,2],[2,3,2],[2,6,2],[2,10,1],[2,18,3],[2,2,1],[2,38,2],[2,1,1],[2,8,1],[2,8,1],[2,3,1],[2,4,1],[2,1,1],[2,1,2],[2,4,1],[2,26,2],[2,3,3],[2,2,1],[2,6,1],[2,19,1],[2,3,4],[2,2,1],[2,4,1],[2,11,1],[2,9,1],[2,9,1],[2,9,1],[2,1,1],[2,1,1],[2,7,1],[2,2,1],[2,11,4],[2,10,2],[2,4,1],[2,6,1],[2,4,1],[2,8,1],[2,11,1],[2,1,1],[2,7,1],[2,8,2],[2,9,1],[2,8,1],[2,41,2],[2,2,4],[2,1,6],[2,2,1],[2,6,3],[2,128,5],[2,2,1],[2,13,13],[2,6,1],[2,1,3],[2,3,3],[2,7,2],[2,10,12],[2,2,1],[2,8,1],[2,1,1],[2,7,1],[2,2,1],[2,10,2],[2,11,10],[2,1,1],[2,8,3],[2,4,5],[2,2,1],[2,14,2],[2,4,1],[2,4,1],[2,7,1],[2,6,1],[2,7,3],[2,1,1],[2,2,1],[2,7,2],[2,2,1],[2,6,1],[2,8,1],[2,2,4],[2,6,1],[2,43,1],[2,108,3],[2,8,1],[2,13,1],[2,4,1],[2,10,3],[2,2,1],[2,24,2],[2,1,2],[2,4,2],[2,2,2],[2,40,6],[2,6,2],[2,6,2],[2,4,3],[2,28,5],[2,4,1],[2,15,1],[2,12,1],[2,1,1],[2,27,1],[3,1,1],[3,5,2],[3,16,2],[3,16,3],[3,1,2],[3,98,2],[3,91,7],[3,6,37],[3,4,1],[3,9,1],[3,97,2],[3,6,1],[3,23,3],[3,115,1],[3,2,1],[3,1,1],[3,1,1],[3,14,4],[3,1,1],[3,28,1],[3,1,1],[3,6,1],[3,15,5],[3,3,1],[3,52,1],[3,2,3],[3,3,1],[3,4,5],[3,13,1],[3,16,3],[3,13,1],[3,17,1],[3,4,4],[3,6,7],[3,14,1],[3,32,1],[3,3,3],[3,11,4],[3,1,1],[3,8,6],[3,9,7],[3,2,1],[3,9,2],[3,5,2],[3,26,12],[3,11,3],[3,12,2],[3,4,2],[3,6,2],[3,30,6],[3,1,2],[3,10,1],[3,1,1],[3,4,1],[3,7,1],[3,30,29],[3,2,3],[3,2,2],[3,2,1],[3,11,1],[3,2,3],[3,3,1],[3,9,1],[3,2,2],[3,5,1],[3,1,2],[3,1,13],[3,6,9],[3,1,1],[3,6,2],[3,1,3],[3,4,1],[3,6,1],[3,9,3],[3,1,1],[3,9,2],[3,19,45],[3,2,1],[3,7,8],[3,21,3],[3,6,2],[3,2,1],[3,6,1],[3,5,1],[3,2,1],[3,15,7],[3,2,1],[3,9,3],[3,11,1],[3,4,1],[3,7,1],[3,2,1],[3,19,1],[3,5,1],[3,2,1],[3,1,1],[3,22,3],[3,21,5],[3,13,1],[3,2,1],[3,4,1],[3,23,1],[3,8,1],[3,3,2],[3,2,2],[3,4,1],[3,12,2],[3,5,2],[3,16,8],[3,6,1],[3,1,2],[3,2,1],[3,7,1],[3,6,1],[3,6,3],[3,45,1],[3,4,5],[3,1,2],[3,3,1],[3,2,1],[3,1,1],[3,12,1],[3,8,1],[3,3,1],[3,6,1],[3,2,2],[3,9,2],[3,5,2],[3,2,1],[3,3,1],[3,15,1],[3,11,1],[3,4,1],[3,9,2],[3,3,1],[3,4,1],[3,1,3],[3,6,15],[3,6,3],[3,2,6],[3,1,3],[3,3,2],[3,15,1],[3,6,1],[3,7,1],[3,5,1],[3,9,1],[3,49,2],[3,5,2],[3,9,4],[3,39,1],[3,4,3],[3,1,5],[3,1,2],[3,2,1],[3,14,2],[3,4,3],[3,18,1],[3,5,4],[3,19,3],[3,3,1],[3,2,1],[3,3,2],[3,48,10],[3,1,1],[3,5,6],[3,12,3],[3,1,2],[3,5,4],[3,4,1],[3,4,1],[3,5,1],[3,1,1],[3,10,1],[3,10,2],[3,6,3],[3,2,7],[3,4,1],[3,9,2],[3,1,1],[3,2,1],[3,4,6],[3,1,1],[3,25,9],[3,11,1],[3,2,1],[3,8,2],[3,1,1],[3,9,3],[3,4,6],[3,1,7],[3,1,1],[3,4,1],[3,11,2],[3,14,1],[3,65,2],[3,6,1],[3,5,2],[3,2,2],[3,13,1],[3,2,5],[3,2,1],[3,4,2],[3,25,1],[3,2,1],[3,2,3],[3,9,1],[3,5,5],[3,46,1],[3,6,2],[3,12,9],[3,4,4],[3,2,3],[3,13,5],[3,39,16],[3,3,1],[3,1,2],[3,68,14],[3,5,1],[3,11,1],[3,7,1],[3,4,1],[3,53,11],[3,4,3],[3,4,1],[3,2,1],[3,4,1],[3,1,1],[3,1,2],[3,8,4],[3,5,1],[3,6,5],[3,6,13],[3,403,3],[3,23,1],[3,3,3],[3,14,1],[3,10,1],[3,3,2],[3,46,11],[3,4,3],[3,29,1],[3,41,2],[3,11,1],[3,15,3],[3,11,2],[3,6,1],[3,3,1],[3,17,2],[3,14,3],[3,5,4],[3,2,1],[3,2,1],[3,5,6],[3,6,1],[3,54,2],[3,2,1],[3,4,2],[3,1,1],[3,7,1],[3,8,34],[3,7,1],[3,1,2],[3,3,2],[3,2,5],[3,1,1],[3,15,12],[3,13,1],[3,5,1],[3,1,1],[3,5,1],[3,39,1],[3,26,9],[3,11,1],[3,6,1],[3,2,1],[3,19,4],[3,4,5],[3,10,1],[3,11,6],[3,4,1],[3,38,1],[3,1,1],[3,1,3],[3,2,1],[3,5,10],[3,4,1],[3,18,2],[3,4,1],[3,19,1],[3,1,1],[3,8,6],[3,1,1],[3,9
,1],[3,8,3],[3,15,4],[3,9,3],[3,13,1],[3,10,1],[3,1,2],[3,5,4],[3,4,2],[3,4,1],[3,28,1],[3,6,2],[3,9,1],[3,1,2],[3,2,2],[3,25,1],[3,5,8],[3,5,3],[3,8,2],[3,2,1],[3,14,5],[3,2,1],[3,11,3],[3,10,1],[3,2,2],[3,1,1],[3,3,1],[3,9,1],[3,39,9],[3,27,2],[3,1,1],[3,1,3],[3,12,3],[3,6,1],[3,14,2],[3,17,3],[3,198,1],[3,3,1],[3,5,1],[3,1,1],[3,2,4],[3,12,1],[3,31,1],[3,8,14],[3,25,2],[3,16,2],[3,18,2],[3,2,3],[3,2,3],[3,6,28],[3,22,3],[3,6,1],[3,8,2],[3,4,3],[3,3,3],[3,8,1],[3,1,1],[3,1,2],[3,1,1],[3,1,1],[3,1,2],[3,6,2],[3,2,3],[3,4,1],[3,3,1],[3,1,1],[3,3,2],[3,8,10],[3,6,1],[3,2,1],[3,2,1],[3,5,1],[3,29,6],[3,10,1],[3,3,8],[3,1,3],[3,2,2],[3,3,1],[3,3,4],[3,5,19],[3,15,1],[3,65,1],[3,2,2],[3,60,3],[3,52,1],[3,1,1],[3,4,2],[3,4,1],[3,6,1],[3,7,4],[3,1,1],[3,13,1],[3,8,3],[3,13,1],[3,6,1],[3,3,2],[3,14,1],[3,2,2],[3,4,1],[3,1,1],[3,11,29],[3,7,1],[3,21,6],[3,4,1],[3,1,1],[3,2,1],[3,9,1],[3,2,4],[3,3,1],[3,2,3],[3,1,2],[3,3,2],[3,3,4],[3,16,2],[3,9,2],[3,2,1],[3,17,8],[3,9,4],[3,7,1],[3,6,4],[3,1,2],[3,2,1],[3,4,4],[3,2,1],[3,3,1],[3,3,1],[3,11,1],[3,2,2],[3,2,1],[3,2,3],[3,2,2],[3,10,6],[3,10,4],[3,1,1],[3,8,3],[3,29,2],[3,7,1],[3,2,1],[3,4,1],[3,11,1],[3,2,1],[3,2,2],[3,13,3],[3,4,1],[3,3,1],[3,2,4],[3,18,1],[3,12,1],[3,6,3],[3,3,1],[3,5,1],[3,3,2],[3,9,2],[3,5,1],[3,5,1],[3,11,1],[3,1,1],[3,39,18],[3,3,2],[3,4,1],[3,17,2],[3,14,2],[3,10,6],[3,1,1],[3,4,5],[3,2,1],[3,4,6],[3,12,1],[3,106,80],[3,32,1],[3,7,1],[3,8,1],[3,2,1],[3,33,2],[3,33,7],[3,10,1],[3,3,2],[3,4,3],[3,16,3],[3,7,1],[3,8,1],[3,16,1],[3,8,1],[3,8,1],[3,30,1],[3,7,1],[3,2,1],[3,3,10],[3,27,1],[3,2,1],[3,1,3],[3,2,1],[3,23,1],[3,1,1],[3,5,2],[3,6,1],[3,2,1],[3,2,13],[3,1,3],[3,6,2],[3,5,1],[3,26,1],[3,4,5],[3,2,1],[3,9,1],[3,6,1],[3,2,1],[3,21,2],[3,15,1],[3,4,2],[3,2,1],[3,30,1],[3,4,2],[3,2,1],[3,2,58],[3,8,2],[3,13,1],[3,16,2],[3,10,6],[3,6,1],[3,6,1],[3,2,6],[3,1,1],[3,2,4],[3,11,9],[3,25,2],[3,4,2],[3,1,1],[3,9,9],[3,1,9],[3,3,3],[3,4,1],[3,2,3],[3,5,2],[3,2,7],[3,2,1],[3,2,1],[3,6,3],[3,3,4],[3,1,2],[3,4,3],[3,7,118],[3,7,1],[3,6,1],[3,3,1],[3,1,15],[3,1,2],[3,4,2],[3,2,1],[3,4,1],[3,6,1],[3,23,1],[3,1,1],[3,3,1],[3,4,1],[3,10,3],[3,2,2],[3,6,5],[3,8,1],[3,3,1],[3,4,1],[3,20,2],[3,14,2],[3,7,1],[3,21,29],[3,10,2],[3,10,2],[3,3,3],[3,2,1],[3,3,2],[3,24,3],[3,3,1],[3,9,1],[3,6,1],[3,22,1],[3,13,1],[3,5,2],[3,1,1],[3,9,1],[3,10,2],[3,4,1],[3,7,1],[3,2,1],[3,12,4],[3,48,2],[3,43,1],[3,6,1],[3,1,1],[3,4,1],[3,14,10],[3,2,1],[3,1,1],[3,1,1],[3,3,1],[3,11,5],[3,36,1],[3,4,49],[3,11,1],[3,8,1],[3,2,2],[3,3,1],[3,3,1],[3,8,3],[3,15,8],[3,30,9],[3,23,5],[3,10,1],[3,7,6],[3,1,1],[3,9,2],[3,6,1],[3,3,1],[3,3,1],[3,2,1],[3,21,1],[3,13,2],[3,4,2],[3,9,2],[3,8,1],[3,2,2],[3,4,2],[3,1,1],[3,9,2],[3,32,2],[3,2,2],[3,10,1],[3,1,4],[3,4,3],[3,14,3],[3,5,2],[3,2,1],[3,3,1],[3,5,3],[3,14,3],[3,2,3],[3,6,1],[3,4,1],[3,1,1],[3,16,1],[3,3,1],[3,2,1],[3,5,1],[3,33,1],[3,3,1],[3,14,4],[3,8,3],[3,12,2],[3,14,1],[3,2,1],[3,1,1],[3,13,2],[3,8,1],[3,9,1],[3,17,1],[3,14,2],[3,16,1],[3,12,4],[3,2,1],[3,2,2],[3,20,1],[3,2,2],[3,8,4],[3,7,3],[3,8,1],[3,1,2],[3,5,5],[3,29,1],[3,1,1],[3,2,1],[3,8,2],[3,2,1],[3,7,9],[3,3,2],[3,7,1],[3,6,1],[3,6,2],[3,1,26],[3,3,3],[3,7,1],[3,2,2],[3,8,2],[3,7,1],[3,3,1],[3,4,4],[3,11,1],[3,5,15],[3,28,1],[3,3,8],[3,3,3],[3,2,4],[3,6,4],[3,3,2],[3,2,2],[3,5,1],[3,12,2],[3,10,2],[3,1,1],[3,6,1],[3,2,1],[3,3,2],[4,8,1],[4,3,1],[4,23,1],[4,4,9],[4,6,2],[4,9,1],[4,9,6],[4,5,9],[4,8,1],[4,2,1],[4,2,3],[4,8,1],[4,1,1],[4,4,1],[4,8,1],[4,2,1],[4,16,1],[4,1,8],[4,4,1],[4,1,3],[4,18,1],[4,2,1],[4,4,9],[4,2,1],[4,3,1],[4,9,2],[4,2,1],[4,7,3],[4
,5,4],[4,27,2],[4,1,1],[4,8,2],[4,7,1],[4,8,1],[4,9,4],[4,3,2],[4,6,4],[4,2,2],[4,13,5],[4,8,1],[4,10,2],[4,1,1],[4,2,1],[4,1,2],[4,6,2],[4,5,2],[4,8,2],[4,16,2],[4,7,2],[4,102,5],[4,2,2],[4,1,1],[4,2,1],[4,1,2],[4,2,1],[4,29,4],[4,2,1],[4,1,1],[4,1,4],[4,3,2],[4,6,1],[4,19,2],[4,4,3],[4,1,12],[4,1,1],[4,62,3],[4,14,1],[4,1,1],[4,1,1],[4,7,4],[4,9,1],[4,15,1],[4,16,15],[4,2,2],[4,2,1],[4,41,3],[4,7,8],[4,7,3],[4,5,1],[4,9,1],[4,6,1],[4,1,3],[4,15,1],[4,5,4],[4,28,2],[4,11,3],[4,15,1],[4,1,1],[4,1,1],[4,12,1],[4,16,4],[4,12,5],[4,5,2],[4,8,4],[4,124,115],[4,11,3],[4,46,10],[4,4,1],[4,3,1],[4,2,1],[4,27,1],[4,1,1],[4,20,1],[4,2,1],[4,4,1],[4,53,1],[4,18,1],[4,1,1],[4,8,2],[4,3,1],[4,2,1],[4,5,1],[4,2,3],[4,2,5],[4,3,1],[4,8,1],[4,2,5],[4,8,2],[4,9,2],[4,48,1],[4,9,1],[4,20,2],[4,4,4],[4,3,2],[4,8,2],[4,6,2],[4,12,6],[4,9,1],[4,3,1],[4,4,1],[4,5,3],[4,5,1],[4,8,4],[4,3,1],[4,7,1],[4,6,2],[4,15,16],[4,6,1],[4,50,4],[4,23,4],[4,9,7],[4,8,2],[4,1,1],[4,2,1],[4,9,1],[4,12,1],[4,4,3],[4,2,2],[4,42,4],[4,1,1],[4,6,1],[4,11,10],[4,6,11],[4,7,1],[4,4,2],[4,4,2],[4,6,1],[4,59,4],[4,1,1],[4,2,7],[4,12,20],[4,11,3],[4,4,1],[4,12,3],[4,6,3],[4,7,2],[4,17,4],[4,106,8],[4,6,2],[4,7,1],[4,1,1],[4,8,1],[4,4,6],[4,3,1],[4,4,3],[4,14,3],[4,15,2],[4,4,1],[4,44,91],[4,7,2],[4,3,2],[4,2,1],[4,23,2],[4,30,1],[4,2,2],[4,10,1],[4,6,9],[4,6,2],[4,3,2],[4,3,2],[4,20,1],[4,4,1],[4,18,2],[4,12,1],[4,20,14],[4,10,1],[4,3,1],[4,2,1],[4,3,2],[4,3,3],[4,6,3],[4,2,4],[4,8,1],[4,8,5],[4,3,1],[4,10,2],[4,2,1],[4,1,1],[4,10,1],[4,25,2],[4,1,1],[4,4,1],[4,63,2],[4,1,1],[4,4,1],[4,6,7],[4,2,3],[4,8,1],[4,19,2],[4,11,1],[4,30,10],[4,4,4],[4,2,3],[4,2,1],[4,43,29],[4,2,1],[4,1,1],[4,17,1],[4,14,1],[4,13,1],[4,6,4],[4,2,2],[4,1,2],[4,3,1],[4,7,3],[4,4,1],[4,4,1],[4,1,1],[4,13,5],[4,2,1],[4,1,1],[4,5,1],[4,4,2],[4,13,2],[4,10,4],[4,8,1],[4,3,1],[4,2,2],[4,8,3],[4,4,2],[4,6,1],[4,7,1],[4,14,29],[4,19,1],[4,7,1],[4,19,1],[4,24,2],[4,2,1],[4,1,1],[4,28,1],[4,1,1],[4,2,1],[4,3,1],[4,2,1],[4,1,7],[4,2,4],[4,3,1],[4,29,1],[4,2,1],[4,14,1],[4,2,1],[4,28,3],[4,11,3],[4,1,2],[4,21,2],[4,1,1],[4,15,1],[4,17,1],[4,16,1],[4,13,1],[4,2,1],[4,15,5],[4,19,1],[4,17,1],[4,5,3],[4,12,2],[4,33,1],[4,8,1],[4,15,4],[4,2,11],[4,4,1],[4,1,10],[4,39,1],[4,28,1],[4,25,2],[4,1,1],[4,14,2],[4,8,32],[4,9,1],[4,7,1],[4,6,2],[4,1,2],[4,3,1],[4,6,2],[4,12,2],[4,2,2],[4,5,2],[4,18,1],[4,5,3],[4,6,2],[4,25,1],[4,3,16],[4,14,4],[4,2,6],[4,14,2],[4,3,1],[4,4,1],[4,9,3],[4,28,2],[4,9,1],[4,2,1],[4,7,1],[4,2,1],[4,1,4],[4,4,3],[4,1,1],[4,16,6],[4,3,1],[4,10,1],[4,12,3],[4,8,1],[4,4,1],[4,15,2],[4,4,1],[4,2,3],[4,2,9],[4,4,1],[4,7,2],[4,14,1],[4,31,3],[4,13,1],[4,19,2],[4,8,3],[4,2,1],[4,12,1],[4,5,1],[4,45,3],[4,6,1],[4,1,1],[4,12,6],[4,4,3],[4,3,1],[4,5,2],[4,4,4],[4,19,2],[4,8,1],[4,2,1],[4,27,2],[4,73,3],[4,22,2],[4,1,2],[4,7,46],[4,9,2],[4,2,1],[4,524,305],[4,7,1],[4,26,1],[4,2,1],[4,6,1],[4,30,2],[4,6,1],[4,25,92],[4,2,1],[4,13,1],[4,1,4],[4,1,7],[4,6,1],[4,8,2],[4,6,1],[4,4,2],[4,2,6],[4,12,2],[4,2,2],[4,5,2],[4,3,2],[4,13,1],[4,4,1],[4,6,3],[4,14,1],[4,15,1],[4,25,1],[4,3,1],[4,9,4],[4,94,3],[4,11,2],[4,12,4],[4,7,3],[4,3,1],[4,9,2],[4,3,1],[4,2,1],[4,8,3],[4,7,5],[4,2,45],[4,10,1],[4,10,4],[4,5,3],[4,6,6],[5,5,1],[5,2,1],[5,3,3],[5,11,2],[5,28,1],[5,8,1],[5,4,1],[5,4,1],[5,12,1],[5,7,1],[5,1,1],[5,38,7],[5,6,2],[5,4,2],[5,5,1],[5,2,2],[5,2,7],[5,1,4],[5,4,1],[5,4,1],[5,1,2],[5,3,1],[5,7,1],[5,2,1],[5,10,2],[5,4,1],[5,2,1],[5,2,2],[5,3,1],[5,15,78],[5,2,1],[5,1,5],[5,10,1],[5,6,4],[5,10,2],[5,5,1],[5,1,1],[5,1,1],[5,2,2],[5,6,1],[5,2,2],[5,6,2],[5,10,2],[5,3,1],[5
,6,2],[5,4,3],[5,16,5],[5,47,48],[5,2,5],[5,6,7],[5,4,2],[5,3,1],[5,2,1],[5,8,1],[5,7,1],[5,2,2],[5,2,1],[5,3,1],[5,7,4],[5,1,1],[5,1,1],[5,8,6],[5,1,4],[5,9,3],[5,11,4],[5,6,1],[5,6,1],[5,2,1],[5,5,1],[5,84,1],[5,2,33],[5,8,1],[5,6,3],[5,5,3],[5,2,1],[5,10,2],[5,3,1],[5,68,9],[5,6,2],[5,21,11],[5,3,4],[5,3,1],[5,16,3],[5,2,2],[5,2,1],[5,14,2],[5,24,2],[5,19,1],[5,1,4],[5,1,1],[5,3,1],[5,6,1],[5,2,1],[5,5,2],[5,4,3],[5,26,3],[5,2,1],[5,6,4],[5,2,1],[5,6,3],[5,5,1],[5,8,3],[5,1,3],[5,9,1],[5,1,2],[5,11,2],[5,23,1],[5,7,1],[5,2,2],[5,3,2],[5,2,1],[5,11,2],[5,8,2],[5,1,1],[5,4,1],[5,2,1],[5,7,1],[5,11,1],[5,1,1],[5,33,1],[5,4,1],[5,5,1],[5,17,3],[5,1,2],[5,18,2],[5,1,2],[5,1,1],[5,2,3],[5,4,2],[5,2,1],[5,13,7],[5,5,1],[5,19,4],[5,23,9],[5,11,6],[5,7,2],[5,10,1],[5,2,1],[5,26,1],[5,3,3],[5,3,2],[5,3,2],[5,15,3],[5,2,1],[5,3,1],[5,4,1],[5,8,1],[5,4,1],[5,23,1],[5,6,1],[5,1,3],[5,124,17],[5,1,1],[5,1,1],[5,15,1],[5,11,2],[5,2,1],[5,2,2],[5,3,2],[5,1,1],[5,6,4],[5,6,1],[5,3,3],[5,6,5],[5,17,1],[5,7,2],[5,5,1],[5,11,1],[5,3,2],[5,36,2],[5,17,7],[5,4,1],[5,7,2],[5,2,1],[5,2,1],[5,2,1],[5,7,10],[5,4,1],[5,1,3],[5,19,2],[5,2,2],[5,3,1],[5,8,3],[5,4,1],[5,15,1],[5,2,3],[5,13,2],[5,1,3],[5,7,1],[5,23,48],[5,9,1],[5,12,10],[5,16,1],[5,10,1],[5,7,5],[5,2,1],[5,3,1],[5,23,2],[5,4,1],[5,18,1],[5,13,2],[5,54,136],[5,6,2],[5,2,2],[5,5,1],[5,6,1],[5,15,8],[5,14,9],[5,4,1],[5,7,2],[5,3,3],[5,117,5],[5,25,8],[5,14,4],[5,25,3],[5,7,1],[5,7,1],[5,15,3],[5,3,2],[5,4,1],[5,6,4],[5,14,4],[5,7,1],[5,20,1],[5,6,5],[5,12,1],[5,9,3],[5,2,1],[5,4,20],[5,4,3],[5,1,1],[5,1,1],[5,8,1],[5,4,1],[5,1,1],[5,6,3],[5,19,1],[5,14,1],[5,22,2],[5,2,1],[5,11,2],[5,1,1],[5,10,1],[5,4,1],[5,23,3],[5,3,1],[5,15,1],[5,8,4],[5,11,4],[5,4,1],[5,2,1],[5,8,6],[5,2,4],[5,2,7],[5,3,2],[5,2,1],[5,1,1],[5,1,1],[5,11,2],[5,4,10],[5,11,4],[5,110,4],[5,6,1],[5,2,1],[5,96,34],[6,4,1],[6,7,3],[6,2,1],[6,6,2],[6,10,1],[6,2,1],[6,10,1],[6,59,2],[6,7,4],[6,4,2],[6,3,1],[6,6,1],[6,1,4],[6,7,3],[6,2,3],[6,1,1],[6,12,1],[6,1,39],[6,28,1],[6,3,4],[6,8,3],[6,4,4],[6,9,2],[6,15,1],[6,10,1],[6,1,1],[6,2,1],[6,7,1],[6,2,1],[6,93,1],[6,14,6],[6,2,2],[6,55,39],[6,15,2],[6,23,3],[6,3,3],[6,35,2],[6,5,15],[6,1,7],[6,8,19],[6,10,10],[6,3,2],[6,6,3],[6,1,2],[6,6,1],[6,2,1],[6,4,1],[6,127,20],[6,20,18],[6,3,1],[6,9,2],[6,2,3],[6,10,1],[6,27,1],[6,9,1],[6,9,1],[6,28,1],[6,1,1],[6,10,1],[6,11,1],[6,5,1],[6,4,1],[6,82,35],[6,2,1],[6,1,1],[6,3,1],[6,2,1],[6,2,11],[6,2,8],[6,3,2],[6,12,3],[6,5,6],[6,42,4],[6,8,1],[6,2,1],[6,2,2],[6,10,3],[6,6,2],[6,48,2],[6,2,3],[6,2,2],[6,2,1],[6,4,1],[6,10,1],[6,1,1],[6,7,1],[6,35,1],[6,17,1],[6,21,2],[6,1,1],[6,4,2],[6,25,1],[6,7,2],[6,12,4],[6,2,6],[6,24,4],[6,2,1],[6,5,1],[6,2,1],[6,2,1],[6,3,2],[6,4,2],[6,2,1],[6,2,1],[6,2,9],[6,2,2],[6,5,1],[6,8,10],[6,1,1],[6,12,2],[6,10,1],[6,4,2],[6,12,4],[6,1,3],[6,3,2],[6,8,1],[6,4,4],[6,12,5],[6,4,2],[6,10,1],[6,1,1],[6,12,1],[6,6,4],[6,2,1],[6,3,2],[6,1,1],[6,3,5],[6,6,1],[6,32,1],[6,10,1],[6,6,5],[6,27,2],[6,7,1],[6,2,1],[6,10,2],[6,5,1],[6,8,2],[6,3,2],[6,9,2],[6,22,1],[6,2,2],[6,10,1],[6,3,4],[6,1,1],[6,3,6],[6,8,2],[6,44,1],[6,1,1],[6,9,7],[6,9,5],[6,19,4],[6,7,1],[6,1,1],[6,10,1],[6,14,2],[6,4,3],[6,4,1],[6,6,1],[6,3,1],[6,4,1],[6,6,3],[6,6,2],[6,6,1],[6,1,3],[6,12,13],[6,3,2],[6,1,4],[6,15,1],[6,39,4],[6,5,1],[6,1,5],[6,11,3],[6,5,7],[6,9,2],[6,1,1],[6,12,1],[6,12,1],[6,1,4],[6,11,1],[6,3,1],[6,6,2],[6,5,2],[6,2,1],[6,1,2],[6,2,1],[6,41,23],[6,3,1],[6,15,1],[6,1,1],[6,1,1],[6,2,2],[6,3,1],[6,10,1],[6,17,6],[6,5,2],[6,30,1],[7,2,2],[7,10,2],[7,8,3],[7,9,4],[7,4,1],[7,8,1],[7,2,1],[7,7,134
],[7,16,1],[7,5,3],[7,3,1],[7,6,2],[7,1,1],[7,5,1],[7,5,1],[7,2,1],[7,24,1],[7,8,4],[7,9,2],[7,1,1],[7,6,2],[7,9,2],[7,1,1],[7,5,28],[7,1,1],[7,2,2],[7,7,2],[7,11,1],[7,2,1],[7,17,32],[7,5,1],[7,2,1],[7,3,2],[7,7,4],[7,15,3],[7,3,1],[7,6,2],[7,1,1],[7,2,1],[7,1,1],[7,1,11],[7,2,1],[7,8,1],[7,6,1],[7,2,1],[7,57,1],[7,20,46],[7,6,2],[7,6,1],[7,1,2],[7,28,7],[7,3,5],[7,4,1],[7,4,6],[7,2,2],[7,3,3],[7,2,3],[7,2,1],[7,1,1],[7,2,6],[7,4,1],[7,3,1],[7,23,1],[7,7,2],[7,7,1],[7,4,3],[7,2,1],[7,1,1],[7,4,2],[7,15,2],[7,6,1],[7,2,1],[7,14,1],[7,1,1],[7,1,1],[7,4,2],[7,2,1],[7,4,1],[7,2,1],[7,4,3],[7,22,1],[7,10,1],[7,2,1],[7,1,2],[7,7,2],[7,1,2],[7,12,1],[7,3,1],[7,2,4],[7,3,8],[7,2,1],[7,6,1],[7,5,3],[7,8,2],[7,5,1],[7,6,1],[7,6,1],[7,5,1],[7,9,5],[7,3,1],[7,3,2],[7,3,19],[7,28,3],[7,2,2],[7,3,1],[7,51,4],[7,2,1],[7,2,1],[7,22,2],[7,5,1],[7,2,1],[7,4,2],[7,2,1],[7,6,2],[7,6,1],[7,3,1],[7,37,1],[7,9,1],[7,8,2],[7,2,1],[7,4,1],[7,2,1],[7,18,1],[7,9,2],[7,1,1],[7,5,1],[7,2,1],[7,13,1],[7,45,1],[7,1,3],[7,7,5],[7,16,1],[7,7,1],[7,1,1],[7,3,1],[7,8,1],[7,1,1],[7,1,4],[7,2,2],[7,6,1],[7,6,1],[7,2,1],[7,16,1],[7,11,1],[7,1,1],[7,2,1],[7,3,2],[7,8,8],[7,33,1],[7,2,8],[7,4,1],[7,6,7],[7,12,3],[7,17,1],[7,9,5],[7,3,2],[7,3,2],[7,4,1],[7,1,1],[7,2,2],[7,6,1],[8,9,1],[8,79,3],[8,3,1],[8,14,4],[8,2,4],[8,10,5],[8,7,3],[8,8,1],[8,6,1],[8,7,1],[8,8,2],[8,9,1],[8,30,2],[8,1,1],[8,1,5],[8,15,2],[8,10,3],[8,5,3],[8,1,2],[8,3,1],[8,16,1],[8,3,1],[8,3,3],[8,3,4],[8,2,1],[8,6,2],[8,4,4],[8,5,3],[8,8,4],[8,8,3],[8,4,3],[8,13,7],[8,2,1],[8,2,1],[8,1,1],[8,4,1],[8,10,3],[8,16,9],[8,3,2],[8,1,2],[8,2,5],[8,5,2],[8,156,14],[8,1,1],[8,5,1],[8,252,690],[8,5,1],[8,25,21],[8,1,1],[8,39,12],[8,1,4],[8,6,1],[8,25,7],[8,1,1],[8,7,1],[8,46,11],[8,3,1],[8,1,1],[8,14,1],[8,24,1],[8,16,3],[8,6,3],[8,5,1],[8,1,2],[8,12,2],[8,2,1],[8,2,5],[8,6,1],[8,6,1],[8,14,1],[8,7,1],[8,6,1],[8,4,6],[8,1,2],[8,3,1],[8,2,14],[8,7,12],[8,2,2],[8,25,15],[8,8,3],[8,6,6],[8,5,1],[8,1,1],[8,2,3],[8,18,3],[8,2,2],[8,3,1],[8,4,1],[8,3,3],[8,4,2],[8,12,2],[8,1,1],[8,4,1],[8,18,1],[8,2,2],[8,11,3],[8,5,1],[8,6,1],[8,13,1],[8,6,1],[8,23,1],[8,18,3],[8,13,2],[8,4,1],[8,38,4],[8,1,1],[8,6,1],[8,10,2],[8,2,7],[8,10,7],[8,1,1],[8,4,7],[8,2,1],[8,2,2],[8,7,1],[8,17,1],[8,10,5],[8,4,4],[8,8,4],[8,3,2],[8,2,1],[8,33,1],[8,8,6],[8,15,1],[8,2,1],[8,7,4],[8,6,3],[8,2,1],[8,1,2],[8,3,1],[8,4,1],[8,4,2],[8,27,1],[8,10,1],[9,8,2],[9,2,2],[9,7,1],[9,11,1],[9,35,5],[9,3,1],[9,2,2],[9,6,7],[9,16,2],[9,7,15],[9,3,1],[9,9,1],[9,5,1],[9,3,1],[9,3,1],[9,4,1],[9,2,5],[9,1,1],[9,5,4],[9,1,1],[9,13,1],[9,14,4],[9,3,1],[9,35,3],[9,41,1],[9,8,3],[9,2,5],[9,8,2],[9,13,3],[9,10,1],[9,4,1],[9,35,12],[9,9,1],[9,12,1],[9,4,1],[9,2,4],[9,1,2],[9,6,4],[9,1,4],[9,20,3],[9,4,3],[9,3,3],[9,1,4],[9,2,11],[9,11,2],[9,19,1],[9,5,1],[9,6,2],[9,1,1],[9,3,1],[9,15,3],[9,2,1],[9,6,1],[9,13,1],[9,2,1],[9,11,2],[9,3,5],[9,6,1],[9,16,1],[9,4,1],[9,3,2],[9,3,1],[9,2,5],[9,13,1],[9,3,1],[9,2,2],[9,7,1],[9,2,3],[9,3,4],[9,5,1],[9,4,1],[9,10,2],[9,36,1],[9,7,2],[9,3,1],[9,4,2],[9,5,5],[9,12,1],[9,4,1],[9,2,2],[9,12,1],[9,13,1],[9,12,1],[9,2,4],[9,1,1],[9,1,2],[9,6,6],[9,1,2],[9,8,4],[9,7,2],[9,15,4],[10,3,25],[10,2,1],[10,4,2],[10,8,1],[10,2,1],[10,1,1],[10,21,1],[10,21,19],[10,4,4],[10,4,8],[10,2,1],[10,1,3],[10,3,5],[10,6,1],[10,8,5],[10,4,1],[10,24,5],[10,2,2],[10,24,1],[10,6,4],[10,1,2],[10,25,1],[10,14,1],[10,6,3],[10,2,3],[10,6,1],[10,15,2],[10,54,3],[10,12,1],[10,21,1],[10,7,1],[10,4,4],[10,5,1],[10,10,3],[10,37,1],[10,8,3],[10,11,1],[10,2,4],[10,6,1],[10,30,1],[10,35,1],[10,4,2],[10,2,1],[10,5,2],[10,6
,1],[10,4,4],[10,12,1],[10,12,1],[10,44,4],[10,16,3],[10,1,64],[10,27,1],[10,9,3],[10,17,2],[10,25,2],[10,2,2],[10,7,3],[10,89,1],[10,7,30],[10,2,4],[10,2,3],[10,2,1],[10,3,3],[10,11,1],[10,7,1],[10,2,1],[10,4,2],[10,1,1],[10,1,1],[10,6,2],[10,7,3],[10,4,1],[10,2,2],[10,18,1],[10,4,1],[10,19,1],[10,14,6],[10,5,1],[10,5,6],[10,12,1],[11,5,6],[11,15,8],[11,9,1],[11,3,2],[11,6,3],[11,24,4],[11,27,3],[11,2,2],[11,5,9],[11,13,1],[11,3,1],[11,2,25],[11,10,1],[11,4,11],[11,7,2],[11,49,1],[11,4,1],[11,12,1],[11,7,1],[11,1,2],[11,10,6],[11,2,1],[11,4,2],[11,1,2],[11,2,1],[11,5,1],[11,4,3],[11,1,1],[11,6,1],[11,4,3],[11,95,2],[11,8,1],[11,18,1],[11,5,1],[11,16,12],[11,13,2],[11,7,6],[11,56,1],[11,6,1],[11,8,1],[11,21,14],[11,2,7],[11,5,1],[11,1,1],[11,5,2],[11,2,1],[11,15,1],[11,3,3],[11,26,1],[11,6,6],[11,1,1],[11,10,7],[11,6,3],[11,6,1],[11,8,2],[11,1,2],[11,35,2],[11,19,2],[11,8,2],[11,4,1],[11,7,2],[11,4,5],[11,3,5],[11,17,1],[11,3,3],[11,2,1],[11,12,1],[11,2,8],[11,85,1],[11,4,1],[11,9,1],[11,2,2],[11,2,1],[11,6,2],[11,6,3],[11,18,3],[11,1,1],[11,8,1],[11,22,1],[11,7,1],[11,4,2],[11,4,1],[11,8,3],[11,10,4],[11,24,1],[11,10,19],[11,12,8],[12,5,1],[12,1,7],[12,4,1],[12,21,6],[12,12,2],[12,16,1],[12,1,1],[12,2,1],[12,3,1],[12,8,9],[12,1,1],[12,17,2],[12,16,6],[12,14,1],[12,3,3],[12,27,3],[12,2,1],[12,3,3],[12,14,4],[12,1,3],[12,10,1],[12,5,7],[12,7,3],[12,13,5],[12,4,1],[12,47,4],[12,18,1],[12,31,2],[12,8,1],[12,5,4],[12,1,1],[12,26,1],[12,13,2],[12,5,2],[12,4,3],[12,15,5],[12,2,1],[12,2,1],[12,3,1],[12,5,1],[12,11,1],[12,4,3],[12,1,1],[12,7,2],[12,6,1],[12,14,6],[12,32,4],[12,14,1],[12,31,1],[12,7,3],[12,9,7],[12,5,1],[12,6,1],[12,6,6],[12,7,8],[12,2,1],[12,3,1],[12,4,3],[12,1,1],[12,19,2],[12,11,1],[12,7,2],[12,8,1],[12,15,4],[12,5,1],[12,9,3],[12,2,1],[12,1,1],[12,8,9],[12,3,6],[12,15,1],[13,1,11],[13,7,2],[13,10,1],[13,13,4],[13,3,2],[13,1,2],[13,2,1],[13,3,4],[13,3,1],[13,4,3],[13,5,1],[13,10,13],[13,5,4],[13,2,3],[13,3,2],[13,72,2],[13,7,3],[13,19,2],[13,4,1],[13,5,6],[13,4,2],[13,2,1],[13,2,1],[13,34,11],[13,5,2],[13,9,5],[13,6,2],[13,5,5],[13,9,5],[13,9,1],[13,19,3],[13,4,1],[13,3,1],[13,7,2],[13,1,1],[13,11,7],[13,4,7],[13,6,1],[13,2,1],[13,1,1],[13,21,1],[13,6,15],[13,5,2],[13,1,1],[13,1,2],[14,2,1],[14,18,1],[14,8,2],[14,5,1],[14,2,2],[14,5,2],[14,2,1],[14,8,2],[14,4,1],[14,8,5],[14,14,1],[14,9,6],[14,18,2],[14,4,1],[14,6,1],[14,18,1],[14,6,6],[14,4,1],[14,6,2],[14,6,8],[14,3,1],[14,2,3],[14,1,1],[14,17,4],[14,4,3],[14,15,3],[14,4,8],[14,15,2],[14,6,1],[14,9,22],[14,7,3],[14,7,6],[14,2,2],[14,1,1],[14,7,4],[14,10,1],[14,1,1]])
#data = 
np.array([[26,2],[18,3],[30,4], ...])  # truncated raw data dump: a very large array of two-element integer pairs, cut off mid-literal
2],[1,1],[1,1],[1,1],[1,2],[1,3],[1,10],[1,3],[1,7],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,3],[1,2],[1,2],[1,1],[1,3],[1,2],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,43],[1,23],[1,2],[1,4],[1,33],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,7],[1,2],[1,4],[1,6],[1,1],[1,1],[1,1],[1,2],[1,7],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,136],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,3],[1,2],[1,1],[1,1],[1,1],[1,20],[2,1],[2,1],[2,16],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,3],[2,2],[2,1],[2,1],[2,2],[2,7],[2,2],[2,1],[2,2],[2,114],[2,1],[2,3],[2,4],[2,1],[2,4],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,6],[2,2],[2,1],[2,1],[2,1],[2,4],[2,1],[2,1],[2,1],[2,4],[2,1],[2,1],[2,1],[2,4],[2,2],[2,4],[2,3],[2,2],[2,1],[3,2],[3,1],[3,1],[3,5],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,8],[3,2],[3,1],[3,2],[3,28],[3,1],[3,118],[3,1],[3,1],[3,2],[3,2],[3,3],[3,8],[3,3],[4,1],[4,2],[4,4],[4,1],[4,1],[4,1],[4,1],[4,1],[4,2],[4,2],[4,1],[4,1],[4,3],[4,1],[4,3],[4,1],[4,1],[4,1],[5,2],[5,1],[5,6],[5,1],[5,4],[5,2],[5,4],[5,1],[5,4],[6,4],[6,1],[6,3],[6,1],[6,2],[6,1],[7,1],[7,3],[7,1],[7,46],[7,2],[7,1],[8,3],[8,6],[8,1],[8,5],[9,12],[9,1],[9,5],[10,3],[10,3],[11,3],[11,7],[12,3],[12,1],[12,1],[13,1],[13,1],[13,2],[13,13],[13,1],[14,1],[14,1],[58,2],[112,1],[18,3],[19,1],[20,1],[18,1],[15,2],[92,1],[50,1],[40,1],[57,5],[19,2],[19,1],[15,4],[16,5],[54,1],[15,1],[1,2],[1,6],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,6],[1,7],[1,1],[1,2],[1,2],[1,3],[1,1],[1,1],[1,1],[1,1],[1,11],[1,3],[1,6],[1,1],[1,1],[1,6],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,12],[1,1],[1,1],[1,1],[1,4],[1,1],[1,4],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,5],[1,2],[1,1],[1,1],[1,2],[1,8],[1,2],[1,1],[1,1],[1,2],[1,1],[1,19],[1,1],[1,1],[1,4],[1,1],[1,4],[1,2],[1,1],[1,4],[1,1],[1,1],[1,2],[1,1],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,3],[1,5],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,1],[1,2],[1,4],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,5],[1,1],[1,2],[1,3],[1,9],[1,26],[1,3],[1,17],[1,1],[1,2],[1,1],[1,5],[1,4],[1,1],[1,1],[1,2],[1,1],[1,3],[1,2],[1,8],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,30],[2,1],[2,4],[2,1],[2,2],[2,1],[2,1],[2,2],[2,3],[2,4],[2,2],[2,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,2],[2,7],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,10],[2,4],[2,1],[2,1],[2,1],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,3],[2,3],[2,7],[2,1],[2,1],[2,2],[2,5],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,4],[2,2],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,3],[3,1],[3,3],[3,1],[3,1],[3,1],[3,2],[3,29],[3,2],[4,2],[4,1],[4,3],[4,1],[4,1],[4,1],[4,1],[4,1],[4,2],[4,1],[4,3],[4,1],[5,2],[5,1],[5,1],[5,4],[5,1],[5,1],[5,2],[5,1],[5,1],[5,3],[6,4],[6,1],[6,1],[6,3],[6,2],[6,2],[6,1],[6,1],[6,1],[6,2],[7,2],[7,3],[7,2],[7,1],[7,2],[8,1],[8,1],[8,4],[8,1],[8,3],[9,1],[9,5],[9,1],[9,1],[9,1],[11,1],[11,2],[11,2],[11,3],[12,7],[12,1],[13,1],[14,2],[16,1],[78,3],[17,3],[27,3],[19,2],[67,3],[16,3],[58,3],[17,1],[29,2],[29,1],[23,1],[390,2],[75,2],[26,8],[20,3],[19,2],[16,4],[33,1],[66,2],[20,1],[17,5],[1,1],[1,2],[1,1],[1,1],[1,9],[1,4],[1,2],[1,3],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,3],[1,1],[1,1],[1,2],[1,1],[1,4],[1,2],[1,1],[1,1],[1,1],[1,4],[1,5],[1,11],[1,1],[1,4],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,3],[1,4],[1,1],[1,2],[1,3],[1,1],[1,1],[1,3],[1,1],[1,7],[1
,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,8],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,6],[1,1],[1,1],[1,6],[1,2],[1,1],[1,11],[1,3],[1,1],[1,2],[1,4],[1,4],[1,1],[1,11],[1,7],[1,3],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,14],[1,1],[1,1],[1,1],[1,4],[1,1],[1,2],[1,3],[1,6],[1,1],[1,1],[1,3],[1,3],[1,2],[1,2],[1,7],[1,5],[1,2],[1,7],[1,7],[1,1],[1,3],[1,2],[1,4],[1,4],[1,3],[1,1],[1,1],[1,4],[1,2],[1,1],[1,1],[1,5],[1,3],[1,1],[1,124],[1,2],[1,6],[1,1],[1,1],[2,1],[2,4],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,5],[2,21],[2,2],[2,1],[2,2],[2,1],[2,2],[2,1],[2,1],[2,7],[2,31],[2,1],[2,2],[2,4],[2,1],[2,3],[2,125],[2,1],[2,8],[2,1],[2,4],[2,2],[2,2],[2,1],[2,1],[2,1],[2,4],[2,5],[2,1],[2,2],[2,2],[2,1],[2,1],[2,1],[2,8],[2,1],[2,12],[2,278],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[3,1],[3,2],[3,1],[3,1],[3,1],[3,2],[3,3],[3,1],[3,1],[3,1],[3,1],[3,3],[3,2],[3,1],[3,1],[3,3],[3,1],[3,3],[3,1],[3,3],[3,1],[3,2],[3,3],[3,1],[4,2],[4,8],[4,1],[4,3],[4,3],[4,1],[4,3],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,2],[4,1],[4,3],[5,1],[5,1],[5,1],[5,2],[5,2],[5,2],[5,1],[6,2],[6,2],[6,24],[6,2],[6,2],[6,20],[6,1],[6,1],[6,3],[6,1],[6,4],[6,5],[6,3],[7,2],[7,1],[7,4],[7,1],[7,1],[7,1],[7,1],[7,1],[7,1],[7,134],[8,1],[8,1],[8,5],[8,1],[8,6],[9,3],[9,15],[10,4],[10,3],[10,1],[11,12],[11,2],[12,2],[12,2],[14,1],[14,6],[15,3],[30,2],[35,1],[28,1],[111,1],[22,1],[25,1],[18,1],[40,4],[58,1],[295,4],[18,3],[35,1],[16,1],[1,1],[1,1],[1,2],[1,1],[1,6],[1,6],[1,2],[1,1],[1,301],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,1],[1,5],[1,1],[1,2],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,3],[1,5],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,3],[1,2],[1,1],[1,7],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,5],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,17],[1,1],[1,1],[1,2],[1,2],[1,4],[1,3],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[1,3],[1,3],[1,2],[1,1],[1,23],[1,1],[1,1],[1,1],[1,1],[1,3],[1,4],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,4],[1,4],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,3],[1,4],[1,1],[1,1],[1,1],[1,2],[1,1],[1,3],[1,2],[1,2],[1,1],[1,1],[1,3],[1,15],[1,4],[1,1],[1,1],[1,3],[1,3],[1,1],[1,2],[1,2],[1,6],[1,1],[1,2],[1,1],[1,2],[1,2],[1,2],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,10],[2,3],[2,1],[2,1],[2,2],[2,7],[2,1],[2,1],[2,4],[2,1],[2,2],[2,1],[2,2],[2,2],[2,1],[2,1],[2,3],[2,6],[2,1],[2,1],[2,46],[2,1],[2,3],[2,1],[2,4],[2,1],[2,1],[2,1],[2,1],[2,2],[2,4],[2,4],[2,3],[3,11],[3,1],[3,1],[3,1],[3,1],[3,2],[3,1],[3,2],[3,4],[3,1],[3,1],[3,1],[3,3],[3,2],[3,1],[3,2],[3,2],[3,2],[3,1],[3,3],[3,1],[3,2],[3,2],[3,4],[3,1],[3,45],[3,2],[4,11],[4,2],[4,1],[4,2],[4,4],[4,14],[4,4],[4,2],[4,2],[4,1],[5,3],[5,1],[5,1],[5,2],[5,1],[5,2],[5,3],[5,2],[5,1],[5,2],[5,2],[6,1],[6,1],[6,3],[6,2],[6,1],[6,3],[6,1],[6,6],[7,1],[7,2],[7,1],[8,1],[8,2],[8,1],[8,1],[8,1],[8,2],[8,2],[8,2],[9,5],[9,2],[10,1],[10,1],[10,3],[11,8],[11,1],[12,5],[12,1],[14,1]])\n #data = 
np.array([[26,2],[18,3],[30,4],[19,2],[21,1],[40,1],[17,3],[20,3],[19,3],[15,4],[246,1],[57,2],[16,2],[44,101],[31,1],[19,2],[35,2],[25,1],[28,1],[82,1],[52,11],[19,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,6],[1,1],[1,4],[1,1],[1,7],[1,9],[1,1],[1,2],[1,4],[1,2],[1,2],[1,2],[1,1],[1,1],[1,1],[1,9],[1,1],[1,1],[1,1],[1,2],[1,6],[1,1],[1,2],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,13],[1,1],[1,4],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,7],[1,2],[1,1],[1,5],[1,1],[1,1],[1,1],[1,2],[1,4],[1,2],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,3],[1,1],[1,4],[1,3],[1,1],[1,1],[1,2],[1,1],[1,4],[1,3],[1,2],[1,3],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,3],[1,2],[1,1],[1,1],[1,2],[1,3],[1,1],[1,2],[1,1],[1,1],[1,3],[1,37],[1,1],[1,2],[1,1],[1,1],[1,50],[1,1],[1,1],[1,1],[1,8],[1,1],[1,1],[1,1],[1,6],[1,2],[1,3],[1,3],[1,1],[1,3],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,2],[1,15],[1,2],[1,1],[1,1],[1,1],[1,3],[1,2],[1,1],[1,1],[1,1],[1,2],[1,2],[1,2],[1,9],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,12],[2,3],[2,3],[2,1],[2,1],[2,1],[2,4],[2,1],[2,5],[2,1],[2,1],[2,1],[2,1],[2,2],[2,2],[2,1],[2,3],[2,2],[2,1],[2,13],[2,1],[2,2],[2,1],[2,2],[2,1],[2,1],[2,1],[2,8],[2,3],[2,1],[2,1],[2,13],[2,2],[2,1],[2,2],[2,3],[2,1],[2,1],[3,1],[3,2],[3,5],[3,1],[3,1],[3,11],[3,3],[3,1],[3,1],[3,6],[3,1],[3,3],[3,1],[3,2],[3,4],[3,2],[3,2],[3,1],[3,2],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[4,1],[4,2],[4,2],[4,9],[4,1],[4,1],[4,5],[4,1],[4,16],[4,1],[4,2],[4,1],[4,1],[4,1],[4,6],[4,2],[4,2],[5,2],[5,2],[5,2],[5,2],[5,3],[5,1],[6,3],[6,1],[6,4],[6,1],[7,1],[7,1],[7,2],[7,1],[7,1],[8,7],[8,1],[8,1],[9,1],[9,3],[9,2],[9,1],[10,1],[10,11],[11,1],[11,2],[12,4],[13,11],[13,2],[14,3],[22,1],[39,3],[107,1],[46,6],[22,1],[15,1],[29,45],[29,1],[35,1],[23,2],[21,1],[17,1],[57,1],[20,1],[19,4],[24,1],[18,2],[61,2],[51,12],[41,3],[1,1],[1,1],[1,3],[1,1],[1,1],[1,4],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,6],[1,2],[1,1],[1,4],[1,2],[1,2],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,4],[1,3],[1,1],[1,1],[1,1],[1,1],[1,3],[1,3],[1,1],[1,1],[1,1],[1,3],[1,3],[1,2],[1,4],[1,7],[1,3],[1,1],[1,15],[1,2],[1,1],[1,2],[1,2],[1,2],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,3],[1,1],[1,1],[1,4],[1,4],[1,2],[1,2],[1,1],[1,4],[1,2],[1,5],[1,1],[1,1],[1,1],[1,1],[1,5],[1,8],[1,1],[1,1],[1,2],[1,2],[1,134],[1,45],[1,1],[1,1],[1,5],[1,1],[1,1],[1,1],[1,1],[1,4],[1,6],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[1,6],[1,1],[1,19],[1,4],[1,2],[1,1],[1,1],[1,2],[1,4],[1,1],[1,1],[1,19],[1,3],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,4],[1,1],[1,5],[1,3],[1,6],[1,2],[1,1],[1,3],[1,2],[1,2],[1,1],[1,2],[1,1],[1,26],[1,4],[1,1],[1,3],[1,1],[1,1],[1,2],[1,2],[1,2],[1,1],[1,1],[1,1],[1,5],[1,4],[1,1],[1,27],[1,1],[1,1],[1,1],[1,11],[1,2],[1,4],[1,1],[1,1],[1,24],[1,2],[1,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,15],[2,1],[2,1],[2,1],[2,3],[2,1],[2,5],[2,1],[2,4],[2,1],[2,1],[2,5],[2,2],[2,1],[2,1],[2,2],[2,1],[2,3],[2,4],[2,1],[2,3],[2,1],[2,2],[2,17],[2,4],[2,2],[2,7],[2,2],[2,1],[3,1],[3,3],[3,1],[3,1],[3,1],[3,1],[3,2],[3,1],[3,1],[3,3],[3,1],[3,18],[3,1],[3,1],[3,1],[3,6],[3,8],[3,1],[3,1],[3,2],[3,2],[3,1],[4,1],[4,3],[4,1],[4,1],[4,1],[4,4],[4,1],[4,20],[4,2],[4,4],[4,2],[4,1],[4,3],[4,1],[4,1],[4,1],[4,1],[4,3],[4,4],[4,2],[4,2],[4,1],[4,1],[5,3],[5,1],[5,1],[6,1],[6,8],[7,1],[7,1],[7,5],[8,21],[8,1],[8,1],[8,2],[9,1],[10,30],[10,2],[10,3],[10,1],[11,1],
[11,2],[11,1],[11,1],[12,1],[12,3],[12,6],[13,1],[13,2],[13,1],[14,1],[14,2],[17,1],[52,1],[64,1],[190,2],[25,3],[19,3],[22,1],[15,2],[25,1],[25,2],[38,1],[69,1],[1,1],[1,4],[1,1],[1,21],[1,1],[1,3],[1,11],[1,31],[1,1],[1,4],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,6],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,5],[1,2],[1,2],[1,212],[1,6],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,3],[1,1],[1,3],[1,4],[1,1],[1,2],[1,1],[1,1],[1,5],[1,1],[1,1],[1,1],[1,2],[1,4],[1,1],[1,1],[1,1],[1,1],[1,3],[1,3],[1,1],[1,2],[1,2],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,6],[1,1],[1,3],[1,7],[1,2],[1,5],[1,3],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,9],[1,1],[1,2],[1,2],[1,3],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,78],[1,3],[1,7],[1,1],[1,1],[1,1],[1,3],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,2],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,8],[1,3],[1,2],[1,1],[1,4],[1,2],[1,1],[1,1],[1,1],[1,1],[2,1],[2,4],[2,1],[2,1],[2,1],[2,3],[2,2],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,3],[2,8],[2,1],[2,1],[2,5],[2,2],[2,1],[2,6],[2,1],[2,4],[2,2],[2,2],[2,1],[2,2],[2,1],[2,1],[2,30],[2,3],[2,5],[2,4],[2,3],[2,1],[2,1],[3,1],[3,2],[3,1],[3,11],[3,1],[3,1],[3,8],[3,2],[3,1],[3,4],[3,3],[3,2],[3,3],[3,1],[3,3],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[4,8],[4,1],[4,2],[4,1],[4,2],[4,1],[4,3],[4,1],[4,2],[4,7],[4,1],[4,1],[4,1],[4,1],[4,7],[5,1],[5,1],[5,2],[5,2],[5,1],[5,11],[5,1],[5,1],[5,1],[5,1],[5,2],[5,1],[5,2],[5,8],[5,1],[6,2],[6,8],[6,1],[6,1],[6,1],[6,2],[6,1],[6,2],[6,1],[7,1],[7,3],[7,1],[7,2],[7,6],[7,2],[8,1],[8,6],[8,15],[9,2],[10,3],[10,1],[10,1],[10,2],[10,5],[10,2],[10,64],[11,1],[11,1],[11,1],[12,1],[12,6],[12,1],[12,2],[14,4],[14,1],[17,1],[21,1],[17,1],[32,1],[16,1],[18,5],[17,1],[16,1],[17,2],[262,1],[22,1],[227,5],[82,4],[28,3],[56,7],[42,2],[26,1],[137,1],[55,19],[29,1],[42,2],[1,5],[1,1],[1,2],[1,22],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,4],[1,2],[1,3],[1,1],[1,4],[1,1],[1,2],[1,4],[1,1],[1,2],[1,2],[1,1],[1,2],[1,2],[1,5],[1,7],[1,2],[1,2],[1,1],[1,1],[1,7],[1,1],[1,1],[1,1],[1,2],[1,3],[1,16],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,5],[1,1],[1,1],[1,6],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,4],[1,28],[1,6],[1,1],[1,2],[1,2],[1,2],[1,1],[1,4],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,2],[1,1],[1,1],[1,16],[1,1],[1,2],[1,3],[1,1],[1,1],[1,3],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,5],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,7],[1,1],[1,1],[1,2],[1,2],[1,4],[1,3],[1,4],[1,1],[1,1],[1,2],[1,5],[1,1],[1,1],[1,5],[1,2],[1,2],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[2,5],[2,5],[2,4],[2,2],[2,32],[2,1],[2,1],[2,4],[2,3],[2,1],[2,1],[2,1],[2,45],[2,3],[2,11],[2,1],[2,1],[2,2],[2,1],[2,4],[2,2],[2,1],[2,2],[2,2],[2,2],[2,1],[2,2],[2,3],[2,1],[2,8],[2,2],[2,2],[2,1],[2,2],[2,2],[2,1],[2,7],[2,4],[2,2],[2,4],[2,1],[2,8],[3,1],[3,1],[3,1],[3,3],[3,4],[3,1],[3,10],[3,6],[3,1],[3,1],[3,1],[3,2],[3,4],[3,4],[3,1],[3,1],[3,7],[3,2],[3,5],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,2],[3,3],[3,1],[3,1],[3,19],[4,1],[4,1],[4,1],[4,1],[4,1],[4,3],[4,1],[4,1],[4,2],[4,1],[4,9],[4,4],[4,5],[4,3],[4,2],[4,3],[5,1],[5,2],[5,20],[5,1],[5,2],[5,2],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,4],[5,1],[6,2],[6,2],[6,1],[6,1],[6,1],[6,1],[6,1],[6,6],[6,2],[7,1],[7,1],[7,1],[7,4],[8,1],[8,5],[8,14],[9,1],[9,4],[10,1],[10,1],[10,1],[10,1],[11,6],[11,4],
[12,1],[12,2],[13,2],[13,1],[13,6],[14,2],[42,4],[264,3],[22,3],[15,6],[19,1],[46,2],[193,1],[15,1],[127,5],[47,1],[16,2],[27,1],[25,1],[19,5],[73,1],[60,1],[27,1],[19,2],[1,2],[1,1],[1,2],[1,2],[1,4],[1,2],[1,1],[1,1],[1,2],[1,1],[1,2],[1,16],[1,2],[1,3],[1,2],[1,1],[1,4],[1,20],[1,3],[1,3],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,2],[1,2],[1,2],[1,2],[1,3],[1,4],[1,1],[1,1],[1,2],[1,6],[1,1],[1,1],[1,1],[1,47],[1,2],[1,2],[1,5],[1,2],[1,2],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,16],[1,1],[1,1],[1,6],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,2],[1,1],[1,2],[1,5],[1,2],[1,7],[1,1],[1,1],[1,4],[1,3],[1,1],[1,1],[1,2],[1,14],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,3],[1,4],[1,5],[1,1],[1,1],[1,1],[1,17],[1,71],[1,1],[1,1],[1,1],[1,79],[1,1],[1,2],[1,4],[1,2],[1,1],[1,1],[1,3],[1,4],[1,1],[1,1],[1,7],[1,1],[1,3],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,4],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,2],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[2,1],[2,1],[2,1],[2,4],[2,13],[2,1],[2,2],[2,2],[2,1],[2,1],[2,1],[2,2],[2,3],[2,6],[2,3],[2,1],[2,1],[2,1],[2,2],[2,17],[2,2],[2,2],[2,8],[2,1],[2,3],[2,2],[2,11],[2,1],[2,2],[2,5],[2,1],[2,1],[2,2],[2,1],[2,2],[2,2],[2,1],[2,1],[2,3],[2,4],[2,1],[2,6],[2,25],[2,1],[2,1],[2,1],[2,1],[2,2],[2,3],[2,2],[2,2],[2,1],[2,1],[3,2],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,3],[3,8],[3,5],[3,3],[3,7],[3,1],[3,1],[3,9],[3,6],[3,3],[3,2],[3,8],[3,4],[3,3],[4,1],[4,1],[4,1],[4,1],[4,1],[4,6],[4,1],[4,3],[4,2],[4,1],[4,3],[4,1],[4,2],[4,1],[4,1],[4,1],[4,1],[5,1],[5,5],[5,3],[5,2],[5,3],[5,1],[5,3],[6,1],[6,1],[6,1],[6,1],[7,1],[7,1],[7,1],[7,1],[7,32],[7,2],[7,1],[7,4],[7,1],[7,1],[7,4],[8,2],[8,2],[8,1],[8,2],[8,1],[9,1],[9,3],[9,1],[9,1],[9,1],[10,3],[11,4],[11,1],[11,1],[11,3],[11,3],[11,1],[12,1],[12,1],[12,1],[13,2],[13,1],[13,2],[14,5],[26,2],[49,1],[26,1],[18,1],[27,1],[15,1],[23,1],[58,3],[36,2],[19,3],[62,2],[72,2],[90,1],[124,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,3],[1,1],[1,2],[1,3],[1,1],[1,4],[1,2],[1,1],[1,1],[1,18],[1,1],[1,2],[1,4],[1,24],[1,1],[1,2],[1,1],[1,1],[1,4],[1,1],[1,3],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,5],[1,1],[1,1],[1,1],[1,4],[1,3],[1,1],[1,3],[1,1303],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,5],[1,2],[1,1],[1,1],[1,1],[1,1],[1,8],[1,10],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,2],[1,2],[1,3],[1,1],[1,1],[1,1],[1,1],[1,17],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,2],[1,4],[1,1],[1,1],[1,1],[1,3],[1,2],[1,1],[1,1],[1,3],[1,2],[1,1],[1,4],[1,2],[1,1],[1,2],[1,25],[1,2],[1,7],[1,1],[1,1],[1,6],[1,1],[1,3],[1,2],[1,4],[1,1],[1,1],[1,6],[1,1],[1,2],[1,3],[1,1],[1,4],[1,2],[1,3],[1,2],[1,3],[1,1],[1,1],[1,3],[1,2],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,3],[1,2],[1,1],[1,1],[2,1],[2,5],[2,1],[2,2],[2,5],[2,1],[2,1],[2,1],[2,2],[2,3],[2,2],[2,2],[2,1],[2,2],[2,6],[2,1],[2,2],[2,1],[2,3],[2,1],[2,2],[2,3],[2,13],[2,1],[2,2],[2,1],[2,3],[2,1],[2,4],[2,1],[2,2],[2,1],[2,1],[2,1],[2,3],[2,2],[2,1],[2,2],[2,3],[2,2],[2,2],[2,1],[2,1],[2,3],[2,1],[2,1],[2,5],[3,2],[3,2],[3,2],[3,5],[3,1],[3,1],[3,1],[3,1],[3,3],[3,2],[3,2],[3,1],[3,1],[3,1],[3,1],[3,5],[3,1],[3,4],[3,2],[3,1],[3,1],[3,3],[3,1],[3,1],[3,3],[4,3],[4,1],[4,2],[4,1],[4,1],[4,1],[4,1],[4,1],[5,1],[5,2],[5,9],[5,2],[5,1],[5,7],[5,2],[5,1],[5,2],[5,2],[5,1],[6,3],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,29],[6,2],[7,3],[7,2],[7,1],[7,1],[7,2],[7,2],[7
,2],[7,3],[7,2],[8,5],[8,1],[8,1],[8,3],[8,2],[8,1],[8,2],[9,1],[9,1],[10,1],[10,14],[10,3],[10,4],[10,3],[10,4],[11,1],[11,5],[11,2],[11,3],[11,1],[11,1],[11,2],[12,1],[12,1],[13,5],[13,1],[13,1],[14,1],[14,3],[14,1],[24,1],[15,1],[19,2],[15,5],[131,1],[28,13],[33,1],[24,1],[17,1],[15,1],[44,2],[16,2],[16,3],[29,7],[29,1],[82,8],[16,1],[17,2],[16,2],[45,1],[159,1],[100,2],[23,1],[15,1],[15,1],[22,1],[48,1],[25,5],[15,1],[1,1],[1,3],[1,1],[1,3],[1,1],[1,1],[1,2],[1,2],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,3],[1,2],[1,2],[1,6],[1,1],[1,2],[1,1],[1,2],[1,4],[1,44],[1,1],[1,2],[1,40],[1,1],[1,9],[1,1],[1,17],[1,1],[1,1],[1,1],[1,5],[1,1],[1,1],[1,1],[1,1],[1,2],[1,2],[1,25],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,12],[1,1],[1,2],[1,12],[1,2],[1,2],[1,5],[1,2],[1,3],[1,7],[1,5],[1,72],[1,2],[1,8],[1,1],[1,1],[1,1],[1,1],[1,2],[1,3],[1,3],[1,1],[1,2],[1,2],[1,5],[1,3],[1,2],[1,3],[1,382],[1,1],[1,3],[1,1],[1,1],[1,6],[1,4],[1,1],[1,2],[1,2],[1,1],[1,1],[1,2],[1,4],[1,1],[1,2],[1,6],[1,1],[1,3],[1,3],[1,1],[1,6],[1,3],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,7],[1,1],[1,1],[1,2],[2,1],[2,1],[2,1],[2,1],[2,12],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,3],[2,1],[2,52],[2,2],[2,1],[2,1],[2,2],[2,1],[2,2],[2,9],[2,1],[2,1],[2,18],[2,3],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,2],[2,3],[2,2],[2,2],[2,2],[2,1],[2,1],[2,1],[2,2],[2,3],[2,2],[2,1],[2,1],[2,1],[2,1],[3,6],[3,3],[3,4],[3,1],[3,1],[3,1],[3,1],[3,1],[3,4],[3,1],[3,3],[3,1],[3,1],[3,2],[3,1],[3,1],[3,80],[3,1],[3,2],[3,1],[3,1],[4,2],[4,1],[4,1],[4,1],[4,1],[4,1],[4,3],[4,1],[4,2],[4,1],[4,4],[4,4],[4,1],[4,2],[4,2],[4,1],[4,2],[4,1],[4,1],[5,1],[5,1],[5,3],[5,3],[5,1],[5,1],[5,1],[5,2],[5,1],[6,4],[6,3],[6,1],[6,6],[6,1],[6,1],[7,2],[7,1],[7,1],[7,2],[7,1],[7,2],[7,1],[7,1],[8,1],[8,4],[8,1],[8,2],[8,3],[9,2],[9,3],[9,3],[9,6],[10,1],[10,1],[10,1],[10,1],[11,8],[11,1],[11,1],[12,2],[13,5],[15,1],[35,7],[16,1],[24,2],[16,1],[25,1],[65,4],[36,1],[16,5],[21,10],[18,1],[16,12],[29,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,5],[1,3],[1,3],[1,3],[1,1],[1,4],[1,3],[1,3],[1,3],[1,1],[1,1],[1,1],[1,2],[1,5],[1,3],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,8],[1,1],[1,1],[1,1],[1,1],[1,1],[1,8],[1,2],[1,4],[1,2],[1,7],[1,1],[1,1],[1,1],[1,2],[1,1],[1,4],[1,1],[1,1],[1,1],[1,2],[1,5],[1,1],[1,4],[1,8],[1,6],[1,1],[1,4],[1,1],[1,1],[1,3],[1,1],[1,3],[1,2],[1,7],[1,2],[1,5],[1,2],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,3],[1,3],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,5],[1,1],[1,13],[1,3],[1,2],[1,1],[1,1],[1,10],[1,1],[1,2],[1,1],[1,3],[1,12],[1,2],[1,2],[1,4],[1,1],[1,1],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,4],[2,3],[2,1],[2,1],[2,1],[2,6],[2,1],[2,6],[2,1],[2,2],[2,6],[2,1],[2,10],[2,1],[2,1],[2,4],[2,1],[2,3],[2,3],[2,1],[2,1],[2,3],[2,5],[2,3],[2,10],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,3],[2,1],[2,4],[2,1],[2,1],[2,2],[2,1],[2,3],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[3,2],[3,1],[3,1],[3,1],[3,5],[3,34],[3,2],[3,3],[3,1],[3,1],[3,2],[3,1],[3,5],[3,1],[3,1],[3,2],[3,4],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,2],[3,1],[3,2],[3,25],[3,1],[3,1],[4,1],[4,6],[4,3],[4,1],[4,6],[4,1],[4,1],[4,4],[4,1],[4,1],[4,1],[4,1],[4,1],[4,2],[4,1],[4,1],[4,3],[4,4],[5,1],[5,2],[5,3],[5,1],[5,1],[5,1],[5,4],[5,1],[5,2],[5,4],[5,1],[5,1],[6,1],[6,4],[6,2],[6,1],[6,1],[6,2],[6,3],[7,11],[7,1],[7,5],[8,2],[8,1],[8,1],[
9,2],[9,5],[9,4],[9,3],[9,1],[9,2],[9,2],[10,1],[10,2],[11,1],[12,3],[12,1],[13,11],[13,1],[17,1],[201,2],[16,2],[104,4],[123,2],[15,1],[26,5],[74,1],[15,3],[15,7],[16,1],[39,2],[27,1],[32,1],[53,4],[28,1],[25,3],[1,1],[1,3],[1,1],[1,2],[1,2],[1,2],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,7],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,2],[1,1],[1,2],[1,16],[1,3],[1,2],[1,2],[1,3],[1,1],[1,1],[1,3],[1,11],[1,4],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,4],[1,1],[1,4],[1,1],[1,2],[1,1],[1,1],[1,1],[1,32],[1,2],[1,1],[1,1],[1,6],[1,1],[1,7],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,2],[1,2],[1,1],[1,1],[1,2],[1,2],[1,2],[1,2],[1,1],[1,1],[1,55],[1,2],[1,4],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,2],[1,1],[1,5],[1,4],[1,7],[1,1],[1,1],[1,6],[1,2],[1,2],[1,6],[1,3],[1,2],[1,1],[1,6],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,4],[1,9],[1,2],[1,3],[1,1],[2,1],[2,1],[2,11],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,2],[2,1],[2,4],[2,1],[2,2],[2,2],[2,2],[2,3],[2,4],[2,2],[2,5],[2,1],[2,1],[2,3],[2,1],[2,2],[2,1],[2,2],[2,1],[2,1],[2,3],[2,3],[2,2],[2,3],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,4],[2,2],[3,2],[3,1],[3,1],[3,3],[3,1],[3,1],[3,1],[3,2],[3,1],[3,2],[3,6],[3,2],[3,1],[3,1],[3,3],[3,1],[3,1],[3,2],[3,1],[3,1],[3,1],[3,1],[3,1],[3,5],[3,1],[3,1],[3,2],[3,2],[3,2],[3,1],[3,1],[3,2],[3,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,3],[4,1],[4,2],[4,3],[4,3],[4,1],[4,4],[4,1],[4,2],[4,1],[4,3],[4,1],[5,1],[5,2],[5,1],[5,3],[5,3],[5,1],[5,2],[5,9],[5,1],[5,1],[5,2],[5,1],[5,2],[6,2],[6,3],[6,1],[6,1],[6,2],[6,1],[6,2],[6,2],[6,1],[6,4],[6,2],[7,7],[7,2],[7,4],[7,1],[7,2],[7,19],[7,1],[7,1],[7,1],[8,1],[8,12],[8,1],[8,3],[8,1],[9,1],[9,1],[9,1],[9,1],[9,1],[10,1],[10,1],[10,4],[10,2],[12,3],[12,1],[12,1],[13,1],[13,1],[14,1],[14,1],[14,3],[30,7],[32,1],[40,2],[16,1],[91,6],[122,1],[15,1],[17,1],[20,3],[19,2],[19,1],[98,2],[81,14],[47,4],[38,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,6],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,83],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,5],[1,2],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,3],[1,1],[1,1],[1,4],[1,2],[1,2],[1,2],[1,1],[1,1],[1,2],[1,1],[1,7],[1,1],[1,2],[1,4],[1,1],[1,1],[1,88],[1,2],[1,2],[1,2],[1,2],[1,3],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,57],[1,2],[1,6],[1,4],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,5],[1,5],[1,1],[1,1],[1,9],[1,1],[1,1],[1,3],[1,4],[1,1],[1,2],[1,5],[1,2],[1,3],[1,1],[1,2],[1,4],[1,4],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,3],[1,2],[1,3],[1,1],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,6],[1,3],[1,2],[1,1],[1,1],[1,2],[1,1],[1,3],[1,1],[2,1],[2,1],[2,2],[2,2],[2,2],[2,2],[2,2],[2,15],[2,4],[2,1],[2,1],[2,2],[2,1],[2,2],[2,3],[2,3],[2,3],[2,1],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,3],[2,1],[2,2],[2,1],[2,2],[2,1],[2,7],[2,1],[2,4],[2,3],[2,2],[2,3],[2,1],[2,1],[2,2],[3,4],[3,1],[3,1],[3,2],[3,3],[3,6],[3,2],[3,9],[3,9],[3,2],[3,2],[3,1],[3,15],[3,1],[3,1],[3,1],[3,3],[4,1],[4,1],[4,2],[4,3],[4,1],[4,2],[4,1],[4,6],[4,2],[4,8],[4,9],[4,1],[4,1],[4,1],[5,1],[5,1],[5,78],[5,1],[5,1],[5,1],[5,17],[5,1],[5,3],[5,2],[5,1],[6,1],[6,1],[6,5],[6,19],[6,1],[6,6],[6,1],[6,1],[6,2],[6,1],[6,1],[6,1],[6,2],[6,1],[7,2],[7,1],[7,1],[7,4],[7,1],[7,28],[7,1],[8,1],[8,1],[8,1],[9,3],[9,1],[9,11],[9,4],[10,1],[10,2],[11,1],[11,1],[11,1],[11,1],[12,1],[
14,2],[14,2],[14,2],[18,2],[31,1],[29,2],[16,1],[17,20],[25,1],[20,3],[59,1],[25,1],[27,2],[26,1],[44,1],[17,4],[16,4],[20,6],[67,2],[15,1],[65,1],[17,1],[33,1],[61,2],[1,2],[1,2],[1,2],[1,4],[1,1],[1,1],[1,1],[1,2],[1,2],[1,4],[1,4],[1,5],[1,2],[1,1],[1,1],[1,18],[1,1],[1,3],[1,1],[1,2],[1,1],[1,2],[1,2],[1,5],[1,4],[1,1],[1,4],[1,1],[1,1],[1,1],[1,56],[1,1],[1,4],[1,1],[1,9],[1,6],[1,9],[1,1],[1,2],[1,1],[1,1],[1,1],[1,18],[1,10],[1,1],[1,5],[1,1],[1,1],[1,2],[1,5],[1,1],[1,3],[1,1],[1,1],[1,4],[1,1],[1,2],[1,1],[1,8],[1,3],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,3],[1,2],[1,1],[1,1],[1,5],[1,2],[1,1],[1,1],[1,4],[1,2],[1,1],[1,1],[1,5],[1,2],[1,27],[1,3],[1,1],[1,2],[1,9],[1,2],[1,2],[1,6],[1,1],[1,2],[1,1],[1,15],[1,1],[1,2],[1,1],[1,1],[1,2],[1,4],[1,1],[1,1],[1,2],[1,3],[1,1],[1,1],[1,2],[1,1],[1,17],[1,1],[1,4],[1,1],[1,1],[1,2],[1,2],[1,4],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,18],[1,1],[1,2],[1,46],[1,1],[1,1],[1,1],[1,6],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,7],[1,8],[1,1],[1,3],[1,6],[2,1],[2,1],[2,1],[2,1],[2,5],[2,4],[2,1],[2,2],[2,2],[2,4],[2,2],[2,1],[2,2],[2,1],[2,3],[2,5],[2,1],[2,2],[2,2],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,12],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,3],[2,2],[2,3],[2,1],[2,2],[2,1],[2,10],[2,2],[2,8],[2,2],[2,2],[2,1],[2,5],[2,5],[2,4],[2,1],[2,1],[2,1],[2,1],[3,2],[3,6],[3,2],[3,1],[3,58],[3,1],[3,3],[3,1],[3,1],[3,2],[3,1],[3,1],[3,2],[3,1],[3,1],[3,6],[3,10],[3,1],[3,4],[3,1],[3,1],[3,6],[3,1],[3,29],[3,2],[3,2],[3,6],[3,1],[4,1],[4,4],[4,2],[4,1],[4,46],[4,2],[4,1],[4,2],[4,2],[4,3],[4,11],[4,3],[4,1],[4,2],[4,1],[4,15],[4,2],[5,5],[5,9],[5,1],[5,2],[5,136],[5,48],[5,5],[5,1],[5,1],[5,1],[5,1],[5,1],[6,1],[6,1],[6,10],[6,1],[6,2],[6,1],[7,2],[7,1],[7,3],[7,2],[7,11],[7,6],[7,1],[8,1],[8,3],[8,2],[8,1],[8,12],[8,2],[8,2],[9,1],[9,1],[9,1],[9,4],[10,1],[10,2],[11,2],[12,9],[13,1],[14,2],[21,1],[26,1],[16,2],[2230,1],[29,1],[16,5],[401,3],[33,1],[19,31],[15,4],[28,2],[23,1],[42,4],[40,1],[70,1],[15,3],[15,2],[22,1],[103,1],[256,27],[41,1],[86,1],[17,1],[31,1],[26,1],[105,2],[28,1],[1,4],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,3],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,6],[1,6],[1,4],[1,1],[1,4],[1,7],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,3],[1,3],[1,2],[1,1],[1,2],[1,2],[1,8],[1,1],[1,2],[1,1],[1,5],[1,2],[1,1],[1,1],[1,2],[1,2],[1,2],[1,2],[1,1],[1,9],[1,1],[1,2],[1,2],[1,3],[1,2],[1,1],[1,2],[1,2],[1,1],[1,4],[1,1],[1,1],[1,2],[1,1],[1,2],[1,2],[1,1],[1,1],[1,5],[1,1],[1,29],[1,1],[1,4],[1,2],[1,3],[1,3],[1,17],[1,6],[1,2],[1,1],[1,2],[1,2],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,9],[1,3],[1,1],[1,1],[1,1],[1,2],[1,3],[1,3],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,8],[1,1],[1,7],[1,1],[1,5],[1,1],[1,1],[1,4],[1,1],[1,2],[1,6],[1,2],[1,1],[1,2],[1,1],[1,4],[1,1],[1,1],[1,3],[1,3],[1,2],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,16],[1,5],[2,2],[2,1],[2,2],[2,2],[2,2],[2,1],[2,1],[2,8],[2,3],[2,1],[2,2],[2,4],[2,2],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,9],[2,1],[2,23],[2,1],[2,1],[2,1],[2,2],[2,3],[2,1],[2,1],[2,3],[2,1],[2,1],[2,2],[2,1],[2,25],[2,2],[2,3],[2,2],[2,1],[2,1],[2,3],[2,1],[2,3],[2,1],[2,3],[2,1],[2,2],[2,1],[2,1],[2,1],[3,1],[3,2],[3,2],[3,3],[3,2],[3,1],[3,1],[3,5],[3,9],[3,1],[3,3],[3,1],[3,2],[3,1],[3,1],[3,1],[3,1],[3,1],[3,9],[3,1],[3,2],[3,7],[3,3],[3,4],[3,2],[3,1],[3,37],[3,1],[3,1],[3,1],[3,1],[4,1],[4,2],[4,305],[4,4],[4,1],[4,1],[4,1],[4,4],[4,3],[4,1],[4,6],[4,7],[4,1],[4,1],[4,1],[4,1],[4,
29],[4,1],[5,10],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[6,2],[6,1],[6,1],[6,2],[7,1],[7,1],[7,2],[7,1],[7,1],[7,1],[7,2],[8,1],[8,3],[8,2],[9,1],[9,1],[10,1],[10,3],[10,1],[11,6],[11,2],[11,1],[11,1],[12,5],[12,4],[12,1],[14,1],[14,1],[23,1],[26,2],[15,2],[16,16],[31,7],[18,3],[22,3],[87,1],[17,2],[17,9],[30,1],[58,4],[24,2],[28,5],[53,1],[23,1],[28,2],[44,1],[60,3],[17,2],[17,1],[1,1],[1,2],[1,1],[1,11],[1,1],[1,1],[1,2],[1,2],[1,3],[1,2],[1,6],[1,3],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,3],[1,2],[1,1],[1,1],[1,3],[1,2],[1,4],[1,1],[1,1],[1,2],[1,3],[1,1],[1,1],[1,7],[1,2],[1,1],[1,1],[1,4],[1,2],[1,1],[1,3],[1,1],[1,5],[1,3],[1,3],[1,3],[1,1],[1,1],[1,4],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,3],[1,5],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,5],[1,1],[1,1],[1,1],[1,1],[1,1],[1,8],[1,15],[1,1],[1,8],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,3],[1,1],[1,3],[1,15],[1,1],[1,2],[1,1],[1,1],[1,4],[1,1],[1,5],[1,3],[1,1],[1,1],[1,14],[1,1],[1,2],[1,2],[1,3],[1,2],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,3],[1,1],[1,5],[1,2],[1,3],[1,1],[1,2],[1,9],[1,1],[1,4],[1,1],[1,2],[1,8],[1,1],[1,3],[1,1],[1,1],[1,4],[1,4],[1,3],[1,1],[1,1],[1,9],[1,2],[1,4],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,4],[1,2],[1,1],[1,1],[1,2],[1,3],[1,2],[1,6],[1,1],[1,18],[2,1],[2,3],[2,3],[2,1],[2,6],[2,1],[2,2],[2,2],[2,5],[2,1],[2,1],[2,1],[2,3],[2,2],[2,6],[2,1],[2,3],[2,3],[2,1],[2,3],[2,2],[2,2],[2,1],[2,1],[2,9],[2,5],[2,1],[2,1],[2,1],[2,2],[2,85],[2,60],[2,2],[2,1],[2,12],[2,1],[2,1],[2,1],[2,8],[2,1],[2,21],[2,1],[2,3],[2,1],[2,1],[2,8],[2,1],[2,1],[3,3],[3,3],[3,1],[3,3],[3,3],[3,1],[3,2],[3,2],[3,1],[3,1],[3,14],[3,1],[3,6],[3,1],[3,2],[3,1],[3,3],[3,2],[3,1],[3,1],[3,1],[3,1],[3,2],[3,3],[3,2],[4,3],[4,2],[4,1],[4,3],[4,1],[4,1],[4,2],[4,2],[4,1],[4,1],[4,1],[4,1],[4,1],[4,4],[5,1],[5,1],[5,1],[5,3],[5,2],[5,1],[5,4],[6,6],[6,1],[6,18],[6,1],[6,1],[6,1],[6,5],[6,2],[6,3],[6,2],[7,3],[7,5],[7,2],[7,1],[7,3],[7,5],[7,1],[7,1],[7,1],[7,1],[8,1],[8,1],[8,3],[8,1],[8,1],[8,4],[9,1],[9,2],[9,4],[10,2],[10,1],[11,2],[11,1],[11,1],[12,3],[13,1],[14,2],[32,7],[26,2],[22,2],[15,1],[26,46],[15,2],[16,1],[19,1],[36,1],[16,2],[24,1],[20,5],[1,1],[1,1],[1,1],[1,7],[1,1],[1,1],[1,2],[1,4],[1,2],[1,1],[1,1],[1,1],[1,10],[1,5],[1,13],[1,2],[1,3],[1,1],[1,3],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,4],[1,3],[1,1],[1,1],[1,2],[1,8],[1,1],[1,3],[1,5],[1,1],[1,2],[1,2],[1,2],[1,4],[1,2],[1,3],[1,1],[1,1],[1,1],[1,2],[1,8],[1,2],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,2],[1,4],[1,3],[1,2],[1,9],[1,19],[1,1],[1,1],[1,1],[1,1],[1,14],[1,3],[1,2],[1,4],[1,2],[1,1],[1,4],[1,1],[1,1],[1,5],[1,2],[1,1],[1,1],[1,2],[1,4],[1,2],[1,1],[1,11],[1,1],[1,3],[1,2],[1,2],[1,1],[1,1],[1,3],[1,9],[1,2],[1,6],[1,9],[1,3],[1,1],[1,1],[1,5],[1,1],[1,3],[1,2],[1,9],[1,1],[1,3],[1,5],[1,1],[1,1],[1,1],[1,2],[1,4],[1,1],[1,4],[1,2],[1,1],[1,3],[1,2],[1,1],[1,12],[1,1],[1,1],[1,1],[1,1],[2,5],[2,2],[2,5],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,3],[2,3],[2,3],[2,114],[2,1],[2,2],[2,1],[2,1],[2,3],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,9],[2,1],[2,1],[2,2],[2,1],[2,3],[2,1],[2,1],[2,2],[2,1],[2,3],[2,19],[2,1],[2,8],[2,2],[2,2],[2,7],[2,1],[2,1],[3,2],[3,1],[3,5],[3,3],[3,1],[3,5],[3,1],[3,1],[3,1],[3,1],[3,1],[3,30],[3,1],[3,1],[3,1],[3,2],[3,1],[3,1],[3,1],[3,1],[3,2],[3,2],[3,1],[3,2],[3,2],[3,1],[3,2],[3,1],[3,2],[4,1],[4,3],[4,1],[4,1],[4,7],[4,2],[4,2],[4,3],[4,3],[4,2],
[4,2],[4,1],[4,1],[4,2],[4,1],[4,2],[4,1],[4,1],[4,6],[5,2],[5,1],[5,2],[5,1],[5,7],[5,7],[5,1],[5,2],[5,1],[6,1],[6,1],[6,1],[6,2],[6,1],[6,1],[6,4],[6,1],[7,1],[7,1],[7,1],[7,3],[7,1],[7,1],[7,1],[8,1],[8,2],[8,3],[8,1],[8,1],[8,9],[8,6],[9,1],[9,3],[9,4],[10,4],[10,1],[10,3],[10,1],[10,19],[11,3],[11,2],[11,5],[11,5],[11,1],[12,7],[13,3],[13,4],[13,2],[13,4],[14,2],[16,1],[93,1],[22,2],[42,6],[15,1],[16,3],[36,8],[34,1],[30,3],[43,7],[46,8],[40,1],[22,1],[1,3],[1,1],[1,13],[1,2],[1,3],[1,2],[1,3],[1,1],[1,2],[1,2],[1,1],[1,2],[1,3],[1,1],[1,2],[1,1],[1,2],[1,1],[1,3],[1,2],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,2],[1,2],[1,1],[1,5],[1,13],[1,3],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,2],[1,6],[1,4],[1,1],[1,4],[1,1],[1,2],[1,3],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,6],[1,1],[1,1],[1,1],[1,1],[1,3],[1,2],[1,3],[1,2],[1,3],[1,1],[1,1],[1,3],[1,2],[1,3],[1,3],[1,2],[1,1],[1,3],[1,4],[1,1],[1,2],[1,1],[1,1],[1,3],[1,1],[1,3],[1,4],[1,2],[1,2],[1,3],[1,7],[1,3],[1,1],[1,1],[1,3],[1,2],[1,1],[1,4],[1,5],[1,2],[1,2],[1,1],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,7],[1,6],[1,1],[1,2],[1,3],[1,3],[1,1],[1,4],[1,2],[1,7],[1,2],[1,5],[1,1],[1,2],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,3],[1,6],[1,2],[1,2],[1,1],[1,1],[2,1],[2,1],[2,3],[2,1],[2,2],[2,1],[2,3],[2,1],[2,2],[2,12],[2,1],[2,1],[2,3],[2,3],[2,1],[2,2],[2,3],[2,3],[2,1],[2,1],[2,1],[2,3],[2,1],[2,1],[2,1],[2,1],[2,8],[2,2],[2,1],[2,2],[2,1],[2,1],[2,7],[2,1],[2,1],[2,1],[2,7],[2,2],[2,1],[2,18],[2,1],[2,1],[2,1],[2,2],[2,2],[2,1],[2,1],[2,5],[2,1],[2,1],[2,6],[2,3],[2,1],[3,3],[3,1],[3,1],[3,3],[3,1],[3,1],[3,3],[3,1],[3,2],[3,3],[3,1],[3,1],[3,1],[4,6],[4,1],[4,1],[4,3],[4,1],[4,1],[4,1],[4,2],[4,2],[4,5],[4,2],[4,2],[4,2],[4,2],[4,1],[4,3],[4,2],[4,1],[5,1],[5,3],[5,2],[5,2],[5,1],[5,1],[5,3],[5,1],[5,1],[5,2],[5,4],[5,4],[5,1],[6,2],[6,2],[6,2],[6,1],[6,1],[6,1],[6,1],[6,4],[6,1],[7,2],[7,1],[7,2],[7,1],[7,1],[7,1],[8,2],[8,2],[8,3],[8,14],[9,5],[9,2],[9,1],[9,1],[10,8],[10,2],[11,1],[11,1],[12,1],[12,1],[12,1],[12,7],[12,3],[48,1],[73,3],[22,2],[19,1],[20,1],[40,2],[15,2],[34,1],[22,5],[31,2],[47,28],[51,1],[19,2],[231,1],[15,3],[18,2],[18,3],[101,5],[65,2],[30,11],[18,3],[1,1],[1,2],[1,2],[1,1],[1,3],[1,5],[1,2],[1,2],[1,1],[1,2],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,64],[1,2],[1,3],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,3],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,4],[1,2],[1,1],[1,4],[1,5],[1,1],[1,1],[1,1],[1,1],[1,3],[1,4],[1,3],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,3],[1,1],[1,6],[1,1],[1,3],[1,4],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,2],[1,3],[1,2],[1,1],[1,1],[1,1],[1,3],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,2],[1,3],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,4],[1,3],[1,1],[1,1],[1,1],[1,1],[1,14],[1,1],[1,1],[1,1],[1,1],[1,2],[1,12],[1,2],[1,2],[1,1],[1,1],[1,3],[1,2],[1,3],[1,2],[1,1],[1,5],[1,1],[1,7],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,2],[1,2],[1,3],[1,1],[2,2],[2,1],[2,3],[2,2],[2,1],[2,1],[2,2],[2,1],[2,2],[2,2],[2,1],[2,1],[2,10],[2,2],[2,1],[2,2],[2,3],[2,1],[2,2],[2,1],[2,2],[2,1],[2,1],[2,3],[2,1],[2,6],[2,2],[2,4],[2,9],[2,2],[2,1],[2,3],[2,2],[2,10],[2,3],[2,1],[2,37],[2,2],[2,2],[2,2],[3,9],[3,4],[3,3],[3,2],[3,2],[3,1],[3,19],[3,1],[3,1],[3,1],[3,1],[3,1],[3,2],[3,1],[3,1],[3,1],[3,2],[3,2]
,[3,10],[3,1],[3,1],[3,1],[3,1],[3,3],[3,6],[4,2],[4,5],[4,1],[4,3],[4,10],[4,1],[4,1],[4,1],[4,1],[4,4],[4,5],[4,1],[4,1],[4,2],[5,2],[5,2],[5,1],[5,2],[5,1],[5,3],[5,2],[5,1],[5,1],[6,3],[6,1],[6,1],[6,6],[6,1],[6,3],[7,2],[7,1],[7,1],[7,1],[7,1],[7,1],[8,1],[8,2],[8,1],[8,3],[8,1],[9,1],[9,1],[9,2],[10,3],[10,4],[10,1],[11,1],[12,1],[12,1],[13,1],[13,3],[13,1],[14,1],[35,2],[15,7],[32,1],[80,1],[22,2],[16,1],[25,1],[156,1],[175,2],[460,1],[63,1],[74,3],[121,2],[16,3],[49,5],[29,1],[16,1],[1,5],[1,4],[1,3],[1,5],[1,1],[1,1],[1,2],[1,2],[1,1],[1,3],[1,1],[1,2],[1,1],[1,3],[1,4],[1,12],[1,1],[1,3],[1,1],[1,2],[1,3],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,12],[1,1],[1,1],[1,3],[1,1],[1,2],[1,38],[1,1],[1,1],[1,1],[1,2],[1,5],[1,1],[1,1],[1,10],[1,3],[1,3],[1,4],[1,2],[1,2],[1,3],[1,1],[1,1],[1,1],[1,6],[1,1],[1,4],[1,2],[1,2],[1,1],[1,1],[1,9],[1,1],[1,1],[1,4],[1,4],[1,3],[1,3],[1,2],[1,1],[1,6],[1,2],[1,3],[1,1],[1,5],[1,2],[1,2],[1,1],[1,1],[1,5],[1,2],[1,1],[1,3],[1,1],[1,6],[1,1],[1,2],[1,2],[1,1],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,5],[1,2],[1,2],[1,8],[1,1],[1,3],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,4],[1,3],[1,1],[1,2],[1,2],[1,1],[1,3],[1,1],[1,1],[2,1],[2,1],[2,4],[2,7],[2,1],[2,3],[2,2],[2,3],[2,2],[2,10],[2,2],[2,6],[2,4],[2,2],[2,2],[2,1],[2,2],[2,1],[2,1],[2,1],[2,3],[2,4],[2,1],[2,1],[2,2],[2,2],[2,1],[2,2],[2,3],[2,1],[2,10],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,4],[2,1],[2,1],[2,1],[2,3],[2,2],[2,2],[3,5],[3,3],[3,26],[3,1],[3,4],[3,2],[3,5],[3,1],[3,3],[3,2],[3,1],[3,1],[3,2],[3,1],[3,2],[3,2],[3,1],[3,4],[3,2],[4,8],[4,1],[4,1],[4,1],[4,1],[4,2],[4,1],[4,2],[4,1],[4,5],[4,1],[4,2],[4,2],[4,2],[4,3],[4,2],[5,2],[5,1],[5,2],[5,3],[5,1],[5,1],[5,3],[5,1],[5,1],[5,1],[6,4],[6,2],[6,1],[6,1],[6,7],[6,2],[7,1],[7,1],[7,1],[7,3],[7,3],[7,3],[8,2],[8,1],[8,3],[9,3],[9,2],[9,1],[9,3],[9,2],[10,1],[10,1],[10,4],[11,2],[11,1],[11,1],[12,1],[12,55],[12,1],[13,1],[35,4],[21,9],[26,1],[165,7],[21,1],[55,5],[19,10],[18,5],[17,1],[67,1],[68,4],[19,1],[24,6],[89,3],[21,1],[40,1],[52,2],[16,1],[1,3],[1,4],[1,1],[1,4],[1,2],[1,3],[1,1],[1,3],[1,1],[1,4],[1,1],[1,1],[1,14],[1,5],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,22],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,3],[1,4],[1,2],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,5],[1,1],[1,2],[1,2],[1,5],[1,1],[1,4],[1,2],[1,1],[1,1],[1,1],[1,3],[1,1],[1,4],[1,1],[1,2],[1,37],[1,1],[1,2],[1,1],[1,2],[1,2],[1,5],[1,1],[1,1],[1,11],[1,2],[1,1],[1,1],[1,1],[1,7],[1,3],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,6],[1,2],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[1,3],[1,2],[1,2],[1,1],[1,1],[1,2],[1,3],[1,1],[1,4],[1,1],[1,1],[1,1],[1,3],[1,1],[1,2],[1,3],[1,1],[1,1],[1,1],[1,3],[1,2],[1,1],[1,1],[1,2],[1,1],[1,11],[1,2],[1,1],[1,6],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,8],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,4],[1,1],[1,5],[1,2],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,3],[2,1],[2,1],[2,3],[2,1],[2,2],[2,1],[2,1],[2,1],[2,19],[2,6],[2,3],[2,1],[2,2],[2,3],[2,2],[2,6],[2,1],[2,1],[2,4],[2,1],[2,2],[2,1],[2,1],[2,1],[2,3],[2,2],[2,1],[2,7],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,2],[2,7],[2,1],[2,3],[2,3],[2,1],[3,6],[3,2],[3,2],[3,1],[3,1],[3,2],[3,1],[3,1],[3,1],[3,3],[3,1],[3,1],[3,29],[3,1],[3,2],[3,3],[3,1],[3,1],[3,1],[3,15],[3,2],[3,1],[3,1],[3,2],[3,1],[3,2],[3,2],[3,7],[3,3],[3,4],[3,1],[4,2],[4,10],[4,1],[4,1],[4,1],[4,1],[4,1],[4,6],[5,3],
[5,2],[5,1],[5,4],[5,1],[5,2],[5,1],[6,13],[6,2],[6,2],[6,2],[6,1],[6,1],[6,1],[7,1],[7,1],[7,2],[8,1],[8,1],[8,1],[9,2],[9,1],[9,1],[9,1],[9,1],[9,1],[10,1],[10,1],[10,112],[10,1],[11,1],[11,3],[11,11],[12,1],[13,2],[13,1],[13,2],[14,1],[78,1],[43,1],[20,1],[15,1],[26,5],[17,2],[32,2],[93,2],[57,2],[25,1],[112,4],[18,1],[73,1],[30,55],[24,1],[699,1],[17,1],[1,1],[1,1],[1,3],[1,5],[1,1],[1,2],[1,1],[1,3],[1,2],[1,1],[1,1],[1,2],[1,3],[1,3],[1,1],[1,2],[1,2],[1,3],[1,1],[1,4],[1,5],[1,3],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,2],[1,2],[1,1],[1,2],[1,4],[1,1],[1,2],[1,1],[1,1],[1,6],[1,3],[1,4],[1,1],[1,2],[1,1],[1,1],[1,2],[1,4],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,4],[1,1],[1,1],[1,4],[1,4],[1,1],[1,3],[1,1],[1,1],[1,1],[1,9],[1,1],[1,2],[1,1],[1,1],[1,4],[1,1],[1,2],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,13],[1,2],[1,1],[1,1],[1,1],[1,7],[1,3],[1,3],[1,1],[1,1],[1,1],[1,2],[1,15],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,7],[1,3],[1,1],[1,1],[1,1],[1,5],[1,1],[1,3],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,6],[1,2],[1,4],[1,15],[1,2],[1,1],[1,1],[1,1],[1,2],[1,2],[1,2],[1,1],[1,2],[1,1],[2,1],[2,10],[2,3],[2,1],[2,1],[2,1],[2,3],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,4],[2,1],[2,1],[2,1],[2,2],[2,1],[2,24],[2,1],[2,2],[2,1],[2,3],[2,1],[2,1],[2,1],[2,1],[2,2],[2,2],[2,5],[2,3],[2,2],[2,1],[2,2],[2,1],[2,1],[2,3],[2,4],[2,1],[3,2],[3,2],[3,1],[3,2],[3,1],[3,3],[3,1],[3,1],[3,1],[3,3],[3,13],[3,10],[3,7],[3,1],[3,1],[3,1],[3,9],[3,9],[3,1],[3,2],[3,11],[3,1],[3,4],[3,1],[3,1],[4,2],[4,1],[4,2],[4,1],[4,115],[4,1],[4,1],[4,1],[4,1],[4,2],[4,2],[4,1],[4,2],[4,4],[4,9],[4,1],[4,1],[5,1],[5,2],[5,3],[5,2],[5,1],[5,4],[5,1],[5,2],[5,1],[5,1],[5,1],[5,7],[5,1],[5,1],[6,39],[6,2],[6,3],[6,1],[7,1],[7,2],[7,3],[7,1],[7,2],[7,8],[7,1],[8,3],[8,1],[8,1],[8,1],[8,1],[9,3],[9,2],[9,1],[10,3],[10,25],[10,1],[10,1],[11,6],[11,1],[11,1],[11,1],[11,7],[12,1],[12,1],[12,1],[13,1],[13,1],[14,8],[14,1],[14,1],[74,2],[26,11],[69,1],[108,1],[20,5],[1263,1],[21,1],[16,1],[16,3],[32,2],[62,2],[50,1],[16,1],[15,1],[22,5],[1,2],[1,1],[1,2],[1,2],[1,1],[1,2],[1,1],[1,1],[1,6],[1,3],[1,1],[1,1],[1,3],[1,1],[1,1],[1,5],[1,10],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,7],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,2],[1,4],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,9],[1,7],[1,9],[1,1],[1,1],[1,2],[1,2],[1,2],[1,1],[1,1],[1,15],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,42],[1,12],[1,3],[1,3],[1,5],[1,2],[1,1],[1,5],[1,4],[1,3],[1,3],[1,4],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,1],[1,3],[1,1],[1,12],[1,1],[1,2],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,4],[1,1],[1,1],[1,2],[1,1],[1,3],[1,1],[1,5],[1,1],[1,16],[1,1],[1,7],[1,1],[1,1],[1,3],[1,1],[1,7],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,7],[1,1],[2,1],[2,3],[2,1],[2,1],[2,9],[2,2],[2,1],[2,1],[2,1],[2,1],[2,3],[2,1],[2,3],[2,2],[2,3],[2,1],[2,1],[2,1],[2,2],[2,1],[2,4],[2,2],[2,1],[2,10],[2,2],[2,1],[2,4],[2,1],[2,4],[2,3],[2,1],[2,1],[2,1],[2,1],[2,5],[2,1],[2,1],[2,1],[2,1],[2,2],[2,2],[2,1],[2,1],[2,4],[2,1],[2,2],[2,1],[3,1],[3,3],[3,135],[3,1],[3,10],[3,1],[3,1],[3,3],[3,2],[3,2],[3,2],[3,5],[3,1],[3,2],[3,7],[3,2],[3,1],[3,1],[3,3],[3,3],[3,1],[3,1],[3,1],[3,1],[3,3],[3,1],[4,91],[4,2],[4,2],[4,3],[4,10],[4,3],[4,2],[4,3],[4,1],[4,1],[4,32],[4,2],[4,2],[5,1],[5,1],[5,3],[5,1],[5,3],[5,2],[5,1],[5,34],[5,2],[5,7],[5,2],[5,1],[6,2],[6,1],[6,5],[6,2],[6,1],[6,1],[7,2],[7,2],[7,1],[7,1],[7,6],[7,1],[8,1],[8,2],[8,1],[8,5],[8,4],[8,1],[8,3],[8,1],[9,4],[9,7],[9,1],[11,2],[11,2]
,[11,1],[11,1],[11,2],[11,19],[11,6],[12,6],[13,2],[13,1],[13,1],[14,1],[76,1],[65,1],[15,2],[19,1],[15,1],[32,1],[33,1],[19,4],[27,3],[62,7],[36,2],[39,3],[44,3],[17,1],[940,4],[20,1],[16,5],[17,4],[21,1],[46,1],[55,1],[251,12],[27,2],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,12],[1,8],[1,1],[1,1],[1,5],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,9],[1,2],[1,5],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,3],[1,1],[1,3],[1,2],[1,1],[1,3],[1,2],[1,3],[1,1],[1,4],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,2],[1,32],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,5],[1,1],[1,11],[1,4],[1,15],[1,3],[1,2],[1,1],[1,1],[1,1],[1,6],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,11],[1,9],[1,1],[1,2],[1,6],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,128],[1,3],[1,1],[1,1],[1,1],[1,2],[1,3],[1,1],[1,2],[1,3],[1,2],[1,3],[1,1],[1,1],[1,1],[1,3],[1,2],[1,2],[1,2],[1,2],[1,1],[1,1],[1,2],[1,1],[1,17],[1,1],[1,1],[1,1],[1,3],[1,8],[2,1],[2,1],[2,3],[2,1],[2,3],[2,2],[2,4],[2,2],[2,1],[2,3],[2,1],[2,2],[2,1],[2,2],[2,2],[2,5],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,10],[2,1],[2,2],[2,1],[2,3],[2,1],[2,1],[2,2],[2,1],[2,1],[2,4],[2,1],[2,1],[2,2],[2,1],[2,3],[2,1],[2,1],[2,1],[3,1],[3,2],[3,1],[3,8],[3,1],[3,1],[3,1],[3,1],[3,1],[3,2],[3,1],[3,2],[3,3],[3,2],[3,3],[3,1],[3,1],[3,2],[3,1],[3,1],[4,1],[4,1],[4,1],[4,1],[4,2],[4,1],[4,1],[4,3],[4,1],[4,2],[4,2],[4,1],[4,1],[5,33],[5,5],[5,2],[5,1],[5,5],[5,48],[6,2],[6,3],[6,2],[6,1],[6,1],[6,2],[6,3],[6,1],[6,3],[7,8],[7,1],[7,1],[7,2],[8,1],[8,1],[8,1],[8,1],[8,2],[8,1],[9,1],[9,1],[9,1],[10,1],[10,1],[10,1],[11,2],[11,5],[12,1],[12,2],[12,2],[17,4],[17,1],[15,2],[29,5],[38,1],[20,1],[16,2],[24,1],[42,1],[29,1],[60,2],[20,1],[168,4],[17,33],[83,2],[71,1],[16,1],[18,3],[54,1],[15,8],[22,1],[36,1],[1,1],[1,1],[1,2],[1,4],[1,1],[1,1],[1,1],[1,2],[1,7],[1,5],[1,1],[1,9],[1,1],[1,5],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,7],[1,3],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,15],[1,1],[1,3],[1,2],[1,2],[1,5],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,5],[1,3],[1,2],[1,1],[1,143],[1,1],[1,1],[1,2],[1,4],[1,4],[1,2],[1,2],[1,96],[1,1],[1,4],[1,16],[1,2],[1,1],[1,3],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,5],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,8],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,4],[1,2],[1,1],[1,5],[1,2],[1,1],[1,1],[1,6],[1,1],[1,15],[1,1],[1,1],[1,3],[1,1],[1,2],[1,1],[1,1],[1,7],[1,1],[1,2],[1,4],[1,1],[1,6],[1,5],[1,6],[1,1],[1,1],[1,1303],[1,2],[1,2],[1,1],[1,5],[1,2],[1,2],[1,12],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,4],[1,1],[1,3],[1,8],[2,1],[2,1],[2,2],[2,3],[2,1],[2,3],[2,1],[2,1],[2,1],[2,5],[2,1],[2,2],[2,1],[2,1],[2,3],[2,1],[2,14],[2,1],[2,1],[2,1],[2,5],[2,1],[2,7],[2,3],[2,1],[2,3],[2,2],[2,3],[2,1],[2,1],[2,33],[2,1],[2,1],[2,1],[2,2],[2,3],[2,5],[2,1],[2,2],[2,8],[2,5],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[3,1],[3,2],[3,1],[3,1],[3,1],[3,3],[3,16],[3,1],[3,4],[3,1],[3,1],[3,8],[3,2],[3,1],[3,1],[3,2],[3,1],[3,1],[3,1],[3,1],[3,3],[3,1],[3,2],[3,1],[3,1],[3,2],[3,5],[3,6],[3,1],[3,1],[3,2],[3,3],[3,1],[3,1],[3,4],[3,1],[4,1],[4,2],[4,1],[4,1],[4,2],[4,1],[4,4],[4,2],[4,3],[4,1],[4,2],[4,2],[4,3],[4,1],[4,1],[4,1],[4,1],[4,45],[5,2],[5,1],[5,4],[5,2],[5,1],[5,1],[5,1],[5,1],[5,3],[5,1],[5,3],[6,5],[6,13],[6,4],[6,1],[6,2],[6,1],[6,2],[7,3],[7,1],[7,2],[7,1],[7,1],[8,1],[8,1],[8,1],[8,11],[8,4],[8,1],[8,1],[9,2],[9,1],[10,1],[10,1],[10,2],[11,25],[11,1],[11,1],[11,7],
[11,1],[12,3],[12,1],[12,1],[26,3],[29,11],[18,1],[20,1],[15,1],[16,1],[35,4],[15,1],[63,2],[39,1],[64,4],[15,1],[15,1],[26,1],[64,1],[40,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,12],[1,1],[1,1],[1,2],[1,2],[1,3],[1,2],[1,2],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,3],[1,3],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,10],[1,1],[1,1],[1,16],[1,1],[1,2],[1,47],[1,3],[1,1],[1,1],[1,1],[1,4],[1,1],[1,170],[1,2],[1,2],[1,1],[1,1],[1,3],[1,3],[1,1],[1,5],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,2],[1,1],[1,2],[1,1],[1,3],[1,1],[1,14],[1,35],[1,1],[1,3],[1,4],[1,2],[1,1],[1,1],[1,3],[1,2],[1,1],[1,1],[1,1],[1,2],[1,2],[1,4],[1,1],[1,2],[1,1],[1,1],[1,3],[1,2],[1,3],[1,2],[1,1],[1,1],[1,2],[1,1],[1,15],[1,13],[1,2],[1,1],[1,1],[1,8],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,5],[1,3],[1,1],[1,53],[1,1],[1,4],[1,3],[1,1],[1,5],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,14],[2,3],[2,1],[2,2],[2,3],[2,9],[2,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,4],[2,8],[2,3],[2,1],[2,1],[2,3],[2,2],[2,1],[2,1],[2,1],[2,2],[2,4],[2,2],[2,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,2],[2,2],[2,3],[2,1],[2,1],[2,4],[2,2],[2,161],[2,1],[3,1],[3,1],[3,2],[3,1],[3,1],[3,1],[3,1],[3,51],[3,1],[3,1],[3,3],[3,1],[3,3],[3,2],[3,1],[3,1],[3,2],[3,3],[3,4],[3,2],[3,2],[3,1],[3,1],[3,10],[3,1],[4,1],[4,1],[4,1],[4,4],[4,1],[4,1],[4,4],[4,1],[4,5],[4,9],[4,1],[4,3],[4,1],[5,4],[5,3],[5,1],[5,1],[5,1],[5,1],[5,1],[5,2],[5,1],[5,1],[5,1],[6,7],[6,1],[6,1],[6,1],[6,1],[6,1],[6,3],[6,2],[7,1],[7,2],[7,1],[7,1],[8,1],[8,2],[8,2],[9,1],[9,1],[10,3],[10,1],[10,1],[10,3],[11,9],[11,1],[11,1],[11,1],[11,1],[11,2],[11,2],[12,1],[12,4],[13,2],[13,2],[13,15],[14,1],[14,1],[17,3],[185,1],[51,1],[21,3],[19,3],[17,1],[29,1],[38,4],[169,24],[41,4],[15,1],[59,5],[87,3],[169,1],[29,5],[28,1],[25,4],[48,1],[15,3],[18,1],[22,2],[36,4],[134,1],[19,1],[15,1],[17,3],[56,1],[24,1],[17,1],[1,1],[1,3],[1,4],[1,3],[1,2],[1,3],[1,6],[1,4],[1,6],[1,1],[1,4],[1,1],[1,1],[1,1],[1,2],[1,1],[1,4],[1,9],[1,79],[1,1],[1,4],[1,1],[1,3],[1,2],[1,1],[1,2],[1,1],[1,1],[1,7],[1,1],[1,3],[1,3],[1,2],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,2],[1,3],[1,5],[1,4],[1,1],[1,2],[1,5],[1,2],[1,1],[1,10],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,7],[1,2],[1,1],[1,1],[1,1],[1,2],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,24],[1,2],[1,1],[1,11],[1,2],[1,8],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,5],[1,4],[1,2],[1,2],[1,1],[1,3],[1,2],[1,1],[1,3],[1,1],[1,2],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,31],[1,1],[1,1],[1,6],[1,1],[1,1],[1,1],[1,1],[1,7],[1,1],[1,5],[1,1],[1,1],[1,2],[1,1],[1,3],[1,2],[1,1],[1,13],[1,5],[1,3],[1,2],[1,4],[1,2],[1,1],[1,2],[1,1],[1,1],[1,4],[1,3],[1,3],[1,1],[1,2],[1,1],[2,2],[2,1],[2,2],[2,1],[2,1],[2,1],[2,2],[2,5],[2,2],[2,8],[2,1],[2,1],[2,1],[2,3],[2,13],[2,6],[2,1],[2,4],[2,1],[2,2],[2,2],[2,1],[2,1],[2,4],[2,1],[2,1],[2,1],[2,6],[2,1],[2,1],[2,4],[2,1],[2,2],[2,1],[2,2],[2,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,4],[2,6],[2,1],[2,1],[2,1],[2,1],[2,6],[2,1],[2,1],[2,1],[2,2],[2,2],[2,4],[3,1],[3,1],[3,2],[3,1],[3,5],[3,1],[3,1],[3,2],[3,1],[3,1],[3,1],[3,6],[3,1],[3,8],[3,1],[3,1],[3,1],[3,1],[3,13],[3,3],[3,1],[3,2],[3,2],[3,1],[4,4],[4,1],[4,1],[4,3],[4,1],[4,1],[4,1],[4,2],[5,4],[5,1],[5,2],[5,3],[5,1],[5,1],[5,1],[5,1],[5,2],[6,8],[7,1],[7,1],[7,2],[8,2],[8,2],[8,2],[8,3],[8,3],[8,1],[8,1],[9,1],[9,1],[10,1],[10,3],[10,1],[12,3],[12,2],[12,2],[12,1],[12,1],[12,1],[13,3],[13,1],[13,1],[14,1],[17,1]
,[25,7],[15,6],[111,8],[92,1],[26,21],[328,1],[16,1],[752,1],[16,1],[22,2],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,3],[1,2],[1,2],[1,3],[1,6],[1,1],[1,1],[1,7],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,2],[1,7],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,7],[1,2],[1,1],[1,1],[1,1],[1,3],[1,2],[1,5],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,6],[1,1],[1,1],[1,4],[1,2],[1,3],[1,1],[1,3],[1,1],[1,2],[1,2],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,8],[1,2],[1,2],[1,3],[1,2],[1,2],[1,3],[1,1],[1,3],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,6],[1,1],[1,1],[1,2],[1,2],[1,6],[1,1],[1,1],[1,8],[1,5],[1,1],[1,2],[1,4],[1,21],[1,1],[1,2],[1,2],[1,1],[1,1],[1,2],[1,2],[1,4],[1,1],[1,5],[1,1],[1,1],[1,1],[1,1],[1,4],[1,2],[2,5],[2,1],[2,1],[2,4],[2,2],[2,1],[2,3],[2,1],[2,2],[2,8],[2,1],[2,2],[2,12],[2,2],[2,2],[2,1],[2,5],[2,2],[2,2],[2,1],[2,2],[2,1],[2,3],[2,4],[2,1],[2,2],[2,1],[2,2],[2,1],[2,1],[2,1],[2,2],[2,4],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,4],[2,5],[2,1],[2,2],[2,2],[2,9],[2,1],[2,1],[3,3],[3,1],[3,1],[3,5],[3,1],[3,2],[3,3],[3,1],[3,12],[3,2],[3,1],[3,1],[3,3],[3,3],[3,1],[3,1],[3,1],[3,2],[3,1],[3,2],[3,1],[3,1],[3,7],[4,2],[4,2],[4,1],[4,3],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,3],[4,1],[4,3],[5,1],[5,2],[5,1],[5,1],[5,1],[5,1],[6,1],[6,5],[6,11],[6,1],[6,1],[6,2],[6,1],[6,4],[6,1],[6,1],[7,5],[7,1],[7,1],[8,1],[8,3],[9,2],[9,1],[10,1],[11,1],[11,1],[11,2],[11,1],[12,4],[12,2],[13,1],[13,1],[13,2],[14,6],[14,1],[68,4],[113,4],[22,1],[48,79],[28,2],[88,1],[232,2],[23,1],[32,1],[72,2],[26,1],[20,1],[53,1],[16,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,8],[1,1],[1,1],[1,2],[1,2],[1,1],[1,2],[1,1],[1,4],[1,1],[1,1],[1,2],[1,1],[1,6],[1,1],[1,3],[1,1],[1,3],[1,4],[1,3],[1,1],[1,2],[1,1],[1,4],[1,1],[1,1],[1,1],[1,2],[1,2],[1,2],[1,1],[1,5],[1,2],[1,1],[1,2],[1,1],[1,1],[1,3],[1,1],[1,3],[1,1],[1,2],[1,2],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,5],[1,4],[1,1],[1,1],[1,9],[1,6],[1,5],[1,1],[1,1],[1,3],[1,2],[1,9],[1,2],[1,3],[1,1],[1,4],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,6],[1,1],[1,1],[1,2],[1,1],[1,16],[1,3],[1,1],[1,86],[1,1],[1,2],[1,4],[1,2],[1,16],[1,9],[1,4],[1,2],[1,9],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,7],[1,10],[1,5],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,12],[1,2],[1,4],[1,1],[1,1],[1,2],[1,2],[1,4],[2,6],[2,3],[2,2],[2,1],[2,3],[2,2],[2,2],[2,2],[2,6],[2,1],[2,4],[2,2],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,3],[2,1],[2,1],[2,1],[2,3],[2,1],[2,2],[2,2],[2,1],[2,2],[2,9],[2,10],[2,1],[2,1],[2,1],[2,1],[2,1],[2,4],[2,3],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,8],[2,2],[2,1],[2,3],[2,1],[3,1],[3,1],[3,1],[3,2],[3,7],[3,5],[3,2],[3,1],[3,1],[3,1],[3,1],[3,1],[3,2],[3,1],[3,2],[3,2],[3,1],[3,1],[3,2],[3,1],[3,2],[3,5],[3,2],[4,1],[4,2],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,6],[4,2],[5,5],[5,2],[5,9],[5,5],[5,1],[5,2],[5,1],[5,2],[6,7],[6,7],[7,3],[7,8],[7,1],[7,1],[7,2],[7,7],[8,1],[8,1],[8,1],[9,6],[9,4],[10,2],[10,1],[10,1],[10,3],[10,2],[11,1],[12,5],[12,3],[12,1],[13,1],[14,2],[14,3],[14,4],[30,1],[19,1],[27,1],[24,12],[20,24],[20,1],[80,1],[26,1],[25,1],[35,1],[150,1],[22,1],[28,1],[187,2],[15,2],[21,1],[22,1],[17,8],[27,1],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,4],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,6],[1,4],[1,1],[1,1],[1,2],[1,1],[1,2],[1,4],[1,4],[1,1],[1,3],[1,5],[1,1],[1,10],[1,8],[1,1],[1,3],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,2],[1,1],[1,1],[1
,1],[1,4],[1,1],[1,1],[1,5],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,3],[1,7],[1,3],[1,1],[1,10],[1,1],[1,4],[1,1],[1,1],[1,2],[1,7],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,4],[1,1],[1,2],[1,3],[1,1],[1,2],[1,2],[1,7],[1,1],[1,1],[1,1],[1,1],[1,5],[1,2],[1,1],[1,5],[1,1],[1,1],[1,5],[1,2],[1,2],[1,1],[1,3],[1,1],[1,1],[1,4],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,17],[1,4],[1,2],[1,6],[1,1],[1,2],[1,1],[1,2],[1,1],[1,6],[1,2],[1,1],[1,28],[1,3],[1,1],[1,3],[1,1],[1,2],[1,2],[1,2],[1,1],[1,3],[1,1],[2,1],[2,3],[2,1],[2,4],[2,1],[2,3],[2,2],[2,1],[2,1],[2,2],[2,1],[2,2],[2,1],[2,1],[2,3],[2,1],[2,9],[2,1],[2,1],[2,7],[2,3],[2,1],[2,1],[2,3],[2,4],[2,2],[2,2],[2,2],[2,1],[2,3],[2,2],[2,3],[2,2],[2,1],[2,1],[2,2],[3,10],[3,1],[3,3],[3,4],[3,4],[3,398],[3,1],[3,1],[3,3],[3,1],[3,3],[3,1],[3,1],[3,3],[3,1],[3,1],[3,4],[3,3],[3,2],[3,1],[4,2],[4,16],[4,3],[4,2],[4,1],[4,4],[4,1],[4,1],[4,4],[4,1],[4,1],[4,1],[4,21],[4,5],[4,1],[4,3],[4,2],[4,2],[4,1],[4,2],[4,1],[4,2],[5,3],[5,1],[5,3],[5,1],[5,5],[5,7],[5,1],[5,1],[5,1],[5,7],[5,4],[5,6],[5,1],[6,1],[6,2],[6,3],[6,2],[6,1],[6,3],[7,8],[7,6],[7,1],[7,2],[7,1],[7,1],[8,4],[8,1],[8,4],[8,1],[8,1],[8,8],[8,3],[9,1],[9,1],[9,2],[10,6],[11,1],[11,1],[11,1],[12,1],[12,4],[12,6],[13,3],[13,1],[520,3],[292,13],[16,1],[20,1],[44,3],[22,1],[17,2],[18,1],[46,5],[19,1],[15,3],[28,1],[23,1],[19,13],[25,2],[23,134],[68,1],[79,13],[1,1],[1,1],[1,1],[1,2],[1,3],[1,1],[1,1],[1,5],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,5],[1,1],[1,1],[1,3],[1,1],[1,2],[1,6],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,5],[1,12],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,2],[1,2],[1,6],[1,1],[1,1],[1,36],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,5],[1,1],[1,5],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,6],[1,3],[1,2],[1,2],[1,3],[1,1],[1,1],[1,3],[1,1],[1,1],[1,4],[1,2],[1,1],[1,22],[1,1],[1,1],[1,1],[1,187],[1,2],[1,2],[1,1],[1,1],[1,1],[1,2],[1,2],[1,5],[1,4],[1,1],[1,2],[1,1],[1,20],[1,4],[1,2],[1,1],[1,1],[1,3],[1,1],[1,3],[1,1],[1,1],[2,1],[2,5],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,3],[2,2],[2,1],[2,2],[2,1],[2,1],[2,1],[2,5],[2,1],[2,2],[2,1],[2,1],[2,6],[2,6],[2,9],[2,1],[2,2],[2,1],[2,2],[2,2],[2,3],[2,6],[2,2],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,44],[2,1],[2,1],[2,1],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[3,9],[3,4],[3,1],[3,2],[3,1],[3,1],[3,1],[3,4],[3,2],[3,1],[3,1],[3,21],[3,6],[3,1],[3,2],[3,1],[3,1],[3,1],[3,2],[3,1],[3,2],[3,3],[3,1],[3,3],[3,5],[3,1],[3,1],[3,5],[3,1],[3,2],[3,2],[3,1],[3,1],[3,1],[4,92],[4,1],[4,1],[4,1],[4,13],[4,4],[4,1],[4,1],[4,2],[4,1],[4,1],[5,1],[5,1],[5,1],[5,2],[5,1],[5,3],[5,3],[5,1],[5,1],[5,1],[5,4],[5,1],[6,1],[6,3],[6,2],[6,23],[6,2],[6,3],[6,35],[7,1],[7,1],[7,1],[8,690],[8,1],[8,3],[9,2],[9,5],[9,1],[10,4],[11,6],[12,4],[12,1],[14,15],[14,1],[18,1],[46,1],[16,1],[24,4],[27,2],[21,1],[98,1],[107,3],[44,16],[16,1],[28,1],[1,1],[1,2],[1,7],[1,3],[1,1],[1,1],[1,2],[1,2],[1,14],[1,1],[1,1],[1,1],[1,36],[1,1],[1,3],[1,4],[1,1],[1,3],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,13],[1,51],[1,1],[1,1],[1,3],[1,1],[1,3],[1,1],[1,6],[1,2],[1,2],[1,1],[1,3],[1,1],[1,5],[1,3],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,2],[1,4],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,94],[1,6],[1,1],[1,1],[1,1],[1,2],[1,4],[1,5],[1,1],[1,1],[1,2],[1,2],[1,2],[1,1],[1,5],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,3],[1,2],[1,2],[1,1],[1,2],[1,3],[1,1],[1,1],[1,1],[1,2],[1
,1],[1,1],[1,5],[1,2],[1,1],[1,2],[1,2],[1,5],[1,1],[1,2],[1,1],[1,2],[1,2],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,2],[1,4],[1,4],[1,1],[1,28],[1,1],[1,2],[1,3],[1,2],[1,1],[1,1],[1,10],[1,4],[1,4],[1,2],[1,1],[1,3],[1,3],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,3],[1,5],[1,7],[2,1],[2,5],[2,1],[2,3],[2,2],[2,1],[2,2],[2,2],[2,2],[2,1],[2,1],[2,1],[2,2],[2,2],[2,1],[2,1],[2,2],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,3],[2,1],[2,1],[2,7],[2,7],[2,2],[2,4],[2,3],[2,1],[2,2],[2,2],[2,1],[2,1],[2,1],[2,4],[2,1],[2,1],[2,2],[2,5],[2,1],[2,1],[2,1],[2,2],[2,2],[2,2],[2,1],[2,1],[2,1],[2,1],[3,1],[3,1],[3,2],[3,2],[3,1],[3,1],[3,5],[3,5],[3,1],[3,1],[3,10],[3,30],[3,1],[3,1],[3,1],[3,3],[3,1],[3,4],[3,3],[3,3],[3,1],[3,1],[3,2],[3,1],[3,92],[3,1],[4,4],[4,1],[4,2],[4,5],[4,1],[4,2],[4,2],[4,1],[4,4],[4,1],[4,1],[4,1],[5,1],[5,2],[5,1],[5,1],[5,1],[5,4],[5,2],[5,1],[5,10],[6,2],[6,1],[6,1],[6,1],[6,4],[6,2],[6,1],[6,1],[6,2],[7,1],[7,1],[7,1],[7,1],[7,2],[7,1],[7,1],[8,5],[8,1],[8,1],[8,5],[8,5],[8,1],[9,2],[9,1],[9,4],[9,4],[10,1],[10,1],[10,5],[10,5],[10,1],[10,1],[11,1],[11,1],[11,1],[11,2],[12,1],[12,2],[12,2],[12,1],[13,1],[13,1],[13,3],[14,1],[14,22],[14,1],[14,1],[14,2],[20,4],[27,1],[18,2],[49,1],[16,3],[15,1],[18,1],[15,1],[18,1],[15,1],[27,2],[21,1],[23,1],[54,1],[22,1],[46,1],[17,1],[37,7],[17,1],[19,1],[33,2],[62,1],[18,4],[18,1],[24,1],[18,1],[36,1],[20,1],[125,1],[18,13],[36,1],[1,1],[1,2],[1,2],[1,2],[1,1],[1,1],[1,3],[1,4],[1,3],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,10],[1,6],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,2],[1,4],[1,1],[1,3],[1,8],[1,2],[1,4],[1,10],[1,1],[1,71],[1,1],[1,2],[1,18],[1,1],[1,3],[1,2],[1,1],[1,1],[1,2],[1,2],[1,1],[1,34],[1,9],[1,2],[1,7],[1,3],[1,3],[1,3],[1,3],[1,4],[1,1],[1,1],[1,1],[1,2],[1,1],[1,4],[1,5],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,6],[1,1],[1,1],[1,8],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,6],[1,3],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[1,2],[1,9],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,6],[1,1],[1,10],[1,1],[1,10],[1,1],[1,2],[1,2],[1,2],[1,3],[1,1],[1,2],[1,3],[1,2],[1,2],[1,20],[1,2],[1,3],[1,2],[1,1],[1,1],[1,5],[1,1],[1,5],[1,1],[1,1],[1,1],[1,4],[1,1],[1,2],[2,1],[2,1],[2,3],[2,3],[2,2],[2,2],[2,1],[2,2],[2,3],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,10],[2,1],[2,1],[2,6],[2,3],[2,5],[2,3],[2,1],[2,1],[2,11],[2,2],[2,3],[2,2],[2,1],[2,7],[2,1],[2,1],[2,2],[2,1],[2,1],[2,2],[2,2],[2,1],[2,3],[2,1],[2,3],[2,2],[2,1],[2,6],[2,3],[2,1],[2,1],[2,1],[3,4],[3,2],[3,1],[3,8],[3,1],[3,49],[3,2],[3,2],[3,3],[3,1],[3,2],[3,5],[3,3],[3,2],[3,1],[3,3],[3,1],[3,2],[3,13],[3,7],[3,2],[3,1],[4,2],[4,4],[4,1],[4,2],[4,1],[4,1],[4,1],[4,2],[5,1],[5,4],[5,1],[5,1],[5,1],[5,1],[5,1],[5,4],[5,1],[5,2],[6,1],[6,7],[6,1],[6,1],[6,4],[6,2],[6,3],[6,1],[6,9],[7,1],[7,1],[8,3],[8,7],[8,1],[8,2],[8,2],[8,2],[8,8],[8,1],[9,1],[9,1],[9,1],[9,2],[10,1],[11,3],[12,1],[12,1],[12,2],[12,1],[12,3],[13,1],[14,1],[58,1],[21,1],[36,15],[218,1],[34,1],[20,2],[16,2],[28,1],[38,1],[38,3],[16,1],[165,2],[132,1],[19,2],[260,1],[39,2],[64,1],[18,1],[1,1],[1,1],[1,1],[1,12],[1,1],[1,2],[1,1],[1,5],[1,2],[1,2],[1,1],[1,2],[1,1],[1,13],[1,1],[1,2],[1,3],[1,1],[1,1],[1,2],[1,1],[1,2],[1,4],[1,2],[1,5],[1,1],[1,3],[1,2],[1,1],[1,2],[1,6],[1,1],[1,2],[1,2],[1,7],[1,1],[1,1],[1,1],[1,1],[1,1],[1,6],[1,1],[1,1],[1,1],[1,3],[1,6],[1,1],[1,1],[1,1],[1,6],[1,3],[1,2],[1,6],[1,2],[1,1],[1,3],[1,1],[1,2],[1,1],[1,1],[1,2],[1,3]
,[1,1],[1,3],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,6],[1,1],[1,2],[1,63],[1,1],[1,1],[1,1],[1,2],[1,3],[1,1],[1,2],[1,2],[1,1],[1,2],[1,1],[1,1],[1,4],[1,1],[1,2],[1,3],[1,9],[1,2],[1,1],[1,2],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,10],[1,1],[1,2],[1,1],[1,2],[1,2],[1,7],[1,1],[1,8],[1,1],[1,3],[1,5],[1,1],[1,1],[1,1],[1,1],[1,15],[1,6],[1,1],[1,1],[1,422],[1,2],[1,2],[1,4],[1,2],[1,2],[1,3],[1,2],[1,3],[1,1],[1,5],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[2,4],[2,3],[2,1],[2,2],[2,2],[2,3],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,2],[2,2],[2,2],[2,13],[2,11],[2,4],[2,1],[2,2],[2,10],[2,5],[2,2],[2,75],[2,3],[2,1],[2,8],[2,4],[2,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,14],[2,2],[2,15],[2,1],[2,2],[2,4],[2,1],[2,1],[2,2],[2,33],[2,2],[2,1],[2,1],[2,3],[2,2],[2,2],[2,1],[3,1],[3,13],[3,2],[3,1],[3,1],[3,1],[3,1],[3,1],[3,6],[3,7],[3,2],[3,1],[3,3],[3,1],[3,1],[3,1],[3,1],[3,1],[3,2],[3,3],[3,3],[3,2],[3,1],[3,6],[3,2],[3,4],[3,2],[4,4],[4,4],[4,4],[4,4],[4,6],[4,1],[4,1],[4,1],[4,3],[4,1],[4,2],[4,5],[4,1],[5,4],[5,1],[5,2],[5,8],[5,3],[5,1],[5,1],[5,1],[5,1],[5,3],[6,1],[6,3],[6,2],[6,4],[6,1],[6,3],[6,1],[6,1],[7,1],[7,1],[7,1],[7,1],[7,1],[7,1],[7,1],[7,1],[7,3],[8,1],[8,1],[8,1],[8,7],[9,2],[10,2],[10,1],[10,6],[11,1],[11,3],[11,2],[12,1],[12,1],[14,2],[14,6],[17,2],[19,1],[15,1],[112,1],[16,1],[30,6],[19,3],[15,4],[19,2],[25,1],[17,4],[49,1],[48,1],[26,1],[17,9],[43,3],[51,6],[17,1],[21,3],[26,4],[31,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,9],[1,1],[1,753],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,5],[1,1],[1,1],[1,1],[1,1],[1,1],[1,7],[1,2],[1,6],[1,3],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,2],[1,3],[1,4],[1,3],[1,4],[1,1],[1,2],[1,1],[1,6],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,2],[1,1],[1,3],[1,3],[1,3],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,2],[1,1],[1,2],[1,1],[1,2],[1,2],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,2],[1,1],[1,26],[1,3],[1,1],[1,1],[1,4],[1,1],[1,1],[1,5],[1,2],[1,3],[1,1],[1,5],[1,2],[1,2],[1,2],[1,2],[1,1],[1,3],[1,1],[1,1],[1,3],[1,2],[1,1],[1,1],[1,3],[1,1],[1,4],[1,8],[1,10],[1,1],[1,2],[1,6],[1,1],[1,2],[1,2],[1,2],[1,6],[1,1],[1,1],[1,15],[1,2],[2,1],[2,12],[2,1],[2,8],[2,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,20],[2,2],[2,2],[2,1],[2,1],[2,2],[2,2],[2,1],[2,2],[2,1],[2,1],[2,1],[2,3],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,14],[2,2],[2,1],[2,5],[2,5],[2,1],[2,2],[2,2],[2,6],[2,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[3,2],[3,3],[3,3],[3,1],[3,1],[3,1],[3,3],[3,1],[3,1],[3,6],[3,8],[3,1],[3,1],[3,1],[3,3],[3,12],[3,1],[3,1],[3,1],[3,1],[3,6],[3,1],[3,2],[3,1],[3,1],[4,5],[4,1],[4,5],[4,5],[4,29],[4,11],[4,1],[4,1],[4,2],[4,1],[4,1],[5,2],[5,4],[5,1],[5,6],[5,1],[5,1],[5,1],[5,1],[6,1],[6,4],[6,1],[6,4],[6,2],[6,2],[6,1],[6,1],[6,2],[6,1],[7,1],[7,2],[7,1],[7,1],[7,2],[8,3],[8,4],[8,5],[8,7],[8,5],[9,5],[9,1],[9,1],[10,2],[10,2],[10,4],[11,1],[11,1],[12,8],[12,1],[12,1],[13,1],[13,1],[13,2],[14,2],[20,4],[18,3],[65,1],[23,1],[20,3],[237,1],[70,5],[80,2],[71,1],[15,4],[18,8],[54,1],[30,1],[15,2],[26,2],[20,1],[17,1],[26,4],[20,13],[1,2],[1,1],[1,3],[1,1],[1,3],[1,5],[1,3],[1,1],[1,5],[1,1],[1,3],[1,7],[1,2],[1,1],[1,1],[1,1],[1,4],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,2],[1,11],[1,1],[1,6],[1,4],[1,3],[1,3],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,4],[1,1],[1,1],[1,2],[1,3],[1,1],[1,2],[1,7],[1
,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,5],[1,2],[1,1],[1,1],[1,4],[1,1],[1,10],[1,4],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,3],[1,2],[1,2],[1,1],[1,4],[1,1],[1,1],[1,1],[1,3],[1,2],[1,1],[1,2],[1,3],[1,1],[1,2],[1,1],[1,4],[1,1],[1,8],[1,1],[1,1],[1,2],[1,4],[1,1],[1,34],[1,2],[1,2],[1,1],[1,1],[1,4],[1,1],[1,3],[1,7],[1,4],[1,7],[1,7],[1,1],[1,3],[1,1],[1,1],[1,3],[1,4],[1,2],[1,1],[1,1],[1,1],[1,1],[1,14],[1,6],[1,6],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[2,2],[2,1],[2,1],[2,4],[2,2],[2,2],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,2],[2,1],[2,4],[2,1],[2,1],[2,1],[2,1],[2,4],[2,2],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,2],[2,1],[2,2],[2,6],[2,1],[2,1],[2,1],[2,2],[2,2],[3,3],[3,7],[3,4],[3,2],[3,3],[3,1],[3,1],[3,4],[3,1],[3,14],[3,2],[3,5],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,9],[3,25],[3,1],[3,1],[4,1],[4,9],[4,1],[4,3],[4,1],[4,1],[4,12],[4,1],[4,3],[4,7],[4,2],[4,1],[4,1],[4,1],[4,1],[4,1],[5,5],[5,2],[5,1],[5,1],[5,2],[5,5],[5,1],[5,1],[5,1],[5,1],[5,1],[6,5],[6,1],[6,3],[6,1],[6,4],[6,1],[6,1],[6,3],[6,2],[6,1],[7,1],[7,1],[7,1],[7,1],[7,1],[8,2],[8,1],[8,1],[8,1],[8,1],[9,2],[10,374],[10,3],[11,1],[11,1],[11,3],[11,8],[11,4],[12,1],[13,3],[13,2],[13,4],[58,1],[43,1],[38,1],[196,1],[55,3],[15,1],[79,1],[16,5],[20,1],[32,1],[111,1],[68,1],[50,17],[327,47],[46,3],[24,3],[41,2],[65,1],[1,2],[1,14],[1,4],[1,1],[1,2],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,7],[1,4],[1,5],[1,8],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,6],[1,2],[1,1],[1,5],[1,1],[1,3],[1,29],[1,4],[1,2],[1,1],[1,1],[1,4],[1,2],[1,9],[1,5],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,4],[1,2],[1,1],[1,8],[1,2],[1,13],[1,1],[1,1],[1,1],[1,2],[1,2],[1,2],[1,4],[1,6],[1,1],[1,1],[1,3],[1,2],[1,4],[1,2],[1,10],[1,2],[1,2],[1,2],[1,1],[1,4],[1,2],[1,1],[1,5],[1,93],[1,1],[1,1],[1,3],[1,22],[1,1],[1,1],[1,4],[1,2],[1,2],[1,1],[1,1],[1,4],[1,1],[1,6],[1,1],[1,3],[1,4],[1,1],[1,1],[1,2],[1,2],[1,8],[1,3],[1,1],[1,5],[1,6],[1,2],[1,2],[1,1],[1,1],[1,3],[1,1],[1,3],[1,2],[1,1],[1,2],[1,2],[1,2],[1,28],[1,1],[1,6],[1,6],[1,2],[2,1],[2,2],[2,1],[2,2],[2,1],[2,2],[2,6],[2,1],[2,1],[2,2],[2,6],[2,2],[2,2],[2,1],[2,2],[2,2],[2,2],[2,1],[2,2],[2,2],[2,6],[2,3],[2,3],[2,1],[2,2],[2,2],[2,1],[2,1],[2,14],[2,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,9],[2,2],[2,1],[2,5],[2,1],[2,1],[2,3],[2,2],[2,2],[2,7],[2,16],[2,6],[2,2],[2,2],[2,1],[2,2],[3,1],[3,26],[3,1],[3,2],[3,1],[3,1],[3,3],[3,1],[3,3],[3,1],[3,1],[3,4],[3,1],[3,3],[3,3],[3,1],[3,1],[3,1],[3,1],[3,1],[3,12],[3,2],[3,2],[3,4],[3,1],[3,2],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[4,1],[4,1],[4,2],[4,1],[4,1],[4,2],[4,1],[4,1],[4,1],[4,2],[4,1],[4,8],[4,3],[4,1],[4,4],[5,2],[5,2],[5,1],[5,1],[5,1],[5,9],[6,1],[6,2],[6,2],[6,1],[6,1],[6,1],[6,10],[6,1],[7,1],[7,11],[7,4],[7,1],[7,2],[8,2],[8,1],[8,1],[8,1],[8,1],[8,4],[8,7],[9,1],[9,1],[10,2],[10,4],[10,1],[10,1],[11,6],[12,1],[12,1],[12,6],[13,1],[13,5],[13,2],[13,11],[14,8],[14,3],[16,1],[55,1],[17,1],[91,1],[27,1],[16,1],[17,1],[37,1],[54,3],[73,2],[50,1],[19,3],[20,2],[26,1],[55,3],[54,1],[31,1],[68,2],[75,8],[412,1],[21,2],[1,6],[1,1],[1,2],[1,2],[1,4],[1,4],[1,2],[1,6],[1,5],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,9],[1,4],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,6],[1,3],[1,1],[1,2],[1,3],[1,12],[1,16],[1,3],[1,1],[1,1],[1,3],[1,3],[1,502],[1,3],[1,1],[1,1],[1,5],[1,2],[1,2],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,2],[1,6],[1,3],[1,2],[1,1],[1,5],[1,1],[1,6],[1,4],[1,1]
,[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,3],[1,2],[1,1],[1,2],[1,2],[1,2],[1,1],[1,1],[1,1],[1,17],[1,3],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,2],[1,1],[1,4],[1,6],[1,1],[1,1],[1,11],[1,1],[1,4],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,3],[1,1],[1,1],[1,2],[1,3],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,5],[1,2],[2,1],[2,1],[2,3],[2,3],[2,2],[2,2],[2,9],[2,2],[2,1],[2,9],[2,1],[2,2],[2,2],[2,2],[2,5],[2,5],[2,2],[2,1],[2,2],[2,1],[2,1],[2,13],[2,5],[2,2],[2,1],[2,4],[2,1],[2,1],[2,2],[2,1],[2,2],[2,3],[2,3],[2,5],[2,3],[2,3],[2,10],[2,2],[2,2],[2,2],[2,4],[2,1],[2,2],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,3],[3,2],[3,2],[3,1],[3,7],[3,2],[3,2],[3,1],[3,5],[3,2],[3,3],[3,1],[3,8],[3,1],[3,1],[3,2],[3,14],[3,2],[4,2],[4,1],[4,2],[4,3],[4,2],[4,7],[4,1],[4,5],[4,1],[4,3],[4,10],[4,1],[4,2],[4,4],[4,4],[4,1],[5,1],[5,4],[5,2],[5,1],[5,1],[5,2],[5,8],[5,3],[5,1],[5,1],[6,2],[6,2],[6,1],[6,1],[6,1],[6,2],[6,15],[6,39],[6,3],[7,2],[7,1],[7,3],[7,1],[7,1],[8,1],[8,1],[9,2],[9,2],[9,1],[9,1],[10,1],[10,1],[10,1],[11,14],[11,1],[11,3],[11,1],[12,1],[12,1],[13,2],[13,2],[14,8],[16,1],[27,1],[21,5],[18,2],[36,1],[36,3],[28,15],[17,13],[18,7],[17,9],[28,2],[19,2],[27,1],[33,11],[40,2],[17,3],[120,2],[136,4],[21,1],[64,1],[23,3],[81,4],[27,1],[126,15],[17,1],[37,2],[21,1],[22,1],[58,1],[1,85],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,2],[1,2],[1,1],[1,2],[1,3],[1,9],[1,2],[1,3],[1,7],[1,3],[1,2],[1,5],[1,2],[1,1],[1,3],[1,1],[1,1],[1,4],[1,13],[1,74],[1,14],[1,1],[1,1],[1,2],[1,1],[1,2],[1,4],[1,2],[1,5],[1,1],[1,4],[1,1],[1,4],[1,1],[1,1],[1,3],[1,2],[1,79],[1,1],[1,1],[1,6],[1,1],[1,2],[1,7],[1,2],[1,1],[1,2],[1,1],[1,7],[1,1],[1,2],[1,1],[1,4],[1,4],[1,3],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,2],[1,1],[1,2],[1,2],[1,1],[1,2],[1,6],[1,1],[1,8],[1,2],[1,2],[1,1],[1,9],[1,1],[1,2],[1,1],[1,2],[1,3],[1,1],[1,1],[1,1],[1,1],[1,11],[1,1],[1,5],[1,1],[1,4],[1,3],[1,8],[1,4],[1,1],[1,9],[1,1],[1,3],[1,1],[1,4],[1,1],[1,2],[1,3],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,2],[1,3],[1,8],[1,4],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,11],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[2,6],[2,1],[2,3],[2,1],[2,3],[2,7],[2,6],[2,1],[2,2],[2,1],[2,1],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,3],[2,2],[2,1],[2,2],[2,2],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,2],[2,2],[2,1],[2,4],[2,3],[2,2],[2,1],[2,6],[2,1],[2,3],[2,2],[2,2],[2,1],[2,3],[2,1],[2,2],[2,1],[2,1],[2,1],[2,3],[2,2],[2,1],[2,4],[2,5],[2,1],[2,1],[3,1],[3,57],[3,2],[3,1],[3,1],[3,2],[3,3],[3,15],[3,4],[3,1],[3,1],[3,9],[3,10],[3,5],[3,1],[3,4],[3,4],[3,1],[3,1],[3,6],[3,1],[4,2],[4,1],[4,1],[4,2],[4,1],[4,14],[4,3],[4,1],[4,1],[4,3],[4,10],[4,1],[4,2],[5,10],[5,1],[5,1],[5,3],[5,1],[5,5],[5,1],[6,5],[6,4],[6,2],[6,2],[6,3],[6,1],[7,1],[7,1],[7,4],[7,1],[7,2],[7,2],[7,2],[7,2],[8,2],[8,1],[8,4],[8,2],[8,4],[8,1],[9,1],[9,1],[10,3],[10,1],[11,1],[11,1],[12,9],[12,4],[12,2],[13,7],[13,4],[13,2],[13,7],[13,1],[14,1],[14,1],[23,1],[19,2],[16,1],[36,4],[15,4],[22,3],[17,1],[17,2],[38,2],[15,1],[34,1],[29,2],[20,7],[23,4],[44,5],[22,2],[18,1],[1,2],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[1,3],[1,4],[1,2],[1,1],[1,1],[1,3],[1,1],[1,1],[1,9],[1,1],[1,4],[1,2],[1,2],[1,1],[1,5],[1,1],[1,2],[1,1],[1,4],[1,2],[1,2],[1,1],[1,3],[1,3],[1,3],[1,2],[1,3],[1,1],[1,2],[1,5],[1,3],[1,1],[1,4],[1,1],[1,6],[1,4],[1,3],[1,1],[1,2],[1,1],[1,2],[1,2],[1,6],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,3],[1,8],[1,1],[1,
2],[1,5],[1,1],[1,6],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,3],[1,10],[1,3],[1,7],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,3],[1,2],[1,2],[1,1],[1,3],[1,2],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,43],[1,23],[1,2],[1,4],[1,33],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,7],[1,2],[1,4],[1,6],[1,1],[1,1],[1,1],[1,2],[1,7],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,136],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,3],[1,2],[1,1],[1,1],[1,1],[1,20],[2,1],[2,1],[2,16],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,3],[2,2],[2,1],[2,1],[2,2],[2,7],[2,2],[2,1],[2,2],[2,114],[2,1],[2,3],[2,4],[2,1],[2,4],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,6],[2,2],[2,1],[2,1],[2,1],[2,4],[2,1],[2,1],[2,1],[2,4],[2,1],[2,1],[2,1],[2,4],[2,2],[2,4],[2,3],[2,2],[2,1],[3,2],[3,1],[3,1],[3,5],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,8],[3,2],[3,1],[3,2],[3,28],[3,1],[3,118],[3,1],[3,1],[3,2],[3,2],[3,3],[3,8],[3,3],[4,1],[4,2],[4,4],[4,1],[4,1],[4,1],[4,1],[4,1],[4,2],[4,2],[4,1],[4,1],[4,3],[4,1],[4,3],[4,1],[4,1],[4,1],[5,2],[5,1],[5,6],[5,1],[5,4],[5,2],[5,4],[5,1],[5,4],[6,4],[6,1],[6,3],[6,1],[6,2],[6,1],[7,1],[7,3],[7,1],[7,46],[7,2],[7,1],[8,3],[8,6],[8,1],[8,5],[9,12],[9,1],[9,5],[10,3],[10,3],[11,3],[11,7],[12,3],[12,1],[12,1],[13,1],[13,1],[13,2],[13,13],[13,1],[14,1],[14,1],[58,2],[112,1],[18,3],[19,1],[20,1],[18,1],[15,2],[92,1],[50,1],[40,1],[57,5],[19,2],[19,1],[15,4],[16,5],[54,1],[15,1],[1,2],[1,6],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,6],[1,7],[1,1],[1,2],[1,2],[1,3],[1,1],[1,1],[1,1],[1,1],[1,11],[1,3],[1,6],[1,1],[1,1],[1,6],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,12],[1,1],[1,1],[1,1],[1,4],[1,1],[1,4],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,5],[1,2],[1,1],[1,1],[1,2],[1,8],[1,2],[1,1],[1,1],[1,2],[1,1],[1,19],[1,1],[1,1],[1,4],[1,1],[1,4],[1,2],[1,1],[1,4],[1,1],[1,1],[1,2],[1,1],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,3],[1,5],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,1],[1,2],[1,4],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,5],[1,1],[1,2],[1,3],[1,9],[1,26],[1,3],[1,17],[1,1],[1,2],[1,1],[1,5],[1,4],[1,1],[1,1],[1,2],[1,1],[1,3],[1,2],[1,8],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,30],[2,1],[2,4],[2,1],[2,2],[2,1],[2,1],[2,2],[2,3],[2,4],[2,2],[2,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,2],[2,7],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,10],[2,4],[2,1],[2,1],[2,1],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,3],[2,3],[2,7],[2,1],[2,1],[2,2],[2,5],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,4],[2,2],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,3],[3,1],[3,3],[3,1],[3,1],[3,1],[3,2],[3,29],[3,2],[4,2],[4,1],[4,3],[4,1],[4,1],[4,1],[4,1],[4,1],[4,2],[4,1],[4,3],[4,1],[5,2],[5,1],[5,1],[5,4],[5,1],[5,1],[5,2],[5,1],[5,1],[5,3],[6,4],[6,1],[6,1],[6,3],[6,2],[6,2],[6,1],[6,1],[6,1],[6,2],[7,2],[7,3],[7,2],[7,1],[7,2],[8,1],[8,1],[8,4],[8,1],[8,3],[9,1],[9,5],[9,1],[9,1],[9,1],[11,1],[11,2],[11,2],[11,3],[12,7],[12,1],[13,1],[14,2],[16,1],[78,3],[17,3],[27,3],[19,2],[67,3],[16,3],[58,3],[17,1],[29,2],[29,1],[23,1],[390,2],[75,2],[26,8],[20,3],[19,2],[16,4],[33,1],[66,2],[20,1],[17,5],[1,1],[1,2],[1,1],[1,1],[1,9],[1,4],[1,2],[1,3],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,3],[1,1],[1,1],[1,2],[1,1],[1,4],[1,2],[1,1],[1,1],[1,1],[1,4],[1,5],[1,11],[1,1],[1,4],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,3],[1,4],[1,1],[1,2],[1
,3],[1,1],[1,1],[1,3],[1,1],[1,7],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,8],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,6],[1,1],[1,1],[1,6],[1,2],[1,1],[1,11],[1,3],[1,1],[1,2],[1,4],[1,4],[1,1],[1,11],[1,7],[1,3],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,14],[1,1],[1,1],[1,1],[1,4],[1,1],[1,2],[1,3],[1,6],[1,1],[1,1],[1,3],[1,3],[1,2],[1,2],[1,7],[1,5],[1,2],[1,7],[1,7],[1,1],[1,3],[1,2],[1,4],[1,4],[1,3],[1,1],[1,1],[1,4],[1,2],[1,1],[1,1],[1,5],[1,3],[1,1],[1,124],[1,2],[1,6],[1,1],[1,1],[2,1],[2,4],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,5],[2,21],[2,2],[2,1],[2,2],[2,1],[2,2],[2,1],[2,1],[2,7],[2,31],[2,1],[2,2],[2,4],[2,1],[2,3],[2,125],[2,1],[2,8],[2,1],[2,4],[2,2],[2,2],[2,1],[2,1],[2,1],[2,4],[2,5],[2,1],[2,2],[2,2],[2,1],[2,1],[2,1],[2,8],[2,1],[2,12],[2,278],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[3,1],[3,2],[3,1],[3,1],[3,1],[3,2],[3,3],[3,1],[3,1],[3,1],[3,1],[3,3],[3,2],[3,1],[3,1],[3,3],[3,1],[3,3],[3,1],[3,3],[3,1],[3,2],[3,3],[3,1],[4,2],[4,8],[4,1],[4,3],[4,3],[4,1],[4,3],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,2],[4,1],[4,3],[5,1],[5,1],[5,1],[5,2],[5,2],[5,2],[5,1],[6,2],[6,2],[6,24],[6,2],[6,2],[6,20],[6,1],[6,1],[6,3],[6,1],[6,4],[6,5],[6,3],[7,2],[7,1],[7,4],[7,1],[7,1],[7,1],[7,1],[7,1],[7,1],[7,134],[8,1],[8,1],[8,5],[8,1],[8,6],[9,3],[9,15],[10,4],[10,3],[10,1],[11,12],[11,2],[12,2],[12,2],[14,1],[14,6],[15,3],[30,2],[35,1],[28,1],[111,1],[22,1],[25,1],[18,1],[40,4],[58,1],[295,4],[18,3],[35,1],[16,1],[1,1],[1,1],[1,2],[1,1],[1,6],[1,6],[1,2],[1,1],[1,301],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,1],[1,5],[1,1],[1,2],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,3],[1,5],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,3],[1,2],[1,1],[1,7],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,5],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,17],[1,1],[1,1],[1,2],[1,2],[1,4],[1,3],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[1,3],[1,3],[1,2],[1,1],[1,23],[1,1],[1,1],[1,1],[1,1],[1,3],[1,4],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,4],[1,4],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,3],[1,4],[1,1],[1,1],[1,1],[1,2],[1,1],[1,3],[1,2],[1,2],[1,1],[1,1],[1,3],[1,15],[1,4],[1,1],[1,1],[1,3],[1,3],[1,1],[1,2],[1,2],[1,6],[1,1],[1,2],[1,1],[1,2],[1,2],[1,2],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,10],[2,3],[2,1],[2,1],[2,2],[2,7],[2,1],[2,1],[2,4],[2,1],[2,2],[2,1],[2,2],[2,2],[2,1],[2,1],[2,3],[2,6],[2,1],[2,1],[2,46],[2,1],[2,3],[2,1],[2,4],[2,1],[2,1],[2,1],[2,1],[2,2],[2,4],[2,4],[2,3],[3,11],[3,1],[3,1],[3,1],[3,1],[3,2],[3,1],[3,2],[3,4],[3,1],[3,1],[3,1],[3,3],[3,2],[3,1],[3,2],[3,2],[3,2],[3,1],[3,3],[3,1],[3,2],[3,2],[3,4],[3,1],[3,45],[3,2],[4,11],[4,2],[4,1],[4,2],[4,4],[4,14],[4,4],[4,2],[4,2],[4,1],[5,3],[5,1],[5,1],[5,2],[5,1],[5,2],[5,3],[5,2],[5,1],[5,2],[5,2],[6,1],[6,1],[6,3],[6,2],[6,1],[6,3],[6,1],[6,6],[7,1],[7,2],[7,1],[8,1],[8,2],[8,1],[8,1],[8,1],[8,2],[8,2],[8,2],[9,5],[9,2],[10,1],[10,1],[10,3],[11,8],[11,1],[12,5],[12,1],[14,1]])\n \n ida.scatter_plot(data, '{0}/faithful_ida_scatter.png'.format(output_dir))\n ida.histogram(data, '{0}/faithful_ida_hist.png'.format(output_dir))\n ida.linear_regression(data, '{0}/faithful_ida_regression.png'.format(output_dir))\n\n #clustering\n km2 = __run_clustering(data, output_dir)\n\n #expectation-maximization\n __run_em(data, output_dir, km2)\n\n #build bayes fmm model\n __run_bayesfmm(data, iterations, 
save_diagnostics, output_dir, burnin, km2)", "def results():\n\n # # 1. tau_e graph\n # # ------------------------------------------------------------\n\n tau_es = np.load(datapath / \"tau_es.npy\", allow_pickle=True)\n\n # I want to plot tau_e against b for various Ns. Annoyingly this\n # means I have to do some index juggling.\n\n # This is all because of the way I set up datagen.DataSet... oh well.\n\n for i, N in enumerate(Ns):\n\n # values to plot against b for the specific N\n vals = []\n\n for j, b in enumerate(bs):\n\n k = Nb_to_ks[i][j]\n vals.append(tau_es[k])\n\n plt.plot(bs, vals, \"-\")\n\n plt.title(\"Auto-correlation e-folding timelag for \"\n \"variable temperatures, grid sizes\")\n\n plt.xlabel(\"$\\\\beta$\")\n plt.ylabel(\"$\\\\tau_e$\")\n\n plt.legend([f\"N={N}\" for N in Ns])\n\n plt.savefig(resultspath / \"tau_es.pdf\")\n # plt.show()\n plt.close()\n\n # 2. magnetisation graphs\n # ------------------------------------------------------------\n\n mags_list = [np.load(datapath / f\"mags-{k}.npy\") for k in range(kcount)]\n\n for i, N in enumerate(Ns):\n\n plt.title(f\"Square magnetisations N={N}\")\n plt.xlabel(\"t\")\n plt.ylabel(\"M\")\n\n for j, b in enumerate(bs):\n\n c = np.max([0, np.min([1, 10 * (b - 0.4)])])\n\n k = Nb_to_ks[i][j]\n vals = np.mean(mags_list[k]**2, axis=1)\n plt.plot(vals, color=(1 - c, 0, c))\n\n plt.savefig(resultspath / f\"mags-{N}.pdf\")\n # plt.show()\n plt.close()\n\n # 3. autoc graphs\n # ------------------------------------------------------------\n\n autocs_list = [\n np.load(datapath / f\"autocs-{k}.npy\") for k in range(kcount)]\n\n for i, N in enumerate(Ns):\n\n plt.figure(figsize=(8, 6))\n plt.axes(position=[.05, .05, .8, .9])\n\n plt.title(f\"Auto-correlation of $|M|$ with N={N}\")\n plt.xlabel(\"$ \\\\tau $\")\n plt.ylabel(\"$ A(\\\\tau) $\")\n\n for j, b in enumerate(bs):\n\n c = np.max([0, np.min([1, 10 * (b - 0.4)])])\n\n k = Nb_to_ks[i][j]\n autocs = np.load(datapath / f\"autocs-{k}.npy\")\n\n iternum = autocs.shape[0]\n sysnum = autocs.shape[1]\n vals = np.mean(autocs, axis=1)\n errs = np.std(autocs, axis=1, ddof=1) / np.sqrt(sysnum)\n\n plt.errorbar(range(iternum), vals, errs,\n color=(1 - c, 0, c), ecolor=(1 - c, 0, c, 0.4),\n elinewidth=1.5)\n\n # plt.plot(np.log(vals))\n\n plt.legend(bs, loc='center left', bbox_to_anchor=(1, 0.5))\n\n plt.savefig(resultspath / f\"autocs-{N}.pdf\")\n # plt.show()\n plt.close()", "def main(opt):\n\n outputDir = \"processedOutput\"\n os.makedirs(outputDir, exist_ok=True)\n\n print(\"-------------------\")\n print(\"Processing results:\")\n print(\"-------------------\")\n \n cuda = torch.cuda.is_available()\n\n hr_shape = (opt.hr_height, opt.hr_width)\n\n # Count the number of unique residual layers mentioned in the generator state dict:\n generatorStateDict = torch.load(GetModelDataPath(\"generator\")) # Load the max trained weights from the /saved_models directory\n resBlocks = {}\n for key in generatorStateDict:\n processedKey = re.split(r'^(res_blocks\\.[0-9].)', key)\n if len(processedKey) > 1:\n resBlocks[processedKey[1]] = processedKey[1] # Insert an arbitrary entry: We just care about counting the unique keys\n\n num_residual_blocks = len(resBlocks)\n print(\"Counted \" + str(num_residual_blocks) + \" residual blocks in loaded generator state dict\")\n\n # Initialize generator and discriminator\n generator = GeneratorResNet(n_residual_blocks=num_residual_blocks)\n \n if cuda:\n print(\"Cuda is supported!!!\")\n torch.cuda.empty_cache()\n\n generator = generator.cuda()\n\n # Load 
pretrained models\n generator.load_state_dict(generatorStateDict)\n\n Tensor = torch.cuda.FloatTensor if cuda else torch.Tensor\n\n\n #----------------\n # Process images:\n #----------------\n print(\"Processing images using the trained model:\")\n\n torch.cuda.empty_cache()\n\n testStartTime = time.time()\n totalTestTime = 0\n numTests = 0\n\n with torch.no_grad(): # Prevent OOM errors\n\n # Set models to eval mode, so batchnorm is disabled\n generator.eval()\n\n dataPath = GetDataPath(opt.valid_dataset_name)\n\n dataloader = DataLoader(\n ImageLoader(dataPath),\n batch_size=opt.batch_size,\n shuffle=False,\n num_workers=opt.n_cpu,\n )\n\n # Process:\n for i, imgs in enumerate(dataloader):\n testStartTime = time.time()\n\n # Configure model input\n imgs_lr = Variable(imgs[\"img\"].type(Tensor))\n\n # Generate a high resolution image from low resolution input\n gen_hr = generator(imgs_lr)\n\n # --------------\n # Log Progress\n # --------------\n testTime = time.time() - testStartTime\n sys.stdout.write(\n \"[Processed image %d/%d] [Test time: %fs]\\n\"\n % (i, len(dataloader), testTime)\n )\n \n gen_hr = make_grid(gen_hr, nrow=1, normalize=True)\n\n save_image(gen_hr, GetArbitraryPath(outputDir) + (\"0\" if i < 10 else \"\") + \"%d.png\" % (i + 1), normalize=False)\n\n # Record the iteration time:\n totalTestTime = totalTestTime + testTime\n numTests = numTests + 1\n\n\n # ------------\n # Print stats:\n # ------------\n testTime = time.time() - testStartTime\n averageTestTime = totalTestTime / numTests\n\n print(\"\\Processing results:\\n-------------\")\n print(\"Total processing time = \" + str(testTime) + \" (secs) for \" + str(len(dataloader.dataset)) + \" test images\")\n print(\"Average processing time = \" + str(averageTestTime) + \" (secs)\")", "def gen_test_output(sess, logits, keep_prob, image_pl, data_folder, image_shape, samples_limit=None):\n\n image_paths = glob(os.path.join(data_folder, 'image_2', '*.png'))\n\n if samples_limit:\n image_paths = image_paths[0:samples_limit]\n\n for image_file in tqdm(image_paths, desc='Processing: ', unit='images', total=len(image_paths)):\n yield process_image_file(image_file, sess, logits, keep_prob, image_pl, image_shape)", "def main():\n dataset_idx = [11]\n network_idx = [0]\n reshape_input = [False]\n output_idxs = [0, 1]\n lrs = [0, 1, 2]\n dataset_ft_idx = [0,1,2,3]\n counter_exp = 0\n freeze = [0]\n percentages = [12]\n for dts in range(len(dataset_idx)):\n for nt in range(len(network_idx)):\n for opt in output_idxs:\n for dft in dataset_ft_idx:\n for pr in percentages:\n for rsi in range(len(reshape_input)):\n for fr in freeze:\n for lr in lrs:\n config = configuration(dataset_idx=dataset_idx[dts],\n network_idx=network_idx[nt],\n output_idx=opt,\n usage_modus_idx=5,\n dataset_fine_tuning_idx=dft,\n reshape_input=reshape_input[rsi],\n learning_rates_idx=lr,\n name_counter=counter_exp,\n freeze=fr,\n percentage_idx=pr,\n fully_convolutional=False)\n\n setup_experiment_logger(logging_level=logging.DEBUG,\n filename=config['folder_exp'] + \"logger.txt\")\n\n logging.info('Finished')\n\n modus = Modus_Selecter(config)\n\n # Starting process\n modus.net_modus()\n counter_exp += 1\n\n\n return", "def genInput(tasksInfo, results, origLocation, destLocation, typeGray, samplSize = 0.10):\n print '\\nbegin of genInput\\n'\n # Training / aplication\n treina = True\n verdade = False\n\n # Sampling pixels from image\n sampl = True\n if sampl == True:\n buildSampl = True\n else:\n buildSampl = False\n\n # Write data to file\n if treina:\n 
outInput = open('trainInput.dat', 'w')\n #~ outInput1par = open('trainInput1par.dat', 'w')\n outOutput = open('trainOutput.dat', 'w')\n outTasks = open('trainTasks.dat', 'w')\n #~ outOutputClass = open('trainOutputClass.dat', 'w')\n selecOut = open('selected.dat', 'w')\n else:\n outInput = open('aplicInput.dat', 'w')\n outTasks = open('aplicTasks.dat', 'w')\n #~ outInput1par = open('aplicInput1par.dat', 'w')\n if verdade:\n outOutput = open('verdadeOutput.dat', 'w')\n #~ outOutputClass = open('verdadeOutputClass.dat', 'w')\n\n #Setting info on temporary directory for images\n numberImages = 12\n tmpImg = []\n for i in range(numberImages):\n tmpImg.append(destLocation+\"tmpImg_n\"+str(i+1).zfill(2)+\"/\")\n\n imgFile = []\n imgFile.append('2011352')\n imgFile.append('2011353')\n imgFile.append('2011355')\n imgFile.append('2011357')\n imgFile.append('2011358')\n imgFile.append('2011359')\n imgFile.append('2011360')\n imgFile.append('2011361')\n imgFile.append('2011362')\n imgFile.append('2011363')\n imgFile.append('2011364')\n imgFile.append('2011365')\n\n #If we need to skip line\n finishLine = True\n #Getting number of tasks\n numberTasks = len(tasksInfo)\n print 'number of tasks: ', numberTasks\n for task in range(numberTasks):\n #Geting the selected day for each task\n taskId = tasksInfo[task]['taskId']\n for img in range(numberImages):\n imgName = tmpImg[img] + str(taskId) + '.tif'\n #Openning image (and testing)\n if os.path.exists(imgName) is False:\n print 'INPUT -> Task miss: ' + str(taskId) + ' Image: ' + str(img) + ' Name: ' + imgName\n finishLine = False\n continue\n print 'INPUT -> Task: ' + str(taskId) + ' Image: ' + str(img)\n fileSat = gdal.Open(imgName, GA_ReadOnly)\n if fileSat is None:\n print 'Could not open ' + imgName\n sys.exit(1)\n # Read band values from image\n rows = fileSat.RasterYSize\n cols = fileSat.RasterXSize\n R_band_sat = fileSat.GetRasterBand(1)\n G_band_sat = fileSat.GetRasterBand(2)\n B_band_sat = fileSat.GetRasterBand(3)\n R_data_sat = R_band_sat.ReadAsArray(0, 0, cols, rows)\n G_data_sat = G_band_sat.ReadAsArray(0, 0, cols, rows)\n B_data_sat = B_band_sat.ReadAsArray(0, 0, cols, rows)\n #Closing image\n fileSat = None\n\n #If we are sampling the image, then we'll pick our samples\n print 'sampl: ', sampl\n print 'buildSampl: ', buildSampl\n if ((sampl == True) and (buildSampl == True)):\n universe = []\n samplList = []\n random.seed(8225)\n for i in range(rows):\n for j in range(cols):\n universe.append([i,j])\n sizeUniverse = len(universe)\n samplSizeInt = int(samplSize * sizeUniverse)\n print 'Sampling mode activated.'\n print 'Using ', samplSizeInt, ' out of ', sizeUniverse, ' pixels.'\n for i in range(samplSizeInt):\n samplList.append(universe.pop(random.randint(0,len(universe)-1)))\n buildSampl = False\n\n sumValueGray = 0.0\n if (sampl == False):\n #Working with the values\n for i in range(rows):\n for j in range(cols):\n #~ valueString = str(float(R_data_sat[i,j])/255.0)+' '+str(float(G_data_sat[i,j])/255.0)+' '+str(float(B_data_sat[i,j])/255.0)\n valueGray = rgb2gray((float(R_data_sat[i,j])/255.0),(float(G_data_sat[i,j])/255.0),(float(B_data_sat[i,j])/255.0),typeGray)\n sumValueGray = sumValueGray + valueGray\n valueString = str(taskId)+' '+str(valueGray)\n #~ outInput.write(\"%s \"%valueString)\n sumValueString = str(taskId)+' '+str(sumValueGray/(rows*cols))\n #~ outInput1par.write(\"%s \"%sumValueString)\n outInput.write(\"%s \"%sumValueString)\n else:\n #Working with the sampled values\n for idx in range(samplSizeInt):\n i = 
samplList[idx][0]\n j = samplList[idx][1]\n valueGray = rgb2gray((float(R_data_sat[i,j])/255.0),(float(G_data_sat[i,j])/255.0),(float(B_data_sat[i,j])/255.0),typeGray)\n sumValueGray = sumValueGray + valueGray\n valueString = str(valueGray)\n #~ outInput.write(\"%s \"%valueString)\n sumValueString = str(sumValueGray/samplSizeInt)\n #~ outInput1par.write(\"%s \"%sumValueString)\n outInput.write(\"%s \"%sumValueString)\n\n #If we did not had a problem with missing task\n if finishLine == True:\n #Closing the line of the file\n outInput.write(\"\\n\")\n #~ outInput1par.write(\"\\n\")\n outTasks.write(str(taskId)+\"\\n\")\n else:\n finishLine = True\n\n #If we are training (or we know the truth), then we also generate the truth\n if treina or verdade:\n selecName = '/home/eduardo/ForestWatchers/ann2besttile/results/tmpMosaic_n0/' + str(taskId) + '.tif'\n #Openning image (and testing)\n if os.path.exists(selecName) is False:\n print 'OUTPUT -> Task miss: ' + str(taskId)\n continue\n #~ fileSelec = gdal.Open(selecName, GA_ReadOnly)\n #~ if fileSelec is None:\n #~ print 'Could not open ' + selecName\n #~ sys.exit(1)\n #~ # Read band values from image\n #~ rows = fileSelec.RasterYSize\n #~ cols = fileSelec.RasterXSize\n #~ R_band_selec = fileSelec.GetRasterBand(1)\n #~ G_band_selec = fileSelec.GetRasterBand(2)\n #~ B_band_selec = fileSelec.GetRasterBand(3)\n #~ R_data_selec = R_band_selec.ReadAsArray(0, 0, cols, rows)\n #~ G_data_selec = G_band_selec.ReadAsArray(0, 0, cols, rows)\n #~ B_data_selec = B_band_selec.ReadAsArray(0, 0, cols, rows)\n #~ #Closing image\n #~ fileSelec = None\n#~ \n #~ if (sampl == False):\n #~ #Working with the values\n #~ for i in range(rows):\n #~ for j in range(cols):\n #~ valueGray = rgb2gray((float(R_data_selec[i,j])/255.0),(float(G_data_selec[i,j])/255.0),(float(B_data_selec[i,j])/255.0),'gleam')\n #~ valueString = str(valueGray)\n #~ outOutput.write(\"%s \"%valueString)\n #~ else:\n #~ #Working with the values\n #~ for idx in range(samplSizeInt):\n #~ i = samplList[idx][0]\n #~ j = samplList[idx][1]\n #~ valueGray = rgb2gray((float(R_data_selec[i,j])/255.0),(float(G_data_selec[i,j])/255.0),(float(B_data_selec[i,j])/255.0),'gleam')\n #~ valueString = str(valueGray)\n #~ outOutput.write(\"%s \"%valueString)\n#~ \n #~ #Closing line of the file\n #~ outOutput.write(\"\\n\")\n\n selectedTile = results[task].index(max(results[task]))\n if selectedTile == 0:\n selectedName = str(taskId) + ' 2011352'\n selectedFile = '1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0'\n elif selectedTile == 1:\n selectedName = str(taskId) + ' 2011353'\n selectedFile = '0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0'\n elif selectedTile == 2:\n selectedName = str(taskId) + ' 2011355'\n selectedFile = '0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0'\n elif selectedTile == 3:\n selectedName = str(taskId) + ' 2011357'\n selectedFile = '0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0'\n elif selectedTile == 4:\n selectedName = str(taskId) + ' 2011358'\n selectedFile = '0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0'\n elif selectedTile == 5:\n selectedName = str(taskId) + ' 2011359'\n selectedFile = '0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0'\n elif selectedTile == 6:\n selectedName = str(taskId) + ' 2011360'\n selectedFile = '0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0'\n elif selectedTile == 7:\n selectedName = str(taskId) + ' 2011361'\n selectedFile = '0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0'\n elif selectedTile == 8:\n selectedName = str(taskId) + ' 2011362'\n selectedFile 
= '0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0'\n elif selectedTile == 9:\n selectedName = str(taskId) + ' 2011363'\n selectedFile = '0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0'\n elif selectedTile == 10:\n selectedName = str(taskId) + ' 2011364'\n selectedFile = '0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0'\n elif selectedTile == 11:\n selectedName = str(taskId) + ' 2011365'\n selectedFile = '0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0'\n #~ outOutputClass.write(\"%s\\n\"%selectedFile)\n outOutput.write(\"%s\\n\"%selectedFile)\n selecOut.write(\"%s\\n\"%selectedName)\n\n # Close files\n outInput.close()\n outTasks.close()\n #~ outInput1par.close()\n if treina or verdade:\n outOutput.close()\n #~ outOutputClass.close()\n selecOut.close()\n\n statusGenInput = 0\n print '\\nend of genInput\\n'\n return statusGenInput", "def processImages(self):\n for file in os.listdir(self.config[\"tempPath\"]):\n self.logger.debug(\"Calling generateImages for the file: {0}\".format(file))\n self.generateText(file)", "def two_step_generator(classes: list, paths_list: list, imgs_per_class: int, shape: tuple,\n nb_win: int, greys: bool, nb_to_gen: int, img_gen: ImageDataGenerator) -> list:\n \n datawin = list() \n datagen = list()\n \n for class_ in classes:\n print(class_)\n \n # Images paths list\n class_imgs_path = [paths_list[k] for k in range(len(paths_list)) if class_ in paths_list[k]]\n\n # Randomly choose images\n class_imgs_subset = np.random.choice(class_imgs_path, size=imgs_per_class, replace=False)\n\n # Get images\n class_imgs = get_imgs(class_imgs_subset)\n\n # Step 1: resize and crop on sliding windows\n class_new_imgs = create_windows_imgs(class_imgs, shape=shape, nb_win=nb_win, greys=greys)\n class_new_imgs = np.array(flat_list(class_new_imgs))\n datawin.append(class_new_imgs)\n \n # Step 2: DataGenerator\n class_datagen = datagen_class(class_new_imgs, nb_to_gen, img_gen)\n class_datagen = class_datagen.astype(int)\n\n datagen.append(class_datagen)\n \n return datawin, datagen", "def image_generator(img_list):\n while True:\n img = random.choice(img_list)\n label = os.path.basename(os.path.dirname(img)) # add label function according to the dataset tree\n img = preprocess_image(img)\n yield img, label", "def fn_intensitytrace(file_name,folder,data,time,i,x,y):\n import numpy as np\n import matplotlib.pyplot as plt\n\n\n figure_name=file_name+'_intensity_trace'\n for a in range(0,len(data),int(len(data)/2)):\n if i==a:\n x_coord=x[i+1]\n y_coord=y[i+1]\n max_int=np.max(data[i])\n min_int=np.min(data[i])\n #norm_int = [b / max_int for b in data[i]]\n plt.figure()\n #plt.plot(time[0:len(time)-1],norm_int,'g')\n plt.plot(time[0:len(time)-1],data[i],'g')\n plt.xlim(0, 100)\n plt.ylim(min_int, (max_int+100))\n plt.xlabel('Time (s)', fontname='Arial', fontsize=12)\n plt.ylabel('Photon counts (photons)', fontname='Arial', fontsize=12)\n plt.xticks(fontname='Arial',fontsize=12)\n plt.yticks(fontname='Arial', fontsize=12)\n plt.savefig(folder+'/Figures/PDFs'+ '/' + figure_name + '_'+str(x_coord)+','+str(y_coord)+'.pdf', dpi=500)\n plt.savefig(folder+'/Figures/PNGs'+ '/' + figure_name + '_'+str(x_coord)+','+str(y_coord)+'.png', dpi=500)\n\n return (plt.show())", "def main():\n feature_extraction_model = \"HOG\"\n dimension_reduction_model = \"PCA\"\n k_value = get_input_k(\"k\")\n K_value = get_input_k(\"K\")\n folder = get_input_folder(\"Folder\")\n dim_k_value = 40\n\n query_images = get_input_image_list(folder)\n start = time.time()\n dim_red = 
DimensionReduction(feature_extraction_model, dimension_reduction_model, dim_k_value, folder_metadata=folder,\n metadata_collection=\"labelled\")\n obj_feat = dim_red.get_object_feature_matrix()\n features_list = np.array(obj_feat['featureVector'].tolist())\n images_list = np.array(obj_feat['imageId'])\n cos_sim = cosine_similarity(features_list)\n\n sim_graph = sim_graph_from_sim_max(cos_sim, images_list, k_value)\n results = ppr(sim_graph, images_list, query_images)\n results = results[:K_value]\n\n print(\"Top {} images from Personalized page Rank are:\".format(K_value))\n for r in results:\n r[\"path\"] = os.path.abspath(os.path.join(folder, r['imageId']))\n print(r)\n\n query_images_list = [os.path.abspath(os.path.join(folder, img)) for img in query_images]\n title = {\"Model\": \"Personalized Page Rank\", \"k\": k_value, \"K\": K_value}\n show_images_ppr(query_images_list, title, results)\n print(\"Execution time: {} seconds\".format(time.time() - start))", "def generator(samples, batch_size=32):\n \n num_samples = len(samples)\n while 1: # Loop forever so the generator never terminates\n samples = shuffle(samples)\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset+batch_size]\n \n images = []\n angles = []\n for batch_sample in batch_samples:\n \n name = \"./training_udacity/IMG/\"+batch_sample[0].strip().split('/')[-1]\n\n center_image = cv2.imread(name)\n center_image = cv2.cvtColor(center_image, cv2.COLOR_BGR2RGB)\n\n center_angle = float(batch_sample[3])\n throttle = float(batch_sample[4])\n brake = float(batch_sample[5])\n speed = float(batch_sample[6])\n \n images.append(center_image)\n angles.append(center_angle)\n \n augment = True\n if augment:\n # 1. Add Flipped Picture\n image_flipped = np.fliplr(center_image)\n measurement_flipped = -center_angle\n \n images.append(image_flipped)\n angles.append(measurement_flipped)\n \n # 2. Handle left and right Images\n # create adjusted steering measurements for the side camera images\n correction = 0.4\n steering_left = center_angle + correction\n steering_right = center_angle - correction\n \n left_name = \"./training_udacity/IMG/\"+batch_sample[1].strip().split('/')[-1]\n right_name = \"./training_udacity/IMG/\"+batch_sample[2].strip().split('/')[-1]\n\n img_left = cv2.imread(left_name)\n img_left = cv2.cvtColor(img_left, cv2.COLOR_BGR2RGB)\n\n img_right = cv2.imread(right_name)\n img_right = cv2.cvtColor(img_right, cv2.COLOR_BGR2RGB)\n\n images.append(img_left)\n images.append(img_right)\n \n angles.append(steering_left)\n angles.append(steering_right)\n\n# Sanity check the code above by plotting each picture\n# fig = plt.figure()\n# plt.imshow(center_image)\n# plt.axis('off')\n# fig.savefig(\"center.jpg\")\n#\n# fig = plt.figure()\n# plt.imshow(image_flipped)\n# plt.axis('off')\n# fig.savefig(\"flipped.jpg\")\n#\n# fig = plt.figure()\n# plt.imshow(img_left)\n# plt.axis('off')\n# fig.savefig(\"left.jpg\")\n#\n# fig = plt.figure()\n# plt.imshow(img_right)\n# plt.axis('off')\n# fig.savefig(\"right.jpg\")\n\n X_train = np.array(images)\n y_train = np.array(angles)\n \n yield shuffle(X_train, y_train)" ]
[ "0.65730345", "0.64085895", "0.6363845", "0.63635534", "0.62890536", "0.62543195", "0.62491566", "0.6223364", "0.6188411", "0.618029", "0.61280805", "0.61128044", "0.61127687", "0.6095602", "0.60937095", "0.60769194", "0.6063557", "0.6053544", "0.6050049", "0.60302", "0.60264045", "0.6025254", "0.60059416", "0.60038334", "0.599498", "0.5965913", "0.5965409", "0.59513545", "0.59498006", "0.5939543" ]
0.7101953
0
This function plots an image and the image's average intensity over time
def plot_image_and_brightness(axis, image, imageintensity, framecount): # Plot RGB Image axis[0].imshow(image) axis[0].axis('off') axis[0].set_title(f'Frame Number {framecount}') # Plot intensity axis[1].plot(imageintensity, '.-') axis[1].set_ylabel('Average Intensity') # Stuff to show and stream plot plt.show(block=False) plt.pause(0.001)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def imshow(img):\n imadd(img)\n plt.ion()\n plt.show()", "def fn_intensitytrace(file_name,folder,data,time,i,x,y):\n import numpy as np\n import matplotlib.pyplot as plt\n\n\n figure_name=file_name+'_intensity_trace'\n for a in range(0,len(data),int(len(data)/2)):\n if i==a:\n x_coord=x[i+1]\n y_coord=y[i+1]\n max_int=np.max(data[i])\n min_int=np.min(data[i])\n #norm_int = [b / max_int for b in data[i]]\n plt.figure()\n #plt.plot(time[0:len(time)-1],norm_int,'g')\n plt.plot(time[0:len(time)-1],data[i],'g')\n plt.xlim(0, 100)\n plt.ylim(min_int, (max_int+100))\n plt.xlabel('Time (s)', fontname='Arial', fontsize=12)\n plt.ylabel('Photon counts (photons)', fontname='Arial', fontsize=12)\n plt.xticks(fontname='Arial',fontsize=12)\n plt.yticks(fontname='Arial', fontsize=12)\n plt.savefig(folder+'/Figures/PDFs'+ '/' + figure_name + '_'+str(x_coord)+','+str(y_coord)+'.pdf', dpi=500)\n plt.savefig(folder+'/Figures/PNGs'+ '/' + figure_name + '_'+str(x_coord)+','+str(y_coord)+'.png', dpi=500)\n\n return (plt.show())", "def show(image):\n fig = pyplot.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=mpl.cm.Greys)\n imgplot.set_interpolation('nearest')\n ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n pyplot.show()", "def plot_img():\n plt.subplot(121)\n plt.imshow(data.data.numpy()[0,].squeeze())\n plt.subplot(122)\n plt.imshow(dec_mean.view(-1,28,28).data.numpy()[0,].squeeze())\n\n plt.show()\n plt.pause(1e-6)\n plt.gcf().clear()\n sample = model.sample_z(data) \n plt.imshow(sample)", "def matplotlibDisplay(img, title=\"Image\", colorFlag = 'gray'):\n plt.imshow(img, colorFlag)\n plt.title(title)\n plt.xticks([])\n plt.yticks([])\n plt.show()", "def plot_image_sequence(self):\r\n\r\n imv = pg.ImageView()\r\n\r\n imv.show()\r\n\r\n imv.setImage(self.imageData)\r\n\r\n self.layout.addWidget(imv, 0, 0)\r\n\r\n\r\n\r\n avgImage = np.mean(self.imageData, axis=0)\r\n\r\n ima = pg.ImageView()\r\n\r\n ima.setImage(avgImage)\r\n\r\n self.layout.addWidget(ima, 1, 0)", "def show(image):\n fig = pyplot.figure()\n axis = fig.add_subplot(1, 1, 1)\n imgplot = axis.imshow(image)\n imgplot.set_interpolation('nearest')\n axis.xaxis.set_ticks_position('top')\n axis.yaxis.set_ticks_position('left')\n pyplot.show()", "def show(image):\n from matplotlib import pyplot\n import matplotlib as mpl\n fig = pyplot.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=mpl.cm.Greys)\n imgplot.set_interpolation('nearest')\n ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n pyplot.show()", "def show(image):\n from matplotlib import pyplot\n import matplotlib as mpl\n fig = pyplot.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=mpl.cm.Greys)\n imgplot.set_interpolation('nearest')\n ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n pyplot.show()", "def show(image):\n from matplotlib import pyplot\n import matplotlib as mpl\n fig = pyplot.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=mpl.cm.Greys)\n imgplot.set_interpolation('nearest')\n ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n pyplot.show()", "def dat_imshow(x):\n plt.imshow(x,interpolation='nearest',aspect='auto')", "def fdplot(self, imx):\n fig = plt.figure()\n maxval = np.max(imx)\n ims = list(map(lambda im: [plt.imshow(np.fabs(im),norm=colors.Normalize(0.0,maxval))], imx))\n animation = anim.ArtistAnimation(fig,ims,interval=50)\n plt.show()", "def display_images():\n vc = 
cv2.VideoCapture(0) # Open webcam\n figure, ax = plt.subplots(1, 2, figsize=(10, 5)) # Intiialise plot\n\n count = 0 # Counter for number of aquired frames\n intensity = [] # Append intensity across time\n\n # For loop over generator here\n intensity.append(imageintensity)\n plot_image_and_brightness() # Call plot function\n count += 1\n\n # This triggers exit sequences when user presses q\n if cv2.waitKey(1) & 0xFF == ord('q'):\n # Clean up here\n plt.close('all') # close plots\n generator.close() # Use generator exit for clean up,\n break # break loop", "def img_show(img, counter, mode, RGB):\n plt.figure(counter)\n plt.axis('off')\n if not RGB:\n img_aux = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = img_aux\n if mode is None:\n plt.imshow(img)\n else:\n plt.imshow(img, cmap=mode)\n plt.show()\n return counter + 1", "def plot_data(self):\n # plot every log image\n for log_img in self.log_img_map.itervalues():\n log_img.plot()", "def show_image(image):\r\n plt.imshow(image, cmap='gray')\r\n plt.show()", "def plot_image(image):\n plt.imshow(image, cmap=\"gray\", interpolation=\"nearest\")\n plt.axis(\"off\")", "def visualizeImg(img):\n plt.figure(figsize=(10,4))\n plt.imshow(img)\n plt.show()", "def plotSate(s,i,seed):\r\n fig, ax = plt.subplots()\r\n\r\n im = ax.imshow(s)\r\n\r\n plt.xticks([i for i in range(dim)], \"\")\r\n plt.yticks([i for i in range(dim)], \"\")\r\n\r\n fig.tight_layout()\r\n plt.savefig(\"Systems/\" + str(dim) + \"_\" + str(seed) + \"/Images/\" + str(i) +\r\n \".jpeg\",quality=80,optimize=True,\r\n dpi=80,progressive=True,transparent=True)\r\n fig.clear()\r\n plt.close(fig)", "def show_shot(path_to_images, name_image):\n crrt_image = misc.imread(\"./{}/{}\".format(path_to_images, name_image))\n\n plt.imshow(crrt_image)\n\n plt.draw()\n plt.pause(0.5)", "def plot_i(im, Prior, nit, chi2_1, chi2_2, ipynb=False):\n\n plt.ion()\n plt.pause(0.00001)\n plt.clf()\n\n plt.imshow(im.reshape(Prior.ydim,Prior.xdim), cmap=plt.get_cmap('afmhot'), interpolation='gaussian')\n xticks = ticks(Prior.xdim, Prior.psize/RADPERAS/1e-6)\n yticks = ticks(Prior.ydim, Prior.psize/RADPERAS/1e-6)\n plt.xticks(xticks[0], xticks[1])\n plt.yticks(yticks[0], yticks[1])\n plt.xlabel('Relative RA ($\\mu$as)')\n plt.ylabel('Relative Dec ($\\mu$as)')\n plt.title(\"step: %i $\\chi^2_1$: %f $\\chi^2_2$: %f\" % (nit, chi2_1, chi2_2), fontsize=20)\n #plt.draw()\n\n if ipynb:\n display.clear_output()\n display.display(plt.gcf())", "def plot_pixel_intensity(image, path='./pixel_intensity_before_normalization.png'):\n\n plt.figure(figsize=(10, 5))\n plt.subplot(1, 2, 1)\n plt.imshow(image)\n plt.axis('off')\n histo = plt.subplot(1, 2, 2)\n histo.set_ylabel('Count')\n histo.set_xlabel('Pixel Intensity')\n n_bins = 30\n plt.hist(image[:, :, 0].flatten(), bins=n_bins, lw=0, color='r', alpha=0.5)\n plt.hist(image[:, :, 1].flatten(), bins=n_bins, lw=0, color='g', alpha=0.5)\n plt.hist(image[:, :, 2].flatten(), bins=n_bins, lw=0, color='b', alpha=0.5)\n plt.savefig(path)\n plt.show()", "def plot_sample(x):\n plt.imshow(x[:,:,0])\n plt.title(\"gasf\")\n plt.colorbar()\n plt.show()\n\n plt.imshow(x[:,:,1])\n plt.title(\"gadf\")\n plt.colorbar()\n plt.show()\n\n plt.imshow(x[:,:,2])\n plt.title(\"mtf\")\n plt.colorbar()\n plt.show()", "def plot_numpy_img(np_img):\n plt.imshow(np_img, interpolation='nearest')\n plt.show()", "def display(array):\n plt.figure()\n plt.imshow(array)\n plt.show()", "def plot_color_image(image):\n plt.imshow(image, interpolation=\"nearest\")\n plt.axis(\"off\")", "def 
print_image(indiv,name):\n routine = gp.compile(indiv,pset)\n output = gen_beat_output(routine)\n bits = np.array(map(bitlist,output)[0:24000]).transpose()\n plt.style.use('classic')\n plt.imshow(bits,interpolation='nearest',aspect='auto',cmap=plt.get_cmap('Greys'))\n plt.savefig(name+\".png\",dpi=150)", "def visualize(self, save=False):\n import matplotlib.pyplot as plt\n import inspect\n\n plt.style.use('seaborn-whitegrid')\n plt.rcParams['figure.figsize'] = [10, 5]\n\n grid = np.linspace(self.lower, self.upper, 10000)\n func = self.intensity_function(np.linspace(self.lower, self.upper, 10000))\n try:\n plt.plot(grid, func)\n except:\n plt.plot(grid, np.repeat(func, 10000))\n plt.title('Intensity function')\n plt.xlabel('time')\n plt.ylabel('value')\n if save:\n try:\n plt.savefig('intensity_function_' + inspect.getsource(self.intensity_function).split('return')[\n 1].strip() + '.png')\n print('Saved as ' + 'intensity_function_' + inspect.getsource(self.intensity_function).split('return')[\n 1].strip() + '.png')\n except:\n warnings.warn(\"Saving intensity function failed!\")\n plt.show()\n plt.clf()\n\n t = self.generate()\n plt.step(t, list(range(0, len(t))))\n plt.title('Simulated trajectory')\n plt.xlabel('time')\n plt.ylabel('value')\n if save:\n try:\n plt.savefig(\n 'trajectory_' + inspect.getsource(self.intensity_function).split('return')[1].strip() + '.png')\n print('Saved as ' + 'trajectory_' + inspect.getsource(self.intensity_function).split('return')[\n 1].strip() + '.png')\n except:\n warnings.warn(\"Saving trajectory failed!\")\n plt.show()\n plt.clf()\n\n plt.plot(t, list(np.repeat(0, len(t))), '.')\n plt.title('Simulated points')\n plt.xlabel('time')\n if save:\n try:\n plt.savefig('points_' + inspect.getsource(self.intensity_function).split('return')[1].strip() + '.png')\n print('Saved as ' + 'points_' + inspect.getsource(self.intensity_function).split('return')[\n 1].strip() + '.png')\n except:\n warnings.warn(\"Saving points failed!\")\n plt.show()\n plt.clf()", "def show_image(dataset, domain, image_class, image_name):\n\timage_file = io.imread(os.path.join(\"data\", dataset, domain, \"images\", image_class, image_name))\n\tplt.imshow(image_file)\n\tplt.pause(0.001)\n\tplt.figure()", "def draw_image(self):\n \n pixel_array = self.imageprepare(self.image_path)\n newArr = self.reshape_pixel_array(pixel_array)\n plt.imshow(newArr, interpolation='nearest')\n plt.savefig('MNIST_IMAGE.png')#save MNIST image\n plt.show()#Show / plot that image" ]
[ "0.69218147", "0.6863551", "0.668293", "0.667952", "0.66168284", "0.657051", "0.6569345", "0.65316135", "0.65316135", "0.65316135", "0.6529275", "0.6461676", "0.64531654", "0.6427083", "0.64266634", "0.640738", "0.6397536", "0.63804084", "0.63659954", "0.63561183", "0.63504815", "0.63449436", "0.6327371", "0.6317158", "0.63029754", "0.6290052", "0.62873244", "0.62268585", "0.6217833", "0.62066615" ]
0.71107686
0
Adds new reactions and metabolites to iEK1008.json while performing continuous testing
def main(): run_test_suite('../models/iEK1008.json') # runs test suite with iEK1008.json # rewrites iEK1008.json to iMtb_H37Rv.json so original model is not overwritten model_iek = cobra.io.load_json_model('../models/iEK1008.json') cobra.io.save_json_model(model_iek, '../models/iMtb_H37Rv.json') model = cobra.io.load_json_model('../models/iMtb_H37Rv.json') # removes 10 imbalanced reactions from iEK1008; all 10 reactions are added back with balanced formulas during update rxns_to_bal = [rxn.id for rxn in model.reactions if len(rxn.check_mass_balance()) > 0 if 'EX_' not in rxn.id and 'DM_' not in rxn.id and 'BIOMASS' not in rxn.id] for rxn_to_bal in rxns_to_bal: model.reactions.get_by_id(rxn_to_bal).remove_from_model() cobra.io.save_json_model(model, '../models/iMtb_H37Rv.json') run_test_suite('../models/iMtb_H37Rv.json', update='imbalanced_reactions_removed') # creates COBRApy Metabolite objects for new metabolites df_new_mets = pd.read_excel('../data/iEK1008_updates.xlsx', sheet_name='metabolites_added', usecols='A:C') new_mets = {} for index, row in df_new_mets.iterrows(): new_met_id = str(row['Metabolite_ID']) new_met_name = row['Metabolite_Name'] new_met_formula = row['Metabolite_Formula'] if new_met_id.endswith('c'): new_met_comp = 'c' elif new_met_id.endswith('e'): new_met_comp = 'e' else: print('Metabolite compartment could not be determined. Please check metabolite id.') new_met_comp = '' new_met = cobra.Metabolite(new_met_id, name=new_met_name, formula=new_met_formula, compartment=new_met_comp) new_mets[new_met_id] = new_met df_new_rxns = pd.read_excel('../data/iEK1008_updates.xlsx', sheet_name='reactions_added', usecols='A:G') with alive_bar(len(df_new_rxns), bar='blocks', spinner='notes_scrolling') as bar: for index, row in df_new_rxns.iterrows(): new_rxn_mets = {} new_rxn_form = row['Reaction_Formula'] if ' --> ' in new_rxn_form: new_rxn_form = new_rxn_form.split(' --> ') elif ' <=> ' in new_rxn_form: new_rxn_form = new_rxn_form.split(' <=> ') else: print('Unexpected symbol in ' + row['Reaction_Formula']) subs = new_rxn_form[0].split(' + ') for sub in subs: if '.0' in sub: sub_coeff = -1 * float(sub.split(' ')[0]) sub_id = sub.split(' ')[-1] try: new_rxn_sub = new_mets[sub_id] except KeyError: # metabolite is not new, i.e. already in iEK1008 new_rxn_sub = model.metabolites.get_by_id(sub_id) else: sub_coeff = -1.0 try: new_rxn_sub = new_mets[sub] except KeyError: new_rxn_sub = model.metabolites.get_by_id(sub) new_rxn_mets[new_rxn_sub] = sub_coeff pros = new_rxn_form[1].split(' + ') for pro in pros: if '.0' in pro: pro_coeff = float(pro.split(' ')[0]) pro_id = pro.split(' ')[-1] try: new_rxn_pro = new_mets[pro_id] except KeyError: new_rxn_pro = model.metabolites.get_by_id(pro_id) else: pro_coeff = 1.0 try: new_rxn_pro = new_mets[pro] except KeyError: new_rxn_pro = model.metabolites.get_by_id(pro) new_rxn_mets[new_rxn_pro] = pro_coeff # creates new reactions with new COBRApy Reaction and Metabolite objects create_reaction(model, row['Reaction_ID'], row['Reaction_Name'], row['Subsystem'], new_rxn_mets, float(row['Lower_Bound']), float(row['Upper_Bound']), row['Gene_Reaction_Rule']) cobra.io.save_json_model(model, '../models/iMtb_H37Rv.json') run_test_suite('../models/iMtb_H37Rv.json', update=row['Reaction_ID']) bar() return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def on_reaction_add(reaction, user):\n #Before doing anything\n #Check to see if the reaction was a karma emoji\n if reaction.emoji == initKarma.goodKarma:\n consoleMessage = 'Writing to karmaData file :: Increasing '\n consoleMessage += reaction.message.author.name\n consoleMessage += '\\'s karma by 1!'\n print(consoleMessage)\n await karmaUpdate(client, reaction.message, '+1')\n if reaction.emoji == initKarma.badKarma:\n consoleMessage = 'Writing to karmaData file :: Decreasing '\n consoleMessage += reaction.message.author.name\n consoleMessage += '\\'s karma by 1!'\n print(consoleMessage)\n await karmaUpdate(client, reaction.message, '-1')", "def test_make_new_reaction(self):\n\n procnum = 2\n spcA = Species().from_smiles('[OH]')\n spcs = [Species().from_smiles('CC'), Species().from_smiles('[CH3]')]\n spc_tuples = [((spcA, spc), ['H_Abstraction']) for spc in spcs]\n\n rxns = list(itertools.chain.from_iterable(react(spc_tuples, procnum)))\n\n cerm = CoreEdgeReactionModel()\n\n for rxn in rxns:\n cerm.make_new_reaction(rxn)\n\n \"\"\"\n 3 expected H-abstraction reactions:\n OH + CC = H2O + C[CH2]\n OH + [CH3] = H2O + [CH2]\n OH + [CH3] = [O] + C\n \"\"\"\n\n # count no. of entries in reactionDict:\n counter = 0\n for fam, v1 in cerm.reaction_dict.items():\n for key2, v2 in v1.items():\n for key3, rxnList in v2.items():\n counter += len(rxnList)\n\n self.assertEquals(counter, 3)", "def testMakeNewReaction(self):\n\n spcA = Species().fromSMILES('[OH]')\n spcs = [Species().fromSMILES('CC'), Species().fromSMILES('[CH3]')]\n spcTuples = [(spcA, spc) for spc in spcs]\n\n rxns = list(react(*spcTuples))\n\n cerm = CoreEdgeReactionModel()\n\n for rxn in rxns:\n cerm.makeNewReaction(rxn)\n\n \"\"\"\n 3 expected H-abstraction reactions:\n OH + CC = H2O + C[CH2]\n OH + [CH3] = H2O + [CH2]\n OH + [CH3] = [O] + C\n \"\"\"\n\n # count no. 
of entries in reactionDict:\n counter = 0\n for fam, v1 in cerm.reactionDict.iteritems():\n for key2, v2 in v1.iteritems():\n for key3, rxnList in v2.iteritems():\n counter += len(rxnList)\n\n self.assertEquals(counter, 3)", "def add_exercise(self):\r\n\r\n # Take the exercise entires from TOML file\r\n entries = self.cfg.get(\"payload\",{}).get(\"exercise\")\r\n # Check for valid entires\r\n if entries:\r\n # Construct payload \r\n for payload in entries:\r\n # Check the entry vs a json schema\r\n check.check_entry(path='schemas/exercise.json', test=payload)\r\n # Post request\r\n requests.post(API.url_exercise, data = payload, headers = self.headers, timeout = 2)", "def run(config):\n locator = cea.inputlocator.InputLocator(config.scenario)\n print('Key in run')\n print(config.bigmacc.key)\n i = config.bigmacc.key\n print(i)\n # SCENARIO SETUP ---\n config.general.project = os.path.join(config.bigmacc.data, config.general.parent, i)\n print(config.general.project)\n cea.datamanagement.data_initializer.main(config)\n # use the scenario code to set the year for the lca and other operations that need the current year\n pathway_code = config.general.parent\n pathway_items = pathway_code.split('_')\n scenario_year = int(pathway_items[1])\n config.emissions.year_to_calculate = scenario_year\n\n bigmacc_outputs_path = os.path.join(config.bigmacc.data, config.general.parent, 'bigmacc_out', config.bigmacc.round)\n\n scen_check = pd.read_csv(os.path.join(bigmacc_outputs_path, 'logger.csv'), index_col='Unnamed: 0')\n experiment_key = 'exp_{}'.format(i)\n print(experiment_key)\n keys = [int(x) for x in str(i)]\n if experiment_key in scen_check['Experiments'].values.tolist():\n print('Experiment was finished previously, moving to next.')\n pass\n else:\n print('START: experiment {}.'.format(i))\n\n # INITIALIZE TIMER ---\n t0 = time.perf_counter()\n if os.path.exists(os.path.join(config.bigmacc.data, config.general.parent, i)):\n print(' - Folder exists for experiment {}.'.format(i))\n else:\n os.mkdir(os.path.join(config.bigmacc.data, config.general.parent, i))\n print(' - Folder does not exist for experiment {}, creating now.'.format(i))\n\n # run the archetype mapper to leverage the newly loaded typology file and set parameters\n print(' - Running archetype mapper for experiment {} to remove changes made in the last experiment.'.format(i))\n cea.datamanagement.archetypes_mapper.main(config)\n\n # run the rule checker to set the scenario parameters\n print(' - Running rule checker for experiment {}.'.format(i))\n cea.bigmacc.bigmacc_rules.main(config)\n\n # SIMULATIONS ---\n\n print(' - Run radiation is {}.'.format(config.bigmacc.runrad))\n print(' - Write sensor data is {}.'.format(config.radiation.write_sensor_data))\n # checking on need for radiation simulation\n\n if config.bigmacc.runrad == True:\n # this nested statement is for when we rerun the simulations and no longer need to run the unique radiation\n if config.bigmacc.rerun != True:\n print(' - Running radiation simulation for experiment {}.'.format(i))\n if os.path.exists(locator.get_radiation_building('B000')):\n print(' - Radiation folder exists for experiment {}, copying.'.format(i))\n else:\n print(' - Radiation running for experiment {}.'.format(i))\n cea.resources.radiation_daysim.radiation_main.main(config)\n else:\n # print(' - Copying radiation simulation data from previous run for experiment {}.'.format(i))\n old_rad_files = os.path.join(config.bigmacc.data, config.general.parent, i,\n config.general.scenario_name, 'outputs', 
'data', 'solar-radiation')\n # distutils.dir_util.copy_tree(old_rad_files, locator.get_solar_radiation_folder())\n else:\n radfiles = config.bigmacc.copyrad\n # print(' - Copying radiation results from {}.'.format(radfiles))\n # distutils.dir_util.copy_tree(radfiles, locator.get_solar_radiation_folder())\n print(' - Experiment {} does not require new radiation simulation.'.format(i))\n\n # running demand forecasting\n if os.path.exists(locator.get_schedule_model_file('B000')):\n print(' - Schedules exist for experiment {}.'.format(i))\n else:\n print(' - Schedule maker running for experiment {}.'.format(i))\n schedule_maker.main(config)\n\n # check to see if we need to rerun demand or if we can copy\n if config.bigmacc.rerun != True:\n print(' - Running demand simulation for experiment {}.'.format(i))\n cea.demand.demand_main.main(config)\n else:\n if keys[0] == 1:\n print(' - Running demand simulation for experiment {}.'.format(i))\n cea.demand.demand_main.main(config)\n elif keys[6] == 1:\n print(' - Running demand simulation for experiment {}.'.format(i))\n cea.demand.demand_main.main(config)\n else:\n cea.demand.demand_main.main(config)\n # print(' - Looking for demand results data from previous run for experiment {}.'.format(i))\n # old_demand_files = os.path.join(config.bigmacc.data, config.general.parent, i,\n # config.general.scenario_name, 'outputs', 'data', 'demand')\n # if os.path.exists(old_demand_files):\n # # print(' - Copy demand results files from previous run of experiment {}.'.format(i))\n # # distutils.dir_util.copy_tree(old_demand_files, locator.get_demand_results_folder())\n # pass\n # else:\n # print(' - No results found.')\n # print(' - Running demand simulation for experiment {}.'.format(i))\n # cea.demand.demand_main.main(config)\n\n if config.bigmacc.pv == True:\n print(' - Run PV is {}.'.format(config.bigmacc.pv))\n if config.bigmacc.rerun == True:\n print(' - Looking for radiation simulation data from previous run for experiment {}.'.format(i))\n old_pv_files = os.path.join(config.bigmacc.data, config.general.parent, i,\n config.general.scenario_name, 'outputs', 'data', 'potentials', 'solar')\n if os.path.exists(old_pv_files):\n # print(' - Copying PV files from previous run of experiment {}.'.format(i))\n # distutils.dir_util.copy_tree(old_pv_files, locator.solar_potential_folder())\n pass\n else:\n print(' - PV files do not exist for previous run of experiment {} at {}.'.format(i, old_pv_files))\n print(' - Running PV simulation for experiment {}.'.format(i))\n photovoltaic.main(config)\n else:\n # if PV simulation is needed, run it.\n print(' - Running PV simulation for experiment {}.'.format(i))\n photovoltaic.main(config)\n\n print('Run water-body exchange is {}.'.format(config.bigmacc.water))\n # if water-body simulation is needed, run it.\n if config.bigmacc.water == True:\n print(' - Running water body simulation for experiment {}.'.format(i))\n water.main(config)\n\n # recalculating the supply split between grid and ng in the websrook DH\n if keys[4] == 1:\n print(' - Do not run district heat recalculation.')\n else:\n print(' - Run district heat recalculation.')\n cea.bigmacc.wesbrook_DH.main(config)\n\n if keys[7] == 1:\n print(' - PV use detected. 
Adding PV generation to demand files.')\n util.write_pv_to_demand(config)\n else:\n print(' - No PV use detected.')\n\n # running the emissions and costing calculations\n print(' - Run cost and emissions scripts.')\n cea.analysis.costs.system_costs.main(config)\n cea.analysis.lca.main.main(config)\n\n # clone out the simulation inputs and outputs directory\n print(' - Transferring results directory for experiment {}.'.format(i))\n\n new_inputs_path = os.path.join(config.bigmacc.data, config.general.parent, i,\n config.general.scenario_name, 'inputs')\n new_outputs_path = os.path.join(config.bigmacc.data, config.general.parent, i,\n config.general.scenario_name, 'outputs', 'data')\n\n if config.bigmacc.rerun != True:\n distutils.dir_util.copy_tree(locator.get_data_results_folder(), new_outputs_path)\n distutils.dir_util.copy_tree(locator.get_input_folder(), new_inputs_path)\n\n time_elapsed = time.perf_counter() - t0\n\n # save log information\n log_df = pd.read_csv(os.path.join(bigmacc_outputs_path, 'logger.csv'),\n index_col='Unnamed: 0')\n log_df = log_df.append(pd.DataFrame({'Experiments': 'exp_{}'.format(i),\n 'Completed': 'True',\n 'Experiment Time': '%d.2 seconds' % time_elapsed,\n 'Unique Radiation': config.bigmacc.runrad}, index=[0]), ignore_index=True)\n log_df.to_csv(os.path.join(bigmacc_outputs_path, 'logger.csv'))\n log_df.to_csv(r\"C:\\Users\\justi\\Desktop\\126logger_backup.csv\", )\n\n # write netcdf of hourly_results\n netcdf_writer.main(config, time='hourly')\n\n if config.bigmacc.rerun != True:\n shutil.rmtree(locator.get_costs_folder())\n shutil.rmtree(locator.get_demand_results_folder())\n shutil.rmtree(locator.get_lca_emissions_results_folder())\n shutil.rmtree(locator.get_solar_radiation_folder())\n shutil.rmtree(locator.get_potentials_folder())\n else:\n print(' - Rerun does not require purging of the files.')\n\n # when the setpoint is changed it is in a deeper database than the archetypes mapper can reach so reset it here\n if keys[0] == 1:\n cea.datamanagement.data_initializer.main(config)\n else:\n pass\n print('END: experiment {}. 
\\n'.format(i))", "def test_immunization_3(base_settings):\n filename = base_settings[\"unittest_data_dir\"] / \"immunization-example-reaction.json\"\n inst = immunization.Immunization.parse_file(\n filename, content_type=\"application/json\", encoding=\"utf-8\"\n )\n assert \"Immunization\" == inst.resource_type\n\n impl_immunization_3(inst)\n\n # testing reverse by generating data from itself and create again.\n data = inst.dict()\n assert \"Immunization\" == data[\"resourceType\"]\n\n inst2 = immunization.Immunization(**data)\n impl_immunization_3(inst2)", "def setUp(self):\n self.client = APIClient()\n\n self.speciality = models.Speciality.objects.create(\n name='Speciality'\n )\n\n self.payload = {\n 'name': \"Knee Replacement\",\n 'speciality': [self.speciality.pk],\n 'days_in_hospital': 2,\n 'days_in_destination': 2,\n 'duration_minutes': 120,\n 'overview': '<strong>Bla</strong> bla bla',\n }", "def test_additional_parameter_analysis(self):\n request_json = \"\"\"\n {\n \"analysisId\": \"test_01\",\n \"datasets\": [\n {\n \"data\": \"\\\\tSample 1\\\\tSample2\\\\tSample 3\\\\nCD19\\\\t10\\\\t2\\\\t20\\\\nCD20\\\\t10\\\\t20\\\\t2\\\\nMITF\\\\t40\\\\t20\\\\t10\\\\n\",\n \"design\": {\n \"analysisGroup\": [\n \"Treatment\",\n \"Control\",\n \"Treatment\"\n ],\n \"comparison\": {\n \"group1\": \"Control\",\n \"group2\": \"Treatment\"\n },\n \"samples\": [\n \"Sample 1\",\n \"Sample 2\",\n \"Sample 3\"\n ]\n },\n \"name\": \"First experiment\",\n \"type\": \"rnaseq_counts\"\n }\n ],\n \"methodName\": \"camera\",\n \"parametes\": [\n {\n \"name\": \"permutations\",\n \"value\": \"10\"\n },\n {\n \"name\": \"permutations\",\n \"value\": \"10\"\n }\n ]\n }\n \"\"\"\n\n # make sure the JSON is valid\n obj = json.loads(request_json)\n self.assertIsNotNone(obj)\n\n # submit the request\n mq = reactome_mq.ReactomeMQ()\n mq.post_analysis(request_json, \"camera\")\n\n # download the gene sets\n gene_set_file = os.path.join(self.test_file_dir, \"reactome_homo_sapiens.pkl\")\n if not os.path.isfile(gene_set_file):\n geneset = self._get_gene_set()\n geneset.save(gene_set_file)\n\n # enable debug mode\n os.environ[\"REACTOME_WORKER_DEBUG\"] = \"True\"\n\n # start to listen to analyses\n worker = reactome_analysis_worker.ReactomeAnalysisWorker()\n worker.process_single_message()\n\n # fetch the result\n storage = reactome_storage.ReactomeStorage()\n result_text = storage.get_result(\"test_01\")\n\n self.assertIsNotNone(result_text, \"Result was not saved in redis\")\n json_obj = json.loads(result_text)\n result = AnalysisResult.from_dict(json_obj)\n\n self.assertIsNotNone(result)\n self.assertIsNotNone(result.mappings)\n self.assertIsNotNone(result.results)\n self.assertEqual(\"68\", result.release)\n\n self.assertEqual(1, len(result.results))\n self.assertIsNotNone(result.results[0].pathways)\n self.assertIsNotNone(result.results[0].fold_changes)\n\n pathway_lines = result.results[0].pathways.split(\"\\n\")\n self.assertEqual(23, len(pathway_lines))\n\n gene_lines = result.results[0].fold_changes.split(\"\\n\")\n self.assertEqual(4, len(gene_lines))", "def test_intent_classifier_update_testing_samples(self):\n pass", "def test_intent_classifier_update_training_samples(self):\n pass", "def test_modify_import_data_5(self):\n self.ticket_dict4[\"type\"] = \"replace\"\n self.ticket_dict4[\"description_field\"] = \"product\"\n self.ticket_dict4[\"eval_mode\"] = \"final\"\n result = tickets.modify_import_data(self.ticket_dict4,\n self.required_keys, self.optional_keys, self.keywords)\n with 
self.subTest():\n self.assertTrue(result)\n with self.subTest():\n self.assertEqual(self.ticket_dict4[\"host_genus\"], \"retain\")\n with self.subTest():\n self.assertEqual(self.ticket_dict4[\"cluster\"], \"retain\")\n with self.subTest():\n self.assertEqual(self.ticket_dict4[\"subcluster\"], \"retain\")\n with self.subTest():\n self.assertEqual(self.ticket_dict4[\"annotation_author\"], \"retain\")\n with self.subTest():\n self.assertEqual(self.ticket_dict4[\"retrieve_record\"], \"retain\")\n with self.subTest():\n self.assertEqual(self.ticket_dict4[\"annotation_status\"], \"final\")\n with self.subTest():\n self.assertEqual(self.ticket_dict4[\"accession\"], \"retain\")", "def test_issue_get_issue_reactions(self):\n pass", "def test_issue_post_issue_reaction(self):\n pass", "def RIPReaction(sc, event):\n sc.api_call('reactions.add', as_user='true', channel=event['channel'],\n timestamp=event['ts'], name='rip')", "def test_signal_update_achievement(self):\n # Create two users for test all the achievements. Two for the podium\n client = Client()\n user_michu = create_user('passwordmichu', 'michu')\n create_user('passwordimmobile', 'immobile')\n client.login(username='immobile', password='passwordimmobile')\n # Create the Collection for the achievement NumSolvedCollectionAchievementDefinition and Problem\n coll = create_collection('Coleccion de cartas')\n # Create PodiumAchievementDefinition\n ach_podium = PodiumAchievementDefinition(name={\"es\":'Presidente del podio'},\n description={\"es\":'Consigue ser el primero'},\n num_problems=1, position=1)\n ach_podium.save()\n # Create NumSolvedCollectionAchievementDefinition\n ach_collection = NumSolvedCollectionAchievementDefinition(name={\"es\":'Coleccionista'},\n description={\"es\":'Resuelve 50\\\n problemas de esta coleccion'},\n num_problems=50,\n collection=coll)\n ach_collection.save()\n # Create NumSolvedAchievementDefinition\n ach_solved = NumSolvedAchievementDefinition(name={\"es\":'Resolvista'},\n description={\"es\":'Resuelve 50 problemas'},\n num_problems=50)\n ach_solved.save()\n # Create NumSolvedTypeAchievementDefinition\n ach_type = NumSolvedTypeAchievementDefinition(name={\"es\":'Procedista'},\n description={\"es\":'Resuelve un problema PROC'},\n num_problems=1, problem_type=ProblemType.PROC.name)\n ach_type.save()\n # Create NumSubmissionsProblemsAchievementDefinition\n ach_submi_pro = NumSubmissionsProblemsAchievementDefinition(name={\"es\":'Muchos envios'},\n description={\"es\":'Envia muchas soluciones'},\n num_submissions=80, num_problems=1)\n ach_submi_pro.save()\n # Create problem and submit correct answer with \"immobile\" user, for make this the first to solve the problem\n problem = create_select_problem(coll, 'Problema')\n submit_select_url = reverse('judge:submit', args=[problem.pk])\n client.post(submit_select_url, {'code': problem.solution}, follow=True)\n client.logout()\n # Login with \"michu\" and submit correct answer. 
All the checks will be with this user\n client.login(username='michu', password='passwordmichu')\n client.post(submit_select_url, {'code': problem.solution}, follow=True)\n # Whit this definitions our user \"michu\" don't have any achievement\n self.assertEqual(ObtainedAchievement.objects.filter(user=user_michu).count(), 0)\n # PodiumAchievementDefinition now only need to stay in podium\n # In this test our user \"michu\" stay at second position, that is why before he didn't have the achievement\n ach_podium.position = 3\n ach_podium.save()\n # NumSolvedCollectionAchievementDefinition only needs one correct submission\n # In this test our user only have one correct submission, that is why before he didn't have the achievement\n ach_collection.num_problems = 1\n ach_collection.save()\n # NumSolvedAchievementDefinition only needs one correct submission\n # In this test our user only have one correct submission, that is why before he didn't have the achievement\n ach_solved.num_problems = 1\n ach_solved.save()\n # NumSolvedTypeAchievementDefinition change to type SELECT\n # In this test our user only resolved a SELECT type problem, not PROC.\n ach_type.problem_type = ProblemType.SELECT.name\n ach_type.save()\n # NumSubmissionsProblemsAchievementDefinition only needs one submission now\n ach_submi_pro.num_submissions = 1\n ach_submi_pro.save()\n # Now our user \"michu\" have 5 achievements\n self.assertEqual(ObtainedAchievement.objects.filter(user=user_michu).count(), 5)", "async def feed_on(match, channel):\n global items\n chan_hash = str(hash(channel))\n\n item = {\"name\" : match, \"time\" : datetime.utcnow().isoformat() }\n if chan_hash in items:\n items[chan_hash].append(item)\n else:\n items[chan_hash] = [item]\n\n with open(os.path.join(BASEPATH, 'hell.json'), 'w') as cucumber:\n json.dump( items, cucumber )\n\n action = f\"_sneaks out a scaly hand and grabs {match}!_\"\n await channel.send(action)", "def generate_submissons_all_steps():\n\n\n data_en = read_json_file(\"Test_Data/test-en.json\")\n data_pr = read_json_file(\"Test_Data/test-pr.json\")\n data_es = read_json_file(\"Test_Data/test-es.json\")\n res_en = generate_embeddings_sentence_test_data(data_en, \"Test_Data/embd-en.pkl\")\n res_es = generate_embeddings_sentence_test_data(data_es, \"Test_Data/embd-es.pkl\")\n res_pr = generate_embeddings_sentence_test_data(data_pr, \"Test_Data/embd-pr.pkl\")\n model = load_model(\"model_doc\")\n make_submission(res_es, model, \"submission-es\")\n make_submission(res_pr, model, \"submission-pr\")\n make_submission(res_en, model, \"submission-en\")\n exit()", "def test_create_experiment(client, users):\n login_experimenter(client)\n\n exp = ExperimentFactory()\n datetime_format = \"%Y-%m-%d %H:%M:%S\"\n\n response = client.post(\"/experiments/\", data=dict(\n name=exp.name,\n start=exp.start.strftime(datetime_format),\n stop=exp.stop.strftime(datetime_format),\n blurb=exp.blurb))\n assert response.status_code == 200\n assert json_success(response.data)\n\n response = client.get(\"/experiments/\")\n data = response.data.decode(response.charset)\n assert response.status_code == 200\n assert exp.name in data\n\n response = client.post(\"/experiments/\", data=dict(\n start=exp.start.strftime(datetime_format),\n stop=exp.stop.strftime(datetime_format),\n blurb=exp.blurb))\n data = response.data.decode(response.charset)\n json_data = json.loads(data)\n assert json_data[\"success\"] == 0\n assert json_data[\"errors\"]\n\n response = client.post(\"/experiments/\", data=dict(\n name=exp.name,\n 
start=exp.start.strftime(datetime_format),\n stop=exp.start.strftime(datetime_format),\n blurb=exp.blurb))\n data = response.data.decode(response.charset)\n json_data = json.loads(data)\n assert json_data[\"success\"] == 0\n assert json_data[\"errors\"]\n\n response = client.post(\"/experiments/\", data=dict(\n name=exp.name,\n start=(datetime.now() - timedelta(days=5)).strftime(datetime_format),\n stop=(datetime.now() - timedelta(days=1)).strftime(datetime_format),\n blurb=exp.blurb))\n data = response.data.decode(response.charset)\n json_data = json.loads(data)\n assert json_data[\"success\"] == 0\n assert json_data[\"errors\"]", "def test_medicinalproductinteraction_1(base_settings):\n filename = (\n base_settings[\"unittest_data_dir\"] / \"medicinalproductinteraction-example.json\"\n )\n inst = medicinalproductinteraction.MedicinalProductInteraction.parse_file(\n filename, content_type=\"application/json\", encoding=\"utf-8\"\n )\n assert \"MedicinalProductInteraction\" == inst.resource_type\n\n impl_medicinalproductinteraction_1(inst)\n\n # testing reverse by generating data from itself and create again.\n data = inst.dict()\n assert \"MedicinalProductInteraction\" == data[\"resourceType\"]\n\n inst2 = medicinalproductinteraction.MedicinalProductInteraction(**data)\n impl_medicinalproductinteraction_1(inst2)", "def test_train_on_the_fly(self):\r\n input_seqs_file = NamedTemporaryFile(\r\n prefix='RdpTaxonAssignerTest_', suffix='.fasta')\r\n input_seqs_file.write(test_seq_coll.to_fasta())\r\n input_seqs_file.seek(0)\r\n\r\n exp_assignments = rdp_trained_test1_expected_dict\r\n\r\n app = RdpTaxonAssigner({\r\n 'id_to_taxonomy_fp': self.id_to_taxonomy_file.name,\r\n 'reference_sequences_fp': self.reference_seqs_file.name,\r\n })\r\n obs_assignments = app(self.tmp_seq_filepath)\r\n\r\n key = 'X67228 some description'\r\n self.assertEqual(obs_assignments[key], exp_assignments[key])", "def test_create_emobservation(self):\n comment = \"Message is {0}\".format(random.random())\n # Let's put in some made-up values\n raList = [1.0,1.0,1.0]\n raWidthList = 1.0\n decList = [1.0,1.0,1.0]\n decWidthList = 1.0\n dt = datetime(1900,1,1,1,1,1)\n startTimeList = [dt.isoformat() for i in range(3)]\n durationList = 1.0\n resp = gracedb.writeEMObservation(eventId, 'Test',\n raList, raWidthList, decList, decWidthList,\n startTimeList, durationList, comment)\n self.assertEqual(resp.status, 201)\n new_emobservation_uri = resp.getheader('Location')\n new_emobservation = resp.json()\n self.assertEqual(new_emobservation_uri, new_emobservation['self'])\n check_new_emobservation = gracedb.get(new_emobservation_uri).json()\n self.assertEqual(check_new_emobservation['comment'], comment)", "def test_populate(self):\n with self.app.test_client() as client:\n self.app.test_request_context().push()\n user = self.create_test_user()\n user2_id = self.create_test_user().id\n user3_id = self.create_test_user().id\n user4_id = self.create_test_user().id\n\n begin = datetime.utcnow()\n\n # test duplicate title\n r = client.post(\n '/ws/{}/channels/'.format(user.id),\n data=json.dumps(dict(\n title='new title',\n description='test channel for user {}'.format(user.id),\n category=1,\n cover=RockpackCoverArtData.comic_cover.cover,\n public=True)\n ),\n content_type='application/json',\n headers=[get_auth_header(user.id)]\n )\n channel_id = json.loads(r.data)['id']\n this_locale = 'en-us'\n\n models.ChannelLocaleMeta(\n channel=channel_id,\n locale=this_locale,\n date_added=datetime.utcnow()\n ).save()\n\n video_instance = 
models.VideoInstance(\n channel=channel_id,\n video=VideoData.video1.id\n ).save()\n\n UserActivity(\n user=user2_id,\n action='view',\n date_actioned=datetime.utcnow(),\n object_type='channel',\n object_id=channel_id,\n locale=this_locale\n ).save()\n\n UserActivity(\n user=user3_id,\n action='view',\n date_actioned=datetime.utcnow(),\n object_type='video',\n object_id=video_instance.id,\n locale=this_locale\n ).save()\n\n JobControl(job='update_channel_view_counts', last_run=begin).save()\n update_channel_view_counts()\n\n meta = models.ChannelLocaleMeta.query.filter(\n models.ChannelLocaleMeta.locale == this_locale,\n models.ChannelLocaleMeta.channel == channel_id).first()\n\n self.assertEquals(meta.view_count, 2)\n\n UserActivity(\n user=user4_id,\n action='view',\n date_actioned=datetime.utcnow(),\n object_type='channel',\n object_id=channel_id,\n locale=this_locale).save()\n\n update_channel_view_counts()\n\n self.assertEquals(meta.view_count, 3)", "def test_tracker_addHistory():\n\n trackers, cap = init_tracker()\n tr = trackers[0]\n tr.addHistory([1, 1, 1, 1])\n\n assert len(tr.history) >= 1", "def init_collected_data(self):\n for i in range(len(self.data)):\n\n item = self.data[i].copy()\n caption = item['captions'][item['cap_index']]\n item['caption'], item['caption_id'] = caption['caption'], caption['caption_id']\n\n self.collected_data.append({\n 'max_reward': 0.0,\n 'best_cap_type': -1, # -1 for ground truth, 0 for gathered w/o question, 1 for gathered w/ question\n 'gt_data': item,\n 'cap_dict': {}, # keeps track of all the captions seen for this image, and its reward\n 'ask_cap_dict': {}, # same thing but only captions where a question was asked\n 'best_cap_dict': {}, # keep track of all the best captions between rollout, replace, original\n 'best_ask_cap_dict': {} # same thing but a captions where a question was asked\n })", "def test_immunization_2(base_settings):\n filename = (\n base_settings[\"unittest_data_dir\"] / \"immunization-example-historical.json\"\n )\n inst = immunization.Immunization.parse_file(\n filename, content_type=\"application/json\", encoding=\"utf-8\"\n )\n assert \"Immunization\" == inst.resource_type\n\n impl_immunization_2(inst)\n\n # testing reverse by generating data from itself and create again.\n data = inst.dict()\n assert \"Immunization\" == data[\"resourceType\"]\n\n inst2 = immunization.Immunization(**data)\n impl_immunization_2(inst2)", "def setUp(self):\n super().setUp()\n self.data_model = json.loads(DATA_MODEL_JSON)\n self.set_source_parameter(\"reports\", [\"r1\", \"r3\"])\n self.expected_software_metrics = str(2 * len(self.data_model[\"subjects\"][\"software\"][\"metrics\"]))\n self.reports[\"reports\"].append(\n {\n \"title\": \"R3\",\n \"report_uuid\": \"r3\",\n \"subjects\": {\n \"s2\": {\n \"type\": \"software\",\n \"name\": \"S2\",\n \"metrics\": {\n \"m21\": {\n \"tags\": [\"security\"],\n \"scale\": \"count\",\n \"type\": \"violations\",\n \"target\": \"1\",\n \"sources\": {\"s1\": {\"type\": \"sonarqube\"}},\n },\n \"m22\": {\n \"tags\": [\"security\"],\n \"scale\": \"count\",\n \"type\": \"loc\",\n \"target\": \"1\",\n \"sources\": {\"s1\": {\"type\": \"sonarqube\"}},\n },\n \"m23\": {\n \"tags\": [\"security\"],\n \"scale\": \"count\",\n \"type\": \"accessibility\",\n \"target\": \"1\",\n \"sources\": {\"s1\": {\"type\": \"sonarqube\"}},\n },\n },\n },\n },\n },\n )\n self.entities = []\n for report in self.reports[\"reports\"]:\n for subject_uuid, subject in report.get(\"subjects\", {}).items():\n for metric_type in 
self.data_model[\"subjects\"][\"software\"][\"metrics\"]:\n if metric_type not in [\"violations\", \"accessibility\", \"loc\"]:\n self.entities.append( # noqa: PERF401\n {\n \"key\": f\"{report['report_uuid']}:{subject_uuid}:{metric_type}\",\n \"report\": report[\"title\"],\n \"report_url\": f\"https://quality_time/{report['report_uuid']}\",\n \"subject\": subject[\"name\"],\n \"subject_url\": f\"https://quality_time/{report['report_uuid']}#{subject_uuid}\",\n \"subject_uuid\": f\"{subject_uuid}\",\n \"subject_type\": self.data_model[\"subjects\"][subject[\"type\"]][\"name\"],\n \"metric_type\": self.data_model[\"metrics\"][metric_type][\"name\"],\n },\n )", "def test_create_activity_occurrence(self):\n pass", "def test_new_resource(self):\n if verbosity>=3: print \"\\n\"+\"~\"*80\n for resource in cfg.new_resources:\n args=copy.deepcopy(resource)\n self.run_init_new_resource(**args)\n if verbosity>=3: print \"\\n\"+\"~\"*80", "def test_intent_classifier_add_testing_samples(self):\n pass", "def test_intent_classifier_add_training_samples(self):\n pass" ]
[ "0.57377374", "0.5685594", "0.567169", "0.5549195", "0.54485726", "0.53651845", "0.5343828", "0.53168577", "0.52647924", "0.5197624", "0.5102805", "0.51013464", "0.5088614", "0.50403416", "0.5033594", "0.5025995", "0.5025969", "0.50153446", "0.4987246", "0.49858665", "0.4976686", "0.49724105", "0.49720666", "0.49716935", "0.4970633", "0.49689168", "0.49635476", "0.49605897", "0.49598843", "0.49595433" ]
0.69024295
0
Converts a cause of death index to a human-readable string.
def cause_of_death_index_to_string(index: int) -> str:
    if index == CauseOfDeath.STARVATION.value:
        return "Starvation"
    elif index == CauseOfDeath.DEHYDRATION.value:
        return "Dehydration"
    elif index == CauseOfDeath.EATEN.value:
        return "Eaten"
    else:
        raise ValueError("Did not recognize CauseOfDeath index!")
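The `CauseOfDeath` enum referenced here (and in the chart function below) comes from elsewhere in the source repository. A minimal sketch consistent with the branches above — the member names are confirmed by the code, but the numeric ordering is an assumption:

from enum import Enum

class CauseOfDeath(Enum):
    # Hypothetical values; only the member names are attested by the snippet above.
    STARVATION = 0
    DEHYDRATION = 1
    EATEN = 2

# Example usage:
# cause_of_death_index_to_string(CauseOfDeath.EATEN.value)  # -> "Eaten"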
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n return \"{}_human\".format(self.index)", "def _index_to_unicode(cls, index: int) -> str:\n return \"\".join(cls._unicode_subscripts[int(_)] for _ in str(index))", "def _make_not_found_message(index: Union[int, slice, str]) -> str:\n msg = [f\"Analysis result {index} not found.\"]\n errors = self.errors()\n if errors:\n msg.append(f\"Errors: {errors}\")\n return \"\\n\".join(msg)", "def strIdx(idx):\n if not isinstance(idx, (int, np.integer)):\n raise ValueError(\"Index must be an integer.\")\n\n return str(idx) if idx >= 0 else str(-idx) + u'\\u0305'", "def reason(op_idx, details):\n\n return '%d %s'%(op_idx, details)", "def index_as_string(self):\n return self.index().to_string() if self.index() else None", "def cause(self) -> Optional[str]:\n return pulumi.get(self, \"cause\")", "def index_to_wellname(index, num_wells, direction=\"row\"):\n row, column = index_to_row_column(index, num_wells, direction)\n return coordinates_to_wellname((row, column))", "def visualise_cause_of_death(data: LogData, directory: Path):\n\n rabbit_stats: list[int] = [0 for _ in CauseOfDeath]\n deer_stats: list[int] = [0 for _ in CauseOfDeath]\n wolf_stats: list[int] = [0 for _ in CauseOfDeath]\n bear_stats: list[int] = [0 for _ in CauseOfDeath]\n\n for event in data.events():\n event_type: str = event[\"type\"]\n\n if event_type == \"death\":\n tag: str = event[\"tag\"]\n\n info = data.death_info(event[\"deathIndex\"])\n cause: int = info[\"cause\"]\n\n if tag == \"Rabbit\":\n rabbit_stats[cause] = rabbit_stats[cause] + 1\n\n elif tag == \"Deer\":\n deer_stats[cause] = deer_stats[cause] + 1\n\n elif tag == \"Wolf\":\n wolf_stats[cause] = wolf_stats[cause] + 1\n\n elif tag == \"Bear\":\n bear_stats[cause] = bear_stats[cause] + 1\n\n figure = create_grouped_bar_chart({\"Rabbits\": rabbit_stats,\n \"Deer\": deer_stats,\n \"Wolves\": wolf_stats,\n \"Bears\": bear_stats})\n figure.savefig(directory / Path(\"cause_of_death.png\"))\n plot.close()", "def getWeatherString(index):\n return Texts.weather_titles[index]", "def err_str(err):\n return \"\".join(format_exception_only(type(err), err))", "def get_oss_fuzz_summary(crash_type, crash_state):\n crash_type = crash_type.splitlines()[0]\n state_lines = crash_state.splitlines()\n if crash_type in ('ASSERT', 'CHECK failure', 'Security CHECK failure',\n 'Security DCHECK failure'):\n return crash_type + ': ' + state_lines[0]\n\n if crash_type == 'Bad-cast':\n return state_lines[0]\n\n if not crash_state or crash_state == 'NULL':\n return crash_type\n\n return crash_type + ' in ' + state_lines[0]", "def _exc_info_to_string(self, err, test):\n\t\texctype, value, tb = err\n\t\t# Skip test runner traceback levels\n\t\twhile tb and self._is_relevant_tb_level(tb):\n\t\t\ttb = tb.tb_next\n\n\t\tif exctype is test.failureException:\n\t\t\t# Skip assert*() traceback levels\n\t\t\tlength = self._count_relevant_tb_levels(tb)\n\t\t\tmsgLines = traceback.format_exception(exctype, value, tb, length)\n\t\telse:\n\t\t\tmsgLines = traceback.format_exception(exctype, value, tb)\t\t\n\t\treturn ''.join(msgLines)", "def _make_title(self, ind):\n start = self.df_event_time.loc[ind, 'time']\n date = np.datetime_as_string(start.astype('<M8[ns]'), unit='s')\n start_ns = start - (start // 10**9) * 10**9\n end = self.df_event_time.loc[ind, 'endtime']\n end_ns = end - start + start_ns\n return ''.join((f'##Event {ind} from run {self.run_id}\\n',\n f'##Recorded at ({date[:10]} {date[10:]}) UTC ',\n f'{start_ns} ns - {end_ns} ns'))", "def index_to_string(index):\n if index:\n s 
= \"/\".join(index)\n return Quote(s)\n else:\n return \".\"", "def format_as_index(indices):\r\n\r\n if not indices:\r\n return \"\"\r\n return \"[%s]\" % \"][\".join(repr(index) for index in indices)", "def error_num_to_desc(num):\r\n for t in ERROR_CODES:\r\n if t[0] == num:\r\n try:\r\n return t[2]\r\n except IndexError:\r\n return \"\"", "def Death_Blossom(self):\t\t\n\t\tprint(self.name.Title() + \"Die Die Die!\")", "def get_index_str(idxs, discard, cap, header=None):\n if header is None:\n header = 'Indexes of samples from mcmc chain ' \\\n f'(after slicing: discard={discard}, cap={cap})'\n string = f'{header}\\n'\n\n for i in idxs:\n string += f'{i}\\n'\n return string", "def errorNumToDesc(self, errorCode):\n for t in self.ERROR_CODES:\n if t[0] == errorCode:\n try:\n return t[2]\n except IndexError:\n return \"\"", "def episode_title_for_tvdb(self):\n \n # strip out the year from the episode title:\n return \"Episode %d\"%self.episode_number[1]", "def impact_to_string(impact):\n impact_map = {\n data_types.SecurityImpact.STABLE: 'Stable',\n data_types.SecurityImpact.BETA: 'Beta',\n data_types.SecurityImpact.HEAD: 'Head',\n data_types.SecurityImpact.NONE: 'None',\n data_types.SecurityImpact.MISSING: MISSING_VALUE_STRING\n }\n\n return impact_map[impact]", "def transformErr2Str(self,*args):\n error_code = c_int32(args[0])\n error_str = create_string_buffer(\"\\000\"*1024)\n status = self.__acqiris_QuantroDLL1.transformErr2Str(self.__instrumentID,error_code,error_str) \n return str(error_str)", "def get_index_repr(self):\r\n return \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\" % (self.trf_id,\r\n self.trf_period,\r\n self.trf_array_length,\r\n self.trf_array_gc,\r\n self.trf_pvar,\r\n self.trf_gi,\r\n self.trf_l_ind,\r\n self.trf_r_ind,\r\n self.trf_chr)", "def exception_description(err):\n result = ''\n if isinstance(err, str):\n result = err\n elif isinstance(err, Exception):\n result = \"Exception class: %s.%s\\n\" % (err.__class__.__module__, \\\n err.__class__.__name__)\n if len(err.args) > 0:\n result += \"Args:\\n\"\n arg_num = 0\n for arg in err.args:\n if not isinstance(arg, str):\n arg = str(arg)\n\n arg = arg.replace('\\n', '\\n\\t' + ' '*(len(str(arg_num)) + 3))\n\n result += \"\\t%s : %s\\n\" % (arg_num, arg)\n arg_num += 1\n else:\n result = str(err)\n return result", "def get_oss_fuzz_details(issue_id, crash_type, crash_state):\n details = ''\n if issue_id:\n oss_fuzz_link = OSS_FUZZ_ISSUE_URL + issue_id\n details = f'OSS-Fuzz report: {oss_fuzz_link}\\n\\n'\n\n crash_type = crash_type.replace('\\n', ' ')\n return details + (f'Crash type: {crash_type}\\n'\n f'Crash state:\\n{crash_state}')", "def solution_to_string(self):\n solution_vector_index_format = [index+1 if elem == 1 else -index-1 for index, elem in enumerate(self.solution_vector)]\n return \" \".join(map(str, solution_vector_index_format))", "def get_deaths():\n # Deprecated warning\n url = \"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/\"\n warnings.warn(\"This function is deprecated. 
Use get_data_jhu instead; see tutorials at <https://github.com/PayneLab/covid19pandas/tree/master/docs/>.\", DeprecatedWarning, stacklevel=2)\n print(\"These data were obtained from Johns Hopkins University (https://github.com/CSSEGISandData/COVID-19).\")\n return _get_table(url, \"time_series_covid19_deaths_global.csv\", source=\"jhu\", update=True)", "def int_to_text(self, labels):\n string = []\n for i in labels:\n string.append(self.index_map[i])\n return ''.join(string).replace('', ' ')", "def decodeindexes(self, idx):\n text = \"\"\n for elem in idx:\n char = self.index2char[elem]\n if char not in SPCHARS:\n text += char\n\n return text" ]
[ "0.5622562", "0.54020613", "0.5385206", "0.5309493", "0.52745426", "0.5257487", "0.5119563", "0.5113673", "0.51084805", "0.5053868", "0.501387", "0.5013531", "0.49857065", "0.49503946", "0.4940279", "0.48666745", "0.48354596", "0.48203522", "0.47872925", "0.4776707", "0.4720607", "0.47142056", "0.4701247", "0.4683691", "0.46817905", "0.46794027", "0.46768644", "0.46678728", "0.46430677", "0.4639558" ]
0.7642903
0
Creates and returns a grouped bar chart of the death causes. The lists are expected to contain an entry for each cause of death, where the value corresponds to how many specimens died of that cause.
def create_grouped_bar_chart(stats: dict[str, list[int]]):
    figure, axes = plot.subplots()

    labels = [str(e) for e in CauseOfDeath]
    x = numpy.arange(len(labels))

    bar_width = 0.15
    max_value = 0

    rects = []
    i = 0
    for label, values in stats.items():
        max_value = max(max_value, max(values))
        rects.append(axes.bar(x + (i * bar_width), values, bar_width, label=label))
        i = i + 1

    axes.set_title("Deaths arranged by cause and animal type")
    axes.set_ylabel("Amount")
    axes.set_xticks(x)
    axes.set_xticklabels(labels)
    axes.legend()

    for rect in rects:
        attach_text_labels(rect, axes)

    figure.tight_layout()
    return figure
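`attach_text_labels` is not defined in this snippet; each `rect` passed to it is the BarContainer returned by `axes.bar`. A minimal sketch of the usual matplotlib bar-labelling idiom it presumably implements — the signature matches the call above, but the body is an assumption:

def attach_text_labels(rects, axes):
    # Assumed implementation: annotate each bar in the container with its
    # height, the common matplotlib "autolabel" pattern.
    for rect in rects:
        height = rect.get_height()
        axes.text(rect.get_x() + rect.get_width() / 2, height,
                  str(height), ha="center", va="bottom")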
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def visualise_cause_of_death(data: LogData, directory: Path):\n\n rabbit_stats: list[int] = [0 for _ in CauseOfDeath]\n deer_stats: list[int] = [0 for _ in CauseOfDeath]\n wolf_stats: list[int] = [0 for _ in CauseOfDeath]\n bear_stats: list[int] = [0 for _ in CauseOfDeath]\n\n for event in data.events():\n event_type: str = event[\"type\"]\n\n if event_type == \"death\":\n tag: str = event[\"tag\"]\n\n info = data.death_info(event[\"deathIndex\"])\n cause: int = info[\"cause\"]\n\n if tag == \"Rabbit\":\n rabbit_stats[cause] = rabbit_stats[cause] + 1\n\n elif tag == \"Deer\":\n deer_stats[cause] = deer_stats[cause] + 1\n\n elif tag == \"Wolf\":\n wolf_stats[cause] = wolf_stats[cause] + 1\n\n elif tag == \"Bear\":\n bear_stats[cause] = bear_stats[cause] + 1\n\n figure = create_grouped_bar_chart({\"Rabbits\": rabbit_stats,\n \"Deer\": deer_stats,\n \"Wolves\": wolf_stats,\n \"Bears\": bear_stats})\n figure.savefig(directory / Path(\"cause_of_death.png\"))\n plot.close()", "def graph_cause_count(df):\r\n # set the visual features of the graph\r\n sns.set(font_scale=2)\r\n sns.set_style(\"darkgrid\")\r\n fig, ax = plt.subplots()\r\n fig.set_size_inches(20, 12)\r\n plt.xticks(rotation=45)\r\n ax.set_title(\"Yearly Vehicle Accident Police Deaths\")\r\n # create the graph of the data\r\n plot = sns.barplot(\"year\", \"count\", data=df, palette=\"winter_d\", ci=None)\r\n # plt.show()\r\n # save the graph as an image\r\n fig.savefig(\"2_graph_cause_count.png\")", "def bar_chart(self, df, n_groups, dict):\n fig, ax = plt.subplots()\n # choose bar width (standard 0.8 chosen)\n bar_width = 0.35\n # get an index to set the ticks for the x axis\n\n index = np.arange(n_groups)\n indexes = df.index.tolist()\n print(indexes)\n df[\"index\"] = indexes\n\n # make barchart for permutation test\n ax.bar(index, df[\"perm\"], bar_width, color='b', linewidth=4,\n label='Permutation test')\n # make barchart for t-test\n ax.bar(index + bar_width, df[\"t_test\"], bar_width, color='r',\n label='t-test')\n\n ax.set_xlabel(dict[\"xlabel\"])\n ax.set_ylabel(dict[\"ylabel\"])\n ax.set_title(dict[\"title\"])\n ax.set_xticks(index + bar_width / 2)\n ax.set_xticklabels(dict[\"xtickslabels\"])\n ax.legend()\n\n fig.tight_layout()\n plt.show()", "def createChart(cladeGroup, data, taxonomyDict, outputFile):\n dfData = []\n for clade in cladeGroup: \n temp, other, totalTemp = valueCountsSpecies(data, cladeGroup[clade], taxonomyDict)\n relativeTemp = {}\n for val in temp:\n relativeTemp[val] = (temp[val] / sum(list(temp.values())))*100\n dfData.append(relativeTemp)\n\n tempDF = pd.DataFrame(dfData, index=list(cladeGroup.keys()))\n tempDF = tempDF.fillna(0)\n\n # Plotting\n sns.set(rc={'figure.figsize':(20,15)}, font_scale=2)\n ax = tempDF.plot(kind=\"bar\", stacked=True, colormap=ListedColormap(sns.color_palette(\"twilight\", 12)), rot=0)\n for rect in ax.patches:\n # Find where everything is located\n height = rect.get_height()\n width = rect.get_width()\n x = rect.get_x()\n y = rect.get_y()\n \n # The height of the bar is the data value and can be used as the label\n label_text = f'{height:.2f}%' # f'{width:.2f}' to format decimal values\n \n # ax.text(x, y, text)\n label_x = x + width / 2\n label_y = y + height / 2\n \n # only plot labels greater than given width\n if height > 0.00:\n ax.text(label_x, label_y, label_text, ha='center', va='center', fontsize=20, color=\"w\")\n\n plt.legend(loc=\"center right\", bbox_to_anchor=(1.25, 0.5), ncol=1)\n plt.savefig(outputFile, bbox_inches=\"tight\")\n plt.show()\n return", "def 
graph_cause_count_each(df, label):\r\n # set the visual features of the graph\r\n sns.set(font_scale=1.5)\r\n sns.set_style(\"darkgrid\")\r\n fig, ax = plt.subplots()\r\n fig.set_size_inches(15, 8)\r\n plt.xticks(rotation=45)\r\n ax.set_title(label.capitalize() + \" Police Death Causes\")\r\n # create the graph of the data\r\n plot = sns.barplot(\"year\", \"count\", data=df, palette=\"winter_d\")\r\n # plt.show()\r\n # save the graph as an image with the correct cause naming\r\n name = \"2_graph_cause_count_\" + label + \".png\"\r\n fig.savefig(name)", "def graph_year_cause_count(df):\r\n # set the visual features of the graph\r\n sns.set(font_scale=1.5)\r\n sns.set_style(\"darkgrid\")\r\n fig, ax = plt.subplots()\r\n fig.set_size_inches(20, 12)\r\n plt.xticks(rotation=25)\r\n ax.set_title(\"2001 and 2007 Police Death Causes\")\r\n # create the graph of the data\r\n plot = sns.barplot(\"cause_short\", \"count\", data=df, palette=\"bone\", hue='year')\r\n # plt.show()\r\n # save the graph as an image\r\n fig.savefig(\"1_graph_top_cause_count.png\")", "def create_deaths(wof_settings,screen,deaths,levelMap):\n \n death_width = wof_settings.element_width\n death_height = wof_settings.element_height \n \n # Create deaths\n for death_position in levelMap['death']:\n death = Death(wof_settings,screen)\n death.x = death_position[1] * death_width\n death.y = death_position[0] * death_height\n death.rect.x = death.x\n death.rect.y = death.y\n deaths.add(death)", "def test_amount_of_deaths(self) -> None:\n # Get Data\n data = self.data_handler_1.amount_of_deaths()\n results = defaultdict(None,\n {'זכר': defaultdict(int, {'75-84': 97, '65-74': 93, '<65': 62, '85+': 62}),\n 'נקבה': defaultdict(int, {'85+': 63, '75-84': 52, '65-74': 41, '<65': 30})})\n # Data Validation\n self._test_two_level_depth_nested_dictionaries(data, results)", "def message_genre_bar_chart(df):\n genre_counts = df.groupby('genre').count()['message']\n genre_names = list(genre_counts.index)\n return {\n 'data': [\n Bar(\n x=genre_names,\n y=genre_counts\n )\n ],\n\n 'layout': {\n 'title': 'Distribution of Message Genres',\n 'yaxis': {\n 'title': \"Count\"\n },\n 'xaxis': {\n 'title': \"Genre\"\n }\n }\n }", "def graph_max_cause(df):\r\n # set the visual features of the graph\r\n sns.set(font_scale=2)\r\n sns.set_style(\"darkgrid\")\r\n fig, ax = plt.subplots()\r\n fig.set_size_inches(20, 10)\r\n plt.xticks(rotation=20)\r\n ax.set_title(\"States' Max Police Death Causes >= 150\")\r\n # create the graph of the data\r\n plot = sns.barplot(\"state\", \"max_count\", data=df, palette=\"bone\", hue='max_cause')\r\n # plt.show()\r\n # save the graph as an image\r\n fig.savefig(\"3_graph_max_cause.png\")", "def grafica(self, timeList):\r\n n_groups = len(timeList)\r\n # create plot\r\n fig, ax = plt.subplots()\r\n index = np.arange(n_groups)\r\n bar_width = 0.2\r\n opacity = 1\r\n index2 = [x + bar_width for x in index]\r\n index3 = [x + bar_width for x in index2]\r\n index4 = [x + bar_width for x in index3]\r\n rects1 = plt.bar(index, self.ingresos, bar_width,\r\n alpha=opacity,\r\n color='r',\r\n label='Ingresos')\r\n\r\n rects2 = plt.bar(index2, self.compras, bar_width,\r\n alpha=opacity,\r\n color='yellow',\r\n label='Compras')\r\n rects3 = plt.bar(index3, self.gastos, bar_width,\r\n alpha=opacity,\r\n color='b',\r\n label='Gastos')\r\n rects4 = plt.bar(index4, self.total, bar_width,\r\n alpha=opacity,\r\n color='black',\r\n label='Saldo')\r\n\r\n plt.xlabel('Línea de tiempo')\r\n plt.ylabel('Total ($)')\r\n plt.title('Resultados')\r\n 
plt.xticks(index + bar_width, timeList)\r\n plt.grid()\r\n plt.legend()\r\n plt.tight_layout()\r\n plt.show()", "def create_spend_chart(categories: list):\n\n BAR = \"o\"\n TITLE = \"Percentage spent by category\"\n \n # Sum up the total withdrawn amount\n withdrawals = {}\n total_amount_withdrawn = 0\n for category in categories:\n amount = category.get_withdrawals()\n withdrawals[category.name] = {\"amount\" : amount, \"percentage\" : 0}\n total_amount_withdrawn += amount\n \n # Calculate the percentages\n for category_name in withdrawals:\n percentage = withdrawals[category_name][\"amount\"]/total_amount_withdrawn*100\n # Why use floor() instead of int():\n # https://stackoverflow.com/a/31195540\n percentage = int(floor(percentage/10.)*10)\n withdrawals[category_name][\"percentage\"] = percentage\n\n # Make the bars\n percentages_lines = []\n for percentage in range(100, -10, -10):\n percentages_line = \"{:3}|\".format(percentage)\n for category_name in withdrawals:\n if withdrawals[category_name][\"percentage\"] >= percentage:\n percentages_line += \" \" + BAR + \" \"\n else:\n percentages_line += \" \"\n percentages_lines.append(percentages_line + \" \")\n\n # Make the horizontal line\n horizontal_line = \" {}\".format(\"---\"*len(categories) + \"-\")\n \n # Make the names\n bar_names_lines = []\n # find the length of the longest name\n max_name_len = max([len(name) for name in withdrawals])\n for line_num in range(max_name_len):\n bar_names_line = \" \"\n for category_name in withdrawals:\n if line_num < len(category_name):\n bar_names_line += \" \" + category_name[line_num] + \" \"\n else:\n bar_names_line += \" \"\n bar_names_lines.append(bar_names_line + \" \")\n\n chart_lines = [TITLE] + percentages_lines + [horizontal_line] + bar_names_lines\n\n chart_lines = \"\\n\".join(chart_lines)\n\n return chart_lines", "def plotBarChart(resultConfirmed, resultDeath, resultVaccinated):\n fig, ax = plt.subplots(3)\n\n ax[0].plot(resultConfirmed['Date'], resultConfirmed['Confirmed Cases'])\n ax[0].title.set_text('Confirmed Cases')\n \n ax[1].plot(resultDeath['Date'], resultDeath['Death Cases'])\n ax[1].title.set_text('Death Cases')\n \n ax[2].plot(resultVaccinated['Date'], resultVaccinated['Vaccinated Person'])\n ax[2].title.set_text('Vaccinated Cases')\n fig.tight_layout()\n plt.show()", "def barGraph(listOfWord, listOfFrequency):\r\n\r\n\tindex = np.arange(len(listOfWord))\r\n\r\n\tplt.title(\"Frekuensi Kemunculan Kata\")\r\n\tplt.barh(index, listOfFrequency)\r\n\tplt.xlabel('Frekuensi')\r\n\tplt.yticks(index, listOfWord, fontsize=6)\r\n\r\n\tplt.show()", "def grant_outcomes_barchart(dframe):\n # prepare dataframe\n dframe = df.copy()\n dframe.columns = [col.lower().replace(' ','_') for col in dframe.columns]\n dframe = dframe[dframe['organization_name'].notnull()]\n dframe.drop(['thank_you_sent','report_due','report_sent'],axis=1,\n inplace=True)\n dframe.set_index(dframe['date_application_sent'],inplace=True)\n\n grant_stage = []\n [grant_stage.append(status.lower().strip()) for status in dframe.stage]\n dframe['stage'] = grant_stage\n grant_status = [] # merge status to 3 primary categories, make 'awarded' tag\n for status in dframe.stage:\n if status not in ['obligations complete','pledged','posted']:\n grant_status.append(status)\n else:\n grant_status.append('awarded')\n dframe['grant_status'] = grant_status\n\n # create chart\n color_dict = {'awarded':'#adebad','not approved':'#d6746f',\n 'submitted':'#ffffb3'}\n grant_count_trace = []\n for status in dframe.grant_status.unique():\n 
grant_count_trace.append(go.Bar(\n x = dframe[dframe.grant_status==status].resample('Q')['stage'].count().index,\n y = dframe[dframe.grant_status==status].resample('Q')['stage'].count(),\n name = status,\n marker = {'color':color_dict[status]},\n opacity = .8))\n\n layout = {'barmode':'stack',\n 'hovermode':'closest',\n 'paper_bgcolor':'#303939',\n 'plot_bgcolor':'#303939',\n 'legend':{'font':{'color':'#CCCCCC'}},\n 'yaxis':{'title':'no. applications',\n 'tickfont':{'color':'#CCCCCC'},\n 'titlefont':{'color':'#CCCCCC'},\n 'showgrid':False},\n 'xaxis':{'title':'quarter submitted',\n 'titlefont':{'color':'#CCCCCC'},\n 'tickfont': {'color':'#CCCCCC'}},\n 'title':'Grant Application<br>Status Overview',\n 'titlefont':{'color':'#CCCCCC'}}\n\n fig = {'data':grant_count_trace, 'layout':layout}\n return fig", "def summer_bar_chart(self):\n # Create top n countries data from 1996 to 2014\n df_summer = self.df_summer[self.df_summer['Year'] >= 1996]\n m = list(df_summer['Country'].value_counts()[:self.n_top].index)\n df_top = df_summer[df_summer['Country'].isin(m)].groupby(['Country', 'Medal']).size()\n new_index = pd.MultiIndex.from_product([m, ['Gold', 'Silver', 'Bronze']], names=df_top.index.names)\n df_top = df_top.reindex(new_index)\n unstacked_df_top = df_top.unstack().reindex(m, columns=['Gold', 'Silver', 'Bronze'])\n k = []\n # Create the dataframe in 2016.\n for j in self.df_2016_summer['NOC'].tolist():\n n = j[j.find('(') + 1:j.find(')')]\n k.append((n, j))\n k = dict(k)\n summer_2016 = pd.DataFrame()\n for i in m:\n df_tmp = self.df_2016_summer[self.df_2016_summer['NOC'] == k[i]]\n summer_2016 = pd.concat([summer_2016, df_tmp])\n summer_2016['Country'] = m\n new_summer_2016 = summer_2016.set_index(['Country'])[['Gold', 'Silver', 'Bronze']]\n # Add the two dataframes and plot\n unstacked_df_top.add(new_summer_2016).reindex(m[::-1], columns=['Bronze', 'Silver', 'Gold']).plot(kind='barh')\n plt.title('Medal Result of Summer Olympics since 1996')\n fname = './medal_figures_summer/summer_bar_chart.png'\n plt.savefig(fname=fname, format='png')\n return", "def graph_decade_count(df):\r\n # set the visual features of the graph\r\n sns.set(font_scale=2)\r\n sns.set_style(\"darkgrid\")\r\n fig, ax = plt.subplots()\r\n fig.set_size_inches(15, 10)\r\n ax.set_title(\"Decade Police Deaths\")\r\n # create the graph of the data\r\n plot = sns.barplot(\"decade\", \"count\", data=df, palette=\"winter_d\")\r\n # plt.show()\r\n # save the graph as an image\r\n fig.savefig(\"1_graph_decade_count.png\")", "def category_bar_chart(df):\n label_names = df.drop(['message', 'original', 'genre', 'id'], axis=1).columns\n label_counts = []\n for column in label_names:\n label_counts.append(df[column].sum())\n return {\n 'data': [\n Bar(\n x=label_names,\n y=label_counts\n )\n ],\n\n 'layout': {\n 'title': 'Distribution of Labelled Categories',\n 'yaxis': {\n 'title': \"Count\",\n 'type': 'log'\n },\n 'xaxis': {\n 'title': \"Category\"\n }\n }\n }", "def make_bar_group_plot(x, Y, groups, colors, title):\n data = []\n for i in range(len(groups)):\n data.append(plotly.graph_objs.Bar(\n x=list(x),\n y=list(Y[i, :]),\n name=groups[i],\n marker={'color' : colors[i]}\n ))\n\n return plotly.graph_objs.Figure(\n data=data,\n layout=plotly.graph_objs.Layout(title=title, barmode='group')\n )", "def pledges_barchart(dframe, colors = ['#8dc16a','#d6746f']):\n # anonymize members & convert dollar values to float type\n anonymized = []\n for name in dframe['Last Name']:\n if str(name) == 'nan':\n anonymized.append('--')\n else:\n 
anonymized.append('M: {}'.format(np.random.randint(1,100)))\n\n dframe['anonymized'] = anonymized\n for col in ['Amount','Payment Amount Received','Remaining Balance']:\n dframe[col] = dframe[col].astype(float)\n\n # series of percentage donated against pledged\n pct_fulfiilled = pd.Series(dframe.groupby('Last Name')['Payment Amount Received'].sum() /\n dframe.groupby('Last Name')['Amount'].mean() * 100)\n\n # series of percentage donated against pledged\n # handle for negative values remaining for 'over achieving donors'\n normalized_balance_values = [0 if val < 0 else val for val in dframe.groupby('Last Name')['Remaining Balance'].sum() ]\n pct_outstanding = (normalized_balance_values /\n dframe.groupby('Last Name')['Amount'].mean() * 100)\n\n trace1 = go.Bar(\n x = pct_fulfiilled.values,\n y = pct_fulfiilled.index,\n name = 'received %',\n marker = {'color':'#8dc16a'},\n hoverinfo = 'x',\n opacity = .8,\n orientation = 'h'\n )\n trace2 = go.Bar(\n x = pct_outstanding.values,\n y = pct_outstanding.index,\n name = 'outstanding %',\n hoverinfo = 'x',\n marker = {'color':'#d6746f'},\n opacity = .8,\n orientation = 'h'\n )\n\n layout = go.Layout(\n legend = {'orientation': 'h'},\n xaxis = {'title': 'pct %',\n 'titlefont': {'color':'#CCCCCC'},\n 'tickfont': {'color': '#CCCCCC'}},\n # hide y axis names by matching text color to background\n yaxis = {'title': '',\n 'tickfont': {'color':'#303939'}},\n barmode = 'stack',\n hovermode = 'closest',\n title = 'Percent of Pledge Donated',\n titlefont = {'color':'white'},\n paper_bgcolor = '#303939',\n plot_bgcolor = '#303939')\n\n traces = [trace1,trace2]\n fig = {'data':traces,'layout':layout}\n\n return fig", "def day_delays(db: str) -> None:\n # Creating x and y variables for each month using the helper function\n # total_delays to get the cumalative minutes of delays on each day. 
\n month_1 = [total_delays(db, 0)]\n month_2 = [total_delays(db, 1)]\n month_3 = [total_delays(db, 2)]\n month_4 = [total_delays(db, 3)]\n month_5 = [total_delays(db, 4)]\n month_6 = [total_delays(db, 5)]\n \n # using the variables to plot bar graphs of each month.\n plt.figure(figsize=(14, 17))\n plt.xticks(fontsize=30)\n \n plt.subplot(2, 3, 1)\n plt.xlabel('Day')\n plt.ylabel('Total Time of Delays (minutes)') \n plt.title('Sept 2017') \n plt.bar(month_1[0][0], month_1[0][1])\n \n plt.subplot(2, 3, 2)\n plt.xlabel('Day')\n plt.ylabel('Total Time of Delays (minutes)') \n plt.title('Oct 2017')\n plt.bar(month_2[0][0], month_2[0][1]) \n \n plt.subplot(2, 3, 3)\n plt.xlabel('Day')\n plt.ylabel('Total Time of Delays (minutes)') \n plt.title('Nov 2017')\n plt.bar(month_3[0][0], month_3[0][1]) \n \n plt.subplot(2, 3, 4)\n plt.xlabel('Day')\n plt.ylabel('Total Time of Delays (minutes)') \n plt.title('Dec 2017')\n plt.bar(month_4[0][0], month_4[0][1]) \n \n plt.subplot(2, 3, 5)\n plt.xlabel('Day')\n plt.ylabel('Total Time of Delays (minutes)') \n plt.title('Jan 2018')\n plt.bar(month_5[0][0], month_5[0][1])\n \n plt.subplot(2, 3, 6)\n plt.xlabel('Day')\n plt.ylabel('Total Time of Delays (minutes)') \n plt.title('Feb 2018')\n plt.bar(month_6[0][0], month_6[0][1])\n \n plt.tight_layout()\n plt.savefig('day_delays.png')\n plt.close()", "def winter_bar_chart(self):\n # Create the top n countries dataframe from 1994 to 2016\n df_winter = self.df_winter[self.df_winter['Year'] >= 1994]\n m = list(df_winter['Country'].value_counts()[:self.n_top].index)\n df_top = df_winter[df_winter['Country'].isin(m)].groupby(['Country', 'Medal']).size()\n new_index = pd.MultiIndex.from_product([m, ['Gold', 'Silver', 'Bronze']], names=df_top.index.names)\n df_top = df_top.reindex(new_index)\n unstacked_df_top = df_top.unstack().reindex(m, columns=['Gold', 'Silver', 'Bronze'])\n # Create the dataframe in 2018\n k = []\n for j in self.df_2018_winter['NOC'].tolist():\n n = j[j.find('(') + 1:j.find(')')]\n k.append((n, j))\n k = dict(k)\n winter_2018 = pd.DataFrame()\n for i in m:\n if i != 'RUS':\n df_tmp = self.df_2018_winter[self.df_2018_winter['NOC'] == k[i]]\n else:\n df_tmp = self.df_2018_winter[self.df_2018_winter['NOC'] == k['OAR']]\n winter_2018 = pd.concat([winter_2018, df_tmp])\n winter_2018['Country'] = m\n new_winter_2018 = winter_2018.set_index(['Country'])[['Gold', 'Silver', 'Bronze']]\n # Add two dataframes and plot.\n unstacked_df_top.add(new_winter_2018).reindex(m[::-1], columns=['Bronze', 'Silver', 'Gold']).plot(kind='barh')\n plt.title('Medal Result of Winter Olympics since 1994')\n fname = './medal_figures_winter/winter_bar_chart.png'\n plt.savefig(fname=fname, format='png')\n return", "def create_income_expense_grouped_bar_chart(year_id):\n month_objects = get_months_by_year(year_id)\n\n # get chart data\n months = convert_to_verbose_months(month_objects)\n\n y_expenses = get_transactions_sum_data(month_objects, amount_type='expenses')\n \n y_incomes = get_transactions_sum_data(month_objects, amount_type='incomes')\n\n # build chart\n fig = go.Figure(\n data=[\n go.Bar(name='Gastos', x=months, y=y_expenses, marker_color='#b22222'),\n go.Bar(name=\"Rendas\", x=months, y=y_incomes, marker_color='#22b222')\n ]\n )\n\n fig.update_layout(barmode='group')\n\n plot_div = plot(fig, output_type='div', include_plotlyjs=False)\n\n return plot_div", "def outcome_bars(data, name=None, width=100):\n # if it's a dataframe already, just add the name for the legend\n if isinstance(data, pd.DataFrame):\n data_list = 
[data]\n elif isinstance(data, list):\n # check if it's a list of dicionaries, like player history, or a list\n # of lists\n for item in data:\n l_o_d = isinstance(item, dict)\n # if it's a list of dictionaries, just convert them\n if l_o_d:\n data_list = [pd.DataFrame(data)]\n else:\n data_list = [pd.DataFrame(item) for item in data]\n else:\n msg = \"'data' must be a DataFrame or list\"\n raise TypeError(msg)\n # calculate percentages\n # assign name to data\n if not name:\n name = [f\"Game{i}\" for i in range(len(data))]\n plot_data_list = [] # list to hold dataframes that will be plotted\n for _name, _data in zip(name, data_list):\n win, loss, push, surrender = results_pct(_data, as_series=False)\n plot_data_list.append(\n {\"game\": _name, \"result\": \"Win\", \"pct\": win, \"order\": 1},\n )\n plot_data_list.append(\n {\"game\": _name, \"result\": \"Loss\", \"pct\": loss, \"order\": 2}\n )\n plot_data_list.append(\n {\"game\": _name, \"result\": \"Push\", \"pct\": push, \"order\": 3}\n )\n plot_data_list.append(\n {\"game\": _name, \"result\": \"Surrender\", \"pct\": surrender, \"order\": 3}\n )\n plot_data = pd.DataFrame(plot_data_list)\n\n # create altair chart\n chart = alt.Chart(plot_data, width=width).mark_bar().encode(\n x=alt.X(\n \"game\",\n axis=alt.Axis(labelAngle=-45),\n title=None,\n sort=[\"Win\", \"Loss\", \"Push\"]\n ),\n y=alt.Y(\n \"pct:Q\"\n ),\n color=alt.Color(\n \"game:O\",\n legend=None\n ),\n column=alt.Column(\n \"result:O\",\n title=\"Result\"\n ),\n tooltip=[\n alt.Tooltip(\"pct\", title=\"Pct\")\n ]\n )\n return chart", "def make_bar_plots(df_list,\n x_col, y_col,\n problems,\n legend_bbox=(.05, .95),\n to_file='',\n show=False,\n excluded=None): \n import matplotlib.patches as mpatches\n\n def despine(ax):\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n\n a1 = df_list[0][x_col].unique().astype(int)\n a1 = a1[a1>0]\n a2 = df_list[1][x_col].unique().astype(int)\n a2 = a2[a2>0]\n assert len(a1) == len(a2) == 1\n \n action_nums = [a1[0], a2[0]]\n \n p1 = df_list[0]['Air cargo problem'].iloc[0]\n p2 = df_list[1]['Air cargo problem'].iloc[0]\n \n # Seach functions names should be common to all dfs:\n search = df_list[0].Searcher.tolist()\n \n # Sample cmap according to categories:\n s_len = len(search)\n cmap = plt.get_cmap('viridis')\n m = cmap.N // s_len\n colors = [cmap.colors[i*m] for i in range(s_len)]\n \n fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(12,5))\n \n # Use the minutes columns for the more complex problems:\n if y_col == 'ElapsedSeconds':\n ty_col = 'Elapsed time'\n if p1 == 3 or p == 4: # applies to problems 3/4\n y_col = 'Minutes'\n else:\n ty_col = y_col\n \n plt.title(f'{ty_col} vs. 
{x_col} for Problems {p1} & {p2}',\n y = 1.05, fontsize=14)\n\n for i, df in enumerate(df_list):\n ylog = False\n ylab = f'{y_col}'\n # log scale on NewNodes for df2, df3, df4:\n if (i == 1 or p1 == 3) and y_col == 'NewNodes':\n ylog = True\n ylab += ' (log)'\n \n axs[i].set_ylabel(ylab, fontsize=12)\n\n df[y_col].plot.bar(ax=axs[i], logy=ylog,\n color=colors,\n legend=False)\n \n t = '{}, {} = {:d}'.format(problems[i], x_col, action_nums[i])\n axs[i].set_xlabel(t, fontsize=12)\n axs[i].set_xticks([])\n despine(axs[i])\n\n legt = 'Searchers'\n new_lgd = p1 == 3 and excluded is not None\n if new_lgd:\n # Modify the legend to indicate excluded searches\n # (bc colormap is identical to fig1/2, but some runs have no data).\n legt += ' (X :: excluded)'\n excluded_len = len(excluded)\n x_idx = [excluded[i][0]-1 for i in range(excluded_len)]\n \n legend_patches = [] \n for i, c in enumerate(colors):\n lab = search[i]\n if new_lgd:\n if SEARCHES.index(lab) in x_idx:\n lab = lab.replace(' ', ' + ')\n lab += ' X'\n else:\n lab = lab.replace(' ', ' + ')\n else:\n lab = lab.replace(' ', ' + ')\n\n legend_patches.append(mpatches.Patch(color=c, label=lab))\n \n axs[1].legend(handles=legend_patches,\n title=legt,\n title_fontsize='14',\n fontsize='medium', \n bbox_to_anchor=legend_bbox, \n loc='upper left',\n labelspacing=0.6,\n fancybox=True)\n\n plt.tight_layout()\n \n if to_file:\n plt.savefig(to_file)\n \n if show:\n return axs", "def multiple_bars(self, df, nrows, ncols, dict):\n fig, axs = plt.subplots(nrows=8, ncols=1, figsize=(6, 9.3), sharey=\"row\")\n\n fig.subplots_adjust(left=0.03, right=0.97, hspace=0.3, wspace=0.05)\n\n indexes = df.index.tolist()\n df[\"index\"] = indexes\n df[\"effect_size\"] = df[\"index\"].apply(lambda x: x[0])\n df[\"sd\"] = df[\"index\"].apply(lambda x: x[1])\n df[\"group\"] = df[\"index\"].apply(lambda x: x[2])\n bar_width = 0.35\n # get an index to set the ticks for the x axis\n\n df_new = df.groupby(\"sd\")\n # for key, item in df_new:\n # print(df_new.get_group(key))\n for ax, (sd, dat) in zip(axs, df_new):\n n_groups = len(dat.index)\n index = np.arange(n_groups)\n\n # make barchart for permutation test\n bar1 = ax.bar(index, dat[\"perm\"], bar_width, color='b',\n label='Permutation test')\n # make barchart for t-test\n bar2 = ax.bar(index + bar_width, dat[\"t_test\"], bar_width, color='r',\n label='t-test')\n es = dat[\"effect_size\"].iloc[0]\n\n ax.set_ylabel(\"Error\")\n ax.set_xticks(index + bar_width / 2)\n ax.set_xticklabels(dict[\"xtickslabels\"])\n ax.set_xlabel(f\"Mean error for sd = {sd} per group size\")\n print(dat[\"sig\"])\n print(\"\\n\\n\")\n for rect, i in zip(bar1 + bar2, dat[\"sig\"]):\n height = rect.get_height()\n if i:\n ax.text(rect.get_x() + rect.get_width(), height, \"**\", ha='center', va='bottom')\n\n ax.legend()\n\n fig.suptitle(f\"Effect size = {es}\", y=1.0, fontsize = 15)\n fig.tight_layout()\n plt.show()", "def bar_plot(df_NP):\n cnt = Counter()\n for tax_list in df_NP.taxonomy:\n for tax in list(tax_list):\n if tax != 'no':\n cnt[tax] += 1\n plt.bar(cnt.keys(),cnt.values())\n plt.xlabel('taxonomic provenance')\n plt.ylabel('number of molecules')\n plt.title('number of aglycons with taxonomies')\n plt.savefig(\"output_data/Barplot.png\")\n print(\"BAR PLOT DONE\")", "def create_marriage_chart(region_list, comparison):\n if comparison == 'field':\n qty_data = create_data_by_field_qty(region_list, 'marriage')\n qty_chart = {\n 'chartType': 'bar',\n 'chartName': 'Status Pernikahan menurut Jumlah Orang',\n 'dataFields': qty_data,\n 
'dataOptions': {\n 'fieldAxis': 'Status Pernikahan',\n 'measureAxis': 'Jumlah Orang',\n 'tooltipStringFormat': ['_', ' ', 'Orang']\n }\n }\n\n dataset_total = sum(qty_data['values'])\n pct_data = create_data_by_field_pct(qty_data, dataset_total)\n pct_chart = {\n 'chartType': 'doughnut',\n 'chartName': 'Status Pernikahan menurut Persentase Orang',\n 'dataFields': pct_data,\n 'dataOptions': {\n 'fieldAxis': 'Status Pernikahan',\n 'measureAxis': 'Persentase Orang',\n 'tooltipStringFormat': ['_', '%']\n }\n }\n\n chart_list = {'chartList': [qty_chart, pct_chart]}\n jsonprint(chart_list)\n return chart_list\n\n elif comparison == 'region':\n (qty_list, label_list) = \\\n create_data_by_region_qty(region_list, 'marriage')\n dataset_total_list = get_dataset_total_list(qty_list)\n pct_list = create_data_by_region_pct(qty_list,\n dataset_total_list)\n\n chart_list = {'chartList': [], 'labelList': label_list}\n for index, chart in enumerate(qty_list):\n pct_list[index]['dataOptions'] = {\n 'tooltipStringFormat': ['_', '%'],\n 'fieldAxis': 'Status Pernikahan',\n 'measureAxis': 'Persentase Orang'\n } \n qty_list[index]['dataOptions'] = {\n 'tooltipStringFormat': ['_', ' ', 'Orang'],\n 'fieldAxis': 'Status Pernikahan',\n 'measureAxis': 'Jumlah Orang'\n }\n\n field = pct_list[index]['field']\n if field == 'Kawin':\n pct_list[index]['chartName'] = \\\n 'Persentase Warga yang sudah ' + field + \\\n ' menurut Kecamatan'\n qty_list[index]['chartName'] = \\\n 'Jumlah Warga yang sudah ' + field + \\\n ' menurut Kecamatan'\n else:\n pct_list[index]['chartName'] = \\\n 'Persentase Warga yang ' + field + \\\n ' menurut Kecamatan'\n qty_list[index]['chartName'] = \\\n 'Jumlah Warga yang ' + field + \\\n ' menurut Kecamatan' \n\n chart_list['chartList'].append(pct_list[index])\n chart_list['chartList'].append(qty_list[index])\n\n jsonprint(chart_list)\n return chart_list", "def return_figures():\n graph_one = []\n df = cleandata()\n\n graph_one.append(\n go.Bar(name='Ones', x=['Related', 'Request', 'Offer',\n 'Aid related', 'Medical help', 'Medical products',\n 'Search and rescue', 'Security', 'Military', 'Child alone',\n 'Water', 'Food', 'Shelter', 'Clothing', 'Money', 'Missing people',\n 'Refugees', 'Death', 'Other aid', 'Infrastructure related',\n 'Transport', 'Buildings', 'Electricity', 'Tools', 'Hospitals',\n 'Shops', 'Aid centers', 'Other infrastructure', 'Weather related',\n 'Floods', 'Storm', 'Fire', 'Earthquake', 'Cold', 'Other weather',\n 'Direct report'], y=[df['related'].sum(),\n df['request'].sum(),\n df['offer'].sum(),\n df['aid_related'].sum(),\n df['medical_help'].sum(),\n df['medical_products'].sum(),\n df['search_and_rescue'].sum(),\n df['security'].sum(),\n df['military'].sum(),\n df['child_alone'].sum(),\n df['water'].sum(),\n df['food'].sum(),\n df['shelter'].sum(),\n df['clothing'].sum(),\n df['money'].sum(),\n df['missing_people'].sum(),\n df['refugees'].sum(),\n df['death'].sum(),\n df['other_aid'].sum(),\n df['infrastructure_related'].sum(),\n df['transport'].sum(),\n df['buildings'].sum(),\n df['electricity'].sum(),\n df['tools'].sum(),\n df['hospitals'].sum(),\n df['shops'].sum(),\n df['aid_centers'].sum(),\n df['other_infrastructure'].sum(),\n df['weather_related'].sum(),\n df['floods'].sum(),\n df['storm'].sum(),\n df['fire'].sum(),\n df['earthquake'].sum(),\n df['cold'].sum(),\n df['other_weather'].sum(),\n df['direct_report'].sum()]),\n )\n\n layout_one = dict(title='Distribution of message categories',\n xaxis=dict(tickangle=45),\n yaxis=dict(title='Count'),\n )\n\n graph_two = 
[]\n graph_two.append(\n go.Bar(\n x=['Direct', 'News', 'Social'],\n y=df.groupby('genre').count()['message'],\n )\n )\n\n layout_two = dict(title='Distribution of message genres',\n xaxis=dict(title='Message Genres', ),\n yaxis=dict(title='Count'),\n )\n\n # append all charts to the figures list\n figures = []\n figures.append(dict(data=graph_one, layout=layout_one))\n figures.append(dict(data=graph_two, layout=layout_two))\n\n return figures", "def plot(self, context=None):\n\n response = requests.get(self.url).content\n table = pd.read_html(response, attrs={\"id\": \"main_table_countries_today\"})\n df = table[0].fillna(0)\n # df.drop(df.index[0], inplace=True) # World\n df.drop([\"ActiveCases\", 'Serious,Critical', 'Serious,Critical', 'Deaths/1M pop', 'Tests/ 1M pop'], axis=1, inplace=True)\n df.drop(df.columns[6], axis=1, inplace=True)\n\n if len(context) > 3:\n context = context.lower().capitalize()\n df = df.loc[df[\"Country,Other\"] == context]\n if 4 > len(context) > 1:\n context = context.upper()\n df = df.loc[df[\"Country,Other\"] == context]\n if len(context) <= 1:\n df = df[1:]\n\n C_Names = df[\"Country,Other\"].head(n=10).values.tolist()\n T_Cases = df[\"TotalCases\"].head(n=10).values.tolist()\n # N_Cases = df[\"NewCases\"].head(n=10).values.tolist() # not plotted\n T_Deaths = df[\"TotalDeaths\"].head(n=10).values.tolist()\n # N_Deaths = df[\"NewDeaths\"].head(n=10).values.tolist() # not plotted\n T_Recovered = df[\"TotalRecovered\"].head(n=10).values.tolist()\n T_Tests = df[\"TotalTests\"].head(n=10).values.tolist()\n\n x = np.arange(len(C_Names))\n width = 0.20\n\n fig, ax = plt.subplots()\n\n ax.bar(x - 0.30, T_Cases, width, label='TotalCases', color=\"Blue\")\n ax.bar(x - 0.10, T_Deaths, width, label='TotalDeaths', color=\"Red\")\n ax.bar(x + 0.10, T_Tests, width, label='TotalTests', color=\"Green\")\n ax.bar(x + 0.30, T_Recovered, width, label='TotalRecovered', color=\"Orange\")\n\n if len(context) > 1:\n ax.set_title(\"{}'s Situation\".format(context))\n else:\n ax.set_title(\"World's Top10 Situation\")\n\n ax.set_xticks(x)\n ax.set_xticklabels(C_Names)\n ax.legend()\n plt.ticklabel_format(style='plain', axis=\"y\")\n fig.set_size_inches(18.5, 10.5)\n fig.tight_layout()\n plt.grid()\n\n if len(context) > 1:\n font1 = {'family': 'serif',\n 'color': 'blue',\n 'weight': 'bold',\n 'size': 20}\n font2 = {'family': 'serif',\n 'color': 'red',\n 'weight': 'normal',\n 'size': 20}\n font3 = {'family': 'serif',\n 'color': 'green',\n 'weight': 'normal',\n 'size': 20}\n font4 = {'family': 'serif',\n 'color': 'orange',\n 'weight': 'normal',\n 'size': 20}\n\n # bbox=dict(facecolor='black', alpha=0.5)\n plt.text(0.863, 0.67, \"Total Cases:\\n{:,}\".format(int(T_Cases[0])), fontdict=font1, transform=ax.transAxes)\n plt.text(0.863, 0.57, \"Total Deaths:\\n{:,}\".format(int(T_Deaths[0])), fontdict=font2, transform=ax.transAxes)\n plt.text(0.863, 0.47, \"Total Tests:\\n{:,}\".format(int(T_Tests[0])), fontdict=font3, transform=ax.transAxes)\n plt.text(0.863, 0.37, \"Total Recovered:\\n{:,}\".format(int(T_Recovered[0])), fontdict=font4, transform=ax.transAxes)\n\n # plt.savefig('corona.png') # Uncomment it to save the figure\n plt.show()" ]
[ "0.7562296", "0.6105632", "0.6035507", "0.5984834", "0.58694273", "0.5796705", "0.57743406", "0.5752848", "0.5696702", "0.5637777", "0.5625334", "0.5550093", "0.55419266", "0.55158776", "0.547458", "0.5461855", "0.5438678", "0.5402194", "0.53831524", "0.5329398", "0.53166", "0.53052723", "0.5280489", "0.5271553", "0.5242682", "0.5215043", "0.52045256", "0.51866746", "0.51861775", "0.5183559" ]
0.806361
0
Produces a grouped bar chart of the different causes of death, arranged by animal type.
def visualise_cause_of_death(data: LogData, directory: Path): rabbit_stats: list[int] = [0 for _ in CauseOfDeath] deer_stats: list[int] = [0 for _ in CauseOfDeath] wolf_stats: list[int] = [0 for _ in CauseOfDeath] bear_stats: list[int] = [0 for _ in CauseOfDeath] for event in data.events(): event_type: str = event["type"] if event_type == "death": tag: str = event["tag"] info = data.death_info(event["deathIndex"]) cause: int = info["cause"] if tag == "Rabbit": rabbit_stats[cause] = rabbit_stats[cause] + 1 elif tag == "Deer": deer_stats[cause] = deer_stats[cause] + 1 elif tag == "Wolf": wolf_stats[cause] = wolf_stats[cause] + 1 elif tag == "Bear": bear_stats[cause] = bear_stats[cause] + 1 figure = create_grouped_bar_chart({"Rabbits": rabbit_stats, "Deer": deer_stats, "Wolves": wolf_stats, "Bears": bear_stats}) figure.savefig(directory / Path("cause_of_death.png")) plot.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_grouped_bar_chart(stats: dict[str, list[int]]):\n\n figure, axes = plot.subplots()\n\n labels = [str(e) for e in CauseOfDeath]\n x = numpy.arange(len(labels))\n\n bar_width = 0.15\n max_value = 0\n\n rects = []\n i = 0\n for label, values in stats.items():\n max_value = max(max_value, max(values))\n rects.append(axes.bar(x + (i * bar_width), values, bar_width, label=label))\n i = i + 1\n\n axes.set_title(\"Deaths arranged by cause and animal type\")\n axes.set_ylabel(\"Amount\")\n axes.set_xticks(x)\n axes.set_xticklabels(labels)\n axes.legend()\n\n for rect in rects:\n attach_text_labels(rect, axes)\n\n figure.tight_layout()\n return figure", "def graph_cause_count(df):\r\n # set the visual features of the graph\r\n sns.set(font_scale=2)\r\n sns.set_style(\"darkgrid\")\r\n fig, ax = plt.subplots()\r\n fig.set_size_inches(20, 12)\r\n plt.xticks(rotation=45)\r\n ax.set_title(\"Yearly Vehicle Accident Police Deaths\")\r\n # create the graph of the data\r\n plot = sns.barplot(\"year\", \"count\", data=df, palette=\"winter_d\", ci=None)\r\n # plt.show()\r\n # save the graph as an image\r\n fig.savefig(\"2_graph_cause_count.png\")", "def bar_chart(self, df, n_groups, dict):\n fig, ax = plt.subplots()\n # choose bar width (standard 0.8 chosen)\n bar_width = 0.35\n # get an index to set the ticks for the x axis\n\n index = np.arange(n_groups)\n indexes = df.index.tolist()\n print(indexes)\n df[\"index\"] = indexes\n\n # make barchart for permutation test\n ax.bar(index, df[\"perm\"], bar_width, color='b', linewidth=4,\n label='Permutation test')\n # make barchart for t-test\n ax.bar(index + bar_width, df[\"t_test\"], bar_width, color='r',\n label='t-test')\n\n ax.set_xlabel(dict[\"xlabel\"])\n ax.set_ylabel(dict[\"ylabel\"])\n ax.set_title(dict[\"title\"])\n ax.set_xticks(index + bar_width / 2)\n ax.set_xticklabels(dict[\"xtickslabels\"])\n ax.legend()\n\n fig.tight_layout()\n plt.show()", "def createChart(cladeGroup, data, taxonomyDict, outputFile):\n dfData = []\n for clade in cladeGroup: \n temp, other, totalTemp = valueCountsSpecies(data, cladeGroup[clade], taxonomyDict)\n relativeTemp = {}\n for val in temp:\n relativeTemp[val] = (temp[val] / sum(list(temp.values())))*100\n dfData.append(relativeTemp)\n\n tempDF = pd.DataFrame(dfData, index=list(cladeGroup.keys()))\n tempDF = tempDF.fillna(0)\n\n # Plotting\n sns.set(rc={'figure.figsize':(20,15)}, font_scale=2)\n ax = tempDF.plot(kind=\"bar\", stacked=True, colormap=ListedColormap(sns.color_palette(\"twilight\", 12)), rot=0)\n for rect in ax.patches:\n # Find where everything is located\n height = rect.get_height()\n width = rect.get_width()\n x = rect.get_x()\n y = rect.get_y()\n \n # The height of the bar is the data value and can be used as the label\n label_text = f'{height:.2f}%' # f'{width:.2f}' to format decimal values\n \n # ax.text(x, y, text)\n label_x = x + width / 2\n label_y = y + height / 2\n \n # only plot labels greater than given width\n if height > 0.00:\n ax.text(label_x, label_y, label_text, ha='center', va='center', fontsize=20, color=\"w\")\n\n plt.legend(loc=\"center right\", bbox_to_anchor=(1.25, 0.5), ncol=1)\n plt.savefig(outputFile, bbox_inches=\"tight\")\n plt.show()\n return", "def visualize_type():\n \n data_file = parse(MY_FILE, ',')\n\n # num of incidents per category\n counter = Counter(item['Category'] for item in data_file)\n\n # Set the labels\n labels = tuple(counter.keys())\n\n # Set exactly where the labels hit the x-axis\n xlocations = na.array(range(len(labels))) + 0.5\n\n # Width of 
each bar\n width = 0.5\n\n # Assign data to a bar plot\n plt.bar(xlocations, counter.values(), width=width)\n\n # Assign labels and tick location to x-axis\n plt.xticks(xlocations + width / 2, labels, rotation=90)\n \n # Give some more room so the x-axis labels aren't cut off\n plt.subplots_adjust(bottom=0.4)\n\n # Make the overall graph/figure larger\n plt.rcParams['figure.figsize'] = 12, 8\n\n # save\n plt.savefig('Type.png')\n\n # close\n plt.clf()", "def graph_max_cause(df):\r\n # set the visual features of the graph\r\n sns.set(font_scale=2)\r\n sns.set_style(\"darkgrid\")\r\n fig, ax = plt.subplots()\r\n fig.set_size_inches(20, 10)\r\n plt.xticks(rotation=20)\r\n ax.set_title(\"States' Max Police Death Causes >= 150\")\r\n # create the graph of the data\r\n plot = sns.barplot(\"state\", \"max_count\", data=df, palette=\"bone\", hue='max_cause')\r\n # plt.show()\r\n # save the graph as an image\r\n fig.savefig(\"3_graph_max_cause.png\")", "def graph_year_cause_count(df):\r\n # set the visual features of the graph\r\n sns.set(font_scale=1.5)\r\n sns.set_style(\"darkgrid\")\r\n fig, ax = plt.subplots()\r\n fig.set_size_inches(20, 12)\r\n plt.xticks(rotation=25)\r\n ax.set_title(\"2001 and 2007 Police Death Causes\")\r\n # create the graph of the data\r\n plot = sns.barplot(\"cause_short\", \"count\", data=df, palette=\"bone\", hue='year')\r\n # plt.show()\r\n # save the graph as an image\r\n fig.savefig(\"1_graph_top_cause_count.png\")", "def message_genre_bar_chart(df):\n genre_counts = df.groupby('genre').count()['message']\n genre_names = list(genre_counts.index)\n return {\n 'data': [\n Bar(\n x=genre_names,\n y=genre_counts\n )\n ],\n\n 'layout': {\n 'title': 'Distribution of Message Genres',\n 'yaxis': {\n 'title': \"Count\"\n },\n 'xaxis': {\n 'title': \"Genre\"\n }\n }\n }", "def visualize_type():\n\t\n\t#grab our parsed data\n\tdata_file = parse(MY_FILE, \",\")\n\t\n\t#make a new variable, counter, from iterating through each line of\n\t#data in parsed data, and count how many incidents happen by category\n\tcounter = Counter(item[\"Category\"] for item in data_file)\n\t\n\t#set the labels which are based on the keys of our counter\n\t#since order doesn't matter, we can just use counter.keys()\n\tlabels = tuple(counter.keys())\n\t\n\t#set exactly where the labels should hit the x-axis\n\txlocations = np.arange(len(labels)) + 0.5\n\t\n\t#width of each bar that will be plotted\n\twidth = 0.5\n\t\n\t#assign data to a bar plot\n\tplt.bar(xlocations, counter.values(), width=width)\n\t\n\t#assign labels and tick location to x-axis\n\tplt.xticks(xlocations + width /2, labels, rotation=90)\n\t\n\t#give more room to the x-axis so the labels aren't cut off\n\tplt.subplots_adjust(bottom=0.4)\n\t\n\t#make the overall graph/figure larger\n\tplt.rcParams['figure.figsize'] = 12, 8\n\t\n\t#save the graph\n\tplt.savefig(\"type.png\")\n\t\n\t#close the plot figure\n\tplt.clf()", "def graph_cause_count_each(df, label):\r\n # set the visual features of the graph\r\n sns.set(font_scale=1.5)\r\n sns.set_style(\"darkgrid\")\r\n fig, ax = plt.subplots()\r\n fig.set_size_inches(15, 8)\r\n plt.xticks(rotation=45)\r\n ax.set_title(label.capitalize() + \" Police Death Causes\")\r\n # create the graph of the data\r\n plot = sns.barplot(\"year\", \"count\", data=df, palette=\"winter_d\")\r\n # plt.show()\r\n # save the graph as an image with the correct cause naming\r\n name = \"2_graph_cause_count_\" + label + \".png\"\r\n fig.savefig(name)", "def category_bar_chart(df):\n label_names = df.drop(['message', 
'original', 'genre', 'id'], axis=1).columns\n label_counts = []\n for column in label_names:\n label_counts.append(df[column].sum())\n return {\n 'data': [\n Bar(\n x=label_names,\n y=label_counts\n )\n ],\n\n 'layout': {\n 'title': 'Distribution of Labelled Categories',\n 'yaxis': {\n 'title': \"Count\",\n 'type': 'log'\n },\n 'xaxis': {\n 'title': \"Category\"\n }\n }\n }", "def bar_plot(df_NP):\n cnt = Counter()\n for tax_list in df_NP.taxonomy:\n for tax in list(tax_list):\n if tax != 'no':\n cnt[tax] += 1\n plt.bar(cnt.keys(),cnt.values())\n plt.xlabel('taxonomic provenance')\n plt.ylabel('number of molecules')\n plt.title('number of aglycons with taxonomies')\n plt.savefig(\"output_data/Barplot.png\")\n print(\"BAR PLOT DONE\")", "def grant_outcomes_barchart(dframe):\n # prepare dataframe\n dframe = df.copy()\n dframe.columns = [col.lower().replace(' ','_') for col in dframe.columns]\n dframe = dframe[dframe['organization_name'].notnull()]\n dframe.drop(['thank_you_sent','report_due','report_sent'],axis=1,\n inplace=True)\n dframe.set_index(dframe['date_application_sent'],inplace=True)\n\n grant_stage = []\n [grant_stage.append(status.lower().strip()) for status in dframe.stage]\n dframe['stage'] = grant_stage\n grant_status = [] # merge status to 3 primary categories, make 'awarded' tag\n for status in dframe.stage:\n if status not in ['obligations complete','pledged','posted']:\n grant_status.append(status)\n else:\n grant_status.append('awarded')\n dframe['grant_status'] = grant_status\n\n # create chart\n color_dict = {'awarded':'#adebad','not approved':'#d6746f',\n 'submitted':'#ffffb3'}\n grant_count_trace = []\n for status in dframe.grant_status.unique():\n grant_count_trace.append(go.Bar(\n x = dframe[dframe.grant_status==status].resample('Q')['stage'].count().index,\n y = dframe[dframe.grant_status==status].resample('Q')['stage'].count(),\n name = status,\n marker = {'color':color_dict[status]},\n opacity = .8))\n\n layout = {'barmode':'stack',\n 'hovermode':'closest',\n 'paper_bgcolor':'#303939',\n 'plot_bgcolor':'#303939',\n 'legend':{'font':{'color':'#CCCCCC'}},\n 'yaxis':{'title':'no. 
applications',\n 'tickfont':{'color':'#CCCCCC'},\n 'titlefont':{'color':'#CCCCCC'},\n 'showgrid':False},\n 'xaxis':{'title':'quarter submitted',\n 'titlefont':{'color':'#CCCCCC'},\n 'tickfont': {'color':'#CCCCCC'}},\n 'title':'Grant Application<br>Status Overview',\n 'titlefont':{'color':'#CCCCCC'}}\n\n fig = {'data':grant_count_trace, 'layout':layout}\n return fig", "def return_jitter_bar_fatality_chart(value=None):\n value = \"incident\" if value is None else value\n value_title_dict = {'incident': \"Incidents\",\n 'fatal_accident': \"Fatal Accidents\",\n 'fatalities': \"Fatalities\",\n 'lethality': \"Lethality\"}\n\n bar_plot = alt.Chart(data_wrangle.chart_1_data).encode(\n alt.X(f\"{value}_period:N\", title=\"Time period\"),\n alt.Y(f\"{value}_value:Q\", title=\"Count\"))\n\n jitter_plot = alt.Chart(data_wrangle.chart_1_data).encode(\n alt.X(f\"{value}_period:N\", title=\"Time period\"),\n alt.Y(f\"{value}_value:Q\", title=\"Count\"),\n tooltip=[\"airline\", f\"{value}_value:Q\"])\n\n plot1 = alt.layer(bar_plot.mark_boxplot(size=200,\n opacity=.4) +\n jitter_plot.mark_point()\n ).configure_title(fontSize=18\n ).configure_legend(labelFontSize=13\n ).configure_axis(labelAngle =0,\n labelFontSize=16,\n titleFontSize=24\n ).properties(width=800,\n height=600,\n title=f\"Count of Airline {value_title_dict[value]}\")\n\n return plot1", "def outcome_by_etio_no_pie(df):\n\n # TODO: possibly combine with etio by percentage\n\n sns.set(style=\"white\", palette=sns.color_palette(\"cubehelix\", 6)) #still 6 if osa csa\n f, axes = plt.subplots(5, 1, figsize=(6, 9)) # 6, 2 if OSA CSA\n sns.despine(top=True, bottom=True)\n # f.suptitle(\"Outcome, Grouped by Contributing Etiology\")\n\n # contains used instead of equal to include patients with multiple etio (e.g. 
cardiac+medication count to both)\n neurologic_df = df.loc[df['PostDx'].str.contains(\"Neurologic\")].sort_values(by='Outcome')\n cardiac_df = df.loc[df['PostDx'].str.contains(\"Cardiac\")].sort_values(by='Outcome')\n medication_df = df.loc[df['PostDx'].str.contains(\"Medication\")].sort_values(by='Outcome')\n tecsa_df = df.loc[df['PostDx'].str.contains(\"TECSA\")].sort_values(by='Outcome')\n # osacsa_df = df.loc[df['PostDx'].str.contains(\"OSA-CSA\")].sort_values(by='Outcome')\n primary_df = df.loc[df['PostDx'].str.contains(\"Primary\")].sort_values(by='Outcome')\n\n # collapse possible outcomes\n neurologic_df['col_outcome'] = neurologic_df.apply(collapse_initial_outcome, axis=1)\n cardiac_df['col_outcome'] = cardiac_df.apply(collapse_initial_outcome, axis=1)\n medication_df['col_outcome'] = medication_df.apply(collapse_initial_outcome, axis=1)\n tecsa_df['col_outcome'] = tecsa_df.apply(collapse_initial_outcome, axis=1)\n # osacsa_df['col_outcome'] = osacsa_df.apply(collapse_initial_outcome, axis=1)\n primary_df['col_outcome'] = primary_df.apply(collapse_initial_outcome, axis=1)\n\n # Create count plot for each Etio on the left, then a Pie Chart with proportion on the right\n\n hatches = ['///', '|||', 'xxx', '\\\\\\\\\\\\', '', '+++']\n face_color = ['dimgray', 'silver', 'whitesmoke', 'grey', 'gainsboro', 'darkgrey']\n\n # Neurologic\n bar = sns.countplot(y='col_outcome', data=neurologic_df, ax=axes[0])\n for i, this_bar in enumerate(bar.patches):\n # Set a different hatch for each bar\n this_bar.set_edgecolor('black')\n this_bar.set_facecolor(face_color[i])\n this_bar.set_hatch(hatches[i])\n axes[0].set(xlabel=\"\", ylabel=\"Neurologic\")\n\n\n # Cardiac\n bar = sns.countplot(y='col_outcome', data=cardiac_df, ax=axes[1])\n for i, this_bar in enumerate(bar.patches):\n # Set a different hatch for each bar\n this_bar.set_edgecolor('black')\n this_bar.set_facecolor(face_color[i])\n this_bar.set_hatch(hatches[i])\n axes[1].set(xlabel=\"\", ylabel=\"Cardiac\")\n\n # Medication\n bar = sns.countplot(y='col_outcome', data=medication_df, ax=axes[2])\n for i, this_bar in enumerate(bar.patches):\n # Set a different hatch for each bar\n this_bar.set_edgecolor('black')\n this_bar.set_facecolor(face_color[i])\n this_bar.set_hatch(hatches[i])\n axes[2].set(xlabel=\"\", ylabel=\"Medication\")\n\n # OSA-CSA\n # bar = sns.countplot(y='col_outcome', data=osacsa_df, ax=axes[3,0])\n # for i, this_bar in enumerate(bar.patches):\n # # Set a different hatch for each bar\n # this_bar.set_hatch(hatches[i])\n # axes[3].set(xlabel=\"\", ylabel=\"OSA-CSA\")\n # If adding OSA-CSA back, would need to increase by 1 all of the axes indices\n\n # TE-CSA\n bar = sns.countplot(y='col_outcome', data=tecsa_df, ax=axes[3])\n for i, this_bar in enumerate(bar.patches):\n # Set a different hatch for each bar\n this_bar.set_edgecolor('black')\n this_bar.set_facecolor(face_color[i])\n this_bar.set_hatch(hatches[i])\n axes[3].set(xlabel=\"\", ylabel=\"TE-CSA\")\n\n #Primary\n bar = sns.countplot(y='col_outcome', data=primary_df, ax=axes[4])\n for i, this_bar in enumerate(bar.patches):\n # Set a different hatch for each bar\n this_bar.set_edgecolor('black')\n this_bar.set_facecolor('white')\n this_bar.set_facecolor(face_color[i])\n axes[4].set(xlabel=\"Outcome of initial treatment by etiology\", ylabel=\"Primary CSA\")\n\n # Combined X axis for L side\n axes[4].get_shared_x_axes().join(axes[4], axes[3], axes[2], axes[1], axes[0]) # axes[5] would need to be added back\n axes[0].set_xticklabels(\"\")\n 
axes[1].set_xticklabels(\"\")\n axes[2].set_xticklabels(\"\")\n axes[3].set_xticklabels(\"\")\n # axes[4].set_xticklabels(\"\")\n # Leave bottom labels in\n\n # Resize all\n axes[0].autoscale()\n axes[1].autoscale()\n axes[2].autoscale()\n axes[3].autoscale()\n axes[4].autoscale()\n # axes[5].autoscale()\n\n f.tight_layout(rect=[0, 0, 1, 1])\n f.savefig('Outcome by Etio no pie.png', dpi=100)\n # plt.show()", "def activityPlot(act):\n # Plot 1 is simple stacked bar\n plt.figure(figsize=(9,4), dpi=100)\n ax1 = plt.subplot(1,2,1)\n labels = [gr for gr in act.keys()]\n poses = [i+.5 for i in range(len(labels))]\n # b_means, b_stds, t_means, t_stds, s_means, s_stds = [], [], [], [], [], []\n stat = {'b_means': [], 'b_stds': [], 't_means': [], 't_stds': [],'s_means': [], 's_stds': []}\n grkey = {'b_means': 'burst', 'b_stds': 'burst', 't_means': 'tonic', 't_stds': 'tonic','s_means': 'silent', 's_stds': 'silent'}\n fnkey = {'b_means': np.mean, 'b_stds': np.std, 't_means': np.mean, 't_stds': np.std,'s_means': np.mean, 's_stds': np.std}\n \n \n for gr in labels:\n for k in stat.keys():\n try:\n temp_ = fnkey[k](act[gr][grkey[k]])\n if str(temp_) == 'nan':\n stat[k].append(0.)\n else:\n stat[k].append(temp_)\n except:\n stat[k].append(0.)\n \n p_b = ax1.bar(poses, stat['b_means'], color='blue', alpha=0.6, \n yerr=stat['b_stds'], edgecolor='white')\n p_t = ax1.bar(poses, stat['t_means'], bottom=stat['b_means'], color='red', alpha=0.6, \n yerr=stat['t_stds'], edgecolor='white')\n p_s = ax1.bar(poses, stat['s_means'], bottom=[stat['b_means'][i]+\\\n stat['t_means'][i] for i in range(len(stat['b_means']))],\n color='purple', alpha=0.6, yerr=stat['s_stds'],\n edgecolor='white')\n # Cosmetics\n plt.xticks(poses, labels, rotation=30)\n plt.legend((p_b[0], p_t[0], p_s[0]), ('Burst', 'Tonic', 'Silent'))\n \n # Plot 2 is complex\n # ax2 = plt.subplot2grid((1,3), (0,1), colspan=2)\n ax2 = plt.subplot(1,2,2)\n for gr in range(len(labels)):\n ax2.plot(np.random.normal(loc=poses[gr], scale=.1, size=len(act[labels[gr]]['burstLoc'])), \n act[labels[gr]]['burstLoc'], 'o', color='blue', alpha=0.6,\n markeredgecolor='none')\n ax2.plot(np.random.normal(loc=poses[gr], scale=.1, size=len(act[labels[gr]]['tonicLoc'])), \n act[labels[gr]]['tonicLoc'], 'o', color='red', alpha=0.6,\n markeredgecolor='none')\n \n # Cosmetics\n plt.xticks(poses, labels, rotation=30)\n print(stat)\n plt.show()\n return", "def plotBarChart(resultConfirmed, resultDeath, resultVaccinated):\n fig, ax = plt.subplots(3)\n\n ax[0].plot(resultConfirmed['Date'], resultConfirmed['Confirmed Cases'])\n ax[0].title.set_text('Confirmed Cases')\n \n ax[1].plot(resultDeath['Date'], resultDeath['Death Cases'])\n ax[1].title.set_text('Death Cases')\n \n ax[2].plot(resultVaccinated['Date'], resultVaccinated['Vaccinated Person'])\n ax[2].title.set_text('Vaccinated Cases')\n fig.tight_layout()\n plt.show()", "def visualize_type(parsed_data, output_dir):\n\n # Fetching incident data by category\n counter = fetch_incident_by_category_and_resolution(parsed_data)\n\n # List of total incidents by Category\n # list of unsolved incidents by Category\n y1_values = [item[0] for item in counter.values()]\n y2_values = [item[1] for item in counter.values()]\n\n # Category labels\n x_labels = tuple(counter.keys())\n\n # Width of each bar\n bar_width = 0.4\n\n # bar locations on x-axis\n x1_locations = np.arange(len(x_labels))\n x2_locations = x1_locations + bar_width\n\n # assigning data to a bar plot\n plt.bar(x1_locations, y1_values, width=bar_width, label = \"Total\")\n 
plt.bar(x2_locations, y2_values, width=bar_width, label = \"Unresolved\")\n\n # Assigning labels and tick location to x-axis\n plt.xlabel('Incident Category', fontweight='bold')\n plt.ylabel('Incident Count', fontweight='bold')\n plt.xticks(x1_locations + bar_width/2, x_labels, rotation=90)\n\n # Giving some more room below x-axis\n plt.subplots_adjust(bottom=0.4)\n\n # Making the overall graph/figure larger\n plt.rcParams['figure.figsize'] = 12, 8\n\n plt.legend()\n file_name = os.path.join(output_dir, TYPE_PLOT_FILENAME)\n plt.savefig(file_name)\n plt.show()", "def return_figures():\n graph_one = []\n df = cleandata()\n\n graph_one.append(\n go.Bar(name='Ones', x=['Related', 'Request', 'Offer',\n 'Aid related', 'Medical help', 'Medical products',\n 'Search and rescue', 'Security', 'Military', 'Child alone',\n 'Water', 'Food', 'Shelter', 'Clothing', 'Money', 'Missing people',\n 'Refugees', 'Death', 'Other aid', 'Infrastructure related',\n 'Transport', 'Buildings', 'Electricity', 'Tools', 'Hospitals',\n 'Shops', 'Aid centers', 'Other infrastructure', 'Weather related',\n 'Floods', 'Storm', 'Fire', 'Earthquake', 'Cold', 'Other weather',\n 'Direct report'], y=[df['related'].sum(),\n df['request'].sum(),\n df['offer'].sum(),\n df['aid_related'].sum(),\n df['medical_help'].sum(),\n df['medical_products'].sum(),\n df['search_and_rescue'].sum(),\n df['security'].sum(),\n df['military'].sum(),\n df['child_alone'].sum(),\n df['water'].sum(),\n df['food'].sum(),\n df['shelter'].sum(),\n df['clothing'].sum(),\n df['money'].sum(),\n df['missing_people'].sum(),\n df['refugees'].sum(),\n df['death'].sum(),\n df['other_aid'].sum(),\n df['infrastructure_related'].sum(),\n df['transport'].sum(),\n df['buildings'].sum(),\n df['electricity'].sum(),\n df['tools'].sum(),\n df['hospitals'].sum(),\n df['shops'].sum(),\n df['aid_centers'].sum(),\n df['other_infrastructure'].sum(),\n df['weather_related'].sum(),\n df['floods'].sum(),\n df['storm'].sum(),\n df['fire'].sum(),\n df['earthquake'].sum(),\n df['cold'].sum(),\n df['other_weather'].sum(),\n df['direct_report'].sum()]),\n )\n\n layout_one = dict(title='Distribution of message categories',\n xaxis=dict(tickangle=45),\n yaxis=dict(title='Count'),\n )\n\n graph_two = []\n graph_two.append(\n go.Bar(\n x=['Direct', 'News', 'Social'],\n y=df.groupby('genre').count()['message'],\n )\n )\n\n layout_two = dict(title='Distribution of message genres',\n xaxis=dict(title='Message Genres', ),\n yaxis=dict(title='Count'),\n )\n\n # append all charts to the figures list\n figures = []\n figures.append(dict(data=graph_one, layout=layout_one))\n figures.append(dict(data=graph_two, layout=layout_two))\n\n return figures", "def multiple_bars(self, df, nrows, ncols, dict):\n fig, axs = plt.subplots(nrows=8, ncols=1, figsize=(6, 9.3), sharey=\"row\")\n\n fig.subplots_adjust(left=0.03, right=0.97, hspace=0.3, wspace=0.05)\n\n indexes = df.index.tolist()\n df[\"index\"] = indexes\n df[\"effect_size\"] = df[\"index\"].apply(lambda x: x[0])\n df[\"sd\"] = df[\"index\"].apply(lambda x: x[1])\n df[\"group\"] = df[\"index\"].apply(lambda x: x[2])\n bar_width = 0.35\n # get an index to set the ticks for the x axis\n\n df_new = df.groupby(\"sd\")\n # for key, item in df_new:\n # print(df_new.get_group(key))\n for ax, (sd, dat) in zip(axs, df_new):\n n_groups = len(dat.index)\n index = np.arange(n_groups)\n\n # make barchart for permutation test\n bar1 = ax.bar(index, dat[\"perm\"], bar_width, color='b',\n label='Permutation test')\n # make barchart for t-test\n bar2 = ax.bar(index + 
bar_width, dat[\"t_test\"], bar_width, color='r',\n label='t-test')\n es = dat[\"effect_size\"].iloc[0]\n\n ax.set_ylabel(\"Error\")\n ax.set_xticks(index + bar_width / 2)\n ax.set_xticklabels(dict[\"xtickslabels\"])\n ax.set_xlabel(f\"Mean error for sd = {sd} per group size\")\n print(dat[\"sig\"])\n print(\"\\n\\n\")\n for rect, i in zip(bar1 + bar2, dat[\"sig\"]):\n height = rect.get_height()\n if i:\n ax.text(rect.get_x() + rect.get_width(), height, \"**\", ha='center', va='bottom')\n\n ax.legend()\n\n fig.suptitle(f\"Effect size = {es}\", y=1.0, fontsize = 15)\n fig.tight_layout()\n plt.show()", "def eda_plot():\n\n df1 = pd.read_csv('eda_malware.csv')\n df2 = pd.read_csv('eda_random.csv')\n df3 = pd.read_csv('eda_popular.csv')\n\n df = pd.concat([df1, df2, df3], ignore_index=True)\n df['label'].replace([0,1],['Benign','Malware'],inplace=True)\n\n colors = ['#EAB6AB','#D9E6F3','#CBAACB','#CCE2CB', '#FFAEA5', '#A2E1DB', '#97C1A9']\n # b vs. m: node types counts\n f1 = pd.crosstab(df['label'], df['node_types_counts'])\n\n f1 = pd.DataFrame({\"3 Types\": [1, 4], \"4 Types\": [1, 407], \"5 Types\": [245, 5768], \"6 Types\": [39, 1113], \"7 Types\": [83, 487], \"8 Types\": [154, 368], \"9 Types\": [103, 286]}).rename(index={0:'Benign', 1:'Malware'})\n f1.plot(kind='bar', color=colors)\n fig = plt.gcf()\n plt.legend(loc='upper left')\n plt.title('Benign vs. Malicious: Number of Node Types')\n fig.savefig('bv_node_types.png')\n\n # for a better look, limit type 5 malware to 2k counts only\n f1 = pd.DataFrame({\"3 Types\": [1, 4], \"4 Types\": [1, 407], \"5 Types\": [245, 2000], \"6 Types\": [39, 1113], \"7 Types\": [83, 487], \"8 Types\": [154, 368], \"9 Types\": [103, 286]}).rename(index={0:'Benign', 1:'Malware'})\n f1.plot(kind='bar', color=colors)\n fig = plt.gcf()\n plt.legend(loc='upper left')\n plt.title('Benign vs. 
Malicious: Number of Node Types')\n fig.savefig('bv_node_types1.png')\n\n # node types\n # for malware: extract node types info for node types counts > 5, and sum up each types counts\n node_types = df[(df['label'] == 'Malware') & (df['node_types_counts'] >= 5)]['node_types'] #series\n lst = [ast.literal_eval(s) for s in node_types]\n\n c = Counter()\n for d in lst:\n c.update(d)\n\n df_nt = pd.DataFrame(dict(c).items(), columns=['node_types', 'counts'])\n df_nt = df_nt.sort_values(by=['counts'])\n\n sizes = [215060, 2823059, 3135725, 5641356, 10679709, 16547701]\n labels = ['Others', 'static,Node', 'public,static,Node', 'Node', 'external,Node', 'public,Node']\n\n colors = ['#EAB6AB','#D9E6F3','#CBAACB','#CCE2CB', '#FFAEA5', '#A2E1DB']\n\n fig1, ax1 = plt.subplots(figsize=(7, 7))\n ax1.pie(sizes, labels=labels, autopct='%1.1f%%',\n shadow=False, startangle=90, colors=colors)\n ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n plt.title('Malware: Top Node Types and Its Counts', y=1.05)\n\n plt.show()\n fig1.savefig('counts_pie_m.png')\n\n # for benign: extract node types info for node types counts, and sum up each types counts\n node_types = df[(df['label'] == 'Benign')]['node_types'] #series\n lst = [ast.literal_eval(s) for s in node_types]\n\n c = Counter()\n for d in lst:\n c.update(d)\n\n df_nt = pd.DataFrame(dict(c).items(), columns=['node_types', 'counts'])\n df_nt = df_nt.sort_values(by=['counts'])\n\n sizes = [77967, 2892033, 2964924, 5287258, 6478196, 20364339]\n labels = ['Others', 'staticNode', 'public,staticNode', 'external,Node', 'Node', 'public,Node']\n\n colors = ['#EAB6AB','#D9E6F3','#CBAACB','#CCE2CB', '#FFAEA5', '#A2E1DB']\n\n fig1, ax1 = plt.subplots(figsize=(7, 7))\n ax1.pie(sizes, labels=labels, autopct='%1.1f%%',\n shadow=False, startangle=90, colors=colors)\n ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n plt.title('Benign: Top Node Types and Its Counts', y=1.05)\n\n plt.show()\n fig1.savefig('counts_pie_b.png')\n\n # benign vs malware: counts\n sizes = [8435, 802]\n labels = ['Benign', 'Malware']\n\n colors = ['#EAB6AB','#D9E6F3']\n\n fig1, ax1 = plt.subplots(figsize=(7, 7))\n ax1.pie(sizes, labels=labels, autopct='%1.1f%%',\n shadow=False, startangle=90, colors=colors)\n ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n plt.title('Number of Benign vs. Malware', y=1.05)\n\n plt.show()\n fig1.savefig('bm_counts.png')\n\n # number of edges vs number of nodes\n groups = df.groupby('label')\n colors = ['#FFAEA5', '#A2E1DB']\n\n # Plot\n fig, ax = plt.subplots()\n ax.margins(0.05) # Optional, just adds 5% padding to the autoscaling\n for name, group in groups:\n if name == 'Benign':\n c = colors[0]\n else:\n c = colors[1]\n ax.plot(group.number_edges, group.number_nodes, marker='o', linestyle='', ms=4, label=name, c=c)\n ax.legend()\n ax.set_xlabel('Number of Edges')\n ax.set_ylabel('Number of Nodes')\n ax.set_title('Benign & Malware: Number of Edges vs. 
Number of Nodes', y=1.05)\n\n plt.show()\n fig.savefig('bm_edges_nodes.png')", "def multiple_bars(self, df, nrows, ncols, dict):\n fig, axs = plt.subplots(nrows=nrows, ncols=ncols, figsize=(6, 9.3))\n\n fig.subplots_adjust(left=0.03, right=0.97, hspace=0.50, wspace=0.05)\n\n bar_width = 0.35\n for ax, (key, dat) in zip(axs.flatten(), df):\n n_groups = len(dat.index)\n index = np.arange(n_groups)\n\n # make barchart for permutation test\n bar1 = ax.bar(index, dat[\"perm\"], bar_width, color='b',\n label='Permutation test')\n # make barchart for t-test\n bar2 = ax.bar(index + bar_width, dat[\"t_test\"], bar_width, color='r',\n label='t-test')\n\n ax.set_ylabel(\"Error\")\n ax.set_xticks(index + bar_width / 2)\n ax.set_xticklabels(dict[\"xtickslabels\"])\n ax.set_title(f\"Effect size = {key}\")\n ax.set_xlabel(f\"Group Size\")\n ax.legend()\n\n for rect, i in zip(bar1 + bar2, dat[\"sig\"]):\n height = rect.get_height()\n if i:\n ax.text(rect.get_x() + rect.get_width(), height, \"**\", ha='center', va='bottom')\n\n\n fig.suptitle(dict[\"title\"], y=1.0, fontsize = 15)\n fig.tight_layout()\n plt.show()", "def outcome_bars(data, name=None, width=100):\n # if it's a dataframe already, just add the name for the legend\n if isinstance(data, pd.DataFrame):\n data_list = [data]\n elif isinstance(data, list):\n # check if it's a list of dictionaries, like player history, or a list\n # of lists\n for item in data:\n l_o_d = isinstance(item, dict)\n # if it's a list of dictionaries, just convert them\n if l_o_d:\n data_list = [pd.DataFrame(data)]\n else:\n data_list = [pd.DataFrame(item) for item in data]\n else:\n msg = \"'data' must be a DataFrame or list\"\n raise TypeError(msg)\n # calculate percentages\n # assign name to data\n if not name:\n name = [f\"Game{i}\" for i in range(len(data))]\n plot_data_list = [] # list to hold dataframes that will be plotted\n for _name, _data in zip(name, data_list):\n win, loss, push, surrender = results_pct(_data, as_series=False)\n plot_data_list.append(\n {\"game\": _name, \"result\": \"Win\", \"pct\": win, \"order\": 1},\n )\n plot_data_list.append(\n {\"game\": _name, \"result\": \"Loss\", \"pct\": loss, \"order\": 2}\n )\n plot_data_list.append(\n {\"game\": _name, \"result\": \"Push\", \"pct\": push, \"order\": 3}\n )\n plot_data_list.append(\n {\"game\": _name, \"result\": \"Surrender\", \"pct\": surrender, \"order\": 3}\n )\n plot_data = pd.DataFrame(plot_data_list)\n\n # create altair chart\n chart = alt.Chart(plot_data, width=width).mark_bar().encode(\n x=alt.X(\n \"game\",\n axis=alt.Axis(labelAngle=-45),\n title=None,\n sort=[\"Win\", \"Loss\", \"Push\"]\n ),\n y=alt.Y(\n \"pct:Q\"\n ),\n color=alt.Color(\n \"game:O\",\n legend=None\n ),\n column=alt.Column(\n \"result:O\",\n title=\"Result\"\n ),\n tooltip=[\n alt.Tooltip(\"pct\", title=\"Pct\")\n ]\n )\n return chart", "def barGraph(listOfWord, listOfFrequency):\r\n\r\n\tindex = np.arange(len(listOfWord))\r\n\r\n\tplt.title(\"Frekuensi Kemunculan Kata\")\r\n\tplt.barh(index, listOfFrequency)\r\n\tplt.xlabel('Frekuensi')\r\n\tplt.yticks(index, listOfWord, fontsize=6)\r\n\r\n\tplt.show()", "def summer_bar_chart(self):\n # Create top n countries data from 1996 to 2014\n df_summer = self.df_summer[self.df_summer['Year'] >= 1996]\n m = list(df_summer['Country'].value_counts()[:self.n_top].index)\n df_top = df_summer[df_summer['Country'].isin(m)].groupby(['Country', 'Medal']).size()\n new_index = pd.MultiIndex.from_product([m, ['Gold', 'Silver', 'Bronze']], names=df_top.index.names)\n df_top = 
df_top.reindex(new_index)\n unstacked_df_top = df_top.unstack().reindex(m, columns=['Gold', 'Silver', 'Bronze'])\n k = []\n # Create the dataframe in 2016.\n for j in self.df_2016_summer['NOC'].tolist():\n n = j[j.find('(') + 1:j.find(')')]\n k.append((n, j))\n k = dict(k)\n summer_2016 = pd.DataFrame()\n for i in m:\n df_tmp = self.df_2016_summer[self.df_2016_summer['NOC'] == k[i]]\n summer_2016 = pd.concat([summer_2016, df_tmp])\n summer_2016['Country'] = m\n new_summer_2016 = summer_2016.set_index(['Country'])[['Gold', 'Silver', 'Bronze']]\n # Add the two dataframes and plot\n unstacked_df_top.add(new_summer_2016).reindex(m[::-1], columns=['Bronze', 'Silver', 'Gold']).plot(kind='barh')\n plt.title('Medal Result of Summer Olympics since 1996')\n fname = './medal_figures_summer/summer_bar_chart.png'\n plt.savefig(fname=fname, format='png')\n return", "def _bar_example_1(quantity_by_fruit):\n ch = chartify.Chart(blank_labels=True, x_axis_type=\"categorical\")\n ch.set_title(\"Vertical bar plot\")\n ch.set_subtitle(\"Automatically sorts by value counts.\")\n ch.plot.bar(\n data_frame=quantity_by_fruit,\n categorical_columns=\"fruit\",\n numeric_column=\"quantity\",\n )\n ch.show(_OUTPUT_FORMAT)", "def create_income_expense_grouped_bar_chart(year_id):\n month_objects = get_months_by_year(year_id)\n\n # get chart data\n months = convert_to_verbose_months(month_objects)\n\n y_expenses = get_transactions_sum_data(month_objects, amount_type='expenses')\n \n y_incomes = get_transactions_sum_data(month_objects, amount_type='incomes')\n\n # build chart\n fig = go.Figure(\n data=[\n go.Bar(name='Gastos', x=months, y=y_expenses, marker_color='#b22222'),\n go.Bar(name=\"Rendas\", x=months, y=y_incomes, marker_color='#22b222')\n ]\n )\n\n fig.update_layout(barmode='group')\n\n plot_div = plot(fig, output_type='div', include_plotlyjs=False)\n\n return plot_div", "def test_amount_of_deaths(self) -> None:\n # Get Data\n data = self.data_handler_1.amount_of_deaths()\n results = defaultdict(None,\n {'זכר': defaultdict(int, {'75-84': 97, '65-74': 93, '<65': 62, '85+': 62}),\n 'נקבה': defaultdict(int, {'85+': 63, '75-84': 52, '65-74': 41, '<65': 30})})\n # Data Validation\n self._test_two_level_depth_nested_dictionaries(data, results)", "def plot_bar_chart_quantum_vs_classical(\n df_bugs: pd.DataFrame,\n column_to_inspect: str,\n mapping_dict: Dict[str, str],\n categories_to_exclude: List[str] = [],\n categories_keep_only: List[str] = None,\n out_file_name: str = None,\n out_folder_path: str = None,\n horizontal: bool = False,\n map_value_since_beginning: bool = False,\n figsize: Tuple[int, int] = (10, 5),\n legend_placement: str = 'upper center'\n ):\n\n fig, ax = plt.subplots(figsize=figsize)\n\n df = expand_columns(df_bugs, column_to_inspect)\n df = df[~(df[column_to_inspect].isin(categories_to_exclude))]\n\n if categories_keep_only is not None:\n df = df[df[column_to_inspect].isin(categories_keep_only)]\n\n if map_value_since_beginning:\n df[column_to_inspect] = df[column_to_inspect].map(mapping_dict)\n\n categories_q_bugs = list(df[\n df['type'] == 'Quantum'].groupby(\n column_to_inspect).count().sort_values(\n by='type', ascending=False).index)\n\n for component in df[column_to_inspect].unique():\n if component not in categories_q_bugs:\n categories_q_bugs.append(component)\n\n args = {\n \"hue\": \"type\",\n \"data\": df,\n \"palette\": PALETTE,\n \"ax\": ax,\n \"order\": categories_q_bugs\n }\n\n if horizontal:\n sns.countplot(y=column_to_inspect, **args)\n ax.grid(axis='x')\n else:\n 
sns.countplot(x=column_to_inspect, **args)\n ax.grid(axis='y')\n\n if not map_value_since_beginning:\n # map the value at the latest stage, thus in the labels\n obj_labels = ax.get_xticklabels()\n for i, l in enumerate(obj_labels):\n obj_labels[i] = mapping_dict[l.get_text()]\n ax.set_xticklabels(obj_labels, rotation=60, ha='right')\n\n ax.set_xlabel(capitalize(column_to_inspect), fontsize=15)\n ax.set_ylabel(\"Count\", fontsize=15)\n plt.legend(title=\"Type of Bug\", loc=legend_placement)\n plt.tight_layout()\n\n if out_file_name is not None and out_folder_path is not None:\n fig.savefig(os.path.join(out_folder_path, out_file_name), format=\"pdf\")", "def stacked_grouped_bar_chart(df, **kwargs):\n\n fig = go.Figure()\n\n color = dict(\n zip(\n df.columns.levels[1],\n px.colors.qualitative.Plotly[: len(df.columns.levels[1])],\n )\n )\n showlegend = [i % len(df.columns.levels[0]) == 0 for i in range(len(df.columns))]\n\n # xaxis_tickformat doesn't appear to work so have to format the dataframe index\n if isinstance(df.index, pd.DatetimeIndex):\n df = df.copy()\n freq = pd.infer_freq(df.index)\n if freq is not None:\n if freq in (\"M\", \"MS\", \"ME\"):\n df.index = df.index.map(lambda x: x.strftime(\"%m-%Y\"), 1)\n if freq in (\"Y\", \"YS\", \"YE\"):\n df.index = df.index.map(lambda x: x.year, 1)\n if freq in (\"D\", \"B\"):\n df.index = df.index.map(lambda x: x.date(), 1)\n\n i = 0\n for col in df.columns:\n f = df[col[0]][col[1]]\n fig.add_trace(\n go.Bar(\n x=[f.index, [col[0]] * len(f.index)],\n y=f,\n name=col[1],\n marker_color=color[col[1]],\n legendgroup=col[1],\n showlegend=showlegend[i],\n )\n )\n i += 1\n\n fig.update_layout(\n title=kwargs.get(\"title\", \"\"),\n xaxis=dict(title_text=kwargs.get(\"xaxis_title\", None)),\n yaxis=dict(title_text=kwargs.get(\"yaxis_title\", None)),\n barmode=\"relative\",\n margin=preset_margins,\n )\n\n return fig" ]
[ "0.79494953", "0.6173366", "0.60805166", "0.60178256", "0.59662867", "0.59589386", "0.5912419", "0.59108937", "0.5900395", "0.5898058", "0.5876276", "0.5787513", "0.5760313", "0.5743132", "0.5596977", "0.55531824", "0.553276", "0.54811996", "0.5470746", "0.544473", "0.5411298", "0.54003406", "0.538347", "0.53833973", "0.53729403", "0.53559786", "0.53503907", "0.5345571", "0.53108174", "0.52976483" ]
0.7591147
1
Generates a new OperatorBuilder object. atomicTermExpr i an TorqExpression object. A sequence which matches it will be recognized an atomic item and not be parsed further. composedTermNodeLables is a list of labels. Nodes who have one of them will be recognized an expression, which may include another expression inside. generatedTermLabel is a label. An expression parsed by a TorqExpression object (generated by self.build__expr() method) will be enclosed by a node with this label.
def __init__(self, atomicTermExpr=None, composedTermNodeLabels=None, generatedTermLabel=None): self.__ate = atomicTermExpr if atomicTermExpr is not None else Never() self.__ctnls = composedTermNodeLabels self.__gtl = generatedTermLabel
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compile_term(self):\n\n\t\tself.outfile.write('<term>\\n')\n\n\t\tcount = 0\n\n\t\twhile(self.tokenizer.get_token() not in [')',']',';',',', '/', '|', '<', '>', '=', '*', '+', '&']):\n\t\t\tif self.tokenizer.get_token().isdigit():\n\t\t\t\tself.outfile.write(self.tokenizer.int_value())\n\t\t\telif '\"' in self.tokenizer.get_token():\n\t\t\t\tself.outfile.write(self.tokenizer.str_value())\n\t\t\telif self.tokenizer.get_token() in ['true', 'false', 'null', 'this']:\n\t\t\t\tself.outfile.write(self.tokenizer.keyword())\n\t\t\telif self.tokenizer.get_token() == '-' and count == 0:\n\t\t\t\tself.outfile.write(self.tokenizer.symbol())\n\t\t\t\tself.compile_term()\n\t\t\telif self.tokenizer.get_token() == '-' and count > 0:\n\t\t\t\tbreak\n\t\t\telif self.tokenizer.get_token() == '~':\n\t\t\t\tself.outfile.write(self.tokenizer.symbol())\n\n\t\t\t\tif self.tokenizer.get_token() != '(':\n\t\t\t\t\tself.compile_term()\n\n\t\t\t\telse:\n\t\t\t\t\tself.outfile.write('<term>\\n' + self.tokenizer.symbol())\n\t\t\t\t\tself.compile_expression()\n\t\t\t\t\txml = self.tokenizer.symbol() + '</term>\\n'\n\t\t\t\t\tself.outfile.write(xml)\n\n\t\t\telif self.tokenizer.get_token() == '(':\n\n\t\t\t\tself.outfile.write(self.tokenizer.symbol())\n\t\t\t\tself.compile_expression()\n\t\t\t\tself.outfile.write(self.tokenizer.symbol())\n\n\t\t\telif self.tokenizer.get_token() == '[':\n\t\t\t\txml = self.tokenizer.symbol()\n\t\t\t\tself.outfile.write(xml)\n\n\t\t\t\tself.compile_expression()\n\n\t\t\t\tself.outfile.write(self.tokenizer.symbol())\n\n\t\t\telif self.tokenizer.get_token() == '.':\n\t\t\t\txml = self.tokenizer.symbol() + self.tokenizer.identifier() + self.tokenizer.symbol() + '<expressionList>\\n'\n\t\t\t\tself.outfile.write(xml)\n\n\t\t\t\tif self.tokenizer.get_token() != ')':\n\t\t\t\t\tself.compile_expression_list()\n\n\t\t\t\tself.outfile.write('</expressionList>\\n' + self.tokenizer.symbol())\n\t\t\n\t\t\telse:\n\t\t\t\tself.outfile.write(self.tokenizer.identifier())\n\n\t\t\tcount = count + 1\n\n\t\tself.outfile.write('</term>\\n')\n\n\t\tif self.tokenizer.get_token() in self.tokenizer._operands:\n\t\t\tif self.tokenizer.get_token() in ['<', '>', '\"', '&']:\n\t\t\t\txml = '<symbol> ' + CompilationEngine._operands.get(self.tokenizer.get_token()) + ' </symbol>\\n'\n\t\t\t\tself.tokenizer.advance()\n\t\t\telse:\n\t\t\t\txml = self.tokenizer.symbol()\n\n\t\t\tself.outfile.write(xml)\n\t\t\tself.compile_term()", "def _build_term_op_(term, state_array, int_state_array, sorting_indices):\n\n space_size = state_array.shape[0]\n\n needs_to_be_occupied = []\n needs_to_be_unoccupied = []\n\n # We keep track of the number of creation and annihilation operators and\n # ensure that there are an equal number of them in order to help detect\n # invalid inputs.\n delta = 0\n for index, op_type in reversed(term):\n if op_type == 0:\n needs_to_be_occupied.append(index)\n delta -= 1\n else:\n if index not in needs_to_be_occupied:\n needs_to_be_unoccupied.append(index)\n delta += 1\n\n if delta != 0:\n raise ValueError(\n \"The supplied operator doesn't preserve particle number\")\n\n # We search for every state which has the necessary orbitals occupied and\n # unoccupied in order to not be immediately zeroed out based on the\n # creation and annihilation operators specified in term.\n maybe_valid_states = numpy.where(\n numpy.logical_and(\n numpy.all(state_array[:, needs_to_be_occupied], axis=1),\n numpy.logical_not(\n numpy.any(state_array[:, needs_to_be_unoccupied], axis=1))))[0]\n\n data = []\n row_ind = []\n 
col_ind = []\n shape = (space_size, space_size)\n\n # For each state that is not immediately zeroed out by the action of our\n # operator we check to see if the determinant which this state gets mapped\n # to is in the space we are considering.\n # Note that a failure to find any state does not necessarily indicate that\n # term specifies an invalid operator. For example, if we are restricting\n # ourselves to double excitations from a fixed reference state then the\n # action of term on some of our basis states may lead to determinants with\n # more than two excitations from the reference. These more than double\n # excited determinants are not included in the matrix representation (and\n # hence, will not be present in state_array).\n for _, state in enumerate(maybe_valid_states):\n determinant = state_array[state, :]\n target_determinant = determinant.copy()\n\n parity = 1\n for i, _ in reversed(term):\n area_to_check = target_determinant[0:i]\n parity *= (-1)**numpy.sum(area_to_check)\n\n target_determinant[i] = not target_determinant[i]\n\n int_encoding = target_determinant.dot(\n 1 << numpy.arange(target_determinant.size)[::-1])\n\n target_state_index_sorted = numpy.searchsorted(int_state_array,\n int_encoding,\n sorter=sorting_indices)\n\n target_state = sorting_indices[target_state_index_sorted]\n\n if int_state_array[target_state] == int_encoding:\n # Then target state is in the space considered:\n data.append(parity)\n row_ind.append(target_state)\n col_ind.append(state)\n\n data = numpy.asarray(data)\n row_ind = numpy.asarray(row_ind)\n col_ind = numpy.asarray(col_ind)\n\n term_op = scipy.sparse.csc_matrix((data, (row_ind, col_ind)), shape=shape)\n\n return term_op", "def __init__(self, orbital_operators, orbital_labels, op_type, prefactor=1.0):\n\n self.orbital_operators = np.array(orbital_operators, dtype=str)\n self.orbital_labels = np.array(orbital_labels, dtype=int)\n self.op_type = op_type\n\n if len(self.orbital_operators) != len(self.orbital_labels):\n ValueError('The number of orbital operators and labels is inconsistent for the OperatorString: {} {}'.format(len(self.orbital_operators), len(self.orbital_labels)))\n\n self.prefactor = prefactor\n\n # Stored for use in computing commutators.\n # A dictionary of the labels to their index in the operator string.\n self._indices_orbital_labels = dict()\n for ind_orbital in range(len(self.orbital_labels)):\n self._indices_orbital_labels[self.orbital_labels[ind_orbital]] = ind_orbital\n \n # Compute the prefactor automatically if a Majorana operator.\n if self.op_type == 'Majorana':\n # Stored for use in computing commutators.\n # The labels of orbital operators that are 'A' or 'B'.\n self._labels_ab_operators = np.array([self.orbital_labels[ind] for ind in range(len(self.orbital_labels)) if self.orbital_operators[ind] in ['A', 'B']], dtype=int)\n num_ab = len(self._labels_ab_operators)\n\n # The prefactor is 1 or 1j, depending\n # on whether reversing the order of operators creates\n # a +1 or -1 sign due to anti-commutation operators.\n num_swaps_to_reorder = (num_ab*(num_ab-1))/2\n if num_swaps_to_reorder % 2 == 1:\n self.prefactor = 1j\n\n if (self.op_type == 'Pauli' and self.prefactor != 1) \\\n or (self.op_type == 'Majorana' and self.prefactor not in [1, 1j]) \\\n or (self.op_type == 'Fermion' and self.prefactor not in [1, 1j]):\n raise ValueError('Invalid prefactor {} for operator string of op_type {}'.format(self.prefactor, self.op_type))\n \n name_list = [str(self.prefactor),' ']\n for (op, la) in 
zip(self.orbital_operators, self.orbital_labels):\n name_list.extend([op, ' ', str(la), ' '])\n\n self.name = ''.join(name_list)", "def to_ccnf(self):\n return And(*[term for term in self.iter_maxterms()])", "def _build_attention_equation(qkv_rank, attn_axes):\n import string\n\n _CHR_IDX = string.ascii_lowercase\n target_notation = _CHR_IDX[:qkv_rank]\n # `batch_dims` includes the head dim.\n batch_dims = tuple(np.delete(range(qkv_rank), attn_axes + (qkv_rank - 1,)))\n letter_offset = qkv_rank\n source_notation = \"\"\n for i in range(qkv_rank):\n if i in batch_dims or i == qkv_rank - 1:\n source_notation += target_notation[i]\n else:\n source_notation += _CHR_IDX[letter_offset]\n letter_offset += 1\n\n product_notation = \"\".join(\n [target_notation[i] for i in batch_dims]\n + [target_notation[i] for i in attn_axes]\n + [source_notation[i] for i in attn_axes]\n )\n dot_product_equation = \"%s,%s->%s\" % (\n source_notation,\n target_notation,\n product_notation,\n )\n attn_scores_rank = len(product_notation)\n combine_equation = \"%s,%s->%s\" % (\n product_notation,\n source_notation,\n target_notation,\n )\n return dot_product_equation, combine_equation, attn_scores_rank", "def build_expression_tree(token_list: Sequence[tokens.Token]) -> nodes.ExpNode:\r\n\r\n def is_unary_op(op) -> bool:\r\n return op in UNARYOP_TABLE\r\n\r\n def is_open_bracket(token) -> bool:\r\n return isinstance(token, tokens.TokenOpenBracket)\r\n\r\n def is_close_bracket(token) -> bool:\r\n return isinstance(token, tokens.TokenCloseBracket)\r\n\r\n def is_comma(token) -> bool:\r\n return isinstance(token, tokens.TokenSymbol) and token.symbol == Separators.SEP_COMMA\r\n\r\n def is_higher_or_equal_op_priority(op1, op2, table) -> bool:\r\n oi1 = table.get(op1)\r\n oi2 = table.get(op2)\r\n\r\n p1 = 0 if oi1 is None else oi1.priority\r\n p2 = 0 if oi2 is None else oi2.priority\r\n\r\n return p1 >= p2\r\n\r\n def read_exp_chain(index) -> Tuple[nodes.ExpNode, int]:\r\n token = token_list[index]\r\n if isinstance(token, tokens.TokenSymbol):\r\n if is_open_bracket(token):\r\n node, i = read_exp(index)\r\n elif is_unary_op(token.symbol):\r\n if UNARYOP_TABLE[token.symbol].affix == OperatorAffix.PREFIX:\r\n node, i = read_prefix_unary_exp(index)\r\n else:\r\n raise ParsingException(f\"unary operator '{token.symbol}' is not a prefix operator\", token.pos)\r\n else:\r\n raise ParsingException(f\"unexpected symbol '{token.symbol}'\", token.pos)\r\n else:\r\n node, i = read_exp(index)\r\n\r\n if i < len(token_list):\r\n # look ahead for 1 token\r\n next_token = token_list[i]\r\n if isinstance(next_token, tokens.TokenSymbol) and is_unary_op(next_token.symbol):\r\n if UNARYOP_TABLE[next_token.symbol].affix == OperatorAffix.POSTFIX:\r\n node, i = read_postfix_unary_exp(i, node)\r\n else:\r\n return (node, i)\r\n\r\n if i < len(token_list):\r\n # look ahead for 1 token\r\n next_token = token_list[i]\r\n if is_close_bracket(next_token):\r\n return (node, i)\r\n elif isinstance(next_token, tokens.TokenSymbol):\r\n if next_token.symbol == Separators.SEP_COMMA:\r\n return (node, i)\r\n elif next_token.symbol in BINOP_TABLE:\r\n return read_binary_exp(i, node)\r\n else:\r\n raise ParsingException(f\"unexpected symbol '{next_token.symbol}'\", next_token.pos)\r\n else:\r\n raise ParsingException(\"unexpected token\", next_token.pos)\r\n else:\r\n return (node, i)\r\n\r\n def read_exp(index) -> Tuple[nodes.ExpNode, int]:\r\n if index >= len(token_list):\r\n raise ParsingException(\"unexpected token\", token_list[-1].pos)\r\n\r\n token = 
token_list[index]\r\n if is_open_bracket(token):\r\n return read_bracket_exp(index)\r\n elif isinstance(token, tokens.TokenNumber):\r\n return (nodes.NumberNode(token.num, pos=token.pos), index + 1)\r\n elif isinstance(token, tokens.TokenName):\r\n if (index + 1) < len(token_list) and is_open_bracket(token_list[index + 1]):\r\n return read_func_call(index)\r\n else:\r\n return (nodes.NameConstantNode(token.name, pos=token.pos), index + 1)\r\n elif isinstance(token, tokens.TokenSymbol):\r\n raise ParsingException(f\"unexpected symbol '{token.symbol}'\", token.pos)\r\n else:\r\n raise ParsingException(\"unexpceted token\", token.pos)\r\n\r\n def read_bracket_exp(index) -> Tuple[nodes.ExpNode, int]:\r\n node, i = read_exp_chain(index + 1)\r\n\r\n if i < len(token_list) and is_close_bracket(token_list[i]):\r\n return (node, i + 1)\r\n else:\r\n raise ParsingException(\"unmatch '('\", token_list[index].pos)\r\n\r\n def read_prefix_unary_exp(index) -> Tuple[nodes.UnaryOpNode, int]:\r\n node, i = read_exp(index + 1)\r\n token = token_list[index]\r\n return (nodes.UnaryOpNode(token.symbol, node, pos=token.pos), i)\r\n\r\n def read_postfix_unary_exp(index, child: nodes.ExpNode) -> Tuple[nodes.UnaryOpNode, int]:\r\n token = token_list[index]\r\n\r\n if isinstance(child, nodes.UnaryOpNode):\r\n if is_higher_or_equal_op_priority(token.symbol, child.op, UNARYOP_TABLE):\r\n node = nodes.UnaryOpNode(token.symbol, child.child, pos=token.pos)\r\n child.child = node\r\n node = child\r\n else:\r\n node = nodes.UnaryOpNode(token.symbol, child, pos=token.pos)\r\n else:\r\n node = nodes.UnaryOpNode(token.symbol, child, pos=token.pos)\r\n\r\n return (node, index + 1)\r\n\r\n def read_binary_exp(index, left: nodes.ExpNode) -> Tuple[nodes.BinaryOpNode, int]:\r\n right, i = read_exp_chain(index + 1)\r\n\r\n token = token_list[index]\r\n if isinstance(right, nodes.BinaryOpNode) and not is_open_bracket(token_list[index + 1]):\r\n # check operator priority and rotate the expression tree when necessary.\r\n # when priority of two operators are equal, we also should rotate the tree\r\n # in case these operators don't follow the commutative law.\r\n if is_higher_or_equal_op_priority(token.symbol, right.op, BINOP_TABLE):\r\n node = nodes.BinaryOpNode(token.symbol, left, right.left, pos=token.pos)\r\n right.left = node\r\n node = right\r\n else:\r\n node = nodes.BinaryOpNode(token.symbol, left, right, pos=token.pos)\r\n else:\r\n node = nodes.BinaryOpNode(token.symbol, left, right, pos=token.pos)\r\n\r\n return (node, i)\r\n\r\n def read_func_call(index) -> Tuple[nodes.FuncCallNode, int]:\r\n name_token = token_list[index]\r\n index += 2 # skip '('\r\n\r\n token_count = len(token_list)\r\n\r\n node = None\r\n i = index\r\n args = []\r\n\r\n while i < token_count and not is_close_bracket(token_list[i]):\r\n node, i = read_exp_chain(i)\r\n args.append(node)\r\n if i < token_count and is_comma(token_list[i]):\r\n i += 1\r\n else:\r\n break\r\n\r\n if i < token_count and is_close_bracket(token_list[i]):\r\n func_node = nodes.FuncCallNode(name_token.name, args, pos=name_token.pos)\r\n return (func_node, i + 1)\r\n else:\r\n raise ParsingException(\"unclose func call\", name_token.pos)\r\n\r\n\r\n node, i = read_exp_chain(0)\r\n\r\n if i < len(token_list):\r\n last_token = token_list[i]\r\n if is_close_bracket(last_token):\r\n raise ParsingException(\"unmatch ')'\", last_token.pos)\r\n else:\r\n raise ParsingException(\"unexpected token\", last_token.pos)\r\n else:\r\n return node", "def generate_term(self, **kwargs):\n term_map 
= kwargs.pop('term_map')\n if hasattr(term_map, \"termType\") and\\\n term_map.termType == NS_MGR.rr.BlankNode.rdflib:\n return rdflib.BNode()\n if not hasattr(term_map, 'datatype'):\n term_map.datatype = NS_MGR.xsd.anyURI.rdflib\n if hasattr(term_map, \"template\") and term_map.template is not None:\n template_vars = kwargs\n template_vars.update(self.constants)\n # Call any functions to generate values\n for key, value in template_vars.items():\n if hasattr(value, \"__call__\"):\n template_vars[key] = value()\n raw_value = term_map.template.format(**template_vars)\n if term_map.datatype == NS_MGR.xsd.anyURI.rdflib:\n return rdflib.URIRef(raw_value)\n return rdflib.Literal(raw_value,\n datatype=term_map.datatype)\n if term_map.reference is not None:\n # Each child will have different mechanisms for referencing the\n # source based\n return self.__generate_reference__(term_map, **kwargs)", "def from_dict(terms):\n op = MajoranaOperator()\n op.terms = terms\n return op", "def build_t_op(core_tensor, direction, jitted=True):\n assert direction in ['left', 'right', 'both']\n\n if direction == 'left':\n t_op = lambda mat: np.einsum('cai,ab,dbi->cd', \n core_tensor, mat, core_tensor)\n elif direction == 'right':\n t_op = lambda mat: np.einsum('aci,ab,bdi->cd', \n core_tensor, mat, core_tensor)\n elif direction == 'both':\n core_tensors = np.stack([core_tensor, \n np.swapaxes(core_tensor, 0, 1)])\n t_op = lambda mat: np.einsum('Baci,Bab,Bbdi->Bcd', \n core_tensors, mat, core_tensors)\n\n return jax.jit(t_op) if jitted else t_op", "def __create_nested_structure(nested_operator: PatternStructure):\n order = list(range(len(nested_operator.args))) if isinstance(nested_operator, CompositeStructure) else [0]\n operator_type = None\n if isinstance(nested_operator, AndOperator):\n operator_type = OperatorTypes.AND\n elif isinstance(nested_operator, SeqOperator):\n operator_type = OperatorTypes.SEQ\n ret = TreePlanLeafNode(order[0])\n for i in range(1, len(order)):\n ret = TreePlanBinaryNode(operator_type, ret, TreePlanLeafNode(order[i]))\n return ret", "def _operator_generator(self, index, conj):\n pterm = PauliTerm('I', 0, 1.0)\n Zstring = PauliTerm('I', 0, 1.0)\n for j in range(index):\n Zstring = Zstring*PauliTerm('Z', j, 1.0)\n\n pterm1 = Zstring*PauliTerm('X', index, 0.5)\n scalar = 0.5 * conj * 1.0j\n pterm2 = Zstring*PauliTerm('Y', index, scalar)\n pterm = pterm * (pterm1 + pterm2)\n\n pterm = pterm.simplify()\n return pterm", "def __mul__(self, other, nested=False):\n\n other = formula(other, namespace=self.namespace)\n\n selftermnames = self.termnames()\n othertermnames = other.termnames()\n\n I = len(selftermnames)\n J = len(othertermnames)\n\n terms = []\n termnames = []\n\n for i in range(I):\n for j in range(J):\n termname = '%s*%s' % (str(selftermnames[i]), str(othertermnames[j]))\n pieces = termname.split('*')\n pieces.sort()\n termname = '*'.join(pieces)\n termnames.append(termname)\n\n selfnames = self.terms[i].names()\n othernames = other.terms[j].names()\n\n if self.terms[i].name is 'intercept':\n _term = other.terms[j]\n _term.namespace = other.namespace\n\n elif other.terms[j].name is 'intercept':\n _term = self.terms[i]\n _term.namespace = self.namespace\n else:\n names = []\n\n d1 = len(selfnames) \n d2 = len(othernames)\n\n for r in range(d1):\n for s in range(d2):\n name = '%s*%s' % (str(selfnames[r]), str(othernames[s]))\n pieces = name.split('*')\n pieces.sort()\n name = '*'.join(pieces)\n names.append(name)\n\n def product_func(value, d1=d1, d2=d2):\n\n out = []\n for r in 
range(d1):\n for s in range(d2):\n out.append(value[r] * value[d1+s])\n return N.array(out)\n\n sumterms = self + other\n sumterms.terms = [self, other] # enforce the order we want\n sumterms.namespace = self.namespace\n\n _term = quantitative(names, func=sumterms, termname=termname,\n transform=product_func)\n _term.namespace = self.namespace\n\n\n terms.append(_term)\n\n return formula(terms, namespace=self.namespace)", "def __init__(self,terms,copy=True,L=None):\n\n if L is None:\n L = config.global_L\n\n Operator.__init__(self,L=L)\n\n if copy:\n self.terms = [t.copy() for t in terms]\n else:\n self.terms = list(terms)\n\n if len(self.terms) == 0:\n raise ValueError('Term list is empty.')\n\n terms_L = None\n for t in self.terms:\n if t.L is not None:\n if terms_L is not None:\n if t.L != terms_L:\n raise ValueError('All terms must have same length L.')\n else:\n terms_L = t.L\n\n if len(self.terms) > 1:\n self.max_ind = max(o.max_ind for o in self.terms)\n elif len(self.terms) == 1:\n self.max_ind = self.terms[0].max_ind\n\n # pick up length from terms if it isn't set any other way\n if L is None:\n L = terms_L\n\n self.L = L", "def compile_term(self) -> None:\n token_type = self.tokenizer.token_type()\n\n if token_type == TokenTypes.IDENTIFIER:\n curr_token = self._get_current_token()\n self.tokenizer.advance()\n if self._get_current_token() in ('(', '.'):\n self.compile_subroutine_call(curr_token)\n elif self._get_current_token() == '[':\n self._consume('[')\n self.compile_expression()\n self._consume(']')\n\n kind = convert_kind(self.table.kind_of(curr_token))\n index = self.table.index_of(curr_token)\n\n self.writer.write_push(kind, index)\n self.writer.write_arithmetic('ADD')\n self.writer.write_pop('POINTER', 1)\n self.writer.write_push('THAT', 0)\n\n else:\n kind = convert_kind(self.table.kind_of(curr_token))\n index = self.table.index_of(curr_token)\n self.writer.write_push(kind, index)\n\n elif token_type == token_type.INT_CONST:\n self.writer.write_push('CONST', int(self._get_current_token()))\n self._consume(token_type)\n\n elif token_type == token_type.KEYWORD:\n curr_token = self._get_current_token()\n if curr_token in ['true', 'false', 'null']:\n self.writer.write_push('CONST', 0)\n if curr_token == 'true':\n self.writer.write_arithmetic('NOT')\n if curr_token == 'this':\n self.writer.write_push('POINTER', 0)\n self._consume(token_type)\n\n elif token_type == token_type.STRING_CONST:\n const_str = ''\n first = True\n while const_str.count('\"') < 2:\n if first:\n const_str += self._get_current_token()\n first = False\n else:\n const_str += ' ' + self._get_current_token()\n if self.tokenizer.has_more_tokens():\n self.tokenizer.advance()\n const_str = const_str.replace('\"', '')\n\n self.writer.write_push('CONST', len(const_str))\n self.writer.write_call('String.new', 1)\n\n for char in const_str:\n self.writer.write_push('CONST', ord(char))\n self.writer.write_call('String.appendChar', 2)\n\n else:\n if self._get_current_token() == '(':\n self._consume('(')\n self.compile_expression()\n self._consume(')')\n else:\n op = self._get_current_token()\n self._consume(['-', '~']) # unaryOp term\n self.compile_term()\n if op == '-':\n self.writer.write_arithmetic('NEG')\n else:\n self.writer.write_arithmetic('NOT')", "def generate_operand(uri):\n pass", "def _define_grammar():\n expr = Forward()\n\n label_name = Word(LABEL_CHARS)\n label_name.setParseAction(LabelNode)\n\n string_literal = QuotedString('\"') | QuotedString(\"'\")\n string_literal.setParseAction(LiteralNode)\n\n 
set_literal = (Suppress(\"{\") +\n delimitedList(QuotedString('\"') | QuotedString(\"'\"), \",\") +\n Suppress(\"}\"))\n set_literal.setParseAction(SetLiteralNode)\n\n eq_comparison = label_name + Suppress(\"==\") + string_literal\n eq_comparison.setParseAction(LabelToLiteralEqualityNode)\n\n not_eq_comparison = label_name + Suppress(\"!=\") + string_literal\n not_eq_comparison.setParseAction(InequalityNode)\n\n in_comparison = label_name + Suppress(Keyword(\"in\")) + set_literal\n in_comparison.setParseAction(LabelInSetLiteralNode)\n\n not_in = Suppress(Keyword(\"not\") + Keyword(\"in\"))\n not_in_comparison = label_name + not_in + set_literal\n not_in_comparison.setParseAction(NotInNode)\n\n has_check = (Suppress(\"has(\") +\n Word(LABEL_CHARS) +\n Suppress(\")\"))\n has_check.setParseAction(HasNode)\n\n # For completeness, we allow an all() to occur in an expression like\n # \"! all()\". Note: we special-case the trivial selectors \"\" and\n # \"all()\" below for efficiency.\n all_op = (Suppress(\"all()\"))\n all_op.setParseAction(AllNode)\n\n comparison = (eq_comparison |\n not_eq_comparison |\n in_comparison |\n not_in_comparison |\n has_check |\n all_op)\n\n paren_expr = (Suppress(\"(\") + expr + Suppress(\")\"))\n\n value = ZeroOrMore(\"!\") + (comparison | paren_expr)\n value.setParseAction(simplify_negation_node)\n\n and_expr = value + ZeroOrMore(Suppress(\"&&\") + value)\n and_expr.setParseAction(simplify_and_node)\n\n or_expr = and_expr + ZeroOrMore(Suppress(\"||\") + and_expr)\n or_expr.setParseAction(simplify_or_node)\n\n expr << or_expr\n\n grammar = expr + StringEnd()\n return grammar", "def get_operator_to_make_TOD(self):\n if len(self) == 1:\n return self.get_operator()\n op = self._get_array_of_operators()\n return BlockRowOperator(op, new_axisin=0)", "def create() -> 'Tokenizer':\n token_op_table = [\n EOS,\n op.Concat,\n op.ConstStr,\n op.SubStr,\n op.GetSpan,\n op.Trim,\n ]\n\n # Nesting operators and their args get \"compacted\" into\n # \"primitive\" tokens\n\n for type_ in op.Type:\n for index in op.INDEX:\n token_op_table.append((op.GetToken, type_, index))\n\n for case in op.Case:\n token_op_table.append((op.ToCase, case))\n\n for delim1 in op.DELIMITER:\n for delim2 in op.DELIMITER:\n token_op_table.append((op.Replace, delim1, delim2))\n\n for dsl_regex in list(op.Type) + list(op.DELIMITER):\n token_op_table.append((op.GetUpto, dsl_regex))\n\n for dsl_regex in list(op.Type) + list(op.DELIMITER):\n token_op_table.append((op.GetFrom, dsl_regex))\n\n for type_ in op.Type:\n for index in op.INDEX:\n token_op_table.append((op.GetFirst, type_, index))\n\n for type_ in op.Type:\n token_op_table.append((op.GetAll, type_))\n\n # Primitive types\n\n for type_ in op.Type:\n token_op_table.append(type_)\n\n for boundary in op.Boundary:\n token_op_table.append(boundary)\n\n # Covers op.INDEX\n for position in range(op.POSITION[0], op.POSITION[1]+1):\n token_op_table.append(position)\n\n # This covers op.DELIMITER\n for character in op.CHARACTER:\n token_op_table.append(character)\n\n token_op_table = {\n token: op\n for token, op in enumerate(token_op_table)\n }\n\n op_token_table = {\n op: token\n for token, op in token_op_table.items()\n }\n\n assert len(token_op_table) == len(op_token_table)\n\n string_token_table = {\n char: token\n for token, char in enumerate(op.CHARACTER)\n }\n\n return Tokenizer(\n token_op_table=token_op_table,\n op_token_table=op_token_table,\n string_token_table=string_token_table,\n )", "def BuildTerm(self, p_node):\n\n max_comment_length = 60\n 
access_pn=Tree('access-policy ' + self.term.name )\n access_pn.AddParent(p_node)\n if self.verbose and self.term.comment:\n if len(self.term.comment[0]) < max_comment_length:\n comm=Tree('', '/* ' + self.term.comment[0] + ' */')\n else:\n comments = aclgenerator.WrapWords(self.term.comment, 60)\n comments.append( '*/')\n comments.insert(0, '/*')\n comm=Tree('', comments)\n comm.AddParent(access_pn)\n\n rule_match =Tree('match')\n rule_match.AddParent(access_pn)\n\n if self.from_zone:\n self.BuildTermZone(rule_match, 'src')\n\n if self.to_zone:\n self.BuildTermZone(rule_match, 'dest')\n\n if self.term.versa_application or self.app:\n self.BuildTermApp(rule_match)\n\n if self.term.dscp_match:\n self.BuildTermDscp(rule_match)\n\n if self.term.action:\n self.BuildTermLogging(access_pn)\n\n #print(\"\\n\".join(set_term.PrintTree()))", "def __add__(self, other):\n\n other = formula(other, namespace=self.namespace)\n terms = self.terms + other.terms\n pieces = [(term.name, term) for term in terms]\n pieces.sort()\n terms = [piece[1] for piece in pieces]\n return formula(terms, namespace=self.namespace)", "def __mul__(self, other, nested=False):\n\n other = Formula(other)\n\n selftermnames = self.termnames()\n othertermnames = other.termnames()\n\n I = len(selftermnames)\n J = len(othertermnames)\n\n terms = []\n termnames = []\n\n for i in range(I):\n for j in range(J):\n termname = '%s*%s' % (str(selftermnames[i]), str(othertermnames[j]))\n pieces = sorted(termname.split('*'))\n termname = '*'.join(pieces)\n termnames.append(termname)\n\n selfnames = self.terms[i].names()\n othernames = other.terms[j].names()\n\n if self.terms[i].name is 'intercept':\n _term = other.terms[j]\n _term.namespace = other.namespace\n elif other.terms[j].name is 'intercept':\n _term = self.terms[i]\n _term.namespace = self.namespace\n else:\n names = []\n\n d1 = len(selfnames)\n d2 = len(othernames)\n\n for r in range(d1):\n for s in range(d2):\n name = '%s*%s' % (str(selfnames[r]), str(othernames[s]))\n pieces = sorted(name.split('*'))\n name = '*'.join(pieces)\n names.append(name)\n\n def product_func(value, d1=d1, d2=d2):\n\n out = []\n for r in range(d1):\n for s in range(d2):\n out.append(value[r] * value[d1+s])\n return np.array(out)\n\n cself = copy.copy(self.terms[i])\n cother = copy.copy(other.terms[j])\n sumterms = cself + cother\n sumterms.terms = [cself, cother] # enforce the order we want\n\n _term = Quantitative(names, func=sumterms,\n termname=termname,\n transform=product_func)\n\n if _namespace_equal(self.namespace, other.namespace):\n _term.namespace = self.namespace\n\n terms.append(_term)\n\n return Formula(terms)", "def _create_next_term(cls,\n context: 'IconScoreContext',\n prev_term: Optional['Term']) -> 'Term':\n new_preps: List['PRep'] = context.preps.get_preps(\n start_index=0, size=context.main_and_sub_prep_count)\n\n sequence = 0 if prev_term is None else prev_term.sequence + 1\n start_block_height = context.block.height + 1\n if prev_term:\n assert start_block_height == prev_term.end_block_height + 1\n\n # The current P-Rep term is over. 
Prepare the next P-Rep term\n if context.revision < Revision.SET_IREP_VIA_NETWORK_PROPOSAL.value:\n irep: int = cls._calculate_weighted_average_of_irep(new_preps[:context.main_prep_count])\n else:\n irep: int = context.inv_container.irep\n\n term = Term(\n sequence,\n start_block_height,\n context.term_period,\n irep,\n context.total_supply,\n context.preps.total_delegated\n )\n\n term.set_preps(new_preps, context.main_prep_count, context.main_and_sub_prep_count)\n\n return term", "def build_engine(self, children, conjunction, verbose=True):\n comp_children = []\n if verbose: print(f\"\\nCalling build_engine with {children}\")\n for child in children: \n if child.get('field'):\n comp_children.append(Evaluation(child['field'], child['value'], child['operator']))\n #print(comp_children)\n else:\n new_children = child.get('children')\n conj = child.get('conjunction')\n comp_children.append(self.build_engine(new_children, conjunction=conj, verbose=verbose))\n \n return Composite(comp_children, conjunction=conjunction)", "def create_terminal(self):\n\n indicator = self.__get_indicator()\n class_func = self.__get_classifier_function(indicator)\n classifier_detail = self.__get_classifier_detail(class_func)\n\n # class function\n self.node_data['class_func'] = class_func\n\n # map_type\n map_type_box = set(indicator.map_type) & set(classifier_detail['map_type']) # 取交集\n if not map_type_box:\n print('IndexError: Cannot choose from an empty sequence!!!! %s, %s' % (indicator, classifier_detail))\n self.create_terminal() # 如匹配错误,重新生成。\n self.node_data['map_type'] = random.choice(list(map_type_box))\n\n # node_type (output value type)\n if self.node_data['map_type'] == 'condition':\n self.node_data['node_type'] = 'abs_value'\n else:\n self.node_data['node_type'] = 'pos_value'\n\n # function group\n func_group = None\n for name, group in self.classifier_group.items():\n if class_func in group:\n func_group = name\n self.node_data['class_func_group'] = func_group\n\n # others\n if func_group == 'cut' or func_group == 'compare':\n self.__create_terminal_dict(indicator, classifier_detail)\n\n elif func_group == 'permutation' or func_group == 'trend':\n self.__create_terminal_list(indicator, classifier_detail)\n\n else:\n raise ValueError('Uncategorized class_function: %s. 
9484' % class_func)\n\n self.lv_mut_tag = Classifier.lv_mut_tag.copy()\n self.node_result = self.cal()\n\n return self.node_result", "def _get_mergeable_operator_patterns(self, hw_config: Optional[HWConfig] = None) -> NodeExpression:\n # TODO: Implement \"repeating expressions\" so that any number of \"mergeable\" operations\n # immediately following a linear/convolutional/matrix op are merged into one block\n import nncf.dynamic_graph.patterns as p\n pattern = p.LINEAR_OPS + p.ANY_BN_RELU_COMBO | p.LINEAR_OPS + p.ELTWISE_UNIFORM_OPS\n return pattern", "def make_binary(sv, piece, o, op):\r\n here=piece.rfind(op) # look for last occurrence\r\n there=here+len(op)\r\n t1=piece[:here].strip(Space) # first term (sometimes omitted)\r\n t2=piece[there:].strip(Space) # second term must be present\r\n if not t2: \r\n print(\"\\n\", Err_op_syntax, o) # *** Syntax error in operator ***\r\n print(\" \", piece)\r\n raise ReferenceError\r\n first=tree_build(sv, t1) # process each term RECURSIVE\r\n second=tree_build(sv, t2)\r\n return (o, first, second)", "def expression_to_english_tree(expr):\n if isinstance(expr, ApplicationExpression):\n pred_name = expr.pred.variable.name\n if (not (pred_name in (adjectives + intransitive_verbs + transitive_verbs))):\n raise GenerationError(\"Invalid predicate: %s\" % pred_name)\n # might want to add a line enforcing variable name to begin with an x, y, or z?\n freevars = [Trace(Index(int(arg.variable.name[1:])), False) for arg in expr.args]\n if (pred_name in adjectives):\n tree = SynTree(default_featstructs['TP'], [SynTree(default_featstructs['DP_trace'], [SynTree(freevars[0], [])]), SynTree(default_featstructs['TBar'], [SynTree(default_featstructs['VP'], [SynTree(default_featstructs['VBar'], [SynTree(default_featstructs['V_link'], [SynTree('BE', [])]), SynTree(default_featstructs['AdjP'], [SynTree(default_featstructs['AdjBar'], [SynTree(default_featstructs['Adj'], [SynTree(pred_name, [])])])])])])])])\n elif (pred_name in intransitive_verbs):\n tree = SynTree(default_featstructs['TP'], [SynTree(default_featstructs['DP_trace'], [SynTree(freevars[0], [])]), SynTree(default_featstructs['TBar'], [SynTree(default_featstructs['VP'], [SynTree(default_featstructs['VBar'], [SynTree(default_featstructs['V_intrans'], [SynTree(pred_name, [])])])])])])\n else:\n tree = SynTree(default_featstructs['TP'], [SynTree(default_featstructs['DP_trace'], [SynTree(freevars[1], [])]), SynTree(default_featstructs['TBar'], [SynTree(default_featstructs['VP'], [SynTree(default_featstructs['VBar'], [SynTree(default_featstructs['V_trans'], [SynTree(pred_name, [])]), SynTree(default_featstructs['DP_trace'], [SynTree(freevars[0], [])])])])])])\n elif isinstance(expr, EqualityExpression):\n freevars = [Trace(Index(int(arg.variable.name[1:])), False) for arg in [expr.first, expr.second]]\n tree = SynTree(default_featstructs['TP'], [SynTree(default_featstructs['DP_trace'], [SynTree(freevars[0], [])]), SynTree(default_featstructs['TBar'], [SynTree(default_featstructs['VP'], [SynTree(default_featstructs['VBar'], [SynTree(default_featstructs['V_link'], [SynTree('BE', [])]), SynTree(default_featstructs['DP_trace'], [SynTree(freevars[1], [])])])])])])\n elif isinstance(expr, NLQuantifiedExpression):\n nucleus_tree = expression_to_english_tree(expr.nucleus)\n DPs = nucleus_tree.postorder_traverse(get_free_trace_DP, {'DPs' : []})['DPs']\n if (len(DPs) == 0):\n raise GenerationError(\"Quantifier %s must bind a free variable.\" % expr.getQuantifier())\n subj_tree = 
expression_to_english_DP_tree(expr.getQuantifier(), expr.restrictor)\n tree = SynTree(default_featstructs['TP'], [subj_tree, SynTree(default_featstructs['PA'], [SynTree(Index(-1), []), nucleus_tree])])\n for DP in DPs:\n if (int(expr.variable.name[1:]) == DP.children[0].label.index.index): # check that variables match\n tree[1][0].label = DP.children[0].label.index = tree[0].ID\n DP.children[0].label.bound = True # quantifier has bound the free variable\n break\n if (tree[1][0].label.index == -1):\n raise GenerationError(\"Quantifier %s failed to find corresponding free variable.\" % expr.getQuantifier())\n else:\n raise GenerationError(\"Invalid expression.\")\n tree.set_QR_level(1)\n tree.label_nodes()\n tree.make_nx_tree()\n return tree", "def _create_concat(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node", "def __init__(self, l, r, cond=Literal(True)):\n super(ThetaJoin, self).__init__(l, r)\n self.cond = cond", "def gen_compound_literal(self, expr: expressions.CompoundLiteral):\n # Alloc some room:\n ir_addr = self.emit_alloca(expr.typ)\n # ... and fill compound literal:\n self.gen_local_init(ir_addr, expr.typ, expr.init)\n return ir_addr" ]
[ "0.51256794", "0.50364447", "0.5035556", "0.49387896", "0.49171144", "0.48688382", "0.47859588", "0.47263345", "0.47252068", "0.46344826", "0.46341482", "0.46032685", "0.4565626", "0.45439956", "0.45345488", "0.45064116", "0.4506053", "0.4453859", "0.442932", "0.4415429", "0.44063872", "0.44001704", "0.43954718", "0.43860993", "0.43672794", "0.43596745", "0.4356076", "0.43398076", "0.43371102", "0.43286946" ]
0.6599325
0
Recursively iterate over issue dictionary and print errors.
def _print_issue(issue, ntabs):\n    for key, value in issue.items():\n        if isinstance(value, dict):\n            tabs = TAB*ntabs\n            print('%s%s (section):' % (tabs, key))\n            ntabs += 1\n            _print_issue(value, ntabs=ntabs)\n        elif isinstance(value, bool):\n            if value == False:\n                tabs = TAB*ntabs\n                print('%s%s parameter is missing.' % (tabs, key))\n            continue\n        else:\n            tabs = TAB*ntabs\n            print('%s%s (parameter):' % (tabs, key))\n            tabs = TAB*(ntabs+1)\n            print('%s%s' % (tabs, value))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def error_wrapper(x):\n errors = list()\n for error_key, error_list in list(x.items()):\n for error in error_list:\n if error_key == 'non_field_errors':\n errors.append(error)\n else:\n errors.append(\"%s: %s\" % (error_key, error))\n return errors", "def printError(queryResults):\n print (queryResults[1])\n # For loop created for the httpErrors array\n for results in queryResults[0]:\n print (\n results[0], \"-\",\n str(results[1]) + \"% errors\")", "def print_log_message(error_msg):\r\n for msg in error_msg:\r\n print(msg)", "def issue(ctx, accountable, issue_key):\n accountable.issue_key = issue_key\n if not ctx.invoked_subcommand:\n issue = accountable.issue_meta()\n headers = issue.keys()\n rows = [headers, [v for k, v in issue.items()]]\n print_table(SingleTable(rows))", "def _print_invalids(invalids, verbose):\n if not invalids:\n print(\"## No Rez package was set as invalid.\")\n print(\"Nothing is invalid. Which is a good thing!\")\n\n return\n\n print(\"## Some packages were marked as invalid. Here's why:\")\n\n template = \"{package.name}: {message}\"\n\n if verbose:\n template = \"{package.name}: {path} {message}: {full}\"\n\n for message in sorted(\n (\n template.format(\n package=error.get_package(),\n path=error.get_path(),\n message=str(error),\n full=error.get_full_message(),\n )\n for error in invalids\n )\n ):\n print(message)", "def report(issues, show_urls=False):\r\n # titles may have unicode in them, so we must encode everything below\r\n if show_urls:\r\n for i in issues:\r\n role = 'ghpull' if 'merged' in i else 'ghissue'\r\n print('* :%s:`%d`: %s' % (role, i['number'],\r\n i['title'].encode('utf-8')))\r\n else:\r\n for i in issues:\r\n print('* %d: %s' % (i['number'], i['title'].encode('utf-8')))", "def parse_errors(errors):\n\n try:\n return errors['detail']\n\n except KeyError:\n error_string = ''\n\n for key in errors:\n error_string += '{0}\\n'.format(errors[key][0])\n\n return error_string", "def print_errors(self,result,cause=False,detail=False):\n errors = result.get_errors()\n if errors:\n print ('=== ERRORS '+('='*59))\n for error in errors:\n print (error.id)\n if cause:\n print (' ',error.get_cause())\n if detail:\n for key in (k for k in error.keys() if k not in [Result.START_TIME,\n Result.END_TIME,\n Result.CAUSE]):\n print ('-' * 70)\n print ('%s:' % key)\n print (as_utf8(error[key]))", "def error_finder(folder, exception=POSKeyError, stop_on_first=None):\n for id, next_item in folder.objectItems():\n print(\"Inspecting %s\" % id)\n try:\n next_item.getId()\n except exception:\n print `exception`, \"in folder\",\n print '/'.join(folder.getPhysicalPath()),\n print \"at id:\", id\n if stop_on_first:\n raise \"done\" # hack to break out of recursion\n else:\n # no error, recurse if it's objectManagerish\n if hasattr(next_item.aq_base, 'objectItems'):\n error_finder(next_item, exception, stop_on_first)", "def report_errors(errors):\n if len(errors) > 0:\n for error in errors:\n logger.debug(error)\n sys.exit(0)", "def errors_fatal(self) -> List[Error]:", "def errors(conf, daemon):\n # persisted dict interface for long term memory\n errors = Shove('file://{0}'.format(conf.app.errors), protocol=2, flag='r')\n if any(errors):\n print(\"errors found\")\n for path, error in six.iteritems(errors):\n pp(error)\n errors.close()\n exit(1)\n # ⏏ exit the program with an error\n else:\n print(\"no errors found - OK\")\n print()\n errors.close()", "def display_form_errors(form):\n\n for fieldname, errors in form.errors.items():\n for error in errors:\n err_str = 
'Error in field <' + fieldname + '>: ' + error\n flash(err_str, 'error')", "def print_error_data(error_data):\n\n print('\\nDays when there were more than 1% errors in HTTP :\\n')\n for day in error_data:\n print(str(day[0]) + '\\t-\\t' + str(day[1]) + '% \\n')\n print('-------------------------------------------------------\\n')", "def format_errordict(self, errordict):\n errormsg = f'Comparison between {self.ad1.filename} and {self.ad2.filename}'\n for k, v in errordict.items():\n errormsg += f'\\nComparison failure in {k}'\n errormsg += '\\n' + ('-' * (22 + len(k))) + '\\n'\n errormsg += '\\n '.join(v)\n return errormsg", "def _print_batch_exception(batch_exception):\n print(\"-------------------------------------------\")\n print(\"Exception encountered:\")\n if (\n batch_exception.error\n and batch_exception.error.message\n and batch_exception.error.message.value\n ):\n print(batch_exception.error.message.value)\n if batch_exception.error.values:\n print()\n for mesg in batch_exception.error.values:\n print(\"{}:\\t{}\".format(mesg.key, mesg.value))\n print(\"-------------------------------------------\")", "def process(self):\n for user in self.repos:\n for repo in self.repos[user]:\n self.process_issues(user, repo)", "def process_sub_serializer_errors(self, serializer_error_dict, error_type):\n sub_serializer_errors = serializer_error_dict.get('errors', [])\n sub_serializer_non_field_errors = serializer_error_dict.get('non_field_errors', None)\n result = []\n for sub_error in sub_serializer_errors:\n if sub_error['field'] is None:\n sub_error['field'] = error_type\n result.append(sub_error)\n if sub_serializer_non_field_errors is not None:\n result.extend(\n self.get_non_field_error_entries(sub_serializer_non_field_errors)\n )\n return result", "def invalid_entries_error(name, collection, yml):\n\n yml = symlink_target(yml)\n output_1 = path(yml) + '\\n'\n output_2 = colored(' - Error: No entries in ', 'red')\n output_3 = colored(name, attrs=['bold'])\n empty_output = output_1 + output_2 + output_3\n\n if isinstance(collection, list):\n return empty_output\n\n length = len(collection)\n if length is 0:\n return empty_output\n elif length > 1:\n output_2 = colored(' - Error: Unknown entries in ', 'red')\n else:\n output_2 = colored(' - Error: Unknown entry in ', 'red')\n\n dict_entries = ''.join('{}: {}\\n'.format(key, val) for key, val in sorted(collection.items())).rstrip()\n output_3 = colored(name + '\\n\\n' + str(dict_entries), attrs=['bold'])\n return output_1 + output_2 + output_3", "def run_missing_value_check():\n print(\"\\n### CHECKING FOR MISSING VALUES AND ZEROES ###\")\n for key, value in data.items():\n try:\n print(key, check_missing_values(value), check_zero(value))\n except TypeError:\n print(key, \"Failed\")\n print(\"### END ###\\n\")", "def printErrors(self, *args):\n return _libsbml.SBMLDocument_printErrors(self, *args)", "def print_errors(errors):\n print() # Add newline after character-results.\n if errors:\n print(\"\\n({}) Error{}:\".format(len(errors),\n \"s\" if len(errors) != 1 else \"\"))\n for e in errors:\n print(\"[{}:{}] In {}: {}\".format(\n e.filename, e.lineno, e.case, e.data\n ))\n print()", "def test_iter_errors_invalid_resume(self):\n errors = list(resumeschema.iter_errors(self.invalid_resume))\n self.assertEqual(len(errors), 3)\n\n self.assertEqual(list(errors[0].path), ['basics'])\n self.assertEqual(\n errors[0].message, 'Additional properties are not allowed (u\\'first_name\\', u\\'last_name\\' were unexpected)'\n )\n\n 
self.assertEqual(list(errors[1].path), ['basics', 'profiles'])\n self.assertEqual(\n errors[1].message,\n '{u\\'username\\': u\\'neutralthoughts\\', u\\'network\\': u\\'Facebook\\'} is not of type u\\'array\\''\n )\n\n self.assertEqual(list(errors[2].path), ['work'])\n self.assertEqual(\n errors[2].message,\n ('{u\\'website\\': u\\'http://piedpiper.com\\', u\\'startDate\\': u\\'2013-12-01\\', u\\'highlights\\': '\n '[u\\'Build an algorithm\\'], u\\'company\\': u\\'Pied Piper\\', u\\'summary\\': '\n 'u\\'Pied Piper is a multi-platform technology.\\', u\\'position\\': u\\'CEO/President\\'} '\n 'is not of type u\\'array\\'')\n )", "def fix(self):\n exceptionError = ''\n for each in self.errorNodes:\n try:\n pm.delete(each)\n except exceptionError:\n print exceptionError", "def _walk_error_details(self, error_detail, prefix=()):\n pairs = []\n if isinstance(error_detail, str):\n pairs.append((prefix, error_detail))\n elif isinstance(error_detail, dict):\n for key, value in error_detail.items():\n pairs.extend(self._walk_error_details(value, prefix + (key,)))\n return pairs", "def _flatten_errors(self, params, parent=None):\r\n data = OrderedDict()\r\n for key, val in params.items():\r\n full_key = parent + \"[\" + key + \"]\" if parent else key\r\n if full_key.endswith(\"[errors]\"):\r\n full_key = full_key[:-len(\"[errors]\")]\r\n if isinstance(val, dict):\r\n data.update(self._flatten_errors(val, full_key))\r\n elif key == \"errors\":\r\n for error in val:\r\n data[full_key + \"[\" + error[\"attribute\"] + \"]\"] = [error[\"message\"]]\r\n else:\r\n data[full_key] = [val]\r\n return data", "def bug_details_display(self,**kwargs):\n row=self.bug_data(**kwargs)\n print(\"*******************\")\n for k in row.keys():\n print(k,\":\", str(row[k]).replace(\"\\n\",\"\\n{}> \".format(k)))\n print(\"*******************\")", "def print_fails(self,result,cause=False,detail=False):\n fails = result.get_fails()\n if fails:\n print ('=== FAILS '+('='*60))\n for fail in fails:\n print (fail.id)\n if cause:\n print (' ',fail.get_cause())\n if detail:\n for key in ['ISQL_stripped_diff','Python_stripped_diff',\n 'ISQL_stderr_stripped_diff',\n 'Python_stderr_stripped_diff']:\n if fail.has_key(key):\n print ('-' * 70)\n print ('%s:' % key)\n print (as_utf8(fail[key]))\n print ()", "def get_short_errors(self):\n if not self.was_successful():\n for traceback in self.data.traceback.split(\n CaseData.TB_SEPARATOR):\n\n traceback = traceback.strip(\" \\n\")\n bottom_line = traceback.rsplit(\"\\n\", 1)[-1].strip()\n yield \"{}: {}\".format(self.data.name, bottom_line)", "def get_error_contexts(exp_tree, unknown_summary, soln_id, debug=False):\n \n if debug:\n print('get_error_contexts')\n messages = []\n \n # error_contexts: List of subgraphs limited to either neighboring\n # leaf nodes, or to immediately avaialable connecting unknowns/equations.\n error_contexts = list(nx.connected_components(\n exp_tree.subgraph([_ for _ in exp_tree if not 'mapped' in exp_tree.nodes[_]])\n ))\n \n for context_g in error_contexts:\n if debug:\n print(context_g)\n if len(context_g) == 1:\n # it's a leaf\n if is_symbol(exp_tree, list(context_g)[0]):\n # it's a var,unknown,param\n error_object = tree_report(\n nx.subgraph(exp_tree, context_g), unknown_summary, debug=debug\n )\n eq_context = get_eqbox_html(\n list(context_g)[0],\n exp_tree\n )\n messages.append(f\"{error_object} in {eq_context}\")\n else:\n #it's a constant, get root and process it\n # Get the subgraph rooted at its predecessor\n if message_text[soln_id]['status'] == 
False:\n if debug:\n print(\"It's a constant\")\n # Breakdown and report errors in constants\n # only when something really went wrong\n # otherwise don't bother\n messages.append(\n tree_report(\n get_rooted_subgraph(\n exp_tree,\n list(exp_tree[list(context_g)[0]])[0] #root\n ), unknown_summary, debug=debug\n )\n )\n else:\n if debug:\n print(\"It's a constant but the answer is correct\")\n continue\n \n elif len(context_g) == 2:\n # it's an edge, same logic basically after you find the leaf node\n node, root = tuple(context_g)\n if is_symbol(exp_tree, root):\n node,root = root,node\n \n if is_symbol(exp_tree, node):\n error_object = tree_report(\n nx.subgraph(exp_tree, [node]), unknown_summary, debug=debug\n )\n eq_context = get_eqbox_html(\n node,\n exp_tree,\n )\n messages.append(f\"{error_object} in {eq_context}\")\n \n else:\n messages.append(\n tree_report(\n nx.subgraph(exp_tree, context_g), unknown_summary, debug=debug\n )\n )\n \n return messages" ]
[ "0.57348907", "0.5411636", "0.53937906", "0.53712976", "0.5358204", "0.5333469", "0.52884054", "0.52464217", "0.5168919", "0.51145256", "0.50778586", "0.5039028", "0.5036515", "0.502055", "0.5012484", "0.5011434", "0.4996432", "0.49948236", "0.49832422", "0.49768898", "0.49736303", "0.4973099", "0.49705547", "0.496567", "0.4953536", "0.49496374", "0.49475265", "0.49466524", "0.49396962", "0.4917132" ]
0.6306336
0
POST /validate: Validate GeoJSON data in POST body
def validate(request):\n    testing = request.GET.get('testing')\n\n    if request.method == 'POST':\n        stringy_json = request.raw_post_data\n    else: # GET\n        try:\n            remote_url = request.GET['url']\n            stringy_json = get_remote_json(remote_url)\n        except KeyError: # The "url" URL parameter was missing\n            return _geojson_error('When validating via GET, a "url" URL parameter is required.', status=400)\n        except NonFetchableURLException:\n            return _geojson_error('The URL passed could not be fetched.')\n    try:\n        test_geojson = json.loads(stringy_json)\n        if not isinstance(test_geojson, dict):\n            return _geojson_error('Data was not a JSON object.', testing)\n    except:\n        return _geojson_error('Data was not JSON serializable.', testing)\n    if not 'type' in test_geojson:\n        return _geojson_error('The "type" member is required and was not found.', testing)\n    try:\n        validate_geojson(test_geojson)\n    except GeoJSONValidationException as e:\n        return _geojson_error(str(e), testing)\n    # Everything checked out. Return 'ok'.\n    resp = {\n        'status': 'ok',\n    }\n    return HttpResponse(json.dumps(resp), mimetype='application/json')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def verify_geojson(data):\n \"\"\"Enforces camelcasing of properties\"\"\"\n if 'id' in data:\n del data['id']\n try:\n data['type'] = data['type'] if 'type' in data else \"Feature\"\n data['geometry'] = data['geometry'] if 'geometry' in data else None\n if 'properties' not in data:\n data['properties'] = {}\n for key, value in {key: value for (key, value) in data.items() if key not in ['type', 'geometry', 'properties']}.items():\n data['properties'][key] = strings.as_numeric(value)\n data = {'type': data['type'], 'geometry': data['geometry'], 'properties': data['properties']} \n for key, value in data['properties'].items():\n fixed_key = strings.camelcase(key) if key != 't_utc' else key\n fixed_key = \"pH\" if (fixed_key == \"Ph\" or fixed_key == \"PH\") else fixed_key ## hack\n data['properties'][fixed_key] = strings.as_numeric(value)\n if key != fixed_key: \n del data['properties'][key]\n except Exception as e:\n log.error(log.exc(e))\n return None\n return data", "def test_if_posted(self):\n reqdata = {\"lat\": 17.726675,\n \"long\": 83.312320,\n \"address\": \"CBM Compound\",\n \"state\": \"Andhra Pradesh\",\n \"pin\": 530003\n }\n\n res = req.post(post_loc_url, json=jsonify(reqdata))\n print(\"RES\", res.text)\n self.assertEqual(\"200\", json.loads(res.text)[\"Status\"])", "def is_validated_location_service(request_body):\n schema = schema_utils.get_location_schema()\n validator = Validator(schema, require_all=True)\n result = validator.validate(request_body)\n if validator.errors:\n logging.error(str(validator.errors))\n return result", "def validate_json(self):\n pass", "def validate(self, data):\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom).intersection(the_geom, field_name='the_geom')\n # sort the returned regions by area of intersection, largest first.\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection.area, reverse=True) \n data['region'] = validators.validate_region(sorted_regions)\n # remove unwanted fields, these are pulled from the request in the view if the serializer is valid\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'), data.pop('formats')\n return data", "def verify_geometry(data):\n lon, lat, alt = None, None, None\n properties = data['properties']\n delete = []\n try:\n for p, value in properties.items():\n if p.lower().strip() == 'longitude' or p.lower().strip() == 'lon' or p.lower().strip() == 'lng' or p.lower().strip() == 'long':\n lon = value\n delete.append(p)\n elif p.lower().strip() == 'latitude' or p.lower().strip() == 'lat': \n lat = value\n delete.append(p)\n elif p.lower().strip() == 'altitude' or p.lower().strip() == 'alt': \n alt = value\n delete.append(p) \n if lon is not None and lat is not None:\n if data['geometry'] is None: ## this retains geometry if it exists, is that ok?\n data['geometry'] = {'type': \"Point\", 'coordinates': [float(lon), float(lat), float(alt) if alt is not None else None]}\n for p in delete:\n del properties[p]\n data['properties'] = properties \n except Exception as e:\n log.error(\"Error parsing coordinates: %s\" % log.exc(e))\n return data", "def test_geo() -> None:\n soup = generate_case(\"geo\")\n\n tests.html_schema_doc_asserts.assert_property_names(soup, [\"latitude\", \"longitude\"])\n tests.html_schema_doc_asserts.assert_types(soup, [\"object\", \"number\", 
\"number\"])\n tests.html_schema_doc_asserts.assert_numeric_restrictions(\n soup,\n [\n \"Value must be greater or equal to -90 and lesser or equal to 90\",\n \"Value must be greater or equal to -180 and lesser or equal to 180\",\n ],\n )\n tests.html_schema_doc_asserts.assert_required(soup, [True] * 2)", "def validate(self, data):\n # TODO: Replace this with GeoDjango PointField.\n # https://trello.com/c/03yB0K0n\n if data['lat'] and data['lon']:\n try:\n location = Location.objects.get(lat=data['lat'], lon=data['lon'])\n raise serializers.ValidationError({\n 'error': 'Location already exists.',\n 'instance': location.id\n })\n except Location.DoesNotExist:\n pass\n\n return data", "def get_poly_obj(self):\n try:\n area = self.request.POST['area'].replace(\"\\n\", \"\")\n geo_poly_obj = Polygon(json.loads(area)['coordinates'][0])\n return geo_poly_obj\n except:\n raise ValidationError(\"Not proper geo json\")", "def validate(self, value):\n if isinstance(value, dict):\n if set(value.keys()) == {\"type\", \"coordinates\"}:\n if value[\"type\"] != self._type:\n self.error(f'{self._name} type must be \"{self._type}\"')\n return self.validate(value[\"coordinates\"])\n else:\n self.error(\n \"%s can only accept a valid GeoJson dictionary\"\n \" or lists of (x, y)\" % self._name\n )\n return\n elif not isinstance(value, (list, tuple)):\n self.error(\"%s can only accept lists of [x, y]\" % self._name)\n return\n\n validate = getattr(self, \"_validate_%s\" % self._type.lower())\n error = validate(value)\n if error:\n self.error(error)", "def test_no_longitude(self):\n data = self.valid_payload\n data[\"longitude\"] = \"\"\n response1 = self.client.post(\n reverse('contacts'),\n data=json.dumps(data),\n content_type='application/json'\n )\n del data[\"longitude\"]\n response = self.client.post(\n reverse('contacts'),\n data=json.dumps(data),\n content_type='application/json'\n )\n self.assertEqual(response1.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_no_longitude(self):\n data = self.valid_payload\n data[\"longitude\"] = \"\"\n response1 = self.client.post(\n reverse('contacts'),\n data=json.dumps(data),\n content_type='application/json'\n )\n del data[\"longitude\"]\n response = self.client.post(\n reverse('contacts'),\n data=json.dumps(data),\n content_type='application/json'\n )\n self.assertEqual(response1.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def is_valid_input(geometry, **kwargs):\n return lib.is_valid_input(geometry, **kwargs)", "def verify_post_data ( ):\n # check every field is present\n try:\n request.json[ 'source_lang' ]\n request.json[ 'target_lang' ]\n request.json[ 'text' ]\n\n TranslatorApp.verify_rpc_value ( request.json )\n\n except KeyError: # All the values are not present\n # 400 Bad Request\n abort ( 400, \"All mandatory fields are not provided\" )\n except ValueError as err:\n # 422 Unprocessable Entity\n abort ( 422, \"Unprocessable value: {0}\".format ( err.args ) )\n except BadRequest:\n # 400 Bad Request\n abort ( 400, \"Provided values are having malformed syntax\" )", "def post(self, request, format=None):\n success = False\n try:\n line1=request.data[\"line1\"]\n district=request.data[\"district\"]\n state=request.data[\"state\"]\n pincode=request.data[\"pincode\"]\n branch=request.data[\"branch\"]\n address_obj = Address(line1=line1,district=district,\n 
state=state,pincode=pincode,branch=Branch.objects.get(pk=branch))\n address_obj.save()\n address_string = district+\", \"+state+\", \"+pincode\n if address_obj.id:\n location_coordinates = GeolocationApi.get_lat_lng(address_string)\n geolocation_obj = Geolocation(address=address_obj,\n lat=location_coordinates[\"latitude\"],\n lng=location_coordinates[\"latitude\"])\n geolocation_obj.save()\n success=True\n except Exception as e:\n success=False\n print(e)\n return Response(success)", "def fetchGeoData():\n if request.method ==\"POST\":\n result = {}\n if request.get_json():\n post_requests = request.get_json()\n print(post_requests)\n result = db.getmapdata(post_requests['attr']) \n return result", "def post(self):\n data = json.dumps(request.get_json())\n houseNumber = json.loads(data)['HouseNumber']\n street = json.loads(data)['Street']\n city = json.loads(data)['city']\n #address = '&housenumber='+houseNumber+'&street='+street+'&city='+city\n response = hereService.getLatLang(houseNumber, street, city)\n return response", "def validator(request, schema):\n try:\n body = request.body.decode('utf-8')\n dictbody = json.loads(body) if body else {}\n validate_against_schema(request, schema, dictbody)\n except ValueError as e:\n request.errors.add('body', 'body', six.text_type(e))", "def validate_json(d):\n if d['type'] != 'FeatureCollection':\n raise Exception('JSON file is not a \\\"FeatureColleciton\\\".')\n\n if len(d['features']) != 1:\n raise Exception('JSON file should contain excactly one feature.')\n\n f = d['features'][0]\n\n if 'reference' not in f['properties'].keys():\n raise Exception('Feature property dictionary should contain '\n '\\\"referencey\\\" key.')\n\n if f['type'] != 'Feature':\n raise Exception('Feature type should be \\\"Feature\\\".')\n\n geom = f['geometry']\n\n if geom['type'] != 'MultiPolygon':\n raise Exception('Geometry type should be \\\"MultiPolygon\\\".')\n\n if 'coordinates' not in geom.keys():\n raise Exception('Geometry dictionary should contain \\\"coordinates\\\" '\n 'key.')\n\n polygons = geom['coordinates'][0]\n\n n_polygons = len(polygons)\n for i in range(n_polygons):\n p = polygons[i]\n n_points = len(p)\n if n_points % 2 == 0:\n raise Exception('Number of points in polyon must be odd.')\n\n if p[0] != p[-1]:\n raise Exception('First and last points in polygon must be '\n 'identical.')\n\n n_pairs = int((n_points - 1) / 2)\n for j in range(n_pairs):\n #------------------------------------------------------------------\n # Points are paired and in each pair the top is first, as in:\n #\n # _.-P1-._\n # P0' 'P2---P3\n # | \\\n # P7---P6----P5-------P4\n #\n # Pairs: P0-P7, P1-P6, P2-P5, P3-P4\n #------------------------------------------------------------------\n top_depth = p[j][2]\n bot_depth = p[-(j + 2)][2]\n if top_depth > bot_depth:\n raise Exception(\n 'Top points must be ordered before bottom points.')", "def test_no_latitude(self):\n data = self.valid_payload\n data[\"latitude\"] = \"\"\n response1 = self.client.post(\n reverse('contacts'),\n data=json.dumps(data),\n content_type='application/json'\n )\n del data[\"latitude\"]\n response = self.client.post(\n reverse('contacts'),\n data=json.dumps(data),\n content_type='application/json'\n )\n self.assertEqual(response1.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_no_latitude(self):\n data = self.valid_payload\n data[\"latitude\"] = \"\"\n response1 = self.client.post(\n reverse('contacts'),\n 
data=json.dumps(data),\n content_type='application/json'\n )\n del data[\"latitude\"]\n response = self.client.post(\n reverse('contacts'),\n data=json.dumps(data),\n content_type='application/json'\n )\n self.assertEqual(response1.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def validate(self: object, body: dict) -> dict:\n # [POST] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/validate\n return process_service_request(\n calling_object=self,\n endpoints=Endpoints,\n operation_id=\"validate\",\n body=body\n )", "def post(self, request):\n # GET REQUEST DATA\n fid = request.POST.get('fid', False)\n uuid = request.POST.get('uuid', False)\n title_text = request.POST.get('title', False)\n body = request.POST.get('body', False)\n photo = request.FILES.get('photo', False) # FOR STORAGE\n wfsxml = request.POST.get('wfsxml', False) # FOR GEOSERVER\n data = {\n 'uuid': uuid,\n 'title_text': title_text,\n 'body': body,\n 'wfsxml': wfsxml\n }\n # VALIDATE FORM\n form = GeoPostForm(data, request.FILES)\n logger.info(\"\\ninstantiate Geopost form\\n\")\n # IF FORM VALIDATION ERROR\n if not form.is_valid():\n return server_error(request.body)\n #context = self.getContext(form)\n #return render(request, 'geopost/entry.html', context)\n else:\n pass\n # GET CLEAN VALUES\n uuid = form.cleaned_data['uuid']\n wfsxml = form.cleaned_data['wfsxml']\n # UPLOAD PHOTO TO BUCKET\n # if editing existing entry, first delete existing photo\n if fid:\n delete_from_bucket(uuid, self.imageBucket)\n else:\n pass\n photo.open('rb')\n error = upload_to_bucket(\n photo, self.imageBucket, photo.content_type, uuid)\n photo.close()\n # IF ERROR UPLOADING IMAGE\n if error:\n return server_error(error)\n else:\n pass\n # MAKE GEOSERVER WFS TRANSACTION\n error = post_to_geoserver(wfsxml, self.wfsURL)\n # ALL GOOD\n if not error:\n return HttpResponseRedirect(reverse('geopost_home'))\n # IF WFS TRANSACTION ERROR\n else:\n delete_from_bucket(uuid, self.imageBucket)\n return server_error(error)", "def test_search_polygon_successfuly(self, api_client):\n url = self.base_url + \"/polygons/\"\n prov = baker.make(Provider)\n self.polygon_data['provider'] = prov.id\n response = api_client().post(url, self.polygon_data)\n assert response.status_code == 201\n search_url = url + 'get_locations/?long=-98.503358&lat=-29.335668'\n response = api_client().get(search_url)\n assert response.status_code == 200\n assert len(response.data) == 1\n assert response.data[0]['name'] == self.polygon_data['name']", "def test_invalid_countries(self):\n data = self.valid_payload\n data['nationality'] = 500\n response = self.client.post(\n reverse('contacts'),\n data=json.dumps(data),\n content_type='application/json'\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n # self.assertEqual(response.data, 'ey')", "def validate_source(features):\n click.echo(f\"Validating features\", err=True)\n\n for feature in features:\n utils.validate_geojson(feature)\n\n click.echo(\"✔ valid\")", "def validate(self, data):\n request = self.context.get('request')\n data['poster'] = request.user\n\n return validate_complete_address(data)", "def get_data():\n\n if request.method == 'POST':\n content = request.json\n text = content['complaint_text']\n if len(text) == 0:\n return json.dumps({\"error\": \"Text field should not be left empty\"})\n text = translator.translate(str(text), dest='en').text\n latitude = content['cdlat']\n longitude = 
content['cdlon']\n categories = pred(text)\n return json.dumps({'categories': categories, \"location\": {\"latitude\": latitude, \"longitude\": longitude}})\n else:\n return json.dumps({\"error\": \"Try sending using POST request\"})", "def post(self):\n data = request.json\n return check_spelling(data)", "def test_test_json_form_data(self):\n pass" ]
[ "0.6258375", "0.61013263", "0.60835147", "0.6058721", "0.60300624", "0.60212815", "0.5967188", "0.580679", "0.58050185", "0.5788679", "0.5773974", "0.5773974", "0.57511264", "0.5706149", "0.5697552", "0.5684848", "0.56350404", "0.5609239", "0.55897486", "0.5548948", "0.5548948", "0.5514949", "0.54129446", "0.5391336", "0.53759307", "0.532974", "0.531045", "0.5259842", "0.5219119", "0.52162015" ]
0.67822635
0
Get a random software license.
def software_license(self) -> str: return self.random.choice(LICENSES)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def get_license(self) -> APIReturn:\n return await self._request(\"GET\", \"/getLicense\")", "def get_license():\n repo_fs()\n return LICENSE", "def license(self): # noqa: A003\n logger.debug(\"Get license\")\n return self._raw_api.license.get()", "def license_plate(self) -> str:\n return self.numerify(self.generator.parse(self.random_element(self.license_formats)))", "def getLicense(self, resource):\n\n if isinstance(resource, int):\n resource = 'licenses/{0}'.format(resource)\n\n res = self.getRequest(resource)\n if res:\n license = vsdModels.License(**res)\n\n return license\n else:\n return None", "def License(self, default=None):\n return self.data.get('license', default)", "def license_plate(self) -> str:\n temp = re.sub(\n r\"\\?\",\n lambda x: self.random_element(self.ascii_uppercase_azerbaijan),\n self.random_element(self.license_formats),\n )\n temp = temp.replace(\"##\", self.random_element(self.license_plate_initial_numbers), 1)\n # temp = temp.format(self.random_element(range(1, 999)))\n return self.numerify(temp)", "def license_plate(self) -> str:\n prefix: str = self.random_element(self.license_plate_prefix)\n suffix = self.bothify(\n self.random_element(self.license_plate_suffix),\n letters=string.ascii_uppercase,\n )\n return prefix + suffix", "def license(*args, borrow: bool=True, info: bool=True, isBorrowed: bool=True, isExported:\n bool=True, isTrial: bool=True, licenseMethod: bool=True, productChoice: bool=True,\n r: bool=True, showBorrowInfo: bool=True, showProductInfoDialog: bool=True, status:\n bool=True, usage: bool=True, **kwargs)->AnyStr:\n pass", "def license(p):\n # Input file\n f = '/'.join([p, 'collector.stats'])\n check_path(f)\n\n # Open file with universal newline support\n with open(f, 'rU') as fh:\n for line in fh.readlines():\n if 'License key' in line:\n license = line.split(':')[1].strip()\n break\n\n return license", "def get_license_key(self):\n\t\treturn call_sdk_function('PrlLic_GetLicenseKey', self.handle)", "def custom_licenses(self):\n buf = (ctypes.c_char * self.MAX_BUF_SIZE)()\n result = self._dll.JLINK_EMU_GetLicenses(buf, self.MAX_BUF_SIZE)\n if result < 0:\n raise errors.JLinkException(result)\n return ctypes.string_at(buf).decode()", "def get_license(self):\n etree = self.get_eml()\n project_license_dict = etree.find('.//intellectualRights/para/ulink')\n project_license = project_license_dict.get('url')\n return project_license", "def generate_license_plate(self, num):\n license_plate = []\n for _ in range(num):\n license_plate.append(self.fake.license_plate())\n return license_plate", "def get_license_info(self):\n\t\treturn Job(SDK.PrlSrv_GetLicenseInfo(self.handle)[0])", "def license_number(self):\n return self._license_number", "def grabLicence(self):\n\t\t\treturn pulpCPLEX.grabLicence()", "def get_licence(self, _return):\n return _return.licence.licence_number", "def show_license(ctx, param, value):\n if not value or ctx.resilient_parsing:\n return\n click.echo(lic)\n ctx.exit()", "def get(cls, client, name=\"\", option_=\"\") :\n try :\n if not name :\n obj = nslicense()\n response = obj.get_resources(client, option_)\n return response\n except Exception as e :\n raise e", "def test_default_license(self):\n # When no license is specified, the license should default to \"CC BY\"\n story = create_story(title=\"Test Story\", summary=\"Test Summary\",\n byline=\"Test Byline\")\n self.assertEqual(story.license, 'CC BY')\n\n # When a license is specified, it should be set\n story = create_story(title=\"Test Story\", 
summary=\"Test Summary\",\n byline=\"Test Byline\", license=\"CC BY-NC-SA\")\n self.assertEqual(story.license, 'CC BY-NC-SA')", "def pickSecretNumber(): \n return random.randrange(1, 11)", "def license(new_key):\n if new_key is not None:\n # click.echo('Saving key to configuration')\n config.set_license(new_key)\n license_key = config.get_license()\n if license_key:\n click.echo(license_key)\n else:\n click.echo(\"No license found: Use --set to configure the key\")", "def licenses(self):\n buf_size = self.MAX_BUF_SIZE\n buf = (ctypes.c_char * buf_size)()\n res = self._dll.JLINK_GetAvailableLicense(buf, buf_size)\n if res < 0:\n raise errors.JLinkException(res)\n return ctypes.string_at(buf).decode()", "def get_license_text(self):\n\n if self.license_file:\n license_text = self.license_file.read_text(encoding=\"utf-8\")\n else:\n license_text = (\n \"Could not find foxBMS 2 license file.\\n\"\n f\"Please check {FOXBMS_LICENSE_FALLBACK_URL}.\"\n )\n self.license_file_missing_msg_box = wx.MessageBox(\n license_text, \"License file missing\", wx.OK | wx.ICON_WARNING\n )\n # self.Bind(wx.EVT_BUTTON, self.license_file_missing_msg_box)\n return license_text", "def get_license_from_wiki(s, *, is_file=False):\n response = api.wiki_search_licence(s, file=is_file)\n\n idpage = response['query']['pageids'][0]\n\n try: \n imageinfo = response['query']['pages'][idpage].get('imageinfo', None)\n return imageinfo[0]['extmetadata']['UsageTerms']['value']\n except (KeyError, TypeError) as e:\n Querylog.error('License not found for %s' % s)\n return None", "def license_key(self):\n # type: () -> string_types\n return self._license_key", "def qs_license():\r\n paragraph = document.add_paragraph('')\r\n document.add_heading('License', level=1)\r\n lic_metric = ['lef', 'serial', 'name', 'organization', 'product', 'numberOfCores', 'isExpired', 'expiredReason', 'isBlacklisted', 'isInvalid']\r\n qs_lic = get_qlik_sense.get_license()\r\n num_of_metric = len(qs_lic)\r\n table = document.add_table(rows=num_of_metric+1, cols=2)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'details'\r\n\r\n for metric in range(len(qs_lic)):\r\n row = table.rows[metric+1]\r\n row.cells[0].text = str(lic_metric[metric])\r\n row.cells[1].text = str(qs_lic[metric][0])\r\n document.add_page_break()", "def computer_generate(self):\n return choice[random.randrange(3)]", "def get_public_license(style: str) -> str:\n raw = MIT_LICENSE\n if style == 'raw':\n return raw\n if style == 'python':\n # Add a line at the bottom since our python-formatters tend to smush\n # our code up against the license; this keeps things a bit more\n # visually separated.\n return ('\\n'.join('#' + (' ' if l else '') + l\n for l in raw.splitlines()) + '\\n' + '# ' + '-' * 77)\n if style == 'makefile':\n # Basically same as python except without the last line.\n return ('\\n'.join('#' + (' ' if l else '') + l\n for l in raw.splitlines()))\n if style == 'c++':\n return '\\n'.join('//' + (' ' if l else '') + l\n for l in raw.splitlines())\n raise RuntimeError(f'Invalid style: {style}')" ]
[ "0.7257501", "0.71489984", "0.70458555", "0.6904726", "0.6888948", "0.68682706", "0.6559753", "0.63974625", "0.6345092", "0.63441616", "0.63224334", "0.62866235", "0.6221269", "0.6201027", "0.6021887", "0.597207", "0.5964664", "0.593197", "0.59197384", "0.5912311", "0.58886063", "0.5864449", "0.5850298", "0.57972896", "0.5777882", "0.5719451", "0.56842864", "0.56731063", "0.56686085", "0.5663979" ]
0.8545533
0
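A note on this row before moving on: its hard negatives mix two senses of "license" — software license keys and vehicle license plates — which is why plate generators score close to the positive document. A minimal sketch of the plate-expansion pattern several of those negatives share (the LICENSE_FORMATS values below are illustrative assumptions; the corpus's actual format strings are not shown):

    import random
    import string

    # Illustrative formats only; the real license_formats data is not shown.
    LICENSE_FORMATS = ("??-####", "???-###")

    def license_plate(rng=random) -> str:
        # Expand '?' to a random uppercase letter and '#' to a random digit,
        # mirroring the bothify/numerify helpers used in the negatives.
        plate = []
        for ch in rng.choice(LICENSE_FORMATS):
            if ch == "?":
                plate.append(rng.choice(string.ascii_uppercase))
            elif ch == "#":
                plate.append(rng.choice(string.digits))
            else:
                plate.append(ch)
        return "".join(plate)

    print(license_plate())  # e.g. 'QK-4821'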
Get a random programming language from the list.
def programming_language(self) -> str: return self.random.choice(PROGRAMMING_LANGS)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def language():\r\n\r\n cursor.execute('SELECT name from languages order by RANDOM() limit 1;')\r\n return cursor.fetchone()[0]", "def choose_language(self):\n\n current_dir = os.curdir\n path = os.path.join(current_dir, \"audio\")\n languages = os.listdir(path)\n language_num = randint(0, len(languages) - 1)\n return languages[language_num]", "def return_word():\n wordlist = load_words()\n word = random.choice(wordlist)\n return word", "def random_word(wordlist):\n return random.choice(wordlist)", "def random_word(a_list):\n\n return random.choice(a_list)", "def randomWord(wordList):\n return random.choice(wordList)", "def randomWord(wordList):\n return random.choice(wordList)", "def randomWord(wordList):\n return random.choice(wordList)", "def chosen():\n wordList = loadWords()\n w = random.choice(wordList)\n word = w[:-1]\n return word", "def get_random_phrase():\n return random.choices(PHRASES, WEIGHTS, k=1)[0]", "def get_word():\n index = random.randrange(3)\n print(index)\n if index == 0:\n return 'HAPPIE'\n elif index == 1:\n return 'PYTHON'\n else:\n return 'COMPUTER'", "def choose_word(word_list):\n word = random.choice(word_list)\n word = word.lower()\n return word", "def get_word():\n words = []\n for line in open(LEXICON_FILE):\n line = line.strip()\n words.append(line)\n\n index = random.randrange(0,len(words))\n return words[index]", "def select_random_lang(lang_len, tot_egs, random_selection):\n assert(tot_egs > 0)\n rand_int = random.randint(0, tot_egs - 1)\n count = 0\n for l in range(len(lang_len)):\n if random_selection:\n if rand_int <= (count + lang_len[l]):\n return l\n else:\n count += lang_len[l]\n else:\n if (lang_len[l] > 0):\n return l\n return -1", "def getRandom(self):\n return random.choice(self.ls)", "def choose_track(self, language):\n\n current_dir = os.curdir\n path = os.path.join(current_dir, language)\n tracks = os.listdir(path)\n track_num = randint(0, len(tracks) - 1)\n return tracks[track_num]", "def choice(L):\r\n LEN = len(L) # Get the length\r\n randomindex = int(LEN*random()) # Get a random index\r\n return L[randomindex] # Return that element\r", "def pick_word(self):\n self.chosen_word = random.choice(self.words_list)\n return self.chosen_word", "def choose_secret_word():\n # return random.choice(['python' , 'django' , 'concatenate'])\n return random.choice(['concatenate' , 'concatenate' , 'concatenate'])", "def get_random_phrase(self):\n return random.choice(self.phrases)", "def get_language():\n disabled_modules = ['tokenizer', 'tagger', 'parser', 'textcat']\n nlp = spacy.load('en_core_web_md', disable=disabled_modules)\n # we are not interested in stop-words as most of them are\n # needed in the short sentence examples in relation definitions\n spacy_wmd_hook = wmd.WMD.SpacySimilarityHook(nlp, ignore_stops=False)\n nlp.add_pipe(spacy_wmd_hook, last=True)\n return nlp", "def randomHelmet():\n return random.choice(HELMETS)", "def getRandomFromList(self, l):\n if (len(l) == 0):\n return -1\n return l[randint(0, len(l) - 1)]", "def en_word(cls):\n return cls.random_element(cls.words)", "def pick_random_word():\r\n # open the sowpods dictionary\r\n with open(\"resources/ex30/sowpos.txt\", 'r') as f:\r\n words = f.readlines()\r\n\r\n # generate a random index\r\n # -1 because len(words) is not a valid index into the list `words`\r\n index = random.randint(0, len(words) - 1)\r\n\r\n # print out the word at that index\r\n word = words[index].strip()\r\n return word", "def get_random_word(self):\n pass", "def get_user_language() -> str:\n languages 
= {\n \"arabic\": \"arb\",\n \"chinese\": \"cmn-CN\",\n \"danish\": \"da-DK\",\n \"english\": \"en-GB\",\n \"french\": \"fr-FR\",\n \"german\": \"de-DE\",\n \"portuguese\": \"pl-PT\",\n \"spanish\": \"es-ES\"\n }\n textlang = input(\"What language do you want to hear?\")\n try:\n return languages[textlang.lower()]\n except KeyError as e:\n print(\"Enter a valid language.\")\n sys.exit(1)", "def get_randword():\n with open('/home/sarga/text_words.txt','r') as f:\n rword = f.read().split(\" \")\n return random.choice(rword)", "def random_word():\n num = random.choice(range(9))\n if num == 0:\n return \"NOTORIOUS\"\n elif num == 1:\n return \"GLAMOROUS\"\n elif num == 2:\n return \"CAUTIOUS\"\n elif num == 3:\n return \"DEMOCRACY\"\n elif num == 4:\n return \"BOYCOTT\"\n elif num == 5:\n return \"ENTHUSIASTIC\"\n elif num == 6:\n return \"HOSPITALITY\"\n elif num == 7:\n return \"BUNDLE\"\n elif num == 8:\n return \"REFUND\"", "def get_word():\n word=words_dict[randrange(0,len(words_dict))]\n return word" ]
[ "0.7798375", "0.75217885", "0.6663096", "0.657567", "0.6547032", "0.6538302", "0.6538302", "0.6538302", "0.6450568", "0.6426865", "0.62987846", "0.6250212", "0.624393", "0.62252414", "0.6214424", "0.62047374", "0.61436826", "0.61385137", "0.6122823", "0.6067962", "0.6052699", "0.60259944", "0.59391344", "0.5936049", "0.5932842", "0.5918013", "0.5846685", "0.5830073", "0.5825997", "0.58142394" ]
0.7644547
1
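Every positive document in these rows follows the same one-line pattern: a provider method delegating to random.choice over a module-level constant. A self-contained sketch of that pattern (the PROGRAMMING_LANGS values and the Development class name are assumptions for illustration):

    import random

    # Illustrative stand-in for the corpus's PROGRAMMING_LANGS constant.
    PROGRAMMING_LANGS = ("Python", "Rust", "Go", "Haskell", "OCaml")

    class Development:
        # Mimics the provider pattern seen in the positive documents.
        def __init__(self, seed=None):
            # A provider-local Random instance keeps sampling reproducible.
            self.random = random.Random(seed)

        def programming_language(self) -> str:
            # Get a random programming language from the list.
            return self.random.choice(PROGRAMMING_LANGS)

    dev = Development(seed=42)
    print(dev.programming_language())  # deterministic for a fixed seed

The document_rank of 1 for this row records a retrieval miss: the top negative — a SQL lookup that orders a languages table by RANDOM() — scores 0.7798, above the positive document's 0.7645.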
Get a random operating system or distribution name.
def os(self) -> str: return self.random.choice(OS)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_rand_name() -> str:\n suf = \"\".join(random.choices(string.ascii_uppercase + string.digits, k=6))\n return f\"exporters_{suf}\"", "def GetOSName():\n return Config.osName_", "def platform_distro():\n distro = platform_information()[0] or ''\n return distro.strip().lower()", "def _get_osname():\n osname = sys.platform.lower()\n if osname == \"linux2\":\n osname = \"linux\"\n return osname", "def get_distrib_name():\n distrib, version, codename = _get_release_infos()\n \n if distrib.startswith('Red Hat Enterprise Linux'):\n return 'RHEL'\n elif distrib.startswith('CentOS'):\n return 'CentOS'\n else:\n abort(\"OS not supported.\")", "def get_os() -> str:\n system = platform.system().lower()\n\n if system == \"linux\":\n machine = os.uname().machine\n if machine.startswith(\"arm\") or machine.startswith(\"aarch\"):\n system = \"pi\"\n\n return system + \"_\" + platform.architecture()[0]", "def auto_detect_os(distro):\n if is_debian(distro):\n return \"Debian\"\n\n if is_redhat(distro):\n return \"Redhat\"\n\n return \"Unknown\"", "def name(cls):\n\n system = platform.system()\n\n # Apply system map\n if system in NAME_MAP:\n system = NAME_MAP[system]\n\n return system", "def generate_name():\n return random.choice(ADJECTIVES) + \"_\" + random.choice(TOOLS)", "def _get_random_name(self, length=10):\n return base64.b64encode(os.urandom(10)).translate(None, '=+/')", "def get_os_release():\n if platform.linux_distribution()[0]:\n return \" \".join(platform.linux_distribution())\n elif platform.mac_ver()[0]:\n return \"%s %s\" % (platform.mac_ver()[0], platform.mac_ver()[2])\n else:\n return \"Unknown\"", "def _get_build_os_name():\n system = platform.system()\n if 'Darwin' in system or 'Macintosh' in system:\n return 'darwin-x86'\n\n # TODO: Add more values if needed.\n return 'linux-x86'", "def __generate_random_string():\n return uuid4().hex[:6].upper()", "def name(self):\n if not self._name:\n prefix = self.random.choice(['Desktop'] * 4 + ['Laptop'])\n self._name = '{}-{}'.format(prefix, ''.join(\n self.random.choice(string.ascii_uppercase + string.digits) for _ in range(7)))\n return self._name", "def generate_name(self):\n letters = string.ascii_letters\n random_name = ''.join(random.choice(letters) for _ in range(10))\n assert isinstance(random_name, str)\n return random_name", "def get_os():\n\n os_platform = sys.platform\n\n if os_platform.startswith('darwin'):\n return 'mac'\n\n if os_platform.startswith('linux'):\n return 'linux'\n\n if os_platform.startswith('win'):\n return 'windows'\n\n raise RuntimeError('Unsupported operating system.')", "def os_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"os_name\")", "async def osname(self):\n\n await self.bot.say(box(system(), 'Bash'))", "def computer_username():\r\n\t#possible username the computer can have\r\n\tusernames = ['bella_123','$lade(99)','BADOO_0!','V1rus**',\t\t\r\n\t\t'Gh0stO_O', '1ce_man','MoneyBa9$','1ucy=_=', 'F1ash~_~',\r\n\t\t'<an9el>','-NeGaT1Ve-', '__M4dCat__','|Re$pEcT0|','-D1ggerR-',\r\n\t\t'k^T3st','n1ce!™']\r\n\trandom.SystemRandom().shuffle(usernames)\t\r\n\tselect_username = ''.join(random.sample(usernames, 1))\t#select a random username\r\n\treturn select_username", "def get_random_manor_name(owner = None):\n if not owner:\n owner = get_random_lastname_upperclass()\n manor_name = \"%s %s\" % (owner, random.choice((\"Hall\", \"Manor\")))\n\n return manor_name", "def get_os_name(x86=0):\r\n platform_in_short, on_win = sys.platform[:3], 0\r\n\r\n if platform_in_short == \"win\":\r\n 
on_win = 1\r\n os_name = \"nt\"\r\n elif platform_in_short == \"lin\":\r\n os_name = \"lin\"\r\n else:\r\n os_name = \"sol\"\r\n if not x86:\r\n os_name += \"64\"\r\n return on_win, os_name", "def get_os_name(x86=0):\r\n platform_in_short, on_win = sys.platform[:3], 0\r\n\r\n if platform_in_short == \"win\":\r\n on_win = 1\r\n os_name = \"nt\"\r\n elif platform_in_short == \"lin\":\r\n os_name = \"lin\"\r\n else:\r\n os_name = \"sol\"\r\n if not x86:\r\n os_name += \"64\"\r\n return on_win, os_name", "def random_useragent()->str:\n dir_path= os.path.dirname(__file__)\n useragents_file_path = os.path.join(dir_path, \"data\", \"useragents.txt\")\n _pc_useragents=[line.rstrip('\\n') for line in open(useragents_file_path)]\n return random.choice(_pc_useragents)", "def name():\r\n return _random.choice([male_first(), female_first()])", "def get_os_name(cls):\n return cls.get_os_type().name", "def platform(self):\n return self.random.choice([\n 'Laptop', \n 'Desktop', \n 'Workstation', \n 'Server', \n 'Virtual Machine', \n 'Container', \n 'Micro-Service', \n 'Droplet', \n 'SaaS'\n ])", "def random_name(size=6):\r\n chars = string.ascii_uppercase + string.digits\r\n return 'test-' + ''.join(random.choice(chars) for x in range(size))", "def known_os_type():\n return 'Linux'", "def identify_system() -> str:\n system = platform.system()\n if system not in [\"Linux\", \"Darwin\"]:\n raise ValueError(f\"Unsupported system {system}\")\n return system", "def software_license(self) -> str:\n return self.random.choice(LICENSES)" ]
[ "0.71472675", "0.7110732", "0.7060906", "0.6976703", "0.69478893", "0.6882059", "0.6790889", "0.6751498", "0.66147846", "0.659943", "0.6580307", "0.657741", "0.6558327", "0.653063", "0.65062135", "0.6499304", "0.6496545", "0.6487284", "0.6480682", "0.6479354", "0.64615834", "0.64615834", "0.64495474", "0.6440491", "0.6408814", "0.64067084", "0.64014405", "0.6391613", "0.63654613", "0.6341812" ]
0.7919382
0
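The negatives for this row largely detect or report the host OS (platform.system(), sys.platform, linux_distribution()), while the query asks for random sampling of an OS name. A short sketch contrasting the two behaviours (the OS tuple stands in for the corpus constant and is an assumption):

    import platform
    import random

    OS = ("Debian", "Fedora", "Windows 10", "macOS", "FreeBSD")  # assumed values

    def random_os(rng=random) -> str:
        # What the query asks for: sample an OS/distribution name.
        return rng.choice(OS)

    def detected_os() -> str:
        # What most of the negatives do instead: report the running system.
        return platform.system()

    print(random_os(), "vs", detected_os())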
Get a random system quality attribute. Within systems engineering, quality attributes are realized non-functional requirements used to evaluate the performance of a system. These are sometimes named "ilities" after the suffix many of the words share.
def system_quality_attribute(self) -> str: return self.random.choice(SYSTEM_QUALITY_ATTRIBUTES)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ility(self) -> str:\n return self.system_quality_attribute()", "def quality(self):\n try:\n qid = int((self.tool_metadata or {}).get(\"quality\", 0))\n except:\n qid = 0\n\n # We might be able to get the quality strings from the item's tags\n internal_name, name = \"normal\", \"Normal\"\n if self.tags:\n tags = {x.get('category'): x for x in self.tags}\n if 'Quality' in tags:\n internal_name, name = tags['Quality'].get('internal_name'), tags['Quality'].get('name')\n\n return qid, internal_name, name", "def getRandomRarity():\n r = random.randint(1,100)\n if r <= Rarities.IMPOSIBIL:\n return \"IMPOSIBIL\"\n elif r <= Rarities.LEGENDAR:\n return \"LEGENDAR\"\n elif r <= Rarities.EPIC:\n return \"EPIC\"\n else:\n return \"COMUN\"", "def genQuality(self):\n return np.clip(np.random.normal(self.qavgs, self.qstdevs), 0, 40)", "def get_multiplier(quality):\n\n if quality == \"low\":\n return 5\n elif quality == \"medium\":\n return 6\n elif quality == \"good\":\n return 7\n elif quality == \"high\":\n return 8\n return 6", "def quality_rating(PR):\n \n if PR <= 85:\n quality = \"poor\"\n elif PR < 90:\n quality = \"mediocre\"\n elif PR < 95:\n quality = \"good\"\n else:\n quality = \"great\"\n\n return quality", "def get_random(self):\n base_genom = \"1\" * sum(self._size_var)\n return utils.randomise_a_string(base_genom)", "def get_random(self):\n self.random_range = list(np.array(self.friendly_range) * self.conversion)\n return np.random.uniform(self.random_range[0], self.random_range[1], 1)[0]", "def sample_from_concept(self):\n return random.choice(self.active_concept.extension)", "def get_random_question(self):\n available_qs = self.available_qs\n if available_qs.exists():\n return random.choice(available_qs)", "def _cim_quality():\n return {\n 'type' : 'class',\n 'name' : 'cim_quality',\n 'base' : None,\n 'is_abstract' : False,\n 'is_entity' : True,\n 'doc' : 'The starting point for a quality record. It can contain any number of issues and reports. An issue is an open-ended description of some issue about a CIM instance. 
A record is a prescribed description of some specific quantitative measure that has been applied to a CIM instance.',\n 'properties' : [\n ('meta', 'shared.doc_meta_info', '1.1', None),\n ('reports', 'quality.report', '0.N', None),\n ],\n 'decodings' : [\n ('meta', 'self::cim:cIM_Quality'),\n ('reports', 'child::cim:report'),\n ]\n }", "def set_quality(self):\n p = self.suitability + 1.15 * self.fono\n self.quality = np.exp(p) / (1 + np.exp(p))", "def strategiaa(stan_gry):\n ruch = min(random.randint(1,3), stan_gry)\n return ruch", "def getDescQualidade(self, local='Itaquera'):\n quality = int(self.soup.find('td', text=local).parent.find('td', width=50).text)\n if quality >= 0 and quality <= 40:\n descript = 'Boa'\n elif quality >= 41 and quality <= 80:\n descript = 'Moderado'\n elif quality >= 81 and quality <= 120:\n descript = 'Ruim'\n elif quality >= 121 and quality <= 200:\n descript = 'Muito Ruim'\n elif quality >= 200:\n descript = 'Pessimo'\n return descript", "def test_sample_quality(self):\r\n self.assertEqual(self.test_sample.quality, 'medium')", "def metallicity(method, emsystem):\n if method == 'PG16':\n # Requires Hbeta, [OII], [OIII], [NII], [SII]\n R2 = (emsystem.get_emline('[OII] 3726').attrib['flux'] +\n emsystem.get_emline('[OII] 3729').attrib['flux']) / emsystem.get_emline('Hbeta').attrib['flux']\n R3 = (emsystem.get_emline('[OIII] 4959').attrib['flux'] +\n emsystem.get_emline('[OIII] 5007').attrib['flux']) / emsystem.get_emline('Hbeta').attrib['flux']\n N2 = (emsystem.get_emline('[NII] 6548').attrib['flux'] +\n emsystem.get_emline('[NII] 6584').attrib['flux']) / emsystem.get_emline('Hbeta').attrib['flux']\n S2 = (emsystem.get_emline('[SII] 6716').attrib['flux'] +\n emsystem.get_emline('[SII] 6731').attrib['flux']) / emsystem.get_emline('Hbeta').attrib['flux']\n # Proceed\n if np.log10(N2) < -0.6:\n r_val = 7.932 + 0.944*np.log10(R3/R2) + 0.695*np.log10(N2) + \\\n ((0.97 - 0.291*np.log10(R3/R2)) - 0.019*np.log10(N2))*np.log10(R2)\n\n s_val = 8.072 + 0.789*np.log10(R3/S2) + 0.726*np.log10(N2) + \\\n (1.069 - 0.170*np.log10(R3/S2) +0.022*np.log10(N2))*np.log10(S2)\n else:\n r_val = 8.589 + 0.022*np.log10(R3/R2) + 0.399*np.log10(N2) + \\\n (-0.137 + 0.164*np.log10(R3/R2) + 0.589*np.log10(N2))*np.log10(R2)\n\n s_val = 8.424 + 0.030*np.log10(R3/S2) + 0.751*np.log10(N2) + \\\n (-0.349 + 0.182*np.log10(R3/S2) +0.508*np.log10(N2))*np.log10(S2)\n return r_val.decompose().value, s_val.decompose().value", "def quality(value: str) -> str:\n if \"HDTV\" in value:\n return \"HDTV\"\n else:\n return \"SD\"", "def test_sample_one_quality(self):\r\n self.assertEqual(self.test_sample.quality, 'medium')", "def test_get_prior_string_uniform(self):\n dim = Dimension(\"yolo\", \"uniform\", 1, 2)\n assert dim.get_prior_string() == \"uniform(1, 3)\"", "def test_get_prior_string_precision(self):\n dim = Real(\"yolo\", \"uniform\", 1, 2, precision=5)\n assert dim.get_prior_string() == \"uniform(1, 3, precision=5)\"", "def __getattr__(self, key):\n return random.choice([\"world\", math.pi])", "def random():\n pars = dict(\n scale=10**np.random.uniform(1, 3),\n gamma=np.random.uniform(0, 6),\n q_0=10**np.random.uniform(-3, -1),\n )\n return pars", "def question(self, name: str) -> Optional[NumericalAttribute]:\n return super().attribute(name=name)", "def quality(self):\n return self.plays * self.number", "def standardid(self):\n return self.get(\"capabilityStandardID\")", "def getRandomSuitType(level, rng=random):\n return random.randint(max(level-4, 1 ), min(level, 8))", "async def 
random_skill_score(self, card_rarity):\n if card_rarity == \"common\":\n random.randint(1, 20)\n elif card_rarity == \"uncommon\":\n random.randint(21, 40)\n elif card_rarity == \"rare\":\n random.randint(41, 60)\n elif card_rarity == \"epic\":\n random.randint(61, 80)\n elif card_rarity == \"legendary\":\n random.randint(81, 99)\n else:\n raise self.ex.exceptions.ShouldNotBeHere(f\"random_skill_score received the card rarity: {card_rarity} \"\n f\"which is not a valid card_rarity.\")", "def attributes(c):\n global cfg # pylint: disable=global-variable-not-assigned\n if int(c['xp01']) >= cfg['card']['xp_limit']:\n return 'evolve'\n else:\n return 'level'", "def random_alternative(self, fmt_string):\n # Find alternatives\n try:\n alts = self[fmt_string]\n except KeyError:\n # There are no alternatives for this string\n return fmt_string\n return random.choice(alts)", "def randomHelmet():\n return random.choice(HELMETS)" ]
[ "0.6713973", "0.64897156", "0.61726063", "0.6065477", "0.5870625", "0.55420256", "0.55384314", "0.5526172", "0.541174", "0.53634155", "0.53470075", "0.53367925", "0.53191906", "0.52884704", "0.52797884", "0.52781254", "0.52720535", "0.52499086", "0.5233074", "0.5218162", "0.5184987", "0.51606315", "0.51518244", "0.51493514", "0.51347744", "0.5131504", "0.51312673", "0.5115597", "0.5098581", "0.5096746" ]
0.8372629
0
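Fittingly, the top negative for this final row (0.6714) is def ility(self): return self.system_quality_attribute() — a thin alias over the positive document itself, so the lexical overlap is expected; it still trails the document's 0.8373. A sketch of the alias pair (the attribute subset is an assumption; the corpus's SYSTEM_QUALITY_ATTRIBUTES list is larger):

    import random

    # Assumed subset of the real SYSTEM_QUALITY_ATTRIBUTES constant.
    SYSTEM_QUALITY_ATTRIBUTES = (
        "reliability", "availability", "maintainability",
        "scalability", "testability", "usability",
    )

    class Development:
        def __init__(self, seed=None):
            self.random = random.Random(seed)

        def system_quality_attribute(self) -> str:
            # Get a random system quality attribute ('ility').
            return self.random.choice(SYSTEM_QUALITY_ATTRIBUTES)

        def ility(self) -> str:
            # Shorthand alias, matching the top hard negative above.
            return self.system_quality_attribute()

    print(Development(seed=1).ility())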