query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | metadata (dict) | negatives (sequence, length 30) | negative_scores (sequence, length 30) | document_score (string, lengths 4–10) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
Adds a pump to the water network model. | def add_pump(self, name, start_node_name, end_node_name, pump_type='POWER',
pump_parameter=50.0, speed=1.0, pattern=None, initial_status='OPEN'):
assert isinstance(name, str) and len(name) < 32 and name.find(' ') == -1, "name must be a string with less than 32 characters and contain no spaces"
assert isinstance(start_node_name, str) and len(start_node_name) < 32 and start_node_name.find(' ') == -1, "start_node_name must be a string with less than 32 characters and contain no spaces"
assert isinstance(end_node_name, str) and len(end_node_name) < 32 and end_node_name.find(' ') == -1, "end_node_name must be a string with less than 32 characters and contain no spaces"
assert isinstance(pump_type, str), "pump_type must be a string"
assert isinstance(pump_parameter, (int, float, str)), "pump_parameter must be a float or string"
assert isinstance(speed, (int, float)), "speed must be a float"
assert isinstance(pattern, (type(None), str)), "pattern must be a string"
assert isinstance(initial_status, (str, LinkStatus)), "initial_status must be a string or LinkStatus"
if isinstance(initial_status, str):
initial_status = LinkStatus[initial_status]
if pump_type.upper() == 'POWER':
pump = PowerPump(name, start_node_name, end_node_name, self)
pump.power = pump_parameter
elif pump_type.upper() == 'HEAD':
pump = HeadPump(name, start_node_name, end_node_name, self)
pump.pump_curve_name = pump_parameter
else:
raise ValueError('pump_type must be "POWER" or "HEAD"')
pump.base_speed = speed
pump.initial_status = initial_status
pump.speed_pattern_name = pattern
self[name] = pump | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_pump(self, name, start_node_name, end_node_name, pump_type='POWER',\n pump_parameter=50.0, speed=1.0, pattern=None, initial_status='OPEN'):\n self._link_reg.add_pump(name, start_node_name, end_node_name, pump_type, \n pump_parameter, speed, pattern, initial_status)",
"def add(self, p):\n self._pumps.add(p)",
"def add_lump(self, lumpname, lump):\n assert self.mode == 'W', \"Cannot write a WAD opened in read mode. \" \\\n \"Please consider copying your WAD() into a new one \" \\\n \"using to_bytes and from_bytes methods\"\n if lump is None:\n lump_bytes = bytes()\n else:\n lump_bytes = lump.to_bytes()\n size = len(lump_bytes)\n self['directory'].append(LumpInfo(filepos=self.current_lump_offset, size=size, name=lumpname))\n self['lumps'].append(lump_bytes)\n # Updating directory and header information\n self.current_lump_offset += size\n self['header']['numlumps'] += 1\n # The infotableoffset is always kept at the end of the file\n self['header']['infotableofs'] = self.current_lump_offset",
"def __manage_pump(self):\r\n with self.config_lock:\r\n if self.config['pump_auto_control'] == False:\r\n # Controller doesn't need to do anything about the pump as it is in manual control mode\r\n pass\r\n else:\r\n # Pump is in automatic mode\r\n if self.config['\"pump_auto_control_mode'] == 'normally_off':\r\n # For current functionality there is nothing that can force the pump to turn on (e.g.\r\n # fire extinguishing).\r\n pass\r\n else:\r\n # Pump is normally on.\r\n pump_parameters = self.well_tank_dev.parameters\r\n if self.config['pump_auto_control_turn_off_when_well_empty']:\r\n if pump_parameters ['well_water_presence'] == 'not_present':\r\n # No water in the well\r\n self.well_tank_dev.send_command('pump', 'turn_off')\r\n else:\r\n # Water in the well is present\r\n if self.config['pump_auto_control_turn_off_when_tank_full']:\r\n if pump_parameters['tank'] == 'full':\r\n self.well_tank_dev.send_command('pump', 'turn_off')\r\n else:\r\n self.well_tank_dev.send_command('pump', 'turn_on')\r\n else:\r\n # Do not turn off the pump if the well is empty\r\n if self.config['pump_auto_control_turn_off_when_tank_full']:\r\n if pump_parameters ['tank'] == 'full':\r\n self.well_tank_dev.send_command('pump', 'turn_off')\r\n else:\r\n self.well_tank_dev.send_command('pump', 'turn_on')\r\n else:\r\n # Do not trun off the pump when the tank is full\r\n self.well_tank_dev.send_command('pump', 'turn_on')",
"def pump_water(pump_pin, delay=1):\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup(pump_pin, GPIO.OUT)\n timeout = time.time() + 1.5*60 # 1.5 minutes\n\n try:\n print \"Watering plant...\"\n GPIO.output(pump_pin, GPIO.HIGH)\n\n while get_percent_wet() < 75:\n time.sleep(delay)\n if time.time() > timeout:\n break\n\n GPIO.output(pump_pin, GPIO.LOW)\n GPIO.cleanup(pump_pin)\n return\n\n except:\n GPIO.cleanup(pump_pin)\n\n return",
"def add_wolf_to_pack(self, wolf):\n logging.debug(\"Adding wolf {} to pack\".format(wolf.unique_id))\n # When a Wolf is part of a pack\n if (not wolf.pack):\n self.model.schedule.remove(wolf)\n self.model.grid.remove_agent(wolf)\n wolf.pack = True\n self.wolves.append(wolf)",
"def __init__(self, initScript=None):\n super(Pump, self).__init__(initScript)\n \n # the isentropic compressor\n self.ideal = IdealPump()\n self.AddUnitOperation(self.ideal, 'Ideal')\n \n # a heater to add the waste heat to the outlet\n self.waste = Heater.Heater()\n self.AddUnitOperation(self.waste, 'Waste')\n self.waste.GetPort(DELTAP_PORT).SetValue(0.0, FIXED_V)\n \n # connect them\n self.ConnectPorts('Ideal', OUT_PORT, 'Waste', IN_PORT)\n\n # energy sensors (needed for signals)\n self.idealQ = Sensor.EnergySensor()\n self.AddUnitOperation(self.idealQ, 'IdealQ')\n self.ConnectPorts('Ideal', IN_PORT + 'Q', 'IdealQ', OUT_PORT)\n \n self.wasteQ = Sensor.EnergySensor()\n self.AddUnitOperation(self.wasteQ, 'WasteQ')\n self.ConnectPorts('Waste', IN_PORT + 'Q', 'WasteQ', OUT_PORT)\n\n self.totalQ = Sensor.EnergySensor()\n self.AddUnitOperation(self.totalQ, 'TotalQ')\n \n # create a signal stream for the efficiency\n self.effStream = Stream.Stream_Signal()\n self.effStream.SetParameterValue(SIGTYPE_PAR, GENERIC_VAR)\n self.AddUnitOperation(self.effStream, 'EfficiencySig')\n \n #set relation between ideal and total Q\n self.set = Set.Set()\n self.AddUnitOperation(self.set, 'Set')\n self.set.SetParameterValue(SIGTYPE_PAR, ENERGY_VAR)\n self.set.GetPort(Set.ADD_PORT).SetValue(0.0, FIXED_V)\n self.ConnectPorts('TotalQ',SIG_PORT, 'Set', SIG_PORT + '0')\n self.ConnectPorts('IdealQ',SIG_PORT, 'Set', SIG_PORT + '1')\n self.ConnectPorts('EfficiencySig', OUT_PORT, 'Set', Set.MULT_PORT)\n \n # energy stream balance\n self.mix = Balance.BalanceOp()\n self.AddUnitOperation(self.mix, 'Mix')\n self.mix.SetParameterValue(NUSTIN_PAR + Balance.S_ENE, 1)\n self.mix.SetParameterValue(NUSTOUT_PAR + Balance.S_ENE, 2)\n self.mix.SetParameterValue(Balance.BALANCETYPE_PAR, Balance.ENERGY_BALANCE)\n \n # connect the mixer ports\n self.ConnectPorts('IdealQ',IN_PORT,'Mix',OUT_PORT + 'Q0')\n self.ConnectPorts('WasteQ',IN_PORT,'Mix',OUT_PORT + 'Q1')\n self.ConnectPorts('TotalQ',OUT_PORT,'Mix', IN_PORT + 'Q0')\n \n # export the flow ports\n self.BorrowChildPort(self.ideal.GetPort(IN_PORT), IN_PORT)\n self.BorrowChildPort(self.waste.GetPort(OUT_PORT), OUT_PORT)\n self.BorrowChildPort(self.totalQ.GetPort(IN_PORT), IN_PORT + 'Q')\n self.BorrowChildPort(self.effStream.GetPort(IN_PORT), EFFICIENCY_PORT)\n self.BorrowChildPort(self.ideal.GetPort(DELTAP_PORT), DELTAP_PORT)\n \n #Change the type of the energy port such that it is in Work units and scaling\n self.totalQ.GetPort(IN_PORT).GetProperty().SetTypeByName(WORK_VAR)",
"def set_pump(self, pump: str, state: bool):\r\n if pump == \"NF\":\r\n if state:\r\n self.msg_send_upr.data[:3] = [0x11, 0x00, 0x01]\r\n else:\r\n self.msg_send_upr.data[:3] = [0x11, 0x00, 0x02]\r\n elif pump == \"NT\":\r\n if state:\r\n self.msg_send_upr.data[:3] = [0x11, 0x00, 0x03]\r\n else:\r\n self.msg_send_upr.data[:3] = [0x11, 0x00, 0x04]\r\n self.send_and_flush(self.msg_send_upr)",
"def addpool(miner: Miner, pool):\n api = MinerApi(host=miner.ipaddress, port=int(miner.port))\n jaddpool = api.addpool(\"{0},{1},{2}\".format(pool.url, pool.user, \"x\"))\n return jaddpool[\"STATUS\"][0][\"Msg\"]",
"def create_pumper():\n return _Kalamazoo()",
"def __init__(\n self,\n pump_io: HamiltonPumpIO,\n syringe_volume: str,\n name: str,\n address: int = 1,\n **config,\n ):\n super().__init__(name)\n # HamiltonPumpIO\n self.pump_io = pump_io\n ML600._io_instances.add(self.pump_io) # See above for details.\n\n # Pump address is the pump sequence number if in chain. Count starts at 1, default.\n self.address = int(address)\n\n # Syringe pumps only perform linear movement, and the volume displaced is function of the syringe loaded.\n try:\n self.syringe_volume = ureg.Quantity(syringe_volume)\n except AttributeError as attribute_error:\n logger.error(f\"Invalid syringe volume {syringe_volume}!\")\n raise InvalidConfiguration(\n \"Invalid syringe volume provided.\"\n \"The syringe volume is a string with units! e.g. '5 ml'\"\n ) from attribute_error\n\n if self.syringe_volume.m_as(\"ml\") not in ML600.VALID_SYRINGE_VOLUME:\n raise InvalidConfiguration(\n f\"The specified syringe volume ({syringe_volume}) is invalid!\\n\"\n f\"The volume (in ml) has to be one of {ML600.VALID_SYRINGE_VOLUME}\"\n )\n\n self._steps_per_ml = ureg.Quantity(f\"{48000 / self.syringe_volume} step/ml\")\n self._offset_steps = 100 # Steps added to each absolute move command, to decrease wear and tear at volume = 0\n self._max_vol = (48000 - self._offset_steps) * ureg.step / self._steps_per_ml\n\n # This enables to configure on per-pump basis uncommon parameters\n self.config = ML600.DEFAULT_CONFIG | config",
"def setPump(self, time, wait):\n c =\"/cli:python /app:matrix /cmd:pump /time:\"+str(time)+ \" /value:\"+ str(value)\n self.addtoCMDlist(c)\n self.sendCMDlist()",
"def add_bag(self, bag, quantity):\n self.bags.append((bag, quantity))",
"def pumps(self): \n return self._link_reg.pumps",
"def add_pack_to_pack(self, pack):\n logging.debug(\"Merging packs\")\n for wolf in pack.wolves:\n self.add_wolf_to_pack(wolf)\n logging.debug(\"Pack is now {} wolves\".format(len(self.wolves)))\n self.model.schedule.remove(pack)\n self.model.grid.remove_agent(pack)",
"def _add_population(self, population):\n self._populations.append(population)",
"def _add_lamp_outlet(self, model):\r\n\r\n # Create a new CameraItem and set the model\r\n item = LampOutletItem()\r\n item.setModel(model)\r\n\r\n # Create a new CameraInfoWidget and set the model\r\n widget = LampOutletInfoWidget()\r\n widget.setModel(model)\r\n\r\n item.double_clicked.connect(widget.show)\r\n item.deleteSocketAction.connect(model.prepare_for_deletion)\r\n\r\n self.scene().addItem(item)\r\n proxy = self.scene().addWidget(widget)\r\n widget.setProxy(proxy)",
"def pump_view(request):\n form = BuildingForm(retrofit_type='Pump')\n return render(request, 'pump.html', context={'form':form})",
"def _add_pool ( self, pool ):\n self._pool_id += 1\n try:\n self._poolstack.append ( pool )\n except:\n self._pool_id -= 1\n raise\n\n self._update_resolver()",
"def l7pool_add(env, identifier, **args):\n\n mgr = SoftLayer.LoadBalancerManager(env.client)\n uuid, _ = mgr.get_lbaas_uuid_id(identifier)\n\n pool_main = {\n 'name': args.get('name'),\n 'loadBalancingAlgorithm': args.get('method'),\n 'protocol': args.get('protocol')\n }\n\n pool_members = list(args.get('server'))\n\n pool_health = {\n 'interval': args.get('healthinterval'),\n 'timeout': args.get('healthtimeout'),\n 'maxRetries': args.get('healthretry'),\n 'urlPath': args.get('healthpath')\n }\n\n pool_sticky = {\n 'type': args.get('sticky')\n }\n\n try:\n mgr.add_lb_l7_pool(uuid, pool_main, pool_members, pool_health, pool_sticky)\n click.secho(\"Success\", fg='green')\n except SoftLayerAPIError as exception:\n click.secho(f\"ERROR: {exception.faultString}\", fg='red')",
"def add(self, p, s, node) -> None:\n self.place.append(p)\n self.station.append(s)\n self.pi.append(node.pi[p, s] if p != float('inf') else float('inf'))\n self.noncoverage.append(node.noncoverage.left + node.noncoverage.right)\n self.cost.append(node.cost)\n self.delay.append(node.delay)\n self.step.append(node.key)",
"def _add_bal(self):\n\n c = self.components\n p = self.pipes\n\n # TODO No mass flow reversal yet\n if self.temperature_driven:\n\n lines = self.params['lines'].v()\n\n self.block.mix_temp = Var(self.TIME, lines)\n\n def _temp_bal_incoming(b, t, l):\n\n incoming_comps = collections.defaultdict(list)\n incoming_pipes = collections.defaultdict(list)\n\n for name, comp in c.items():\n if value(comp.get_mflo(t)) >= 0:\n incoming_comps['supply'].append(name)\n else:\n incoming_comps['return'].append(name)\n\n for name, pipe in p.items():\n if value(pipe.get_edge_mflo(self.name, t)) >= 0:\n incoming_pipes['supply'].append(name)\n else:\n incoming_pipes['return'].append(name)\n # Zero mass flow rate:\n if value(\n sum(c[comp].get_mflo(t) for comp in incoming_comps[l]) + \\\n sum(p[pipe].get_edge_mflo(self.name, t) for pipe in\n incoming_pipes[l])) == 0:\n # mixed temperature is average of all joined pipes, actual value should not matter,\n # because packages in pipes of this time step will have zero size and components do not take over\n # mixed temperature in case there is no mass flow\n\n return b.mix_temp[t, l] == (\n sum(c[comp].get_temperature(t, l) for comp in c) +\n sum(p[pipe].get_temperature(self.name, t, l) for\n pipe in p)) / (\n len(p) + len(c))\n\n\n else: # mass flow rate through the node\n return (sum(\n c[comp].get_mflo(t) for comp in incoming_comps[l]) +\n sum(p[pipe].get_edge_mflo(self.name, t) for pipe in\n incoming_pipes[l])) * b.mix_temp[t, l] == \\\n sum(c[comp].get_mflo(t) * c[comp].get_temperature(t,\n l)\n for comp in incoming_comps[l]) + \\\n sum(p[pipe].get_edge_mflo(self.name, t) * p[\n pipe].get_edge_temperature(self.name, t, l)\n for pipe in incoming_pipes[l])\n\n self.block.def_mixed_temp = Constraint(self.TIME,\n lines,\n rule=_temp_bal_incoming)\n\n def _temp_bal_outgoing(b, t, l, comp):\n\n outgoing_comps = collections.defaultdict(list)\n outgoing_pipes = collections.defaultdict(list)\n\n for name, comp_obj in c.items():\n if comp_obj.get_mflo(t) >= 0:\n outgoing_comps['return'].append(name)\n else:\n outgoing_comps['supply'].append(name)\n\n for name, pipe_obj in p.items():\n if pipe_obj.get_edge_mflo(self.name, t) >= 0:\n outgoing_pipes['return'].append(name)\n else:\n outgoing_pipes['supply'].append(name)\n\n if t == 0:\n return Constraint.Skip\n if comp in outgoing_pipes[l]:\n return p[comp].get_edge_temperature(self.name, t, l) == \\\n b.mix_temp[t, l]\n elif comp in outgoing_comps[l]:\n return c[comp].get_temperature(t, l) == b.mix_temp[t, l]\n else:\n return Constraint.Skip\n\n self.block.outgoing_temp_comps = Constraint(self.TIME,\n lines,\n c.keys(),\n rule=_temp_bal_outgoing)\n self.block.outgoing_temp_pipes = Constraint(self.TIME,\n lines,\n p.keys(),\n rule=_temp_bal_outgoing)\n\n elif self.repr_days is None:\n\n def _heat_bal(b, t):\n return 0 == sum(\n self.components[i].get_heat(t) for i in self.components) \\\n + sum(\n pipe.get_edge_heat(self.name, t) for pipe in p.values())\n\n self.block.ineq_heat_bal = Constraint(self.TIME,\n rule=_heat_bal)\n\n def _mass_bal(b, t):\n return 0 == sum(\n self.components[i].get_mflo(t) for i in self.components) \\\n + sum(\n pipe.get_edge_mflo(self.name, t) for pipe in p.values())\n\n self.block.ineq_mass_bal = Constraint(self.TIME,\n rule=_mass_bal)\n\n else:\n def _heat_bal(b, t, c):\n return 0 == sum(\n self.components[i].get_heat(t, c) for i in\n self.components) \\\n + sum(\n pipe.get_edge_heat(self.name, t, c) for pipe in p.values())\n\n self.block.ineq_heat_bal = Constraint(self.TIME, self.REPR_DAYS,\n 
rule=_heat_bal)\n\n def _mass_bal(b, t, c):\n return 0 == sum(\n self.components[i].get_mflo(t, c) for i in\n self.components) \\\n + sum(\n pipe.get_edge_mflo(self.name, t, c) for pipe in p.values())\n\n self.block.ineq_mass_bal = Constraint(self.TIME, self.REPR_DAYS,\n rule=_mass_bal)",
"def add(self, mp):\n \n self.tile_contents.append(mp)\n if(self.tile_contents[-1].raised == False):\n self.paint_blocks += 1.00",
"def publish_watering_message(uid):\n d = dict()\n d['watering'] = dict()\n d['watering']['timestamp'] = time.time()\n d['watering']['uid'] = uid\n\n message = json.dumps(d)\n logging.info('Publish watering request: %s', message)\n paho.mqtt.publish.single('planteur/watering', message)",
"def water_uptake_apsim(self, soil):\r\n soil_wat_avail = np.zeros(soil.total_layers)\r\n soil_wat_supply = np.zeros(soil.total_layers)\r\n daily_ref_evap_transp = soil.daily_ref_evap_transp\r\n transp_pot = daily_ref_evap_transp * self.light_intercpt\r\n # Water available in each layer [mm]\r\n for lyr in soil.layers:\r\n soil_wat_avail[lyr] = ((soil.water_content[lyr] -\r\n soil.perm_wilt_point[lyr]) *\r\n soil.layer_thickness[lyr] *\r\n soil.WATER_DENSITY)\r\n # Water supply\r\n for lyr in soil.layers:\r\n soil_wat_supply[lyr] = (soil_wat_avail[lyr] * soil.kl[lyr])\r\n\r\n # Water uptake (no supply or demand)\r\n if (soil_wat_supply.sum() <= 0) or (transp_pot <= 0):\r\n for lyr in soil.layers:\r\n self.water_uptake[lyr] = 0\r\n else:\r\n # Water uptake (water is not limiting)\r\n if transp_pot < soil_wat_supply.sum():\r\n # distribute demand proportionately to the water supply\r\n for lyr in soil.layers:\r\n self.water_uptake[lyr] = (soil_wat_supply[lyr] /\r\n soil_wat_supply.sum() *\r\n transp_pot)\r\n else:\r\n # Water uptake (water is limiting)\r\n for lyr in soil.layers:\r\n self.water_uptake[lyr] = soil_wat_supply[lyr]\r\n\r\n self.att_transp = self.water_uptake.sum() # mm/day\r\n self.cum_transp += self.att_transp # mm\r\n self.transp_ratio = self.att_transp / transp_pot\r\n self.expect_transp = transp_pot\r\n self.cum_pot_transp += transp_pot",
"def add_population(self, net_id, component_id, type_, **args):\n nmlnetwork = NeuroMLSimpleNetwork(net_id)\n nmlnetwork.add_component(component_id, type_, **args)\n self._population = nmlnetwork.build()",
"def add_to_pool(self, data: str):\n self.pool.append(data)",
"def push(self):\n self.stack.append(self.save())",
"def add(self, datapath):\n self.logger.info(\"Adding switch dpid=%s\", datapath.id)\n self.switch[datapath.id] = Switch(self._nmeta, self.logger,\n self._config, datapath)\n return 1",
"def have_pump(self, pump):\n if pump > MAX_PUMPS:\n return False\n return bool(self.pump_array[pump])"
] | [
"0.6585117",
"0.59037",
"0.52663594",
"0.5242984",
"0.52134216",
"0.5176983",
"0.50372326",
"0.5008027",
"0.4910384",
"0.48776206",
"0.4863274",
"0.48111284",
"0.47950867",
"0.47862783",
"0.4781908",
"0.47765955",
"0.47638512",
"0.47573772",
"0.4748549",
"0.47304165",
"0.47296476",
"0.472738",
"0.47127795",
"0.4707023",
"0.47057778",
"0.47017372",
"0.46876305",
"0.4684804",
"0.4668763",
"0.4632946"
] | 0.64027154 | 1 |
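The row above pairs the query "Adds a pump to the water network model." with the registry-level implementation; the snippets appear to be drawn from WNTR's WaterNetworkModel API, so a minimal usage sketch follows. The model construction, junction names, and parameter values below are illustrative assumptions, not part of the dataset.

```python
# Minimal usage sketch, assuming the add_pump wrapper shown in the first
# negative above is exposed by WNTR's WaterNetworkModel
# (junction names and values are illustrative).
import wntr

wn = wntr.network.WaterNetworkModel()
wn.add_junction('J1', elevation=10.0)
wn.add_junction('J2', elevation=25.0)

# POWER pump: pump_parameter is taken as a fixed power value
wn.add_pump('PUMP-1', 'J1', 'J2', pump_type='POWER', pump_parameter=75.0,
            speed=1.0, initial_status='OPEN')
```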
Adds a valve to the water network model. | def add_valve(self, name, start_node_name, end_node_name,
diameter=0.3048, valve_type='PRV', minor_loss=0.0,
initial_setting=0.0, initial_status='ACTIVE'):
assert isinstance(name, str) and len(name) < 32 and name.find(' ') == -1, "name must be a string with less than 32 characters and contain no spaces"
assert isinstance(start_node_name, str) and len(start_node_name) < 32 and start_node_name.find(' ') == -1, "start_node_name must be a string with less than 32 characters and contain no spaces"
assert isinstance(end_node_name, str) and len(end_node_name) < 32 and end_node_name.find(' ') == -1, "end_node_name must be a string with less than 32 characters and contain no spaces"
assert isinstance(diameter, (int, float)), "diameter must be a float"
assert isinstance(valve_type, str), "valve_type must be a string"
assert isinstance(minor_loss, (int, float)), "minor_loss must be a float"
assert isinstance(initial_setting, (int, float, str)), "initial_setting must be a float or string"
assert isinstance(initial_status, (str, LinkStatus)), "initial_status must be a string or LinkStatus"
if isinstance(initial_status, str):
initial_status = LinkStatus[initial_status]
start_node = self._node_reg[start_node_name]
end_node = self._node_reg[end_node_name]
valve_type = valve_type.upper()
# A PRV, PSV or FCV cannot be directly connected to a reservoir or tank (use a length of pipe to separate the two)
if valve_type in ['PRV', 'PSV', 'FCV']:
if type(start_node)==Tank or type(end_node)==Tank or type(start_node)==Reservoir or type(end_node)==Reservoir:
msg = '%ss cannot be directly connected to a tank. Add a pipe to separate the valve from the tank.' % valve_type
logger.error(msg)
raise RuntimeError(msg)
if type(start_node)==Reservoir or type(end_node)==Reservoir:
msg = '%ss cannot be directly connected to a reservoir. Add a pipe to separate the valve from the reservoir.' % valve_type
logger.error(msg)
raise RuntimeError(msg)
# TODO check the following: PRVs cannot share the same downstream node or be linked in series
# TODO check the following: Two PSVs cannot share the same upstream node or be linked in series
# TODO check the following: A PSV cannot be connected to the downstream node of a PRV
if valve_type == 'PRV':
valve = PRValve(name, start_node_name, end_node_name, self)
valve.initial_setting = initial_setting
valve._setting = initial_setting
elif valve_type == 'PSV':
valve = PSValve(name, start_node_name, end_node_name, self)
valve.initial_setting = initial_setting
valve._setting = initial_setting
elif valve_type == 'PBV':
valve = PBValve(name, start_node_name, end_node_name, self)
valve.initial_setting = initial_setting
valve._setting = initial_setting
elif valve_type == 'FCV':
valve = FCValve(name, start_node_name, end_node_name, self)
valve.initial_setting = initial_setting
valve._setting = initial_setting
elif valve_type == 'TCV':
valve = TCValve(name, start_node_name, end_node_name, self)
valve.initial_setting = initial_setting
valve._setting = initial_setting
elif valve_type == 'GPV':
valve = GPValve(name, start_node_name, end_node_name, self)
valve.headloss_curve_name = initial_setting
valve.initial_status = initial_status
valve.diameter = diameter
valve.minor_loss = minor_loss
self[name] = valve | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_valve(self, name, start_node_name, end_node_name,\n diameter=0.3048, valve_type='PRV', minor_loss=0.0, \n initial_setting=0.0, initial_status='ACTIVE'):\n self._link_reg.add_valve(name, start_node_name, end_node_name, diameter, \n valve_type, minor_loss, initial_setting, initial_status)",
"def add_edge(self, u, v, val):\n raise NotImplementedError()",
"def append(self, v):\n self.data.append(v)",
"def add(self, value):",
"def add(self, value):\n pass",
"def addEdge(self,u,v):\r\n self.graph[u].append(v)",
"def set_is_watering(valve: Valve, value: bool) -> None:\n valve.is_watering = value",
"def append(self, val):\n self.val.append(val)",
"def add(self, name, value) -> None:\n ...",
"def AddEarthVelocity(self, ds):\n self.IsEarthVelocity = True\n self.EarthVelocity = ds",
"def add_edge(self, u, v):\n self.graph[u].append(v)",
"def add(self, val):\n key = self.get_key(val)\n self.store.add(key)\n\n # Keep track of summary stats\n self._count += 1\n self._sum += val\n if val < self._min:\n self._min = val\n if val > self._max:\n self._max = val",
"def add(self, value):\n self._resolve_copies()\n self.data.append(value)",
"def addEdge(self,u,v,w):\r\n self.graph.append([u,v,w])",
"def add_value(self, value):\n self.value = value",
"def add_electrode(self, e, name, kind, volt):\r\n\t\te.volt = volt\r\n\t\tself.electrode_dict[name] = (kind, e)\r\n\t\tif kind=='dc':\r\n\t\t\tself.dc_electrode_list.append((name,e))\r\n\t\tif kind=='rf':\r\n\t\t\tself.rf_electrode_list.append((name,e))",
"def add_vertex(self, u, val):\n raise NotImplementedError()",
"def add_data_single(self, pt, val):\n # It doesn't look like GPytorch has a way to add on data,\n # so we just have to create a new object.\n old_x = self.gp_core.train_inputs[0]\n old_y = self.gp_core.train_targets\n tensor_pt = torch.from_numpy(pt).reshape(1, len(pt))\n new_x = torch.cat((old_x, tensor_pt)).float()\n new_y = torch.cat((old_y, torch.tensor([val]).float())).float()\n self.gp_core = ExactGPModel(new_x, new_y, self.gp_core.covar_module,\n self.gp_core.likelihood)",
"def add_data_single(self, pt, val):\n self.gp_core.add_data_single(pt, val)",
"def add_ens(self, ens):\n self.plotly_bt_range.add_ens(ens)",
"def add_data_single(self, pt, val):\n raise NotImplementedError('Abstract Method')",
"def add(self, featVect, label):\n if label in self.labelToNum:\n l = self.labelToNum[label]\n else:\n l = len(self.numToLabel)\n self.numToLabel.append(label)\n self.labelToNum[label] = l\n \n self.blocks.append((featVect.reshape((1,featVect.shape[0])).astype(numpy.double),[l]))",
"def addAresta(self,u,v,peso):\n self.grafo.append([u,v,peso])",
"def add_cost_value(self, var_name, val):\n self.add_other_value(self.cost, var_name, val)\n\n # add the change of cost automatically\n if len(self.cost[var_name]) > 1: # if the cost is not empty\n last_val = self.get_last_last_cost_val(var_name)\n cost_change = abs(val - last_val) / last_val\n self.add_other_value(self.cost_change, var_name + '_change', cost_change)",
"def add_fleet(self, index, *args, **kw):\n\n fleetid = self.fleets.append(ListNode(\"{0!s}\".format(kw.get(\"name\", \"Fleet {0:d}\".format(index))), [\n ListNode(\"Nodes\"),\n ListNode(\"Behaviours\", data=kw.get(\n \"behaviours\", self.defaults[2].get_data()))\n ])\n )\n for i in range(kw.get(\"nodes\", 1)):\n self.add_node(fleetid)",
"def __init__(self, upstream=None, downstream=None,\n name='', Kv = 0.0, mdot0 = 0.0, verbose=0): \n global _valvecount\n if name == '':\n name = 'Valve_'+`_valvecount`\n _valvecount += 1\n FlowDevice.__init__(self,3,name,verbose)\n if upstream and downstream:\n self.install(upstream, downstream)\n self.setValveCoeff(Kv)",
"def add_var(self, name, comp):\n self._main_model.add_var(name, comp)",
"def add(self, value: object) -> None:\n self.da.append(value)",
"def add(self, value: object) -> None:\n self.da.append(value)",
"async def added(self, value):\n pass"
] | [
"0.6676488",
"0.58828056",
"0.5747676",
"0.5671536",
"0.56325495",
"0.55430126",
"0.55213994",
"0.55173045",
"0.5460681",
"0.541968",
"0.5416407",
"0.53715646",
"0.53324413",
"0.53102165",
"0.5299238",
"0.52667105",
"0.5255671",
"0.5233254",
"0.520563",
"0.51657045",
"0.515628",
"0.51245457",
"0.5118475",
"0.5094947",
"0.5048698",
"0.5045614",
"0.50395805",
"0.50281805",
"0.50281805",
"0.50246626"
] | 0.6390443 | 1 |
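The add_valve row above can be exercised the same way through the model-level wrapper shown in its first negative; a short sketch follows, again assuming the WNTR WaterNetworkModel API, with illustrative names, elevations, and setting.

```python
# Sketch of the add_valve wrapper shown above (WNTR-style API assumed;
# node names and the PRV setting are illustrative).
import wntr

wn = wntr.network.WaterNetworkModel()
wn.add_junction('J1', elevation=30.0)
wn.add_junction('J2', elevation=5.0)

# Pressure-reducing valve; initial_setting is the valve's pressure setting.
# Note the rule in the document: a PRV/PSV/FCV may not attach directly to
# a tank or reservoir, so both endpoints here are junctions.
wn.add_valve('PRV-1', 'J1', 'J2', diameter=0.3048, valve_type='PRV',
             initial_setting=15.0)
```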
Generator to get all pipes with check valves Yields | def check_valves(self):
for name in self._pipes:
if self._data[name].check_valve:
yield name | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pipes(self):\n for name in self._pipes:\n yield name, self._data[name]",
"def run(self):\n assert len(self.elements) >= 2, \"In order flow, pipe needs 2 or more elements\"\n in_pipe = self.elements[0]\n other_pipes = self.elements[1:-1]\n out_pipe = self.elements[-1]\n\n self.make_assertions(in_pipe, other_pipes, out_pipe)\n\n for data in in_pipe.grasp():\n write = True\n\n for element in other_pipes:\n if isinstance(element, elements.DataPypElement):\n data = element.extend(data)\n elif isinstance(element, elements.FilterPypElement):\n if not element.stay(data):\n write = False\n break\n if write:\n out_pipe.extract(data)",
"def pipes(self): \n return self._link_reg.pipes",
"def fission_pipes():\n def _pipes(num):\n return [base.BasePipe(i) for i in range(1, num + 1)]\n yield _pipes\n base.reset()",
"def get_processes():\n yield from psutil.process_iter()",
"def get_pipes(self, num = 1):\n if self.api is None:\n self.api = ChessAPI(self)\n self.api.start()\n return [self.api.create_pipe() for _ in range(num)]",
"def piped(self):\n\t\tpass",
"def run(self):\n checks = [\n self.check_files_permissions,\n self.check_suid_bin,\n self.check_nfs_root_squashing,\n self.is_docker_installed,\n self.check_sudo_rules,\n self.get_possible_exploit,\n ]\n\n for check in checks:\n yield check()",
"def items():\n for output in outputs:\n if isinstance(output, boolfunc.Function):\n output = output.unbox()\n if output in (0, \"0\"):\n yield PC_ZERO\n elif output in (1, \"1\"):\n yield PC_ONE\n elif output in \"-xX\":\n yield PC_DC\n else:\n fstr = \"expected output in [01-xX], got {}\"\n raise ValueError(fstr.format(output))",
"def run(self):\r\n for pipe in self.inputs:\r\n for row in pipe.rows():\r\n self.put(row)",
"def fission_pipe():\n yield base.BasePipe(1)\n base.reset()",
"def distributor(ls_feed_pipe_open,low,high):\n def getNumber(low,high):\n i = low\n if i%2 == 0: #if i is even, then start from i+1 odd.\n i += 1\n while i<=high:\n yield i\n i+=2 #no need to check for even numbers, so skip it here at begining\n yield -1 #when generator yields -1, it reached high, so terminate\n\n next_pipe = 0\n number = getNumber(low,high)\n while True:\n msg = next(number)\n if msg == -1: #to check when generator reached high.\n break\n else:\n #feed pipes in a round robin fashion,\n #so that over time each generatePrime process experiences same load.\n ls_feed_pipe_open[next_pipe].send(msg)\n next_pipe += 1\n if next_pipe == len(ls_feed_pipe_open):\n next_pipe = 0\n for p in ls_feed_pipe_open:\n p.send(-1) #-1 is sentinel value for all generatePrime processs\n return 0",
"def items():\n for point in boolfunc.iter_points(inputs):\n # pylint: disable=C0103\n ab = self.restrict(point).pcdata[0]\n cd = other.restrict(point).pcdata[0]\n # a | c, b & d\n yield ((ab | cd) & 2) | ((ab & cd) & 1)",
"def items():\n for point in boolfunc.iter_points(inputs):\n # pylint: disable=C0103\n ab = self.restrict(point).pcdata[0]\n cd = other.restrict(point).pcdata[0]\n # a & c, b | d\n yield ((ab & cd) & 2) | ((ab | cd) & 1)",
"def get_pipelines() -> Iterable[DataPipeline]:\n for pipeline_name in get_pipeline_names():\n yield DataPipeline.load(pipeline_name)",
"def _LessPipe():\n try:\n # pylint: disable=unexpected-keyword-arg\n proc = subprocess.Popen(['less'],\n stdin=subprocess.PIPE,\n stdout=sys.stdout,\n encoding='utf-8')\n yield proc.stdin\n proc.stdin.close()\n proc.wait()\n except IOError:\n pass # Happens when less is quit before all data is written.\n except KeyboardInterrupt:\n pass # Assume used to break out of less.",
"def test_spin_loop_no_pipe(self):\n\n # Reset relax.\n reset()\n\n # Function for the problem of catching an error in a generator function.\n def fail_test():\n for spin in mol_res_spin.spin_loop():\n pass\n\n # Test for the no pipe error.\n self.assertRaises(RelaxNoPipeError, fail_test)",
"def input_pipe():\n x = ''\n while True:\n x = yield x\n yield # to keep the generator in lock step with input",
"def evaluate_batch(self, pipelines):",
"def pipe_names(self):\n return self._pipes",
"def make_pipes(self, stderr=True):\r\n\r\n pipes = {'child_stdin':None,\r\n 'stdin':None,\r\n 'stdout':None,\r\n 'child_stdout':None,\r\n 'stderr':None,\r\n 'child_stderr':None}\r\n try:\r\n stdin, child_stdin = os.pipe()\r\n pipes['child_stdin'], pipes['stdin'] = stdin, child_stdin\r\n stdout, child_stdout = os.pipe()\r\n pipes['stdout'], pipes['child_stdout'] = stdout, child_stdout\r\n if stderr:\r\n stderr, child_stderr = os.pipe()\r\n pipes['stderr'], pipes['child_stderr'] = stderr, child_stderr\r\n for fd in (pipes['stdout'], pipes['stderr'], pipes['stdin']):\r\n if fd is not None:\r\n fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | os.O_NDELAY)\r\n return pipes\r\n except OSError:\r\n for fd in pipes.values():\r\n if fd is not None:\r\n self.close_fd(fd)",
"def run(self, data, rewrap=False, prefetch=0):\n if rewrap:\n data = [data]\n\n for pipe in self._pipes:\n pipe.feed(data)\n data = pipe\n else:\n iterable = self._prefetch_callable(data, prefetch) if prefetch else data\n for out_data in iterable:\n yield out_data",
"def test_pipe():\n parser = CmdParser([posandtwo, valprog])\n out = parser.parse(\"posandtwo | valprog\")\n assert isinstance(out[0], ProgramNode)\n assert out[0].program_desc == posandtwo\n assert isinstance(out[1], PipeNode)\n assert isinstance(out[2], ProgramNode)\n assert out[2].program_desc == valprog\n assert isinstance(out[3], EndOfCommandNode)",
"def items():\n for point in boolfunc.iter_points(inputs):\n # pylint: disable=C0103\n ab = self.restrict(point).pcdata[0]\n cd = other.restrict(point).pcdata[0]\n # a & d | b & c, a & c | b & d\n a, b, c, d = ab >> 1, ab & 1, cd >> 1, cd & 1\n yield ((a & d | b & c) << 1) | (a & c | b & d)",
"def __iter__(self):\n\n collector = FIFOArray(self.chunksize, self.axis)\n for arr, maskarr in zip(self.data, self.mask):\n\n if not np.any(maskarr):\n continue\n\n filtered = np.take(arr, np.flatnonzero(maskarr), axis=self.axis)\n collector.put(filtered)\n\n while collector.full():\n\n yield collector.get()\n\n # else runs after normal loop exit -- required here\n else: #pylint: disable=useless-else-on-loop\n\n if collector.qsize() > 0:\n\n yield collector.get()",
"def test_pipeline_basic(mockpipe, testdir):\n test = testdir.makepyfile(TEST_OK)\n result = testdir.inline_run(\n \"-v\",\n f\"--base-pipeline-dir={test.dirname}\",\n test\n )\n passed, skipped, failed = result.listoutcomes()\n\n assert len(passed) == 1\n assert len(skipped) == 0\n assert len(failed) == 0",
"def collect(self):\n while self.proc is not None:\n self.read()\n if not len(self.datalines):\n return\n while len(self.datalines):\n # pop the first node of list\n yield self.datalines.pop(0)",
"def io_pipe():\n r_fd, w_fd = os.pipe()\n with io.open(r_fd, 'rb', 0) as r, \\\n \t io.open(w_fd, 'wb', 0) as w:\n \tyield r, w",
"def check_results(self):\n\n\t\twhile True:\n\n\t\t\t# If no checks left, stop\n\t\t\tif len(self._check_results) == 0:\n\t\t\t\tbreak\n\n\t\t\t# Return earliest result and remove from list\n\t\t\tyield self._check_results.pop(0)",
"def pumps(self):\n for name in self._pumps:\n yield name, self._data[name]"
] | [
"0.69781387",
"0.6245936",
"0.6085474",
"0.6084167",
"0.5932344",
"0.58900034",
"0.566551",
"0.5663454",
"0.5653883",
"0.56096005",
"0.5510788",
"0.54717225",
"0.54694694",
"0.54615724",
"0.5452949",
"0.54185396",
"0.5410619",
"0.53977126",
"0.5377694",
"0.53600794",
"0.5351941",
"0.53357714",
"0.53290635",
"0.53177494",
"0.52998143",
"0.5285061",
"0.52652544",
"0.52513856",
"0.5246127",
"0.5241715"
] | 0.79929996 | 0 |
A list of all pipe names | def pipe_names(self):
return self._pipes | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pipe_name_list(self):\n return list(self._link_reg.pipe_names)",
"def pipes(self): \n return self._link_reg.pipes",
"def pipes(self):\n for name in self._pipes:\n yield name, self._data[name]",
"def get_pipeline_names() -> Iterable[str]:\n for item in sorted((SRC / \"pipelines\").iterdir()):\n if not item.name.startswith(\"_\") and not item.is_file():\n yield item.name",
"def psv_name_list(self):\n return list(self._link_reg.psv_names)",
"def namelist(self):\n return self._handle.namelist()",
"def namelist(self):\n return self._handle.namelist()",
"def pump_name_list(self):\n return list(self._link_reg.pump_names)",
"def pbv_name_list(self):\n return list(self._link_reg.pbv_names)",
"def namelist(self):\n return self._handle.getnames()",
"def namelist(self):\n return self._handle.getnames()",
"def get_vsys_fifo_names(backend):\n return (_VSYS_FMT_IN % backend, _VSYS_FMT_OUT % backend)",
"def power_pump_name_list(self):\n return list(self._link_reg.power_pump_names)",
"def pump_names(self):\n return self._pumps",
"def get_output_names():\n names = [device.name for device in get_devices() if device.is_output]\n return list(sorted(names))",
"def _name_of_all_containers(compose_project: str) -> List[str]:\n run_result = subprocess.run(\n [\n \"docker\",\n \"ps\",\n \"--all\",\n \"--filter\",\n f\"name={compose_project}\",\n \"--format\",\n \"table {{.Names}}\",\n ],\n capture_output=True,\n )\n containers: List[str] = run_result.stdout.decode(\"utf-8\").split(\"\\n\")\n containers = containers[1:] # remove the table column header\n containers = [c for c in containers if c] # filter empty\n if not containers:\n raise ValueError(f\"Couldn't find any containers for '{compose_project}'\")\n return containers",
"def get_command_names(self):\n return list(self.commands.keys())",
"def containers():\n # TODO: can there be multiple names?\n cmd = [ 'docker', 'ps', '--format', '{{.Names}}' ]\n with popen_text(cmd) as docker:\n for ln in docker.stdout:\n yield ln[:-1]",
"def num_pipes(self):\n return len(self._link_reg.pipe_names)",
"def names(self) -> list[str]:",
"def protocol_names(self):\n l = self.protocols()\n retval = [str(k.name) for k in l]\n return retval",
"def output_names(self):\n return []",
"def List(cls):\n\t\tres = {}\n\t\tfor p in glob.glob(\"/proc/*/cmdline\"):\n\t\t\tprocess = p.split(\"/\")[2]\n\t\t\tif cls.RE_PID.match(process):\n\t\t\t\tres[int(process)] = cat(p).replace(\"\\x00\", \" \")\n\t\treturn res",
"def available_shells(self):\n return list(iterkeys(self._shells))",
"def valve_name_list(self):\n return list(self._link_reg.valve_names)",
"def CmdList(self):\n return sorted(self._cmd_alias_list)",
"def gpv_name_list(self):\n return list(self._link_reg.gpv_names)",
"def _go_list(self, *args):\n return subprocess.check_output((\"go\", \"list\") + self.tag_args + args).strip().split(\"\\n\")",
"def names(self) -> List[str]:\n return sorted(self.hyperparams)",
"def namelist(self):\n return []"
] | [
"0.8775803",
"0.7536944",
"0.6878034",
"0.66032434",
"0.62503797",
"0.6237352",
"0.6237352",
"0.6235358",
"0.6111345",
"0.6094931",
"0.6094931",
"0.60878557",
"0.60391945",
"0.6012478",
"0.6002667",
"0.59189594",
"0.58737737",
"0.5866282",
"0.5862265",
"0.5851056",
"0.582319",
"0.5803022",
"0.57661176",
"0.5764388",
"0.5758958",
"0.57483256",
"0.5706244",
"0.5701841",
"0.5692439",
"0.56829983"
] | 0.87508154 | 1 |
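The name-property rows (pipe_names above, and valve_names, pump_names, and so on below) are thin registry getters; their first negatives show the corresponding model-level name-list wrappers. A small sketch under the same WNTR assumption as above, with illustrative names:

```python
# Sketch: reading the name-list wrappers (pipe_name_list, pump_name_list,
# valve_name_list) shown in the negatives of these rows.
import wntr

wn = wntr.network.WaterNetworkModel()
wn.add_junction('J1', elevation=10.0)
wn.add_junction('J2', elevation=12.0)
wn.add_pipe('PIPE-1', 'J1', 'J2', length=100.0, diameter=0.3, roughness=100)

print(wn.pipe_name_list)   # ['PIPE-1']
print(wn.pump_name_list)   # [] -- no pumps added yet
print(wn.valve_name_list)  # [] -- no valves added yet
```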
A list of all valve names | def valve_names(self):
return self._valves | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def valve_name_list(self):\n return list(self._link_reg.valve_names)",
"def names(self) -> list[str]:",
"def all_values(cls) -> List[str]:\n return list(member.value for member in cls.__members__.values())",
"def names(self):\r\n return self.get_field(self.name_field)",
"def psv_name_list(self):\n return list(self._link_reg.psv_names)",
"def names(cls) -> List[str]:",
"def names(self) -> List:\n ...",
"def values(cls) -> t.List[t.Union[str, NameTitle]]:\n return list(cls.__labels__.values())",
"def names(self):\n if type(self.name) is types.StringType:\n return [self.name]\n else:\n return list(self.name)",
"def name(self) -> List[NameAndValue]:\n return self._name",
"def getNames(self) -> List[unicode]:\n ...",
"def valves(self):\n for name in self._valves:\n yield name, self._data[name]",
"def names(self):\n if isinstance(self.name, string_types):\n return [self.name]\n else:\n return list(self.name)",
"def names(self):\n\t\treturn",
"def get_names(self):\n\n # log.debug(str(inspect.stack()[1][3]) + \" --> OC.get_names()\")\n return [x.options['name'] for x in self.get_list()]",
"def getOptionsNames(self) -> List[unicode]:\n ...",
"def get_vlans_list(self):\n return self.vlans.keys()",
"def names(self) -> Sequence[str]:\n return pulumi.get(self, \"names\")",
"def names(self) -> Sequence[str]:\n return pulumi.get(self, \"names\")",
"def names(self):\n return [x for x in self._dict.keys()]",
"def namelist(self):\n return []",
"def gpv_name_list(self):\n return list(self._link_reg.gpv_names)",
"def get_name_value(self):\n name, value = self.get()\n if not isinstance(name, list):\n name = [name]\n if not isinstance(value, list):\n value = [value]\n return list(zip(name, value))",
"def names():\n pass",
"def varNames(self):\n return self.__varNames",
"def var_names(self):\n return self._var_names",
"def names(self):\n return [da.name for da in self]",
"def names(self):\n return self.__names",
"def names(self):\n\n allnames = []\n for term in self.terms:\n allnames += term.names()\n return allnames",
"def names(self):\n\n allnames = []\n for term in self.terms:\n allnames += term.names()\n return allnames"
] | [
"0.8678239",
"0.722796",
"0.70305383",
"0.6951191",
"0.69359225",
"0.6885781",
"0.68206483",
"0.67995125",
"0.67975444",
"0.67893094",
"0.6780042",
"0.6764439",
"0.67487985",
"0.6708615",
"0.66600686",
"0.6612003",
"0.6603266",
"0.65829057",
"0.65829057",
"0.65781415",
"0.65378326",
"0.6524276",
"0.6493863",
"0.6484852",
"0.64847195",
"0.64525825",
"0.6444164",
"0.6424673",
"0.6393233",
"0.6393233"
] | 0.84573394 | 1 |
A list of all pump names | def pump_names(self):
return self._pumps | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pump_name_list(self):\n return list(self._link_reg.pump_names)",
"def power_pump_name_list(self):\n return list(self._link_reg.power_pump_names)",
"def power_pump_names(self):\n return self._power_pumps",
"def head_pump_name_list(self):\n return list(self._link_reg.head_pump_names)",
"def head_pump_names(self):\n return self._head_pumps",
"def pumps(self):\n for name in self._pumps:\n yield name, self._data[name]",
"def get_pump_stringlist(self):\n return text_pump",
"def get_pump_list(self):\n return self.pump_array",
"def pumps(self): \n return self._link_reg.pumps",
"def names(self) -> list[str]:",
"def namelist(self):\n return []",
"def names(self) -> List:\n ...",
"def monomer_names(self):\n output = set()\n for item in self.monomers():\n if item in self.pyranose_fac:\n output.add(self.pyranose_fac[item][\"name\"])\n return list(output)",
"def getNames(self) -> List[unicode]:\n ...",
"def name_get(self):\n result = []\n for r in self:\n result.append((r.id, u\"%s %s\" % ('PO', r.name)))\n return result",
"def get_pinnames(self):\n return self.pnames",
"def namelist(self):\n return self._handle.namelist()",
"def namelist(self):\n return self._handle.namelist()",
"def player_names(players):\r\n string = ''\r\n for p in players:\r\n string = string + p.name + ', '\r\n return string",
"def namelist(self):\n return self._handle.getnames()",
"def namelist(self):\n return self._handle.getnames()",
"def names(cls) -> List[str]:",
"def pump_curve_names(self):\n return list(self._pump_curves)",
"def getNames():\r\n return [\"Server1\", \"Server2\", \"Client1\", \"Client2\"]",
"def get_names(self):\r\n names = []\r\n for p in self.people:\r\n names.append(p.get_name())\r\n return names",
"def names():\n pass",
"def processNames(self):\n # MODIFIED 11/1/16 OLD:\n return list(item.process.name for item in self.process_tuples)\n # # MODIFIED 11/1/16 NEW:\n # return sorted(list(item.process.name for item in self.process_tuples))\n # MODIFIED 11/1/16 END",
"def output_names(self):\n return []",
"def names(self):\n\t\treturn",
"def furanose_names(self):\n output = set()\n for item in self.monomers():\n if item in self.furanose_fac:\n output.add(self.furanose_fac[item][\"name\"])\n return list(output)"
] | [
"0.8727356",
"0.81778276",
"0.7632195",
"0.7526829",
"0.7371685",
"0.69479823",
"0.69024384",
"0.68919235",
"0.6714724",
"0.6562541",
"0.6372503",
"0.632661",
"0.63156277",
"0.6308453",
"0.62279123",
"0.6187299",
"0.61433476",
"0.61433476",
"0.61387813",
"0.6105723",
"0.6105723",
"0.61030614",
"0.6098041",
"0.60729784",
"0.6047872",
"0.60028535",
"0.5965304",
"0.5929705",
"0.59061587",
"0.58774936"
] | 0.8491093 | 1 |
A list of all head pump names | def head_pump_names(self):
return self._head_pumps | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def head_pump_name_list(self):\n return list(self._link_reg.head_pump_names)",
"def pump_name_list(self):\n return list(self._link_reg.pump_names)",
"def power_pump_name_list(self):\n return list(self._link_reg.power_pump_names)",
"def pump_names(self):\n return self._pumps",
"def head_pumps(self):\n for name in self._head_pumps:\n yield name, self._data[name]",
"def power_pump_names(self):\n return self._power_pumps",
"def names(self) -> list[str]:",
"def names(self) -> List:\n ...",
"def get_player_names(self):\n names = [user['name'] for user in self.server.status().raw['players']['sample']]\n return names",
"def getNames():\r\n return [\"Server1\", \"Server2\", \"Client1\", \"Client2\"]",
"def namelist(self):\n return []",
"def names():\n pass",
"def monomer_names(self):\n output = set()\n for item in self.monomers():\n if item in self.pyranose_fac:\n output.add(self.pyranose_fac[item][\"name\"])\n return list(output)",
"def getNames(self) -> List[unicode]:\n ...",
"def head_pumps(self):\n return self._link_reg.head_pumps",
"def tank_name_list(self):\n return list(self._node_reg.tank_names)",
"def get_names(self):\r\n names = []\r\n for p in self.people:\r\n names.append(p.get_name())\r\n return names",
"def full_names(self) -> List[str]:\n self.names = [\n \".\".join(prod)\n for prod in product(*self._namespaces, self.terminals)\n ]\n return self.names",
"def get_pinnames(self):\n return self.pnames",
"def get_names_short(self):\r\n return [p.get_name() for p in self.people]",
"def headloss_curve_names(self):\n return list(self._headloss_curves)",
"def namelist(self):\n return self._handle.getnames()",
"def namelist(self):\n return self._handle.getnames()",
"def names(self):\n\t\treturn",
"def namelist(self):\n return self._handle.namelist()",
"def namelist(self):\n return self._handle.namelist()",
"def get_pump_list(self):\n return self.pump_array",
"def HeadList(self):\n return [(rname, repo.currenthead) for rname, repo in self.repos.items()\n ]",
"def player_names(players):\r\n string = ''\r\n for p in players:\r\n string = string + p.name + ', '\r\n return string",
"def name(self):\n return [o.name for o in self.obs]"
] | [
"0.8724",
"0.7792026",
"0.73323274",
"0.73240656",
"0.66754407",
"0.66447985",
"0.6259627",
"0.6223956",
"0.61419255",
"0.6129673",
"0.611922",
"0.60248595",
"0.60225195",
"0.6019179",
"0.5971755",
"0.5959312",
"0.595692",
"0.5945068",
"0.5944744",
"0.5905057",
"0.58932376",
"0.58773685",
"0.58773685",
"0.58673286",
"0.5834185",
"0.5834185",
"0.58293974",
"0.58071375",
"0.5806054",
"0.57865125"
] | 0.85981405 | 1 |
A list of all power pump names | def power_pump_names(self):
return self._power_pumps | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def power_pump_name_list(self):\n return list(self._link_reg.power_pump_names)",
"def pump_name_list(self):\n return list(self._link_reg.pump_names)",
"def pump_names(self):\n return self._pumps",
"def head_pump_name_list(self):\n return list(self._link_reg.head_pump_names)",
"def head_pump_names(self):\n return self._head_pumps",
"def pumps(self): \n return self._link_reg.pumps",
"def pumps(self):\n for name in self._pumps:\n yield name, self._data[name]",
"def get_pump_list(self):\n return self.pump_array",
"def get_pump_stringlist(self):\n return text_pump",
"def power_pumps(self):\n for name in self._power_pumps:\n yield name, self._data[name]",
"def get_powerups() -> tuple:\n return tuple(PowerUp.powers.keys())",
"def pump_curve_names(self):\n return list(self._pump_curves)",
"def list_power_supply_units(self):\n\n doc = self.client.enumerate(uris.CIM_PowerSupply)\n\n psus = doc.find('.//s:Body/wsen:EnumerateResponse/wsman:Items',\n wsman.NS_MAP)\n\n return [self._parse_psus(psu) for psu in psus]",
"def names(self) -> list[str]:",
"def namelist(self):\n return []",
"def names(self) -> List:\n ...",
"def namelist(self):\n return self._handle.namelist()",
"def namelist(self):\n return self._handle.namelist()",
"def poller_names(self):\n return [i for i in self._config.sections() if i not in ['Local', 'GitHub', 'Logging',\n 'DEFAULT']]",
"def get_pinnames(self):\n return self.pnames",
"def getNames(self) -> List[unicode]:\n ...",
"def listBuilderNames():",
"def listBuilderNames():",
"def name_get(self):\n result = []\n for r in self:\n result.append((r.id, u\"%s %s\" % ('PO', r.name)))\n return result",
"def get_list_powers(self):\r\n return self._api.get_list_powers()",
"def pipe_name_list(self):\n return list(self._link_reg.pipe_names)",
"def monomer_names(self):\n output = set()\n for item in self.monomers():\n if item in self.pyranose_fac:\n output.add(self.pyranose_fac[item][\"name\"])\n return list(output)",
"def namelist(self):\n return self._handle.getnames()",
"def namelist(self):\n return self._handle.getnames()",
"def get_list_powers(self):\r\n return self.ps"
] | [
"0.87139165",
"0.8404074",
"0.83245194",
"0.7125149",
"0.7082756",
"0.6990292",
"0.6640658",
"0.66311836",
"0.65746576",
"0.65421534",
"0.6278142",
"0.6173576",
"0.6115929",
"0.610486",
"0.59376335",
"0.5861546",
"0.58374655",
"0.58374655",
"0.58089185",
"0.5807593",
"0.5805085",
"0.5759457",
"0.5759457",
"0.57584596",
"0.5750538",
"0.5744604",
"0.5744589",
"0.57409286",
"0.57409286",
"0.5737941"
] | 0.84876 | 1 |
A list of all prv names | def prv_names(self):
return self._prvs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def prv_name_list(self):\n return list(self._link_reg.prv_names)",
"def names(self) -> list[str]:",
"def names(self) -> List:\n ...",
"def getNames(self) -> List[unicode]:\n ...",
"def namelist(self):\n return []",
"def names(cls) -> List[str]:",
"def get_names(self):\r\n names = []\r\n for p in self.people:\r\n names.append(p.get_name())\r\n return names",
"def psv_name_list(self):\n return list(self._link_reg.psv_names)",
"def namelist(self):\n return self._handle.namelist()",
"def namelist(self):\n return self._handle.namelist()",
"def namelist(self):\n return self._handle.getnames()",
"def namelist(self):\n return self._handle.getnames()",
"def return_names(self):\n return self.__name_list",
"def names():\n pass",
"def gpv_name_list(self):\n return list(self._link_reg.gpv_names)",
"def pbv_name_list(self):\n return list(self._link_reg.pbv_names)",
"def all_names(cls) -> List[str]:\n return list(member_name for member_name in cls.__members__.keys())",
"def pyranose_names(self):\n output = set()\n for item in self.pyranoses():\n if item in self.pyranose_fac:\n output.add(self.pyranose_fac[item][\"name\"])\n return list(output)",
"def get_names(self):\n return self.names",
"def getnames(self) -> List[Dict[str, Any]]:\n # NOTE: warning this does not yet support pagination\n return self.rpc_call(\"getnames\")",
"def get_names(dep):\n res = [dep.name]\n return res",
"def get_ordered_adversary_names(self) -> List[str]:\n pass",
"def names(self) -> Sequence[str]:\n return pulumi.get(self, \"names\")",
"def names(self) -> Sequence[str]:\n return pulumi.get(self, \"names\")",
"def get_all_names(self):\r\n return [person.name for person in self.__person_repository.elements]",
"def names(self):\n\t\treturn",
"def names(self):\n return [x for x in self._dict.keys()]",
"def nameList(self, excludeFileInfo=False):\n names = self.keys()\n if excludeFileInfo and nodeformat.FileInfoFormat.name in self:\n names.remove(nodeformat.FileInfoFormat.name)\n names.sort()\n return names",
"def get_names(self):\n\n # log.debug(str(inspect.stack()[1][3]) + \" --> OC.get_names()\")\n return [x.options['name'] for x in self.get_list()]",
"def name_get(self):\n result = []\n for r in self:\n result.append((r.id, u\"%s %s\" % ('PO', r.name)))\n return result"
] | [
"0.85557103",
"0.73457515",
"0.7101858",
"0.70191836",
"0.7012281",
"0.69744515",
"0.6807625",
"0.6759487",
"0.6675716",
"0.6675716",
"0.6605555",
"0.6605555",
"0.6467588",
"0.64606243",
"0.6454526",
"0.644459",
"0.64327073",
"0.63664395",
"0.6364416",
"0.63528275",
"0.6321446",
"0.6320474",
"0.63191795",
"0.63191795",
"0.62934154",
"0.62773484",
"0.62711823",
"0.62695104",
"0.62668383",
"0.62047964"
] | 0.81238776 | 1 |
A list of all psv names | def psv_names(self):
return self._psvs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def psv_name_list(self):\n return list(self._link_reg.psv_names)",
"def psvs(self):\n for name in self._psvs:\n yield name, self._data[name]",
"def gpv_names(self):\n return self._gpvs",
"def names(self) -> list[str]:",
"def gpv_name_list(self):\n return list(self._link_reg.gpv_names)",
"def pbv_names(self):\n return self._pbvs",
"def prv_names(self):\n return self._prvs",
"def names(cls) -> List[str]:",
"def listPVs(self):\n for pv in self._pvlist:\n print pv",
"def names(self) -> List:\n ...",
"def pbv_name_list(self):\n return list(self._link_reg.pbv_names)",
"def pyranose_names(self):\n output = set()\n for item in self.pyranoses():\n if item in self.pyranose_fac:\n output.add(self.pyranose_fac[item][\"name\"])\n return list(output)",
"def getNames(self) -> List[unicode]:\n ...",
"def output_names(self):\n return []",
"def prvs(self):\n for name in self._prvs:\n yield name, self._data[name]",
"def namelist(self):\n return self._handle.getnames()",
"def namelist(self):\n return self._handle.getnames()",
"def get_names(self):\n\n # log.debug(str(inspect.stack()[1][3]) + \" --> OC.get_names()\")\n return [x.options['name'] for x in self.get_list()]",
"def names():\n pass",
"def names(self):\n\t\treturn",
"def namelist(self):\n return []",
"def valve_name_list(self):\n return list(self._link_reg.valve_names)",
"def namelist(self):\n return self._handle.namelist()",
"def namelist(self):\n return self._handle.namelist()",
"def prv_name_list(self):\n return list(self._link_reg.prv_names)",
"def psvs(self): \n return self._link_reg.psvs",
"def return_names(self):\n return self.__name_list",
"def occr_p_names(self):\n\n occr_names = []\n\n for i in range(len(self._P_boundaries) - 1):\n occr_names.append(\"{:.3g} - {:.3g}\".format(\n self._P_boundaries[i], self._P_boundaries[i+1]))\n\n return occr_names",
"def get_pokemon_names():\n with open(POKEMON_FILE, 'r') as fh:\n pokemon = json.load(fh)\n return [name.lower() for name in pokemon]",
"def svcs_list(self) -> List[str]:\n return self._svcs_list"
] | [
"0.8099349",
"0.7514087",
"0.71493274",
"0.69765407",
"0.6971079",
"0.6922447",
"0.6882586",
"0.6620836",
"0.658964",
"0.65696895",
"0.65589327",
"0.6492404",
"0.641927",
"0.63236064",
"0.6311279",
"0.63063765",
"0.63063765",
"0.628922",
"0.62541693",
"0.62522995",
"0.6230833",
"0.6178279",
"0.61778396",
"0.61778396",
"0.6097343",
"0.6097045",
"0.6083039",
"0.6078082",
"0.603004",
"0.6022508"
] | 0.8471766 | 0 |
A list of all pbv names | def pbv_names(self):
return self._pbvs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pbv_name_list(self):\n return list(self._link_reg.pbv_names)",
"def psv_name_list(self):\n return list(self._link_reg.psv_names)",
"def gpv_name_list(self):\n return list(self._link_reg.gpv_names)",
"def pbvs(self):\n for name in self._pbvs:\n yield name, self._data[name]",
"def gpv_names(self):\n return self._gpvs",
"def listPVs(self):\n for pv in self._pvlist:\n print pv",
"def psv_names(self):\n return self._psvs",
"def prv_names(self):\n return self._prvs",
"def names(self) -> list[str]:",
"def prv_name_list(self):\n return list(self._link_reg.prv_names)",
"def valve_name_list(self):\n return list(self._link_reg.valve_names)",
"def names(cls) -> List[str]:",
"def fcv_name_list(self):\n return list(self._link_reg.fcv_names)",
"def get_vgs() -> List[str]:\n p = subprocess.run(\n [\"vgs\", \"--reportformat\", \"json\"], check=True, capture_output=True\n )\n output = json.loads(p.stdout)\n return [vg[\"vg_name\"] for vg in output[\"report\"][0][\"vg\"]]",
"def names(self) -> List:\n ...",
"def Vps(self):\n return [elem['Vp'] for elem in self.__compartments]",
"def pbvs(self): \n return self._link_reg.pbvs",
"def tcv_name_list(self):\n return list(self._link_reg.tcv_names)",
"def get_pokemon_names():\n with open(POKEMON_FILE, 'r') as fh:\n pokemon = json.load(fh)\n return [name.lower() for name in pokemon]",
"def psvs(self):\n for name in self._psvs:\n yield name, self._data[name]",
"def get_pv_names(k8s_cli, namespace, error_template):\n cmd = \"{} get -n {} PersistentVolumeClaim --selector={} -o=custom-columns=VOLUME:.spec.volumeName --no-headers\" \\\n .format(k8s_cli, namespace, OPERATOR_LABEL)\n missing_resource_template = f\"Namespace '{namespace}': Skip collecting information for PersistentVolumeClaim. \" \\\n f\"Server has no resource of type PersistentVolumeClaim\"\n output = run_shell_command_with_retries(cmd, KUBCTL_GET_YAML_RETRIES, error_template, missing_resource_template)\n return output.split()",
"def getNames(self) -> List[unicode]:\n ...",
"def valve_names(self):\n return self._valves",
"def get_notebook_pvcs(nb):\n pvcs = []\n if not nb[\"spec\"][\"template\"][\"spec\"][\"volumes\"]:\n return []\n\n vols = nb[\"spec\"][\"template\"][\"spec\"][\"volumes\"]\n for vol in vols:\n # Check if the volume is a pvc\n if not vol.get(\"persistentVolumeClaim\"):\n continue\n pvcs.append(vol[\"persistentVolumeClaim\"][\"claimName\"])\n\n return pvcs",
"def protocol_names(self):\n l = self.protocols()\n retval = [str(k.name) for k in l]\n return retval",
"def name_list(qbo_session):\n\n return qbo_session.name_list()",
"def pump_names(self):\n return self._pumps",
"def pump_name_list(self):\n return list(self._link_reg.pump_names)",
"def namelist(self):\n return self._handle.namelist()",
"def namelist(self):\n return self._handle.namelist()"
] | [
"0.84974104",
"0.739632",
"0.72933024",
"0.72434616",
"0.7021867",
"0.69781375",
"0.67738646",
"0.65906185",
"0.6421085",
"0.64076006",
"0.6209149",
"0.6204148",
"0.6148312",
"0.6124793",
"0.6053868",
"0.6003238",
"0.5986601",
"0.5904927",
"0.5855399",
"0.5826886",
"0.5823659",
"0.5820712",
"0.58195263",
"0.57858235",
"0.5779393",
"0.5756421",
"0.57484454",
"0.57379425",
"0.57371646",
"0.57371646"
] | 0.86136687 | 0 |
A list of all tcv names | def tcv_names(self):
return self._tcvs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tcv_name_list(self):\n return list(self._link_reg.tcv_names)",
"def fcv_names(self):\n return self._fcvs",
"def get_names(self):\n import tc\n opts_list = []\n for k, v in self.__class__.__dict__.iteritems():\n if isinstance(v, tc.TC):\n opts_list.append(k)\n opts_list = sorted(opts_list)\n return opts_list",
"def fcv_name_list(self):\n return list(self._link_reg.fcv_names)",
"def names(self) -> list[str]:",
"def names(cls) -> List[str]:",
"def names(self) -> List:\n ...",
"def getNames(self) -> List[unicode]:\n ...",
"def get_names(self):\n\n # log.debug(str(inspect.stack()[1][3]) + \" --> OC.get_names()\")\n return [x.options['name'] for x in self.get_list()]",
"def names(self):\n\t\treturn",
"def names():\n pass",
"def tank_name_list(self):\n return list(self._node_reg.tank_names)",
"def labels(self) -> List[str]:\n\n return list(self.t0.keys())",
"def namelist(self):\n return []",
"def tcvs(self):\n for name in self._tcvs:\n yield name, self._data[name]",
"def names(self):\n\n allnames = []\n for term in self.terms:\n allnames += term.names()\n return allnames",
"def names(self):\n\n allnames = []\n for term in self.terms:\n allnames += term.names()\n return allnames",
"def get_names(self):\n return [doc['name'] for doc in self.vocab]",
"def termnames(self):\n\n names = []\n for term in self.terms:\n names += [term.termname]\n return names",
"def termnames(self):\n\n names = []\n for term in self.terms:\n names += [term.termname]\n return names",
"def get_cora_label_names():\n # type: () -> List[str]\n return _label_names",
"def names(self):\n return [x for x in self._dict.keys()]",
"def psv_name_list(self):\n return list(self._link_reg.psv_names)",
"def get_feature_names(self):\n ...",
"def getFeatureNames(self):\n pass",
"def list_feature_tests(self):\n\t\treturn self.test_names",
"def _list_of_availability_strings():\n names = [availability.name for availability in Availability]\n return names",
"def namelist(self):\n return self._handle.getnames()",
"def namelist(self):\n return self._handle.getnames()",
"def pbv_names(self):\n return self._pbvs"
] | [
"0.86334425",
"0.7213029",
"0.7083568",
"0.695902",
"0.6909198",
"0.6866125",
"0.6510622",
"0.6448307",
"0.63815206",
"0.62628335",
"0.6236588",
"0.62123656",
"0.6204504",
"0.6166822",
"0.6115471",
"0.607714",
"0.607714",
"0.60699075",
"0.6054246",
"0.6054246",
"0.60278165",
"0.6025823",
"0.5992052",
"0.5976573",
"0.59564954",
"0.59441966",
"0.5924911",
"0.5892395",
"0.5892395",
"0.58651125"
] | 0.8502852 | 1 |
A list of all fcv names | def fcv_names(self):
return self._fcvs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fcv_name_list(self):\n return list(self._link_reg.fcv_names)",
"def get_fnames(self):\n return self.fnames[:]",
"def getFeatureNames(self):\n return [\"f100\", \"f103\", \"f104\"]",
"def tcv_name_list(self):\n return list(self._link_reg.tcv_names)",
"def facenames ( self ):\n self._facenames = []\n self.EnumerateFacenames()\n return self._facenames",
"def names(self) -> list[str]:",
"def names(cls) -> List[str]:",
"def getFeatureNames(self):\n feature_names = super().getFeatureNames()\n feature_names.extend([\"f101\", \"f102\", \"f105\", \"fNum\", \"fCapStart\", \"fCapNoStart\"])\n return feature_names",
"def getafNames(self):\n names = [self.af_dict['polar_files'][i] for i in range(len(self.af_dict['polar_files']))]\n return names",
"def all_facenames ( ):\n global facenames\n \n if facenames is None:\n facenames = FontEnumerator().facenames()\n facenames.sort()\n return facenames",
"def tcv_names(self):\n return self._tcvs",
"def names(self) -> List:\n ...",
"def getFofns():\n return list(map(_getAbsPath,\n [\"datasets/fofn.fofn\"]))",
"def getFeatureNames(self):\n pass",
"def all_fov_names(fov_directory=FOV_DIRECTORY):\n fov_names = [fname[:-4] for fname in os.listdir(fov_directory)\n if (fname.endswith(\".txt\")) and (not fname.startswith(\"$\"))]\n return fov_names",
"def furanoses(self):\n return sorted(set([self[x.split(\"_\")[-1]][\"name\"] for x in self.furanose_fac.keys()]))",
"def filenames(self):\n names = []\n for furi in np.asarray(self.fileuris).flat:\n names.append(furi)\n return names",
"def namelist(self):\n return self._handle.getnames()",
"def namelist(self):\n return self._handle.getnames()",
"def list_fields(fc):\n return [f.name for f in arcpy.ListFields(fc)]",
"def fqns(self):\n return [fqn for fqn in self.runinfos]",
"def namelist(self):\n return self._handle.namelist()",
"def namelist(self):\n return self._handle.namelist()",
"def featureNames(self):\n return [feature.name for feature in self.features]",
"def get_feature_names(self):\n ...",
"def fcvs(self): \n return self._link_reg.fcvs",
"def getOfcNames( self ):\n\n if self.ofcNames:\n return self.ofcNames.keys()\n \n n = self.adb.get( \"nFans\" )\n for indx in xrange( n ):\n name = self.adb.get( \"fanName\", indx )\n self.ofcNames[ name ] = indx\n\n return self.ofcNames.keys()",
"def namelist(self):\n return []",
"def getNames(self) -> List[unicode]:\n ...",
"def fcvs(self):\n for name in self._fcvs:\n yield name, self._data[name]"
] | [
"0.84923357",
"0.71780765",
"0.7002602",
"0.6976435",
"0.68118674",
"0.66828746",
"0.66750735",
"0.66390705",
"0.66099983",
"0.65976095",
"0.64258903",
"0.6316046",
"0.62973255",
"0.6229896",
"0.6176718",
"0.61347145",
"0.61020917",
"0.6095672",
"0.6095672",
"0.6094873",
"0.6094349",
"0.6087608",
"0.6087608",
"0.60828394",
"0.6072965",
"0.60698235",
"0.6058696",
"0.6055838",
"0.6054046",
"0.60399413"
] | 0.8390784 | 1 |
A list of all gpv names | def gpv_names(self):
return self._gpvs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gpv_name_list(self):\n return list(self._link_reg.gpv_names)",
"def psv_name_list(self):\n return list(self._link_reg.psv_names)",
"def get_vgs() -> List[str]:\n p = subprocess.run(\n [\"vgs\", \"--reportformat\", \"json\"], check=True, capture_output=True\n )\n output = json.loads(p.stdout)\n return [vg[\"vg_name\"] for vg in output[\"report\"][0][\"vg\"]]",
"def gpvs(self):\n for name in self._gpvs:\n yield name, self._data[name]",
"def psv_names(self):\n return self._psvs",
"def pbv_name_list(self):\n return list(self._link_reg.pbv_names)",
"def list_gpo(self, _):\n results = self.engine.query(self.engine.GPO_INFO_FILTER(), [\"cn\", \"displayName\"])\n for gpo in results:\n print(\"{cn}: {name}\".format(cn=gpo[\"cn\"], name=gpo[\"displayName\"]))",
"def listPVs(self):\n for pv in self._pvlist:\n print pv",
"def pbv_names(self):\n return self._pbvs",
"def names(self) -> list[str]:",
"def prv_names(self):\n return self._prvs",
"def names(cls) -> List[str]:",
"def names(self) -> List:\n ...",
"def prv_name_list(self):\n return list(self._link_reg.prv_names)",
"def itemnames():\n g = ['KIS_NA_39', 'VII_57', 'MX_48', 'MX_56', 'KIS_NA_42', 'VII_54',\n 'MX_S_48', 'MX_S_52', 'MX_52', 'KIS_NA_45', 'KIS_NA_51', 'MIP_45',\n 'MIP_49', 'MIP_52', 'MIP_plus_48', 'MIP_plus_51', 'MX_42', 'MX_45',\n 'MIP_G_42', 'KIS_42', 'KIS_NA_48']\n return(g)",
"def get_goniometers_names():\n return [gon.name for gon in goniometers]",
"def getNames(self) -> List[unicode]:\n ...",
"def get_ocsp_gnames(self):\n urls = ['uri:' + u for u in self.ocsp_urls]\n return self.load_gnames(urls)",
"def get_san_gnames(self):\n return self.load_gnames(self.san)",
"def get_names(self):\r\n names = []\r\n for p in self.people:\r\n names.append(p.get_name())\r\n return names",
"def get_srv_ppgrp_name(self):\n pp_grp_name_lst = list()\n for srv_grp in self.srv_grp_lst:\n pp_grp = list()\n for srv in srv_grp:\n pp_grp.append(\n (srv['name'] + '_pt_in', srv['name'] + '_pt_out'))\n pp_grp_name_lst.append(pp_grp)\n return pp_grp_name_lst",
"def namelist(self):\n return []",
"def get_names(self):\n\n # log.debug(str(inspect.stack()[1][3]) + \" --> OC.get_names()\")\n return [x.options['name'] for x in self.get_list()]",
"def occr_p_names(self):\n\n occr_names = []\n\n for i in range(len(self._P_boundaries) - 1):\n occr_names.append(\"{:.3g} - {:.3g}\".format(\n self._P_boundaries[i], self._P_boundaries[i+1]))\n\n return occr_names",
"def psvs(self):\n for name in self._psvs:\n yield name, self._data[name]",
"def _get_pvds(self):\n pvds = []\n for path in self.paths():\n if path.reqstate == ReqState.enabled and path.provider.name not in pvds:\n pvds += [path.provider.name]\n return pvds",
"def valve_name_list(self):\n return list(self._link_reg.valve_names)",
"def names():\n pass",
"def sorted_gnames():\n return sorted(group_names.keys())",
"def namelist(self):\n return self._handle.getnames()"
] | [
"0.8789435",
"0.7412494",
"0.7322222",
"0.7071808",
"0.70511836",
"0.699086",
"0.6955757",
"0.69377375",
"0.6858557",
"0.6748364",
"0.65743333",
"0.65418184",
"0.65207136",
"0.64639956",
"0.64244753",
"0.6405372",
"0.6399502",
"0.63892186",
"0.63781345",
"0.63513714",
"0.6340365",
"0.6293196",
"0.62626153",
"0.6261788",
"0.6172836",
"0.6150665",
"0.6123095",
"0.60901284",
"0.60491365",
"0.59810925"
] | 0.87895864 | 0 |
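The four records above (pbv_names, tcv_names, fcv_names, gpv_names) expose per-type valve name lists from the link registry, and the negatives show matching `*_name_list` properties on the network model itself. A minimal usage sketch follows; the `wntr` package name, the `WaterNetworkModel` constructor, and the `"Net3.inp"` file are assumptions about the surrounding library, not details confirmed by these records.

```python
# Minimal sketch. Assumptions (not confirmed by the records): the wntr package
# is installed and an EPANET input file named "Net3.inp" is available.
import wntr

wn = wntr.network.WaterNetworkModel("Net3.inp")

# The model-level *_name_list properties seen in the negatives wrap the
# registry properties above and return plain Python lists of link names.
print("PBVs:", wn.pbv_name_list)
print("TCVs:", wn.tcv_name_list)
print("FCVs:", wn.fcv_name_list)
print("GPVs:", wn.gpv_name_list)
```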
Generator to get all pipes. Yields (name, pipe) tuples. | def pipes(self):
for name in self._pipes:
yield name, self._data[name] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fission_pipes():\n def _pipes(num):\n return [base.BasePipe(i) for i in range(1, num + 1)]\n yield _pipes\n base.reset()",
"def pipes(self): \n return self._link_reg.pipes",
"def get_processes():\n yield from psutil.process_iter()",
"def io_pipe():\n r_fd, w_fd = os.pipe()\n with io.open(r_fd, 'rb', 0) as r, \\\n \t io.open(w_fd, 'wb', 0) as w:\n \tyield r, w",
"def fission_pipe():\n yield base.BasePipe(1)\n base.reset()",
"def get_pipes(self, num = 1):\n if self.api is None:\n self.api = ChessAPI(self)\n self.api.start()\n return [self.api.create_pipe() for _ in range(num)]",
"def __iter__(self):\n p = subprocess.Popen(self.comm, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)\n while True:\n line = p.stdout.readline()\n if not line:\n\t\t break\n line = line.strip()\n yield line",
"def run(self, data, rewrap=False, prefetch=0):\n if rewrap:\n data = [data]\n\n for pipe in self._pipes:\n pipe.feed(data)\n data = pipe\n else:\n iterable = self._prefetch_callable(data, prefetch) if prefetch else data\n for out_data in iterable:\n yield out_data",
"def piped(self):\n\t\tpass",
"def _generators(self):\n return self.free_group.generators",
"def get_pipelines() -> Iterable[DataPipeline]:\n for pipeline_name in get_pipeline_names():\n yield DataPipeline.load(pipeline_name)",
"def input_pipe():\n x = ''\n while True:\n x = yield x\n yield # to keep the generator in lock step with input",
"def make_pipes(self, stderr=True):\r\n\r\n pipes = {'child_stdin':None,\r\n 'stdin':None,\r\n 'stdout':None,\r\n 'child_stdout':None,\r\n 'stderr':None,\r\n 'child_stderr':None}\r\n try:\r\n stdin, child_stdin = os.pipe()\r\n pipes['child_stdin'], pipes['stdin'] = stdin, child_stdin\r\n stdout, child_stdout = os.pipe()\r\n pipes['stdout'], pipes['child_stdout'] = stdout, child_stdout\r\n if stderr:\r\n stderr, child_stderr = os.pipe()\r\n pipes['stderr'], pipes['child_stderr'] = stderr, child_stderr\r\n for fd in (pipes['stdout'], pipes['stderr'], pipes['stdin']):\r\n if fd is not None:\r\n fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | os.O_NDELAY)\r\n return pipes\r\n except OSError:\r\n for fd in pipes.values():\r\n if fd is not None:\r\n self.close_fd(fd)",
"def pipe_names(self):\n return self._pipes",
"def _LessPipe():\n try:\n # pylint: disable=unexpected-keyword-arg\n proc = subprocess.Popen(['less'],\n stdin=subprocess.PIPE,\n stdout=sys.stdout,\n encoding='utf-8')\n yield proc.stdin\n proc.stdin.close()\n proc.wait()\n except IOError:\n pass # Happens when less is quit before all data is written.\n except KeyboardInterrupt:\n pass # Assume used to break out of less.",
"def distributor(ls_feed_pipe_open,low,high):\n def getNumber(low,high):\n i = low\n if i%2 == 0: #if i is even, then start from i+1 odd.\n i += 1\n while i<=high:\n yield i\n i+=2 #no need to check for even numbers, so skip it here at begining\n yield -1 #when generator yields -1, it reached high, so terminate\n\n next_pipe = 0\n number = getNumber(low,high)\n while True:\n msg = next(number)\n if msg == -1: #to check when generator reached high.\n break\n else:\n #feed pipes in a round robin fashion,\n #so that over time each generatePrime process experiences same load.\n ls_feed_pipe_open[next_pipe].send(msg)\n next_pipe += 1\n if next_pipe == len(ls_feed_pipe_open):\n next_pipe = 0\n for p in ls_feed_pipe_open:\n p.send(-1) #-1 is sentinel value for all generatePrime processs\n return 0",
"def semigroup_generators(self):",
"async def __aiter__(self):\n message = b''\n # wait until the pipe is opened by a writer\n await wait_readable(self.fd)\n while True:\n try:\n item = os.read(self.fd, self.blocksize)\n except BlockingIOError:\n # pipe is empty, yield message and wait for another\n if self.encoding is not None:\n message = message.decode(self.encoding)\n yield message\n message = b''\n await wait_readable(self.fd)\n else:\n if not item:\n # pipe is closed, return\n break\n else:\n message += item\n if self.encoding is not None:\n message = message.decode(self.encoding)\n yield message",
"def collect(self):\n while self.proc is not None:\n self.read()\n if not len(self.datalines):\n return\n while len(self.datalines):\n # pop the first node of list\n yield self.datalines.pop(0)",
"def yield_output(self, args, *popenargs, **kwargs):\n p = self.create_process(args,\n stdout=subprocess.PIPE,\n *popenargs,\n **kwargs)\n for line in p.stdout:\n yield line\n p.wait()\n if p.returncode:\n raise subprocess.CalledProcessError(p.returncode,\n self.build_args(args))",
"def run(self):\n assert len(self.elements) >= 2, \"In order flow, pipe needs 2 or more elements\"\n in_pipe = self.elements[0]\n other_pipes = self.elements[1:-1]\n out_pipe = self.elements[-1]\n\n self.make_assertions(in_pipe, other_pipes, out_pipe)\n\n for data in in_pipe.grasp():\n write = True\n\n for element in other_pipes:\n if isinstance(element, elements.DataPypElement):\n data = element.extend(data)\n elif isinstance(element, elements.FilterPypElement):\n if not element.stay(data):\n write = False\n break\n if write:\n out_pipe.extract(data)",
"def run(self):\r\n for pipe in self.inputs:\r\n for row in pipe.rows():\r\n self.put(row)",
"def walk_commands(self) -> typing.Generator[Command, None, None]:\n for command in self.commands:\n yield command\n if isinstance(command, Group):\n yield from command.walk_commands()",
"def generators(self):\n return self._generators",
"def sequences(self):\n # i am one\n yield self\n # nothing further\n return",
"def semigroup_generators(self):\n return self.ambient().semigroup_generators().map(self.retract)",
"def __iter__(self):\n yield from self.gen",
"def __next__(self):\n for child in self.children:\n yield child",
"def pipelines(self):\r\n return pipelines.Pipelines(self)",
"def __next__(self):\n if self.current_pipe_idx >= len(self.configuration):\n raise StopIteration\n else:\n this_idx = self.current_pipe_idx\n wavelength_, spectra_ = run_pipeline(self.wavelength.copy(),\n self.spectra.copy(),\n self.configuration[this_idx])\n self.current_pipe_idx += 1\n return wavelength_, spectra_, self.configuration[this_idx]"
] | [
"0.74286675",
"0.69176614",
"0.68744016",
"0.6859538",
"0.67979777",
"0.6790938",
"0.6445842",
"0.64438236",
"0.64320713",
"0.6423539",
"0.6421947",
"0.63071144",
"0.62980175",
"0.62546927",
"0.62468296",
"0.6155192",
"0.60791737",
"0.60716903",
"0.60712874",
"0.60122687",
"0.6011071",
"0.59927106",
"0.59798515",
"0.5973445",
"0.5973254",
"0.59726375",
"0.59658366",
"0.5964204",
"0.5962377",
"0.5953225"
] | 0.82922226 | 0 |
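The record above is a generator, so it yields (name, pipe) pairs lazily instead of building a list. A hedged consumption sketch follows; the accessor spelling (`wn.pipes()`) and the wntr scaffolding are assumptions, since the records only show the registry-side generator.

```python
# Sketch only: assumes wntr is installed and "Net3.inp" exists. Whether the
# generator is reached as wn.pipes() or through the link registry may vary,
# so treat the accessor below as illustrative.
import wntr

wn = wntr.network.WaterNetworkModel("Net3.inp")

total_length = 0.0
for name, pipe in wn.pipes():      # yields (name, pipe) tuples, as above
    total_length += pipe.length    # Pipe objects carry length, diameter, etc.

print("pipes:", len(wn.pipe_name_list))
print("total pipe length:", total_length)
```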
Generator to get all pumps. Yields (name, pump) tuples. | def pumps(self):
for name in self._pumps:
yield name, self._data[name] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_processes():\n yield from psutil.process_iter()",
"def _generators(self):\n return self.free_group.generators",
"def pump_curves(self):\n for key in self._pump_curves:\n yield key, self._data[key]",
"def sender_iter(self):\n while 1:\n yield self.send_next()",
"def __iter__(self):\n yield from self.gen",
"def get_generator(self):\n while self._is_running():\n yield self._queue.get()",
"def get_generator(self):\n while self._is_running():\n yield self._queue.get()",
"def generators(self):\n return self._generators",
"def sequences(self):\n # i am one\n yield self\n # nothing further\n return",
"def pipes(self):\n for name in self._pipes:\n yield name, self._data[name]",
"def power_pumps(self):\n for name in self._power_pumps:\n yield name, self._data[name]",
"def pumps(self): \n return self._link_reg.pumps",
"def iter_asynchronously(gen_func):\n q = Queue()\n p = Process(target=_async_queue_manager, args=(gen_func, q))\n p.start()\n while True:\n item = q.get()\n if item is PoisonPill:\n break\n else:\n yield item",
"def yieldRPC(remoteYields): #Status: WIP\r\n pass",
"def __iter__(self):\n from sage.combinat.posets.posets import FinitePosets_n\n n = 0\n while True:\n for P in FinitePosets_n(n):\n yield P\n n += 1",
"def data_generator():\n msg = Message(Message.ADD, queue.uuid, queue)\n PROVIDER_MQ.put(msg)\n keep_running = True\n while keep_running:\n try:\n chunk = queue.get()\n yield chunk\n except Empty:\n app.logger.info('Queue empty. Ending stream')\n keep_running = False",
"def semigroup_generators(self):",
"def generator(self):\n return [None, 1]",
"def __iter__(self) -> Generator:\r\n yield from self.sequence",
"def __iter__(self):\n while True:\n results = self.poll()\n for x in results:\n yield x\n if not results:\n time.sleep(self.poll_delay)",
"def __iter__(self):\n for run in self.runs:\n yield run",
"def emptyGenerator():\n return\n yield",
"def semigroup_generators(self):\n return self.ambient().semigroup_generators().map(self.retract)",
"def loop(self):\n yield self\n e = self.next\n while e is not self:\n yield e\n e = e.next",
"def iterator(self):\n yield",
"def generators(self) -> List[Generator]:\n return self._generators",
"def collect(self):\n while self.proc is not None:\n self.read()\n if not len(self.datalines):\n return\n while len(self.datalines):\n # pop the first node of list\n yield self.datalines.pop(0)",
"def _generator(self):\n\t\twhile(1):\n\t\t\ttry:\n\t\t\t\tm = self.messages.pop(0) # pop the first Flash2Message in the list\n\t\t\t\tyield m\n\t\t\texcept IndexError:\n\t\t\t\traise StopIteration",
"def __iter__(self):\n while True:\n if self.stop:\n return\n for item in self.get_next_batch():\n yield item",
"def __iter__(self):\n for p in self.pkg.datastream:\n act = self.action(self.pkgmap, p, p.name)\n if act:\n yield act\n\n # for some reason, some packages may have directories specified\n # in the pkgmap that don't exist in the archive. They need to\n # be found and iterated as well.\n #\n # Some of the blastwave packages also have directories in the\n # archive that don't exist in the package metadata. I don't see\n # a whole lot of point in faking those up.\n for p in self.pkg.manifest:\n if p.pathname.startswith(\"/\"):\n dir = \"root\"\n else:\n dir = \"reloc/\"\n if p.type == \"d\" and \\\n dir + p.pathname not in self.pkg.datastream:\n act = self.action(self.pkgmap, None,\n dir + p.pathname)\n if act:\n yield act\n if p.type in \"ls\":\n act = self.action(self.pkgmap, None,\n dir + p.pathname)\n if act:\n yield act"
] | [
"0.6545596",
"0.6510258",
"0.63968307",
"0.6269583",
"0.6256678",
"0.6230063",
"0.6230063",
"0.6223532",
"0.61827946",
"0.6177478",
"0.61692303",
"0.6152778",
"0.6135424",
"0.61134017",
"0.610873",
"0.60942984",
"0.5993936",
"0.5971733",
"0.59500283",
"0.59470314",
"0.59225816",
"0.5876147",
"0.5872725",
"0.5872092",
"0.5856464",
"0.5841359",
"0.582556",
"0.582086",
"0.5818315",
"0.58111036"
] | 0.78001964 | 0 |
Generator to get all valves. Yields (name, valve) tuples. | def valves(self):
for name in self._valves:
yield name, self._data[name] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __iter__(self):\n for val in self.value:\n yield val",
"def iter_values(self):\n values = self.values\n if (values is not None):\n yield from values",
"def __iter__(self):\n\n for lit in self.fvals:\n yield lit",
"def itervaluerefs(self):\n for value in self.itervalues():\n yield ref(value)",
"def itervalues(self):\r\n for sleek_ref in self.data.itervalues():\r\n try:\r\n yield sleek_ref()\r\n except SleekRefDied:\r\n pass",
"def __iter__(self):\n for value in self.__dict__.values():\n yield value",
"def __iter__(self):\n yield from self.gen",
"def itervalues(self):\n def make_iter(self=self):\n keys = self.iterkeys()\n while True:\n yield self[keys.next()]\n return make_iter()",
"def __iter__(self):\n for v in self._items:\n yield v",
"def values(self):\n while True:\n try:\n yield self.value\n except GPIODeviceClosed:\n break",
"def iterator(self):\n yield",
"def check_valves(self):\n for name in self._pipes:\n if self._data[name].check_valve:\n yield name",
"def values(self):\n for row in self.yield_matrix:\n yield FissionYield(self.products, row)",
"def __iter__(self):\n yield from chain.from_iterable(self.data.values())",
"def __iter__(self):\n yield from self.calls",
"def _iter_remote_values(self):\n yield self.scene_value",
"def Vrep_generator(self):\n for V in self.Vrepresentation():\n yield V",
"def itervalues(self):\n for key in self:\n yield self[key]",
"def all(self):\n for num in range(self.bound):\n vec, exp = self(num)\n yield num, np.array([exp]), vec",
"def __iter__(self):\n for run in self.runs:\n yield run",
"def enumerate(self):\n\n done = False\n while not done:\n mcs = self.compute()\n\n if mcs != None:\n yield mcs\n else:\n done = True",
"def __iter__(self):\n for instresult in self.instresults:\n yield instresult",
"def __iter__(self):\n for x in self.seq: yield x",
"def get_val_iterator(self) -> Iterable[Batch]:\n if self._val_name not in self._datasets:\n raise ValueError(\"Val data not provided.\")\n return self.get_iterator(self._val_name)",
"def values(self):\n for ts in self:\n yield self[ts]",
"def __iter__(self):\n return self._product_generator()",
"def sequences(self):\n # i am one\n yield self\n # nothing further\n return",
"def __iter__(self):\n for runspec in self.runspecs:\n yield runspec",
"def ticker_generator():\n return (v for v in load_equities().values)",
"def generator(self):\n return [None, 1]"
] | [
"0.7613851",
"0.71577305",
"0.70840234",
"0.6988916",
"0.6914428",
"0.6893951",
"0.6869398",
"0.6847936",
"0.6836424",
"0.6807768",
"0.68007344",
"0.67966443",
"0.6792792",
"0.6753803",
"0.67188644",
"0.66389185",
"0.6590935",
"0.65789545",
"0.65655345",
"0.6540785",
"0.6523075",
"0.65152717",
"0.65062505",
"0.6495845",
"0.64449555",
"0.64392895",
"0.6436175",
"0.6419653",
"0.641123",
"0.6404746"
] | 0.76746064 | 0 |
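The pumps and valves records above follow the same (name, object) generator pattern as the pipes record. A combined sketch under the same assumptions (wntr package, model constructor, input file name):

```python
# Sketch: iterate pumps and valves the same way as pipes. Assumes wntr and an
# EPANET input file; accessor spellings are illustrative.
import wntr

wn = wntr.network.WaterNetworkModel("Net3.inp")

for name, pump in wn.pumps():       # (name, pump) tuples
    print("pump:", name)

for name, valve in wn.valves():     # (name, valve) tuples
    print("valve:", name, type(valve).__name__)
```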
Generator to get all head pumps. Yields (name, pump) tuples. | def head_pumps(self):
for name in self._head_pumps:
yield name, self._data[name] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pumps(self):\n for name in self._pumps:\n yield name, self._data[name]",
"def hbins(self):\n h = HBINBlock(self._buf, self.first_hbin_offset(), self)\n yield h\n\n while h.has_next():\n h = h.next()\n yield h",
"def _generators_for_H(self):\n if self.level() in [1, 2]:\n return []\n return [ZZ(x) for x in IntegerModRing(self.level()).unit_gens()]",
"def Hrep_generator(self):\n for H in self.Hrepresentation():\n yield H",
"def head_pump_names(self):\n return self._head_pumps",
"def generator(self):\n return [None, 1]",
"def semigroup_generators(self):",
"def sender_iter(self):\n while 1:\n yield self.send_next()",
"def pump_curves(self):\n for key in self._pump_curves:\n yield key, self._data[key]",
"def __iter__(self):\n for key in chain(\n self.HEAD_KEYS, (key for key, _ in self.HEAD_EXTRA), self.HEAD_FROM\n ):\n yield key",
"def sequences(self):\n # i am one\n yield self\n # nothing further\n return",
"def generate(th1):\n pro = 1\n while True:\n if th1.is_alive() and pro < 99:\n pro = pro + 1\n time.sleep(1)\n yield \"data:\" + str(pro) + \"\\n\\n\"\n else:\n if not th1.is_alive():\n yield \"data:\" + str(100) + \"\\n\\n\"",
"def emptyGenerator():\n return\n yield",
"def pipes(self):\n for name in self._pipes:\n yield name, self._data[name]",
"def start(self):\n\n while True:\n try:\n _ = self.assigned.pop(0)\n yield self.assemble_substructure()\n\n except IndexError:\n break",
"def __iter__(self):\n yield from self.gen",
"def headloss_curves(self):\n for key in self._headloss_curves:\n yield key, self._data[key]",
"def yieldRPC(remoteYields): #Status: WIP\r\n pass",
"def next_batch(self):\n\n while self.cap.isOpened():\n flag, frame = self.cap.read()\n yield frame",
"def pkt_gen(self):\n for i in range(self.num_pkts):\n # create the test packets\n pkt = Ether()/IP()/TCP()/'hello there pretty world!!!'\n rank = random.sample(range(0, 100), 1)[0]\n pkt_id = i\n tuser = Tuser(len(pkt), 0b00000001, 0b00000100, rank, pkt_id)\n print ('@ {:.2f} - Send: {} || {}'.format(self.env.now, pkt.summary(), tuser))\n # write the pkt and metadata into storage\n self.pkt_in_pipe.put((pkt, tuser))\n\n # wait for 10 cycles\n #for j in range(PREAMBLE + len(pkt) + IFG):\n yield self.wait_line_clks(self.PREAMBLE + len(pkt) + self.IFG)",
"def __iter__(self):\n cur = self.head\n while cur is not None:\n yield cur.data\n cur = cur.next",
"def _generators(self):\n return self.free_group.generators",
"def pumps(self): \n return self._link_reg.pumps",
"def frame_generator(self):\n frame = 0\n while not self.process.termination:\n yield frame\n frame += 1",
"def __iter__(self):\n for b in self.dl: \n yield to_device(b, self.device) # yield pauses the execution, not store values in memory, forgets about them once iterated\n # no need to remove batch of data from device, done automatically",
"def IterateTAILQ_HEAD(headval, element_name):\n iter_val = headval.tqh_first\n while unsigned(iter_val) != 0 :\n yield iter_val\n iter_val = iter_val.__getattr__(element_name).tqe_next\n #end of yield loop",
"def __iter__(self):\n i = self.head\n while True:\n if not i:\n break\n yield i\n i = i.next\n if not i:\n break",
"def loop(self):\n yield self\n e = self.next\n while e is not self:\n yield e\n e = e.next",
"def iter(self):\n\n current = self.head\n while current:\n yield current\n current = current.next",
"def iterator(self):\n yield"
] | [
"0.6747126",
"0.6405931",
"0.6113696",
"0.6022144",
"0.5963356",
"0.5941465",
"0.59063745",
"0.5810048",
"0.5788001",
"0.5783232",
"0.573605",
"0.57166445",
"0.57086796",
"0.5692615",
"0.5641295",
"0.5627808",
"0.56171083",
"0.5583167",
"0.5573611",
"0.5567212",
"0.5544064",
"0.5541497",
"0.55394554",
"0.55355185",
"0.5530504",
"0.5529427",
"0.55134654",
"0.5499403",
"0.5498699",
"0.54867756"
] | 0.7006387 | 0 |
Generator to get all power pumps. Yields (name, pump) tuples. | def power_pumps(self):
for name in self._power_pumps:
yield name, self._data[name] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pumps(self):\n for name in self._pumps:\n yield name, self._data[name]",
"def pumps(self): \n return self._link_reg.pumps",
"def power_supply(request):\n true_context = request.config.getoption(\"--true-context\")\n if not true_context:\n with DeviceTestContext(PowerSupply) as proxy:\n yield proxy\n else:\n database = tango.Database()\n instance_list = database.get_device_exported_for_class(\"PowerSupply\")\n for instance in instance_list.value_string:\n yield tango.DeviceProxy(instance)\n break",
"def _generators(self):\n return self.free_group.generators",
"def power_points():\n next_reading = power_readings()\n stretch = []\n\n def next():\n nonlocal stretch, next_reading\n stretch.append(next_reading())\n if len(stretch) > XMAX + 1:\n stretch.pop(0)\n x = XMAX + 1 - len(stretch)\n points = []\n for y in stretch:\n points.append((x, y))\n points.append((x, 0))\n x += 1\n return points\n\n return next",
"def pump_curves(self):\n for key in self._pump_curves:\n yield key, self._data[key]",
"def get_processes():\n yield from psutil.process_iter()",
"def __iter__(self):\n return self._product_generator()",
"def values(self):\n while True:\n try:\n yield self.value\n except GPIODeviceClosed:\n break",
"def test_generator_upward(narrow_power_range):\n with patch('random.randint', side_effect=lambda a,b: 1):\n range_min, range_max = narrow_power_range\n for msg in it.islice(generate_msgs(range_min, range_max), 0, 5):\n pass\n power = Message.parse(msg).power\n assert power == range_max",
"def generator(self):\n return [None, 1]",
"def __iter__(self):\n yield from self.gen",
"def power_list():",
"def __iter__(self):\n while True:\n results = self.poll()\n for x in results:\n yield x\n if not results:\n time.sleep(self.poll_delay)",
"def power_pump_names(self):\n return self._power_pumps",
"def test_generator_downward(narrow_power_range):\n with patch('random.randint', side_effect=lambda a,b: -1):\n range_min, range_max = narrow_power_range\n for msg in it.islice(generate_msgs(range_min, range_max), 0, 5):\n pass\n power = Message.parse(msg).power\n assert power == range_min",
"def generators(self):\n return self._generators",
"def __iter__(self):\n from sage.combinat.posets.posets import FinitePosets_n\n n = 0\n while True:\n for P in FinitePosets_n(n):\n yield P\n n += 1",
"def power_readings():\n chain = [sin(x / (XMAX * 0.1)) * 0.1 + 0.6 for x in range(0, XMAX + 1)]\n cnt = 0\n\n def next():\n nonlocal chain, cnt\n next_reading = chain[cnt % len(chain)]\n cnt += 1\n return next_reading\n\n return next",
"def sender_iter(self):\n while 1:\n yield self.send_next()",
"def ticker_generator():\n return (v for v in load_equities().values)",
"def get_generator(self):\n while self._is_running():\n yield self._queue.get()",
"def get_generator(self):\n while self._is_running():\n yield self._queue.get()",
"def customer_generator(env, inventory_stock):\n for i in itertools.count():\n yield env.timeout(random.randint(*T_INTER))\n env.process(customer(env, inventory_stock, 'Customer_'+str(i+1)))",
"def pipes(self):\n for name in self._pipes:\n yield name, self._data[name]",
"def random_values():\n while True:\n yield random()",
"def nextGen(self):\n\n p = []\n while len(p) < len(self.p):\n #select mates and produce offspring\n p1, p2 = self.select()\n offspring = self.mate(p1, p2)\n\n #put the offspring in the next generation (with mutation)\n for child in offspring:\n child=self.mutate(child)\n p.append(child)\n \n\n # the world belongs to the new generation\n return p",
"def getPurchasableGenerators(self) -> list:\n pass",
"def _generators_for_H(self):\n if self.level() in [1, 2]:\n return []\n return [ZZ(x) for x in IntegerModRing(self.level()).unit_gens()]",
"def semigroup_generators(self):\n return self.ambient().semigroup_generators().map(self.retract)"
] | [
"0.7357476",
"0.65779585",
"0.630143",
"0.6167034",
"0.60643387",
"0.6042559",
"0.59550077",
"0.59423023",
"0.59166485",
"0.59069633",
"0.5845324",
"0.5834484",
"0.5793289",
"0.5775339",
"0.57738566",
"0.5773728",
"0.57282877",
"0.5725708",
"0.5683938",
"0.56534415",
"0.5646514",
"0.55876046",
"0.55876046",
"0.55725265",
"0.5567444",
"0.55561393",
"0.5553452",
"0.55477244",
"0.55325866",
"0.5528738"
] | 0.7255371 | 1 |
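The head_pumps and power_pumps records above split the pump generator by pump type: head-curve pumps versus constant-power pumps. The sketch below distinguishes them by class; the `HeadPump`/`PowerPump` class names and the `pump_curve_name`/`power` attributes come from code elsewhere in this dump, while the import path and the rest of the scaffolding are assumptions.

```python
# Sketch: tell head-curve pumps apart from constant-power pumps. Assumes wntr;
# the import path for HeadPump/PowerPump is an assumption.
import wntr
from wntr.network.elements import HeadPump, PowerPump

wn = wntr.network.WaterNetworkModel("Net3.inp")

for name, pump in wn.pumps():
    if isinstance(pump, HeadPump):
        print(name, "head pump, curve:", pump.pump_curve_name)
    elif isinstance(pump, PowerPump):
        print(name, "power pump, power:", pump.power)
```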
Generator to get all PRVs. Yields (name, valve) tuples. | def prvs(self):
for name in self._prvs:
yield name, self._data[name] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Vrep_generator(self):\n for V in self.Vrepresentation():\n yield V",
"def __iter__(self):\n yield from self.gen",
"def __iter__(self):\n from sage.combinat.posets.posets import FinitePosets_n\n n = 0\n while True:\n for P in FinitePosets_n(n):\n yield P\n n += 1",
"def __iter__(self):\n for runspec in self.runspecs:\n yield runspec",
"def pbvs(self):\n for name in self._pbvs:\n yield name, self._data[name]",
"def _generators(self):\n return self.free_group.generators",
"def iterator(self):\n yield",
"def __iter__(self):\n return self._product_generator()",
"def __iter__(self):\n for plug in self.plugs:\n yield plug",
"def get_processes():\n yield from psutil.process_iter()",
"def __iter__(self):\n for sample in self.samples:\n yield sample",
"def __iter__(self):\n for run in self.runs:\n yield run",
"def __iter__(self):\n for benchinst in sorted(self.instances.values()):\n yield benchinst",
"def yieldRPC(remoteYields): #Status: WIP\r\n pass",
"def values(self):\n for row in self.yield_matrix:\n yield FissionYield(self.products, row)",
"def vytvorit_generator():\n mylist = range(3)\n print 'mylist = ', mylist\n for element in mylist:\n yield element",
"def iterate(self):\n yield self\n for x in self:\n for y in x.iterate():\n yield y",
"def generator(self):\n return [None, 1]",
"def psvs(self):\n for name in self._psvs:\n yield name, self._data[name]",
"def __iter__(self):\n for instresult in self.instresults:\n yield instresult",
"def pumps(self):\n for name in self._pumps:\n yield name, self._data[name]",
"def generators(self):\n return self._generators",
"def __iter__(self):\n for sample in self.data:\n yield sample",
"def gpvs(self):\n for name in self._gpvs:\n yield name, self._data[name]",
"def __iter__(self):\n yield from self.calls",
"def ticker_generator():\n return (v for v in load_equities().values)",
"def __iter__(self):\n for benchclass in sorted(self.classes.values()):\n yield benchclass",
"def __iter__(self):\n yield from self.qc_mol\n yield from self.br_mol\n yield from self.pc_mol",
"def enumerate():\n for p in __all__:\n yield (p, scanners[p])",
"def pipes(self):\n for name in self._pipes:\n yield name, self._data[name]"
] | [
"0.6743858",
"0.6484807",
"0.6422032",
"0.63920593",
"0.63864964",
"0.6282362",
"0.62352467",
"0.6220532",
"0.61812013",
"0.6163182",
"0.60983425",
"0.6042183",
"0.60415554",
"0.6041262",
"0.60401404",
"0.6005562",
"0.59914434",
"0.59614915",
"0.5941197",
"0.59345305",
"0.5916342",
"0.5906904",
"0.59051085",
"0.58988523",
"0.5876848",
"0.5858053",
"0.5851061",
"0.5830939",
"0.5823256",
"0.58195025"
] | 0.6654521 | 1 |
Generator to get all PSVs. Yields (name, valve) tuples. | def psvs(self):
for name in self._psvs:
yield name, self._data[name] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Vrep_generator(self):\n for V in self.Vrepresentation():\n yield V",
"def prvs(self):\n for name in self._prvs:\n yield name, self._data[name]",
"def pbvs(self):\n for name in self._pbvs:\n yield name, self._data[name]",
"def gpvs(self):\n for name in self._gpvs:\n yield name, self._data[name]",
"def values(self):\n for row in self.yield_matrix:\n yield FissionYield(self.products, row)",
"def get_processes():\n yield from psutil.process_iter()",
"def __iter__(self):\n from sage.combinat.posets.posets import FinitePosets_n\n n = 0\n while True:\n for P in FinitePosets_n(n):\n yield P\n n += 1",
"def items(self):\n\t\tfor k, vs in self.multiple.items():\n\t\t\tfor v in vs: yield k, v",
"def vertex_generator(self):\n for V in self.Vrepresentation():\n if V.is_vertex():\n yield V",
"def iter_sequence(self):\n for res_name, fragment in self.sequence_fragment_list:\n yield res_name",
"def pump_curves(self):\n for key in self._pump_curves:\n yield key, self._data[key]",
"def __iter__(self):\n term_v = yicespy.term_vector_t()\n yicespy.yices_init_term_vector(term_v)\n #todo here\n status = yicespy.yices_model_collect_defined_terms(self.yices_model, term_v)\n self._check_error(status)\n for d in term_v:\n try:\n pysmt_d = self.converter.back(d())\n yield pysmt_d, self.get_value(pysmt_d)\n except UndefinedSymbolError:\n # avoids problems with symbols generated by z3\n pass\n yicespy.yices_delete_term_vector(term_v)",
"def parse(self):\n gen = self.v6_gen() # read from workers\n gen = self.tuple_gen(gen) # convert v6->tuple\n gen = self.batch_gen(gen) # assemble into batches\n for b in gen:\n yield b",
"def iterator(self):\n yield",
"def pumps(self):\n for name in self._pumps:\n yield name, self._data[name]",
"def iter_svgs(self):\n for name in self.parent.layers:\n yield name, self.parent.layers[name]\n for elem in self.parent.elements:\n if isinstance(elem, SVG):\n yield None, elem",
"def _get_all(cls) -> Iterator[\"PermutationStatistic\"]:\n yield from (cls(name, func) for name, func in PermutationStatistic._STATISTICS)",
"def __iter__(self):\n yield from self.gen",
"def iter_segments(self):\n return\n yield",
"def valves(self):\n for name in self._valves:\n yield name, self._data[name]",
"def __next__(self):\n if self.usespark:\n raise ValueError(\"DVID source iteration in Spark not supported\")\n \n if self.current_spot >= len(self.partitions):\n raise StopIteration()\n\n # RDD or array of [(partition, vol)]\n vols = self._retrieve_vol(self.current_spot, self.iteration_size)\n self.current_spot += self.iteration_size\n return vols",
"def yield_stats(go_analysis):\n for i in xrange(go_analysis.nrow()):\n yield go_analysis[0][i], go_analysis[1][i], go_analysis[2][i], go_analysis[3][i], p_value_from_r(go_analysis[4][i]), p_value_from_r(go_analysis[5][i])",
"def check_valves(self):\n for name in self._pipes:\n if self._data[name].check_valve:\n yield name",
"def semigroup_generators(self):",
"def __iter__(self):\n for sample in self.data:\n yield sample",
"def __iter__(self):\n for partition in self._partition_set:\n yield partition",
"def test_programs():\n yield 4, 4, 1\n yield 16, 12, 2",
"def __iter__(self):\n for runspec in self.runspecs:\n yield runspec",
"def enumerate():\n for p in __all__:\n yield (p, scanners[p])",
"def __iter__(self):\n for sample in self.samples:\n yield sample"
] | [
"0.66791296",
"0.6633431",
"0.65403515",
"0.62039876",
"0.59340304",
"0.5796213",
"0.5741342",
"0.57115084",
"0.56647044",
"0.5630856",
"0.55973125",
"0.5592173",
"0.5588894",
"0.55812263",
"0.55714786",
"0.55646497",
"0.5525963",
"0.55100465",
"0.5499898",
"0.5481003",
"0.5474632",
"0.5470918",
"0.5468836",
"0.5454808",
"0.5448127",
"0.5431111",
"0.54262733",
"0.54209405",
"0.54201007",
"0.54155415"
] | 0.7467829 | 0 |
Generator to get all PBVs. Yields (name, valve) tuples. | def pbvs(self):
for name in self._pbvs:
yield name, self._data[name] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Vrep_generator(self):\n for V in self.Vrepresentation():\n yield V",
"def __iter__(self):\n from sage.combinat.posets.posets import FinitePosets_n\n n = 0\n while True:\n for P in FinitePosets_n(n):\n yield P\n n += 1",
"def __iter__(self):\n yield from self.gen",
"def get_processes():\n yield from psutil.process_iter()",
"def generator(self):\n return [None, 1]",
"def yieldRPC(remoteYields): #Status: WIP\r\n pass",
"def __iter__(self):\n yield from self.qc_mol\n yield from self.br_mol\n yield from self.pc_mol",
"def iterator(self):\n yield",
"def __iter__(self):\n return self._product_generator()",
"def items():\n for i in self._iter_restrict(zeros, ones):\n yield self.pcdata[i]",
"def _generators(self):\n return self.free_group.generators",
"def __iter__(self):\n for b in self.x:\n yield b",
"def pumps(self):\n for name in self._pumps:\n yield name, self._data[name]",
"def __iter__(self):\n for sample in self.samples:\n yield sample",
"def __iter__(self) -> Generator:\r\n yield from self.sequence",
"def __iter__(self):\n for i in self.ref:\n yield PythonBytecodeInPreproc(i)",
"def vytvorit_generator():\n mylist = range(3)\n print 'mylist = ', mylist\n for element in mylist:\n yield element",
"def blob_generator(self):\n for blob in self.data:\n yield blob",
"def __iter__(self):\n return self.new_generator()",
"def __iter__(self):\n for x in self.seq: yield x",
"def values(self):\n for row in self.yield_matrix:\n yield FissionYield(self.products, row)",
"def generator():\n mygenerator = (x for x in range(3))\n for element in mygenerator:\n print 'poprve = ', element\n\n for element in mygenerator:\n print 'podruhe = ', element",
"def enumerate(self):\n\n done = False\n while not done:\n mcs = self.compute()\n\n if mcs != None:\n yield mcs\n else:\n done = True",
"def gpvs(self):\n for name in self._gpvs:\n yield name, self._data[name]",
"def sequences(self):\n # i am one\n yield self\n # nothing further\n return",
"def __iter__(self):\n for runspec in self.runspecs:\n yield runspec",
"def __iter__(self):\n for sample in self.data:\n yield sample",
"def get_volume_batch_generators(self):\n # volgeninfo = []\n def create_volgen(shape, w, padding, features, masks):\n w = np.asarray(w)\n padding = np.asarray(padding)\n W = w - padding * 2\n iters = np.int32(np.ceil((np.asarray([s for s in shape if s > 1]) + padding) * 1.0 / (W + padding)))\n for counts in counter_generator(iters):\n start = -padding + (w - padding) * counts\n end = (w - padding) * (counts + 1)\n subf, subm = self._extract_sample(features, masks, copy.deepcopy(start), copy.deepcopy(end), shape)\n ma = np.asarray([subm])\n fe = np.asarray([subf])\n if self.channels_first:\n ndims = len(fe.shape)\n neworder = [0, ndims - 1] + [i for i in range(1, ndims - 1)]\n fe = np.transpose(fe, neworder)\n ma = np.transpose(ma, neworder)\n yield fe, ma, start, end\n\n def volgeninfo(tps):\n for tp in tps:\n features, masks = self._get_features_and_masks(tp)\n spatial_shape = np.shape(features[0])\n volgen = create_volgen(spatial_shape, self.w, self.p, features, masks)\n yield [volgen, tp, spatial_shape, self.w, self.p]\n\n return volgeninfo(self.tps)",
"def parse(self):\n gen = self.v6_gen() # read from workers\n gen = self.tuple_gen(gen) # convert v6->tuple\n gen = self.batch_gen(gen) # assemble into batches\n for b in gen:\n yield b",
"def pipes(self):\n for name in self._pipes:\n yield name, self._data[name]"
] | [
"0.6779908",
"0.6634297",
"0.66080254",
"0.63646996",
"0.6219644",
"0.61993605",
"0.61894935",
"0.61807907",
"0.61705124",
"0.6165992",
"0.6161226",
"0.61310405",
"0.6110338",
"0.6062078",
"0.6056704",
"0.6054844",
"0.60530126",
"0.6015372",
"0.60131645",
"0.60105705",
"0.60054755",
"0.5999554",
"0.5991821",
"0.5977608",
"0.5963708",
"0.59555835",
"0.5948622",
"0.59368235",
"0.5924751",
"0.5919977"
] | 0.7325567 | 0 |
Generator to get all GPVs. Yields (name, valve) tuples. | def gpvs(self):
for name in self._gpvs:
yield name, self._data[name] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _generators(self):\n return self.free_group.generators",
"def __iter__(self):\n yield from self.gen",
"def Vrep_generator(self):\n for V in self.Vrepresentation():\n yield V",
"def generator():\n mygenerator = (x for x in range(3))\n for element in mygenerator:\n print 'poprve = ', element\n\n for element in mygenerator:\n print 'podruhe = ', element",
"def gen_graph():\n if config_pagination:\n gdata = tgraph.call_graph(offset=offset, limit=limit)\n else:\n gdata = tgraph.call_graph(start=start, end=end, contineous=contineous)\n\n for data in gdata:\n yield data",
"def ticker_generator():\n return (v for v in load_equities().values)",
"def generator(self):\n return [None, 1]",
"def getGenerators(self) -> list:\n return self.state[GENERATORS]",
"def pbvs(self):\n for name in self._pbvs:\n yield name, self._data[name]",
"def semigroup_generators(self):",
"def generators(self):\n return self._generators",
"def yieldRPC(remoteYields): #Status: WIP\r\n pass",
"def random_values():\n while True:\n yield random()",
"def rvg(g: Generator):\n # noinspection PyUnreachableCode\n try:\n return next(g)\n except StopIteration as r:\n return r.value",
"def vytvorit_generator():\n mylist = range(3)\n print 'mylist = ', mylist\n for element in mylist:\n yield element",
"def generator(self):\n global_index = 0\n n_params = len(self.params)\n while (global_index < self.NXFLTEXP*self.paramspace):\n # skip row that have data already\n while (np.sum(self.spectra_hdu.data[global_index][1]) > 0.0): \n global_index += self.NXFLTEXP\n if (global_index >= self.NXFLTEXP*self.paramspace): break\n if (global_index >= self.NXFLTEXP*self.paramspace): break\n\n # get indexes in each grid; the last grid changing the fastest\n param_indexes = np.zeros(n_params, dtype=int)\n param_values = np.zeros(n_params)\n N0 = self.paramspace\n for i in range(n_params):\n (p_name, p_grid, p_log, p_frozen) = self.params[i]\n N = len(p_grid)\n N0 /= N\n p_index = int((global_index/3)//N0 % N)\n #print('global_index',global_index)\n #print('p_index',p_index)\n #print('p_grid[p_index]',p_grid[p_index])\n #print('p_grid',p_grid)\n param_indexes[i] = p_index\n param_values[i] = p_grid[p_index]\n\n # write parameter values (repeat the same parameters for each spectrum of the set) \n for i in range(self.NXFLTEXP):\n self.spectra_hdu.data[global_index+i][0] = param_values\n #end for\n\n # return total index, array of grid indexes, and array of grid values\n #sys.stderr.write(\"> generator: passing spectrum index %d (%s %s)\\n\" % (global_index, str(param_indexes), str(param_values)))\n yield (global_index, param_values, param_indexes, self.energies)\n global_index += self.NXFLTEXP\n #end while",
"def semigroup_generators(self):\n return self.ambient().semigroup_generators().map(self.retract)",
"def values(self):\n for row in self.yield_matrix:\n yield FissionYield(self.products, row)",
"def sample_generator(self, sess):\n\n to_return = {\n 'g_sample': self.G_sample_test,\n }\n return sess.run(to_return)",
"def __iter__(self):\n for sample in self.samples:\n yield sample",
"def generators(self) -> List[Generator]:\n return self._generators",
"def get_volume_batch_generators(self):\n # volgeninfo = []\n def create_volgen(shape, w, padding, features, masks):\n w = np.asarray(w)\n padding = np.asarray(padding)\n W = w - padding * 2\n iters = np.int32(np.ceil((np.asarray([s for s in shape if s > 1]) + padding) * 1.0 / (W + padding)))\n for counts in counter_generator(iters):\n start = -padding + (w - padding) * counts\n end = (w - padding) * (counts + 1)\n subf, subm = self._extract_sample(features, masks, copy.deepcopy(start), copy.deepcopy(end), shape)\n ma = np.asarray([subm])\n fe = np.asarray([subf])\n if self.channels_first:\n ndims = len(fe.shape)\n neworder = [0, ndims - 1] + [i for i in range(1, ndims - 1)]\n fe = np.transpose(fe, neworder)\n ma = np.transpose(ma, neworder)\n yield fe, ma, start, end\n\n def volgeninfo(tps):\n for tp in tps:\n features, masks = self._get_features_and_masks(tp)\n spatial_shape = np.shape(features[0])\n volgen = create_volgen(spatial_shape, self.w, self.p, features, masks)\n yield [volgen, tp, spatial_shape, self.w, self.p]\n\n return volgeninfo(self.tps)",
"def __iter__(self):\n return self._product_generator()",
"def iterator(self):\n yield",
"def __iter__(self):\n from sage.combinat.posets.posets import FinitePosets_n\n n = 0\n while True:\n for P in FinitePosets_n(n):\n yield P\n n += 1",
"def nextGen(self):\n\n p = []\n while len(p) < len(self.p):\n #select mates and produce offspring\n p1, p2 = self.select()\n offspring = self.mate(p1, p2)\n\n #put the offspring in the next generation (with mutation)\n for child in offspring:\n child=self.mutate(child)\n p.append(child)\n \n\n # the world belongs to the new generation\n return p",
"def items():\n for point in boolfunc.iter_points(inputs):\n gpnt = {v: val for v, val in point.items()\n if v not in unmapped}\n gval = gfunc.restrict(gpnt)\n # mapped function must be completely specified\n assert isinstance(gval, TTConstant)\n fpnt = {v: val for v, val in point.items()\n if v in unmapped}\n fpnt[gvar] = int(gval)\n yield func.restrict(fpnt).pcdata[0]",
"def get_processes():\n yield from psutil.process_iter()",
"def iterCurves(self):\n for c in range(self.length()):\n yield self.curve(c)",
"def items(self):\n for ene, row in zip(self.energies, self.yield_matrix):\n yield ene, FissionYield(self.products, row)"
] | [
"0.65539247",
"0.6486396",
"0.63541824",
"0.6308996",
"0.6222051",
"0.62187046",
"0.6163751",
"0.6140971",
"0.61115086",
"0.6093468",
"0.60042065",
"0.59355825",
"0.59219235",
"0.5896601",
"0.5890809",
"0.58848965",
"0.58635044",
"0.5859202",
"0.58404005",
"0.5819139",
"0.5817807",
"0.5817615",
"0.58147043",
"0.5813731",
"0.5809013",
"0.57894295",
"0.5776172",
"0.57325864",
"0.5710965",
"0.57071275"
] | 0.69035214 | 0 |
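The prvs, psvs, pbvs, and gpvs records above are the per-type valve generators. Because these records do not show how (or whether) they are exposed on the model object, the sketch below gets the same breakdown by grouping the output of the generic valves() generator by class; the class names in the sample output are assumptions.

```python
# Sketch: tally valves by type without guessing per-type accessor names.
# Assumes wntr and an EPANET input file.
from collections import Counter

import wntr

wn = wntr.network.WaterNetworkModel("Net3.inp")

counts = Counter(type(valve).__name__ for _name, valve in wn.valves())
print(counts)  # e.g. Counter({'PRValve': 3, 'TCValve': 1}) -- names assumed
```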
Provide a transactional scope around a series of operations. | def session_scope():
session = Session()
try:
yield session
session.commit()
except:
session.rollback()
raise
finally:
session.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def session_scope(raise_exception=True):\n session = cls.Session()\n try:\n yield session\n session.commit()\n except Exception:\n session.rollback()\n if raise_exception:\n raise\n finally:\n session.close()",
"def transaction_scope(session, should_close=False):\n try:\n yield session\n session.commit()\n except Exception:\n session.rollback()\n raise\n finally:\n if should_close:\n session.close()",
"def session_scope(Session):\n session = Session()\n try:\n yield session\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()",
"def session_scope():\n session = Session()\n try:\n yield session\n session.commit()\n except InvalidRequestError:\n session.rollback()\n raise\n finally:\n session.close()",
"def session_scope(raise_exception=True):\n session = Session()\n try:\n yield session\n session.commit()\n except Exception:\n session.rollback()\n if raise_exception:\n raise\n finally:\n session.close()",
"def transactional(*tr_args, **tr_kwargs):\n\n def decorate(func):\n try:\n parameter = tr_kwargs[\"parameter\"]\n except KeyError:\n parameter = \"tr\"\n\n wfunc = func\n while getattr(wfunc, \"__wrapped__\", None):\n wfunc = wfunc.__wrapped__\n if hasattr(inspect, \"getfullargspec\"):\n index = inspect.getfullargspec(wfunc).args.index(parameter)\n else:\n index = inspect.getargspec(wfunc).args.index(parameter)\n\n if getattr(func, \"_is_coroutine\", False):\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n if isinstance(args[index], TransactionRead):\n raise asyncio.Return((yield asyncio.From(func(*args, **kwargs))))\n\n largs = list(args)\n tr = largs[index] = args[index].create_transaction()\n\n while True:\n try:\n ret = yield asyncio.From(func(*largs, **kwargs))\n yield asyncio.From(tr.commit())\n raise asyncio.Return(ret)\n except FDBError as e:\n yield asyncio.From(tr.on_error(e.code))\n\n else:\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n # We can't throw this from the decorator, as when a user runs\n # >>> import fdb ; fdb.api_version(fdb.LATEST_API_VERSION)\n # the code above uses @transactional before the API version is set\n if fdb.get_api_version() >= 630 and inspect.isgeneratorfunction(func):\n raise ValueError(\n \"Generators can not be wrapped with fdb.transactional\"\n )\n\n if isinstance(args[index], TransactionRead):\n return func(*args, **kwargs)\n\n largs = list(args)\n tr = largs[index] = args[index].create_transaction()\n\n committed = False\n # retries = 0\n # start = datetime.datetime.now()\n # last = start\n\n while not committed:\n ret = None\n try:\n ret = func(*largs, **kwargs)\n if fdb.get_api_version() >= 630 and inspect.isgenerator(ret):\n raise ValueError(\n \"Generators can not be wrapped with fdb.transactional\"\n )\n tr.commit().wait()\n committed = True\n except FDBError as e:\n tr.on_error(e.code).wait()\n\n # now = datetime.datetime.now()\n # td = now - last\n # elapsed = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / float(10**6)\n # if elapsed >= 1:\n # td = now - start\n # print (\"fdb WARNING: long transaction (%gs elapsed in transactional function \\\"%s\\\" (%d retries, %s))\"\n # % (elapsed, func.__name__, retries, committed and \"committed\" or \"not yet committed\"))\n # last = now\n\n # retries += 1\n return ret\n\n return wrapper\n\n if not tr_args:\n # Being called with parameters (possibly none); return a\n # decorator\n return decorate\n elif len(tr_args) == 1 and not tr_kwargs:\n # Being called as a decorator\n return decorate(tr_args[0])\n else:\n raise Exception(\"Invalid use of transactional decorator.\")",
"def session_scope():\n session = session_factory()\n try:\n yield session\n session.commit()\n except: # noqa\n session.rollback()\n raise\n finally:\n session.close()",
"def session_scope():\n\n session = Session()\n try:\n yield session\n session.commit()\n except sqlalchemy.exc.SQLAlchemyError:\n session.rollback()\n raise\n finally:\n session.close()",
"def session_scope(session: Session) -> Generator[Session, None, None]:\n try:\n yield session\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()",
"def session_scope():\n s = get_session()\n s.expire_on_commit = False\n try:\n yield s\n s.commit()\n except:\n s.rollback()\n raise\n finally:\n s.close()",
"def transaction(fn):\n @wraps(fn)\n def transaction_inner(*args, **kwargs): #1\n start = time()\n stmp_id = id_gen()\n session = operation.session\n sessionid = id(session)\n \n # set distributed transaction id to 0 for single transaction\n try:\n operation.id\n except: \n operation.id = str(uuid4())\n \n try:\n # get runtime info\n cp = current_process()\n ct = current_thread() \n \n # format request params\n params = []\n for item in args:\n params.append(unicode(item))\n for k,v in kwargs.iteritems():\n params.append(u\"'%s':'%s'\" % (k, v))\n \n # call internal function\n res = fn(*args, **kwargs)\n \n session.commit()\n elapsed = round(time() - start, 4)\n logger.debug(u'%s.%s - %s - transaction - %s - %s - OK - %s' % (\n operation.id, stmp_id, sessionid, fn.__name__, \n params, elapsed))\n \n return res\n except ModelError as ex:\n elapsed = round(time() - start, 4)\n logger.error(u'%s.%s - %s - transaction - %s - %s - KO - %s' % (\n operation.id, stmp_id, sessionid, fn.__name__, \n params, elapsed))\n if ex.code not in [409]:\n #logger.error(ex.desc, exc_info=1)\n logger.error(ex.desc)\n \n session.rollback()\n raise TransactionError(ex.desc, code=ex.code)\n except IntegrityError as ex:\n elapsed = round(time() - start, 4)\n logger.error(u'%s.%s - %s - transaction - %s - %s - KO - %s' % (\n operation.id, stmp_id, sessionid, fn.__name__, \n params, elapsed))\n logger.error(ex.orig, exc_info=1)\n logger.error(ex.orig)\n\n session.rollback()\n raise TransactionError(ex.orig)\n except DBAPIError as ex:\n elapsed = round(time() - start, 4)\n logger.error(u'%s.%s - %s - transaction - %s - %s - KO - %s' % (\n operation.id, stmp_id, sessionid, fn.__name__, \n params, elapsed))\n #logger.error(ex.orig, exc_info=1)\n logger.error(ex.orig)\n \n session.rollback()\n raise TransactionError(ex.orig)\n \n except Exception as ex:\n elapsed = round(time() - start, 4)\n logger.error(u'%s.%s - %s - transaction - %s - %s - KO - %s' % (\n operation.id, stmp_id, sessionid, fn.__name__, \n params, elapsed))\n #logger.error(ex, exc_info=1)\n logger.error(ex)\n \n session.rollback()\n raise TransactionError(ex)\n\n return transaction_inner",
"def session_scope():\n # Create session.\n session = sessionmaker()\n session.configure(bind=engine)\n session = session()\n\n try:\n yield session\n session.commit()\n\n # Rollback on any exception.\n except Exception as e:\n logging.info('Rollback: %s', e)\n session.rollback()\n raise\n # Close no matter what.\n finally:\n session.close()",
"def session_scope():\n session = Session(bind=engine)\n try:\n yield session\n except:\n session.rollback()\n raise\n finally:\n session.close()",
"def session_scope(engine):\n DBSession = sessionmaker(bind=engine)\n Session = DBSession()\n try:\n yield Session\n Session.commit()\n except:\n Session.rollback()\n raise\n finally:\n Session.close()",
"def session_scope(session_factory):\n session = session_factory()\n try:\n yield session\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()",
"def session_scope(session=None):\n if not session:\n session = Session()\n try:\n yield session\n session.commit()\n except:\n session.rollback()\n raise\n\n try:\n session.close()\n except:\n session.remove()",
"def session_scope(session_builder):\n session = session_builder()\n try:\n yield session\n session.commit()\n except Exception:\n session.rollback()\n raise\n finally:\n session.close()",
"def Transaction(db):\n def wrapper(f):\n def transaction_wrapper(*args, **kwargs):\n tx = db.beginTx()\n \n try: args[0].transaction = tx\n except: pass\n \n result = f(*args, **kwargs)\n tx.success()\n tx.close()\n return result\n return transaction_wrapper\n return wrapper",
"def db_scope(self):\n\t\t# session: orm.Session = __class__.session_maker()\n\t\ttry:\n\t\t\tyield self\n\t\t\tself.session.flush()\n\t\t\tself.session.commit()\n\t\texcept:\n\t\t\tself.session.rollback()\n\t\t\traise",
"def session_scope(session=None, *, isolation_level=None):\n if session is None:\n session = Session()\n\n if isolation_level:\n session.connection(\n execution_options={\"isolation_level\": isolation_level},\n )\n\n try:\n yield session\n session.commit()\n except Exception as e:\n session.rollback()\n raise\n finally:\n session.close()",
"def netsted_transaction(fn):\n @wraps(fn)\n def netsted_transaction_inner(*args, **kwargs): #1\n start = time()\n stmp_id = id_gen()\n session = operation.session\n sessionid = id(session)\n \n commit = False\n if operation.transaction is None:\n operation.transaction = id_gen()\n commit = True\n logger.debug(u'Create transaction %s' % operation.transaction)\n else:\n logger.debug(u'Use transaction %s' % operation.transaction)\n \n # set distributed transaction id to 0 for single transaction\n try:\n operation.id\n except: \n operation.id = str(uuid4())\n \n try:\n # get runtime info\n cp = current_process()\n ct = current_thread() \n \n # format request params\n params = []\n for item in args:\n params.append(unicode(item))\n for k,v in kwargs.iteritems():\n params.append(u\"'%s':'%s'\" % (k, v))\n \n # call internal function\n res = fn(*args, **kwargs)\n \n if commit is True:\n session.commit()\n logger.debug(u'Commit transaction %s' % operation.transaction)\n operation.transaction = None\n \n elapsed = round(time() - start, 4)\n logger.debug(u'%s.%s - %s - transaction - %s - %s - OK - %s' % (\n operation.id, stmp_id, sessionid, fn.__name__, \n params, elapsed))\n \n return res\n except ModelError as ex:\n elapsed = round(time() - start, 4)\n logger.error(u'%s.%s - %s - transaction - %s - %s - KO - %s' % (\n operation.id, stmp_id, sessionid, fn.__name__, \n params, elapsed))\n if ex.code not in [409]:\n logger.error(ex.desc, exc_info=1)\n logger.error(ex.desc)\n \n #session.rollback()\n rollback(session, commit)\n raise TransactionError(ex.desc, code=ex.code)\n except IntegrityError as ex:\n elapsed = round(time() - start, 4)\n logger.error(u'%s.%s - %s - transaction - %s - %s - KO - %s' % (\n operation.id, stmp_id, sessionid, fn.__name__, \n params, elapsed))\n logger.error(ex.orig, exc_info=1)\n logger.error(ex.orig)\n\n #session.rollback()\n rollback(session, commit)\n raise TransactionError(ex.orig)\n except DBAPIError as ex:\n elapsed = round(time() - start, 4)\n logger.error(u'%s.%s - %s - transaction - %s - %s - KO - %s' % (\n operation.id, stmp_id, sessionid, fn.__name__, \n params, elapsed))\n logger.error(ex.orig, exc_info=1)\n logger.error(ex.orig)\n \n #session.rollback()\n rollback(session, commit)\n raise TransactionError(ex.orig)\n except TransactionError as ex:\n elapsed = round(time() - start, 4)\n logger.error(u'%s.%s - %s - transaction - %s - %s - KO - %s' % (\n operation.id, stmp_id, sessionid, fn.__name__, \n params, elapsed))\n logger.error(ex.orig, exc_info=1)\n logger.error(ex.orig)\n \n #session.rollback()\n rollback(session, commit)\n raise\n except Exception as ex:\n elapsed = round(time() - start, 4)\n logger.error(u'%s.%s - %s - transaction - %s - %s - KO - %s' % (\n operation.id, stmp_id, sessionid, fn.__name__, \n params, elapsed))\n logger.error(ex, exc_info=1)\n logger.error(ex)\n \n #session.rollback()\n rollback(session, commit)\n raise TransactionError(ex)\n\n return netsted_transaction_inner",
"def transaction(session: Union[scoped_session, Session, \"SessionlessContext\"]):\n # temporary hack; need to fix access to scoped_session callable, not proxy\n if isinstance(session, scoped_session):\n session = session()\n # hack: this could be model.store.SessionlessContext; then we don't need to do anything\n elif not isinstance(session, Session):\n yield\n return # exit: can't use as a Session\n\n if not session.in_transaction():\n with session.begin():\n yield\n else:\n yield",
"def transact(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n with transaction() as conn:\n fn(conn, *args, **kwargs)\n return wrapper",
"def atomic(self, savepoint=True):\n return TransactionContext(*self.values(), savepoint=True)",
"async def transact(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:\n stats.inc('transactions', 'SQL')\n async with self.pool.acquire() as conn:\n async with conn.cursor() as cur:\n try:\n ret = await func(cur, *args, **kwargs)\n except:\n await conn.rollback()\n raise\n else:\n await conn.commit()\n return ret",
"def session_scope(self):\n DBSession = sessionmaker(bind=self.engine)\n self.session = DBSession()\n try:\n yield self\n self.session.commit()\n except:\n self.session.rollback()\n raise\n finally:\n self.session.close()",
"def with_transaction(session, f):\n try:\n f(session)\n session.commit()\n except Exception as e:\n session.rollback()\n raise e",
"def transaction() -> Generator:\n session = current_session()\n logger.debug('transaction with session %s', id(session))\n try:\n yield session\n # Only commit if there are un-flushed changes. The caller may commit\n # explicitly, e.g. to do exception handling.\n if session.dirty or session.deleted or session.new:\n session.commit()\n logger.debug('committed!')\n except ClassicBaseException as e:\n logger.debug('Command failed, rolling back: %s', str(e))\n session.rollback()\n raise # Propagate exceptions raised from this module.\n except InvalidEvent:\n session.rollback()\n raise\n except Exception as e:\n logger.debug('Command failed, rolling back: %s', str(e))\n session.rollback()\n raise TransactionFailed('Failed to execute transaction') from e",
"def transaction(self, retries=0):\n def decorator(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n attempts = 0\n with self.get_pool().connection() as conn:\n proxy = ConnectionProxy(conn)\n while True:\n try:\n retval = func(proxy, *args, **kwargs)\n conn.commit()\n except IntegrityError:\n attempts += 1\n if attempts > retries:\n raise\n conn.rollback()\n except Exception:\n conn.rollback()\n raise\n else:\n return retval\n return wrapper\n return decorator",
"def transactionally(transactionCreator):\n def thunk(operation):\n return inTransaction(transactionCreator, operation)\n return thunk"
] | [
"0.6485288",
"0.64705354",
"0.6458157",
"0.645436",
"0.6433442",
"0.642318",
"0.63953996",
"0.63516915",
"0.62020296",
"0.61909795",
"0.61490947",
"0.6068438",
"0.6058901",
"0.6056808",
"0.6052401",
"0.60080963",
"0.59846854",
"0.5968762",
"0.59686553",
"0.59340906",
"0.59041834",
"0.586926",
"0.5842099",
"0.5790376",
"0.57675755",
"0.57520175",
"0.57456857",
"0.57428503",
"0.572996",
"0.5712844"
] | 0.6569059 | 0 |
Error returned if only invalid IP addresses are passed in input file. | def test_input_file_invalid_ip_addresses_passsed(self, api_client):
runner = CliRunner()
expected = (
"Error: at least one valid IP address must be passed either as an "
"argument (IP_ADDRESS) or through the -i/--input_file option."
)
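        # The -i option receives a file whose only entry is not a valid IP address.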
result = runner.invoke(
subcommand.ip,
["-i", StringIO("not-an-ip")],
parent=Context(main, info_name="greynoise"),
)
assert result.exit_code == -1
assert "Usage: greynoise ip" in result.output
assert expected in result.output
api_client.ip.assert_not_called() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_input_file_invalid_ip_addresses_passsed(self, api_client):\n runner = CliRunner()\n\n expected = (\n \"Error: at least one valid IP address must be passed either as an \"\n \"argument (IP_ADDRESS) or through the -i/--input_file option.\"\n )\n\n result = runner.invoke(\n subcommand.quick,\n [\"-i\", StringIO(\"not-an-ip\")],\n parent=Context(main, info_name=\"greynoise\"),\n )\n assert result.exit_code == -1\n assert \"Usage: greynoise quick\" in result.output\n assert expected in result.output\n api_client.quick.assert_not_called()",
"def validateIP():\n try:\n s = socket.inet_aton(args.target)\n except socket.error:\n print(\"\")\n print(f\"{bad_cmd} Bad IP address\")\n print(\"\")\n sys.exit()",
"def __checkIPAddr(self,ip):\n if not iplib.checkIPAddrWithoutMask(ip):\n raise GeneralException(errorText(\"GENERAL\",\"INVALID_IP_ADDRESS\")%ip)",
"def read_in_address_file(file):\n address_list = list()\n lines = 0\n valid_ips = 0\n with file as f:\n for n in file:\n lines += 1\n if validate_ip(n.strip()):\n address_list.append(n.strip())\n valid_ips += 1\n if valid_ips < lines:\n print(\"Of the {} lines in the file you supplied, only {} were valid. The latter will be used to call the \"\n \"API.\".format(lines, valid_ips))\n if valid_ips == 0:\n print(\"Please supply a valid IP address.\")\n address_list = None\n return address_list",
"def check_ip_format(self, ip_address):\n # regex for validating an Ip-address \n ip_regex = \"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\\/([0-9]|[1-2][0-9]|3[0-2]))?$\"\n\n # validate ip address\n r = re.compile(ip_regex)\n if(r.match(ip_address)):\n print(\"Valid IP address format\")\n self.target = ip_address\n return True\n else:\n print(R + \"{} is an invalid IP address format\".format(ip_address) + W)\n return False",
"def is_valid_ip(ip):\n ...",
"def validate_ip_address(self):\n\t\tip_list = webnotes.conn.get_value('Profile', self.user, 'restrict_ip', ignore=True)\n\t\t\n\t\tif not ip_list:\n\t\t\treturn\n\n\t\tip_list = ip_list.replace(\",\", \"\\n\").split('\\n')\n\t\tip_list = [i.strip() for i in ip_list]\n\n\t\tfor ip in ip_list:\n\t\t\tif webnotes.remote_ip.startswith(ip):\n\t\t\t\treturn\n\t\t\t\n\t\twebnotes.msgprint('Not allowed from this IP Address')\n\t\traise webnotes.AuthenticationError",
"def test_try_create_invalid_ip(self):\n\n name_file = 'api_ip/tests/sanity/ipv4/json/post/ipv4_10_0_0_430_net_5.json'\n response = self.client.post(\n '/api/v3/ipv4/',\n data=json.dumps(self.load_json_file(name_file)),\n content_type='application/json')\n\n self.compare_status(400, response.status_code)\n self.compare_values(\n 'Error save new IP.: 10.0.0.430',\n response.data['detail'])",
"def IsValidIP(ip):\n if ip != None:\n if ip.count('.') == 3:\n ipNumbers = ip.split('.')\n for number in ipNumbers:\n if not number.isdigit() or int(number) > 255:\n return False\n return ipNumbers\n return False",
"def validate_ip(ip):\n try:\n ipobj = IPy.IP(ip)\n if ipobj.iptype() == 'PRIVATE':\n print(\"IP addresses {} will be ignored as it is in a private network range.\".format(ip))\n ip = None\n except ValueError as ve:\n print(\"Invalid IP: {}\".format(ve.args))\n ip = None\n finally:\n return ip",
"def test_ip_addresses_exists():\n load_ips()\n validate_names()",
"def validate_ip(self, ip):\n if not ip:\n raise ValidationError(\"Please provide an actual IP or web address. You gave me: \" + ip)",
"def validate_ip_address(data, valid_values=None):\n\n msg = None\n try:\n # netaddr.core.ZEROFILL is only applicable to IPv4.\n # it will remove leading zeros from IPv4 address octets.\n ip = netaddr.IPAddress(validate_no_whitespace(data),\n flags=netaddr.core.ZEROFILL)\n # The followings are quick checks for IPv6 (has ':') and\n # IPv4. (has 3 periods like 'xx.xx.xx.xx')\n # NOTE(yamamoto): netaddr uses libraries provided by the underlying\n # platform to convert addresses. For example, inet_aton(3).\n # Some platforms, including NetBSD and OS X, have inet_aton\n # implementation which accepts more varying forms of addresses than\n # we want to accept here. The following check is to reject such\n # addresses. For Example:\n # >>> netaddr.IPAddress('1' * 59)\n # IPAddress('199.28.113.199')\n # >>> netaddr.IPAddress(str(int('1' * 59) & 0xffffffff))\n # IPAddress('199.28.113.199')\n # >>>\n if ':' not in data and data.count('.') != 3:\n msg = \"'%s' is not a valid IP address\" % data\n # A leading '0' in IPv4 address may be interpreted as an octal number,\n # e.g. 011 octal is 9 decimal. Since there is no standard saying\n # whether IP address with leading '0's should be interpreted as octal\n # or decimal, hence we reject leading '0's to avoid ambiguity.\n elif ip.version == 4 and str(ip) != data:\n msg = (\"'%(data)s' is not an accepted IP address, \"\n \"'%(ip)s' is recommended\") % {\"data\": data, \"ip\": ip}\n except Exception:\n msg = \"'%s' is not a valid IP address\" % data\n if msg:\n raise exceptions.DiagnoseException(msg)",
"def test_invalid_ip_address_as_argument(self, api_client):\n runner = CliRunner()\n\n expected = 'Error: Invalid value for \"[IP_ADDRESS]...\": not-an-ip\\n'\n\n result = runner.invoke(subcommand.ip, [\"not-an-ip\"])\n assert result.exit_code == 2\n assert expected in result.output\n api_client.ip.assert_not_called()",
"def test_validate_ip_ok():\n ip = '1.1.1.1'\n assert howisresolved.validate_ip(ip) is None",
"def test_try_create_out_of_range_ip_in_network(self):\n\n name_file = 'api_ip/tests/sanity/ipv4/json/post/out_of_range_ipv4_172_0_0_5_net_5.json'\n response = self.client.post(\n '/api/v3/ipv4/',\n data=json.dumps(self.load_json_file(name_file)),\n content_type='application/json')\n\n self.compare_status(400, response.status_code)\n self.compare_values(\n 'Ip 172.0.0.5 not available for network 5.',\n response.data['detail'])",
"def validate_input(self, IP, Port):\n exception = ServerInitError\n try:\n if IP != None:\n if re.match(IPRegex, IP):\n self.IP = IP\n else:\n exception = InavlidIPError\n raise\n\n if Port != None:\n if int(Port) in range(0,65535):\n self.Port = Port\n else:\n exception = InvalidPortError\n raise\n except:\n traceback_print_exc()",
"def valid_ip(ip):\n return valid_ipv4(ip) or valid_ipv6(ip)",
"def validate_iplist(argname, param, safe):\n _validate_all(argname, param, safe, _check_ip)",
"def test_addr_zip_bad_values(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_addr_zip(val))",
"def _valid_ip_addresses(self, addresses):\r\n if addresses == '':\r\n return ''\r\n error_addresses = []\r\n for addr in addresses.split(','):\r\n address = addr.strip()\r\n if not self._is_valid_ip(address):\r\n error_addresses.append(address)\r\n if error_addresses:\r\n msg = 'Invalid IP Address(es): {0}'.format(error_addresses)\r\n msg += ' Please fix the error(s) and try again.'\r\n raise forms.ValidationError(msg)\r\n\r\n return addresses",
"def check_IP_addr(self, iplist):\n\n if type(iplist) != list:\n print(\"Error: please provide a list of IPv4 addresses to check (as a list of strings).\")\n return False\n\n for ip_addr in iplist:\n # Converts ip_addr to string, in case of bad type being passed\n ip_addr = str(ip_addr)\n\n # Checks ip_addr format\n try: \n inet_aton(ip_addr)\n except:\n print(\"Error: '{}' is an invalid IPv4 address.\\n\"\\\n \"Please use a valid IPv4 address (e.g.: 192.168.0.1)\".format(ip_addr))\n return False\n return True",
"def test_bad_addresses_are_invalid(self):\n val = gnome.gh.EventSourceValidator()\n for addr in BAD_MOCK_ADDRESSES:\n validity = val.ip_str_is_valid(addr)\n self.assertFalse(validity)",
"def input_file_check(router_list):\n if os.path.exists(router_list):\n print(\"Found device file ({}): OK\".format(router_list))\n else:\n print('''\nUnable to find device list >>>{}<<<, please verify it exists and/or update the\nvariable ___router_list___ at the top of this script file to point to a new one.\n\nScript error, exiting.'''.format(router_list))\n sys.exit(1)\n\n with open(router_list) as f:\n for line in f:\n if \";\" not in line:\n if \"r\" in line:\n if \"p\" in line:\n print(\"Processing line:\", line.strip())\n else:\n print(\"ERROR with line:\", line.strip())\n print('''\nYour {} file may contain invalid entries, please double check it.\n\nExamples:\n\nOne Juniper router with one peer\nr10.10.10.10, p3.3.3.3\n\nTwo Juniper routers, one with one peer, the other with multiple\nr10.20.30.40, p4.4.4.4\nr192.168.1.22, p5.5.5.5, p6.6.6.6, p7.7.7.7\n\n'''.format(router_list))\n sys.exit(1)\n\n print(\"Line check: OK\")\n return",
"def ip_address(addr):\n parts = addr.split('.')\n if len(parts) != 4:\n raise TypeError('{} does not match an IP address pattern'.format(addr))\n for part in parts:\n try:\n num = int(part)\n if num < 0 or num > 255:\n raise TypeError('{} does not match an IP address pattern'.format(addr))\n except ValueError:\n raise TypeError('{} does not match an IP address pattern'.format(addr))\n return addr",
"def test_invalid_ip_address_as_argument(self, api_client):\n runner = CliRunner()\n\n expected = 'Error: Invalid value for \"[IP_ADDRESS]...\": not-an-ip\\n'\n\n result = runner.invoke(subcommand.quick, [\"not-an-ip\"])\n assert result.exit_code == 2\n assert expected in result.output\n api_client.quick.assert_not_called()",
"def is_valid_ip(arg):\n try:\n nacaddr.IP(arg)\n except:\n raise argparse.ArgumentTypeError('%s is an invalid ip address' % arg)\n return arg",
"def validateIP(ip):\n # type: (str)->None\n try:\n socket.inet_aton(ip)\n except socket.error:\n socket.inet_pton(socket.AF_INET6, ip)",
"def validate_ip(argname, param, safe, optional = False):\n _validate_one(argname, param, safe, _check_ip, optional)",
"def request_valid_ip():\n ip = input(\"Enter a valid IP address you would like to check: \")\n return validate_ip(ip)"
] | [
"0.7329706",
"0.69877046",
"0.6758954",
"0.6752534",
"0.64434695",
"0.6431371",
"0.63859427",
"0.6324199",
"0.62635887",
"0.62278163",
"0.6187877",
"0.6162502",
"0.6157764",
"0.6089943",
"0.60771656",
"0.6074368",
"0.606984",
"0.6059683",
"0.60493106",
"0.6029607",
"0.60199815",
"0.6018719",
"0.5992116",
"0.5980642",
"0.5979238",
"0.5978726",
"0.5965999",
"0.59553105",
"0.59519863",
"0.59321916"
] | 0.77088946 | 0 |
IP subcommand fails when ip_address is invalid. | def test_invalid_ip_address_as_argument(self, api_client):
runner = CliRunner()
expected = 'Error: Invalid value for "[IP_ADDRESS]...": not-an-ip\n'
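        # A malformed positional IP address should be rejected during argument validation (exit code 2).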
result = runner.invoke(subcommand.ip, ["not-an-ip"])
assert result.exit_code == 2
assert expected in result.output
api_client.ip.assert_not_called() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_invalid_ip_address_as_argument(self, api_client):\n runner = CliRunner()\n\n expected = 'Error: Invalid value for \"[IP_ADDRESS]...\": not-an-ip\\n'\n\n result = runner.invoke(subcommand.quick, [\"not-an-ip\"])\n assert result.exit_code == 2\n assert expected in result.output\n api_client.quick.assert_not_called()",
"def validateIP():\n try:\n s = socket.inet_aton(args.target)\n except socket.error:\n print(\"\")\n print(f\"{bad_cmd} Bad IP address\")\n print(\"\")\n sys.exit()",
"def test_input_file_invalid_ip_addresses_passsed(self, api_client):\n runner = CliRunner()\n\n expected = (\n \"Error: at least one valid IP address must be passed either as an \"\n \"argument (IP_ADDRESS) or through the -i/--input_file option.\"\n )\n\n result = runner.invoke(\n subcommand.ip,\n [\"-i\", StringIO(\"not-an-ip\")],\n parent=Context(main, info_name=\"greynoise\"),\n )\n assert result.exit_code == -1\n assert \"Usage: greynoise ip\" in result.output\n assert expected in result.output\n api_client.ip.assert_not_called()",
"def is_valid_ip(arg):\n try:\n nacaddr.IP(arg)\n except:\n raise argparse.ArgumentTypeError('%s is an invalid ip address' % arg)\n return arg",
"def validate_ip(argname, param, safe, optional = False):\n _validate_one(argname, param, safe, _check_ip, optional)",
"def __checkIPAddr(self,ip):\n if not iplib.checkIPAddrWithoutMask(ip):\n raise GeneralException(errorText(\"GENERAL\",\"INVALID_IP_ADDRESS\")%ip)",
"def test_input_file_invalid_ip_addresses_passsed(self, api_client):\n runner = CliRunner()\n\n expected = (\n \"Error: at least one valid IP address must be passed either as an \"\n \"argument (IP_ADDRESS) or through the -i/--input_file option.\"\n )\n\n result = runner.invoke(\n subcommand.quick,\n [\"-i\", StringIO(\"not-an-ip\")],\n parent=Context(main, info_name=\"greynoise\"),\n )\n assert result.exit_code == -1\n assert \"Usage: greynoise quick\" in result.output\n assert expected in result.output\n api_client.quick.assert_not_called()",
"def test_no_ip_address_passed(self, api_client):\n runner = CliRunner()\n\n with patch(\"greynoise.cli.helper.sys\") as sys:\n sys.stdin.isatty.return_value = True\n result = runner.invoke(\n subcommand.ip, parent=Context(main, info_name=\"greynoise\")\n )\n assert result.exit_code == -1\n assert \"Usage: greynoise ip\" in result.output\n api_client.ip.assert_not_called()",
"def is_valid_ip(ip):\n ...",
"def validate_ip(ip):\n try:\n ipobj = IPy.IP(ip)\n if ipobj.iptype() == 'PRIVATE':\n print(\"IP addresses {} will be ignored as it is in a private network range.\".format(ip))\n ip = None\n except ValueError as ve:\n print(\"Invalid IP: {}\".format(ve.args))\n ip = None\n finally:\n return ip",
"def check_valid_ip_int(value):\n try:\n address = int(value)\n except ValueError:\n raise argparse.ArgumentTypeError('value is not a positive number: {}'.format(value))\n try:\n ipaddress.ip_address(address)\n except ValueError:\n raise argparse.ArgumentTypeError('is out of IPv4/IPv6 boundaries')\n return address",
"def validate_ip(self, ip):\n if not ip:\n raise ValidationError(\"Please provide an actual IP or web address. You gave me: \" + ip)",
"def check_ip_format(self, ip_address):\n # regex for validating an Ip-address \n ip_regex = \"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\\/([0-9]|[1-2][0-9]|3[0-2]))?$\"\n\n # validate ip address\n r = re.compile(ip_regex)\n if(r.match(ip_address)):\n print(\"Valid IP address format\")\n self.target = ip_address\n return True\n else:\n print(R + \"{} is an invalid IP address format\".format(ip_address) + W)\n return False",
"def ip_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, ipaddress._IPAddressBase):\n name = type(var).__name__\n raise IPError(\n 'Function {} expected IP address, {} got instead.'.format(func, name))",
"def validateIP(ip):\n # type: (str)->None\n try:\n socket.inet_aton(ip)\n except socket.error:\n socket.inet_pton(socket.AF_INET6, ip)",
"def validate_ip_address(ip_addr):\n try:\n ip_object = ipaddress.ip_address(ip_addr)\n return True\n except ValueError:\n return False",
"def test_validate_ip_exit():\n ip = '1.1'\n with pytest.raises(SystemExit) as err:\n howisresolved.validate_ip(ip)\n assert 'Invalid ip specified.' in str(err.value)",
"def is_ip_address(value: str) -> bool:\n with suppress(ValueError):\n ipaddress.ip_address(value)\n return True\n\n return False",
"def request_valid_ip():\n ip = input(\"Enter a valid IP address you would like to check: \")\n return validate_ip(ip)",
"def valid_ip(ip):\n return valid_ipv4(ip) or valid_ipv6(ip)",
"def IP(address):\n for klass in (V4Address, V6Address):\n try:\n ip = klass(address)\n except ValueError, e:\n error = e\n else:\n return ip\n\n raise error",
"def validate_ip(ip):\n valid_ip = ''\n try:\n valid_ip = str(ipaddress.ip_address(ip))\n except ValueError:\n logging.error('ip address \\'{}\\' is not valid: '.format(ip))\n \n return valid_ip",
"def handle_ip(bot, ievent):\n try:\n item = ievent.args[0]\n except IndexError:\n ievent.missing('<hostname>')\n return\n try:\n ipnr = socket.gethostbyname(item)\n ievent.reply(ipnr)\n except:\n ievent.reply(\"can't match \" + str(item))",
"def is_ip(value):\n try:\n IP(value)\n except ValueError:\n return False\n return True",
"def test_validate_ip_ok():\n ip = '1.1.1.1'\n assert howisresolved.validate_ip(ip) is None",
"def valid_ip(ip_addr):\n try:\n inet_aton(ip_addr)\n return True\n\n except error:\n return False",
"def is_ip_address(value, messages=None):\n if value is None:\n return\n _messages = {\n 'type-string': \"must be a string\",\n 'invalid': \"is invalid\",\n }\n if messages:\n _messages.update(messages)\n if not isinstance(value, basestring):\n raise Invalid(_messages['type-string'])\n if _ip_address_regex.match(value) is None:\n raise Invalid(_messages['invalid'])",
"def ip(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n lookup = self.get(\"enable_ip_lookup\", False)\n ip = vmrun.getGuestIPAddress(lookup=lookup)\n if ip:\n puts_err(colored.green(ip))\n else:\n puts_err(colored.red(\"Unknown IP address\"))",
"def is_valid_ip(address):\n return is_valid_ipv4_address(address) or is_valid_ipv6_address(address)",
"def is_ip_address(value):\r\n # IPv6 added with Django 1.4\r\n from django.core.validators import validate_ipv46_address as ip_validator\r\n\r\n try:\r\n ip_validator(value)\r\n except ValidationError:\r\n return False\r\n return True"
] | [
"0.7779022",
"0.7753371",
"0.75348413",
"0.73027337",
"0.72888625",
"0.72064656",
"0.6963811",
"0.67920077",
"0.6779812",
"0.6777175",
"0.6720939",
"0.67078966",
"0.66838294",
"0.6551545",
"0.65412176",
"0.6540782",
"0.64748305",
"0.6447082",
"0.6426793",
"0.64139205",
"0.63681614",
"0.6365979",
"0.6319588",
"0.631613",
"0.63134605",
"0.6285819",
"0.6220055",
"0.6210424",
"0.6198518",
"0.61962444"
] | 0.7990856 | 0 |
Run query from stdin. | def test_stdin_input(self, api_client):
runner = CliRunner()
query = "<query>"
api_client.query.return_value = []
expected = json.dumps([[]], indent=4, sort_keys=True)
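        # The query is supplied on stdin via the runner's input parameter rather than as an argument.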
result = runner.invoke(subcommand.query, ["-f", "json"], input=query)
assert result.exit_code == 0
assert result.output.strip("\n") == expected
api_client.query.assert_called_with(query=query) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def query(args):\n dbh = despydb.DesDbi(args.service, args.section)\n if args.query not in \"-+\":\n do1Query(dbh, args.query, args)\n elif args.query == \"-\":\n line = sys.stdin.readline()\n while line:\n line = line.strip()\n if not line or line.startswith(\"#\"):\n pass\n else:\n do1Query(dbh, line, args)\n line = sys.stdin.readline()\n else: #if args.query == \"+\":\n lines = sys.stdin.read()\n do1Query(dbh, lines, args)\n dbh.close()",
"def ask_for_query():\n print('Enter query, empty to quit:')\n try:\n query = input('? ')\n except EOFError:\n # User has cancelled\n return False\n\n return query",
"def _queuing_input(procQueue, stdin_fd, query, valid, default):\n sys.stdin = os.fdopen(stdin_fd)\n procQueue.put(_get_user_input(query, valid, default))",
"def process(cmd_string, stdin=None):\n return process_results(process_run(cmd_string, stdin=stdin))",
"def test_stdin_input(self, api_client):\n runner = CliRunner()\n\n query = \"<query>\"\n api_client.stats.return_value = []\n expected = json.dumps([[]], indent=4, sort_keys=True)\n\n result = runner.invoke(subcommand.stats, [\"-f\", \"json\"], input=query)\n assert result.exit_code == 0\n assert result.output.strip(\"\\n\") == expected\n api_client.stats.assert_called_with(query=query)",
"def main(self):\n cmd = \"self.%s(sys.stdin)\" % sys.argv[1]\n exec(cmd)",
"def stdin(self):\n pass",
"def run(self):\r\n while self._go.isSet(): #while app is running\r\n if self._check_console_input(): #if something to read on the console\r\n cmd = sys.stdin.readline() #read it\r\n self.inq.put(cmd) #dispatch it tpo the server\r\n response = self.outq.get(timeout=2.0) #wait for an answer\r\n sys.stdout.write(response) #write the answer on the console\r",
"def run(args, input_query, output_topic):\n pipeline_options = PipelineOptions(args, save_main_session=True, streaming=True)\n\n with beam.Pipeline(options=pipeline_options) as pipeline:\n # Read the rows from BigQuery\n messages = (\n pipeline\n | 'ReadTable' >> ReadFromBigQuery(query=input_query, use_standard_sql=True)\n | 'ConvertToJsonLine' >> beam.Map(to_json_line)\n )\n\n # Output the results to a pubsub topic\n messages | 'WriteToPubSub' >> WriteToPubSub(topic=output_topic).with_output_types(bytes)",
"def runin(cmd, stdin):\n result = subprocess.Popen(cmd,stdin=subprocess.PIPE)\n result.wait()\n return result.returncode",
"def main():\n dt = DropToken()\n play = True\n while play:\n try:\n line = sys.stdin.readline()\n except KeyboardInterrupt:\n break\n if not line:\n break\n play = dt.inputProcess(line)",
"def command(self, args):\n try:\n with Reader(args.filename, args.sql_command) as odb_reader:\n for row in odb_reader:\n print(row)\n except InterfaceError as err:\n print(f\"Query interface error: {err}\")\n except ProgrammingError as err:\n if \"Assertion failed\" in str(err):\n print(f\"Query error: {args.filename} does not appear to be a valid ODB2 file.\")\n else:\n print(f\"Query error: {err}\")",
"def query_cmdline():",
"def main():\n for line in sys.stdin:\n _id, title, desc = line[:-1].split(\"\\t\")\n sql = \"select * from langlinks where ll_from = %s && ll_lang = 'en';\" % _id\n res = execute_and_fetch(DB, sql)\n langlink = \"\"\n if len(res) > 0:\n langlink = res[0][1] + \":\" + res[0][2].decode('utf-8')\n print \"\\t\".join([_id, langlink, title, desc])",
"def run_codeql_query(query, database, output, search_path):\n # --search-path is required when the CLI needs to upgrade the database scheme.\n subprocess_run([\"codeql\", \"query\", \"run\", query, \"--database\", database,\n \"--output\", output + \".bqrs\", \"--search-path\", search_path])\n subprocess_run([\"codeql\", \"bqrs\", \"decode\", output + \".bqrs\",\n \"--format=csv\", \"--no-titles\", \"--output\", output])\n os.remove(output + \".bqrs\")",
"def get_data(self, query):\n result = input(\"{}: \".format(query))\n return result",
"def scan_input(self):\n proc = subprocess.Popen([\"ssh\", \"-tt\", \"[email protected]\"],\n stdout=subprocess.PIPE, stdin=subprocess.PIPE)\n # give time to connect\n time.sleep(5)\n proc.stdin.write(\n b\"/home/pi/Desktop/GAssist/env/bin/google-assistant-demo\\n\")\n proc.stdin.flush()\n while True:\n next_line = proc.stdout.readline()\n if next_line != '':\n # the real code does filtering here\n tmp = next_line.decode(\"utf-8\")\n print(tmp)\n tmp = tmp.strip().lower()\n for test_re in self.text_regexes:\n match_test = test_re.match(tmp)\n if match_test:\n self.handle_input(match_test.group(1).strip())\n else:\n time.sleep(.01)",
"def read_user_input(self):\n\n self.commandline = raw_input(\"Enter the string you want to parse\\n\")",
"def run_loop(self):\n while True:\n try:\n command = input('fuel-yaql> ').strip()\n except EOFError:\n return\n if not command:\n continue\n\n try:\n if command.startswith(':'): # Check for internal command\n r = self.execute_command(command)\n else:\n r = self.evaluate_expression(command)\n\n if isinstance(r, (list, dict)):\n print(json.dumps(r, indent=4))\n elif r is not None:\n print(r)\n\n except Exception as e:\n print(\"Unexpected error: {0}\".format(e))\n traceback.print_exc(sys.stdout)",
"def hxlselect_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):\n\n # Command-line arguments\n parser = make_args('Filter rows in a HXL dataset.')\n parser.add_argument(\n '-q',\n '--query',\n help='Query expression for selecting rows (may repeat option for logical OR). <op> may be =, !=, <, <=, >, >=, ~, or !~',\n action='append',\n metavar='<tagspec><op><value>',\n required=True\n )\n parser.add_argument(\n '-r',\n '--reverse',\n help='Show only lines *not* matching criteria',\n action='store_const',\n const=True,\n default=False\n )\n args = parser.parse_args(args)\n\n do_common_args(args)\n\n with make_source(args, stdin) as source, make_output(args, stdout) as output:\n filter = hxl.filters.RowFilter(source, queries=args.query, reverse=args.reverse)\n hxl.input.write_hxl(output.output, filter, show_tags=not args.strip_tags)\n\n return EXIT_OK",
"def run():\n print_intro()\n print_table()\n while True:\n inp = input('>>').split(sep=' ')\n fill_op_table(inp[0], inp[1:])\n exec_cmd(inp[0])\n clear_table()",
"def test_input_file(self, api_client):\n runner = CliRunner()\n\n query = \"<query>\"\n api_client.query.return_value = []\n expected = json.dumps([[]], indent=4, sort_keys=True)\n\n result = runner.invoke(subcommand.query, [\"-f\", \"json\", \"-i\", StringIO(query)])\n assert result.exit_code == 0\n assert result.output.strip(\"\\n\") == expected\n api_client.query.assert_called_with(query=query)",
"def run(self):\n\n\n if not self.args.input_path:\n if self.args.additional_statements:\n # this is before handle_statements() is called, so each statement is at a\n # minimum of 2 args, e.g. ['OR', '> 10']\n # and max 3, e.g. ['OR', 'col_1,col_2', '>10']\n last_state = self.args.additional_statements[-1]\n if last_state['extra_args']:\n self.args.input_path = last_state['extra_args'][0]\n\n\n\n # if len(self.last_expr) > 2:\n # # could be either 3 or 4\n # self.args.input_path = self.last_expr.pop()\n # elif len(self.last_expr) == 2:\n # pass\n # # do nothing, but be warned that if there is no stdin,\n # # then -E might have eaten up the input_file argument\n # # and interpreted it as pattern\n # else:\n # # else, last_expr has an implied third argument, and\n # # input_path is hopefully stdin\n # self.args.input_path = None\n\n\n self.input_file = self._open_input_file(self.args.input_path)\n\n try:\n with warnings.catch_warnings():\n if getattr(self.args, 'no_header_row', None):\n warnings.filterwarnings(action='ignore', message='Column names not specified', module='agate')\n\n self.main()\n finally:\n self.input_file.close()",
"def __ask_query(self):\n self.__output = list()\n return input(form('What do you want to search?\\n> '))",
"def runQueryRead(d, query):\n with d.session() as s:\n results = s.read_transaction(runQuery, query, True)\n return results",
"def stdin(config, input):\n if input == 'history':\n home = str(Path.home())\n with open(home + '/.bash_history', 'r') as file:\n output = file.read()\n input = None\n m = SearchMatches(input, output, config.regex, config.color, config.underline)\n m.print_match_lines()\n else:\n input = shlex.split(input)\n output = subprocess.check_output(input).decode('ascii')\n input = None\n m = SearchMatches(input, output, config.regex, config.color, config.underline)\n m.print_match_lines()",
"def run_cmd(servable, input):\n\n if not any([servable, input]):\n format_output(HELP_STR)\n return\n\n client = get_dlhub_client()\n\n data = json.loads(input)\n\n res = client.run(servable, data)\n\n format_output(res)\n return res",
"def read_input(args, parser):\n if args.text == sys.stdin:\n # check if stdin is empty\n stdin_ready, _, _ = select.select([sys.stdin], [], [], 0)\n if stdin_ready:\n return sys.stdin.read().strip()\n\n parser.print_help()\n sys.exit(1)\n\n return args.text",
"def read_input(inp):\n epoll = select.epoll()\n epoll.register(sys.stdin.fileno(), select.EPOLLIN)\n while inp.running:\n if is_terminated():\n return\n\n events = epoll.poll(1)\n for fileno, event in events:\n line = \"[\"\n while \"[\" in line:\n line = sys.stdin.readline().strip(\",\").strip()\n inp.has_event = True\n try:\n event = json.loads(line)\n if \"instance\" in event:\n inp.callback(event)\n inp.redraw()\n except ValueError:\n pass\n epoll.unregister(sys.stdin.fileno())\n epoll.close()\n inp.has_event = True\n inp.clean_exit = True",
"def run_query(self):\n return _run_query(self.query)"
] | [
"0.7017035",
"0.6000595",
"0.59943163",
"0.5860624",
"0.5844626",
"0.5836932",
"0.5705197",
"0.5692738",
"0.5685291",
"0.5660167",
"0.5622734",
"0.56151575",
"0.55897874",
"0.55367315",
"0.5528968",
"0.5525039",
"0.5509141",
"0.54871726",
"0.5451612",
"0.54344296",
"0.5418843",
"0.54159254",
"0.53969014",
"0.5393474",
"0.5392958",
"0.53761965",
"0.5371908",
"0.5367669",
"0.5366145",
"0.5350352"
] | 0.65404975 | 1 |
Error returned if only invalid IP addresses are passed in input file. | def test_input_file_invalid_ip_addresses_passsed(self, api_client):
runner = CliRunner()
expected = (
"Error: at least one valid IP address must be passed either as an "
"argument (IP_ADDRESS) or through the -i/--input_file option."
)
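        # The -i option receives a file whose only entry is not a valid IP address.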
result = runner.invoke(
subcommand.quick,
["-i", StringIO("not-an-ip")],
parent=Context(main, info_name="greynoise"),
)
assert result.exit_code == -1
assert "Usage: greynoise quick" in result.output
assert expected in result.output
api_client.quick.assert_not_called() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_input_file_invalid_ip_addresses_passsed(self, api_client):\n runner = CliRunner()\n\n expected = (\n \"Error: at least one valid IP address must be passed either as an \"\n \"argument (IP_ADDRESS) or through the -i/--input_file option.\"\n )\n\n result = runner.invoke(\n subcommand.ip,\n [\"-i\", StringIO(\"not-an-ip\")],\n parent=Context(main, info_name=\"greynoise\"),\n )\n assert result.exit_code == -1\n assert \"Usage: greynoise ip\" in result.output\n assert expected in result.output\n api_client.ip.assert_not_called()",
"def validateIP():\n try:\n s = socket.inet_aton(args.target)\n except socket.error:\n print(\"\")\n print(f\"{bad_cmd} Bad IP address\")\n print(\"\")\n sys.exit()",
"def __checkIPAddr(self,ip):\n if not iplib.checkIPAddrWithoutMask(ip):\n raise GeneralException(errorText(\"GENERAL\",\"INVALID_IP_ADDRESS\")%ip)",
"def read_in_address_file(file):\n address_list = list()\n lines = 0\n valid_ips = 0\n with file as f:\n for n in file:\n lines += 1\n if validate_ip(n.strip()):\n address_list.append(n.strip())\n valid_ips += 1\n if valid_ips < lines:\n print(\"Of the {} lines in the file you supplied, only {} were valid. The latter will be used to call the \"\n \"API.\".format(lines, valid_ips))\n if valid_ips == 0:\n print(\"Please supply a valid IP address.\")\n address_list = None\n return address_list",
"def check_ip_format(self, ip_address):\n # regex for validating an Ip-address \n ip_regex = \"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\\/([0-9]|[1-2][0-9]|3[0-2]))?$\"\n\n # validate ip address\n r = re.compile(ip_regex)\n if(r.match(ip_address)):\n print(\"Valid IP address format\")\n self.target = ip_address\n return True\n else:\n print(R + \"{} is an invalid IP address format\".format(ip_address) + W)\n return False",
"def is_valid_ip(ip):\n ...",
"def validate_ip_address(self):\n\t\tip_list = webnotes.conn.get_value('Profile', self.user, 'restrict_ip', ignore=True)\n\t\t\n\t\tif not ip_list:\n\t\t\treturn\n\n\t\tip_list = ip_list.replace(\",\", \"\\n\").split('\\n')\n\t\tip_list = [i.strip() for i in ip_list]\n\n\t\tfor ip in ip_list:\n\t\t\tif webnotes.remote_ip.startswith(ip):\n\t\t\t\treturn\n\t\t\t\n\t\twebnotes.msgprint('Not allowed from this IP Address')\n\t\traise webnotes.AuthenticationError",
"def test_try_create_invalid_ip(self):\n\n name_file = 'api_ip/tests/sanity/ipv4/json/post/ipv4_10_0_0_430_net_5.json'\n response = self.client.post(\n '/api/v3/ipv4/',\n data=json.dumps(self.load_json_file(name_file)),\n content_type='application/json')\n\n self.compare_status(400, response.status_code)\n self.compare_values(\n 'Error save new IP.: 10.0.0.430',\n response.data['detail'])",
"def IsValidIP(ip):\n if ip != None:\n if ip.count('.') == 3:\n ipNumbers = ip.split('.')\n for number in ipNumbers:\n if not number.isdigit() or int(number) > 255:\n return False\n return ipNumbers\n return False",
"def validate_ip(ip):\n try:\n ipobj = IPy.IP(ip)\n if ipobj.iptype() == 'PRIVATE':\n print(\"IP addresses {} will be ignored as it is in a private network range.\".format(ip))\n ip = None\n except ValueError as ve:\n print(\"Invalid IP: {}\".format(ve.args))\n ip = None\n finally:\n return ip",
"def test_ip_addresses_exists():\n load_ips()\n validate_names()",
"def validate_ip(self, ip):\n if not ip:\n raise ValidationError(\"Please provide an actual IP or web address. You gave me: \" + ip)",
"def validate_ip_address(data, valid_values=None):\n\n msg = None\n try:\n # netaddr.core.ZEROFILL is only applicable to IPv4.\n # it will remove leading zeros from IPv4 address octets.\n ip = netaddr.IPAddress(validate_no_whitespace(data),\n flags=netaddr.core.ZEROFILL)\n # The followings are quick checks for IPv6 (has ':') and\n # IPv4. (has 3 periods like 'xx.xx.xx.xx')\n # NOTE(yamamoto): netaddr uses libraries provided by the underlying\n # platform to convert addresses. For example, inet_aton(3).\n # Some platforms, including NetBSD and OS X, have inet_aton\n # implementation which accepts more varying forms of addresses than\n # we want to accept here. The following check is to reject such\n # addresses. For Example:\n # >>> netaddr.IPAddress('1' * 59)\n # IPAddress('199.28.113.199')\n # >>> netaddr.IPAddress(str(int('1' * 59) & 0xffffffff))\n # IPAddress('199.28.113.199')\n # >>>\n if ':' not in data and data.count('.') != 3:\n msg = \"'%s' is not a valid IP address\" % data\n # A leading '0' in IPv4 address may be interpreted as an octal number,\n # e.g. 011 octal is 9 decimal. Since there is no standard saying\n # whether IP address with leading '0's should be interpreted as octal\n # or decimal, hence we reject leading '0's to avoid ambiguity.\n elif ip.version == 4 and str(ip) != data:\n msg = (\"'%(data)s' is not an accepted IP address, \"\n \"'%(ip)s' is recommended\") % {\"data\": data, \"ip\": ip}\n except Exception:\n msg = \"'%s' is not a valid IP address\" % data\n if msg:\n raise exceptions.DiagnoseException(msg)",
"def test_invalid_ip_address_as_argument(self, api_client):\n runner = CliRunner()\n\n expected = 'Error: Invalid value for \"[IP_ADDRESS]...\": not-an-ip\\n'\n\n result = runner.invoke(subcommand.ip, [\"not-an-ip\"])\n assert result.exit_code == 2\n assert expected in result.output\n api_client.ip.assert_not_called()",
"def test_validate_ip_ok():\n ip = '1.1.1.1'\n assert howisresolved.validate_ip(ip) is None",
"def test_try_create_out_of_range_ip_in_network(self):\n\n name_file = 'api_ip/tests/sanity/ipv4/json/post/out_of_range_ipv4_172_0_0_5_net_5.json'\n response = self.client.post(\n '/api/v3/ipv4/',\n data=json.dumps(self.load_json_file(name_file)),\n content_type='application/json')\n\n self.compare_status(400, response.status_code)\n self.compare_values(\n 'Ip 172.0.0.5 not available for network 5.',\n response.data['detail'])",
"def validate_input(self, IP, Port):\n exception = ServerInitError\n try:\n if IP != None:\n if re.match(IPRegex, IP):\n self.IP = IP\n else:\n exception = InavlidIPError\n raise\n\n if Port != None:\n if int(Port) in range(0,65535):\n self.Port = Port\n else:\n exception = InvalidPortError\n raise\n except:\n traceback_print_exc()",
"def valid_ip(ip):\n return valid_ipv4(ip) or valid_ipv6(ip)",
"def validate_iplist(argname, param, safe):\n _validate_all(argname, param, safe, _check_ip)",
"def test_addr_zip_bad_values(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_addr_zip(val))",
"def _valid_ip_addresses(self, addresses):\r\n if addresses == '':\r\n return ''\r\n error_addresses = []\r\n for addr in addresses.split(','):\r\n address = addr.strip()\r\n if not self._is_valid_ip(address):\r\n error_addresses.append(address)\r\n if error_addresses:\r\n msg = 'Invalid IP Address(es): {0}'.format(error_addresses)\r\n msg += ' Please fix the error(s) and try again.'\r\n raise forms.ValidationError(msg)\r\n\r\n return addresses",
"def check_IP_addr(self, iplist):\n\n if type(iplist) != list:\n print(\"Error: please provide a list of IPv4 addresses to check (as a list of strings).\")\n return False\n\n for ip_addr in iplist:\n # Converts ip_addr to string, in case of bad type being passed\n ip_addr = str(ip_addr)\n\n # Checks ip_addr format\n try: \n inet_aton(ip_addr)\n except:\n print(\"Error: '{}' is an invalid IPv4 address.\\n\"\\\n \"Please use a valid IPv4 address (e.g.: 192.168.0.1)\".format(ip_addr))\n return False\n return True",
"def test_bad_addresses_are_invalid(self):\n val = gnome.gh.EventSourceValidator()\n for addr in BAD_MOCK_ADDRESSES:\n validity = val.ip_str_is_valid(addr)\n self.assertFalse(validity)",
"def input_file_check(router_list):\n if os.path.exists(router_list):\n print(\"Found device file ({}): OK\".format(router_list))\n else:\n print('''\nUnable to find device list >>>{}<<<, please verify it exists and/or update the\nvariable ___router_list___ at the top of this script file to point to a new one.\n\nScript error, exiting.'''.format(router_list))\n sys.exit(1)\n\n with open(router_list) as f:\n for line in f:\n if \";\" not in line:\n if \"r\" in line:\n if \"p\" in line:\n print(\"Processing line:\", line.strip())\n else:\n print(\"ERROR with line:\", line.strip())\n print('''\nYour {} file may contain invalid entries, please double check it.\n\nExamples:\n\nOne Juniper router with one peer\nr10.10.10.10, p3.3.3.3\n\nTwo Juniper routers, one with one peer, the other with multiple\nr10.20.30.40, p4.4.4.4\nr192.168.1.22, p5.5.5.5, p6.6.6.6, p7.7.7.7\n\n'''.format(router_list))\n sys.exit(1)\n\n print(\"Line check: OK\")\n return",
"def ip_address(addr):\n parts = addr.split('.')\n if len(parts) != 4:\n raise TypeError('{} does not match an IP address pattern'.format(addr))\n for part in parts:\n try:\n num = int(part)\n if num < 0 or num > 255:\n raise TypeError('{} does not match an IP address pattern'.format(addr))\n except ValueError:\n raise TypeError('{} does not match an IP address pattern'.format(addr))\n return addr",
"def test_invalid_ip_address_as_argument(self, api_client):\n runner = CliRunner()\n\n expected = 'Error: Invalid value for \"[IP_ADDRESS]...\": not-an-ip\\n'\n\n result = runner.invoke(subcommand.quick, [\"not-an-ip\"])\n assert result.exit_code == 2\n assert expected in result.output\n api_client.quick.assert_not_called()",
"def is_valid_ip(arg):\n try:\n nacaddr.IP(arg)\n except:\n raise argparse.ArgumentTypeError('%s is an invalid ip address' % arg)\n return arg",
"def validateIP(ip):\n # type: (str)->None\n try:\n socket.inet_aton(ip)\n except socket.error:\n socket.inet_pton(socket.AF_INET6, ip)",
"def validate_ip(argname, param, safe, optional = False):\n _validate_one(argname, param, safe, _check_ip, optional)",
"def request_valid_ip():\n ip = input(\"Enter a valid IP address you would like to check: \")\n return validate_ip(ip)"
] | [
"0.77088946",
"0.69877046",
"0.6758954",
"0.6752534",
"0.64434695",
"0.6431371",
"0.63859427",
"0.6324199",
"0.62635887",
"0.62278163",
"0.6187877",
"0.6162502",
"0.6157764",
"0.6089943",
"0.60771656",
"0.6074368",
"0.606984",
"0.6059683",
"0.60493106",
"0.6029607",
"0.60199815",
"0.6018719",
"0.5992116",
"0.5980642",
"0.5979238",
"0.5978726",
"0.5965999",
"0.59553105",
"0.59519863",
"0.59321916"
] | 0.7329706 | 1 |
Quick subcommand fails when ip_address is invalid. | def test_invalid_ip_address_as_argument(self, api_client):
runner = CliRunner()
expected = 'Error: Invalid value for "[IP_ADDRESS]...": not-an-ip\n'
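        # A malformed positional IP address should be rejected during argument validation (exit code 2).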
result = runner.invoke(subcommand.quick, ["not-an-ip"])
assert result.exit_code == 2
assert expected in result.output
api_client.quick.assert_not_called() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_invalid_ip_address_as_argument(self, api_client):\n runner = CliRunner()\n\n expected = 'Error: Invalid value for \"[IP_ADDRESS]...\": not-an-ip\\n'\n\n result = runner.invoke(subcommand.ip, [\"not-an-ip\"])\n assert result.exit_code == 2\n assert expected in result.output\n api_client.ip.assert_not_called()",
"def validateIP():\n try:\n s = socket.inet_aton(args.target)\n except socket.error:\n print(\"\")\n print(f\"{bad_cmd} Bad IP address\")\n print(\"\")\n sys.exit()",
"def test_input_file_invalid_ip_addresses_passsed(self, api_client):\n runner = CliRunner()\n\n expected = (\n \"Error: at least one valid IP address must be passed either as an \"\n \"argument (IP_ADDRESS) or through the -i/--input_file option.\"\n )\n\n result = runner.invoke(\n subcommand.ip,\n [\"-i\", StringIO(\"not-an-ip\")],\n parent=Context(main, info_name=\"greynoise\"),\n )\n assert result.exit_code == -1\n assert \"Usage: greynoise ip\" in result.output\n assert expected in result.output\n api_client.ip.assert_not_called()",
"def test_input_file_invalid_ip_addresses_passsed(self, api_client):\n runner = CliRunner()\n\n expected = (\n \"Error: at least one valid IP address must be passed either as an \"\n \"argument (IP_ADDRESS) or through the -i/--input_file option.\"\n )\n\n result = runner.invoke(\n subcommand.quick,\n [\"-i\", StringIO(\"not-an-ip\")],\n parent=Context(main, info_name=\"greynoise\"),\n )\n assert result.exit_code == -1\n assert \"Usage: greynoise quick\" in result.output\n assert expected in result.output\n api_client.quick.assert_not_called()",
"def is_valid_ip(arg):\n try:\n nacaddr.IP(arg)\n except:\n raise argparse.ArgumentTypeError('%s is an invalid ip address' % arg)\n return arg",
"def __checkIPAddr(self,ip):\n if not iplib.checkIPAddrWithoutMask(ip):\n raise GeneralException(errorText(\"GENERAL\",\"INVALID_IP_ADDRESS\")%ip)",
"def validate_ip(argname, param, safe, optional = False):\n _validate_one(argname, param, safe, _check_ip, optional)",
"def test_no_ip_address_passed(self, api_client):\n runner = CliRunner()\n\n with patch(\"greynoise.cli.helper.sys\") as sys:\n sys.stdin.isatty.return_value = True\n result = runner.invoke(\n subcommand.ip, parent=Context(main, info_name=\"greynoise\")\n )\n assert result.exit_code == -1\n assert \"Usage: greynoise ip\" in result.output\n api_client.ip.assert_not_called()",
"def is_valid_ip(ip):\n ...",
"def test_validate_ip_exit():\n ip = '1.1'\n with pytest.raises(SystemExit) as err:\n howisresolved.validate_ip(ip)\n assert 'Invalid ip specified.' in str(err.value)",
"def check_ip_format(self, ip_address):\n # regex for validating an Ip-address \n ip_regex = \"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\\/([0-9]|[1-2][0-9]|3[0-2]))?$\"\n\n # validate ip address\n r = re.compile(ip_regex)\n if(r.match(ip_address)):\n print(\"Valid IP address format\")\n self.target = ip_address\n return True\n else:\n print(R + \"{} is an invalid IP address format\".format(ip_address) + W)\n return False",
"def test_host_validation(runner: CliRunner) -> None:\n invalid_res = runner.invoke(cli.main, [\"-b\", \"1.2.3.4.5\"])\n assert invalid_res.exit_code == 2\n assert 'Invalid value for \"-b\" / \"--bind-address\"' in invalid_res.output\n assert \"'host' is invalid in configuration\" in invalid_res.output",
"def test_validate_ip_ok():\n ip = '1.1.1.1'\n assert howisresolved.validate_ip(ip) is None",
"def check_valid_ip_int(value):\n try:\n address = int(value)\n except ValueError:\n raise argparse.ArgumentTypeError('value is not a positive number: {}'.format(value))\n try:\n ipaddress.ip_address(address)\n except ValueError:\n raise argparse.ArgumentTypeError('is out of IPv4/IPv6 boundaries')\n return address",
"def ip_check():\n hosts = []\n valid_hosts = []\n for item in sys.argv:\n if '@' in item:\n hosts.append(item)\n for i in hosts:\n host = i.split('@')[1].split(':')[0]\n command = os.system('ping -c 1 '+host+' > /dev/null')\n if command == 0:\n valid_hosts.append(i)\n if valid_hosts:\n path_check(valid_hosts)",
"def validate_ip(self, ip):\n if not ip:\n raise ValidationError(\"Please provide an actual IP or web address. You gave me: \" + ip)",
"def validate_ip_address(ip_addr):\n try:\n ip_object = ipaddress.ip_address(ip_addr)\n return True\n except ValueError:\n return False",
"def ip_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, ipaddress._IPAddressBase):\n name = type(var).__name__\n raise IPError(\n 'Function {} expected IP address, {} got instead.'.format(func, name))",
"def test_no_ip_address_passed(self, api_client):\n runner = CliRunner()\n\n with patch(\"greynoise.cli.helper.sys\") as sys:\n sys.stdin.isatty.return_value = True\n result = runner.invoke(\n subcommand.quick, parent=Context(main, info_name=\"greynoise\")\n )\n assert result.exit_code == -1\n assert \"Usage: greynoise quick\" in result.output\n api_client.quick.assert_not_called()",
"def handle_ip(bot, ievent):\n try:\n item = ievent.args[0]\n except IndexError:\n ievent.missing('<hostname>')\n return\n try:\n ipnr = socket.gethostbyname(item)\n ievent.reply(ipnr)\n except:\n ievent.reply(\"can't match \" + str(item))",
"def request_valid_ip():\n ip = input(\"Enter a valid IP address you would like to check: \")\n return validate_ip(ip)",
"def validate_ip(ip):\n try:\n ipobj = IPy.IP(ip)\n if ipobj.iptype() == 'PRIVATE':\n print(\"IP addresses {} will be ignored as it is in a private network range.\".format(ip))\n ip = None\n except ValueError as ve:\n print(\"Invalid IP: {}\".format(ve.args))\n ip = None\n finally:\n return ip",
"def test_ip_addr_fails(self, mock_ghn, mock_grnam, mock_pwnam):\n # Should pass\n self.driver.check_for_setup_error()\n # Throw exception, need to clear internal cached host in driver\n self._fail_ip = True\n self.driver._vgc_host = None\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)",
"def set_ip():\r\n fail_count = 0\r\n while fail_count < 3:\r\n address = moves.input('What is the IP address of the Packetmaster you want to access?: ')\r\n if pm_input_check.ipv4(address) != 0:\r\n address = pm_input_check.ipv4(address)\r\n return address\r\n else:\r\n print(\"That is not a valid IPv4 address.\")\r\n fail_count += 1\r\n print(\"That is not a valid IPv4 address. Exiting\")\r\n exit()",
"def test_ip_addresses_exists():\n load_ips()\n validate_names()",
"def validate_ip_address(self):\n\t\tip_list = webnotes.conn.get_value('Profile', self.user, 'restrict_ip', ignore=True)\n\t\t\n\t\tif not ip_list:\n\t\t\treturn\n\n\t\tip_list = ip_list.replace(\",\", \"\\n\").split('\\n')\n\t\tip_list = [i.strip() for i in ip_list]\n\n\t\tfor ip in ip_list:\n\t\t\tif webnotes.remote_ip.startswith(ip):\n\t\t\t\treturn\n\t\t\t\n\t\twebnotes.msgprint('Not allowed from this IP Address')\n\t\traise webnotes.AuthenticationError",
"def validate_ip(ip):\n valid_ip = ''\n try:\n valid_ip = str(ipaddress.ip_address(ip))\n except ValueError:\n logging.error('ip address \\'{}\\' is not valid: '.format(ip))\n \n return valid_ip",
"def address_blank_test():\n \n Debug.info('Enter an IP address should not be possible')\n\n click(\"1499782256475.png\")\n if exists(\"1499782281377.png\"):\n Debug.info('************ Pass ******************')\n click(\"1499782294209.png\")\n \n else: \n Debug.info('************ Fail ******************')\n click(\"1499782317985.png\")",
"def is_ip_address(value: str) -> bool:\n with suppress(ValueError):\n ipaddress.ip_address(value)\n return True\n\n return False",
"def valid_ip_address (ip_address):\n return valid_key(ip_address, ip_hash, ip_hash_threshold)"
] | [
"0.7873539",
"0.772955",
"0.72761595",
"0.72378004",
"0.71418965",
"0.7112859",
"0.7034858",
"0.6839719",
"0.6757991",
"0.6727207",
"0.66167384",
"0.6610867",
"0.65946215",
"0.6463145",
"0.6456307",
"0.64217573",
"0.6408132",
"0.63588095",
"0.6344168",
"0.6322485",
"0.63148254",
"0.62939054",
"0.62313235",
"0.6229864",
"0.6206061",
"0.61383605",
"0.61371595",
"0.61221415",
"0.61189693",
"0.6116716"
] | 0.8023229 | 0 |
Error is displayed if API key is not found. | def test_api_key_not_found(self):
runner = CliRunner()
with patch("greynoise.cli.decorator.load_config") as load_config:
load_config.return_value = {"api_key": ""}
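            # An empty api_key in the loaded config should make the command exit before any API call.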
result = runner.invoke(
subcommand.quick,
["0.0.0.0"],
parent=Context(main, info_name="greynoise"),
)
assert result.exit_code == -1
assert "Error: API key not found" in result.output | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_api_key_error(api):\n\twith pytest.raises(top_stories.APIKeyError):\n\t\tmissingAPI = top_stories.TopStoriesAPI()",
"def check_api_key(x_api_key: str = Security(api_key_header_auth)):\n\n if x_api_key != API_KEY:\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Invalid API Key\",\n )",
"def _transport_key_not_found():\n pecan.abort(404, u._('Not Found. Transport Key not found.'))",
"def test_api_key_not_found(self):\n runner = CliRunner()\n\n with patch(\"greynoise.cli.decorator.load_config\") as load_config:\n load_config.return_value = {\"api_key\": \"\"}\n result = runner.invoke(\n subcommand.stats, [\"query\"], parent=Context(main, info_name=\"greynoise\")\n )\n assert result.exit_code == -1\n assert \"Error: API key not found\" in result.output",
"def _check_api_key(self):\n try:\n self.maps.places_nearby(\n location=(53.909804, 27.580184),\n radius=650,\n open_now=False,\n language=config.LANGUAGE,\n type='cafe',\n # rank_by='distance', # IMPORTANT: cannot use rank_by and radius options together\n page_token=None,\n )\n except Exception as e:\n\n with self.__writelock:\n self.print(f'ERROR: bad API key \"{self.maps.key}\" (tracker={self.stats.previous_requests})\\n')\n raise e",
"def test_api_key_not_found(self):\n runner = CliRunner()\n\n with patch(\"greynoise.cli.decorator.load_config\") as load_config:\n load_config.return_value = {\"api_key\": \"\"}\n result = runner.invoke(\n subcommand.query,\n [\"<query>\"],\n parent=Context(main, info_name=\"greynoise\"),\n )\n assert result.exit_code == -1\n assert \"Error: API key not found\" in result.output",
"def test_api_key_not_found(self):\n runner = CliRunner()\n\n with patch(\"greynoise.cli.decorator.load_config\") as load_config:\n load_config.return_value = {\"api_key\": \"\"}\n result = runner.invoke(\n subcommand.ip, [\"0.0.0.0\"], parent=Context(main, info_name=\"greynoise\")\n )\n assert result.exit_code == -1\n assert \"Error: API key not found\" in result.output",
"def test_incorrect_api_key(self):\n with self.subTest(\"Missing API key\"):\n response = self.client.get(\n self.url, HTTP_AUTHORIZATION=f\"ApiKey {self.web_user.username}:\"\n )\n self.assertEqual(response.status_code, 401)\n\n with self.subTest(\"Missing username\"):\n response = self.client.get(\n self.url, HTTP_AUTHORIZATION=f\"ApiKey :{self.web_user_api_key.key}\"\n )\n self.assertEqual(response.status_code, 401)\n\n with self.subTest(\"Missing header\"):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 401)\n\n with self.subTest(\"Incorrect API key\"):\n response = self.client.get(\n self.url,\n HTTP_AUTHORIZATION=f\"ApiKey {self.web_user.username}:Incorrectkey\",\n )\n self.assertEqual(response.status_code, 401)",
"def test_no_api_key(self):\n\n self.assertRaises(Exception, kaput.init, None, '123')",
"def __virtual__():\n if _apikey():\n return True\n return (\n False,\n 'The API key was not specified. Please specify it using the \"apikey\" config.',\n )",
"def test_failure_with_invalid_api_key(self):\n self.geocoder = Yandex(\n api_key='bad key'\n )\n with self.assertRaises(GeocoderInsufficientPrivileges):\n self.geocode_run(\n {\"query\": \"площадь Ленина Донецк\"},\n {}\n )",
"def get_api_key(api_key):\n api.get(api_key)",
"def test_unknown_api_key(self, app, data_queues, redis, metricsmock, logs):\n res = self._call(app, api_key=\"abcdefg\", ip=self.test_ip, status=400)\n self.check_response(data_queues, res, \"invalid_key\")\n metricsmock.assert_incr_once(\n self.metric_type + \".request\", tags=[self.metric_path, \"key:invalid\"]\n )\n assert redis.keys(\"apiuser:*\") == []\n assert logs.only_entry[\"api_key\"] == \"invalid\"\n assert logs.only_entry[\"invalid_api_key\"] == \"abcdefg\"",
"def _check_settings(self):\n if self.api_key is None:\n raise ImproperlyConfigured(\"You must provide an API key.\")",
"def __init__(self, key = None):\n self.key = key\n self.response_format = 'json'\n \n if self.key is None:\n raise NoAPIKeyException('Warning: Missing API Key. Please visit ' + API_SIGNUP_PAGE + ' to register for a key.')",
"def validate_snx_api_key():\n api_data = {} # type: Dict[str, str]\n response = http_request(endpoint=API_QUOTA, data=api_data)\n\n if response.get('errorNo') != 0:\n return_error('API Returned, {}:{}'.format(response.get('errorNo'), response.get('errorMsg')))\n\n return 'ok'",
"def test_get_report_wrong_api_key(self):\n vt_analyses_wrong_api_key = VirusTotalAPIAnalyses('test_api_key')\n vt_analyses_wrong_api_key.get_report('test_object_id')\n http_err = vt_analyses_wrong_api_key.get_last_http_error()\n self.assertEqual(http_err, vt_analyses_wrong_api_key.HTTP_AUTHENTICATION_REQUIRED_ERROR)",
"def load_config_key():\n try:\n global api_key\n api_key = os.environ['IN_API_KEY']\n if len(api_key) == 32:\n try:\n int(api_key, 16)\n except ValueError:\n print(\"Invalid API key\")\n except KeyError:\n print('No API Token detected. '\n 'Please visit {0} and get an API Token, '\n 'which will be used by instantnews '\n 'to get access to the data.'\n .format(API_URL))\n sys.exit(1)",
"def test_missing_api_key(self):\n with self.assertRaises(TypeError):\n ConnectorWebexTeams()",
"def validate_api_key(self) -> tuple[bool, str]:\n response = self._api_query('wallets')\n\n if response.status_code != HTTPStatus.OK:\n result, msg = self._process_unsuccessful_response(\n response=response,\n case='validate_api_key',\n )\n return result, msg\n\n return True, ''",
"def test_key_none(self):\n try:\n AlphaVantage()\n self.fail(msg='A None api key must raise an error')\n except ValueError:\n self.assertTrue(True)",
"def test_key_none(self):\n try:\n AlphaVantage()\n self.fail(msg='A None api key must raise an error')\n except ValueError:\n self.assertTrue(True)",
"def _invalid_transport_key_id():\n pecan.abort(404, u._('Not Found. Provided transport key id is invalid.'))",
"def resolve_apikey(self):\n # check the instance variable\n apikey = self.apikey\n if apikey is not None:\n return apikey\n\n # check the class variable and environment\n apikey = resolve_apikey()\n if apikey is not None:\n return apikey\n\n # if we got this far, the API key wasn't found\n raise MonitisError('The Monitis API key is required')",
"def test_setup_bad_api_key(self, mock_get_forecast):\n # The Dark Sky API wrapper that we use raises an HTTP error\n # when you try to use a bad (or no) API key.\n url = \"https://api.darksky.net/forecast/{}/{},{}?units=auto\".format(\n self.key, str(self.lat), str(self.lon)\n )\n msg = f\"400 Client Error: Bad Request for url: {url}\"\n mock_get_forecast.side_effect = HTTPError(msg)\n\n response = darksky.setup_platform(\n self.hass, VALID_CONFIG_MINIMAL[\"sensor\"], MagicMock()\n )\n assert not response",
"def test_api_ping_failed_invalid_api(self):\r\n\r\n # Login a user and then test the validation of api key\r\n\r\n user_data = {'login': u'admin',\r\n 'password': u'admin',\r\n 'form.submitted': u'true'}\r\n\r\n # Assuming user logged in without errors\r\n self.testapp.post('/login', params=user_data)\r\n\r\n # Check for authentication of api key\r\n\r\n res = self.testapp.get('/api/v1/admin/ping?api_key=' + 'invalid',\r\n status=200)\r\n ping = json.loads(res.body)\r\n\r\n self.assertFalse(ping['success'])\r\n self.assertEqual(ping['message'], \"API key is invalid.\")\r\n self._check_cors_headers(res)",
"def test_missing_api_key(self):\n runner = CliRunner()\n expected_error = 'Error: Missing option \"-k\" / \"--api-key\"'\n\n result = runner.invoke(subcommand.setup, [])\n assert result.exit_code == 2\n assert expected_error in result.output",
"def test_no_credentials(remove_api_key):\n with raises(\n RuntimeError,\n match=\"Failed to read API key. Did you forget to set GIPHY_API_KEY environment variable?\",\n ):\n api_credentials_provider.resolve_credentials()",
"async def test_invalid_key(hass: HomeAssistant, invalid_key_api: Mock) -> None:\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": SOURCE_USER}\n )\n assert result.get(\"type\") == data_entry_flow.FlowResultType.FORM\n assert result.get(\"step_id\") == \"user\"\n\n # Test filling in API key\n result = await hass.config_entries.flow.async_init(\n DOMAIN,\n context={\"source\": SOURCE_USER},\n data={CONF_API_TOKEN: \"psk_123456789\"},\n )\n assert result.get(\"type\") == data_entry_flow.FlowResultType.FORM\n # Goes back to the user step\n assert result.get(\"step_id\") == \"user\"\n assert result.get(\"errors\") == {\"api_token\": \"invalid_api_token\"}",
"def test_no_key(self):\n with self.assertRaises(ConfigError) as cm:\n imageroller.main.read_authconfig(\n imageroller.test.get_config_parser(self._no_key))\n self.assertEqual(str(cm.exception), \"AuthConfig must contain ApiKey\")"
] | [
"0.7113899",
"0.70748323",
"0.70287615",
"0.70035744",
"0.69579756",
"0.69482815",
"0.6905072",
"0.6886157",
"0.68102723",
"0.6711269",
"0.6688278",
"0.66838604",
"0.6640796",
"0.6632667",
"0.6593882",
"0.6562457",
"0.65179884",
"0.6508917",
"0.6468864",
"0.6456466",
"0.6435698",
"0.6435698",
"0.6393877",
"0.635552",
"0.6348982",
"0.6313319",
"0.63065636",
"0.6287591",
"0.6266777",
"0.6249873"
] | 0.7133699 | 0 |
Save API key and timeout to configuration file. | def test_save_api_key_and_timeout(self, key_option, timeout_option):
runner = CliRunner()
api_key = "<api_key>"
timeout = 123456
expected_config = {"api_key": api_key, "timeout": timeout}
expected_output = "Configuration saved to {!r}\n".format(CONFIG_FILE)
with patch("greynoise.cli.subcommand.save_config") as save_config:
result = runner.invoke(
subcommand.setup, [key_option, api_key, timeout_option, timeout]
)
assert result.exit_code == 0
assert result.output == expected_output
save_config.assert_called_with(expected_config) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save(cls, api_key: str, token: str):\n cls.config_file.write_text(str({\"key\": api_key, \"token\": token}))\n return True",
"def test_save_api_key(self, key_option):\n runner = CliRunner()\n api_key = \"<api_key>\"\n expected_config = {\"api_key\": api_key, \"timeout\": DEFAULT_CONFIG[\"timeout\"]}\n expected_output = \"Configuration saved to {!r}\\n\".format(CONFIG_FILE)\n\n with patch(\"greynoise.cli.subcommand.save_config\") as save_config:\n result = runner.invoke(subcommand.setup, [key_option, api_key])\n assert result.exit_code == 0\n assert result.output == expected_output\n save_config.assert_called_with(expected_config)",
"def save_config() -> None:\n with open(_config_file, \"w\", newline=\"\") as config_file:\n json.dump(_config, config_file, indent=4)\n config_file.truncate()",
"def save(self) -> None:\n self._client.save_config()",
"def save():\n with open(CONFIG_FILE, 'w') as f:\n json.dump(config, f, indent=4, sort_keys=True)",
"def save(self):\n try:\n with open(self._filename, 'w') as conf_file:\n conf_file.write(json.dumps(self._data))\n except OSError:\n _LOGGER.exception(\"Can't store config in %s\", self._filename)",
"def save_config_file(self):\n with open(self.config_file_name, 'w',encoding='utf-8') as outfile:\n json.dump(self._config, outfile,indent=2)",
"def save_config_file(self):\n wkdir = Path(self.config_dict[\"outputdir\"])\n config_filename = str(wkdir / f\"{self.config_dict['name']}.json\")\n save_config(self.config_dict, config_filename)",
"def save_config(self, filename: str=None):\n if not filename:\n filename = self.config_file\n with open(filename, \"w\") as file_object:\n json.dump(self.config, file_object, indent=4, sort_keys=True)",
"def save_config(self):\n with open(self.config_file, 'w') as fout:\n json.dump({'name_dict': self._name_dict, 'metric_dict': self._metric_dict, 'credential_path': self.credential_path, 'path_for_worksheet_name': self.path_for_worksheet_name}, fout)",
"def _save_configuration(self):\n configuration = self._dynamodb.Table('configuration')\n configuration.put_item(Item={'scope': 'spotify',\n 'access_token': self._access_token,\n 'refresh_token': self._refresh_token,\n 'client_id': self._client_id,\n 'client_secret': self._client_secret})",
"def save_config(self, directory_name=None, filename=None):\n\n dirname_ = \"\" if directory_name is None else directory_name\n if dirname_ != \"\" and not os.path.exists(dirname_):\n os.makedirs(dirname_)\n\n filename_ = self.name if filename is None else filename\n filename_ = dirname_ + \"/\" + filename_ + \".json\"\n data = self.__dict__\n # Delete useless parameters\n del data[\"logger\"]\n del data[\"t_init\"]\n del data[\"t_end\"]\n del data[\"n_iter\"]\n\n JsonUtils.write_file(filename_, data)",
"def save(self):\r\n with open(self.filename, 'w') as f:\r\n if self.pretty:\r\n json.dump(self.__config, f, sort_keys=False,\r\n indent=4, separators=(',', ': '))\r\n else:\r\n json.dump(self.__config, f)",
"def save():\n\n env.config.save(env.config_file)",
"def __saveCacheTokens(self):\n self.orgConf[self.cacheSection] = {\n self.ACCESS_TOKEN: self.accessToken,\n self.REFRESH_TOKEN: self.refreshToken\n }\n with open(self.configFile, \"w\") as f:\n self.orgConf.write(f)",
"def saveConfig():\n with open(_CONFIG_FNM, 'w') as configfile:\n CONFIG_DICT.write(configfile,\n space_around_delimiters=True)",
"def save_credentials(verify = True):\n config = {\n 'client_id': input('client id: '),\n 'client_secret': input('client secret: ')\n }\n\n if verify:\n sp = spotipy.Spotify(client_credentials_manager = SpotifyClientCredentials(**config))\n try:\n sp.search(\"The Beatles\")\n except SpotifyException as e:\n # TODO: informative message\n raise\n\n path = Path(CONFIG_PATH).expanduser()\n print(\"Writing credentials to %s\" % path.absolute())\n\n yaml.dump(config, path.open('w'), default_flow_style = False)",
"def configure(api_key=None):\n configuration = {\"api_key\": api_key}\n global _default_configuration\n _default_configuration = configuration",
"def write_key(api_key, output_path, client_module=pyalveo):\n client = client_module.Client(api_key, API_URL, use_cache=False)\n outfile = open(output_path, 'w')\n outfile.write(api_key)\n outfile.close()",
"def saveConfig(self):\r\n self.config[\"Settings\"] = {}\r\n settings = self.config[\"Settings\"]\r\n settings[\"datapath\"] = self.dataPath\r\n settings[\"videopath\"] = self.videoPath\r\n settings[\"dataoffset\"] = str(self.dataOffset)\r\n settings[\"colblindmode\"] = str(self.colBlindMode)\r\n with open(self.CONFIG_FILE,\"w\") as file:\r\n self.config.write(file)",
"def configure(api_key=None):\n configuration = {'api_key': api_key}\n global _default_configuration\n _default_configuration = configuration",
"def save(self):\n file = open(self.path, 'w')\n self.config.write(file)\n file.close()",
"def save(self):\n with open(self._config, 'w') as f:\n json.dump(self.data, f, indent=2, sort_keys=True)",
"def save_to_file(self):\n check_path(self.config_path)\n\n with open(self.settings_file, 'w') as settings_file:\n options = self._get_options()\n json.dump(options,\n \t settings_file,\n \t indent=4,\n \t separators=(',', ': '))",
"def save(self):\n with open(self._CONFIG_FILE_PATH, 'w') as config_file:\n json.dump(vars(self), config_file)\n return self._CONFIG_FILE_PATH",
"def write_config(self):\n cfg = {\n 'ALERT_API_KEY':self.api_key,\n 'APP_NAME':self.title,\n 'alertes':self.alertes\n }\n write_conf(self.CONF_FILE,cfg)",
"def save_config(self):\n if not os.path.exists(self._conf_dir):\n os.makedirs(self._conf_dir)\n conf_file = os.path.join(self._conf_dir, \"dql.json\")\n with open(conf_file, \"w\") as ofile:\n json.dump(self.conf, ofile, indent=2)",
"def save_config(self):\n config.save_config(self.config, self.config_file)",
"def save_config(self):\n\n h_config = configparser.ConfigParser()\n\n h_config[\"general\"] = {}\n if not self.configuration.interval:\n self.configuration.interval = __interval__\n h_config[\"general\"][\"interval\"] = str(self.configuration.interval)\n if not self.configuration.wifi_clients:\n self.configuration.wifi_clients = __wifi_clients_example__\n h_config[\"general\"][\"wifi_clients\"] = \",\".join(self.configuration.wifi_clients)\n if not self.configuration.schedules_names:\n self.configuration.schedules_names = __schedules_names_example__\n h_config[\"general\"][\"schedules_name\"] = \",\".join(self.configuration.schedules_names)\n\n h_config[\"unifi\"] = {}\n if not self.configuration.unifi_host:\n self.configuration.unifi_host = __unifi_controller_host__\n h_config[\"unifi\"][\"host\"] = self.configuration.unifi_host\n if not self.configuration.unifi_port:\n self.configuration.unifi_port = __unifi_controller_port__\n h_config[\"unifi\"][\"port\"] = str(self.configuration.unifi_port)\n if not self.configuration.unifi_username:\n self.configuration.unifi_username = __unifi_controller_user__\n h_config[\"unifi\"][\"username\"] = self.configuration.unifi_username\n if not self.configuration.unifi_password:\n self.configuration.unifi_password = __unifi_controller_pwd__\n h_config[\"unifi\"][\"password\"] = self.configuration.unifi_password\n\n h_config[\"hue\"] = {}\n if not self.configuration.hue_host:\n self.configuration.hue_host = __hue_hub_host__\n h_config[\"hue\"][\"host\"] = self.configuration.hue_host\n if not self.configuration.hue_port:\n self.configuration.hue_port = __hue_hub_port__\n h_config[\"hue\"][\"port\"] = str(self.configuration.hue_port)\n if not self.configuration.hue_key:\n self.configuration.hue_key = __hue_key__\n h_config[\"hue\"][\"key\"] = self.configuration.hue_key\n\n h_config[\"zmq\"] = {}\n if not self.configuration.pub_host:\n self.configuration.pub_host = __zmq_default_publishing_host__\n h_config[\"zmq\"][\"host\"] = self.configuration.pub_host\n if not self.configuration.pub_port:\n self.configuration.pub_port = __zmq_default_publishing_port__\n h_config[\"zmq\"][\"port\"] = str(self.configuration.pub_port)\n if \"no_pub\" in self.configuration:\n h_config[\"zmq\"][\"disabled\"] = str(int(self.configuration.no_pub))\n\n h_config[\"logging\"] = {}\n if self.configuration.syslog_host:\n h_config[\"logging\"][\"syslog_host\"] = self.configuration.syslog_host\n if self.configuration.syslog_port:\n h_config[\"logging\"][\"syslog_port\"] = str(self.configuration.syslog_port)\n if self.configuration.log_file:\n h_config[\"logging\"][\"log_file\"] = str(self.configuration.log_file)\n\n with self.config_file.open(mode='w') as configfile:\n h_config.write(configfile)\n logging.info(\"Configuration saved to {}\".format(str(self.config_file)))",
"def save_conf(self, name=None):\n \n if name:\n filename = name\n \n else:\n filename = \"conf_\" + str(self.conf[\"device\"]) + \"_\" + datetime.today().strftime('%Y-%m-%d') + \".txt\"\n \n path = \"./source/_0_time_series_class/configuration/\"\n filename = path + filename\n \n with open(filename, \"w\") as file:\n json.dump(self.conf, file)"
] | [
"0.7276802",
"0.64182734",
"0.64098287",
"0.6263883",
"0.62403715",
"0.61415565",
"0.6018724",
"0.59904474",
"0.59442395",
"0.5923738",
"0.59134287",
"0.5912611",
"0.5911297",
"0.5909669",
"0.5908877",
"0.5864476",
"0.58602154",
"0.5853999",
"0.5843998",
"0.5833313",
"0.5831508",
"0.58291894",
"0.58103776",
"0.5799897",
"0.57891196",
"0.5777105",
"0.5764561",
"0.5751999",
"0.5735014",
"0.57199186"
] | 0.698072 | 1 |
Setup fails when api_key is not passed. | def test_missing_api_key(self):
runner = CliRunner()
expected_error = 'Error: Missing option "-k" / "--api-key"'
result = runner.invoke(subcommand.setup, [])
assert result.exit_code == 2
assert expected_error in result.output | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_set_api_key(self):\n\n api_key = 'abc'\n project_id = '123'\n\n kaput.init(api_key, project_id)\n\n self.assertEqual(api_key, kaput._API_KEY)\n self.assertEqual(project_id, kaput._PROJECT_ID)\n self.assertFalse(kaput._DEBUG)\n self.assertEqual(kaput._handle_exception, sys.excepthook)",
"def test_no_api_key(self):\n\n self.assertRaises(Exception, kaput.init, None, '123')",
"def _check_settings(self):\n if self.api_key is None:\n raise ImproperlyConfigured(\"You must provide an API key.\")",
"def test_create_api_key(self):\n pass",
"def test_setup_bad_api_key(self, mock_get_forecast):\n # The Dark Sky API wrapper that we use raises an HTTP error\n # when you try to use a bad (or no) API key.\n url = \"https://api.darksky.net/forecast/{}/{},{}?units=auto\".format(\n self.key, str(self.lat), str(self.lon)\n )\n msg = f\"400 Client Error: Bad Request for url: {url}\"\n mock_get_forecast.side_effect = HTTPError(msg)\n\n response = darksky.setup_platform(\n self.hass, VALID_CONFIG_MINIMAL[\"sensor\"], MagicMock()\n )\n assert not response",
"def test_api_key_not_found(self):\n runner = CliRunner()\n\n with patch(\"greynoise.cli.decorator.load_config\") as load_config:\n load_config.return_value = {\"api_key\": \"\"}\n result = runner.invoke(\n subcommand.quick,\n [\"0.0.0.0\"],\n parent=Context(main, info_name=\"greynoise\"),\n )\n assert result.exit_code == -1\n assert \"Error: API key not found\" in result.output",
"def test_api_key_error(api):\n\twith pytest.raises(top_stories.APIKeyError):\n\t\tmissingAPI = top_stories.TopStoriesAPI()",
"def a_valid_api_key(configuration):\n configuration.api_key[\"apiKeyAuth\"] = os.getenv(\"DD_TEST_CLIENT_API_KEY\", \"fake\")",
"def test_add_api_key_to_org(self):\n pass",
"def test_missing_api_key(self):\n with self.assertRaises(TypeError):\n ConnectorWebexTeams()",
"def test_init_barracks_helper_fail_when_no_api_key_given():\n try:\n BarracksHelper(None, _base_url)\n assert False\n except ValueError:\n assert True",
"def test_api_key_not_found(self):\n runner = CliRunner()\n\n with patch(\"greynoise.cli.decorator.load_config\") as load_config:\n load_config.return_value = {\"api_key\": \"\"}\n result = runner.invoke(\n subcommand.ip, [\"0.0.0.0\"], parent=Context(main, info_name=\"greynoise\")\n )\n assert result.exit_code == -1\n assert \"Error: API key not found\" in result.output",
"def load_config_key():\n try:\n global api_key\n api_key = os.environ['IN_API_KEY']\n if len(api_key) == 32:\n try:\n int(api_key, 16)\n except ValueError:\n print(\"Invalid API key\")\n except KeyError:\n print('No API Token detected. '\n 'Please visit {0} and get an API Token, '\n 'which will be used by instantnews '\n 'to get access to the data.'\n .format(API_URL))\n sys.exit(1)",
"def SetAPIKey(self, api_key):\n self._api_key = api_key",
"def SetAPIKey(self, api_key):\n self._api_key = api_key",
"def setup_stripe(api_key):\n stripe.api_key = api_key",
"def __init__(self, api_key: str):\n self.api_key = api_key",
"def __init__(self, api_key=None):\n self.api_key = api_key",
"def test_api_key_not_found(self):\n runner = CliRunner()\n\n with patch(\"greynoise.cli.decorator.load_config\") as load_config:\n load_config.return_value = {\"api_key\": \"\"}\n result = runner.invoke(\n subcommand.stats, [\"query\"], parent=Context(main, info_name=\"greynoise\")\n )\n assert result.exit_code == -1\n assert \"Error: API key not found\" in result.output",
"def set_apikey(self, apikey):\n self.apikey = apikey\n self.__init_submodules(apikey)",
"def test_api_key_not_found(self):\n runner = CliRunner()\n\n with patch(\"greynoise.cli.decorator.load_config\") as load_config:\n load_config.return_value = {\"api_key\": \"\"}\n result = runner.invoke(\n subcommand.query,\n [\"<query>\"],\n parent=Context(main, info_name=\"greynoise\"),\n )\n assert result.exit_code == -1\n assert \"Error: API key not found\" in result.output",
"def test_no_credentials(remove_api_key):\n with raises(\n RuntimeError,\n match=\"Failed to read API key. Did you forget to set GIPHY_API_KEY environment variable?\",\n ):\n api_credentials_provider.resolve_credentials()",
"def test_api_key_is_None(self):\n settings.GTMETRIX_REST_API_KEY = None\n with raises(GTmetrixAPIKeyIsNone):\n gt = GTmetrixInterface()",
"def __init__(self, api_key):\r\n self.api_key = api_key",
"async def test_invalid_key(hass: HomeAssistant, invalid_key_api: Mock) -> None:\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": SOURCE_USER}\n )\n assert result.get(\"type\") == data_entry_flow.FlowResultType.FORM\n assert result.get(\"step_id\") == \"user\"\n\n # Test filling in API key\n result = await hass.config_entries.flow.async_init(\n DOMAIN,\n context={\"source\": SOURCE_USER},\n data={CONF_API_TOKEN: \"psk_123456789\"},\n )\n assert result.get(\"type\") == data_entry_flow.FlowResultType.FORM\n # Goes back to the user step\n assert result.get(\"step_id\") == \"user\"\n assert result.get(\"errors\") == {\"api_token\": \"invalid_api_token\"}",
"def configure(api_key=None):\n configuration = {'api_key': api_key}\n global _default_configuration\n _default_configuration = configuration",
"def configure(api_key=None):\n configuration = {\"api_key\": api_key}\n global _default_configuration\n _default_configuration = configuration",
"def test_validate_api_key(app, seed_data, key, result):\n user_id, api_key = seed_data\n if key == 'use-valid-key':\n key = api_key\n with app.app_context():\n assert auth.validate_api_key(user_id, key) == result",
"def setup(api_token, endpoint_url=None):\n global DEFAULT_API_TOKEN, DEFAULT_ENDPOINT_URL\n DEFAULT_API_TOKEN = api_token\n\n if endpoint_url is not None:\n DEFAULT_ENDPOINT_URL = endpoint_url",
"def resolve_apikey(self):\n # check the instance variable\n apikey = self.apikey\n if apikey is not None:\n return apikey\n\n # check the class variable and environment\n apikey = resolve_apikey()\n if apikey is not None:\n return apikey\n\n # if we got this far, the API key wasn't found\n raise MonitisError('The Monitis API key is required')"
] | [
"0.7627034",
"0.74046487",
"0.73914933",
"0.7372485",
"0.71628237",
"0.7137709",
"0.71146214",
"0.7114452",
"0.7004966",
"0.6978424",
"0.6942819",
"0.69388515",
"0.6821132",
"0.67947453",
"0.67947453",
"0.6776923",
"0.67639387",
"0.6718686",
"0.6701499",
"0.66941726",
"0.6672662",
"0.6669535",
"0.66508037",
"0.66393435",
"0.66318744",
"0.6607009",
"0.65838706",
"0.65403765",
"0.65233487",
"0.6495015"
] | 0.75195545 | 1 |
Run stats query from input file. | def test_input_file(self, api_client):
runner = CliRunner()
query = "<query>"
api_client.stats.return_value = []
expected = json.dumps([[]], indent=4, sort_keys=True)
result = runner.invoke(subcommand.stats, ["-f", "json", "-i", StringIO(query)])
assert result.exit_code == 0
assert result.output.strip("\n") == expected
api_client.stats.assert_called_with(query=query) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run(self,infilename): \n ### initizlize the analysis\n self.init_analysis(infilename)\n ### run the analysis\n self.run_analysis()\n ### store selected results\n self.store_results()\n return",
"def scan(ctx, infile):\n # ensure that ctx.obj exists and is a dict (in case `cli()` is called\n # by means other than the `if` block below)\n ctx.ensure_object(dict)\n ctx.obj['log'] = cdslog\n with open(infile, 'r') as fj:\n args = json.load(fj)\n ctx.obj['dsargs'] = define_args(args['index'], args['tstep'])\n api_request(ctx, args, scan=True)",
"def stats(self, file, **options):\n\n options['file'] = file\n\n return self._get('stats', **options)",
"def stats(filename):\n from .utils import stats as print_stats\n click.echo('Starting to gather statistics on file {}'.format(filename))\n print_stats(filename)\n click.echo('Statistics printing finished')",
"def test_empty_input_file(self, api_client):\n runner = CliRunner()\n\n expected = (\n \"Error: at least one query must be passed either as an argument \"\n \"(QUERY) or through the -i/--input_file option.\"\n )\n\n result = runner.invoke(\n subcommand.stats,\n [\"-i\", StringIO()],\n parent=Context(main, info_name=\"greynoise\"),\n )\n assert result.exit_code == -1\n assert \"Usage: greynoise stats\" in result.output\n assert expected in result.output\n api_client.query.assert_not_called()",
"def parse_results_from_file(fname):\n for l in open(fname,\"r\"):\n fields=l.split()\n query_name=fields[0]\n ranks=[int(rank) for rank in fields[1::2]]\n yield (query_name, list(zip(ranks,fields[2::2])) )",
"def analyze(file,process):\n readin(file)\n # inspecting(file, functions)\n process(file, functions)",
"def execute_queries_from_file(self, file_name, file_path=test_data_path):\n if file_path:\n with open(file_path + file_name, 'rb') as file:\n query = sqlalchemy.sql.text(file)\n else:\n with open(file_name, 'rb') as file:\n query = sqlalchemy.sql.text(file)\n self.execute_query(query)\n return self",
"def stats(input, output, dictshare, format_in, format_out, verbose, zipfile, checkdates):\n if verbose:\n enableVerbose()\n options = {}\n options['output'] = output\n options['dictshare'] = dictshare\n options['zipfile'] = zipfile\n options['format_in'] = format_in\n options['format_out'] = format_out\n options['checkdates'] = checkdates\n options['verbose'] = verbose\n acmd = Analyzer(nodates=not checkdates)\n acmd.analyze(input, options)\n pass",
"def main():\n chatfile_name = sys.argv[1]\n analyze(chatfile_name)",
"def main():\n filename = \"data/exercise.csv\"\n analyze(filename)",
"def run_analysis(args):\n import google.datalab.bigquery as bq\n if args.bigquery_table:\n table = bq.Table(args.bigquery_table)\n schema_list = table.schema._bq_schema\n else:\n schema_list = json.loads(\n file_io.read_file_to_string(args.schema_file).decode())\n table = bq.ExternalDataSource(\n source=args.input_file_pattern,\n schema=bq.Schema(schema_list))\n\n # Check the schema is supported.\n for col_schema in schema_list:\n col_type = col_schema['type'].lower()\n if col_type != 'string' and col_type != 'integer' and col_type != 'float':\n raise ValueError('Schema contains an unsupported type %s.' % col_type)\n\n run_numerical_analysis(table, schema_list, args)\n run_categorical_analysis(table, schema_list, args)\n\n # Save a copy of the schema to the output location.\n file_io.write_string_to_file(\n os.path.join(args.output_dir, SCHEMA_FILE),\n json.dumps(schema_list, indent=2, separators=(',', ': ')))",
"def main():\n if len(sys.argv) != 4:\n sys.exit('Please run with : python data-eng.py donors_file.txt zipcode_output_filename date_output_filename')\n compute_stats(sys.argv[1], sys.argv[2], sys.argv[3])",
"def _analyzeFile(self, filename):\n date = os.path.basename(filename)[:10]\n if filename.endswith('gz'):\n f = gzip.open(filename)\n else:\n f = open(filename)\n lines = f.read().splitlines()\n for line in lines:\n if re.search('joined the game', line):\n self._analyzeLine(line, date, self._start_times)\n elif re.search('left the game', line) or re.search('lost connection',\n line):\n self._analyzeLine(line, date, self._end_times)\n elif re.search('Stopping server', line):\n self._server_stop_times.append(ConvertTime(date, line))",
"def run(self, file):\n self.loadReport(file)\n self.findCountryCode()\n self.reformatAndSave()",
"def gather_sample(self, stat_file, collector=None):\n\n # This file format is weird. Each set of stats is outputted in two\n # lines. First, a header line that list the field names. Then a\n # a value line where each value is specified in the appropriate column.\n # You have to match the column name from the header line to determine\n # what that column's value is. Also, each pair of lines is prefixed\n # with the same name to make it clear they are tied together.\n all_lines = stat_file.readlines()\n # We will create an array of all of the column names in field_names\n # and all of the corresponding values in field_values.\n field_names = []\n field_values = []\n\n # To simplify the stats, we add together the two forms of retransmit\n # I could find in the netstats. Those to fast retransmit Reno and those\n # to selective Ack.\n retransmits = 0\n found_retransmit_metric = False\n\n # Read over lines, looking at adjacent lines. If their row names match,\n # then append their column names and values to field_names\n # and field_values. This will break if the two rows are not adjacent\n # but I do not think that happens in practice. If it does, we just\n # won't report the stats.\n for i in range(0, len(all_lines) - 1):\n names_split = all_lines[i].split()\n values_split = all_lines[i + 1].split()\n # Check the row names are the same.\n if names_split[0] == values_split[0] and len(names_split) == len(\n values_split\n ):\n field_names.extend(names_split)\n field_values.extend(values_split)\n\n if not collector:\n collector = {}\n\n # Now go back and look for the actual stats we care about.\n for i in range(0, len(field_names)):\n if field_names[i] == \"InOctets\":\n collector.update({Metric(\"app.net.bytes\", \"in\"): field_values[i]})\n elif field_names[i] == \"OutOctets\":\n collector.update({Metric(\"app.net.bytes\", \"out\"): field_values[i]})\n elif field_names[i] == \"TCPRenoRecovery\":\n retransmits += int(field_values[i])\n found_retransmit_metric = True\n elif field_names[i] == \"TCPSackRecovery\":\n retransmits += int(field_values[i])\n found_retransmit_metric = True\n\n # If we found both forms of retransmit, add them up.\n if found_retransmit_metric:\n collector.update({Metric(\"app.net.tcp_retransmits\", None): retransmits})\n return collector",
"def run_query_tap_s3_csv(self, file):\n pass",
"def main(input_file):\n with open(input_file, 'r') as file:\n # not currently used, but part of challenge:\n _ = int(file.readline().rstrip())\n for line in file:\n count = doublesquare(int(line.rstrip()))\n print(count)",
"def scan(infile):\n with open(infile, 'r') as fj:\n args = json.load(fj)\n api_request(args['update'], args['format'], args['stream'], \n args['params'], args['year'], args['months'], \n args['timestep'], args['back'])",
"def read_run_info_from_file(file):\n\n with open(file) as f:\n raw = f.read()\n\n lines = raw.split('\\n')\n runs = []\n for line in lines:\n if line == '':\n continue\n comma_splits = line.split(',')\n train_acc = float(comma_splits[0].strip())\n test_acc = float(comma_splits[1].strip())\n feature_set = set(w for w in comma_splits[2].split() if not w == '')\n runs.append(\n {'fts': feature_set, 'train_acc': train_acc, 'test_acc': test_acc}\n )\n return runs",
"def analyze_file(self, filename):\n if self.exceeded_max():\n return\n\n if self.preprocess is not None:\n input = self.preprocess(filename)\n else:\n with open(filename, \"r\") as file:\n input = file.read()\n\n self.analyze_raw(input)",
"def load_queries(self, file):\n queries = []\n with open(file, 'r') as f:\n for line in f:\n reg_match = re.match(r'^(\\d+).(.*)', line)\n tokens = self.es_helper.get_tokens(reg_match.group(2).strip())\n queries.append(Query(reg_match.group(1).strip(), self.es_helper, tokens))\n self.queries = queries",
"def MainStats(path, filetype, NrExp, col, start, stop):\n# path= path.split('/') # here is better to google and see what is going on. Or experiment alone\n# path= \"/\".join(path[:-1]) \n dato=ExtractData_raw_files(path, filetype)\n dBase=dato.createDictBase()\n stats = Stats(dBase, NrExp, col, start, stop)\n means, stds=stats.Means_Stds()\n times = stats.time_return()\n return means , stds, times",
"def process_query(query_file):\r\n query_data = query_file.readlines()\r\n query_dict = {}\r\n x = 1 \r\n search_dict = {}\r\n search_dict['username'] = query_data[x].strip('\\n')\r\n x += 1\r\n operation_list = []\r\n \r\n while query_data[x] != 'FILTER\\n': \r\n operation_list.append(query_data[x].strip('\\n'))\r\n x += 1\r\n \r\n search_dict['operations'] = operation_list \r\n query_dict['search'] = search_dict \r\n x += 1\r\n \r\n filter_dict = {}\r\n filter_format(filter_dict, query_data, 'name-includes', x)\r\n filter_format(filter_dict, query_data, 'location-includes', x)\r\n filter_format(filter_dict, query_data, 'follower', x)\r\n filter_format(filter_dict, query_data, 'following', x)\r\n query_dict['filter'] = filter_dict\r\n \r\n present_dict = {}\r\n sort_by = query_data[-2].strip('sort-by ')\r\n present_dict['sort-by'] = sort_by.strip('\\n')\r\n \r\n format_type = query_data[-1].lstrip('format ')\r\n present_dict['format'] = format_type\r\n query_dict['present'] = present_dict\r\n \r\n return query_dict",
"def gather_sample(self, stat_file, collector=None):\n if not collector:\n collector = {}\n # The file format is just a single line of all the fields.\n line = stat_file.readlines()[0]\n # Chop off first part which is the pid and executable file. The\n # executable file is terminated with a paren so just search for that.\n line = line[(line.find(\") \") + 2) :]\n fields = line.split()\n # Then the fields we want are just at fixed field positions in the\n # string. Just grab them.\n\n # See http://man7.org/linux/man-pages/man5/proc.5.html for reference on field numbers\n # Keep in mind that we chop first 3 values away (pid, command line, state), so you need to\n # subtract 3 from the field numbers from the man page (e.g. on the man page nice is number\n # 19, but in our case it's 16 aka 19 - 3)\n process_uptime = self.__get_uptime_ms() - self.calculate_time_ms(\n int(fields[19])\n )\n\n collector.update(\n {\n Metric(\"app.cpu\", \"user\"): self.__calculate_time_cs(int(fields[11])),\n Metric(\"app.cpu\", \"system\"): self.__calculate_time_cs(int(fields[12])),\n Metric(\"app.uptime\", None): process_uptime,\n Metric(\"app.nice\", None): float(fields[16]),\n Metric(\"app.threads\", None): int(fields[17]),\n Metric(\"app.mem.majflt\", None): int(fields[9]),\n Metric(\"app.io.wait\", None): int(fields[39])\n if len(fields) >= 39\n else 0,\n }\n )\n return collector",
"def main(input_file, visualize):\n logging.info('Reading lines...')\n\n with open(input_file) as f:\n content = f.read()\n\n clauses, thesis = content.split('---\\n')\n\n logging.info('Parsing clauses...')\n parser = ClauseParser()\n parsed_clauses = parser.parse_cnf_list(clauses.splitlines())\n parsed_thesis = parser.parse_cnf_list(thesis.splitlines())\n\n result, tree = resolution(parsed_clauses, parsed_thesis)\n\n if visualize:\n display_resolution_tree(tree)\n\n logging.info(f'The thesis is {result}')",
"def table_stats(self, db, dest, kvargs, lines):\n if 'table' in kvargs:\n tables = [db.get_table(kvargs['table'])]\n else:\n tables = db.tables()\n options = kvargs.get('options','')\n done = False\n for table in db.tables():\n print(\"======================= {} =======================\".format(table.name))\n if 'dump' in options:\n print(\"schema dump:\")\n table.dump()\n print(\"\")\n if 'head' in options:\n print(\"First 5 records:\")\n for source_record in db.read_records_as_dicts(tablename=table.name, limit=5):\n print(source_record)\n print(\"\")\n # Compute single-variable stats on each of the variables\n sw = stopwatch().start()\n print(\"Computing statistics...\")\n stats = {}\n census_checksum = 0\n \n if self.spark_context:\n print(\"Using spark to read {} ... assuming first line has headings\".format(table.filename))\n sc = self.spark_context\n data = sc.textFile(table.filename)\n header = data.first() # extract the header\n stats = data.filter(lambda row:row!=header).map(table.parse_line_to_dict).reduce(stats_reducer)\n else:\n try:\n for source_record in db.read_records_as_dicts(tablename=table.name,limit=self.limit):\n if source_record['RECTYPE']=='P':\n census_checksum += census_person_polynominal(source_record)\n stats = stats_reducer(source_record, stats)\n except KeyboardInterrupt as e:\n print(\"*** KeyboardInterrupt at count: {}\".format(stats[':count']))\n done = True\n if stats:\n print(\"total records: {} speed: {:8.0f} records/sec\".format( stats[':count'], stats[':count']/sw.elapsed()))\n tt = tytable.ttable()\n tt.add_head(['variable','min','avg','max'])\n tt.set_col_alignment(1,tytable.ttable.RIGHT)\n tt.set_col_alignment(2,tytable.ttable.RIGHT)\n tt.set_col_alignment(3,tytable.ttable.RIGHT)\n for key in stats_variable_names(stats):\n try:\n tt.add_data([key, stats[key+\":min\"], stats[key+\":sum\"] / stats[':count'], stats[key+\":max\"]])\n except TypeError:\n tt.add_data([key, stats[key+\":min\"], \"\", stats[key+\":max\"]])\n print(tt.typeset(mode=tytable.TEXT))\n if census_checksum:\n print(\"Census checksum: {}\".format(census_checksum))\n print(\"\")\n if done:\n return True # had the keyboard abort\n return True",
"def do_stat(self, arg):\n\t\topts = get_options(parser.parser_stat, arg)\n\t\tif opts is None: return\n\t\tdisplay.print_stats(\n\t\t\t\tself.manager.provide_stats(limit = opts.limit))",
"def setupQuery(self, file):\n file.write(\"QUERY(FALSE);\\n\")\n file.write(\"COUNTEREXAMPLE;\\n\")\n return",
"def vcf_query(self, **kwargs):\n try:\n import pysam\n except ImportError:\n print(\"Can't find pysam\")\n raise ImportError('Handling of bam files requires pysam')\n\n try:\n file_handle = pysam.Tabix(self._meta.filename, 'rb')\n except IOError:\n raise IOError('Could not find bam file')\n\n reads = file_handle.fetch(\n kwargs['id'],\n kwargs['start'],\n kwargs['stop'])\n\n hits = dict(**reads)\n print(\"hits\")\n\n raise NotImplementedError()"
] | [
"0.59040993",
"0.5832681",
"0.5798229",
"0.5739486",
"0.5717046",
"0.57023704",
"0.5664398",
"0.5652504",
"0.56456834",
"0.55918276",
"0.55815077",
"0.55731744",
"0.5562873",
"0.5555167",
"0.5533607",
"0.55091774",
"0.5423072",
"0.5409913",
"0.5403574",
"0.53904545",
"0.5387026",
"0.5360202",
"0.5348909",
"0.5348699",
"0.53450495",
"0.5337463",
"0.5329245",
"0.52865624",
"0.52825195",
"0.52727985"
] | 0.61347854 | 0 |
Given a type the_type, generate a basic field which casts the passed argument to the_type if it is not None, else return None. | def _gen_basic_field(name_of_field, name_of_type, the_type):
def validate(self, x):
return None if x is None else the_type(x)
doc = "A field which can be {name_of_type} or None".format(name_of_type=name_of_type)
return Field(name_of_field, (), {'validate': validate, '__doc__': doc}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def NoneOrType(type_):\n def coercer(value):\n if value is None:\n return value\n else:\n return type_(value)\n return coercer",
"def optional_field(\n field_type: Union[Type[_T], str], relation_verbose_name: Optional[str] = None\n) -> Optional[_T]:\n metadata = {}\n if relation_verbose_name:\n metadata[\"relation_verbose_name\"] = relation_verbose_name\n\n assert not isinstance(field_type, str) or field_type == \"self\"\n\n return attr.ib(\n default=None,\n validator=attr.validators.optional(\n instance_of_self()\n if isinstance(field_type, str)\n else attr.validators.instance_of(field_type)\n ),\n metadata=metadata,\n )",
"def make_field(field):\n\n if \"time\" in field:\n return TimeField(field)\n if \"zd\" in field:\n return RadianField(field)\n else:\n return SimpleField(field)",
"def convert_or_none(value, type_):\n try:\n return type_(value)\n except Exception:\n return None",
"def field_type(f, default=MISSING, *, unwrap=True) -> Union[tuple, Any]:\n return _field_type(f, TYPE, default, unwrap=unwrap)",
"def resolve_field(struct_object: Struct, attr_name: str, attr_value: Any) -> Optional[BaseField]:\n if attr_name.startswith(\"_\"):\n return None\n\n if isinstance(attr_value, BaseField):\n new_field: BaseField = attr_value._replace_parent(parent=struct_object) # pylint: disable=protected-access\n return new_field\n\n return None",
"def gen_config_field(name_of_field, name_of_type, the_type):\n return _gen_basic_field(name_of_field, name_of_type, the_type)",
"def strip_non_null_from_type(graphql_type):\n while isinstance(graphql_type, GraphQLNonNull):\n graphql_type = graphql_type.of_type\n return graphql_type",
"def build_standard_field(self, field_name, model_field_type):\n field_mapping = self.serializer_field_mapping\n field_class = field_mapping[model_field_type]\n field_kwargs = get_field_kwargs(field_name, model_field_type)\n\n if \"choices\" in field_kwargs:\n # Fields with choices get coerced into `ChoiceField`\n # instead of using their regular typed field.\n field_class = self.serializer_choice_field\n # Some model fields may introduce kwargs that would not be valid\n # for the choice field. We need to strip these out.\n # Eg. models.DecimalField(max_digits=3, decimal_places=1, choices=DECIMAL_CHOICES)\n valid_kwargs = {\n \"read_only\",\n \"write_only\",\n \"required\",\n \"default\",\n \"initial\",\n \"source\",\n \"label\",\n \"help_text\",\n \"style\",\n \"error_messages\",\n \"validators\",\n \"allow_null\",\n \"allow_blank\",\n \"choices\",\n }\n for key in list(field_kwargs):\n if key not in valid_kwargs:\n field_kwargs.pop(key)\n\n if not issubclass(field_class, fields.CharField) and not issubclass(\n field_class, fields.ChoiceField\n ):\n # `allow_blank` is only valid for textual fields.\n field_kwargs.pop(\"allow_blank\", None)\n\n return field_class, field_kwargs",
"def try_get_field(self, field_name: str) -> Optional[fields.Field]:\n field = getattr(self, field_name, None)\n if isinstance(field, fields.Field):\n return field\n return None",
"def to_type(a: Any, data_type):\n return None if a is None else data_type(a)",
"def get_field_type(self, field):\n for mapping in self.mappings:\n if isinstance(field, mapping[1]):\n return mapping[0]\n return None",
"def get_field_type(\n self, field_type: Union[Type, str], collection_name: str\n ) -> SchemaFieldDataType:\n TypeClass: Optional[Type] = _field_type_mapping.get(field_type)\n\n if TypeClass is None:\n self.report.report_warning(\n collection_name, f\"unable to map type {field_type} to metadata schema\"\n )\n TypeClass = NullTypeClass\n\n return SchemaFieldDataType(type=TypeClass())",
"def get_field_def(schema, parent_type, field_ast):\n name = field_ast.name.value\n if name == SchemaMetaFieldDef.name and schema.get_query_type() == parent_type:\n return SchemaMetaFieldDef\n\n elif name == TypeMetaFieldDef.name and schema.get_query_type() == parent_type:\n return TypeMetaFieldDef\n\n elif name == TypeNameMetaFieldDef.name and \\\n isinstance(parent_type, (\n GraphQLObjectType,\n GraphQLInterfaceType,\n GraphQLUnionType,\n )):\n return TypeNameMetaFieldDef\n\n elif isinstance(parent_type, (GraphQLObjectType, GraphQLInterfaceType)):\n return parent_type.get_fields().get(name)",
"def _format_field_val(\n self,\n field: str,\n field_type: str,\n value: Any,\n ) -> str | int | bool | list | None:\n\n # If the field is empty, no need to format.\n if value is None:\n return None\n\n # TODO(DanielRyanSmith): Write checks to ensure enum values are valid.\n if field_type == 'emails' or field_type == 'split_str':\n list_val = self._split_list_input(field, field_type, value, ',')\n if field == 'blink_components' and len(value) == 0:\n return [settings.DEFAULT_COMPONENT]\n return list_val\n elif field_type == 'link':\n return self._extract_link(value)\n elif field_type == 'links':\n list_val = self._split_list_input(field, field_type, value)\n # Filter out any URLs that do not conform to the proper pattern.\n return [self._extract_link(link)\n for link in list_val if link]\n elif field_type == 'int':\n # Int fields can be unset by giving null or nothing in the input field.\n if value == '' or value is None:\n return None\n try:\n return int(value)\n except ValueError:\n self._abort_invalid_data_type(field, field_type, value)\n elif field_type == 'bool':\n return bool(value)\n return str(value)",
"def UserDefinedFromTypeRef(self, mojom_type):\n type_key = mojom_type.type_reference.type_key\n module_type = self.UserDefinedFromTypeKey(type_key)\n if mojom_type.type_reference.nullable:\n return module_type.MakeNullableKind()\n return module_type",
"def to_field(obj):\r\n\r\n\r\n if isinstance(obj, Field):\r\n field = obj\r\n else:\r\n d = { \"storage_type\": \"unknown\" }\r\n\r\n if isinstance(obj, basestring):\r\n d[\"name\"] = obj\r\n elif type(obj) == tuple or type(obj) == list:\r\n d[\"name\"] = obj[0]\r\n try:\r\n d[\"storage_type\"] = obj[1]\r\n try:\r\n d[\"analytical_type\"] = obj[2]\r\n except:\r\n pass\r\n except:\r\n pass\r\n else: # assume dictionary\r\n d[\"name\"] = obj[\"name\"]\r\n d[\"label\"] = obj.get(\"label\")\r\n d[\"storage_type\"] = obj.get(\"storage_type\")\r\n d[\"analytical_type\"] = obj.get(\"analytical_type\")\r\n d[\"adapter_storage_type\"] = obj.get(\"adapter_storage_type\")\r\n\r\n if \"analytical_type\" not in d:\r\n storage_type = d.get(\"storage_type\")\r\n if storage_type:\r\n deftype = default_analytical_types.get(storage_type)\r\n d[\"analytical_type\"] = deftype or \"typeless\"\r\n else:\r\n d[\"analytical_type\"] = \"typeless\"\r\n\r\n field = Field(**d)\r\n return field",
"def specific(self):\n field_attr = field_registry.field_map[self.type]\n return getattr(self, field_attr, None)",
"def required_field(\n field_type: Union[Type[_T], str],\n frozen: bool = False,\n relation_verbose_name: Optional[str] = None,\n) -> _T:\n metadata = {}\n if relation_verbose_name:\n metadata[\"relation_verbose_name\"] = relation_verbose_name\n\n assert not isinstance(field_type, str) or field_type == \"self\"\n\n kwargs = {\n \"validator\": instance_of_self()\n if isinstance(field_type, str)\n else attr.validators.instance_of(field_type),\n \"metadata\": metadata,\n }\n if frozen:\n kwargs[\"on_setattr\"] = attr.setters.frozen\n\n return attr.ib(**kwargs) # type: ignore",
"def createField(schemaName, field):\n# print(field.domain)\n# print(field.name, field.domain if isinstance(field.domain, str) else field.domain.type)\n# print(field.__dict__)\n return \"\\\"{name}\\\" {type_}\".format(\n name = field.name,\n type_ = '\"' + schemaName + '\".\"' + field.domain + '\"' if isinstance(field.domain, str) else getType(field.domain)\n )",
"def _get_decl_for_model_field(field: Field) -> Optional[declarations.BaseDeclaration]:\n if isinstance(field, PartialDateField):\n return factory.Faker('date')\n internal_type = field.get_internal_type()\n declaration = None\n if internal_type in ('CharField', 'TextField'):\n if field.unique:\n declaration = UniqueFaker('word')\n else:\n declaration = factory.Faker('word')\n elif internal_type in (\n 'IntegerField', 'PositiveSmallIntegerField', 'SmallIntegerField',\n 'BigIntegerField'):\n if field.unique:\n declaration = factory.Sequence(lambda n: n)\n else:\n declaration = factory.Faker('pyint')\n elif internal_type in ('BooleanField', 'NullBooleanField'):\n declaration = factory.Faker('pybool')\n elif internal_type in ('DateField', 'DateTimeField', 'TimeField'):\n # The providers for these fields are called 'date','date_time','time'.\n # Derive the provider name from the internal_type.\n provider = ''\n for i, c in enumerate(internal_type.replace('Field', '')):\n if i and c.isupper():\n provider += '_'\n provider += c.lower()\n declaration = factory.Faker(provider)\n elif internal_type == 'DurationField':\n declaration = factory.Faker('time_delta')\n if declaration is None:\n raise Exception(\n f\"Could not find a faker declaration appropriate for model field {field!r}\"\n )\n return declaration",
"def get_field_type(field):\n if (field < len(Field.FIELD_TYPES)):\n return Field.FIELD_TYPES[field][1]\n return 'unknown'",
"def _decode_nullable(\n data_type, obj, alias_validators, strict, old_style, for_msgpack):\n if obj is not None:\n return _json_compat_obj_decode_helper(\n data_type.validator, obj, alias_validators, strict, old_style,\n for_msgpack)\n else:\n return None",
"def _get_default_value(type_name, is_simple, is_iterative, is_required):\n # Iterables: convert via pre-defined mappings.\n if is_iterative:\n if is_required:\n return _get_iterative_default_value()\n else:\n return _get_iterative_null_value()\n # Simple types: convert via pre-defined mappings.\n elif is_simple:\n if is_required:\n return _get_simple_default_value(type_name)\n else:\n return _get_simple_null_value(type_name)\n # Complex types: convert via pre-defined mappings.\n else:\n if is_required:\n return _get_complex_default_value(type_name)\n else:\n return _get_complex_null_value(type_name)",
"def _assign_type(self, type):\n if self.is_input:\n return 'data'\n else:\n return type",
"def field_type(self):\n return \"\"",
"def __get_attr_helper(self, object, field, default=None):\n # TODO: Make PR to fix this ^ bug\n if hasattr(object, field):\n return getattr(object, field)\n\n return default",
"def _uifield_from_pydantic1(model_field: ModelField) -> UiField:\n from pydantic.fields import SHAPE_SINGLETON\n from pydantic.fields import Undefined as PydanticUndefined\n\n finfo = model_field.field_info\n\n _extra_dict = finfo.extra.copy()\n # backport from pydantic2\n if \"json_schema_extra\" in _extra_dict:\n _extra_dict.update(_extra_dict.pop(\"json_schema_extra\"))\n\n extra = {k: v for k, v in _extra_dict.items() if k in _UI_FIELD_NAMES}\n const = finfo.const if finfo.const not in (None, PydanticUndefined) else Undefined\n default = (\n Undefined if finfo.default in (PydanticUndefined, Ellipsis) else finfo.default\n )\n\n nullable = None\n if model_field.allow_none and (\n model_field.shape != SHAPE_SINGLETON or not model_field.sub_fields\n ):\n nullable = True\n\n return UiField(\n name=model_field.name,\n title=finfo.title,\n description=finfo.description,\n default=default,\n default_factory=model_field.default_factory,\n type=model_field.outer_type_,\n nullable=nullable,\n const=const,\n minimum=finfo.ge,\n maximum=finfo.le,\n exclusive_minimum=finfo.gt,\n exclusive_maximum=finfo.lt,\n multiple_of=finfo.multiple_of,\n min_length=finfo.min_length,\n max_length=finfo.max_length,\n pattern=finfo.regex,\n # format=finfo.format,\n min_items=finfo.min_items,\n max_items=finfo.max_items,\n unique_items=finfo.unique_items,\n _native_field=model_field,\n **extra,\n )",
"def get_type_name_value(obj):\n return None if obj is None else obj.GetTypeName()",
"def field_type(self) -> Optional[NameObject]:\n return self.get(\"/FT\")"
] | [
"0.654419",
"0.6099716",
"0.60488933",
"0.5988047",
"0.58408254",
"0.577662",
"0.5766554",
"0.5711901",
"0.5640027",
"0.5590506",
"0.5590125",
"0.55728567",
"0.5530288",
"0.54900694",
"0.5441718",
"0.5416539",
"0.5405262",
"0.5376466",
"0.5349207",
"0.5310458",
"0.52852196",
"0.5253065",
"0.52400416",
"0.5223552",
"0.52177346",
"0.5179317",
"0.5172434",
"0.515066",
"0.51481944",
"0.5135861"
] | 0.73560774 | 0 |
Generate a Field type that will validate a `Config`. | def gen_config_field(name_of_field, name_of_type, the_type):
return _gen_basic_field(name_of_field, name_of_type, the_type) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def createField(schemaName, field):\n# print(field.domain)\n# print(field.name, field.domain if isinstance(field.domain, str) else field.domain.type)\n# print(field.__dict__)\n return \"\\\"{name}\\\" {type_}\".format(\n name = field.name,\n type_ = '\"' + schemaName + '\".\"' + field.domain + '\"' if isinstance(field.domain, str) else getType(field.domain)\n )",
"def make_config_field(\n cls,\n doc: str = \"How to pack tract, patch, and possibly band into an integer.\"\n ) -> ConfigurableField:\n return ConfigurableField(doc, target=cls.from_config, ConfigClass=cls.ConfigClass)",
"def _gen_basic_field(name_of_field, name_of_type, the_type):\n def validate(self, x):\n return None if x is None else the_type(x)\n\n doc = \"A field which can be {name_of_type} or None\".format(name_of_type=name_of_type)\n\n return Field(name_of_field, (), {'validate': validate, '__doc__': doc})",
"def make_type(\n schema: Schema,\n name: str,\n module: Optional[str] = None,\n key_filename: Optional[str] = None,\n) -> Type[ConfigType]:\n result = type(\n name, (ConfigType,), {\"__schema__\": schema, \"__key_filename__\": key_filename}\n )\n # This is copied from the namedtuple method. We try to set the module of the new\n # class to the calling module.\n if module is None:\n try:\n module = sys._getframe(1).f_globals.get(\"__name__\", \"__main__\")\n except (AttributeError, ValueError): # pragma: no cover\n pass\n if module is not None:\n result.__module__ = module\n\n return result",
"def DecoderDataclassField(feature_type: str, default: str):\n\n\n class DecoderMarshmallowField(fields.Field):\n \"\"\"Custom marshmallow field that deserializes a dict for a valid decoder config from the decoder_registry\n and creates a corresponding `oneOf` JSON schema for external usage.\"\"\"\n\n def _deserialize(self, value, attr, data, **kwargs):\n if value is None:\n return None\n if isinstance(value, dict):\n if TYPE in value and value[TYPE] in get_decoder_classes(feature_type):\n dec = get_decoder_cls(feature_type, value[TYPE])\n try:\n return dec.Schema().load(value)\n except (TypeError, ValidationError) as error:\n raise ValidationError(f'Invalid decoder params: {value}, see `{dec}` definition. Error: {error}')\n raise ValidationError(f'Invalid params for decoder: {value}, expect dict with at least a valid `type` attribute.')\n raise ValidationError('Field should be None or dict')\n\n @staticmethod\n def _jsonschema_type_mapping():\n decoder_classes = list(get_decoder_classes(feature_type).keys())\n return {'type': 'object', 'properties': {'type': {'type': 'string', 'enum': decoder_classes, 'default': default}}, 'title': 'decoder_options', 'allOf': get_decoder_conds(feature_type)}\n try:\n decoder = get_decoder_cls(feature_type, default)\n load_default = decoder.Schema().load({'type': default})\n dump_default = decoder.Schema().dump({'type': default})\n return field(metadata={'marshmallow_field': DecoderMarshmallowField(allow_none=False, dump_default=dump_default, load_default=load_default)}, default_factory=lambda : load_default)\n except Exception as e:\n raise ValidationError(f'Unsupported decoder type: {default}. See decoder_registry. Details: {e}')",
"def _create_field_schema(col_schema: dict) -> bigquery.SchemaField:\n name = to_safe_name(col_schema['name'])\n return bigquery.SchemaField(\n name,\n col_schema.get('type'),\n col_schema.get('mode', 'NULLABLE'),\n col_schema.get('description', '')\n )",
"def get_form_field(spec):\n\n FieldClass = TYPES_MAP[spec[\"type\"]]\n\n kwargs = {\n \"label\": spec.get(\"label\"),\n \"required\": spec.get(\"required\") or False,\n \"help_text\": spec.get(\"help_text\"),\n }\n\n if \"choices\" in spec:\n kwargs[\"choices\"] = [\n [c[\"value\"], c[\"display_name\"]] for c in spec[\"choices\"]]\n if not kwargs[\"required\"]:\n kwargs[\"choices\"].insert(0, (\"\", \"\"))\n\n if spec[\"type\"] == \"string\":\n if \"max_length\" in spec:\n kwargs[\"max_length\"] = spec[\"max_length\"]\n else:\n kwargs[\"widget\"] = forms.widgets.Textarea\n\n if spec[\"type\"] == \"integer\":\n kwargs[\"max_value\"] = 100000000000\n\n return FieldClass(**kwargs)",
"def _make_field(index, field_desc, names):\n field_schema = schema_from_json_data(\n json_data=field_desc['type'],\n names=names,\n )\n other_props = (\n dict(filter_keys_out(items=field_desc, keys=FIELD_RESERVED_PROPS)))\n return Field(\n data_type=field_schema,\n name=field_desc['name'],\n index=index,\n has_default=('default' in field_desc),\n default=field_desc.get('default', _NO_DEFAULT),\n order=field_desc.get('order', None),\n doc=field_desc.get('doc', None),\n other_props=other_props,\n )",
"def PreprocessingDataclassField(feature_type: str):\n\n\n class PreprocessingMarshmallowField(fields.Field):\n \"\"\"Custom marshmallow field that deserializes a dict for a valid preprocessing config from the\n preprocessing_registry and creates a corresponding JSON schema for external usage.\"\"\"\n\n def _deserialize(self, value, attr, data, **kwargs):\n if value is None:\n return None\n if isinstance(value, dict):\n if feature_type in preprocessing_registry:\n pre = preprocessing_registry[feature_type]\n try:\n return pre.Schema().load(value)\n except (TypeError, ValidationError) as error:\n raise ValidationError(f'Invalid preprocessing params: {value}, see `{pre}` definition. Error: {error}')\n raise ValidationError(f'Invalid params for preprocessor: {value}, expect dict with at least a valid `type` attribute.')\n raise ValidationError('Field should be None or dict')\n\n @staticmethod\n def _jsonschema_type_mapping():\n preprocessor_cls = preprocessing_registry[feature_type]\n props = schema_utils.unload_jsonschema_from_marshmallow_class(preprocessor_cls)['properties']\n return {'type': 'object', 'properties': props, 'title': 'preprocessing_options', 'additionalProperties': True}\n try:\n preprocessor = preprocessing_registry[feature_type]\n load_default = preprocessor.Schema().load({'feature_type': feature_type})\n dump_default = preprocessor.Schema().dump({'feature_type': feature_type})\n return field(metadata={'marshmallow_field': PreprocessingMarshmallowField(allow_none=False, dump_default=dump_default, load_default=load_default)}, default_factory=lambda : load_default)\n except Exception as e:\n raise ValidationError(f'Unsupported preprocessing type: {feature_type}. See preprocessing_registry. Details: {e}')",
"def EncoderDataclassField(feature_type: str, default: str):\n\n\n class EncoderMarshmallowField(fields.Field):\n \"\"\"Custom marshmallow field that deserializes a dict for a valid encoder config from the encoder_registry\n and creates a corresponding `oneOf` JSON schema for external usage.\"\"\"\n\n def _deserialize(self, value, attr, data, **kwargs):\n if value is None:\n return None\n if isinstance(value, dict):\n if TYPE in value and value[TYPE] in get_encoder_classes(feature_type):\n enc = get_encoder_cls(feature_type, value[TYPE])\n try:\n return enc.Schema().load(value)\n except (TypeError, ValidationError) as error:\n raise ValidationError(f'Invalid encoder params: {value}, see `{enc}` definition. Error: {error}')\n raise ValidationError(f'Invalid params for encoder: {value}, expect dict with at least a valid `type` attribute.')\n raise ValidationError('Field should be None or dict')\n\n @staticmethod\n def _jsonschema_type_mapping():\n encoder_classes = list(get_encoder_classes(feature_type).keys())\n return {'type': 'object', 'properties': {'type': {'type': 'string', 'enum': encoder_classes, 'default': default}}, 'title': 'encoder_options', 'allOf': get_encoder_conds(feature_type)}\n try:\n encoder = get_encoder_cls(feature_type, default)\n load_default = encoder.Schema().load({'type': default})\n dump_default = encoder.Schema().dump({'type': default})\n return field(metadata={'marshmallow_field': EncoderMarshmallowField(allow_none=False, dump_default=dump_default, load_default=load_default)}, default_factory=lambda : load_default)\n except Exception as e:\n raise ValidationError(f'Unsupported encoder type: {default}. See encoder_registry. Details: {e}')",
"def field(base : SetupVal, field_name : str) -> SetupVal:\n if not isinstance(base, SetupVal):\n raise ValueError('field expected a SetupVal, but got {base!r}')\n if not isinstance(field_name, str):\n raise ValueError('field expected a str, but got {field_name!r}')\n return FieldVal(base, field_name)",
"def to_field(obj):\r\n\r\n\r\n if isinstance(obj, Field):\r\n field = obj\r\n else:\r\n d = { \"storage_type\": \"unknown\" }\r\n\r\n if isinstance(obj, basestring):\r\n d[\"name\"] = obj\r\n elif type(obj) == tuple or type(obj) == list:\r\n d[\"name\"] = obj[0]\r\n try:\r\n d[\"storage_type\"] = obj[1]\r\n try:\r\n d[\"analytical_type\"] = obj[2]\r\n except:\r\n pass\r\n except:\r\n pass\r\n else: # assume dictionary\r\n d[\"name\"] = obj[\"name\"]\r\n d[\"label\"] = obj.get(\"label\")\r\n d[\"storage_type\"] = obj.get(\"storage_type\")\r\n d[\"analytical_type\"] = obj.get(\"analytical_type\")\r\n d[\"adapter_storage_type\"] = obj.get(\"adapter_storage_type\")\r\n\r\n if \"analytical_type\" not in d:\r\n storage_type = d.get(\"storage_type\")\r\n if storage_type:\r\n deftype = default_analytical_types.get(storage_type)\r\n d[\"analytical_type\"] = deftype or \"typeless\"\r\n else:\r\n d[\"analytical_type\"] = \"typeless\"\r\n\r\n field = Field(**d)\r\n return field",
"def SchedulerDataclassField(default={'type': 'fifo'}, description='Hyperopt scheduler settings.'):\n\n\n class SchedulerMarshmallowField(fields.Field):\n \"\"\"Custom marshmallow field that deserializes a dict to a valid scheduler from\n `ludwig.schema.hyperopt.scheduler_registry` and creates a corresponding `oneOf` JSON schema for external\n usage.\"\"\"\n\n def _deserialize(self, value, attr, data, **kwargs):\n if value is None:\n return None\n if isinstance(value, dict):\n if 'type' in value and value['type'] in scheduler_config_registry:\n scheduler_config_cls = scheduler_config_registry[value['type'].lower()]\n try:\n return scheduler_config_cls.Schema().load(value)\n except (TypeError, ValidationError) as e:\n raise ValidationError(f'Invalid params for scheduler: {value}, see `{opt}` definition. Error: {e}')\n raise ValidationError(f'Invalid params for scheduler: {value}, expect dict with at least a valid `type` attribute.')\n raise ValidationError('Field should be None or dict')\n\n @staticmethod\n def _jsonschema_type_mapping():\n return {'type': 'object', 'properties': {'type': {'type': 'string', 'enum': list(scheduler_config_registry.keys()), 'default': default['type'], 'description': 'The type of scheduler to use during hyperopt'}}, 'title': 'scheduler_options', 'allOf': get_scheduler_conds(), 'required': ['type'], 'description': description}\n if not isinstance(default, dict) or 'type' not in default or default['type'] not in scheduler_config_registry:\n raise ValidationError(f'Invalid default: `{default}`')\n try:\n opt = scheduler_config_registry[default['type'].lower()]\n load_default = opt.Schema().load(default)\n dump_default = opt.Schema().dump(default)\n return field(metadata={'marshmallow_field': SchedulerMarshmallowField(allow_none=False, dump_default=dump_default, load_default=load_default, metadata={'description': description})}, default_factory=lambda : load_default)\n except Exception as e:\n raise ValidationError(f\"Unsupported scheduler type: {default['type']}. See scheduler_config_registry. Details: {e}\")",
"def _uifield_from_pydantic2(finfo: FieldInfo, name: str) -> UiField:\n import annotated_types as at\n from pydantic_core import PydanticUndefined\n\n if isinstance(finfo.json_schema_extra, dict):\n extra = {\n k: v for k, v in finfo.json_schema_extra.items() if k in _UI_FIELD_NAMES\n }\n else:\n extra = {}\n default = (\n Undefined if finfo.default in (PydanticUndefined, Ellipsis) else finfo.default\n )\n\n nullable = None\n if get_origin(finfo.annotation) is Union and any(\n i for i in get_args(finfo.annotation) if i is type(None)\n ):\n nullable = True\n\n restrictions: dict = {}\n for meta in finfo.metadata:\n if isinstance(meta, at.Ge):\n restrictions[\"minimum\"] = meta.ge\n elif isinstance(meta, at.Gt):\n restrictions[\"exclusive_minimum\"] = meta.gt\n elif isinstance(meta, at.Le):\n restrictions[\"maximum\"] = meta.le\n elif isinstance(meta, at.Lt):\n restrictions[\"exclusive_maximum\"] = meta.lt\n elif isinstance(meta, at.MultipleOf):\n restrictions[\"multiple_of\"] = meta.multiple_of\n elif isinstance(meta, at.MinLen):\n restrictions[\"min_length\"] = meta.min_length\n elif isinstance(meta, at.MaxLen):\n restrictions[\"max_length\"] = meta.max_length\n elif hasattr(meta, \"__dict__\"):\n # PydanticGeneralMetadata\n restrictions[\"pattern\"] = meta.__dict__.get(\"pattern\")\n\n return UiField(\n name=name,\n title=finfo.title,\n description=finfo.description,\n default=default,\n default_factory=finfo.default_factory,\n type=finfo.annotation,\n nullable=nullable,\n # const=const,\n **restrictions,\n # format=finfo.format,\n _native_field=finfo,\n **extra,\n )",
"def create_field(self, label, value_type, key=None):\n payload = self._build_params(label=label, value_type=value_type, key=key)\n return Field.deserialize(self._post('fields', None, payload))",
"def make_field(field):\n\n if \"time\" in field:\n return TimeField(field)\n if \"zd\" in field:\n return RadianField(field)\n else:\n return SimpleField(field)",
"def test_type_builder_builds_correct_model_for_simple_class():\n schema = [\n SchemaObject(\n name=\"TestClass\",\n properties=[\n SchemaValue(name=\"stringValue\", value_type=\"string\"),\n SchemaValue(name=\"booleanValue\", value_type=\"boolean\"),\n SchemaValue(name=\"anyValue\", value_type=\"any\"),\n SchemaValue(name=\"nullValue\", value_type=\"null\"),\n SchemaValue(name=\"optionalStringValue\", value_types=[\"null\", \"string\"]),\n ],\n )\n ]\n\n build_result = build_types(schema)\n\n assert len(build_result) == 1\n assert build_result[0] == ClassDefinition(\n name=\"TestClass\",\n properties=[\n PropertyDefinition(\n name=\"string_value\",\n key=\"stringValue\",\n value_type=\"str\",\n known_type=True,\n ),\n PropertyDefinition(\n name=\"boolean_value\",\n key=\"booleanValue\",\n value_type=\"bool\",\n known_type=True,\n ),\n PropertyDefinition(\n name=\"any_value\", key=\"anyValue\", value_type=\"Any\", known_type=True\n ),\n PropertyDefinition(\n name=\"null_value\", key=\"nullValue\", value_type=\"Any\", known_type=True\n ),\n PropertyDefinition(\n name=\"optional_string_value\",\n key=\"optionalStringValue\",\n value_type=\"Optional[str]\",\n known_type=True,\n ),\n ],\n depends_on=set(),\n )",
"def get_field_type(\n self, field_type: Union[Type, str], collection_name: str\n ) -> SchemaFieldDataType:\n TypeClass: Optional[Type] = _field_type_mapping.get(field_type)\n\n if TypeClass is None:\n self.report.report_warning(\n collection_name, f\"unable to map type {field_type} to metadata schema\"\n )\n TypeClass = NullTypeClass\n\n return SchemaFieldDataType(type=TypeClass())",
"def configure_fields(config, domain):\n locales = config[\"locales\"]\n logging.debug(\"Loaded locale->scheme mappings: %r\", locales)\n\n fields = config[\"fields\"]\n logging.debug(\"Loaded fields: %r\", [field[\"name\"] for field in fields])\n\n # A list of lists where each sublist contains all of the arguments that\n # should be passed to define-index-field to configure that field, with\n # the exception of the --domain flag.\n field_arguments = []\n\n for field in fields:\n new_arguments = [\"--type\", field[\"type\"]]\n\n # Configure the traits we want (if any traits can't be applied to this\n # type of field we'll error).\n new_arguments += get_disable_flags(field[\"type\"], field[\"traits\"])\n\n # Because of the special \"locale_specific\" scheme we need to do some\n # fancy processing here.\n analysis_scheme = field.get(\"analysis_scheme\")\n if analysis_scheme == \"locale_specific\":\n for locale, scheme in locales.iteritems():\n # Make a clone of the current arguments and add the locale\n # specific name and analysis scheme.\n cloned_arguments = list(new_arguments)\n cloned_arguments += [\n \"--analysis-scheme\", scheme,\n \"--name\", \"{}_{}\".format(field[\"name\"], locale)\n ]\n\n field_arguments.append(cloned_arguments)\n else:\n if analysis_scheme:\n new_arguments += [\"--analysis-scheme\", analysis_scheme]\n\n new_arguments += [\"--name\", field[\"name\"]]\n\n field_arguments.append(new_arguments)\n\n for i in field_arguments:\n # The name is always the last item in the sublist (hacky)\n name = i[-1]\n logging.info(\"Configuring field %r.\", name)\n\n command = [\"aws\", \"cloudsearch\", \"define-index-field\", \n \"--domain-name\", domain] + i\n\n maybe_execute_command(command,\n \"Could not configure field {}.\".format(name))",
"def DefaultsDataclassField(feature_type: str):\n\n\n class DefaultMarshmallowField(fields.Field):\n \"\"\"Custom marshmallow field that deserializes a dict for a valid defaults config from the feature_registry\n and creates a corresponding JSON schema for external usage.\"\"\"\n\n def _deserialize(self, value, attr, data, **kwargs):\n if value is None:\n return None\n if isinstance(value, dict):\n input_feature_class = input_mixin_registry[feature_type]\n output_feature_class = output_mixin_registry.get(feature_type, None)\n try:\n input_schema = input_feature_class.Schema().load(value)\n if output_feature_class:\n output_schema = output_feature_class.Schema().load(value)\n combined = input_schema + output_schema\n else:\n combined = input_schema\n return combined\n except (TypeError, ValidationError) as error:\n raise ValidationError(f'Invalid params: {value}, see `{attr}` definition. Error: {error}')\n raise ValidationError(f'Invalid params: {value}')\n\n @staticmethod\n def _jsonschema_type_mapping():\n input_feature_cls = input_mixin_registry.get(feature_type)\n output_feature_cls = output_mixin_registry.get(feature_type, None)\n input_props = schema_utils.unload_jsonschema_from_marshmallow_class(input_feature_cls)['properties']\n if output_feature_cls:\n output_props = schema_utils.unload_jsonschema_from_marshmallow_class(output_feature_cls)['properties']\n combined_props = {**output_props, **input_props}\n else:\n combined_props = input_props\n return {'type': 'object', 'properties': combined_props, 'additionalProperties': False, 'title': 'defaults_options'}\n try:\n input_cls = input_mixin_registry[feature_type]\n output_cls = output_mixin_registry.get(feature_type, None)\n dump_default = input_cls.Schema().dump({'type': feature_type})\n if output_cls:\n output_dump = output_cls.Schema().dump({'type': feature_type})\n dump_default = {**output_dump, **dump_default}\n load_default = input_cls.Schema().load({'type': feature_type})\n if output_cls:\n output_load = output_cls.Schema().load({'type': feature_type})\n for k in dump_default.keys():\n if getattr(load_default, k, -1) == -1:\n setattr(load_default, k, getattr(output_load, k))\n return field(metadata={'marshmallow_field': DefaultMarshmallowField(allow_none=False, dump_default=dump_default, load_default=load_default)}, default_factory=lambda : load_default)\n except Exception as e:\n raise ValidationError(f'Unsupported feature type: {feature_type}. See input_type_registry. Details: {e}')",
"def SplitDataclassField(default: str):\n\n\n class SplitMarshmallowField(fields.Field):\n \"\"\"Custom marshmallow field that deserializes a dict for a valid split config from the split_registry and\n creates a corresponding JSON schema for external usage.\"\"\"\n\n def _deserialize(self, value, attr, data, **kwargs):\n if value is None:\n return None\n if isinstance(value, dict):\n if TYPE in value and value[TYPE] in split_config_registry.data:\n split_class = split_config_registry.data[value[TYPE]]\n try:\n return split_class.get_schema_cls().Schema().load(value)\n except (TypeError, ValidationError) as error:\n raise ValidationError(f'Invalid split params: {value}, see `{split_class}` definition. Error: {error}')\n raise ValidationError(f'Invalid params for splitter: {value}, expected dict with at least a valid `type` attribute.')\n raise ValidationError('Field should be None or dict')\n\n @staticmethod\n def _jsonschema_type_mapping():\n return {'type': 'object', 'properties': {'type': {'type': 'string', 'enum': list(split_config_registry.data.keys()), 'default': default}}, 'title': 'split_options', 'allOf': get_split_conds()}\n try:\n splitter = split_config_registry.data[default]\n load_default = splitter.Schema().load({'type': default})\n dump_default = splitter.Schema().dump({'type': default})\n return field(metadata={'marshmallow_field': SplitMarshmallowField(allow_none=False, dump_default=dump_default, load_default=load_default)}, default_factory=lambda : load_default)\n except Exception as e:\n raise ValidationError(f'Unsupported splitter type: {default}. See split_registry. Details: {e}')",
"def check_field_type(field_class):\n if field_class == 'TextField':\n field_type = 'Text field'\n elif field_class == 'NumericField':\n field_type = 'Numeric field'\n elif field_class == 'DateField':\n field_type = 'Date field'\n elif field_class == 'DateTimeField':\n field_type = 'Date & time field'\n elif field_class == 'TimeField':\n field_type = 'Time field'\n elif field_class == 'LookupField':\n field_type = 'Select box field'\n elif field_class == 'MultipleLookupField':\n field_type = 'Multiple select field'\n\n return field_type",
"def fields_validator():\n\n return validator.BrewerySchema()",
"def OptimizerDataclassField(default={'type': 'adam'}, description='TODO'):\n\n\n class OptimizerMarshmallowField(fields.Field):\n \"\"\"Custom marshmallow field that deserializes a dict to a valid optimizer from\n `ludwig.modules.optimization_modules.optimizer_registry` and creates a corresponding `oneOf` JSON schema\n for external usage.\"\"\"\n\n def _deserialize(self, value, attr, data, **kwargs):\n if value is None:\n return None\n if isinstance(value, dict):\n if 'type' in value and value['type'] in optimizer_registry:\n opt = optimizer_registry[value['type'].lower()][1]\n try:\n return opt.Schema().load(value)\n except (TypeError, ValidationError) as e:\n raise ValidationError(f'Invalid params for optimizer: {value}, see `{opt}` definition. Error: {e}')\n raise ValidationError(f'Invalid params for optimizer: {value}, expect dict with at least a valid `type` attribute.')\n raise ValidationError('Field should be None or dict')\n\n @staticmethod\n def _jsonschema_type_mapping():\n return {'type': 'object', 'properties': {'type': {'type': 'string', 'enum': list(optimizer_registry.keys()), 'default': default['type'], 'description': 'The type of optimizer to use during the learning process'}}, 'title': 'optimizer_options', 'allOf': get_optimizer_conds(), 'required': ['type'], 'description': description}\n if not isinstance(default, dict) or 'type' not in default or default['type'] not in optimizer_registry:\n raise ValidationError(f'Invalid default: `{default}`')\n try:\n opt = optimizer_registry[default['type'].lower()][1]\n load_default = opt.Schema()\n load_default = load_default.load(default)\n dump_default = opt.Schema().dump(default)\n return field(metadata={'marshmallow_field': OptimizerMarshmallowField(allow_none=False, dump_default=dump_default, load_default=load_default, metadata={'description': description})}, default_factory=lambda : load_default)\n except Exception as e:\n raise ValidationError(f\"Unsupported optimizer type: {default['type']}. See optimizer_registry. Details: {e}\")",
"def __createField(self, field):\n name = field['name']\n fType = field['type']\n fieldLength = None\n if 'shape' in name.lower():\n return\n elif \"String\" in fType:\n fieldType = \"TEXT\"\n fieldLength = field['length']\n elif \"Date\" in fType:\n fieldType = \"DATE\"\n elif \"SmallInteger\" in fType:\n fieldType = \"SHORT\"\n elif \"Integer\" in fType:\n fieldType = \"LONG\"\n elif \"Double\" in fType:\n fieldType = \"DOUBLE\"\n elif \"Single\" in fType:\n fieldType = \"FLOAT\"\n else:\n fieldType = \"Unknown\"\n featureClass = self.featureClassLocation + \"\\\\\" + self.name\n validatedName = arcpy.ValidateFieldName(name, self.featureClassLocation)\n arcpy.AddField_management(in_table=featureClass, field_name=name, field_type=fieldType, field_length=fieldLength)",
"def SplitDataclassField(default: str):\n\n class SplitMarshmallowField(fields.Field):\n \"\"\"Custom marshmallow field that deserializes a dict for a valid split config from the split_registry and\n creates a corresponding JSON schema for external usage.\"\"\"\n\n def _deserialize(self, value, attr, data, **kwargs):\n if value is None:\n return None\n if isinstance(value, dict):\n if TYPE in value and value[TYPE] in split_config_registry.data:\n split_class = split_config_registry.data[value[TYPE]]\n try:\n return split_class.get_schema_cls().Schema().load(value)\n except (TypeError, ValidationError) as error:\n raise ValidationError(\n f\"Invalid split params: {value}, see `{split_class}` definition. Error: {error}\"\n )\n raise ValidationError(\n f\"Invalid params for splitter: {value}, expected dict with at least a valid `type` attribute.\"\n )\n raise ValidationError(\"Field should be None or dict\")\n\n @staticmethod\n def _jsonschema_type_mapping():\n return {\n \"type\": \"object\",\n \"properties\": {\n \"type\": {\"type\": \"string\", \"enum\": list(split_config_registry.data.keys()), \"default\": default},\n },\n \"title\": \"split_options\",\n \"allOf\": get_split_conds(),\n }\n\n try:\n splitter = split_config_registry.data[default]\n load_default = splitter.Schema().load({\"type\": default})\n dump_default = splitter.Schema().dump({\"type\": default})\n\n return field(\n metadata={\n \"marshmallow_field\": SplitMarshmallowField(\n allow_none=False,\n dump_default=dump_default,\n load_default=load_default,\n )\n },\n default_factory=lambda: load_default,\n )\n except Exception as e:\n raise ValidationError(f\"Unsupported splitter type: {default}. See split_registry. \" f\"Details: {e}\")",
"def build_standard_field(self, field_name, model_field_type):\n field_mapping = self.serializer_field_mapping\n field_class = field_mapping[model_field_type]\n field_kwargs = get_field_kwargs(field_name, model_field_type)\n\n if \"choices\" in field_kwargs:\n # Fields with choices get coerced into `ChoiceField`\n # instead of using their regular typed field.\n field_class = self.serializer_choice_field\n # Some model fields may introduce kwargs that would not be valid\n # for the choice field. We need to strip these out.\n # Eg. models.DecimalField(max_digits=3, decimal_places=1, choices=DECIMAL_CHOICES)\n valid_kwargs = {\n \"read_only\",\n \"write_only\",\n \"required\",\n \"default\",\n \"initial\",\n \"source\",\n \"label\",\n \"help_text\",\n \"style\",\n \"error_messages\",\n \"validators\",\n \"allow_null\",\n \"allow_blank\",\n \"choices\",\n }\n for key in list(field_kwargs):\n if key not in valid_kwargs:\n field_kwargs.pop(key)\n\n if not issubclass(field_class, fields.CharField) and not issubclass(\n field_class, fields.ChoiceField\n ):\n # `allow_blank` is only valid for textual fields.\n field_kwargs.pop(\"allow_blank\", None)\n\n return field_class, field_kwargs",
"def _uifield_from_pydantic1(model_field: ModelField) -> UiField:\n from pydantic.fields import SHAPE_SINGLETON\n from pydantic.fields import Undefined as PydanticUndefined\n\n finfo = model_field.field_info\n\n _extra_dict = finfo.extra.copy()\n # backport from pydantic2\n if \"json_schema_extra\" in _extra_dict:\n _extra_dict.update(_extra_dict.pop(\"json_schema_extra\"))\n\n extra = {k: v for k, v in _extra_dict.items() if k in _UI_FIELD_NAMES}\n const = finfo.const if finfo.const not in (None, PydanticUndefined) else Undefined\n default = (\n Undefined if finfo.default in (PydanticUndefined, Ellipsis) else finfo.default\n )\n\n nullable = None\n if model_field.allow_none and (\n model_field.shape != SHAPE_SINGLETON or not model_field.sub_fields\n ):\n nullable = True\n\n return UiField(\n name=model_field.name,\n title=finfo.title,\n description=finfo.description,\n default=default,\n default_factory=model_field.default_factory,\n type=model_field.outer_type_,\n nullable=nullable,\n const=const,\n minimum=finfo.ge,\n maximum=finfo.le,\n exclusive_minimum=finfo.gt,\n exclusive_maximum=finfo.lt,\n multiple_of=finfo.multiple_of,\n min_length=finfo.min_length,\n max_length=finfo.max_length,\n pattern=finfo.regex,\n # format=finfo.format,\n min_items=finfo.min_items,\n max_items=finfo.max_items,\n unique_items=finfo.unique_items,\n _native_field=model_field,\n **extra,\n )",
"def field_type(name):\n if name not in field_types:\n field_types[name] = records.fields_get([name], attributes=['type'])[name]['type']\n return field_types.get(name)",
"def construct_config(\n config_schema: Type[ConfigClass], sources: Union[Sequence[_SourceTypes], _SourceTypes]\n) -> ConfigClass:\n if not issubclass(config_schema, ConfigSchema):\n raise TypeError(\"Config schema supplied isn't a subclass of ConfigSchema (aka Pydantic's BaseModel)\")\n compiled_config_dict = _compile_sources(sources=sources)\n return cast(ConfigClass, config_schema(**compiled_config_dict))"
] | [
"0.63629675",
"0.60587436",
"0.60122585",
"0.5977167",
"0.57657665",
"0.5746371",
"0.5733373",
"0.5731873",
"0.572674",
"0.5627907",
"0.5559946",
"0.5531215",
"0.5527566",
"0.55121136",
"0.55115044",
"0.5466833",
"0.54139024",
"0.5401748",
"0.5386312",
"0.53542686",
"0.5348639",
"0.53378147",
"0.53375274",
"0.53074193",
"0.5299626",
"0.5298231",
"0.5295255",
"0.5293737",
"0.5232814",
"0.5224529"
] | 0.73748803 | 0 |
Splits the reddit dataset by submission ID | def split_by_submission(reddit_directory, output_directory, num_splits, cache_dir="comment_maps"):
logger.debug("Creating target directories...")
global target_directories
target_directories = create_target_directories(output_directory, num_splits)
logger.debug("Target directories created.")
if not os.path.isdir(cache_dir) or not os.listdir(cache_dir): # Missing/empty directory
# The comment data must be loaded and read so that we have the mapping
# from comment full-name to base (submission) full-name, which is required for the splitting
# of the other data sets
mkdir(cache_dir)
logger.info("No {comment --> submission} mapping cache found.")
logger.info("Processing comment tables...")
split_data_set(reddit_directory, "stanford_comment_data", "post_fullname", num_splits, output_directory,
maps_dir=cache_dir,
map_columns=("comment_fullname", "post_fullname"))
logger.debug("Loading comment cache from: %s" % cache_dir)
# global comment_post_mapping # stores map from comment fullname -> base submission id
# comment_post_mapping = load_dict_cache(cache_dir, shared_memory=True)
# logger.info("Loaded comment cache with: %d entries" % len(comment_post_mapping))
fd = shmht.open(os.path.join(output_directory, "shmht"), 2000000000, 1)
load_shmht(cache_dir, fd)
logger.info("Loaded comment cache into shared memory hash table")
process = psutil.Process(os.getpid())
logger.debug("PID: %d, Memory usage: %.1f GB" % (process.pid, process.memory_info().rss / 1e9))
logger.info("Processing submission tables...")
# Must first split up the submission data because
split_data_set(reddit_directory, "stanford_submission_data", "post_fullname", num_splits, output_directory)
# Now split the rest of the data while adding a column using the mapping that we have
for data_set_name in ["stanford_report_data", "stanford_removal_data", "stanford_vote_data"]:
mapped_split(reddit_directory, data_set_name, 'target_fullname', 'post_fullname', num_splits)
shmht.close(fd) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def scrape_submissions(self):\n subreddit_origin = await self.reddit.subreddit(self.subreddit)\n\n submission_count = 0\n async for submission in subreddit_origin.new(limit=self.limit):\n if self.memory.contains(submission.id):\n continue\n\n self.memory.add(submission.id)\n\n # Parse Submission\n submission = self.parse_submission(submission)\n\n # Save in Pub/Sub\n if self.enable_publish:\n self.publish(submission)\n\n submission_count += 1\n\n return submission_count",
"def scrape_submission(submission_url):\n\n\t'''\n\tScrape Data\n\t'''\n\n\t# Get submission dict\n\tsubmission_dict = reddit.extract_post_data(submission_url=submission_url)\n\n\t# Get list of comments_dicts\n\tsubmission_object = submission_dict.get('submission_object')\n\tcomments_dict = reddit.extract_post_comments_data(submission_object)\n\n\t'''\n\tExit if no comments were extracted from the submission\n\t'''\n\n\tif not len(comments_dict.get('data')) > 0:\n\t\tlogger.info('Data extraction yielded zero comments. Aborting sentiment analysis and database insertion.')\n\t\treturn\n\n\t'''\n\tAnalyze Sentiment\n\t'''\n\n\t# Call sentimentanalysis to analyze the comments and append the dicts\n\tSentimentAnalysis.list_parser(comments_dict)\n\n\t'''\n\tInsert to Database\n\t'''\n\n\t# Create instance of database_manager\n\tdatabase_manager = DatabaseManager()\n\n\t# Check if submission exists\n\tif database_manager.check_submission_exists(submission_dict):\n\t\t# Delete the submission and associated data if exists\n\t\tdatabase_manager.delete_submission(submission_dict)\n\n\t# Insert new submission info into database\n\tnew_submission = database_manager.insert_submission(submission_dict)\n\n\t# Insert comments if submission inserted successfully\n\tif new_submission is not None:\n\t\tdatabase_manager.insert_comments(comments_dict, new_submission)\n\t\tdatabase_manager.insert_sentiment(comments_dict)\n\n\t# Returns submission_id\n\treturn submission_dict.get('id')",
"def get_comments_from_submission(submission_id):\n submission = (REDDIT.submission(id=submission_id))\n return submission",
"def parse_sub_reddits(sub_reddit: str, \n match_words: list):\n\n url_to_open = f\"https://www.reddit.com/r/{sub_reddit}.json\"\n success_status = 0\n while success_status != 200:\n try:\n response = urlopen(url_to_open, timeout=10)\n success_status = response.status\n except HTTPError:\n logging.info(f\"HTTP Error for exceeding requests. Sleeping for 2 minutes at {datetime.today()}.\")\n time.sleep(120)\n success_status = 400\n \n entire_sub_reddit = json.loads(response.read())\n\n posts = [post[\"data\"] for post in entire_sub_reddit['data']['children'] if post[\"kind\"] == \"t3\"]\n _ids = []\n post_dataframes = []\n return_dict = {}\n if len(posts) > 0:\n for post in posts:\n try:\n title = post['title'].lower()\n if re.findall(r\"(?=(\"+'|'.join(match_words)+r\"))\", title):\n _id = post['id']\n norm_df = pd.json_normalize(post)\n norm_df = norm_df[['id', 'subreddit', 'title', 'ups', 'downs', 'upvote_ratio', 'num_comments', 'author_fullname', 'created_utc', 'subreddit_subscribers']]\n norm_df = norm_df.rename(columns = {'id': 'post_id', 'author_fullname': 'author'})\n post_dataframes.append(norm_df)\n if post['num_comments'] > 0:\n _ids.append(_id)\n except KeyError:\n pass \n if len(post_dataframes) > 0:\n all_dfs = pd.concat(post_dataframes, ignore_index=True)\n return_dict['data'] = all_dfs\n return_dict['ids'] = _ids\n else:\n return_dict['data'] = None\n return_dict['ids'] = None \n else:\n return_dict['data'] = None\n return_dict['ids'] = None \n\n return return_dict",
"def submission_analysis():\n if request.method == 'POST':\n submission_text = request.data\n data = request.get_json(force=True)\n\n # data['tokens'] = tokenize(data['title'] + data['post'])\n #data['tokens'] = tokenize(data['content'])\n\n if data['post'] == None:\n data['post'] = \"\"\n\n x = get_subreddit(data['title'] + \" \" + data['post'])\n return jsonify(x)",
"def reddit_test(API_client, API_secret, user_agent, user, passwd):\r\n reddit = praw.Reddit(client_id=API_client,\r\n client_secret=API_secret,\r\n user_agent=user_agent,\r\n username=user,\r\n password=passwd)\r\n\r\n # Make authorized instances of reddit\r\n sub1 = reddit.subreddit('soccer').top(limit=900)\r\n sub2 = reddit.subreddit('britishproblems').top(limit=900)\r\n sub3 = reddit.subreddit('learnprogramming').top(limit=900)\r\n fields = [\"title\"] # Fields to be used for data mining methods\r\n all_posts = [] # Stores the list of posts\r\n\r\n # Create dataframe and assign value to the fields accordingly\r\n for post in sub1:\r\n to_dict = vars(post)\r\n sub_dict = {field: to_dict[field] for field in fields}\r\n sub_dict[\"target\"] = 0\r\n all_posts.append(sub_dict)\r\n\r\n for post in sub2:\r\n to_dict = vars(post)\r\n sub_dict = {field: to_dict[field] for field in fields}\r\n sub_dict[\"target\"] = 1\r\n all_posts.append(sub_dict)\r\n\r\n for post in sub3:\r\n to_dict = vars(post)\r\n sub_dict = {field: to_dict[field] for field in fields}\r\n sub_dict[\"target\"] = 2\r\n all_posts.append(sub_dict)\r\n\r\n dataframe = pd.DataFrame(all_posts)\r\n cols = list(dataframe.columns)\r\n cols[cols.index('target')], cols[-1] = cols[-1], cols[cols.index('target')]\r\n dataframe = dataframe[cols]\r\n\r\n # Creates an instance of TF-IDF vector for feature extraction. Stop words will be ignored.\r\n tfidf_transformer = TfidfVectorizer(stop_words=sklearn.feature_extraction.text.ENGLISH_STOP_WORDS)\r\n # Creates a feature vector for post's title\r\n X_train_title_counts = pd.DataFrame((tfidf_transformer.fit_transform(dataframe[\"title\"].values)).todense())\r\n Y = pd.DataFrame(dataframe[\"target\"].values)\r\n\r\n # Splits the data into training set, development set and test set\r\n train_X, train_Y, dev_X,dev_Y,test_X, test_Y = get_training_and_testing_sets(X_train_title_counts, Y)\r\n\r\n\r\n Accuracy = {}\r\n\r\n # Perform various data mining methods on the data\r\n Train_SVC(train_X, train_Y, test_X, test_Y, Accuracy)\r\n Train_RandomForest(train_X, train_Y, test_X, test_Y, Accuracy)\r\n train_X=train_X.append(dev_X)",
"def get_from_reddit(self, submission_id: str, max_comment_level: int) -> Submission:\n if max_comment_level == -1:\n max_comment_level = sys.maxsize\n\n submission = self.reddit.submission(id=submission_id)\n submission.comments.replace_more(limit=0)\n comment_queue = []\n tab_queue = []\n sub = Submission(submission_id,\n unicodedata.normalize(\"NFKD\", submission.title),\n submission.url,\n int(submission.created_utc),\n submission.ups,\n submission.downs,\n submission.score)\n sub.selftext = submission.selftext\n sub.is_self = submission.is_self\n for s in submission.comments:\n comment_queue.append(s)\n tab_queue.append(0)\n while comment_queue:\n comment = comment_queue.pop()\n tab = tab_queue.pop()\n com = Comment(\n comment.id,\n comment.parent_id,\n unicodedata.normalize(\"NFKD\", comment.body),\n tab,\n int(comment.created_utc),\n comment.ups,\n comment.downs,\n comment.score)\n sub.comments.append(com)\n if tab > max_comment_level:\n continue\n for reply in comment.replies:\n comment_queue.append(reply)\n tab_queue.append(tab + 1)\n return sub",
"def split_by_submission(reddit_directory, output_directory, num_splits, cached=False, map_cache=None):\n logger.debug(\"Creating target directories...\")\n global target_directories\n target_directories = create_split_directories(output_directory, num_splits)\n logger.debug(\"Target directories created.\")\n\n logger.debug(\"Connecting to Redis database...\")\n global redis_pool\n redis_pool = redis.ConnectionPool(host=\"localhost\", port=6379, db=0)\n\n if not cached:\n # The comment data must be loaded and read so that we have the mapping\n # from comment full-name to base (submission) full-name, which is required for the splitting\n # of the other data sets\n logger.info(\"No database of {comment --> submission} map cached.\")\n logger.info(\"Processing comment tables...\")\n split_data_set(reddit_directory, \"stanford_comment_data\", \"post_fullname\", num_splits, target_directories,\n map_columns=(\"comment_fullname\", \"post_fullname\"))\n\n elif map_cache is not None and os.path.isdir(map_cache) and os.listdir(map_cache):\n logger.debug(\"Loading dictionaries from cache into Redis...\")\n load_dict_cache_into_db(map_cache)\n\n else:\n logger.debug(\"Redis Database cache exists. Skipping comment splitting.\")\n\n redis_db = redis.StrictRedis(connection_pool=redis_pool)\n logger.debug(\"Redis database has: %d keys\" % redis_db.info()['db0']['keys'])\n\n # Now split the rest of the data while adding a column using the mapping that we have\n for data_set_name in [\"stanford_report_data\", \"stanford_removal_data\", \"stanford_vote_data\"]:\n mapped_split(reddit_directory, data_set_name, 'target_fullname', 'post_fullname', num_splits)\n\n # Split the submission tables (they don't need to be mapped using the database)\n logger.info(\"Processing submission tables...\")\n split_data_set(reddit_directory, \"stanford_submission_data\", \"post_fullname\", num_splits, target_directories)",
"def get_posts(self):\r\n\r\n sub_dict = {\r\n 'selftext': [], 'title': [], 'id': [], 'sorted_by': [],\r\n 'num_comments': [], 'score': [], 'ups': [], 'downs': []}\r\n csv = f'{self.sub}_posts.csv'\r\n\r\n # Attempt to specify a sorting method.\r\n sort, subreddit = self.set_sort()\r\n\r\n # Set csv_loaded to True if csv exists since you can't\r\n # evaluate the truth value of a DataFrame.\r\n df, csv_loaded = (pd.read_csv(csv), 1) if isfile(csv) else ('', 0)\r\n\r\n print(f'csv = {csv}')\r\n print(f'After set_sort(), sort = {sort} and sub = {self.sub}')\r\n print(f'csv_loaded = {csv_loaded}')\r\n\r\n print(f'Collecting information from r/{self.sub}.')\r\n\r\n for post in subreddit:\r\n\r\n # Check if post.id is in df and set to True if df is empty.\r\n # This way new posts are still added to dictionary when df = ''\r\n unique_id = post.id not in tuple(df.id) if csv_loaded else True\r\n\r\n # Save any unique posts to sub_dict.\r\n if unique_id:\r\n sub_dict['selftext'].append(post.selftext)\r\n sub_dict['title'].append(post.title)\r\n sub_dict['id'].append(post.id)\r\n sub_dict['sorted_by'].append(sort)\r\n sub_dict['num_comments'].append(post.num_comments)\r\n sub_dict['score'].append(post.score)\r\n sub_dict['ups'].append(post.ups)\r\n sub_dict['downs'].append(post.downs)\r\n sleep(0.1)\r\n\r\n new_df = pd.DataFrame(sub_dict)\r\n\r\n # Add new_df to df if df exists then save it to a csv.\r\n if 'DataFrame' in str(type(df)) and self.mode == 'w':\r\n pd.concat([df, new_df], axis=0, sort=0).to_csv(csv, index=False)\r\n print(\r\n f'{len(new_df)} new posts collected and added to {csv}')\r\n elif self.mode == 'w':\r\n new_df.to_csv(csv, index=False)\r\n print(f'{len(new_df)} posts collected and saved to {csv}')\r\n else:\r\n print(\r\n f'{len(new_df)} posts were collected but they were not '\r\n f'added to {csv} because mode was set to \"{self.mode}\"')",
"def preprocess_split(self, input_dataset, last_id, num_sents, max_sent_len, prefix_id = \"\"):\n dataset = []\n for sent in input_dataset[last_id:]:\n last_id += 1\n if type(sent) == tuple or len(sent) > max_sent_len or len(sent) <= 1:\n continue\n dataset.append(self.preprocess_sent(sent, prefix_id + str(len(dataset))))\n if len(dataset) == num_sents:\n break\n\n return dataset, last_id",
"def get_comments_from_submission_id(submission_id):\n flat_comments = []\n tree_comments = []\n\n submission = (REDDIT.submission(id=submission_id))\n print(submission.num_comments)\n print(submission.shortlink)\n\n # sort comments by best and get the flattened list\n submission.comment_sort = 'confidence'\n\n # tree comments traversal\n submission.comments.replace_more(limit=1)\n for comm in submission.comments.list():\n tree_comments.append(comm)\n\n flat_comments = list(submission.comments)\n\n return flat_comments, tree_comments",
"def comment_data(post_id: str, \n sub_reddit: str):\n url_to_open = f\"https://www.reddit.com/r/{sub_reddit}/comments/{post_id}.json\"\n success_status = 0\n while success_status != 200:\n try:\n response = urlopen(url_to_open, timeout=10)\n success_status = response.status\n except HTTPError:\n logging.info(f\"HTTP Error for exceeding requests. Sleeping for 2 minutes at {datetime.today()}.\")\n time.sleep(120)\n success_status = 400\n \n sub_reddit_page = json.loads(response.read())\n comments_df = pd.json_normalize(sub_reddit_page[1]['data']['children'])\n comments_df['post_id'] = post_id\n comments_df = comments_df[['post_id', 'data.id', 'data.author_fullname', 'data.body', 'data.created', \n 'data.downs', 'data.ups']]\n comments_df = comments_df.rename(columns = {'data.id': 'comment_id', 'data.author_fullname': 'author', 'data.body': 'comment', \n 'data.created': 'created_utc', 'data.downs': 'downs', 'data.ups': 'ups'})\n comments_df['reply'] = 'N'\n comments_df['comment_replied_id'] = ''\n # get all replies \n replies_list = []\n for comment in sub_reddit_page[1]['data']['children']:\n replies = comment.get('data').get('replies')\n comment_id = comment.get('data').get('id') \n if replies is None or replies == '':\n pass\n else:\n replies_df = pd.json_normalize(replies['data']['children'])\n try:\n replies_df = replies_df[['data.id', 'data.author_fullname', 'data.body', 'data.created', \n 'data.downs', 'data.ups']]\n except KeyError:\n pass\n replies_df = replies_df.rename(columns = {'data.id': 'comment_id', 'data.author_fullname': 'author', 'data.body': 'comment', \n 'data.created': 'created_utc', 'data.downs': 'downs', 'data.ups': 'ups'})\n replies_df['reply'] = 'Y'\n replies_df['comment_replied_id'] = comment_id\n replies_df['post_id'] = post_id\n replies_list.append(replies_df)\n if len(replies_list) == 1:\n all_replies = replies_list[0]\n elif len(replies_list) > 1: \n all_replies = pd.concat(replies_list, ignore_index = True)\n else:\n all_replies = None \n\n column_order = [c for c in comments_df.columns]\n comments_df = comments_df[column_order]\n if all_replies is not None:\n all_replies = all_replies[column_order]\n all_comments_replies = pd.concat([comments_df, replies_df], ignore_index=True)\n else:\n all_comments_replies = comments_df\n\n return all_comments_replies",
"def get_onehundred_new_posts():\n one_hundred_new_posts = []\n for post in reddit.subreddit(\"all\").new():\n try:\n one_hundred_new_posts.append(post.id)\n submission = reddit.submission(id=post.id)\n except:\n one_hundred_new_posts.remove(post.id)\n return one_hundred_new_posts",
"def parse_submission(submission):\n df = pd.read_csv(\n submission,\n dtype={'PredictionString': str},\n keep_default_na=False)\n parsed = {}\n for _, row in df.iterrows():\n pid = row['patientId']\n boxes = list(parse_pred_string(row['PredictionString']))\n parsed[pid] = {\n 'scores': [b['score'] for b in boxes],\n 'boxes': [b['box'] for b in boxes],\n }\n\n return parsed",
"def generate_submission_datafiles_data(submission_id=str()):\n\n columns = list()\n data_set = list()\n\n columns.append(dict(className='summary-details-control detail-hover-message', orderable=False, data=None,\n title='', defaultContent='', width=\"5%\"))\n columns.append(dict(data=\"record_id\", visible=False))\n columns.append(dict(data=\"name\", title=\"Name\"))\n\n try:\n submission_record = Submission().get_record(submission_id)\n except:\n return dict(dataSet=data_set,\n columns=columns\n )\n\n submission_record = Submission().get_record(submission_id)\n bundle = submission_record.get(\"bundle\", list())\n datafile_object_list = [ObjectId(datafile_id) for datafile_id in bundle]\n\n projection = dict(name=1)\n filter_by = dict()\n filter_by[\"_id\"] = {'$in': datafile_object_list}\n\n records = DataFile().get_all_records_columns(sort_by='date_created', sort_direction=1, projection=projection,\n filter_by=filter_by)\n\n if len(records):\n df = pd.DataFrame(records)\n df['s_n'] = df.index\n\n df['record_id'] = df._id.astype(str)\n df[\"DT_RowId\"] = df.record_id\n df.DT_RowId = 'row_' + df.DT_RowId\n df = df.drop('_id', axis='columns')\n\n data_set = df.to_dict('records')\n\n return dict(dataSet=data_set,\n columns=columns\n )",
"def get_reddit_data(target_items, total_threads):\n if not os.path.exists('refresh_token.txt'):\n f = open('refresh_token.txt', 'x')\n f.close()\n print('Please fill in required information in \\'refresh_token.txt\\'')\n sys.exit()\n if not os.path.exists('bot.txt'):\n f2 = open('bot.txt', 'x')\n f2.close()\n print('Please fill in required information in \\'bot.txt\\'')\n sys.exit()\n\n # Authenticate to Reddit\n refresh_token_manager = FileTokenManager('refresh_token.txt') # Refer to praw documentation for obtaining a refresh token from reddit here: https://praw.readthedocs.io/en/latest/getting_started/authentication.html\n reddit = praw.Reddit(token_manager=refresh_token_manager, user_agent=open('bot.txt', 'r').read()) # Get bot token\n\n # Scrape Reddit data\n posts = []\n target_reddit = reddit.subreddit(target_reddit_str)\n for post in target_reddit.hot(limit=total_threads): # Search from top posts in 'hot' category in specified subreddit, limit based on user specification.\n posts.append(\n [post.title,\n post.score,\n post.num_comments,\n post.url,\n post.id,\n post.created])\n posts = pd.DataFrame(posts, columns=['title', 'score', 'num_comments', 'url', 'id', 'created']) # Build a pandas dataframe\n # Parse useful stuff in the dataframe\n\n # Type of product\n titles = []\n for i in range(posts.shape[0]): # df.shape[0] = number of rows\n titles.append(posts.at[i, 'title'])\n part_type = []\n for i in range(len(titles)): # Get only the part types from title of post\n name = titles[i]\n index = -1\n for j in range(len(name)):\n if name[j] == '[' and index > -2:\n index = j+1\n elif name[j] == ']' and index > -1:\n part_type.append(name[index:j].lower())\n index = -2 # Prevents string from getting screwed up while parsing extra ]\n if index == -1:\n part_type.append('')\n # Certain part types require additional parsing for formatting. Ex. 'm.2 ssd' can be parsed to just 'ssd'.\n for i in range(len(part_type)):\n for j in range(len(formatted_strings)):\n if formatted_strings[j] in part_type[i]:\n part_type[i] = formatted_strings[j]\n\n # Certain part types aren't always labelled correctly. 
Go through terms and set them to term[0] (see redefined_terms definition for more info)\n for i in range(len(part_type)):\n for j in range(len(redefined_terms)):\n for k in range(len(redefined_terms[j])):\n if redefined_terms[j][k] in part_type[i]:\n part_type[i] = redefined_terms[j][0]\n\n posts['part_type'] = part_type # add part types to dataframe\n\n # Price range\n prices = []\n found = False\n for i in range(len(titles)):\n skip_rest = False\n for j in range(len(titles[i])):\n if titles[i][j] == '$' and not skip_rest:\n found = True\n skip_rest = True\n prices.append(titles[i][j:])\n if not found:\n prices.append('')\n\n posts['prices'] = prices # add prices to dataframe\n # posts = posts[2:] # remove the top posts on the subreddit pinned by moderators\n # posts.to_csv('posts.csv')\n\n # Get target products\n target_nums = []\n for i in range(len(part_type)):\n for j in range(len(target_items)):\n if part_type[i] == target_items[j]:\n target_nums.append(i)\n # print(target_nums)\n # Make a new dataframe with just target products\n targets = pd.DataFrame(columns=['title', 'score', 'num_comments', 'url', 'id', 'part_type', 'prices'])\n if len(target_nums) > 0:\n for i in range(len(target_nums)):\n targets.loc[posts.index[target_nums[i]]] = posts.iloc[target_nums[i]] # Copy everything with target numbers over to new dataframe\n # Change indexing of new dataframe to be 0-n\n size = targets.shape[0]\n indicies = [i for i in range(size)]\n targets['index'] = indicies\n targets.set_index('index', inplace=True)\n posts = posts[2:] # remove the top posts on the subreddit pinned by moderators\n else:\n sys.exit() # No products to show\n\n # Get urls to original posts\n\n post_urls = []\n # print(targets.shape[0])\n # print(targets)\n for i in range(targets.shape[0]):\n # post_urls.append(targets.at[i, 'id'])\n post_urls.append('https://www.reddit.com/r/' + target_reddit_str + '/comments/' + targets.at[i, 'id'] + '/')\n targets['post_url'] = post_urls # add post urls to dataframe\n\n # Calculate the SAVR score. Function of 1000 + comment_weight * comments + upvote_weight * upvotes\n scores = []\n for i in range(targets.shape[0]):\n scores.append(math.floor(1000 + targets.at[i, 'score'] * savr_score_upvote_weight + targets.at[i, 'num_comments'] * savr_score_comment_weight))\n targets['scores'] = scores\n targets = targets.sort_values(by=['scores'], ascending=False) # Sort the dataframe by scores determined by the program.\n # targets.to_csv('targets.csv')\n return targets",
"def build_submission(y_pred, id_submission):\n y_pred_ = zero_to_neg(y_pred)\n ret = np.ones((len(y_pred_), 2))\n for i in range(len(y_pred_)):\n ret[i] = np.array([i+1, y_pred_[i]])\n ret = ret.astype(int)\n sub = pd.DataFrame(data = ret)\n sub.columns = ['Id', 'Prediction']\n sub.to_csv('pred_' + id_submission + '.csv', index=None)",
"def process_subreddit(session, subreddit, period, limit, cached_ids=[],\n recache=False):\n # set submission query params\n params = {\n \"t\": period,\n \"show\": \"all\"\n }\n\n # process submissions\n submissions = subreddit.get_new(limit=limit, params=params)\n for s in tqdm(submissions, desc=\"Submissions\", nested=True):\n not_cached = (s.id not in cached_ids)\n should_be_recached = (recache and s.id in cached_ids and\n not s.archived)\n if not_cached or should_be_recached:\n try:\n submission, comments = parse_submission(s)\n process_submission(session, submission)\n process_comments(session, comments)\n except HTTPError as exc:\n logger.error(\n \"Skipping submission {0} due to HTTP status {1} error. \"\n \"Continuing...\".format(\n submission.permalink.encode(\"UTF-8\"),\n exc.response.status_code)\n )\n except ValueError: # Occurs occasionally with empty responses\n logger.error(\n \"Skipping submission {0} due to ValueError.\".format(\n submission.permalink.encode(\"UTF-8\")))",
"def runIDS(date):\n createDirectories(date)\n input_bucket = 'emg-author-subreddit-pairs'\n output_bucket = 'emg-author-subreddit-pairs-ids'\n df = streamBlob(input_bucket, date)\n df = df.reset_index().astype({'author':str,'subreddit':str,'num_comments':int})\n\n print(\"getting subreddit ids\")\n subIds = sortedIds(df['subreddit'])\n df['subreddit_id'] = df['subreddit'].map(lambda x: subIds[x])\n\n print(\"getting author ids\")\n authorIds = sortedIds(df['author'])\n df['author_id']=df['author'].map(lambda x: authorIds[x])\n\n print(\"storing dataset w/ ids\")\n\n filename = cachePath(f\"\"\"{date}/author-subbreddit-pairs-IDs.gzip\"\"\")\n df.to_csv(filename,compression='gzip')\n\n uploadCommands(filename, output_bucket, date)",
"def load_submissions(assignment: Assignment, submissions: List[Dict]) -> List[Submission]:\n logger.info(\"Creating %s submissions via Canvas API\", len(submissions))\n\n result: List[Submission] = []\n for submission in submissions:\n result.append(\n assignment.submit(submission)\n )\n\n logger.info(\"Successfully created %s submissions\", len(submissions))\n\n return result",
"def get_records_for_submitter_ids(self, sids, node):\n uuids = []\n pids = []\n count = 0\n for sid in sids:\n count += 1\n args = 'submitter_id:\"{}\"'.format(sid)\n res = self.paginate_query(node=node, args=args, props=[\"id\", \"submitter_id\",\"project_id\"])\n recs = res[\"data\"][node]\n if len(recs) == 1:\n uuids.append(recs[0][\"id\"])\n pids.append(recs[0][\"project_id\"])\n elif len(recs) == 0:\n print(\"No data returned for {}:\\n\\t{}\".format(sid, res))\n print(\"\\t{}/{}\".format(count, len(sids)))\n print(\n \"Finished retrieving {} uuids for {} submitter_ids\".format(\n len(uuids), len(sids)\n )\n )\n df = pd.DataFrame({'project_id':pids,'uuid':uuids,'submitter_id':sids})\n\n dfs = []\n for i in range(len(df)):\n sid = df.iloc[i]['submitter_id']\n pid = df.iloc[i]['project_id']\n uuid = df.iloc[i]['uuid']\n prog,proj = pid.split(\"-\",1)\n print(\"({}/{}): {}\".format(i+1,len(df),uuid))\n mydir = \"project_uuids/{}_tsvs\".format(pid) # create the directory to store TSVs\n if not os.path.exists(mydir):\n os.makedirs(mydir)\n filename = \"{}/{}_{}.tsv\".format(mydir,pid,uuid)\n if os.path.isfile(filename):\n print(\"File previously downloaded.\")\n else:\n self.sub.export_record(prog, proj, uuid, \"tsv\", filename)\n df1 = pd.read_csv(filename, sep=\"\\t\", header=0)\n dfs.append(df1)\n all_data = pd.concat(dfs, ignore_index=True)\n master = \"master_uuids_{}.tsv\".format(node)\n all_data.to_csv(\"{}\".format(master), sep='\\t',index=False)\n print(\"Master node TSV with {} total recs written to {}.\".format(len(all_data),master))\n return all_data",
"def get_submissions(submissions):\n results = []\n for entry in submissions:\n results.append({\n 'timestamp' : entry['timestamp'],\n 'code' : clean_code(entry['raw_text']),\n 'style_score' : entry['style_score'],\n 'cluster' : entry['cluster'],\n 'correct' : get_correct(entry),\n 'hints' : get_hints(entry)\n })\n sorted(results, key=lambda x : x['timestamp'])\n for entry in results:\n entry['timestamp'] = convert_timestamp(entry['timestamp'])\n return results",
"def _filter_for_submission_ids(query: DocumentNode) -> [int]:\n acc = Config.accumulation_size + 1\n response = DB.client.execute(query)\n # Count occurrences of every ID\n elements = Counter([submission['photo_id'] for submission in response['results']])\n # Filter for acc size\n elements = filter(lambda x: x[1] < acc,\n [(submissions_by_count, elements[submissions_by_count]) for submissions_by_count in elements])\n # return ID of filtered elements\n return [submission[0] for submission in elements]",
"def clean_subreddit(filename):\n\n # Get name for processed file\n regex = r\"([^\\/]+)(?=\\-all)\"\n matches = re.search(regex, subreddit_folder)\n new_file = matches.group(1)\n\n # Create list of columns to keep\n keep_cols = ['id', 'created_utc', 'author', 'title',\\\n 'score', 'num_comments', 'subreddit', 'link_flair_text']\n\n keep_cols_text = ['id', 'created_utc', 'author', 'selftext']\n\n # Create file name\n processedfile_csv = \"data/processed/submissions/\" + new_file + \\\n \"-metadata\" + \".csv\"\n\n processed_textfile_csv = \"data/processed/submissions/\" + new_file + \\\n \"-text\" + \".csv\"\n\n # Create empty data frame\n df_keep = pd.DataFrame()\n df_keep_text = pd.DataFrame()\n\n # Read in json file\n try:\n data = pd.read_json(filename)\n\n # ValueError: Trailing data thrown if file is pretty indented\n except ValueError:\n data = pd.read_json(filename, lines = True)\n\n try:\n df_keep = df_keep.append(data[keep_cols])\n except KeyError:\n keep_cols = ['id', 'created_utc', 'author', 'title',\\\n 'score', 'num_comments', 'subreddit']\n df_keep = df_keep.append(data[keep_cols])\n\n try:\n df_keep_text = df_keep_text.append(data[keep_cols_text])\n except KeyError:\n keep_cols_text = ['id', 'created_utc', 'author']\n df_keep_text = df_keep_text.append(data[keep_cols_text])\n\n\n # Change date format\n ## For metadata\n df_keep['datetime_dv'] = pd.to_datetime(df_keep['created_utc'], unit = 's')# dv = derived\n df_keep['date_dv'] = df_keep['datetime_dv'].dt.date\n\n # For text\n df_keep_text['datetime_dv'] = pd.to_datetime(df_keep_text['created_utc'], unit = 's')# dv = derived\n df_keep_text['date_dv'] = df_keep_text['datetime_dv'].dt.date\n\n\n ##### Delimit by date #####\n # Create mask of time slot\n mask = (df_keep['date_dv'] >= start) & (df_keep['date_dv'] <= end) # inclusive on either end\n mask_text = (df_keep_text['date_dv'] >= start) & (df_keep_text['date_dv'] <= end)\n\n # Only keep data within date frame\n df_keep = df_keep.loc[mask]\n df_keep_text = df_keep_text.loc[mask_text]\n ############################\n\n\n # Save to json\n df_keep_text.to_csv(processed_textfile_csv, mode = \"w\")\n df_keep.to_csv(processedfile_csv, mode = \"w\") # mode= w will overwrite previous file\n print(len(df_keep_text.index))\n print(processed_textfile_csv)\n\n\n data = [] # force empty",
"def process_submission(self, submission_id: str, max_comment_level: int=5) -> Submission:\n sub = self.get_from_reddit(submission_id, max_comment_level)\n try:\n article = NewsPlease.from_url(sub.url)\n sub.actual_title = unicodedata.normalize(\"NFKD\", article.title)\n sub.news_text = unicodedata.normalize(\"NFKD\", article.text)\n except Exception as e:\n sub.actual_title = str(e)\n return sub",
"def get_new_submissions_for_subreddit(self, subreddit):\n return self._reddit.subreddit(subreddit).new(limit=1000)",
"def _scan_submission_page(self, url_format):\n\n submissions = []\n\n try:\n page = 1\n while True:\n url = url_format % page\n doc = self._limited_call(self._html_get, url)\n logger.debug(\"Scanning submissions from %s\" % url)\n\n count = 0\n\n for el in doc.cssselect(\".gallery > *\"):\n if el.get(\"id\") == \"no-images\":\n continue\n\n id_str = el.get(\"id\")[4:]\n if id_str == \"\":\n continue\n\n id = int(id_str)\n\n submission = self._submissions.get(id)\n if submission is None:\n submission = Submission()\n submission._session = self\n submission.id = id\n self._submissions[id] = submission\n\n submission.title = str(\n el.cssselect(\"span\")[0].text_content())\n\n if \"r-adult\" in el.classes:\n submission.rating = \"adult\"\n elif \"r-mature\" in el.classes:\n submission.rating = \"mature\"\n elif \"r-general\" in el.classes:\n submission.rating = \"general\"\n else:\n raise exceptions.ScraperError()\n\n if \"t-image\" in el.classes:\n submission.type = \"image\"\n elif \"t-text\" in el.classes:\n submission.type = \"text\"\n elif \"t-audio\" in el.classes:\n submission.type = \"audio\"\n elif \"t-flash\" in el.classes:\n submission.type = \"flash\"\n else:\n raise exceptions.ScraperError()\n\n submission.thumbnail_url = \"https:\" + el.cssselect(\"img\")[\n 0].get(\"src\")\n\n submissions.append(submission)\n count += 1\n\n if count == 0:\n break\n\n logger.debug(\"Found %d submissions\" % count)\n\n page += 1\n\n except (IndexError, ValueError):\n raise exceptions.ScraperError()\n\n return submissions",
"def split_data_set(reddit_path, data_set_name, on, num_splits, target_directories, map_columns=None):\n targets = {}\n for i in range(num_splits):\n targets[i] = os.path.join(target_directories[i], data_set_name)\n mkdir(targets[i])\n\n full_sub_data_path = os.path.join(reddit_path, data_set_name)\n data_files = map(lambda f: os.path.join(full_sub_data_path, f), os.listdir(full_sub_data_path))\n args_list = [(on, table_file, targets, num_splits, map_columns) for table_file in data_files]\n\n pool = mp.Pool(pool_size)\n pool.map(unpack_split_file_with_map, args_list)",
"def split_data(df):\n\n df['ranked_latest'] = df.groupby(['userId'])['timestamp'].rank(method='first', ascending=False)\n train_df = df[df['ranked_latest'] != 1]\n test_df = df[df['ranked_latest'] == 1]\n\n train_df = train_df[['userId', 'movieId', 'rating']]\n test_df = test_df[['userId', 'movieId', 'rating']]\n\n return train_df, test_df",
"def test_save_historical_submission_comments():\n data = []\n threads = list(get_submissions(TEST_SUBREDDIT, TEST_START_DATE, TEST_END_DATE, TEST_MAX))\n for item in threads:\n data.append(item.d_)\n\n save_historical_submission_comments(data, TEST_SUBREDDIT + '_TEST.csv')"
] | [
"0.59111464",
"0.58586067",
"0.58490336",
"0.5802594",
"0.57980126",
"0.57522035",
"0.57325864",
"0.56903297",
"0.56826586",
"0.5672754",
"0.56150174",
"0.55359817",
"0.55321723",
"0.5508987",
"0.5500947",
"0.5494311",
"0.5488408",
"0.54096574",
"0.5398215",
"0.5391513",
"0.5388343",
"0.5382075",
"0.53685546",
"0.5344455",
"0.5333745",
"0.5303908",
"0.52934587",
"0.5289095",
"0.52773637",
"0.5223617"
] | 0.5920839 | 0 |
Creates the target directories for each independent subset of the data. Will create directories named "00000", "00001", ..., "" in the output_directory | def create_target_directories(output_directory, num_splits):
target_directories = {i: os.path.join(output_directory, "%05d" % i) for i in range(num_splits)}
for i in target_directories:
target_dir = target_directories[i]
if os.path.isfile(target_dir):
logger.error("File exists: %s" % target_dir)
exit(1)
mkdir(target_dir)
return target_directories | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_output_files(self):\n namenode = self.runner.namenode\n for i in range(self.cnt_reducers):\n fname = '%s.%s' % (self.output_dir, reduce_output(self.id, i))\n namenode.create_file(fname)\n self.result_files.append(fname)\n self.open_files.append(fname)\n\n for j in range(self.cnt_mappers):\n fname = map_output(self.id, j, i)\n namenode.create_file(fname)\n self.open_files.append(fname)",
"def make_output_folders():\n call([\"mkdir\", \"-p\", args.out_folder.strip()])\n call([\"mkdir\", args.out_folder.strip() + \"/files\"])\n call([\"mkdir\", args.out_folder.strip() + \"/fasta\"])",
"def _create_target_directories(self):\n if os.path.exists(self.PREPROCESSED_DATA_OUT_DIR):\n if self._hparams.over_write:\n print_info(\"Deleting data folder: {}\".format(self.PREPROCESSED_DATA_OUT_DIR))\n shutil.rmtree(self.PREPROCESSED_DATA_OUT_DIR)\n print_info(\"Recreating data folder: {}\".format(self.PREPROCESSED_DATA_OUT_DIR))\n os.makedirs(self.PREPROCESSED_DATA_OUT_DIR)\n else:\n print_info(\"Skipping preprocessing step, since the data might already be available\")\n else:\n print_info(\"Creating data folder: {}\".format(self.PREPROCESSED_DATA_OUT_DIR))\n os.makedirs(self.PREPROCESSED_DATA_OUT_DIR)",
"def write_output_files(input_path, output_path, out_data, random = False):\n create_directory_structure(output_path)\n for city in cities:\n # set relevant list\n data_dir = os.path.join(input_path, city, city+'_test')\n sub_files = list_filenames(data_dir)\n for f in sub_files:\n # load data\n outfile = os.path.join(output_path, city, city+'_test',f)\n if random:\n out = np.random.randint(256, size=(5,3,495,436,3), dtype = np.dtype(np.uint8))\n else:\n out = out_data\n write_data(out, outfile)\n print(\"just wrote file {}\".format(outfile))",
"def create_output_folder(output_folder_name: str, finding_labels: list):\n if not os.path.isdir(output_folder_name):\n os.mkdir(output_folder_name)\n for type in ['/train', '/val', '/test']:\n if not os.path.isdir(output_folder_name + type):\n os.mkdir(output_folder_name + type)\n for disease in finding_labels:\n if not os.path.isdir(output_folder_name + type + '/' + disease):\n os.mkdir(output_folder_name + type + '/' + disease)",
"def _make_output_directory(self):\n fs = self._filesystem\n output_filename = fs.join(self._root_output_dir, self._test_name)\n fs.maybe_make_directory(fs.dirname(output_filename))",
"def gen_folders(rho, kappa, km, pa, analysis, dbase, analysisdbase):\n \n path1 = 'density_' + + str(rho) + \"_kappa_\" + \\\n str(kappa) + \"_km_\" + str(km) + \"_panti_\" + str(pa)\n path2 = analysis + '_density_' + + str(rho) + \"_kappa_\" + \\\n str(kappa) + \"_km_\" + str(km) + \"_panti_\" + str(pa) + '.txt' \n datafolder = dbase + path1 + '/'\n analysisfile = analysisdbase + path2 \n\n return datafolder, analysisfile",
"def MakeDataSetFiles(dirname):\n\n\n if not os.path.exists(dirname):\n os.mkdir(dirname)\n if not os.path.exists(os.path.join(dirname, 'train')):\n os.mkdir(os.path.join(dirname, 'train'))\n if not os.path.exists(os.path.join(dirname, 'test')):\n os.mkdir(os.path.join(dirname, 'test'))\n data_train = fetch_20newsgroups(subset='train', categories=None, shuffle=True, random_state=42)\n data_test = fetch_20newsgroups(subset='test', categories=None, shuffle=True, random_state=42)\n\n if dirname[-1] == '/' or dirname[-1] == '\\\\':\n dirname = dirname[:-1]\n \n Util.WriteClassFile(data_train.target, os.path.join(dirname, 'train_classes.txt'))\n Util.WriteClassFile(data_test.target,os.path.join(dirname, 'test_classes.txt'))\n\n\n train_counter = 0;\n for doc in data_train.data:\n filename = 'train_' + str(train_counter).zfill(5);\n f = file(os.path.join(dirname, 'train', filename), 'w');\n f.write(doc.encode('ascii', 'ignore'));\n f.close();\n train_counter = train_counter + 1;\n\n test_counter = 0;\n for doc in data_test.data:\n filename = 'test_' + str(test_counter).zfill(5);\n f = file(os.path.join(dirname, 'test', filename), 'w');\n f.write(doc.encode('ascii', 'ignore'));\n f.close();\n test_counter = test_counter + 1;\n\n class_index = file(os.path.join(dirname, 'class_label_index.txt'), 'w')\n for label in data_train.target_names:\n class_index.write(label + '\\n')\n class_index.close()",
"def do_2004(in_dir, out_dir):\n dir_items = setup_outdir_and_get_input(in_dir, out_dir)\n for idx, item in enumerate(dir_items):\n full_path = in_dir + os.path.sep + item\n print(f\"{full_path} -> {out_dir}/{idx}\")\n create_dirs_and_write_files(full_path, idx, in_dir, item, out_dir)",
"def do_2003(in_dir, out_dir):\n\n dir_items = setup_outdir_and_get_input(in_dir, out_dir)\n for idx, item in enumerate(dir_items):\n full_path = in_dir + os.path.sep + item\n print(f\"{item} -> {idx}\")\n create_dirs_and_write_files(full_path, idx, in_dir, item, out_dir)",
"def create_intermediate_files():\n\n dirs = (DIR_PAGE, DIR_SRGB, DIR_VTI, DIR_TIFF,\n DIR_BACK, DIR_TEXT, DIR_FINAL)\n \n for dir in dirs:\n try:\n os.mkdir(os.path.join(cwd, dir))\n except OSError, e:\n print 'directory (', dir, ') already exists'",
"def generateDataset(self):\n if self.outdir[-1] != \"/\": \n self.outdir += \"/\"\n self.outdir += \"dataset_trackml\"\n i = 1\n while os.path.exists(self.outdir):\n self.outdir.replace(\"_\"+str(i-1), \"\")\n self.outdir += (\"_\"+str(i))\n i += 1\n cmd = \"mkdir -p \"+ self.outdir\n os.system(cmd)\n\n cont = pc.particleController()\n cont.generateEvents(self.numevents, self.hpe, self.detectors)\n\n self.generateHits(cont)\n self.generateTruths(cont)\n self.generateSolution(cont)",
"def prepare_output_dir(out_dir, test_dir):\r\n\r\n if not out_dir.exists():\r\n out_dir.mkdir()\r\n\r\n # get the necessary file names\r\n file_names = get_file_names(test_dir, args.distance, print_file_names=False)\r\n\r\n # copy the images in the firstIms into the output folder\r\n for name in file_names[1][0]:\r\n file_path = Path(test_dir / name)\r\n copy_to = Path(out_dir / name)\r\n shutil.copy(file_path, copy_to)\r\n\r\n # the firstIms list does not contain the last image,\r\n # so we need to also copy the last image of the secIms into the output folder\r\n last_im = file_names[1][1][-1]\r\n shutil.copy(Path(test_dir/last_im), Path(out_dir/last_im))\r\n\r\n return file_names",
"def _create_directories(self):\n print \"[--init] creating directory structure in %s\" % (self.target_path)\n ensure_path(self.conf_path)\n for subdir in config.PROCESSING_AREAS:\n subdir_path = self.data_path + os.sep + subdir\n ensure_path(subdir_path)",
"def create_folders():\n if not os.path.exists(\"data/train-npy/\"):\n os.makedirs(\"data/train-npy/\")\n if not os.path.exists(\"data/test-npy/\"):\n os.makedirs(\"data/test-npy/\")\n if not os.path.exists(\"data/valid-npy/\"):\n os.makedirs(\"data/valid-npy/\")",
"def initialize_output_files(self):\r\n if not self.C.restart:\r\n print(\"* Touching output files.\", flush=True)\r\n # begin writing `generation.csv` file\r\n csv_path_and_filename = self.C.job_dir + \"generation.csv\"\r\n util.properties_to_csv(\r\n prop_dict=self.ts_properties,\r\n csv_filename=csv_path_and_filename,\r\n epoch_key=\"Training set\",\r\n append=False,\r\n )\r\n\r\n # begin writing `convergence.csv` file\r\n util.write_model_status(append=False)\r\n\r\n # create `generation/` subdirectory to write generation output to\r\n os.makedirs(self.C.job_dir + \"generation/\", exist_ok=True)",
"def mkdirout():\n #pdbid=os.path.splitext(os.path.basename(PDB_PATH))[0]\n #outdir = os.path.join(OUTPUT_DIR, pdbid(),\"\") # OUTPUT DIRECTORY WHERE OUTPUT FILES WILL GO\n\n if os.path.exists(output_dir()):\n sys.exit(\"ERROR. Unable to create output directory. %s already exists. Please, make sure you choose an output path not containing former results.\" % output_dir() ) # LOGGING?\n else:\n try:\n os.mkdir(output_dir())\n except OSError:\n sys.exit(\"ERROR. Unable to create output directory %s.\" % output_dir() )\n os.mkdir(output_tmpdir())\n os.mkdir(output_tmpdir(\"pisacov\"))\n os.mkdir(output_tmpdir(\"pisa\"))\n os.mkdir(output_tmpdir(\"deepmetapsicov\"))",
"def create_output_dir(self):\n if self.output_dir is None:\n new_path = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\n self.output_dir = os.path.expanduser(os.path.join(self.input_dir, new_path))\n try:\n os.makedirs(self.output_dir)\n except OSError:\n pass",
"def make_output_dirs_for_part2(parent_dir):\n\n if not os.path.exists(parent_dir + 'Modeling/'):\n os.makedirs(parent_dir + 'Modeling/')\n if not os.path.exists(parent_dir + 'Modeling/cleaned_template_fastas/'):\n os.makedirs(parent_dir + 'Modeling/cleaned_template_fastas/')\n if not os.path.exists(parent_dir + 'Modeling/cleaned_template_pdbs/'):\n os.makedirs(parent_dir + 'Modeling/cleaned_template_pdbs/')\n if not os.path.exists(parent_dir + 'Modeling/fasta_alns_and_identities/'):\n os.makedirs(parent_dir + 'Modeling/fasta_alns_and_identities/')\n if not os.path.exists(parent_dir + 'Modeling/grishin_alns/'):\n os.makedirs(parent_dir + 'Modeling/grishin_alns/')\n if not os.path.exists(parent_dir + 'Modeling/threaded_pdbs/'):\n os.makedirs(parent_dir + 'Modeling/threaded_pdbs/')\n if not os.path.exists(parent_dir + 'Modeling/final_models/'):\n os.makedirs(parent_dir + 'Modeling/final_models/')",
"def make_dir(file_name): # output_file_loc = des\n for i in os.walk(f'{tmp_path}/{file_name}'):\n fld = i[0].split(file_name)[-1]\n if fld:\n loc = f\"{output_path}{fld}\"\n if not os.path.exists(f'{output_path}/{fld}'):\n os.makedirs(f'{output_path}/{fld}')\n # print(\"MAKE_DIR completed...\") \n return",
"def create_output_folder(self):\n if not os.path.exists(self.current_path):\n os.mkdir(self.current_path)\n data_dir_by_date = datetime.datetime.now().strftime(\n \"data-%d-%b_%H-%M-%S\")\n self.date_path = os.path.join(self.current_path, data_dir_by_date)\n if not os.path.exists(self.date_path):\n os.mkdir(self.date_path)",
"def create_paths(manager, parentpath=\"extractor_test_results/HoG/\"):\n \n paths_to_create = [\"data/features_all\", \"data/features_filled\",\n \"data/pair/both\", \"hog_images\", \"hog_plots\",\n \"orig_frames\", \"processed_frames\", \"evaluation\"]\n \n for path in paths_to_create:\n manager.make_folder(parentpath + path)",
"def before_process(self,data,labels):\n # JM: if integer labels are given, then create different output\n # directories for each new label\n if all(isinstance(lbl,int) for lbl in labels):\n self.batch_dirs = \\\n [os.path.join(self.output_dir,str(lbl)) for lbl in labels]\n # JM: otherwise create the same output directory for each image\n else:\n self.batch_dirs = [self.output_dir] * len(data)\n\n # create output directories if they don't already exist\n uniques = set(self.batch_dirs)\n for out_dir in uniques:\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n self.batch_index = 0",
"def write_data_files(self):\n \n logging.info('\\n Start writing data files \\n')\n \n for i, (data_file, label_file) in enumerate(self.files):\n data_file, label_file = Path(data_file), Path(label_file)\n logging.info('Writing .hdf5 file for : [{}]'.format(str(data_file)))\n \n file_name = self.save_data_folder / '{}.hdf5'.format(label_file.name[:-4])\n if file_name.exists():\n continue\n \n with h5py.File(str(file_name), 'w') as writer:\n self.serialize_samples(\n writer, data_file, label_file)",
"def create_export_files(n,input_choice,timing,min_hull_per):\n\n\n\texists = os.path.isdir('analysis')\n\tif exists:\n\t\tf = open('analysis/results.csv','a',newline='')\n\t\tresults = csv.writer(f)\n\telse:\n\t\tos.mkdir('analysis')\n\t\tf = open('analysis/results.csv','w',newline='')\n\t\tresults = csv.writer(f)\n\t\tresults.writerow(['Algo','Size of Input','Min. Hull Pts Per','Type of Input','Timing'])\n\n\n\tresults.writerow(['Graham Scan',n,min_hull_per,input_choice,timing])",
"def generate_all_files():\n for (name, fn) in lang_module.targets.items():\n path = of_g.options.install_dir + '/' + name\n os.system(\"mkdir -p %s\" % os.path.dirname(path))\n with open(path, \"w\") as outfile:\n fn(outfile, os.path.basename(name))\n print(\"Wrote contents for \" + name)",
"def makeOutDirs(od):\n if args.format.lower() == 'kitti':\n id = \"%s/images\" % od\n ld = \"%s/labels\" % od\n elif args.format.lower() == 'darknet':\n id = \"%s/images\" % od\n ld = \"%s/annotations\" % od\n else:\n print \"Invalid output format %s!\" % args.format\n usage()\n ensureDir(id)\n ensureDir(ld)\n return id, ld",
"def create_train_folder(df_train, target_path):\n folder_path = os.path.join(target_path, 'xray_preprocess/train')\n print(f'Create train set at: {folder_path}')\n for _, row in tqdm(df_train.iterrows(), total=df_train.shape[0]):\n if row['class']=='negative':\n destination_path = os.path.join(folder_path, 'negative')\n elif row['class']=='positive':\n destination_path = os.path.join(folder_path, 'positive')\n if not os.path.exists(destination_path):\n os.makedirs(destination_path) \n img = os.path.join(target_path, 'xray', 'train', row['filename'])\n shutil.copy(img, destination_path )",
"def generate_output(dataset_path, dataset_name, dest): #keep\n def func_name_extractor(x):\n x = os.path.basename(x)\n return x\n\n binaries = list(os.scandir(dataset_path))\n import numpy as np\n np.random.seed(42)\n np.random.shuffle(binaries)\n train_output = open(os.path.join(dataset_path, dataset_name + \"_train_output.txt\"), \"w\")\n test_output = open(os.path.join(dataset_path, dataset_name + \"_test_output.txt\"), \"w\")\n val_output = open(os.path.join(dataset_path, dataset_name + \"_val_output.txt\"), \"w\")\n mapper = dict()\n all_funcs = set()\n for i, entry in enumerate(binaries):\n funcs = list(glob(f\"{entry.path}/*\"))\n all_funcs.update(funcs)\n for func in funcs:\n func_name = func_name_extractor(func)\n func_name = func_name.split(\"_\")\n for label in func_name:\n if label not in mapper:\n mapper[label] = []\n mapper[label].append(func)\n\n well_named_funcs = set()\n popular_names = filter(lambda x: len(x[1]) >= 3, mapper.items())\n\n count_func_names = open(os.path.join(dataset_path, \"count_func_names.txt\"), \"w\")\n for name, name_funcs in mapper.items():\n line= name + \" \" + str(len(name_funcs)) + \"\\n\"\n count_func_names.write(line)\n\n\n names_hists = {name: {'free': len(name_funcs), 'train': 0, 'val': 0, 'test': 0} for name, name_funcs in popular_names}\n for partial in map(lambda x: x[1], filter(lambda x: len(x[1]) >= 3, mapper.items())):\n well_named_funcs.update(partial)\n well_named_funcs = list(well_named_funcs)\n\n # generate output\n np.random.shuffle(well_named_funcs)\n print(f\"{len(all_funcs)} functions, {len(well_named_funcs)} functions with a name that contains a common word\")\n # print(\"choosing 250 functions for test/validation\")\n\n global_counters = {'train': 0, 'val': 0, 'test': 0}\n less_than_th = 0\n less_than_five = 0\n less_than_8 = 0\n for i, func in enumerate(well_named_funcs):\n func_name_parts = func_name_extractor(func).split(\"_\") \n print_name = gen_shared_name(names_hists, func_name_parts)\n names_hists, dest = set_decide(names_hists, print_name, global_counters)\n global_counters[dest] += 1\n print_name = \"|\".join(print_name) \n if dest == 'train':\n output = train_output\n elif dest == 'test':\n output = test_output\n else:\n output = val_output\n\n try:\n with open(func, \"r\") as f:\n for line in f:\n line = line.split(\" \")\n line[0] = print_name\n line = \" \".join(line)\n line = line_process(line)\n m = len(line.split(\" \")[1].split(\",\")[1].split(\"|\"))\n if \"fp_const\" not in line:\n if m < 1000:\n less_than_th += 1 \n if m < 800:\n less_than_8 += 1\n if m < 500:\n less_than_five += 1\n train_output.write(line)\n except:\n pass\n print(\"num of lines with line less than 1000 is \", less_than_th)\n print(\"num of lines with line less than 800 is \", less_than_8)\n print(\"num of lines with line less than 500 is \", less_than_five)\n train_output.close()\n test_output.close()\n val_output.close()",
"def data_directory(class_labels):\n\n dataset_folders = ['train','validation','test']\n object_class = class_labels\n os.mkdir(BASE_DIR)\n\n for folder in dataset_folders:\n for obj_cls in object_class:\n training_dir = BASE_DIR + os.sep +'{}'.format(folder)\n if not os.path.exists(BASE_DIR+os.sep +'{}'.format(folder)):\n os.mkdir(training_dir)\n class_dir = training_dir + os.sep + '{}'.format(obj_cls)\n if not os.path.exists(training_dir + os.sep + '{}'.format(obj_cls)):\n os.mkdir(class_dir)"
] | [
"0.7421393",
"0.7061714",
"0.69978166",
"0.69296634",
"0.67830724",
"0.6746593",
"0.67342675",
"0.67236626",
"0.66699076",
"0.6666345",
"0.664067",
"0.66080076",
"0.65926135",
"0.6464124",
"0.64080656",
"0.639672",
"0.6379212",
"0.6359835",
"0.6335128",
"0.62624854",
"0.6252963",
"0.62338257",
"0.62320614",
"0.6223672",
"0.6209228",
"0.6177455",
"0.6161072",
"0.61400545",
"0.6127509",
"0.6123602"
] | 0.72653764 | 1 |
Given the day and file path, give the summary of what was sold on that day | def melon_count_summary(day_number, path):
print("Day", day_number)
the_file = open(path)
for line in the_file:
line = line.rstrip()
words = line.split('|')
print("Delivered {} {}s for total of ${}".format(words[1], words[0], words[2]))
the_file.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def produce_daily_summary(file, day): \n\n print(day)\n #prints day of report\n the_file = open(file)\n #opens file for use in function\n for line in the_file:\n #iterates over file to separate lines\n line = line.rstrip()\n #removes extra characters at end of lines\n words = line.split('|')\n #splits the lines of string for data variable assignment\n\n melon = words[0]\n count = words[1]\n amount = words[2]\n #assigns valiables to words at specified index\n\n print(f\"Delivered {count} {melon}s for total of ${amount}.\")\n #prints \"f\" string with variables printed in\n \n the_file.close()\n #closes file",
"def display_sum_sold():\n sold_games = reports.sum_sold(filename)\n print(\n \"Total sold copies from {} file is: {} millions\\n\".format(\n filename,\n sold_games))",
"def dailyanalysis(experiment):\n import os\n for fn in os.listdir('/network/aopp/hera/mad/bakerh/fms_tmp/' +\n experiment):\n if fn.find('exe.fms') == -1 and fn.find('mppnccombine.ifc') == -1:\n storedaily('/network/aopp/hera/mad/bakerh/fms_tmp/' + experiment +\n '/' + fn + '/combine/',\n '/network/aopp/hera/mad/bakerh/data/FMS/output/' +\n experiment + '/' + fn + '/history/')\n print('Completed ' + fn)",
"def __parseDailyFilename(self, f):\n base = os.path.basename(f)\n\n tokens = base.split('.')\n if len(tokens) < 6:\n # assume it's an old file in the format A2000089etcetc.tif i.e. ?YYYYDDD*\n yr = base[1:5]\n day = base[5:8]\n else:\n # assume it's a file in the newer format ?*.YYYY.DDD.etc format\n varname, yr, day, temporalSummary, res, spatialSummary = tokens[0:6]\n outTemplate = varname + \"{}.{}.{}.\" + \"{}.{}.{}.tif\".format(temporalSummary, res, spatialSummary)\n if self._outTemplate == \"FILLED-OUTPUT{}.{}.{}.TemporalSummary.Res.SpatialSummary.tif\":\n self._outTemplate = outTemplate\n else:\n assert self._outTemplate == outTemplate\n return day, yr",
"def get_summary(infile):\n sched = scheduler.parse_sched_file(infile)\n tracker = dict()\n for curr in sched:\n parts = curr.split('-')\n d = date(int(parts[0]), int(parts[1]), int(parts[2]))\n name = sched[curr].strip()\n if name not in tracker:\n tracker[name] = [0, 0]\n if d.weekday() == 4 or d.weekday() == 5:\n tracker[name][1] += 1\n else:\n tracker[name][0] += 1\n for name in tracker:\n print '%s weekdays=%d, weekends=%d' % (name, tracker[name][0], tracker[name][1])",
"def writing_sum_sold(file_name):\n result = str(reports.sum_sold(file_name))\n with open (\"report_for_judy_part2.txt\", \"+a\") as f:\n f.write(result)\n f.write(\"\\n\")",
"def ExtractProgram(filename, todaystr):\n\n report = []\n save = False\n for line in open(filename).readlines():\n if len(line) < 8:\n continue\n # yy/mm/dd\n if line[2] == '/' and line[5] == '/':\n save = line[:8] == todaystr\n if save is True:\n report.append(line)\n\n return ''.join(report)",
"def amount_total(path, file_type):\n final_frame = clean_kdr_data(path, file_type)\n amount_work = final_frame.groupby(\"Date\")[\"Place\"].count()\n amount_work = amount_work.to_frame()\n amount_work.columns = [\"Freq\"]\n\n # Dropping outlier data\n amount_work = amount_work.drop([\"2019-01-04\"])\n amount_work = amount_work.drop([\"2019-01-07\"])\n\n return amount_work",
"def print_daily_summary(weather_data_day: dict):\n date_time = time.localtime(weather_data_day['dt'])\n sunrise_time = time.localtime(weather_data_day['sunrise'])\n sunset_time = time.localtime(weather_data_day['sunset'])\n print(f\"\\nForecast for Vancouver on {date_time[2]}/{date_time[1]}/{date_time[0]}, at {date_time[3]}:00 local time.\")\n print(f\"Temperature low: {weather_data_day['temp']['min']}C. Temperature high: {weather_data_day['temp']['max']}C.\")\n print(f\"Time of sunrise is: {sunrise_time[3]}:{sunrise_time[4]:02} local time.\", end=\" \")\n print(f\"Time of sunset is: {sunset_time[3]}:{sunset_time[4]:02} local time.\")\n print(f\"Humidity (rh) is {weather_data_day['humidity']}%.\")\n print(f\"Wind speed is {weather_data_day['wind_speed']}m/s.\")\n print(f\"The weather is expected to be {weather_data_day['weather'][0]['description']}.\")",
"def doSummary(self):\n for name in self.stockList:\n tempVolume=0.\n for dateStr in self.listOfDates:\n rawTradeDataPath = FileNames.BinRTTradesDir + '/' + dateStr + '/' + name + '_trades.binRT'\n tradeReader = TAQTradesReader(rawTradeDataPath)\n tempVolume=tempVolume+np.nansum(tradeReader._s)/10000.0 # divide 10000 because otherwise the sum could exceed the range of int32\n self.dict[name]=tempVolume",
"def daily_avg(dacycle,avg):\n \n if avg not in ['transcom','transcom_extended','olson','olson_extended','country','flux1x1']:\n raise IOError,'Choice of averaging invalid'\n\n analysisdir = dacycle['dir.analysis']\n weekdir = os.path.join(analysisdir , 'data_%s_weekly'%avg)\n daydir = os.path.join(analysisdir , 'data_%s_daily'%avg)\n\n if not os.path.exists(daydir):\n print \"Creating new output directory \" + daydir\n os.makedirs(daydir)\n\n files = os.listdir(weekdir)\n files = [f for f in files if '-' in f and f.endswith('.nc')]\n\n fileinfo = {}\n for filename in files:\n date=datetime.datetime.strptime(filename.split('.')[-2],'%Y-%m-%d')\n fileinfo[filename] = date\n \n dt = dacycle['cyclelength']\n\n for k,v in fileinfo.iteritems():\n cycle_file = os.path.join(weekdir,k)\n for i in range(abs(dt.days)):\n daily_file = os.path.join(daydir,'%s_fluxes.%s.nc'%(avg,(v+datetime.timedelta(days=i)).strftime('%Y-%m-%d')))\n if not os.path.lexists(daily_file):\n os.symlink(cycle_file,daily_file)\n #print daily_file,cycle_file",
"def display_get_date_avg():\n average_date = reports.get_date_avg(filename)\n print(\n \"The average date for all games in {} is: {}\\n\".format(\n filename,\n average_date))",
"def test_data(filename, col, day):\r\n\r\n extracted = extract_info(filename)\r\n formatted = format(extracted, col, day) # calling my funcations\r\n float_rep = float(formatted) # getting the float representation of the info\r\n return float_rep",
"def get_fund_logfile():\n return \"fund\" + get_day() + \".log\"",
"def _get_fsevent_image_files(self):\r\n # Print the header columns to the output file\r\n Output.print_columns(self.l_all_fsevents)\r\n \r\n scan_path_spec = None\r\n scanner = source_scanner.SourceScanner()\r\n scan_context = source_scanner.SourceScannerContext()\r\n scan_context.OpenSourcePath(self.meta['source'])\r\n\r\n scanner.Scan(\r\n scan_context,\r\n scan_path_spec=scan_path_spec\r\n )\r\n\r\n for file_system_path_spec, file_system_scan_node in scan_context._file_system_scan_nodes.items():\r\n t_files = 0\r\n self.all_files_count = 0\r\n self.error_file_count = 0\r\n self.all_records_count = 0\r\n self.parsed_file_count = 0\r\n \r\n try:\r\n location = file_system_path_spec.parent.location\r\n except:\r\n location = file_system_path_spec.location\r\n \r\n print(\" Processing Volume {}.\\n\".format(location))\r\n\r\n fs_event_path_spec = path_spec_factory.Factory.NewPathSpec(\r\n file_system_path_spec.type_indicator,\r\n parent=file_system_path_spec.parent,\r\n location=\"/.fseventsd\"\r\n )\r\n\r\n file_entry = resolver.Resolver.OpenFileEntry(\r\n fs_event_path_spec\r\n )\r\n \r\n if file_entry != None:\r\n\r\n t_files = file_entry.number_of_sub_file_entries\r\n for sub_file_entry in file_entry.sub_file_entries:\r\n if sub_file_entry.name == 'fseventsd-uuid':\r\n t_files -= 1\r\n\r\n self.time_range_src_mod = []\r\n prev_mod_date = \"Unknown\"\r\n prev_last_wd = 0\r\n c_last_wd = 0\r\n counter = 0\r\n\r\n # Uses file mod dates to generate time ranges by default unless\r\n # files are carved or mod dates lost due to exporting\r\n self.use_file_mod_dates = True\r\n\r\n # Iterate through each file in supplied fsevents dir\r\n for sub_file_entry in file_entry.sub_file_entries:\r\n if sub_file_entry.name == 'fseventsd-uuid':\r\n continue\r\n # Variables\r\n counter += 1\r\n self.all_files_count += 1\r\n\r\n # Call the progress bar which shows parsing stats\r\n progress(counter, t_files)\r\n\r\n buf = \"\"\r\n\r\n # Name of source fsevent file\r\n self.src_filename = sub_file_entry.name\r\n self.src_fullpath = self.meta['source'] + \": \" + location + sub_file_entry.path_spec.location\r\n\r\n stat_object = sub_file_entry.GetStat()\r\n\r\n # UTC mod date of source fsevent file\r\n self.m_time = datetime.datetime.fromtimestamp(\r\n stat_object.mtime).strftime(\r\n '%Y-%m-%d %H:%M:%S') + \" [UTC]\"\r\n\r\n # Regex to match against source fsevent log filename\r\n regexp = re.compile(r'^.*[\\][0-9a-fA-F]{16}$')\r\n\r\n # Test to see if fsevent file name matches naming standard\r\n # if not, assume this is a carved gzip\r\n if len(self.src_filename) == 16 and regexp.search(self.src_filename) is not None:\r\n c_last_wd = int(self.src_filename, 16)\r\n self.time_range_src_mod = prev_last_wd, c_last_wd, prev_mod_date, self.m_time\r\n self.is_carved_gzip = False\r\n else:\r\n self.is_carved_gzip = True\r\n file_object = sub_file_entry.GetFileObject()\r\n\r\n compressedFile = io.StringIO.BytesIO()\r\n compressedFile.write(file_object.read())\r\n compressedFile.seek(0)\r\n # Attempt to decompress the fsevent archive\r\n try:\r\n with self.skip_gzip_check():\r\n self.files = gzip.GzipFile(fileobj=compressedFile, mode='rb')\r\n buf = self.files.read()\r\n\r\n except Exception as exp:\r\n self.logfile.write(\r\n \"%s\\tError: Error while decompressing FSEvents file.%s\\n\" % (\r\n self.src_filename,\r\n str(exp)\r\n )\r\n )\r\n self.error_file_count += 1\r\n continue\r\n\r\n # If decompress is success, check for DLS headers in the current file\r\n dls_chk = FSEventHandler.dls_header_search(self, buf, 
self.src_filename)\r\n\r\n # If check for DLS returns false, write information to logfile\r\n if dls_chk is False:\r\n self.logfile.write('%s\\tInfo: DLS Header Check Failed. Unable to find a '\r\n 'DLS header. Unable to parse File.\\n' % (self.src_filename))\r\n # Continue to the next file in the fsevents directory\r\n self.error_file_count += 1\r\n continue\r\n\r\n self.parsed_file_count += 1\r\n\r\n # Accounts for fsevent files that get flushed to disk\r\n # at the same time. Usually the result of a shutdown\r\n # or unmount\r\n if not self.is_carved_gzip and self.use_file_mod_dates:\r\n prev_mod_date = self.m_time\r\n prev_last_wd = int(self.src_filename, 16)\r\n\r\n # If DLSs were found, pass the decompressed file to be parsed\r\n FSEventHandler.parse(self, buf)\r\n \r\n else:\r\n print('Unable to process volume or no fsevent files found')\r\n continue\r\n\r\n print('\\n\\n All Files Attempted: {}\\n All Parsed Files: {}\\n Files '\r\n 'with Errors: {}\\n All Records Parsed: {}'.format(\r\n self.all_files_count,\r\n self.parsed_file_count,\r\n self.error_file_count,\r\n self.all_records_count))",
"def day(d):\n\t\tx = db.cquery(\"day\",d)\n\t\tprint \"Total:\", x[0]\n\t\tf = raw_input(\"[L]ist [N]ew overview or [B]ack to home \").lower()\n\t\tif f == \"l\":\n\t\t\tfor i in x[1]:\n\t\t\t\tprint ui.statsid(), i[0], i[1], \" \", ui.statstimein(), i[2], ui.statstimeout(), i[3]\n\t\t\traw_input(\"[Enter] to go back to search\")\n\t\t\thome_stats()\n\t\telif f == \"n\":\n\t\t\thome_stats()\n\t\telif f == \"b\":\n\t\t\thome()\n\t\telse:\n\t\t\tpass",
"def summary(self, fromdt, todt):\r\n totalSaved = self.miser.totalSaved(fromdt, todt) \r\n sumStr = \"%s: %s to %s\\n\" % (self.miser.name, fromdt, todt)\r\n sumStr += \"Total saved: %.2f\" % totalSaved\r\n\r\n sumStr += \"\\n\\nGoals:\\n\"\r\n sumStr += self._goalsMetStr(fromdt, todt, totalSaved)\r\n\r\n return sumStr",
"def list_files(site,\n sdate,\n ndays=1,\n edate=None):\n\n # create a panda series of dates\n if edate is not None:\n d_ser = pd.Series(pd.date_range(start=sdate, end=edate, freq='D'))\n else:\n d_ser = pd.Series(pd.date_range(\n start=sdate, periods=ndays, freq='D'))\n\n f_df = pd.DataFrame(columns=['date', 'fname', 'dir','hdir'])\n\n # create file name and directory structure\n for di, dt in d_ser.iteritems():\n # filename\n fnm = '{0:04d}{1:02d}{2:02d}'.format(\n dt.year, dt.month, dt.day)\n fnm = 'thg_l2_mag_'+site.lower()+'_'+fnm+'_v01.cdf'\n\n\n # directory location\n # THEMIS data is store in local_dir as YYYY\\MM\\DD\\themis_file\n fdr = os.path.join(local_dir,\n site.lower(),\n '{0:04d}'.format(dt.year))\n if not os.path.exists(fdr):\n os.makedirs(fdr)\n\n # http directory\n hdr = http_dir+'thg/l2/mag/'+site.lower()+'/{0:04d}/'.format(dt.year)\n\n f_df = f_df.append(\n {'date': dt, 'fname': fnm, 'dir': fdr, 'hdir':hdr}, ignore_index=True)\n\n return f_df",
"def return_weekly_figure():\n today = datetime.datetime.now()\n\n while 1:\n try:\n today_str = str(today.day) + \"/\" + \"{:02d}\".format(today.month) + \"/\" + str(today.year)\n match = covid_table.find(date=today_str)\n match.next()\n running_total = 0\n for i in range(7):\n running_total += return_daily_figure(today)\n today = today - datetime.timedelta(days=1)\n average_dose_per_day = round(running_total/7)\n return running_total, average_dose_per_day \n except:\n today = today - datetime.timedelta(days=1)",
"def agg_at_daily_level(storage_type, incremental=True):\n\n filenames = list_files(WEEKLY_DATA_PATH, format='csv')\n current_year = str(datetime.date.today().year)\n\n if len(filenames) == 0:\n print('No file exists. Kindly add files in WEEKLY_DATA_PATH')\n sys.exit(0)\n\n if incremental and exists('{}/Last_Week_Processed.pkl'.format(PROCESSED_PATH)):\n file_list = []\n max_week_num = retrieve_file(PROCESSED_PATH, 'Last_Week_Processed.pkl')\n for filename in filenames:\n week_num = int(current_year + filename.split('_')[0][4:])\n if week_num >= max_week_num:\n file_list.append(filename)\n filenames = file_list\n else:\n max_week_num = int(current_year + '00')\n combined = []\n print(\"Processing following file(s):\")\n for filename in filenames:\n print(filename)\n week_num = int(current_year + filename.split('_')[0][4:])\n if max_week_num < week_num:\n max_week_num = week_num\n df = retrieve_file(WEEKLY_DATA_PATH, filename, sep='\\t')\n df = df[~df.duplicated()]\n df = df[df['country'].isin(['US', 'United States'])]\n req_cols = (['date', 'state', 'positive', 'trust', 'anger', 'fear', 'negative',\n 'sadness', 'anticipation', 'joy', 'surprise', 'disgust'])\n cols_list = list(set(df.columns).intersection(req_cols))\n df = df[~df['state'].isnull()][cols_list]\n grouped = df.groupby(['date', 'state'], as_index=False).mean().reset_index()\n combined.append(grouped)\n\n combined_df = pd.concat(combined, axis=0, sort=False)\n if storage_type == 'single':\n store_file(combined_df, DAILY_DATA_PATH, 'Day_Level_Agg.csv', sep='\\t')\n elif storage_type == 'daily':\n for d in combined_df['date'].unique():\n date_record = combined_df[combined_df['date'] == d]\n store_file(date_record, DAILY_DATA_PATH, '{}_tweets.csv'.format(d), sep='\\t')\n\n store_file(max_week_num, PROCESSED_PATH, 'Last_Week_Processed.pkl')",
"def totals(year, month, day):\n\n try:\n year = int(year)\n day = int(day)\n\n if year < 0 or day < 0 or day > 31:\n raise OssecAPIException(1307)\n\n day = \"%02d\" % day\n except ValueError:\n raise OssecAPIException(1307)\n\n if month not in MONTHS:\n try:\n index = int(month)\n except ValueError:\n raise OssecAPIException(1307)\n\n if index < 1 or index > 12:\n raise OssecAPIException(1307)\n\n try:\n month = MONTHS[index - 1]\n except IndexError:\n raise OssecAPIException(1307)\n\n try:\n stat_filename = common.stats_path + \"/totals/\" + str(year) + '/' + month + \"/ossec-totals-\" + day + \".log\"\n stats = open(stat_filename, 'r')\n except IOError:\n raise OssecAPIException(1308, stat_filename)\n\n response = []\n alerts = []\n\n for line in stats:\n data = line.split('-')\n\n if len(data) == 4:\n hour = int(data[0])\n sigid = int(data[1])\n level = int(data[2])\n times = int(data[3])\n\n alert = {'sigid': sigid, 'level': level, 'times': times}\n alerts.append(alert)\n else:\n data = line.split('--')\n\n if len(data) != 5:\n if len(data) in (0, 1):\n continue\n else:\n raise OssecAPIException(1309)\n\n hour = int(data[0])\n total_alerts = int(data[1])\n events = int(data[2])\n syscheck = int(data[3])\n firewall = int(data[4])\n\n response.append({'hour': hour, 'alerts': alerts, 'totalAlerts': total_alerts, 'events': events, 'syscheck': syscheck, 'firewall': firewall})\n alerts = []\n\n return response",
"def date_diagnostic(scenario, dates):\n running = 0\n with open(\"scenario%s_dates.txt\" % (scenario,), \"w\") as fp:\n fp.write(\"date,total\\n\")\n for date in pd.date_range(APR15, MAY30):\n hits = [d for d in dates if d == date]\n running += len(hits)\n fp.write(\"%s,%s\\n\" % (date, running))",
"def filepath(day, ind):\n if ind!=\"TradeReport\" and ind!=\"OrderDetail\" and ind!=\"OrderHistory\":\n raise NameError(' ind must be either TradeReport or OrderDetail')\n \n elif day<1 or day>31 or type(day)!=int:\n raise TypeError('day must be an integer between 1 and 31')\n \n if day<10:\n day=\"0\"+str(day)\n else:\n day=str(day)\n \n path=\"/data/LSE_DATA/raw/T_\" + ind + \"_\"+ day +\"012008.csv/\" + \"t_\" + ind +\".csv\"\n\n return path",
"def summarize(self, nthday=None):\n assert self.validate()\n D = self._data\n if nthday is None:\n daysel = slice(None)\n else:\n daysel = D['MJD'] < np.min(D['MJD']) + nthday\n D = D[daysel]\n tsched = 24 * D['tsched'].sum()\n topen = 24 * D['topen'].sum()\n tscience = 24 * D['tscience'].sum()\n print('Scheduled {:.3f} hr Open {:.3f}% Live {:.3f}%'.format(\n tsched, 100 * topen / max(1e-6, tsched), 100 * tscience / max(1e-6, topen)))\n print('=' * 82)\n print('PROG TILES NEXP SETUP ABT SPLIT ABT TEXP TSETUP TSPLIT TOPEN TDEAD')\n print('=' * 82)\n # Summarize by program.\n for program in self.tiles.programs:\n progidx = self.tiles.program_index[program]\n ntiles_p, ndone_p, nexp_p, nsetup_p, nsplit_p, nsetup_abort_p, nsplit_abort_p = [0] * 7\n tscience_p, tsetup_p, tsplit_p = [0.] * 3\n ntiles_all = 0\n sel = progidx\n ntiles = np.sum(self.tiles.program_mask[program])\n ndone = D['completed'][:, sel].sum()\n nexp = D['nexp'][:, sel].sum()\n nsetup = D['nsetup'][:, sel].sum()\n nsplit = D['nsplit'][:, sel].sum()\n nsetup_abort = D['nsetup_abort'][:, sel].sum()\n nsplit_abort = D['nsplit_abort'][:, sel].sum()\n tscience = 86400 * D['tscience'][:, sel].sum() / max(1, ndone)\n tsetup = 86400 * D['tsetup'][:, sel].sum() / max(1, ndone)\n tsplit = 86400 * D['tsplit'][:, sel].sum() / max(1, ndone)\n line = '{:6s} {} {:4d}/{:4d} {:5d} {:5d} {:3d} {:5d} {:3d} {:6.1f}s {:5.1f}s {:5.1f}s'.format(\n program, ' ', ndone, ntiles, nexp, nsetup, nsetup_abort, nsplit, nsplit_abort, tscience, tsetup, tsplit)\n print(line)",
"def get_download_info(files):\n file_paths = [] # the files we need to check\n file_count = 0 # count of each file in files\n total_size = 0\n\n all_product_types = []\n for ring_obs_id in files:\n for product_type in files[ring_obs_id]:\n for f in files[ring_obs_id][product_type]:\n\n all_product_types.append(product_type)\n\n if product_type != 'preview_image':\n # this is a pds file not a browse product\n # collect the urls.. we will process these at the end\n file_paths += [f for f in files[ring_obs_id][product_type]] # list of all urls\n\n elif product_type == 'preview_image':\n # the file size of each preview images on disc is checked here\n # todo: OMG WHY WHAT\n # todo: get the file sizes into database instead = process like pds files and remove this whole section!\n\n from results.views import get_base_path_previews\n try:\n size = getsize(f)\n total_size += size\n file_count = file_count + 1\n except OSError:\n log.error('could not find file: ' + f)\n\n all_product_types = list(set(all_product_types)) # make unique\n # now we have all pds file_names, put all file names in a list and get their count\n if file_paths:\n\n file_names = list(set([ get_file_path(u) for u in file_paths]))\n file_count += len(file_names)\n\n # query database for the sum of all file_names size fields\n file_sizes = FileSizes.objects.filter(name__in=file_names, PRODUCT_TYPE__in=all_product_types).values('name','size','volume_id').distinct()\n total_size += sum([f['size'] for f in file_sizes]) # todo: this is here b/c django was not happy mixing aggregate+distinct\n\n return total_size, file_count # bytes",
"def day2_case():\n print(\"Day 2 Start\")\n with open(os.path.join(__location__, \"Data/day2data.txt\"), \"r\") as daydata:\n formated_daydata = day2.format_data(daydata)\n print(\"Checksum:\", day2.get_checksum_from_2d_array(formated_daydata))\n print(\"Evenly divisable rowsum:\", day2.get_evenly_divisable_rowsum(formated_daydata))\n print(\"Day 2 Done\")",
"def read_daily_source(path = default_data_location):\n \n from os import listdir\n from os.path import isfile, join\n onlyfiles = [f for f in listdir(path) if isfile(join(path, f)) and f!='.DS_Store']\n \n source = []\n for f in onlyfiles:\n with open(join(path, f),'r',encoding='utf-8') as fin:\n try:\n source.append(BeautifulSoup(fin.read(), 'html.parser'))\n except Exception:\n print('failed to read {}'.format(f))\n raise\n \n data = pd.DataFrame({'name':onlyfiles, 'source':source})\n data['name'] = data['name'].apply(lambda x: x[:-5])\n data['source_hash'] = data['source'].apply(lambda x: get_hash(x))\n return data",
"def main():\n print \"Apple Inc. (AAPL) Daily Closing Prices:\"\n for i in stock_data:\n t_data = i.findAll('td', {\"class\":\"yfnc_tabledata1\"})\n if len(t_data) is 7:\n date = t_data[0].contents[0]\n close = t_data[6].contents[0]\n print (\"Date: {}, Closing Price: {}\").format(date, close)",
"def _get_fsevent_files(self):\r\n # Print the header columns to the output files\r\n Output.print_columns(self.l_all_fsevents)\r\n\r\n # Total number of files in events dir #\r\n t_files = len(os.listdir(self.path))\r\n for filename in os.listdir(self.path):\r\n if filename == 'fseventsd-uuid':\r\n t_files -= 1\r\n self.time_range_src_mod = []\r\n prev_mod_date = \"Unknown\"\r\n prev_last_wd = 0\r\n c_last_wd = 0\r\n\r\n # Uses file mod dates to generate time ranges by default unless\r\n # files are carved or mod dates lost due to exporting\r\n self.use_file_mod_dates = True\r\n\r\n # Run simple test to see if file mod dates\r\n # should be used to generate time ranges\r\n # In some instances fsevent files may not have\r\n # their original mod times preserved on export\r\n # This code will flag true when the same date and hour\r\n # exists for the first file and the last file\r\n # in the provided source fsevents folder\r\n first = os.path.join(self.path, os.listdir(self.path)[0])\r\n last = os.path.join(self.path, os.listdir(self.path)[len(os.listdir(self.path)) - 1])\r\n first = os.path.getmtime(first)\r\n last = os.path.getmtime(last)\r\n first = str(datetime.datetime.utcfromtimestamp(first))[:14]\r\n last = str(datetime.datetime.utcfromtimestamp(last))[:14]\r\n\r\n if first == last:\r\n self.use_file_mod_dates = False\r\n\r\n # Iterate through each file in supplied fsevents dir\r\n for filename in os.listdir(self.path):\r\n if filename == 'fseventsd-uuid':\r\n continue\r\n # Variables\r\n self.all_files_count += 1\r\n\r\n # Call the progress bar which shows parsing stats\r\n progress(self.all_files_count, t_files)\r\n\r\n buf = \"\"\r\n\r\n # Full path to source fsevent file\r\n self.src_fullpath = os.path.join(self.path, filename)\r\n # Name of source fsevent file\r\n self.src_filename = filename\r\n # UTC mod date of source fsevent file\r\n self.m_time = os.path.getmtime(self.src_fullpath)\r\n self.m_time = str(datetime.datetime.utcfromtimestamp((self.m_time))) + \" [UTC]\"\r\n\r\n # Regex to match against source fsevent log filename\r\n regexp = re.compile(r'^.*[\\][0-9a-fA-F]{16}$')\r\n\r\n # Test to see if fsevent file name matches naming standard\r\n # if not, assume this is a carved gzip\r\n if len(self.src_filename) == 16 and regexp.search(filename) is not None:\r\n c_last_wd = int(self.src_filename, 16)\r\n self.time_range_src_mod = prev_last_wd, c_last_wd, prev_mod_date, self.m_time\r\n self.is_carved_gzip = False\r\n else:\r\n self.is_carved_gzip = True\r\n\r\n # Attempt to decompress the fsevent archive\r\n try:\r\n with self.skip_gzip_check():\r\n self.files = gzip.GzipFile(self.src_fullpath, \"rb\")\r\n buf = self.files.read()\r\n\r\n except Exception as exp:\r\n # When permission denied is encountered\r\n if \"Permission denied\" in str(exp) and not os.path.isdir(self.src_fullpath):\r\n print('\\nEnsure that you have permissions to read '\r\n 'from {}\\n{}\\n'.format(self.path, str(exp)))\r\n sys.exit(0)\r\n # Otherwise write error to log file\r\n else:\r\n self.logfile.write(\r\n \"%s\\tError: Error while decompressing FSEvents file.%s\\n\" % (\r\n self.src_filename,\r\n str(exp)\r\n )\r\n )\r\n self.error_file_count += 1\r\n continue\r\n\r\n # If decompress is success, check for DLS headers in the current file\r\n dls_chk = FSEventHandler.dls_header_search(self, buf, self.src_fullpath)\r\n\r\n # If check for DLS returns false, write information to logfile\r\n if dls_chk is False:\r\n self.logfile.write('%s\\tInfo: DLS Header Check Failed. 
Unable to find a '\r\n 'DLS header. Unable to parse File.\\n' % (self.src_filename))\r\n # Continue to the next file in the fsevents directory\r\n self.error_file_count += 1\r\n continue\r\n\r\n self.parsed_file_count += 1\r\n\r\n # Accounts for fsevent files that get flushed to disk\r\n # at the same time. Usually the result of a shutdown\r\n # or unmount\r\n if not self.is_carved_gzip and self.use_file_mod_dates:\r\n prev_mod_date = self.m_time\r\n prev_last_wd = int(self.src_filename, 16)\r\n\r\n # If DLSs were found, pass the decompressed file to be parsed\r\n FSEventHandler.parse(self, buf)",
"def _get_run_info(self, path, creation_date):\n total = 0\n try:\n for entry in os.scandir(path):\n # Only evaluates size of files and not folders inside raw/proc\n if entry.is_file():\n # if it's a file, use stat() function\n total += entry.stat().st_size\n\n except NotADirectoryError:\n # if `path` isn't a directory, get the file size then\n total = os.path.getsize(path)\n except PermissionError:\n # if for whatever reason we can't open the folder, return 0\n return 0\n\n if os.path.isdir(path):\n validator = RunValidator(path)\n elif path.endswith(\".h5\"):\n validator = FileValidator(H5File(path).files[0])\n else:\n return 0\n\n try:\n validator.run_checks()\n except Exception:\n pass\n return total, str(ValidationError(validator.problems))"
] | [
"0.6675652",
"0.5932114",
"0.5637807",
"0.56349427",
"0.55986214",
"0.55191576",
"0.5389637",
"0.5369172",
"0.5310285",
"0.5299707",
"0.5261772",
"0.526045",
"0.52516216",
"0.52198076",
"0.5205368",
"0.5167127",
"0.51663005",
"0.5151746",
"0.5145209",
"0.51248497",
"0.51245743",
"0.5108868",
"0.5106518",
"0.507504",
"0.5065546",
"0.5006668",
"0.49998313",
"0.49790603",
"0.49764967",
"0.4960424"
] | 0.63805336 | 1 |
for each decision token, load the workflow and run it | def process_workflow(
workflow_type, decision, settings, logger, client, token, maximum_page_size
):
# for the workflowType attempt to do the work
if workflow_type is not None:
logger.info("workflowType: %s", workflow_type)
# Instantiate and object for the workflow using eval
# Build a string for the object name
workflow_name = get_workflow_name(workflow_type)
# Attempt to import the module for the workflow
if import_workflow_class(workflow_name):
# Instantiate the workflow object
workflow_object = get_workflow_object(
workflow_name,
settings,
logger,
client,
token,
decision,
maximum_page_size,
)
# Process the workflow
invoke_do_workflow(workflow_name, workflow_object, logger)
else:
logger.info("error: could not load object %s\n", workflow_name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def init_workflow():\n pass",
"def run_flow(flow_id):\n if flow_id == 1:\n etl.load_images_from_urls()\n elif flow_id == 2:\n etl.find_edges_and_save()\n elif flow_id == 3:\n etl.normalize_dataset()\n elif flow_id == 4:\n classifiers.run_models_comparison()",
"def start_workflow(self, **params):\n raise NotImplementedError",
"def run(self, data):\n\n if data and self.application:\n # Build tuples for embedding index\n if self.application.embeddings:\n data = [(x, element, None) for x, element in enumerate(data)]\n\n # Process workflow\n with st.spinner(\"Running workflow....\"):\n results = []\n for result in self.application.workflow(self.name, data):\n # Store result\n results.append(result)\n\n # Write result if this isn't an indexing workflow\n if not self.application.embeddings:\n st.write(result)\n\n # Store workflow results\n self.data = results",
"def run_workflow(EMBEDDING_BASE_PATH):\n train_tweets_path, val_tweets_path, test_tweets_path, image_dataset = run_pre_workflow()\n\n input_images, train_tweets, val_tweets, test_tweets, glove_embeddings = replica_catalog(train_tweets_path, val_tweets_path, test_tweets_path, image_dataset, EMBEDDING_BASE_PATH)\n\n preprocess_tweets, preprocess_images, train_resnet, hpo_train_resnet, train_bilstm, hpo_train_bilstm, resnet_inference, bilstm_inference, late_fusion = transformation_catalog()\n \n sites_catalog()\n\n pegasus_properties()\n \n wf = Workflow('Crisis_Computing_Workflow')\n\n # --------------------------------------------------- TEXT PIPELINE ------------------------------------------------------ \n\n # Job 1: Preprocess tweets\n preprocessed_train_tweets = File('preprocessed_train_tweets.csv')\n preprocessed_val_tweets = File('preprocessed_val_tweets.csv')\n preprocessed_test_tweets = File('preprocessed_test_tweets.csv')\n \n job_preprocess_tweets = [Job(preprocess_tweets) for i in range(3)]\n job_preprocess_tweets[0].add_inputs(train_tweets)\n job_preprocess_tweets[0].add_outputs(preprocessed_train_tweets)\n job_preprocess_tweets[0].add_args('--filename', 'train_tweets.csv')\n \n job_preprocess_tweets[1].add_inputs(val_tweets)\n job_preprocess_tweets[1].add_outputs(preprocessed_val_tweets)\n job_preprocess_tweets[1].add_args('--filename', 'val_tweets.csv')\n \n job_preprocess_tweets[2].add_inputs(test_tweets)\n job_preprocess_tweets[2].add_outputs(preprocessed_test_tweets)\n job_preprocess_tweets[2].add_args('--filename', 'test_tweets.csv')\n\n\n # Job 2: HPO Bi-LSTM\n bilstm_best_params = File('best_bilstm_hpo_params.txt')\n\n job_hpo_train_bilstm = Job(hpo_train_bilstm)\\\n .add_inputs(glove_embeddings, preprocessed_train_tweets, preprocessed_val_tweets, preprocessed_test_tweets)\\\n .add_outputs(bilstm_best_params)\\\n .add_args('--trials', BILSTM_NUM_TRIALS)\n\n\n # Job 3: Train Bi-LSTM using best parameters from HPO study and output loss and accuracy curves\n trained_bilstm_model = File('bilstm_final_model.h5') \n bilstm_loss_curve = File('Loss_curve_bilstm.png')\n bilstm_accuracy_curve = File('Accuracy_curve_bilstm.png')\n\n\n job_train_bilstm = Job(train_bilstm)\\\n .add_inputs(glove_embeddings, preprocessed_train_tweets, preprocessed_val_tweets, preprocessed_test_tweets, bilstm_best_params)\\\n .add_outputs(bilstm_loss_curve, bilstm_accuracy_curve, trained_bilstm_model)\\\n\n\n # Job 4: Run inference on best Bi-LSTM model to produce output on test dataset along with confusion matrix\n bilstm_train_output_prob = File('bilstm_train_output.csv')\n bilstm_test_output_prob = File('bilstm_test_output.csv')\n bilstm_confusion_matrix = File('bilstm_confusion_matrix.png')\n\n job_bilstm_inference = Job(bilstm_inference)\\\n .add_inputs(preprocessed_train_tweets, preprocessed_val_tweets, preprocessed_test_tweets, trained_bilstm_model)\\\n .add_outputs(bilstm_train_output_prob, bilstm_test_output_prob, bilstm_confusion_matrix)\n\n\n # --------------------------------------------------- IMAGE PIPELINE ------------------------------------------------------ \n\n \n # Job 1: Preprocess images\n prefix = \"resized_\"\n job_preprocess_images = [Job(preprocess_images) for i in range(NUM_WORKERS)]\n resized_images = split_preprocess_jobs(job_preprocess_images, input_images, prefix)\n\n # Job 2: HPO ResNet-50\n resnet_best_params = File('best_resnet_hpo_params.txt')\n\n job_hpo_train_resnet = Job(hpo_train_resnet)\\\n .add_inputs(*resized_images)\\\n .add_args('--trials', 
RESNET_NUM_TRIALS)\\\n .add_outputs(resnet_best_params)\\\n .add_profiles(Namespace.PEGASUS, key=\"maxwalltime\", value=MAXTIMEWALL)\n\n\n # Job 3: Train ResNet-50 using best parameters from HPO study and output loss and accuracy curves\n trained_resnet_model = File('resnet_final_model.pth')\n resnet_loss_curve = File('Loss_curve_resnet.png')\n resnet_accuracy_curve = File('Accuracy_curve_resnet.png')\n\n job_train_resnet = Job(train_resnet)\\\n .add_inputs(*resized_images, resnet_best_params)\\\n .add_outputs(resnet_loss_curve, resnet_accuracy_curve, trained_resnet_model)\\\n .add_profiles(Namespace.PEGASUS, key=\"maxwalltime\", value=MAXTIMEWALL)\n\n\n # Job 4: Run inference on best ResNet-50 model to produce output on test dataset along with confusion matrix\n resnet_train_output_prob = File('resnet_train_output.csv')\n resnet_confusion_matrix = File('resnet_confusion_matrix.png')\n resnet_test_output_prob = File('resnet_test_output.csv') \n\n job_resnet_inference = Job(resnet_inference)\\\n .add_inputs(*resized_images, trained_resnet_model)\\\n .add_outputs(resnet_train_output_prob, resnet_test_output_prob, resnet_confusion_matrix)\n\n \n \n # --------------------------------------------------- LATE FUSION ------------------------------------------------------ \n\n # Job 1: Late Fusion\n confusion_matrix_MPC = File('late_fusion_MPC.png')\n confusion_matrix_LR = File('late_fusion_LR.png')\n confusion_matrix_MLP = File('late_fusion_MLP.png')\n report_MLP = File('late_fusion_MLP.csv')\n report_MPC = File('late_fusion_MPC.csv')\n report_LR = File('late_fusion_LR.csv')\n\n job_late_fusion = Job(late_fusion)\\\n .add_inputs(resnet_train_output_prob, resnet_test_output_prob, bilstm_train_output_prob, bilstm_test_output_prob)\\\n .add_outputs(confusion_matrix_MPC, confusion_matrix_LR, confusion_matrix_MLP, report_MLP, report_MPC, report_LR)\n\n wf.add_jobs(*job_preprocess_tweets, *job_preprocess_images, job_bilstm_inference, job_hpo_train_bilstm, job_train_bilstm, job_hpo_train_resnet, job_train_resnet, job_resnet_inference, job_late_fusion)\n\n try:\n wf.plan(submit=False, sites=[\"donut\"], output_sites=[\"donut\"], dir=\"submit\")\n #wf.wait()\n #wf.statistics()\n except PegasusClientError as e:\n print(e.output)\n \n #plot_workflow_graph(wf)\n \n return",
"def load_and_cache_examples(args, tokenizer, split, task_name, model_type, predictions=None):\n processor = MoralStoriesProcessor()\n if task_name != 'consequence|action+context_genref':\n args.data_dir = os.path.join(args.original_data_dir, task_name, args.split_name)\n else:\n args.data_dir = os.path.join(args.original_data_dir, 'consequence|action+context_gen', args.split_name)\n\n # Get features\n logger.info('Creating features from dataset file at %s', args.data_dir)\n label_list = processor.get_labels()\n if split == 'train':\n examples = processor.get_train_examples(args.data_dir)\n elif split == 'dev':\n examples = processor.get_dev_examples(args.data_dir)\n elif split == 'test':\n examples = processor.get_test_examples(args.data_dir)\n else:\n raise Exception('split value should be in [train, dev, test]')\n\n # Replace gold sequences with model predictions\n if predictions is not None:\n if type(predictions[0]) != tuple:\n all_predictions = [tuple(predictions)]\n else:\n all_predictions = predictions\n extended_examples = list()\n\n for predictions in all_predictions:\n if predictions[0] == 'consequences':\n if len(all_predictions) == 1:\n # Remove negative examples\n positive_examples = list()\n for ex in examples:\n if ex.label == '1':\n positive_examples.append(ex)\n examples = positive_examples\n\n for pr_id, pr in enumerate(predictions[1]):\n ex = examples[pr_id]\n if ex.moral_consequence is not None:\n if len(all_predictions) == 1:\n ex.moral_consequence = pr\n else:\n ex.moral_consequence_draft = pr\n else:\n if len(all_predictions) == 1:\n ex.immoral_consequence = pr\n else:\n ex.immoral_consequence_draft = pr\n extended_examples.append(ex)\n examples = extended_examples\n extended_examples = list()\n\n if predictions[0] == 'consequence_labels':\n for pr_id, pr in enumerate(predictions[1]):\n ex = examples[pr_id]\n if ex.moral_consequence_draft is not None:\n if pr == 1:\n ex.moral_consequence_draft = ex.moral_consequence_draft + ' ' + '<|CSQ_TRUE|>'\n else:\n ex.moral_consequence_draft = ex.moral_consequence_draft + ' ' + '<|CSQ_FALSE|>'\n else:\n if pr == 0:\n ex.immoral_consequence_draft = ex.immoral_consequence_draft + ' ' + '<|CSQ_TRUE|>'\n else:\n ex.immoral_consequence_draft = ex.immoral_consequence_draft + ' ' + '<|CSQ_FALSE|>'\n extended_examples.append(ex)\n examples = extended_examples\n extended_examples = list()\n\n # Generate features; target task is classification\n pad_token_id = tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0]\n if pad_token_id is None:\n pad_token_id = tokenizer.convert_tokens_to_ids([tokenizer.eos_token])[0]\n features = convert_examples_to_features(examples,\n label_list,\n args.max_seq_length,\n args.max_gen_length,\n tokenizer,\n task_name,\n model_type,\n TASK_DICT[task_name],\n cls_token_at_end=False,\n cls_token=tokenizer.cls_token,\n sep_token=tokenizer.sep_token,\n sep_token_extra=bool(model_type in ['roberta']),\n cls_token_segment_id=0,\n pad_on_left=False,\n pad_token=pad_token_id,\n pad_token_segment_id=0,\n is_eval=split == 'test',\n fit_to_max_corpus_len=True)\n\n # Make feature tensors\n all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)\n all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)\n all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)\n if 'gen' in task_name:\n all_label_masks = torch.tensor([f.label_mask for f in features], 
dtype=torch.long)\n all_gen_prompts = torch.tensor([f.gen_prompt_id for f in features], dtype=torch.long)\n dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,\n all_label_ids, all_label_masks, all_gen_prompts)\n else:\n dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n\n return dataset",
"def _run(self):\n result = self._consensus()\n if self._decision.split_group and result:\n self._set_decision(result)",
"def do_workflow(self, arg=None):\n\n def add_steps_to_workflow(curr_flow):\n while True:\n cmd_call = simple_input('Please choose a command to add to the workflow.', cmds, True)\n if cmd_call not in ['DONE', 'EXIT']:\n if self.is_output_cmd(cmd_call):\n curr_flow.add_output(cmd_call)\n else:\n curr_flow.add_step(cmd_call)\n cmds.pop(cmds.index(cmd_call))\n\n _conf = simple_input('Do you want to configure this command?', ['Y','N'], True) if self.is_configureable(cmd) else None\n if _conf == 'Y':\n curr_flow.configure_step(cmd_call)\n\n elif cmd_call == 'DONE':\n break\n else:\n return\n return curr_flow.has_steps()\n\n def confirm_workflow(curr_flow):\n checks = [('START', 'Start workflow?'), ('ADD', 'Do you want to add more steps?'),\n ('RESTART', 'Do you want to start over?')]\n curr_flow.draw_steps()\n for check in checks:\n _continue = simple_input(check[1], ['Y', 'N', 'EXIT'])\n if _continue == 'Y':\n return check[0]\n if _continue == 'EXIT':\n return 'EXIT'\n return 'INVALID'\n\n print('Preparing Workflow Wizard...')\n options = sorted(self.cmds + self.output_cmds)\n from smores.workflow import Workflow\n workflow = Workflow(self)\n target, load_type = self.validate_args('', 'file')\n if target:\n _l = True if target in self.inputs['files'].keys() else False\n workflow.add_target(target, load_type, _l)\n print('Please choose the commands you would like to add to the workflow.'\n '\\nCommands will be executed in the order in which they are added.'\n '\\n\\nPlease note that some commands have dependencies that must be satisfied. An overview of '\n 'command dependencies is available on the main SMOREs wiki on Github')\n print('\\nAvailable Commands for WorkFlow')\n cmds = []\n for i, _o in enumerate(options):\n print('{1}'.format(i, _o))\n cmds.append(_o)\n cmds.append('DONE')\n steps_added = add_steps_to_workflow(workflow)\n while steps_added:\n _run = confirm_workflow(workflow)\n if _run == 'START':\n break\n elif _run == 'ADD':\n _ = add_steps_to_workflow(workflow)\n elif _run == 'RESTART':\n self.do_workflow('')\n else:\n return\n workflow.run()\n print('Workflow has completed.')\n return\n\n else:\n print('Workflows currently have to be setup without the file already being loaded.')\n return",
"def run(self):\n self._build_docker_images()\n self._build_docker_image_test()\n self._run_workflow_in_matrix_of_envs()",
"def train(args):\n print(args)\n\n # Run a training job\n configs = LuxMatchConfigs_Default\n\n # Create a default opponent agent\n opponent = Agent()\n\n # Create a RL agent in training mode\n player = AgentPolicy(mode=\"train\")\n\n # Train the model\n env_eval = None\n if args.n_envs == 1:\n env = LuxEnvironment(configs=configs,\n learning_agent=player,\n opponent_agent=opponent)\n else:\n env = SubprocVecEnv([make_env(LuxEnvironment(configs=configs,\n learning_agent=AgentPolicy(mode=\"train\"),\n opponent_agent=opponent), i) for i in range(args.n_envs)])\n \n run_id = args.id\n print(\"Run id %s\" % run_id)\n\n if args.path:\n # by default previous model params are used (lr, batch size, gamma...)\n model = PPO.load(args.path)\n model.set_env(env=env)\n\n # Update the learning rate\n model.lr_schedule = get_schedule_fn(args.learning_rate)\n\n # TODO: Update other training parameters\n else:\n model = PPO(\"MlpPolicy\",\n env,\n verbose=1,\n tensorboard_log=\"./lux_tensorboard/\",\n learning_rate=args.learning_rate,\n gamma=args.gamma,\n gae_lambda=args.gae_lambda,\n batch_size=args.batch_size,\n n_steps=args.n_steps\n )\n\n \n \n callbacks = []\n\n # Save a checkpoint and 5 match replay files every 100K steps\n player_replay = AgentPolicy(mode=\"inference\", model=model)\n callbacks.append(\n SaveReplayAndModelCallback(\n save_freq=100000,\n save_path='./models/',\n name_prefix=f'model{run_id}',\n replay_env=LuxEnvironment(\n configs=configs,\n learning_agent=player_replay,\n opponent_agent=Agent()\n ),\n replay_num_episodes=5\n )\n )\n \n # Since reward metrics don't work for multi-environment setups, we add an evaluation logger\n # for metrics.\n if args.n_envs > 1:\n # An evaluation environment is needed to measure multi-env setups. Use a fixed 4 envs.\n env_eval = SubprocVecEnv([make_env(LuxEnvironment(configs=configs,\n learning_agent=AgentPolicy(mode=\"train\"),\n opponent_agent=opponent), i) for i in range(4)])\n\n callbacks.append(\n EvalCallback(env_eval, best_model_save_path=f'./logs_{run_id}/',\n log_path=f'./logs_{run_id}/',\n eval_freq=args.n_steps*2, # Run it every 2 training iterations\n n_eval_episodes=30, # Run 30 games\n deterministic=False, render=False)\n )\n\n print(\"Training model...\")\n model.learn(total_timesteps=args.step_count,\n callback=callbacks)\n if not os.path.exists(f'models/rl_model_{run_id}_{args.step_count}_steps.zip'):\n model.save(path=f'models/rl_model_{run_id}_{args.step_count}_steps.zip')\n print(\"Done training model.\")\n\n # Inference the model\n print(\"Inference model policy with rendering...\")\n saves = glob.glob(f'models/rl_model_{run_id}_*_steps.zip')\n latest_save = sorted(saves, key=lambda x: int(x.split('_')[-2]), reverse=True)[0]\n model.load(path=latest_save)\n obs = env.reset()\n for i in range(600):\n action_code, _states = model.predict(obs, deterministic=True)\n obs, rewards, done, info = env.step(action_code)\n if i % 5 == 0:\n print(\"Turn %i\" % i)\n env.render()\n\n if done:\n print(\"Episode done, resetting.\")\n obs = env.reset()\n print(\"Done\")\n\n '''\n # Learn with self-play against the learned model as an opponent now\n print(\"Training model with self-play against last version of model...\")\n player = AgentPolicy(mode=\"train\")\n opponent = AgentPolicy(mode=\"inference\", model=model)\n env = LuxEnvironment(configs, player, opponent)\n model = PPO(\"MlpPolicy\",\n env,\n verbose=1,\n tensorboard_log=\"./lux_tensorboard/\",\n learning_rate = 0.0003,\n gamma=0.999,\n gae_lambda = 0.95\n )\n 
model.learn(total_timesteps=2000)\n env.close()\n print(\"Done\")\n '''",
"def __init__(self, workflow, **kwds):\n self.kwds = kwds\n self.url = self.get('url', 'cmsweb.cern.ch')\n WorkflowManager.__init__(self, workflow, self.url)\n self.workload = getWorkload(self.url, workflow)\n self.cacheID = self.winfo.get('StepOneConfigCacheID', '')\n self.config = getConfig(self.url, self.cacheID)\n self.pileup_dataset = self._pileup_dataset()\n self.priority = self._priority()\n self.era = self.get('era', 'Summer12')\n self.lfn = self.get('lfn', '/store/mc')\n self.special_name = self.get('specialName', '')\n self.max_rss = self.get('maxRSS', 2300000)\n self.max_vsize = self.get('maxVSize', 4100000000)\n self.input_dataset = ''\n self.pileup_scenario = ''\n self.global_tag = self.get('globalTag', '')\n self.campaign = self.get('campaign', '')\n self.max_merge_events = self.get('maxMergeEvents', 50000)\n self.activity = self.get('activity', 'reprocessing')\n self.restrict = self.get('restrict', 'None')\n self.site_use = self.get('site', None)\n self.site_cust = self.get('site_cust', None)\n self.xrootd = self.get('xrootd', 0)\n self.ext_tag = self.get('ext', '')\n self.team = self.get('team', '')\n\n # perform various initialization\n self._init()\n\n # custom settings\n # Construct processed dataset version\n if self.pileup_scenario:\n self.pileup_scenario = self.pileup_scenario+'_' \n\n specialprocstring = kwds.get('specialName', '')\n if specialprocstring:\n self.special_name = specialprocstring + '_'\n\n # ProcessingString\n inprocstring = kwds.get('procstring', '')\n if inprocstring:\n self.procstring = inprocstring\n else:\n self.procstring = self.special_name + self.pileup_scenario +\\\n self.global_tag + self.ext_tag\n\n # ProcessingVersion\n inprocversion = kwds.get('procversion', '')\n if inprocversion:\n self.procversion = inprocversion\n else:\n self.procversion = self.dataset_version(self.era, self.procstring)",
"def run(self):\n\n poll = self.poll()\n\n if not 'events' in poll:\n return True\n\n history = self.get_history(poll)\n activity_states = self.get_activity_states(history)\n workflow_execution_info = self.get_workflow_execution_info(poll)\n context = event.get_current_context(history)\n\n if workflow_execution_info is not None:\n context.update(workflow_execution_info)\n\n decisions = swf.Layer1Decisions()\n\n try:\n for current in activity.find_available_activities(\n self.flow, activity_states, context):\n\n decisions.schedule_activity_task(\n current.id, # activity id.\n current.activity_name,\n self.version,\n task_list=current.activity_worker.task_list,\n input=json.dumps(current.create_execution_input()),\n heartbeat_timeout=str(current.heartbeat_timeout),\n start_to_close_timeout=str(current.timeout),\n schedule_to_start_timeout=str(current.schedule_to_start),\n schedule_to_close_timeout=str(current.schedule_to_close))\n else:\n activities = list(\n activity.find_uncomplete_activities(\n self.flow, activity_states, context))\n if not activities:\n decisions.complete_workflow_execution()\n except Exception as e:\n decisions.fail_workflow_execution(reason=str(e))\n\n self.complete(decisions=decisions)\n return True",
"def run(self, config):\n print(config) \n allfiles = os.listdir(os.path.abspath(config.strategy_dir))\n fsl = FileStrategyLoader()\n data_proxy = DataProxy(os.path.abspath(config.data_bundle_path))\n\n for elt in allfiles:\n source = fsl.load(os.path.join(os.path.abspath(config.strategy_dir), elt), {})\n #For every strategy code assign context\n context = Context()\n print(source['assets']())\n context.scope = source\n context.data_proxy = data_proxy\n context.account = Account(initcash=config.initial_cash, start_date=config.start_date, end_date=config.end_date)\n context.event_source = EventSource()\n context.event_bus = EventBus()\n context.start_date = config.start_date\n context.end_date = config.end_date\n context.frequency = config.frequency\n handle_ctx = Thread(target=context.run)\n handle_ctx.setDaemon(True)\n handle_ctx.start()\n\n while True:\n time.sleep(10)",
"def StepVASP0(my_project, struct_list,order_key=0):\n WORKFLOWS = my_project['Workflow']\n Workflow_Params = WORKFLOWS['Steps'][order_key]\n Workflow_name = Workflow_Params['NAME']\n job_dir = my_project['NAME'] + Workflow_Params['NAME']\n logger = get_logger(job_dir)\n chkpt = job_dir + '.json'\n\n incar_dict = my_project['Incar_General']\n incar_init = Incar.from_dict(incar_dict)\n kpoints_init = Kpoints.monkhorst_automatic([18,18,18]) # an initialization to anything sensible\n\n # Decide the Kpoints density per atom setting here\n turn_knobs= OrderedDict({'POSCAR': struct_list,'KPOINTS':Workflow_Params['KPOINTS'],\\\n 'POTCAR_pseudopotential':Workflow_Params['PSEUDOPOTENTIAL']})\n\n if Workflow_Params['Other_Knobs']:\n #for k in Workflow_Params['Other_Knobs']: \n #print (type(Workflow_Params['Other_Knobs']))\n turn_knobs.update(Workflow_Params['Other_Knobs'])\n job_bin = vasp_config[Workflow_Params['Queue']['Bin']]\n qdict = Workflow_Params['Queue']\n # Decide general queue settings for all runs in this step\n #print (turn_knobs)\n qadapter, job_cmd = get_run_cmmnd(partition=qdict['Partition'],ntasks=qdict['Ntasks'],\\\n nnodes = qdict['Nnodes'],walltime=qdict['Walltime'],job_bin=job_bin,\\\n mem=qdict['Memory'], job_name=job_dir)\n\n # run the jobs in this step\n run_cal(turn_knobs, qadapter, job_cmd, job_dir, logger,\n chkpt, incar=incar_init, kpoints=kpoints_init,\n poscar=struct_list[0], magnetism=Workflow_Params['Magnetism'],\\\n is_matrix=Workflow_Params['Matrix'],\\\n Grid_type=Workflow_Params['Kpt_Grid'])\n return [chkpt]",
"def _start(self):\n if self._classifier is None:\n self._classifier = TFSlimClassifier(self.config)\n self._classifier.__enter__()",
"def run_wf(self,wfname):\n wf = self.workflows[wfname]\n self.logmethod('preparing workflow {} for execution'.format(wfname))\n stk,diag = wf.execution_stack()\n self.prepare_wf(wf,stk)\n wf.execute()\n self.logmethod('execution finished')",
"def _stage1(self):\n self.start_progress()\n tasks = list(self._chain_dict(self._model.adjust_tasks))\n if len(tasks) == 0:\n self._stage2(self._no_adjustments_case())\n else:\n task = lambda : self._run_adjust_tasks(tasks)\n locator.get(\"pool\").submit(task, self._stage2)",
"def run(self):\n if self.next_state == \"initialize_rexarm\":\n self.initialize_rexarm()\n\n if self.next_state == \"idle\":\n self.idle()\n\n if self.next_state == \"estop\":\n self.estop()\n\n if self.next_state == \"execute_tp\":\n self.execute_tp()\n\n if self.next_state == \"execute\":\n self.execute()\n\n if self.next_state == \"calibrate\":\n self.calibrate()\n\n if self.next_state == \"manual\":\n self.manual()\n\n if self.next_state == \"learn\":\n self.learn()\n\n if self.next_state == \"remember\":\n self.remember()\n\n if self.next_state == \"write\":\n self.write()\n\n if self.next_state == \"get_color\":\n self.get_color()\n\n if self.next_state == \"find_blocks\":\n self.find_blocks()\n\n # if self.next_state == \"dance\":\n # self.execute_dance()",
"def run(self, input_files, input_metadata, output_files):\n try:\n # Set and check execution directory. If not exists the directory will be created.\n execution_path = os.path.abspath(self.configuration.get('execution', '.'))\n execution_parent_dir = os.path.dirname(execution_path)\n if not os.path.isdir(execution_parent_dir):\n os.makedirs(execution_parent_dir)\n\n # Update working directory to execution path\n os.chdir(execution_path)\n logger.debug(\"Execution path: {}\".format(execution_path))\n\n # Set file names for output files (with random name if not predefined)\n for key in output_files.keys():\n if output_files[key] is not None:\n pop_output_path = os.path.abspath(output_files[key])\n self.populable_outputs[key] = pop_output_path\n output_files[key] = pop_output_path\n else:\n errstr = \"The output_file[{}] can not be located. Please specify its expected path.\".format(key)\n logger.error(errstr)\n raise Exception(errstr)\n\n logger.debug(\"Init execution of the Machine Learning Model generation\")\n # Prepare file paths\n for key in input_files.keys():\n if key == 'radiomic_features':\n dataset = input_files[key]\n elif key == 'ML_technique':\n ml = input_files[key]\n else:\n logger.debug('Unrecognized input file key {}'.format(key))\n continue\n\n\n\n output_metadata = {}\n for key in output_files.keys():\n \n logger.info('VRE_ML: Iterating over Key {}'.format(key))\n\n \n if os.path.isfile(output_files[key]):\n meta = Metadata()\n meta.file_path = output_files[key] # Set file_path for output files\n \n logger.info('VRE_ML: Update metadata with key {} and value {}'.format(key, meta.file_path))\n\n meta.data_type = 'tool_statistics'\n meta.file_type = 'PDF'\n\n # Set sources for output files\n meta.sources = [output_files[key]+'.pdf']\n # Generate model\n generate_model.run(dataset=dataset,output_files[key]+'.pdf')\n\n # Append new element in output metadata\n logger.info('VRE_ML: Update metadata with key {} and value {}'.format(key, meta.file_path))\n output_metadata.update({key: meta})\n\n else:\n logger.warning(\"Output {} not found. Path {} not exists\".format(key, output_files[key]))\n\n logger.debug(\"Output metadata created\")\n\n return output_files, output_metadata\n\n except Exception:\n errstr = \"VRE ML RUNNER pipeline failed. See logs\"\n logger.fatal(errstr)\n raise Exception(errstr)",
"def execute(self):\n # set observations for all agents\n observation = self.environment.get_observation_for_agent()\n for ag in self.environment.agents:\n ag.observation = observation\n # main loop\n while not self.environment.end_episode:\n # each agent choose its action\n self.environment.choose_action()\n # next state\n self.environment.calculate_next_state()\n # is the end of the episode\n self.environment.calculate_end_episode()\n # set observations for all agents\n observation = self.environment.get_observation_for_agent()\n for ag in self.environment.agents:\n ag.observation = observation",
"async def dispatch_auto_starts(self, ctx):\n for operation in self.config.dataflow.operations.values():\n if operation.inputs or not await self.ictx.check_conditions(\n operation, self.config.dataflow, ctx\n ):\n continue\n parameter_set = MemoryParameterSet(\n MemoryParameterSetConfig(ctx=ctx, parameters=[])\n )\n task = await self.nctx.dispatch(self, operation, parameter_set)\n task.operation = operation\n task.parameter_set = parameter_set\n yield task",
"def run_step(self):\n self.hooked_sess.run(self.train_op)",
"def workflow(base_dir, # base tool path\n use_cache=1, # whether to skip already executed runs (in cache) or not (1/0)\n ignore_git=0): # whether to ignore git version or not (1/0)\n\n # get some needed variables from config file\n runs = int(config['general']['runs'])\n workers = int(config['general']['workers'])\n\n batch_size = int(config['mtje']['batch_size'])\n epochs = int(config['mtje']['epochs'])\n use_malicious_labels = int(config['mtje']['use_malicious_labels'])\n use_count_labels = int(config['mtje']['use_count_labels'])\n gen_type = config['mtje']['gen_type']\n similarity_measure = config['mtje']['similarity_measure'].lower()\n net_type = 'mtje'\n\n training_n_samples = int(config['sorel20mDataset']['training_n_samples'])\n validation_n_samples = int(config['sorel20mDataset']['validation_n_samples'])\n test_n_samples = int(config['sorel20mDataset']['test_n_samples'])\n\n min_n_anchor_samples = int(config['freshDataset']['min_n_anchor_samples'])\n max_n_anchor_samples = int(config['freshDataset']['max_n_anchor_samples'])\n fresh_n_queries = int(config['freshDataset']['n_queries'])\n n_evaluations = int(config['freshDataset']['n_evaluations'])\n\n f_c_epochs = int(config['familyClassifier']['epochs'])\n f_c_train_split_proportion = int(config['familyClassifier']['train_split_proportion'])\n f_c_valid_split_proportion = int(config['familyClassifier']['valid_split_proportion'])\n f_c_test_split_proportion = int(config['familyClassifier']['test_split_proportion'])\n f_c_batch_size = int(config['familyClassifier']['batch_size'])\n\n c_l_epochs = int(config['contrastiveLearning']['epochs'])\n c_l_train_split_proportion = int(config['contrastiveLearning']['train_split_proportion'])\n c_l_valid_split_proportion = int(config['contrastiveLearning']['valid_split_proportion'])\n c_l_test_split_proportion = int(config['contrastiveLearning']['test_split_proportion'])\n c_l_batch_size = int(config['contrastiveLearning']['batch_size'])\n c_l_rank_size = int(config['contrastiveLearning']['rank_size'])\n c_l_knn_k_min = int(config['contrastiveLearning']['knn_k_min'])\n c_l_knn_k_max = int(config['contrastiveLearning']['knn_k_max'])\n\n # initialize Hash object\n ch = Hash()\n\n # update hash with the content of the config file (for the current net type)\n ch.update(json.dumps(dict(config.items('sorel20mDataset'))))\n # get config file sha256 digest\n dataset_config_sha = ch.get_b64()\n\n # update hash with the content of the config file (for the current net type)\n ch.update(json.dumps(dict(config.items(net_type))))\n # get config file sha256 digest\n config_sha = ch.get_b64()\n\n # update hash with the content of the config file (for the freshDataset)\n ch.update(json.dumps(dict(config.items('freshDataset'))))\n # get config file sha256 digest\n fresh_dataset_config_sha = ch.get_b64()\n\n # create copy of the current config hash digest\n ch_copy = ch.copy()\n\n # update hash with the content of the config file (for the freshDataset)\n ch.update(json.dumps(dict(config.items('familyClassifier'))))\n # get config file sha256 digest\n family_class_config_sha = ch.get_b64()\n\n # update hash with the content of the config file (for the freshDataset)\n ch_copy.update(json.dumps(dict(config.items('contrastiveLearning'))))\n # get config file sha256 digest\n contr_learn_config_sha = ch_copy.get_b64()\n\n # instantiate key-n_samples dict\n n_samples_dict = {'train': training_n_samples,\n 'validation': validation_n_samples,\n 'test': test_n_samples}\n\n # Note: The entrypoint names are defined in 
MLproject. The artifact directories\n # are documented by each step's .py file.\n\n # start mlflow run\n with mlflow.start_run() as active_run:\n # get code git commit version\n git_commit = active_run.data.tags.get(mlflow_tags.MLFLOW_GIT_COMMIT)\n\n # log config file\n mlflow.log_text(json.dumps({s: dict(config.items(s)) for s in config.sections()}), 'config.txt')\n\n # set dataset destination dir\n dataset_dir = os.path.join(base_dir, 'dataset')\n # set dataset base path (directory containing 'meta.db')\n dataset_base_path = os.path.join(dataset_dir, '09-DEC-2020', 'processed-data')\n # set pre-processed dataset base path (directory containing .dat files)\n pre_processed_dataset_dir = os.path.join(dataset_dir, '09-DEC-2020', 'pre-processed_dataset')\n # set fresh dataset base path (directory containing .dat files)\n fresh_dataset_dir = os.path.join(dataset_dir, 'fresh_dataset')\n\n # if pre-processed dataset files for this run parameters are not present, generate them\n if not preproc_check_files(destination_dir=pre_processed_dataset_dir,\n n_samples_dict=n_samples_dict):\n logger.info(\"Pre-processed dataset not found.\")\n\n # if the original Sorel20M dataset is not present, download it\n if not download_check_files(dataset_dir):\n logger.info(\"Dataset not found.\")\n\n # run dataset downloader\n download_dataset_run = run(\"download_dataset\", {\n 'destination_dir': dataset_dir\n }, config_sha=dataset_config_sha)\n\n # pre-process dataset\n preprocess_dataset_run = run(\"preprocess_dataset\", {\n 'ds_path': dataset_base_path,\n 'destination_dir': pre_processed_dataset_dir,\n 'training_n_samples': training_n_samples,\n 'validation_n_samples': validation_n_samples,\n 'test_n_samples': test_n_samples,\n 'batch_size': batch_size,\n 'remove_missing_features': str(os.path.join(dataset_base_path, \"shas_missing_ember_features.json\"))\n }, config_sha=dataset_config_sha)\n\n # if the fresh dataset is not present, generate it\n if not fresh_check_files(fresh_dataset_dir):\n logger.info(\"Fresh dataset not found.\")\n\n # generate fresh dataset\n build_fresh_dataset_run = run(\"build_fresh_dataset\", {\n 'dataset_dest_dir': fresh_dataset_dir\n }, config_sha=fresh_dataset_config_sha)\n\n # initialize results files dicts\n results_files = {}\n c_l_results_files = {}\n\n # instantiate common (between consecutive training runs) training parameters\n common_training_params = {\n 'ds_path': pre_processed_dataset_dir,\n 'net_type': net_type if similarity_measure == 'dot' else net_type + '_{}'.format(similarity_measure),\n 'gen_type': gen_type,\n 'batch_size': batch_size,\n 'epochs': epochs,\n 'training_n_samples': training_n_samples,\n 'validation_n_samples': validation_n_samples,\n 'use_malicious_labels': use_malicious_labels,\n 'use_count_labels': use_count_labels,\n 'workers': workers\n }\n\n # instantiate common (between consecutive training runs) evaluation parameters\n common_evaluation_params = {\n 'ds_path': pre_processed_dataset_dir,\n 'net_type': net_type if similarity_measure == 'dot' else net_type + '_{}'.format(similarity_measure),\n 'gen_type': gen_type,\n 'batch_size': batch_size,\n 'test_n_samples': test_n_samples,\n 'evaluate_malware': use_malicious_labels,\n 'evaluate_count': use_count_labels\n }\n\n # for each training run\n for training_run_id in range(runs):\n logger.info(\"initiating training run n. 
{}\".format(str(training_run_id)))\n\n # -- Model Training and Evaluation Steps -------------------------------------------------------------------\n # set training parameters\n training_params = common_training_params\n training_params.update({'training_run': training_run_id})\n\n # train network (get or run) on Sorel20M dataset\n training_run = get_or_run(\"train_network\",\n training_params,\n git_commit,\n ignore_git=bool(ignore_git),\n use_cache=bool(use_cache),\n resume=True,\n config_sha=config_sha)\n\n # get model checkpoints path\n checkpoint_path = parse.unquote(parse.urlparse(os.path.join(training_run.info.artifact_uri,\n \"model_checkpoints\")).path)\n\n # set model checkpoint filename\n checkpoint_file = os.path.join(checkpoint_path, \"epoch_{}.pt\".format(epochs))\n\n # set evaluation parameters\n evaluation_params = common_evaluation_params\n evaluation_params.update({'checkpoint_file': checkpoint_file})\n\n # evaluate model against Sorel20M dataset\n evaluation_run = get_or_run(\"evaluate_network\",\n evaluation_params,\n git_commit,\n ignore_git=bool(ignore_git),\n use_cache=bool(use_cache),\n config_sha=config_sha)\n\n # get model evaluation results path\n results_path = parse.unquote(parse.urlparse(os.path.join(evaluation_run.info.artifact_uri,\n \"model_results\")).path)\n\n # set model evaluation results filename\n results_file = os.path.join(results_path, \"results.csv\")\n\n # add file path to results_files dictionary (used for plotting mean results)\n results_files[\"run_id_\" + str(training_run_id)] = results_file\n\n # compute (and plot) all tagging results\n all_tagging_results_run = get_or_run(\"compute_all_run_results\", {\n 'results_file': results_file,\n 'use_malicious_labels': use_malicious_labels,\n 'use_tag_labels': 1\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=config_sha)\n # ----------------------------------------------------------------------------------------------------------\n\n # -- Model Evaluation using Fresh Dataset Steps ------------------------------------------------------------\n # evaluate model against fresh dataset\n fresh_evaluation_run = get_or_run(\"evaluate_fresh\", {\n 'fresh_ds_path': fresh_dataset_dir,\n 'checkpoint_path': checkpoint_file,\n 'net_type': net_type if similarity_measure == 'dot' else net_type + '_{}'.format(similarity_measure),\n 'min_n_anchor_samples': min_n_anchor_samples,\n 'max_n_anchor_samples': max_n_anchor_samples,\n 'n_query_samples': fresh_n_queries,\n 'n_evaluations': n_evaluations\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=fresh_dataset_config_sha)\n\n # get model evaluation results path\n fresh_results_path = parse.unquote(parse.urlparse(os.path.join(fresh_evaluation_run.info.artifact_uri,\n \"fresh_prediction_results\")).path)\n\n # set model evaluation results filename\n fresh_results_file = os.path.join(fresh_results_path, \"fresh_prediction_results.json\")\n\n # compute (and plot) all family prediction results (on fresh dataset)\n all_tagging_results_run = get_or_run(\"compute_all_run_fresh_results\", {\n 'results_file': fresh_results_file\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=fresh_dataset_config_sha)\n # ----------------------------------------------------------------------------------------------------------\n\n # -- Family Classifier Steps -------------------------------------------------------------------------------\n # create family classifier from previously trained 
network and train it on fresh dataset\n f_c_train_run = get_or_run(\"train_family_classifier\", {\n 'fresh_ds_path': fresh_dataset_dir,\n 'checkpoint_path': checkpoint_file,\n 'epochs': f_c_epochs,\n 'training_run': training_run_id,\n 'train_split_proportion': f_c_train_split_proportion,\n 'valid_split_proportion': f_c_valid_split_proportion,\n 'test_split_proportion': f_c_test_split_proportion,\n 'batch_size': f_c_batch_size\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=family_class_config_sha)\n\n # get model checkpoints path\n f_c_checkpoint_path = parse.unquote(parse.urlparse(os.path.join(f_c_train_run.info.artifact_uri,\n \"model_checkpoints\")).path)\n\n # set model checkpoint filename\n f_c_checkpoint_file = os.path.join(f_c_checkpoint_path, \"epoch_{}.pt\".format(f_c_epochs))\n\n # evaluate model against fresh dataset\n f_c_eval_run = get_or_run(\"evaluate_family_classifier\", {\n 'fresh_ds_path': fresh_dataset_dir,\n 'checkpoint_path': f_c_checkpoint_file,\n 'training_run': training_run_id,\n 'train_split_proportion': f_c_train_split_proportion,\n 'valid_split_proportion': f_c_valid_split_proportion,\n 'test_split_proportion': f_c_test_split_proportion,\n 'batch_size': f_c_batch_size\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=family_class_config_sha)\n\n # get model evaluation results path\n f_c_results_path = parse.unquote(parse.urlparse(os.path.join(f_c_eval_run.info.artifact_uri,\n \"family_class_results\")).path)\n\n # set model evaluation results filename\n f_c_results_file = os.path.join(f_c_results_path, \"results.csv\")\n\n # compute (and plot) all tagging results\n f_c_compute_results_run = get_or_run(\"compute_all_family_class_results\", {\n 'results_file': f_c_results_file,\n 'fresh_ds_path': fresh_dataset_dir\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=family_class_config_sha)\n # ----------------------------------------------------------------------------------------------------------\n\n # -- Contrastive Learning Steps ----------------------------------------------------------------------------\n # create family classifier from previously trained network and train it on fresh dataset\n c_l_train_run = get_or_run(\"train_contrastive_model\", {\n 'fresh_ds_path': fresh_dataset_dir,\n 'checkpoint_path': checkpoint_file,\n 'epochs': c_l_epochs,\n 'training_run': training_run_id,\n 'train_split_proportion': c_l_train_split_proportion,\n 'valid_split_proportion': c_l_valid_split_proportion,\n 'test_split_proportion': c_l_test_split_proportion,\n 'batch_size': c_l_batch_size\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=contr_learn_config_sha)\n\n # get model checkpoints path\n c_l_checkpoint_path = parse.unquote(parse.urlparse(os.path.join(c_l_train_run.info.artifact_uri,\n \"model_checkpoints\")).path)\n\n # set model checkpoint filename\n c_l_checkpoint_file = os.path.join(c_l_checkpoint_path, \"epoch_{}.pt\".format(c_l_epochs))\n\n # evaluate model against fresh dataset\n c_l_eval_run = get_or_run(\"evaluate_contrastive_model\", {\n 'fresh_ds_path': fresh_dataset_dir,\n 'checkpoint_path': c_l_checkpoint_file,\n 'training_run': training_run_id,\n 'train_split_proportion': c_l_train_split_proportion,\n 'valid_split_proportion': c_l_valid_split_proportion,\n 'test_split_proportion': c_l_test_split_proportion,\n 'batch_size': c_l_batch_size,\n 'rank_size': c_l_rank_size,\n 'knn_k_min': c_l_knn_k_min,\n 'knn_k_max': 
c_l_knn_k_max\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=contr_learn_config_sha)\n\n # get model evaluation results path\n c_l_results_path = parse.unquote(parse.urlparse(os.path.join(c_l_eval_run.info.artifact_uri,\n \"contrastive_learning_results\")).path)\n\n # set model evaluation results filename\n c_l_results_file = os.path.join(c_l_results_path, \"results.csv\")\n\n # compute (and plot) all tagging results\n c_l_compute_results_run = get_or_run(\"compute_contrastive_learning_results\", {\n 'results_file': c_l_results_file,\n 'fresh_ds_path': fresh_dataset_dir,\n 'knn_k_min': c_l_knn_k_min,\n 'knn_k_max': c_l_knn_k_max\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=contr_learn_config_sha)\n\n # get model evaluation results path\n c_l_scores_dir_path = parse.unquote(parse.urlparse(os.path.join(c_l_compute_results_run.info.artifact_uri,\n \"contrastive_learning_scores\")).path)\n\n # add dir path to c_l_results_files dictionary (used for plotting mean score trends)\n c_l_results_files[\"run_id_\" + str(training_run_id)] = c_l_scores_dir_path\n # ----------------------------------------------------------------------------------------------------------\n\n # create temp dir name using the value from config_sha (sha of some parts of the config file).\n # -> This is done in order to have a different (but predictable) run_to_filename at each set of runs with\n # different parameters. This allows mlflow to know when it is needed to run 'per_tag_plot_runs'. If, on the\n # other hand a simple tempfile.TemporaryDirectory() was used then mlflow would run 'per_tag_plot_runs' every\n # time, even if a precedent run was available (because the parameter 'run_to_filename_json' would be different)\n tempdir = os.path.join(base_dir, 'tmp_{}'.format(config_sha))\n # create temp dir\n os.makedirs(tempdir, exist_ok=True)\n\n # create contrastive learning temp dir name using the value from config_sha (sha of some parts of the config\n # file). -> This is done in order to have a different (but predictable) run_to_filename at each set of runs with\n # different parameters. This allows mlflow to know when it is needed to run 'per_tag_plot_runs'. 
If, on the\n # other hand a simple tempfile.TemporaryDirectory() was used then mlflow would run 'per_tag_plot_runs' every\n # time, even if a precedent run was available (because the parameter 'run_to_filename_json' would be different)\n c_l_tempdir = os.path.join(base_dir, 'tmp_{}'.format(contr_learn_config_sha))\n # create temp dir\n os.makedirs(c_l_tempdir, exist_ok=True)\n\n # set run-to-filename file path\n run_to_filename = os.path.join(tempdir, \"results.json\")\n\n # create and open the results.json file in write mode\n with open(run_to_filename, \"w\") as output_file:\n # save results_files dictionary as a json file\n json.dump(results_files, output_file)\n\n mlflow.log_artifact(run_to_filename, \"run_to_filename\")\n\n # set run-to-filename file path\n c_l_run_to_filename = os.path.join(c_l_tempdir, \"c_l_results.json\")\n\n # create and open the c_l_results.json file in write mode\n with open(c_l_run_to_filename, \"w\") as output_file:\n # save c_l_results_files dictionary as a json file\n json.dump(c_l_results_files, output_file)\n\n mlflow.log_artifact(c_l_run_to_filename, \"run_to_filename\")\n\n # if there is more than 1 run, compute also per-tag mean results\n if runs > 1:\n # plot all roc distributions\n per_tag_plot_runs = get_or_run(\"plot_all_roc_distributions\", {\n 'run_to_filename_json': run_to_filename,\n 'use_malicious_labels': use_malicious_labels,\n 'use_tag_labels': 1\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=config_sha)\n\n # plot all model mean scores trends\n plot_all_scores_trends = get_or_run(\"plot_all_contrastive_scores_trends\", {\n 'run_to_filename_json': c_l_run_to_filename,\n 'knn_k_min': c_l_knn_k_min,\n 'knn_k_max': c_l_knn_k_max\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=contr_learn_config_sha)\n\n # remove temp files and temporary directory\n os.remove(run_to_filename)\n # os.remove(fresh_run_to_filename)\n os.rmdir(tempdir)\n\n # remove contrastive learning temp files and temporary directory\n os.remove(c_l_run_to_filename)\n # os.remove(fresh_run_to_filename)\n os.rmdir(c_l_tempdir)",
"def main(unused_argv):\n\n # Create tokenizer based on the training files.\n logging.info(\"Step 1: Loading tokenizer\")\n train_en = FLAGS.data_dir+'/EN_TRAIN_CORPUS_NAME'\n val_en = FLAGS.data_dir+'/EN_VAL_CORPUS_NAME'\n\n VOCAB_FILE = \"VOCAB_NAME\" \n vocab_file = os.path.join(FLAGS.data_dir, VOCAB_FILE)\n subtokenizer = tokenizer.Subtokenizer.init_from_files(\n vocab_file, [train_en], _TARGET_VOCAB_SIZE, _TARGET_THRESHOLD,\n min_count=None if FLAGS.search else _TRAIN_DATA_MIN_COUNT)\n\n compiled_train_files = (train_en, train_en)\n compiled_eval_files = (val_en, val_en)\n\n # Tokenize and save data as Examples in the TFRecord format.\n logging.info(\"Step 3: Preprocessing and saving data\")\n train_tfrecord_files = encode_and_save_files(\n subtokenizer, FLAGS.data_dir, compiled_train_files, _TRAIN_TAG,\n _TRAIN_SHARDS)\n encode_and_save_files(\n subtokenizer, FLAGS.data_dir, compiled_eval_files, _EVAL_TAG,\n _EVAL_SHARDS)\n\n for fname in train_tfrecord_files:\n shuffle_records(fname)",
"def entry(self):\n if not os.path.isfile('model'):\n train()\n schedule.every(0.01).seconds.do(predict, self)\n while True:\n schedule.run_pending()",
"def __init__(self , model_file_name ):\n logging.set_verbosity(logging.ERROR)\n with TheTFGraph.as_default():\n with TheTFSession.as_default():\n self.model = keras.models.load_model( model_file_name + \".hdf5\" , compile=False )\n JSON = json.load( open(model_file_name + \".json\" ) )\n self.all_sites = list(JSON['all_sites'])\n self.all_errors = list(JSON['all_errors'])\n self.all_actions = list(JSON['all_actions'])\n self.IsBinary = bool(JSON['IsBinary'])\n self.TiersOnly = bool(JSON['TiersOnly'])\n self.Task = Task({} , \"TaskLoader\" , self)\n self.Name = model_file_name.split('/')[-1]\n self.ModelID = int( JSON['model'] )\n self.InputTrainingDataID = int( JSON['trainingdata'])\n\n self.Prediction = Prediction.Prediction( self.ModelID , self.InputTrainingDataID )",
"def evaluate(args, model, tokenizer, eval_dataset, eval_dataloader, task_name, model_type, split, step):\n model.eval()\n processor = MoralStoriesProcessor()\n results = dict()\n softmax = torch.nn.Softmax(dim=1)\n\n # Eval!\n logger.info('***** Running evaluation on the validation / test set *****')\n logger.info(' Num examples = %d', len(eval_dataset))\n logger.info(' Batch size = %d', args.eval_batch_size)\n batch_losses = list()\n eval_loss = 0.0\n micro_loss, macro_loss = 0.0, 0.0\n num_batches, num_tokens = 0, 0\n preds = None\n soft_preds = None\n out_label_ids = None\n # Perform a single evaluation step\n for batch in tqdm(eval_dataloader, desc='Evaluating', mininterval=10, ncols=100):\n batch = tuple(t.to(args.device) for t in batch)\n with torch.no_grad():\n if 'gen' not in task_name:\n inputs = {'input_ids': batch[0],\n 'attention_mask': batch[1],\n 'token_type_ids': batch[2] if model_type == 'bert' else None,\n 'labels': batch[3]}\n else:\n inputs = {'input_ids': batch[0],\n 'attention_mask': batch[1],\n 'labels': batch[3]}\n if 'gpt2' not in model_type:\n # Prepare decoder inputs and labels for enc-dec models\n inputs['labels'] = batch[3][:, 1:].contiguous() # shift\n decoder_input_ids = batch[3][:, :-1].clone() # shift\n decoder_input_ids[decoder_input_ids == -100] = tokenizer.pad_token_id # remove masking\n inputs['decoder_input_ids'] = decoder_input_ids.contiguous()\n\n outputs = model(**inputs)\n\n tmp_eval_loss, logits = outputs[:2]\n soft_logits = softmax(logits)\n eval_loss += tmp_eval_loss.mean().item()\n batch_losses.append(tmp_eval_loss.item())\n\n if 'gen' not in task_name:\n if preds is None:\n preds = logits.detach().cpu().numpy()\n soft_preds = soft_logits.detach().cpu().numpy()\n out_label_ids = inputs['labels'].detach().cpu().numpy()\n else:\n preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)\n soft_preds = np.append(soft_preds, soft_logits.detach().cpu().numpy(), axis=0)\n out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)\n else:\n # Obtain per-token loss for perplexity computation\n batch_loss = get_token_loss(args, logits, batch[3], batch[4], model_type=model_type)\n macro_loss += batch_loss.mean().item()\n micro_loss += batch_loss.sum().item()\n num_batches += 1\n num_tokens += batch_loss.view(-1).shape[0]\n\n # Compute and update evaluation metric values\n if 'gen' not in task_name:\n # Isolate model predictions\n preds = np.argmax(preds, axis=1)\n soft_preds = soft_preds.tolist()\n curr_result = compute_cls_metrics(preds, out_label_ids)\n else:\n macro_perplexity = torch.exp(torch.tensor(macro_loss / num_batches)).item()\n micro_perplexity = torch.exp(torch.tensor(micro_loss / num_tokens)).item()\n curr_result = {'macro_perplexity': macro_perplexity,\n 'micro_perplexity': micro_perplexity}\n\n if len(results.keys()) == 0:\n for k, v in curr_result.items():\n results[k] = [v]\n else:\n for k, v in curr_result.items():\n results[k].append(v)\n\n # Log metrics\n output_eval_file = os.path.join(args.output_dir, 'results_{}_{}.txt'.format(task_name, split))\n with open(output_eval_file, 'a') as writer:\n logger.info('***** Eval results *****')\n writer.write('STEP: {:s}\\n'.format(str(step)))\n for key in sorted(curr_result.keys()):\n logger.info(' %s = %s', key, str(curr_result[key]))\n writer.write('%s = %s\\n' % (key, str(curr_result[key])))\n\n # Log predictions\n if 'gen' not in task_name:\n output_pred_file = \\\n os.path.join(args.output_dir, 'predictions_{}_{}_{}.lst'.format(task_name, split, 
step))\n with open(output_pred_file, 'w') as writer:\n logger.info('***** Write predictions *****')\n for pred in preds:\n writer.write('{}\\n'.format(processor.get_labels()[pred]))\n\n # Maintain a single metrics file\n if os.path.exists(args.output_dir):\n with open(os.path.join(args.output_dir, 'metrics_{}_{}.json'.format(task_name, split)), 'w') as f:\n f.write(json.dumps(results))\n f.close()\n\n # Report mean dev loss\n mean_eval_loss = eval_loss / len(eval_dataloader)\n logging.info('\\n' + '*' * 10)\n logging.info('Mean development loss: {:.4f}'.format(mean_eval_loss))\n logging.info('*' * 10 + '\\n')\n\n return results, mean_eval_loss, preds, soft_preds",
"def train(args):\n print(args)\n\n # Run a training job\n configs = LuxMatchConfigs_Default\n\n # Create a default opponent agent\n opponent = Agent()\n\n # Create a RL agent in training mode\n player = AgentPolicy(mode=\"train\")\n\n # Train the model\n num_cpu = 1\n if num_cpu == 1:\n env = LuxEnvironment(configs=configs,\n learning_agent=player,\n opponent_agent=opponent)\n else:\n env = SubprocVecEnv([make_env(LuxEnvironment(configs=configs,\n learning_agent=AgentPolicy(mode=\"train\"),\n opponent_agent=opponent), i) for i in range(num_cpu)])\n run_id = args.id\n print(\"Run id %s\" % run_id)\n\n if args.path:\n # by default previous model params are used (lr, batch size, gamma...)\n model = PPO.load(args.path)\n model.set_env(env=env)\n\n # Update the learning rate\n model.lr_schedule = get_schedule_fn(args.learning_rate)\n\n # TODO: Update other training parameters\n else:\n model = PPO(\"MlpPolicy\",\n env,\n verbose=1,\n tensorboard_log=\"./lux_tensorboard/\",\n learning_rate=args.learning_rate,\n gamma=args.gamma,\n gae_lambda=args.gae_lambda,\n batch_size=args.batch_size,\n n_steps=args.n_steps\n )\n\n print(\"Training model...\")\n # Save a checkpoint every 1M steps\n checkpoint_callback = CheckpointCallback(save_freq=1000000,\n save_path='./models/',\n name_prefix=f'rl_model_{run_id}')\n model.learn(total_timesteps=args.step_count,\n callback=checkpoint_callback) # 20M steps\n if not os.path.exists(f'models/rl_model_{run_id}_{args.step_count}_steps.zip'):\n model.save(path=f'models/rl_model_{run_id}_{args.step_count}_steps.zip')\n print(\"Done training model.\")\n\n # Inference the model\n print(\"Inference model policy with rendering...\")\n saves = glob.glob(f'models/rl_model_{run_id}_*_steps.zip')\n latest_save = sorted(saves, key=lambda x: int(x.split('_')[-2]), reverse=True)[0]\n model.load(path=latest_save)\n obs = env.reset()\n for i in range(600):\n action_code, _states = model.predict(obs, deterministic=True)\n obs, rewards, done, info = env.step(action_code)\n if i % 5 == 0:\n print(\"Turn %i\" % i)\n env.render()\n\n if done:\n print(\"Episode done, resetting.\")\n obs = env.reset()\n print(\"Done\")\n\n '''\n # Learn with self-play against the learned model as an opponent now\n print(\"Training model with self-play against last version of model...\")\n player = AgentPolicy(mode=\"train\")\n opponent = AgentPolicy(mode=\"inference\", model=model)\n env = LuxEnvironment(configs, player, opponent)\n model = PPO(\"MlpPolicy\",\n env,\n verbose=1,\n tensorboard_log=\"./lux_tensorboard/\",\n learning_rate = 0.0003,\n gamma=0.999,\n gae_lambda = 0.95\n )\n\n model.learn(total_timesteps=2000)\n env.close()\n print(\"Done\")\n '''",
"def train(self, num_decisions=350):\n os.system(\"mkdir \" + self.folder_name + \"Train\")\n for i in range(5000):\n episode_folder_name = self.folder_name + \"Train/\" + str(i) + \"/\"\n all_system_states = []\n all_system_rewards = []\n all_system_states_cluster = []\n all_grid_states_cluster = []\n all_surrounding_states_cluster = []\n os.system(\"mkdir \" + episode_folder_name)\n filename = episode_folder_name + str(i) + \".h5\"\n self.system.reset_context(filename)\n self.system.run_decorrelation(20)\n grid_dist, surrounding_dist, _, _, _, _ = self.system.get_state_reward()\n state = self._get_state(grid_dist, surrounding_dist)\n for j in range(num_decisions):\n action_index = self._get_action(state, i)\n transition_to_add = [state, action_index]\n tag = \"_train_\" + str(j)\n actions = [self.all_actions[i] for i in action_index]\n try:\n self.system.update_action(actions)\n system_states, system_rewards, system_states_cluster = self.system.run_step(\n is_detailed=True, tag=tag)\n all_system_states.append(system_states)\n all_system_rewards.append(system_rewards)\n all_system_states_cluster.append(system_states_cluster)\n\n except OpenMMException:\n print(\"Broken Simulation at Episode:\",\n str(i), \", Decision:\", str(j))\n break\n\n grid_dist, surrounding_dist, grid_reward, surrounding_reward, grid_states_cluster, surrounding_states_cluster = self.system.get_state_reward()\n state = self._get_state(grid_dist, surrounding_dist)\n reward = self._get_reward(grid_reward, surrounding_reward)\n\n all_grid_states_cluster.append(grid_states_cluster)\n all_surrounding_states_cluster.append(surrounding_states_cluster)\n\n # Use len_reward for number of grids\n done = [False] * len(reward) # Never Done\n transition_to_add.extend([reward, state, done])\n rb_decision_samples = 0\n for rb_tuple in zip(*transition_to_add):\n self.buffer.push(*list(rb_tuple))\n\n for _ in range(self.update_num):\n self._update()\n self._save_episode_data(episode_folder_name)\n np.save(episode_folder_name + \"system_states\",\n np.array(all_system_states))\n np.save(episode_folder_name + \"system_rewards\",\n np.array(all_system_rewards))\n np.save(episode_folder_name + \"system_states_cluster\",\n np.array(all_system_states_cluster))\n np.save(episode_folder_name + \"grid_states_cluster\",\n np.array(all_grid_states_cluster, dtype=object))\n np.save(episode_folder_name + \"all_states_cluster\",\n np.array(all_surrounding_states_cluster))\n self._save_data()",
"def main():\r\n parser = get_parser()\r\n config = parser.parse_args(['--cfg', 'config.yaml'])\r\n result_filing.init_config_vars(config)\r\n run_id = config.info.run_id\r\n logger = custom_logger.CustomLogger(run_id+':'+file_id)\r\n\r\n operation = config.info.operation_type\r\n logger.info(\"Selected operation type %s.\"%(operation))\r\n if operation == const.TRAIN_OP:\r\n train.train_model(config)\r\n elif operation == const.DEPLOY_OP:\r\n test.test_model(config)"
] | [
"0.6183861",
"0.5807708",
"0.5665213",
"0.5628459",
"0.5554209",
"0.5551104",
"0.5526221",
"0.55122674",
"0.54981613",
"0.5495451",
"0.54880655",
"0.546243",
"0.54456717",
"0.54436725",
"0.5430598",
"0.5423562",
"0.5422723",
"0.5422035",
"0.541754",
"0.5381879",
"0.5361445",
"0.53582484",
"0.53567106",
"0.5345142",
"0.5323331",
"0.5314898",
"0.52941984",
"0.52901715",
"0.5287156",
"0.52788013"
] | 0.6123142 | 1 |
given workflow name and object process it by calling do_workflow() | def invoke_do_workflow(workflow_name, workflow_object, logger):
try:
success = workflow_object.do_workflow()
except Exception:
success = None
logger.error("error processing workflow %s", workflow_name, exc_info=True)
# Print the result to the log
if success:
logger.info("%s success %s" % (workflow_name, success)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def start_workflow(self, workflow_name, workflow_input, **params):\n raise NotImplementedError",
"def process_workflow(\n workflow_type, decision, settings, logger, client, token, maximum_page_size\n):\n # for the workflowType attempt to do the work\n if workflow_type is not None:\n\n logger.info(\"workflowType: %s\", workflow_type)\n\n # Instantiate and object for the workflow using eval\n # Build a string for the object name\n workflow_name = get_workflow_name(workflow_type)\n\n # Attempt to import the module for the workflow\n if import_workflow_class(workflow_name):\n # Instantiate the workflow object\n workflow_object = get_workflow_object(\n workflow_name,\n settings,\n logger,\n client,\n token,\n decision,\n maximum_page_size,\n )\n # Process the workflow\n invoke_do_workflow(workflow_name, workflow_object, logger)\n else:\n logger.info(\"error: could not load object %s\\n\", workflow_name)",
"def get_workflow_object(\n workflow_name, settings, logger, client, token, decision, maximum_page_size\n):\n module_name = \"workflow.\" + workflow_name\n module_object = importlib.import_module(module_name)\n workflow_class = getattr(module_object, workflow_name)\n # Create the object\n workflow_object = workflow_class(\n settings, logger, client, token, decision, maximum_page_size\n )\n return workflow_object",
"def __init__(self, workflow):\n self.workflow = workflow",
"def start_workflow(self, **params):\n raise NotImplementedError",
"def init_workflow():\n pass",
"def run_workflow(workflow_log_id):\n outputs = {}\n protocol = \"tcp\"\n\n workflow_log = WorkflowLog.objects.get(id=workflow_log_id)\n worker = workflow_log.performed_on\n\n WORKER_ENDPOINT = \"%s://%s:%s\" % (protocol, worker.ip, str(worker.port))\n WORKER_SECRET_KEY = worker.secret_key\n\n conn = BotConnection(WORKER_ENDPOINT, WORKER_SECRET_KEY)\n conn.connect()\n\n # Make a JSON\n request_header = {'workflow_log_id': workflow_log.id,\n 'workflow': slugify(workflow_log.workflow.title),\n 'workflow_log_time': workflow_log.date_created.strftime('%Y%m%d-%H%M%S'),\n 'script': {},\n 'hooks': {}, # see doc/HOOKS.md\n }\n\n # hooks for this workflow\n if workflow_log.workflow.pre_task:\n request_header['hooks']['pre_task'] = workflow_log.workflow.pre_task\n\n if workflow_log.workflow.post_task:\n request_header['hooks']['post_task'] = workflow_log.workflow.post_task\n\n ordered_workflows = order_workflow_tasks(workflow_log.workflow)\n\n workflow_log.date_started = timezone.now()\n for idx, workflow_task in enumerate(ordered_workflows):\n template = render_template(workflow_log, workflow_task)\n\n if workflow_task.task.is_builtin:\n m = importCode(template, \"test\")\n output = {}\n output['stdout'] = str(m.run())\n output['exit_code'] = workflow_log.SUCCESS\n else:\n request = request_header\n request['script']['id'] = idx\n request['script']['body'] = template\n\n output = send_script(request, conn)\n\n outputs['%i_%s' % (workflow_task.id, workflow_task.task.title)] = output\n\n # loop over all next wf_tasks and add this scripts output to inputs\n current = workflow_task\n while current.next_workflow_task:\n current = current.next_workflow_task\n\n # deepcopy dict to prevent runtime error\n inp = deepcopy(workflow_log.inputs)\n # loop key, value pairs and look if this output needs to be set as input\n for key, value in inp[str(current.id)]['string'].iteritems():\n if value == 'output_%s' % str(workflow_task.id):\n workflow_log.inputs[str(current.id)]['string'][key] = output['stdout']\n\n if 'exit_code' not in output or output['exit_code'] is not workflow_log.SUCCESS:\n workflow_log.exit_code = workflow_log.ERROR\n workflow_log.save()\n break\n else:\n workflow_log.exit_code = workflow_log.SUCCESS\n\n conn.close()\n\n workflow_log.date_finished = timezone.now()\n workflow_log.outputs = outputs\n workflow_log.save()\n\n # Notify user in case of failure\n if workflow_log.exit_code == workflow_log.ERROR:\n send_failiure_notification(workflow_log)",
"def run(self, data):\n\n if data and self.application:\n # Build tuples for embedding index\n if self.application.embeddings:\n data = [(x, element, None) for x, element in enumerate(data)]\n\n # Process workflow\n with st.spinner(\"Running workflow....\"):\n results = []\n for result in self.application.workflow(self.name, data):\n # Store result\n results.append(result)\n\n # Write result if this isn't an indexing workflow\n if not self.application.embeddings:\n st.write(result)\n\n # Store workflow results\n self.data = results",
"def execute_workflow(self):\n logging.info(f\"called {self.job} with {self.file_type}\")\n\n job_params = {\n \"job\": self.job,\n \"job_run_dao\": self.job_run_dao,\n \"incident_dao\": self.incident_dao,\n \"subprocess\": self.file_type\n }\n\n with SmsJobController(**job_params) as controller:\n self.job_run = controller.job_run\n\n try:\n self.process_map[self.job]()\n controller.job_run_result = controller.run_result_enum.SUCCESS\n except KeyError:\n raise KeyError",
"def get(name, config):\n\n process = Process()\n\n # Build workflow\n with st.spinner(\"Building workflow....\"):\n process.build(name, config)\n\n return process",
"def workflow_handle(\n self, workflow: WorkflowObject, postproc: Optional[RunObject] = None,\n groups: Optional[List[GroupObject]] = None\n ) -> Dict:\n obj = self.workflow_descriptor(workflow)\n # Add parameter declarations to the serialized workflow descriptor\n parameters = workflow.parameters.values() if workflow.parameters is not None else []\n obj[WORKFLOW_PARAMETERS] = [p.to_dict() for p in parameters]\n # Add parameter group definitions if defined for the workflow.\n parameter_groups = workflow.parameter_groups\n if parameter_groups is not None:\n obj[WORKFLOW_PARAGROUPS] = [\n {\n PARAGROUP_NAME: g.name,\n PARAGROUP_TITLE: g.title,\n PARAGROUP_INDEX: g.index\n } for g in parameter_groups]\n # Add serialization for post-processing workflow (if present).\n if postproc is not None:\n obj[POSTPROC_RUN] = self.runs.run_handle(run=postproc)\n # Add users' workflow groups if given.\n if groups is not None:\n obj.update(self.groups.group_listing(groups=groups))\n return obj",
"def run_wf(self,wfname):\n wf = self.workflows[wfname]\n self.logmethod('preparing workflow {} for execution'.format(wfname))\n stk,diag = wf.execution_stack()\n self.prepare_wf(wf,stk)\n wf.execute()\n self.logmethod('execution finished')",
"def test_get_workflow_definition_by_process_id(self):\n pass",
"def __init__(self, workflow, **kwds):\n self.kwds = kwds\n self.url = self.get('url', 'cmsweb.cern.ch')\n WorkflowManager.__init__(self, workflow, self.url)\n self.workload = getWorkload(self.url, workflow)\n self.cacheID = self.winfo.get('StepOneConfigCacheID', '')\n self.config = getConfig(self.url, self.cacheID)\n self.pileup_dataset = self._pileup_dataset()\n self.priority = self._priority()\n self.era = self.get('era', 'Summer12')\n self.lfn = self.get('lfn', '/store/mc')\n self.special_name = self.get('specialName', '')\n self.max_rss = self.get('maxRSS', 2300000)\n self.max_vsize = self.get('maxVSize', 4100000000)\n self.input_dataset = ''\n self.pileup_scenario = ''\n self.global_tag = self.get('globalTag', '')\n self.campaign = self.get('campaign', '')\n self.max_merge_events = self.get('maxMergeEvents', 50000)\n self.activity = self.get('activity', 'reprocessing')\n self.restrict = self.get('restrict', 'None')\n self.site_use = self.get('site', None)\n self.site_cust = self.get('site_cust', None)\n self.xrootd = self.get('xrootd', 0)\n self.ext_tag = self.get('ext', '')\n self.team = self.get('team', '')\n\n # perform various initialization\n self._init()\n\n # custom settings\n # Construct processed dataset version\n if self.pileup_scenario:\n self.pileup_scenario = self.pileup_scenario+'_' \n\n specialprocstring = kwds.get('specialName', '')\n if specialprocstring:\n self.special_name = specialprocstring + '_'\n\n # ProcessingString\n inprocstring = kwds.get('procstring', '')\n if inprocstring:\n self.procstring = inprocstring\n else:\n self.procstring = self.special_name + self.pileup_scenario +\\\n self.global_tag + self.ext_tag\n\n # ProcessingVersion\n inprocversion = kwds.get('procversion', '')\n if inprocversion:\n self.procversion = inprocversion\n else:\n self.procversion = self.dataset_version(self.era, self.procstring)",
"def test_change_workflow_definition(self):\n pass",
"def update_workflow_from_dict(\n self,\n workflow_dict,\n workflow_id=None,\n validate=True\n ):\n valid_def = {}\n if validate:\n valid_def = Definition.validate_workflow(workflow_dict)\n if valid_def is False:\n Log.an().error(\n 'invalid workflow:\\n%s', yaml.dump(workflow_dict)\n )\n return False\n\n else:\n valid_def = workflow_dict\n\n # insert workflow_id into dict if provided\n if workflow_id:\n valid_def['workflow_id'] = workflow_id\n\n # make sure steps of workflow are valid, update app IDs\n if not self.synchronize_workflow_with_db(valid_def):\n Log.an().error(\n 'cannot synchronize workflow with data source: workflow_name=%s',\n valid_def['name']\n )\n return False\n\n # update workflow record\n if not self.update_workflow(\n valid_def['workflow_id'],\n {\n 'name': valid_def['name'],\n 'description': valid_def['description'],\n 'username': valid_def['username'],\n 'git': valid_def['git'],\n 'inputs': json.dumps(valid_def['inputs']),\n 'parameters': json.dumps(valid_def['parameters']),\n 'final_output': json.dumps(valid_def['final_output']),\n 'apps': json.dumps(valid_def['apps']),\n 'public': valid_def['public'],\n 'enable': valid_def['enable'],\n 'test': valid_def['test'],\n 'version': valid_def['version']\n }\n ):\n Log.an().error(\n 'cannot update workflow: workflow_id=%s',\n valid_def['workflow_id']\n )\n return False\n\n # update steps, create map of steps\n step_name2id = self.update_workflow_steps_from_dict(valid_def)\n if not step_name2id:\n Log.an().error(\n 'cannot update workflow steps: workflow_name=%s',\n valid_def['name']\n )\n return False\n\n # delete dependencies\n if not self.delete_depend_by_workflow_id(valid_def['workflow_id']):\n Log.an().error(\n 'cannot delete step dependencies for workflow: workflow_id=%s',\n valid_def['workflow_id']\n )\n return False\n\n # insert dependency records\n if not self.import_step_depends_from_dict(valid_def, step_name2id):\n Log.an().error(\n 'cannot import step dependencies for workflow: workflow_id=%s',\n valid_def['workflow_id']\n )\n return False\n\n return True",
"def add_workflow(self, workflow):\n self.workflow_manager.add_workflow(workflow)",
"def test_get_workflow_definition(self):\n pass",
"def test_deploy_workflow_definition(self):\n pass",
"def patch_workflow(obj):\n\n import types\n\n def run_bootstrap(self, bootstrap):\n X = self.design.iloc[:, bootstrap]\n Y = self.response.iloc[:, bootstrap]\n utils.Debug.vprint('Calculating betas using MEN', level=0)\n self.kvs.sync_processes(\"pre-bootstrap\")\n return ElasticNetRunner().run(X, Y, self.kvs)\n\n obj.run_bootstrap = types.MethodType(run_bootstrap, obj)",
"def get_workflow(self, playbook_name, workflow_name):\n key = _WorkflowKey(playbook_name, workflow_name)\n if key in self.workflows:\n return self.workflows[key]\n return None",
"def process_task(params):\n params['task'](params)",
"def test_workflows_post(self):\n pass",
"def oc_process(row):\n conf = {\"flagForCutting\": \"false\",\n \"flagForReview\": \"false\",\n \"publishToEngage\": \"true\",\n \"publishToHarvesting\": \"true\",\n \"straightToPublishing\": \"true\"}\n process = {\"workflow\": row[\"workflow\"], \"configuration\": conf}\n return process",
"def inferelator_workflow(regression=RegressionWorkflow, workflow=WorkflowBase):\n return create_inferelator_workflow(regression=regression, workflow=workflow)()",
"def import_workflow_class(workflow_name):\n try:\n module_name = \"workflow.\" + workflow_name\n importlib.import_module(module_name)\n return True\n except ImportError:\n return False",
"def new_workflow(self, upload_file, name=\"\", description=\"\", submit=None):\n data = upload_file.file.read()\n if not name:\n name = upload_file.filename.replace(\".xml\", \"\")\n workflow = Workflow(name=name, description=description,\n data=data,\n created_by=identity.current.user.id)\n log.info(\"Saved new workflow %d\", workflow.id)\n raise redirect(\"/workflow/%d\" % workflow.id)",
"def workflow_step(self, workflow_step):\n\n self._workflow_step = workflow_step",
"def test_workflows_get(self):\n pass",
"def test_workflows_get(self):\n pass"
] | [
"0.6806819",
"0.6798629",
"0.66735774",
"0.6562618",
"0.6457929",
"0.61686677",
"0.61226225",
"0.61045694",
"0.60235626",
"0.60008824",
"0.59290653",
"0.58635896",
"0.57689816",
"0.5767123",
"0.57602173",
"0.5752276",
"0.57437325",
"0.57329977",
"0.568696",
"0.5669722",
"0.56604236",
"0.56468",
"0.55966157",
"0.5583702",
"0.5568597",
"0.5558301",
"0.55478734",
"0.552863",
"0.5527188",
"0.5527188"
] | 0.75934297 | 0 |
trim data from a copy of decision prior to logging if not debug | def trimmed_decision(decision, debug=False):
decision_trimmed = copy.deepcopy(decision)
if not debug:
# removed to limit verbosity
decision_trimmed["events"] = []
return decision_trimmed | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clean_timelog():",
"def cleaning (data):",
"def cleanup(self, data):\n tmp = copy.copy(data)\n for field in ('log_entries', 'instances',\n 'picked_instances', 'saved_instances',\n 'terminated_instances', 'skipped_instances'):\n if field in tmp:\n del tmp[field]\n return tmp",
"def _get_cleaned_logs(self, log, logstart, logend):\n start = log.find(logstart) + len(logstart)\n normal_log = log[start:].replace(logend, '')\n if normal_log.strip() != '' or self.session.run_counter == 1:\n return normal_log\n lastlogend = self.LOGEND%(self.session.uuid, self.session.run_counter - 1)\n start = log.find(lastlogend) + len(lastlogend)\n return log[start:].replace(logstart, '').replace(logend, '')",
"def clean():\n filter_phase_data()\n combine_phase_data()\n remove_duplicates_phase_data()",
"def _wipe_log(self):\n self.temp_log.clear()\n self.temp_log = [[], []] # init a 2D array",
"def _trimRecords(self):\n self.highpassrecords.resize(self.nhighpassrecords, refcheck=False)\n self.lowpassrecords.resize(self.nlowpassrecords, refcheck=False)\n self.digitalsvalrecords.resize(self.ndigitalsvalrecords, refcheck=False)\n # cleanup by deleting any struct arrays of len 0\n for recname in ('highpassrecords', 'lowpassrecords', 'digitalsvalrecords'):\n if len(self.__getattribute__(recname)) == 0:\n self.__delattr__(recname)",
"def _normalize_data_to_send(info):\n # Remove the parts of the data that are unbounded in size.\n info = copy.deepcopy(info)\n for key in ['model_config', 'epoch_history']:\n if key in info:\n del info[key]\n return info",
"def unusedFromKDOTDataPreparation():",
"def clean(self):\n\n if (self.clean_level == 'dusty') | (self.clean_level == 'clean'):\n idx, = np.where(self['B_flag'] == 0)\n self.data = self[idx, :]\n\n return",
"def trim_support(\n dict_, data, logit, bins=25, trim=True, reestimate_p=False, show_output=False\n):\n # Find common support\n prop_score = data[\"prop_score\"]\n common_support = _define_common_support(dict_, data, bins, show_output)\n\n # Trim the data. Recommended.\n if trim is True:\n # data, prop_score = trim_data(prop_score, common_support, data)\n data = data[\n (data.prop_score >= common_support[0])\n & (data.prop_score <= common_support[1])\n ]\n prop_score = prop_score[\n (prop_score >= common_support[0]) & (prop_score <= common_support[1])\n ]\n\n # Optional. Not recommended\n # Re-estimate baseline propensity score on the trimmed sample\n if reestimate_p is True:\n # Re-estimate the parameters of the decision equation based\n # on the new trimmed data set\n data = estimate_treatment_propensity(dict_, data, logit, show_output)\n\n else:\n pass\n else:\n pass\n\n data = data.sort_values(by=\"prop_score\", ascending=True)\n prop_score = prop_score.sort_values(axis=0, ascending=True)\n X = data[dict_[\"TREATED\"][\"order\"]]\n Y = data[[dict_[\"ESTIMATION\"][\"dependent\"]]]\n\n return X, Y, prop_score",
"def _handle_dump_unknown(self, data, original):\n for key, val in original.items():\n if key not in self.fields:\n data[key] = val\n return data",
"def trim_dump_only(self, data, **kwargs):\n if isinstance(data, str) or data is None:\n return data\n name = data.pop(\"name\", None)\n data.pop(\"version\", None)\n # CliV2AnonymousEnvironment is a default name for anonymous environment\n if name is not None and name != ANONYMOUS_ENV_NAME:\n module_logger.warning(\n \"Warning: the provided asset name '%s' will not be used for anonymous registration\",\n name,\n )\n return super(AnonymousEnvironmentSchema, self).trim_dump_only(data, **kwargs)",
"def discard_partial_logs(logbook):\n trim = logbook.copy()\n\n times = np.array(\n list(\n itertools.chain(\n *[logbook[trip_id]['latest_information_time'].values for trip_id in logbook.keys()]\n )\n )\n ).astype(int)\n first, last = np.min(times), np.max(times)\n\n for trip_id in logbook.keys():\n if logbook[trip_id]['latest_information_time'].astype(int).isin([first, last]).any():\n trim.pop(trip_id)\n\n return trim",
"def full_clean():\n response_to_df_csv()\n dirty_data = pd.read_csv(\"./data/dirty_data.csv\")\n cleaned_data = dirty_data\n cleaned_data = drop_cols(cleaned_data)\n cleaned_data = lowercase_columns(cleaned_data)\n cleaned_data = make_numeric(cleaned_data)\n cleaned_data = drop_zero_pay(cleaned_data)\n cleaned_data = add_log_col(cleaned_data)\n cleaned_data.to_csv('./data/cleaned_for_testing.csv')\n\n return cleaned_data",
"def test_trim_filter(self):\n expected_filter = (\n \"{inspec} {trim}=start={pi}:duration={d},{setpts}=PTS-STARTPTS \"\n \"{outspec}\".format(\n inspec=self.segment.input_stream_specifier(),\n trim=self.segment._TRIM, setpts=self.segment._SETPTS,\n pi=self.EXPECTED_PUNCH_IN.total_seconds(),\n d=self.EXPECTED_DURATION,\n outspec=self.segment.output_stream_specifier()))\n self.assertEqual(self.segment.trim_filter(), expected_filter)",
"def _to_trim(self):\n self._status = 2\n fm.seg2trim(self._basename)\n self._status = 3",
"def _trimTime(time,data,tStart,tStop):\t\n\tif tStart is None:\n\t\tiStart=0;\n\t\tiStop=len(time);\n\telse:\n\t\t# determine indices of cutoff regions\n\t\tiStart=_process.findNearest(time,tStart); # index of lower cutoff\n\t\tiStop=_process.findNearest(time,tStop);\t # index of higher cutoff\n\t\t\n\t# trim time\n\ttime=time[iStart:iStop];\n\t\n\t# trim data\n\tif type(data) is not list:\n\t\tdata=[data];\n\tfor i in range(0,len(data)):\n\t\tdata[i]=data[i][iStart:iStop];\n\t\t\n\treturn time, data",
"def trim_data_back_to(monthToKeep):\n global g_failed_tests_info_dict\n current_time = time.time() # unit in seconds\n\n oldest_time_allowed = current_time - monthToKeep*30*24*3600 # in seconds\n\n clean_up_failed_test_dict(oldest_time_allowed)\n clean_up_summary_text(oldest_time_allowed)",
"def trim_alt(ds, data_vars=[\"Altitude_m\", \"Counts\", \"Temperature_C\"]):\n\n if \"trim_method\" in ds.attrs:\n trm_list = ds.attrs[\"trim_method\"]\n\n if not isinstance(trm_list, list): # make sure it is a list before looping\n trm_list = [trm_list]\n\n for trm_meth in trm_list:\n if trm_meth.lower() == \"altitude\":\n print(\"Trimming using altitude data\")\n altitude = ds[\n \"Altitude_m\"\n ] # need to use atltitude values before starting trimming\n for var in data_vars:\n ds[var] = ds[var].where(~(altitude < ds.attrs[\"Deadzone_m\"]))\n ds[var] = ds[var].where(~(altitude > ds.attrs[\"Range_m\"]))\n print(f\"Trimming {var}\")\n\n histtext = \"Trimmed altimeter data using Altimeter_m = 0.\"\n\n ds = utils.insert_history(ds, histtext)\n\n elif trm_meth.lower() == \"bin range\":\n print(\"Trimming using good_bins of %s\" % str(ds.attrs[\"good_bins\"]))\n if \"bins\" in ds.coords:\n # trim coordinate bins\n ds = ds.isel(\n bins=slice(ds.attrs[\"good_bins\"][0], ds.attrs[\"good_bins\"][1])\n )\n # reset Bin_count attribute\n ds.attrs[\"Bin_count\"] = (\n ds.attrs[\"good_bins\"][1] - ds.attrs[\"good_bins\"][0]\n )\n\n histtext = (\n \"Removed extra bins from altimeter data using good_bins attribute.\"\n )\n\n ds = utils.insert_history(ds, histtext)\n\n else:\n print(\"Did not trim altimeter data\")\n\n return ds",
"def _prune(self):\n while len(self.data) > self.limit:\n self.data.popleft()",
"def __analytics(self):\n\n # timestamps is not found\n if self.__timestamps is None:\n return\n\n if len(self.__timestamps) == 0:\n Log.w(\"There are not good enough portions to cut. Try changing the configurations.\")\n return\n\n Log.i(f\"Clipping a total of {len(self.__timestamps)} sub portion(s).\")\n Log.i(f\"Output video length would be approx. :: {self.__output_length}s or {float(self.__output_length / 60)}m\")\n Log.i(f\"Percent of video trimmed :: {100 - ((self.__output_length * 100) / self.__actual_length)}%\")",
"def manipulate_data(data):\n log.info(\"Doing some fun stuff here!\")\n return data",
"def trim_features():\n pass",
"def retain_if(self, condition):\n for item in(self.data_):\n if(item !=None):#Ignore space that we haven't filled yet\n if not condition(item):\n self.data_.remove(item)\n self.size_-=1",
"def test_trim_row(self):\n row = {'PROJ_NO': 'A'*500, 'LOCATION': 'B'*500, 'CHANGE_DATE': 'C'*500,\n 'PROJ_NAME1': 'D'*500, 'PCV_NAME': 'E'*500, 'SUMMARY': 'F'*500,\n 'STATE': 'G'*500, 'OVERS_PART': 'H'*500, 'OVERS_PCT': 'I'*500,\n 'PROJ_REQ': 'J'*500, 'UNIDENT_BAL': 'K'*500,\n 'ATTRIBUTE4': 'L'*500, 'SECTOR': 'M'*500, 'SUB_SECTOR': 'N'*500,\n 'LAST_UPDATED_FROM_PAYGOV': 'O'*500}\n should_trim = ('PROJ_NO', 'LOCATION', 'PROJ_NAME1', 'PCV_NAME',\n 'STATE', 'SECTOR')\n no_trim = [key for key in row if key not in should_trim]\n logger = Mock()\n row = sync.trim_row(row, logger)\n for key in should_trim:\n self.assertTrue(len(row[key]) < 500)\n for key in no_trim:\n self.assertEqual(len(row[key]), 500)\n self.assertEqual(logger.warning.call_count, len(should_trim))",
"def filter_data(data: AnnData) -> None:\n\n assert \"passed_qc\" in data.obs\n data._inplace_subset_obs(data.obs[\"passed_qc\"].values)\n data._inplace_subset_var((data.var[\"n_cells\"] > 0).values)\n logger.info(\n \"After filteration, {nc} cells and {ng} genes are kept. Among {ng} genes, {nrb} genes are robust.\".format(\n nc=data.shape[0], ng=data.shape[1], nrb=data.var[\"robust\"].sum()\n )\n )",
"def pre_mutation(context):\n line = context.current_source_line.strip()\n if context.current_line_index != 0:\n prev_line = context.source_by_line_number[context.current_line_index - 1].strip()\n else:\n prev_line = \"\"\n\n if line.startswith(\"logger.\") or prev_line.startswith(\"logger.\"):\n context.skip = True\n if line.startswith(\"logger = structlog\"):\n context.skip = True\n if line.startswith(\"cls.__doc__\"):\n context.skip = True\n\n # This file is copied verbatim and is not tested\n if context.filename.endswith(\"crypt.py\"):\n context.skip = True",
"def _collect_outputs(self, outputs):\n #only keep output on the last step\n outputs = outputs[-1]\n logging.debug(outputs)\n assert(len(outputs) == 2)\n return outputs",
"def ignorableWhitespace(self, data):\n pass"
] | [
"0.5868072",
"0.5775717",
"0.5558721",
"0.55497444",
"0.5545323",
"0.5411763",
"0.5360599",
"0.53106064",
"0.52514195",
"0.52372736",
"0.5230301",
"0.51863253",
"0.5142404",
"0.5125108",
"0.51198083",
"0.51118124",
"0.5096022",
"0.50936425",
"0.5068333",
"0.5065867",
"0.5047958",
"0.5038066",
"0.502929",
"0.500859",
"0.5000147",
"0.49965864",
"0.49904364",
"0.4984601",
"0.49752653",
"0.49480745"
] | 0.672401 | 0 |
Given a poll_for_decision_task response, check if there is a nextPageToken and if so, recursively poll for all workflow events, and assemble a final decision response to return | def get_all_paged_events(
decision, client, domain, task_list, identity, maximum_page_size
):
# First check if there is no nextPageToken, if there is none
# return the decision, nothing to page
next_page_token = None
try:
next_page_token = decision["nextPageToken"]
except KeyError:
next_page_token = None
if next_page_token is None:
return decision
# Continue, we have a nextPageToken. Assemble a full array of events by continually polling
all_events = decision["events"]
while next_page_token is not None:
try:
next_page_token = decision["nextPageToken"]
if next_page_token is not None:
decision = client.poll_for_decision_task(
domain=domain,
taskList={"name": task_list},
identity=identity,
nextPageToken=next_page_token,
maximumPageSize=maximum_page_size,
)
for event in decision["events"]:
all_events.append(event)
except KeyError:
next_page_token = None
# Finally, reset the original decision response with the full set of events
decision["events"] = all_events
return decision | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def poll_for_decision_task(domain, task_list, identity=None,\n next_page_token=None, maximum_page_size=1000,\n reverse_order=False):\n kwargs = {}\n\n for aws_prop, value, conversion in (\n ('identity', identity, None),\n ('maximumPageSize', maximum_page_size, None),\n ('reverseOrder', reverse_order, None),\n ('nextPageToken', next_page_token, None)):\n\n kwargs = check_and_add_kwargs(aws_prop, value, conversion, kwargs)\n\n result = make_request(\n SWF.poll_for_decision_task,\n domain=domain,\n taskList=task_list,\n **kwargs)\n\n if result.success:\n return result.result\n\n return None",
"def respond_decision_task_completed(token, decisions, execution_context=None):\n kwargs = {}\n\n if execution_context:\n kwargs['executionContext'] = execution_context\n\n result = make_request(\n SWF.respond_decision_task_completed,\n taskToken=token,\n decisions=decisions,\n **kwargs)\n\n if result.success:\n return result.result\n\n return None",
"def parse(self, response):\n meeting_types = ['admin-opp-committee-meeting', 'audit-committee', 'board-meeting']\n\n data = json.loads(response.text)\n for item in data:\n if (item.get('category') != [] and item.get('category')[0] in meeting_types):\n name, dt_time = self._parse_name_time(item['title'])\n start = self._parse_time_dict(self._parse_datetime(item['start']), dt_time)\n end = self._parse_time_dict(self._parse_datetime(item['end']), dt_time)\n end['date'] = start['date']\n if start['time'] == end['time']:\n end['time'] = None\n item_data = {\n '_type': 'event',\n 'name': name,\n 'description': item['description'],\n 'classification': self._parse_classification(item.get('category')[0]),\n 'start': start,\n 'end': end,\n 'all_day': False,\n 'timezone': self.timezone,\n 'sources': self._parse_sources(item)\n }\n item_data['status'] = self._generate_status(item_data)\n item_data['id'] = self._generate_id(item_data)\n\n # If it's a board meeting, return description\n if item['category'][0] in ['board-meeting', 'admin-opp-committee-meeting']:\n yield self._board_meeting(item_data)\n else:\n # Request each relevant event page,\n # including current data in meta attr\n req = scrapy.Request(\n item['url'],\n callback=self._parse_event,\n dont_filter=True,\n )\n req.meta['item'] = item_data\n yield req",
"def poll(self):\n if not self.pollable:\n self.logger.warning('Poll has been triggered but the bot is not pollable! Return;')\n return\n self._busy_mutext.acquire()\n try:\n self.logger.info('Stating poll')\n for scope in self.ScopeCollector.collect_all():\n stats_collection = []\n for stat_class in self._statistics:\n for parent in scope.hierarchy:\n if stat_class.EndpointScope == parent.__class__:\n statistics = stat_class(**parent.query) # TODO: Init from scope\n statistics.set_endpoint_scope(parent)\n self.logger.debug(f'Collecting statistics: {statistics}')\n statistics.collect()\n stats_collection.append(statistics)\n for task_cls in self.get_conditional_tasks(scope):\n task = task_cls(scope, stats_collection)\n task.handle()\n\n self.logger.info('Finished poll')\n\n finally:\n self._busy_mutext.release()",
"def run(self):\n\n poll = self.poll()\n\n if not 'events' in poll:\n return True\n\n history = self.get_history(poll)\n activity_states = self.get_activity_states(history)\n workflow_execution_info = self.get_workflow_execution_info(poll)\n context = event.get_current_context(history)\n\n if workflow_execution_info is not None:\n context.update(workflow_execution_info)\n\n decisions = swf.Layer1Decisions()\n\n try:\n for current in activity.find_available_activities(\n self.flow, activity_states, context):\n\n decisions.schedule_activity_task(\n current.id, # activity id.\n current.activity_name,\n self.version,\n task_list=current.activity_worker.task_list,\n input=json.dumps(current.create_execution_input()),\n heartbeat_timeout=str(current.heartbeat_timeout),\n start_to_close_timeout=str(current.timeout),\n schedule_to_start_timeout=str(current.schedule_to_start),\n schedule_to_close_timeout=str(current.schedule_to_close))\n else:\n activities = list(\n activity.find_uncomplete_activities(\n self.flow, activity_states, context))\n if not activities:\n decisions.complete_workflow_execution()\n except Exception as e:\n decisions.fail_workflow_execution(reason=str(e))\n\n self.complete(decisions=decisions)\n return True",
"def get_history(self, poll):\n\n events = poll['events']\n while 'nextPageToken' in poll:\n poll = self.poll(next_page_token=poll['nextPageToken'])\n\n if 'events' in poll:\n events += poll['events']\n\n # Remove all the events that are related to decisions and only.\n return [e for e in events if not e['eventType'].startswith('Decision')]",
"def decide(self):\n meeting_state = self.meeting.get_state()\n\n if not self.context.model.is_editable():\n raise Unauthorized(\"Editing is not allowed\")\n\n error_response = self._checkin_proposal_document_before_deciding()\n if error_response:\n return error_response\n\n self.agenda_item.decide()\n\n response = JSONResponse(self.request)\n if self.agenda_item.has_proposal:\n response.info(\n _(u'agenda_item_proposal_decided',\n default=u'Agenda Item decided and excerpt generated.'))\n else:\n response.info(_(u'agenda_item_decided',\n default=u'Agenda Item decided.'))\n\n if meeting_state != self.meeting.get_state():\n response.redirect(self.context.absolute_url())\n msg = _(u'agenda_item_meeting_held',\n default=u\"Agendaitem has been decided and the meeting has been held.\")\n api.portal.show_message(message=msg, request=self.request, type='info')\n\n return response.dump()",
"def perform_scraping(current_session):\r\n\r\n # List Array storing all relevant decision information\r\n final_data_fetch = []\r\n pagination_index = global_constants['NUMBER_PAGE_TO_SCRAPE_FIRST']\r\n while pagination_index < global_constants['NUMBER_PAGE_TO_SCRAPE_LAST']:\r\n print(\"Page:\", pagination_index, \" Collected records:\", len(final_data_fetch))\r\n\r\n # Get relevant admit-reject page based on pagination value\r\n result = current_session.get(global_constants['ALL_RESULTS_URL'] + str(pagination_index),\r\n headers=dict(referer=global_constants['ALL_RESULTS_URL']))\r\n tree = lxml_html.fromstring(result.content)\r\n\r\n # Get Nodes containing individual decisions for each page(approx 20 per page)\r\n decision_buckets = tree.xpath('//*[@class=\"row\"]/div[@class=\"col-sm-6\"]/div[@class=\"panel panel-warning\"]/div[@class=\"panel-body\"]')\r\n\r\n # If decision buckets are empty, captcha page has been encountered\r\n if len(decision_buckets) == 0:\r\n print(\"Captcha Time\")\r\n time.sleep(120)\r\n continue\r\n\r\n for individual_decision_bucket in decision_buckets:\r\n\r\n current_admit_status = ((individual_decision_bucket.xpath('./div[1]/div[2]/label'))[0]).text.strip()\r\n\r\n # Fetch results only if ADMIT or REJECT\r\n if current_admit_status.lower() == 'admit' or current_admit_status.lower() == 'reject':\r\n\r\n # Get relevant information from html page returned in response\r\n current_bucket_university_course = ((individual_decision_bucket.xpath('./div[1]/div[1]/h4/small'))[0]).text.replace(\"\\n\",\"\").strip()\r\n current_gre = get_gre_or_toefl(((((individual_decision_bucket.xpath('./div[2]/div[1]'))[0]).getchildren())[1]).tail)\r\n current_toefl = get_gre_or_toefl(((((individual_decision_bucket.xpath('./div[2]/div[2]'))[0]).getchildren())[1]).tail)\r\n current_gpa = get_gpa(((((individual_decision_bucket.xpath('./div[2]/div[3]'))[0]).getchildren())[1]).tail)\r\n current_workex = get_workex_months(((((individual_decision_bucket.xpath('./div[2]/div[4]'))[0]).getchildren())[1]).tail)\r\n\r\n current_university, current_course = split_bucket_university_course(current_bucket_university_course.lower())\r\n # Append decision information to final bucket only if minimum criteria met\r\n if current_university is not None and filter_criteria_met(current_gre, current_gpa, current_toefl):\r\n\r\n # Get UG College from profile of user\r\n profile_page_path = ((individual_decision_bucket.xpath('./div[1]/div[1]/h4/a'))[0]).attrib['href']\r\n profile_result = current_session.get(global_constants['HOME_PAGE'] + profile_page_path,\r\n headers=dict(referer=global_constants['PAST_RESULTS_URL']))\r\n profile_tree = lxml_html.fromstring(profile_result.content)\r\n ug_details_bucket = (profile_tree.xpath('//div[@class=\"col-sm-12 card\"][1]'))\r\n if len(ug_details_bucket) >= 1:\r\n ug_details_bucket = ug_details_bucket[0]\r\n current_ug_course = ((ug_details_bucket.xpath('./div[1]/div[7]/p[1]/b[1]'))[0]).text.replace(\"\\n\", \"\").strip()\r\n current_ug_college = ((ug_details_bucket.xpath('./div[1]/div[7]/p[2]'))[0]).text.replace(\"\\n\", \"\").strip()\r\n\r\n final_data_fetch.append([current_course, current_university, current_gpa, current_gre, current_toefl,\r\n current_workex, current_ug_course, current_ug_college, current_admit_status])\r\n\r\n # Add sleep time to allow for web scraping in undetected manner\r\n sleep_delay = random.choice([0, 1, 2, 3])\r\n time.sleep(sleep_delay)\r\n pagination_index += 1\r\n\r\n # Export final_data to excel sheet\r\n 
export_to_file(final_data_fetch)",
"def get_continuity_final_response(response, auth):\n while True:\n LOGGER.debug(\"Continuing getting response...\")\n response = do_get_le_url(response.json()['links'][0]['href'], get_le_api_key(auth))\n if response.status_code != 200:\n LOGGER.info(\"Response code is not 200, returning. Last response: %r\", response)\n return None\n\n if 'links' not in response.json():\n LOGGER.info(\"Got final response, returning.\")\n return response\n else:\n LOGGER.debug(\"links attribute found in a subsequent continue request. Should \"\n \"continue polling.\")\n time.sleep(1) # sleeping here to give Logentries some time to finish the query.\n continue",
"def parse_competition(self, response):\n # gather events from the competition page\n event_urls = response.css('div.navilevel1 p a::attr(href)').getall()\n event_titles = response.css('div.navilevel1 p a::text').getall()\n\n for event_url, event_title in zip(event_urls, event_titles):\n # assemble direct URL for this event\n full_event_url = response.urljoin(event_url)\n\n # pass along metadata for use in next steps\n event_details = parse_qs(urlsplit(full_event_url).query)\n event_title = regex_replace(event_title)\n\n if treatable_event(event_title):\n response.meta.update(dict(instance_of_event_in_competition=detect_event_multiple(event_title),\n event_title=clean_event_title(event_title),\n event_gender=event_details.get(\"gen\", [np.nan])[0]))\n\n # scrape the event page\n yield scrapy.Request(url=full_event_url,\n callback=self.parse_event,\n meta=response.meta)",
"def get_last_response(self, task_number):\r\n last_response = \"\"\r\n task_state = self.task_states[task_number]\r\n task_xml = self.task_xml[task_number]\r\n task_type = self.get_tag_name(task_xml)\r\n\r\n children = self.child_modules()\r\n\r\n task_descriptor = children['descriptors'][task_type](self.system)\r\n etree_xml = etree.fromstring(task_xml)\r\n\r\n min_score_to_attempt = int(etree_xml.attrib.get('min_score_to_attempt', 0))\r\n max_score_to_attempt = int(etree_xml.attrib.get('max_score_to_attempt', self._max_score))\r\n\r\n task_parsed_xml = task_descriptor.definition_from_xml(etree_xml, self.system)\r\n task = children['modules'][task_type](self.system, self.location, task_parsed_xml, task_descriptor,\r\n self.static_data, instance_state=task_state)\r\n last_response = task.latest_answer()\r\n last_score = task.latest_score()\r\n all_scores = task.all_scores()\r\n last_post_assessment = task.latest_post_assessment(self.system)\r\n last_post_feedback = \"\"\r\n feedback_dicts = [{}]\r\n grader_ids = [0]\r\n submission_ids = [0]\r\n if task_type == \"openended\":\r\n last_post_assessment = task.latest_post_assessment(self.system, short_feedback=False, join_feedback=False)\r\n if isinstance(last_post_assessment, list):\r\n eval_list = []\r\n for i in xrange(0, len(last_post_assessment)):\r\n eval_list.append(task.format_feedback_with_evaluation(self.system, last_post_assessment[i]))\r\n last_post_evaluation = \"\".join(eval_list)\r\n else:\r\n last_post_evaluation = task.format_feedback_with_evaluation(self.system, last_post_assessment)\r\n last_post_assessment = last_post_evaluation\r\n try:\r\n rubric_data = task._parse_score_msg(task.child_history[-1].get('post_assessment', \"{}\"), self.system)\r\n except Exception:\r\n log.debug(\"Could not parse rubric data from child history. 
\"\r\n \"Likely we have not yet initialized a previous step, so this is perfectly fine.\")\r\n rubric_data = {}\r\n rubric_scores = rubric_data.get('rubric_scores')\r\n grader_types = rubric_data.get('grader_types')\r\n feedback_items = rubric_data.get('feedback_items')\r\n feedback_dicts = rubric_data.get('feedback_dicts')\r\n grader_ids = rubric_data.get('grader_ids')\r\n submission_ids = rubric_data.get('submission_ids')\r\n elif task_type == \"selfassessment\":\r\n rubric_scores = last_post_assessment\r\n grader_types = ['SA']\r\n feedback_items = ['']\r\n last_post_assessment = \"\"\r\n last_correctness = task.is_last_response_correct()\r\n max_score = task.max_score()\r\n state = task.child_state\r\n if task_type in HUMAN_TASK_TYPE:\r\n human_task_name = HUMAN_TASK_TYPE[task_type]\r\n else:\r\n human_task_name = task_type\r\n\r\n if state in task.HUMAN_NAMES:\r\n human_state = task.HUMAN_NAMES[state]\r\n else:\r\n human_state = state\r\n if grader_types is not None and len(grader_types) > 0:\r\n grader_type = grader_types[0]\r\n else:\r\n grader_type = \"IN\"\r\n grader_types = [\"IN\"]\r\n\r\n if grader_type in HUMAN_GRADER_TYPE:\r\n human_grader_name = HUMAN_GRADER_TYPE[grader_type]\r\n else:\r\n human_grader_name = grader_type\r\n\r\n last_response_dict = {\r\n 'response': last_response,\r\n 'score': last_score,\r\n 'all_scores': all_scores,\r\n 'post_assessment': last_post_assessment,\r\n 'type': task_type,\r\n 'max_score': max_score,\r\n 'state': state,\r\n 'human_state': human_state,\r\n 'human_task': human_task_name,\r\n 'correct': last_correctness,\r\n 'min_score_to_attempt': min_score_to_attempt,\r\n 'max_score_to_attempt': max_score_to_attempt,\r\n 'rubric_scores': rubric_scores,\r\n 'grader_types': grader_types,\r\n 'feedback_items': feedback_items,\r\n 'grader_type': grader_type,\r\n 'human_grader_type': human_grader_name,\r\n 'feedback_dicts': feedback_dicts,\r\n 'grader_ids': grader_ids,\r\n 'submission_ids': submission_ids,\r\n 'success': True\r\n }\r\n return last_response_dict",
"def _parse_next(self, response):\n req = scrapy.Request(\n 'http://www.thecha.org/doing-business/contracting-opportunities/view-all/Board%20Meeting', # noqa\n callback=self._parse_combined_meetings,\n dont_filter=True,\n )\n req.meta['upcoming'] = self._parse_notice(response)\n yield req",
"def __poll_evalutation_result(self, eval_state_url, sleep_time):\n\n self.logger.debug(\n f\"Starting poll cycle for {self.assignment_id}, eval_id {self.eval_id}. Polling {eval_state_url}\"\n )\n\n result = self.interactor.get_policy_eval_state(eval_state_url)\n\n while result.status_code == 202:\n self.logger.debug(\n f\"Evaluation for eval_id {self.eval_id} still ongoing, waiting {sleep_time} seconds before next check\"\n )\n time.sleep(sleep_time)\n result = self.interactor.get_policy_eval_state(eval_state_url)\n\n self.logger.debug(f\"Evaluation for eval_id {self.eval_id} done: {result}\")",
"def get_cal_events(user, calservice):\r\n cal_page_token = None\r\n while True:\r\n try:\r\n #the next for loop retrives the calendar events\r\n #list to be checked for matching criteria\r\n prieml = user['primaryEmail']\r\n creator_to_del = '[email protected]'\r\n event_to_del = 'Digital Directorate Team Meeting'\r\n events = calservice.events().list(calendarId=prieml,\r\n pageToken=cal_page_token).execute()\r\n for event in events['items']:\r\n if event['status'] != 'cancelled':\r\n try:\r\n #this is the criteri to be checked against\r\n organiser = event['organizer']['email']\r\n summary = event['summary']\r\n if organiser == creator_to_del \\\r\n and summary == event_to_del:\r\n try:\r\n #checking for specific start date \r\n #in the event some events have different\r\n #dateTime\\date keywords\r\n if event['start']['dateTime']:\r\n evdate = event['start']['dateTime']\r\n startDate = datetime.strptime(evdate[0:10],\r\n '%Y-%m-%d')\r\n today = datetime.today()\r\n if startDate > today:\r\n print('{0} ({1}) {2} {3}'.format(prieml,\r\n event['summary'],\r\n event['organizer']['email'],\r\n evdate[0:10]))\r\n except KeyError:\r\n #if the keyword is not dateTime \r\n #then fetch date keyword\r\n evdate = event['start']['date']\r\n startDate = datetime.strptime(evdate, '%Y-%m-%d')\r\n today = datetime.today()\r\n if startDate > today:\r\n print('{0} ({1}) {2} {3}'.format(prieml,\r\n event['summary'],\r\n event['organizer']['email'],\r\n evdate))\r\n except KeyError:\r\n continue\r\n cal_page_token = events.get('nextPageToken')\r\n if not cal_page_token:\r\n break\r\n except ValueError:\r\n print('Oops! Thhe last event has an error. Try again...')",
"def custom_wait_for_completion(task_description, output):\n state = 'UNSUBMITTED'\n while not (state == 'COMPLETED' or state =='FAILED'):\n output.add_live_msg(ms.STATUS.format(state))\n time.sleep(5)\n \n #search for the task in task_list\n for task in task_description:\n current_task = gs.isTask(task)\n if current_task:\n state = current_task.state\n if state == 'RUNNING' or state == 'FAILED': \n break\n \n return state",
"def get_workflow_pending_approval_jobs(workflow_id, headers):\n\n for current_job in get_all_items(f\"/workflow/{workflow_id}/job\", headers):\n if (current_job.get(\"type\") == \"approval\") and (current_job.get(\"status\") == \"on_hold\"):\n yield current_job",
"def generate_response(self, input_statement, additional_response_selection_parameters=None):\n Statement = self.storage.get_object('statement')\n\n results = []\n result = None\n max_confidence = -1\n\n for adapter in self.logic_adapters:\n if adapter.can_process(input_statement):\n\n output, Story_ID ,children_questions,means_questions = adapter.process(input_statement, additional_response_selection_parameters)\n results.append(output)\n\n self.logger.info(\n '{} selected \"{}\" as a response with a confidence of {}'.format(\n adapter.class_name, output.text, output.confidence\n )\n )\n\n if output.confidence > max_confidence:\n result = output\n max_confidence = output.confidence\n else:\n self.logger.info(\n 'Not processing the statement using {}'.format(adapter.class_name)\n )\n\n class ResultOption:\n def __init__(self, statement, count=1):\n self.statement = statement\n self.count = count\n\n # If multiple adapters agree on the same statement,\n # then that statement is more likely to be the correct response\n if len(results) >= 3:\n result_options = {}\n for result_option in results:\n result_string = result_option.text + ':' + (result_option.in_response_to or '')\n\n if result_string in result_options:\n result_options[result_string].count += 1\n if result_options[result_string].statement.confidence < result_option.confidence:\n result_options[result_string].statement = result_option\n else:\n result_options[result_string] = ResultOption(\n result_option\n )\n\n most_common = list(result_options.values())[0]\n\n for result_option in result_options.values():\n if result_option.count > most_common.count:\n most_common = result_option\n\n if most_common.count > 1:\n result = most_common.statement\n\n response = Statement(\n text=result.text,\n in_response_to=input_statement.text,\n conversation=input_statement.conversation,\n persona='bot:' + self.name\n )\n\n response.confidence = result.confidence\n\n return response, Story_ID ,children_questions,means_questions",
"def _check_approval_update(self, state):\n\t\tcurrent_employee = self.env['hr.employee'].search([('user_id', '=', self.env.uid)], limit=1)\n\t\t# is_officer = self.env.user.has_group('hr_holidays.group_hr_holidays_user')\n\t\tis_manager = self.env.user.has_group('hr_holidays.group_hr_holidays_manager')\n\t\tfor holiday in self:\n\t\t\tval_type = holiday.holiday_status_id.validation_type\n\t\t\tif state == 'confirm':\n\t\t\t\tcontinue\n\n\t\t\tif state == 'draft':\n\t\t\t\tif holiday.employee_id != current_employee and not is_manager:\n\t\t\t\t\traise UserError(_('Only a Leave Manager can reset other people leaves.'))\n\t\t\t\tcontinue\n\n\t\t\t# if not is_officer:\n\t\t\t# \traise UserError(_('Only a Leave Officer or Manager can approve or refuse leave requests.'))\n\n\t\t\t# if is_officer:\n\t\t\t# \t# use ir.rule based first access check: department, members, ... (see security.xml)\n\t\t\tholiday.check_access_rule('write')\n\n\t\t\tif holiday.employee_id == current_employee and not is_manager:\n\t\t\t\traise UserError(_('Only a Leave Manager can approve its own requests.'))\n\n\t\t\tif (state == 'validate1' and val_type == 'both') or (state == 'validate' and val_type == 'manager'):\n\t\t\t\tmanager = holiday.employee_id.parent_id or holiday.employee_id.department_id.manager_id\n\t\t\t\tif (manager and manager != current_employee) and not self.env.user.has_group('hr_holidays.group_hr_holidays_manager'):\n\t\t\t\t\traise UserError(_('You must be either %s\\'s manager or Leave manager to approve this leave') % (holiday.employee_id.name))\n\n\t\t\tif state == 'validate' and val_type == 'both':\n\t\t\t\tif not self.env.user.has_group('hr_holidays.group_hr_holidays_manager'):\n\t\t\t\t\traise UserError(_('Only an Leave Manager can apply the second approval on leave requests.'))",
"def get_clarifications_commonsenseqa(ex, nlp, comet_model):\n CATEGORY_TO_QUESTION = {\"xIntent\": \"What was their intention?\",\n \"xNeed\": \"Before that, what did they need?\",\n \"oEffect\": \"What happens to others as a result?\",\n \"oReact\": \"What do others feel as a result?\",\n \"oWant\": \"What do others want as a result?\",\n \"xEffect\": \"What happens to them as a result?\",\n \"xReact\": \"What do they feel as a result?\",\n \"xWant\": \"What do they want as a result?\",\n \"xAttr\": \"How are they seen?\"}\n\n CATEGORY_TO_PREFIX = {\"xIntent\": \"Because they wanted\",\n \"xNeed\": \"Before, they needed\",\n \"oEffect\": \"Others then\",\n \"oReact\": \"As a result, others feel\",\n \"oWant\": \"As a result, others want\",\n \"xEffect\": \"They then\",\n \"xReact\": \"As a result, they feel\",\n \"xWant\": \"As a result, they want\",\n \"xAttr\": \"They are seen seen as\"}\n\n context = ex['question']['stem']\n personx,_ = get_personx(nlp, context, use_chunk=False)\n\n if len(personx) == 0:\n return []\n\n outputs = {category: comet_model.predict(context, category, num_beams=5) for category in comet_model.categories}\n\n curr_events = []\n for category, prefix in CATEGORY_TO_PREFIX.items():\n for out_event in outputs[category]:\n if out_event != \"none\" and out_event != \"\":\n if not out_event.lower().startswith(\"person\") and not out_event.lower().startswith(\"other\"):\n out_event = \" \".join((prefix, out_event))\n\n out_event = re.sub(\"personx\", '', out_event, flags=re.I)\n out_event = re.sub(\"person x\", '', out_event, flags=re.I)\n out_event = re.sub(\"persony\", \"others\", out_event, flags=re.I)\n out_event = re.sub(\"person y\", \"others\", out_event, flags=re.I)\n\n question = CATEGORY_TO_QUESTION[category].replace(\"PersonX\", personx)\n curr_events.append((question, out_event))\n return curr_events",
"def _complete_recurring_thisandfuture(self, completion_timestamp):\n recurrences = self.icalendar_instance.subcomponents\n orig = recurrences[0]\n if not \"STATUS\" in orig:\n orig[\"STATUS\"] = \"NEEDS-ACTION\"\n\n if len(recurrences) == 1:\n ## We copy the original one\n just_completed = orig.copy()\n just_completed.pop(\"RRULE\")\n just_completed.add(\n \"RECURRENCE-ID\", orig.get(\"DTSTART\", completion_timestamp)\n )\n seqno = just_completed.pop(\"SEQUENCE\", 0)\n just_completed.add(\"SEQUENCE\", seqno + 1)\n recurrences.append(just_completed)\n\n prev = recurrences[-1]\n rrule = prev.get(\"RRULE\", orig[\"RRULE\"])\n thisandfuture = prev.copy()\n seqno = thisandfuture.pop(\"SEQUENCE\", 0)\n thisandfuture.add(\"SEQUENCE\", seqno + 1)\n\n ## If we have multiple recurrences, assume the last one is a THISANDFUTURE.\n ## (Otherwise, the data is coming from another client ...)\n ## The RANGE parameter needs to be removed\n if len(recurrences) > 2:\n if prev[\"RECURRENCE-ID\"].params.get(\"RANGE\", None) == \"THISANDFUTURE\":\n prev[\"RECURRENCE-ID\"].params.pop(\"RANGE\")\n else:\n raise NotImplementedError(\n \"multiple instances found, but last one is not of type THISANDFUTURE, possibly this has been created by some incompatible client, but we should deal with it\"\n )\n self._complete_ical(prev, completion_timestamp)\n\n thisandfuture.pop(\"RECURRENCE-ID\", None)\n thisandfuture.add(\"RECURRENCE-ID\", self._next(i=prev, rrule=rrule))\n thisandfuture[\"RECURRENCE-ID\"].params[\"RANGE\"] = \"THISANDFUTURE\"\n rrule2 = thisandfuture.pop(\"RRULE\", None)\n\n ## Counting logic\n if rrule2 is not None:\n count = rrule2.get(\"COUNT\", None)\n if count is not None and count[0] in (0, 1):\n for i in recurrences:\n self._complete_ical(i, completion_timestamp=completion_timestamp)\n thisandfuture.add(\"RRULE\", rrule2)\n else:\n count = rrule.get(\"COUNT\", None)\n if count is not None and count[0] <= len(\n [x for x in recurrences if not self._is_pending(x)]\n ):\n self._complete_ical(\n recurrences[0], completion_timestamp=completion_timestamp\n )\n self.save(increase_seqno=False)\n return\n\n rrule = rrule2 or rrule\n\n duration = self._get_duration(i=prev)\n thisandfuture.pop(\"DTSTART\", None)\n thisandfuture.pop(\"DUE\", None)\n next_dtstart = self._next(i=prev, rrule=rrule, ts=completion_timestamp)\n thisandfuture.add(\"DTSTART\", next_dtstart)\n self._set_duration(i=thisandfuture, duration=duration, movable_attr=\"DUE\")\n self.icalendar_instance.subcomponents.append(thisandfuture)\n self.save(increase_seqno=False)",
"def workorderwizard_findprevious(request):\n # Manually checking if user is authenticated rather than using @login_required\n # in order to return a 401 status that the workorder wizard understands so it can display a specific error message\n # instead of returning a 302 redirect to the login page, which wouldn't work because this view is called via AJAX\n if not request.user.is_authenticated:\n return HttpResponse('Unauthorized', status=401)\n\n # load JSON\n data = json.loads(request.body.decode('utf-8'))\n\n # check that all required fields are present\n mandatory_fields = ('org', 'event_name', 'location', 'start', 'end', 'setup_complete')\n if not all(key in data for key in mandatory_fields):\n return HttpResponse('Unprocessable Entity', status=422)\n\n # Search events that took place in the past 18 months for a match\n try:\n org = events_models.Organization.objects.get(pk=data['org'])\n except events_models.Organization.DoesNotExist:\n return HttpResponse('Unprocessable Entity', status=422)\n events_past_18_months = events_models.Event2019.objects.filter(\n org=org,\n test_event=False,\n datetime_start__gte=(timezone.now() - timezone.timedelta(days=548))\n )\n if not request.user.has_perm('events.list_org_events', org):\n events_past_18_months = events_past_18_months.exclude(approved=False)\n if not request.user.has_perm('events.list_org_hidden_events', org):\n events_past_18_months = events_past_18_months.exclude(sensitive=True)\n event_names = events_past_18_months.values_list('event_name', flat=True).distinct()\n name_closest_match = process.extractOne(data['event_name'], event_names, scorer=fuzz.ratio)\n if not name_closest_match or name_closest_match[1] < 80:\n # no match\n return HttpResponse(status=204)\n # match found\n closest_match = events_past_18_months.filter(event_name=name_closest_match[0]).order_by('-datetime_start').first()\n if (not closest_match.approved and not request.user.has_perm('events.view_events', closest_match)\n or closest_match.sensitive and not request.user.has_perm('events.view_hidden_event', closest_match)):\n # match blocked by lack of user permission\n return HttpResponse(status=204)\n\n # Prepare response\n services_data = []\n for service_instance in closest_match.serviceinstance_set.all():\n services_data.append({\n 'id': service_instance.service.shortname,\n 'detail': service_instance.detail\n })\n return HttpResponse(json.dumps({\n 'event_name': closest_match.event_name,\n 'location': closest_match.location.pk,\n 'start': str(closest_match.datetime_start),\n 'services': services_data\n }))",
"def poll_for_operation_completion(self, lifecycle_operation_occurrence_id, final_states, max_wait_time,\n poll_interval):\n operation_pending = True\n elapsed_time = 0\n\n while operation_pending and elapsed_time < max_wait_time:\n operation_status = self.get_operation_status(lifecycle_operation_occurrence_id)\n LOG.debug('Got status %s for operation with ID %s' % (operation_status, lifecycle_operation_occurrence_id))\n if operation_status in final_states:\n operation_pending = False\n else:\n LOG.debug('Expected state to be one of %s, got %s' % (final_states, operation_status))\n LOG.debug('Sleeping %s seconds' % poll_interval)\n time.sleep(poll_interval)\n elapsed_time += poll_interval\n LOG.debug('Elapsed time %s seconds out of %s' % (elapsed_time, max_wait_time))\n\n return operation_status",
"def notify_participant_event_decision(request, user, event, decision, comment_to_applicant):\n\n subject = f\"{settings.SITE_NAME} Event Registration Decision\"\n context = {\n 'name': user.get_full_name(),\n 'domain': get_current_site(request),\n 'url_prefix': get_url_prefix(request),\n 'event_title': event.title,\n 'event_url': reverse('event_detail', args=[event.slug]),\n 'decision': decision,\n 'comment_to_applicant': comment_to_applicant,\n 'SITE_NAME': settings.SITE_NAME,\n }\n body = loader.render_to_string('events/email/event_participation_decision.html', context)\n # Not resend the email if there was an integrity error\n send_mail(subject, body, settings.DEFAULT_FROM_EMAIL, [user.email], fail_silently=False)",
"def _check_ongoing_poll(view):\n meeting_path = resource_path(view.request.meeting)\n ongoing = view.catalog_search(type_name = 'Poll',\n path = meeting_path,\n workflow_state = 'ongoing')\n if ongoing:\n raise HTTPForbidden(_(u\"access_during_ongoing_not_allowed\",\n default = u\"During ongoing polls, this action isn't allowed. \"\n \"Try again when polls have closed.\"))",
"def process_response(self, response):\n json = response.json()\n for resp in json[\"responses\"]:\n sub_qry = self._current_query.get(int(resp[\"id\"]))\n self.context.pending_request().map_json(resp[\"body\"], sub_qry.return_type)",
"def get_clarifications_piqa(ex, nlp, comet_model):\n # Questions are usually like \"how would you do something?\"\n personx = \"you\"\n\n input_event = ex[\"goal\"].replace(\"?\", \"\")\n outputs = {category: comet_model.predict(input_event, category, num_beams=5) for category in comet_model.categories}\n\n # We only care about preconditions and postconditions for X\n relevant_categories = [\"xIntent\", \"xNeed\", \"xEffect\", \"xWant\"]\n curr_events = []\n for category in relevant_categories:\n prefix = CATEGORY_TO_PREFIX[category]\n for out_event in outputs[category]:\n if out_event != \"none\" and out_event != \"\":\n if not out_event.lower().startswith(\"person\") and not out_event.lower().startswith(\"other\"):\n out_event = \" \".join((prefix, out_event))\n\n out_event = re.sub(\"personx\", personx, out_event, flags=re.I)\n out_event = re.sub(\"person x\", personx, out_event, flags=re.I)\n out_event = re.sub(\"persony\", \"others\", out_event, flags=re.I)\n out_event = re.sub(\"person y\", \"others\", out_event, flags=re.I)\n\n question = CATEGORY_TO_QUESTION[category].replace(\"PersonX\", personx)\n curr_events.append((question, out_event))\n\n return curr_events",
"def check_for_recommendation_result_report(context):\n json_data = context.response.json()\n if \"recommendation\" in json_data:\n check_recommendation_in_result(context)\n else:\n look_for_other_attributes(context)\n check_vulnerability_in_result(context)",
"def _resolve_deferred_response(self, response, remaining, deferred_id=None):\n if remaining == 0:\n raise DeferredError(\"Failed to resolve deferred response.\")\n\n if response.status_code == 202 and self.block_on_deferred_response:\n deferred_id = deferred_id or response.text\n sleep(self.deferred_poll_interval)\n return self._resolve_deferred_response(\n self.get('get_deferred_results', {'deferred_id': deferred_id}),\n remaining-1,\n deferred_id\n )\n\n return response",
"def to_poll_response_11(self, in_response_to):\n\n poll_response = tm11.PollResponse(message_id=tm11.generate_message_id(),\n in_response_to=in_response_to,\n collection_name=self.result_set.data_collection.name)\n\n if self.exclusive_begin_timestamp_label:\n poll_response.exclusive_begin_timestamp_label = self.exclusive_begin_timestamp_label\n\n if self.inclusive_end_timestamp_label:\n poll_response.inclusive_end_timestamp_label = self.inclusive_end_timestamp_label\n\n if self.result_set.subscription:\n poll_response.subscription_id = self.result_set.subscription.subscription_id\n\n poll_response.record_count = tm11.RecordCount(int(self.result_set.total_content_blocks), False)\n poll_response.more = self.more\n poll_response.result_id = str(self.result_set.pk)\n poll_response.result_part_number = int(self.part_number)\n\n for content_block in self.content_blocks.all():\n cb = content_block.to_content_block_11()\n poll_response.content_blocks.append(cb)\n\n return poll_response",
"def get_task_token(decision):\n try:\n return decision[\"taskToken\"]\n except KeyError:\n # No taskToken returned\n return None"
] | [
"0.63863784",
"0.5704795",
"0.50615245",
"0.5034302",
"0.49235952",
"0.49071118",
"0.4883602",
"0.48640746",
"0.48498127",
"0.4848576",
"0.4816674",
"0.476571",
"0.4736862",
"0.46318975",
"0.45984492",
"0.45964563",
"0.44784856",
"0.44721466",
"0.44705853",
"0.44598907",
"0.4431605",
"0.44238418",
"0.4394544",
"0.43811956",
"0.43631682",
"0.43630955",
"0.43561545",
"0.43498144",
"0.43487844",
"0.43198094"
] | 0.6158818 | 1 |
From the decision response, which is JSON data from SWF, get the input data that started the workflow | def get_input(decision):
try:
workflow_input = json.loads(
decision["events"][0]["workflowExecutionStartedEventAttributes"]["input"]
)
except KeyError:
workflow_input = None
return workflow_input | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_input(activity_task):\n try:\n input = json.loads(activity_task[\"input\"])\n except KeyError:\n input = None\n return input",
"def start_operation(self):\n\n # Check file\n is_good_file, info = self.check_file()\n\n if is_good_file:\n # Check input params from client\n is_error, json_params = self.check_text()\n\n if bool(is_error):\n return is_error, json_params\n else:\n # Start machine learning here\n from slashmlapi.app.slashml.ml_manager import MLManager\n\n # Path to zip file\n path_textfile = info['filename']\n print(json_params)\n results = MLManager.get_results(path_textfile, json_params, self.config, self.start_time)\n return True, results\n else:\n return is_good_file, info",
"def _process(self):\n self.output[\"data\"] = get_form_data(self.kwargs[\"collect\"].ona_scan_form_pk)",
"def test_run_workflow_by_payload(self):\n full_task_payload = {\n \"workflow_name\" : \"workflow_name\",\n \"input_mappings\" : \"input_mappings\"\n}\n headers = { \n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n response = self.client.open(\n '/run/workflow/',\n method='POST',\n headers=headers,\n data=json.dumps(full_task_payload),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def extract_workflow_data(workflow):\n workflow_data = {}\n workflow_data[\"id\"] = workflow.id\n workflow_data['name'] = workflow.name\n workflow_data['created_at'] = workflow.created_at\n workflow_data['updated_at'] = workflow.updated_at\n workflow_data[\"state\"] = workflow.state\n return workflow_data",
"def process():\n question = request.form['question']\n parsed_question = parse(question)\n wiki_extract = get_wiki_extract(parsed_question)\n wiki_url = get_wiki_url(parsed_question)\n address = get_address(parsed_question)\n lat = get_latitude(parsed_question)\n lng = get_longitude(parsed_question)\n return jsonify({'data': [question, address, wiki_extract,\n wiki_url, lat, lng, parsed_question]})",
"def preprocess(self, request):\n # Take the input data and pre-process it make it inference ready\n\n json_list = []\n # for each request\n for idx, data in enumerate(request):\n # Read the bytearray of the jsonline from the input\n jsonline_arr = data.get('body') \n # Input json is in bytearray, convert it to string\n jsonline_str = jsonline_arr.decode(\"utf-8\")\n # split the json lines\n json_list_request = []\n # for each time series\n for line in io.StringIO(jsonline_str):\n json_record = json.loads(line)\n json_list_request.append(json_record)\n json_list.append(json_list_request)\n return json_list",
"def extract_training_request(json_data):\n if not validate_training_request(json_data):\n return None\n\n return json_data",
"def run(request):\n print(\"This is the run() method\")\n \n #load str\n payload = json.loads(request)\n \n return f\"/n Returning the input for testing: {payload}\"",
"def get_input(self):\n if self.console_label is None:\n self.text_area.delete('1.0', END)\n print(\"The training has finished and the training file was created and sent to the server! Go Back.\")\n return\n\n valid_responses = {'y', 'n', 'u', 'f'}\n\n user_input = self.user_input.get()\n\n self.user_input.delete(0, END)\n\n if user_input not in valid_responses:\n return\n\n self.console_label.label_record_pair(user_input, self.current_record_pair)\n\n if user_input == 'f':\n self.upload_training_file()\n self.current_record_pair = None\n self.console_label = None\n self.text_area.delete('1.0', END)\n return\n\n self.text_area.yview(END)\n\n self.current_record_pair = self.console_label.get_uncertain_pair()",
"def get_launch_response():\n \n return get_init_response()",
"def ReadFlowProcessingRequests(self):\n return list(self.flow_processing_requests.values())",
"def get_input_data(sample):\n with checkpoints.query_portal.get(sample=sample).output[0].open() as f:\n data = json.read(f)\n return data",
"def inference():\n if request.method == \"POST\":\n data = request.json #\n src_img = np.array(data[\"src\"]).astype(np.uint8) # Parsing data\n ref_img = np.array(data[\"ref\"]).astype(np.uint8) #\n ref_label = int(data[\"ref_label\"]) #\n result = get_inference(src_img, ref_img, ref_label) # Calling helper function\n return jsonify({\"result\": result.tolist()}) # Returning results into json",
"def test_get_workflow_definition(self):\n pass",
"def get_task_from_request_form(request):\n json_data = request.get_json()\n # Required fields\n if \"title\" not in json_data:\n raise ValueError(\"Required field is missing\")\n if \"reference\" not in json_data:\n raise ValueError(\"Required field is missing\")\n if \"status\" not in json_data:\n raise ValueError(\"Required field is missing\")\n\n task_from_request = {\n 'title': json_data['title'],\n 'reference': json_data['reference'],\n 'description': json_data['description'],\n 'timeWorked': [],\n 'status': json_data['status'],\n 'visible': \"visible\" in json_data\n }\n\n return task_from_request",
"def extract_critic_input(self, data):\n return data[1]",
"def get_input_data(self, name='0'):\n return data",
"def predict_start():\n data = request.json\n\n if data:\n predictor.pred_dict[\"start_date\"] = data[\"start_date\"]\n else:\n pass\n\n return 'Non tam praeclarum est scire latine, quam turpe nescire'",
"def run(self, data):\n\n if data and self.application:\n # Build tuples for embedding index\n if self.application.embeddings:\n data = [(x, element, None) for x, element in enumerate(data)]\n\n # Process workflow\n with st.spinner(\"Running workflow....\"):\n results = []\n for result in self.application.workflow(self.name, data):\n # Store result\n results.append(result)\n\n # Write result if this isn't an indexing workflow\n if not self.application.embeddings:\n st.write(result)\n\n # Store workflow results\n self.data = results",
"def input(self):\n return self[\"input\"]",
"def input(self):\n return self[\"input\"]",
"def locate_input(self,il):\n if il.tp == opmod.no_input or il.val is None:\n return None\n elif il.tp == opmod.basic_type:\n return il.val\n elif il.tp == opmod.entire_workflow:\n wf = self.workflows[il.val]\n stk,diag = wf.execution_stack()\n self.prepare_wf(wf,stk)\n return wf\n #return self.workflows[il.val]\n elif il.tp == opmod.plugin_item:\n if isinstance(il.val,list):\n return [self.plugin_manager.get_data_from_uri(v) for v in il.val]\n else:\n return self.plugin_manager.get_data_from_uri(il.val)\n # changed: let basic_type inputs be loaded directly,\n # without using InputLocators.\n #elif il.tp == opmod.basic_type:\n # return il.val",
"def get_workflow_steps(self):\n return self._data_dict[self.KEY_WF_STEPS]",
"def pre_get_flow_validation_result(\n self,\n request: flow.GetFlowValidationResultRequest,\n metadata: Sequence[Tuple[str, str]],\n ) -> Tuple[flow.GetFlowValidationResultRequest, Sequence[Tuple[str, str]]]:\n return request, metadata",
"def test_get_start_form_data(self):\n pass",
"def input_data(self):\n return read_json(self.file_path)",
"def inputs(self, selected, workflow):\n\n change, query = False, None\n with st.expander(\"Data\", expanded=\"embeddings\" not in selected):\n default = self.appsetting(workflow, \"data\")\n default = default if default else \"\"\n\n data = st.text_area(\"Input\", height=10, value=default)\n\n if selected and data and data != self.state(\"data\"):\n change = True\n\n # Save data and workflow state\n st.session_state[\"data\"] = data\n\n if \"embeddings\" in selected:\n default = self.appsetting(workflow, \"query\")\n default = default if default else \"\"\n\n # Set query and limit\n query = st.text_input(\"Query\", value=default)\n\n if selected and query and query != self.state(\"query\"):\n change = True\n\n # Save query state\n st.session_state[\"query\"] = query\n\n return change or self.state(\"api\") or self.state(\"download\")",
"def xnat_workflow_info_show(args):\n\trequest_url = \"http://\" + args.server + \"/data/services/workflows/workflowid/\" + args.workflow_id + \"?format=json\"\n\tprint(\"xnat_workflow_info show: request_url: \" + request_url)\n\tresponse = requests.get(request_url, auth=(args.username, args.password))\n\tif (response.status_code != 200):\n\t\tprint(\"Cannot get response from request: \" + request_url)\n\t\tsys.exit(1)\n\n\tjson_response = json.loads(response.text)\n\tjson_items = json_response['items']\n\ti = 0\n\tfor json_item in json_items:\n\t\ti = i + 1\n\t\tprint i\n\n\t\t# meta\n\t\tjson_meta = json_item['meta']\n\t\tisHistory = json_meta['isHistory']\n\t\ttype = json_meta['xsi:type']\n\t\tstart_date = json_meta['start_date']\n\n\t\tprint \" isHistory: \" + str(isHistory)\n\t\tprint \" type: \" + type\n\t\tprint \" start_date: \" + start_date\n\t\n\t\t# children\n\t\t#json_children = json_item['children']\n\t\t#print \" children\"\n\t\t#print json_children\n\n\t\t# data_fields\n\t\tjson_data_fields = json_item['data_fields']\n\t\tstatus = json_data_fields['status']\n\t\tworkflow_id = json_data_fields['wrk_workflowData_id']\n\t\tdata_type = json_data_fields['data_type']\n\t\tlaunch_time = json_data_fields['launch_time']\n\t\tExternalID = json_data_fields['ExternalID']\n\t\tpipeline_name = json_data_fields['pipeline_name']\n\t\tID = json_data_fields['ID']\n\t\n\t\tprint \" status: \" + status\n\t\tprint \" workflow_id: \" + str(workflow_id)\n\t\tprint \" data_type: \" + data_type\n\t\tprint \" launch_time: \" + launch_time\n\t\tprint \" ExternalID: \" + ExternalID\n\t\tprint \" pipeline_name: \" + pipeline_name\n\t\tprint \" ID: \" + ID\n\n\t\tprint \" All Data Fields:\"\n\t\tprint \" \" + str(json_data_fields)",
"def receiveData():\r\n preference = request.get_json()\r\n program = preference.pop('program')\r\n enroll_yr = preference.pop('enroll_yr')\r\n enroll_sem = preference.pop('enroll_sem')\r\n spec = 0\r\n if 'spec' in preference:\r\n spec = int(preference['spec'])\r\n preference.pop('spec')\r\n\r\n program_link = 'https://programsandcourses.anu.edu.au/2019/program/'\r\n\r\n program_link = str(program_link) + str(program)\r\n # calculate which type of semester does the enrolled semester fall in\r\n # S1 in odd year, S2 in odd year, S1 in even year or S2 in even year \r\n if int(enroll_yr)%2 == 1:\r\n if int(enroll_sem)%2 == 1:\r\n sem = 1\r\n else:\r\n sem = 2\r\n else:\r\n if int(enroll_sem)%2 == 1:\r\n sem = 3\r\n else:\r\n sem = 4\r\n \r\n # call the pre-processing program which put the model in file test1.mzn & test1.dzn\r\n scraper = dp.DegreeRuleScraper(str(program_link))\r\n orders = scraper.build_program_order_struct()\r\n orders.buildAModel(preference, sem, spec)\r\n \r\n # call MiniZinc to solve for the model\r\n cmd = 'minizinc --solver OSICBC test1.mzn test1.dzn > plan.txt'\r\n os.system(cmd)\r\n jsondata = readmyJson('plan')\r\n \r\n return jsonify(jsondata)"
] | [
"0.55532914",
"0.5477838",
"0.54677725",
"0.5407188",
"0.5296765",
"0.52947277",
"0.5269286",
"0.5250171",
"0.5232065",
"0.52313894",
"0.52157223",
"0.5212821",
"0.5212311",
"0.5211624",
"0.51754016",
"0.5171327",
"0.5157477",
"0.5142468",
"0.5139388",
"0.5137017",
"0.51071405",
"0.51071405",
"0.5097303",
"0.50918096",
"0.508403",
"0.5063622",
"0.5057681",
"0.5029805",
"0.5027339",
"0.5024799"
] | 0.7367048 | 0 |
Given a response from polling for decision from SWF via boto, extract the taskToken from the json data, if present | def get_task_token(decision):
    try:
        return decision["taskToken"]
    except KeyError:
        # No taskToken returned
        return None
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_taskToken(activity_task):\n try:\n return activity_task[\"taskToken\"]\n except KeyError:\n # No taskToken returned\n return None",
"def task_result(task_id):\n tag_task = parse_html_tags.AsyncResult(task_id)\n return jsonify(tag_task.get())",
"def fetch_token():\n bucket = os.environ[\"SPOTIFY_BUCKET_NAME\"]\n path = os.getenv(\"SPOTIFY_BUCKET_PATH\", \"\")\n logger.info(\"Reading Spotify OAuth token from s3://%s/%s/token.json.\" %\n (bucket, path))\n s3 = boto3.client('s3')\n content_object = s3.get_object(Bucket=bucket, Key=\"%s/token.json\" % path)\n file_content = content_object['Body'].read().decode('utf-8')\n token = json.loads(file_content)\n return token",
"def getTask():\n\tcontent = requests.get(MANAGER_URL+\"task\", params={\"apiKey\": API_KEY}).text\n\tif content == \"null\":\n\t\treturn None\n\telse:\n\t\treturn json.loads(content)",
"def _extract_continuation_token(continuation_token):\n if not continuation_token:\n return None, None\n try:\n return continuation_token.get(\"PartitionKey\"), continuation_token.get(\"RowKey\")\n except AttributeError as exc:\n raise ValueError(\"Invalid continuation token format.\") from exc",
"def _parse_json_token(self, body):\n\n token_match = re.search('var\\s*jsonToken\\s*=[\\s\\']*([\\w-]+)', body)\n return token_match.group(1)",
"def get_token_from_json(json):\r\n return PodiumToken(json[\"access_token\"], json[\"token_type\"], json[\"created_at\"])",
"def _collect_token(config, cert):\n try:\n json_cert = json.loads(cert)\n except:\n click.secho(\"There was an error accessing/parsing those files!...\\n\", fg='red', reverse=True)\n if config.verbose:\n click.secho(\"The file you uploaded must be compatible with a JSON parser. Please revise and try again.\", fg='cyan')\n else:\n if config.verbose:\n click.secho(\"Searching for token...\", fg='white')\n try:\n token = json_cert[\"stream_token\"]\n if token is None:\n raise ValueError\n except:\n click.secho(\"Token not found in provided template!...\\n\", fg='red', reverse=True)\n if config.verbose:\n click.secho(\"Make sure your using the template file generated from 'dstream define'!\", fg='cyan')\n else:\n if config.verbose:\n click.secho(\"Found stream_token: \" + token + '\\n', fg='white')\n return token",
"def processResponse(token, enc_key, sig_key):\n payload = []\n # Decrypt encrypted token (JWE).\n enc = jwe.JWE()\n enc.deserialize(token, key=enc_key)\n payload.append(enc.payload.decode(\"utf-8\"))\n # This again contains a signed token (JWS), so we deserialize it and verify the signature.\n sig = jws.JWS()\n sig.deserialize(payload[0])\n sig.verify(sig_key)\n payload.append(sig.payload.decode(\"utf-8\"))\n return payload",
"async def get_task_result(task_id: TaskId):",
"def get_token():\n return session.get('microsoft_token')",
"def get_token():\n return session.get('microsoft_token')",
"def get_tweet_text(worker_response):\n return worker_response.get('fields').get('tweet')",
"def token(self):\n return self[\"token\"]",
"def token(self):\n\n if not self.requests:\n return None\n return self.requests[0].token",
"def get_token(self): # pragma: no cover\n\t\treturn (session.get(\"access_token\"), \"\")",
"def get_input(activity_task):\n try:\n input = json.loads(activity_task[\"input\"])\n except KeyError:\n input = None\n return input",
"def test_get_task(self):\n resp = self.app.get('/api/2/inf/esrs',\n headers={'X-Auth': self.token})\n\n task_id = resp.json['content']['task-id']\n expected = 'asdf-asdf-asdf'\n\n self.assertEqual(task_id, expected)",
"def get_token(self, res):\n token = res.xpath('//*[@name=\"_csrf-app\"]')[0].attrs['value']\n return token",
"def get_token(self):\n auth_data = {\"auth\": {\"tenantName\": 'service',\n \"passwordCredentials\":{ \"username\": 'vsm',\n \"password\": self._password}}}\n\n auth_request = urllib2.Request(self._auth_url)\n auth_request.add_header(\"content-type\", \"application/json\")\n auth_request.add_header('Accept', 'application/json')\n auth_request.add_header('User-Agent', 'python-mikeyp')\n auth_request.add_data(json.dumps(auth_data))\n auth_response = urllib2.urlopen(auth_request)\n response_data = json.loads(auth_response.read())\n\n self._token = response_data['access']['token']['id']\n\n service_list = response_data['access']['serviceCatalog']\n for s in service_list:\n if s['type'] == 'vsm' and s['name'] == 'vsm':\n self._vsm_url = s['endpoints'][0]['publicURL']\n break\n\n url_id = self._vsm_url.split('/')[-1]\n return self._token + \"-\" + url_id",
"async def _a_get_sp_token(self, resource: str) -> str:\n sp_token = self.oauth_tokens.get(resource)\n if sp_token and self._is_oauth_token_valid(sp_token):\n return sp_token[\"access_token\"]\n\n self.log.info(\"Existing Service Principal token is expired, or going to expire soon. Refreshing...\")\n try:\n async for attempt in self._a_get_retry_object():\n with attempt:\n async with self._session.post(\n resource,\n auth=HTTPBasicAuth(self.databricks_conn.login, self.databricks_conn.password),\n data=\"grant_type=client_credentials&scope=all-apis\",\n headers={\n **self.user_agent_header,\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n },\n timeout=self.token_timeout_seconds,\n ) as resp:\n resp.raise_for_status()\n jsn = await resp.json()\n jsn[\"expires_on\"] = int(time.time() + jsn[\"expires_in\"])\n\n self._is_oauth_token_valid(jsn)\n self.oauth_tokens[resource] = jsn\n break\n except RetryError:\n raise AirflowException(f\"API requests to Databricks failed {self.retry_limit} times. Giving up.\")\n except requests_exceptions.HTTPError as e:\n raise AirflowException(f\"Response: {e.response.content}, Status Code: {e.response.status_code}\")\n\n return jsn[\"access_token\"]",
"def get_token():\n params = {'get_token': 'get_token'}\n return load_page(API, params=params, headers={'content-type': 'application/json'})['token']",
"def _parse_token(self, response=None):\n token_url = 'https://tinychat.com/start?#signin'\n if response is None:\n response = util.web.http_get(url=token_url, referer=token_url, proxy=self._proxy)\n\n if response is not None and response['content'] is not None:\n soup = BeautifulSoup(response['content'], 'html.parser')\n\n token = soup.find(attrs={'name': 'csrf-token'})\n self._token = token['content']",
"async def get_temp_token(self) -> str:\n data = await self.raw_request(\n self.URL_TEMPTOKEN.format(username=self.username, password=self.password)\n )\n if \"result\" not in data or \"token\" not in data[\"result\"]:\n raise DSException(\"invalid api response\")\n return data[\"result\"][\"token\"]",
"def __get_token(self):\n r = requests.post(self.credentials.conf('endpoint') + '/tokens', json={\n 'auth': {\n 'passwordCredentials': {\n 'username': self.credentials.conf('username'),\n 'password': self.credentials.conf('password'),\n },\n 'tenantId': self.credentials.conf('tenant_id'),\n },\n })\n logger.debug('request:')\n logger.debug('%s', r.request.body)\n #print(r.status_code)\n if r.status_code != 200:\n logger.debug('%s', r.content)\n logger.debug('%s', r.json())\n raise RuntimeError('It failed to get token.')\n logger.debug('%s', r.content)\n j = r.json()\n logger.debug('%s', j)\n token = j['access']['token']['id']\n \n # Get DNS URL.\n \n dns_vers_url = None\n for svc in j['access']['serviceCatalog']:\n if svc['type'] == 'dns':\n for ep in svc['endpoints']:\n if ep['region'] == self.credentials.conf('region'):\n dns_vers_url = ep['publicURL']\n if not dns_vers_url:\n raise RuntimeError('It failed to get DNSv1 URL.')\n \n # Get DNSv1 URL.\n r = requests.get(dns_vers_url, headers={'Accept': 'application/json'})\n #print(r.status_code)\n if r.status_code != 300:\n logger.debug('%s', r.content)\n logger.debug('%s', r.json())\n raise RuntimeError('It failed to get DNS URLs.')\n logger.debug('%s', r.content)\n j = r.json()\n logger.debug('%s', j)\n \n url = None\n for val in j['versions']['values']:\n if val['id'] == 'v1':\n url = val['links'][0]['href']\n if not url:\n raise RuntimeError('No DNS v1 URL.')\n return (token, url)",
"def wts_get_token(hostname: str, idp: str, access_token: str):\n headers = {\n \"Content-Type\": \"application/json\",\n \"Accept\": \"application/json\",\n \"Connection\": \"keep-alive\",\n \"Authorization\": \"bearer \" + access_token,\n }\n\n try:\n url = f\"https://{hostname}/wts/token/?idp={idp}\"\n try:\n response = requests.get(url=url, headers=headers)\n response.raise_for_status()\n except requests.exceptions.HTTPError as exc:\n logger.critical(\n f\"HTTP Error ({exc.response.status_code}): getting WTS token: {exc.response.text}\"\n )\n logger.critical(\n \"Please make sure the target commons is connected on your profile page and that connection has not expired.\"\n )\n return None\n\n return _handle_access_token_response(response, \"token\")\n\n except Gen3AuthError:\n logger.critical(f\"Unable to authenticate your credentials with {hostname}\")\n return None",
"def temp_getToken():\n # First check if query is okay or not\n data = json.loads(request.data)\n username = data.get(\"username\",None)\n password = data.get(\"password\",None)\n \n if request.headers.get('X-VITASK-API') != \"2020_Mar_25\" or username is None or password is None:\n return jsonify({\n \"error\" : \"Incorrect API Request\",\n \"code\" : \"400\" # Bad request\n })\n \n # Now began actual work\n username = username.upper()\n \n # This API is only to get user token and get personal details. For syncing details, there will be a seperate API\n # This assumes that token is None just like previous authenticate\n valid = True\n try:\n sess, valid = generate_session(username, password)\n except Exception as e:\n return jsonify({\n \"error\" : \"Something broke\",\n \"code\" : \"500\"\n })\n if not valid:\n # Password incorrect\n return jsonify({\n \"error\" : \"Incorrect Password\"\n })\n ref = db.reference('vitask')\n try:\n profile = {}\n profile, check_profile = get_student_profile(sess, username)\n session['id'] = profile['appNo']\n session['name'] = profile['name']\n session['reg'] = profile['regNo']\n session['loggedin'] = 1\n if(check_profile == False):\n return jsonify({\"Error\": \"Internal Error in fetching profile.Please try again.\"})\n finally:\n name, school, branch, program, regno, appno, email, proctoremail, proctorname, api = ProfileFunc()\n # Timetable,Attendance,Acadhistory and Marks fetching in parallel\n try:\n runInParallel(parallel_timetable(sess, username, session['id']), parallel_attendance(sess, username, session['id']), parallel_acadhistory(sess, username, session['id']), parallel_marks(sess, username, session['id'])) \n finally:\n return jsonify({'Name': name,'School': school,'Branch': branch,'Program': program,'RegNo': regno,'AppNo': appno,'Email': email,'ProctorEmail': proctoremail,'ProctorName': proctorname,'APItoken': api})",
"def getOnePayload(results):\n ans, auth, add = results\n return ans[0].payload",
"def _process_response(self, response):\n\n self.log.debug(\"Received Response: %r\", response)\n\n return self.token_manager.process_response(response)",
"def test_token(authToken):\n url = endpoint('test')\n r = requests.get(url, headers={'authorizationToken': authToken}) \n if r.status_code == 403:\n print(\"403\")\n return False\n response = json.loads( r.content.decode() )\n return response"
] | [
"0.6179731",
"0.5696123",
"0.56038076",
"0.5577959",
"0.55362946",
"0.5421447",
"0.5356318",
"0.5342842",
"0.5334133",
"0.5325056",
"0.531852",
"0.531852",
"0.530204",
"0.5300194",
"0.5241135",
"0.52054435",
"0.5161591",
"0.5144464",
"0.5133138",
"0.5128214",
"0.5108521",
"0.50988525",
"0.5093746",
"0.50596505",
"0.5029181",
"0.50225425",
"0.5021361",
"0.5017311",
"0.5014543",
"0.49868876"
] | 0.6669999 | 0 |
Given a polling for decision response from SWF via boto, extract the workflowType from the json data | def get_workflow_type(decision):
    try:
        return decision["workflowType"]["name"]
    except KeyError:
        # No workflowType found
        return None
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_input(decision):\n try:\n workflow_input = json.loads(\n decision[\"events\"][0][\"workflowExecutionStartedEventAttributes\"][\"input\"]\n )\n except KeyError:\n workflow_input = None\n return workflow_input",
"def get_flowtype(self, data):\n flowType = data['flowType']\n return flowType",
"def extract_workflow_data(workflow):\n workflow_data = {}\n workflow_data[\"id\"] = workflow.id\n workflow_data['name'] = workflow.name\n workflow_data['created_at'] = workflow.created_at\n workflow_data['updated_at'] = workflow.updated_at\n workflow_data[\"state\"] = workflow.state\n return workflow_data",
"def test_get_workflow_definition(self):\n pass",
"def GetOutputType(self, response_type):\n if response_type == \"KML\":\n return \"xml\"\n return \"json\"",
"def payload_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"payload_type\")",
"def get_workflow(self, playbook_name, workflow_name):\n key = _WorkflowKey(playbook_name, workflow_name)\n if key in self.workflows:\n return self.workflows[key]\n return None",
"def test_run_workflow_by_payload(self):\n full_task_payload = {\n \"workflow_name\" : \"workflow_name\",\n \"input_mappings\" : \"input_mappings\"\n}\n headers = { \n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n response = self.client.open(\n '/run/workflow/',\n method='POST',\n headers=headers,\n data=json.dumps(full_task_payload),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def process_workflow(\n workflow_type, decision, settings, logger, client, token, maximum_page_size\n):\n # for the workflowType attempt to do the work\n if workflow_type is not None:\n\n logger.info(\"workflowType: %s\", workflow_type)\n\n # Instantiate and object for the workflow using eval\n # Build a string for the object name\n workflow_name = get_workflow_name(workflow_type)\n\n # Attempt to import the module for the workflow\n if import_workflow_class(workflow_name):\n # Instantiate the workflow object\n workflow_object = get_workflow_object(\n workflow_name,\n settings,\n logger,\n client,\n token,\n decision,\n maximum_page_size,\n )\n # Process the workflow\n invoke_do_workflow(workflow_name, workflow_object, logger)\n else:\n logger.info(\"error: could not load object %s\\n\", workflow_name)",
"def get_activityType(activity_task):\n try:\n return activity_task[\"activityType\"][\"name\"]\n except KeyError:\n # No activityType found\n return None",
"def get_cls_for(obj_type):\n return {\n \"workflow\": Workflow\n }[obj_type]",
"def load_workflow(resource, workflow_name):\n try:\n playbook_file = open(resource, 'r')\n except (IOError, OSError) as e:\n logger.error('Could not load workflow from {0}. Reason: {1}'.format(resource, format_exception_message(e)))\n return None\n else:\n with playbook_file:\n workflow_loaded = playbook_file.read()\n try:\n playbook_json = json.loads(workflow_loaded)\n playbook_name = playbook_json['name']\n workflow_json = next(\n (workflow for workflow in playbook_json['workflows']\n if workflow['name'] == workflow_name), None)\n if workflow_json is None:\n logger.warning('Workflow {0} not found in playbook {0}. '\n 'Cannot load.'.format(workflow_name, playbook_name))\n return None\n workflow = Workflow.create(workflow_json)\n return playbook_name, workflow\n except ValueError as e:\n logger.error('Cannot parse {0}. Reason: {1}'.format(resource, format_exception_message(e)))\n except (InvalidInput, UnknownApp, UnknownAppAction, UnknownFilter, UnknownFlag) as e:\n logger.error('Error constructing workflow {0}. Reason: {1}'.format(workflow_name,\n format_exception_message(e)))\n return None\n except KeyError as e:\n logger.error('Invalid Playbook JSON format. Details: {}'.format(e))\n return None",
"def create_workflows_from_json(workflow):\r\n results = []\r\n\r\n for flow in workflow:\r\n if not 'plugin_name' in flow:\r\n return None\r\n if not 'description' in flow:\r\n return None\r\n if not 'configuration' in flow:\r\n return None\r\n\r\n for flow in workflow:\r\n wf = Workflow()\r\n wf.plugin_name = flow['plugin_name']\r\n wf.configuration = json.dumps(flow['configuration'])\r\n wf.description = flow['description']\r\n results.append(wf)\r\n return results",
"def switcher(self, json_data):\n if json_data['type'] == \"object\":\n return self.type_object(json_data)\n elif json_data['type'] == \"array\":\n return self.type_array()\n elif json_data['type'] in [\"string\", \"boolean\", \"numbers\"]:\n return self.type_others()\n else:\n raise Exception(\"No basic types found in JSON schema\")",
"def get_job_state(self, response) -> Text:\n return response['state']",
"def get_workflow_name(workflow_type):\n return \"workflow_\" + workflow_type",
"def _getStatisticType(self, statistic):\n\n instructions = simplejson.loads(statistic.instructions_json)\n return instructions['type']",
"def test_workflows_get(self):\n pass",
"def test_workflows_get(self):\n pass",
"def parse_creative_serving_decision(data):\n return json.loads(base64.b64decode(data))",
"def type(self):\n return self.get(\"type\")",
"def type(self):\n return self.raw.get(\"type\")",
"def parse(self, payload):\n payload = json.loads(payload)\n \n if payload['response'] in self.possible_responses:\n return self.possible_responses[payload['response']](payload)\n else:\n print 'Response not valid'",
"def _find_type(trial: dict) -> list:\n tag = [trial['study_type'], trial['overall_status']]\n if 'phase' in trial:\n tag.append(trial['phase'])\n del trial['phase']\n if 'last_known_status' in trial and trial['last_known_status'] != trial['overall_status']:\n tag.append(trial['last_known_status'])\n if 'last_known_status' in trial:\n del trial['last_known_status']\n del trial['study_type']\n if 'keyword' in trial:\n if isinstance(trial['keyword'], list):\n tag.extend(trial['keyword'])\n else:\n tag.append(trial['keyword'])\n del trial['keyword']\n if 'intervention' in trial:\n if isinstance(trial['intervention'], dict):\n tag.append(trial['intervention']['intervention_type'])\n else:\n tag.extend([intervention['intervention_type'] for intervention in trial['intervention']])\n # it contains more information then type\n # del trial['intervention']\n if 'biospec_retention' in trial:\n tag.append(trial['biospec_retention'])\n del trial['biospec_retention']\n return tag",
"def _get_workflow_template(self, namespace, name):\n try:\n with onepanel.core.api.ApiClient(self.configuration) as api_client:\n api_instance = onepanel.core.api.WorkflowTemplateServiceApi(api_client)\n api_response = api_instance.get_workflow_template(namespace, name)\n return api_response\n except ApiException as e:\n if e.status == 404:\n return None\n return None",
"def wf_info(workflow_path):\n\n supported_formats = [\"py\", \"wdl\", \"cwl\"]\n file_type = workflow_path.lower().split(\".\")[-1] # Grab the file extension\n workflow_path = workflow_path if \":\" in workflow_path else \"file://\" + workflow_path\n\n if file_type in supported_formats:\n if workflow_path.startswith(\"file://\"):\n version = get_version(file_type, workflow_path[7:])\n elif workflow_path.startswith(\"https://\") or workflow_path.startswith(\n \"http://\"\n ):\n # If file not local go fetch it.\n html = urlopen(workflow_path).read()\n local_loc = os.path.join(os.getcwd(), \"fetchedFromRemote.\" + file_type)\n with open(local_loc, \"w\") as f:\n f.write(html.decode())\n version = wf_info(\"file://\" + local_loc)[\n 0\n ] # Don't take the file_type here, found it above.\n os.remove(\n local_loc\n ) # TODO: Find a way to avoid recreating file before version determination.\n else:\n raise NotImplementedError(\n \"Unsupported workflow file location: {}. Must be local or HTTP(S).\".format(\n workflow_path\n )\n )\n else:\n raise TypeError(\n \"Unsupported workflow type: .{}. Must be {}.\".format(\n file_type, \".py, .cwl, or .wdl\"\n )\n )\n return version, file_type.upper()",
"def get(self, request, name, format=None):\n out = get_workflow_meta(name)\n return Response(out)",
"def _load_workflow( self, workflow_id ):\n id = self.app.security.decode_id( workflow_id )\n stored = self.app.model.context.query( self.app.model.StoredWorkflow ).get( id )\n return stored.latest_workflow",
"def gettype(self, failobj=None):\n missing = []\n value = self.get('content-type', missing)\n if value is missing:\n return failobj\n return re.split(r';\\s*', value.strip())[0].lower()",
"def type(self) -> Optional[pulumi.Input['JobType']]:\n return pulumi.get(self, \"type\")"
] | [
"0.60059506",
"0.5944195",
"0.55025107",
"0.5493954",
"0.54329574",
"0.5415628",
"0.5329207",
"0.53271276",
"0.53192997",
"0.5313738",
"0.52731276",
"0.5255581",
"0.5222106",
"0.5197899",
"0.5140142",
"0.5139222",
"0.51147765",
"0.5095208",
"0.5095208",
"0.5078905",
"0.50226617",
"0.49827176",
"0.49772453",
"0.49590155",
"0.49531716",
"0.4945884",
"0.49276677",
"0.49264306",
"0.4923907",
"0.49025354"
] | 0.7099173 | 0 |
Given a workflowType workflow_type, return the name of a corresponding workflow class to load | def get_workflow_name(workflow_type):
    return "workflow_" + workflow_type
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_cls_for(obj_type):\n return {\n \"workflow\": Workflow\n }[obj_type]",
"def get_workflow_type(decision):\n try:\n return decision[\"workflowType\"][\"name\"]\n except KeyError:\n # No workflowType found\n return None",
"def import_workflow_class(workflow_name):\n try:\n module_name = \"workflow.\" + workflow_name\n importlib.import_module(module_name)\n return True\n except ImportError:\n return False",
"def get_type_label(type_url):\n return type_dict[type_url]",
"def get_workflow_object(\n workflow_name, settings, logger, client, token, decision, maximum_page_size\n):\n module_name = \"workflow.\" + workflow_name\n module_object = importlib.import_module(module_name)\n workflow_class = getattr(module_object, workflow_name)\n # Create the object\n workflow_object = workflow_class(\n settings, logger, client, token, decision, maximum_page_size\n )\n return workflow_object",
"def get_event_class_by_type(type):\n event_module = importlib.import_module('.'.join(type.split('.')[:-1]))\n return getattr(event_module, type.split('.')[-1])",
"def _get_classname(cls):\n return cls.__name__",
"def test_workflow_class_discovery():\n config = {\n \"workflow-name\": \"tests.workflows.test_workflow.CustomWorkflow\",\n \"cluster-type\": CLUSTER_TYPE\n }\n \n template_dir = tempfile.mkdtemp(suffix=\"test-workflow-discovery-template\")\n with open(f\"{template_dir}/workflow.yaml\", 'w') as f:\n yaml.dump(config, f)\n \n _execution_dir, workflow = launch_flow(template_dir, 1)\n assert isinstance(workflow, CustomWorkflow)\n assert workflow.execute.didrun",
"def process_type(process_dict):\n if 'class' not in process_dict:\n exit_perm_fail(\"No class attribute in process\")\n if process_dict['class'] not in ['Workflow', 'CommandLineTool']:\n exit_perm_fail('Invalid class {} in process'.format(process_dict['class']))\n return process_dict['class']",
"def type_name(attr_type: AttrType) -> str:\n return attr_type.native_name or class_name(attr_type.name)",
"def get_entity_classification_name(entity_type, entity_classification=None):\n assert entity_type in BlueprintEntity.entity_classification\n if entity_classification is None:\n entity_classification = BlueprintEntity.get_entity_classification_default(entity_type)\n assert entity_classification in BlueprintEntity.entity_classification[entity_type]\n return BlueprintEntity.entity_classification[entity_type][entity_classification]",
"def getclassname(instance_or_cls):\n return getclass(instance_or_cls).__name__",
"def get_activityType(activity_task):\n try:\n return activity_task[\"activityType\"][\"name\"]\n except KeyError:\n # No activityType found\n return None",
"def get_type_from_string(cls_path: str) -> Type:\n module_name, class_name = cls_path.rsplit(\".\", 1)\n return getattr(import_module(module_name), class_name)",
"def _get_model_from_str_type(model_type):\n if model_type == \"lstm\":\n return model.LSTMNeuralTranslationModel\n\n if model_type == \"temporal\":\n return model.TemporalCNNNeuralTranslationModel",
"def workflow_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"workflow_name\")",
"def getClassName(self):\n n = type(self).__name__\n return n",
"def getClass(strname):\n \n modulename, classname = strname.split('.')\n classname = classname.split('(')[0]\n if hasattr(Analysis,modulename):\n module_ = getattr(Analysis,modulename)\n class_ = getattr(module_,classname)\n else:\n module_ = getattr(Summary,modulename)\n class_ = getattr(module_,classname)\n \n return class_",
"def type(cls):\n return cls.__name__",
"def serialize_type(self, typ):\n if isinstance(typ, super):\n typ = typ.__self_class__\n if isinstance(\n _safe_getattr(typ, \"__module__\", None), six.string_types\n ) and isinstance(_safe_getattr(typ, \"__name__\", None), six.string_types):\n module = typ.__module__\n name = typ.__name__\n if module not in sys.modules:\n return None\n if (\n self.config.unwrap_cls(_safe_getattr(sys.modules[module], name, None))\n is typ\n ):\n return (module, name)\n return None",
"def get_workflow(self, playbook_name, workflow_name):\n key = _WorkflowKey(playbook_name, workflow_name)\n if key in self.workflows:\n return self.workflows[key]\n return None",
"def generate_workflow_name(self) -> str:\n return self._workflow_name",
"def generate_workflow_name(self) -> str:\n workflow_name = None\n if self._parsed_url.basename in WORKFLOW_SPEC_FILENAMES:\n # We omit the name of the specification file if it is standard\n # (e.g. `reana.yaml` or `reana.yml`)\n workflow_name = self._clean_workflow_name(self._parsed_url.dirname)\n if not workflow_name:\n workflow_name = self._clean_workflow_name(\n f\"{self._parsed_url.dirname}-{self._parsed_url.basename_without_extension}\"\n )\n return workflow_name",
"def typeof(inst):\n return type(inst).__name__",
"def infer_abbr(class_type):\r\n if not inspect.isclass(class_type):\r\n raise TypeError(\r\n f'class_type must be a type, but got {type(class_type)}')\r\n if hasattr(class_type, '_abbr_'):\r\n return class_type._abbr_\r\n if issubclass(class_type, _InstanceNorm): # IN is a subclass of BN\r\n return 'in'\r\n elif issubclass(class_type, _BatchNorm):\r\n return 'bn'\r\n elif issubclass(class_type, nn.GroupNorm):\r\n return 'gn'\r\n elif issubclass(class_type, nn.LayerNorm):\r\n return 'ln'\r\n else:\r\n class_name = class_type.__name__.lower()\r\n if 'batch' in class_name:\r\n return 'bn'\r\n elif 'group' in class_name:\r\n return 'gn'\r\n elif 'layer' in class_name:\r\n return 'ln'\r\n elif 'instance' in class_name:\r\n return 'in'\r\n else:\r\n return 'norm'",
"def _load_workflow( self, workflow_id ):\n id = self.app.security.decode_id( workflow_id )\n stored = self.app.model.context.query( self.app.model.StoredWorkflow ).get( id )\n return stored.latest_workflow",
"def get_activity_name(activityType):\n return \"activity_\" + activityType",
"def task_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"task_type\")",
"def _get_task_type(self):\n\n if self.num_classes == 0:\n return ps_pb2.Type(\n one_dimensional_regression=ps_pb2.OneDimensionalRegression(\n label=self._label_key))\n if self.num_classes == 2:\n return ps_pb2.Type(\n binary_classification=ps_pb2.BinaryClassification(\n label=self._label_key))\n return ps_pb2.Type(\n multi_class_classification=ps_pb2.MultiClassClassification(\n label=self._label_key))",
"def type_name(self) -> str:\n return self.head.__class__.__name__"
] | [
"0.71542835",
"0.67883533",
"0.63764113",
"0.6033745",
"0.59493256",
"0.58950895",
"0.58703315",
"0.58070356",
"0.57632667",
"0.5750004",
"0.57477725",
"0.57462007",
"0.5737046",
"0.5728862",
"0.56926966",
"0.56816906",
"0.56768847",
"0.56578654",
"0.56526405",
"0.56239957",
"0.5604851",
"0.5601003",
"0.5598383",
"0.55656505",
"0.5563198",
"0.5545406",
"0.5536603",
"0.55282855",
"0.55232894",
"0.55206907"
] | 0.78413796 | 0 |
Given an workflow subclass name as workflow_name, attempt to lazy load the class when needed | def import_workflow_class(workflow_name):
    try:
        module_name = "workflow." + workflow_name
        importlib.import_module(module_name)
        return True
    except ImportError:
        return False
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_workflow_object(\n workflow_name, settings, logger, client, token, decision, maximum_page_size\n):\n module_name = \"workflow.\" + workflow_name\n module_object = importlib.import_module(module_name)\n workflow_class = getattr(module_object, workflow_name)\n # Create the object\n workflow_object = workflow_class(\n settings, logger, client, token, decision, maximum_page_size\n )\n return workflow_object",
"def _get_cls(name, cls):\n return cls.get(name, None) if isinstance(cls, dict) else cls",
"def test_workflow_class_discovery():\n config = {\n \"workflow-name\": \"tests.workflows.test_workflow.CustomWorkflow\",\n \"cluster-type\": CLUSTER_TYPE\n }\n \n template_dir = tempfile.mkdtemp(suffix=\"test-workflow-discovery-template\")\n with open(f\"{template_dir}/workflow.yaml\", 'w') as f:\n yaml.dump(config, f)\n \n _execution_dir, workflow = launch_flow(template_dir, 1)\n assert isinstance(workflow, CustomWorkflow)\n assert workflow.execute.didrun",
"def import_class(self, class_name):\n internal_class_name = class_name.split(\".\")[-1][:-2]\n class_path = class_name.split()[-1].split(\".\")[:-1]\n class_path[0] = class_path[0][1:]\n class_module_path = \".\".join(class_path)\n if internal_class_name in self._project.job_type.job_class_dict:\n module_path = self._project.job_type.job_class_dict[internal_class_name]\n if class_module_path != module_path:\n state.logger.info(\n f'Using registered module \"{module_path}\" instead of custom/old module \"{class_module_path}\" to'\n f' import job type \"{internal_class_name}\"!'\n )\n else:\n module_path = class_module_path\n return getattr(\n importlib.import_module(module_path),\n internal_class_name,\n )",
"def get_cls_for(obj_type):\n return {\n \"workflow\": Workflow\n }[obj_type]",
"def get_class(name, base_class, *modules):\n for cls in iter_classes(base_class, *modules, class_filter=lambda x: x.__module__.split('.')[-1] == name):\n return cls\n return None",
"def GetScaffolderObjectByName(\n cls, scaffolder_name) -> Optional[interface.Scaffolder]:\n scaffolder_class = cls._scaffolder_classes.get(\n scaffolder_name.lower(), None)\n if scaffolder_class:\n return scaffolder_class()\n return None",
"def _find_class(self, class_name: str) -> Type:\n return self.class_resolver.find_class(class_name)",
"def load_workflow(resource, workflow_name):\n try:\n playbook_file = open(resource, 'r')\n except (IOError, OSError) as e:\n logger.error('Could not load workflow from {0}. Reason: {1}'.format(resource, format_exception_message(e)))\n return None\n else:\n with playbook_file:\n workflow_loaded = playbook_file.read()\n try:\n playbook_json = json.loads(workflow_loaded)\n playbook_name = playbook_json['name']\n workflow_json = next(\n (workflow for workflow in playbook_json['workflows']\n if workflow['name'] == workflow_name), None)\n if workflow_json is None:\n logger.warning('Workflow {0} not found in playbook {0}. '\n 'Cannot load.'.format(workflow_name, playbook_name))\n return None\n workflow = Workflow.create(workflow_json)\n return playbook_name, workflow\n except ValueError as e:\n logger.error('Cannot parse {0}. Reason: {1}'.format(resource, format_exception_message(e)))\n except (InvalidInput, UnknownApp, UnknownAppAction, UnknownFilter, UnknownFlag) as e:\n logger.error('Error constructing workflow {0}. Reason: {1}'.format(workflow_name,\n format_exception_message(e)))\n return None\n except KeyError as e:\n logger.error('Invalid Playbook JSON format. Details: {}'.format(e))\n return None",
"def _load_workflow( self, workflow_id ):\n id = self.app.security.decode_id( workflow_id )\n stored = self.app.model.context.query( self.app.model.StoredWorkflow ).get( id )\n return stored.latest_workflow",
"def get_class(self, name):\n raise NotImplementedError",
"def get_subclass_from_name(cls, name):\n\n for subclass in cls.all_named_subclasses():\n if subclass.UI_NAME == name:\n return subclass\n\n return None",
"def get_class_from_string(self, classname, module):\n\n myclass = None\n try:\n # Meta language for dinamically import\n myclass = getattr(module, classname)\n except AttributeError as e:\n logger.critical(\"Failed to load resource: \" + str(e))\n\n return myclass",
"def _safe_pickle_load(module_name, class_name, item_name):\n module = import_module(module_name)\n enum_class = getattr(module, class_name)\n return getattr(enum_class, item_name)",
"def get_class(self, name):\n return self.host.get_class(name)",
"def get_class(fileName):\n module = __import__(fileName)\n return getattr(module, fileName)",
"def import_class(implementation_filename, base_class):\n\n\n impl_dir, impl_filename = os.path.split(implementation_filename)\n module_name, _ = os.path.splitext(impl_filename)\n\n try:\n sys.path.insert(0, impl_dir)\n fp, filename, description = imp.find_module(module_name)\n module = imp.load_module(module_name, fp, filename, description)\n logging.debug(f\"trying to import fp {fp} \"\n f\" filename {filename} \"\n f\" description {description} \")\n for name in dir(module):\n logging.debug(f\"name {name}\")\n obj = getattr(module, name)\n logging.debug(f\"obj {obj}\")\n try:\n if (type(obj) == type(base_class)\n and issubclass(obj, base_class)\n and obj != base_class):\n return obj\n\n except TypeError as excpt:\n \"\"\" issubclass will throw TypeError for some imports \"\"\"\n logging.debug(f\"caught {excpt}\")\n\n raise ValueError(\"No subclass of {0} in {1}\".format(\n base_class.__name__, implementation_filename))\n\n finally:\n sys.path.pop(0)",
"def load_workflow_by_filename(self, workflow_filename,\n workflow_name=None, workflow_name_prefix=\"\", workflow_name_suffix=\"\"):\n if not self._galaxy_instance:\n raise RuntimeError(\"WorkflowLoader not initialized\")\n self._logger.debug(\"Loading workflow definition from file: %s\", workflow_filename)\n with open(workflow_filename) as f:\n wf_json = _json.load(f)\n self._logger.debug(\"Workflow definition loaded from file: done\")\n wf_json[\"name\"] = \"-\".join([workflow_name_prefix,\n (workflow_name if workflow_name else wf_json[\"name\"]).replace(\" \", \"\"),\n workflow_name_suffix])\n self._logger.debug(\"Uploading the Workflow to the Galaxy instance ...\")\n wf = self._galaxy_instance.workflows.import_new(wf_json)\n self._logger.debug(\"Uploading the Workflow to the Galaxy instance: done\")\n self._workflows[wf.id] = wf\n return wf",
"def getClass(strname):\n \n modulename, classname = strname.split('.')\n classname = classname.split('(')[0]\n if hasattr(Analysis,modulename):\n module_ = getattr(Analysis,modulename)\n class_ = getattr(module_,classname)\n else:\n module_ = getattr(Summary,modulename)\n class_ = getattr(module_,classname)\n \n return class_",
"def find_class(self, class_name: str) -> Type:\n pass",
"def import_classifier(name):\n classinput=open(name,'rb')\n main_class=load(classinput)\n classinput.close()\n return main_class",
"def __init__(self, workflow):\n self.workflow = workflow",
"def load_workflow_from_file(self, path, workflow_name, name_override=None, playbook_override=None):\n with open(path, 'r') as playbook_file:\n playbook_loaded = playbook_file.read()\n try:\n json_in = json.loads(playbook_loaded)\n except json.JSONDecodeError:\n logger.error('Cannot parse {}'.format(path))\n else:\n playbook_name = playbook_override if playbook_override else json_in['name']\n for workflow in (workflow_ for workflow_ in json_in['workflows'] if workflow_['name'] == workflow_name):\n if workflow['name'] == workflow_name:\n workflow_name = name_override if name_override else workflow['name']\n workflow['name'] = workflow_name\n key = _WorkflowKey(playbook_name, workflow_name)\n self.__add_workflow(key, workflow_name, workflow)\n self.add_child_workflows()\n break\n else:\n logger.warning('Workflow {0} not found in playbook {0}. '\n 'Cannot load.'.format(workflow_name, playbook_name))\n return False\n return True",
"def find_task_class(task_module):\n\n task = None\n\n for obj_key, obj_value in task_module.__dict__.items():\n\n if obj_key in BASE_TASK_CLASSES:\n continue\n elif hasattr(task_module.__dict__[obj_key], '__bases__'):\n if task_module.__dict__[obj_key].__bases__[0] in [Task]:\n task = task_module.__dict__[obj_key]\n break\n\n return task",
"def get_class(classname):\n parts = classname.split('.')\n module = '.'.join(parts[:-1])\n m = __import__(module)\n for comp in parts[1:]:\n m = getattr(m, comp) \n return m",
"def find_subclass(cls, name):\r\n if name == cls.__name__:\r\n return cls\r\n for sc in cls.__sub_classes__:\r\n r = sc.find_subclass(name)\r\n if r != None:\r\n return r",
"def get_class(klass, kind):\n return getattr(sys.modules['model'], kind, None)",
"def get_implementation(parent_class, child_class_name):\n for child_class in parent_class.__subclasses__():\n if child_class.__name__ == child_class_name:\n return child_class()\n return None",
"def get_workflow(self, playbook_name, workflow_name):\n key = _WorkflowKey(playbook_name, workflow_name)\n if key in self.workflows:\n return self.workflows[key]\n return None",
"def get_class_base_name(name):\n if name is not None:\n return get_class_name(name)\n else:\n return 'object'"
] | [
"0.6176327",
"0.601499",
"0.5917754",
"0.5902988",
"0.58291084",
"0.56954265",
"0.563359",
"0.55178666",
"0.55076855",
"0.55039936",
"0.5494821",
"0.53802574",
"0.5347683",
"0.532594",
"0.5325837",
"0.5311453",
"0.528593",
"0.5283638",
"0.527426",
"0.52651054",
"0.5245763",
"0.52399814",
"0.52374405",
"0.52150637",
"0.5214022",
"0.5187188",
"0.5186734",
"0.51775736",
"0.51433194",
"0.51195174"
] | 0.68432015 | 0 |
Given a workflow_name, and if the module class is already imported, create an object an return it | def get_workflow_object(
    workflow_name, settings, logger, client, token, decision, maximum_page_size
):
    module_name = "workflow." + workflow_name
    module_object = importlib.import_module(module_name)
    workflow_class = getattr(module_object, workflow_name)
    # Create the object
    workflow_object = workflow_class(
        settings, logger, client, token, decision, maximum_page_size
    )
    return workflow_object
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def import_workflow_class(workflow_name):\n try:\n module_name = \"workflow.\" + workflow_name\n importlib.import_module(module_name)\n return True\n except ImportError:\n return False",
"def get_module(name) -> Module:\n if isinstance(name, str):\n obj = get_object(name)\n else:\n obj = name\n\n name = obj.__name__\n if name in modules:\n return modules[name]\n else:\n module = Module(obj)\n modules[name] = module\n return module",
"def _create_module(name):\n module = new.module(name)\n sys.modules[name] = module\n return module",
"def createSingleModuleWorkflow(module,name):\n\n moduleType = module.getType()\n moduleName = name\n\n workflow = Workflow()\n step = StepDefinition(moduleType+'_step')\n step.addModule(module)\n moduleInstance = step.createModuleInstance(moduleType,moduleName)\n\n step.addParameter(moduleInstance.parameters.getInput())\n workflow.addParameter(moduleInstance.parameters.getInput())\n\n workflow.addStep(step)\n stepInstance = workflow.createStepInstance(moduleType+'_step',moduleName+'_step')\n\n # Propagate the module input parameters to the workflow level\n moduleInstance.linkParameterUp(moduleInstance.parameters.getInput())\n stepInstance.linkParameterUp(moduleInstance.parameters.getInput())\n\n workflow.setName(name)\n workflow.setDescription('Single module workflow from '+moduleType+' type module')\n workflow.setDescrShort(moduleType+' workflow')\n return workflow",
"def factory(module, name):\n\n class DummyClass(object):\n \"\"\"\n _DummyClass_\n Dummy class to return when a cms class cannot be imported \n\n \"\"\"\n def __init__(self, module, name='', *args, **kwargs):\n self.__module = module\n self.__name = name\n self.__d = dict()\n\n def __setitem__(self, key, value):\n self.__d[key] = value\n\n def __getitem__(self, item):\n return self.__d[item]\n\n def __call__(self, *args, **kwargs):\n pass\n\n def __repr__(self):\n return \"{module}.{name}\".format(module=self.__module, name=self.__name)\n\n return DummyClass",
"def __init__(self, module_name,class_name):\n\n try:\n self.module = importlib.import_module(module_name)\n self.get_class_object = getattr(self.module,class_name)\n \n except:\n print(\"Failed to import the module {} from {}\".format(class_name,module_name))",
"def make_model(name):\n module_path = '{0}.{1}'.format(matchers.__name__, name)\n module = __import__(module_path, fromlist=[''])\n classes = inspect.getmembers(module, inspect.isclass)\n classes = [c for c in classes if c[1].__module__ == module_path]\n classes = [c[1] for c in classes if c[0].lower() == name.lower()]\n assert len(classes) == 1\n return classes[0]",
"def create_module(self, spec):\n cls = type(self)\n if Registry.has_appname(cls.appname):\n if spec.name in Registry[cls.appname]:\n modulename, _ = dotpath_split(spec.name)\n ModuleClass = Registry[cls.appname][spec.name]\n docstr = inspect.getdoc(ModuleClass)\n module = ModuleClass(modulename, doc=docstr)\n return module\n else:\n if spec.name == cls.appname:\n return self.package_module(spec.name)\n appname, appspace, *remainders = spec.name.split(consts.QUALIFIER, 2)\n if appname == cls.appname and appspace in cls.appspaces:\n return self.package_module(spec.name)\n return None\n return None",
"def _get_module(self, name):\n module = self._modules.get(name)\n if not module:\n module = importlib.import_module(name)\n self._modules[name] = module\n return module",
"def newModule(name, swipl):\n if isinstance(name, str):\n name = Atom(name, swipl)\n\n return swipl.PL_new_module(name.handle)",
"def get_module(self, name: str) -> ModuleInstance:\n return self.modules[name]",
"def create_class_from_strings( self, module_name, class_name):\r\n if not( self.logger is None ):\r\n self.logger.debug( \"create class {module_name} {class_name}\" )\r\n\r\n# print( \"create class \" + module_name + \" \" + class_name )\r\n\r\n a_class = getattr( importlib.import_module(module_name), class_name )\r\n instance = a_class( )\r\n return instance",
"def make_module(self, name, newpath=None):\r\n module = imp.new_module(name)\r\n module.__file__ = self.filename\r\n if newpath:\r\n module.__path__ = newpath\r\n sys.modules[name] = module\r\n exec self.code in vars(module)\r\n return module",
"def import_class(self, class_name):\n internal_class_name = class_name.split(\".\")[-1][:-2]\n class_path = class_name.split()[-1].split(\".\")[:-1]\n class_path[0] = class_path[0][1:]\n class_module_path = \".\".join(class_path)\n if internal_class_name in self._project.job_type.job_class_dict:\n module_path = self._project.job_type.job_class_dict[internal_class_name]\n if class_module_path != module_path:\n state.logger.info(\n f'Using registered module \"{module_path}\" instead of custom/old module \"{class_module_path}\" to'\n f' import job type \"{internal_class_name}\"!'\n )\n else:\n module_path = class_module_path\n return getattr(\n importlib.import_module(module_path),\n internal_class_name,\n )",
"def _createModuleObj(self):\n ModuleTimeWeakening.__init__(self)\n return",
"def my_import(module_name, class_name):\n\n\t# load the module, will raise ImportError if module cannot be loaded\n\tm = importlib.import_module(module_name)\n\n\t# get the class, will raise AttributeError if class cannot be found\n\tc = getattr(m, class_name)\n\n\treturn c",
"def _createModuleObj(self):\n raise NotImplementedError(\"Implement in derived class.\")",
"def import_classifier(name):\n classinput=open(name,'rb')\n main_class=load(classinput)\n classinput.close()\n return main_class",
"def create_instance(self,name):\n print \"INFO : new %s\" % name\n return self.get_class(name)()",
"def importer(name) -> ContextType:\n try:\n # try importing as a module (using importlib from standard import mechanism)\n return __import__(name, globals=globals(), locals=locals())\n except:\n route_steps = name.split(\".\")\n route_steps = route_steps[1:] if not route_steps[0] else route_steps\n is_name_module, is_name_package = is_module(name), is_package(name)\n assert is_name_module or is_name_package\n file_path = os.path.join(*route_steps)\n if is_name_module:\n file_path = f\"{file_path}.py\"\n else: # name is definitely a package (because of the assertion)\n file_path = os.path.join(file_path, \"__init__.py\")\n spec = importlib.util.spec_from_file_location(name, file_path)\n foo = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(foo)\n return foo",
"def _new_instance(self):\n return self.__class__(self._fmodule)",
"def _new_instance(self):\n return self.__class__(self._fmodule)",
"def _createModuleObj(self):\n ModuleInitialCondition.__init__(self)",
"def _createModuleObj(self):\n ModuleFaultCohesiveKin.__init__(self)\n return",
"def get_factory(self, class_name):\n if class_name in self._class_name_class_dict:\n return self._class_name_class_dict[class_name]()\n else:\n raise ModuleNotFoundError(\"Module should be in {}\".format(self.factory_names))",
"def import_object(import_str, *args, **kw):\n try:\n __import__(import_str)\n return sys.modules[import_str]\n except ImportError:\n cls = import_class(import_str)\n return cls(*args, **kw)",
"def getClass(strname):\n \n modulename, classname = strname.split('.')\n classname = classname.split('(')[0]\n if hasattr(Analysis,modulename):\n module_ = getattr(Analysis,modulename)\n class_ = getattr(module_,classname)\n else:\n module_ = getattr(Summary,modulename)\n class_ = getattr(module_,classname)\n \n return class_",
"def import_object(name: str) -> Any:\n if name.count(\".\") == 0:\n return __import__(name)\n\n parts = name.split(\".\")\n obj = __import__(\".\".join(parts[:-1]), fromlist=[parts[-1]])\n try:\n return getattr(obj, parts[-1])\n except AttributeError:\n raise ImportError(\"No module named %s\" % parts[-1])",
"def load_from_module_name(\n cls, module_name: str\n ) -> \"ThreatExchangeExtensionManifest\":\n try:\n module = importlib.import_module(module_name)\n except (ImportError, ValueError):\n raise ValueError(f\"No such module '{module_name}'\")\n\n try:\n manifest = module.TX_MANIFEST\n except AttributeError:\n raise ValueError(f\"Module is missing TX_MANIFEST\")\n\n if not isinstance(manifest, cls):\n raise ValueError(f\"TX_MANIFEST is not a {cls.__name__}!\")\n return manifest",
"def create_module(cls, *args, **kwargs): # real signature unknown\n pass"
] | [
"0.72673905",
"0.65685153",
"0.6495803",
"0.64210284",
"0.637271",
"0.6287474",
"0.6183001",
"0.6079765",
"0.6058139",
"0.6051761",
"0.60094583",
"0.59739107",
"0.594022",
"0.59092087",
"0.5896353",
"0.5889849",
"0.57805514",
"0.5768779",
"0.57517135",
"0.57472",
"0.5737868",
"0.5737868",
"0.57294065",
"0.5712564",
"0.56817675",
"0.56809807",
"0.566959",
"0.5662074",
"0.5656885",
"0.56283563"
] | 0.75206375 | 0 |
Returns the unstructured units of the RG.50402 series These interviews did not have any indi | def getUnstructured042_special_Units(filename):
    doc = Document(filename)
    units = list()
    # all interviews start with a header
    isHeader = True
    # iterate over all paragraphs to get text units
    for para in doc.paragraphs:
        paragraph = para.text
        # ensure paragraph is not just empty line
        hasText = paragraph.lstrip()
        # ensure it is not an empty line
        if hasText:
            units.append({'unit':paragraph})
    return units
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getUnstructured_50_233_0083_Units(filename):\n doc = Document(filename)\n \n \n units=[]\n # iterate over all paragraphs to get text units\n for para in doc.paragraphs:\n paragraph = para.text\n if len(paragraph.strip())>0:\n units.append({'unit':paragraph.strip()})\n\n \n \n return units",
"def unit_of_measure(self):\n try:\n uom = self.metadata['geosoft']['dataset']['geo:unitofmeasurement']['#text']\n except KeyError:\n uom = ''\n return uom",
"def _getunits(x):\n if pb.units.has_units(x):\n \n units = x.units\n \n else:\n \n units = None\n \n return units",
"def getUnstructured042Units(filename):\n doc = Document(filename)\n units = list()\n # all interviews start with a header\n isHeader = True\n # iterate over all paragraphs to get text units\n for para in doc.paragraphs:\n paragraph = para.text\n \n # ensure paragraph is not just empty line\n hasText = paragraph.lstrip()\n # ensure it is not an empty line\n if hasText:\n\n if isHeader:\n if 'beep' in paragraph.lower():\n isHeader = False\n else:\n # marks the end of the interview\n if 'USHMM Archives' in paragraph or \"wentworth films\" in paragraph.lower() :\n break\n elif 'beep' not in paragraph.lower():\n units.append({'unit':paragraph})\n \n return units",
"def unitsofmeasure(self):\n return self._unitsofmeasure",
"def getUnstructured926Units(filename):\n doc = Document(filename)\n units = list()\n n = re.compile('track [0-9][0-9]')\n\n # iterate over all paragraphs to get text units\n for para in doc.paragraphs:\n paragraph = para.text\n \n # ensure paragraph is not just empty line\n hasText = paragraph.lstrip()\n # ensure it is not an empty line\n if hasText:\n isHeader = n.match(paragraph.lower())\n\n # marks the end of the interview\n if \"story preservation initiative\" in paragraph.lower() or \"copyright\" in paragraph.lower() :\n break\n elif not isHeader:\n units.append({'unit':paragraph})\n # in case it is a monologue\n if not units:\n units = get926Monologue(filename)\n\n return units",
"def units(self):\n pass",
"def _get_units(self):\n #assert self.ser.isOpen()\n\n self.serial_connection.write('UNI' + self.CR + self.LF)\n acknowledgement = self.serial_connection.readline()\n self._check_acknowledgement(acknowledgement)\n\n self.serial_connection.write(self.ENQ)\n unit = self.MEASUREMENT_UNITS[self.serial_connection.readline().rstrip(self.LF).rstrip(self.CR)]\n\n self.serial_connection.write(self.CR + self.LF)\n\n return unit",
"def units(self):\n return self._units",
"def units(self):\n return self._units",
"def get_units(self):\n return str(self._modeler.GetModelUnits())",
"def unit_of_measurement(self):\n return self.values.primary.units",
"def unit_of_measurement(self):\n return self._metadata[1]",
"def unit_of_measurement(self):\n return self._units",
"def enumerateUnits(self):\n return self._lowLevelEnumerateUnits()",
"def unit_of_measurement(self):\n return self.var_units",
"def get_units(self):\r\n msg = struct.pack('>2B', 56, 14)\r\n response = self.query(msg)\r\n\r\n if response[1] == 2:\r\n units = 'A'\r\n to_nm_multiplier = 1 / 10\r\n elif response[1] == 1:\r\n units = 'nm'\r\n to_nm_multiplier = 1\r\n elif response[1] == 0:\r\n units = 'um'\r\n to_nm_multiplier = 1000\r\n else:\r\n raise ValueError('Units not recognised.')\r\n\r\n # Save results locally too for quick re-use\r\n self._current_units = units\r\n self._current_to_nm_multiplier = to_nm_multiplier\r\n\r\n return units, to_nm_multiplier",
"def get_units(self, obj: Dimension) -> [Unit]:\n try:\n return obj.units()\n except KeyError as e:\n logging.error(str(e))\n return []",
"def convert_units(self):\n for prod in (\"ier\", \"ier_inc_rain\"):\n self.data[prod].data[:] /= 1e6",
"def _get_units(self, q) -> unyt.Unit:\n try:\n units = q.units\n except AttributeError:\n units = unyt.dimensionless\n return unyt.Unit(units, registry=self.registry)",
"def totalUnits(self):\n\t\treturn self.units",
"def getUnits(self):\n\n return len(self.units)",
"def get_units(cls, wkt):\n if HAS_GDAL:\n return SpatialReference(wkt).units\n else:\n m = cls.units_regex.match(wkt)\n return m.group('unit'), m.group('unit_name')",
"def getUnits(self):\n return _libsbml.Species_getUnits(self)",
"def unit_of_measurement(self):\n if self._xfinity_data.unit is not None:\n return self._xfinity_data.unit",
"def unit_of_measurement(self):\n return None",
"def get_units(self, variable):\n try:\n units = self.dataset[variable].units\n return units\n except:\n return None",
"def xunits(self):\n return self._kml['xunits']",
"def raw_units(self) -> str:\n if self._node.uom == 'F':\n return TEMP_FAHRENHEIT\n if self._node.uom == 'C':\n return TEMP_CELSIUS\n return self._node.uom",
"def unit_of_measurement(self):\n return self._tasmota_entity.unit"
] | [
"0.6984219",
"0.6611555",
"0.65701187",
"0.6565472",
"0.65496695",
"0.6547718",
"0.6372666",
"0.63284683",
"0.6312669",
"0.6312669",
"0.6307095",
"0.63054526",
"0.6286136",
"0.62754107",
"0.6247889",
"0.62228376",
"0.61609495",
"0.61441267",
"0.61164457",
"0.6112991",
"0.6109417",
"0.6094706",
"0.6028782",
"0.6028439",
"0.6021296",
"0.60036045",
"0.59852564",
"0.596437",
"0.5964251",
"0.5948934"
] | 0.68629277 | 1 |
Returns the unstructured units of the RG-50.233.0083 documents. These interviews did not have any indi | def getUnstructured_50_233_0083_Units(filename):
doc = Document(filename)
units=[]
# iterate over all paragraphs to get text units
for para in doc.paragraphs:
paragraph = para.text
if len(paragraph.strip())>0:
units.append({'unit':paragraph.strip()})
return units | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getUnstructured042_special_Units(filename):\n doc = Document(filename)\n units = list()\n # all interviews start with a header\n isHeader = True\n # iterate over all paragraphs to get text units\n for para in doc.paragraphs:\n paragraph = para.text\n \n # ensure paragraph is not just empty line\n hasText = paragraph.lstrip()\n # ensure it is not an empty line\n if hasText:\n\n \n units.append({'unit':paragraph})\n return units",
"def getUnstructured042Units(filename):\n doc = Document(filename)\n units = list()\n # all interviews start with a header\n isHeader = True\n # iterate over all paragraphs to get text units\n for para in doc.paragraphs:\n paragraph = para.text\n \n # ensure paragraph is not just empty line\n hasText = paragraph.lstrip()\n # ensure it is not an empty line\n if hasText:\n\n if isHeader:\n if 'beep' in paragraph.lower():\n isHeader = False\n else:\n # marks the end of the interview\n if 'USHMM Archives' in paragraph or \"wentworth films\" in paragraph.lower() :\n break\n elif 'beep' not in paragraph.lower():\n units.append({'unit':paragraph})\n \n return units",
"def getUnstructured926Units(filename):\n doc = Document(filename)\n units = list()\n n = re.compile('track [0-9][0-9]')\n\n # iterate over all paragraphs to get text units\n for para in doc.paragraphs:\n paragraph = para.text\n \n # ensure paragraph is not just empty line\n hasText = paragraph.lstrip()\n # ensure it is not an empty line\n if hasText:\n isHeader = n.match(paragraph.lower())\n\n # marks the end of the interview\n if \"story preservation initiative\" in paragraph.lower() or \"copyright\" in paragraph.lower() :\n break\n elif not isHeader:\n units.append({'unit':paragraph})\n # in case it is a monologue\n if not units:\n units = get926Monologue(filename)\n\n return units",
"def unit_of_measure(self):\n try:\n uom = self.metadata['geosoft']['dataset']['geo:unitofmeasurement']['#text']\n except KeyError:\n uom = ''\n return uom",
"def getTextUnits_old(filename):\n doc = Document(filename)\n units = list()\n \n unit_tracker = defaultdict(int)\n \n non_units = [\"name:\", \"date:\", \"date\", \"series\", \"transcriber\", \"thesis:\", \"currently:\", \"note\", \"comment\", \"grandparents:\", \"transcript:\", \"note:\"]\n\n ongoing_answer = \"\"\n\n \n\n # iterate over all paragraphs to get text units\n for para in doc.paragraphs:\n paragraph = para.text\n # ensure it is not an empty line\n if len(paragraph.strip())>0:\n # get first word\n formatted_para = paragraph.lstrip()\n unit_type = formatted_para.partition(' ')[0]\n # in case it is in the format of e.g 'George Salton:'\n \n # e.g AJ:, B.\n m = re.compile('[A-Z][A-Z]?[.|:|-]')\n type1 = m.match(unit_type)\n\n # e.g [WJ]\n n = re.compile('\\[[A-Z][A-Z]\\]')\n typer2= n.match(unit_type)\n\n # timestamp e.g 15:01:27\n o = re.compile('[0-9]?[0-9]:[0-9][0-9]:[0-9][0-9]')\n type3 = o.match(unit_type) \n \n \n \n\n\n # else parse them according to formatting guidelines\n if (\"Question:\" in unit_type or\n type1 or\n \"Answer:\" in unit_type or \n typer2 or\n type3):\n\n # check if there was an ongoing paragraph\n #units.append({'unit': paragraph})\n \n if ongoing_answer: \n units.append({'unit': ongoing_answer})\n\n # reset it\n ongoing_answer = \"\"\n ongoing_answer += paragraph\n \n # update tracker\n unit_tracker[unit_type] += 1\n\n elif (unit_type.endswith(':') and\n unit_type.lower() not in non_units and\n unit_type[:-1].isalpha()):\n \n \n units.append({'unit': paragraph})\n # update tracker\n unit_tracker[unit_type] += 1\n \n \n # backup method,in case it is in the format of e.g 'George Salton:'\n elif len(paragraph.split()) > 3:\n backup_type = paragraph.split()[1]\n backup_two = paragraph.split()[2]\n\n if ((':' in backup_type or backup_type.lower() not in non_units) or\n (':' in backup_two or backup_two.lower() not in non_units)): \n \n if ((paragraph.strip()[0].islower() and len(paragraph.strip()) > 5) or (paragraph.strip()[-1] in ['.','!','?'])) and len(units) >0:\n units[-1]['unit']=units[-1]['unit']+ ' '+paragraph\n # update tracker\n unit_tracker[unit_type] += 1\n else:\n units.append({'unit':paragraph})\n unit_tracker[unit_type] += 1\n # if it is none of these cases, maybe there is an ongoing answer\n \n elif (ongoing_answer and ongoing_answer != paragraph):\n \n if not any(non_unit in paragraph.lower() for non_unit in non_units):\n ongoing_answer += paragraph\n else:\n units.append({'unit':paragraph})\n\n if len(unit_tracker) < 2:\n return []\n \n return units",
"def enumerateUnits(self):\n return self._lowLevelEnumerateUnits()",
"def unitsofmeasure(self):\n return self._unitsofmeasure",
"def test_170329_notimp(self):\n spc = parser(get_file('PTSDY2_notimp.txt'))\n # spc.draw_outlooks()\n outlook = spc.get_outlook('CATEGORICAL', 'MRGL')\n self.assertAlmostEqual(outlook.geometry.area, 110.24, 2)",
"def _getunits(x):\n if pb.units.has_units(x):\n \n units = x.units\n \n else:\n \n units = None\n \n return units",
"def units(self):\n return self._units",
"def units(self):\n return self._units",
"def getNumUnits(self):\n return _libsbml.UnitDefinition_getNumUnits(self)",
"def test_080731_invalid(self):\n spc = parser(get_file('PTSDY1_biggeom.txt'))\n # spc.draw_outlooks()\n outlook = spc.get_outlook('WIND', 'SIGN', 1)\n self.assertAlmostEquals(outlook.geometry.area, 15.82, 2)\n self.assertEquals(len(spc.warnings), 1)",
"def load_unitsm(self):\n self.unit_file = self.path+'units.m'\n self.unit_dic = self.load_m(self.unit_file) #actual reading routine\n self.psix=self.unit_dic['psi_x']\n self.eq_x_r = self.unit_dic['eq_x_r']\n self.eq_x_z = self.unit_dic['eq_x_z']\n self.eq_axis_r = self.unit_dic['eq_axis_r']\n self.eq_axis_z = self.unit_dic['eq_axis_z']\n self.eq_axis_b = self.unit_dic['eq_axis_b']\n self.sml_dt = self.unit_dic['sml_dt']\n self.sml_wedge_n = self.unit_dic['sml_wedge_n']\n self.diag_1d_period = self.unit_dic['diag_1d_period']",
"def _get_units_object(self, units):\n if isinstance(units, cellml_units):\n # We're done\n pass\n else:\n units = amara_parse_cellml(unicode(units))\n assert isinstance(units, cellml_units)\n return units",
"def containsUndeclaredUnits(self):\n return _libsbml.SBase_containsUndeclaredUnits(self)",
"def getUnits(self):\n\n return len(self.units)",
"def units(self):\n pass",
"def get_gds_units(infile):\n close = True\n if hasattr(infile, \"__fspath__\"):\n infile = open(infile.__fspath__(), \"rb\")\n elif isinstance(infile, (basestring, Path)):\n infile = open(infile, \"rb\")\n else:\n close = False\n unit = precision = None\n for rec_type, data in _raw_record_reader(infile):\n # UNITS\n if rec_type == 0x03:\n db_user = _eight_byte_real_to_float(data[4:12])\n db_meters = _eight_byte_real_to_float(data[12:])\n unit = db_meters / db_user\n precision = db_meters\n break\n if close:\n infile.close()\n return (unit, precision)",
"def get_units(self):\n return str(self._modeler.GetModelUnits())",
"def xunits(self):\n return self._kml['xunits']",
"def get_units(cls, wkt):\n if HAS_GDAL:\n return SpatialReference(wkt).units\n else:\n m = cls.units_regex.match(wkt)\n return m.group('unit'), m.group('unit_name')",
"def getNoWells(self):\n #code begins here \n return self.__nwells",
"def test_unusual_misc():\n doc = CoNLL.conll2doc(input_str=RUSSIAN_SAMPLE)\n sentences = \"{:C}\".format(doc).split(\"\\n\\n\")\n assert len(sentences) == 2\n sentence = sentences[0].split(\"\\n\")\n assert len(sentence) == 14\n\n for word in sentence:\n pieces = word.split(\"\\t\")\n assert len(pieces) == 1 or len(pieces) == 10\n if len(pieces) == 10:\n assert all(piece for piece in pieces)",
"def unit_of_measurement(self):\n return self._units",
"def totalUnits(self):\n\t\treturn self.units",
"def get_uncrawled_docs(self):\n\n return self.client[self.db]['medical_corpus'].find({\n 'crawled': 'No'\n })",
"def InterBurstGapUnits(self):\r\n\t\treturn self._get_attribute('interBurstGapUnits')",
"def test_170404_nogeom(self):\n # 26 Sep 2017, we can workaround this now\n spc = parser(get_file('PTSDY1_2002_nogeom.txt'))\n outlook = spc.get_outlook('TORNADO', '0.05')\n self.assertAlmostEqual(outlook.geometry.area, 8.76, 2)",
"def test_170522_nogeom(self):\n spc = parser(get_file('PTSDY1_nogeom2.txt'))\n # spc.draw_outlooks()\n outlook = spc.get_outlook('TORNADO', '0.02', 1)\n self.assertAlmostEqual(outlook.geometry.area, 2.90, 2)"
] | [
"0.72448844",
"0.6986209",
"0.6645673",
"0.59777975",
"0.5652428",
"0.5555099",
"0.5550419",
"0.5440101",
"0.54270256",
"0.53146136",
"0.53146136",
"0.53099555",
"0.52914363",
"0.5262525",
"0.52597654",
"0.525665",
"0.5223961",
"0.52127844",
"0.5186636",
"0.51777446",
"0.5112844",
"0.51022464",
"0.50963396",
"0.50853366",
"0.5079435",
"0.50770897",
"0.5031899",
"0.503023",
"0.50286925",
"0.50277674"
] | 0.71817046 | 1 |
Returns the text units for a given file in the non-core asset. Uses regex to identify common patterns, and applies specific backup methods depending on the interview shelfmark series in case the transcripts are highly unstructured | def getTextUnits_old(filename):
doc = Document(filename)
units = list()
unit_tracker = defaultdict(int)
non_units = ["name:", "date:", "date", "series", "transcriber", "thesis:", "currently:", "note", "comment", "grandparents:", "transcript:", "note:"]
ongoing_answer = ""
# iterate over all paragraphs to get text units
for para in doc.paragraphs:
paragraph = para.text
# ensure it is not an empty line
if len(paragraph.strip())>0:
# get first word
formatted_para = paragraph.lstrip()
unit_type = formatted_para.partition(' ')[0]
# in case it is in the format of e.g 'George Salton:'
# e.g AJ:, B.
m = re.compile('[A-Z][A-Z]?[.|:|-]')
type1 = m.match(unit_type)
# e.g [WJ]
n = re.compile('\[[A-Z][A-Z]\]')
typer2= n.match(unit_type)
# timestamp e.g 15:01:27
o = re.compile('[0-9]?[0-9]:[0-9][0-9]:[0-9][0-9]')
type3 = o.match(unit_type)
# else parse them according to formatting guidelines
if ("Question:" in unit_type or
type1 or
"Answer:" in unit_type or
typer2 or
type3):
# check if there was an ongoing paragraph
#units.append({'unit': paragraph})
if ongoing_answer:
units.append({'unit': ongoing_answer})
# reset it
ongoing_answer = ""
ongoing_answer += paragraph
# update tracker
unit_tracker[unit_type] += 1
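# catches any other single-word speaker label ending in ':' that is not a metadata header listed in non_units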
elif (unit_type.endswith(':') and
unit_type.lower() not in non_units and
unit_type[:-1].isalpha()):
units.append({'unit': paragraph})
# update tracker
unit_tracker[unit_type] += 1
# backup method,in case it is in the format of e.g 'George Salton:'
elif len(paragraph.split()) > 3:
backup_type = paragraph.split()[1]
backup_two = paragraph.split()[2]
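# backup heuristic: if the second or third token still looks like part of a speaker label, decide whether this paragraph continues the previous unit or starts a new one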
if ((':' in backup_type or backup_type.lower() not in non_units) or
(':' in backup_two or backup_two.lower() not in non_units)):
if ((paragraph.strip()[0].islower() and len(paragraph.strip()) > 5) or (paragraph.strip()[-1] in ['.','!','?'])) and len(units) >0:
units[-1]['unit']=units[-1]['unit']+ ' '+paragraph
# update tracker
unit_tracker[unit_type] += 1
else:
units.append({'unit':paragraph})
unit_tracker[unit_type] += 1
# if it is none of these cases, maybe there is an ongoing answer
elif (ongoing_answer and ongoing_answer != paragraph):
if not any(non_unit in paragraph.lower() for non_unit in non_units):
ongoing_answer += paragraph
else:
units.append({'unit':paragraph})
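# fewer than two distinct unit markers means the transcript is effectively unstructured, so return an empty list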
if len(unit_tracker) < 2:
return []
return units | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getUnstructured_50_233_0083_Units(filename):\n doc = Document(filename)\n \n \n units=[]\n # iterate over all paragraphs to get text units\n for para in doc.paragraphs:\n paragraph = para.text\n if len(paragraph.strip())>0:\n units.append({'unit':paragraph.strip()})\n\n \n \n return units",
"def getUnstructured926Units(filename):\n doc = Document(filename)\n units = list()\n n = re.compile('track [0-9][0-9]')\n\n # iterate over all paragraphs to get text units\n for para in doc.paragraphs:\n paragraph = para.text\n \n # ensure paragraph is not just empty line\n hasText = paragraph.lstrip()\n # ensure it is not an empty line\n if hasText:\n isHeader = n.match(paragraph.lower())\n\n # marks the end of the interview\n if \"story preservation initiative\" in paragraph.lower() or \"copyright\" in paragraph.lower() :\n break\n elif not isHeader:\n units.append({'unit':paragraph})\n # in case it is a monologue\n if not units:\n units = get926Monologue(filename)\n\n return units",
"def getUnstructured042_special_Units(filename):\n doc = Document(filename)\n units = list()\n # all interviews start with a header\n isHeader = True\n # iterate over all paragraphs to get text units\n for para in doc.paragraphs:\n paragraph = para.text\n \n # ensure paragraph is not just empty line\n hasText = paragraph.lstrip()\n # ensure it is not an empty line\n if hasText:\n\n \n units.append({'unit':paragraph})\n return units",
"def get_units(filepath):\n terms = ['kw', 'kwh', 'mw', 'mwh']\n sub_terms = ['power', ]\n full_patterns = [r'\\b' + term + r'\\b' for term in terms]\n sub_patterns = [term for term in sub_terms]\n patterns = full_patterns + sub_patterns\n \n converter = {'kw':'kW', 'kwh':'kWh', 'mw':'MW', 'mwh':'MWh', 'power':'W'}\n \n with open(filepath) as f:\n reader = csv.reader(f)\n data = list(reader)[:10] # don't look beyond line 10\n for row in data:\n for word in row:\n found = re.findall(\n r'|'.join(patterns),\n word, \n flags=re.IGNORECASE)\n if found:\n unique = set(found)\n if not len(unique) > 1:\n return converter[list(found)[0].lower()]\n return 'unknown_units'",
"def getUnstructured042Units(filename):\n doc = Document(filename)\n units = list()\n # all interviews start with a header\n isHeader = True\n # iterate over all paragraphs to get text units\n for para in doc.paragraphs:\n paragraph = para.text\n \n # ensure paragraph is not just empty line\n hasText = paragraph.lstrip()\n # ensure it is not an empty line\n if hasText:\n\n if isHeader:\n if 'beep' in paragraph.lower():\n isHeader = False\n else:\n # marks the end of the interview\n if 'USHMM Archives' in paragraph or \"wentworth films\" in paragraph.lower() :\n break\n elif 'beep' not in paragraph.lower():\n units.append({'unit':paragraph})\n \n return units",
"def units_info(units):\n\n components = units.split()\n\n exponent = None\n pieces = []\n for component in components:\n index = component.find('^')\n if not index == -1:\n exponent = component[index + 1:]\n component = component[:index + 1] + '{' + component[index + 1:]\n component = component + '}'\n\n index = component.find('-')\n if not index == -1:\n component = component[:index] + '^{' + component[index:]\n component = component + '}'\n \n pieces.append(component)\n\n tex_units = ''\n for piece in pieces[:-1]:\n tex_units = tex_units + piece + ' \\;'\n tex_units = tex_units + ' ' + pieces[-1] \n tex_units = '$' + tex_units + '$'\n\n return tex_units, exponent",
"def from_units(text):\n match = re.match(r'^([0-9\\.]+)(|[' + ''.join(UNITS[1:]) + r'])$', text)\n if not match:\n return None\n\n number = float(match.group(1))\n unit = match.group(2)\n return int(number * 1024**UNITS.index(unit))",
"def __rm_general(file_contents: str) -> str:\n\n new_file_contents = file_contents\n\n for regex in COBOL_FORMAT_RM_REGEXES:\n for match in re.finditer(regex, file_contents):\n match_str = match_to_str(match)\n new_file_contents = new_file_contents.replace(match_str, '')\n\n return new_file_contents",
"def read_units(self, fid):\r\n lin = self.read_line(fid) \r\n while lin[0] != ':':\r\n parts = lin.split()\r\n if parts[0]=='mass':\r\n self.mass = float(parts[1])\r\n elif parts[0]=='length':\r\n self.length = float(parts[1])\r\n elif parts[0]=='angle':\r\n self.angle = parts[1]\r\n lin = self.read_line(fid)\r\n return lin",
"def _get_magtot(self, file):\n #TODO implement\n return []",
"def simParser(filePath):\n\t\tresults = []\n\t\twith open(filePath + \".txt\", \"r\") as execFile:\n\t\t\tcontent = execFile.read()\n\n\t\t\tcycleStr = search(r'([cC]ycles.*?:\\s*)(\\d+)', content)\n\t\t\tassemblyInst = search(r'([iI]nstructions.*?:\\s*)(\\d+(.\\d+)?)', content)\n\n\t\t\tif cycleStr: results.append(cycleStr.group(2))\n\t\t\tif assemblyInst: results.append(assemblyInst.group(2))\n\n\t\treturn results",
"def _get_Etot(self, file):\n f = open_general(file)\n tmptxt = f.readlines()\n f.close()\n itmp = 0\n Etot = []\n while itmp >= 0:\n itmp = search_string('TOTAL ENERGY', tmptxt)\n if itmp >= 0:\n Etot.append(float(tmptxt.pop(itmp).split()[-1]))\n return Etot",
"def get926Monologue(filename):\n doc = Document(filename)\n monologue = \"\"\n o = re.compile('track [0-9][0-9]')\n\n # iterate over all paragraphs to get text units\n for para in doc.paragraphs:\n paragraph = para.text\n\n # timestamp e.g 15:01:27\n isHeader = o.match(paragraph.lower()) \n # ensure paragraph is not just empty line\n hasText = paragraph.lstrip()\n\n # ensure it is not an empty line\n if hasText and not isHeader:\n monologue += paragraph\n \n return [{'unit': monologue}]",
"def get_words_with_end_times(subtitle_file_path):\n\n with open(subtitle_file_path) as subtitle_file:\n\n # Remove first 4 lines (containing meta information)\n for j in range(0, 4):\n subtitle_file.readline()\n\n text = subtitle_file.read()\n\n # Check if the subtitle file supports individual word times\n if text.find(\"<c>\") == -1:\n print(\"Individual word times are not supported for file: \" + subtitle_file_path)\n return None, None\n\n chunks = text.split(\" \\n\\n\") # split into chunks for easier data processing\n\n words = list()\n word_end_times = list()\n\n for chunk in chunks:\n chunk_lines = chunk.split(\"\\n\")\n words_line = chunk_lines[2]\n\n words_in_chunk = []\n word_end_times_in_chunk = []\n\n first_word_end_index = words_line.find(\"<\")\n if first_word_end_index != -1:\n first_word = words_line[\n 0:first_word_end_index] # get the first word (can't be found using method below)\n\n words_in_chunk = re.findall(\"<c> [\\S]*</c>\", words_line) # get all words\n words_in_chunk = [w[4:-4] for w in words_in_chunk] # strip <c> and <c/>\n\n word_end_times_in_chunk = re.findall(\"<\\d\\d:\\d\\d:\\d\\d.\\d\\d\\d>\", words_line) # get all word end times\n word_end_times_in_chunk = [t[1:-1] for t in word_end_times_in_chunk] # strip < and >\n else:\n # Only one word\n first_word = words_line\n\n last_time = chunk_lines[4][17:29] # end time for the last word\n\n words_in_chunk.insert(0, first_word)\n word_end_times_in_chunk.append(last_time)\n\n words.extend(words_in_chunk)\n word_end_times.extend(word_end_times_in_chunk)\n\n # For the last chunk we have to get the word end time from somewhere else\n first_line_in_last_chunk = chunks[-1].split(\"\\n\")[0]\n last_time = first_line_in_last_chunk[17:29]\n word_end_times.pop()\n word_end_times.append(last_time)\n\n if len(words) != len(word_end_times):\n print(\"Warning: word count does not match times count\")\n\n return words, word_end_times",
"def unitsDetector(self, num):\n try:\n num = int(num)\n except:\n sys.exit('Invalid input! Method only takes ints or floats.')\n \n digits = 0\n while num > 1:\n num /= 10\n digits += 1\n \n digits -= 1\n ind = digits // 3\n units = {3: 'B', 2: 'M', 1: 'K', 0: ''}[ind]\n \n return 10 ** (ind * 3), units",
"def preprocess_sub_units(self):\n if self.unit == \"char\":\n self.preprocess_char()\n elif self.unit == \"char-ngram\":\n self.preprocess_char_ngram()\n elif self.unit == \"morpheme\":\n self.preprocess_morpheme()\n elif self.unit == \"oracle\":\n self.preprocess_oracle()\n else:\n sys.exit(\"Unknown unit\")",
"def parse_file(self):\n for num, line in enumerate(self._text):\n if \"CRYSTAL STRUCTURE SOLUTION\" in line:\n line = line.strip().strip('+').strip()\n if 'SHELXTL' in line:\n self.version = 'SHELXT ' + line.split()[-1]\n if line.strip().startswith('R1 Rweak Alpha'):\n for n in range(100):\n if not self._text[num + 1 + n]:\n break\n if self._text[num + 1]:\n self.solutions[self._text[num + 1 + n][58:76].strip()] = self._text[num + 1 + n][37:51].strip()",
"def get_gds_units(infile):\n close = True\n if hasattr(infile, \"__fspath__\"):\n infile = open(infile.__fspath__(), \"rb\")\n elif isinstance(infile, (basestring, Path)):\n infile = open(infile, \"rb\")\n else:\n close = False\n unit = precision = None\n for rec_type, data in _raw_record_reader(infile):\n # UNITS\n if rec_type == 0x03:\n db_user = _eight_byte_real_to_float(data[4:12])\n db_meters = _eight_byte_real_to_float(data[12:])\n unit = db_meters / db_user\n precision = db_meters\n break\n if close:\n infile.close()\n return (unit, precision)",
"def extractRunInfo(filename):\n tokens = filename.split('_')\n loading = tokens[1].strip('LiF')\n polymer = tokens[2].strip('.m')\n return (float(loading)/100, polymer)",
"def text_only(feedback_folder_path):\n elems = os.listdir(feedback_folder_path)\n global mos_sim\n global mos_nat\n # ignore instruction text files\n for junk in [\"Anleitung.txt\", \"instructions.txt\"]:\n if junk in elems: elems.remove(junk)\n # iterate score text files and update MOS dictionaries\n for file in elems:\n filepath = os.path.join(feedback_folder_path, file)\n code, nat_score, sim_score = score_filepath_to_scores(filepath)\n update_dicts(code, nat_score, sim_score)",
"def test_get_texts_ignores():\n file_map = sd.get_file_map(\".\")\n texts = sd.get_texts(file_map)\n ingnores = \"[:.,;:!?\\\"-()]\\n\".split()\n for text in texts:\n for char in ingnores:\n assert text.find(char) == -1",
"def parseOutText(f):\n\n\n f.seek(0) ### go back to beginning of file (annoying)\n all_text = f.read()\n\n ### split off metadata\n content = all_text.split(\"X-FileName:\")\n words = \"\"\n if len(content) > 1:\n ### remove punctuation\n text_string = content[1].translate(str.maketrans(\"\", \"\", string.punctuation))\n\n ### split the text string into individual words\n words = text_string.split()\n\n return words",
"def get_units(cls, wkt):\n if HAS_GDAL:\n return SpatialReference(wkt).units\n else:\n m = cls.units_regex.match(wkt)\n return m.group('unit'), m.group('unit_name')",
"def load_unitsm(self):\n self.unit_file = self.path+'units.m'\n self.unit_dic = self.load_m(self.unit_file) #actual reading routine\n self.psix=self.unit_dic['psi_x']\n self.eq_x_r = self.unit_dic['eq_x_r']\n self.eq_x_z = self.unit_dic['eq_x_z']\n self.eq_axis_r = self.unit_dic['eq_axis_r']\n self.eq_axis_z = self.unit_dic['eq_axis_z']\n self.eq_axis_b = self.unit_dic['eq_axis_b']\n self.sml_dt = self.unit_dic['sml_dt']\n self.sml_wedge_n = self.unit_dic['sml_wedge_n']\n self.diag_1d_period = self.unit_dic['diag_1d_period']",
"def test_CFCalculation_txt_files():\n from masci_tools.tools.cf_calculation import CFCalculation, CFCoefficient\n\n #Make sure new script produces the same result as old one\n expected_results = [\n CFCoefficient(l=2,\n m=0,\n spin_up=-419.7891726292168,\n spin_down=-414.7152560307904,\n unit='K',\n convention='Stevens'),\n CFCoefficient(l=4,\n m=0,\n spin_up=-35.92607948104669,\n spin_down=-26.384951772020756,\n unit='K',\n convention='Stevens'),\n CFCoefficient(l=6, m=0, spin_up=6.522900740505054, spin_down=5.488104692050172, unit='K', convention='Stevens')\n ]\n\n cf = CFCalculation(reference_radius='cdn')\n cf.readPot('files/cf_calculation/VKS.2.0.dat',\n 'files/cf_calculation/VKS.4.0.dat',\n 'files/cf_calculation/VKS.6.0.dat',\n lm=[(2, 0), (4, 0), (6, 0)])\n cf.readCDN('files/cf_calculation/Nd.dat', header=3)\n cf.cdn['RMT'] = 3.138049652\n results = cf.performIntegration()\n\n assert results == expected_results",
"def test_remove_file_group_regex(self):\n with copy_of_directory(assets.path_to('SBB0000F29300010000/data')) as tempdir:\n mets = OcrdMets(filename=join(tempdir, 'mets.xml'))\n self.assertEqual(len(mets.file_groups), 17)\n self.assertEqual(len(mets.find_all_files()), 35)\n mets.remove_file_group('//OCR-D-GT-.*', recursive=True)\n self.assertEqual(len(mets.file_groups), 15)\n self.assertEqual(len(mets.find_all_files()), 31)",
"def rm_mf(txt:str):\n return (\n txt.strip()\n .replace('(arabisk/bosnisk/engelsk/tysk/kurdisk/svensk tale, norsk tekst)', '')\n .replace('25th anniversary show from royal albert hall - direkte ove', '')\n .replace('ekstraforestilling fred. 09.sept. kl. 22.00.', '')\n .replace('25th anniversary show from royal albert hall', '')\n .replace('ekstraforestilling sund. 11.sept. kl.20.00.', '')\n .replace('daybreakers(utekstet originalversjon)', '')\n .replace('norsk actionthriller. alder 15 år.', '')\n .replace('50th anniversary celebration atmos', '')\n .replace('(nb! filmen er tatt over av fox)', '')\n .replace('(nb! gratis! - kun kjøp i døra)', '')\n .replace('première! norsk. alder: 9 år.', '')\n .replace('(3d - briller kjøpes i kiosken)', '')\n .replace('gratis - ingen forhåndsbest.', '')\n .replace('briller (25,kjøpes i kiosk)', '')\n .replace('(2\\x1f\\x1f\\x1fd,norsk tale)', '')\n .replace('(nyrestaurert/digitalisert)', '')\n .replace('2011 jubileumsforestilling', '')\n .replace('i3d(orginal) full rulle', '')\n .replace('(månedens joker nr. 2!)', '')\n .replace('(festivalpass gjelder)', '')\n .replace('(nb! eneste visning!)', '')\n .replace('med regissørbesøk', '')\n .replace('stord filmklubb', '')\n .replace(':knøttenefilmen', '')\n .replace('(bø filmklubb)', '')\n .replace('10-årsjubileum', '')\n .replace('(gammel reg.)', '')\n .replace('stor ståhai)', '')\n .replace('halvmaraton', '')\n .replace('direkte ove', '')\n .replace('(1978)', '')\n .replace('83d)', '')\n .replace('(n)', '')\n .replace('org.versj stor ståhai', '')\n .replace('ekstraforestilling', '')\n .replace('og valgfritt tale', '')\n .replace('ikke billettsalg', '')\n .replace('30-års jubileum', '')\n .replace('direkteoverført', '')\n .replace('forest. kl.1400', '')\n .replace('originalversjon', '')\n .replace('verdenspremiere', '')\n .replace('forf. innleder', '')\n .replace('i atmos 3d lyd', '')\n .replace('ikke book tale', '')\n .replace('månedens joker', '')\n .replace('norgespremiere', '')\n .replace('orginalversjon', '')\n .replace('usikker lengde', '')\n .replace('barnehagekino', '')\n .replace('gratisvisning', '')\n .replace('norgespremier', '')\n .replace('originalspråk', '')\n .replace('forestilling', '')\n .replace('dolby atmos', '')\n .replace('førpremiere', '')\n .replace('horrornight', '')\n .replace('ingen tekst', '')\n .replace('nederlandsk', '')\n .replace('originalutg', '')\n .replace('portugisisk', '')\n .replace('sing- along', '')\n .replace('sing-a-long', '')\n .replace('strikkekino', '')\n .replace('.vises i 3d', '')\n .replace('gratiskino', '')\n .replace('indonesisk', '')\n .replace('norgesprem', '')\n .replace('uten tekst', '')\n .replace('seniorkino', '')\n .replace('sing-along', '')\n .replace('bollywood', '')\n .replace('eng tekst', '')\n .replace('filmdager', '')\n .replace('filmklubb', '')\n .replace('italiensk', '')\n .replace('med norsk', '')\n .replace('med tekst', '')\n .replace('nei tekst', '')\n .replace('subtitles', '')\n .replace('trehundre', '')\n .replace('utektstet', '')\n .replace('babykino', '')\n .replace('extended', '')\n .replace('islandsk', '')\n .replace('litauisk', '')\n .replace('original', '')\n .replace('premiere', '')\n .replace('utekstet', '')\n .replace('17. 
mai', '')\n .replace('blueray', '')\n .replace('digital', '')\n .replace('engelsk', '')\n .replace('forest.', '')\n .replace('japansk', '')\n .replace('med nor', '')\n .replace('n-prem.', '')\n .replace('preview', '')\n .replace('reprise', '')\n .replace('stemmer', '')\n .replace('tekster', '')\n .replace('tekstet', '')\n .replace('u/tekst', '')\n .replace('versjon', '')\n .replace('version', '')\n .replace('2\\x1fd', '')\n .replace('3\\x1fd', '')\n .replace('17 mai', '')\n .replace('dubbet', '')\n .replace('fransk', '')\n .replace('gratis', '')\n .replace('m. eng', '')\n .replace('m/ no.', '')\n .replace('n.tale', '')\n .replace('n-prem', '')\n .replace('norske', '')\n .replace('polish', '')\n .replace('polske', '')\n .replace('samisk', '')\n .replace('spansk', '')\n .replace('tale-a', '')\n .replace('teksta', '')\n .replace('35 mm', '')\n .replace('atmos', '')\n .replace('dansk', '')\n .replace('dubba', '')\n .replace('hindi', '')\n .replace('m .no', '')\n .replace('norsk', '')\n .replace('og 2d', '')\n .replace('og 3d', '')\n .replace('polsk', '')\n .replace('tekst', '')\n .replace('versj', '')\n .replace('35mm', '')\n .replace('dig.', '')\n .replace('dubb', '')\n .replace('eng.', '')\n .replace('film', '')\n .replace('i 2d', '')\n .replace('i 3d', '')\n .replace('imax', '')\n .replace('m.no', '')\n .replace('org.', '')\n .replace('orig', '')\n .replace('tale', '')\n .replace('tysk', '')\n .replace('); r', '')\n .replace('2 d', '')\n .replace('3 d', '')\n .replace('4dx', '')\n .replace('5.1', '')\n .replace('dub', '')\n .replace('dvd', '')\n .replace('hfr', '')\n .replace('no.', '')\n .replace('org', '')\n .replace('txt', '')\n .replace('2d, no', '')\n .replace('3d, no', '')\n .replace('2d, n', '')\n .replace('3d, n', '')\n .replace('2d no', '')\n .replace('3d no', '')\n .replace('2d', '')\n .replace('3d', '')\n .replace('4k', '')\n .replace('m/', '')\n .replace('u/', '')\n .replace('*', '')\n .replace('!', '')\n .replace('+', '')\n .replace('.', ' ')\n .replace('/', ' ')\n .replace('-', ' ')\n .replace('\"', '')\n .replace(':', ' ')\n .replace(';', ' ')\n .replace(',', ' ')\n .replace('(', '')\n .replace(')', '')\n .replace(\"'\", '')\n .replace(' ', ' ')\n .strip()\n )",
"def compare_files_text(file1, file2, regex=\"\"):\n if regex:\n write_log(\"start finding regex : {0} ,it may spend times\".format(regex), True)\n result1 = find_regex(file1, regex)\n result2 = find_regex(file2, regex)\n compare_sid('\\n'.join(result1), '\\n'.join(result2))\n else:\n compare_sid(file1, file2)",
"def read_txt(filename):\n content = [] # list with word index : word count for each track\n string = '%'\n find = False \n words = [] \n track_id = [] # list with track ID's from the MSD\n mxm_tid = [] # track ID's from musiXmatch\n str_data = []\n\n read_file = open(filename, \"r\")\n \n for line in read_file:\n if find:\n line = line.strip() # converting line into list\n index1 = line.find(',') # finds index of 1st comma\n index2 = line.find(',', index1+1) # finds index of 2nd comma\n track_id.append(line[:index1]) # appends track id to list \n mxm_tid.append(line[:index2]) # appends track id to list \n res = '{' + line[index2+1:] + '}' # simulates dictionary with string\n d = eval(res) # converts string to actual dictionary \n content.append(d) # appends track data to content list\n else:\n # obtaining line with 5,000 words \n if line.startswith(string):\n line = line[1:] # getting rid of %\n words = [word.strip() for word in line.split(',')]\n find = True # already found list of words \n read_file.close() \n \n\n return (words, content, track_id, mxm_tid)",
"def extract_duration_nl(text):\n if not text:\n return None\n\n time_units = {\n 'microseconds': 0,\n 'milliseconds': 0,\n 'seconds': 0,\n 'minutes': 0,\n 'hours': 0,\n 'days': 0,\n 'weeks': 0\n }\n\n nl_translations = {\n 'microseconds': [\"microsecond\", \"microseconde\", \"microseconden\", \"microsecondje\", \"microsecondjes\"],\n 'milliseconds': [\"millisecond\", \"milliseconde\", \"milliseconden\", \"millisecondje\", \"millisecondjes\"],\n 'seconds': [\"second\", \"seconde\", \"seconden\", \"secondje\", \"secondjes\"],\n 'minutes': [\"minuut\", \"minuten\", \"minuutje\", \"minuutjes\"],\n 'hours': [\"uur\", \"uren\", \"uurtje\", \"uurtjes\"],\n 'days': [\"dag\", \"dagen\", \"dagje\", \"dagjes\"],\n 'weeks': [\"week\", \"weken\", \"weekje\", \"weekjes\"]\n }\n\n pattern = r\"(?P<value>\\d+(?:\\.?\\d+)?)\\s+{unit}\"\n text = _convert_words_to_numbers_nl(text)\n\n for unit in time_units:\n unit_nl_words = nl_translations[unit]\n unit_nl_words.sort(key=len, reverse=True)\n for unit_nl in unit_nl_words:\n unit_pattern = pattern.format(unit=unit_nl)\n matches = re.findall(unit_pattern, text)\n value = sum(map(float, matches))\n time_units[unit] = time_units[unit] + value\n text = re.sub(unit_pattern, '', text)\n\n text = text.strip()\n duration = timedelta(**time_units) if any(time_units.values()) else None\n\n return (duration, text)"
] | [
"0.6960766",
"0.64596313",
"0.64187497",
"0.618116",
"0.58982176",
"0.5609678",
"0.5549594",
"0.51914674",
"0.513762",
"0.513588",
"0.51316273",
"0.51107514",
"0.5104282",
"0.5096793",
"0.5073208",
"0.50728047",
"0.5068827",
"0.5063704",
"0.5061891",
"0.5050305",
"0.5049378",
"0.5047876",
"0.50470287",
"0.5043149",
"0.5041682",
"0.49911863",
"0.49903366",
"0.49852493",
"0.4982359",
"0.492986"
] | 0.6537814 | 1 |
Processes the 509 doc files belonging to the non-core asset in data. The core asset is identified by the numbers RG-50.030, RG-50.106, RG-50.549 | def createStructuredTranscript_Non_Core_Doc():
#create a temporary folder that will hold the data transformed from doc to docx
os.system('mkdir ' + INPUT_FOLDER+'temp')
core_doc_asset = []
missing_count = 0
missing_files=[]
# get all the docx files that are part of the core asset
for file in glob.glob(INPUT_FOLDER+"*.doc"):
# RG numbers for the core asset
if ("RG-50.030" not in file and
"RG-50.106" not in file and
"RG-50.549" not in file):
# convert file to docx, storing it in an untracked folder called temp
file_docx = file + 'x'
command = 'textutil -convert docx ' + file + ' -output ' + INPUT_FOLDER+'temp/'+ file_docx.split('/')[-1]
call(command, shell=True)
# append to the array
core_doc_asset.append(file_docx)
# group the converted files into a dictionary keyed by their RG number
core_doc_asset=create_dictionary_of_file_list(core_doc_asset)
not_processed=0
processed_doc=0
# get the units for each file, store them and update tracker
for mongo_rg in core_doc_asset:
# get text units for this entry
processed=[]
result=[]
for file in core_doc_asset[mongo_rg]:
units = getTextUnits(INPUT_FOLDER+'temp/'+file.split('/')[-1])
if units:
#replace white spaces
for i,element in enumerate(units):
units[i]['unit']=' '.join(element['unit'].split())
result.extend(units)
processed.append(True)
else:
#check if processed
processed.append(False)
#set the method used to transform the transcript
h.update_field(DB, TRACKER, "rg_number", mongo_rg, "method", "transcribe_non_core_doc")
not_processed=not_processed+1
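# if any file for this RG number failed to produce units, mark the whole entry as unprocessed and remember its files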
if False in processed:
h.update_field(DB, TRACKER, "rg_number", mongo_rg, "status", "Unprocessed")
not_processed=not_processed+1
missing_files.append(' '.join(core_doc_asset[mongo_rg]))
else:
# insert units on the output collection
h.update_field(DB, OUTPUT, "shelfmark", 'USHMM '+mongo_rg, "structured_transcript", result)
# update status on the stracker
h.update_field(DB, TRACKER, "rg_number", mongo_rg, "status", "Processed")
processed_doc=processed_doc+1
#delete the temporary folder
os.system('rm -r ' + INPUT_FOLDER+'temp')
#write the missing files to text file
file = open(OUTPUT_FOLDER_USHMM_PROCESSING_LOGS+'transcribe_non_core_doc_failed.txt','w')
file.write('\n'.join(missing_files))
# success
pprint.pprint("Non-core doc files were successfully processed, but there are " + str(len(missing_files)) + " missing") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def docxProcessing():\n DOCUMENT_ORIGIN_CODE = \"RADIOLOGIE_SOFTWARE\"\n global DATABASE\n conn = db.create_connection(DATABASE)\n pathFolder = \"fichiers source/\"\n extension = \".docx\"\n docxFileArrayPath = glob.glob(pathFolder + \"*\" + extension)\n print(\" - Processing docx\", end=\"\") \n for file in docxFileArrayPath:\n text = readFile.readDocxFile(file)\n query = getDocumentQuery(text, DOCUMENT_ORIGIN_CODE, file, pathFolder, extension)\n db.insert_document(conn, query) \n print(\".\", end = '')\n #commit the changes to db\t\t\t\n conn.commit()\n #close the connection\n conn.close()\n print(\"\\n\")",
"def documents(iati_import, activity, project, activities_globals):\n imported_docs = []\n changes = []\n\n xml_ns = 'http://www.w3.org/XML/1998/namespace'\n first_image = True\n\n for doc_link in activity.findall('document-link'):\n url = ''\n doc_format = ''\n title = ''\n title_language = ''\n category = ''\n language = ''\n\n if 'url' in doc_link.attrib.keys():\n url = doc_link.attrib['url']\n\n # Check if it's the first image\n if url and url.rsplit('.', 1)[1].lower() in VALID_IMAGE_EXTENSIONS and first_image:\n first_image = False\n continue\n\n if 'format' in doc_link.attrib.keys():\n if not len(doc_link.attrib['format']) > 75:\n doc_format = doc_link.attrib['format']\n else:\n add_log(iati_import, 'document_link_format',\n 'format is too long (75 characters allowed)', project)\n\n # Check if the format is 'application/http'\n if doc_format == 'application/http':\n continue\n\n title_element = doc_link.find('title')\n if not title_element is None:\n title = get_text(title_element, activities_globals['version'])\n if len(title) > 100:\n add_log(iati_import, 'document_link_title',\n 'title is too long (100 characters allowed)', project,\n IatiImportLog.VALUE_PARTLY_SAVED)\n title = title[:100]\n\n if activities_globals['version'][0] == '1' and \\\n '{%s}lang' % xml_ns in title_element.attrib.keys():\n if not len(title_element.attrib['{%s}lang' % xml_ns]) > 2:\n title_language = title_element.attrib['{%s}lang' % xml_ns]\n else:\n add_log(iati_import, 'document_link_title_language',\n 'language is too long (2 characters allowed)', project)\n elif activities_globals['version'][0] == '2':\n narrative_element = title_element.find('narrative')\n if not narrative_element is None and \\\n '{%s}lang' % xml_ns in narrative_element.attrib.keys():\n if not len(narrative_element.attrib['{%s}lang' % xml_ns]) > 2:\n title_language = narrative_element.attrib['{%s}lang' % xml_ns]\n else:\n add_log(iati_import, 'document_link_title_language',\n 'language is too long (2 characters allowed)', project)\n\n category_element = doc_link.find('category')\n if not category_element is None and 'code' in category_element.attrib.keys():\n if not len(category_element.attrib['code']) > 3:\n category = category_element.attrib['code']\n else:\n add_log(iati_import, 'document_link_category',\n 'category is too long (3 characters allowed)', project)\n\n language_element = doc_link.find('language')\n if not language_element is None and 'code' in language_element.attrib.keys():\n if not len(language_element.attrib['code']) > 2:\n language = language_element.attrib['code']\n else:\n add_log(iati_import, 'document_link_language',\n 'language is too long (2 characters allowed)', project)\n\n doc, created = get_model('rsr', 'projectdocument').objects.get_or_create(\n project=project,\n url=url,\n format=doc_format,\n title=title,\n title_language=title_language,\n category=category,\n language=language\n )\n\n if created:\n changes.append(u'added project document (id: %s): %s' % (str(doc.pk), doc))\n\n imported_docs.append(doc)\n\n for doc_link in project.documents.all():\n if not doc_link in imported_docs:\n changes.append(u'deleted project document (id: %s): %s' %\n (str(doc_link.pk),\n doc_link.__unicode__()))\n doc_link.delete()\n\n return changes",
"def normalize_doc_scores():\n# doc_res_files_path = base_path+r\"claimLM_docLM_doc_ret_output\"\n claims_file_counters_dict = {} #for each claim numas key, have the val a counter - if not 110 per claim -> problem!\n doc_res_files_path = linux_base_path+\"/claimLM_docLM_doc_ret_output\"\n# doc_res_files_path = base_path +\"\\\\claimLM_docLM_doc_ret_output\"\n for filename in os.listdir(doc_res_files_path):\n# filename = r\"C:\\study\\technion\\MSc\\Thesis\\Y!\\support_test\\baseline_clmLMdocLM\\claimLM_docLM_doc_ret_output\\doc_res_alpha_0_beta_0.2_clm_47\"\n print \"filename:\"+filename\n doc_score_dict = {} # key is docno, val is the exp(score)\n curr_claim = filename.split(\"_clm_\")[1]\n curr_alpha = filename.split(\"_alpha_\")[1].split(\"_beta_\")[0]\n curr_beta = filename.split(\"_beta_\")[1].split(\"_clm_\")[0]\n curr_dict_name = \"docs_scores_norm_alpha_\"+curr_alpha+\"_beta_\"+curr_beta+\"_clm_\"+curr_claim+\"_dict\"\n try:\n# if os.path.exists(base_path+\"\\\\docs_norm_scores_dicts\\\\\"+curr_dict_name+\"_sorted\"):\n# print curr_dict_name +\" already there\"\n# continue\n# else:\n# print \"applying on \"+curr_dict_name\n # check if the curr alpha beta dict exists already\n doc_file = open(doc_res_files_path+\"/\"+filename,'r')\n doc = doc_file.read().strip() # score\n scores_sum = 0.0\n if curr_claim in claims_file_counters_dict.keys():\n claims_file_counters_dict[curr_claim] += 1 \n else:\n claims_file_counters_dict[curr_claim] = 1\n for i, line in enumerate(doc.split('\\n')):\n data = line.split(' ')\n query_Id = data[0]\n doc_id = data[2]\n norm_score = math.exp(float(data[4]))\n scores_sum += norm_score\n if os.path.exists(curr_dict_name) == True:\n doc_score_dict = read_pickle(curr_dict_name)\n if doc_id in doc_score_dict:\n raise Exception(\"DOC ID %s already in dict\" % doc_id)\n doc_score_dict[query_Id,doc_id] = norm_score\n # divide by scores_sum\n for ((query_Id,doc_id),score) in doc_score_dict.items():\n new_score = float(float(score)/float(scores_sum))\n doc_score_dict[query_Id,doc_id] = new_score\n #rank according to score\n doc_score_dict_sorted = collections.OrderedDict(sorted(doc_score_dict.items(), key= lambda x: (-int(x[0][0]),x[1]),reverse=True))\n save_pickle(linux_base_path+\"/\"+\"docs_norm_scores_dicts/\"+curr_dict_name+\"_sorted\",doc_score_dict_sorted)\n# save_pickle(base_path+ \"\\\\docs_norm_scores_dicts\"+curr_dict_name+\"_sorted\",doc_score_dict_sorted)\n except Exception as err: \n sys.stderr.write('problem in normalize_doc_scores in file:'+ filename) \n print err.args \n print err \n for (claim_num,counter) in claims_file_counters_dict.items():\n if counter!=110:\n print claim_num+\" not 110 files , but \" +str(counter) +\" files\"",
"def openie_prepare_files(document_file, no_entity_filter=False, consider_sections=False):\n temp_dir = tempfile.mkdtemp()\n temp_in_dir = os.path.join(temp_dir, \"input\")\n filelist_fn = os.path.join(temp_dir, \"filelist.txt\")\n out_fn = os.path.join(temp_dir, \"output.txt\")\n os.mkdir(temp_in_dir)\n input_files = []\n\n amount_skipped_files = 0\n doc_count = count_documents(document_file)\n logging.info('counting files to process....')\n if no_entity_filter:\n for document_content in read_pubtator_documents(document_file):\n doc = TaggedDocument(from_str=document_content)\n if not doc or not doc.title or not doc.abstract:\n amount_skipped_files += 1\n else:\n doc_count += 1\n # TODO: Not beautiful but join sections via a '.' to ensure sentence splitting in CoreNLP\n content = '. '.join([te for te, _ in doc.iterate_over_text_elements(sections=consider_sections)])\n input_file = os.path.join(temp_in_dir, \"{}.txt\".format(doc.id))\n input_files.append(input_file)\n with open(input_file, \"w\") as f:\n f.write(content)\n else:\n logging.info('Init spacy nlp...')\n spacy_nlp = English() # just the language with no model\n spacy_nlp.add_pipe(\"sentencizer\")\n\n doc2sentences, doc2tags = filter_document_sentences_without_tags(doc_count, document_file, spacy_nlp,\n consider_sections=consider_sections)\n doc_count = len(doc2tags)\n for doc_id, sentences in doc2sentences.items():\n if sentences:\n input_file = os.path.join(temp_in_dir, \"{}.txt\".format(doc_id))\n input_files.append(input_file)\n with open(input_file, 'wt') as f:\n f.write(' '.join(sentences))\n\n logging.info('{} files need to be processed. {} files skipped.'.format(doc_count, amount_skipped_files))\n with open(filelist_fn, \"w\") as f:\n f.write(\"\\n\".join(input_files))\n return filelist_fn, out_fn, doc_count",
"def build_DB(self, doc_files):\n\t\tcompteur=0\n\t\tdoc_name=doc_files+'doc_'+str(compteur)+'.txt'\n\t\twhile os.path.exists(doc_name):\n\t\t doc=Doc(doc_name)\n\t\t self.DB.add_doc(doc)\n\t\t compteur+=1\n\t\t doc_name=doc_files+'doc_'+str(compteur)+'.txt'\n\t\tprint \"Number of documents in the Data Base: \", self.DB.nb_doc_total\n\t\t#print self.DB.id2nbword\n\t\tself.dump_DB()",
"def documents(pmid_23982599, civic_aid6_document):\n return [pmid_23982599, civic_aid6_document]",
"def pdfProcessing():\n global DATABASE\n conn = db.create_connection(DATABASE)\n DOCUMENT_ORIGIN_CODE = \"DOSSIER_PATIENT\"\n\n pathFolder = \"fichiers source/\"\n extension = \".pdf\"\n pdfFileArrayPath = glob.glob(pathFolder + \"*\" + extension)\n print(\" - Processing pdf\", end=\"\")\n for file in pdfFileArrayPath:\n text = readFile.readPdfFile(file)\n query = getDocumentQuery(text, DOCUMENT_ORIGIN_CODE, file, pathFolder, extension)\n \n db.insert_document(conn, query)\n print(\".\", end = '')\n #commit the changes to db\n conn.commit()\n #close the connection\n conn.close()\n print(\"\\n\")",
"def main():\n token_dict_dict = {}\n all_dict = {}\n pronoun_proportion_list = []\n tag = 'PRP' # base tag for all pronouns, see 'https://www.clips.uantwerpen.be/pages/MBSP-tags' for more info\n\n for text in glob.glob(file_loc):\n file_title = os.path.basename(text).split('.')[0]\n\n with open(text, 'r') as f:\n speech = f.read()\n text_dict = {}\n\n try:\n #TextBlob goodness that tags all the words for me\n speech_blob = TextBlob(clean(speech))\n speech_blob.tags\n except:\n #for some reason Trump's address contained a unicode 128 character that I couldn't find\n #instead of getting rid of it in a single file, i decided to have an except that could catch that case in\n #all sitations and handle them accordingly\n\n #lets the user know that there was an issue, and that it's been handled\n print file_title,\n print \"contains unexpected unicode characters. they have been removed and the document has been processed\"\n\n #gets rid of all unicode characters. i could do this by default, but all the other files ran fine\n #so i didn't think it was worth it\n speech_blob = TextBlob(clean(speech.decode('unicode_escape').encode('ascii','ignore')))\n\n for token in speech_blob.tags:\n # builds the inital dictionary of data, only looks at words with a specified tag\n if tag in token[1]:\n try:\n text_dict[token[0]] += 1\n except:\n text_dict[token[0]] = 1\n try:\n all_dict[token[0]] += 1\n except:\n all_dict[token[0]] = 1\n #breaks the title into 3 pieces: number, president, date\n token_dict_dict[file_title] = text_dict\n partial_split, date = string.rsplit(file_title, '_', 1)\n num_pres, pres = string.split(partial_split, '_', 1)\n\n pronoun_proportion_list.append(\n (pres, date, total_to_proportion(pronoun_breakdown(token_dict_dict[file_title])))\n )\n create_pronoun_graph(sort_list_by_president_order(pronoun_proportion_list))",
"def gather_documents(self):\n self.document_gatherer.gather_and_save_everything(Constants.path_cord, \n Constants.path_metadata, \n Constants.path_linked_documents,\n Constants.path_unlinked_documents,\n Constants.path_parsed_documents,\n Constants.path_all_documents)\n \n print(\"Done gathering documents.\")",
"def do_docs(self, path):\n print(\"scaraping documentation\")\n for p in path.glob(\"**/*\"):\n if p.is_file():\n parts = p.relative_to(path).parts\n if parts[-1].endswith(\"rst\"):\n data = tsparse(p.read_bytes())\n blob = DocBlob()\n blob.arbitrary = data\n blob.content = {}\n\n blob.ordered_sections = []\n blob.item_file = None\n blob.item_line = None\n blob.item_type = None\n blob.aliases = []\n blob.example_section_data = Section()\n blob.see_also = []\n blob.signature = None\n blob.references = None\n blob.refs = []\n\n self.docs[parts] = json.dumps(blob.to_json(), indent=2)\n else:\n pass\n # data = p.read_bytes()",
"def process(self):\r\n\r\n index = cindex.Index.create()\r\n self.headers = {}\r\n\r\n for f in self.files:\r\n if f in self.processed:\r\n continue\r\n\r\n print \"Processing `%s'\" % (os.path.basename(f),)\r\n\r\n tu = index.parse(f, self.flags)\r\n\r\n if len(tu.diagnostics) != 0:\r\n fatal = False\r\n\r\n for d in tu.diagnostics:\r\n sys.stderr.write(d.format)\r\n sys.stderr.write(\"\\n\")\r\n\r\n if d.severity == cindex.Diagnostic.Fatal or \\\r\n d.severity == cindex.Diagnostic.Error:\r\n fatal = True\r\n\r\n if fatal:\r\n sys.stderr.write(\"\\nCould not generate documentation due to parser errors\\n\")\r\n sys.exit(1)\r\n\r\n if not tu:\r\n sys.stderr.write(\"Could not parse file %s...\\n\" % (f,))\r\n sys.exit(1)\r\n\r\n # Extract comments from files and included files that we are\r\n # supposed to inspect\r\n extractfiles = [f]\r\n\r\n for inc in tu.get_includes():\r\n filename = str(inc.include)\r\n self.headers[filename] = True\r\n\r\n if filename in self.processed or (not filename in self.files) or filename in extractfiles:\r\n continue\r\n\r\n extractfiles.append(filename)\r\n\r\n for e in extractfiles:\r\n db = comment.CommentsDatabase(e, tu)\r\n\r\n self.add_categories(db.category_names)\r\n self.commentsdbs[e] = db\r\n\r\n self.visit(tu.cursor.get_children())\r\n\r\n for f in self.processing:\r\n self.processed[f] = True\r\n\r\n self.processing = {}\r\n\r\n # Construct hierarchy of nodes.\r\n for node in self.all_nodes:\r\n q = node.qid\r\n\r\n if node.parent is None:\r\n par = self.find_parent(node)\r\n\r\n # Lookup categories for things in the root\r\n if (par is None or par == self.root) and (not node.cursor is None):\r\n location = node.cursor.extent.start\r\n db = self.commentsdbs[location.file.name]\r\n\r\n if db:\r\n par = self.category_to_node[db.lookup_category(location)]\r\n\r\n if par is None:\r\n par = self.root\r\n\r\n par.append(node)\r\n\r\n # Resolve comment\r\n cm = self.find_node_comment(node)\r\n\r\n if cm:\r\n node.merge_comment(cm)\r\n\r\n # Keep track of classes to resolve bases and subclasses\r\n classes = {}\r\n\r\n # Map final qid to node\r\n for node in self.all_nodes:\r\n q = node.qid\r\n self.qid_to_node[q] = node\r\n\r\n if isinstance(node, nodes.Class):\r\n classes[q] = node\r\n\r\n # Resolve bases and subclasses\r\n for qid in classes:\r\n classes[qid].resolve_bases(classes)\r\n\r\n self.markup_code(index)",
"def readDocuments(docs, prefix):\n\n fmap = open(\"mapping.txt\", \"w\")\n\n\n i = -1\n for folder in pressrelease_folders_txt:\n i += 1\n fullpath = path.join(prefix, folder)\n totFilesInFolder = len(fnmatch.filter(os.listdir(fullpath),\n '*.txt'))\n countFiles = 0\n for f in listdir(path.join(prefix, folder)):\n fmap.write(\"{0}\\t {1:5d}\\n\".format(f, countFiles))\n countFiles += 1\n fullname = fullpath + f\n # text = open(fullname).readlines()\n ff = open(fullname)\n docs.append(ff.read())\n\n print(\"{0:5d}/{1:5d} :: Reading file {2:10s} \".format(countFiles,\n totFilesInFolder, f))\n\n # if countFiles > 4:\n # return\n\n\n fmap.close()",
"def findDocumentsTwo():\n lineTwo = 0\n counterTwo = 0\n\n with open('bc.processed2.csv', 'r') as readfile,\\\n open('documentsTwo.txt', 'w') as writefile:\n for line in readfile:\n lineTwo += 1\n if re.match('^<document', line):\n counterTwo += 1\n writefile.write(str(counterTwo) + '\\t' +\n str(lineTwo) + '\\t' + line)\n\n divided4 = counterTwo / 4\n lines4 = lineTwo / 4\n writefile.write('\\n' + '--------------------------------' + '\\n')\n writefile.write('divided4: ' + str(divided4) + '\\n')\n writefile.write('lines divided by 4: ' + str(lines4) + '\\n')\n writefile.write('--------------------------------' + '\\n')\n writefile.write('1: ' + '1\\n')\n writefile.write('2: ' + str(lines4) + '\\n')\n writefile.write('3: ' + str((lines4 * 2)) + '\\n')\n writefile.write('4: ' + str((lines4 * 3)))\n print('divided4: ' + str(divided4))\n print('lines divided by 4: ' + str(lines4))",
"def updateDocFiles(self):\n for filename, filetype in self._get_doc_files():\n lines = open(filename).readlines()\n\n if self.Verbose:\n print 'Reading %s' % filename\n \n if filename.endswith('conf.py'):\n lines, write_out = self._update_doc_conf_file(lines, filename)\n else:\n raise TypeError, \"Unknown doc file type: %s\" % filetype\n\n if write_out:\n self._file_writer(lines, filename)",
"def convertFiles():\n\n #### Get file lists\n tmp = os.path.join(remarkableBackupDirectory,remContent)\n files = [x for x in os.listdir(tmp) if \".\" not in x]\n\n for i in range(0, len(files)):\n # get file reference number\n refNrPath = os.path.join(remarkableBackupDirectory, remContent,\n files[i])\n # get meta Data\n meta = json.loads(open(refNrPath + \".metadata\").read())\n fname = meta[\"visibleName\"]\n fname = fname.replace(\" \", \"_\")\n # Does this lines file have an associated pdf?\n AnnotPDF = os.path.isfile(refNrPath + \".pdf\")\n # Get list of all rm files i.e. all pages\n npages = len(glob.glob(refNrPath + \"/*.rm\"))\n if npages != 0:\n if AnnotPDF:\n # we have found an annotated pdf\n # now make sure it has the right ending\n if meta[\"visibleName\"][-4:] != \".pdf\":\n syncFilePath = os.path.join(syncDirectory, \"*\",\n meta[\"visibleName\"] + \".pdf\")\n else:\n syncFilePath = os.path.join(syncDirectory, \"*\",\n meta[\"visibleName\"])\n\n # does the file exist in our system?\n inSyncFolder = glob.glob(syncFilePath) != []\n\n if inSyncFolder:\n # have we exported this thing before?\n local_annotExist = \\\n glob.glob(syncFilePath[:-4] + \"_annot.pdf\") != []\n # first, assume, it needs converting\n remoteChanged = True\n if local_annotExist:\n # if it already exists check when it was last updated\n local_annotPath = \\\n glob.glob(syncFilePath[:-4]+\"_annot.pdf\")[0]\n local_annot_mod_time = os.path.getmtime(local_annotPath)\n # rm time is in ms\n remote_annot_mod_time = int(meta[\"lastModified\"])/1000\n # has this version changed since we last exported it?\n remoteChanged = \\\n remote_annot_mod_time > local_annot_mod_time\n # update if the remote version has changed\n if remoteChanged:\n origPDF = glob.glob(syncFilePath)[0]\n #####\n convertAnnotatedPDF(fname, refNrPath, origPDF)\n #####\n else:\n print(fname + \"hasn't been modified\")\n else:\n print(fname + \" does not exist in the sync directory\")\n # TODO allow y/n input whether it should be copied there\n # anyway\n else:\n # we found a note\n print(\"exporting Notebook \" + fname)\n syncFilePath = os.path.join(syncDirectory, notesDirectory,\n fname + \".pdf\")\n inSyncFolder = glob.glob(syncFilePath) != []\n remoteChanged = True\n if inSyncFolder:\n local_annot_mod_time = os.path.getmtime(syncFilePath)\n remote_annot_mod_time = int(meta['lastModified'])/1000\n remoteChanged = remote_annot_mod_time > local_annot_mod_time\n if remoteChanged:\n #####\n convertNotebook(fname, refNrPath)\n #####\n else:\n print(fname + \"has not changed\")",
"def process_doc_files(*files, add_new_line=True):\n for file in files:\n # Treat folders\n if os.path.isdir(file):\n files = [os.path.join(file, f) for f in os.listdir(file)]\n files = [f for f in files if os.path.isdir(f) or f.endswith(\".mdx\") or f.endswith(\".py\")]\n process_doc_files(*files, add_new_line=add_new_line)\n else:\n try:\n process_doc_file(file, add_new_line=add_new_line)\n except Exception:\n print(f\"There is a problem in {file}.\")\n raise",
"def process_all_files():\n src_files = get_doc_files()\n\n for src_pathname in src_files:\n if src_pathname.suffix in MARKDOWN_EXTENSIONS:\n process_file_markdown(src_pathname)\n elif src_pathname.suffix in STATIC_ASSET_EXTENSIONS:\n process_file_copytodest(src_pathname)",
"def clean_documents():\n start = datetime.now()\n for i, raw_filename in enumerate(os.listdir(RAW_DIR)):\n fullpath = os.path.join(RAW_DIR, raw_filename)\n if os.path.isfile(fullpath):\n print(\"Cleaning {0} {1}\".format(i, fullpath), file=stderr)\n try:\n with open(fullpath, \"r\") as f:\n text = f.read()\n text = clean(text)\n soup = BeautifulSoup(text, \"html.parser\")\n cleaned = visible_text(soup)\n score = germanwings_score(cleaned)\n if not score:\n print(\"not germanwings: {0}\".format(raw_filename))\n else:\n clean_filename = os.path.join(CLEAN_DIR, raw_filename)\n with open(clean_filename, \"w\") as f:\n f.write(cleaned.encode(\"ascii\", \"ignore\"))\n except Exception as exc:\n print(\"{0}: {1}\".format(fullpath, exc), file=stderr)\n end = datetime.now()\n print(\"Elapsed time to clean: {0}\".format(end - start), file=stderr)",
"def analyze(directory, pdf_file, doc_type):\n\n total_redaction_count = 0\n total_redacted_text_area = 0\n total_estimated_text_area = 0\n total_estimated_num_words_redacted = 0\n\n # Split the pdb (which is a pdf file) into individual jpgs.\n redaction_module.pdf_to_jpg(directory, pdf_file)\n\n os.chdir(directory)\n for jpg_file in os.listdir(directory):\n # Iterating through each page of the PDB\n if jpg_file.endswith(\".jpg\"):\n\n [redaction_count, redacted_text_area, estimated_text_area, estimated_num_words_redacted, potential, text_potential, type1, type2, type3] = redaction_module.image_processing(jpg_file, doc_type)\n\n total_redaction_count += redaction_count\n total_redacted_text_area += redacted_text_area\n total_estimated_text_area += estimated_text_area\n total_estimated_num_words_redacted += estimated_num_words_redacted\n\n # Crucial clean-up of jpg files (Note: If files are not removed, code will NOT work properly).\n os.remove(jpg_file)\n\n # Now that we've gone through each page, we need to calculate the stats for the document.\n if total_estimated_text_area != 0:\n total_percent_text_redacted = float(total_redacted_text_area / total_estimated_text_area)\n else:\n total_percent_text_redacted = 0\n\n data = []\n # open csv file and write the stats in a single row representing the document.\n with open('output.csv', mode='a+') as output:\n output_writer = csv.writer(output, delimiter=',')\n row = [pdf_file, total_redaction_count, total_percent_text_redacted, total_estimated_num_words_redacted]\n data.append(row)\n print(tabulate(data, headers=[\" \", \" \", \" \", \" \", \" \"]))\n output_writer.writerow(row)\n output.close()",
"def preprocess_docs():\n\n print(\"Getting started!\")\n stopwords.populate_stopwords(NLP, STOPWORD_URL)\n\n print(str.format(\"Using data dir:{}\", DATA_DIR))\n\n csv_file = open(os.path.join(DATA_DIR, 'PDFs.csv'))\n reader = csv.reader(csv_file, 'excel')\n rows = list(reader)\n\n filenames = [_get_filename(row) for row in rows]\n\n pool = Pool(multiprocessing.cpu_count())\n\n try:\n pool.map(_get_item, rows)\n pool.map(pdf.extract_text, filenames)\n docs = pool.map(_extract_questions, rows)\n docs = [d for d in docs if d is not None]\n\n _find_similar(docs, simdoc=compare.compare_doc_keywords)\n\n for doc in docs:\n if doc is None:\n continue\n doc.save_json()\n\n except KeyboardInterrupt:\n pool.terminate()\n print(\"You cancelled the program!\")\n sys.exit(1)\n\n print(\"Done\")",
"def parse_docs_from_page(self, page_url: str, page_text: str) -> Iterable[Document]:\n # get the data\n data = requests.get(page_url)\n\n # load data into bs4\n soup = BeautifulSoup(data.text, 'html.parser')\n # links = []\n pdf_dis = []\n dates = []\n table = []\n version_hash_fields = []\n\n for tr in soup.find_all('tr'):\n date_col = soup.find_all('td', attrs={'class': 'fd-col2'})\n hyperlink_col = soup.find_all('td', attrs={'class': 'fd-col1'})\n values = [td.text for td in tr.find_all('td')]\n table.append(values)\n for link in hyperlink_col:\n pdf_url = 'https://www.health.mil/' + link.find('a')['href']\n pdf_di = DownloadableItem(doc_type='pdf',\n web_url=pdf_url)\n pdf_dis.append(pdf_di)\n for date in date_col:\n dates.append(date.text)\n\n doc_nums = []\n doc_titles = []\n doc_names = []\n for row in table[1:]:\n doc_data = row[0].split(':')\n\n if len(doc_data) == 1: # if no colon then no doc number\n if doc_data[0] == \"(DTM)-19-004 -Military Service by Transgender Persons and Persons with Gender Dysphoria (Change 1)\":\n doc_nums.append(\"19-004\")\n doc_names.append(\"DTM\")\n doc_titles.append(doc_data[0][14:])\n version_hash_fields.append({\"doc_name\": 'DTM', \"doc_title\": doc_data[0][14:]})\n else:\n doc_nums.append(\" \")\n doc_titles.append(doc_data[0])\n doc_names.append(doc_data[0])\n version_hash_fields.append({\"doc_name\": doc_data[0], \"doc_title\": doc_data[0]})\n else:\n\n tmptitle = doc_data[1][1:].replace(\"\\u201cClinical\",\"Clinical\").replace(\"System,\\u201d\",\"System\").replace(\"BUILDER\\u2122 \", \"Builder\").replace(\"\\u2013\",\"\")\n\n if \"Volume\" in tmptitle:\n doc_nums.append(doc_data[0][7:]+\" Volume \"+tmptitle.split()[-1])\n else:\n doc_nums.append(doc_data[0][7:])\n doc_titles.append(doc_data[1][1:].replace(\"\\u201cClinical\",\"Clinical\").replace(\"System,\\u201d\",\"System\").replace(\"BUILDER\\u2122 \", \"Builder\").replace(\"\\u2013\",\"\"))\n doc_names.append(doc_data[0][:6])\n\n version_hash_fields.append({\"doc_name\": doc_data[0][:7], \"doc_title\": doc_data[1]})\n\n parsed_docs = []\n page_url = 'https://www.health.mil/About-MHS/OASDHA/Defense-Health-Agency/Resources-and-Management/DHA-Publications'\n num_docs = len(doc_nums)\n for i in range(num_docs):\n # put all the relevant info into dictionaries\n doc = Document(doc_type=doc_names[i].replace(\" \",\"-\"),\n doc_title=doc_titles[i],\n doc_num=doc_nums[i],\n doc_name=doc_names[i].replace(\" \",\"-\")+\" \"+doc_nums[i],\n publication_date=dates[i],\n cac_login_required=False,\n crawler_used='dha_pubs',\n source_page_url=page_url,\n downloadable_items=[pdf_dis[i]],\n version_hash_raw_data=version_hash_fields[i])\n parsed_docs.append(doc)\n\n return parsed_docs",
"def findDocumentsThree():\n lineTwo = 0\n counterTwo = 0\n\n with open('bc.processed3.csv', 'r') as readfile,\\\n open('documentsThree.txt', 'w') as writefile:\n for line in readfile:\n lineTwo += 1\n if re.match('^<document', line):\n counterTwo += 1\n writefile.write(str(counterTwo) + '\\t' +\n str(lineTwo) + '\\t' + line)\n\n divided2 = counterTwo / 2\n lines2 = lineTwo / 2\n writefile.write('\\n' + '--------------------------------' + '\\n')\n writefile.write('divided2: ' + str(divided2) + '\\n')\n writefile.write('lines divided by 2: ' + str(lines2) + '\\n')\n writefile.write('--------------------------------' + '\\n')\n writefile.write('1: ' + '1\\n')\n writefile.write('2: ' + str(lines2))\n print('divided2: ' + str(divided2))\n print('lines divided by 2: ' + str(lines2))",
"def process(self, terms):\n for entry in self.files:\n try:\n logger.info('file - {0}'.format(entry.path))\n\n # notional output file path\n path_sentences = self.path.joinpath('{0}.csv'.format(entry.path.stem))\n path_summary = self.path.joinpath('{0}-summary.csv'.format(entry.path.stem))\n logger.info('will save to - {0}'.format(path_sentences.resolve()))\n\n reports = self.inspect_doc(entry, terms)\n\n # receiving a list of dicts\n # therefore pandas can package into a useful outcome\n if len(reports) > 0:\n frame_sentences = pd.DataFrame(reports)\n\n frame_sentences = frame_sentences[['page', 'term', 'sentence']]\n logger.info('saving sentence file to - {0}'.format(path_sentences.resolve()))\n frame_sentences.to_csv(str(path_sentences.resolve()))\n \n frame_summary = frame_sentences.pivot_table(\n index='page',\n columns='term',\n aggfunc='size',\n fill_value=0\n )\n logger.info('saving summary file to - {0}'.format(path_sentences.resolve()))\n frame_summary.to_csv(str(path_summary.resolve()))\n\n\n except Exception as e:\n logger.error(e)",
"def process_docs(directory, vocab):\n for file_name in listdir(directory):\n file_path = directory + '/' + file_name\n add_doc_to_vocab(file_path, vocab)",
"def process_wiki_file(args: Tuple[str, str, int]) -> str:\n filepath, language, min_sent_word_count = args\n with bz2.open(filepath, \"rt\", encoding=\"utf8\") as bz2_file:\n\n # Extract text between <doc> xml tags\n soup = BeautifulSoup(bz2_file.read(), \"lxml\")\n docs = soup.find_all(\"doc\")\n wiki_dump_content = \"\"\n for i, doc in enumerate(docs):\n processed_text = process_wiki_doc_text(\n doc.text, language, min_sent_word_count\n )\n if len(processed_text) == 0:\n continue\n\n # Append to result\n if i > 0 and len(wiki_dump_content) > 0:\n wiki_dump_content += \"\\n\"\n wiki_dump_content += processed_text\n\n return wiki_dump_content",
"def inspect(filename):\n bfile = open(filename, 'rb')\n bdata = bfile.read()\n bfile.close()\n doc = loads(bdata)\n file_seq = []\n second = None\n for ver, snapshot in enumerate(doc.index):\n nb_obj = len(snapshot)\n cache = nb_obj * [None]\n mini_index = nb_obj * [None]\n for i in range(1, len(snapshot)):\n mini_index[i] = (snapshot[i]['o_gen'], snapshot[i]['o_ver'])\n if type(snapshot[0]) == list:\n second = snapshot[0].pop()\n snapshot[0] = snapshot[0][0]\n memoize_obj_in_cache([snapshot], doc.bdata, i, cache)\n snapshot[0]['content'] = cache[0]\n snapshot[0]['mini_index'] = mini_index\n if 'xref_stream' not in snapshot[0]:\n file_seq.append(snapshot[0])\n snapshot[0] = second\n for i in range(len(snapshot)):\n if snapshot[i]['o_num'] == 0 and 'xref_stream' in snapshot[i]:\n snapshot[i]['ignore'] = True\n continue\n memoize_obj_in_cache([snapshot], doc.bdata, i, cache)\n snapshot[i]['content'] = cache[i]\n snapshot[i]['mini_index'] = mini_index\n if i == 0: print(snapshot[i])\n file_seq.extend(snapshot)\n file_seq = [x for x in file_seq if x is not None and 'ignore' not in x]\n pos_index = {}\n\n STARTXREF = b'startxref'\n startxref_pos = 0\n while True:\n startxref_pos = bdata.find(STARTXREF, startxref_pos)\n if startxref_pos == -1:\n break\n i, j, _ = next_token(bdata, startxref_pos + len(STARTXREF))\n xref_pos = int(bdata[i:j])\n file_seq.append({'abs_pos':startxref_pos, 'o_num':-1, 'o_gen':-1, 'o_ver':startxref_pos,\n 'mini_index':None, 'content':xref_pos})\n startxref_pos += len(STARTXREF)\n\n EOF = b'%%EOF'\n eof_pos = 0\n while True:\n eof_pos = bdata.find(EOF, eof_pos)\n if eof_pos == -1:\n break\n file_seq.append({'abs_pos':eof_pos, 'o_num':-2, 'o_gen':-2, 'o_ver':eof_pos,\n 'mini_index':None, 'content':None})\n eof_pos += len(EOF)\n \n for obj in file_seq:\n if 'abs_pos' in obj and obj['o_num'] != -2:\n pos_index[obj['abs_pos']] = f\"{obj['o_num']}.{obj['o_gen']}.{obj['o_ver']}\"\n file_seq.sort(key=lambda x: x.get('abs_pos') or x.get('a_')) \n print(build_html(file_seq, pos_index, filename))",
"def prepare_dictionary_from_docs(self):\n if os.path.exists(self.DICT_PATH):\n return True\n self.logger.info(\"START PREPARING DICT\")\n for fn in os.listdir(self.wiki_path):\n self.logger.info(\"dict update {0}\".format(fn))\n content = self.get_processed_content(fn)\n self.dictionary.add_documents([content])\n self.dictionary.filter_extremes(no_below=20, no_above=0.1, keep_n=100000)\n self.dictionary.compactify()\n self.dictionary.save(self.DICT_PATH)\n return True",
"def multiple_document_processing(self) -> List:\n batch_list = []\n for doc, idx in self.__documents:\n entities_idx = {'idx': idx}\n entities_result = self.create_entity(document=doc)\n word_cleaned = self.clean_words(doc)\n entities_idx[self.key_spacy_text] = str(word_cleaned)\n entities_idx.update(entities_result)\n batch_list.append(entities_idx)\n return batch_list",
"def create_IMPACT505_b37_reference_files(apps, schema_editor):\n File = apps.get_model(\"file_system\", \"File\")\n FileMetadata = apps.get_model(\"file_system\", \"FileMetadata\")\n FileGroup = apps.get_model(\"file_system\", \"FileGroup\")\n FileType = apps.get_model(\"file_system\", \"FileType\")\n try:\n file_group = FileGroup.objects.get(name=\"Reference Files\")\n txt = FileType.objects.get(name=\"txt\")\n ilist = FileType.objects.get(name=\"ilist\")\n interval_list = FileType.objects.get(name=\"interval_list\")\n except Exception:\n print(\"No file group or file_types defined\")\n return\n try:\n\n file1 = File.objects.create(\n path=\"/juno/work/ci/resources/genomic_resources/targets/IMPACT505/b37/IMPACT505_FP_tiling_genotypes.txt\",\n file_name=\"IMPACT505_FP_tiling_genotypes.txt\",\n file_group=file_group,\n file_type=txt,\n size=0,\n )\n file_metadata_1 = FileMetadata.objects.create(\n file=file1, version=0, metadata={\"assay\": \"IMPACT505_b37\", \"data_type\": \"FP_genotypes\"}\n )\n print(\"File created\")\n except Exception as e:\n print(\"Fail to create file\")\n print(str(e))\n try:\n file2 = File.objects.create(\n path=\"/juno/work/ci/resources/genomic_resources/targets/IMPACT505/b37/IMPACT505_b37_targets.ilist\",\n file_name=\"IMPACT505_b37_targets.ilist\",\n file_group=file_group,\n file_type=ilist,\n size=0,\n )\n file_metadata_2 = FileMetadata.objects.create(\n file=file2, version=0, metadata={\"assay\": \"IMPACT505_b37\", \"data_type\": \"targets_list\"}\n )\n print(\"File created\")\n except Exception as e:\n print(\"Fail to create file\")\n print(str(e))\n try:\n file3 = File.objects.create(\n path=\"/juno/work/ci/resources/genomic_resources/targets/IMPACT505/b37/IMPACT505_b37_baits.ilist\",\n file_name=\"IMPACT505_b37_baits.ilist\",\n file_group=file_group,\n file_type=ilist,\n size=0,\n )\n file_metadata_3 = FileMetadata.objects.create(\n file=file3, version=0, metadata={\"assay\": \"IMPACT505_b37\", \"data_type\": \"baits_list\"}\n )\n print(\"File created\")\n except Exception as e:\n print(\"Fail to create file\")\n print(str(e))\n try:\n file4 = File.objects.create(\n path=\"/juno/work/ci/resources/genomic_resources/targets/IMPACT505/b37/IMPACT505_FP_tiling_intervals.intervals\",\n file_name=\"IMPACT505_FP_tiling_intervals.intervals\",\n file_group=file_group,\n file_type=interval_list,\n size=0,\n )\n file_metadata_4 = FileMetadata.objects.create(\n file=file4, version=0, metadata={\"assay\": \"IMPACT505_b37\", \"data_type\": \"FP_intervals\"}\n )\n print(\"File created\")\n except Exception as e:\n print(\"Fail to create file\")\n print(str(e))",
"def process(self, doc_data):\n self.doc_data = doc_data\n self.process_text(self.auto_link_messages)\n self.process_text(self.auto_link_xips)\n self.add_type_sizes()\n return self.doc_data"
] | [
"0.669818",
"0.6138729",
"0.60152596",
"0.6008848",
"0.6003464",
"0.59747124",
"0.59229577",
"0.5912856",
"0.5852708",
"0.5825268",
"0.58067274",
"0.5783912",
"0.57605404",
"0.570088",
"0.5667414",
"0.56489325",
"0.5625573",
"0.561688",
"0.5607895",
"0.55752087",
"0.5572639",
"0.5553455",
"0.55521446",
"0.55520296",
"0.55457574",
"0.5519863",
"0.55123013",
"0.5505961",
"0.5489387",
"0.54847544"
] | 0.6652755 | 1 |
Validator function that returns val if val is either a number or the word 'auto'. This is used as a validator for the text editor in the traits UI for the tick_interval trait. | def float_or_auto(val):
try:
return float(val)
except:
if isinstance(val, basestring) and val == "auto":
return val
raise TraitError, "Tick interval must be a number or 'auto'." | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _check_value(self,val):\n if self.allow_None and val is None:\n return\n\n if not _is_number(val):\n raise ValueError(\"Parameter '%s' only takes numeric values\"%(self._attrib_name))\n \n self._checkBounds(val)",
"def Validate(self, win):\n\n txtCtrl = self.GetWindow()\n val = txtCtrl.GetValue()\n isValid = False\n if val.isdigit():\n digit = int(val)\n if digit >= self._min and digit <= self._max:\n isValid = True\n if not isValid:\n # Notify the user of the invalid value\n msg = \"Value must be between %d and %d\" % \\\n (self._min, self._max)\n wx.MessageBox(msg,\n \"Invalid Value\",\n style=wx.OK|wx.ICON_ERROR)\n return isValid",
"def validate_rating(self, key, value):\n assert value is None or value <= 10 and value >= 0\n return value",
"def yohoho_validator(payload, chosen):\n\n if not chosen.isdecimal():\n print(f\"Choose a number!\")\n return False\n\n return True",
"def is_valid(self, value) -> 'True | str':\n err_str = super().is_valid()\n if isinstance(err_str, str):\n return err_str\n if value < self.min_val or value > self.max_val:\n return f'The value \"{value}\" must be in range <{self.min_val}, {self.max_val}>.'\n return True",
"def do_interval(self, str_arg):\n self.INTERVAL = float(validateDigit(str_arg))",
"def check_interval(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"check_interval\")",
"def check_interval(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"check_interval\")",
"def _validate(self, instance, value):\n\n if not isinstance(value, Real):\n raise TypeError(f\"Value for {self.prop_name} shoulde be real numbers.\")\n\n if (\n self.min_val is not None\n and value < self.min_val\n and not isclose(value, self.min_val)\n ):\n raise ValueError(\n f\"Value should be greater than or equal to {self.min_val}.\"\n )\n\n if (\n self.max_val is not None\n and value > self.max_val\n and not isclose(value, self.max_val)\n ):\n raise ValueError(f\"Value should be less than or equal to {self.max_val}.\")",
"def validate_input(helper, definition):\n input_name = definition.parameters.get(\"input_name\", None)\n interval = int(definition.parameters.get(\"interval\"))\n\n if interval < MIN_INTERVAL:\n raise ValueError(\"Interval must be at least {}\".format(MIN_INTERVAL))\n\n if input_name not in VALID_INPUTS:\n # input_name is hardcoded in the selector dropdown, this should only happen if someone is messing with internals\n raise ValueError(\"Invalid input \\\"{}\\\", supported values are \\\"{}\\\"\".format(input_name, \"|\".join(VALID_INPUTS)))",
"def validate_element(self, value):\n if not isinstance(value, self.type):\n\n # Authorize int values as float.\n if isinstance(value, six.integer_types) and self.type == float:\n return float(value)\n\n if value is None:\n if self.required:\n raise ValidationError('Required field is missing')\n else:\n try:\n name = self.name\n except AttributeError:\n raise ValidationError('Expected type %s for %s, '\n 'found %s (type %s)' %\n (self.type, self.__class__.__name__,\n value, type(value)))\n else:\n raise ValidationError(\n 'Expected type %s for field %s, found %s (type %s)' %\n (self.type, name, value, type(value)))\n return value",
"def check_value(self, name, min_int, max_int):\n while True:\n numb = input(f\"-- {name} : Entrez une valeur comprise \"\n f\"entre {min_int} et {max_int} : \")\n try:\n check = int(numb)\n if check == 99 or min_int <= check <= max_int:\n break\n except ValueError:\n pass\n return check",
"def _validate_value(self, val):\r\n if type(val) in (int, long, float, str, unicode, ):\r\n return val\r\n if isinstance(val, tuple) or isinstance(val, frozenset):\r\n for i in val:\r\n self._validate_value(i)\r\n return val\r\n raise TypeError(\r\n \"Only number/strings and tuples/frozensets allowed here.\",\r\n )",
"def validate_element(self, value):\n\n if not isinstance(value, self.type):\n # Authorize in values as float\n if isinstance(value, six.integer_types) and self.type == float:\n return float(value)\n\n if value is None:\n if self.required:\n raise ValidationError('Required field is missing')\n else:\n try:\n name = self.name\n except AttributeError:\n raise ValidationError('Expected type %s for %s, '\n 'found %s (type %s)' %\n (self.type, self.__class__.__name__,\n value, type(value)))\n else:\n raise ValidationError('Expected type %s for field %s, '\n 'found %s (type %s)' %\n (self.type, name, value, type(value)))\n return value",
"def validate(val, num1=0, num2=float('inf')):\n val = int(val)\n if not num1 <= val < num2:\n raise ArgumentTypeError(\"Value out of range: {}. \"\n \"Should be between {} and {}.\".format(val, num1, num2 - 1))\n return val",
"def range_validator(value_str, args):\n \n assert len(args) == 5, \"Error: range_validator requires 5 arguments.\"\n a_type, lb, ub, allow_none, error_msg = args\n try:\n if allow_none and value_str == 'None':\n value = None\n else:\n value = a_type(value_str)\n except ValueError:\n raise InputException(error_msg + value_str)\n if (lb != None and value < lb) or (ub != None and value > ub):\n raise InputException(error_msg + value_str)\n return value",
"def _validate_val_range(self, proposal):\n val_range = proposal[\"value\"]\n if len(val_range) != 2:\n raise traitlets.TraitError(\"val_range must be of length 2.\")\n if val_range[0] > val_range[1]:\n raise traitlets.TraitError(\n \"val_range[0] must be smaller than val_range[1].\"\n )\n return val_range",
"def check_for_float_and_int(check):",
"def check_for_int(check):",
"def validate_number(value_if_allowed):\n if value_if_allowed == '':\n return True\n try:\n float(value_if_allowed)\n return True\n except ValueError:\n return False",
"def _validate_delay(self, attribute: attr.Attribute, value: float):\n\n if not isinstance(value, int) or value <= 0:\n raise ValueError(\n f\"Window delay must be a non-zero value, received {value!r}\"\n )",
"def validate_decimal(v: str, field: Field):\n field_info = field.field_info\n inclusive = field_info.ge is not None or field_info.le is not None\n min_value = field_info.gt if field_info.gt is not None else field_info.ge\n min_value = Decimal(min_value) if min_value is not None else min_value\n max_value = field_info.lt if field_info.lt is not None else field_info.le\n max_value = Decimal(max_value) if max_value is not None else max_value\n ret = validate_decimal(v, min_value, max_value, inclusive)\n if ret is not None:\n raise ValueError(ret)\n return v",
"def validate_check_in_period(check_in_period):\n if not check_in_period:\n check_in_period = 30\n if not isinstance(check_in_period, int):\n try:\n check_in_period = int(check_in_period)\n except ValueError:\n print \"Incorrect check-in period given. Setting to 30.\"\n check_in_period = 30\n\n return check_in_period",
"def validate(self, value):\n if super().validate(value):\n return (value is None) or (isinstance(value, int) and self._validate_value(value))\n else:\n return False",
"def _parse(self, val):\n if self.type == \"integer\":\n return int(val)\n elif self.type == \"number\":\n return float(val)\n elif self.type == \"boolean\":\n lower_val = str(val).lower()\n if lower_val not in {\"true\", \"false\"}:\n msg = \"Boolean parameter '{}' only accept True/False, got {}.\"\n raise ValidationException(\n message=msg.format(self.name, val),\n no_personal_data_message=msg.format(\"[self.name]\", \"[val]\"),\n error_category=ErrorCategory.USER_ERROR,\n target=ErrorTarget.PIPELINE,\n )\n return True if lower_val == \"true\" else False\n return val",
"def check_value(self, value):",
"def integer_validator(self, name, value):\n if type(value) is not int:\n raise TypeError(name + \" must be an integer\")\n elif value <= 0 and name not in (\"x\", \"y\"):\n raise ValueError(name + \" must be > 0\")\n elif value < 0 and name in (\"x\", \"y\"):\n raise ValueError(name + \" must be >= 0\")",
"def test_non_numberic_validation(self):",
"def test_non_numberic_validation(self):",
"def checkint(name, val, mn=None, mx=None):\n try:\n\tif val[0:2] == '0x' or val[0:2] == '0X':\n\t x = string.atoi(val, 16)\n\telif val[0:0] == '0':\n\t x = string.atoi(val, 8)\n\telse:\n\t # allow commas as long as they are properly spaced\n\t x = string.split(val, \",\")\n\t if len(x) > 1:\n\t\tfor e in x[1:]:\n\t\t if len(e) != 3:\n\t\t\traise ValidationError, \\\n\t\t\t '%s is not a valid integer' % val\n\t\tif len(x[0]) < 1 or len(x[0]) > 3:\n\t\t raise ValidationError, \\\n\t\t\t '%s is not a valid integer' % val\n\t\tval = re.sub(\",\", \"\", val)\n\t x = string.atoi(val)\n\tif ((mn is not None and x < mn) or\n\t (mx is not None and x > mx)):\n\t\traise ValidationError, \\\n\t\t 'parameter \"%s\", value \"%s\" is out of range' % \\\n\t\t (name, val)\n\treturn\n except ValueError:\n\traise ValidationError, '%s is not a valid integer' % val"
] | [
"0.5691109",
"0.5517272",
"0.5372983",
"0.5315519",
"0.530793",
"0.52717775",
"0.527009",
"0.527009",
"0.52588606",
"0.5233357",
"0.5230444",
"0.5217056",
"0.52077264",
"0.5195558",
"0.5192723",
"0.51831424",
"0.5169164",
"0.5168111",
"0.5160562",
"0.5151546",
"0.5128437",
"0.5104923",
"0.5097822",
"0.5092358",
"0.5091432",
"0.50840765",
"0.508004",
"0.50649524",
"0.50649524",
"0.50530434"
] | 0.7098433 | 0 |
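The float_or_auto record above accepts anything float() can parse and otherwise only the literal string 'auto'; everything else raises TraitError. A minimal Python 3 sketch of the same behaviour, assuming TraitError is importable from traits.api (the calls at the bottom are purely illustrative):

    from traits.api import TraitError

    def float_or_auto(val):
        # Return a float for numeric input, pass 'auto' through, reject the rest.
        try:
            return float(val)
        except (TypeError, ValueError):
            if isinstance(val, str) and val == "auto":
                return val
        raise TraitError("Tick interval must be a number or 'auto'.")

    float_or_auto(2.5)      # 2.5
    float_or_auto("auto")   # 'auto'
    # float_or_auto("abc")  # would raise TraitError
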
Invalidate cached information about the grid. | def invalidate(self):
self._reset_cache()
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def invalidateCaches(self):\n\n self._vertexCacheValid = False\n self._genusCacheValid = False\n self._vertexCharacteristicCacheValid = False\n self._coreCacheValid = False",
"def invalidate(self) -> None:\n self._cache.invalidate(self._cache_key)",
"def invalidate(self):\n\n dogpile_region, cache_key = self._get_cache_plus_key()\n dogpile_region.delete(cache_key)",
"def _clear_cache(self):\n keys = [\"nodes\", \"availability\", \"capacity\", \"cost\"]\n for key in keys:\n if key in self.__dict__:\n del self.__dict__[key]",
"def invalidate_cache(self):\n #self.objects.objects = []\n return True",
"def reset_cache(self):\n self.izx.reset_cache()\n self.ezx.reset_cache()",
"def clear_cache(self):\n pass",
"def invalidate_cache(self):\n self._invalidate_http_cache()",
"def cache_clear(self):\n\t\tself.__cache = {}",
"def _clear_cache(self):\n\n self._cache = dict()",
"def _clear_cache(self):\n self.cache = {}",
"def clear_cache():\n # TODO\n pass",
"def invalidate(self):\n self.modified = True\n self._invalidated = True\n self._session.clear()",
"def clear_cache(self):\n self._cache = dict()",
"def decache(self):",
"def _clear_caches(self):\n self._brushes = {}\n self._formats = {}",
"def clear(self):\n self._grid = [[None]]",
"def _reset_cache(self):\n self._tick_positions = array([], dtype=float)\n self._tick_extents = array([], dtype=float)\n self._cache_valid = False\n return",
"def clear(self):\n self._cache = dict()",
"def _clean_cache(self):\n del self._cache\n self._cache = {}",
"def gc(self):\n self._complete_grid = None",
"def delete_grid(self):\n\n\t\tself.a_grid = None\t\t# Deletes the object from memory",
"def clean_cache(self):\n return",
"def reset_cache(self):\n self.cache = [None] * self.n_layers\n self.offset = 0\n logger.debug('Reset cache.')",
"def clear_cache(self):\n self.part_cache.clear()",
"def invalidate_caches(self) -> None:\n for seg in self.segments:\n seg.invalidate_caches()\n\n self._recalculate_caches()",
"def reset(self):\r\n # replace with your code\r\n for row in range(0, self._grid_height):\r\n for col in range(0, self._grid_width):\r\n self._grid_tile[row][col] = 0\r\n # at this step, all cells should be available\r\n self.new_tile()\r\n self.new_tile()",
"def reset(self):\n # replace with your code\n self._grid = [[0 for dummy_column in range(self._grid_width)] for dummy_row in range(self._grid_height)]\n for dummy_num in range(2):\n self.new_tile()",
"def reset(self):\r\n self.grid = [[0 for dummy_col in range(self.grid_width)] for dummy_row in range(self.grid_height)]\r\n self.new_tile()\r\n self.new_tile()",
"def freeGridSave( self ):\n assert(self.hasSaveMemory)\n assert(not self.notSaved)\n self.notSaved = True"
] | [
"0.70984733",
"0.6946755",
"0.69070697",
"0.6881548",
"0.68126804",
"0.6806167",
"0.6802581",
"0.6731509",
"0.6715208",
"0.66486037",
"0.6635163",
"0.6590392",
"0.6583073",
"0.65819734",
"0.65754324",
"0.6560974",
"0.65329874",
"0.65193963",
"0.6497604",
"0.6495294",
"0.6469029",
"0.64681315",
"0.64592624",
"0.6437763",
"0.64216834",
"0.6412647",
"0.6363704",
"0.63416624",
"0.6323273",
"0.63223606"
] | 0.73831993 | 0 |
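The invalidate record above is one half of a lazy-recompute pattern: invalidation only empties the cached tick arrays, and the next request rebuilds them. A hedged, self-contained sketch of that pattern (class and method names are illustrative, not the library's API):

    import numpy as np

    class GridTickCache:
        def __init__(self):
            self._reset_cache()

        def _reset_cache(self):
            # Empty array plus a flag mark the cache as stale.
            self._tick_positions = np.array([], dtype=float)
            self._cache_valid = False

        def invalidate(self):
            self._reset_cache()

        def ticks(self, low, high, count=5):
            # Rebuild only when the cache has been invalidated.
            if not self._cache_valid:
                self._tick_positions = np.linspace(low, high, count)
                self._cache_valid = True
            return self._tick_positions

    cache = GridTickCache()
    cache.ticks(0.0, 10.0)   # computes and caches
    cache.invalidate()       # next ticks() call recomputes
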
Lays out the axis as an overlay on another component. | def _layout_as_overlay(self, size=None, force=False):
if self.component is not None:
self.position = self.component.position
self.bounds = self.component.bounds
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def overlay(self, other_component, gc, view_bounds=None, mode=\"normal\"):\n if not self.visible:\n return\n self._compute_ticks(other_component)\n self._draw_component(gc, view_bounds, mode)\n self._cache_valid = False\n return",
"def do_layout(self, *args, **kw):\n if self.use_draw_order and self.component is not None:\n self._layout_as_overlay(*args, **kw)\n else:\n super(PlotGrid, self).do_layout(*args, **kw)\n return",
"def plot_overlay2axes(self, axes) -> None:\n # calculate height (based on leaf analysis ratio)\n upper_point = (\n self.leaf_center_px - self.leaf_width_px / 2 * self._analysis_ratio\n )\n lower_point = (\n self.leaf_center_px + self.leaf_width_px / 2 * self._analysis_ratio\n )\n height = abs(upper_point - lower_point) * 0.8\n\n for idx, line in enumerate(self.marker_lines):\n width = abs(self.error[idx]) * self._image.dpmm\n y = line.center.y\n x = self.position[idx] - (self.error[idx] * self._image.dpmm) / 2\n\n if self._orientation == Orientation.UP_DOWN:\n r = Rectangle(width, height, center=(x, y))\n # if any of the values are over tolerance, show another larger rectangle to draw the eye\n if not self.passed[idx] or not self.passed_action[idx]:\n re = Rectangle(\n self._image_window.shape[1] * 0.2, height * 1.2, center=(x, y)\n )\n re.plot2axes(\n axes,\n edgecolor=\"none\",\n fill=True,\n alpha=0.5,\n facecolor=self.bg_color[idx],\n )\n else:\n r = Rectangle(height, width, center=(x, y))\n if not self.passed[idx] or not self.passed_action[idx]:\n re = Rectangle(\n self._image_window.shape[1] * 0.2, height * 1.2, center=(x, y)\n )\n re.plot2axes(\n axes,\n edgecolor=\"none\",\n fill=True,\n alpha=0.5,\n facecolor=self.bg_color[idx],\n )\n r.plot2axes(\n axes, edgecolor=\"none\", fill=True, alpha=1, facecolor=self.bg_color[idx]\n )",
"def plot_ortho_overlayed(vol_a: Volume, vol_b: Volume, axis=2, pixel_size: float = 1.0) -> None:\n from scipy.ndimage.interpolation import zoom\n import matplotlib.pyplot as plt\n\n vol_a_zoomed = np.mean(zoom(vol_a, np.array(vol_a.spacing) * pixel_size), axis=axis)\n vol_b_zoomed = np.mean(zoom(vol_b, np.array(vol_b.spacing) * pixel_size), axis=axis)\n b_channel = np.zeros_like(vol_a_zoomed)\n\n max_val = max(vol_a_zoomed.max(), vol_b_zoomed.max())\n min_val = min(vol_a_zoomed.min(), vol_b_zoomed.min())\n\n vol_a_zoomed = (vol_a_zoomed - min_val) / (max_val - min_val)\n vol_b_zoomed = (vol_b_zoomed - min_val) / (max_val - min_val)\n\n plt.imshow(np.stack([vol_a_zoomed, vol_b_zoomed, b_channel], axis=2))\n plt.show()",
"def draw_overlay(self):\n pass",
"def plotOverlays(self):\n if self.overlayFluxSurfaces:\n self.plotFluxSurfaces()\n if self.overlayMagneticAxis:\n self.plotMagneticAxis()\n if self.overlaySeparatrix:\n self.plotSeparatrix()\n if self.overlayWallCrossSection:\n self.plotWallCrossSection()",
"def _handle_axes(self, drawable, option):\n # If we already have an axes object, ignore this one\n if self._axes_object is not None:\n return\n\n # Grab the histogram used for axes style/range manipulation\n if is_stack(drawable) or is_graph(drawable):\n axes_histogram = drawable.GetHistogram()\n else:\n axes_histogram = drawable\n\n # Grab the histogram used for title manipulation\n if is_stack(drawable):\n title_histogram = drawable.GetHists()[0]\n else:\n title_histogram = drawable\n\n # Set the plot title\n title_histogram.SetTitle(self._title)\n\n # Grab axes\n x_axis, y_axis = axes_histogram.GetXaxis(), axes_histogram.GetYaxis()\n\n # Grab titles from first histogram if not set explicitly\n if self._x_title is None:\n self._x_title = title_histogram.GetXaxis().GetTitle()\n if self._y_title is None:\n self._y_title = title_histogram.GetYaxis().GetTitle()\n\n # Style x-axis, or hide it if this plot has a ratio plot\n if self._x_range is not None:\n x_axis.SetRangeUser(*self._x_range)\n if self._ratio_plot:\n x_axis.SetLabelOffset(999)\n x_axis.SetTitleOffset(999)\n else:\n x_axis.SetTitle(self._x_title)\n x_axis.SetTitleSize(self.PLOT_X_AXIS_TITLE_SIZE)\n x_axis.SetTitleOffset(self.PLOT_X_AXIS_TITLE_OFFSET)\n x_axis.SetLabelSize(self.PLOT_X_AXIS_LABEL_SIZE)\n if self._x_integer_ticks:\n x_axis.SetNdivisions(11) # hack for integer ticks \n\n # Style y-axis\n y_axis.SetTitle(self._y_title)\n y_axis.SetLabelFont(self.PLOT_ATLAS_STAMP_TEXT_FONT)\n y_axis.SetTitleSize(\n (self.PLOT_Y_AXIS_TITLE_SIZE_WITH_RATIO\n if self._ratio_plot\n else self.PLOT_Y_AXIS_TITLE_SIZE)\n )\n y_axis.SetTitleOffset(\n (self.PLOT_Y_AXIS_TITLE_OFSET_WITH_RATIO\n if self._ratio_plot\n else self.PLOT_Y_AXIS_TITLE_OFFSET)\n )\n y_axis.SetNdivisions(5,5,0)\n \n # set axis text sizes \n if self._ratio_plot:\n y_axis.SetLabelSize(self.PLOT_Y_AXIS_LABEL_SIZE_WITH_RATIO)\n else:\n y_axis.SetLabelSize(self.PLOT_Y_AXIS_LABEL_SIZE) \n y_axis.SetTitleSize(self.PLOT_Y_AXIS_TITLE_SIZE)\n y_axis.SetTitleOffset(self.PLOT_RATIO_Y_AXIS_TITLE_OFFSET)\n\n # Redraw the drawable with the new style\n drawable.Draw(option)",
"def centerAxis():\n dislin.center()",
"def alty(self, **kwargs):\n if self._alty_child or self._alty_parent:\n raise RuntimeError('No more than *two* twin axes are allowed.')\n with self.figure._authorize_add_subplot():\n ax = self._make_twin_axes(sharex=self, projection='xy')\n ax.set_autoscalex_on(self.get_autoscalex_on())\n ax.grid(False)\n self._alty_child = ax\n ax._alty_parent = self\n self._alty_overrides()\n ax._alty_overrides()\n self.add_child_axes(ax) # to facilitate tight layout\n self.figure._axstack.remove(ax) # or gets drawn twice!\n ax.format(**_parse_alt('y', kwargs))\n return ax",
"def _make_twin_axes(self, *args, **kwargs):\n # Typically, SubplotBase._make_twin_axes is called instead of this.\n # There is also an override in axes_grid1/axes_divider.py.\n if 'sharex' in kwargs and 'sharey' in kwargs:\n raise ValueError('Twinned Axes may share only one axis.')\n ax2 = self.figure.add_axes(self.get_position(True), *args, **kwargs)\n self.set_adjustable('datalim')\n ax2.set_adjustable('datalim')\n self._twinned_axes.join(self, ax2)\n return ax2",
"def _InitAxes( self ):\n self.ax = self.fig.add_subplot( 111 )",
"def generate_axis(self):\n fg = plt.figure(figsize=(15, 15))\n ax = fg.add_axes([0.1, 0.1, 0.8, 0.8], projection='polar')\n norm = mpc.Normalize(0, 2*np.pi)\n t = np.linspace(0, 2*np.pi, 700) # 700 seems to be a sweet spot for no obvious lines, makes a smooth wheel\n r = np.linspace(0, 1, 2)\n rg, tg = np.meshgrid(r, t)\n c = tg\n ax.pcolormesh(t, r, c.T, norm=norm, cmap=cm.get_cmap('hsv', 2056))\n ax.set_yticklabels([])\n ax.set_xticklabels([])\n ax.spines['polar'].set_visible(True)\n self.ax = ax",
"def _set_axes(self):\n self += helper.line(stroke=\"black\", x1=self.__dict__['x'], x2=self.__dict__['x'], y1=0, y2=self.__dict__['y']*2)\n self += helper.line(stroke=\"black\", x1=0, x2=self.__dict__['x']*2, y1=self.__dict__['y'], y2=self.__dict__['y'])",
"def altx(self, **kwargs):\n # Cannot wrap twiny() because we want to use XYAxes, not\n # matplotlib Axes. Instead use hidden method _make_twin_axes.\n # See https://github.com/matplotlib/matplotlib/blob/master/lib/matplotlib/axes/_subplots.py # noqa\n if self._altx_child or self._altx_parent:\n raise RuntimeError('No more than *two* twin axes are allowed.')\n with self.figure._authorize_add_subplot():\n ax = self._make_twin_axes(sharey=self, projection='xy')\n ax.set_autoscaley_on(self.get_autoscaley_on())\n ax.grid(False)\n self._altx_child = ax\n ax._altx_parent = self\n self._altx_overrides()\n ax._altx_overrides()\n self.add_child_axes(ax) # to facilitate tight layout\n self.figure._axstack.remove(ax) # or gets drawn twice!\n ax.format(**_parse_alt('x', kwargs))\n return ax",
"def _update_ax(self):\n raise NotImplementedError(\"Implement _update_ax(self) in subclass\")",
"def _make_axes(self):\n ax_idx = self.atlas.space.axes_order.index(\"frontal\")\n\n # make acustom axes dict\n atlas_shape = np.array(self.atlas.metadata[\"shape\"]) * np.array(\n self.atlas.metadata[\"resolution\"]\n )\n z_range = np.array([-atlas_shape[2], 0])\n z_ticks = [\n (-v, str(np.abs(v).astype(np.int32)))\n for v in np.linspace(\n 0,\n atlas_shape[ax_idx],\n 10,\n )\n ]\n\n if self.atlas.atlas_name == \"allen_human_500um\":\n z_range = None\n z_ticks = None\n logger.debug(\n \"RENDER: manually forcing axes size for human atlas, atlas needs fixing\"\n )\n\n # make custom axes dict\n axes = dict(\n axesLineWidth=3,\n tipSize=0,\n xtitle=\"AP (μm)\",\n ytitle=\"DV (μm)\",\n ztitle=\"LR (μm)\",\n textScale=0.8,\n xTitleRotation=180,\n zrange=z_range,\n zValuesAndLabels=z_ticks,\n xyGrid=False,\n yzGrid=False,\n zxGrid=False,\n xUseBounds=True,\n yUseBounds=True,\n zUseBounds=True,\n xLabelRotation=180,\n yLabelRotation=180,\n zLabelRotation=90,\n )\n\n return axes",
"def drawAxis(self, frame):\n for face in self.faces():\n face.drawAxis(frame)\n return frame",
"def _finalize_axis(self, key, **kwargs):\n axis = self.handles['axis']\n self.handles['fig'].set_frameon(False)\n axis.grid(self.show_grid)\n axis.view_init(elev=self.elevation, azim=self.azimuth)\n try:\n axis._dist = self.distance\n except Exception:\n # axis.dist is deprecated see here:\n # https://github.com/matplotlib/matplotlib/pull/22084\n axis.dist = self.distance\n\n if self.xaxis is None:\n axis.w_xaxis.line.set_lw(0.)\n axis.w_xaxis.label.set_text('')\n if self.yaxis is None:\n axis.w_yaxis.line.set_lw(0.)\n axis.w_yaxis.label.set_text('')\n if self.zaxis is None:\n axis.w_zaxis.line.set_lw(0.)\n axis.w_zaxis.label.set_text('')\n if self.disable_axes:\n axis.set_axis_off()\n\n if mpl_version <= Version('1.5.9'):\n axis.set_axis_bgcolor(self.bgcolor)\n else:\n axis.set_facecolor(self.bgcolor)\n return super()._finalize_axis(key, **kwargs)",
"def populate_plot_axis(self,plot,ax='x'):\n\n fig=plt.gcf()\n\n extra_ax=[]\n\n if ax=='x':\n\n ticks=plot.get_xticks()\n\n lim=plot.get_xlim()\n\n for i in range(len(self.names)):\n\n if i==0:\n\n axn=plot\n\n axn.spines['bottom'].set_position(('outward',10))\n\n axn.spines['bottom'].set_visible(True)\n\n else:\n\n dy_fig=0.08\n\n prev_ax_position=axn.get_position()\n\n extra_ax.append(fig.add_axes(\\\n (prev_ax_position.x0,\\\n prev_ax_position.y0-2*dy_fig,\\\n prev_ax_position.width,\\\n 0),'autoscalex_on',True))\n\n axn=extra_ax[i-1]\n\n axn.yaxis.set_visible(False)\n\n for side in axn.spines.keys():\n\n axn.spines[side].set_linewidth(1)\n\n axn.set_xticks(ticks)\n\n ticksnames=[float(str(x)) for x in self.values[i]]\n\n axn.set_xticklabels(\\\n [\"{:.2f}\".format(x).rstrip('0').rstrip('.') for x in ticksnames],\\\n rotation = 45)\n\n xlab=axn.set_xlabel(self.names[i])\n\n xlab.set_fontsize(10)\n\n axn.tick_params(axis='x',labelsize=10)\n\n axn.set_xlim(lim)\n\n\n\n elif ax=='y':\n\n ticks=plot.get_yticks()\n\n lim=plot.get_ylim()\n\n for i in range(len(self.names)):\n\n if i==0:\n\n axn=plot\n\n axn.spines['left'].set_position(('outward',10))\n\n axn.spines['left'].set_visible(True)\n\n else:\n\n dx_fig=0.08\n\n plot_position=plot.get_position()\n\n prev_ax_position=axn.get_position()\n\n extra_ax.append(fig.add_axes(\\\n (prev_ax_position.x0-2*dx_fig,\\\n prev_ax_position.y0,\\\n 0,\\\n prev_ax_position.height),'autoscalex_on',True))\n\n axn=extra_ax[i-1]\n\n axn.xaxis.set_visible(False) # hide the yaxis\n\n for side in axn.spines.keys(): # 'top', 'bottom', 'left', 'right'\n\n axn.spines[side].set_linewidth(1)\n\n axn.set_yticks(ticks)\n\n ticksnames=[float(str(x)) for x in self.values[i]]\n\n axn.set_yticklabels(\\\n [\"{:.2f}\".format(x).rstrip('0').rstrip('.') for x in ticksnames],\\\n rotation = 45)\n\n ylab=axn.set_ylabel(self.names[i])\n\n ylab.set_fontsize(10)\n\n axn.tick_params(axis='y',labelsize=10)\n\n axn.set_ylim(lim)\n\n else:\n\n raise ValueError(\"Axis can be 'x' or 'y'\")",
"def __init__(self,options,pos):\n self.options = options\n numobjects = pos.shape[1]\n plt.ion() # turn on interactive plotting mode\n dpi=72.0 # set dpi (I think this is appropriate on mac)\n # fig accepts size in inches\n # so divide desired pixel width, height by dpi to get inches\n w,h=(self.options.width/dpi,self.options.height/dpi)\n fig = plt.figure(1,figsize=(w,h),dpi=dpi)\n fig.clear()\n\n #w = self.options.width/fig.get_dpi() # desired width in inches\n #h = self.options.height/fig.get_dpi() # desired height in inches\n #fig.set_size_inches(w,h,forward=True) # last arg resizes the canvas to match\n\n self.ax = plt.axes()\n self.ax.set_xlim(self.options.xmin,self.options.xmax)\n self.ax.set_ylim(self.options.ymin,self.options.ymax)\n #pyplot.axis('scaled')\n\n # I don't know why axis('scaled') doesn't work here\n # But I think the next two commands are equivalent\n self.ax.set_aspect('equal', adjustable='box', anchor='C')\n self.ax.set_autoscale_on(False)\n\n #self.redraw()\n\n\n #facecolors = [cm.jet(x) for x in np.random.rand(len(vicon_objects))]\n facecolors = [cm.jet(x) for x in np.linspace(0,1,numobjects)]\n if self.options.visualize_switch_xy:\n if self.options.axis==1:\n self.ax.axvline(linewidth=4, c='k')\n else:\n self.ax.axhline(linewidth=4, c='k')\n self.col = plt.scatter(pos[:,1],pos[:,0],c=facecolors,s=3000)\n else:\n if self.options.axis==1:\n self.ax.axhline(linewidth=4, c='k')\n else:\n self.ax.axvline(linewidth=4, c='k')\n self.col = plt.scatter(pos[:,0],pos[:,1],c=facecolors,s=3000)\n\n # scores\n self.tpos = self.ax.text(0.75*self.options.xmax,0.75*self.options.ymin,str(50),\n size=72,color='k',ha='center',va='center')\n self.tneg = self.ax.text(0.75*self.options.xmin,0.75*self.options.ymin,str(50),\n size=72,color='k',ha='center',va='center')\n\n self.canvas = agg.FigureCanvasAgg(fig)\n self.canvas.draw()\n self.renderer = self.canvas.get_renderer()\n raw_data = self.renderer.tostring_rgb()\n\n pygame.init()\n \n self.window = pygame.display.set_mode((options.width,options.height), DOUBLEBUF)\n self.screen = pygame.display.get_surface()\n\n self.set_caption(\"Possession: Waiting for Vicon\")\n \n size = self.canvas.get_width_height()\n \n surf = pygame.image.fromstring(raw_data, size, \"RGB\")\n self.screen.blit(surf, (0,0))\n pygame.display.flip()",
"def drawAxis(image, cameraMatrix, distCoeffs, rvec, tvec, length):\n pass",
"def addAxes(self):\n numDims = len(self.relation.fieldNames) - 1\n angle = 360 / numDims\n axisDomains = self.relation.axisDomains\n for i in range(numDims):\n axis = PlotAxis(self)\n self.scene().addItem(axis)\n if self.axisAngles and i < len(self.axisAngles):\n axis.setRotation(self.axisAngles[i])\n else:\n axis.setRotation(angle * i)\n self.axes.append(axis)\n\n domain = axisDomains[i]\n text = PlotAxisLabel(\"{}\\n[{:.2f},{:.2f}]\".format(self.relation.fieldNames[i], domain[0], domain[1]))\n text.setFont(self.labelFont)\n self.axisLabels.append(text)\n text.setParentItem(axis)",
"def _draw_overlay(self, gc, view_bounds=None, mode='normal'):\n self._draw_component(gc, view_bounds, mode)\n return",
"def panel_axes(self, side, **kwargs):\n return self.figure._add_axes_panel(self, side, **kwargs)",
"def blit(self):\n # self.ax1.draw_artist(self.lines[:2])\n # self.ax2.draw_artist(self.lines[2:4])\n self.ax1.autoscale()\n self.ax2.autoscale()\n self.ax1.redraw_in_frame()\n self.ax2.redraw_in_frame()\n self.fig.canvas.blit(self.fig.bbox)",
"def add_axes(\n self,\n interactive=None,\n line_width=2,\n color=None,\n x_color=None,\n y_color=None,\n z_color=None,\n xlabel='X',\n ylabel='Y',\n zlabel='Z',\n labels_off=False,\n box=None,\n box_args=None,\n viewport=(0, 0, 0.2, 0.2),\n marker_args=None,\n **kwargs,\n ):\n # Deprecated on v0.37.0, estimated removal on v0.40.0\n if marker_args is not None: # pragma: no cover\n warnings.warn(\n \"Use of `marker_args` is deprecated. Use `**kwargs` instead.\",\n PyVistaDeprecationWarning,\n )\n kwargs.update(marker_args)\n\n if interactive is None:\n interactive = self._theme.interactive\n if hasattr(self, 'axes_widget'):\n self.axes_widget.EnabledOff()\n self.Modified()\n del self.axes_widget\n if box is None:\n box = self._theme.axes.box\n if box:\n if box_args is None:\n box_args = {}\n self.axes_actor = create_axes_orientation_box(\n label_color=color,\n line_width=line_width,\n x_color=x_color,\n y_color=y_color,\n z_color=z_color,\n xlabel=xlabel,\n ylabel=ylabel,\n zlabel=zlabel,\n labels_off=labels_off,\n **box_args,\n )\n else:\n self.axes_actor = create_axes_marker(\n label_color=color,\n line_width=line_width,\n x_color=x_color,\n y_color=y_color,\n z_color=z_color,\n xlabel=xlabel,\n ylabel=ylabel,\n zlabel=zlabel,\n labels_off=labels_off,\n **kwargs,\n )\n axes_widget = self.add_orientation_widget(\n self.axes_actor, interactive=interactive, color=None\n )\n axes_widget.SetViewport(viewport)\n return self.axes_actor",
"def setup_axes2(fig, rect,tmin, tmax,zmin,zmax):\n\n tr =PolarAxes.PolarTransform()\n pi = np.pi\n\n angle_ticks = [(tmin, '%.2f' % tmin), (0,r'$0$'), (tmax, '%.2f' % tmax)]\n\n grid_locator1 = FixedLocator([v for v, s in angle_ticks])\n tick_formatter1 = DictFormatter(dict(angle_ticks))\n\n grid_locator2 = MaxNLocator(4)\n\n grid_helper = floating_axes.GridHelperCurveLinear(\n tr, extremes=(tmax, tmin, zmax, zmin),\n grid_locator1=grid_locator1,\n grid_locator2=grid_locator2,\n tick_formatter1=tick_formatter1,\n tick_formatter2=None)\n\n ax1 = floating_axes.FloatingSubplot(fig, rect, grid_helper=grid_helper)\n fig.add_subplot(ax1)\n\n # create a parasite axes whose transData in RA, cz\n aux_ax = ax1.get_aux_axes(tr)\n\n aux_ax.patch = ax1.patch # for aux_ax to have a clip path as in ax\n ax1.patch.zorder = 0.95 # but this has a side effect that the patch is\n # drawn twice, and possibly over some other\n # artists. So, we decrease the zorder a bit to\n # prevent this.\n\n return ax1, aux_ax",
"def new_axes(self, ax):\n self.ax = ax\n if self.canvas is not ax.figure.canvas:\n if self.canvas is not None:\n self.disconnect_events()\n\n self.canvas = ax.figure.canvas\n self.connect_default_events()\n\n # Reset\n self._selection_completed = False\n\n if self.direction == 'horizontal':\n trans = ax.get_xaxis_transform()\n w, h = 0, 1\n else:\n trans = ax.get_yaxis_transform()\n w, h = 1, 0\n rect_artist = Rectangle((0, 0), w, h,\n transform=trans,\n visible=False,\n **self._props)\n\n self.ax.add_patch(rect_artist)\n self._selection_artist = rect_artist",
"def addAxis(self, tag, name, minimum, maximum, default, warpMap=None):\n axisElement = ET.Element(\"axis\")\n axisElement.attrib['name'] = name\n axisElement.attrib['tag'] = tag\n axisElement.attrib['minimum'] = str(minimum)\n axisElement.attrib['maximum'] = str(maximum)\n axisElement.attrib['default'] = str(default)\n if warpMap is not None:\n for a, b in warpMap:\n warpPt = ET.Element(\"map\")\n warpPt.attrib['input'] = str(a)\n warpPt.attrib['output'] = str(b)\n axisElement.append(warpPt)\n self.root.findall('.axes')[0].append(axisElement)",
"def vplane(self, fig=None):\n #TODO more general multi-axis layout...\n figsize = (9, 6.5) # good for letter paper\n if fig is None: fig = plt.figure(figsize=figsize)\n else: fig.set_size_inches(*figsize)\n axkw = dict(frameon = True)\n left, width = 0.075, 0.6\n bh = 0.11\n pad = 0.04\n depth_ax = fig.add_axes((left, 6*pad+4.5*bh, width, bh*2), **axkw)\n axkw.update(dict(sharex = depth_ax))\n pitch_ax = fig.add_axes((left, 5*pad+3.5*bh, width, bh), **axkw)\n buoyancy_ax = fig.add_axes((left, 4*pad+2.5*bh, width, bh), **axkw)\n mass_ax = fig.add_axes((left, 3*pad + 1.5*bh, width, bh), **axkw)\n control_surface_ax = fig.add_axes((left, 2*pad + bh/2, width, bh), **axkw)\n control_mode_ax = fig.add_axes((left, pad, width, bh/2), **axkw)\n # TODO adjust scale and coverage for each axes\n # TODO do this again now that middle labels are removed\n\n self.plot_timeseries('depth', '-', axes=depth_ax)\n self.plot_timeseries('platform_pitch_angle', axes=pitch_ax)\n self.plot_timeseries('platform_mass_position', axes=mass_ax)\n self.plot_timeseries('platform_buoyancy_position', axes=buoyancy_ax)\n self.plot_timeseries('platform_elevator_angle', axes=control_surface_ax)\n # TODO Include another panel with VerticalControl mode (iff present)\n\n # TODO only if engineering data is requested...\n ### add to depth axes ###\n depth_science = {\n 'Depth_Keller/depth': 'c-',\n 'CTD_NeilBrown/depth': 'k-',\n 'Depth_MSI_US300/depth': 'm-'}\n for k, v in depth_science.items():\n try: self.plot_timeseries(k, v, axes=depth_ax)\n except: print('no {0}'.format(k))\n\n depth_engineering = {\n 'VerticalControl/smoothDepthInternal': 'r-',\n 'VerticalControl/depthCmd': 'g-',\n 'VerticalControl/depthErrorInternal': 'g:'}\n for k, v in depth_engineering.items():\n try: self.plot_timeseries(k, v, axes=depth_ax)\n except: print('no {0}'.format(k))\n # TODO only if sw debug flag is set \n depth_rate_engineering = {\n 'VerticalControl/depthRateCmd': 'gray',\n 'VerticalControl/depth_rate': 'gray', # XXX why same color?\n }\n for k, v in depth_rate_engineering.items():\n try: \n self.plot_timeseries(k, vi, axes=depth_ax, \n convert=oalib.make_multiplier(100))\n except: print('no {0}'.format(k))\n ### add to pitch axes ###\n pitch_engineering = {\n 'AHRS_sp3003D/platform_pitch_angle': 'k-', \n 'DVL_micro/platform_pitch_angle': 'm-',\n 'AHRS_3DMGX3/platform_pitch_angle': 'c-',\n 'InternalSim/platform_pitch_angle': ':r',\n }\n for k, v in pitch_engineering.items():\n try: self.plot_timeseries(k, v, axes=pitch_ax)\n except: print('no {0}'.format(k))\n ### add to mass axes ###\n mass_engineering = {\n 'VerticalControl/massPositionAction': 'g-', \n 'VerticalControl/massIntegralInternal': 'c-',\n 'MassServo/platform_mass_position': 'r-',\n #'VerticalControl/massPitchErrorInternal': ':r',\n }\n for k, v in mass_engineering.items():\n try: self.plot_timeseries(k, v, axes=mass_ax)\n except: print('no {0}'.format(k))\n ### add to buoyancy axes ###\n buoyancy_engineering = {\n 'VerticalControl/buoyancyAction': 'm-',\n 'BuoyancyServo/platform_buoyancy_position': 'b-',\n }\n for k, v in buoyancy_engineering.items():\n try: \n self.plot_timeseries(k, v,\n# convert=oalib.make_multiplier(-10), \n axes=buoyancy_ax)\n except: print('no {0}'.format(k))\n ### add to control surface axes ###\n control_surface_engineering = {\n 'VerticalControl/elevatorAngleAction': 'm-', \n 'VerticalControl/elevatorIntegralInternal': 'm:',\n 'ElevatorServo/platform_elevator_angle': 'c-',\n }\n for k, v in control_surface_engineering.items():\n try: \n 
self.plot_timeseries(k, v, convert = np.rad2deg, \n axes=control_surface_ax)\n except: print('no {0}'.format(k))\n \n\n # TODO only if supporting data is requested\n ### add other supporting data ###\n try: self.plot_timeseries('CTD_NeilBrown/depth', 'k-', axes=depth_ax)\n except: print('no CTD_NeilBrown/depth')\n try: self.plot_timeseries('Depth_MSI_US300', 'm-', axes=depth_ax)\n except: print('no Depth_MSI_US300')\n\n\n ### print additional information ###\n buoyancyNeutral = ('Config/Control/buoyancyNeutral',\n 'Config/Servo/buoyancyNeutral')\n for s in buoyancyNeutral:\n try:\n print('{0} = {1} {2}'.format(s, self[s+'/value'], self[s+'/units']))\n except:\n print('{0} not found'.format(s))\n \n# VertMd(0=N/A,1=Surf,2=Dep,3=DepRt,4=Pit0,5=Pit,6=PitRt,7=M&E,8=Flt),\n# VertHoldMd(0=N/A,1=Ms,2=El,3=Both)\n try:\n v, t = self.timeseries('VerticalControl/verticalMode')\n oalib.plot_date_blocks(t, v, axes=control_mode_ax, colormap=mpl.cm.jet)\n except: print('VerticalControl/verticalMode not found')\n\n depth_ax.invert_yaxis()\n for ax in fig.get_axes():\n ax.grid(True)\n try:\n ax.legend(loc='center left', bbox_to_anchor=(1, 0.5),\n fontsize='small')\n except:\n print('uncaught exception for legend...')\n for ax in fig.get_axes()[:-1]:\n plt.setp(ax.get_xticklabels(), visible=False)\n\n depth_ax.set_title(os.path.basename(self.filename))\n control_mode_ax.xaxis.set_major_formatter(mpl.dates.DateFormatter('%H:%M'))\n plt.setp(control_mode_ax.get_xticklabels(), rotation=30,\n fontsize='small')"
] | [
"0.6280487",
"0.61904055",
"0.61322975",
"0.5823162",
"0.5711349",
"0.5625091",
"0.5573298",
"0.5566164",
"0.54839724",
"0.5427578",
"0.54078066",
"0.53982127",
"0.5386213",
"0.5383875",
"0.5375619",
"0.5364405",
"0.5341407",
"0.53407234",
"0.533962",
"0.5333354",
"0.53311884",
"0.53250563",
"0.5297183",
"0.52520967",
"0.5246539",
"0.5233971",
"0.522997",
"0.5216835",
"0.52160895",
"0.52119344"
] | 0.650393 | 0 |
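Overlay layout in the record above amounts to mirroring the host component's geometry, with a layout hook deciding whether that overlay path applies. A framework-free sketch of the idea (illustrative names; the real component and overlay classes come from the plotting toolkit):

    class Component:
        def __init__(self, position, bounds):
            self.position = position   # (x, y)
            self.bounds = bounds       # (width, height)

    class AxisOverlay:
        def __init__(self, component=None):
            self.component = component
            self.position = (0, 0)
            self.bounds = (1, 1)

        def _layout_as_overlay(self):
            # An overlay has no geometry of its own; it tracks its host exactly.
            if self.component is not None:
                self.position = self.component.position
                self.bounds = self.component.bounds

        def do_layout(self):
            if self.component is not None:
                self._layout_as_overlay()
            # otherwise a standalone layout would run (omitted here)

    axis = AxisOverlay(Component(position=(20, 30), bounds=(400, 300)))
    axis.do_layout()
    assert axis.bounds == (400, 300)
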
Clears the cached tick positions. | def _reset_cache(self):
self._tick_positions = array([], dtype=float)
self._tick_extents = array([], dtype=float)
self._cache_valid = False
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clear(self):\n self._x_prev = None\n self._y_prev = None",
"def clear(self):\n if self.flag == 0:\n for coord in INDICES:\n self.kill(coord)\n self.chart[coord] = DEAD",
"def clear(self):\n self.clear_markers()\n self.l_marker.remove()\n self.l_line.remove()\n self.r_marker.remove()\n self.r_line.remove()",
"def reset(self):\n try:\n self.ax.cla()\n except Exception as e:\n print 'Exception BasePlot:', e\n raise e\n \n self._plotbuffer = { pat: [0 for _ in range(self._plotlength)] for pat in self._patterns }\n self._timestampbuffer = { pat: [0 for _ in range(self._plotlength)] for pat in self._patterns }\n self.ax.set_axis_bgcolor('black')\n self.ax.set_xticks([])\n self.ax.set_yticks([])",
"def clear(self):\n for i in range(len(self.canvas)):\n self.canvas[i] = 0",
"def clearAllPlots(self):\n self.dataPoints = [[{'x': 0, 'y': 0}]]\n self.sendPreviousDataPoints()",
"def _clear(self):\n self._fillitem = self._fillpath = None\n for item in self.items:\n self.screen._delete(item)\n self.currentLineItem = self.screen._createline()\n self.currentLine = []\n if self._drawing:\n self.currentLine.append(self._position)\n self.items = [self.currentLineItem]\n self.clearstamps()",
"def reset(self):\n self.obstacles = []\n self._tick = 0",
"def clear(self):\n self._plot_data_cache = {}\n self._outstanding_requests = {}",
"def reset(self):\n self._x = 0\n self._y = 0",
"def reset(self):\r\n store = get_store()\r\n nbval = store.get('Nbtimecompound')[\"value\"]\r\n for i in range(1, nbval):\r\n self.del_line(1)",
"def clear(self):\n self._clear()\n self._update()",
"def clear(self) -> None:\n self._used = set()\n self.search_pos = 1",
"def reset(self):\n self.placeables = []\n self.previous_placeable = None\n self.current_volume = 0\n self.reset_tip_tracking()",
"def reset(self):\n self._accumulated_time.clear()\n self._hit_count.clear()",
"def clear(self):\n self._plots[:] = []",
"def clear_graphs(self):\n for ax in (self.master_plot, self.time_velocity, self.time_power, self.power_velocity):\n ax.cla()",
"def clear():",
"def clear(self):\n self.raster_path_line.clear()\n self.labels_path.clear()\n self.shapefile_path.clear()\n self.costumelabels.clear()\n self.layer_name.clear()\n self.class_name.clear()\n self.idfield.clear()",
"def clear(self):\n self._grid = [[None]]",
"def clearCanvas():\n global c, coordinates\n c.delete(\"all\")\n drawMusicLines()\n coordinates.clear()",
"def _clear_caches(self):\n self._brushes = {}\n self._formats = {}",
"def clear(self):\n self._clear_without_update()\n self.update()",
"def clear(self):\n if self.size == 0:\n return\n self.modCount += 1\n self.size = 0\n Arrays.fill(self.keys, None)\n Arrays.fill(self.values, None)",
"def clear(self):\n self._plt.clear()\n self._layer_items = {}",
"def clear(self):\n self.pointscontroller.pop(self.currentlyadded)",
"def reset(self):\n self.t = 0.0\n self.last_t = None\n self.current_y = np.copy(self.start_y)\n self.current_yd = np.copy(self.start_yd)",
"def clear(self):\n self.start_times = []\n self.stop_times = []\n self.time_elapsed = 0\n self.label.set_text(self.format_timer(self.time_elapsed))\n # If the indicator exists\n if not self.indicator_label == None:\n # Clear it as well\n self.indicator_label.set_label(self.format_timer(self.time_elapsed))",
"def clear(self) -> None:\n self._tiles.clear()\n self._chunks.clear()",
"def reset_axis_counters(self):\n\n self.column_counter = 0\n self.row_counter = 0"
] | [
"0.7003931",
"0.68769014",
"0.6790435",
"0.6670656",
"0.6667401",
"0.6628095",
"0.6591179",
"0.6565466",
"0.6561653",
"0.656022",
"0.65601605",
"0.6543709",
"0.65370935",
"0.6529492",
"0.65255076",
"0.6521366",
"0.6508858",
"0.6478409",
"0.64386266",
"0.6437842",
"0.64263153",
"0.6426041",
"0.6422986",
"0.6411246",
"0.6409534",
"0.63971996",
"0.63935804",
"0.6387651",
"0.63815475",
"0.63660985"
] | 0.8015408 | 0 |
Draws the overlay layer of a component. Overrides PlotComponent. | def _draw_overlay(self, gc, view_bounds=None, mode='normal'):
self._draw_component(gc, view_bounds, mode)
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def draw_overlay(self):\n pass",
"def overlay(self, other_component, gc, view_bounds=None, mode=\"normal\"):\n if not self.visible:\n return\n self._compute_ticks(other_component)\n self._draw_component(gc, view_bounds, mode)\n self._cache_valid = False\n return",
"def overlay(self, component, gc, view_bounds=None, mode=\"normal\"):\n\n tmp = self._get_selection_screencoords()\n if tmp is None:\n return\n\n left_line, right_line, polygon = self.calculate_points(component)\n\n gc.save_state()\n try:\n gc.translate_ctm(*component.position)\n gc.set_alpha(self.alpha)\n gc.set_fill_color(self.fill_color_)\n gc.set_line_width(self.border_width)\n gc.set_stroke_color(self.border_color_)\n gc.begin_path()\n gc.lines(polygon)\n gc.fill_path()\n\n gc.begin_path()\n gc.lines(left_line)\n gc.lines(right_line)\n gc.stroke_path()\n finally:\n gc.restore_state()\n return",
"def overlay(self, overlay):\n\n self._overlay = overlay",
"def _draw_plot(self, *args, **kw):\n # Simple compatibility with new-style rendering loop\n return self._draw_component(*args, **kw)",
"def on_draw_overlay(self):",
"def plotOverlays(self):\n if self.overlayFluxSurfaces:\n self.plotFluxSurfaces()\n if self.overlayMagneticAxis:\n self.plotMagneticAxis()\n if self.overlaySeparatrix:\n self.plotSeparatrix()\n if self.overlayWallCrossSection:\n self.plotWallCrossSection()",
"def _layout_as_overlay(self, size=None, force=False):\n if self.component is not None:\n self.position = self.component.position\n self.bounds = self.component.bounds\n return",
"def do_layout(self, *args, **kw):\n if self.use_draw_order and self.component is not None:\n self._layout_as_overlay(*args, **kw)\n else:\n super(PlotGrid, self).do_layout(*args, **kw)\n return",
"def add_outline(component, layer=LAYER.DEVREC):\n c = component\n points = [\n [c.xmin, c.ymin],\n [c.xmax, c.ymin],\n [c.xmax, c.ymax],\n [c.xmin, c.ymax],\n ]\n c.add_polygon(points, layer=layer)",
"def overlay(self, image, x, y, r=0):\n x -= (image.get_rect()[2] - self.dial.get_rect()[2])/2\n y -= (image.get_rect()[3] - self.dial.get_rect()[3])/2\n image.set_colorkey((255,255,0))\n self.dial.blit(image, (x,y))",
"def render(fig, rec):\n fig.gca().add_patch(\n PolygonPatch(rec['geometry'], fc=BLUE, ec=BLUE, alpha=0.5, zorder=2))\n return fig",
"def draw(self, layer: Layer) -> None:\r\n if layer and layer.layer_index >= self.num_layers:\r\n return\r\n\r\n pyxel.bltm(layer.offset.x, layer.offset.y, self.tilemap_id + layer.layer_index,\r\n self.rect_uv.x, self.rect_uv.y, self.rect_uv.w, self.rect_uv.h,\r\n colkey=layer.transparency_color)",
"def _update_plot(self) -> None:\n\n # Check if plotting is active\n if self._fig is None:\n return None\n LOG.debug(\"Updating plot.\")\n\n # Extract glaciated area\n hs_back = np.ma.masked_where(\n self.h <= 1,\n hillshade(\n self.ele, self.PLOT_HILLSHADE_AZIMUTH, self.PLOT_HILLSHADE_ALTITUDE\n ),\n )\n\n # Clear plot and draw axes\n self._fig.clear()\n ax = plt.subplot(121, facecolor=\"black\")\n ax.tick_params(axis=\"x\", colors=\"w\")\n ax.tick_params(axis=\"y\", colors=\"w\")\n ax.set(xlabel=\"X-coordinate [m]\", ylabel=\"Y-coordinate [m]\")\n ax.xaxis.label.set_color(\"w\")\n ax.yaxis.label.set_color(\"w\")\n title_text = f\"Year: {str(self.i)} ELA: {str(int(self.ela))} m.a.s.l.\"\n ax.set_title(title_text, color=\"white\", size=18)\n\n # Draw new image layers\n plt.imshow(self.hs, vmin=90, vmax=345, cmap=\"copper\", extent=self.extent)\n plt.imshow(255 - hs_back, vmin=1, vmax=150, cmap=\"Greys\", extent=self.extent)\n\n # Mass balance\n ax1 = plt.subplot(222, facecolor=\"black\")\n ax1.plot(self.mass_balance, color=\"w\")\n ax1.plot(self.mass_balance_trend, color=\"r\")\n ax1.set(ylabel=\"Mass balance [m]\")\n ax1.yaxis.label.set_color(\"w\")\n plt.setp(ax1.get_xticklabels(), visible=False)\n ax1.tick_params(axis=\"y\", colors=\"w\")\n ax1.set_title(f\"Gradient: {str(self.m)} m/m\", color=\"white\", size=18)\n\n # Plot mean thickness\n ax2 = plt.subplot(224, sharex=ax1, facecolor=\"black\")\n ax2.plot(self.mass, color=\"w\")\n ax2.set(xlabel=\"Year [a]\", ylabel=\"Mean thickness [m]\")\n ax2.xaxis.label.set_color(\"w\")\n ax2.yaxis.label.set_color(\"w\")\n ax2.tick_params(axis=\"x\", colors=\"w\")\n ax2.tick_params(axis=\"y\", colors=\"w\")\n\n # Draw new plot\n self._fig.canvas.draw()\n plt.pause(0.05)",
"def overlay(self):\n return self._overlay",
"def render(self):\r\n super().render()\r\n layers, titles, lat, lon = self.make_layers()\r\n plots = []\r\n for i in range(len(layers)):\r\n p = figure(\r\n tools=self.tools, \r\n toolbar_location=self.toolbarLocation, \r\n plot_width=self.width, \r\n plot_height=self.height,\r\n x_range=(np.min(lon), np.max(lon)),\r\n y_range=(np.min(lat), np.max(lat)),\r\n title=titles[i]\r\n )\r\n p.xaxis.axis_label = self.xlabel\r\n p.yaxis.axis_label = self.ylabel\r\n colorMapper = LinearColorMapper(palette=self.cmap, low=self.vmin, high=self.vmax)\r\n p.image(\r\n image=[layers[i]], \r\n color_mapper=colorMapper, \r\n x=np.min(lon), \r\n y=np.min(lat), \r\n dw=np.max(lon)-np.min(lon), \r\n dh=np.max(lat)-np.min(lat)\r\n )\r\n\r\n p.add_tools(HoverTool(\r\n tooltips=[\r\n ('longitude', '$x'),\r\n ('latitude', '$y'),\r\n (self.variable + self.unit, '@image'),\r\n ],\r\n mode='mouse'\r\n )\r\n )\r\n\r\n colorBar = ColorBar(\r\n color_mapper=colorMapper, \r\n ticker=BasicTicker(),\r\n label_standoff=12, \r\n border_line_color=None, \r\n location=(0,0)\r\n )\r\n\r\n p.add_layout(colorBar, 'right')\r\n plots.append(p)\r\n \r\n \r\n if not inline(): output_file(get_figure_dir() + self.variable + \".html\", title=self.variable) \r\n show(column(plots))",
"def _visualize(self, labels, ticks, overlay, draw, annotate, height=4):\n n = len(labels)\n colors = list(itertools.islice(itertools.cycle(('b', 'y', 'g', 'r')), n))\n if overlay:\n _, axis = plt.subplots(figsize=(6, height))\n for label, color in zip(labels, colors):\n draw(axis, label, color)\n if ticks is not None:\n annotate(axis, ticks)\n axis.legend(labels, bbox_to_anchor=(1.5, 1.0))\n else:\n _, axes = plt.subplots(n, 1, figsize=(6, height * n))\n if not isinstance(axes, collections.Iterable):\n axes=[axes]\n for axis, label, color in zip(axes, labels, colors):\n draw(axis, label, color)\n axis.set_xlabel(label, fontsize=16)\n if ticks is not None:\n annotate(axis, ticks)",
"def draw(self, axes, feature, bbox, location, style_param):\n pass",
"def draw_on_image(self, img):\n image = Image(img)\n\n # If the overlay has not expired, draw on the plate highlight and/or the status message\n if not self.has_expired():\n self._plate.draw_plate(image, Color.Blue())\n self._plate.draw_pins(image, self._options)",
"def draw(self):\n self.figure.show()\n self.figure.canvas.draw()",
"def plot(self, ax=None, color=None, hatch=None, alpha=1.0, linestyle=None, linewidth=None, edgecolor=None):\n # TODO optional arg for text label\n if self.dim != 2:\n raise Exception(\"Cannot plot region of dimension other than 2\")\n if not is_fulldim(self):\n logger.error(\"Cannot plot empty region\")\n return None\n ax = _newax(ax)\n if color is None:\n color = np.random.rand(3)\n for poly2 in self.list_poly:\n # TODO hatched polytopes in same region\n poly2.plot(ax, color=color, hatch=hatch, alpha=alpha, linestyle=linestyle, linewidth=linewidth,\n edgecolor=edgecolor)\n return ax",
"def add_overlay(self, data, vertices=None, to_overlay=None, mask_data=None,\n **kwargs):\n # Check input variables :\n if vertices is None:\n vertices = np.ones((len(self),), dtype=bool)\n if not len(vertices):\n logger.warning('Vertices array is empty. Abandoning.')\n return\n\n data = np.asarray(data)\n to_overlay = self._n_overlay if to_overlay is None else to_overlay\n data_lim = (data.min(), data.max())\n if len(self._data_lim) < to_overlay + 1:\n self._data_lim.append(data_lim)\n else:\n self._data_lim[to_overlay] = data_lim\n # -------------------------------------------------------------\n # TEXTURE COORDINATES\n # -------------------------------------------------------------\n need_reshape = to_overlay >= self._xrange.shape[1]\n if need_reshape:\n # Add column of zeros :\n z_ = np.zeros((len(self),), dtype=np.float32)\n z_text = np.zeros((1, LUT_LEN, 4), dtype=np.float32)\n self._xrange = np.c_[self._xrange, z_]\n self._alphas = np.c_[self._alphas, z_]\n self._text2d_data = np.concatenate((self._text2d_data, z_text))\n # (x, y) coordinates of the overlay for the texture :\n self._xrange[vertices, to_overlay] = normalize(data)\n # Transparency :\n self._alphas[vertices, to_overlay] = 1. # transparency level\n\n # -------------------------------------------------------------\n # TEXTURE COLOR\n # -------------------------------------------------------------\n # Colormap interpolation (if needed):\n colormap = Colormap(**kwargs)\n vec = np.linspace(data_lim[0], data_lim[1], LUT_LEN)\n self._text2d_data[to_overlay, ...] = colormap.to_rgba(vec)\n # Send data to the mask :\n if isinstance(mask_data, np.ndarray) and len(mask_data) == len(self):\n self._bgd_data[mask_data] = .5\n self._bgd_buffer.set_data(self._bgd_data)\n # -------------------------------------------------------------\n # BUFFERS\n # -------------------------------------------------------------\n if need_reshape:\n # Re-define buffers :\n self._xrange_buffer = gloo.VertexBuffer(self._xrange)\n self._text2d = gloo.Texture2D(self._text2d_data)\n self._alphas_buffer = gloo.VertexBuffer(self._alphas)\n # Send buffers to vertex shader :\n self.shared_program.vert['u_range'] = self._xrange_buffer\n self.shared_program.vert['u_alphas'] = self._alphas_buffer\n self.shared_program.vert['u_over_text'] = self._text2d\n else:\n self._xrange_buffer.set_data(self._xrange)\n self._text2d.set_data(self._text2d_data)\n self._alphas_buffer.set_data(self._alphas)\n # Update the number of overlays :\n self._n_overlay = to_overlay + 1\n self.shared_program.vert['u_n_overlays'] = self._n_overlay",
"def draw_layer(\n self, data: pd.DataFrame, layout: Layout, coord: Coord, **params: Any\n ):\n for pid, pdata in data.groupby(\"PANEL\"):\n if len(pdata) == 0:\n continue\n ploc = pdata[\"PANEL\"].iloc[0] - 1\n panel_params = layout.panel_params[ploc]\n ax = layout.axs[ploc]\n self.draw_panel(pdata, panel_params, coord, ax, **params)",
"def __draw(self):\n plt.rcParams.update(self.settings.rcParams)\n\n self.fig = plt.figure()\n self.ax = self.fig.add_axes(self.axes_rect)\n\n xs = np.arange(1, self.xmax+1)\n ys = [np.arange(0, self.ymax) for i in range(self.xmax)]\n\n self.ax.plot(xs, ys)\n\n self.__draw_xaxis()\n self.__draw_yaxis()\n\n self.__draw_annotations()\n self.__draw_eras()\n self.__draw_era_spans()\n self.__draw_watermark()\n self.__draw_title()\n self.__draw_image()\n self.__draw_max_age()\n\n self.ax.set_aspect('equal', share=True)",
"def draw(self, drawDC=None):\n FigureCanvasAgg.draw(self)\n self.bitmap = _rgba_to_wx_bitmap(self.get_renderer().buffer_rgba())\n self._isDrawn = True\n self.gui_repaint(drawDC=drawDC)",
"def overlay(self, feature, color='Blue', opacity=0.6):\n result = self.copy()\n if type(feature) == Table:\n # if table of features e.g. Table.from_records(taz_map.features)\n if 'feature' in feature.labels:\n feature = feature['feature']\n\n # if marker table e.g. table with columns: latitudes,longitudes,popup,color,area\n else:\n feature = Circle.map_table(feature)\n\n if type(feature) in [list, np.ndarray]:\n for f in feature:\n f._attrs['fill_color'] = color\n f._attrs['fill_opacity'] = opacity\n f.draw_on(result._folium_map)\n\n elif type(feature) == Map:\n for i in range(len(feature._features)):\n f = feature._features[i]\n f._attrs['fill_color'] = color\n f._attrs['fill_opacity'] = opacity\n f.draw_on(result._folium_map)\n elif type(feature) == Region:\n feature._attrs['fill_color'] = color\n feature._attrs['fill_opacity'] = opacity\n feature.draw_on(result._folium_map)\n return result",
"def on_draw_over_image(self):",
"def paint(self):\r\n pass",
"def basic_render(self, surface) -> None:\n if not self.visible:\n return\n l, t = self.pos\n r, b = self.get_anchor_pos(Anchor.bottom_right)\n tpos = self.get_anchor_pos(Anchor.middle)\n backcolor = (128, 128, 128)\n forecolor = {False: (255, 255, 192), True: (255, 0, 0)}\n pts = ((l, t), (r, t), (r, b), (l, b))\n pygame.draw.polygon(surface, backcolor, pts, 0)\n pygame.draw.polygon(surface, forecolor[self.hover], pts, 1)\n BitmapFont.set_colors(BitmapFont.medium, backcolor, forecolor[self.hover])\n BitmapFont.render(surface, str(self.label), BitmapFont.medium, tpos, Anchor.middle)",
"def draw(self, obj, layer=0):\n # self._clearLayer(layer)\n self._layer_items[layer] = self._drawFnFromType(obj)(\n obj, layer=layer, existing=self._layer_items.get(layer, []))"
] | [
"0.7445115",
"0.6843126",
"0.64194393",
"0.6342532",
"0.6219951",
"0.618706",
"0.57982993",
"0.5716643",
"0.5615648",
"0.5535393",
"0.55093724",
"0.5495035",
"0.53562",
"0.53541195",
"0.53445685",
"0.53112686",
"0.53103656",
"0.52522504",
"0.52421886",
"0.5224319",
"0.52023685",
"0.52012134",
"0.51720995",
"0.51369673",
"0.5129589",
"0.51274776",
"0.511611",
"0.5102787",
"0.51025265",
"0.51001126"
] | 0.7053354 | 1 |
Draws this component overlaid on another component. Overrides AbstractOverlay. | def overlay(self, other_component, gc, view_bounds=None, mode="normal"):
if not self.visible:
return
self._compute_ticks(other_component)
self._draw_component(gc, view_bounds, mode)
self._cache_valid = False
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def draw_overlay(self):\n pass",
"def _draw_overlay(self, gc, view_bounds=None, mode='normal'):\n self._draw_component(gc, view_bounds, mode)\n return",
"def on_draw_overlay(self):",
"def overlay(self, overlay):\n\n self._overlay = overlay",
"def overlay(self, component, gc, view_bounds=None, mode=\"normal\"):\n\n tmp = self._get_selection_screencoords()\n if tmp is None:\n return\n\n left_line, right_line, polygon = self.calculate_points(component)\n\n gc.save_state()\n try:\n gc.translate_ctm(*component.position)\n gc.set_alpha(self.alpha)\n gc.set_fill_color(self.fill_color_)\n gc.set_line_width(self.border_width)\n gc.set_stroke_color(self.border_color_)\n gc.begin_path()\n gc.lines(polygon)\n gc.fill_path()\n\n gc.begin_path()\n gc.lines(left_line)\n gc.lines(right_line)\n gc.stroke_path()\n finally:\n gc.restore_state()\n return",
"def on_draw_over_image(self):",
"def draw(self, draw_surface):\n super().draw(draw_surface)\n if self.sub_event is not None:\n self.sub_event.draw(draw_surface)\n else:\n self.text_cursor.draw(draw_surface)",
"def paint(self):\r\n pass",
"def overlay(self, image, x, y, r=0):\n x -= (image.get_rect()[2] - self.dial.get_rect()[2])/2\n y -= (image.get_rect()[3] - self.dial.get_rect()[3])/2\n image.set_colorkey((255,255,0))\n self.dial.blit(image, (x,y))",
"def _layout_as_overlay(self, size=None, force=False):\n if self.component is not None:\n self.position = self.component.position\n self.bounds = self.component.bounds\n return",
"def draw(self, draw_surface):\n if self.sub_event is not None:\n self.sub_event.draw(draw_surface)\n else:\n super().draw(draw_surface)\n self.response_box.draw(draw_surface)",
"def on_draw_over_backgroundimage(self):",
"def draw(self, draw_surface):\n super().draw(draw_surface)\n if self.active_sell_event is not None:\n self.active_sell_event.draw(draw_surface)",
"def draw(self, win):\n pygame.draw.rect(win, self.color, self.rect)",
"def render(self, game):\n pygame.draw.rect(game.screen,\n self.colour,\n (int(self.x), int(self.y), self.a, self.b))",
"def draw(self, draw_surface):\n super().draw(draw_surface)\n if self.give_event_handler is not None:\n self.give_event_handler.draw(draw_surface)",
"def basic_render(self, surface) -> None:\n if not self.visible:\n return\n l, t = self.pos\n r, b = self.get_anchor_pos(Anchor.bottom_right)\n tpos = self.get_anchor_pos(Anchor.middle)\n backcolor = (128, 128, 128)\n forecolor = {False: (255, 255, 192), True: (255, 0, 0)}\n pts = ((l, t), (r, t), (r, b), (l, b))\n pygame.draw.polygon(surface, backcolor, pts, 0)\n pygame.draw.polygon(surface, forecolor[self.hover], pts, 1)\n BitmapFont.set_colors(BitmapFont.medium, backcolor, forecolor[self.hover])\n BitmapFont.render(surface, str(self.label), BitmapFont.medium, tpos, Anchor.middle)",
"def draw(self):\n raise NotImplementedError",
"def draw(self):\n raise NotImplementedError",
"def draw(self):\n raise NotImplementedError",
"def do_layout(self, *args, **kw):\n if self.use_draw_order and self.component is not None:\n self._layout_as_overlay(*args, **kw)\n else:\n super(PlotGrid, self).do_layout(*args, **kw)\n return",
"def _draw_widget(self, *args) -> None:\n del args\n\n if self.canvas is None:\n return\n\n # TODO: allow user to set rotation/scale origin\n center = center_of_points_list(self.points)\n self.canvas.clear()\n\n with self.canvas:\n Color(*self.color)\n Scale(self.scale, origin=center)\n Rotate(angle=self.rotation, origin=center)\n KivyPoint(points=self.points,\n pointsize=self.pointsize)",
"def set_blend_mode_over(self):\n self.image_item.setCompositionMode(QtGui.QPainter.CompositionMode_SourceOver)",
"def draw(self, draw_surface):\n # If their is an active select count sub event pass control to it.\n if self.select_count_event is not None:\n self.select_count_event.draw(draw_surface)\n else:\n # Draw the background menu.\n super().draw(draw_surface)\n\n # Draw the items and prices.\n draw_surface.blit(self._item_surface, (0, 0))\n\n # Draw the selected item description and image\n draw_surface.blit(self.item_desc_surf, (40, 115))\n draw_surface.blit(self.item_pic_surf, (8, 124))\n\n # Draw the cursors.\n self.draw_cursor.draw(draw_surface)\n self.down_bobbing_cursor.draw(draw_surface)\n self.up_bobbing_cursor.draw(draw_surface)\n\n # Also draw subevents if they exist.\n if self.confirm_buy_response is not None:\n self.confirm_buy_response.draw(draw_surface)\n elif self.thanks_dialogue is not None:\n self.thanks_dialogue.draw(draw_surface)",
"def draw(self, renderer, *args, **kwargs):\n self.update_positions(renderer)",
"def draw(self):\n\n super().draw()\n\n if self.hit or self.miss:\n # Change colour depending on hit or miss\n fl_color(FL_RED if self.hit else FL_WHITE)\n fl_pie(self.x()+4, self.y()+4, self.w() - 8, self.h() - 8, 0.0, 360.0)",
"def draw(self, surface):\n surface.blit(self.base, self.base_rect)\n surface.blit(self.barrel, self.rect)",
"def expose(self, widget, event):\n cr = widget.window.cairo_create()\n cr.set_source_rgb(0, 0, 0)\n cr.paint()\n for pos, color in self.locked_squares.iteritems():\n self.paint_square(pos, color, cr)\n for pos in self.curr_piece.occupying():\n self.paint_square(pos, self.curr_piece.color, cr)\n ### Easiest to put \"GAME OVER\" message here ###\n if self.over:\n cr.select_font_face('Sans', cairo.FONT_SLANT_NORMAL,\n cairo.FONT_WEIGHT_BOLD)\n ### HACK: The following doesn't scale with DOT_SIZE ###\n cr.set_font_size(41)\n cr.move_to(10, 200)\n cr.set_source_rgb(0, 0, 0) # dark drop-shadow\n cr.show_text('GAME OVER')\n cr.move_to(12, 202)\n cr.set_source_rgb(.82, .82, .82) # light main text\n cr.show_text('GAME OVER')\n cr.stroke()",
"def paint(self, draw, x, y, w, h):\n\t\tpass",
"def draw(self, surface):\n\n\t\tsurface.blit(self.image, self.rect.topleft)"
] | [
"0.7665044",
"0.7020675",
"0.67354435",
"0.6259688",
"0.61783314",
"0.60300004",
"0.5967577",
"0.58583504",
"0.5829422",
"0.5782682",
"0.574185",
"0.5739298",
"0.57293445",
"0.56589687",
"0.56444204",
"0.5635737",
"0.56350815",
"0.56157523",
"0.56157523",
"0.56157523",
"0.56145144",
"0.5609355",
"0.5603078",
"0.5599553",
"0.55780935",
"0.55675083",
"0.5564089",
"0.5562156",
"0.5551876",
"0.55208474"
] | 0.7365193 | 1 |
Event handler that is bound to this mapper's updated event. | def mapper_updated(self):
self.invalidate()
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def handle_actual_updated(self):\n self._actual_updated()",
"def update(self, _event):\n print self.get_name(), \"received event\", _event",
"def on_update(self):\n raise NotImplemented(\"on_update method should be implemented.\")",
"def onUpdated(self):",
"def _data_updated_callback(self, attr, old, new):\n pass",
"def handle_update(self, call):\n self.fire_event(EVENT_UPDATE)",
"def changedUpdate(self, e):\n syncJSONtoUI()",
"def on_entity_update(self, event):\n self.entity.on_entity_update(event)",
"def OnAttributesUpdated():\n pass",
"def process_IN_MODIFY(self, event):",
"def _update(self):\n pass",
"def _update_handler(self, state):\n self._schedule_remaining_events()",
"def after_update(self, obj, st):\n pass",
"def process_event(self, event):\r\n pass",
"def after_update(self, *args):\n raise NotImplementedError",
"def handle_updates(self, update):\r\n self.__manage_pump()",
"def process_IN_MODIFY(s, event):\n s.doReload(event)",
"def update( ):\r\n pass",
"def __itemChanged(self, event):\n if event in (items.ItemChangedType.DATA, items.ItemChangedType.MASK):\n self._updateFromItem()",
"def address_mapped_event(self, event):\r\n pass",
"def on_update(self, delta_time):\n pass",
"def on_update(self, delta_time):\n pass",
"def update(self, mapItem: MapItem):\n pass",
"def set_update_received_callback(self, callback):\n self.__update_received = callback",
"async def updated(self, value):\n pass",
"def update(self, *args, **kwargs):\n return self.callback_func(*args, **kwargs)",
"def update(self):\r\n pass",
"def handle_event(self, event):\n pass",
"def _modelUpdated(self, *args, **kwargs):\n topLeft = self.index(column=0)\n bottomRight = self.index(column=1)\n model = self.model()\n if model is not None:\n model.dataChanged.emit(topLeft, bottomRight)",
"def update(self):\n pass"
] | [
"0.72261864",
"0.715251",
"0.71376646",
"0.7063243",
"0.6955862",
"0.67936265",
"0.67064404",
"0.6603367",
"0.6541407",
"0.653413",
"0.6453527",
"0.6444449",
"0.63978213",
"0.63502306",
"0.6309958",
"0.62896883",
"0.6283026",
"0.6219855",
"0.6200566",
"0.6195835",
"0.6171405",
"0.6171405",
"0.61659443",
"0.6162945",
"0.61628395",
"0.6144179",
"0.6141792",
"0.611876",
"0.61063",
"0.60992587"
] | 0.72592545 | 0 |
Called when an attribute that affects the appearance of the grid is changed. | def visual_attr_changed(self):
if self.component:
self.component.invalidate_draw()
self.component.request_redraw()
else:
self.invalidate_draw()
self.request_redraw() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def OnAttributesUpdated():\n pass",
"def updateMyGUIValue(self, attributeName, newValue):\n if self.myGalaxy.shipSelected == self:\n d = {attributeName:newValue}\n self.myGalaxy.shipInfo.updateAttributes(d)",
"def on_change(self, attr, old, new):\n n = self.labels.index(new) + 1 # Select 0-indexed\n self.notify(set_figures(n))",
"def par_attr_changed(self, widget, adj, par, par_attr):\n try:\n val = float(widget.get_text())\n except ValueError:\n pass\n else:\n if par_attr == 'min':\n adj.set_lower(val)\n elif par_attr == 'max':\n adj.set_upper(val)\n elif par_attr == 'val':\n adj.set_value(val)\n incr = (adj.get_upper() - adj.get_lower()) / 100.0\n adj.set_step_increment(incr)\n adj.set_page_increment(incr)\n setattr(par, par_attr, val)",
"def visualAppearanceChanged(event, obj):\n\n if _currentPresentationManager >= 0:\n _PRESENTATION_MANAGERS[_currentPresentationManager].\\\n visualAppearanceChanged(event, obj)",
"def _on_change(self, *_):\n colour = self.on_colour if self.value else self.off_colour\n self.configure(bg=colour)\n if self.label:\n self.label.configure(bg=colour)",
"def _attr_updated(self, name, value):\n event = AttributeUpdateEvent(self, name, value)\n events.notify(event)",
"def _set_attributes(self):",
"def GetAttr(self, row, col, kind):\n \n #print \"Get Attr\",row,col,kind\n\n provider = self.GetAttrProvider()\n if provider and provider.GetAttr(row, col, kind):\n attr = provider.GetAttr(row, col, kind).Clone()\n else:\n attr = wx.grid.GridCellAttr()\n\n #color marks\n if self.colsel[col] in self.marks['X']:\n attr.SetBackgroundColour(wx.Colour(255, 230, 230))\n elif self.colsel[col] in self.marks['Y1']:\n attr.SetBackgroundColour(wx.Colour(255, 255, 205))\n elif self.colsel[col] in self.marks['Y2']:\n attr.SetBackgroundColour(wx.Colour(255, 255, 155))\n elif self.colsel[col] in self.marks['G']:\n attr.SetBackgroundColour(wx.Colour(155, 255, 155))\n\n #color dynamic columns\n if self.colsel[col] in self.dynamic_cols:\n attr.SetBackgroundColour(wx.Colour(200, 200, 200))\n\n #color last rows\n maxRows = self.GetNumberRows()\n if self.active:\n if maxRows - row == 1: #last row\n attr.SetBackgroundColour(wx.Colour(255, 230, 230))\n elif maxRows - row == 2: #second to last row\n attr.SetBackgroundColour(wx.Colour(255, 255, 205))\n elif maxRows - row == 3:\n if self.record:\n attr.SetBackgroundColour(wx.Colour(200, 255, 200))\n else:\n attr.SetBackgroundColour(wx.Colour(255, 100, 100))\n else:\n if maxRows - row <= 2:\n attr.SetBackgroundColour(wx.Colour(127, 127, 127))\n\n if self.rowmask[row]:\n attr.SetTextColour(wx.Colour(0,0,255))\n \n return attr",
"def handle_attributes_mouseover(self):\n pass",
"def callback_choosethedatefordisplay(self, attrname, old, new):\n self._update_chart(self.choosethedatefordisplay.value_as_datetime)",
"def _updateColAttrs(self, grid):\n col = 0\n\n for colname in self.table.columns:\n attr = wx.grid.GridCellAttr()\n renderer = MegaFontRenderer(self.table)\n attr.SetRenderer(renderer)\n grid.SetColAttr(col, attr)\n col += 1",
"def _anytrait_changed_for_component(self, new):\n\n self.canvas.request_redraw()",
"def _data_updated_callback(self, attr, old, new):\n pass",
"def set_attribute(self, attr, value):\n super().set_attribute(attr, value) # Keep this line, it triggers the parent class method.\n setattr(self, attr, value)",
"def _async_update_attrs(self) -> None:\n self._attr_is_on = self._device.light_on\n if self._device.light_brightness is not None:\n self._attr_brightness = int(min(255, self._device.light_brightness * 16))",
"def attribute_updated(self, attrid: int, value: Any, _: Any) -> None:\n attr_name = self._get_attribute_name(attrid)\n self.debug(\n \"Attribute report '%s'[%s] = %s\", self.cluster.name, attr_name, value\n )\n if attr_name == \"fan_mode\":\n self.async_send_signal(\n f\"{self.unique_id}_{SIGNAL_ATTR_UPDATED}\", attrid, attr_name, value\n )",
"def onGridInitialized():\n global IsGridInitialized\n IsGridInitialized=True",
"def check_attr(self):\n super(Scatter, self).check_attr()",
"def attrColorSliderGrp(*args, adjustableColumn: int=0, adjustableColumn2: int=0,\n adjustableColumn3: int=0, adjustableColumn4: int=0, adjustableColumn5:\n int=0, adjustableColumn6: int=0, annotation: Union[AnyStr, bool]=\"\",\n attrNavDecision: Union[List[name, AnyStr], bool]=None, attribute:\n Union[AnyStr, bool]=\"\", backgroundColor: Union[List[float, float, float],\n bool]=None, columnAlign: Union[List[int, AnyStr], List[List[int,\n AnyStr]]]=None, columnAlign2: List[AnyStr, AnyStr]=None, columnAlign3:\n List[AnyStr, AnyStr, AnyStr]=None, columnAlign4: List[AnyStr, AnyStr,\n AnyStr, AnyStr]=None, columnAlign5: List[AnyStr, AnyStr, AnyStr, AnyStr,\n AnyStr]=None, columnAlign6: List[AnyStr, AnyStr, AnyStr, AnyStr, AnyStr,\n AnyStr]=None, columnAttach: Union[List[int, AnyStr, int], List[List[int,\n AnyStr, int]]]=None, columnAttach2: List[AnyStr, AnyStr]=None,\n columnAttach3: List[AnyStr, AnyStr, AnyStr]=None, columnAttach4:\n List[AnyStr, AnyStr, AnyStr, AnyStr]=None, columnAttach5: List[AnyStr,\n AnyStr, AnyStr, AnyStr, AnyStr]=None, columnAttach6: List[AnyStr, AnyStr,\n AnyStr, AnyStr, AnyStr, AnyStr]=None, columnOffset2: List[int, int]=None,\n columnOffset3: List[int, int, int]=None, columnOffset4: List[int, int,\n int, int]=None, columnOffset5: List[int, int, int, int, int]=None,\n columnOffset6: List[int, int, int, int, int, int]=None, columnWidth:\n Union[List[int, int], List[List[int, int]]]=None, columnWidth1: int=0,\n columnWidth2: List[int, int]=None, columnWidth3: List[int, int,\n int]=None, columnWidth4: List[int, int, int, int]=None, columnWidth5:\n List[int, int, int, int, int]=None, columnWidth6: List[int, int, int,\n int, int, int]=None, defineTemplate: AnyStr=\"\", docTag: Union[AnyStr,\n bool]=\"\", dragCallback: Script=None, dropCallback: Script=None, enable:\n bool=True, enableBackground: bool=True, enableKeyboardFocus: bool=True,\n exists: bool=True, fullPathName: bool=True, height: Union[int, bool]=0,\n highlightColor: Union[List[float, float, float], bool]=None, hsvValue:\n Union[List[float, float, float], bool]=None, isObscured: bool=True,\n label: Union[AnyStr, bool]=\"\", manage: bool=True, noBackground:\n bool=True, numberOfPopupMenus: bool=True, parent: Union[AnyStr, bool]=\"\",\n popupMenuArray: bool=True, preventOverride: bool=True, rgbValue:\n Union[List[float, float, float], bool]=None, rowAttach: Union[List[int,\n AnyStr, int], List[List[int, AnyStr, int]]]=None, showButton: bool=True,\n statusBarMessage: AnyStr=\"\", useTemplate: AnyStr=\"\", visible: bool=True,\n visibleChangeCommand: Union[Script, bool]=None, width: Union[int,\n bool]=0, q=True, query=True, e=True, edit=True, **kwargs)->Union[AnyStr,\n Any]:\n pass",
"def change(self):\r\n\r\n # If checkboxes are available, check status and set boat speed reference line visibility accordingly.\r\n if self.cb:\r\n if self.cb_bt.checkState() == QtCore.Qt.Checked:\r\n for item in self.bt:\r\n item.set_visible(True)\r\n else:\r\n for item in self.bt:\r\n item.set_visible(False)\r\n # GGA\r\n if self.cb_gga.checkState() == QtCore.Qt.Checked:\r\n for item in self.gga:\r\n item.set_visible(True)\r\n # self.gga[0].set_visible(True)\r\n elif self.gga is not None:\r\n for item in self.gga:\r\n item.set_visible(False)\r\n # self.gga[0].set_visible(False)\r\n # VTG\r\n if self.cb_vtg.checkState() == QtCore.Qt.Checked:\r\n for item in self.vtg:\r\n item.set_visible(True)\r\n # self.vtg[0].set_visible(True)\r\n elif self.vtg is not None:\r\n for item in self.vtg:\r\n item.set_visible(False)\r\n # self.vtg[0].set_visible(False)\r\n\r\n # Draw canvas\r\n self.canvas.draw()",
"def _update_column_attribute_changed(self, column, idx, attr, old_value, new_value):\n method_name = f\"_update_column_{attr}\"\n if hasattr(self, method_name):\n # Right now this is so we can be lazy and not implement updaters\n # for every attribute yet--some we may not need at all, TBD\n getattr(self, method_name)(column, idx, old_value, new_value)",
"def set_text_format(self, attribute, value):\n if self._simplecell:\n self.fetch()\n if attribute not in [\"foregroundColor\", \"fontFamily\", \"fontSize\", \"bold\", \"italic\",\n \"strikethrough\", \"underline\"]:\n raise InvalidArgumentValue(\"Not a valid attribute. Check documentation for more information.\")\n if self.text_format:\n self.text_format[attribute] = value\n else:\n self.text_format = {attribute: value}\n self.update()\n return self",
"def attrFieldSliderGrp(*args, adjustableColumn: int=0, adjustableColumn2: int=0,\n adjustableColumn3: int=0, adjustableColumn4: int=0, adjustableColumn5:\n int=0, adjustableColumn6: int=0, annotation: Union[AnyStr, bool]=\"\",\n attribute: Union[AnyStr, bool]=\"\", backgroundColor: Union[List[float,\n float, float], bool]=None, changeCommand: Script=None, columnAlign:\n Union[List[int, AnyStr], List[List[int, AnyStr]]]=None, columnAlign2:\n List[AnyStr, AnyStr]=None, columnAlign3: List[AnyStr, AnyStr,\n AnyStr]=None, columnAlign4: List[AnyStr, AnyStr, AnyStr, AnyStr]=None,\n columnAlign5: List[AnyStr, AnyStr, AnyStr, AnyStr, AnyStr]=None,\n columnAlign6: List[AnyStr, AnyStr, AnyStr, AnyStr, AnyStr, AnyStr]=None,\n columnAttach: Union[List[int, AnyStr, int], List[List[int, AnyStr,\n int]]]=None, columnAttach2: List[AnyStr, AnyStr]=None, columnAttach3:\n List[AnyStr, AnyStr, AnyStr]=None, columnAttach4: List[AnyStr, AnyStr,\n AnyStr, AnyStr]=None, columnAttach5: List[AnyStr, AnyStr, AnyStr, AnyStr,\n AnyStr]=None, columnAttach6: List[AnyStr, AnyStr, AnyStr, AnyStr, AnyStr,\n AnyStr]=None, columnOffset2: List[int, int]=None, columnOffset3:\n List[int, int, int]=None, columnOffset4: List[int, int, int, int]=None,\n columnOffset5: List[int, int, int, int, int]=None, columnOffset6:\n List[int, int, int, int, int, int]=None, columnWidth: Union[List[int,\n int], List[List[int, int]]]=None, columnWidth1: int=0, columnWidth2:\n List[int, int]=None, columnWidth3: List[int, int, int]=None,\n columnWidth4: List[int, int, int, int]=None, columnWidth5: List[int, int,\n int, int, int]=None, columnWidth6: List[int, int, int, int, int,\n int]=None, defineTemplate: AnyStr=\"\", docTag: Union[AnyStr, bool]=\"\",\n dragCallback: Script=None, dropCallback: Script=None, enable: bool=True,\n enableBackground: bool=True, enableKeyboardFocus: bool=True, exists:\n bool=True, extraButton: bool=True, extraButtonCommand: Script=None,\n extraButtonIcon: Union[AnyStr, bool]=\"\", fieldMaxValue: Union[float,\n bool]=0.0, fieldMinValue: Union[float, bool]=0.0, fieldStep: Union[float,\n bool]=0.0, forceAddMapButton: bool=True, fullPathName: bool=True,\n height: Union[int, bool]=0, hideMapButton: bool=True, highlightColor:\n Union[List[float, float, float], bool]=None, isObscured: bool=True,\n label: Union[AnyStr, bool]=\"\", manage: bool=True, maxValue: Union[float,\n bool]=0.0, minValue: Union[float, bool]=0.0, noBackground: bool=True,\n numberOfPopupMenus: bool=True, parent: Union[AnyStr, bool]=\"\",\n popupMenuArray: bool=True, precision: int=0, preventOverride: bool=True,\n rowAttach: Union[List[int, AnyStr, int], List[List[int, AnyStr,\n int]]]=None, sliderMaxValue: Union[float, bool]=0.0, sliderMinValue:\n Union[float, bool]=0.0, sliderStep: Union[float, bool]=0.0,\n statusBarMessage: AnyStr=\"\", step: Union[float, bool]=0.0, useTemplate:\n AnyStr=\"\", vertical: bool=True, visible: bool=True,\n visibleChangeCommand: Union[Script, bool]=None, width: Union[int,\n bool]=0, q=True, query=True, e=True, edit=True, **kwargs)->Union[AnyStr,\n Any]:\n pass",
"def paint(self, grid):\n grid[self.y, self.x] = self.appearance\n return grid",
"def change_attr(self) -> None:\n\n self.attr = randint(0, 10)",
"def update_widget_attribute(self, pyte_widget: Pyted_Widget_Type, attr: str,\n new_value: Union[str, bool, List[str]],\n init=False) -> Union[None, tuple]:\n\n old_value = getattr(pyte_widget, attr)\n\n if not init:\n setattr(pyte_widget, attr, new_value)\n\n try:\n tk_widget = pyte_widget.tk_name\n except AttributeError:\n tk_widget = None\n\n attr_template = pyte_widget.get_code_template(attr)\n\n if attr_template == pyted_widget_types.CONFIG_CODE:\n tk_widget[attr] = getattr(pyte_widget, attr)\n\n elif attr_template == pyted_widget_types.TITLE_CODE:\n return\n\n elif attr_template == pyted_widget_types.GRID_CODE:\n if init:\n # when user form is drawn grid placement will be handled by user form initialisation code\n return\n try:\n old_position = {'row': tk_widget.grid_info()['row'], 'column': tk_widget.grid_info()['column']}\n new_position = {'row': tk_widget.grid_info()['row'], 'column': tk_widget.grid_info()['column']}\n except KeyError:\n # widget has remove set true so no need to update tk_widget\n return\n new_attr_val = getattr(pyte_widget, attr)\n new_position[attr] = new_attr_val\n if (int(new_position['row']) >= int(self.widgets.find_pyte_parent(pyte_widget).number_rows) or\n int(new_position['column']) >= int(self.widgets.find_pyte_parent(pyte_widget).number_columns)):\n # pyte_widget.row = old_position['row']\n # pyte_widget.column = old_position['column']\n pyte_widget.remove = True\n pyte_widget.tk_name.grid_remove()\n self.handles.remove_selected_widget_handles()\n self.user_form.new_filler_label(self.widgets.find_tk_parent(pyte_widget),\n old_position['column'], old_position['row'])\n messagebox.showwarning('Widget being moved off grid',\n 'Row or column greater than grid size. Widget has been removed. '\n 'To get widget back move back onto grid and set remove to false')\n else:\n\n filler_widget = self.widgets.find_tk_parent(pyte_widget).grid_slaves(row=new_position['row'],\n column=new_position['column'])[0]\n if filler_widget not in self.user_form.filler_labels and filler_widget != pyte_widget.tk_name:\n # trying to move widget onto existing widget\n pyte_widget.remove = True\n pyte_widget.tk_name.grid_remove()\n self.handles.remove_selected_widget_handles()\n self.user_form.new_filler_label(self.widgets.find_tk_parent(pyte_widget),\n old_position['column'], old_position['row'])\n messagebox.showwarning('Widget being moved onto existing widget',\n 'Row and column the same as another widget. Widget has been removed. 
'\n 'To get widget back move back onto empty slot and set remove to false')\n return\n filler_widget.grid(row=old_position['row'], column=old_position['column'])\n tk_widget.grid({attr: new_attr_val})\n self.handles.place_selected_widget_handles(pyte_widget.tk_name)\n\n elif attr_template == pyted_widget_types.GRID_SIZE_CODE:\n if init:\n # when user form is drawn the widget parent will be handled by user form initialisation code\n return\n self.user_form.empty_tk_container_widget(pyte_widget)\n self.user_form.fill_tk_container_frame(pyte_widget)\n self.handles.place_selected_widget_handles(pyte_widget.tk_name)\n\n elif attr_template == pyted_widget_types.ROW_CONFIGURE or attr_template == pyted_widget_types.COLUMN_CONFIGURE:\n # row and column configuration handled elsewhere in program\n pass\n\n elif attr_template == pyted_widget_types.BESPOKE_CODE and attr == 'remove':\n if init:\n # when user form is drawn grid_remove will be handled by user form initialisation code\n return\n\n tk_widget_in_grid = not(len(pyte_widget.tk_name.grid_info()) == 0)\n if getattr(pyte_widget, 'remove'):\n if tk_widget_in_grid:\n widget_to_hide = pyte_widget\n self.user_form.new_filler_label(self.widgets.find_tk_parent(widget_to_hide), widget_to_hide.column,\n widget_to_hide.row)\n widget_to_hide.tk_name.grid_remove()\n self.handles.remove_selected_widget_handles()\n else:\n # remove attribute is false, if widget not displayed then try to display it\n if not tk_widget_in_grid:\n # check that the widget is on the grid\n if (int(pyte_widget.row) >= int(self.widgets.find_pyte_parent(pyte_widget).number_rows) or\n int(pyte_widget.column) >= int(self.widgets.find_pyte_parent(pyte_widget).number_columns)):\n messagebox.showwarning('Widget off grid',\n 'Row or column greater than grid size. '\n 'To get widget back move back onto grid and set remove to false')\n setattr(pyte_widget, 'remove', True)\n return\n # check that there is not a widget already visible\n filler_widget = self.widgets.find_tk_parent(pyte_widget).grid_slaves(row=pyte_widget.row,\n column=pyte_widget.column)[0]\n if filler_widget not in self.user_form.filler_labels:\n pyte_widget.remove = True\n pyte_widget.tk_name.grid_remove()\n # self.remove_selected_widget_handles()\n messagebox.showwarning('Existing widget at grid location',\n 'Row and column the same as another widget. 
'\n 'To get widget back move onto empty slot and set remove to false')\n return\n # remove filler label and show user widget\n filler_widget = self.widgets.find_tk_parent(pyte_widget).grid_slaves(row=pyte_widget.row,\n column=pyte_widget.column)[0]\n filler_widget.grid_forget()\n filler_widget.destroy()\n pyte_widget.tk_name.grid(row=pyte_widget.row, column=pyte_widget.column)\n self.handles.place_selected_widget_handles(pyte_widget.tk_name)\n\n elif attr_template == pyted_widget_types.BESPOKE_CODE and attr == 'name':\n if init:\n # when user form is drawn the widget name will be handled by user form initialisation code\n return\n # check name is really changed\n if new_value == old_value:\n return\n # check name is not already taken\n for i_pyte_widget in self.widgets.widget_list:\n if i_pyte_widget != pyte_widget:\n if pyte_widget.name == i_pyte_widget.name:\n # can't messagebox here as this would move focus out of entry box and cause binding to run again\n # messagebox.showwarning('Renaming problem',\n # 'Name already exists for another widget and Name not changed')\n setattr(pyte_widget, attr, old_value)\n return 'Renaming problem', 'Name already exists for another widget and Name not changed'\n for i_pyte_widget in self.widgets.widget_list:\n if i_pyte_widget.parent == old_value:\n i_pyte_widget.parent = new_value\n # self.update_navigator_tree()\n self.navigator_tree_obj.navigator_tree_change_item_name(pyte_widget, old_value)\n # raise Exception(f'renaming widget not yet implemented')\n\n elif attr_template == pyted_widget_types.BESPOKE_CODE and (attr == 'comment'):\n if init:\n # when user form is drawn the tk_name will be handled by user form initialisation code\n return\n return\n\n elif attr_template == pyted_widget_types.BESPOKE_CODE and (attr == 'win_close'):\n if init:\n # when user form is drawn the tk_name will be handled by user form initialisation code\n return\n return\n\n elif attr_template == pyted_widget_types.BESPOKE_CODE and (attr == 'tab_text'):\n if init:\n # when user form is drawn the tk_name will be handled by user form initialisation code\n return\n tk_parent = self.widgets.find_tk_parent(pyte_widget)\n if isinstance(tk_parent, ttk.Notebook):\n tk_parent.tab(pyte_widget.tk_name, text=new_value)\n # self.widgets.find_tk_parent(pyte_widget).tab(pyte_widget.tk_name, text=new_value)\n return\n\n elif attr_template == pyted_widget_types.BESPOKE_CODE and attr == 'tk_name':\n if init:\n # when user form is drawn the tk_name will be handled by user form initialisation code\n return\n raise Exception(f'renaming tk_name for widget should not occur')\n\n elif attr_template == pyted_widget_types.BESPOKE_CODE and attr == 'parent':\n # not used as parent attribute not shown in attribute edit frame\n if init:\n # when user form is drawn the widget parent will be handled by user form initialisation code\n return\n raise Exception(f'renaming widget parent not yet implemented')\n\n elif attr_template == pyted_widget_types.VAR_SET_CODE:\n setattr(pyte_widget, pyted_widget_types.VAR_SET_CODE, new_value)\n\n elif attr_template.startswith('<'):\n if init:\n # when user form is drawn the widget parent will be handled by user form initialisation code\n return\n return\n\n else:\n raise Exception(f'attr_template \"{attr_template}\" for \"{attr}\" not yet configured')\n # print(f'attr_template {attr_template} not yet implemented for {attr}')",
"def _setAttributes(self, primaryAttr, attrs):\n return False",
"def update_style(self):\n pass",
"def set_style(self):"
] | [
"0.6373653",
"0.59116966",
"0.5899366",
"0.5886156",
"0.58624667",
"0.572089",
"0.5701744",
"0.5678289",
"0.565258",
"0.5574653",
"0.5537824",
"0.550981",
"0.5494045",
"0.54727757",
"0.5457516",
"0.5414864",
"0.5389769",
"0.5363795",
"0.53519875",
"0.5351073",
"0.5350573",
"0.53136826",
"0.5307969",
"0.5305344",
"0.5296642",
"0.52866125",
"0.52814424",
"0.52784926",
"0.52768743",
"0.52704316"
] | 0.6970147 | 0 |
Gets a list of the related characters (edges), the sentiment of their relation, and the number of their common appearances, and creates a graph with the edges, their colors (determined by the sentiment) and their weights (determined by their common appearances). | def construct_network_from_neighbours_list(related_characters: list):
graph = nx.Graph()
for edge in related_characters:
sentiment = edge[1]
color = ''
if sentiment == 'Positive':
color = 'g'
elif sentiment == 'Negative':
color = 'r'
elif sentiment == 'Neutral':
color = 'k'
# graph.add_node(edge[0][0], popularity=
graph.add_edge(edge[0][0], edge[0][1], color=color, weight=edge[2])
return graph | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_graph(self):\n\n for i in range(0, self.num_words):\n self.graph.append([])\n\n for word in self.words:\n for pos in range(0, 4):\n for let in range(0, 26):\n temp = ''\n if pos > 0:\n temp = word[0:pos] + (chr(let + ord('a'))) + word[pos +\n 1:4]\n else:\n temp = chr(let + ord('a')) + word[pos + 1:4]\n if temp[pos] != word[pos]:\n if temp in self.index.keys():\n i = self.index[word]\n j = self.index[temp]\n self.graph[i].append(j)",
"def compute_graph_stats():\n\n #create a dictionary to hold the stats\n #note that it's set up as a defaultdict of defaultdicts, so as not to have\n #to create the structure ahead of time (the \"pretty\" is so it can be\n #pretty-printed for debugging purposes)\n stats = PrettyDefaultDict(lambda: PrettyDefaultDict(list))\n\n #compute the stats and add them to the dictionary\n for search_type in [\"BFS\", \"DFS\"]:\n for word_length in range(2, 11):\n\n #get all the relevant searches out of the database\n searches = (\n Search.query.filter(Search.search_type == search_type,\n Search.word_length == word_length)\n .all())\n\n #compute and store medians for path length, search time,\n #efficiency, and words explored\n med_path_length = median([search.med_path_length\n for search in searches])\n stats[\"pathLength\"][search_type].append(\n int(round(med_path_length)))\n\n\n med_search_time = median([search.med_search_time\n for search in searches])\n stats[\"searchTime\"][search_type].append(\n round(med_search_time, 1))\n\n\n med_efficiency = median([search.med_efficiency\n for search in searches])\n stats[\"efficiency\"][search_type].append(\n round(100 * med_efficiency, 1))\n\n\n med_words_explored = median([search.med_words_explored\n for search in searches])\n stats[\"wordsExplored\"][search_type].append(\n int(round(med_words_explored)))\n\n\n #add metadata to the dictionary for graphing purposes\n stats[\"wordLengths\"] = range(2, 11)\n stats[\"pathLength\"][\"yAxisLabel\"] = \"num words in path\"\n stats[\"searchTime\"][\"yAxisLabel\"] = \"search time (ms)\"\n stats[\"wordsExplored\"][\"yAxisLabel\"] = \"num words explored\"\n stats[\"efficiency\"][\"yAxisLabel\"] = \"% of explored words used\"\n\n # pprint(stats)\n\n return stats",
"def edge_maker(novel_sent_tagged, combined_counts):\n edge_list = []\n \n for i in range(len(novel_sent_tagged)):\n persons = combine_persons(novel_sent_tagged[i])\n edge_list.extend(connect_persons(persons, combined_counts))\n \n return(edge_list)",
"def iGraphFromTuples(association_tuples):\n \n# #get unique words\n# vocab = set()\n# uppercase_tuples = []\n# for (s,r), stren in association_tuples:\n# uppercase_tuples.append((s.upper(), r.upper(), stren))\n# vocab.update(word_pair)\n \n# vocab = list(vocab) #convert to ordered list\n# \n# \n# graph = Graph(len(vocab), directed=True)\n# graph.vs[\"name\"] = vocab #set vertex names\n# edges, _ = zip(*association_tuples)\n# graph.add_edges(edges)\n #association_tuples = [(s.upper(),r.upper(),stren) for (s,r), stren in association_tuples]\n association_tuples = [(s,r,stren) for (s,r), stren in association_tuples]\n graph = Graph.TupleList(association_tuples, directed=True, weights=True)\n \n graph.vs[\"id\"] = graph.vs[\"name\"]\n \n #add weights\n# for s, r , stren in association_tuples:\n# graph[(s,r)] = stren\n neg_log_proportions = []\n for e in graph.es:\n neg_log_proportions.append(-log10(e[\"weight\"]))\n \n graph.es[\"-log weight\"] = neg_log_proportions\n \n assoc_object = AssociationIGraph()\n assoc_object.graph = graph\n return assoc_object",
"def neato_cooccurrence_graph( nC, v, labels, max_nodes = 10, fnam_stem = \"test\", label_nodes_directly = False, scale=1.0, min_node_size = 0.1 ):\n \n nv = v.astype( float32 ) / v.max()\n\n cutoff = cooccur_cutoff( nC, max_nodes );\n\n graph = pydot.Dot( graph_type = 'graph' )\n graph.set_overlap(\"false\")\n coords = zip(*(nC >= cutoff).nonzero())\n\n # make a dict of all nodes which are mentioned in the coords\n nodes = {}\n index = 1\n for coord in set(chain.from_iterable(coords)) :\n if not nodes.has_key( coord ) :\n node = pydot.Node( str(coord) )\n if v != None :\n #print coord\n label = labels[coord]\n if label_nodes_directly :\n node.set_label( label )\n else :\n node.set_label( str(index) )\n #node.set_penwidth( nv[ coord ] )\n node.set_fixedsize(\"true\")\n node.set_width( max(min_node_size,scale *nv[ coord ]) )\n node.set_shape(\"circle\")\n nodes[ coord ] = node\n graph.add_node( node )\n index = index + 1\n\n for coord in coords :\n \n edge = pydot.Edge( nodes[coord[0]], nodes[coord[1]] )\n edge.set_weight( nC[coord] )\n edge.set_penwidth( nC[coord]*5 )\n #edge.set_label( str(int(m[coord]) ))\n graph.add_edge(edge)\n\n if not label_nodes_directly : \n legend = pydot.Node( \"legend\" )\n nodelist = nodes.items()\n nodelist.sort( lambda a,b : cmp(node_index(a[1].get_label()),node_index(b[1].get_label())) )\n legend.set_label( \"\\l\".join([x[1].get_label()+\":\"+labels[x[0]] for x in nodelist])+\"\\l\" )\n legend.set_shape(\"box\")\n graph.add_node(legend)\n\n #print graph.to_string()\n graph.write_dot(fnam_stem+'.dot', prog='neato' )\n graph.write_png(fnam_stem+'.png', prog='neato' )\n #graph.write_pdf(fnam_stem+'.pdf', prog='neato' )",
"def evaluate_graph(dictionary, corpus, texts, limit):\n c_v = []\n lm_list = []\n for num_topics in range(1, limit):\n lm = LdaModel(corpus=corpus, num_topics=num_topics, id2word=dictionary)\n lm_list.append(lm)\n cm = CoherenceModel(model=lm, texts=texts, dictionary=dictionary, coherence='c_v')\n c_v.append(cm.get_coherence())\n\n # Show graph\n x = range(1, limit)\n plt.plot(x, c_v)\n plt.xlabel(\"num_topics\")\n plt.ylabel(\"Coherence score\")\n plt.legend((\"c_v\"), loc='best')\n plt.show()\n\n return lm_list, c_v",
"def generateGraph(mids, chaptersField, labelsField):\n output = \"digraph G { \\n\"\n # On ne traite que les chapitres qui ont actives le graphe\n chapts = chapters.graphChapters()\n # le dico nodes contient une liste pour chaque chapitre. Chaque liste\n # contient tous les neuds (un par note) presents dans ce chapitre, et\n # representes par des tuples (noteId, label)\n nodes = {}\n for mid in mids:\n chapterField = chaptersField[mid]\n labelField = labelsField[mid]\n for id, flds in mw.col.db.execute(\"\"\"\n SELECT id, flds FROM notes WHERE mid=%d\n \"\"\" % mid):\n fields = splitFields(flds)\n chapter = fields[chapterField]\n if not chapter in chapts:\n continue\n label = fields[labelField]\n if(not chapter in nodes):\n nodes[chapter] = []\n nodes[chapter].append((id, label))\n # On genere les noeuds, dans des clusters (un par chapitre)\n notes = []\n for chap in nodes:\n output += \"\"\"subgraph cluster_%d {\n node [style=filled];\n label = \"%s\";\n color=blue;\n \"\"\" % (chapts[chap], chap)\n for n in nodes[chap]:\n output += \"\"\"n%d [label=\"%s\", URL=\"%d\"];\\n\"\"\" % (n[0], n[1], n[0])\n notes.append(n)\n output += \"\"\"\n }\\n\"\"\"\n # Puis on ajoute tous les liens ..\n for n in notes:\n for nid in mw.col.db.execute(\"\"\"SELECT N.noteId FROM `PATH.links` AS L\n JOIN `PATH.match` AS M ON M.id = L.matchId\n JOIN `PATH.nodes` AS N ON M.nodeId = N.id\n WHERE L.noteId = %d\"\"\" % (n[0])):\n output += \"\"\"n%d -> n%d;\\n\"\"\" % (nid[0], n[0])\n output += \"}\"\n generateGraphImage(output)",
"def neato_graph_from_corpus( corpus, max_nodes ) :\n\n O, row_dois, column_dois = cites_matrix( corpus )\n neato_cooccurrence_graph( O, column_dois )\n return None\n\n \n v = total_occurrences( O ) \n nv = v.astype( float32 ) / v.max()\n C = cooccurrence_matrix ( O )\n nC = normalized_cooccurrence_matrix( O )\n\n # now find our cutoff!\n # find the max number of cocites and start there\n cocite_cutoff = C.max()\n num_nodes = nodes_from_c( C[C >= cocite_cutoff] )\n # then reduce the number until we exceed max_nodes\n while num_nodes < max_nodes :\n cocite_cutoff = cocite_cutoff - 1\n num_nodes = nodes_from_c( C[C >= cocite_cutoff] )\n\n if num_nodes > max_nodes :\n cocite_cutoff = cocite_cutoff + 1\n \n C = C.copy()\n C[ C < cocite_cutoff ]= 0\n\n graph = pydot.Dot( graph_type = 'graph' )\n graph.set_overlap(\"false\")\n coords = zip(*(C >= cocite_cutoff).nonzero())\n\n # make a dict of all nodes which are mentioned in the coords\n nodes = {}\n index = 1\n for coord in set(chain.from_iterable(coords)) :\n if not nodes.has_key( coord ) :\n node = pydot.Node( str(coord) )\n if v != None :\n doi = column_dois[coord]\n node.set_label( str(index) )\n node.set_penwidth( nv[ coord ] )\n node.set_fixedsize(\"true\")\n node.set_width( 1.0 *nv[ coord ] )\n #node.set_shape(\"circle\")\n nodes[ coord ] = node\n graph.add_node( node )\n index = index + 1\n\n for coord in coords :\n \n edge = pydot.Edge( nodes[coord[0]], nodes[coord[1]] )\n edge.set_weight( nC[coord] )\n edge.set_penwidth( nC[coord]*5 )\n #edge.set_label( str(int(m[coord]) ))\n graph.add_edge(edge)\n\n \n legend = pydot.Node( \"legend\" )\n nodelist = nodes.items()\n nodelist.sort( lambda a,b : cmp(node_index(a[1].get_label()),node_index(b[1].get_label())) )\n legend.set_label( \"\\l\".join([x[1].get_label()+\":\"+column_dois[x[0]] for x in nodelist])+\"\\l\" )\n legend.set_shape(\"box\")\n graph.add_node(legend)\n\n print graph.to_string()\n #graph.write_dot('test.dot', prog='neato' )\n #graph.write_png('test.png', prog='neato' )\n #graph.write_pdf('test.pdf', prog='neato' )",
"def graph_nov_comments():\n graph_histogram_of_sentiment_scores_all_comments('politics_november_comments_cleaned_standardized_vader_flair.csv')\n # graph_histogram_of_sentiment_scores_on_link_ids('politics_november_comments_cleaned_standardized_vader_flair.csv')\n print_rolling_average_of_sentiment_scores('politics_november_comments_cleaned_standardized_vader_flair.csv', 100)\n graph_avg_sentiment_score_by_link_id('politics_november_comments_cleaned_standardized_vader_flair.csv', 100)",
"def words_graph():\r\n fh = gzip.open('words4_dat.txt.gz', 'r')\r\n words = set()\r\n for line in fh.readlines():\r\n line = line.decode()\r\n if line.startswith('*'):\r\n continue\r\n w = str(line[0:4])\r\n words.add(w)\r\n return generate_graph(words)",
"def buildGraph(pickl, his=False):\n\n dic = pickl\n\n dic1 = dic\n G = nx.Graph()\n dic3 = dict(dic)\n checked = []\n\n # Adding nodes with bios greater than 30 words.\n for key in dic:\n if((re.sub(\"[ ]+\", \"\", dic[key]) != \"\") and len(dic[key])) > 30:\n G.add_node(key)\n else:\n del dic3[key]\n\n dic1 = dic3\n\n vect = TfidfVectorizer(min_df=1)\n coefs = list()\n\n joint_dict = dict()\n # Cosine similarity measure matrix\n F = vect.fit_transform(dic3.values())\n Cosine_mat = (F*F.T).A # Symmetric matrix:\n # Traverse uper triangle for cosine similarity measures.\n for i, key in enumerate(dic3):\n for j, key1 in enumerate(dic1):\n if(i > j):\n # obtain coef for corresponding key\n tfidf = Cosine_mat[i, j]\n # Repeated nodes must be filtered\n if dic[key] == dic[key1]:\n\n continue\n else:\n coefs.append(tfidf)\n joint_dict[str(key) + str(key1)] = tfidf\n\n data = [c for c in coefs if c]\n # max(data)\n\n mu = np.mean(data)\n std = np.std(data)\n binwidth = 0.007\n if his:\n plt.subplot(1, 2, 0)\n plt.hist(data, bins=np.arange(min(data), max(data) + binwidth, binwidth))\n # PLot gaussian fit contrast\n plt.xlabel(\"$cos(\\\\theta)$\")\n plt.ylabel(\"frecuency count of $cos(\\\\theta)$ values\")\n plt.subplot(1, 2, 1)\n plt.plot(np.arange(0, max(data), 0.001),\n gaussian(np.arange(0, max(data), 0.001), mu, std),\n linewidth=2)\n plt.xlabel(\"$cos(\\\\theta)$\")\n plt.ylabel(\"fitted gaussian\")\n plt.show()\n\n # Edge creation !\n for key in dic3:\n for key1 in dic1:\n if(key != key1):\n try:\n x = joint_dict[str(key) + str(key1)]\n # If cosine similarity is an outlier with 95% change\n # Make edge between nodes that conform the similarity\n if(x - mu > 2 * std):\n G.add_edge(key, key1)\n except:\n pass\n\n # Return the conected component with largest cardinality of nodes\n # Throw away small connected components we are interested in the big one\n # For our mini project exploration purposes\n G = max(nx.connected_component_subgraphs(G), key=len)\n return G",
"def create_graph_network_visualization(graph_network, connections, connections_grouped):\n\n edge_trace = go.Scatter(\n x=[],\n y=[],\n customdata=[],\n text=[],\n line=dict(width=2, color='#888'),\n hoverinfo='all',\n mode='lines+text',\n textposition='top left',\n )\n edge_label_trace = go.Scatter(\n x=[],\n y=[],\n text=[],\n textposition='top left',\n mode='markers+text',\n hoverinfo='none',\n marker=go.Marker(\n opacity=0\n ),\n textfont=dict(size=20, color='black')\n )\n\n for edge in graph_network.edges():\n x0, y0 = graph_network.node[edge[0]]['pos']\n x1, y1 = graph_network.node[edge[1]]['pos']\n edge_weight = graph_network.node[edge[1]]['pos']\n edge_trace['x'] += tuple([x0, x1, None])\n edge_trace['y'] += tuple([y0, y1, None])\n\n text = graph_network[edge[0]][edge[1]]['weight']\n edge_label_trace['x'] += tuple([(x0 + x1) / 2])\n edge_label_trace['y'] += tuple([(y0 + y1) / 2])\n edge_label_trace['text'] += tuple([text])\n\n # writing to edge customdata\n edge_trace['customdata'] += graph_network[edge[0]][edge[1]]['weight']\n edge_trace['text'] = str(graph_network[edge[0]][edge[1]]['weight'])\n # edge_trace['marker']['size'] += professor_graph[edge[0]][edge[1]]['weight']\n # print(graph_network[edge[0]][edge[1]]['weight'])\n\n node_trace = go.Scatter(\n x=[],\n y=[],\n text=[],\n hovertext=[],\n mode=\"markers+text\",\n hoverinfo='text',\n textposition='bottom center',\n marker=dict(\n showscale=False,\n # colorscale options\n # ['Greys', 'YlGnBu', 'Greens', 'YlOrRd', 'Bluered', 'RdBu',\n # 'Reds', 'Blues', 'Picnic', 'Rainbow', 'Portland', 'Jet',\n # 'Hot', 'Blackbody', 'Earth', 'Electric', 'Viridis', 'Cividis]\n colorscale='YlGnBu',\n reversescale=True,\n color=[],\n size=40,\n colorbar=dict(\n thickness=15,\n title='Node Connections',\n xanchor='left',\n titleside='right'\n ),\n line=dict(width=2))\n )\n\n entry_bool = True\n\n for node in graph_network.nodes():\n x, y = graph_network.node[node]['pos']\n node_trace['x'] += tuple([x])\n node_trace['y'] += tuple([y])\n # node_trace['text'].append(node)\n\n # x, y = professor_graph.node[node]['pos']\n # node_trace['x'].append(x)\n # node_trace['y'].append(y)\n\n if entry_bool:\n # node_trace['text'].append(node)\n node_trace['text'] += tuple([node])\n entry_bool = False\n total_projects = \"Total Projects: {}\".format(len(connections[\"Proposal Number:\"].unique()))\n print(\"Total Projects\", total_projects)\n node_trace['hovertext'] += tuple([total_projects])\n else:\n # node_trace['text'].append(node)\n node_trace['text'] += tuple([node])\n some_text = []\n some_text.append(node + \"<br>\")\n for i in range(len(connections_grouped.loc[node]['proposal_number'])):\n if i > 0:\n some_text.append(\"<br>\")\n print(\"list index is \", i)\n print(\"prop number is \", connections_grouped.loc[node]['proposal_number'][i])\n some_text.append(connections_grouped.loc[node]['proposal_number'][i])\n # import pdb\n # pdb.set_trace()\n some_text.append(\"<br>\")\n some_text.append(connections_grouped.loc[node]['proposal_title'][i])\n some_text.append(\"<br>\")\n some_text.append(connections_grouped.loc[node]['project_status'][i])\n some_text.append(\"<br>\")\n some_text.append(connections_grouped.loc[node]['institution'][i])\n some_text.append(\"<br>\")\n some_text = [x for x in some_text if str(x) != 'nan']\n\n some_text = \"\".join(some_text)\n print(node)\n print(\"yo is \", some_text)\n # node_trace['hovertext'].append(some_text)\n node_trace['hovertext'] += tuple([some_text])\n\n for node, adjacencies in 
enumerate(graph_network.adjacency_list()):\n # print(node,adjacencies)\n # print(professor_graph[node])\n node_trace['marker']['color'] += tuple([len(adjacencies)])\n\n return node_trace, edge_trace, edge_label_trace",
"def network(self):\n G = nx.MultiDiGraph()\n reaction_hash = []\n product_count = 0\n mapping = {}\n reaction_count = 0\n\n for r in self.reactions:\n reaction_count += 1\n\n reaction_dict = r.__dict__\n G.add_edge(reaction_dict.get('left'), hash(r))\n G.add_edge(reaction_dict.get('right'), hash(r))\n G.add_edge(hash(r), reaction_dict.get('left2'))\n G.add_edge(hash(r), reaction_dict.get('right2'))\n\n product_count += 1\n mapping[reaction_dict.get('left')] = \"x{}\".format(product_count)\n product_count += 1\n mapping[reaction_dict.get('right')] = \"x{}\".format(product_count)\n product_count += 1\n mapping[reaction_dict.get('left2')] = \"x{}\".format(product_count)\n product_count += 1\n mapping[reaction_dict.get('right2')] = \"x{}\".format(product_count)\n\n mapping[hash(r)] = \"r{}\".format(reaction_dict.get(\"reaction_n\"))\n reaction_hash.append(hash(r))\n\n return G, mapping",
"def analyze_edges_and_weight(list_of_nodes):\n edges_info = []\n for node in list_of_nodes:\n n_edge_of_node = len(node.neighbors) # Counts the kys in the dictionary 'Node.neighbors'\n total_weight_of_node = sum(list(map(lambda x: node.neighbors[x], node.neighbors))) # Sums values of the dict\n node_info = (node.name, n_edge_of_node, total_weight_of_node)\n edges_info.append(node_info)\n total_n_edges = sum([tup[1] for tup in edges_info]) # Sum total number of edges\n total_weight_of_graph = sum([tup[2] for tup in edges_info]) # Sum total weight of edges\n sorted_info = sorted(edges_info, key=lambda tup: tup[1], reverse=True)\n return \"Total number of edges is {},\\nTotal weight of the graph is {}:\\nNodes sorted by no. of edges: {}.\".format(total_n_edges, total_weight_of_graph, sorted_info)",
"def graphviz_dot(sentence, font=\"Arial\", colors=BLUE):\n s = 'digraph sentence {\\n'\n s += '\\tranksep=0.75;\\n'\n s += '\\tnodesep=0.15;\\n'\n s += '\\tnode [penwidth=1, fontname=\"%s\", shape=record, margin=0.1, height=0.35];\\n' % font\n s += '\\tedge [penwidth=1];\\n'\n s += '\\t{ rank=same;\\n'\n # Create node groups for words, chunks and PNP chunks.\n for w in sentence.words:\n s += '\\t\\tword%s [label=\"<f0>%s|<f1>%s\"%s];\\n' % (w.index, w.string, w.type, _colorize(w, colors))\n for w in sentence.words[:-1]:\n # Invisible edges forces the words into the right order:\n s += '\\t\\tword%s -> word%s [color=none];\\n' % (w.index, w.index+1)\n s += '\\t}\\n'\n s += '\\t{ rank=same;\\n' \n for i, ch in enumerate(sentence.chunks):\n s += '\\t\\tchunk%s [label=\"<f0>%s\"%s];\\n' % (i+1, \"-\".join([x for x in (\n ch.type, ch.role, str(ch.relation or '')) if x]) or '-', _colorize(ch, colors))\n for i, ch in enumerate(sentence.chunks[:-1]):\n # Invisible edges forces the chunks into the right order:\n s += '\\t\\tchunk%s -> chunk%s [color=none];\\n' % (i+1, i+2)\n s += '}\\n'\n s += '\\t{ rank=same;\\n'\n for i, ch in enumerate(sentence.pnp):\n s += '\\t\\tpnp%s [label=\"<f0>PNP\"%s];\\n' % (i+1, _colorize(ch, colors))\n s += '\\t}\\n'\n s += '\\t{ rank=same;\\n S [shape=circle, margin=0.25, penwidth=2]; }\\n'\n # Connect words to chunks.\n # Connect chunks to PNP or S.\n for i, ch in enumerate(sentence.chunks):\n for w in ch:\n s += '\\tword%s -> chunk%s;\\n' % (w.index, i+1)\n if ch.pnp:\n s += '\\tchunk%s -> pnp%s;\\n' % (i+1, sentence.pnp.index(ch.pnp)+1)\n else:\n s += '\\tchunk%s -> S;\\n' % (i+1)\n if ch.type == 'VP':\n # Indicate related chunks with a dotted\n for r in ch.related:\n s += '\\tchunk%s -> chunk%s [style=dotted, arrowhead=none];\\n' % (\n i+1, sentence.chunks.index(r)+1)\n # Connect PNP to anchor chunk or S.\n for i, ch in enumerate(sentence.pnp):\n if ch.anchor:\n s += '\\tpnp%s -> chunk%s;\\n' % (i+1, sentence.chunks.index(ch.anchor)+1)\n s += '\\tpnp%s -> S [color=none];\\n' % (i+1)\n else:\n s += '\\tpnp%s -> S;\\n' % (i+1)\n s += \"}\"\n return s",
"def buildGraph(file):\r\n dict = {}\r\n graph = Graph()\r\n wfile = open(file,'r')\r\n for line in wfile:\r\n word = line[:-1]\r\n for i in range(len(word)):\r\n bucket = word[:i] + '_' + word[i+1:]\r\n if bucket in dict:\r\n dict[bucket].append(word)\r\n else:\r\n dict[bucket] = [word]\r\n for bucket in dict.keys():\r\n for word1 in dict[bucket]:\r\n for word2 in dict[bucket]:\r\n if word1 != word2:\r\n graph.addEdge(word1,word2)\r\n return graph",
"def count_unique_relations(graph):\n return Counter(itt.chain.from_iterable(get_edge_relations(graph).values()))",
"def build_word_graph(self,\n input_pos_text,\n original_tokens,\n window=2,\n syntactic_filter=None,\n reset_graph_context=False,\n preserve_common_words=False,\n node_attributes=None,\n edge_attributes=None):\n if syntactic_filter is None:\n syntactic_filter = ['ADJ', 'NOUN', 'PROPN', 'VERB', 'FW']\n\n # Extend the context of the graph\n if reset_graph_context:\n self.reset_graph()\n self.context.extend(original_tokens)\n\n if preserve_common_words:\n common_words = []\n else:\n common_words = self.common_words\n\n # Flattened input\n unfiltered_pos_list = [(word.lower(), pos) for sent in input_pos_text for word, pos in sent]\n\n # Filter input based on syntactic filters and Flatten it\n filtered_pos_list = [(word.lower(), pos) for sent in input_pos_text for word, pos in sent if pos in syntactic_filter]\n\n # Add nodes\n if node_attributes is not None:\n self.graph.add_nodes_from([(word.lower(), node_attributes) for word, pos in filtered_pos_list if word.lower() not in common_words])\n else:\n self.graph.add_nodes_from([word.lower() for word, pos in filtered_pos_list if\n word.lower() not in common_words])\n\n # Add edges\n # TODO Consider unfiltered token list to build cooccurrence edges.\n for i, (node1, pos) in enumerate(unfiltered_pos_list):\n if node1 in self.graph.nodes():\n\n for j in range(i + 1, min(i + window, len(unfiltered_pos_list))):\n node2, pos2 = unfiltered_pos_list[j]\n if node2 in self.graph.nodes() and node1 != node2:\n self.graph.add_edge(node1, node2, weight=1.0)\n else:\n continue\n\n cooccurence_graph = self.graph\n\n return cooccurence_graph",
"def graph_on_reaction(list_of_obj):\n\t# Use a multigraph so multiple edges can exist between nodes\n\treaction_graph = nx.MultiGraph(label='REACTION')\n\tfor gene in list_of_obj:\n\t\tprint gene.gene_ID\n\t\treaction_graph.add_node(gene.gene_ID)\n\t\n\t# Create edge dictionary\n\tedge_dict = {}\n\tfor gene in list_of_obj:\n\t\tif len(gene.reaction()) > 0:\n\t\t\tfor pred_reaction in gene.reaction:\n\t\t\t\tif len(pred_reaction) > 0: \n\t\t\t\t\tprint \"pred_reaction: \" + pred_reaction\n\t\t\t\t\tif pred_reaction not in edge_dict:\n\t\t\t\t\t\ttemp_gene_list = []\n\t\t\t\t\t\ttemp_gene_list.append(gene.gene_ID)\n\t\t\t\t\t\tedge_dict[pred_reaction] = temp_gene_list\n\t\t\t\t\telse:\n\t\t\t\t\t\tedge_dict[pred_reaction].append(gene.gene_ID)\n\t\n\t# Convert edge dictionary to edges with labels\n\tfor k in edge_dict:\n\t\tprint k, edge_dict[k]\n\t\tif len(edge_dict[k]) > 1:\n\t\t\tfor reacting_gene in edge_dict[k]:\n\t\t\t\ti = 0\n\t\t\t\twhile i < len(edge_dict[k]):\n\t\t\t\t\tif reacting_gene != edge_dict[k][i]:\n\t\t\t\t\t\tif test_edge_exists(reaction_graph, reacting_gene, edge_dict[k][i]) == False:\n\t\t\t\t\t\t\treaction_graph.add_edges_from([(reacting_gene,edge_dict[k][i])], reaction=k)\n\t\t\t\t\ti = i + 1\n\tprint reaction_graph.edges()\n\t\n\t#print test_edge_exists(reaction_graph, 'Rv2228c', 'Rv0054')\n\t\n\treturn reaction_graph",
"def create_social_graph(file):\n social_graph = NonDirectionalGraph(\"SocialGraph\")\n with open(file, \"rt\") as f:\n data = f.readlines()\n n_friendship = 0 # Represents the number of friendships in the graph in each iteration\n highest_n_friendship = 0 # Captures the highest record of n_friendship in the graph\n highest_n_neighbors_per_node_dict = {} # Captures the highest record of friendship per node\n for line in data:\n split_line = line.split()\n if \"became\" in split_line: # \"became\" is in lines where persons become connected\n for name in [split_line[0], split_line[2]]:\n # The following if statement makes sure to instantiate the node and adds it to the graph\n if name not in social_graph:\n node = Node(name)\n social_graph.add_node(node)\n highest_n_neighbors_per_node_dict[name] = 0 ##\n social_graph.add_edge(split_line[0],split_line[2]) # Adds a connection between the nodes\n n_friendship += 1 # Updates the number of friendships\n # The following for loop updates the highest number of friends (neighbors) if it changes\n for name in [split_line[0], split_line[2]]:\n if len(social_graph.nodes[name].neighbors) > highest_n_neighbors_per_node_dict[name]:\n highest_n_neighbors_per_node_dict[name] = len(social_graph.nodes[name].neighbors)\n elif \"cancelled\" in split_line: # \"became\" is in lines where persons become disconnected\n social_graph.remove_edge(split_line[0], split_line[2])\n n_friendship -= 1 # Updates the number of friendships\n # In case any of the words \"cancelled\" or \"became\" is in the line\n else:\n print(\"Unrecognized line\")\n # The following for loop updates the highest number of friendship if it changes\n if n_friendship > highest_n_friendship:\n highest_n_friendship = n_friendship\n return social_graph, highest_n_friendship, highest_n_neighbors_per_node_dict",
"def Scatter(data):\n data[\"titles\"] = [\" \".join(b.split()[:(len(b.split()) // 2)]) + \"<br>\" + \" \".join(b.split()[(len(b.split()) // 2):]) if len(b.split()) > 10 else b for idx, b in enumerate(data[\"titles\"])]\n return dcc.Graph(id=\"leftScatter\", figure=dict(\n data=[go.Scatter(\n x=data[\"x\"],\n y=data[\"y\"],\n text=data[\"titles\"],\n hoverinfo='text',\n mode='markers',\n marker=dict(\n size=12,\n opacity=0.5,\n color='#B22234',\n ),\n textfont=dict(family='Soria, Times New Roman, Times, serif')\n )],\n layout=dict(\n title=\"<b>Document Similarities</b>\",\n hovermode='closest',\n font=dict(family='Soria, Times New Roman, Times, serif', color='#002C77', size=19),\n margin=dict(l=10, r=10, t=50, b=10),\n plot_bgcolor=\"rgba(0,0,0,0)\",\n paper_bgcolor=\"rgba(0,0,0,0)\",\n xaxis=dict(showgrid=True,\n showline=False,\n showticklabels=False),\n yaxis=dict(showgrid=True,\n showline=False,\n showticklabels=False)\n )\n ))",
"def make_conn_graph(interaction_logs):\n G = pgv.AGraph(directed=True)\n\n for module_id in interaction_logs['module_id'].unique():\n G.add_node(module_id, label='module')\n\n grouped = interaction_logs.groupby('user_id')\n for user_id, group in grouped:\n G.add_node(user_id, label='student')\n for module_id in set(group['module_id'].values):\n G.add_edge(user_id, module_id)\n\n return G",
"def _get_graph_based_ic_dictionary(self):\n\n\t\t# TODO find the literature reference or presentation where this equation is from instead of just the presentation.\n\n\t\t#ic_dict = {}\n\t\t#num_terms_in_ontology = len(self)\n\t\t#for term in self.terms():\n\t\t#\tdepth = self._depth_dict[term.id]\n\t\t#\tnum_descendants = len(list(term.subclasses(with_self=False)))\n\t\t#\tic_value = float(depth)*(1-(math.log(num_descendants+1)/math.log(num_terms_in_ontology)))\n\t\t#\tic_dict[term.id] = ic_value\n\t\t#return(ic_dict)\n\n\n\t\t# Getting the information content of each term in the ontology based on graph structure.\n\t\tic_dict = {}\n\t\tnum_terms_in_ontology = len(self)\n\t\tfor term in self.terms():\n\t\t\tdepth = self._depth_dict[term.id]\n\t\t\tnum_descendants = len(list(term.subclasses(with_self=False)))\n\t\t\tic_value = float(depth)*(1-(math.log(num_descendants+1)/math.log(num_terms_in_ontology)))\n\t\t\tic_dict[term.id] = ic_value\n\n\n\t\t# Converting to weights based on information content rather than raw value.\n\t\tic_dict_as_weights = {}\n\t\tic_values = ic_dict.values()\n\t\tmin_ic = min(ic_values)\n\t\tmax_ic = max(ic_values)\n\t\tnew_max = 1.00\n\t\tnew_min = 0.00\n\t\tfor k,v in ic_dict.items():\n\t\t\told_range = max_ic-min_ic\n\t\t\tnew_range = new_max-new_min\n\t\t\tnew_value = (((v - min_ic) * new_range) / old_range) + new_min\n\t\t\tic_dict_as_weights[k] = new_value\n\n\t\treturn(ic_dict, ic_dict_as_weights)",
"def get_association_strengths(self, word_pairs):\n \n word_pairs = [(s.upper(), t.upper()) for s, t in word_pairs] #node names are all uppercase\n \n sources = set()\n targets = set()\n vocab = set(self.graph.vs[\"name\"])\n for s, t in word_pairs:\n if s in vocab:\n sources.add(s)\n if t in vocab:\n targets.add(t)\n \n sources = list(sources) #enforce consistent order\n targets = list(targets)\n s_map = {s:i for i, s in enumerate(sources)} #get indexes\n t_map = {t:i for i, t in enumerate(targets)}\n \n #For large numbers of pairs, it's quicker to get all the paths at once than to get them one at a time\n matrix = self.graph.shortest_paths(sources, targets, weights=\"-log weight\", mode=OUT)\n \n strengths = []\n for s, t in word_pairs:\n neg_log_dist = matrix[s_map[s]][t_map[t]] if (s in s_map and t in t_map) else float(\"inf\")\n strengths.append(10**-neg_log_dist)\n \n return strengths",
"def build_graph(self, char_embeddings):\n with vs.variable_scope(\"CharLevelCNN\"):\n batch_size = tf.shape(char_embeddings)[0]\n phrase_len = tf.shape(char_embeddings)[1]\n word_len = tf.shape(char_embeddings)[2]\n char_embedding_size = tf.shape(char_embeddings)[3]\n # b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name=\"b\")\n\n # flatten\n # char_embeddings = tf.reshape(char_embeddings,[-1, word_len, char_embedding_size])\n char_embeddings = tf.reshape(char_embeddings, shape = [batch_size*phrase_len, word_len, self.char_embedding_size])\n\n conv = tf.layers.conv1d(inputs = char_embeddings, filters = self.filters, kernel_size = self.kernel_size, activation = tf.nn.relu, reuse = tf.AUTO_REUSE) # shape (batch_size, phrase_len, word_len, filters)\n\n # unflatten\n conv = tf.reshape(conv, [batch_size, phrase_len, -1, self.filters])\n \n # h = tf.nn.relu(tf.nn.bias_add(conv, b), name=\"relu\")\n # Max-pooling over the outputs\n # cnn_char_embeddings = tf.nn.max_pool(conv, ksize=[1, sequence_length - filter_size + 1, 1, 1], strides=[1, 1, 1, 1], padding='VALID')\n \n cnn_char_embeddings = tf.reduce_max(conv, axis = 2)\n\n # dropout\n cnn_char_embeddings = tf.nn.dropout(cnn_char_embeddings, self.keep_prob)\n return cnn_char_embeddings",
"def graph_dict2graph(self, graph_dict):\n\t\tno2index = {}\t\t#used to keep track of whether one node has appeared or not\n\t\tindex2no = {}\n\t\tgraph = Graph.Graph()\n\t\tno_of_genes = 0\n\t\tfor (edge, weight) in graph_dict.iteritems():\n\t\t\tif edge[0] not in no2index:\n\t\t\t\tindex1 = no_of_genes\n\t\t\t\tno2index[edge[0]] = index1\t\t\t\t\n\t\t\t\tindex2no[index1] = edge[0]\n\t\t\t\tno_of_genes += 1\n\t\t\telse:\n\t\t\t\tindex1 = no2index[edge[0]]\n\t\t\t\t\n\t\t\tif edge[1] not in no2index:\n\t\t\t\tindex2 = no_of_genes\n\t\t\t\tno2index[edge[1]] = index2\n\t\t\t\tindex2no[index2] = edge[1]\n\t\t\t\tno_of_genes += 1\n\t\t\telse:\n\t\t\t\tindex2 = no2index[edge[1]]\n\t\t\tif index1<index2:\n\t\t\t\tgraph.add_edge(index1, index2, weight)\n\t\t\telse:\n\t\t\t\tgraph.add_edge(index2, index1, weight)\n\t\t\n\t\treturn (index2no, graph)",
"def count_relations(graph):\n return Counter(\n data[RELATION]\n for _, _, data in graph.edges_iter(data=True)\n )",
"def graph():\n\n graph = {'A': ['B', 'C'],\n 'B': ['C', 'D'],\n 'C': ['D'],\n 'D': ['C'],\n 'E': ['F'],\n 'F': ['C']}\n\n def generate_edges(graph):\n \"\"\" Convert the dict representation of a graph into a list one\n - https://www.geeksforgeeks.org/generate-graph-using-dictionary-python/\n \"\"\"\n edges = []\n\n # for each node in graph\n for node in graph:\n\n # for each neighbour node of a single node\n for neighbour in graph[node]:\n # if edge exists then append\n edges.append((node, neighbour))\n return edges\n\n a = generate_edges(graph=graph)\n print(a)",
"def graph_2hop(profile, recom, filename):\n g = Graph()\n\n # ! ids are raw -> strings\n clickedItems = set(map(lambda x: str(x[0]), profile)) # set of clicked items\n recomItems = set() # set of recommended items\n\n # get recommended items\n for click in range(0, len(profile)): # for all the clicks\n for rec in recom[click]: # for the topN recommendations\n recomItems.add(str(rec[0]))\n\n # write clicked item-nodes in an outter ring\n angleStep = 2*np.pi / float(len(clickedItems)) # polar coordinates angle step\n angle = 0 # polar coordinates angle [0, 2pi]\n R = 1000 # outter\n for item in clickedItems: # for all the clicks\n target = str(item)\n g.add_node(target)\n g.nodes[target]['color'] = [255,0,0,1] # RGBA format\n g.nodes[target]['pos'] = [R * np.cos(angle), R * np.sin(angle)]\n g.nodes[target]['text'] = target\n\n angle += angleStep\n \n # write the rest item-nodes in an inner ring\n angleStep = 2*np.pi / float(len(recomItems - clickedItems)) # polar coordinates angle step\n angle = 0 # polar coordinates angle [0, 2pi]\n R = 600 # outter\n for item in recomItems - clickedItems: # for the rest of the items\n target = str(item)\n g.add_node(target)\n g.nodes[target]['color'] = [0,0,255,1] # RGBA format\n g.nodes[target]['pos'] = [R * np.cos(angle), R * np.sin(angle)]\n g.nodes[target]['text'] = target\n\n angle += angleStep\n \n # construct edges\n edges = {} # dictionary: (source_iid, target_iid) -> Vertex object\n weight_prop = g.new_edge_property('float')\n \n for click in range(0, len(profile)): # for all the clicks\n for rec in recom[click]: # for the topN recommendations\n target= str(rec[0])\n source = str(profile[click][0])\n weight = rec[1]\n\n g.add_edge(source, target)\n g.edges(source, target)['weight'] = weight\n \n return g",
"def create_similarity_graph(user_combined_reviews):\n similarity_graph = {}\n for curr_user_id, review in user_combined_reviews.items():\n similarity_graph[curr_user_id] = []\n for other_user_id, others_review in user_combined_reviews.items():\n if other_user_id != curr_user_id:\n similarity_graph[curr_user_id].append({\n other_user_id: similarity(review, others_review)\n })\n return similarity_graph"
] | [
"0.59624755",
"0.59536886",
"0.57947934",
"0.571604",
"0.56609696",
"0.5658557",
"0.56583476",
"0.5643803",
"0.5616223",
"0.56003886",
"0.55466473",
"0.55275136",
"0.5498436",
"0.54606956",
"0.5412498",
"0.54070103",
"0.5398754",
"0.5396786",
"0.538623",
"0.5336978",
"0.5328832",
"0.5308178",
"0.5289192",
"0.52834755",
"0.5282573",
"0.52768266",
"0.52602196",
"0.5255824",
"0.5240205",
"0.5234195"
] | 0.69246936 | 0 |
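The network-visualization snippets in the list above all follow the same pattern: take a networkx graph whose nodes carry a 'pos' attribute and turn its edges and nodes into Plotly Scatter traces. A minimal sketch of that pattern, using the current networkx node API; the graph, node names, and coordinates are made up for illustration:

```python
import networkx as nx
import plotly.graph_objects as go

# Illustrative graph with hand-assigned layout positions.
G = nx.Graph()
G.add_edge("A", "B", weight=3)
G.add_edge("B", "C", weight=1)
nx.set_node_attributes(G, {"A": (0.0, 0.0), "B": (1.0, 0.5), "C": (2.0, 0.0)}, "pos")

edge_x, edge_y = [], []
for u, v in G.edges():
    x0, y0 = G.nodes[u]["pos"]
    x1, y1 = G.nodes[v]["pos"]
    edge_x += [x0, x1, None]  # None breaks the line between consecutive edges
    edge_y += [y0, y1, None]

edge_trace = go.Scatter(x=edge_x, y=edge_y, mode="lines", hoverinfo="none")
node_trace = go.Scatter(
    x=[G.nodes[n]["pos"][0] for n in G.nodes()],
    y=[G.nodes[n]["pos"][1] for n in G.nodes()],
    text=list(G.nodes()),
    mode="markers+text",
    marker=dict(size=20, color=[G.degree(n) for n in G.nodes()]),  # color nodes by degree
)
fig = go.Figure(data=[edge_trace, node_trace])
# fig.show()
```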
Computes the degree, closeness and betweenness centrality of the nodes in the graph, and creates a dictionary with the nodes as keys and their centralities as values. | def compute_centrality_for_nodes(graph: nx.Graph):
nodes_centralities = {}
degree_centralities = nx.degree_centrality(graph)
betweeness_centralities = nx.betweenness_centrality(graph, normalized=True)
closeness_centralities = nx.closeness_centrality(graph)
for node in graph.nodes:
closeness = closeness_centralities[node]
degree = degree_centralities[node]
betweeness = betweeness_centralities[node]
nodes_centralities[node] = {
"degree": degree,
"closeness": closeness,
"betweeness": betweeness
}
return nodes_centralities | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _calculate_degree_centrality(self, vertices, edges):\n # here we are calculating our own deg cen res on the fly\n # edge counts will store the number of edges associated with\n # each vertex\n edge_counts = {}\n\n # get the edge frame in pandas form and iterate\n edge_pandas = edges.to_pandas()\n for (index, row) in edge_pandas.iterrows():\n # extract src and dest node index\n src = int(row[\"src\"])\n dest = int(row[\"dst\"])\n # now we increment the count for that node\n # in edge_counts, or initialize it to one\n # if it doesn't exist\n if src not in edge_counts.keys():\n edge_counts[src] = 1\n else:\n edge_counts[src] = edge_counts[src] + 1\n if dest not in edge_counts.values():\n edge_counts[dest] = 1\n else:\n edge_counts[dest] = edge_counts[dest] + 1\n return edge_counts",
"def degree_centrality(self):\n\n try:\n if self.G.is_directed():\n self.logger.warning('G为有向网络!')\n else:\n self.logger.info('正在计算无向网络的度中心性 ...')\n return self.order_dict(nx.degree_centrality(self.G), index=1)\n except Exception as e:\n self.logger.error(\"计算失败,原因:{0}\".format(e))",
"def closeness_centrality(self):\n try:\n self.logger.info('正在计算网络的接近中心性 ...')\n return self.order_dict(nx.closeness_centrality(self.G), index=1)\n except Exception as e:\n self.logger.error(\"计算失败,原因:{0}\".format(e))",
"def in_degree_distribution(digraph):\n\tdist_in_degree = {}\n\tzero_in_count = 0\n\tnode_indegs = compute_in_degrees(digraph)\n\n\t# now that we have our temp list of key-value pairs, let's consolidate any with the same key so as to not have duplicates\n\tfor key in node_indegs:\n\t\tif node_indegs[key] not in dist_in_degree:\n\t\t\tdist_in_degree[node_indegs[key]] = 1\n\t\telif node_indegs[key] in dist_in_degree:\n\t\t\tdist_in_degree[node_indegs[key]] += 1\n\n\t# Finally, let's add a count for nodes with zero in-degrees before creating our final dictionary and returning it\n\tfor node in node_indegs:\n\t\tif node_indegs[node] == 0:\n\t\t\tzero_in_count += 1\n\tif zero_in_count > 0:\n\t\tdist_in_degree[0] = zero_in_count\n\n\treturn dist_in_degree",
"def compute_in_degrees(digraph):\r\n indegree_dict = dict()\r\n for node in digraph.keys():\r\n indegree_dict[node] = 0\r\n for head_set in digraph.values():\r\n for head_node in head_set:\r\n indegree_dict[head_node] += 1\r\n #for node in digraph.keys():\r\n # indegree = 0\r\n # for head_set in digraph.values():\r\n # if node in head_set:\r\n # indegree += 1\r\n # indegree_dict.update({node: indegree})\r\n \r\n return indegree_dict",
"def node_edge_centrality(\n H,\n f=lambda x: np.power(x, 2),\n g=lambda x: np.power(x, 0.5),\n phi=lambda x: np.power(x, 2),\n psi=lambda x: np.power(x, 0.5),\n max_iter=100,\n tol=1e-6,\n):\n from ..algorithms import is_connected\n\n # if there aren't any nodes or edges, return an empty dict\n if H.num_nodes == 0 or H.num_edges == 0 or not is_connected(H):\n return {n: np.nan for n in H.nodes}, {e: np.nan for e in H.edges}\n # if the hypergraph is not connected,\n # this metric doesn't make sense and should return nan.\n # if not is_connected(H):\n # return {n: np.nan for n in H.nodes}, {e: np.nan for e in H.edges}\n\n n = H.num_nodes\n m = H.num_edges\n x = np.ones(n) / n\n y = np.ones(m) / m\n\n I, node_dict, edge_dict = incidence_matrix(H, index=True)\n\n check = np.inf\n\n for iter in range(max_iter):\n u = np.multiply(x, g(I @ f(y)))\n v = np.multiply(y, psi(I.T @ phi(x)))\n # multiply by the sign to try and enforce positivity\n new_x = np.sign(u[0]) * u / norm(u, 1)\n new_y = np.sign(v[0]) * v / norm(v, 1)\n\n check = norm(new_x - x) + norm(new_y - y)\n if check < tol:\n break\n x = new_x.copy()\n y = new_y.copy()\n else:\n warn(\"Iteration did not converge!\")\n return {node_dict[n]: new_x[n] for n in node_dict}, {\n edge_dict[e]: new_y[e] for e in edge_dict\n }",
"def calc_cc(graph):\n\tclustering_coeffs = {}\n\tfor node in graph.nodes():\n\t\tclustering_coeffs[node] = { \"cc\" : nx.clustering(graph, node)}\n\tnx.set_node_attributes(graph, clustering_coeffs)",
"def compute_in_degrees (digraph) :\n in_degree = dict()\n\n # initialize the in-degree of each node with 0s\n for key in digraph :\n in_degree[key] = 0\n\n for node in digraph :\n for head_node in digraph[node] :\n in_degree[head_node]+=1\n\n return in_degree",
"def get_clusters(nodes: Dict[int, PhyloNode], hits: Dict[int, int], cluster_degree: int) \\\n -> Dict[PhyloNode, List[int]]:\n\n clusters = {}\n # pigeonhole ancestors of a specific degree of hits\n for taxid in hits.keys():\n cluster = nodes[taxid]\n for _ in range(cluster_degree):\n if cluster.parent is not None:\n cluster = cluster.parent\n if cluster in clusters:\n clusters[cluster].append(taxid)\n else:\n clusters[cluster] = [taxid]\n return clusters",
"def compute_in_degrees(digraph):\r\n if type(digraph)!= dict:\r\n return \"Incorrect input\"\r\n else:\r\n in_dict = dict()\r\n for node in digraph.keys():\r\n in_dict[node]=0\r\n for connected_nodes in digraph.values():\r\n for node in connected_nodes:\r\n in_dict[node]+=1\r\n return in_dict",
"def PlotCentralities(graph):\n c_degree = nx.degree_centrality(graph)\n c_degree = list(c_degree.values())\n\n c_eigenvector = nx.katz_centrality(graph)\n c_eigenvector = list(c_eigenvector.values())\n\n c_harmonic = nx.harmonic_centrality(graph)\n c_harmonic = list(c_harmonic.values())\n\n c_betweenness = nx.betweenness_centrality(graph)\n c_betweenness = list(c_betweenness.values())\n\n plt.figure(figsize=(18, 12))\n f, axarr = plt.subplots(2, 2, num=1)\n plt.sca(axarr[0, 0])\n nx.draw(\n graph,\n cmap=plt.get_cmap('inferno'),\n node_color=c_degree,\n node_size=300,\n with_labels=True)\n axarr[0, 0].set_title('Degree Centrality', size=16)\n\n plt.sca(axarr[0, 1])\n nx.draw(\n graph,\n cmap=plt.get_cmap('inferno'),\n node_color=c_eigenvector,\n node_size=300,\n with_labels=True)\n axarr[0, 1].set_title('Eigenvalue Centrality (Katz)', size=16)\n\n plt.sca(axarr[1, 0])\n nx.draw(\n graph,\n cmap=plt.get_cmap('inferno'),\n node_color=c_harmonic,\n node_size=300,\n with_labels=True)\n axarr[1, 0].set_title('harmonic_centrality Centrality', size=16)\n\n plt.sca(axarr[1, 1])\n nx.draw(\n graph,\n cmap=plt.get_cmap('inferno'),\n node_color=c_betweenness,\n node_size=300,\n with_labels=True)\n axarr[1, 1].set_title('Betweenness Centrality', size=16)",
"def in_degree_centrality(self):\n try:\n if not self.G.is_directed():\n self.logger.warning('G为无向网络!')\n else:\n self.logger.info('正在计算有向网络的入度中心性 ...')\n return self.order_dict(nx.in_degree_centrality(self.G), index=1)\n except Exception as e:\n self.logger.error(\"计算失败,原因:{0}\".format(e))",
"def _build_nodes_dict(self, graph):\n nodes_dict = {}\n for node, data in graph.nodes_iter(data=True):\n nodes_dict.update({node: data['label']})\n return nodes_dict",
"def get_graph_attributes(net_G):\r\n # number of nodes\r\n num_of_nodes = net_G.number_of_nodes()\r\n # number of nodes\r\n num_of_edges = net_G.number_of_edges()\r\n # density of net\r\n net_density = nx.density(net_G)\r\n # maximum degree and average degree\r\n nodes_degree = nx.degree(net_G)\r\n maximum_degree = max(nodes_degree, key=itemgetter(1))[0]\r\n average_degree = sum([node[1] for node in nodes_degree])/num_of_nodes\r\n # global clustering coefficient: n - count numbers of paths of length two\r\n nodes_triangles = nx.triangles(net_G)\r\n num_of_triangles = sum(nodes_triangles.values())\r\n pairs_path_length = dict(nx.all_pairs_shortest_path_length(net_G))\r\n n = 0 \r\n for node in pairs_path_length.keys(): \r\n for item in pairs_path_length[node].values():\r\n if item == 2:\r\n n = n + 1\r\n global_clustering_coefficient = (num_of_triangles * 6) / n\r\n # size of giant component\r\n giant_component = max(nx.connected_component_subgraphs(net_G),key=len)\r\n # return number of edges in graph=graph size\r\n size_of_giant = nx.Graph.size(giant_component)\r\n # calculate the average path length of giant component\r\n average_shortest_path_length = nx.average_shortest_path_length(giant_component)\r\n # maximum centrality and average centrality\r\n nodes_centrality = nx.degree_centrality(net_G)\r\n maximum_of_centrality = max(nodes_centrality.values())\r\n average_of_centrality = sum(nodes_centrality.values())/num_of_nodes\r\n # maximum betweenness centrality\r\n nodes_betweenness_centrality = nx.betweenness_centrality(net_G)\r\n maximum_betweenness_centrality = max(nodes_betweenness_centrality.values())\r\n # maximum closeness centrality\r\n nodes_closeness_centrality = nx.closeness_centrality(net_G)\r\n maximum_closeness_centrality = max(nodes_closeness_centrality.values())\r\n average_closeness_centrality = sum(nodes_closeness_centrality.values())/num_of_nodes\r\n # summarize graph attributes\r\n graph_attributes = [[\"Number of nodes:\", num_of_nodes], \\\r\n [\"Number of edges:\", num_of_edges], \\\r\n [\"Global clustering coefficient:\", global_clustering_coefficient], \\\r\n [\"Maximum degree:\", maximum_degree], \\\r\n [\"Average degree:\", average_degree], \\\r\n [\"Size of giant component:\", size_of_giant], \\\r\n [\"Average path length:\", average_shortest_path_length],\\\r\n [\"Maximum centrality:\", maximum_of_centrality], \\\r\n [\"Average centrality:\", average_of_centrality],\\\r\n [\"Maximum betweenness centrality:\", maximum_betweenness_centrality],\\\r\n [\"Maximum closeness centrality:\", maximum_closeness_centrality], \\\r\n [\"Average closeness centrality:\", average_closeness_centrality], \\\r\n [\"Net density:\", net_density]]\r\n return graph_attributes",
"def in_degree_distribution (digraph) :\n\n in_degree_dist = dict ()\n in_degrees = compute_in_degrees (digraph)\n\n for node in in_degrees :\n if in_degrees[node] in in_degree_dist :\n in_degree_dist[in_degrees[node]] += 1\n else :\n in_degree_dist[in_degrees[node]] = 1\n\n return in_degree_dist",
"def buildNodesDict(self):\n # Get relevant nodes from TANA ca_jc, intersect with BUS_ROUTE_TRAVERSAL_EDGES.\n # Then get the X,Y for the features.\n arcpy.env.workspace = PublicTransit.WORKING_GDB\n arcpy.AddXY_management(PublicTransit.RELEVANT_NODES)\n nodes = arcpy.SearchCursor(PublicTransit.RELEVANT_NODES, \"\", \"\",\n \"ID_hash; POINT_X; POINT_Y\", \"\")\n self.nodesDict = dict()\n numNodes = int(arcpy.GetCount_management(PublicTransit.RELEVANT_NODES).getOutput(0))\n print \"Found %d nodes\" % numNodes\n for node in nodes:\n self.nodesDict[node.ID_hash] = Node(node.ID_hash, node.POINT_X, node.POINT_Y)\n del node\n del nodes",
"def get_graph_dictionary(self):\n nodes = {}\n n = 0\n for node in self.__nodes:\n nodes[n] = tuple(node.get_data())\n n += 1\n\n edges = set()\n for edge in self.__edges:\n new_edge = (edge.get_node_a().get_id(), edge.get_node_b().get_id())\n edges.add(new_edge)\n\n graph_dict = {}\n graph_dict[\"nodes\"] = nodes\n graph_dict[\"edges\"] = edges\n\n return graph_dict",
"def get_nodes_info(graph):\n nodes = collections.defaultdict(lambda: {\n 'name': None,\n 'sg': None,\n 'up': set(),\n 'down': set(),\n })\n for node in graph.get_nodes():\n name = node.get_name()\n nodes[name]['name'] = name.strip('\"')\n for sg in graph.get_subgraphs():\n sgname = sg.get_name().strip('\"')\n if sgname.startswith('cluster_'):\n sgname = sgname[8:]\n sgname = sgname.replace('__', '.').replace('_dash_', '-')\n for node in sg.get_nodes():\n name = node.get_name()\n nodes[name]['name'] = name.strip('\"')\n nodes[name]['sg'] = sgname\n return dict(nodes)",
"def getNodesAndDistances():\n\n\tglobal width, height\n\n\t# First we generate the list\n\n\tprint \"\\tGetting node list...\"\n\t\n\tnodeDict = {}\n\n\tfor y in range(height):\n\t\tfor x in range(width):\n\t\t\ttheType = getSquare(x, y)\n\n\t\t\tprint \"\\t\\tGetting list for node (%d, %d) of type %d...\" % (x, y, theType)\n\n\t\t\ttempList = getNodeList(x, y, theType)\n\n\t\t\tif tempList == []:\n\t\t\t\tprint \"\\t\\t\\tNo nodes here.\"\n\t\t\telse:\n\t\t\t\tfor i in range(len(tempList)):\n\t\t\t\t\tnode = tempList[i]\n\t\t\t\t\tnodeName = node[0]\n\t\t\t\t\tnodeDict[nodeName] = node[1:]\t# Everything but the first element\n\t\t\t\t\tprint \"\\t\\t\\tAdded node '%s'...\" % nodeName\n\n\tprint \"\\tDone getting node list (%d nodes)...\" % (len(nodeDict.keys()))\n\tprint \"\"\n\n\t# Now that we've got that, we get a list of pairs\n\n\tpairList = getPairList(nodeDict)\n\n\t# Now we calculate the distance between every pair of nodes that connect\n\n\tprint \"\"\n\tprint \"\\tCreateing dictionary of distances between connected nodes...\"\n\n\tdistanceDict = {}\n\n\tfor tuple in pairList:\n\t\t(nodeA, nodeB) = tuple\n\t\tprint \"\\t\\tCalculating distance between '%s' and '%s'...\" % (nodeA, nodeB)\n\t\tdistance = distanceBetween(nodeA, nodeB, nodeDict)\n\t\tpairName = \"%s%s\" % (nodeA, nodeB)\n\t\tdistanceDict[pairName] = distance\n\t\tprint \"\\t\\t\\tDistace was %f.\" % (distance)\n\n\tprint \"\\tDone creating dictionary of node differences (%d pairs).\" % (len(distanceDict.keys()))\n\n\treturn nodeDict, distanceDict",
"def degreeCentrality(graph, numberOfPoints):\n c_degree = nx.degree_centrality(graph)\n c_degree = heapq.nlargest(numberOfPoints, list(c_degree.values()))\n return c_degree",
"def _compute_node_degrees(self):\n mes = []\n args = []\n for metaedge, matrix in self.adj_matrices.items():\n mes.append(metaedge)\n args.append(matrix)\n res = parallel_process(array=args, function=mt.calculate_degrees, n_jobs=self.n_jobs, front_num=0)\n for metaedge, (out_degree, in_degree) in zip(mes, res):\n self.out_degree[metaedge] = out_degree\n self.in_degree[metaedge] = in_degree",
"def out_degree_centrality(self):\n\n try:\n if not self.G.is_directed():\n self.logger.warning('G为无向网络!')\n else:\n self.logger.info('正在计算有向网络的出度中心性 ...')\n return self.order_dict(nx.out_degree_centrality(self.G), index=1)\n except Exception as e:\n self.logger.error(\"计算失败,原因:{0}\".format(e))",
"def degree(graph, nodes=None, weight=None):\n\n if nodes is None:\n nodes = graph.nodes\n else:\n not_in_graph = [nid for nid in nodes if nid not in graph.nodes]\n if not_in_graph:\n logger.error('Nodes {0} not in graph'.format(not_in_graph))\n\n results = {}\n if weight:\n for node in nodes:\n results[node] = sum([graph.edges[(node, n)].get(weight, 1) for n in graph.adjacency[node]])\n if node in graph.adjacency[node]:\n results[node] += graph.edges[(node, node)].get(weight, 1)\n else:\n for node in nodes:\n results[node] = len(graph.adjacency[node])\n if node in graph.adjacency[node]:\n results[node] += 1\n\n return results",
"def in_degree_distribution(digraph):\n degree_distr = {}\n num_degree = compute_in_degrees(digraph)\n for node in num_degree:\n degree_distr[num_degree[node]] = degree_distr.get(num_degree[node],0) + 1\n return degree_distr",
"def compute_in_degrees(digraph):\n num_degree = {}\n for dummy_node in digraph:\n num_degree[dummy_node] = 0\n for key in digraph:\n for node in digraph[key]:\n num_degree[node] += 1\n return num_degree",
"def get_node_degree(self, node_id):\n kind = self.id_to_metanode[node_id]\n idx = self.nid_to_index[node_id]\n node_degrees = dict()\n\n for metaedge, start in self.metanode_to_edges[kind].items():\n current_matrix = self.adj_matrices[metaedge]\n if start['start']:\n deg = self.out_degree[metaedge][idx]\n else:\n deg = self.in_degree[metaedge][idx]\n node_degrees[metaedge] = deg\n return node_degrees",
"def make_complete_graph(num_nodes):\n\tif num_nodes <= 0:\n\t\treturn {}\n\tdict_graph = {}\n\tfor node in range(num_nodes):\n\t\tnode_set = set()\n\t\tfor neighbor in range(num_nodes):\n\t\t\tif node != neighbor:\n\t\t\t\tnode_set.add(neighbor)\n\t\tdict_graph[node] = node_set\n\n\treturn dict_graph",
"def in_degree_distribution(digraph):\n\tdist_in_degree = {}\n\tzero_in_count = 0\n\t\n\t# Returns:\n\t# { key, i.e., in-degree, number of edges coming into a node: \n\t# value, i.e., int, number of nodes with this value for in-degree }\n\n\t# first, create a temporary 2d list, each interior list containing (1) a key or in-degree and (2) a value or number of nodes with this corresponding in-degree",
"def get_all_local_clustering_coef(g):\n local_cc = {}\n\n for n in nx.nodes(g):\n local_cc[n] = get_local_clustering_coef(g, n)\n\n return local_cc",
"def network_nodes_species(self):\n G, mapping = self.network()\n waste, resources, intmed_products = self.amenities()\n\n node_dict = {}\n\n for nd in G:\n # print(nd)\n if isinstance(nd, int):\n node_dict[nd] = \"r\"\n elif nd in self.commodity:\n node_dict[nd] = \"Xc\"\n elif nd in waste:\n node_dict[nd] = \"w\"\n elif nd in resources:\n node_dict[nd] = \"Xr\"\n elif nd in intmed_products:\n node_dict[nd] = \"InPr\"\n\n return node_dict"
] | [
"0.68229616",
"0.681152",
"0.66527605",
"0.64947486",
"0.6480199",
"0.6477992",
"0.6423478",
"0.6360602",
"0.62909657",
"0.62738115",
"0.6249793",
"0.62413496",
"0.6192363",
"0.6146016",
"0.6130091",
"0.6122413",
"0.61208606",
"0.60985196",
"0.60903275",
"0.60845214",
"0.6076657",
"0.59973156",
"0.59830046",
"0.5963951",
"0.5952047",
"0.59331167",
"0.59074336",
"0.5907338",
"0.58869934",
"0.5885185"
] | 0.8465829 | 0 |
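A minimal usage sketch for the centrality record above, assuming networkx is available; the karate-club graph stands in for whatever graph the function would receive, and the ranking step shows one typical way the per-node dictionary gets consumed:

```python
import networkx as nx

# Build the same kind of per-node centrality mapping as the record above,
# using networkx's built-in karate-club graph as a stand-in.
G = nx.karate_club_graph()
degree = nx.degree_centrality(G)
closeness = nx.closeness_centrality(G)
betweenness = nx.betweenness_centrality(G, normalized=True)

centralities = {
    n: {"degree": degree[n], "closeness": closeness[n], "betweenness": betweenness[n]}
    for n in G.nodes
}

# Rank nodes by betweenness to surface likely "bridge" nodes.
ranked = sorted(centralities, key=lambda n: centralities[n]["betweenness"], reverse=True)
print(ranked[:5])
```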
Reads all the gml files of the graphs and stores them in a list. Each index corresponds to the book number in the series. | def load_all_graphs():
all_graphs = []
for i in range(7):
with open(f'Full_Network_Book_{i+1}.gml', 'rb') as graph_file:
all_graphs.append(nx.read_gml(graph_file))
return all_graphs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_files(self):\n for f in self.filenames:\n self.games.extend(pgn.loads(open(f).read()))",
"def loadGraphs(cls\n , cGraphClass # graph class (must be subclass)\n , lsFilename\n , bNeighbourhood=True\n , bDetach=False\n , bLabelled=False\n , iVerbose=0\n , attachEdge=False # all incident edges for each node\n ):\n lGraph = []\n for sFilename in lsFilename:\n if iVerbose: traceln(\"\\t%s\" % sFilename)\n [g] = cls.getSinglePages(cGraphClass, sFilename, bNeighbourhood, bDetach, bLabelled,\n iVerbose)\n g._index()\n if not g.isEmpty():\n if attachEdge and bNeighbourhood: g.collectNeighbors(attachEdge=attachEdge)\n if bNeighbourhood: g.collectNeighbors()\n if bLabelled: g.parseDocLabels()\n if bDetach: g.detachFromDoc()\n lGraph.append(g)\n return lGraph",
"def read_graph_list(graph_class, f: IO[str]) -> list[Graph]:\n graphs = []\n cont = True\n\n while cont:\n graph, cont = read_graph(graph_class, f)\n graphs.append(graph)\n\n return graphs",
"def load_graph( gname ):\n return NX.read_gpickle( gname )",
"def readFiles(self):\n #return a list of traces\n alltraces = []\n for dfile in self._datafiles:\n traces,headers = readgeonet(dfile)\n alltraces += traces\n return alltraces",
"def named_graphs(min_order, max_order, dotdir, verbose=False):\n\n path = dotdir\n # graph the full list of graph file names\n files = os.listdir(path)\n\n for filename in files:\n G = from_dot(path + filename)\n if (\n G.size() > 0\n and len(G) >= min_order\n and len(G) <= max_order\n and nx.is_connected(G)\n ):\n if verbose:\n print(f\"Reading {filename}\")\n yield (G, filename)",
"def allGraphs(date):\n g = getGraph()\n for uri, label, filename in subgraphs(date):\n if not label:\n label = \"(no label provided)\"\n g.parse(filename, format=SUBGRAPH_FORMAT)\n return g",
"def collect_graphs(inputfiles):\n graphs = {}\n global _open_files\n for inf in inputfiles:\n logging.debug('Opening file {}'.format(inf))\n f = r.TFile.Open(inf)\n _open_files.append(f)\n\n graphs[inf] = collectGraphs(f)\n\n logging.debug('Collected {} graphs'.format(len(graphs[inf])))\n\n return graphs",
"def read_graph_list(graph_class, f: IO[str]) -> Tuple[List[Graph], List[str]]:\n options = []\n graphs = []\n cont = True\n\n while cont:\n graph, new_options, cont = read_graph(graph_class, f)\n options += new_options\n graphs.append(graph)\n\n return graphs, options",
"def read_dir():\n file_list=[]\n title_list = []\n for filename in os.listdir(\"alignments/\"):\n if filename.endswith(\".aln\"): #Retrieve only alignment files.\n file_list.append(filename)\n with open (\"genID.txt\",'r') as x: #The genID.txt file contains relevant gene names.\n while True:\n rule = x.readline()\n if len(rule) > 0: #If the rule is empty, the program does not use it.\n if rule[0] == \"B\": #Only fetch gen names.\n title_list.append(rule) #The title_list is used to create the variant files in a later stadium\n else:\n break\n return file_list,title_list",
"def Galaxies(name_path):\n\tp = pathlib.Path(name_path)\n\tgalaxies = []\n\tfor f in p.glob('*.fits'):\n\t\thdu = fits.open(f)\n\t\tZ1= hdu[0].data\n\t\tgalaxies.append(Z1)\n \n\treturn galaxies",
"def subgraphs(date):\n dateDir = \"graph/%s\" % date\n if not os.path.isdir(dateDir):\n return\n for name in os.listdir(dateDir):\n filename = \"graph/%s/%s\" % (date, name)\n if filename.endswith('.uri'):\n continue\n uri, label = open(filename + \".uri\").read().split(\"\\0\")\n yield uri, label, filename",
"def read_models():\n model_files_cvd = np.sort(glob.glob(\"./grad_results/cvd*N1024_f0003.npy\"))\n model_files_mnist = np.sort(glob.glob(\"./grad_results/mnist*N25000_f02.npy\"))\n\n model_files_cvd = np.array([model_files_cvd[2], model_files_cvd[1], model_files_cvd[0]])\n\n results_cvd = []\n results_mnist = []\n\n for filename in model_files_cvd:\n results_cvd.append(np.load(filename))\n \n for filename in model_files_mnist:\n results_mnist.append(np.load(filename))\n\n return np.array(results_mnist), np.array(results_cvd)",
"def getBooks(self):\n srcIds = set([srcId for srcId,altId in self.libMap.values()])\n altIds = set([altId for srcId,altId in self.libMap.values()])\n factory = {'BOOK':Book}\n for modName in mwIniFile.loadOrder:\n print modName\n fileRep = FileRep(modInfos[modName],False)\n fileRep.load(keepTypes=None,factory=factory)\n for record in fileRep.records:\n if record.name == 'BOOK':\n bookId = record.getId()\n if bookId in srcIds:\n print '',bookId\n self.srcBooks[bookId] = (record,modName)\n elif bookId in altIds:\n print '',bookId\n self.altBooks[bookId] = (record,modName)",
"def get_graph(filename, data_folder):\n g = nx.MultiGraph()\n with open(data_folder + \"/\" + filename) as fp:\n line = fp.readline()\n while line:\n (o, d, t, e) = line.split()\n g.add_edge(int(o), int(d), start=int(t), duration=int(e))\n line = fp.readline()\n return g",
"def get_svg_list(self):\n svg_list = []\n for path, subdirs, files in os.walk(self.directory):\n for file in files:\n if file.endswith(self.from_extension):\n abspath_file = path + os.sep + file\n svg_list.append(abspath_file)\n print('{} SVG files found in directory'.format(len(svg_list)))\n return svg_list",
"def load_pgn(filename: str) -> List[chess.pgn.Game]:\n with open(filename) as pgn:\n games = []\n while True:\n game = chess.pgn.read_game(pgn)\n if game is not None:\n games.append(game)\n else:\n break\n return games",
"def read_snapshots(inputDir:str, format=None,frequency=1,prefix=\"\") -> DynGraphSN:\n\n\n anSnGraph = tn.DynGraphSN(frequency=frequency)\n files = os.listdir(inputDir)\n visibleFiles = [f for f in files if f[0] != \".\"]\n\n if format==None:\n format=_detectAutomaticallyFormat(visibleFiles[0])\n\n for f in visibleFiles:\n g = _read_network_file(inputDir + \"/\" + f, format) # type:nx.Graph\n anSnGraph.add_snapshot(int(os.path.splitext(f)[0][len(prefix):]), g)\n\n\n return anSnGraph",
"def load_graph(self, filename):\n try:\n file_extention = list(filename.split(\".\"))[-1]\n if file_extention == \"gml\":\n self.graph = nx.read_gml(filename)\n if file_extention == \"adjlist\":\n self.graph = nx.read_adjlist(filename)\n if file_extention == \"yaml\":\n self.graph = nx.read_yaml(filename)\n except Exception as e:\n print(\"Error in loading Graph file: The error is\", e)",
"def mel_gff_list():\n\tmod_gff3 = sys.argv[1]\n\twith open(mod_gff3, 'r') as f:\n\t\tgff = [line.strip().split('\\t') for line in f]\n\t\tf.close()\n\treturn gff\n\t#gff_list ex/:\n\t#['2L', 'FlyBase', 'gene', '7529', '9484', '.', '+', '.', 'ID=FBgn0031208;Name=CG11023;Ontology_term=SO:0000010,SO:0000087,GO:0016929,GO:0016926;Dbxref=FlyBase:FBan0011023,FlyBase_Annotation_IDs:CG11023,GB_protein:ACZ94128,GB_protein:AAO41164,GB:AI944728,GB:AJ564667,GB_protein:CAD92822,GB:BF495604,UniProt/TrEMBL:Q86BM6,INTERPRO:IPR003653,GB_protein:AGB92323,UniProt/TrEMBL:M9PAY1,OrthoDB7_Drosophila:EOG796K1P,OrthoDB7_Diptera:EOG7X1604,EntrezGene:33155,UniProt/TrEMBL:E1JHP8,UniProt/TrEMBL:Q6KEV3,OrthoDB7_Insecta:EOG7Q8QM7,OrthoDB7_Arthropoda:EOG7R5K68,OrthoDB7_Metazoa:EOG7D59MP,InterologFinder:33155,BIOGRID:59420,FlyAtlas:CG11023-RA,GenomeRNAi:33155;gbunit=AE014134;derived_computed_cyto=21A5-21A5'], ['2L', 'FlyBase', 'gene', '9839', '21376', '.', '-', '.', 'ID=FBgn0002121;Name=l(2)gl;fullname=lethal (2) giant larvae;Alias=Lgl,lgl,lethal giant larvae,lethal giant larve,lethal giant larva,lethal(2)giant larvae,Complementation group 2.1,Lethal Giant Larvae,dlgl,p127l(2)gl,LGL,l(2) giant larva,CG2671,L(2)GL,p127,l(2)giant larvae,D-LGL,l(2),gl,l[[2]]gl,l-gl,lethal-giant-larvae,Lethal giant larvae,Lethal (2) giant larvae,L(2)gl,Lethal (2) giant larva,Lethal-giant-larvae,MENE (2L)-B,lethal(2) giant larvae,p127[l(2)gl],lethal(2)-giant larvae,lethal-2-giant larvae,l(2) giant larvae,lethal- giant-larvae,Lethal(2)giant larvae,Lethal-2-giant larvae;Ontology_term=SO:0000010,SO:0000087,GO:0005578,GO:0005886,GO:0007269,GO:0016082,GO:0008021,GO:0008283,GO:0016334,GO:0016336,GO:0016333,GO:0016335,GO:0016327,GO:0005829,GO:0045175,GO:0016332,GO:0045184,GO:0007399,GO:0005938,GO:0005737,GO:0007179,GO:0045197,GO:0045196,GO:0002009,GO:0005918,GO:0008105,GO:0045167,GO:0008104,GO:0045746,GO:0007423,GO:0008285,GO:0001738,GO:0016323,GO:0007391,GO:0005856,GO:0030154,GO:0042127,GO:0005614,GO:0045159,GO:0035072,GO:0007559,GO:0045200,GO:0008360,GO:0019991,GO:0007406,GO:0051726,GO:0051668,GO:0007314,GO:0016325,GO:0030036,GO:0030863,GO:0035070,GO:0055059,GO:0035212,GO:0035293,GO:0090163,GO:0048730,GO:0000132,GO:0098725,GO:0060429,GO:0007293,GO:0045176,GO:0072697,GO:0000149,SO:0000548,GO:0005920,GO:0017022,GO:0004860,GO:0006469;Dbxref=FlyBase:FBan0002671,FlyBase_Annotation_IDs:CG2671,INTERPRO:IPR015943,GB_protein:AAN10503,GB_protein:AAG22256,GB_protein:AAN10502,GB_protein:AAN10501,GB_protein:AAF51570,GB_protein:AAG22255,INTERPRO:IPR017986,GB:AA246243,GB:AW942062,GB:AY051654,GB_protein:AAK93078,GB:BH809482,GB:CZ471313,GB:CZ482024,GB:CZ484691,GB:M17022,GB_protein:AAA28671,GB_protein:AAA28672,GB:X05426,GB_protein:CAA29007,UniProt/Swiss-Prot:P08111,INTERPRO:IPR000664,INTERPRO:IPR001680,INTERPRO:IPR013577,GB_protein:AGB92324,UniProt/TrEMBL:M9NCX1,UniProt/TrEMBL:M9PBJ2,OrthoDB7_Drosophila:EOG7CW2GT,OrthoDB7_Diptera:EOG7DRVK2,GB_protein:AFH03479,GB_protein:AFH03478,GB_protein:AFH03481,GB_protein:AFH03480,EntrezGene:33156,INTERPRO:IPR013905,BDGP_clone:PC00404,OrthoDB7_Insecta:EOG7SRGKH,OrthoDB7_Arthropoda:EOG7ZDD82,OrthoDB7_Metazoa:EOG79W94C,InterologFinder:33156,FlyAtlas:CG2671-RB,BIOGRID:59421,Fly-FISH:CG2671,GenomeRNAi:33156,INTERACTIVEFLY:/cytoskel/lethl2g1.htm;gbunit=AE014134;derived_computed_cyto=21A5-21A5'],\n\t# ['2L', 'FlyBase', 'ncRNA', '286383', '288292', '.', '+', '.', 'ID=FBtr0347595;Name=CR46263-RA;Parent=FBgn0267996;Dbxref=FlyBase_Annotation_IDs:CR46263-RA;score_text=Weakly Supported;score=0'], ['2L', 'FlyBase', 'gene', '287252', 
'289144', '.', '-', '.', 'ID=FBgn0025686;Name=Amnionless;fullname=Amnionless ortholog;Alias=FBgn0031246,CG11592,CK02467,BEST:CK02467,dAMN,Amnionless;Ontology_term=SO:0000010,SO:0000087,GO:0046331,GO:0097206,GO:0016021,GO:0097017;Dbxref=FlyBase:FBan0011592,FlyBase_Annotation_IDs:CG11592,GB_protein:AAF51514,GB:AA141784,GB:CZ468687,UniProt/TrEMBL:Q9VPN2,GB_protein:AGB92350,OrthoDB7_Drosophila:EOG7CGKJK,EntrezGene:33199,BDGP_clone:IP03221,OrthoDB7_Diptera:EOG774804,INTERPRO:IPR026112,OrthoDB7_Insecta:EOG7G266G,OrthoDB7_Arthropoda:EOG7P65FW,OrthoDB7_Metazoa:EOG7ZGX2W,InterologFinder:33199,FlyAtlas:CG11592-RA,GenomeRNAi:33199;gbunit=AE014134;derived_computed_cyto=21B7-21B7'], ['2L', 'FlyBase', 'gene', '292419', '293222', '.', '+', '.', 'ID=FBgn0031247;Name=CG11562;Alias=FBgn0063011,BcDNA:RE44650;Ontology_term=SO:0000010,SO:0000087,GO:0005739,GO:0003674,GO:0008150;Dbxref=FlyBase:FBan0011562,FlyBase_Annotation_IDs:CG11562,GB_protein:AAF51513,GB:AI520524,GB:AI945841,GB:AY119645,GB_protein:AAM50299,GB:BE662187,GB:BI358003,UniProt/TrEMBL:Q9VPN3,OrthoDB7_Drosophila:EOG7HTW3H,OrthoDB7_Diptera:EOG7200K9,EntrezGene:33200,BDGP_clone:RE44650,OrthoDB7_Insecta:EOG7B9454,OrthoDB7_Arthropoda:EOG7RK278,OrthoDB7_Metazoa:EOG78H3X3,FlyAtlas:CG11562-RA,INTERPRO:IPR031568,Fly-FISH:CG11562,GenomeRNAi:33200;gbunit=AE014134;derived_computed_cyto=21B7-21B7'], ['2L', 'FlyBase', 'gene', '292959', '294681', '.', '-', '.', 'ID=FBgn0017457;Name=U2af38;fullname=U2 small nuclear riboprotein auxiliary factor 38;Alias=FBgn0010626,U2AF38,U2AF,dU2AF38,DU2AF38,CG3582,dU2AF[38],l(2)06751,u2af38,U2AF 38;Ontology_term=GO:0089701,SO:0000010,SO:0000087,GO:0000398,GO:0008187,GO:0005681,GO:0005686,GO:0000381,GO:0005634,GO:0003729,GO:0007052,GO:0071011,GO:0008380,GO:0000166,GO:0046872;Dbxref=FlyBase:FBan0003582,FlyBase_Annotation_IDs:CG3582,GB_protein:AAF51512,GB:AA264081,GB:AA820431,GB:AC004115,GB:AC008371,GB:AI061776,GB:AI455418,GB:AI944553,GB:AQ026079,GB:AY058537,GB_protein:AAL13766,GB:U67066,GB_protein:AAB17271,UniProt/Swiss-Prot:Q94535,INTERPRO:IPR000504,INTERPRO:IPR000571,INTERPRO:IPR009145,INTERPRO:IPR012677,GB_protein:AGB92351,UniProt/TrEMBL:M9PBM1,OrthoDB7_Drosophila:EOG7FRM2M,OrthoDB7_Diptera:EOG700KS6,EntrezGene:33201,BDGP_clone:LD24048,OrthoDB7_Insecta:EOG76QSHP,OrthoDB7_Arthropoda:EOG7KMJ7T,OrthoDB7_Metazoa:EOG70089G,apodroso:10448-U2af38[k14504],InterologFinder:33201,FlyAtlas:CG3582-RA,BIOGRID:59457,Fly-FISH:CG3582,GenomeRNAi:33201;gbunit=AE014134;derived_computed_cyto=21B7-21B8']]",
"def get_graphml_paths(paths):\n graphml_paths = []\n for path in paths:\n with open(path, \"r\", encoding=\"utf8\") as f:\n graphml_paths += eval(f.read())\n\n return graphml_paths",
"def readSubgraph(graph, ctxURI):\n filename = filenameFromURI(ctxURI)\n for dateDir in os.listdir(\"graph\"):\n f = \"graph/%s/%s\" % (dateDir, filename)\n if os.path.exists(f):\n print \"readSubgraph adds\", f\n graph.parse(f, format=SUBGRAPH_FORMAT, publicID=ctxURI)",
"def get_songs(path):\r\n song_list = []\r\n genre_paths = glob.glob(path + '/*')\r\n for genre_path in genre_paths:\r\n artist_paths = glob.glob(genre_path + '/*')\r\n for artist_path in artist_paths:\r\n album_paths = glob.glob(artist_path + '/*')\r\n for album_path in album_paths:\r\n lyrics_paths = glob.glob(album_path + '/*.txt')\r\n for lyrics_path in lyrics_paths:\r\n song = {}\r\n song[\"genre\"] = genre_path.replace(path + '/', '')\r\n song[\"artist\"] = artist_path.replace(genre_path + '/', '')\r\n song[\"album\"] = album_path.replace(artist_path + '/', '')\r\n song[\"lyrics\"] = open(lyrics_path).read()\r\n song[\"name\"] = lyrics_path[:-4].replace(album_path + '/', '')\r\n song[\"x\"] = 0\r\n song[\"y\"] = 0\r\n song_list.append(song)\r\n return song_list",
"def load_data_list(self):\n\n data = mat4py.loadmat(self.ann_file)['images']\n names = data['name']\n labels = data['class']\n parts = data['set']\n num = len(names)\n assert num == len(labels) == len(parts), 'get error ann file'\n\n if self.split == 'train':\n target_set = {1}\n elif self.split == 'val':\n target_set = {2}\n elif self.split == 'test':\n target_set = {3}\n else:\n target_set = {1, 2}\n\n data_list = []\n for i in range(num):\n if parts[i] in target_set:\n img_name = names[i]\n img_path = self.backend.join_path(self.img_prefix, img_name)\n gt_label = labels[i] - 1\n info = dict(img_path=img_path, gt_label=gt_label)\n data_list.append(info)\n\n return data_list",
"def get_rss(self):\r\n rssfiles = []\r\n \r\n rssfiles.append(feedparser.parse(self.url))\r\n return rssfiles",
"def load_mel_dataset(song_folder_name):\n\n # Get all songs saved as numpy arrays in the given folder\n song_list = os.listdir(song_folder_name)\n\n # Create empty lists\n label = []\n spectrogram = []\n song_name = []\n\n # Load each song into memory if the artist is included and return\n for song in song_list:\n with open(os.path.join(song_folder_name, song), 'rb') as fp:\n loaded_song = dill.load(fp)\n\n label.append(loaded_song[0])\n spectrogram.append(loaded_song[1])\n song_name.append(loaded_song[2])\n\n return label, spectrogram, song_name",
"def load_files(self) -> Tuple[List[str], List[str]]:\n filename, _, ext = self.file_path.rpartition(\".\")\n features_file = filename + \"_nospace.\" + ext\n labels_file = filename + \"_bies.\" + ext\n features = self.read_dataset(features_file)\n labels = self.read_dataset(labels_file)\n avg_len = sum(len(s) for s in features) // len(features)\n print(\"Dataset average length:\", avg_len)\n self.max_length = avg_len + (avg_len // 3)\n return features, labels",
"def get_all_network_graphs(list_mtfs):\n list_graphs = []\n \n for mtf in list_mtfs:\n list_graphs.append(get_network_graph(mtf))\n \n return list_graphs",
"def get_graph_files(ndmg_participant_dir, atlas=\"desikan\"):\n d = os.path.abspath(ndmg_participant_dir) # to make things easier\n out = [\n os.path.join(\n directory, filename\n ) # Returns list of absolute path files in ndmg_participant_dir that end in '_adj.csv'.\n for directory, _, filenames in os.walk(d)\n for filename in filenames\n if (\n (\n filename.endswith(\"_adj.csv\")\n or filename.endswith(\"_elist.csv\")\n or filename.endswith(\"_elist.ssv\")\n or filename.endswith(\"desikan.ssv\")\n or filename.endswith(\"_adj.ssv\")\n )\n and atlas in filename\n ) # Soft check on filenames. Will break if filename has 'atlas' and '_adj.csv' in it but is not the adjacency matrix for that atlas.\n ]\n return out",
"def g_n():\n for gname in os.listdir(sroot):\n if gname != 's1-league1-game1':\n continue\n if gname.startswith('s1'):\n p0 = os.path.join(sroot, gname)\n p1 = os.path.join(p0, 'commitment', 'jperret')\n p2 = os.path.join(p0, 'commitment', 'sa')\n if os.path.isdir(p1) and os.path.isdir(p2):\n for fname in os.listdir(p1):\n if fname.endswith('.aa'):\n bname = fname[:-3]\n #~ if bname == 's1-league1-game2_07':\n #~ continue\n a = ad.Annotations(os.path.join(p1, fname))\n a.load_text(os.path.join(p0, 'unannotated', bname+'.ac'))\n a.gen_full_struct()\n a.commitments = list(u for u in a.units if u.type == 'Commitment')\n a2 = ad.Annotations(os.path.join(p2, fname))\n a2.load_text(os.path.join(p0, 'unannotated', bname+'.ac'))\n a2.gen_full_struct()\n a2.commitments = list(u for u in a2.units if u.type == 'Commitment')\n yield bname, (a, a2)"
] | [
"0.6021632",
"0.5763704",
"0.5745561",
"0.570008",
"0.5696488",
"0.5618715",
"0.5566793",
"0.55402064",
"0.5510486",
"0.5398594",
"0.5379996",
"0.53715515",
"0.5363411",
"0.53072804",
"0.52946395",
"0.52931494",
"0.5290251",
"0.5279643",
"0.5209348",
"0.5164126",
"0.516346",
"0.51567847",
"0.5123193",
"0.5117058",
"0.511532",
"0.51021284",
"0.5093468",
"0.5089821",
"0.5087723",
"0.5085971"
] | 0.7873407 | 0 |
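A minimal round-trip sketch for the GML-loading record above, assuming networkx; the tiny path graphs and the write step are illustrative stand-ins for the seven per-book network files named Full_Network_Book_<i>.gml:

```python
import networkx as nx

# Write one small stand-in graph per book, then load them all back,
# mirroring the Full_Network_Book_{i}.gml naming from the record above.
for i in range(1, 8):
    G = nx.path_graph(4)  # placeholder for book i's character network
    nx.write_gml(G, f"Full_Network_Book_{i}.gml")

all_graphs = [nx.read_gml(f"Full_Network_Book_{i}.gml") for i in range(1, 8)]
print(len(all_graphs), all_graphs[0].number_of_nodes())
```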
Create a task for a given queue with an arbitrary payload. | def create_task(project, queue, location, payload=None, in_seconds=None):
# [START cloud_tasks_appengine_create_task]
from google.cloud import tasks_v2
from google.protobuf import timestamp_pb2
import datetime
import json
# Create a client.
client = tasks_v2.CloudTasksClient()
# TODO(developer): Uncomment these lines and replace with your values.
# project = 'my-project-id'
# queue = 'my-appengine-queue'
# location = 'us-central1'
# payload = 'hello' or {'param': 'value'} for application/json
# in_seconds = None
# Construct the fully qualified queue name.
parent = client.queue_path(project, location, queue)
# Construct the request body.
task = {
"app_engine_http_request": { # Specify the type of request.
"http_method": tasks_v2.HttpMethod.POST,
"relative_uri": "/example_task_handler",
}
}
if payload is not None:
if isinstance(payload, dict):
# Convert dict to JSON string
payload = json.dumps(payload)
# specify http content-type to application/json
task["app_engine_http_request"]["headers"] = {
"Content-type": "application/json"
}
# The API expects a payload of type bytes.
converted_payload = payload.encode()
# Add the payload to the request.
task["app_engine_http_request"]["body"] = converted_payload
if in_seconds is not None:
# Convert "seconds from now" into an rfc3339 datetime string.
d = datetime.datetime.now(tz=datetime.timezone.utc) + datetime.timedelta(
seconds=in_seconds
)
# Create Timestamp protobuf.
timestamp = timestamp_pb2.Timestamp()
timestamp.FromDatetime(d)
# Add the timestamp to the tasks.
task["schedule_time"] = timestamp
# Use the client to build and send the task.
response = client.create_task(parent=parent, task=task)
print(f"Created task {response.name}")
return response | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_task(self, body, *, task_cls=Task):\n return task_cls(self, body)",
"def new_task(data):\n rabbit_host = os.getenv('RABBIT_HOST', 'localhost')\n connection = pika.BlockingConnection(\n pika.ConnectionParameters(rabbit_host)\n )\n channel = connection.channel()\n channel.basic_publish(\n exchange='',\n routing_key='task_queue',\n body=json.dumps(data),\n properties=pika.BasicProperties(\n delivery_mode=2, # make message persistent\n )\n )\n connection.close()",
"def _queue_create(self, **kwargs):\n name = self.generate_random_name()\n return self.clients(\"zaqar\").queue(name, **kwargs)",
"def create_task(self, task_body, req_context):\n design_ref = task_body.get('design_ref', None)\n node_filter = task_body.get('node_filter', None)\n action = task_body.get('action', None)\n\n if design_ref is None or action is None:\n raise errors.InvalidFormat(\n 'Task creation requires fields design_ref, action')\n\n task = self.orchestrator.create_task(design_ref=design_ref,\n action=action,\n node_filter=node_filter,\n context=req_context)\n\n task.set_status(hd_fields.TaskStatus.Queued)\n task.save()\n return task",
"def create_task():",
"def addTask(queues, func, *args, **kwargs):\n if not isinstance(queues, list):\n queues = DEFAULT_QUEUES\n\n _raiseIfExists = kwargs.pop('_raiseIfExists', False)\n taskName = kwargs.pop('_name', None)\n countdown = kwargs.pop('_countdown', None)\n eta = kwargs.pop('_eta', None)\n target = kwargs.pop('_target', None)\n transactional = kwargs.pop('_transactional', False)\n retry_options = kwargs.pop('_retry_options', None)\n parent = kwargs.pop('_parent', None)\n\n if not target and BACKGROUND_MODULE:\n # Tasks from the default module are executed into the background module.\n # Tasks from other modules (stage, background) stays inside their module.\n if modules.get_current_module_name() == 'default':\n # Target mirror of current version to avoid compatibility issues\n # If that version does not exist, it will fall back to the default background version.\n target = modules.get_current_version_name() + '.' + BACKGROUND_MODULE\n\n success = False\n try:\n yield _defer(queues, func, args, kwargs, countdown, eta, taskName, target, transactional, retry_options, parent)\n success = True\n\n except (taskqueue.TaskAlreadyExistsError, taskqueue.TombstonedTaskError):\n # TaskAlreadyExistsError: a task with same name is in the queue\n # TombstonedTaskError: a task with same name has been in the queue recently\n if taskName:\n # If we specified a name it's to avoid duplicated so this error is expected\n logging.info(\"TaskAlreadyExistsError: task with name %s already enqueued.\", taskName)\n if _raiseIfExists:\n raise\n else:\n logging.exception(\"Could not enqueue the task\")\n except:\n logging.exception(\"Could not enqueue the task\")\n\n raise ndb.Return(success)",
"def create_task(self, name, value):\n pass",
"def test_queue_enqueue_command(runner, tmpworkdir, queue, target_factory): # pylint: disable=unused-argument\n\n atarget = target_factory.build(queue=queue)\n apath = Path('ips.txt')\n apath.write_text(f'{atarget.target}\\n \\n ', encoding='utf-8')\n\n result = runner.invoke(command, ['queue-enqueue', 'notexist', atarget.target])\n assert result.exit_code == 1\n\n result = runner.invoke(command, ['queue-enqueue', queue.name, atarget.target])\n assert result.exit_code == 0\n assert Queue.query.get(queue.id).targets[0].target == atarget.target\n\n result = runner.invoke(command, ['queue-enqueue', queue.name, '--file', apath])\n assert result.exit_code == 0\n assert len(Queue.query.get(queue.id).targets) == 2",
"def add_task(self, func, *args, **kwargs):\n self.queue.put((func, args, kwargs))",
"def create_task():\n client = RequestManager()\n client.set_method(\"POST\")\n client.set_endpoint(\"/projects/{0}/stories/{1}/tasks\".format(STORED_ID['project_id'], STORED_ID['story_id']))\n name = \"\".join(choices(string.ascii_letters, k=6))\n body = {\"description\": name}\n client.set_body(json.dumps(body))\n response = client.execute_request()\n try:\n STORED_ID['task_id'] = response.json()['id']\n except KeyError:\n LOGGER.info(response.json())",
"def newTask(name, description, assigner, id=None, priority=None, submitter_email=None, whose=None):\n if whose:\n user_id = jutdaapi.find_user(whose)\n if not user_id:\n raise ValueError('bad whose assignment: '+str(whose))\n #title = name + ' for: '+assigner.title()\n # that was the old scheme\n title = '('+assigner.title()+') '+name\n\n if priority != None:\n #priority = (int(priority) + 2) / 2\n priority = int(priority)\n RA_queue = 3\n #if assigner != 'no one':\n # description += '<tasktrackermeta assigner=\"'+assigner+'\"/>'\n if isinstance(id, str):\n description += '<tasktrackermeta id=\"'+id+'\"/>'\n ticket_id = jutdaapi.create_ticket(RA_queue, title, description,\n priority=priority, submitter_email=submitter_email)\n # Is there a race condition here? In this kind of database\n # I would assume not.\n time.sleep(1)\n ticket = jutdaapi.get_detailed_ticket(ticket_id)\n t = ticketToTask(ticket)\n return t",
"def queue_cloud_task(request):\n project = os.environ.get(\"PROJECT_ID\")\n queue = os.environ.get(\"QUEUE_NAME\")\n location = os.environ.get(\"QUEUE_REGION_LOCATION\")\n service_account_email = os.environ.get(\"SERVICE_ACCOUNT_EMAIL\")\n\n request_json = request.get_json()\n\n # the http endpoint the task will send to\n url = request_json.get('url')\n # the post data that should be forwarded to the http endpoint\n payload = request_json.get('payload')\n # the time in seconds to delay task execution\n in_seconds = request_json.get('in_seconds')\n # the unique name of the task we are queueing\n task_name = request_json.get('task_name')\n\n try:\n # Create a client.\n client = tasks_v2.CloudTasksClient()\n # Construct the fully qualified queue name.\n parent = client.queue_path(project, location, queue)\n except Exception as e:\n print(e)\n return f\"{e}\", 500\n\n # Construct the request body.\n task = {\n \"http_request\": { # Specify the type of request.\n \"http_method\": tasks_v2.HttpMethod.POST,\n \"url\": url,\n \"oidc_token\": {\"service_account_email\": service_account_email},\n }\n }\n if payload is not None:\n if isinstance(payload, dict):\n # Convert dict to JSON string\n payload = json.dumps(payload)\n # specify http content-type to application/json\n task[\"http_request\"][\"headers\"] = {\"Content-type\": \"application/json\"}\n\n # The API expects a payload of type bytes.\n converted_payload = payload.encode()\n\n # Add the payload to the request.\n task[\"http_request\"][\"body\"] = converted_payload\n\n if in_seconds is not None:\n # Convert \"seconds from now\" into an rfc3339 datetime string.\n d = datetime.datetime.utcnow() + datetime.timedelta(seconds=in_seconds)\n\n # Create Timestamp protobuf.\n timestamp = timestamp_pb2.Timestamp()\n timestamp.FromDatetime(d)\n\n # Add the timestamp to the tasks.\n task[\"schedule_time\"] = timestamp\n\n if task_name is not None:\n # Add the name to tasks.\n name = f\"projects/{project}/locations/{location}/queues/{queue}/tasks{task_name}\"\n task[\"name\"] = name\n\n try:\n # Use the client to build and send the task.\n response = client.create_task(request={\"parent\": parent, \"task\": task})\n return f\"Created task {response.name}\", 200\n except Exception as e:\n print(e)\n return f\"{e}\", 500",
"def queue(self, *args, **kwargs):\n queue_args = self._pop_tq_add_args(kwargs)\n app = queue_args.pop('app', None) or flask.current_app\n\n with app.test_request_context():\n # flask.url_for uses the request context if it is present\n # as we're most likely in a request context, use a\n # test_request_context() instead.\n url = self.url()\n\n payload = pickle.dumps((args, kwargs))\n\n taskqueue.add(\n url=url,\n queue_name=self.queue_name,\n payload=payload,\n **queue_args\n )",
"def DispatchTask(self, transactional=False):\n processed_params = dict([(str(key), value)\n for (key, value) in self.params.iteritems()])\n processed_params['transactional'] = transactional\n return taskqueue.add(**processed_params)",
"def msg_to_task(msg):\n if not isinstance(msg, dict):\n return None\n t = Task()\n t.args = msg[MessageBuilder.FIELD_DATA]\n t.isFault = msg[MessageBuilder.FIELD_ISF]\n t.seqNum = msg[MessageBuilder.FIELD_SEQNUM]\n t.timestamp = msg[MessageBuilder.FIELD_TIME]\n t.duration = msg[MessageBuilder.FIELD_DUR]\n t.cores = msg[MessageBuilder.FIELD_CORES] if MessageBuilder.FIELD_CORES in msg else None\n return t",
"def create_ingest_task(storage, task_queue):\n for filename in storage.list_files(prefix='build/'):\n t = IngestTask(\n chunk_path=storage.get_path_to_file('build/'+filename),\n chunk_encoding='npz',\n layer_path=storage.layer_path,\n )\n task_queue.insert(t)",
"def get(queue_name: str, **kwargs) -> Queue:\n return Queue(queue_name, **kwargs)",
"def creator(data, q):\n print('Creating data and putting it on the queue')\n for item in data:\n q.put(item)",
"def instantiate_queue(self):\n serialized_queue = self.cache.get('queue')\n queue = ast.literal_eval(serialized_queue.decode('utf-8'))\n return queue",
"def fusion_api_create_task(self, body, api=None, headers=None):\n return self.task.create(body, api, headers)",
"def executeTask (self, queue='default'):\n tasks = self.taskqueue_stub.GetTasks(queue)\n if tasks:\n task = tasks[0]\n self.taskqueue_stub.DeleteTask (queue, task['name'])\n params = base64.b64decode(task[\"body\"])\n if dict(task['headers']).get('Content-Type') == 'application/json':\n return self.testapp.post_json(task[\"url\"], json.loads(params))\n else:\n return self.testapp.post(task[\"url\"], params)",
"def build(arg_dict):\n\n task_item = Task()\n\n try:\n task_item.key = arg_dict['key']\n except KeyError:\n task_item.key = None\n\n try:\n task_item.title = arg_dict['title']\n except KeyError:\n task_item.title = None\n\n try:\n task_item.notes = arg_dict['notes']\n except KeyError:\n task_item.notes = None\n\n return task_item",
"def create_task(author, title, text, **kwargs):\n mc = MathContent(text=text)\n mc.save()\n task = Task(author=author, name=title, content=mc, **kwargs)\n task.save()\n return task",
"def derive_task(self, headers: Dict[str, Any]) -> \"Task\":\n new_task = Task(\n headers=headers,\n payload=self.payload,\n payload_persistent=self.payload_persistent,\n )\n return new_task",
"def create_task(*args, **kwargs):\n loop = asyncio.get_event_loop()\n return wrapped_create_task(loop.create_task, None, args, kwargs)",
"def push(self, *payloads, **task_args):\n tasks = [taskqueue.Task(payload=self.serializer.dumps(p),\n method='PULL',\n tag=self.tag,\n **task_args)\n for p in payloads]\n self.queue.add(tasks)",
"def create_transfer_tasks(\n task_queue, src_layer_path, dest_layer_path, \n chunk_size=None, shape=Vec(2048, 2048, 64), \n fill_missing=False, translate=(0,0,0), \n bounds=None, mip=0, preserve_chunk_size=True\n ):\n shape = Vec(*shape)\n vol = CloudVolume(src_layer_path, mip=mip)\n translate = Vec(*translate) // vol.downsample_ratio\n \n if not chunk_size:\n chunk_size = vol.info['scales'][mip]['chunk_sizes'][0]\n chunk_size = Vec(*chunk_size)\n\n try:\n dvol = CloudVolume(dest_layer_path, mip=mip)\n except Exception: # no info file\n info = copy.deepcopy(vol.info)\n dvol = CloudVolume(dest_layer_path, info=info)\n dvol.commit_info()\n\n dvol.info['scales'] = dvol.info['scales'][:mip+1]\n dvol.info['scales'][mip]['chunk_sizes'] = [ chunk_size.tolist() ]\n dvol.commit_info()\n\n create_downsample_scales(dest_layer_path, \n mip=mip, ds_shape=shape, preserve_chunk_size=preserve_chunk_size)\n \n if bounds is None:\n bounds = vol.bounds.clone()\n else:\n bounds = vol.bbox_to_mip(bounds, mip=0, to_mip=mip)\n bounds = Bbox.clamp(bounds, vol.bounds)\n\n total = int(reduce(operator.mul, np.ceil(bounds.size3() / shape)))\n for startpt in tqdm(xyzrange( bounds.minpt, bounds.maxpt, shape ), desc=\"Inserting Transfer Tasks\", total=total):\n task = TransferTask(\n src_path=src_layer_path,\n dest_path=dest_layer_path,\n shape=shape.clone(),\n offset=startpt.clone(),\n fill_missing=fill_missing,\n translate=translate,\n mip=mip,\n )\n task_queue.insert(task)\n task_queue.wait('Uploading Transfer Tasks')\n\n job_details = {\n 'method': {\n 'task': 'TransferTask',\n 'src': src_layer_path,\n 'dest': dest_layer_path,\n 'shape': list(map(int, shape)),\n 'fill_missing': fill_missing,\n 'translate': list(map(int, translate)),\n 'bounds': [\n bounds.minpt.tolist(),\n bounds.maxpt.tolist()\n ],\n 'mip': mip,\n },\n 'by': OPERATOR_CONTACT,\n 'date': strftime('%Y-%m-%d %H:%M %Z'),\n }\n\n dvol = CloudVolume(dest_layer_path)\n dvol.provenance.sources = [ src_layer_path ]\n dvol.provenance.processing.append(job_details) \n dvol.commit_provenance()\n\n if vol.path.protocol != 'boss':\n vol.provenance.processing.append(job_details)\n vol.commit_provenance()",
"def add_task(self, raw_message):\n if not raw_message:\n raise ValueError(\"No message set\")\n\n data = {\n \"content\": {\n \"raw\": raw_message,\n }\n }\n\n return Task(self.post(\"tasks\", data), **self._new_session_args)",
"def create_task(self, unused_parent, task, **kwargs):\n self.uri = task.get('app_engine_http_request').get('relative_uri')\n self.body = task.get('app_engine_http_request').get('body')\n logging.info('Task uri: %r', self.uri)\n logging.info('Task body: %r', self.body)\n return 'fake task'",
"def queue_maker(queue, bucket_name):\n scraper = key_scraper.KaleidoscopeKeyScraper(\n bucket_name=bucket_name,\n queue=queue,\n )\n scraper.add_keys_to_queue()\n\n return None"
] | [
"0.6768147",
"0.6623838",
"0.64467216",
"0.6429034",
"0.62594825",
"0.6179536",
"0.60559773",
"0.604819",
"0.6045625",
"0.6010803",
"0.5994922",
"0.5981026",
"0.59526443",
"0.5941117",
"0.5934588",
"0.5893707",
"0.5845482",
"0.5818808",
"0.5815683",
"0.5814882",
"0.5803863",
"0.57872736",
"0.57449853",
"0.5710992",
"0.5709317",
"0.5705543",
"0.5702503",
"0.56863916",
"0.56427145",
"0.56395704"
] | 0.662663 | 1 |
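A minimal usage sketch for the `create_task` helper in the record above, assuming the google-cloud-tasks client library is installed and an App Engine queue already exists; the project, queue, and location values below are placeholders, not values taken from the dataset.

```python
# Hypothetical invocation of create_task(); all identifiers are placeholders.
response = create_task(
    project="my-project-id",        # placeholder GCP project ID
    queue="my-appengine-queue",     # placeholder Cloud Tasks queue name
    location="us-central1",         # region where the queue lives
    payload={"param": "value"},     # dict payloads are sent as application/json
    in_seconds=60,                  # schedule the task roughly one minute out
)
# create_task() prints and returns the created Task; response.name identifies it.
```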
returns the value of self.desc; you probably want to use get_description instead | def get_desc(self):
    return self._desc | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_desc(self):\n return self.__desc",
"def get_description(self):",
"def _get_description(self):\n return self.__description",
"def _get_description(self):\n return self.__description",
"def get_description(self):\n pass",
"def get_description(self):\r\n return self.__description",
"def get_description(self):\n return self.description",
"def get_description(self):\n return self.description",
"def get_description(self):\n return self.description",
"def get_description(self):\n return self.description",
"def description(self):",
"def get_description(self):\n return self.__description",
"def get_description(self) -> str:\n pass",
"def getDescription(self):\n return self.description",
"def get_description(self):\n raise NotImplementedError",
"def description(self) :\n\t\ttry :\n\t\t\treturn self._description\n\t\texcept Exception as e:\n\t\t\traise e",
"def description(self) :\n\t\ttry :\n\t\t\treturn self._description\n\t\texcept Exception as e:\n\t\t\traise e",
"def get_description():\n raise NotImplementedError",
"def Description(self) -> str:",
"def Description(self) -> str:",
"def get_description(self):\n return self._description",
"def get_description(self):\n if self._visited is False:\n self._visited = True\n return self._desc\n else:\n return self._sdesc",
"def GetDescription(self):\n return str(self.description)",
"def description(self):\r\n if \"description\" in self.data:\r\n return self.data[\"description\"]\r\n return None",
"def _description(self):\n return None",
"def description(self):\n pass",
"def description(self):\n pass",
"def getDescription(self):\n return self.base.get(\"description\", [])",
"def getDescription(self):\n return self._description",
"def description(self) -> str:\n return self.data['description']"
] | [
"0.8956709",
"0.86876816",
"0.85753155",
"0.85753155",
"0.85631055",
"0.8487863",
"0.8434454",
"0.8434454",
"0.8434454",
"0.8434454",
"0.83120143",
"0.8276276",
"0.8232532",
"0.8210673",
"0.81978345",
"0.8197641",
"0.8197641",
"0.817225",
"0.8128614",
"0.8128614",
"0.8091071",
"0.8013247",
"0.79947674",
"0.79880446",
"0.79748005",
"0.79728264",
"0.79728264",
"0.79653984",
"0.7914",
"0.7877728"
] | 0.8705425 | 1 |
Takes a String and allows you to change the value stored in self.desc | def set_desc(self, desc: str):
    self._desc = desc | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_desc(self, item_desc):\r\n self.description = item_desc",
"def change_description(self, description):\n if type(description) == str:\n self.description = description\n elif description is None:\n self.description = None\n else:\n raise TypeError('str expect, not {}'.format(type(description)))",
"def set_description(desc):\n global last_description\n last_description = desc",
"def processDescrString(self):\n\t\tself.descrString = self._getVal(4, 1)",
"def set_description(self, desc: str) -> None:\n self.metadata.data[\"description\"] = desc",
"def set_description(self, data):\n self._description = self._uni(data)",
"def description(self, description) :\n\t\ttry :\n\t\t\tself._description = description\n\t\texcept Exception as e:\n\t\t\traise e",
"def SetDescription(self, description):\n self.description = str(description)",
"def description(self, value):\n self.definition.description = value",
"def set_sdesc(self, sdesc: str):\n self._sdesc = sdesc",
"def set_description(self, description):\r\n self.__description = description",
"def description(self, value):\n self._update_values('description', value)",
"def description(self, new_description):\r\n self.set({\"description\": new_description})",
"def add_description(self, desc):\n self.description = desc",
"def description(self, description):\n self._description = description",
"def description(self, description):\n self._description = description",
"def description(self, description):\n self._description = description",
"def description(self, description):\n self._description = description",
"def _set_desc(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'0 .. 64']}), default=unicode(\"\"), is_leaf=True, yang_name=\"desc\", rest_name=\"desc\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u\"Description of the user (default='')\", u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='string', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"desc must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'0 .. 64']}), default=unicode(\"\"), is_leaf=True, yang_name=\"desc\", rest_name=\"desc\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u\"Description of the user (default='')\", u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='string', is_config=True)\"\"\",\n })\n\n self.__desc = t\n if hasattr(self, '_set'):\n self._set()",
"def set_description(self, description):\n self.description = description",
"def description(self, newDescription=None):\n pass",
"def set_description(self, descr):\n self._current_test_descr = descr",
"def update_description(self, option, desc):\n _, command = self.__options[option]\n self.__options[option] = (desc, command)",
"def setAccessibleDescription(self, description: Union[AnyStr, QString]):",
"def set_description(self, description):\n self.__description = description",
"def create_descr(self, attr_name):",
"def setLabel(self, desc):\n self.label = desc",
"def description(self, description):\n\n self._description = description",
"def description(self, description):\n\n self._description = description",
"def description(self, description):\n\n self._description = description"
] | [
"0.76709884",
"0.7437435",
"0.7259118",
"0.72400737",
"0.7232622",
"0.72088665",
"0.7198149",
"0.7173733",
"0.70973545",
"0.7069758",
"0.7037778",
"0.70267266",
"0.70070595",
"0.6996479",
"0.69851184",
"0.69851184",
"0.69851184",
"0.69851184",
"0.69482446",
"0.69233495",
"0.6915471",
"0.6915412",
"0.6878546",
"0.6802371",
"0.6782797",
"0.67507166",
"0.672023",
"0.67107654",
"0.67107654",
"0.67107654"
] | 0.77175134 | 0 |
returns the value of self.sdesc; you probably want to use get_description instead | def get_sdesc(self):
    return self._sdesc | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_desc(self):\n return self.__desc",
"def get_desc(self):\n return self._desc",
"def get_description(self):",
"def _get_description(self):\n return self.__description",
"def _get_description(self):\n return self.__description",
"def get_description(self):\n if self._visited is False:\n self._visited = True\n return self._desc\n else:\n return self._sdesc",
"def get_description(self):\r\n return self.__description",
"def get_description(self):\n pass",
"def get_description(self):\n return self.description",
"def get_description(self):\n return self.description",
"def get_description(self):\n return self.description",
"def get_description(self):\n return self.description",
"def description(self) :\n\t\ttry :\n\t\t\treturn self._description\n\t\texcept Exception as e:\n\t\t\traise e",
"def description(self) :\n\t\ttry :\n\t\t\treturn self._description\n\t\texcept Exception as e:\n\t\t\traise e",
"def description(self):",
"def get_description(self):\n return self.__description",
"def get_description(self) -> str:\n pass",
"def getDescription(self):\n return self.description",
"def Description(self) -> str:",
"def Description(self) -> str:",
"def get_description(self):\n return self._description",
"def GetDescription(self):\n return str(self.description)",
"def get_description(self):\n raise NotImplementedError",
"def get_description():\n raise NotImplementedError",
"def description():",
"def description(self):\r\n if \"description\" in self.data:\r\n return self.data[\"description\"]\r\n return None",
"def _description(self):\n return None",
"def description(self):\n pass",
"def description(self):\n pass",
"def getDescription(self):\n return self._description"
] | [
"0.8508528",
"0.82894135",
"0.81323934",
"0.8102514",
"0.8102514",
"0.80323124",
"0.8008092",
"0.79686874",
"0.79250777",
"0.79250777",
"0.79250777",
"0.79250777",
"0.78419924",
"0.78419924",
"0.7821938",
"0.78067356",
"0.77630645",
"0.77029717",
"0.7696475",
"0.7696475",
"0.7660701",
"0.76493216",
"0.762613",
"0.76066345",
"0.7593535",
"0.7534146",
"0.7485029",
"0.7480354",
"0.7480354",
"0.748025"
] | 0.87275475 | 0 |
Takes a String and allows you to change the value stored in self.sdesc | def set_sdesc(self, sdesc: str):
    self._sdesc = sdesc | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_desc(self, desc: str):\n self._desc = desc",
"def processDescrString(self):\n\t\tself.descrString = self._getVal(4, 1)",
"def set_desc(self, item_desc):\r\n self.description = item_desc",
"def change_description(self, description):\n if type(description) == str:\n self.description = description\n elif description is None:\n self.description = None\n else:\n raise TypeError('str expect, not {}'.format(type(description)))",
"def set_description(desc):\n global last_description\n last_description = desc",
"def set_description(self, sDescription):\n\t\tcall_sdk_function('PrlVirtNet_SetDescription', self.handle, sDescription)",
"def set_description(self, data):\n self._description = self._uni(data)",
"def SetDescription(self, description):\n self.description = str(description)",
"def description(self, description) :\n\t\ttry :\n\t\t\tself._description = description\n\t\texcept Exception as e:\n\t\t\traise e",
"def set_description(self, desc: str) -> None:\n self.metadata.data[\"description\"] = desc",
"def set_description(self, sNewDescription):\n\t\tcall_sdk_function('PrlVmDev_SetDescription', self.handle, sNewDescription)",
"def description(self, value):\n self.definition.description = value",
"def set_description(self, description):\r\n self.__description = description",
"def description(self, description):\n self._description = description",
"def description(self, description):\n self._description = description",
"def description(self, description):\n self._description = description",
"def description(self, description):\n self._description = description",
"def _set_desc(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'0 .. 64']}), default=unicode(\"\"), is_leaf=True, yang_name=\"desc\", rest_name=\"desc\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u\"Description of the user (default='')\", u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='string', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"desc must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'0 .. 64']}), default=unicode(\"\"), is_leaf=True, yang_name=\"desc\", rest_name=\"desc\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u\"Description of the user (default='')\", u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='string', is_config=True)\"\"\",\n })\n\n self.__desc = t\n if hasattr(self, '_set'):\n self._set()",
"def description(self, value):\n self._update_values('description', value)",
"def set_description(self, descr):\n self._current_test_descr = descr",
"def description(self, new_description):\r\n self.set({\"description\": new_description})",
"def setAccessibleDescription(self, description: Union[AnyStr, QString]):",
"def set_event_desc(self, event):\n self.set_form_or_event_attribute(\"desc\", self.lhs, event)\n self.msg(\"Desc of event set to:\\n%s\" % self.lhs)",
"def update_description(self, option, desc):\n _, command = self.__options[option]\n self.__options[option] = (desc, command)",
"def set_description(self, description):\n self.description = description",
"def setLabel(self, desc):\n self.label = desc",
"def description(self, newDescription=None):\n pass",
"def setDescription(self, valueName, valueDescription):\n\t\tself.settings[valueName][1] = valueDescription",
"def add_description(self, desc):\n self.description = desc",
"def set_description(self, sNewShareDescription):\n\t\tcall_sdk_function('PrlShare_SetDescription', self.handle, sNewShareDescription)"
] | [
"0.7194806",
"0.7135903",
"0.7020856",
"0.68636084",
"0.67902267",
"0.6778444",
"0.66278446",
"0.66240335",
"0.6601598",
"0.65420234",
"0.6499118",
"0.6461897",
"0.641746",
"0.6398002",
"0.6398002",
"0.6398002",
"0.6398002",
"0.638523",
"0.63843817",
"0.63764817",
"0.6338989",
"0.6328392",
"0.6307533",
"0.62987876",
"0.6281241",
"0.6252965",
"0.62507254",
"0.62286484",
"0.62166125",
"0.6200882"
] | 0.7977611 | 0 |
If the visited bool is False, this returns desc. If the visited bool is True, this returns sdesc | def get_description(self):
    if self._visited is False:
        self._visited = True
        return self._desc
    else:
        return self._sdesc | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_sdesc(self):\n return self._sdesc",
"def _get_desc(self):\n return self.__desc",
"def get_descr_short(self):\n desc_text = []\n stack = [(self._desc, 0)]\n while stack:\n dl, di = stack.pop()\n while di < len(dl):\n if dl[di] == 0:\n di += 1\n elif dl[di] > 0 and dl[di] < 100000:\n desc_text.append(\"%06d\" % dl[di])\n elif dl[di] >= 100000 and dl[di] < 200000:\n desc_text.append(\"%06d LOOP\" % dl[di])\n elif dl[di] >= 200000 and dl[di] < 300000:\n desc_text.append(\"%06d OPER\" % dl[di])\n elif dl[di] >= 300000 and dl[di] < 400000:\n desc_text.append(\"%06d SEQ\" % dl[di])\n di += 1\n return desc_text",
"def testViewingDesc(self):\n\n self.assertTrue(\n hasattr(self.node, 'viewing_desc')\n )\n\n self.assertEqual(\n None,\n self.node.viewing_desc\n )",
"def get_desc(self):\n return self._desc",
"def testDesc(self):\n\n self.assertTrue(\n hasattr(self.node, 'desc')\n )\n\n self.assertEqual(\n [],\n self.node.desc\n )",
"def testDesc(self):\n\n self.assertTrue(\n hasattr(self.node, 'desc')\n )\n\n self.assertEqual(\n [],\n self.node.desc\n )",
"def testDesc(self):\n\n self.assertTrue(\n hasattr(self.node, 'desc')\n )\n\n self.assertEqual(\n [],\n self.node.desc\n )",
"def testDesc(self):\n\n self.assertTrue(\n hasattr(self.node, 'desc')\n )\n\n self.assertEqual(\n [],\n self.node.desc\n )",
"def testViewingDesc(self):\n\n self.assertTrue(\n hasattr(self.node, 'viewing_desc')\n )\n\n self.assertEqual(\n None,\n self.node.viewing_desc\n )\n\n self.node.viewing_desc = 'Darker with a tinge of blah'\n\n self.assertEqual(\n 'Darker with a tinge of blah',\n self.node.viewing_desc\n )",
"def describe():",
"def get_description(self):\n if self.state == 0:\n return self.pre_action_des\n elif self.state == 1:\n return self.in_action_des\n else:\n return self.post_action_des",
"def getDescriptors(self, dsc = \"\"):\r\n return \"\"",
"def get_graph_summary(self):\n\n pass",
"def description():",
"def describe(self) -> str:",
"def describe(self, *args, **kwargs):\n\t\treturn self.data.describe(*args, **kwargs)",
"def description(self):",
"def explain(self):",
"def _desc_op(attr_name):",
"def get_descr_full(self):\n desc_text = []\n stack = [(self._desc, 0)]\n while stack:\n dl, di = stack.pop()\n while di < len(dl):\n if dl[di] == 0:\n di += 1\n elif dl[di] > 0 and dl[di] < 100000:\n desc_text.append(str(self._tables.tab_b[dl[di]]))\n di += 1\n elif dl[di] >= 100000 and dl[di] < 200000:\n lm = dl[di] // 1000 - 100\n ln = dl[di] % 1000\n desc_text.append(\"%06d : LOOP, %d desc., %d times\" % (dl[di], lm , ln))\n di += 1\n elif dl[di] >= 200000 and dl[di] < 300000:\n en = self._tables.tab_c.get(dl[di])\n am = dl[di] // 1000 - 200\n an = dl[di] % 1000\n if en is None:\n en = (str(am), \"\")\n if dl[di] < 222000:\n desc_text.append(\"%06d : OPERATOR %s: %d\" % (dl[di], en[0], an))\n else:\n desc_text.append(\"%06d : OPERATOR '%s'\" % (dl[di], en[0]))\n di += 1\n elif dl[di] >= 300000 and dl[di] < 400000:\n stack.append((dl, di + 1))\n da = dl[di]\n dl = self._tables.tab_d[dl[di]]\n di = 0\n desc_text.append(\"%06d : SEQUENCE, %d desc.\" % (da, len(dl)))\n return desc_text",
"def get_discovery_summary():\n pass",
"def getDescByValue(self, value):\n pass",
"def do_do_desc(self, address):\n start = self.ParseAddressExpr(address)\n if ((start & 1) == 1): start = start - 1\n DescriptorArray(FixedArray(self.heap, None, start)).Print(Printer())",
"def description(self):\n return self.visual_desc",
"def succ(self):\n return [ self.simple_reflection(i) for i in self.descents(positive=True) ]",
"def get_description(self):",
"def set_sdesc(self, sdesc: str):\n self._sdesc = sdesc",
"def get_description(self) -> str:\n\t\treturn get_subnode_description(self.name)",
"def desc(self):\n return self._changeset.get('desc', None)"
] | [
"0.63786477",
"0.5819069",
"0.56570476",
"0.54884607",
"0.5439245",
"0.5368486",
"0.5368486",
"0.5368486",
"0.5368486",
"0.5300903",
"0.52831614",
"0.5195914",
"0.51588786",
"0.5158566",
"0.51578665",
"0.5157608",
"0.51489806",
"0.51314163",
"0.51264346",
"0.5101942",
"0.5099895",
"0.5061283",
"0.5050732",
"0.5047797",
"0.5031741",
"0.5015089",
"0.4995696",
"0.49788907",
"0.49714553",
"0.49677038"
] | 0.74315256 | 0 |
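A minimal sketch of the first-visit/return-visit description pattern that the positive document above (and the neighbouring desc/sdesc records) implements; the Room class name and constructor here are assumptions for illustration, not taken from the original project.

```python
class Room:
    """Illustrative room with a long description (desc) and a short one (sdesc)."""

    def __init__(self, desc: str, sdesc: str):
        self._desc = desc      # shown the first time the room is described
        self._sdesc = sdesc    # shown on every later visit
        self._visited = False

    def get_description(self) -> str:
        # First call: mark the room as visited and return the long description.
        if self._visited is False:
            self._visited = True
            return self._desc
        # Subsequent calls: return the short description.
        return self._sdesc


room = Room("A dusty hall stretches before you.", "The dusty hall.")
assert room.get_description() == "A dusty hall stretches before you."
assert room.get_description() == "The dusty hall."
```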
sets the value of the visited bool to the given value, either True or False | def set_visited(self, visited: bool):
    self._visited = visited | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_visited(self):\n self.__visited = True",
"def SetVisited(self, visited=True):\r\n\r\n self._visited = visited",
"def virtual_flag(self, value):\n if not isinstance(value, bool):\n raise TypeError(\"virtual_flag must be bool.\")\n self._virtual_flag = value",
"def _setForBinding (self, value):\n if not isinstance(value, bool):\n raise TypeError(value)\n self.__forBinding = value\n return value",
"def set(self, boolean):\n self._val = boolean",
"def setBit(self,i,boolval):\n self.boolVals[i]=boolval",
"def set_is_watering(valve: Valve, value: bool) -> None:\n valve.is_watering = value",
"def set_has_fan(self, value: bool = True):\r\n self._logger.info(log_message_formatter(\r\n \"set\", f\"{self}\", \"has_fan\", value))\r\n self._has_fan = value",
"def activated(self, value: bool) -> None:\n\n if not isinstance(value, bool):\n raise TypeError(f\"<value> should be {bool}, {type(value)} given.\")\n\n self._activated = value",
"def _setForDocument (self, value):\n if not isinstance(value, bool):\n raise TypeError(value)\n self.__forDocument = value\n return value",
"def set_bools(self, value, bools, limit):\n for x in range(limit):\n if value & 1 << x:\n bools[x]['value'] = True\n else:\n bools[x]['value'] = False\n pass",
"def set(self, value):\n if value == self.value:\n return False\n self.value = value\n return True",
"def set_flag(self, new):\n self.flag = new",
"def log(self, value):\n\n if isinstance(value, bool) or value is None:\n self.__log = value",
"def light(self, value: bool | int, /) -> None:",
"def set(self, attr, value=True):\n if type(value) == bool:\n self.__dict__['_'+attr] = value\n print attr, \"set to\", value\n else:\n print 'Value must be a bool, either \"True\" or \"False\" (no quotes)!'",
"def _setBoolFeature(self, valueToSet):\n\n errorCode = VimbaDLL.featureBoolSet(self._handle,\n self._name,\n valueToSet)\n if errorCode != 0:\n raise VimbaException(errorCode)",
"def setFlag(self, flag, value) -> None:\n ...",
"def writeBoolean(self, value: bool):\n self.writeByte(1 if value else 0)",
"def write(writer: BitStreamWriter, value: bool) -> None:\n\n writer.writeBool(value)",
"def set_seen_op(self, boolean):\n\n self.seen_op = boolean",
"def fungible(self, value):\n if value is not None:\n self._fungible = True if value else False",
"def set_boolean(x):\n\n if x:\n return \"True\"\n else:\n return \"False\"",
"def export(self, value):\n \n self._export = bool(value)",
"def val(self, new_val: bool) -> None:\n if type(new_val) != bool:\n raise TypeError(f\"Invalid literal {new_val} with type '{new_val.__class__.__name__}' for parameter 'new_val'\")\n self._val: bool = new_val\n return",
"def reset_visited(self):\n self.__visited = False",
"def set_bool_value(self, event):\n\n self.undo_add()\n\n key_list = list(self.patch.engine.misc_data.keys())\n key = key_list[self.selected_index]\n data = self.patch.engine.misc_data[key]\n\n if self.ValueEnabled.GetValue():\n self.patch.misc[key] = data['on']\n else:\n self.patch.misc[key] = data['off']\n\n self.is_modified(True)\n self.misclist_update_row(self.selected_index)",
"def set_gateway(self, bool_value):\n self.chkbtn_gateway.set(bool_value)",
"def put(self):\n self._val = True",
"def update_visited(self):\n\t\tcount = self.visited\n\t\tcount = count + 1\n\t\tself.visited = count"
] | [
"0.6711036",
"0.6412504",
"0.61133474",
"0.6041632",
"0.603861",
"0.59796745",
"0.59701663",
"0.5952818",
"0.5856937",
"0.5839396",
"0.58373827",
"0.5834938",
"0.57633555",
"0.57626206",
"0.5750486",
"0.5733883",
"0.57196856",
"0.5628653",
"0.5622766",
"0.5595729",
"0.55891633",
"0.5588501",
"0.5566048",
"0.5557302",
"0.55178386",
"0.5515008",
"0.55077153",
"0.550459",
"0.54582137",
"0.5420417"
] | 0.6772577 | 0 |
takes an Object object's ID and adds it to this room's self.objects; if the ID already exists inside the list, this method raises an exception | def add_object(self, obj: str):
    if obj not in self._objects:
        self._objects.append(obj)
    else:
        raise IDAlreadyExists | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add(self, object):\n if not object:\n self.save()\n return\n if not hasattr(object, 'id') or not object.id:\n raise ValueError(\"The model must be saved before add\")\n if not self.object_type:\n self.object_type = str(object._meta.object_name)\n elif str(object._meta.object_name) != self.object_type:\n raise ValueError(\"Model type don't match\")\n if self.objects_id:\n already_objects = self.objects_id.split(';')\n else:\n already_objects = []\n if str(object.id) in already_objects:\n return\n already_objects.append(str(object.id))\n self.objects_id = self._convertListToString(already_objects)\n self.save()",
"def addobj(self, obj):\n self._objslock.acquire()\n if obj.objid in self._objs:\n self._objslock.release()\n raise KeyError(\"non-unique EMANE object id %s for %s\" % (obj.objid, obj))\n self._objs[obj.objid] = obj\n self._objslock.release()",
"def add_object(self, obj):\n\t\tself.objects.append(obj)",
"def add_object(self, obj):\n self._objects.append(obj)",
"def addObject(self,object):\n object.screen = self.screen\n object.parent = self\n self.addList.append(object)",
"def add(self, obj: object) -> None:\n self._contains.append(obj)",
"def add(self, game_obj):\r\n self.game_objects_for_adding.append(game_obj)",
"def add(self, obj):\n ID = id(obj)\n self.pDict[ID] = obj\n return ID",
"def _add_object(self, object_dict):\n # Attempt to map the object first. This will raise an\n # ItemExistsError if a named object of the same type already\n # exists.\n self._add_object_to_map(self.append_key, object_dict)\n\n # Add the object to the end of the model.\n # TODO: which objects need added to the beginning?\n self.model_dict[self.append_key] = object_dict\n\n # Update append key.\n self._update_append_key()",
"def addidfobject(self, new_object):\n key = new_object.key.upper()\n self.idfobjects[key].append(new_object)\n self._reset_dependant_vars(\"idfobjects\")",
"def add(self, obj: model.IdentifiableArtefact):\n for field, field_info in direct_fields(self.__class__).items():\n # NB for some reason mypy complains here, but not in __contains__(), below\n if isinstance(\n obj, get_args(field_info.outer_type_)[1], # type: ignore [attr-defined]\n ):\n getattr(self, field)[obj.id] = obj\n return\n raise TypeError(type(obj))",
"def _add_rooms(self):\r\n rooms = self.model.get_all_rooms()\r\n\r\n for room in rooms:\r\n self._add_room(room)",
"def add(self, obj):\n raise NotImplementedError",
"def add_object(self, object_to_be_added):\n new_mapping = Map.add_object(self.id, object_to_be_added)\n if new_mapping:\n object_to_be_added.save()\n new_mapping.ref_id = object_to_be_added.id\n return True\n else:\n return False",
"def store_object(self, _object):\n\n # replace an existing list member, else, append\n\n index = [self.object_store.index(_object_) for _object_ in self.object_store if _object_.LocalID == _object.LocalID]\n\n if index != []:\n\n self.object_store[index[0]] = _object\n\n #if self.settings.LOG_VERBOSE: logger.debug('Updating a stored object: %s in region \\'%s\\'' % (_object.FullID, self.region.SimName))\n\n else:\n\n self.object_store.append(_object)\n\n #if self.settings.LOG_VERBOSE: logger.debug('Stored a new object: %s in region \\'%s\\'' % (_object.LocalID, self.region.SimName))",
"def addObjectsToGroup(self):\n\t\tmc.delete( self.objects, ch = True )\n\t\tmc.parent( self.objects, self.grp.name )\n\t\tmc.makeIdentity( self.objects, apply=True,t=1,r=1,s=1,n=2)\n\t\t#self.lockObjects()",
"def _add_id_color(self, id_objects, id_color, parent):\n\n id_item = ElementGroupItem(id_color, parent, self)\n id_item.color_button.clicked.connect(lambda:\n self._add_items_to_color(id_item))\n\n for obj in id_objects:\n object_item = ElementItem(obj, id_item)\n\n if self.items_dict.get(obj, None) is None:\n self.items_dict[obj] = {parent: object_item}\n else:\n self.items_dict[obj][parent] = object_item\n\n return",
"def add_object(self, obj):\n if self.it_fits(obj):\n self.content.append(obj)\n return self\n else:\n raise Exception(f\"Object {obj.name} does not fit on the box\")",
"def get_object_by_id(self, object_list, object_id):\n obj = None\n for i in object_list:\n if i.get_id() == object_id:\n obj = i\n break\n return obj",
"def associateObject (self, obj):\n self.__associatedObjects.add(obj)",
"def append(self, obj):\r\n raise NotImplementedError",
"def id_in_list(obj_list, sb_object):\n if __debug__:\n print(\"Checking if sb_object in list...\")\n for sb_objects in obj_list:\n if sb_object.ID == sb_objects.ID:\n if __debug__:\n print(\"Object in list.\")\n return True\n if __debug__:\n print(\"Object not in list\")\n return False",
"def create_object(self):\n i = 0\n for i in range(0, self.objects_numbers):\n self.list_objects.insert(i, Obj(self, i))",
"def add_boid(self, new_boid):\r\n self.collection.append(new_boid)",
"def add_to_list (self, video_id):\n return self._update_my_list(video_id=video_id, operation='add')",
"def append(self, object):\n self.data['object'].append(object)\n self.data['id'].append(self.start_id)\n for col in self.cols:\n if col != 'object' and col != 'id':\n self.data[col].append(None)\n self.start_id += 1\n return self",
"def append(self, object):\r\n raise NotImplementedError()",
"def _add_object_to_map(self, model_key, object_dict):\n # Grab reference to the object sub-dict.\n object_map = self.model_map['object']\n\n # Get type of object.\n obj_type = object_dict['object']\n\n # Define key object pair\n key_obj = [model_key, object_dict]\n\n # If this type isn't in the map, add it. NOTE: this can lead to\n # empty entries if the object isn't named.\n if obj_type not in object_map:\n object_map[obj_type] = {}\n\n try:\n # Never try to map an already existing named object.\n if object_dict['name'] in object_map[obj_type]:\n s = '{} already exists in the {} map!'\n raise ItemExistsError(s.format(object_dict['name'], obj_type))\n\n except KeyError:\n # Unnamed object. Add it to the unnamed list.\n self.model_map['object_unnamed'].append(key_obj)\n\n else:\n # Named object, map it.\n object_map[obj_type][object_dict['name']] = key_obj\n\n # No need to return; we're directly updating self.model_map",
"def add_object(self, object):\n object.save()",
"def addObject(self, new_object_location):\n\n # store new object location\n self.objects[self.nextObjectID] = new_object_location\n\n # initialize frame_counts for when new object is undetected\n self.lost[self.nextObjectID] = 0\n\n self.nextObjectID += 1"
] | [
"0.68665564",
"0.67305875",
"0.65298045",
"0.6167127",
"0.6132322",
"0.60848",
"0.6058289",
"0.6034722",
"0.6028659",
"0.59585255",
"0.5952439",
"0.5893816",
"0.581697",
"0.5816542",
"0.581456",
"0.581387",
"0.58077705",
"0.57872295",
"0.5765092",
"0.5715964",
"0.5690903",
"0.56703943",
"0.566265",
"0.5662307",
"0.56411344",
"0.5640877",
"0.56184906",
"0.558908",
"0.5539477",
"0.55371356"
] | 0.7379597 | 0 |
takes an Object object's ID and attempts to remove it from this room's self.objects. If the ID does not exist inside the list, this method raises an exception | def remove_object(self, obj: str):
    if obj in self._objects:
        self._objects.remove(obj)
    else:
        raise IDDoesNotExist | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove(self, object):\n if not hasattr(object, 'id') or not object.id:\n raise ValueError(\"The model must have an ID\")\n if str(object._meta.object_name) != self.object_type:\n raise ValueError(\"Model type don't match\")\n already_objects = self.objects_id.split(';')\n if str(object.id) in already_objects:\n already_objects.remove(str(object.id))\n self.objects_id = self._convertListToString(already_objects)\n self.save()",
"def remove(self, _id):\n if self.objects.get(_id):\n self.objects.pop(_id)",
"def removeObject(self, objectID):\n del self.objects[objectID]\n del self.lost[objectID]",
"def remove(self, obj: Viewable) -> None:\n new_objects = list(self)\n new_objects.remove(obj)\n self.objects = new_objects",
"def remove_object(self, obj):\n pass",
"def removeObject(self,object):\n self.removeList.append(object)",
"def remove(self, game_obj):\r\n self.game_objects_for_removal.append(game_obj)",
"def remove_object_from_store(self, ID = None):\n\n victim = self.get_object_from_store(LocalID = ID)\n\n if victim == None:\n victim = self.get_avatar_from_store(LocalID = ID)\n\n # if we do not know about this object, pass\n if victim == None or victim == []:\n return\n\n # this is an avatar\n if victim.PCode == PCodeEnum.Avatar:\n\n self.kill_stored_avatar(ID)\n\n # this is a Primitive\n elif victim.PCode == PCodeEnum.Primitive:\n\n self.kill_stored_object(ID)\n\n else:\n\n if self.settings.LOG_VERBOSE and self.settings.ENABLE_OBJECT_LOGGING:\n logger.debug(\"Not processing kill of unstored object type %s\" % (PCodeEnum(victim.PCode)))",
"def remove_from_grid(self, object_id, remove_from_carrier=True):\n # Remove object first from grid\n grid_obj = self.get_env_object(object_id) # get the object\n loc = grid_obj.location # its location\n\n self.__grid[loc[1], loc[0]].remove(grid_obj.obj_id) # remove the object id from the list at that location\n if len(self.__grid[loc[1], loc[0]]) == 0: # if the list is empty, just add None there\n self.__grid[loc[1], loc[0]] = None\n\n # Remove object from the list of registered agents or environmental objects\n # Check if it is an agent\n if object_id in self.__registered_agents.keys():\n # Check if the agent was carrying something, if so remove property from carried item\n for obj_id in self.__registered_agents[object_id].is_carrying:\n self.__environment_objects[obj_id].carried_by.remove(object_id)\n\n # Remove agent\n success = self.__registered_agents.pop(object_id,\n default=False) # if it exists, we get it otherwise False\n\n # Else, check if it is an object\n elif object_id in self.__environment_objects.keys():\n # remove from any agents carrying this object if asked for\n if remove_from_carrier:\n # If the object was carried, remove this from the agent properties as well\n for agent_id in self.__environment_objects[object_id].carried_by:\n obj = self.__environment_objects[object_id]\n self.__registered_agents[agent_id].is_carrying.remove(obj)\n\n # Remove object\n success = self.__environment_objects.pop(object_id,\n default=False) # if it exists, we get it otherwise False\n else:\n success = False # Object type not specified\n\n if success is not False: # if succes is not false, we successfully removed the object from the grid\n success = True\n\n if self.__verbose:\n if success:\n print(f\"@{os.path.basename(__file__)}: Succeeded in removing object with ID {object_id}\")\n else:\n print(f\"@{os.path.basename(__file__)}: Failed to remove object with ID {object_id}.\")\n\n return success",
"def remove(self, ID):\n i = 0\n for i in range(0, len(self.__lst)):\n if self.__lst[i].getId() == ID:\n self.__lst.pop(i)\n return\n raise ValueError(\"Nu exista disciplina\")",
"def remove_object(cls, object_to_be_removed):\n cls.query.filter_by(x=object_to_be_removed.x,\n y=object_to_be_removed.y).delete()",
"def process_object(self, obj):\n\n # if the object is not our list neplatic, do nothing\n if obj.get('list') != 'neplatic':\n return \n # check if this item is already on the list of our objects to sync,\n # if so, remove it from there (as not to add duplicates)\n try:\n self.objlist.remove( obj.get('address') )\n except ValueError:\n # not on list -> delete -- the object doesn't exist in our objects to sync\n obj.delete()\n except Exception:\n # does not have an attribute address -> continue\n return",
"def remove(obj_objectid_or_path_tuple):",
"def removeObject(self):\n\t\tfor SelectedItem in self.objects_lw.selectedItems():\n\t\t\tself.objects_lw.takeItem(self.objects_lw.row(SelectedItem) )",
"def remove_object(self, object_to_be_removed):\n Map.remove_object(object_to_be_removed)\n object_to_be_removed.query.delete()",
"def delete_object(self, object_id: str) -> bool:\n del self.objects[object_id]",
"def delete(self, obj=None):\n if not obj:\n return\n key = \"{}.{}\".format(type(obj).__name__, obj.id)\n if key in self.__objects:\n del self.__objects[key]\n self.save()",
"def remove(self):\n self.workspace.client._perform_empty(\n \"DELETE\", \"/workspaces/%s/objects/%s\" % (self.workspace.workspace_key, self.data['id']))",
"def delete(self, obj=None):\n if obj is not None:\n key = \"{}.{}\".format(type(obj).__name__, obj.id)\n try:\n del self.__objects[key]\n except KeyError:\n pass",
"def remove(self,object):\n if object in self.cell.objects:\n self.cell.objects.remove(object)\n else:\n self.cell.tempObjects.remove(object)\n self.cell.setChanged()",
"def removeItem(self, object):\n if object in self.database:\n self.database.remove(object)\n return \"Usunięto z bazy.\"\n else:\n return \"Nie znaleziono w bazie.\"",
"def removeidfobject(self, idfobject):\n key = idfobject.key.upper()\n self.idfobjects[key].remove(idfobject)\n self._reset_dependant_vars(\"idfobjects\")",
"def remove(self, id_obj=None, query_data=None):\n if id_obj:\n return self.collection.remove(id_obj, query_data)\n return self.collection.remove(query_data)",
"def remove_object(self, name):\n name = name if isinstance(name, str) else name.name\n for obj in self._objects:\n if name == obj.name:\n logger.debug('Removing object with name \"{}\"'.format(name))\n self._objects.remove(obj)",
"def _delObject(self, id, dp=1, suppress_events=False):\n ob = self._getOb(id)\n\n compatibilityCall('manage_beforeDelete', ob, ob, self)\n\n if not suppress_events:\n notify(ObjectWillBeRemovedEvent(ob, self, id))\n\n self._objects = tuple([i for i in self._objects\n if i['id'] != id])\n self._delOb(id)\n\n # Indicate to the object that it has been deleted. This is\n # necessary for object DB mount points. Note that we have to\n # tolerate failure here because the object being deleted could\n # be a Broken object, and it is not possible to set attributes\n # on Broken objects.\n try:\n ob._v__object_deleted__ = 1\n except Exception:\n pass\n\n if not suppress_events:\n notify(ObjectRemovedEvent(ob, self, id))\n notifyContainerModified(self)",
"def object_delete(self, object_name, object_id):\n cmd = self.object_cmd(object_name, 'list')\n cmd_delete = self.object_cmd(object_name, 'delete')\n if object_id in self.cinder(cmd):\n self.cinder(cmd_delete, params=object_id)",
"def processDeleteCommand(self, objId):\n editor = self._parent\n obj = editor.findWithUUID(objId)\n if obj:\n print(\"DELETE FOR\",objId)\n # delete from object cache\n if objId in editor._total['objects']:\n del editor._total['objects'][objId]\n # clear uuid\n obj.opensim.uuid = \"\"\n scene = editor.get_current_scene()\n # unlink\n scene.objects.unlink(obj)\n editor.queueRedraw()",
"def obj_delete(self, request=None, **kwargs):\n self.get_collection(request).remove({ \"_id\": ObjectId(kwargs.get(\"pk\")) })",
"def remove_from_list (self, video_id):\n return self._update_my_list(video_id=video_id, operation='remove')",
"def remove(self, obj):\n self._pkcache.pop(obj.pk, None)\n for ctype in obj._content_types:\n if obj.pk in self._typecache[ctype]:\n self._typecache[ctype].pop(obj.pk, None)"
] | [
"0.7728423",
"0.71416676",
"0.7120008",
"0.69277936",
"0.6895414",
"0.6843761",
"0.6624404",
"0.656321",
"0.65363413",
"0.64806545",
"0.6465507",
"0.64554214",
"0.6446992",
"0.6391789",
"0.63457537",
"0.6327873",
"0.6278356",
"0.6277022",
"0.6270044",
"0.6269895",
"0.6269221",
"0.62543094",
"0.6222839",
"0.61986715",
"0.6170009",
"0.61664206",
"0.6079499",
"0.6046418",
"0.6038634",
"0.60017616"
] | 0.7596361 | 1 |
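A minimal sketch of the add/remove-by-ID pattern shown in the two records above; the IDAlreadyExists and IDDoesNotExist exception classes are assumed to be simple custom exceptions defined elsewhere in the original project, so placeholder definitions are included here.

```python
class IDAlreadyExists(Exception):
    """Placeholder for the project's duplicate-ID exception."""


class IDDoesNotExist(Exception):
    """Placeholder for the project's missing-ID exception."""


class Room:
    def __init__(self):
        self._objects: list = []   # IDs of Object instances in this room

    def add_object(self, obj: str):
        if obj not in self._objects:
            self._objects.append(obj)
        else:
            raise IDAlreadyExists

    def remove_object(self, obj: str):
        if obj in self._objects:
            self._objects.remove(obj)
        else:
            raise IDDoesNotExist


room = Room()
room.add_object("obj-001")
room.remove_object("obj-001")   # removing it again would raise IDDoesNotExist
```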
takes a feature's ID and adds it to this room's self.features; if the ID already exists inside the list, this method raises an exception | def add_feature(self, feat: str):
    if feat not in self._features:
        self._features.append(feat)
    else:
        raise IDAlreadyExists | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add(self, feature):\n \n if self.bo is not None:\n feature.attach(self.bo)\n \n bo_feature_name = feature.name\n \n if bo_feature_name not in self._d_features:\n log.info(\"Add feature '%s'\" % feature)\n self._d_features[bo_feature_name] = feature\n return(True)\n else:\n log.error(\"Feature name '%s' ever exists - you must delete it before\" % bo_feature_name)\n return(False)",
"def _add_feature(self, feature):\n\n if feature.name in self.feature_name_index:\n logger.info(\"Feature %s already exists at %i, overwriting\" %\n (feature.name, self.feature_name_index[feature.name]))\n self.features[self.feature_name_index[feature.name]] = feature\n else:\n self.features.append(feature)\n self.feature_name_index[feature.name] = len(self.features) - 1\n logger.info(\"Adding %s to model at location %i\" % (\n feature.name, len(self.features)))\n self._add_domain_fault_above(feature)\n self._add_unconformity_above(feature)\n feature.set_model(self)",
"def add_feature(self, feature):\n # type: (Any) -> int\n # A copy of self.feature_names is always made, because it might be\n # \"owned\" by someone else.\n # It's possible to make the copy only at the first call to\n # self.add_feature to improve performance.\n idx = self.n_features\n if isinstance(self.feature_names, (list, np.ndarray)):\n self.feature_names = list(self.feature_names)\n self.feature_names.append(feature)\n elif isinstance(self.feature_names, dict):\n self.feature_names = dict(self.feature_names)\n self.feature_names[idx] = feature\n elif self.feature_names is None:\n self.feature_names = {idx: feature}\n self.n_features += 1\n return idx",
"def create(self, new_feature):\n\n all_data = self._load()\n\n if self.id_field not in new_feature and\\\n self.id_field not in new_feature['properties']:\n new_feature['properties'][self.id_field] = str(uuid.uuid4())\n\n all_data['features'].append(new_feature)\n\n with open(self.data, 'w') as dst:\n dst.write(json.dumps(all_data))",
"def add_feature(self, feature):\n self.features += [feature]\n for stock in self.stocks:\n feature(self.stock_data[stock])",
"def attach_feature(self, feature):\r\n\r\n # Filter out literally identical features\r\n if feature in self._features:\r\n return # the feature is already present\r\n\r\n # Filter out functionally identical features.\r\n # Features may use their on_attach method to raise\r\n # toolbox.AlreadyThere if they detect that some\r\n # installed feature does the same thing already\r\n attach = getattr(feature, 'on_attach', None)\r\n if attach is not None:\r\n try:\r\n attach(self)\r\n except toolbox.AlreadyThere:\r\n return\r\n self.execute_callbacks_times.setdefault(feature, 0)\r\n #it would be nice if we could require a specific class instead of\r\n #a \"workalike\" so we could do actual error checking\r\n #if not isinstance(feature, toolbox.Feature):\r\n # raise TypeError(\"Expected gof.toolbox.Feature instance, got \"+\\\r\n # str(type(feature)))\r\n\r\n # Add the feature\r\n self._features.append(feature)",
"def add_feature(self, feat: Feature) -> None:\n self.data_features.append(feat)",
"def create(self, new_feature):\n all_data = self._load()\n\n # Hijack the feature id and make sure it's unique\n new_feature['id'] = str(uuid.uuid4())\n\n all_data['features'].append(new_feature)\n\n with open(self.path, 'w') as dst:\n dst.write(json.dumps(all_data))",
"def test_add_feature(self):\n fc1 = self.read_feature()\n fc2 = self.read_feature('Aegean_Sea')\n\n # add a feature already in the feature collection\n fc1.add_feature(fc1.features[0])\n assert len(fc1.features) == 1\n\n # add a new feature to the feature collection\n fc1.add_feature(fc2.features[0])\n assert len(fc1.features) == 2\n\n self.check_feature(fc1.features[0])\n self.check_feature(fc1.features[1], expected_name='Aegean Sea')",
"def add_feature(self, featureName):\n newFeature = {\n \"name\": featureName,\n \"isRange\" : False\n }\n\n self.features.append(newFeature)",
"def update(self, identifier, new_feature):\n all_data = self._load()\n for i, feature in enumerate(all_data['features']):\n if feature['id'] == identifier:\n # ensure new_feature retains id\n new_feature['id'] = identifier\n all_data['features'][i] = new_feature\n break\n\n with open(self.path, 'w') as dst:\n dst.write(json.dumps(all_data))",
"def remove_feature(self, feat: str):\n if feat in self._features:\n self._features.remove(feat)\n else:\n raise IDDoesNotExist",
"def getFeatureByGMLid(self, gmlid):\n raise NotImplementedError",
"def add_feature(request):\n if request.method == \"POST\":\n if request.user.is_staff:\n form = StaffRequestFeatureForm(request.POST)\n else:\n form = RequestFeatureForm(request.POST)\n if form.is_valid():\n ticket = form.save(commit=False)\n ticket.created_by = request.user\n ticket.ticket_type = \"Feature\"\n ticket.save()\n messages.success(\n request, 'Your feature has been requested successfully.')\n return redirect('feature', featureid=ticket.pk)\n\n return render(request, 'addfeature.html', {'form': form})\n\n if request.user.is_staff:\n form = StaffRequestFeatureForm()\n else:\n form = RequestFeatureForm()\n return render(request, 'addfeature.html', {'form': form})",
"def update(self, identifier, new_feature):\n\n all_data = self._load()\n for i, feature in enumerate(all_data['features']):\n if self.id_field in feature:\n if feature[self.id_field] == identifier:\n new_feature['properties'][self.id_field] = identifier\n all_data['features'][i] = new_feature\n elif self.id_field in feature['properties']:\n if feature['properties'][self.id_field] == identifier:\n new_feature['properties'][self.id_field] = identifier\n all_data['features'][i] = new_feature\n with open(self.data, 'w') as dst:\n dst.write(json.dumps(all_data))",
"def add_to_list (self, video_id):\n return self._update_my_list(video_id=video_id, operation='add')",
"def register(cls, feature_name, feature):\n if feature_name in cls.feature_registry:\n raise FeatureAlreadyRegistered(feature_name)\n cls.feature_registry[feature_name] = feature",
"def addtocart(request, featureid):\n\n cart = request.session.get('cart', {})\n if featureid not in cart:\n\n cart[featureid] = {\n 'id': featureid,\n 'contrib_amount': request.POST['contribution_amount']\n }\n\n else:\n\n messages.error(\n request, 'You\\'re already contributing to this feature.')\n return redirect('/tickets/feature/'+featureid)\n\n request.session['cart'] = cart\n\n return redirect(\"cart\")",
"def add_feature(request):\n\n r = {}\n if request.POST.get('code','000') == 'ch00seW199Er':\n # pick a random location\n featured_already = Featured.objects.all().values('location')\n locations = Location.objects.exclude(id=1).exclude(id__in=featured_already).exclude(name__iregex=r'[\\w# ]+(wash|washer|dryer|dyer)[\\w# ]*').filter(type=Location.EATERY)\n features = sample(locations, 10)\n i = randint(0,9)\n selected = features[i]\n tomorrow = date.today()+timedelta(1)\n \n f = Featured(location=selected, \n day=tomorrow,\n description=\"50 cents off if you transact here today\",\n amount=0.5,\n expires=datetime(tomorrow.year, tomorrow.month, tomorrow.day, 13,59))\n f.save() \n r['result'] = {'location': selected.name, 'loc_id': selected.id}\n else:\n r['result'] = '-1'\n return JSONHttpResponse(r)",
"def add_features(self, obj, annotation):\n if annotation['problem']:\n obj.add(folia.Feature, subset='problem', cls=annotation['problem'])\n if annotation['pos']:\n obj.add(folia.Feature, subset='pos', cls=annotation['pos'])",
"def add_feature_group(self, feature_group: FeatureGroupBase, *args, **kwargs):\n self.features_group_list.append(feature_group)",
"def add_feature(layer, branchID, segs, lines, lon, lat, Ttime, density, Initial_loc, solubility, flows, concentration, water_level, dist): \r\n ctr=0\r\n for i in range(len(lines)):\r\n ctr+=1\r\n point = osgeo.ogr.Geometry(osgeo.ogr.wkbPoint)\r\n # Add points individually to the line\r\n #xy = lines[i]\r\n \r\n #line.AddPoint_2D(xy[0][0],xy[0][1])\r\n #line.AddPoint_2D(xy[1][0],xy[1][1])\r\n point.AddPoint(lon[i], lat[i])\r\n # Update the feature with the line data\r\n featureIndex = ctr\r\n feature = osgeo.ogr.Feature(layerDefinition)\r\n #feature.SetStyleString(\"PEN(c:r,w:5px)\") \r\n feature.SetGeometry(point)\r\n feature.SetFID(featureIndex)\r\n feature.SetGeometryDirectly(point)\r\n \r\n # Set the attribute table\r\n feature.SetField('BranchID', int(branchID[i])) \r\n feature.SetField('SegID', int(segs[i])) # convert to int() is necessary, osgeo cannot recognize numpy int32 type\r\n feature.SetField('Lon', \"{:.3f}\".format(lon[i]))\r\n feature.SetField('Lat', \"{:.3f}\".format(lat[i]))\r\n #feature.SetField('Lon_east', \"{:.3f}\".format(eastlon[i]))\r\n #feature.SetField('Lat_east', \"{:.3f}\".format(eastlat[i]))\r\n feature.SetField('T (day)', int(Ttime[i]))\r\n feature.SetField('Density', density[i])\r\n feature.SetField('Initial', Initial_loc[i])\r\n feature.SetField('Solubility', solubility[i])\r\n feature.SetField('Flow', flows[i])\r\n feature.SetField('C (mg/L)', concentration[i])\r\n feature.SetField('WSE (ft)', water_level[i])\r\n feature.SetField('D (ft)', dist[i])\r\n \r\n layer.CreateFeature(feature)",
"def create_from_feature_list(self, features): \n for f in features:\n featuretype = f.pop('featuretype', None)\n if featuretype is None:\n raise LoopException\n if featuretype == 'strati':\n self.create_and_add_foliation(f)\n # if featuretype == 'fault':\n # self.create_and_add_fault(f)\n if featuretype == 'folded_strati':\n self.create_and_add_folded_foliation(f)",
"def create(feature, bo=None):\n if feature is None:\n features = BOFeatures(bo)\n return(features)\n \n else:\n \n if feature.is_collection:\n return(feature)\n else:\n features = BOFeatures(bo)\n features.add(feature)\n return(features)",
"def _get_feature2field(self):\n fea_id = 0\n for names in self.feature_names:\n if names is not None:\n for name in names:\n self.feature2id[name] = fea_id\n fea_id += 1\n\n if self.fields is None:\n field_id = 0\n for key, value in self.feature2id.items():\n self.feature2field[self.feature2id[key]] = field_id\n field_id += 1\n else:\n for key, value in self.fields.items():\n for v in value:\n try:\n self.feature2field[self.feature2id[v]] = key\n except:\n pass",
"def add_feature(X, feature_to_add):\n from scipy.sparse import csr_matrix, hstack\n return hstack([X, csr_matrix(feature_to_add).T], 'csr')",
"def add_to_feature_set(self, feature_set, edge, feature_name, value=1):\n if feature_name is None:\n return False\n\n else:\n feature_name = self.__set_final_name(feature_name)\n\n if not feature_set.is_locked:\n feature_index = feature_set.get(feature_name, None)\n\n if feature_index is None:\n feature_index = len(feature_set)\n feature_set[feature_name] = feature_index\n print_verbose(\"Feature map: {} == {} -- _1st_ value: {}\".format(str(feature_index), feature_name, str(value)))\n\n edge.features[feature_index] = value\n return True\n\n else:\n feature_index = feature_set.get(feature_name, None)\n\n if feature_index is not None:\n edge.features[feature_index] = value\n return True\n else:\n return False",
"def test_add_to_blacklist(self):\n\n self.feature_test.add_to_blacklist(3)\n self.assertTrue(3 in Feature(\"testing\").blacklist)",
"def addLight(self, id):\r\n\t\t\r\n\t\tnewLight = Light(id)\r\n\t\tself.lights[id] = newLight",
"def geojson(self, feature_id):\n if self._geojson.get('id', feature_id) == feature_id:\n return self._geojson\n else:\n geo = self._geojson.copy()\n geo['id'] = feature_id\n return geo"
] | [
"0.68805796",
"0.6844957",
"0.6421802",
"0.6395068",
"0.63879305",
"0.6179667",
"0.6063365",
"0.6020336",
"0.59701157",
"0.59400314",
"0.5881402",
"0.58373827",
"0.57476234",
"0.5714467",
"0.5696737",
"0.5640279",
"0.5621728",
"0.561404",
"0.5570973",
"0.5481455",
"0.5402501",
"0.5384136",
"0.53673065",
"0.5366095",
"0.5343665",
"0.53313196",
"0.53085643",
"0.52838",
"0.5260409",
"0.52508193"
] | 0.7566028 | 0 |
takes a feature's ID and attempts to remove it from this room's self.features. If the ID does not exist inside the list, this method raises an exception | def remove_feature(self, feat: str):
if feat in self._features:
self._features.remove(feat)
else:
raise IDDoesNotExist | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_feature(self, feature):\r\n try:\r\n self._features.remove(feature)\r\n except Exception:\r\n return\r\n detach = getattr(feature, 'on_detach', None)\r\n if detach is not None:\r\n detach(self)",
"def remove(self, ID):\n i = 0\n for i in range(0, len(self.__lst)):\n if self.__lst[i].getId() == ID:\n self.__lst.pop(i)\n return\n raise ValueError(\"Nu exista disciplina\")",
"def delete(self, identifier):\n all_data = self._load()\n for i, feature in enumerate(all_data['features']):\n if feature['id'] == identifier:\n all_data['features'].pop(i)\n break\n\n with open(self.path, 'w') as dst:\n dst.write(json.dumps(all_data))",
"def delete(self, identifier):\n\n all_data = self._load()\n for i, feature in enumerate(all_data['features']):\n if self.id_field in feature:\n if feature[self.id_field] == identifier:\n all_data['features'].pop(i)\n elif self.id_field in feature['properties']:\n if feature['properties'][self.id_field] == identifier:\n all_data['features'].pop(i)\n with open(self.data, 'w') as dst:\n dst.write(json.dumps(all_data))",
"def remove_feature(self, name):\n logging.info('removing feature %s' % name)\n self.fguide.remove(name)\n self.train.pop(name)\n self.test.pop(name)",
"def remove_from_list (self, video_id):\n return self._update_my_list(video_id=video_id, operation='remove')",
"def remove(self, feature_type):\n with self._map_lock.write_lock():\n del self._feature2memory[feature_type]",
"def do_delete(self, **kwargs) -> dict[str, str]:\n # TODO(jrobbins): implement undelete UI. For now, use cloud console.\n feature_id = kwargs.get('feature_id', None)\n feature = self.get_specified_feature(feature_id=feature_id)\n if feature is None:\n return {'message': 'ID does not match any feature.'}\n\n user = users.get_current_user()\n app_user = AppUser.get_app_user(user.email())\n if ((app_user is None or not app_user.is_admin)\n and user.email() != feature.creator_email):\n self.abort(403)\n feature.deleted = True\n feature.put()\n rediscache.delete_keys_with_prefix(FeatureEntry.feature_cache_prefix())\n\n # Write for new FeatureEntry entity.\n feature_entry: FeatureEntry | None = (\n FeatureEntry.get_by_id(feature_id))\n if feature_entry:\n feature_entry.deleted = True\n feature_entry.put()\n\n return {'message': 'Done'}",
"def delete (self):\n self.binary_features_model._remove_feature(self)\n del Feature._cache[(self.binary_features_model, self.name)]\n self.binary_features_model = None",
"def delete_feature(self, dataset, fid):\n uri = URITemplate(\n self.baseuri + '/{owner}/{did}/features/{fid}').expand(\n owner=self.username, did=dataset, fid=fid)\n return self.session.delete(uri)",
"def remove_feature(self, name):\n logging.info('removing feature %s' % name)\n self.fguide.remove(name)\n self.dataset.pop(name)",
"def delete_feature(self, feature):\r\n cmd = DeleteFeatureCommand(self._delete_feature, self._set_features, self._features, feature)\r\n self.get_invoker().store_and_execute(cmd)",
"def remove(self, name):\n if name in self.other_sections:\n raise AttributeError('cannot remove feature from sections: %s' % (\n ', '.join(self.other_sections)))\n elif name in self.key:\n raise AttributeError('cannot remove features in key')\n\n # Check all other sections and remove from each where it appears.\n removed = 0\n for section in self.feature_sections:\n try:\n getattr(self, section).remove(name)\n removed += 1\n except KeyError:\n pass\n\n if not removed:\n raise KeyError(\"feature '%s' not in feature guide\" % name)",
"def __delitem__(self, feature):\n self[feature] = None",
"def delete(self, request, feature_slug, guest_id):\n feature = get_object_or_404(Feature, slug=feature_slug)\n\n num_removed: int = feature.guest_queue.remove(guest_id)\n if num_removed:\n error_code = status.HTTP_204_NO_CONTENT\n session_store = get_session_store(guest_id)\n session_store.flush()\n async_to_sync(state.broadcast_feature_state)(feature)\n\n else:\n error_code = status.HTTP_404_NOT_FOUND\n\n return Response(status=error_code)",
"def remove(self, *args):\n return _libsbml.ListOfSpeciesFeatures_remove(self, *args)",
"def remove(self, *args):\n return _libsbml.SubListOfSpeciesFeatures_remove(self, *args)",
"def removefromcart(request, featureid):\n cart = request.session.get('cart', {})\n\n if featureid in cart:\n\n del cart[featureid]\n messages.success(request, \"Feature removed\")\n\n request.session['cart'] = cart\n\n return redirect(reverse('cart'))",
"def delete(self, feature):\n if (self._isFIdx(feature)):\n self.data = np.delete(self.data, self._getFIdx(feature), axis=1)\n self.featureNames = np.delete(self.featureNames, self._getFIdx(feature))\n return 0",
"def remove(self, ID):\n if ID in self.pDict:\n del self.pDict[ID]",
"def delete_features(in_features):\r\n for in_feature in in_features:\r\n if arcpy.Exists(in_feature):\r\n try:\r\n arcpy.Delete_management(in_feature)\r\n except arcpy.ExecuteError:\r\n arcpy.AddWarning(\"Error deleting temporary %s. Program will \"\r\n \"continue.\" % in_feature)",
"def remove_item(self, item_id):\n\t\tself.todolist.remove(item_id) \n\t\tstore = self.store\n\t\tfor row in store:\n\t\t\tif row[0] == item_id:\n\t\t\t\tstore.remove(row.iter)\n\t\t\t\tbreak",
"def unregister_feature(self, feature_name):\n self.disco_info.remove_feature(feature_name)",
"def removeSpot(self, ID):\n for spot in self.parkingSpots:\n if spot.id == ID:\n self.parkingSpots.remove(spot)\n #for i in range(len(self.parkingSpots)): # relabel all spots to keep the id numbers\n # self.parkingSpots[i].id = i # representative of the number of spots\n return\n raise Exception(\"No spot with given id \" + str(ID) + \" found.\")",
"def removeNeighbor(self, neighborID):",
"def remove_by_id(self,nodeid,verbose=False):\n self.remove(self[nodeid],verbose=verbose)",
"def remove(self, _id):\n if self.objects.get(_id):\n self.objects.pop(_id)",
"def test_remove_from_blacklist(self):\n\n self.feature_test.add_to_blacklist(3)\n self.feature_test.remove_from_blacklist(3)\n self.assertFalse(3 in Feature(\"testing\").blacklist)",
"def remove(table, id_):\n\n removed = False\n\n for item in table:\n if item[0] == id_[0]:\n table.remove(item)\n removed = True\n\n if not removed:\n ui.print_error_message(\"There isn't a game with such ID!\")\n return table",
"def unsetId(self):\n return _libsbml.SubListOfSpeciesFeatures_unsetId(self)"
] | [
"0.6875988",
"0.6782476",
"0.64399135",
"0.6266873",
"0.61839545",
"0.61799604",
"0.6138365",
"0.6099943",
"0.6070698",
"0.6038895",
"0.60361105",
"0.59816796",
"0.5923942",
"0.5885377",
"0.58838093",
"0.5869145",
"0.5850429",
"0.5808127",
"0.57137704",
"0.56388587",
"0.56224364",
"0.56032974",
"0.5555947",
"0.5510534",
"0.55040383",
"0.54950017",
"0.54665756",
"0.54587114",
"0.5452884",
"0.54521173"
] | 0.78996176 | 0 |
takes a list of ids and sets self.features to the list | def set_features(self, features: list):
self._features = features | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_features(self, features):\n self.features_ = list(features)",
"def ids(self, ids):\n self._ids = ids",
"def features(self, features):\n\n self._features = features",
"def set_features(self, features: np.ndarray):\n self.features = features",
"async def async_set_features(self, features):\n self._features = features",
"def get_locations_by_ids(self, id_list):",
"def get_map_features(self, ids, maps):\n maps = self.backbone(maps)\n\n for i in range(batch_size):\n sample_size = (ids == i).sum()\n sample_map = maps[i].repeat(sample_size, 1)\n \n # concatenate the group of sample maps\n if i == 0:\n map_features = sample_map \n else:\n map_features = torch.cat((map_features, sample_map), dim=0)\n \n return map_features",
"def load_features(self, features):\n pass\n # self.features = features",
"def setId(self, *args):\n return _libsbml.SubListOfSpeciesFeatures_setId(self, *args)",
"def featuresets(self, featuresets):\n\n self._featuresets = featuresets",
"def _get_feature_ids(self):\n # return union of all used features by slave classifiers\n feature_ids = set([])\n for clf in self.__clfs:\n feature_ids = feature_ids.union(set(clf.ca.feature_ids))\n return list(feature_ids)",
"def features(self, features: List[Feature]):\n if features is None:\n raise ValueError(\"Invalid value for `features`, must not be `None`\") # noqa: E501\n\n self._features = features",
"def setFeatureVector(self, features, entity1=None, entity2=None):\n self.features = features\n self.entity1 = entity1\n self.entity2 = entity2\n self.tokenFeatures = {}",
"def map_feature_ids_to_restaurants(feature_list, rest_id_to_int):\n\n\tinvert_mapping = dict()\n\tfor (rest_id, feature) in rest_id_to_int.items():\n\t\tif feature not in feature_list:\n\t\t\tcontinue\n\n\t\tif feature not in invert_mapping:\n\t\t\tinvert_mapping[feature] = list()\n\n\t\tinvert_mapping[feature].append(rest_id)\n\n\trest_list = list()\n\tfor feature in feature_list:\n\t\trest_list += invert_mapping[feature]\n\n\treturn rest_list",
"def _int64_list_feature(values):\n if not isinstance(values, collections.Iterable):\n values = [values]\n\n return tf.train.Feature(int64_list=tf.train.Int64List(value=values))",
"def _int64_list_feature(values):\n if not isinstance(values, collections.Iterable):\n values = [values]\n return tf.train.Feature(int64_list=tf.train.Int64List(value=values))",
"def __init__(\n self, features, collection_id\n ):\n self.features = features\n self.id = collection_id",
"def idf_object_features_set(set_id):\n # idf for calc features of new docs\n # object-features for learning model\n # doc_index links doc_id and row index in object-features\n # lemma_index links lemmas and column index in object-features\n\n # get lemmas of all docs in set\n docs = db.get_lemmas_freq(set_id)\n\n # document frequency - number of documents with lemma\n doc_freq = {}\n # number (sum of weights) of lemmas in document\n doc_size = {}\n # index of lemma in overall list\n lemma_index = {}\n # lemma counter in overall list\n lemma_counter = 0\n # document index\n doc_index = {}\n # document counter in overall list\n doc_counter = 0\n\n for doc_id in docs:\n # initialize doc_size\n doc_size[doc_id] = 0\n # add document in overall list by giving index\n doc_index[doc_id] = doc_counter\n doc_counter += 1\n # count lemmas of doc\n for lemma in docs[doc_id]:\n # increase number of docs with lemma\n doc_freq[lemma] = doc_freq.get(lemma, 0) + 1\n # increase number of lemmas in document\n doc_size[doc_id] += docs[doc_id][lemma]\n\n # compute idf\n idf = {}\n for lemma in doc_freq:\n idf[lemma] = - math.log(doc_freq[lemma]/doc_counter)\n\n # and lemmas add in overall list by giving index\n for lemma in idf:\n if idf[lemma] != 0:\n lemma_index[lemma] = lemma_counter\n lemma_counter += 1\n\n # initialization objects-features matrix\n object_features = np.zeros((doc_counter, lemma_counter))\n\n # fill objects-features matrix\n for doc_id in docs:\n doc_lemmas = docs[doc_id]\n for lemma in doc_lemmas:\n if lemma_index.get(lemma, -1) != -1:\n object_features[doc_index[doc_id], lemma_index[lemma]] = \\\n doc_lemmas[lemma] / doc_size[doc_id] * idf[lemma]\n\n # check features with 0 for all documents\n feat_max = np.sum(object_features, axis=0)\n # print_lemmas(set_id, [k for k, v in enumerate(feat_max) if v == 0], lemma_index, idf)\n # check documents with 0 for all lemmas\n # print(np.min(np.sum(object_features, axis=1)))\n\n # save to db: idf, indexes and object_features\n db.put_training_set_params(set_id, idf, doc_index, lemma_index, object_features)\n\n # print(idf)\n # print(doc_index)\n # print(lemma_index)\n # print(object_features)",
"def learn_ids(self, item_list):\n self._reset_sequence()\n for item in item_list:\n key = self.nondup_key_for_item(item)\n self.ids[key] = item[self.id_key]",
"def features(self, features: List[Feature]):\n for feature in features:\n if not isinstance(feature, Feature):\n raise Exception(\"object type is not a Feature: \" + str(type(feature)))\n\n for key in list(self._fields.keys()):\n if isinstance(self._fields[key], Feature):\n del self._fields[key]\n\n if features is not None:\n self._add_fields(features)",
"def convert_examples_to_features(self, examples_paths, label_list, max_seq_length, tokenizer, set_type):\n \n if all([os.path.exists(path.replace('examples', 'features')) for path in examples_paths]):\n features_paths = examples_paths\n \n else:\n\n def f(example):\n labels_ids = torch.FloatTensor(example.label).unsqueeze(0).to(torch.int64)\n input_ids = torch.FloatTensor(example.text_a).unsqueeze(0).to(torch.int64)\n #attention_mask = torch.ones(input_ids.size()).to(torch.int64)\n attention_mask = torch.FloatTensor(example.text_b).unsqueeze(0).to(torch.int64)\n token_type_ids = torch.zeros(input_ids.size()).to(torch.int64)\n output_mask = (labels_ids != -100)\n return InputFeatures(input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n label_ids=labels_ids,\n output_mask=output_mask)\n\n for index_split, examples_split in enumerate(examples_paths):\n split = self.load_object(examples_split)\n print(f\"Computing split {index_split+1} / {self.n_splits}... Split size: {len(split)}\")\n features = Parallel(n_jobs=-1)(delayed(f)(example) for example in tqdm(split))\n self.save_object(os.path.join(self.dataset_dir, f'{self.dataset_name}{set_type}_features_split-{index_split}.pkl'), features)\n\n features_paths = [os.path.join(self.dataset_dir, f'{self.dataset_name}{set_type}_features_split-{index_split}.pkl') for index_split in range(self.n_splits)]\n \n return features_paths",
"def train(self, features, labels):\n pass",
"def match(self, features_list):\n moraic_ids = [\n moraic_id\n for moraic_id, moraic_details in self.morae.items()\n if set(features_list).issuperset(set(moraic_details['features']))\n ]\n return moraic_ids",
"def convert_ids_to_people(self, ids):\n return [ self.voters[i] for i in ids ]",
"def set_to_features(X_set):\n ext = Extractor()\n features = []\n for i in range(len(X_set)):\n print(i, \" out of \", len(X_set))\n bag_of_features = [ext.extract(X_set[i][j]) for j in range(len(X_set[i]))]\n\n features.append(bag_of_features)\n\n return features",
"def create_tasks(\n self,\n feature_ids: List,\n features: List,\n ) -> None:\n for i, feature_id in enumerate(feature_ids):\n task = Task(self, feature_id, features[i])\n self.tasks.append(task)\n self.numberOfTasks = len(self.tasks)",
"def add_features(self, fbids):\n if not fbids:\n warnings.warn(\"No fbids provided.\")\n return False\n feats = self.name_synonym_lookup(fbids)\n proc_names = [f._asdict() for f in feats.values()]\n for d in proc_names:\n d['synonyms'] = '|'.join(d['synonyms'])\n statement = \"MERGE (n:Feature:Class { short_form : line.fbid } ) \" \\\n \"SET n.label = line.symbol SET n.synonyms = split(line.synonyms, '|') \" \\\n \"SET n.iri = 'http://flybase.org/reports/' + line.fbid\" # Why not using ni? Can kbw have switch to work via csv?\n self.commit_via_csv(statement, proc_names)\n self.addTypes2Neo(fbids)\n return feats",
"def get_features_from_file(self):\n f_list = []\n f = open(\"verifiability_features.txt\", \"r\")\n for line in f:\n f_list.append(line)\n self.features = f_list",
"def get_features_from_file(self):\n f_list = []\n f = open(\"verifiability_features.txt\", \"r\")\n for line in f:\n f_list.append(line)\n self.features = f_list",
"def feature_subset(self,node,db,labels,ids):\n return None"
] | [
"0.7332479",
"0.65602887",
"0.65116227",
"0.6437067",
"0.61490893",
"0.61423177",
"0.611747",
"0.60845536",
"0.6038654",
"0.600164",
"0.5947493",
"0.5946477",
"0.5907041",
"0.58642536",
"0.5661323",
"0.5651587",
"0.56424946",
"0.5610698",
"0.55820936",
"0.55676144",
"0.55540377",
"0.55153275",
"0.5497487",
"0.5472341",
"0.5461921",
"0.54524",
"0.5446726",
"0.54286116",
"0.54286116",
"0.5413962"
] | 0.6994589 | 1 |
takes a person's ID and adds it to this room's self.persons; if the ID already exists inside the list, this method raises an exception | def add_person(self, per: str):
if per not in self._people:
self._people.append(per)
else:
raise IDAlreadyExists | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_person(self, person):\n # Check if the person is an instance of the Person class\n # and if the room has a vacancy, then allocate\n # TODO: Perform SQL Query\n # if isinstance(person, Person) and has_vacancy(self):\n # self.occupants.append(person)\n pass",
"def add_person_to_household(household_id, person_id):\n logger.debug(\"=====add_person_to_household fired...\")\n client = create_client()\n\n response = client.update_item(TableName=\"code_word_households\",\n Key={'householdID': {'S': household_id}},\n UpdateExpression=\"ADD people :element\",\n ExpressionAttributeValues={\":element\": {\"SS\": [person_id]}})\n\n return response",
"def add_person(self, person: Person):\n self._workers.append(person)\n # logging.info('{} has been added to the list of employees'.\n # format(person.give_name()))",
"def add_person(room_id, person=None, isModerator='false'):\n\n url = 'https://api.ciscospark.com/v1/memberships'\n headers = {'Authorization': 'Bearer '+context.get('spark.CISCO_SPARK_PLUMBERY_BOT')}\n payload = {'roomId': room_id,\n 'personEmail': person,\n 'isModerator': isModerator }\n response = requests.post(url=url, headers=headers, data=payload)\n\n if response.status_code != 200:\n print(response.json())\n raise Exception(\"Received error code {}\".format(response.status_code))",
"def add_partner(self, other_person,s):\n if self.number_of_partners == 0:\n #no longer single!\n s.number_of_singles -= 1\n s.singles.remove(self.identifier)\n if other_person.identifier not in self.current_partners:\n #only add if not already in a partnership\n self.number_of_partners += 1\n self.current_partners.add(other_person.identifier)",
"def add_people(self, people_list):\n\n for person in people_list:\n self.add_person(person)",
"def person(self, person_id):\r\n return persons.Person(self, person_id)",
"def add_person(self, name):\n\n if name not in self.nodes:\n # Be careful not to just add them a second time -- otherwise,\n # if we accidentally added someone twice, we'd clear our their list\n # of friends!\n self.nodes[name] = PersonNode(name)",
"def add(self, item):\n if item.email and item.email not in [i.email for i in self.lst]:\n self.lst.append(item)\n else:\n print(\"WARN: Recipient not added because a recipient with that email address already exists: {}\", item)",
"def remove_person(self, per: str):\n if per in self._people:\n self._people.remove(per)\n else:\n raise IDDoesNotExist",
"def add_member(self, persona):\n if persona not in self.members:\n self.members.append(persona)",
"def add_person(self, name):\n self.lst.append(name) # adds the name to the list and extends it whenever a person joins the queue\n print(f\"{name} has been added to the queue\") # Constast time O(1) to just add a pointer",
"def add_person(self, person):\n\n self.nodes.add(person)",
"def __ui_add_new_person(self):\n person_id = int(input(\"ID: \"))\n person_name = input(\"Name: \").strip()\n person_phone_number = input(\"Phone number: \").strip()\n self.__person_service.service_add_person(person_id, person_name, person_phone_number)\n print(\"Person successfully added to your agenda!\\n\")",
"def person_id(self, person_id):\n\n self._person_id = person_id",
"def add_person_to_db(self):\n fullname = self.AddPerson.add_person_to_db(self.sql)\n if fullname:\n self.fullname.setText(fullname)\n # likely same name as before so no triggered search\n self.search_people_by_name()",
"def add_members(id): # pylint: disable=I0011,W0622\n\n l = Legacy.query.get_or_404(id)\n\n if current_app.config.get('IGNORE_AUTH') is not True: # pragma: no cover\n if l.owner_id != g.user.id:\n raise Http403('Access denied')\n\n if not l.can_modify(g.user.id):\n raise Http403('Access denied')\n\n if request.json is None or 'members' not in request.json:\n raise NoData('\"members\" was not specified')\n\n member_list = request.json['members']\n\n if not isinstance(member_list, list):\n raise IncorrectData('\"members\" was not a valid list')\n\n for member in member_list:\n member = str(member)\n\n if not bool(re.match(r'^\\d+$', member)):\n member = Person.query.filter_by(email=member).first()\n\n if member is None:\n # TODO: Invite member\n # TODO: Queue a task to assign member on signup\n\n continue\n else:\n member = Person.query.get(member)\n\n if member is None:\n raise IncorrectData('Member not found')\n\n # TODO: Queue a task to assign member on acceptance\n\n # TODO: Remove following after member confirmation is done\n l.members.append(member)\n\n # TODO: Remove following after member confirmation is done\n l.save()\n\n return {}",
"def add_person_to_task():\n # get values from user\n responses = accept_inputs([\"Person\", \"Task label\"])\n # get the person's ID\n id = query_with_results(\"select id from person where name = ?\", [responses[\"Person\"]])[0][0]\n # insert into db\n query_no_results(\"insert into task_person_pair (person, task) values(?, ?)\", [id, responses[\"Task label\"]])\n print(\"%s added to task %s\" % (responses[\"Person\"], responses[\"Task label\"]))",
"def addParticipant(self, participant):\n if len(self.participants) < self.maxParticipants:\n self.participants[participant.discordId] = participant\n else:\n raise ValueError('Max number of participants has been reached')",
"def add_person(self, name, email, typ, wants_accomodation='N'):\n if typ == \"FELLOW\":\n if not email in self.all_persons.keys():\n new_fellow = Fellow(name, email, wants_accomodation)\n self.fellows[email] = new_fellow\n self.allocate_room(new_fellow)\n return new_fellow\n else:\n return \"Email already used!\"\n elif typ == \"STAFF\":\n if not email in self.all_persons.keys():\n new_staff = Staff(name, email)\n self.staff[email] = new_staff\n self.allocate_room(new_staff)\n return new_staff\n else:\n return \"Email already used!\"\n else:\n return -1",
"def add_or_edit_person(date, amount, name):\n if name not in names:\n person_new = {}\n person_new.update({\"name\": name})\n person_new.update({\"amount\": amount})\n person_new.update({\"date\": date})\n data_people.append(person_new)\n names.append(name)\n else:\n person_edit = next(filter(lambda person: person['name'] == name,\n data_people))\n person_edit[\"amount\"] += amount",
"def check_person_existence(self, searched_person_id):\n self.__load_persons_from_file_into_memory()\n return super().check_person_existence(searched_person_id)",
"def addToDominatingSet(self,id2):\n self.dominatingSet.append(id2)",
"def add_book(book):\n\n global book_list\n book.id = generate_id()\n book_list.append(book)",
"def add_individual(self, i):\n if i.iid in self.individuals.keys():\n print(f'US22 - {i.iid} id has a duplicate in line number {i._iid_line}')\n self.individuals[i.iid] = i\n return Individual()",
"def add_record(d):\n\n print(\"\\nEnter the information of the person you'd like to add\")\n firstname = input('First name: ')\n lastname = input('Last name: ')\n phone = input('Phone: ')\n address = input('Address: ')\n\n name_is_equal = False\n\n for pid in d:\n if firstname == d[pid].get('First name') and lastname == d[pid].get('Last name'):\n name_is_equal = True\n\n if name_is_equal is True:\n print('\\n# The contact is already in the phone book')\n else:\n d[len(d)] = {'First name': firstname, 'Last name': lastname, 'Phone': phone, 'Address': address}\n print('\\n# The contact has been added to the phone book')\n\n return d",
"def add_person():\n # Find the last used PK\n with sqlite3.connect('skeevy.db') as connection:\n cursor = connection.cursor()\n cursor.execute(\"SELECT id FROM person ORDER BY id DESC;\")\n for row in cursor.fetchone():\n last_pk = row\n\n # Auto-increment the primary key for the person table.\n last_pk = last_pk + 1\n\n # Prompt the user for the rest of their information.\n first_name = input(\"Enter your first name: \")\n middle_name = input(\"Enter your middle name: \")\n last_name = input(\"Enter your last name: \")\n suffix_name = input(\"Enter your suffix: \")\n e_mail = input(\"Enter your email: \")\n # Default status of the person is active (1).\n status = 1\n\n # Store the input in a variable.\n person_data = (last_pk, first_name, middle_name, last_name, suffix_name,\n e_mail, status)\n\n # Connect and insert the data into the person table.\n with sqlite3.connect('skeevy.db') as connection:\n cursor = connection.cursor()\n cursor.execute(\"INSERT INTO person VALUES(?, ?, ?, ?, ?, ?, ?);\",\n person_data)\n connection.commit()",
"def add_user_to_items(self, id):\n item_users = self.execute(TABELLE['items']['select']['select'])\n # print(\"item_users\",item_users)\n if not item_users: # se il db è vuoto\n self.execute(TABELLE['items']['insert']['new_user'], (id,))\n return\n\n if not isinstance(item_users, list): item_users = [item_users]\n # print(item_users)\n\n for user in item_users:\n if id == user['id']: return # se lo user è gia presente nel db lascio stare\n\n # se sono arrivato qua lo user non è nel db e quindi lo aggiungo\n self.execute(TABELLE['items']['insert']['new_user'], (id,))",
"def save_allocations(self, this_room_name, this_person_id):\n cursor = self.cur()\n cursor.execute('INSERT INTO allocation (room_name, person_id) VALUES(?, ?)', (this_room_name, this_person_id)\n )",
"def fill_repo_with_random_persons(self, n=10, id_lb=1, id_ub=100):\r\n random_ids, random_names, random_phone_numbers = self.generate_random_persons(n, id_lb, id_ub)\r\n for id_, name, phone_num in zip(random_ids, random_names, random_phone_numbers):\r\n self.add_person(id_, ' '.join(name), phone_num)"
] | [
"0.65676033",
"0.61027765",
"0.5929824",
"0.59218085",
"0.5875231",
"0.5845906",
"0.5837034",
"0.5836314",
"0.5750028",
"0.5706715",
"0.56969297",
"0.5672724",
"0.56648934",
"0.56353074",
"0.56224453",
"0.558963",
"0.55000347",
"0.5431597",
"0.5428824",
"0.5368888",
"0.53667474",
"0.5324031",
"0.53102547",
"0.52925676",
"0.5280441",
"0.5258016",
"0.52462554",
"0.52368534",
"0.5229361",
"0.5194732"
] | 0.7483298 | 0 |
takes a person's ID and attempts to remove it from this room's self.persons. If the ID does not exist inside the list, this method raises an exception | def remove_person(self, per: str):
if per in self._people:
self._people.remove(per)
else:
raise IDDoesNotExist | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_by_person_id(self, p_id):\r\n self.__repo.remove_appointment_by_person_id(p_id)",
"def rm_person():\n # get person name from user\n responses = accept_inputs([\"Person name\"])\n person_name = responses[\"Person name\"]\n # check for existence of person\n results = query_with_results(\"select id from person where name = ?\", [person_name])\n if len(results) == 0:\n print(\"No person found with name '%s' that we could remove.\" % person_name)\n return\n # the person exists, so remove it\n query_no_results(\"delete from person where name = ?\", [person_name])\n # remove all associations with tasks\n query_no_results(\"delete from task_person_pair where person = ?\", [results[0][0]])\n print(\"Person with name '%s' removed.\" % person_name)",
"def remove(self, ID):\n i = 0\n for i in range(0, len(self.__lst)):\n if self.__lst[i].getId() == ID:\n self.__lst.pop(i)\n return\n raise ValueError(\"Nu exista disciplina\")",
"def remove_person(self, document):\n del self.__people[document]",
"def __ui_remove_person(self):\n remove_person_id = int(input(\"Introduce the ID of the person you want to remove: \"))\n self.__person_service.service_remove_person(remove_person_id)\n print(\"Person successfully removed from your agenda!\\n\")",
"def test_removeperson(self):\n p1, p2, p3 = self.create3persons()\n t = model.Team(name='Tigers', persons=[p1, p2, p3])\n id = t.store()\n t.remove_person(p2)\n t.store()\n\n t2 = model.Team(id=id)\n self.assertEqual(t2.persons, [p1.id, p3.id])\n\n with self.assertRaises(ValueError): # cannot be removed again\n t2.remove_person(p2)",
"def remove_person(self, remove_person_id):\n self.__load_persons_from_file_into_memory()\n super().remove_person(remove_person_id)\n self.__save_persons_from_memory_to_file()",
"def remove(self, ID):\n if ID in self.pDict:\n del self.pDict[ID]",
"def remove_member(self, persona):\n if persona in self.members:\n self.members.remove(persona)",
"def rm_person_from_task():\n # get person name from user\n responses = accept_inputs([\"Person name\", \"Task label\"])\n person_name = responses[\"Person name\"]\n task_label = responses[\"Task label\"]\n # check for existence of person\n person_results = query_with_results(\"select id from person where name = ?\", [person_name])\n if len(person_results) == 0:\n print(\"No person found with name '%s'.\" % person_name)\n return\n # check for the existence of task\n task_results = query_with_results(\"select * from task where label = ?\", [task_label])\n if len(task_results) == 0:\n print(\"No task found with label '%s'.\" % task_label)\n return\n # disassociate the person from the task\n query_no_results(\"delete from task_person_pair where person = ? and task = ?\", [person_results[0][0], task_label])\n print(\"Person '%s' removed from task with label '%s'.\" % (person_name, task_label))",
"def remove(self,s):\n \n p1, p2 = self.persons\n \n p1.remove_partner(p2,s)\n p2.remove_partner(p1,s)",
"def remove_person_from_the_station(self, station: TelegramController.Station):\n\n if station.line_number in self.__stations_dict and station.station_number in self.__stations_dict[\n station.line_number]:\n if self.__stations_dict[station.line_number][station.station_number] == 1:\n del self.__stations_dict[station.line_number][station.station_number]\n if len(self.__stations_dict[station.line_number]) == 0:\n del self.__stations_dict[station.line_number]\n elif self.__stations_dict[station.line_number][station.station_number] > 1:\n self.__stations_dict[station.line_number][station.station_number] -= 1\n self.__message_sender.send_line(station.line_number, update_passengers=True)\n else:\n print(\"whoops an error, looks like the current station doesn't exit and there's no person waiting for it.\")",
"def remove(name):\n del person_database[name]",
"def delete_person_by_id(self, person_id, record_undo=True, record_redo=False, as_redo=False):\r\n try:\r\n person_id = int(person_id)\r\n except ValueError:\r\n raise PersonIDException(\"Error! The person ID must be an integer.\")\r\n if person_id <= 0:\r\n raise PersonIDException(\"Error! The person ID must be a positive integer.\")\r\n person_to_remove, _ = self.find_person_by_id(person_id)\r\n if person_to_remove is None:\r\n raise PersonIDException(f\"Error! There is no person with the ID {person_id} in the database.\")\r\n self.__person_repository.delete_by_id(person_id)\r\n\r\n if record_undo:\r\n self.save_undo_operation(self.delete_person_by_id, person_to_remove.id, person_to_remove.name,\r\n person_to_remove.phone_number)\r\n if not as_redo: self.__redo_repository.clear_stack()\r\n if record_redo:\r\n self.save_redo_operation(self.delete_person_by_id, person_to_remove.id, person_to_remove.name,\r\n person_to_remove.phone_number)\r\n\r\n return person_to_remove",
"def remove(self, _id):\n if self.objects.get(_id):\n self.objects.pop(_id)",
"def remove_members(id): # pylint: disable=I0011,W0622\n\n l = Legacy.query.get_or_404(id)\n\n if current_app.config.get('IGNORE_AUTH') is not True: # pragma: no cover\n if l.owner_id != g.user.id:\n raise Http403('Access denied')\n\n if not l.can_modify(g.user.id):\n raise Http403('Access denied')\n\n if request.json is None or 'members' not in request.json:\n raise NoData('\"members\" was not specified')\n\n member_list = request.json['members']\n\n if not isinstance(member_list, list):\n raise IncorrectData('\"members\" was not a valid list')\n\n for member in member_list:\n member = Person.query.get(member)\n\n if member is None:\n continue\n\n try:\n l.members.remove(member)\n except ValueError:\n pass\n\n l.save()\n\n return {}",
"def remove_item(self, item_id):\n\t\tself.todolist.remove(item_id) \n\t\tstore = self.store\n\t\tfor row in store:\n\t\t\tif row[0] == item_id:\n\t\t\t\tstore.remove(row.iter)\n\t\t\t\tbreak",
"def remove(table, id_):\n\n # 3\n for index in range(len(table)):\n if table[index][0] == id_:\n table.pop(index)\n data_manager.write_table_to_file('hr/persons.csv', table)\n return table",
"def test_remove_by_identifier(self):\n view = SchemaView(SCHEMA)\n patcher = ObjectChanger(schemaview=view)\n dataset = yaml_loader.load(DATA, target_class=Dataset)\n n_persons = len(dataset.persons)\n dataset: Dataset\n change = RemoveObject(value=Person(id='P:002'))\n r = patcher.apply(change, dataset)\n logging.info(yaml_dumper.dumps(dataset))\n self.assertEqual(len(dataset.persons), n_persons-1)\n self.assertEqual(dataset.persons[0].id, 'P:001')",
"def remove(self, item):\n\t\tif self.len == 0:\n\t\t\traise ValueError(\"Lista vacia\")\n\t\tif self.prim.dato == item:\n\t\t\tself.borrar_primero()\n\t\t\treturn\n\t\tanterior = self.prim\n\t\tactual = anterior.prox\n\t\twhile actual and actual.dato != item:\n\t\t\tanterior = anterior.prox\n\t\t\tactual = actual.prox\n\t\tif not actual:\n\t\t\traise ValueError(\"Elemento no encontrado\")\n\t\tanterior.prox = actual.prox\n\t\tself.len -= 1",
"def remove_partner(self, other_person,s):\n self.number_of_partners -= 1\n self.current_partners.remove(other_person.identifier)\n \n if self.number_of_partners == 0:\n #no partners left -> single\n s.number_of_singles += 1\n s.singles.add(self.identifier)",
"def remove(table, id_):\n count=0\n searched_index=-1\n in_it=False\n for i in table:\n if i[0]==id_:\n searched_index=count\n in_it=True\n count+=1\n\n if in_it: \n table.pop(searched_index)\n else:\n ui.print_error_message(\"ID not found\")\n \n return table",
"def remove(self, identifier: int):\n self.items = list(filter(lambda x: x.identifier != identifier, self.items))",
"def remove(table, id_):\n\n removed = False\n\n for item in table:\n if item[0] == id_[0]:\n table.remove(item)\n removed = True\n\n if not removed:\n ui.print_error_message(\"There isn't a game with such ID!\")\n return table",
"def remove_member(self, db: Session, *, room: Room, user: User) -> Room:\n members = [x for x in room.members if x.id != user.id]\n return self.update(db=db, db_obj=room, obj_in={\"members\": members})",
"async def remove(ctx, pkmn_id: int):\n res = database.remove_from_party(ctx.message.author, pkmn_id)\n if not res:\n ctx.send(\"**Oak**: Make sure you actually have that pokemon or if your party is not full ya scrub.\")\n return await show_party(ctx.message.author)",
"def removeSpot(self, ID):\n for spot in self.parkingSpots:\n if spot.id == ID:\n self.parkingSpots.remove(spot)\n #for i in range(len(self.parkingSpots)): # relabel all spots to keep the id numbers\n # self.parkingSpots[i].id = i # representative of the number of spots\n return\n raise Exception(\"No spot with given id \" + str(ID) + \" found.\")",
"def remove(obj_objectid_or_path_tuple):",
"def delete_record(d):\n\n print(\"\\nEnter the name of the person you'd like to remove\")\n firstname = input('First name: ')\n lastname = input('Last name: ')\n\n for pid in d:\n if firstname == d[pid].get('First name') and lastname == d[pid].get('Last name'):\n del d[pid]\n print('\\n# The contact has been deleted')\n return d\n print('\\n# The contact is not in the phone book')",
"def remove_from_list (self, video_id):\n return self._update_my_list(video_id=video_id, operation='remove')"
] | [
"0.70279044",
"0.67927957",
"0.670005",
"0.6508729",
"0.6489163",
"0.63619334",
"0.62927204",
"0.6246291",
"0.62200475",
"0.61251545",
"0.597299",
"0.5888846",
"0.58179843",
"0.5777153",
"0.5680758",
"0.5679263",
"0.56768095",
"0.56184345",
"0.5617217",
"0.5612672",
"0.55934626",
"0.5587385",
"0.5572361",
"0.5556552",
"0.55166644",
"0.54854065",
"0.5454879",
"0.5453074",
"0.544253",
"0.5423862"
] | 0.7739273 | 0 |
takes a list of ids and sets self.people to the list | def set_people(self, people: list):
self._people = people | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_ids_to_people(self, ids):\n return [ self.voters[i] for i in ids ]",
"def ids(self, ids):\n self._ids = ids",
"def set_persons(self, persons: List[Person]):\n self.person_list.model().set_person_list(persons)",
"def add_people(self, people_list):\n\n for person in people_list:\n self.add_person(person)",
"def find_data(id_list):\n people_list = []\n for id in id_list:\n new_person = Query()\n new_person.id = id\n new_person.find_details()\n people_list.append(new_person)\n \n return people_list",
"def search_among_agents(self, agent_ids):\n self._id_list = agent_ids",
"def person_list(self, new_persons_list):\n self._person_list = new_persons_list\n self.__save_persons_from_memory_to_file()",
"def setUserIDRefs( self, text ):\n self.user_id_list= text.split()",
"def get_locations_by_ids(self, id_list):",
"def setup_people(access_control_list):\n all_users = set()\n for users in access_control_list.values():\n all_users.update({(user[\"email\"], user[\"name\"]) for user in users})\n\n with factories.single_commit():\n for email, name in all_users:\n factories.PersonFactory(email=email, name=name)",
"def __init__(self, users=()):\n self.users = {str(x.id): x for x in users}",
"def set_ids(self, item_list):\n self._reset_sequence()\n for item in item_list:\n key = self.nondup_key_for_item(item)\n item[self.id_key] = self.ids.get(key) or self._get_next_id()",
"def search_among_sequence_rules(self, bank_ids):\n self._id_list = bank_ids",
"def findIds(self, names):\n idSuggestions = {}\n for name in names:\n idSuggestion = self.searchForUsers(name)\n idSuggestions[name] = idSuggestion\n\n return idSuggestions",
"def _change_objs_to_IDs(self):\n if self.location:\n self.location = self.location.id\n if self.contents:\n self.contents = [obj.id for obj in self.contents]",
"def service_ids(self, service_ids):\n\n self._service_ids = service_ids",
"def users(self, users):\n\n self._users = users",
"def users(self, users):\n\n self._users = users",
"def users(self, users):\n\n self._users = users",
"def on_prefill(self, ids):\n pass",
"def learn_ids(self, item_list):\n self._reset_sequence()\n for item in item_list:\n key = self.nondup_key_for_item(item)\n self.ids[key] = item[self.id_key]",
"def set_all_from_json(self, value:list):\n self.clear()\n for item in value:\n relation_id = item['relation_id']\n members = item['members']\n self[relation_id] = members",
"def external_ids(self, external_ids):\n\n self._external_ids = external_ids",
"def on_meta_ids(self, ids):\n pass # pylint: disable=unnecessary-pass",
"def assign_gids(self, int[::1] gids):\n self.mdb.get().assign_gids(<int> gids.size, <const int *> &gids[0])",
"def remap_ids(self, id_map: Dict[int, int]) -> None:",
"def match_ids(cls, ids):\n id_prefix = getattr(cls.get_model(), 'id_prefix', None)\n if id_prefix is not None:\n return [i for i in ids if i.startswith(id_prefix)]\n return ids",
"def fill_repo_with_random_persons(self, n=10, id_lb=1, id_ub=100):\r\n random_ids, random_names, random_phone_numbers = self.generate_random_persons(n, id_lb, id_ub)\r\n for id_, name, phone_num in zip(random_ids, random_names, random_phone_numbers):\r\n self.add_person(id_, ' '.join(name), phone_num)",
"def _write_uids(self, ids, resp):\n for uid, result in izip(ids, resp.json()[\"results\"]):\n if not result[\"data\"]:\n self._missing.send(uid)\n if self.doclog.isEnabledFor(logging.TRACE):\n self.doclog.trace(\"uid %s does not have properties\" % uid)\n else:\n self._has_properties.add(uid)",
"def setId(self, *args):\n return _libsbml.ListOfMembers_setId(self, *args)"
] | [
"0.73439336",
"0.7020338",
"0.6686739",
"0.64292365",
"0.6300952",
"0.6075446",
"0.59576225",
"0.59004533",
"0.58260936",
"0.57484186",
"0.56893873",
"0.56491727",
"0.562238",
"0.561242",
"0.5599444",
"0.5596111",
"0.5545097",
"0.5545097",
"0.5545097",
"0.5518421",
"0.5499519",
"0.5489225",
"0.54492295",
"0.54294735",
"0.5426588",
"0.5417273",
"0.5373917",
"0.53670424",
"0.5360795",
"0.53552985"
] | 0.7332554 | 1 |
takes a string which specifies a direction and removes it from the list of connections to this room. If the connection is not in the dict, it returns False; otherwise it removes the connection and returns True | def remove_connection(self, direction: str):
if direction in self._connections:
self._connections.pop(direction)
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_route(self, start, destination):\r\n if start in self.edges and destination in self.edges:\r\n for edge in self.edges[start]:\r\n if edge.destination == destination:\r\n self.edges[start].remove(edge)\r\n for edge in self.edges[destination]:\r\n if edge.destination == start:\r\n self.edges[destination].remove(edge)\r\n return True\r\n return False",
"def remove_route(g, origin, destination, choice_dir):\n origin_code = g.convert[origin]\n destination_code = g.convert[destination]\n \n # Removes both directions and returns \n if(choice_dir == \"y\"):\n \n \n for key in g.city_dict:\n if(key == origin_code):\n \n old_flights_in = g.city_dict[key].get_flights_in()\n new_flights_in = []\n for flight in old_flights_in:\n if(flight[0] != destination_code):\n new_flights_in.append(flight)\n \n old_flights_out = g.city_dict[key].get_flights_out()\n new_flights_out = []\n for flight in old_flights_out:\n if(flight[0] != destination_code):\n new_flights_out.append(flight)\n \n g.city_dict[key].set_flights_in(new_flights_in)\n g.city_dict[key].set_flights_out(new_flights_out)\n \n if(key == destination_code):\n old_flights_in = g.city_dict[key].get_flights_in()\n new_flights_in = []\n for flight in old_flights_in:\n if(flight[0] != origin_code):\n new_flights_in.append(flight)\n \n old_flights_out = g.city_dict[key].get_flights_out()\n new_flights_out = []\n for flight in old_flights_out:\n if(flight[0] != origin_code):\n new_flights_out.append(flight)\n \n g.city_dict[key].set_flights_in(new_flights_in)\n g.city_dict[key].set_flights_out(new_flights_out)\n \n \n # Removes one direction and returns\n if(choice_dir == \"n\"):\n for key in g.city_dict:\n if(key == origin_code):\n \n old_flights_out = g.city_dict[key].get_flights_out()\n new_flights_out = []\n for flight in old_flights_out:\n if(flight[0] != destination_code):\n new_flights_out.append(flight)\n \n g.city_dict[key].set_flights_out(new_flights_out)\n \n if(key == destination_code):\n old_flights_in = g.city_dict[key].get_flights_in()\n new_flights_in = []\n for flight in old_flights_in:\n if(flight[0] != origin_code):\n new_flights_in.append(flight)\n g.city_dict[key].set_flights_in(new_flights_in)\n \n return g",
"def remove_city(self, code):\r\n if code in self.vertices:\r\n self.vertices.pop(code)\r\n self.edges.pop(code)\r\n for _code, _list in self.edges.items():\r\n for edge in _list:\r\n if edge.start == code or edge.destination == code:\r\n _list.remove(edge)\r\n return True\r\n return False",
"def remove(self, key: str) -> bool:\n prev, cur = None, self.head\n while cur is not None:\n if cur.key == key:\n if prev:\n prev.next = cur.next\n else:\n self.head = cur.next\n self.size -= 1\n return True\n prev, cur = cur, cur.next\n return False",
"def remove(self, key: str) -> bool:\n prev, cur = None, self.head\n while cur is not None:\n if cur.key == key:\n if prev:\n prev.next = cur.next\n else:\n self.head = cur.next\n self.size -= 1\n return True\n prev, cur = cur, cur.next\n return False",
"def remove(self, destination: n):\n try:\n self.connections.pop(destination)\n except KeyError:\n pass",
"async def unpair(self) -> bool:\n return await self._backend.unpair()",
"def _remove_cfg_from_list(self, server_id):\n\t\treturn True if self.Settings.pop(server_id, None) else False",
"def all_clean(room):\r\n\r\n for row in room:\r\n for cell in row:\r\n if cell == \"dirt\":\r\n return False\r\n\r\n return True",
"def rpc_remove_connection(client, source, dest,\n rpc_user=BTC_RPC_USER, rpc_password=BTC_RPC_PASSWD, rpc_port=BTC_RPC_PORT):\n try:\n rpc_server = get_ip_by_unknown(client, source)\n dest = get_ip_by_unknown(client, dest)\n rpc_connection = AuthServiceProxy(\"http://%s:%s@%s:%s\" % (rpc_user, rpc_password, rpc_server, rpc_port))\n rpc_connection.addnode(dest, \"remove\")\n return True\n except JSONRPCException as err:\n print(err)\n return False",
"def remove_connection(self, var1, var2):\n conn, swap = self._find_connection_element(var1, var2)\n if not conn:\n raise ModelModificationError(\"Cannot remove non-existent connection.\")\n if swap:\n var1, var2 = var2, var1\n # Find the relevant map_variables element\n mapv = conn.xml_xpath(u'cml:map_variables[@variable_1=\"%s\" and @variable_2=\"%s\"]'\n % (var1.name, var2.name))\n if not mapv:\n raise ModelModificationError(\"Cannot remove non-existent connection.\")\n conn.xml_remove_child(mapv[0])\n if not hasattr(conn, u'map_variables'):\n conn.xml_parent.xml_remove_child(conn)",
"def remove_connections(self, var):\n cname, vname = var.component.name, var.name\n for conn in list(getattr(self.model, u'connection', [])):\n if cname == conn.map_components.component_1:\n vid = u'variable_1'\n elif cname == conn.map_components.component_2:\n vid = u'variable_2'\n else:\n continue\n for mapv in conn.map_variables:\n if vname == getattr(mapv, vid, ''):\n # Found a connection\n conn.xml_remove_child(mapv)\n if not hasattr(conn, u'map_variables'):\n conn.xml_parent.xml_remove_child(conn)\n # There can't be any more matching map_variables in this connection\n break",
"def remove(self, proxy_string):\n try:\n self.proxies.remove(proxy_string)\n return True\n except ValueError:\n return False",
"def validate_move(board: list, character: dict, direction: str) -> bool:\n if direction.strip().upper() == \"N\":\n return (character[\"Position\"][0] - 1, character[\"Position\"][1]) in board\n elif direction.strip().upper() == \"S\":\n return (character[\"Position\"][0] + 1, character[\"Position\"][1]) in board\n elif direction.strip().upper() == \"W\":\n return (character[\"Position\"][0], character[\"Position\"][1] - 1) in board\n elif direction.strip().upper() == \"E\":\n return (character[\"Position\"][0], character[\"Position\"][1] + 1) in board\n else:\n print(\"Please enter only directions shown above\")\n return False",
"def remove_dead_entries(self, packet):\n for route in self.forwarding_table:\n for dead_entry in packet[MESG]:\n sameSource = route[SRCE] == packet[SRCE]\n sameDest = route[DEST] == packet[DEST]\n if sameSource and sameDest and dead_entry[NTWK] == route[NTWK] and dead_entry[NMSK] == route[NMSK]:\n self.forwarding_table.remove(route)\n self.revoked.append(route)\n break",
"def remove(self, connection):\n\n net_tuple = self.read_nodestate(0)\n\n # Tuples are immutable; convert it to a list.\n network_list = list(net_tuple)\n\n # Identify and remove said connection\n try:\n index = network_list.index(connection)\n network_list.pop(index)\n\n # Connection not in network tuple, or socket is [closed]\n except ValueError:\n log_msg = str(\"Not removing non-existent connection: \"+str(connection))\n Primitives.log(log_msg, in_log_level=\"Warning\")\n\n # Update the network tuple with the new one\n self.write_nodestate(nodeState, 0, tuple(network_list))",
"def deleteroute(self, new_route):\n route_key = new_route.replace('-', ',')\n error, exists, message, code, lines = self.selectroute(route_key)\n if error or not exists:\n return False, message, code\n else:\n error, message, code = self.commandroute('Delete', lines, route_key)\n if not error:\n return True, message, 200\n else:\n return False, message, code",
"def handle_link_down (self, port):\n for dest in self.hosts.keys():\n currPort = self.hosts[dest][0]\n if currPort == port:\n del self.hosts[dest]\n \n deleteDests = set()\n for dest in self.routesToDest:\n currPort = self.routesToDest[dest][0]\n \n if currPort == port:\n\n if dest in self.hosts:\n self.routesToDest[dest] = self.hosts[dest]\n packet = basics.RoutePacket(dest, self.routesToDest[dest][1])\n self.send(packet, self.routesToDest[dest][0], True)\n else:\n self.sendPoison(dest)\n deleteDests.add(dest)\n\n\n for dest in deleteDests:\n del self.routesToDest[dest]\n\n del self.neighbours[port]",
"def __isDirection__(self, word):\n self.directions = ('north', 'south', 'east', 'west', 'down', 'up', 'left', 'right', 'back')\n for direction in self.directions:\n if direction == word:\n return ('direction', word), True\n return None, False",
"def remove_option_from_value(self, o):\n result = False\n for k in self._options:\n if self._options.get(k) == o:\n self._options.pop(k)\n result = True\n return result",
"def deleteStructureChatbotDict(self,sentence):\n if sentence in self.dictChatBots:\n del self.dictChatBots[sentence]\n if not(self.currentStructureChatBot is None) and sentence == self.currentStructureChatBot.name:\n self.currentStructureChatBot = None # se reestablece el chatbot actual\n self.output.exec('El ChatBot \"'+sentence+'\" ha dejado de ser el ChatBot actual.')\n self.output.exec('El ChatBot \"' + sentence + '\" se ha eliminado correctamente .')\n else:\n self.output.exec('El ChatBot \"' + sentence + '\" no existe .')",
"def removeEquate(self, name: unicode) -> bool:\n ...",
"def remove(self, word):\n\t\tif word in self.link_words:\n\t\t\tself.link_words.remove(word)",
"def collison(direction):\n if direction == 3 and screen.inch(head[0]-1,head[1]) !=ord(' '):\n return True\n elif direction == 2 and screen.inch(head[0]+1,head[1]) !=ord(' '):\n return True\n elif direction == 1 and screen.inch(head[0],head[1]-1) !=ord(' '):\n return True\n elif direction == 0 and screen.inch(head[0],head[1]+1) !=ord(' '):\n return True \n else:\n return False",
"def remove_wall(self, direction):\n assert direction\n if self._walls & direction != 0:\n self.walls &= ~direction",
"def removeAlias(self, alias):\r\n for k, v in self.aliases.iteritems():\r\n if v.title == alias:\r\n del self.aliases[k]\r\n return True\r\n return False",
"def singleConnectUnconnected(house, district):\n district.batteries.sort(key=lambda x: x.capacity, reverse=True)\n\n for battery in district.batteries:\n for connectedHouse in battery.connectedHouses:\n oldConnection = connectedHouse.connection\n\n if (connectedHouse.output + connectedHouse.connection.capacity) > house.output:\n for b in connectedHouse.possible_connections:\n if b[0].capacity >= connectedHouse.output:\n switch(connectedHouse, b[0])\n switch(house, oldConnection)\n\n if house.connection.capacity < 0:\n house.distance = 0\n house.connection.capacity += house.output\n house.connection.connectedHouses.remove(house)\n house.connection = \"NOT CONNECTED!\"\n\n switch(connectedHouse, oldConnection)\n break\n\n district.disconnectedHouses.remove(house)\n return",
"def remove(self, node):\r\n\r\n # Allow node name, get the real node object\r\n if isinstance(node, basestring):\r\n name = node\r\n node = self.nodes[name]\r\n else:\r\n name = self.node_name(node)\r\n\r\n del self.nodes[name]\r\n\r\n remove = [c for c in self.connections if c[0] == node or c[1] == node]\r\n\r\n for connection in remove:\r\n self.connections.remove(connection)",
"def remove_servers_channels(self):\n for _hash in self._sections.keys():\n if not re.match(ur'^ server ', _hash) and not re.match(ur'^ channel ', _hash):\n continue\n del self._sections[_hash]",
"def removeConnection(self):\n\n self._connection._removeConnection(self)\n self._connection = None\n\n return True"
] | [
"0.56556493",
"0.5615543",
"0.52277464",
"0.5139612",
"0.5139612",
"0.50432074",
"0.5015839",
"0.49956447",
"0.49876505",
"0.49738443",
"0.49474016",
"0.4874587",
"0.4867951",
"0.48516744",
"0.4841608",
"0.4818004",
"0.4816945",
"0.47627732",
"0.47486317",
"0.4746608",
"0.47311625",
"0.47051585",
"0.47015437",
"0.46865034",
"0.46814942",
"0.46796012",
"0.46777883",
"0.46699014",
"0.46649247",
"0.46441406"
] | 0.7282875 | 0 |
Takes a dictionary of connections to this room, given as room IDs, and sets self._connections to this dict | def set_connections(self, connections: dict):
self._connections = connections | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def connections(self, connections):\n\n self._connections = connections",
"def loadConnections(self):\n\n #create each connecting map\n for direction, (fn, offset) in self.connections.items():\n self.connectedMaps[direction] = (Tilemap(fn), offset)",
"def __init__(self):\n self.connections = {}",
"def mset(self, mapping):\n servers = {}\n for key, value in mapping.items():\n server_name = self.get_server_name(key)\n servers.setdefault(server_name, [])\n servers[server_name].append((key, value))\n for name, items in servers.items():\n self.connections[name].mset(dict(items))\n return True",
"def __init__(self, connections):\n self._connections = connections.split()",
"def convert_connections(self, connections):\n model = self.model\n for conn in getattr(model, u'connection', []):\n comp1 = model.get_component_by_name(conn.map_components.component_1)\n comp2 = model.get_component_by_name(conn.map_components.component_2)\n for mapping in conn.map_variables:\n var1 = model.get_variable_by_name(comp1.name, mapping.variable_1)\n var2 = model.get_variable_by_name(comp2.name, mapping.variable_2)\n if frozenset([var1, var2]) in connections:\n self.convert_mapping(mapping, comp1, comp2, var1, var2)",
"def rooms(self, rooms):\n\n self._rooms = rooms",
"def connection(self, **kwargs):\n for key, value in kwargs.items():\n setattr(self, key, value)",
"def test_connections_updated(self):\n assert self.skill_config.connections == {self.new_connection_id}",
"def add_connection(self, connection_id):\n self.connection_load = random.random() * 10 + 1\n self.connection = {connection_id: connection_load}\n self.connections.update(connection)",
"def test_connections_updated(self):\n assert self.agent_config.connections == {self.new_connection_id}",
"def updateConnections(self, *connections):\n\n # Verify if ports are valid, otherwise do nothing.\n for connection in connections:\n for k1, v1 in connection.items():\n if v1 not in k1.ports:\n logger.error(\"Port '%s' is not in '%s: %s'\", v1, k1, k1.ports)\n raise RuntimeError(\"Port '{}' is not in '{}: {}'\".format(v1, k1, k1.ports))\n\n # Remove old conflicting connections\n def check_if_port_is_not_connected(connection, k1, v1):\n for k2, v2 in connection.items():\n if (k1, v1) == (k2, v2):\n logger.warning(\"Deleting existing connection %s.\", connection)\n return False\n return True\n for connection in connections:\n for k1, v1 in connection.items():\n connectioncheck2 = lambda connection: check_if_port_is_not_connected(\n connection, k1, v1)\n self.connections[:] = [x for x in self.connections if connectioncheck2(x)]\n\n # Add new connections\n for connection in connections:\n if connection not in self.connections:\n self.connections.append(connection)\n else:\n logger.warning(\"Connection already exists: %s\", connection)\n return True",
"def add_connection_entry(self,client_id, display_name,session_id,host,conn,addr):\n self.connections[client_id] = {\n \"display_name\" : display_name,\n \"session_id\" : session_id,\n \"host\" : host,\n \"CONN\" : conn,\n \"ADDR\" : addr,\n \"connected\" : True\n }",
"def test_connections_updated(self):\n assert self.connection_config.connections == {self.new_connection_id}",
"def rotateconnections(self,cells,cellkey):\n cell = cells[cellkey]\n for c in cell.conns:\n if c == 'u':\n key = str(cell.xpos) + str(cell.ypos - 1)\n elif c == 'd':\n key = str(cell.xpos) + str(cell.ypos + 1)\n elif c == 'l':\n key = str(cell.xpos-1) + str(cell.ypos)\n elif c == 'r':\n key = str(cell.xpos +1) + str(cell.ypos) \n self.rotatecell(cells[key])",
"def connect(self):\n\n label = self.scope[\"url_route\"][\"kwargs\"][\"label\"]\n self.user = self.scope[\"user\"]\n\n try:\n room = Relationship.objects.get(label=label)\n except Relationship.DoesNotExist:\n log.warning('No relationship have this label=%s', label)\n self.close()\n return\n except Exception as error:\n log.error(\"建立聊天室channel時發生錯誤: %s\" % error)\n self.close()\n return\n\n if not (room.client == self.user or room.performer == self.user):\n log.warning(\n '%s try to connect to the relationship that not belog to him', self.user)\n self.close()\n return\n\n self.scope[\"room\"] = room\n # Accept the incoming connection\n self.accept()\n\n async_to_sync(self.channel_layer.group_add)(\n \"chat\" + str(label), self.channel_name)",
"def add_connection(self, room1_id, room2_id, direction):\n opposite_direction = {'n': 's', 's': 'n', 'e': 'w', 'w': 'e'}\n if room1_id in self.rooms and room2_id in self.rooms:\n self.rooms[room1_id]['exits'][direction] = room2_id\n self.rooms[room2_id]['exits'][opposite_direction[direction]] = room1_id\n else:\n raise IndexError('That room does not exist!')",
"def __init__(self) :\n self.remoteConnections = {}",
"def add_connection(self, ip, port, key):\n\n # Socket declaration\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.bind((ip, port))\n\n # Adding connection to the list\n self.connections[key] = sock",
"def make_connections(self):\n return\n destinations={}\n sources={}\n for gsq in self.gatesqs:\n destinations[self.local2global(gsq)]=set()\n sources[self.local2global(gsq)]=set()\n if rm.all_sols=='timeout':\n return\n for sol in self.all_sols:\n for sa in sol:\n start,indv,path,covered,end=sa\n destinations[self.local2global(start)].add((self.local2global(end),tuple(path)))\n sources[self.local2global(end)].add((self.local2global(start),tuple(path)))\n self.sources=sources\n self.destinations=destinations",
"def add_connections(self, connections):\r\n\r\n for node1, node2, w in connections:\r\n self.add(node1, node2, w)",
"def remap_ids(self, id_map: Dict[int, int]) -> None:\n super().remap_ids(id_map)\n self.door = id_map.get(self.door, 0)",
"def remote_prepareConnection(self, connID, key, auth):\r\n assert connID not in self._pendingConnections\r\n self._pendingConnections[connID] = [key, auth]",
"def _add_connections(self, connections):\r\n\r\n for node1, node2 in connections:\r\n self._add(node1, node2)",
"def _process_connections(self, connections):\n # create connection\n for con in connections:\n self._add_connection(con)\n\n for inp_lab, inp in self.inputs.items():\n # use self._find_routes() to find routes from input inp\n routes_inp = self._find_routes(inp)\n # create routes\n for route in routes_inp:\n self._add_route(route)\n # sort the routes dictionary\n self._sort_routes()",
"def populate_redis(self, d):\n for k, v in d.items():\n self.redis_conn.set(k, v)",
"def add_connections(self, connections):\r\n\r\n for node1, node2 in connections:\r\n self.add(node1, node2)",
"def connection_id(self, connection_id: PublicId) -> None:\n if self._connection_id is not None:\n raise ValueError(\"connection_id already set!\") # pragma: nocover\n self._connection_id = connection_id",
"def link_room(self, room_to_link, direction):\n self.linked_rooms[direction] = room_to_link\n # print(self.name + \" linked rooms :\" + repr(self.linked_rooms) )",
"def _update_connections(self, oldVar, newVar):\n vars = [v for v in self.model.get_all_variables() if v.get_source_variable(True) is oldVar]\n # Remove old connections, including interfaces and types so creating the new connection works\n for v in vars:\n self.remove_connections(v)\n self.del_attr(v, u'public_interface')\n self.del_attr(v, u'private_interface')\n v.clear_dependency_info()\n # Create new connections\n for v in vars:\n self.connect_variables(newVar, v)"
] | [
"0.6299071",
"0.6227639",
"0.60175973",
"0.56802016",
"0.5665811",
"0.56469935",
"0.55497473",
"0.5549558",
"0.5485854",
"0.5449879",
"0.54207087",
"0.5355422",
"0.529117",
"0.52848494",
"0.5275657",
"0.52159476",
"0.51966083",
"0.51816684",
"0.51708376",
"0.5163054",
"0.51397526",
"0.51339823",
"0.5122589",
"0.51153773",
"0.50854564",
"0.50596225",
"0.5024631",
"0.50215894",
"0.50128955",
"0.49836817"
] | 0.6811939 | 0 |
Takes a dict with interactions as keys and their possible responses as values. Saves that dict as the self._interactions attribute | def set_interactions(self, interactions: dict):
self._interactions = interactions | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_interaction(self, interaction: str, response: str):\n if interaction not in self._interactions:\n self._interactions[interaction] = response\n else:\n raise KeyAlreadyExists",
"def update_interactions_dict(interaction_dict, verbose=False):\n if verbose:\n print(\"Updating interaction dictionary...\")\n for chain in interaction_dict:\n for interaction_tple in interaction_dict[chain]:\n chain1, chain2, ref_inter = interaction_dict[chain][interaction_tple] # De-packs the interaction tuple value\n # Generates a list with all interactions of that key minus the current one\n chain1_filtered_interactions_lst = [x for x in interaction_dict[chain1.id].keys() if x != interaction_tple]\n # Updates chain_interaction attribute with the list of interaction tuples\n chain1.add_interaction_lst(chain1_filtered_interactions_lst)\n # Generates a list with all interactions minus the one from chain2 to chain1\n chain2_filtered_interactions_lst = [x for x in interaction_dict[chain2.id].keys() if x != ref_inter]\n chain2.add_interaction_lst(chain2_filtered_interactions_lst)\n parent = chain1.get_parent()\n parent.child_list = [chain1, chain2] # Updates the model chains with these new updated chains\n interaction_dict[chain][interaction_tple] = chain1, chain2 # Updates the dictionary value\n # (now without the interaction from chain 2 to 1)\n if verbose:\n print(\"Interaction dictionary updated\")",
"def interactions(self) -> Sequence[Interaction[Context,Action]]:\n\n if self._simulation is not None:\n return self._simulation.interactions\n \n raise Exception(\"A JsonSimulation must be loaded before it can be used.\")",
"def visit_interaction(self, interaction):\n for opt in self.event_json['options']:\n opt_json = self.world_json[opt['id']]\n self._connect_option(interaction, opt_json)",
"def interactions(self) -> Sequence[Interaction[_C_out, _A_out]]:\n ...",
"def get_interactions(self):\n return self._interactions",
"def interactions(self) -> Sequence[Interaction[_C_out,_A_out]]:\n return self._interactions",
"def register_interaction(cls, interaction, *names):\n for name in names:\n cls.GLOBAL_INTERACTION_MAP[name] = interaction",
"def interactions(self) -> Sequence[Interaction[_C_out, Tuple[int,...]]]:\n return self._simulation.interactions",
"def context_by_action(self, actions_structure):\n verbs_alone, verbs_complex = get_not_action_verbs()\n for i, action_dct in enumerate(actions_structure):\n for j, verb_dct in enumerate(action_dct['context']['verbs']):\n\n if verb_dct['aux'] and len(actions_structure[i]['context_actions']) > 0:\n actions_structure[i]['context_actions'][-1]['What is the action?']['initial_value'] += ' ' + verb_dct['initial_value']\n actions_structure[i]['context_actions'][-1]['What is the action?'][\n 'replacement_value'] += ' ' + verb_dct['replacement_value']\n #update_action_additional_info_auxiliar(actions_structure, i, verb_dct)\n else:\n action, init_index_verb, final_index_verb = create_action(verb_dct)\n init_index_next_verb = get_init_index_next_verb(j, action_dct)\n action = add_element_to_action(action, action_dct, 'attributes', final_index_verb, init_index_next_verb, question='What is the action?')\n action = add_element_to_action(action, action_dct, 'direct_object', final_index_verb, init_index_next_verb, question='What is the action?')\n has_direct_object = True if len(action[\"What is the action?\"]) > 0 else False\n action = add_element_to_action(action, action_dct, 'indirect_object', final_index_verb, init_index_next_verb, question='Who is the action directed to?')\n has_indirect_object = True if len(action[\"Who is the action directed to?\"]) > 0 else False\n action = add_element_to_action(action, action_dct, 'adverb_mod', final_index_verb, init_index_next_verb, question='How is the action made')\n\n action = add_element_to_action(action, action_dct, 'auxiliar_object', final_index_verb, init_index_next_verb, question='What is the action?')\n\n action = add_subject_to_action(action, verb_dct, action_dct, init_index_verb)\n\n action = add_element_to_action(action, action_dct, 'agents', final_index_verb, init_index_next_verb, verb_dct=verb_dct)\n\n action = check_element_order(action)\n action = remove_extra_index_from_context_actions(action)\n\n if verb_dct['initial_value'] not in verbs_alone:\n initial_value = verb_dct['initial_value'] + ' ' + ' '.join(\n [element['initial_value'] for element in action['What is the action?']]) \\\n if len([element['initial_value'] for element in action['What is the action?']]) > 0\\\n else verb_dct['initial_value']\n\n replacement_value = verb_dct['replacement_value']+' '+' '.join(\n [element['replacement_value'] for element in action['What is the action?']]) \\\n if len([element['replacement_value'] for element in action['What is the action?']]) > 0\\\n else verb_dct['replacement_value']\n\n action['What is the action?'] = {'initial_value': initial_value,\n 'replacement_value': replacement_value}\n actions_structure[i]['context_actions'].append(action)\n\n # TODO Model loaded from local, improve model\n #action_info = get_action_additional_info(action_dct, verb_dct, action, has_direct_object,\n # has_indirect_object, init_index_verb, final_index_verb)\n\n #action[\"action_info\"] = action_info\n #is_main_action = predict_main_action(action)\n #action[\"is_main_action\"] = is_main_action\n\n for pobj_dct in action_dct['context']['auxiliar_object']:\n pobj_dct.pop('prep_added')\n actions_structure[i].pop('doc')\n #for act in actions_structure[i][\"context_actions\"]:\n # act.pop(\"action_info\")\n #is_main_action = predict_main_action(actions_structure)",
"def interactions(self) -> Sequence[Interaction[_C_out,_A_out]]:\n return self._simulation.interactions",
"def to_dict(self) -> BaseInteractionDict:\n return {\n 'id': self.id,\n 'name': self.name,\n 'description': self.description,\n 'answer_type': self.answer_type,\n 'display_mode': self.display_mode,\n 'is_terminal': self.is_terminal,\n 'is_trainable': self.is_trainable,\n 'is_linear': self.is_linear,\n 'needs_summary': self.needs_summary,\n 'customization_arg_specs': [{\n 'name': ca_spec.name,\n 'description': ca_spec.description,\n 'default_value': ca_spec.default_value,\n 'schema': ca_spec.schema,\n } for ca_spec in self.customization_arg_specs],\n 'instructions': self.instructions,\n 'narrow_instructions': self.narrow_instructions,\n 'default_outcome_heading': self.default_outcome_heading,\n 'rule_descriptions': self._rule_description_strings,\n 'can_have_solution': self.can_have_solution,\n 'show_generic_submit_button': self.show_generic_submit_button,\n }",
"def add_session_interactions(self):\n for i, val in enumerate(self.input_df.values):\n if i == 0:\n # skip first record and\n continue # forward\n interaction = Interaction(val).get_instance()\n\n self.interactions.append({\n 'name': interaction.get('id'),\n 'text': interaction.get('text'),\n 'media': interaction.get('media'),\n 'quick_replies': interaction.get('quick_replies'),\n 'next': interaction.get('next'),\n 'pre': interaction.get('id'),\n 'attribute': interaction.get('attribute')\n })\n\n return self.interactions",
"def interaction_table_from_dict(interaction_dictionary):\n return InteractionTable(\n df=pd.DataFrame(interaction_dictionary['interactions']),\n labels=interaction_dictionary['labels']\n )",
"def act(self):\n channel_act = copy.deepcopy(self.observation)\n\n for user_act in channel_act['user_acts']:\n # Dialogue Act\n da_conf = self.generate_confidence()\n da_value = user_act[\"dialogue_act\"][\"value\"]\n\n if np.random.random() > da_conf:\n if da_value == UserAct.AFFIRM:\n da_value = UserAct.NEGATE\n elif da_value == UserAct.NEGATE:\n da_value == UserAct.AFFIRM\n else:\n pass\n\n user_act[\"dialogue_act\"][\"value\"] = da_value\n user_act[\"dialogue_act\"][\"conf\"] = self.generate_confidence()\n\n # Intent\n if \"intent\" in user_act:\n intent_value = user_act[\"intent\"][\"value\"]\n if self.intents[intent_value].get(\"speech\", False):\n intent_conf = 1.\n else:\n intent_conf = self.generate_confidence()\n intent_possible_values = self.slots[\"intent\"][\n \"possible_values\"].copy()\n\n if np.random.random() > intent_conf:\n intent_possible_values.remove(intent_value)\n intent_value = np.random.choice(intent_possible_values)\n\n user_act['intent']['value'] = intent_value\n user_act['intent']['conf'] = intent_conf\n\n # Slot Values\n for slot_dict in user_act.get('slots', list()):\n slot_name = slot_dict[\"slot\"]\n slot_value = slot_dict[\"value\"]\n\n if self.slots[slot_name][\"node\"] != \"BeliefNode\":\n slot_conf = 1.0\n else:\n slot_conf = self.generate_confidence()\n\n slot_possible_values = self.slots[slot_name].get(\n \"possible_values\")\n\n if slot_possible_values is None:\n slot_possible_values = list()\n\n slot_possible_values = slot_possible_values.copy()\n if len(slot_possible_values) and np.random.random() > slot_conf:\n slot_possible_values.remove(slot_value)\n slot_value = np.random.choice(slot_possible_values)\n\n slot_dict['conf'] = slot_conf\n\n channel_act[\"channel_utterance\"] = self.template_nlg(\n channel_act['user_acts'])\n return channel_act",
"def handle_action_dict(self, speaker: str, d: Dict, chatstr: str) -> Optional[DialogueObject]:\n coref_resolve(self.agent.memory, d, chatstr)\n logging.info('ttad post-coref \"{}\" -> {}'.format(hash_user(speaker), d))\n\n if d[\"dialogue_type\"] == \"NOOP\":\n return Say(\"I don't know how to answer that.\", **self.dialogue_object_parameters)\n elif d[\"dialogue_type\"] == \"HUMAN_GIVE_COMMAND\":\n return Interpreter(speaker, d, **self.dialogue_object_parameters)\n elif d[\"dialogue_type\"] == \"PUT_MEMORY\":\n return PutMemoryHandler(speaker, d, **self.dialogue_object_parameters)\n elif d[\"dialogue_type\"] == \"GET_MEMORY\":\n logging.info(\"this model out: %r\" % (d))\n logging.info(\"querying previous model now\")\n if self.ttad_prev_model:\n prev_model_d = self.ttad(s=chatstr, model=self.ttad_prev_model, chat_as_list=True)\n logging.info(\"prev model out: %r\" % (prev_model_d))\n if (\n prev_model_d[\"dialogue_type\"] != \"GET_MEMORY\"\n ): # this happens sometimes when new model sayas its an Answer action but previous says noop\n return Say(\n \"I don't know how to answer that.\", **self.dialogue_object_parameters\n )\n return GetMemoryHandler(speaker, prev_model_d, **self.dialogue_object_parameters)\n else:\n return GetMemoryHandler(speaker, d, **self.dialogue_object_parameters)\n else:\n raise ValueError(\"Bad dialogue_type={}\".format(d[\"dialogue_type\"]))",
"def interaction( self ) :\n\n return( self.__interaction )",
"def interact(self, antagonist):\n pass",
"def printDictIntents(self):\n result = \", \".join(str(value.tag) for key, value in self.dicIntents.items())\n self.ouput.exec('Las Intenciones del ChatBot \"'+self.name+'\" son:'+result)",
"def act(self, observation: Dict[int, np.ndarray]) -> Dict[int, np.ndarray]:\n actions = {}\n for player_id in observation:\n actions[player_id] = self.action_space.sample()\n return actions",
"def interact(self):\r\n pass",
"def interaction_responses(interaction, i):\n\n agent_eid = interaction[i].agent_eid\n\n # no agent, no response (REM: that's not right, but is given how we're creating interactions right now)\n if agent_eid is None: return []\n\n end = interaction[i].end_clock\n\n for j in range(i+1, len(interaction)):\n next = interaction[j]\n if next.start_clock <= end and next.behavior_target_id() == agent_eid:\n yield (interaction[j])",
"def __init__(self, **kwargs):\n Interaction.__init__(self, **kwargs)\n self._produces = [] # the resource(s) produced by this interaction\n self._consumes = [] # the resource(s) consumed by this interaction",
"def parse_test_interactions(test_file):\n has_action = re.compile(r\"^\\#\\$\\s(input|output|verify)\\=(.+$)\")\n interactions = {}\n with open(test_file, 'r') as file:\n for line_no, line in enumerate(file.readlines(), start=1):\n check_line = has_action.match(line)\n if check_line:\n # interaction key should be the line after the marker\n # so add 1 to the current line number\n interactions[(line_no + 1)] = {\"action\": check_line.group(1),\n \"value\": check_line.group(2)}\n else:\n if line.startswith(\"#$\"):\n exc_msg = [\n \"Improper interaction syntax on\",\n f\"line {line_no} in '{test_file}'\",\n ]\n raise SyntaxWarning(\" \".join(exc_msg))\n #print(interactions)\n return interactions",
"def decide_action(self):\t\t\t\t\t#defining the function to decide the action\n recognizer, audio = self.speech.listen_for_audio()\t\t#listening for the audio\n\n # received audio data, now we'll recognize it using Google Speech Recognition\n speech = self.speech.google_speech_recognition(recognizer, audio)\t#storing the speech into variable as a text\n\n if speech is not None:\t\t#if speech is not recognized\n try:\n req = requests.get('https://api.wit.ai/message?v=20160918&q=%s' % speech,\n headers={\"Authorization\": wit_ai_token})\t\t#getting the wit.ait token and checking it\n print req.text\t\t\t#printing the text\n json_responce = json.loads(req.text)\t\t#printing the responce\n entities = None\t\t\t#inititaling the entities\n intent = None\t\t\t#initialising the intent\n if 'entities' in json_responce and 'Intent' in json_responce['entities']:\t#checking the the intents and entitites\n entities = json_responce['entities']\t\t#entities \n intent = json_responce['entities']['Intent'][0][\"value\"]\t#intents \n\n print intent\t#printing the intents\n if intent == 'greeting':\t#checking the intent type\n self.__text_action(self.nlg.greet()) #getting the function of the intent\n elif intent == 'snow white':\t\t#checking the intent type\n self.__text_action(self.nlg.snow_white())\t\t#getting the function of the intent\n elif intent == 'weather':\t\t#checking the intent type\n self.__weather_action(entities)\t#getting the function of the intent\n elif intent == 'news':\t\t\t#checking the intent type\n self.__news_action()\t#getting the function of the intent\n elif intent == 'maps':\t\t\t#getting the function of the intent\n self.__maps_action(entities)\t\t#getting the function of the intent#checking the intent type\n elif intent == 'holidays':\t\t#getting the function of the intent#checking the intent type\n self.__holidays_action()\t\t\t#getting the function of the intent#checking the intent type\n elif intent == 'appearance':\t\t#getting the function of the intent#checking the intent type\n self.__appearance_action()\t\t#getting the function of the intent#checking the intent type\n elif intent == 'user status':\t\t#getting the function of the intent#checking the intent type\n self.__user_status_action(entities)\t\t#getting the function of the intent#checking the intent type\n elif intent == 'user name':\t\t\t#getting the function of the intent#checking the intent type\n self.__user_name_action()\t\t\t#getting the function of the intent#checking the intent type\n elif intent == 'personal status':\t\t#getting the function of the intent#checking the intent type\n self.__personal_status_action()\t\t#getting the function of the intent#checking the intent type\n elif intent == 'joke':\t\t\t#getting the function of the intent#checking the intent type\n self.__joke_action()\t\t#getting the function of the intent#checking the intent type\n elif intent == 'insult':\t\t#getting the function of the intent#checking the intent type\n self.__insult_action()\t#getting the function of the intent#checking the intent type\n return\t\t\t\t#retuning\n elif intent == 'appreciation':\t\t\t#getting the function of the intent#checking the intent type\n self.__appreciation_action()\t\t\t#getting the function of the intent#checking the intent type\n return\n elif intent == 'music':\t\t\t#getting the function of the intent#checking the intent type\n self.__music_action(music_file)\t\t#getting the function of the intent#checking the intent type\n elif intent == 'navigation':\t\t\t#getting the function of the 
intent#checking the intent type\n self.__navigate_action()\n elif intent == 'tasks':\n self.__calender_events()\n\t\telif intent == 'guide':\n self.__guide()\n elif intent == 'web':\n self.__web()\n elif intent == 'video':\n self.__video()\n else: # No recognized intent\n self.__text_action(\"I'm sorry, I don't know about this yet.\")\n return\n\n except Exception as e:\n print \"Failed wit !\"\t\t\t#error message\n print(e)\t\t\t#printing the error\n traceback.print_exc()\n self.__text_action(\"I'm sorry, I couldn't understand what you mean !!\") #printing message\n return\t\t\t\t\n\n self.decide_action()",
"def handle_interaction(request):\n\n payload = json.loads(request.POST['payload'])\n interaction_type = payload.get('type', None)\n\n # Handle shortcut\n if interaction_type == \"shortcut\":\n callback_id = payload.get('callback_id', None)\n if callback_id == \"tfed\":\n blocks = views.tfed_modal()\n modal_id = open_modal(payload.get('trigger_id', None), blocks)\n if modal_id:\n return HttpResponse()\n return HttpResponseServerError(\"Failed to open modal\")\n if interaction_type == \"message_action\":\n callback_id = payload.get('callback_id', None)\n if callback_id == \"report\":\n channel = payload.get('channel', {'id': None})['id']\n sender = payload['message'].get('user', None)\n if not sender:\n sender = payload['message']['username']\n ts = payload['message']['ts']\n text = payload['message']['text']\n message, created = models.SlackMessage.objects.get_or_create(posted_to=channel, posted_by=sender, ts=ts,\n content=text)\n blocks = views.report_message_modal(message)\n modal_id = open_modal(payload.get('trigger_id', None), blocks)\n if modal_id:\n return HttpResponse()\n return HttpResponseServerError(\"Failed to open modal\")\n\n # Handle modal view submission\n if interaction_type == \"view_submission\":\n values = payload['view']['state']['values']\n callback_id = payload['view'].get('callback_id', None)\n\n # TFed ticket submission\n if callback_id == \"tfed-modal\":\n subject = values['subject']['subject-action']['value']\n description = values['description']['description-action']['value']\n topic = values['rt_topic']['rt_topic-action']['selected_option']['value']\n user_id = payload['user']['id']\n user = user_profile(user_id)\n if user['ok']:\n __create_ticket(user, subject, description, topic)\n return HttpResponse()\n return HttpResponseServerError(\"Failed to obtain user information\")\n\n # Update TFed ticket\n elif callback_id == \"ticket-update-modal\":\n ticket_info = payload['view']['blocks'][1]\n owner_id = None\n if ticket_info['type'] != \"divider\":\n ticket_info = payload['view']['blocks'][2]\n owner_id = values['ticket_assignee']['ticket_assignee-action']['selected_user']\n ticket_id = ticket_info['block_id'].split(\"#\")[0]\n channel = ticket_info['block_id'].split(\"#\")[1]\n ts = ticket_info['block_id'].split(\"#\")[2]\n status = values['ticket_status']['ticket_status-action']['selected_option']\n if status:\n status = status['value']\n comments = values['ticket_comment']['ticket_comment-action']['value']\n checkboxes = values['email_requestor']['email_requestor-action']['selected_options']\n notify_requestor = False\n if len(checkboxes) > 0:\n notify_requestor = True\n\n # Obtain user's RT token\n user_id = payload['user']['id']\n token = __retrieve_rt_token(user_id)\n\n __update_ticket(ticket_id, status, owner_id, comments, notify_requestor, token, user_id, channel, ts)\n return HttpResponse()\n elif callback_id == \"ticket-comment-modal\":\n ticket_id = payload['view']['blocks'][0]['block_id']\n comments = values[ticket_id]['comment-action']['value']\n user_id = payload['user']['id']\n token = __retrieve_rt_token(user_id)\n __post_ticket_comment(ticket_id, user_id, comments, token)\n return HttpResponse()\n elif callback_id == \"report-modal\":\n message_id = payload['view']['blocks'][0]['block_id']\n comments = values['report-comment']['comment-action']['value']\n reporter = payload['user']['id']\n __save_report(message_id, reporter, comments)\n return HttpResponse()\n return HttpResponseNotFound()\n\n # Handle block interaction event\n if 
interaction_type == \"block_actions\":\n action = payload['actions'][0]['action_id']\n channel = payload.get('channel', None)\n if channel:\n channel = channel['id']\n message = payload.get('message', None)\n view = payload.get('view', None)\n\n # TFed message\n if channel in [settings.SLACK_TARGET_TFED, settings.SLACK_TARGET_TFED_DB] and message and not view:\n ticket_id = message['blocks'][0]['block_id'].split('~')[0]\n blocks = views.ticket_update_modal(ticket_id, channel, message['ts'], action)\n\n # Get current ticket from RT\n __refresh_ticket_async(channel, message)\n\n # Check that user has token, if not display a warning\n user_id = payload['user']['id']\n token = __retrieve_rt_token(user_id)\n if not token:\n error_message = \"Hi there! Before you can update tickets, you'll need to set up access to your RT \" \\\n \"account. Visit https://lnl.wpi.edu\" + reverse(\"support:link-account\") + \\\n \" to get started.\"\n post_ephemeral(channel, error_message, user_id, 'Request Tracker')\n return HttpResponse()\n\n modal_id = open_modal(payload.get('trigger_id', None), blocks)\n if modal_id:\n return HttpResponse()\n return HttpResponseServerError(\"Failed to open modal\")\n\n # Home tab menu options\n if action == \"home-ticket-update\":\n ticket_id = payload['actions'][0]['block_id']\n option = payload['actions'][0]['selected_option']['value']\n if option == 'Comment':\n blocks = views.ticket_comment_modal(ticket_id)\n modal_id = open_modal(payload.get('trigger_id', None), blocks)\n if not modal_id:\n return HttpResponseServerError(\"Failed to open modal\")\n return HttpResponse()\n return HttpResponseNotFound()",
"def clear_interactions(self):\n for key in self._memory.keys():\n self._memory[key].clear_interaction()",
"def __init__(self, constants, ontology):\n\n self.state = {}\n self.max_round = constants['run']['max_round_num']\n self.agent_possible_intents = constants['agent']['agent_actions']\n self.ontology = ontology\n # - start by setting all responses to default\n self.user_responses = dict((a,self._default_response) for a in self.agent_possible_intents)\n self.goal = None\n self.round = 0",
"async def _record_interaction(self, request_id: int) -> None:\n raise NotImplementedError()",
"async def dict(self, ctx, *keywords):\n\n if not keywords:\n embed = discord.Embed(title='{}:'.format(ctx.message.author.name),\n description='Did you tried `{}help dict` yet?'.format(self.config['prefix']),\n colour=0xf20006)\n a = await self.bot.say(embed=embed)\n await self.bot.add_reaction(a, self.emojiUnicode['error'])\n return\n if keywords:\n old_keyword = \" \".join(keywords)\n try:\n keywords = \"%20\".join(keywords)\n url = 'http://api.urbandictionary.com/v0/define?term={}'.format(keywords)\n async with aiohttp.ClientSession() as session:\n async with session.get(url) as response:\n source = await response.json(encoding='utf8')\n\n source = json.dumps(source, indent=2)\n result = json.loads(str(source))\n embed = discord.Embed(title='{}:'.format(ctx.message.author.name),\n description='Your search tag was:\\n***`{}`***'.format(old_keyword),\n colour=0xf20006)\n embed.add_field(name='Word:', value='`{}`'.format(result['list'][0]['word']), inline=False)\n embed.add_field(name='Definition:', value='```{}```'.format(result['list'][0]['definition']), inline=False)\n embed.add_field(name='example:', value='```{}```'.format(result['list'][0]['example']), inline=True)\n embed.add_field(name='Author:', value='`{}`'.format(result['list'][0]['author']), inline=False)\n embed.add_field(name='Link:', value='{}'.format(result['list'][0]['permalink']), inline=False)\n embed.add_field(name='Likes:', value='\\U0001f44d `{}`'.format(result['list'][0]['thumbs_up']),\n inline=True)\n embed.add_field(name='Dislikes:', value='\\U0001f44e `{}`'.format(result['list'][0]['thumbs_down']),\n inline=True)\n\n\n a = await self.bot.say(embed=embed)\n await self.bot.add_reaction(a, self.emojiUnicode['succes'])\n except Exception as e:\n embed = discord.Embed(title='{}:'.format(ctx.message.author.name),\n description='Your search tag was:\\n***`{}`***\\n\\nNothing found :sailboat:'.format(old_keyword, self.config['prefix']),\n colour=0xf20006)\n a = await self.bot.say(embed=embed)\n await self.bot.add_reaction(a, self.emojiUnicode['warning'])"
] | [
"0.62171793",
"0.6054251",
"0.59967774",
"0.5978557",
"0.59675455",
"0.59557086",
"0.5855327",
"0.5854367",
"0.5819042",
"0.580996",
"0.5787042",
"0.5772705",
"0.5753315",
"0.5635294",
"0.5614898",
"0.5597447",
"0.55236447",
"0.55106354",
"0.5420445",
"0.54100233",
"0.536872",
"0.53683084",
"0.5352438",
"0.53523916",
"0.53406173",
"0.53180206",
"0.52642035",
"0.52391726",
"0.5238236",
"0.52344084"
] | 0.731535 | 0 |
Returns the dict stored in self._interactions. Keys are all possible interactions/commands; values are the responses for those commands | def get_interactions(self):
return self._interactions | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def interactions(self) -> Sequence[Interaction[_C_out,_A_out]]:\n return self._interactions",
"def interactions(self) -> Sequence[Interaction[_C_out, Tuple[int,...]]]:\n return self._simulation.interactions",
"def interactions(self) -> Sequence[Interaction[_C_out,_A_out]]:\n return self._simulation.interactions",
"def results(self):\n return dict(self.commands)",
"def interactions(self) -> Sequence[Interaction[Context,Action]]:\n\n if self._simulation is not None:\n return self._simulation.interactions\n \n raise Exception(\"A JsonSimulation must be loaded before it can be used.\")",
"def commands(self) -> dict:\n return self._command_lookup",
"def interaction( self ) :\n\n return( self.__interaction )",
"def interactions(self) -> Sequence[Interaction[_C_out, _A_out]]:\n ...",
"def get_interaction(self) -> direction:\n return self.player.get_response()",
"def extract_commands(self):\n # import pdb; pdb.set_trace()\n left_i = 0\n right_i = 1\n commands = {}\n cmd = self.cmd\n\n if not cmd:\n return\n while left_i < len(cmd):\n sub_cmd = cmd[left_i:right_i]\n if sub_cmd in self.action_list:\n arg_len, arguments = self.extract_command_arguments(right_i)\n commands[sub_cmd] = arguments\n left_i = right_i + arg_len\n right_i = left_i + 1\n else:\n left_i, right_i = self.update_i(left_i, right_i)\n return commands",
"def get_dialogue_acts(self):\n return self.DAs",
"def get_commands(self, component_loads):\n return {}",
"def _commands(self) -> Dict[str, List[str]]:\r\n pass",
"def output(self):\n acts = []\n for i in self.actions:\n acts.append(i.output())\n return {\n \"delay\": self.delay,\n \"actions\": acts\n }",
"def get_commands(self):\r\n return self._commands",
"def get_commands(self):\n return list(self.commands.values())",
"def get_commands(self):\n\t\treturn list(self.command_handlers.keys())",
"def get_actions(self):\n return self.agent.get_actions()",
"def get_outputs(self):\n outputs = set()\n outputs.update(self.get_interaction().get_outputs())\n return outputs",
"def to_dict(self) -> BaseInteractionDict:\n return {\n 'id': self.id,\n 'name': self.name,\n 'description': self.description,\n 'answer_type': self.answer_type,\n 'display_mode': self.display_mode,\n 'is_terminal': self.is_terminal,\n 'is_trainable': self.is_trainable,\n 'is_linear': self.is_linear,\n 'needs_summary': self.needs_summary,\n 'customization_arg_specs': [{\n 'name': ca_spec.name,\n 'description': ca_spec.description,\n 'default_value': ca_spec.default_value,\n 'schema': ca_spec.schema,\n } for ca_spec in self.customization_arg_specs],\n 'instructions': self.instructions,\n 'narrow_instructions': self.narrow_instructions,\n 'default_outcome_heading': self.default_outcome_heading,\n 'rule_descriptions': self._rule_description_strings,\n 'can_have_solution': self.can_have_solution,\n 'show_generic_submit_button': self.show_generic_submit_button,\n }",
"def interaction_responses(interaction, i):\n\n agent_eid = interaction[i].agent_eid\n\n # no agent, no response (REM: that's not right, but is given how we're creating interactions right now)\n if agent_eid is None: return []\n\n end = interaction[i].end_clock\n\n for j in range(i+1, len(interaction)):\n next = interaction[j]\n if next.start_clock <= end and next.behavior_target_id() == agent_eid:\n yield (interaction[j])",
"def _get_actions(self):\n return self.__actions",
"def _get_actions(self):\n return self.__actions",
"def _get_actions(self):\n return self.__actions",
"def get_commands(self):\n return self._commands",
"def terminal_commands(self):\n return OrderedDict([\n ('query_commands', (['hi', 'how', 'hello'], self._query_commands)),\n ('control_stop', (['stop'], self._control_stop)),\n ('control_pause', (['pause'], self._control_pause)),\n ('control_play', (['start', 'play'], self._control_play)),\n ('query_info', (['who', 'what'], self._query_info)),\n ('control_forward', (['skip', 'next'], self._control_skip)),\n\n ])",
"def get_commands(self):\n return self.__commands",
"def commands(self) -> typing.List[str]:\n return self._values.get(\"commands\")",
"def actions(self):\n return self._actions",
"def get_command_handlers(self):\n\t\treturn self.command_handlers"
] | [
"0.692725",
"0.6895816",
"0.68038774",
"0.659962",
"0.64865965",
"0.62307435",
"0.61985147",
"0.6181229",
"0.6017866",
"0.59859645",
"0.5944773",
"0.58879167",
"0.58798057",
"0.58722335",
"0.5850495",
"0.5821322",
"0.5816931",
"0.57883894",
"0.57844925",
"0.57220525",
"0.57059324",
"0.5673624",
"0.5673624",
"0.5673624",
"0.5673039",
"0.56320614",
"0.56239",
"0.560053",
"0.5600408",
"0.55851936"
] | 0.7258197 | 0 |
Takes an interaction and a response and adds them to self._interactions as a key-value pair | def add_interaction(self, interaction: str, response: str):
if interaction not in self._interactions:
self._interactions[interaction] = response
else:
raise KeyAlreadyExists | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def propose_interaction(self, other: 'Guest', action: str):\n other.interaction_proposals.append((self, action))\n self.interaction_proposals.append((other, action))",
"def can_i_afford_intent_handler(handler_input):\n # type: (HandlerInput) -> Response\n\n slots = handler_input.request_envelope.request.intent.slots\n print(f\"Slots: {slots}\")\n purchase = slots['purchase'].value.lower()\n print(f\"purchase: {purchase}\")\n\n monzo = MonzoGetter(ACCESS_TOKEN)\n monthly_spend = monzo.get_monthly_spend_pounds()\n\n try:\n price = price_lookup_pounds[purchase]\n if price > (MONTHLY_BUDGET - monthly_spend):\n speech_text = f\"Sorry, you can't afford this. A {purchase} \" \\\n f\"costs about {price} pounds. You've already spent \" \\\n f\"{monthly_spend} pounds this month.\"\n else:\n remaining = MONTHLY_BUDGET - monthly_spend - price\n speech_text = f\"You can afford that. A {purchase} costs about \" \\\n f\"{price} pounds. If you buy it your remaining \" \\\n f\"monthly budget will be {remaining}\"\n except KeyError:\n # Just in case....\n speech_text = \"Sorry, we couldn't find a price for that product.\" \\\n f\"You have {MONTHLY_BUDGET - monthly_spend} pounds\" \\\n \" left to spend this month\"\n\n handler_input.response_builder.speak(speech_text).set_card(\n SimpleCard(\"Hello World\", speech_text)).set_should_end_session(\n False)\n return handler_input.response_builder.response",
"def set_interactions(self, interactions: dict):\n self._interactions = interactions",
"async def _record_interaction(self, request_id: int) -> None:\n raise NotImplementedError()",
"def new_skill_interaction(self, skill):\n self.skill_interact[skill] = True",
"def response(self, response: Dict) -> None:\n\n if \"satisfied\" in response and response[\"satisfied\"]:\n if not response[\"solution_index\"] and not response[\"solution_index\"] == 0:\n raise RPMException(\n \"If you are satisfied with one of the solutions, please specify the index of the \"\n \"solution as 'solution_index'.\"\n )\n if not (0 <= response[\"solution_index\"] <= self._f_current.shape[0]):\n msg = (\n \"Solution index must range from 0 to number of objectives - 1 '{}'. \" \"Given solution index: '{}.\"\n ).format(self._f_current.shape[0], response[\"solution_index\"])\n raise RPMException(msg)\n else:\n if \"reference_point\" not in response:\n raise RPMException(\"New reference point information missing. Please specify it as 'reference_point'.\")\n else:\n validate_reference_point(response[\"reference_point\"], self._ideal, self._nadir)\n\n self._response = response",
"async def _interaction(self, request: Request) -> web.Response:\n if (timestamp := request.headers.get(\"X-Signature-Timestamp\")) is None \\\n or (ed25519 := request.headers.get(\"X-Signature-Ed25519\")) is None:\n return web.Response(status=401, reason=\"Unauthorised\")\n\n try:\n self._verify_key.verify((timestamp + await request.text()).encode(), bytes.fromhex(ed25519))\n except BadSignatureError:\n return web.Response(status=401, reason=\"Unauthorised\")\n\n payload = await request.json()\n if payload.get('type') == 1:\n return web.Response(status=200, text=dumps({\"type\": 1}), content_type=\"application/json\")\n else:\n response = await self._handle(payload)\n return web.Response(status=200, text=response.json(), content_type=\"application/json\")",
"def AddFoodIntent_handler(handler_input):\n # type: (HandlerInput) -> Response\n\n logger.info(\"In AddFoodIntent\")\n\n handler_input.response_builder.set_should_end_session(True)\n\n if not check_reminders_permissions(handler_input):\n return handler_input.response_builder.response\n\n slots = handler_input.request_envelope.request.intent.slots\n food = slots[FOOD_SLOT].value\n date = slots[DATE_SLOT].value\n\n reminder_request = create_reminder_request(date, f'{food} scade oggi')\n if reminder_request == None:\n speech = \"Non posso mettere un promemoria con scadenza nel passato.\"\n return handler_input.response_builder.speak(speech).set_card(\n SimpleCard(\n \"Promemoria non creato per \", f'{food}.')).response\n \n api_client = handler_input.service_client_factory.get_reminder_management_service()\n #Create Reminder\n try:\n reminder_response = api_client.create_reminder(reminder_request)\n logger.info(f\"Created reminder : {reminder_response}\")\n except ServiceException as e:\n #Exception encountered : {\"code\":\"INVALID_BEARER_TOKEN\",\"message\":\"Invalid Bearer token\"}\n #Exception encountered : {'code': 'DEVICE_NOT_SUPPORTED',\n # 'message': 'Reminders are not supported on this device.'}\n logger.info(\"Exception encountered creating a Reminder: {}\".format(e.body))\n speech_text = \"Oops. Non ho potuto creare il promemoria.\"\n return handler_input.response_builder.speak(speech_text).set_card(\n SimpleCard(\n \"Reminder not created:\", str(e.body))).response\n\n attributesManager = handler_input.attributes_manager\n \n newdata = {\n \"object\": food,\n \"expiration\": date,\n \"token\": reminder_response.alert_token\n }\n try:\n saved_attr = attributesManager.persistent_attributes\n logger.info(saved_attr)\n if saved_attr == {}:\n newid = \"1\"\n else:\n maxid = max(saved_attr.keys())\n newid = str(int(maxid) + 1)\n except AttributesManagerException:\n logger.info(\"Persistent Adapter is not defined\")\n except:\n newid = \"1\"\n#TODO clean expired entries before saving\n#TODO develop an Interceptor to cache data\n#TODO Lettere accentate\n#TODO gestisci le scadenze come una lista il cui Item contiene sia l'oggetto\n##### sia la data di scadenza??\n newpersdata = { newid: newdata }\n logger.info(f'Newpersdata={newpersdata}')\n attributesManager.persistent_attributes = { **saved_attr, **newpersdata }\n attributesManager.save_persistent_attributes()\n\n speech = \"Ho messo un promemoria per \" + food + \" alle ore 13 del \" + date\n handler_input.response_builder.set_should_end_session(True)\n handler_input.response_builder.speak(speech).set_card(\n SimpleCard(\"Reminder created with id =\", reminder_response.alert_token))\n return handler_input.response_builder.response",
"def interact(parts, method):\n details = ACTIONS[method]\n command = details['command'] % (details[parts['action']], parts['attacker'])\n status, output = commands.getstatusoutput(command)\n activity[parts['attacker']] = (parts['action'], status,)\n \n return command, status, output",
"def __init__(self, **kwargs):\n Interaction.__init__(self, **kwargs)\n self._demands = None # the resource demanded by this interaction\n self._penalty = None # how to penalize for not meeting demand NOT IMPLEMENTED",
"def act(self, observation, reward, done):\r\n\r\n # Choosing action randomly in proportion with number of views.\r\n prob = self.organic_views / sum(self.organic_views)\r\n action = choice(self.config.num_products, p = prob)\r\n\r\n return {\r\n **super().act(observation, reward, done),\r\n **{\r\n 'a': action,\r\n 'ps': prob[action]\r\n }\r\n }",
"def housepredict(intent_request):\r\n \r\n \r\n location_zip = get_slots(intent_request)[\"location\"]\r\n housetype_zip = get_slots(intent_request)[\"housetype\"]\r\n source = intent_request['invocationSource']\r\n \r\n print('received request: ' + str(intent_request))\r\n print (\"housetype\",housetype_zip)\r\n print (\"location1\",location_zip)\r\n\r\n if source == 'DialogCodeHook':\r\n # Perform basic validation on the supplied input slots.\r\n # Use the elicitSlot dialog action to re-prompt for the first violation detected.\r\n slots = get_slots(intent_request)\r\n print('slots are' ,str(slots)) \r\n validation_result = validate_housepred(location_zip)\r\n if not validation_result['isValid']:\r\n slots[validation_result['violatedSlot']] = None\r\n return elicit_slot(intent_request['sessionAttributes'],\r\n intent_request['currentIntent']['name'],\r\n slots,\r\n validation_result['violatedSlot'],\r\n validation_result['message'])\r\n\t\t\r\n validation_result2 = validate_housepred_hstyp(housetype_zip)\r\n if not validation_result2['isValid']:\r\n slots[validation_result2['violatedSlot']] = None\r\n return elicit_slot(intent_request['sessionAttributes'],\r\n intent_request['currentIntent']['name'],\r\n slots,\r\n validation_result2['violatedSlot'],\r\n validation_result2['message'])\r\n\r\n # Pass the price of the flowers back through session attributes to be used in various prompts defined\r\n # on the bot model.\r\n output_session_attributes = intent_request['sessionAttributes'] if intent_request['sessionAttributes'] is not None else {}\r\n if location_zip is not None and housetype_zip is not None:\r\n output_session_attributes['Price'] = house_price_pred(location_zip,housetype_zip)#len(location_zip)*5#house_price_pred(location_zip,housetype_zip) \r\n #price = house_price_pred(location_zip,housetype_zip)# Elegant pricing model\r\n\t\t\t\r\n return delegate(output_session_attributes, get_slots(intent_request))\r\n\r\n # Order the flowers, and rely on the goodbye message of the bot to define the message to the end user.\r\n # In a real bot, this would likely involve a call to a backend service.\r\n print(intent_request['sessionAttributes']['Price']) \r\n return close(intent_request['sessionAttributes'],\r\n 'Fulfilled',\r\n {'contentType': 'PlainText',\r\n 'content': 'Approx. next year growth prediction for {hstyp} in {loc} is {prc}%'.format(hstyp=housetype_zip,loc=location_zip,prc=intent_request['sessionAttributes']['Price'])})",
"async def _capability_response(self, data):\n self.query_reply_data[PrivateConstants.CAPABILITY_RESPONSE] = data[1:-1]",
"async def _capability_response(self, data):\n self.query_reply_data[PrivateConstants.CAPABILITY_RESPONSE] = data[1:-1]",
"def action(self):\n # --- Ruled Based Test Policy ---\n # Stay still just send communication event\n if self.uid == 0:\n if random.choice(list(range(50))) == 1 and self.comm_count < self.comm_limit:\n action = 3\n action_param = {}\n self.comm_count += 1\n else:\n action = 1\n action_param = {\"ang_accel\": (0 * math.pi / 180), \"accel\": 0}\n return action, action_param\n\n # Others\n # If wall in vision, rotate\n vision_array = self.vision[1]\n if 1 in vision_array[0]:\n accel = -1 if self.speed > 0 else 0\n action = 1\n action_param = {\"ang_accel\": (random.randint(20, 45) * math.pi / 180), \"accel\": accel}\n\n # If hider in front, tag\n elif self.agt_class == 3 and 2 in vision_array[0] and vision_array[1][list(vision_array[0]).index(2)] < 60:\n action = 2\n action_param = {}\n\n # Randomly invoked communication event\n # elif random.choice(list(range(50))) == 1 and self.comm_count < self.comm_limit:\n # action = 3\n # action_param = {}\n # self.comm_count += 1\n\n # If communication received head towards nearest comm. agent for three steps\n elif len(self.comm) > 0:\n closest_agent = min(self.comm, key=lambda x: x[0])\n\n # Calculate target angle to the event sender\n target_angle = closest_agent[1] + self.angle\n target_angle = 2*math.pi + target_angle if target_angle < 0 else target_angle\n target_angle = target_angle - 2*math.pi if target_angle > 2*math.pi else target_angle\n\n # Add target angle to history such that the agent moves until it finds the target angle\n self.history.append(target_angle)\n direction = closest_agent[1]/abs(closest_agent[1])\n action = 1\n action_param = {\"ang_accel\": direction*math.pi/18, \"accel\": -1 if self.speed > 0 else 0}\n\n # If target angle not found, continue searching\n elif len(self.history) > 0:\n direction = self.history[-1]/abs(self.history[-1])\n action = 1\n action_param = {\"ang_accel\": direction*math.pi/18, \"accel\": -1 if self.speed > 0 else 0}\n if self.history[-1] - math.pi/9 < self.angle < self.history[-1] + math.pi/9:\n self.history.pop(-1)\n\n # When there isn't a special event, just move forward\n else:\n st_rate = self.stamina/self.max_stamina\n if st_rate > 0.75:\n accel = np.random.normal(3, 1, 1)\n elif st_rate > 0.4:\n accel = np.random.randint(-1, 3)\n else:\n accel = -1\n action = 1\n action_param = {\"ang_accel\": (0 * math.pi / 180), \"accel\": accel}\n\n return action, action_param",
"def on_response(self, ch, method, props, body):\n if self.corr_id == props.correlation_id:\n self.response = body",
"def save_response(self, key, response):\n self.responses[key] = response, datetime.now(timezone.utc)",
"def respond(self, request_id, response):\n response['rdf:type'] = self.response_type\n response['response_to'] = uri(request_id)\n\n LOG.debug(\n 'Responding to request {0} with {1}.'.format(request_id, response))\n\n response_triples = []\n for key, values in response.iteritems():\n if not isinstance(values, list):\n values = [values]\n for value in values:\n response_triples.append(Triple(bnode('id'), key, value))\n\n self.sc.insert(response_triples)",
"def handle_action_dict(self, speaker: str, d: Dict, chatstr: str) -> Optional[DialogueObject]:\n coref_resolve(self.agent.memory, d, chatstr)\n logging.info('ttad post-coref \"{}\" -> {}'.format(hash_user(speaker), d))\n\n if d[\"dialogue_type\"] == \"NOOP\":\n return Say(\"I don't know how to answer that.\", **self.dialogue_object_parameters)\n elif d[\"dialogue_type\"] == \"HUMAN_GIVE_COMMAND\":\n return Interpreter(speaker, d, **self.dialogue_object_parameters)\n elif d[\"dialogue_type\"] == \"PUT_MEMORY\":\n return PutMemoryHandler(speaker, d, **self.dialogue_object_parameters)\n elif d[\"dialogue_type\"] == \"GET_MEMORY\":\n logging.info(\"this model out: %r\" % (d))\n logging.info(\"querying previous model now\")\n if self.ttad_prev_model:\n prev_model_d = self.ttad(s=chatstr, model=self.ttad_prev_model, chat_as_list=True)\n logging.info(\"prev model out: %r\" % (prev_model_d))\n if (\n prev_model_d[\"dialogue_type\"] != \"GET_MEMORY\"\n ): # this happens sometimes when new model sayas its an Answer action but previous says noop\n return Say(\n \"I don't know how to answer that.\", **self.dialogue_object_parameters\n )\n return GetMemoryHandler(speaker, prev_model_d, **self.dialogue_object_parameters)\n else:\n return GetMemoryHandler(speaker, d, **self.dialogue_object_parameters)\n else:\n raise ValueError(\"Bad dialogue_type={}\".format(d[\"dialogue_type\"]))",
"def create_converse_responder(response, skill):\n default_converse = skill.converse\n converse_return = None\n\n def wait_for_new_converse():\n \"\"\"Wait until there is a new converse handler then send sentence.\n \"\"\"\n nonlocal converse_return\n start_time = time.monotonic()\n while time.monotonic() < start_time + 5:\n if skill.converse != default_converse:\n skill.converse([response])\n break\n\n time.sleep(0.1)\n\n return wait_for_new_converse",
"def _rewards(self, action: Action) -> Dict[Text, float]:\n raise NotImplementedError",
"def interact(self, antagonist):\n pass",
"def dispatch(intent_request):\r\n\r\n logger.debug('dispatch userId={}, intentName={}'.format(intent_request['userId'], intent_request['currentIntent']['name']))\r\n\r\n intent_name = intent_request['currentIntent']['name']\r\n\r\n # Dispatch to your bot's intent handlers\r\n if intent_name == 'gethousepredict':\r\n return housepredict(intent_request)\r\n elif intent_name == 'availablehouses':\r\n housetype = intent_request['currentIntent']['slots']['housetypesavail']\r\n location = intent_request['currentIntent']['slots']['locationavail']\r\n item_dtl = house_price_dtl(location,housetype)\r\n #print (\"housetype\",housetype)\r\n #print (\"location\",location)\r\n #print (\"House Pirce\",price)\r\n response = {\r\n \"dialogAction\": {\r\n \"type\": \"Close\",\r\n \"fulfillmentState\": \"Fulfilled\",\r\n \"message\": {\r\n \"contentType\": \"SSML\",\r\n \"content\": \" Hosue Details \\n {item_dtls}\".format(item_dtls = item_dtl)\r\n },\r\n }\r\n }\r\n print('result = ' + str(response))\r\n return response\r\n\r\n raise Exception('Intent with name ' + intent_name + ' not supported')",
"def visit_interaction(self, interaction):\n for opt in self.event_json['options']:\n opt_json = self.world_json[opt['id']]\n self._connect_option(interaction, opt_json)",
"def store_producer_decision_and_response(producer_decision_and_response):\n pass",
"def _reward(self, action):\n raise NotImplementedError",
"def _step(self, action):\n \n obs, reward, done, info = self.env.step(action)\n\n \n advice=self.generateAdvice()[1]\n\n obs = {\n \"image\": obs,\n \"advice\": advice\n }\n\n\n \n\n\n\n return obs, reward, done, info",
"def approve (self, response) :\n if 'event' in response and 'moderator' in response :\n eventId = response ['event']\n userId = response ['moderator']\n else :\n raise ModerationError (response)\n\n mod_status = 'OK'\n if 'status' in response :\n mod_status = response ['status']\n \n event = Event.object.get (id = eventId)\n approval = Approval (approved = event, moderatorId = userId, status = mod_status)\n approval.save ()\n self.editValues (event.answer, response)",
"def _oneInteraction(self):\n self.stepid += 1\n self.agent.integrateObservation(self.task.getObservation())\n self.task.performAction(self.agent.getJointAction())\n reward = self.task.getReward()\n self.agent.giveJointReward(reward)\n return reward",
"def interaction_type(self, interaction_type):\n\n self._interaction_type = interaction_type"
] | [
"0.6155525",
"0.5505451",
"0.54621994",
"0.5352942",
"0.53252554",
"0.521187",
"0.52045745",
"0.5198892",
"0.5178887",
"0.5170591",
"0.51653457",
"0.5150958",
"0.5149953",
"0.5149953",
"0.51460904",
"0.51315385",
"0.5109652",
"0.5102466",
"0.50457823",
"0.5038383",
"0.50301087",
"0.5017118",
"0.5016419",
"0.49843305",
"0.4982718",
"0.49819186",
"0.49808353",
"0.49083164",
"0.48979568",
"0.48829326"
] | 0.72816384 | 0 |
Takes an interaction and removes it and the response from self._interactions | def remove_interaction(self, interaction: str):
if interaction in self._interactions:
self._interactions.pop(interaction)
else:
raise KeyDoesNotExist | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clear_interactions(self):\n for key in self._memory.keys():\n self._memory[key].clear_interaction()",
"def add_interaction(self, interaction: str, response: str):\n if interaction not in self._interactions:\n self._interactions[interaction] = response\n else:\n raise KeyAlreadyExists",
"async def disableinteraction(self, ctx, interaction=None):\n interaction_msg = f\"> **The available interactions are {', '.join(self.ex.cache.interaction_list)}.**\"\n if not interaction:\n return await ctx.send(interaction_msg)\n interaction = interaction.lower()\n if interaction not in self.ex.cache.interaction_list:\n return await ctx.send(f\"> **That is not an interaction.**\\n{interaction_msg}\")\n if not await self.ex.check_interaction_enabled(server_id=ctx.guild.id, interaction=interaction):\n # enable it\n await self.ex.u_miscellaneous.enable_interaction(ctx.guild.id, interaction)\n await ctx.send(f\"> **{interaction} has been enabled in this server.**\")\n else:\n # disable it\n await self.ex.u_miscellaneous.disable_interaction(ctx.guild.id, interaction)\n await ctx.send(f\"> **{interaction} has been disabled in this server.**\")",
"def interaction(self) -> None:\n if isinstance(self.target, ob.Void):\n self.interaction_void()\n elif isinstance(self.target, ob.Door):\n self.interaction_door()\n elif isinstance(self.target, ob.Hole):\n self.interaction_hole()\n elif isinstance(self.target, ob.Box):\n self.interaction_box()\n elif isinstance(self.target, ob.TurnstileBloc):\n self.interaction_turnstile()",
"def removeBehavior(self, behavior):\n if not self.proxy:\n self.proxy = self.session.service(\"ALBehaviorManager\")\n return self.proxy.removeBehavior(behavior)",
"def interaction( self ) :\n\n return( self.__interaction )",
"def interaction_void(self) -> None:\n self.grid.obj_list.swap_obj(self.moving_character, self.target)",
"def remove_from_hand(self):\n pass",
"def _patch(self, interaction: JSON) -> NoReturn:\n self._id = interaction['id']\n\n self._data.patch_dpy(interaction) # Update data.",
"def remove_input(self, action):\n if action in self.inputs:\n idx = self.inputs.index(action)\n return self.inputs.pop(idx)\n return",
"def unbind_receive_response(self, msg_tag):\n if self._callable:\n del self._response_bindings[msg_tag]\n else:\n raise SAMPClientError(\"Client not callable.\")",
"def remove_action(self, action):\n if action not in self.actions:\n return\n old_actions = self.actions\n self.actions = []\n if action in old_actions:\n for a in old_actions:\n if a != action:\n self.actions += [a]\n\n if not self.actions:\n self.actions = Action.actor_idle",
"def set_interactions(self, interactions: dict):\n self._interactions = interactions",
"def unset_running_behavior(self, behavior: Behavior) -> None:",
"async def callback(self, interaction: Interaction) -> None:\n if isinstance(interaction.user, discord.User):\n log.trace(\"User %s is not a member\", interaction.user)\n await interaction.message.delete()\n self.view.stop()\n return\n\n await members.handle_role_change(\n interaction.user,\n interaction.user.remove_roles if self.assigned else interaction.user.add_roles,\n discord.Object(self.role.role_id),\n )\n\n self.assigned = not self.assigned\n try:\n await self.update_view(interaction)\n send_function = interaction.followup.send\n except discord.NotFound:\n log.debug(\"Subscribe message for %s removed before buttons could be updated\", interaction.user)\n self.view.stop()\n send_function = interaction.response.send_message\n\n await send_function(\n self.LABEL_FORMAT.format(action=\"Added\" if self.assigned else \"Removed\", role_name=self.role.name),\n ephemeral=True,\n )",
"def interaction_type(self, interaction_type):\n\n self._interaction_type = interaction_type",
"def clear_mission(self):\n cmds = self.vehicle.commands\n self.vehicle.commands.clear()\n self.vehicle.flush()\n\n # After clearing the mission, we MUST re-download the mission from the \n # vehicle before vehicle.commands can be used again.\n # See https://github.com/dronekit/dronekit-python/issues/230 for \n # reasoning.\n self.download_mission()",
"async def _record_interaction(self, request_id: int) -> None:\n raise NotImplementedError()",
"def removeReaction(self, *args):\n return _libsbml.Model_removeReaction(self, *args)",
"def interact(self):\r\n pass",
"def interact(self, antagonist):\n pass",
"async def fox(self, interaction: Interaction):\n await post_random_animal_command(interaction)",
"def remove_tactic(self):\n tactic_removed = input(\"Enter a tactic to be removed: \")\n self.proof.tactics.remove(tactic_removed)\n for gene in self.population:\n gene.chromosome = [e for e in gene.chromosome if e != tactic_removed]",
"def removeBehavior(portal, behavior, name):\n fti = getUtility(IDexterityFTI, name=name)\n\n behaviors = list(fti.behaviors)\n\n if behavior in behaviors:\n behaviors.remove(behavior)\n fti.behaviors = behaviors",
"def propose_interaction(self, other: 'Guest', action: str):\n other.interaction_proposals.append((self, action))\n self.interaction_proposals.append((other, action))",
"def get_interaction(self) -> direction:\n return self.player.get_response()",
"def remove(self, *args):\n return _libsbml.ListOfReactions_remove(self, *args)",
"def new_skill_interaction(self, skill):\n self.skill_interact[skill] = True",
"async def clear(self, itx: discord.Interaction, /) -> None:\n\n if TYPE_CHECKING:\n assert itx.guild is not None\n\n async with Session.begin() as session:\n if (\n guild_prefs := await GuildPref.for_guild(session, itx.guild)\n ) is not None:\n await session.delete(guild_prefs)\n attribute_id = 'deleted'\n else:\n attribute_id = 'already-deleted'\n\n await utils.send_embed(\n itx,\n description=self.localizer.format(\n f'clear.{attribute_id}', locale=itx.locale\n ),\n ephemeral=True,\n )",
"def __del__(self):\n self.uninstall_handle_input()"
] | [
"0.5966735",
"0.59624356",
"0.5866722",
"0.57075024",
"0.55454844",
"0.5524417",
"0.54974705",
"0.5476416",
"0.54251266",
"0.53514576",
"0.5295323",
"0.5274241",
"0.52559656",
"0.51991886",
"0.51894754",
"0.5179845",
"0.5179359",
"0.5135641",
"0.51273286",
"0.5122307",
"0.5068839",
"0.5062308",
"0.5046273",
"0.50265276",
"0.50171256",
"0.50010747",
"0.49986166",
"0.49978366",
"0.49845794",
"0.49596387"
] | 0.77598727 | 0 |
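For illustration, a minimal self-contained sketch of the add/remove interaction pattern described by the query and positive document above. The exception names KeyDoesNotExist and KeyAlreadyExists are assumed stand-ins for whatever the original codebase defines.

class KeyDoesNotExist(KeyError):
    """Assumed stand-in for the project's missing-key exception."""

class KeyAlreadyExists(KeyError):
    """Assumed stand-in for the project's duplicate-key exception."""

class InteractionStore:
    def __init__(self):
        self._interactions = {}  # maps interaction text -> response text

    def add_interaction(self, interaction: str, response: str):
        if interaction in self._interactions:
            raise KeyAlreadyExists(interaction)
        self._interactions[interaction] = response

    def remove_interaction(self, interaction: str):
        # Removes both the interaction and its stored response.
        if interaction not in self._interactions:
            raise KeyDoesNotExist(interaction)
        self._interactions.pop(interaction)

store = InteractionStore()
store.add_interaction("hello", "Hi there!")
store.remove_interaction("hello")    # succeeds
# store.remove_interaction("hello")  # would raise KeyDoesNotExist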
ssh_deploy.main(chain, local_path, remote_path, action='check', files_upload=None, ignore_patterns=None, files_download=None, | def main():
args = cli()
title = ' [%s] ***' % PROG
print('*' * (80 - len(title)) + title)
print(' Remote Hosts : %s' % (' -> '.join(args.hosts)))
print(' Local Path : %s' % args.local)
print(' Remote Path : %s' % args.remote)
print(' Upload Files : %s' % args.upload_files)
print('Download Files : %s' % args.download_files)
print(' Action : %s' % args.action)
print(' Ignored Dirs : %s' % args.ignore_dirs)
print(' Ignored Files : %s' % args.ignore_files)
print('*' * 80)
if args.test:
return
if args.ignore_dirs:
not_match_dir = '(.*/)?(%s)/.*' % ('|'.join([re.escape(i) for i in args.ignore_dirs]))
else:
not_match_dir = None
if args.ignore_files:
not_match_file = '.*/(%s)' % ('|'.join([re.escape(i) for i in args.ignore_files]))
else:
not_match_file = None
not_match = '(%s)' % ('|'.join(['(%s)' % i for i in [not_match_dir, not_match_file, args.ignore] if i]))
print('Ignore: %r' % not_match)
chain = build_chain(args.hosts)
try:
ignore_patterns = []
ssh_deploy.main(chain, args.local, args.remote, action=args.action,
files_upload=args.upload_files, ignore_patterns=ignore_patterns,
files_download=args.download_files,
not_match=not_match)
except Exception as error:
LOG.exception('Uncaught Exception: %s', error)
finally:
chain.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def deploy():",
"def deploy():\n myfile = do_pack()\n if myfile is None:\n return False\n return do_deploy(myfile)",
"def deploy():\n archive_path = do_pack()\n if archive_path is None:\n print(\"pass\")\n return False\n return do_deploy(archive_path)",
"def deploy():\n filepath = do_pack()\n if (filepath is None):\n return False\n return do_deploy(filepath)",
"def run():\n\n parser = OptionParser()\n parser.add_option(\"-d\", \"--dir\", dest=\"dir\", help=\"The app local directory\")\n parser.add_option(\"-r\", \"--remote_dir\", dest=\"remote_dir\", help=\"The app remote directory\")\n parser.add_option(\"-n\", \"--name\", dest=\"name\", help=\"The django app name\")\n parser.add_option(\"-f\", \"--full\", help=\"Provision before deploy\", default=False)\n parser.add_option(\"-o\", \"--no_files\", help=\"Don't copy the app files\", default=False)\n\n (options, args) = parser.parse_args()\n\n execute(deploy, **options.__dict__)",
"def deploy():\n with cd(\"~/public_html/\"):\n run(\"/usr/local/cpanel/3rdparty/bin/git pull\")\n\n with cd(\"~/public_html/skin/frontend/gemz/default/tools/\"):\n run(\"grunt default\")\n #sudo(\"/scripts/enablefileprotect\")",
"def deploy():\n remote_dir = os.path.abspath(os.path.join(REMOTE_BASE_DIR, REPO_NAME))\n \n with settings(warn_only=True):\n if run(\"test -d %s\" % (remote_dir)).failed:\n puts(red(\"[Repo %s does not exist on remote at: %s]\" % (REPO_NAME, remote_dir)))\n with cd(REMOTE_BASE_DIR):\n run(\"git clone %s %s\" % (REPO_URL, REPO_NAME))\n\n puts(yellow(\"[Write logs]\"))\n run(\"echo '-----------------------------' > %s\" % REMOTE_ERR_FILE)\n run(\"echo `date` >> %s\" % REMOTE_ERR_FILE)\n run(\"echo '-----------------------------' >> %s\" % REMOTE_ERR_FILE)\n run(\"echo '-----------------------------' > %s\" % REMOTE_LOG_FILE)\n run(\"echo `date` >> %s\" % REMOTE_LOG_FILE)\n run(\"echo '-----------------------------' >> %s\" % REMOTE_LOG_FILE)\n\n puts(yellow(\"[Update repo: %s]\" % REPO_NAME))\n with cd(remote_dir):\n run(\"git pull origin master >> %s 2>> %s\" %\n (REMOTE_LOG_FILE, REMOTE_ERR_FILE))\n\n # reminder new static files\n puts(yellow('Do not forget to run collect staticfiles on DJANGO server.'))",
"def deploy(local_directory, remote_url, filelist=None):\n # ensure destination directory exist before deploying data\n if not (remote_url.host and remote_url.remote_directory):\n message = \"Remote URL is invalid; host and remote directory must be specified\"\n raise Exception(message)\n user = \"%s@\" % remote_url.user if remote_url.user else \"\"\n cmd = [\"ssh\"]\n if remote_url.port:\n cmd.extend([\"-p\", str(remote_url.port)])\n cmd.extend([\"%s%s\" % (user, remote_url.host)])\n cmd.extend([\"mkdir\", \"-p\", remote_url.remote_directory])\n qisys.command.call(cmd)\n # This is required for rsync to do the right thing,\n # otherwise the basename of local_directory gets\n # created\n local_directory = local_directory + \"/.\"\n cmd = [\"rsync\",\n \"--recursive\",\n \"--links\",\n \"--perms\",\n \"--times\",\n \"--specials\",\n \"--progress\", # print a progress bar\n \"--checksum\", # verify checksum instead of size and date\n \"--exclude=.debug/\"]\n if remote_url.port:\n cmd.extend([\"-e\", \"ssh -p %d\" % remote_url.port])\n if filelist:\n cmd.append(\"--files-from=%s\" % filelist)\n cmd.append(local_directory)\n cmd.append(\"%s%s:%s\" % (user, remote_url.host, remote_url.remote_directory))\n qisys.command.call(cmd)",
"def deploy(ctx):\n click.echo('deploying')\n ctx.deploy()\n click.echo('done')",
"def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"host\", type=str, nargs=\"+\")\n parser.add_argument(\"--user\", type=str, default=getpass.getuser())\n parser.add_argument(\"--path\", type=str, required=True)\n parser.add_argument(\"--keep\", type=int, default=3)\n parser.add_argument(\"--deployuser\", type=str, default=None)\n parser.add_argument(\"--postcmd\", type=str, default=None)\n\n args = parser.parse_args()\n if args.host is None:\n parser.print_usage()\n sys.exit(1)\n\n if args.deployuser is None:\n args.deployuser = args.user\n\n init(autoreset=True)\n deploy(args)",
"def deploy():\n return do_deploy(do_pack())",
"def deploy():\n return do_deploy(do_pack())",
"def deploy():\n return do_deploy(do_pack())",
"def deploy():\n archive_path = do_pack()\n if archive_path is False:\n return false\n\n deploy_return = do_deploy(archive_path)\n return deploy_return",
"def deploy(parameters):\n\n print(\"In deploy module\")",
"def _deploy_app():\n rsync_project(env.remote_directory, env.local_directory,\n exclude=['.git/', '*.pyc', 'tests.py', 'migrations/'])\n sudo('service installer_app restart')",
"def deploy():\n\n archive_path = do_pack()\n\n if archive_path is None:\n return False\n\n return do_deploy(archive_path)",
"def deploy():\n _confirm_branch()\n \n require('settings', provided_by=[production, staging])\n require('branch', provided_by=[stable, master, branch])\n \n with settings(warn_only=True):\n maintenance_up()\n \n checkout_latest()\n gzip_assets()\n deploy_to_s3()\n maintenance_down()",
"def main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--dryrun', '-n', action='store_true',\n help=('check TileDB/SSH files differences only, '\n 'does not perform any copy'))\n parser.add_argument('--debug', '-d', action='store_true',\n help=('prints debug messages'))\n parser.add_argument('--tdmq-url', action='store', type=str, required=True,\n dest='tdmq_url',\n help=('tdmq server and path of the form'))\n parser.add_argument('--tdmq-auth-token', action='store', type=str, required=True,\n dest='tdmq_auth_token',\n help=('tdmq server authorization token'))\n parser.add_argument('--ssh-url', action='store', type=str, required=True,\n dest='ssh_url',\n help=(\n 'ssh server and path of the form: '\n '<USER>@<NAME_NODE>:<PORT>/PATH'))\n parser.add_argument('--ssh-key', action='store', type=str, required=True,\n dest='ssh_key',\n help=('key for ssh server authentication'))\n parser.add_argument('--desc-file', action='store', type=str, required=True,\n dest='source_desc_file',\n help=('source descrption file'))\n\n # Only one of --hours and --sync can be provided on command line\n sync_group = parser.add_mutually_exclusive_group()\n sync_group.add_argument('--hours', action='store',\n dest='hours', default=24, type=int,\n help=('uploads only the radar images '\n 'more recent than the given number of hours'))\n sync_group.add_argument('--sync', '-s', action='store_true',\n dest='sync',\n help=('upload all the missing radar images'))\n\n args = parser.parse_args()\n\n # If the debug flag is set, print all messages\n if args.debug:\n logging.basicConfig(\n level=logging.DEBUG,\n format='[%(levelname)s] %(message)s')\n else:\n logging.basicConfig(\n level=logging.INFO,\n format='[%(levelname)s] %(message)s')\n\n logging.getLogger(\"paramiko\").setLevel(logging.WARNING)\n\n (_ssh_username, _ssh_hostname, _ssh_port,\n _ssh_root) = check_ssh_url(args.ssh_url)\n if _ssh_hostname is None:\n logging.error(\n 'Wrong, incomplete or absent SSH path: \\'%s\\'', args.ssh_url)\n sys.exit(1)\n\n if os.path.isfile(args.ssh_key) == False:\n logging.error(\n 'SSH key file not found: \\'%s\\'', args.ssh_key)\n sys.exit(1)\n\n if os.path.isfile(args.source_desc_file) == False:\n logging.error(\n 'Source description file not found: \\'%s\\'', args.source_desc_file)\n sys.exit(1)\n\n _source_desc = load_description(args.source_desc_file)\n\n ssh_client = SSHClient(\n username=_ssh_username,\n hostname=_ssh_hostname,\n port=_ssh_port,\n key_file=args.ssh_key,\n root_dir=_ssh_root\n )\n\n _folder_list = ssh_client.list_folder()\n\n def _name_filter(file_name):\n # Is a radar image file\n if re.match(r'cag01est2400\\d{4}-\\d{2}-\\d{2}_\\d{2}:\\d{2}:\\d{2}.png', file_name):\n return True\n else:\n return False\n\n # Filter out not image files\n _image_list = list(filter(_name_filter, _folder_list))\n\n # Instantiates a TDMQ client, retrieves the source if exists or registers a\n # new one\n tdmq_client = Client(args.tdmq_url, args.tdmq_auth_token)\n sources = tdmq_client.find_sources({'id': _source_desc['id']})\n if len(sources) > 0:\n assert len(sources) == 1\n source = sources[0]\n logging.info(f\"Using source {source.tdmq_id} for {source.id}.\")\n else:\n source = tdmq_client.register_source(_source_desc)\n logging.info(f\"Created source {source.tdmq_id} for {source.id}.\")\n\n try:\n ts = source.timeseries()\n times = ts.time\n last_image_time = max(sorted(times))\n _last_slot = max(ts.tiledb_indices)\n except Exception as ex: # FIXME too general\n times = []\n last_image_time = 
datetime.datetime(1970, 1, 1, 0, 0, 0)\n _last_slot = 0\n\n # Builds the list of file to download\n if args.sync:\n _images_to_ingest = ingest_missings(_image_list, times)\n else:\n start_time = (\n datetime.datetime.now() - datetime.timedelta(hours=args.hours)\n ).replace( minute=0, second=0, microsecond=0)\n\n logging.info(f\"Requested images from {start_time} (last local image is {last_image_time}).\")\n if start_time > last_image_time:\n last_image_time = start_time\n\n _images_to_ingest = ingest_latests(last_image_time, _image_list)\n\n logging.info(\n f\"Remote files: {len(_folder_list)}, remote images: \"\n f\"{len(_image_list)}, images to sync: {len(_images_to_ingest)}.\")\n\n for _image in _images_to_ingest:\n _timestamp = datetime.datetime.strptime(\n _image, 'cag01est2400%Y-%m-%d_%H:%M:%S.png')\n _last_slot = _last_slot + 1\n\n if args.dryrun:\n logging.debug(f\"[DRY-RUN] Ingesting data at time {_timestamp}, slot {_last_slot}.\")\n else:\n logging.debug(f\"Ingesting data at time {_timestamp}, slot {_last_slot}.\")\n _data = fetch_radar_data(ssh_client, _image)\n source.ingest(_timestamp, _data, _last_slot)\n logging.info(f\"Done ingesting.\")",
"def deploy():\n archive_path = do_pack()\n\n if not archive_path:\n return False\n\n return do_deploy(archive_path)",
"def deploy(self, topology):\n print \"ABC - Deployer.deploy()\"",
"def deploy():\n new_archive = do_pack()\n\n if new_archive is None:\n return False\n\n res = do_deploy(new_archive)\n return res",
"def deploy():\n comp = do_pack()\n\n if (not comp):\n return False\n return do_deploy(comp)",
"def deploy(\n context, instance, user=get_local_user(), initial=False, stack=None, branch=BRANCH,\n):\n remote = True\n\n if initial:\n clone(context, instance, user, branch)\n else:\n backup(context, user, remote, instance, stack)\n\n update(context, user, remote, instance, branch)\n up(context, user, remote, instance, stack)",
"def do_deploy(archive_path):\n if path.exists(archive_path):\n\n # File name without .tgz\n file_ext = archive_path.split('/')[1]\n file_alone = file_ext.split(\".\")[0]\n curr_release = \"/data/web_static/releases/\" + file_alone + '/'\n\n result = True\n\n # Deploy compressed file to the server /tmp/ directory\n upload = put(archive_path, \"/tmp/\")\n if upload.failed:\n result = False\n\n # Make dir to store the release\n dir_release = run(\"sudo mkdir -p \" + curr_release)\n if dir_release.failed:\n result = False\n\n # Uncompress file inside the folder created\n uncompress = run(\"sudo tar -xzf \" + \"/tmp/\\\n\" + file_ext + \" -C \" + curr_release)\n if uncompress.failed:\n result = False\n\n # Move all files from web_static to folder release\n move_info = run(\"sudo mv \" + curr_release + \"\\\nweb_static/* \" + curr_release)\n if move_info.failed:\n result = False\n\n # Remove empty web_static directory\n rm_empty = run(\"sudo rm -rf \" + curr_release + \"\\\nweb_static/\")\n if rm_empty.failed:\n result = False\n\n # Remove symbolic link current\n rm_link = run(\"sudo rm -rf /data/\\\nweb_static/current\")\n if rm_link.failed:\n result = False\n\n # Make new symbolic link\n new_link = run(\"sudo ln -s \" + curr_release + \" /data/\\\nweb_static/current\")\n if new_link.failed:\n result = False\n\n return result\n else:\n return False",
"def deploy():\n build()\n copy()\n install()",
"def deploy():\n build()\n collect()\n commit()\n push()",
"def deploy_from_local():\n # don't die if tests fail\n # with settings(warn_only=True):\n # run_tests()\n # defaults rsync options:\n # -pthrvz\n # -p preserve permissions\n # -t preserve times\n # -h output numbers in a human-readable format\n # -r recurse into directories\n # -v increase verbosity\n # -z compress file data during the transfer\n extra_opts = '--omit-dir-times -e \"ssh\"' # -p 80\"'\n project.rsync_project(\n env.code_root,\n local_dir=env.local_dir,\n exclude=RSYNC_EXCLUDE,\n delete=True,\n extra_opts=extra_opts,\n )\n touch()\n restart_route()",
"def deploy(config, args):\n log = logging.getLogger('kraftwerk.deploy')\n \n # TODO better way to detect new, or maybe move to dedicated command\n stdout, stderr = args.node.ssh('stat /var/service/%s' % args.project.name, pipe=True)\n new = bool(stderr) or args.override\n \n # Sync codebase over with the web user\n destination = 'web@%s:/web/%s/' % (args.node.hostname, args.project.name)\n stdout, stderr = args.project.rsync(destination)\n if stderr:\n log.error(\"Sync error: %s\" % stderr)\n sys.exit(stderr)\n \n # Copy requirements\n args.project.copy(args.node, 'requirements.txt')\n \n # Put together the setup script\n cmd = config.template(\"scripts/project_setup.sh\", \n project=args.project, new=new, \n upgrade_packages=args.upgrade_packages)\n stdout, stderr = args.node.ssh(cmd, pipe=True)\n if stderr:\n print stderr\n \n # TODO detect new services\n if not args.no_service_setup and new:\n for service in args.project.services():\n args.node.ssh(service.setup_script)\n \n print u\"%s live at %r\" % (args.project.canonical_domain(), args.node.hostname)",
"def deploy():\n git_pull()\n# build_virtualenv()\n# collectstatic()\n migrate()\n# reload_gunicorn()\n# restart_celery()\n puts(green(\"Deployment done!\"))"
] | [
"0.6592391",
"0.6451214",
"0.6343521",
"0.63425916",
"0.62953395",
"0.628891",
"0.6167329",
"0.6156592",
"0.6116155",
"0.6102239",
"0.6097342",
"0.6097342",
"0.6097342",
"0.6084119",
"0.6065058",
"0.60215795",
"0.59963775",
"0.5987958",
"0.5984369",
"0.59840107",
"0.5972111",
"0.59473836",
"0.59349316",
"0.59160525",
"0.5905346",
"0.587397",
"0.58497816",
"0.58429116",
"0.5801697",
"0.5799098"
] | 0.7633964 | 0 |
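For illustration, a small sketch of how the not_match ignore pattern in the positive document above is assembled; the directory and file names below are hypothetical examples, not values from the corpus.

import re

ignore_dirs = [".git", "__pycache__"]   # hypothetical --ignore-dirs values
ignore_files = [".DS_Store"]            # hypothetical --ignore-files values
extra_ignore = None                     # hypothetical --ignore value

not_match_dir = '(.*/)?(%s)/.*' % '|'.join(re.escape(d) for d in ignore_dirs)
not_match_file = '.*/(%s)' % '|'.join(re.escape(f) for f in ignore_files)
parts = [p for p in (not_match_dir, not_match_file, extra_ignore) if p]
not_match = '(%s)' % '|'.join('(%s)' % p for p in parts)

# Paths matching not_match are excluded from the upload/download.
assert re.match(not_match, "src/__pycache__/mod.pyc")
assert re.match(not_match, "docs/.DS_Store")
assert not re.match(not_match, "src/main.py")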
Benchmarks every callable passed in the arguments, calling each with the remaining arguments (all functions must accept these remaining positional and named arguments). Takes "inNumber" as a hidden named argument for the number of calls (bigger values, around 100, are more accurate but of course take longer...) | def timeThem(*args, **kwargs):
funcs = []
funcArgs = list(args[:])
#filter arguments
for arg in args:
if callable(arg):
funcs.append(arg)
funcArgs.remove(arg)
key = "inNumber"
inNumber=10
if key in kwargs:
inNumber = kwargs[key]
del kwargs[key]
durations = []
refTime = 0.0
for func in funcs:
retVal = func(*funcArgs, **kwargs)
duration = timeit(partial(func, *funcArgs, **kwargs), number=inNumber)
comparison = ""
if refTime <= 0.0:
refTime = duration
else:
comparison = " ( *{:.2f})".format(duration / refTime)
print("{: <16} : {:.4f}".format(func.__name__, duration) + comparison + " returns '{}' ({})".format(retVal, type(retVal).__name__))
durations.append(duration)
return durations | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def benchmark(trials:int):\n def benchmark_method(function:Callable[[int],int]) -> Callable[[int],Tuple[float,str]]:\n def time_wrapper(*args) -> Tuple[float,str]:\n \"\"\" Return the time taken to run a fibonacci method in microseconds \"\"\"\n t1 = time.time()\n for _ in range(trials):\n function(*args)\n return ((time.time()-t1)/trials) * 1e6, function.__name__\n return time_wrapper\n return benchmark_method",
"def measure_func(func, args, number=1):\n f = partial(func, *args) # pylint: disable=W0142\n while True:\n start = timer()\n r = timeit.repeat(f, number=number, repeat=1)\n if timer() - start > 1: # at least 1 second per measurement\n break\n number *= 2\n return min(r + timeit.repeat(f, number=number, repeat=2)) / number",
"def benchmarkFunc(iter, args=()):\n def decorator(func):\n benchmarkFuncs.append((func, args, iter))\n return func\n return decorator",
"def benchmarkNFunc(iter, ns):\n def decorator(func):\n for n in ns:\n benchmarkFuncs.append((func, (n,), iter))\n return func\n return decorator",
"def test_named_args(self):\n cfunc = njit(f)\n self.assertEqual(cfunc(1, 2, 3), f(1, 2, 3))\n self.assertEqual(cfunc(1, y=2, z=3), f(1, 2, 3))",
"def timeit_compare(funcs, inputs, setups='pass', **kwargs):\n number = kwargs.get('number', 100000)\n print_conditions = kwargs.get('print_conditions', False)\n performance = defaultdict(list)\n if isinstance(setups, list): \n # user specifies their own list of setups corresponding to funcs\n pass\n elif setups == 'pass':\n # specify no setups for built-in functions like join\n setups = ['pass' for f in funcs]\n elif setups == 'main': \n # uniformly import all setups from the local environment\n fnames = [f[:f.find(\"(\")] for f in funcs]\n setups = [\"from __main__ import \" + fname for fname in fnames]\n \n # convert the input ranges to a set of conditions\n conditions = get_conditions(inputs)\n if print_conditions: \n print \"conditions: \" + conditions\n \n def timer(func, value, setup):\n return timeit.Timer(func.format(*value), setup=setup)\n\n for i, f in enumerate(funcs):\n print \"testing \" + f + \"...\"\n for value in conditions:\n test = timer(f, value, setups[i])\n result = test.timeit(number=number)\n performance[f].append(list(value) + [result])\n return performance",
"def timer(*args, **kwargs):\r\n\r\n accepted_args = ['n']\r\n\r\n for argument in kwargs.keys():\r\n if argument not in accepted_args:\r\n raise AttributeError(\r\n '{} not accepted argument: please see documentation for allowed arguments'.format(argument))\r\n\r\n def make_wrapper(func):\r\n\r\n @wraps(func)\r\n def wrapper(*fargs, **fkwargs):\r\n\r\n times = []\r\n\r\n for n in range(kwargs['n']):\r\n\r\n start = time.time()\r\n\r\n func(*fargs, **fkwargs)\r\n\r\n end = time.time()\r\n\r\n times.append(end-start)\r\n\r\n print('{} runtime {}'.format(func.__name__, np.mean(times)))\r\n\r\n return wrapper\r\n return make_wrapper",
"def test_fn_called():\n l = [1, 2, 3, 4, 5]\n for fn in [s7.div, s7.mul, s7.add, \"abcd\", 1234]:\n try:\n f = s7.count_fn_called(fn=fn)\n for i in range(0, random.randint(2, 10)):\n f(*l)\n assert fn in s7.fn_called_dict.keys() and str(s7.fn_called_dict[fn]) in s7.check_all_fn_called(fn)\n except Exception as e:\n assert e.__class__.__name__ == TypeError.__name__",
"def num_func_mapper(nums, funs):\n pass",
"def benchmark(func):\n\n def decoredFunc(*args, **keyArgs):\n t1 = time.time()\n r = func(*args, **keyArgs)\n t2 = time.time()\n print(f'Function={func.__name__}, Time={t2 - t1}')\n return r\n\n return decoredFunc",
"def test(f, args_string):\n print f.__name__\n t = timeit.repeat(\"%s(%s)\" % (f.__name__, args_string),\n repeat=5, number=1,\n setup=\"from __main__ import %s, %s\" % (f.__name__,\n args_string))\n print min(t)",
"def benchmark(func, inputs):\n t0 = time.clock()\n results = [func(x) for x in inputs]\n t1 = time.clock()\n average_time = (t1 - t0) / len(inputs)\n return average_time, results",
"def profiled(func):\n @functools.wraps(func)\n def inner(*args, **kwargs):\n inner.ncalls += 1\n return func(*args, **kwargs)\n\n inner.ncalls = 0\n return inner",
"def _call_n(x, f, n, *args, **kwargs):\n return [f(i, x, *args, **kwargs) for i in range(n)]",
"def __init__(self, func, args_list, kwargs_dict, setup_line_list, check_too_fast, run_sec, name, perf_counter_reference_time):\n self.func = func\n self.orig_func_name = getattr(self.func, \"__name__\", self.func)\n self.args_list = args_list.copy()\n self.kwargs_dict = kwargs_dict.copy()\n self.setup_line_list = setup_line_list\n self.check_too_fast = check_too_fast\n self.run_sec = run_sec\n self.name = name\n self.perf_counter_reference_time = perf_counter_reference_time\n if callable(self.func):\n _ns = {}\n self.src = self.__get_final_inner_function()\n if self.run_sec is not None and self.run_sec != -1 and self.run_sec < 0.1:\n raise Err('_TimeIT.__init__()', 'run_sec: <{:.1f}> must be at least <0.1 second> or <-1 to run it once> or <None to print the `func code block`>'.format(self.run_sec))\n\n _code = compile(self.src, 'benchmarkit-src', \"exec\")\n exec(_code, globals(), _ns)\n self.inner = _ns[\"inner\"]\n else:\n raise ValueError('<func>: is not a `callable` type: <{}>'.format(self.func))",
"def test_func(f, n):\n t = [[1]] * n\n\n start = etime()\n f(t, [])\n end = etime()\n elapsed = end - start\n return elapsed",
"def funcInFunc(func, num = 519):\n \n return func(num)",
"def setNumIterations(*argv):",
"def wrapper(*args, **kwargs):\n start = time.time()\n\n return func(*args, **kwargs), int(1000 * (time.time() - start))",
"def benchmark_algos(algos: List[str], setup_script: str, number: int,\n *args) -> None:\n for algo_name in algos:\n print('\\nRAN {} {} times with args {}'.format(algo_name, number,\n *args))\n print(timeit('{}({})'.format(algo_name, *args),\n setup=setup_script, number=number))",
"def _get_timings_perinput(funcs, input_=None):\n\n global _TIMEOUT\n global _NUM_REPEATS\n\n timings_l = []\n\n from IPython import get_ipython\n if get_ipython() is None:\n iter_funcs = trange(len(funcs), desc='Loop functions', leave=False)\n else:\n iter_funcs = range(len(funcs))\n\n for j in iter_funcs:\n f = funcs[j]\n ii = 1\n process_next = True\n while process_next:\n for jj in 1, 2, 5:\n iter_rep = ii * jj\n if input_ is None:\n t = min(timeit.repeat(functools.partial(f), repeat=_NUM_REPEATS, number=iter_rep))\n else:\n t = min(timeit.repeat(functools.partial(f, *input_), repeat=_NUM_REPEATS, number=iter_rep))\n if t > _TIMEOUT:\n process_next = False\n break\n ii *= 10\n timings_l.append(t / iter_rep)\n return timings_l",
"def profile_function(fun: Callable,\n args: tuple or list = (),\n kwargs: dict or None = None,\n backends=None,\n trace=True,\n subtract_trace_time=True,\n retime=True,\n warmup=1,\n call_count=1) -> Profile:\n kwargs = kwargs if isinstance(kwargs, dict) else {}\n for _ in range(warmup):\n fun(*args, **kwargs)\n with profile(backends=backends, trace=trace, subtract_trace_time=subtract_trace_time) as prof:\n fun(*args, **kwargs)\n if retime:\n with prof.retime():\n fun(*args, **kwargs)\n if call_count > 1:\n with prof._accumulate_average(call_count):\n for _ in range(call_count - 1):\n fun(*args, **kwargs)\n return prof",
"def countcalls(f):\n def _f(fn):\n countcalls[fn]",
"def run(method, n):\n \n m1,m2 = generate(n)\n \n start = time.time()\n method(m1,m2)\n end = time.time()\n \n exe = end - start\n \n return exe",
"def timedcalls(n, fn, *args):\n if isinstance(n, int):\n times = [timedcall(fn, *args)[0] for _ in xrange(n)]\n\n elif isinstance(n, float):\n timer, times = 0.0, []\n while timer < n:\n times.append(timedcall(fn, *args)[0])\n timer += times[-1]\n\n return min(times), average(times), max(times)",
"def do_benchmark(items, function_to_test, benchmark=None):\n def do():\n for _ in function_to_test(items):\n pass\n if benchmark is None:\n do()\n else:\n benchmark(do)",
"def test_figurate(label, func, inverse_func, incl_func, args):\n for k, n in enumerate(args, start=1):\n assert func(k) == n, f\"Failed to calculate {k}th {label} number\"\n assert inverse_func(n) == k, f\"Failed to invert {k}th {label} number\"\n assert incl_func(n), f\"Failed to verify inclusion of {n} in {label} numbers\"",
"def test_dup_args_in_call(x):\n return x * x",
"def test_dup_args_in_call(x):\n return x * x",
"def benchmark(func):\n import time\n @wraps(func)\n def wrapper(*args, **kwargs):\n t = time.clock()\n res = func(*args, **kwargs)\n print(func.__name__, time.clock()-t)\n return res\n return wrapper"
] | [
"0.60976577",
"0.59307265",
"0.58209634",
"0.58199495",
"0.5737953",
"0.5634653",
"0.5607949",
"0.5593593",
"0.5568835",
"0.5557964",
"0.5527588",
"0.54920816",
"0.5489414",
"0.5486434",
"0.54733574",
"0.5459806",
"0.54595375",
"0.5433512",
"0.541623",
"0.5362226",
"0.5323065",
"0.53168404",
"0.53114307",
"0.52694994",
"0.5267452",
"0.52447206",
"0.52381873",
"0.522996",
"0.522996",
"0.5222335"
] | 0.6841135 | 0 |
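A quick usage sketch of the timeThem helper from the positive document above (assuming it is in scope); the two summing functions and the input list are made up for the demo.

def sum_loop(values):
    total = 0
    for v in values:
        total += v
    return total

def sum_builtin(values):
    return sum(values)

data = list(range(10000))

# Both callables receive the same remaining arguments; inNumber sets the call count.
durations = timeThem(sum_loop, sum_builtin, data, inNumber=100)
# Prints one line per function with its duration, the ratio to the first
# function's time, and the returned value; returns the list of durations.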
Return getJobsEff from Jobs Client | def doCommand(self):
super(JobsEff_Command, self).doCommand()
self.APIs = initAPIs( self.__APIs__, self.APIs )
try:
res = self.APIs[ 'JobsClient' ].getJobsEff( self.args[0], self.args[1], self.args[2] )
except Exception, e:
_msg = '%s (%s): %s' % ( self.__class__.__name__, self.args, e )
gLogger.exception( _msg )
return { 'Result' : S_ERROR( _msg ) }
return { 'Result' : S_OK( res ) } | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def get_jobs(jobId: int) -> str: \n return mngr.getJob(str(jobId))",
"def get_job(self) -> Dict[Text, Text]:\n request = self._client.projects().jobs().get(name=self._job_name)\n return request.execute()",
"def _get_jobs():\n return _get_bigquery_service().jobs()",
"def get_job(self) -> CustomJob:\n return self._client.get_custom_job(name=self._job_name)",
"def GetJob(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"async def get_jobs(): \n return mngr.getAllJobs()",
"def GetBatchJob(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def get(ctx, job):\n\n def get_experiment():\n try:\n response = PolyaxonClient().experiment.get_experiment(user, project_name, _experiment)\n cache.cache(config_manager=ExperimentManager, response=response)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not load experiment `{}` info.'.format(_experiment))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n\n get_experiment_details(response)\n\n def get_experiment_job():\n try:\n response = PolyaxonClient().experiment_job.get_job(user,\n project_name,\n _experiment,\n _job)\n cache.cache(config_manager=ExperimentJobManager, response=response)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not get job `{}`.'.format(_job))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n\n if response.resources:\n get_resources(response.resources.to_dict(), header=\"Job resources:\")\n\n response = Printer.add_status_color(response.to_light_dict(\n humanize_values=True,\n exclude_attrs=['uuid', 'definition', 'experiment', 'unique_name', 'resources']\n ))\n Printer.print_header(\"Job info:\")\n dict_tabulate(response)\n\n user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),\n ctx.obj.get('experiment'))\n\n if job:\n _job = get_experiment_job_or_local(job)\n get_experiment_job()\n else:\n get_experiment()",
"def get_job_list(self):\n return self.job_list",
"def get_job_list(self):\n return self.job_list",
"def query(self, jobs):\n assert isinstance(jobs, list), 'Jobs must be type list'\n assert len(jobs) > 0, 'One or more jobs required'\n\n req = list()\n if len(jobs) > 1:\n for r in self._batch_request(jobs):\n req.append(\n ''.join([self._scheduler_endpoint, '?', '&'.join(r)]))\n else:\n req = \"{}?job={}\".format(\n self._scheduler_endpoint, jobs[0])\n\n try:\n ret = list()\n for resp in self._api_get(req):\n ret.extend(resp.json())\n return ret\n except HTTPError as e:\n raise JobClientError(e.message)",
"def get_jobs(self, *, params: Optional[dict] = None) -> \"resource_types.Jobs\":\n\n return communicator.Jobs(self.__requester).fetch(parameters=params)",
"def test_get_job(self):\n response = self.client.open(\n '/v1/job/{id}'.format(id='id_example'),\n method='GET',\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def get_job(job_name: str):\n\n job_details = redis_controller.get_job_details(job_name=job_name)\n return job_details",
"def get(self):\n parser = reqparse.RequestParser()\n parser.add_argument(\"job_id\", type=str, location=\"args\")\n args = parser.parse_args()\n job_id = args[\"job_id\"]\n if job_id is None:\n return errors.all_errors(\n \"CLIENT_MISSING_PARAMETER\", \"job_id (str) parameter is required\"\n )\n\n try:\n qstat_command = config.Config.PBS_QSTAT + \" -f \" + job_id + \" -Fjson\"\n try:\n get_job_info = subprocess.check_output(shlex.split(qstat_command))\n try:\n sanitize_input = get_job_info.decode(\"utf-8\")\n for match in re.findall(\n '\"project\":(\\d+),', sanitize_input, re.MULTILINE\n ):\n # Clear case where project starts with digits to prevent leading zero errors\n print(\n f'Detected \"project\":{match}, > Will be replaced to prevent int leading zero error'\n )\n sanitize_input = sanitize_input.replace(\n f'\"project\":{match},', f'\"project\":\"{match}\",'\n )\n\n job_info = ast.literal_eval(sanitize_input)\n except Exception as err:\n return {\n \"success\": False,\n \"message\": \"Unable to retrieve this job. Job may have terminated. Error: \"\n + str(job_info),\n }, 210\n\n job_key = list(job_info[\"Jobs\"].keys())[0]\n return {\"success\": True, \"message\": job_info[\"Jobs\"][job_key]}, 200\n\n except Exception as err:\n return {\n \"success\": False,\n \"message\": \"Unable to retrieve Job ID (job may have terminated and is no longer in the queue)\",\n }, 210\n\n except Exception as err:\n return {\"success\": False, \"message\": \"Unknown error: \" + str(err)}, 500",
"async def _fetch_data(self) -> JobInfo:\n return await self.api.get_job()",
"def job(job_name):\n ClientID = Job.get_client_id(job_name)\n return tasks_for_client_job(ClientID, job_name)",
"def get_job_detail():\n\n return JobDetail.query.all()",
"def jobserver_job():\n return _MakeJob()",
"def getFailedJobs(self):\n return self.__failedJobs",
"def job(self):\n return self.batch[self.job_id]",
"def get_job_run(\n self,\n ) -> Callable[[cloud_deploy.GetJobRunRequest], cloud_deploy.JobRun]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"get_job_run\" not in self._stubs:\n self._stubs[\"get_job_run\"] = self.grpc_channel.unary_unary(\n \"/google.cloud.deploy.v1.CloudDeploy/GetJobRun\",\n request_serializer=cloud_deploy.GetJobRunRequest.serialize,\n response_deserializer=cloud_deploy.JobRun.deserialize,\n )\n return self._stubs[\"get_job_run\"]",
"def list(self):\n return self.rpc.call(MsfRpcMethod.JobList)",
"def get_job(self) -> Union[Dict[Text, Text], CustomJob]:\n pass",
"def doCommand(self ):\n \n super (JobsEffSimple_Command, self).doCommand()\n self.APIs = initAPIs( self.__APIs__, self.APIs )\n\n try:\n \n if self.args[0] == 'Service':\n name = self.APIs[ 'ResourceStatusClient' ].getGeneralName( self.args[0], self.args[1], 'Site' ) \n name = name[ 'Value' ][ 0 ]\n granularity = 'Site'\n elif self.args[0] == 'Site':\n name = self.args[1]\n granularity = self.args[0]\n else:\n return { 'Result' : S_ERROR( '%s is not a valid granularity' % self.args[ 0 ] ) }\n \n res = self.APIs[ 'JobsClient' ].getJobsSimpleEff( name )\n \n if res == None:\n res = S_OK( 'Idle' )\n else:\n res = S_OK( res[ name ] ) \n \n except Exception, e:\n _msg = '%s (%s): %s' % ( self.__class__.__name__, self.args, e )\n gLogger.exception( _msg )\n return { 'Result' : S_ERROR( _msg ) }\n\n return { 'Result' : res }",
"def export_getRequestForJobs(self,jobIDs):\n gLogger.info(\"RequestManagerHandler.getRequestForJobs: Attempting to get request names for %s jobs.\" % len(jobIDs))\n try:\n res = requestDB.getRequestForJobs(jobIDs)\n return res\n except Exception,x:\n errStr = \"RequestManagerHandler.getRequestForJobs: Exception which getting request names.\"\n gLogger.exception(errStr,'',lException=x)\n return S_ERROR(errStr)",
"def getUpdatedBatchJob(self, maxWait):\n raise NotImplementedError('Abstract method: getUpdatedBatchJob')",
"def get_job(self, job_id):\n\n try:\n exposure = Job.objects.filter(id=job_id)\n except:\n exposure = None\n\n return exposure",
"def cli(ctx, job_id):\n return ctx.gi.jobs.get_inputs(job_id)",
"def getJobList_impl(self):\n my_infos = TestJob.objects.filter(\n (Q(job_status='Running')|Q(job_status='Submitted')|Q(job_status='Incomplete'))\n &Q(check_or_not=True)\n )\n\n if not connection.in_atomic_block:\n self._commit_transaction(src='getInfosList_impl')\n print(\"###\", my_infos)\n logger.info(my_infos)\n return my_infos"
] | [
"0.649465",
"0.6452751",
"0.6445332",
"0.63952684",
"0.6362363",
"0.63500166",
"0.6129403",
"0.61118275",
"0.6073132",
"0.6073132",
"0.6022542",
"0.5930793",
"0.5909269",
"0.59045947",
"0.58976734",
"0.587431",
"0.5870174",
"0.58529925",
"0.5841137",
"0.5816044",
"0.58158857",
"0.5812952",
"0.57887375",
"0.57516384",
"0.57473063",
"0.57456154",
"0.57286036",
"0.5727557",
"0.5688381",
"0.56459886"
] | 0.7219805 | 0 |
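For illustration, a minimal sketch of the result-dictionary convention that doCommand above relies on; S_OK and S_ERROR here are simplified stand-ins rather than the real DIRAC helpers, and FakeJobsClient is a hypothetical client used only for this demo.

def S_OK(value=None):
    # Simplified stand-in: successful result wrapper.
    return {"OK": True, "Value": value}

def S_ERROR(message=""):
    # Simplified stand-in: failed result wrapper.
    return {"OK": False, "Message": message}

class FakeJobsClient:
    def getJobsEff(self, site_type, name, period):
        return {"LCG.CERN.ch": 0.93}

def fetch_jobs_eff(client, *args):
    try:
        return {"Result": S_OK(client.getJobsEff(*args))}
    except Exception as exc:  # mirror the broad catch in doCommand
        return {"Result": S_ERROR("JobsEff_Command (%s): %s" % (args, exc))}

result = fetch_jobs_eff(FakeJobsClient(), "Site", "LCG.CERN.ch", None)["Result"]
print(result["Value"] if result["OK"] else result["Message"])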
Apply monotonic attention constraint. This function applies the monotonic attention constraint | def _apply_attention_constraint(e,
last_attended_idx,
backward_window=1,
forward_window=3):
# for dygraph to static graph
# if e.shape[0] != 1:
# raise NotImplementedError(
# "Batch attention constraining is not yet supported.")
backward_idx = paddle.cast(
last_attended_idx - backward_window, dtype='int64')
forward_idx = paddle.cast(last_attended_idx + forward_window, dtype='int64')
if backward_idx > 0:
e[:, :backward_idx] = -float("inf")
if forward_idx < paddle.shape(e)[1]:
e[:, forward_idx:] = -float("inf")
return e | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def hard_monotonic_attention(e_ma, aw_prev, eps_wait, p_threshold=0.5):\n bs, H_ma, qlen, klen = e_ma.size()\n assert qlen == 1\n assert e_ma.size(-1) == aw_prev.size(-1)\n aw_prev = aw_prev[:, :, :, -klen:]\n _p_choose = torch.sigmoid(e_ma[:, :, 0:1])\n p_choose = (_p_choose >= p_threshold).float()\n p_choose *= torch.cumsum(aw_prev[:, :, 0:1, -e_ma.size(3):], dim=-1)\n alpha = p_choose * exclusive_cumprod(1 - p_choose)\n if eps_wait > 0:\n for b in range(bs):\n if alpha[b].sum() == 0:\n continue\n leftmost = alpha[b, :, -1].nonzero()[:, -1].min().item()\n rightmost = alpha[b, :, -1].nonzero()[:, -1].max().item()\n for h in range(H_ma):\n if alpha[b, h, -1].sum().item() == 0:\n alpha[b, h, -1, min(rightmost, leftmost + eps_wait)] = 1\n continue\n if alpha[b, h, -1].nonzero()[:, -1].min().item() >= leftmost + eps_wait:\n alpha[b, h, -1, :] = 0\n alpha[b, h, -1, leftmost + eps_wait] = 1\n return alpha, _p_choose",
"def soft_attention_alignment(input_1, input_2):\n\n attention = Dot(axes=-1)([input_1, input_2])\n\n w_att_1 = Lambda(lambda x: softmax(x, axis=1),\n output_shape=unchanged_shape)(attention)\n w_att_2 = Permute((2, 1))(Lambda(lambda x: softmax(x, axis=2),\n output_shape=unchanged_shape)(attention))\n in1_aligned = Dot(axes=1)([w_att_1, input_1])\n in2_aligned = Dot(axes=1)([w_att_2, input_2])\n return in1_aligned, in2_aligned",
"def parallel_monotonic_attention(e_ma, aw_prev, trigger_points, eps, noise_std, no_denom, decot, lookahead, stableemit_weight):\n bs, H_ma, qlen, klen = e_ma.size()\n aw_prev = aw_prev[:, :, :, :klen]\n if decot:\n aw_prev_pad = aw_prev.new_zeros(bs, H_ma, qlen, klen)\n aw_prev_pad[:, :, :, :aw_prev.size(3)] = aw_prev\n aw_prev = aw_prev_pad\n bs, H_ma, qlen, klen = e_ma.size()\n p_choose = torch.sigmoid(add_gaussian_noise(e_ma, noise_std))\n alpha = []\n if stableemit_weight > 0:\n p_choose = (1 - stableemit_weight) * p_choose\n cumprod_1mp_choose = safe_cumprod(1 - p_choose, eps=eps)\n for i in range(qlen):\n denom = 1 if no_denom else torch.clamp(cumprod_1mp_choose[:, :, i:i + 1], min=eps, max=1.0)\n cumsum_in = aw_prev / denom\n monotonic = False\n if monotonic and i > 0:\n cumsum_in = torch.cat([denom.new_zeros(bs, H_ma, 1, 1), cumsum_in[:, :, :, 1:]], dim=-1)\n aw_prev = p_choose[:, :, i:i + 1] * cumprod_1mp_choose[:, :, i:i + 1] * torch.cumsum(cumsum_in, dim=-1)\n if decot:\n assert trigger_points is not None\n for b in range(bs):\n aw_prev[b, :, :, trigger_points[b, i:i + 1] + lookahead + 1:] = 0\n alpha.append(aw_prev)\n alpha = torch.cat(alpha, dim=2) if qlen > 1 else alpha[-1]\n return alpha, p_choose",
"def _attention(self, inputs):\n attn_weights = K.batch_dot(x=inputs,\n y=K.permute_dimensions(inputs,\n pattern=(0, 2, 1)))\n return K.permute_dimensions(attn_weights, (0, 2, 1))",
"def Anatomical_attention_gate(featureMap1,featureMap2):\n ndims = len(featureMap1.get_shape()) - 2\n assert ndims in [1, 2, 3], \"ndims should be one of 1, 2, or 3. found: %d\" % ndims\n# input_channels = featureMap1.get_shape().as_list()[-1]\n# batch_size1 = tf.shape(down_in)[0]\n# nf = tf.min(batch_size0,batch_size1)\n featureMap = concatenate([featureMap1, featureMap2])\n Conv = getattr(KL, 'Conv%dD' % ndims)\n tensorweight1 = Conv(1, kernel_size=1, padding='same',\n kernel_initializer='he_normal', use_bias = True, bias_initializer='zeros',strides=1,activation='sigmoid')(featureMap)\n# tensorweight1 = Activation('relu')(tensorweight1)\n w_featureMap1 = Multiply()([featureMap1,tensorweight1])\n tensorweight2 = Conv(1, kernel_size=1, padding='same',\n kernel_initializer='he_normal', use_bias = True, bias_initializer='zeros',strides=1,activation='sigmoid')(featureMap)\n# tensorweight2 = Activation('relu')(tensorweight2)\n w_featureMap2 = Multiply()([featureMap2,tensorweight2])\n w_featureMap = Add()([w_featureMap1,w_featureMap2])\n return w_featureMap",
"def call(self, x, mask=None):\n u_i = K.tanh(K.dot(x, self.W) + self.b)\n v = K.squeeze(K.dot(u_i, K.expand_dims(self.u)), axis=-1)\n attention = softmax_masked(v, mask)\n\n return attention",
"def attention_imp_merge():\n global X_DIM, Y_DIM\n # Load Embeddings matrix\n embedding_weights = joblib.load(config.DUMPED_VECTOR_DIR + 'mb_voc_embeddings.pkl')\n\n # model cnn\n\n model_atn = Sequential()\n model_atn.add(Embedding(max_features,\n embedding_dims,\n input_length=max_len,\n weights=[embedding_weights],\n trainable=True))\n model_atn.add(Bidirectional(GRU(100, return_sequences=True), name='bidirectional'))\n model_atn.add(TimeDistributed(Dense(200), name='time_dist'))\n model_atn.add(AttLayer(name='att'))\n\n model_feature_vec = Sequential()\n model_feature_vec.add(Dense(200, input_dim=N_Features, init='normal', activation='relu'))\n model_feature_vec.add(Dense(100, init='normal', activation='relu'))\n model_feature_vec.add(Dropout(0.2))\n model_feature_vec.add(Dense(50, init='normal', activation='relu'))\n model_feature_vec.add(Dense(10, init='normal', activation='relu'))\n\n merged_layer = Sequential()\n merged_layer.add(Merge([model_atn, model_feature_vec], mode='concat',\n concat_axis=1, name='merge_layer'))\n merged_layer.add(Dense(200, activation='relu'))\n # merged_layer.add(Bidirectional(GRU(100, return_sequences=True), name='bidirectional_2'))\n # merged_layer.add(TimeDistributed(Dense(200), name='time_dist'))\n # merged_layer.add(AttLayer(name='att'))\n merged_layer.add(Dense(1, init='normal', name='combined_dense'))\n\n # # Compile model\n merged_layer.compile(loss='mean_squared_error', optimizer='adam')\n\n print(merged_layer.summary())\n return merged_layer",
"def compute_attention(t1, t2):\n dim = t1.shape.as_list()[2]\n init = tf.constant_initializer(1.0 / dim)\n\n t1_logits = ops.last_dim_weighted_sum(t1, \"t1_w\")\n t2_logits = ops.last_dim_weighted_sum(t2, \"t2_w\")\n\n dot_w = tf.get_variable(\n \"dot_w\", shape=dim, initializer=init, dtype=tf.float32)\n # Compute x * dot_weights first, then batch mult with x\n dots = t1 * tf.expand_dims(tf.expand_dims(dot_w, 0), 0)\n dot_logits = tf.matmul(dots, t2, transpose_b=True)\n\n return dot_logits + \\\n tf.expand_dims(t1_logits, 2) + \\\n tf.expand_dims(t2_logits, 1)",
"def attention_weight(x, fixed_weights_attention, biais_attention, step_dim):\n \"\"\" fixed_weights_attention (array) : Fixed weight of the learned attention layer\n biais_attention (array) : bias of the learned attention layer\n step_dim (int) : maxlen \"\"\"\n \"\"\" return : weights (array)\"\"\"\n\n features_dim = fixed_weights_attention.shape[0]\n\n eij = K.reshape(K.dot(K.reshape(x, (-1, features_dim)),\n K.reshape(fixed_weights_attention, (features_dim, 1))), (-1, step_dim))\n\n eij += biais_attention\n\n eij = K.tanh(eij)\n\n a = K.exp(eij)\n\n a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())\n\n weights = K.expand_dims(a)\n # weighted_input = x * a\n return weights",
"def _Attention(self, name, is_causal=True):\n p = self.params\n tr_atten_p = TransformerAttentionLayer.Params().Set(\n name='transformer_atten',\n input_dim=p.model_dim,\n hidden_dim=p.attention_hidden_dim or p.model_dim,\n is_masked=is_causal,\n num_heads=p.num_heads,\n residual_dropout_prob=p.residual_dropout_prob,\n atten_dropout_prob=p.atten_dropout_prob,\n fprop_dtype=p.fprop_dtype,\n add_unnormalized_input=p.selfatten_add_unnormalized_input,\n )\n tr_atten_p.atten_tpl.use_bias = p.use_bias\n tr_atten_p.atten_tpl.enable_value_proj = p.selfatten_enable_value_proj\n tr_atten_p.atten_tpl.enable_query_scale = p.enable_query_scale\n tr_atten_p.atten_tpl.enable_per_dim_scale = p.enable_per_dim_scale\n tr_atten_p.atten_tpl.device_mesh = p.device_mesh\n tr_atten_p.atten_tpl.weight_split_dims_mapping = (\n p.weight_split_dims_mapping.dnh)\n tr_atten_p.atten_tpl.activation_split_dims_mapping.blnh = (\n p.activation_split_dims_mapping.blnh)\n tr_atten_p.atten_tpl.activation_split_dims_mapping.bld = (\n p.activation_split_dims_mapping.bld)\n if p.deterministic_dropout:\n tr_atten_p.dropout_tpl = layers.DeterministicDropoutLayer.Params()\n tr_atten_p.atten_p.dropout_tpl = layers.DeterministicDropoutLayer.Params()\n\n return self._Graph(\n name,\n ['i'], # input NestedMap with {vec, paddings}\n ['o'], # output NestedMap with {vec, paddings}\n ('i.vec->split_i',\n self.MeshSplit('input_split', p.activation_split_dims_mapping.bld)),\n ('split_i,split_i,i.paddings->o.vec,unused_prob', tr_atten_p),\n ('i.paddings->o.paddings', self._Id('id')))",
"def hard(self, monotonic_attention, chunk_energy):\n batch_size, sequence_length = monotonic_attention.size()\n\n mask = monotonic_attention.new_tensor(monotonic_attention)\n for i in range(1, self.chunk_size):\n mask[:, :-i] += monotonic_attention[:, i:]\n\n # mask '-inf' energy before softmax\n masked_energy = chunk_energy.masked_fill_(\n (1 - mask).byte(), -float('inf'))\n return masked_energy",
"def attention(query, step):\n \n if nest.is_sequence(query): # If the query is a tuple, flatten it.\n query_list = nest.flatten(query)\n query = array_ops.concat(query_list, 1)\n _tmp = math_ops.matmul(query, w) + b\n _tmp = array_ops.reshape(_tmp, [-1, 1, 1, attn_size])\n # Attention mask is a softmax of v^T * tanh(...).\n s = math_ops.reduce_sum(v * math_ops.tanh(hidden_features + _tmp), [2, 3])\n # beta = math_ops.multiply(nn_ops.softmax(s, name=\"beta_%d\" % step), beta_scalar)\n beta = nn_ops.softmax(s, name=\"beta_%d\" % step)\n # Now calculate the attention-weighted vector d.\n \n hidden_attn = math_ops.reduce_sum(array_ops.reshape(beta, [-1, attn_length, 1, 1]) * hidden,\n [1, 2])\n return hidden_attn, beta",
"def _soft_alignment(self, inputs):\n attention = inputs[0]\n sentence = inputs[1]\n\n # Subtract the max. from the attention weights to avoid overflows.\n exp = K.exp(attention - K.max(attention, axis=-1, keepdims=True))\n exp_sum = K.sum(exp, axis=-1, keepdims=True)\n softmax = exp / exp_sum\n\n return K.batch_dot(softmax, sentence)",
"def mas_width1(attn_map):\n # assumes mel x text\n opt = np.zeros_like(attn_map)\n attn_map = np.log(attn_map)\n attn_map[0, 1:] = -np.inf\n log_p = np.zeros_like(attn_map)\n log_p[0, :] = attn_map[0, :]\n prev_ind = np.zeros_like(attn_map, dtype=np.int64)\n for i in range(1, attn_map.shape[0]):\n for j in range(attn_map.shape[1]): # for each text dim\n prev_log = log_p[i - 1, j]\n prev_j = j\n\n if j - 1 >= 0 and log_p[i - 1, j - 1] >= log_p[i - 1, j]:\n prev_log = log_p[i - 1, j - 1]\n prev_j = j - 1\n\n log_p[i, j] = attn_map[i, j] + prev_log\n prev_ind[i, j] = prev_j\n\n # now backtrack\n curr_text_idx = attn_map.shape[1] - 1\n for i in range(attn_map.shape[0] - 1, -1, -1):\n opt[i, curr_text_idx] = 1\n curr_text_idx = prev_ind[i, curr_text_idx]\n opt[0, curr_text_idx] = 1\n return opt",
"def self_attention(self, hidden):\n mul1 = self.attention1(hidden)\n mul2 = self.attention2(mul1)\n return self.softmax(mul2)",
"def blockAttention(self,x):\n b, n, c, w, h = x.shape\n pre_x = x.contiguous().view(b, n, -1)\n att_x, relations = self.forward_unified(pre_x, pre_x, pre_x)\n att_x = att_x.contiguous().view(b, n, c, w, h)\n return att_x",
"def attn(self, embed, mask, name=\"\"):\n with tf.variable_scope(\"attn_\"+name):\n K = self.get_trans_param(\"K\")\n Q = self.get_trans_param(\"Q\")\n V = self.get_trans_param(\"V\")\n kdata = tf.einsum(\"nml,lk->nmk\", embed, K)\n qdata = tf.einsum(\"nml,lk->nmk\", embed, Q)\n vdata = tf.einsum(\"nml,lk->nmk\", embed, V) # nbatch x max_atom x n_trans\n kq = tf.einsum(\"nml,nkl->nmk\", qdata, kdata)*(1/math.sqrt(self.n_trans)) # nbatch x max_atom x max_atom\n #mask = tf.expand_dims(mask, 1) # nbatch x 1 x max_atom\n mask = tf.keras.backend.repeat(mask, self.max_atom) \n score = tf.where(mask, -9999*tf.ones_like(kq), kq)\n #score = kq\n #score = tf.scatter_update(tf.Variable(kq, validate_shape=False), mask, -9999)# assign a large number\n w = tf.nn.softmax(score, axis=-1) # calculate attention weight, nbatch x max_atom x max_atom\n vout = tf.einsum(\"nml,nlk->nmk\", w, vdata) # nbatch x max_atom x n_trans\n return vout",
"def ACmomentConstraint(p, var):\n \n #extract the polynomial and variables \n x = var[0]\n th = var[1]\n\n\n #Identify support set, prepare for polytope reduction\n #A_pre = np.array(P.monoms())\n #b = np.array(P.coeffs())\n \n # #function to generate parameter coefficients\n # if len(var) == 1:\n # fb = lambda p: p\n # else:\n # fb = sp.lambdify(x, b, \"numpy\")\n if type(p) == list:\n fout = extract_monom(var, p)\n else:\n fout = extract_monom(var,[p])\n fb = fout[\"fb\"]\n A_pre = fout[\"A_pre\"]\n monom_poly = fout[\"monom_poly\"]\n geom = fout[\"geom\"]\n \n #add in constant term?\n z_blank = np.zeros([1, A_pre.shape[1]])\n z_list = [int(z) for z in z_blank.tolist()[0]] #probably a better way to do this\n add_z = []\n \n if z_blank not in A_pre:\n A_pre= np.append(A_pre, z_blank, axis = 0)\n b = np.append(b, 0)\n add_z = z_list\n \n #always add the constant term to the monomial set\n monom_all = A_pre.tolist()\n A = np.ones((A_pre.shape[1] + 1, A_pre.shape[0]), dtype = int)\n A[1:,:] = A_pre.T \n \n #find the support and generators of all monomials \n support = np.array(polytope.interior(A, strict = False))\n half_support = [list(v // 2) for v in support if v.any() and not (v % 2).any()]\n #once again, add back the constant\n\n\n #augmented support set, 1 + half_support + current support\n #TODO: This is incorrect, breaks the lexicographic ordering and many assumptions. Fix this\n #aug_support = monom_all + add_z + [i for i in half_support if i not in monom_all]\n monom_data = np.array(sum([[list(m) for m in monom_poly[i]] for i in range(len(geom)) if not geom[i]], []))\n\n keys_classify = np.lexsort(np.flipud(monom_data.T))\n\n #monom_classify = monom_data[keys_classify, :].tolist()\n monom_classify = monom_data.tolist()\n #for i = range(monom_poly):\n # if geom[i]:\n\n\n all_support = half_support + add_z + monom_classify\n aug_support = np.flip(np.unique(np.array(all_support), axis=0), axis=0).tolist()\n \n \n #lookup table to associate generating indices with monomials\n #fill out the moment constraints\n lookup = {} \n for vi in range(len(aug_support)):\n v = aug_support[vi]\n for ui in range(vi, len(aug_support)):\n u = aug_support[ui]\n s = tuple([u[i] + v[i] for i in range(len(v))])\n if s in lookup:\n lookup[s] += [(ui, vi)]\n else:\n lookup[s] = [(ui, vi)]\n \n M_out = {\"supp\": aug_support, \"monom_all\": monom_all, \"monom_poly\": monom_poly, \"monom_classify\":monom_classify,\n \"cons\": lookup, \"fb\": fb, \"geom\":geom}\n #M_out = {\"supp\" : aug_support, \"half_supp\" : half_support, \"monom\": monom, \"cons\" : lookup, \"fb\": fb} \n \n return M_out",
"def constraint_mu_null(w, mu, mu_null):\n ret = np.dot(np.transpose(w), mu)\n return ret - mu_null",
"def _minmax_constraints_to_loss_fn(model_object, model_metadata_dict, weight):\n\n if weight is None:\n return None\n\n if isinstance(model_object.input, list):\n list_of_input_tensors = model_object.input\n else:\n list_of_input_tensors = [model_object.input]\n\n return weight * physical_constraints.minima_and_maxima_to_loss_fn(\n list_of_input_tensors=list_of_input_tensors,\n cnn_metadata_dict=model_metadata_dict)",
"def constraint_B_k_mu_mu(self):\n ms = self.ms\n width_contr = 0.0\n\n # Make sure scalar mass doesn't fall outside of kinematic bounds\n if np.any([s[0] <= ms**2 <= s[1] for s in B_k_mu_mu_obs.s_bounds]):\n widths_s = self.partial_widths()\n width_s = widths_s[\"total\"]\n\n # Magnitude of S' 3-momentum\n ps = np.sqrt(\n (mB - mk - ms) * (mB + mk - ms) * (mB - mk + ms) * (mB + mk + ms)\n ) / (2.0 * mB)\n # Probability that S decays close to the primary vertex\n pr_vis = 1.0 - np.exp(-B_k_mu_mu_obs.r_max * cm_to_inv_MeV * width_s * ms / ps)\n\n # print(pr_vis)\n # print(widths_s[\"mu mu\"] / width_s)\n\n # Compute the contribution to the mu mu decay width\n width_contr = self.width_B_k_s() * widths_s[\"mu mu\"] / width_s * pr_vis\n\n return B_k_mu_mu_obs.width_bound - width_contr",
"def hard(self, monotonic_attention, chunk_energy):\n batch_size, sequence_length = monotonic_attention.size()\n\n mask = fliped_cumsum(monotonic_attention)\n\n # mask '-inf' energy before softmax\n masked_energy = chunk_energy.masked_fill_(\n (1 - mask).byte(), -float('inf'))\n return masked_energy",
"def _attn_mean_pooling(self, x, x_mask):\n emb_squish = F.tanh(self.attn_linear_w_1(x))\n emb_attn = self.attn_linear_w_2(emb_squish)\n emb_attn.data.masked_fill_(x_mask.unsqueeze(2).data, float(\"-inf\"))\n emb_attn_norm = F.softmax(emb_attn.squeeze(2), dim=0)\n emb_attn_vectors = torch.bmm(x.transpose(1, 2), emb_attn_norm.unsqueeze(2)).squeeze(2)\n return emb_attn_vectors",
"def compute_attention_mask(x_mask, mem_mask, x_word_dim, key_word_dim):\r\n if x_mask is None and mem_mask is None:\r\n return None\r\n elif x_mask is None or mem_mask is None:\r\n raise NotImplementedError()\r\n\r\n x_mask = tf.cast(x_mask,dtype=bool)\r\n mem_mask = tf.cast(tf.transpose(mem_mask,perm=[0,2,1]), dtype=bool)\r\n join_mask = tf.logical_and(x_mask, mem_mask)\r\n return join_mask",
"def binarize_attention_parallel(self, attn, in_lens, out_lens):\n with torch.no_grad():\n attn_cpu = attn.data.cpu().numpy()\n attn_out = b_mas(attn_cpu, in_lens.cpu().numpy(), out_lens.cpu().numpy(), width=1)\n return torch.from_numpy(attn_out).to(attn.device)",
"def binarize_attention(self, attn, in_lens, out_lens):\n b_size = attn.shape[0]\n with torch.no_grad():\n attn_cpu = attn.data.cpu().numpy()\n attn_out = torch.zeros_like(attn)\n for ind in range(b_size):\n hard_attn = mas_width1(attn_cpu[ind, 0, : out_lens[ind], : in_lens[ind]])\n attn_out[ind, 0, : out_lens[ind], : in_lens[ind]] = torch.tensor(hard_attn, device=attn.get_device())\n return attn_out",
"def attention(inp, scope, e_dim, past, config):\n assert inp.shape.ndims == 3 # input should be of shape [batch, seqlen, embeddings] # [batch, sequence, features]\n assert e_dim % config.num_heads == 0 # embedding can be split in heads\n\n if past is not None:\n assert past.shape.ndims == 5 # [batch, 2, heads, seqlen, emebeddings]\n\n def split_heads(x):\n out = split_into_n_states(x, config.num_heads)\n out = tf.transpose(out, [0, 2, 1, 3])\n return out\n\n def merge_heads(x):\n out = merge_n_states(tf.transpose(x, [0, 2, 1, 3]))\n return out\n\n def mask_attention_weights(w):\n # w should have shape [batches, heads, dst_seq, src_seq], where information flows from scr to dst\n _, _, nd, ns = shapes_list(w)\n b = attention_mask(nd, ns, w.dtype)\n b = tf.reshape(b, [1, 1, nd, ns])\n w = w * b - tf.cast(1e10, w.dtype) * (1 - b)\n return w\n\n def multihead_attention(q, k, v):\n w = tf.matmul(q, k, transpose_b=True)\n w *= tf.rsqrt(tf.cast(v.shape[-1].value, w.dtype))\n\n # mask attention weights\n w = mask_attention_weights(w)\n w = softmax_with_reduce_max(w)\n out = tf.matmul(w, v)\n return out\n\n with tf.variable_scope(scope):\n c = conv1d(inp, 'convolutional_attention', e_dim * 3)\n q, k, v = map(split_heads, tf.split(c, 3, axis=2))\n present = tf.stack([k, v], axis=1)\n if past is not None:\n # there is a stack below it\n pk, pv = tf.unstack(past, axis=1)\n k = tf.concat([pk, k], axis=2)\n v = tf.concat([pv, v], axis=2)\n\n attn = multihead_attention(q, k, v)\n attn = merge_heads(attn)\n\n out = conv1d(attn, 'convolutional_projection', e_dim)\n return out, present",
"def __init__(self,\n num_units,\n line_memory,\n word_memory=None,\n soft_weight=None,\n hierarchy=True,\n line_memory_sequence_length=None,\n word_memory_sequence_length=None,\n scale=False,\n probability_fn=None,\n score_mask_value=float(\"-inf\"),\n name=\"CustomAttention\"):\n # For LuongAttention, we only transform the memory layer; thus\n # num_units **must** match expected the query depth.\n if probability_fn is None:\n probability_fn = nn_ops.softmax\n wrapped_probability_fn = lambda score: probability_fn(score)\n super(CustomAttention, self).__init__(\n query_layer=None,\n line_memory_layer=layers_core.Dense(\n num_units, name=\"line_memory_layer\", use_bias=False),\n line_memory=line_memory,\n word_memory=word_memory,\n probability_fn=wrapped_probability_fn,\n line_memory_sequence_length=line_memory_sequence_length,\n word_memory_sequence_length=word_memory_sequence_length,\n score_mask_value=score_mask_value,\n name=name)\n self._num_units = num_units\n self._scale = scale\n self._name = name\n self._hierarchy = hierarchy\n self._soft_weight = soft_weight",
"def soft_thresholding(w, alpha):\n return np.sign(w) * np.clip(np.abs(w) - alpha, 0.0, np.inf)",
"def forward(self, x, adj):\n output = torch.mm(x, self.weight)\n return torch.mm(adj, output) # can use sparse mm since adj is a sparse matrix"
] | [
"0.59919655",
"0.5717051",
"0.5559937",
"0.5518139",
"0.5506399",
"0.54885197",
"0.53224397",
"0.5284985",
"0.5283821",
"0.5255276",
"0.525297",
"0.5240578",
"0.52399945",
"0.5194057",
"0.5158129",
"0.512611",
"0.51100355",
"0.5108092",
"0.5106083",
"0.5093072",
"0.50762993",
"0.50584847",
"0.50430197",
"0.5038118",
"0.50225854",
"0.5018877",
"0.50158024",
"0.50131637",
"0.50070965",
"0.5002696"
] | 0.6920047 | 0 |
Initialize a failed command error. | def __init__(self, message_id: str, error_code: str):
super().__init__(f"Command failed: {error_code}")
self.message_id = message_id
self.error_code = error_code | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, cmd, returncode, err):\n self.cmd = cmd\n self.returncode = returncode\n self.stderr = err\n super(Error, self).__init__(\"%s(%d):\\n%s\" % ( cmd, returncode, err))",
"def __init__(self, error = ''):\n IPRO_Error.__init__(self, error)",
"def __init__(self, error = ''):\n IPRO_Error.__init__(self, error)",
"def __init__(self, error = ''):\n IPRO_Error.__init__(self, error)",
"def test_handles_error(self):\n with self.assertRaises(ForcedExit):\n self.run_command(mkdtemp())\n\n self.assertResults(\n result_with_hint(\n u'This repository has not been initialized.',\n GIT_REPO_NOT_INITIALIZED),\n self.error)",
"def __init__(self, error_msg):\n super(SdkException, self).__init__()\n self.error_msg = error_msg",
"def on_init_fail(self, event_time, message):\n pass",
"def __init__(self, error_num, args, msg):\n\n self.error_num = error_num\n self.args = args\n self.message = msg",
"def __init__(self, error_msg):\n super(ConnectionException, self).__init__(error_msg)",
"def __init__(self, message=\"\"):\n super(AutomationError, self).__init__(message)",
"def command_failed_error(cmd):\n\n output_1 = colored(' - Error: Failed to run command ', 'red')\n output_2 = command(cmd)\n return output_1 + output_2 + '\\n'",
"def _error_check(self, command_response):\n error_list = command_response.find(\"./clierror\")\n command_obj = command_response.find(\"./input\")\n if error_list is not None:\n command = command_obj.text if command_obj is not None else \"Unknown command\"\n msg = etree.tostring(error_list).decode()\n raise NXAPICommandError(command, msg)",
"def __init__(self, msg):\n\n super(DBValueError, self).__init__(msg)\n self.msg = msg",
"def error_check(command):\r\n\r\n # TODO\r",
"def __init__(self, msg):\n\n super(DBConnectionError, self).__init__(msg)\n self.msg = msg",
"def __init__(self, message):\n logging.error(\"ERROR: {0}\".format(message))\n logging.error(\"Try running with --help for more information.\")",
"def __init__(self, message=\"\"):\n super(DataError, self).__init__(message)",
"async def on_command_error(\n self, ctx: commands.Context, error: commands.CommandError\n ) -> None:\n if getattr(error, \"handled\", False):\n logger.debug(\n f\"Command {ctx.command} had its error already handled locally; ignoring.\"\n )\n return\n\n error = getattr(error, \"original\", error)\n\n if isinstance(error, commands.CommandNotFound):\n return # Skip logging CommandNotFound Error\n\n elif isinstance(error, commands.UserInputError):\n if isinstance(error, commands.MissingRequiredArgument):\n description = (\n f\"`{error.param.name}` is a required argument that is missing.\"\n \"\\n\\nUsage:\\n\"\n f\"```{ctx.prefix}{ctx.command} {ctx.command.signature}```\"\n )\n else:\n description = (\n f\"Your input was invalid: {error}\\n\\nUsage:\\n\"\n f\"```{ctx.prefix}{ctx.command} {ctx.command.signature}```\"\n )\n\n embed = self.error_embed(description)\n await ctx.send(embed=embed)\n\n elif isinstance(error, commands.CommandOnCooldown):\n mins, secs = divmod(math.ceil(error.retry_after), 60)\n embed = self.error_embed(\n f\"This command is on cooldown:\\nPlease retry in **{mins} minutes {secs} seconds**.\"\n )\n await ctx.send(embed=embed, delete_after=10)\n\n elif isinstance(error, commands.DisabledCommand):\n await ctx.send(embed=self.error_embed(\"This command has been disabled.\"))\n\n elif isinstance(error, commands.NoPrivateMessage):\n await ctx.send(\n embed=self.error_embed(\"This command can only be used in the server.\")\n )\n\n elif isinstance(error, commands.CheckFailure):\n await ctx.send(\n embed=self.error_embed(\"You aren't allowed to use this command.\")\n )\n\n elif isinstance(error, commands.BadArgument):\n self.revert_cooldown_counter(ctx.command, ctx.message)\n embed = self.error_embed(\n \"The argument you provided was invalid: \"\n f\"{error}\\n\\nUsage:\\n```{ctx.prefix}{ctx.command} {ctx.command.signature}```\"\n )\n await ctx.send(embed=embed)\n else:\n await self.handle_unexpected_error(ctx, error)\n return # Exit early to avoid logging.\n\n logger.debug(\n f\"Error Encountered: {type(error).__name__} - {str(error)}, \"\n f\"Command: {ctx.command}, \"\n f\"Author: {ctx.author}, \"\n f\"Channel: {ctx.channel}\"\n )",
"def error(self, error):\n pass",
"def __init__(self, message, fatal, error_num=None):\n Exception.__init__(self, message)\n self.fatal = fatal\n self.errno = error_num",
"def failure(self, error):\n print \"comm failed Reason:\", error\n return error",
"def command_error(fmt, *args, **kwargs):\n raise CommandError(fmt.format(*args, **kwargs))",
"def __init__(self, failed):\n\t\tsuper().__init__(\"One or more asynchronous HTTP requests failed: \" + failed)\n\t\tself.failed = failed",
"def safe_initialisation(custom_command=\"\", comm=None, nprocs=1):\n try:\n cosmo, data, command_line, success = initialise(custom_command)\n except io_mp.ConfigurationError as message:\n if comm:\n for index in range(1, nprocs):\n comm.send('failed', dest=index, tag=1)\n print str(message)\n raise io_mp.ConfigurationError(\n \"The initialisation was not successful, resulting in a \"\n \"potentially half created `log.param`. Please see the \"\n \"above error message. If you run the exact same command, it\"\n \" will not work. You should solve the problem, and try again.\")\n except KeyError:\n if comm:\n for index in range(1, nprocs):\n comm.send('failed', dest=index, tag=1)\n raise io_mp.ConfigurationError(\n \"You are running in a folder that was created following \"\n \"a non-successful initialisation (wrong parameter name, \"\n \"wrong likelihood, etc...). If you have solved the issue, you \"\n \"should remove completely the output folder, and try again.\")\n return cosmo, data, command_line, success",
"async def on_command_error(self, ctx: commands.Context, error: Any):\n \n # Notify user for MissingRequiredArgument errors\n if isinstance(error, commands.MissingRequiredArgument):\n command_name = ctx.message.content.split(\" \")[0]\n msg = translate(\"err_missing_parameter\", await culture(ctx)).format(command_name, error.param.name)\n return await ctx.send(msg)\n else:\n # Log the warning\n log_warn(error)\n\n # Notify user with general error\n msg = translate(\"err_unrecognized_command\", await culture(ctx))\n await ctx.send(msg)",
"def shell_fail_server(self, cmd):\n self.shell_cmd = cmd\n raise ConnectionResetError",
"def _config_error(self, message, status=2):\n self.parser.exit(status, f\"{self.parser.prog}: failed loading config: {message}\\n\")",
"def __init__(self, msg):\n\n super(ConfigError, self).__init__(msg)\n self.msg = msg",
"def __init__(self, message=\"\"):\n super(ApplicationError, self).__init__(message)",
"def __init__(self, errors):\n strerrors = \"\\n - \".join(errors)\n text = tr(\n \"Application error occurred on secondary appliance. \"\n \"Please read logs on the secondary appliance.\"\n )\n HAError.__init__(\n self,\n SECONDARY_FAILED_TO_APPLY,\n \"%s\\n - %s\" % (text, strerrors)\n )"
] | [
"0.70043254",
"0.6804815",
"0.6804815",
"0.6804815",
"0.6448812",
"0.63277036",
"0.6303799",
"0.6294026",
"0.62792206",
"0.617966",
"0.6145696",
"0.6088277",
"0.6072331",
"0.60344654",
"0.60218865",
"0.60045916",
"0.5991444",
"0.5982957",
"0.5979462",
"0.5974293",
"0.5967398",
"0.59451246",
"0.594036",
"0.5915673",
"0.59156173",
"0.5896663",
"0.58882606",
"0.58676004",
"0.58606064",
"0.58585995"
] | 0.69288176 | 1 |
Register an async on_disconnect callback. | def register_on_disconnect(
self, on_disconnect_cb: Callable[[], Awaitable[None]]
) -> Callable:
def unsubscribe() -> None:
"""Unsubscribe listeners."""
if on_disconnect_cb in self._on_disconnect:
self._on_disconnect.remove(on_disconnect_cb)
self._on_disconnect.append(on_disconnect_cb)
return unsubscribe | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def on_disconnect(self) -> None:",
"def async_on_disconnect(self, target: Callable[..., Awaitable]) -> None:\n self._async_disconnect_handler = target",
"async def on_disconnect(self, reconnecting):\n pass",
"def on_disconnect(self, target: Callable) -> None:\n self._sync_disconnect_handler = target",
"def add_on_disconnect_handler(self, handler):\n self._on_disconnect_handlers.append(handler)",
"def set_disconnect_handler(self, function):\n self._pres_manager.set_disconnect_handler(function)",
"def on_disconnect():\n print(\"User disconnected!\")",
"async def disconnect(self):",
"def on_disconnect(unused_client, unused_userdata, rc):\n print(f\"on_disconnect: {error_str(rc)}\")\n print()\n\n global connected\n connected = False",
"def unregisterDisconnect(self, function):\n self._sig_disconnect.unsubscribe(function)",
"def on_disconnect(self, raw_msg, server, port, **kwargs):",
"def add_on_connection_close_callback(self):\n logger.info('Adding connection close callback')\n self._connection.add_on_close_callback(self.on_connection_closed)",
"def on_disconnect():\n logger.info(f\"{request.sid} Disconnected\")",
"def add_on_connection_close_callback(self):\n self.logger.info('Adding connection close callback')\n self._connection.add_on_close_callback(self.on_connection_closed)",
"def on_disconnect(client, userdata, rc):\n if rc != 0:\n print(\"Error: Mqtt server disconnection.\")",
"def notifyDisconnect(self, function, **kwargs):\n self._sig_disconnect.subscribe(function, **kwargs)",
"def async_on_close(self, func: CALLBACK_TYPE) -> None:\n self._on_close.append(func)",
"async def disconnect_callback_async(self, excep):\r\n _LOGGER.debug(\" ........... attempting reconnection\")\r\n await self.service_panel_stop(excep)\r\n await self.service_panel_start(excep)",
"def _on_disconnect(self, *args, **kwargs):\n logger.info('Client disconnected %r', self.address)\n self.server.client_disconnected(self)\n # self.unregister()",
"def on_disconnect(unused_client, unused_userdata, rc):\n print('on_disconnect', error_str(rc))\n status_light.off()",
"def on_disconnect(unused_client, unused_userdata, rc):\n\tprint('on_disconnect', error_str(rc))\n\n\t# Since a disconnect occurred, the next loop iteration will wait with\n\t# exponential backoff.\n\tglobal should_backoff\n\tshould_backoff = True",
"def on_disconnect():\n print('User disconnected!')\n return 'disconnected'",
"async def async_disconnect(self) -> None:\n await self._sio.disconnect()\n self._watchdog.cancel()\n\n if self._async_disconnect_handler:\n await self._async_disconnect_handler()\n self._async_disconnect_handler = None\n if self._sync_disconnect_handler:\n self._sync_disconnect_handler()\n self._sync_disconnect_handler = None",
"def callback_disconnect():\n # if Networking.get_instance().is_host:\n logger.warning(\"It seems that client is not connected...\")\n Networking.get_instance().disconnect()\n EventQueue.post(CustomEvent(ChangeSceneEnum.DISCONNECT))",
"def disconnect(self, callback):\n if not callable(callback):\n raise TypeError('Cannot disconnect a non-callable from a Signal')\n conn = self._make_connection(callback)\n self._connection_dead(conn)",
"def on_disconnect(mqttc, obj, rc):\n if rc == 0:\n logger.debug(\"MQTT DISCONNECTED: rc: \" + str(rc))\n logger.debug(\"Disconnected Successfully from MQTT Broker\")",
"async def on_disconnect():\n print(\"Bot has logged off\")",
"def register_on_connect(\n self, on_connect_cb: Callable[[], Awaitable[None]]\n ) -> Callable:\n\n def unsubscribe() -> None:\n \"\"\"Unsubscribe listeners.\"\"\"\n if on_connect_cb in self._on_connect:\n self._on_connect.remove(on_connect_cb)\n\n self._on_connect.append(on_connect_cb)\n return unsubscribe",
"def on_disconnect(self):\n print('Client disconnected!')",
"def shutdown_callback():\n pass"
] | [
"0.76088375",
"0.71829623",
"0.7181412",
"0.715276",
"0.6923077",
"0.68131423",
"0.6806467",
"0.6722994",
"0.67129457",
"0.6570136",
"0.6568402",
"0.654046",
"0.6536708",
"0.6511085",
"0.64948267",
"0.6476269",
"0.6439348",
"0.6424654",
"0.64183086",
"0.64019936",
"0.6401964",
"0.6400416",
"0.6396223",
"0.6380196",
"0.63363993",
"0.6329731",
"0.6298",
"0.62819654",
"0.6234826",
"0.6173641"
] | 0.76131475 | 0 |
Register an async on_initialized_cb callback. | def register_on_initialized(
self, on_initialized_cb: Callable[[], Awaitable[None]]
) -> Callable:
def unsubscribe() -> None:
"""Unsubscribe listeners."""
if on_initialized_cb in self._on_initialized:
self._on_initialized.remove(on_initialized_cb)
self._on_initialized.append(on_initialized_cb)
return unsubscribe | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _async_initialized_callback(*_: Any):\n return event.async_call_later(hass, WATCHDOG_INTERVAL, _adapter_watchdog_job)",
"def register_async_callback(self, async_callback):\n self._async_callbacks.append(async_callback)",
"def callWhenInitialized(func):\n if _api:\n func()\n else:\n _initCallbacks.append(func)",
"def on_init_start(self):\n for callback in self.callbacks:\n callback.on_init_start(self)",
"def on_initialize(self) -> None:\n pass",
"def on_init_end(self):\n for callback in self.callbacks:\n callback.on_init_end(self)",
"async def async_setup(self):\n pass",
"def onInit(*args):",
"def onInit(*args):",
"def onInit(*args):",
"def onInit(*args):",
"def on_startup():\n\n async def startup_handler(app):\n \"\"\"Run all initialization tasks.\n These are tasks that should be run after the event loop has been started but before the HTTP\n server has been started.\n \"\"\"\n\n spotify_client_id = os.environ.get(SPOTIFY_CLIENT_ID)\n spotify_client_secret = os.environ.get(SPOTIFY_CLIENT_SECRET)\n\n # Save dependencies in the HTTP app.\n http.register_dependency(app, SPOTIFY_CLIENT_ID, spotify_client_id)\n http.register_dependency(app, SPOTIFY_CLIENT_SECRET, spotify_client_secret)\n\n async def cleanup(app):\n \"\"\"Perform required cleanup on shutdown\"\"\"\n # await client_session.close()\n\n app.on_shutdown.append(cleanup)\n\n return startup_handler",
"def initialise_callbacks():\n adapter = mice.ice.createObjectAdapterWithEndpoints(\"Callback.Client\", \"tcp -h 127.0.0.1\")\n adapter.activate()\n cb=mice.Murmur.ServerCallbackPrx.uncheckedCast(adapter.addWithUUID(ServerCallbackI(s, adapter)))\n s.addCallback(cb)",
"def on_ready(self, callback, run_in_thread=True):\n def _wrapper_callback(proto):\n if run_in_thread:\n self.factory.manager.call_in_thread(callback)\n else:\n callback()\n\n return proto\n\n self.factory.on_ready(_wrapper_callback)",
"async def _init(self, **kwargs):",
"async def on_ready(self) -> None:",
"def async_register_initial_scan_callback(\n self,\n callback: CALLBACK_TYPE,\n ) -> CALLBACK_TYPE:\n if self.initial_scan_done:\n callback()\n return lambda: None\n\n self._initial_scan_callbacks.append(callback)\n\n @hass_callback\n def _async_remove_callback() -> None:\n if callback not in self._initial_scan_callbacks:\n return\n self._initial_scan_callbacks.remove(callback)\n\n return _async_remove_callback",
"async def initialize(self):",
"def register_callback(self, func):\n self.callback = func",
"def register(self, callback):\n self.callback = callback",
"def register_callback(self, callback):\n self.callbacks.add(callback)",
"async def init(self) -> None:",
"async def init(self) -> None:",
"def register_callback(self, callback: Callable[[], None]) -> None:\r\n print(\"register callback called\")\r\n self._callbacks.add(callback)",
"def on_loaded(self, func):\n self._on_loaded_funcs.append(func)",
"def on_startup(self) -> None:\n ...",
"def __init__(self, callback, *args, **kwargs):\n self.callback = lambda: callback(*args, **kwargs)",
"def register_callback(self, callback):\n self._callbacks.append(callback)",
"def __attrs_post_init__(self):\n super().__attrs_post_init__()\n if self.config.get(\"open_mode\", False) is False:\n # If the master is not configured to be in open mode, register an auth event callback\n # If we were passed an auth event callback, it needs to get this master as the first\n # argument\n if self.on_auth_event_callback:\n auth_event_callback = partial(self.on_auth_event_callback, self)\n else:\n auth_event_callback = self._on_auth_event\n self.before_start(\n self.event_listener.register_auth_event_handler, self.id, auth_event_callback\n )\n self.after_terminate(self.event_listener.unregister_auth_event_handler, self.id)",
"async def on_ready():\n\n print(\"Logged in.\")"
] | [
"0.69842035",
"0.6830291",
"0.6557393",
"0.64485997",
"0.6347111",
"0.63110167",
"0.6143493",
"0.61021274",
"0.61021274",
"0.61021274",
"0.61021274",
"0.6069899",
"0.602697",
"0.60012853",
"0.5994286",
"0.5974951",
"0.5959524",
"0.5951486",
"0.5902437",
"0.58705467",
"0.5852588",
"0.58022517",
"0.58022517",
"0.580172",
"0.5786633",
"0.56886125",
"0.5677903",
"0.56524706",
"0.56397134",
"0.560304"
] | 0.7724016 | 0 |
Wait until it's time for the next retry. | async def _wait_retry(self) -> None:
# Sleep 2^tries + 0…tries*3 seconds between retries
self.retry_task = asyncio.create_task(
asyncio.sleep(2 ** min(9, self.tries) + random.randint(0, self.tries * 3))
)
await self.retry_task
self.retry_task = None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_retry_timeout(self, retry_timeout):",
"def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass",
"def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass",
"def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass",
"def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass",
"def sleep_for(self):\n return max(0, (self._retry_after - datetime.now()).total_seconds())",
"def retry(times):\n return repeat_with_success_at_least(times, 1)",
"def _retry_occurred(self):",
"def wait(self, sleep_time):\n time.sleep(sleep_time)",
"def sleep_until(self, time):\n raise NotImplementedError()",
"def waitUntilSuccess():",
"def wait_if_needed(self):\n now = datetime.datetime.now()\n # Note that this quantity is always positive because now is always bigger than the timestamp.\n seconds_since_last_attempt = (now - self._timestamp).total_seconds()\n # Note again that because seconds_since_last_attempt is positive, the wait seconds will\n # never exceed self.effective_lockout_seconds, so\n # 0 <= wait_seconds <= self.effective_lockout_seconds\n wait_seconds = max(0.0, self.effective_lockout_seconds - seconds_since_last_attempt)\n if wait_seconds > 0.0:\n shared_message = (\"Last %s attempt was at %s (%s seconds ago).\"\n % (self.action, self._timestamp, seconds_since_last_attempt))\n if self.lockout_enabled:\n action_message = \"Waiting %s seconds before attempting another.\" % wait_seconds\n self.log.warning(\"%s %s\" % (shared_message, action_message))\n time.sleep(wait_seconds)\n else:\n action_message = \"Continuing anyway because lockout is disabled.\"\n self.log.warning(\"%s %s\" % (shared_message, action_message))\n self.update_timestamp()",
"def wait_for(self, selector, timeout=3, refresh_rate=0.5, retry=0):\n time_counter = 0\n retry_counter = 0\n while retry_counter <= retry:\n while time_counter <= timeout:\n time.sleep(refresh_rate)\n time_counter = time_counter + refresh_rate\n xml = self.android_device_driver.fetch_current_xml()\n parser = XmlParser(xml)\n exist = parser.find_first_element_by_selector(selector)\n if exist is not None:\n return True\n retry_counter = retry_counter + 1\n return False",
"def timeout_wait(self):\n if self._dtr_enabled:\n while (self.__micros() - self._resume_time) < 0:\n if False:\n break # TODO: Check for printer status here\n else:\n while (self.__micros() - self._resume_time) < 0:\n pass",
"def wait_forever(self):\r\n while True:\r\n time.sleep(0.5)",
"def wait(self, ms=None):\r\n util.raiseNotDefined()",
"async def _sleep_for_retry(\n self,\n response: PipelineResponse[HTTPRequestType, AsyncHTTPResponseType],\n transport: AsyncHttpTransport[HTTPRequestType, AsyncHTTPResponseType],\n ) -> bool:\n retry_after = self.get_retry_after(response)\n if retry_after:\n await transport.sleep(retry_after)\n return True\n return False",
"def wait(self):\n for _ in range(15):\n time.sleep(10)\n if self.ready:\n break\n else:\n raise RuntimeError('timeout, lease failed to start')",
"def retry(self):\n return False",
"def _wait_before_call(self):\n while (dt.datetime.now() - self._last_call_ts) <= dt.timedelta(\n seconds=self.api_timeout\n ):\n time.sleep(0.5)\n self._last_call_ts = dt.datetime.now()",
"def wait(self):\n time.sleep(self.next())",
"def wait(wait_time):\n\n time.sleep(wait_time)",
"def waitTillReachable(self, sleep_per_try_secs=120, timeout=1200):\n elapsed_time = 0\n while elapsed_time < timeout:\n if self.isReachable():\n logger.info(\"Machine pingable. Reconnecting after 30 secs..\")\n time.sleep(30)\n self.connect()\n return True\n else:\n logger.info(\"Machine not yet pingable. Waiting for %s secs before retrying..\" % sleep_per_try_secs)\n time.sleep(sleep_per_try_secs)\n elapsed_time += sleep_per_try_secs\n logger.warning(\"TIMEOUT: Waited for %d secs, but machine still not reachable\" % elapsed_time)\n return False",
"def wait(wait_time=WAIT_TIME):\n # time.sleep(wait_time)\n pass",
"def wait(self) -> None:\n now = time.time()\n if now < self.lockTime:\n diff = self.lockTime - now\n logger.debug(\"Waiting %ss to avoid ratelimit\", diff)\n time.sleep(diff)",
"def wait(delay=2):\n time.sleep(delay)",
"def test_retry_run(self):\n pass",
"def retry_after(self):\n return self._retry_after",
"def wait(self):\n time.sleep(0.010)",
"def check_completion(self):\n\n time.sleep(3)\n while self.status == 0:\n pass"
] | [
"0.70267355",
"0.6887415",
"0.6887415",
"0.6887415",
"0.68812937",
"0.68608344",
"0.6727222",
"0.672179",
"0.6700682",
"0.6633597",
"0.6627615",
"0.65776014",
"0.65674937",
"0.65405065",
"0.6536948",
"0.6520785",
"0.6511901",
"0.64983654",
"0.64704025",
"0.64613533",
"0.64580476",
"0.64499426",
"0.6415834",
"0.6412773",
"0.64099276",
"0.639104",
"0.6385431",
"0.63804543",
"0.6367296",
"0.6346843"
] | 0.7936986 | 0 |
Perform a basic check on the server version compatibility. | def _check_server_version(self, server_version: str) -> None:
cur_version = parse_version(server_version)
min_version = parse_version(MIN_SERVER_VERSION)
if cur_version < min_version:
raise InvalidServerVersion
if cur_version != min_version:
self._logger.warning(
"Connected to a Zwave JS Server with an untested version, \
you may run into compatibility issues!"
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_version(server):\n\n assert isinstance(server.version(), six.string_types)",
"def _version_support_check(self, v_maps, **kwargs):\n if self.session._invalid_server_version():\n # server version is not valid, force a refresh right now\n self.session.get_server_version(**kwargs)\n\n if self.session._invalid_server_version():\n # server version is STILL invalid, return False\n return False\n\n for v_map in v_maps:\n if not self.session.server_version >= v_map:\n return False\n return True",
"def checkVersion(self):\n try:\n respInfo = self._reqSession.get(self._host + \"/static/pythonSDKVersion.txt\")\n if respInfo.status_code != 200 or len(respInfo.text) > 20:\n return\n latestVersion = respInfo.text.strip()\n import eventregistry._version as _version\n currentVersion = _version.__version__\n for (latest, current) in zip(latestVersion.split(\".\"), currentVersion.split(\".\")):\n if int(latest) > int(current):\n logger.info(\"==============\\nYour version of the module is outdated, please update to the latest version\")\n logger.info(\"Your version is %s while the latest is %s\", currentVersion, latestVersion)\n logger.info(\"Update by calling: pip install --upgrade eventregistry\\n==============\")\n return\n # in case the server mistakenly has a lower version that the user has, don't report an error\n elif int(latest) < int(current):\n return\n except:\n pass",
"def checkVersion(self, clientName, edamVersionMajor, edamVersionMinor):\r\n pass",
"def test_version():\n versions = ((2, 7, 16), (3, 5, 7), (3, 6, 8), (3, 7, 3))\n assert sys.version_info[:3] in versions",
"def check_version(grafana_server):\n response = send_request(grafana_server+\"/login\", \"GET\")\n\n print(info + \" Checking for version...\" + end)\n\n r1 = re.search('[0-9]{1}\\.[0-9]{1}\\.[0-9]{1}', str(response))\n print(info + \" Grafana version appears to be: \" + r1.group(0) + end)\n\n target_version = r1.group(0)\n if \"5.\" in target_version : \n fixed_version = '5.4.5'\n else:\n fixed_version = '6.3.4'\n\n if compare_versions(fixed_version, target_version) == False:\n print(bad + \" Version seems to indicate it's probably not vulnerable.\" + end)\n else:\n print(good + \" Version seems to indicate it might be vulnerable!\" + end)",
"def is_valid_version(self):\n pass",
"def is_up_to_date(self, server_version):\r\n client_split = self.__version__.split('.')\r\n client_len = len(client_split)\r\n server_split = server_version.split('.')\r\n server_len = len(server_split)\r\n\r\n # Make both lists the same length\r\n for i in range(client_len, server_len):\r\n client_split.append('0')\r\n for i in range(server_len, client_len):\r\n server_split.append('0')\r\n\r\n for i in range(0, client_len):\r\n if 'b' in client_split[i]:\r\n # Using a beta version, don't check\r\n return True\r\n client = int(client_split[i])\r\n server = int(server_split[i])\r\n if client < server:\r\n return False\r\n elif server < client:\r\n return True\r\n\r\n return True",
"def test(self,version=''):\n p5cmd = ['srvinfo', 'lexxvers']\n try:\n res = self.nsdchat_call(p5cmd,5)\n p5_version = singlevalue(res)\n if (p5_version >= str(version)):\n return True\n return False\n except subprocess.TimeoutExpired:\n print(\"Could not connect to the archiware p5 server.\\nPlease review\"\n \"the connection and firewall settings.\")\n raise",
"def test1_version(self):\n lVersion = rdbhdb.__version__.split('.')\n nVersion = need_version.split('.')\n self.assert_(lVersion >= nVersion, rdbhdb.__version__)",
"def _check_compat(sock_info):\n ...",
"def check_version():\r\n\r\n session.forget()\r\n session._unlock(response)\r\n\r\n new_version, version_number = check_new_version(request.env.web2py_version,\r\n WEB2PY_VERSION_URL)\r\n\r\n if new_version == -1:\r\n return A(T('Unable to check for upgrades'), _href=WEB2PY_URL)\r\n elif new_version != True:\r\n return A(T('web2py is up to date'), _href=WEB2PY_URL)\r\n elif platform.system().lower() in ('windows','win32','win64') and os.path.exists(\"web2py.exe\"):\r\n return SPAN('You should upgrade to version %s' % version_number)\r\n else:\r\n return sp_button(URL('upgrade_web2py'), T('upgrade now')) \\\r\n + XML(' <strong class=\"upgrade_version\">%s</strong>' % version_number)",
"def test_version(self):\n version_instance = get_version('kolibri', __file__)\n self.assertIn(version_instance.major_version, kolibri.__version__)",
"def check_version():\n err = \"PaddlePaddle version 1.6 or higher is required, \" \\\n \"or a suitable develop version is satisfied as well. \\n\" \\\n \"Please make sure the version is good with your code.\" \\\n\n try:\n fluid.require_version('1.6.0')\n except Exception as e:\n logger.error(err)\n sys.exit(1)",
"def check_pythonver(reqver_text):\n\treqver = map(int, reqver_text.split('.'))\n\tpythonver = sys.version_info[:3]\n\treturn check_ver(pythonver, reqver)",
"def test_major(self):\n self.assertEqual(\"0\", self._version1.major())\n self.assertEqual(\"1.2\", self._version2.major())",
"def check_python_version():\n version = sys.version.split()[0]\n if version < \"2.6\" or version >= \"3\":\n raise CuckooStartupError(\"You are running an incompatible version of Python, please use 2.6 or 2.7\")",
"def test_2x_only_python_version_deploy():\n pass",
"def pre_upgrade_checks(self):\n\n #HostOverview\n Logger.info(\"******************************************************************************************************************************************************\")\n Logger.info(\"\\t\\t\\t\\t\\t\\t\\tHOST OVERVIEW\")\n Logger.info(\"******************************************************************************************************************************************************\")\n print (\"\\n\")\n Logger.info(\"Ambari version\\t\\t:{0}\".format(self.ambari_version))\n\n #Check OS\n os = platform.dist()\n if os[1] != None:\n Logger.info(\"Operating System\\t\\t:{0} {1} - {2}\".format(os[0],os[1],os[2]))\n else:\n Logger.error(\"Unable to fetch OS details.\")\n self.terminate()\n return\n\n self.check_java_version()\n self.check_exactly_one_current_version()\n\n\n #Check if rack awareness is enabled ?\n rack_awareness = \"SELECT DISTINCT rack_info FROM hosts WHERE rack_info!='/default-rack';\"\n self.cursor.execute(rack_awareness)\n result = self.cursor.fetchone()\n if result is None or len(result) != 1:\n Logger.info(\"Rack Awareness ?\\t\\tNo\\n\")\n else:\n Logger.info(\"Rack Awareness ?\\t\\tYes\\n\")\n\n #Security Overview\n self.check_security()\n\n #Check High Availability configuration\n self.check_high_availability()\n\n #Check Metastores\n self.check_metastore()",
"def test_python_version(self):\n assert 2 == sys.version_info.major\n assert 7 == sys.version_info.minor\n assert 6 <= sys.version_info.micro",
"def check_update():\n try:\n raw_version = urllib.urlopen(VERSIONFILE)\n except IOError as e:\n print UPDATE_FAIL + \"can't fetch version file: \" + str(e)\n else:\n if raw_version.getcode() == 200:\n remote_version = raw_version.read().rstrip()\n if remote_version != VERSION:\n print(UPDATE_WARN + \"version \" + remote_version + \" is available, you have version \"\n + VERSION + \"\\n\\t\" + \"to update run: \" + UPDATECOMMAND)\n else:\n print UPDATE_FAIL + \"can't fetch version file\"",
"def _check_sse_version(self, **kwargs):\n if not self.session.platform_is_6_5(**kwargs):\n m = \"Server side export not supported in version: {}\".format\n m = m(self.session.server_version)\n raise pytan.exceptions.UnsupportedVersionError(m)",
"def is_version_2_6() -> bool:\n v = get_version()\n if v[1] != \"singularity\" and v[1] != \"singularity-ce\":\n return False\n return v[0][0] == 2 and v[0][1] == 6",
"def test_version(self):\n pass",
"def check_stability(self):",
"def test_server_details_ok(self):\n response = self.call_api('server_details', {}, 200).json\n self.assertEqual(utils.get_app_version(), response['server_version'])",
"def python_compatible():\n result = False\n req_ver = vers.convert('3.9.5')\n pythonver = vers.convert('{major}.{minor}.{micro}'.format(major=sys.version_info.major,\n minor=sys.version_info.minor,\n micro=sys.version_info.micro))\n\n result = pythonver >= req_ver\n\n return result",
"def test_version_check_update_available(self):\n output = self.run_command(\"selfupdate --check bennr01:selfupdate_test_future\", exitcode=0)\n self.assertIn(\"Target: bennr01:selfupdate_test_future\", output)\n self.assertNotIn(\"Target: ywangd:master\", output)\n self.assertNotIn(\"Already at latest version\", output)\n self.assertIn(\"New version available\", output)\n self.assertNotIn(\"Error: \", output)",
"def check_version(ctx, _, value):\n if not value or ctx.resilient_parsing:\n return\n\n click.echo(f\"geocube v{importlib.metadata.version('geocube')}\")\n\n ctx.exit()",
"def check_version(self, node):\n assert \"version\" in node, \"Version node does not contain attribute 'version'\"\n assert len(node[\"version\"]) >= 1, \"Expecting at least one 'version' value\"\n # TODO: add more thorough checks"
] | [
"0.71473086",
"0.711342",
"0.7065705",
"0.7041194",
"0.7032798",
"0.6843562",
"0.6806416",
"0.6799755",
"0.670503",
"0.66898423",
"0.6678609",
"0.6671365",
"0.6639894",
"0.6619981",
"0.6613792",
"0.6588024",
"0.657879",
"0.6576938",
"0.6552172",
"0.65470165",
"0.6523204",
"0.64980674",
"0.6497379",
"0.649174",
"0.64737874",
"0.6459773",
"0.6428612",
"0.64265484",
"0.64134824",
"0.6412359"
] | 0.7609688 | 0 |
Calculate the posterior distribution given new data (x, t), and return the updated mean and precision | def posterior_distribution(x, t, M, noise_precision, prior_mu, prior_precision):
    A = np.array([x ** i for i in range(M)]).reshape((1, M)) # (1, M)
new_precision = prior_precision + noise_precision * np.dot(np.transpose(A), A)
new_mu = np.dot(np.linalg.inv(new_precision), noise_precision * t * np.transpose(A) + np.dot(prior_precision, prior_mu))
return new_mu, new_precision | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_params(x, prior, posterior):\r\n mu0, kappa0, alpha0, beta0 = prior\r\n mu_t, kappa_t, alpha_t, beta_t = posterior\r\n return np.r_[mu0, (kappa_t*mu_t + x)/(kappa_t + 1)], \\\r\n np.r_[kappa0, kappa_t + 1], \\\r\n np.r_[alpha0, alpha_t + 0.5], \\\r\n np.r_[beta0, beta_t + 0.5*kappa_t*(x - mu_t)**2/(kappa_t + 1)]",
"def posterior(mu, x, sigma):\n post = like(x, sigma, mu) * prior(mu)\n evidencia = np.trapz(post, mu)\n return post/evidencia",
"def q_posterior(self, x_start, x_t, t):\n\n x_t_shape = tf.shape(x_t)\n posterior_mean = (\n self._extract(self.posterior_mean_coef1, t, x_t_shape) * x_start\n + self._extract(self.posterior_mean_coef2, t, x_t_shape) * x_t)\n posterior_variance = self._extract(self.posterior_variance, t, x_t_shape)\n posterior_log_variance_clipped = self._extract(\n self.posterior_log_variance_clipped, t, x_t_shape)\n return posterior_mean, posterior_variance, posterior_log_variance_clipped",
"def computePosterior(self):\n # in their log form, posterior = prior + beta * datalikelihood\n # make a copy of prior at first\n self.posterior.copy(self.prior)\n # add the data likelihood\n altar.blas.daxpy(self.beta, self.data, self.posterior)\n # all done\n return self",
"def posterior(self, x: Tensor) -> Distribution:\n # Compute the parameters of the posterior\n h_x = self.encoder(x)\n mu, log_sigma = h_x.chunk(2, dim=-1)\n\n # Return a distribution `q(z|x) = N(z | \\mu(x), \\sigma(x))`\n return Normal(mu, log_sigma.exp())",
"def get_posterior_sample(self):\n (a, b) = (self.prior_success + 1e-6 - 1, self.prior_failure + 1e-6 - 1)\n # The modes are not well defined unless alpha, beta > 1\n assert np.all(a > 0)\n assert np.all(b > 0)\n\n \"\"\"\n g(φ) denote a log-concave probability density function\n 对于二项分布而言, 概率分布为:P(x)=c(n,r)*(x^r)*(1-x)^(n-r), x为变量\n lnP(x) = lnc(n,r) + r*lnx+(n-r)*ln(1-x)\n dlnP(x)/dx = r/x + (n-r)/(1-x)*(-1) = a/x - b/(1-x)\n d2lnP(x)/d2x = -a/x^2 - b/(1-x)^2\n \n 此处x为众数,即:x = np = a/(a+b)\n \n 众数(Mode)是指在统计分布上具有明显集中趋势点的数值,代表数据的一般水平。 \n 也是一组数据中出现次数最多的数值,有时众数在一组数中有好几个\n 在高斯分布中,众数位于峰值。\n \"\"\"\n\n mode = a / (a + b) # 众数(对于连续分布), a:[arm,1], b: [arm, 1]\n hessian = a / mode + b / (1 - mode) # [arm, 1], TODO:此处是否计算有误?应该为 a/mode**2 + b/(1-mode)**2 ?\n \"\"\"\n 参见论文5.2:\n An approximate posterior sample θˆ is then drawn\n from a Gaussian distribution with mean θ and covariance matrix\n (−∇2 ln(ft−1(θ)))−1\n \"\"\"\n laplace_sample = mode + np.sqrt(1 / hessian) * np.random.randn(self.n_arm) # 采样arm个样本\n return laplace_sample",
"def posterior(self, val, **kwargs) -> float:\n\n data = self.data\n\n # override val with parameters specified via kwargs\n val = copy.deepcopy(val)\n for key, value in kwargs.items():\n setattr(val, key, value)\n\n # extract parameters\n gain = val.gain\n states = val.states\n pi = val.transitions\n pi_conc = val.transitions_conc\n mu_flor = val.mu_flor\n mu_flor_mean = val.mu_flor_mean\n mu_flor_shape = val.mu_flor_shape\n mu_back = val.mu_back\n mu_back_mean = val.mu_back_mean\n mu_back_shape = val.mu_back_shape\n load_weight = val.load_weight\n num_rois = val.num_rois\n num_load = val.num_load\n num_data = val.num_data\n num_states = val.num_states\n\n # calculate shape parameters\n idx = mu_flor_mean > 0\n mu_flor_scale = np.zeros(mu_flor_mean.shape)\n mu_flor_scale[idx] = mu_flor_mean[idx] / mu_flor_shape[idx]\n mu_back_scale = mu_back_mean / mu_back_shape\n # calculate effective pi for collapsed state space when weight on load is taken into account\n pi_eff = pi.copy()\n pi_eff[-1, :] *= load_weight\n pi_eff[-1, -1] = 1 - load_weight\n\n # probability from likelihood\n brightness = np.zeros(shape=data.shape)\n for r in range(num_rois):\n brightness[r, :] = mu_flor @ states_to_pops(states[r, :, :], num_states) + mu_back[r]\n lhood = np.sum(stats.gamma.logpdf(data, a=brightness, scale=gain))\n\n # probability from phototrajectory\n kinetic = 0\n for i in range(num_states):\n if pi_eff[-1, i] > 0:\n kinetic += np.sum(states[:, :, 0] == i) * np.log(pi_eff[-1, i])\n for j in range(num_states):\n if pi_eff[i, j] > 0:\n kinetic += np.sum((states[:, :, :-1] == i) * (states[:, :, 1:] == j)) * np.log(pi_eff[i, j])\n\n # probability from prior\n prior = (\n # prior on fluorophore brightness (ignore dark states)\n np.sum(stats.gamma.logpdf(mu_flor[idx], a=mu_flor_shape[idx], scale=mu_flor_scale[idx]))\n # prior on background brightness\n + np.sum(stats.gamma.logpdf(mu_back, a=mu_back_shape, scale=mu_back_scale))\n # prior on transitions\n + np.sum(Dirichlet.logpdf(pi, pi_conc))\n )\n\n prob = lhood + kinetic + prior\n\n return prob",
"def sample_GP_posterior(x_train, y_train, x_test, mean_func, cov_func,\n\t\t\t\t\t kernel_params, sigma=0.1, seed=42, n_samples=5):\n K = cov_func(x_train, x_train, *kernel_params)\n L = np.linalg.cholesky(K + sigma**2 * np.identity((x_train.shape)[0]))\n K_star = cov_func(x_train, x_test, *kernel_params)\n alpha = np.linalg.solve(np.transpose(L), np.linalg.solve(L, y_train))\n f_star = np.dot(np.transpose(K_star), alpha)\n v = np.linalg.solve(L, K_star)\n K_star_star = cov_func(x_test, x_test, *kernel_params)\n V_f = K_star_star - np.dot(np.transpose(v), v)\n prng = np.random.RandomState(int(seed))\n sample = prng.multivariate_normal(f_star, V_f, n_samples)\n return sample, f_star, np.diag(V_f)",
"def sample_params_from_posterior(self, y_NT, prev_params):\n posterior_shape = self._shape + y_NT.sum(axis=1)\n posterior_scale = 1.0/((1.0/self._scale) + self.T)\n self._lambda_T[:] = rn.gamma(posterior_shape, posterior_scale)\n return self._lambda_T.copy()",
"def calc_posterior(likelihood, prior, norm_list):\n Pa = 0\n \n for t in norm_list:\n x = t[0] * t[1]\n Pa+=x\n\n return (likelihood*prior)/Pa",
"def update(self, caliStep, likelihood):\n posterior = np.zeros(self.numSamples)\n if caliStep == 0:\n posterior = likelihood / self.proposal\n else:\n posterior = self.posterior[:, caliStep - 1] * likelihood\n\n # regularize likelihood\n posterior /= np.sum(posterior)\n return posterior",
"def compute_posterior(prior, likelihood, y):\n\n # -------------------------------------------------------------------------\n # ERROR CHECKS -- DO NOT MODIFY\n #\n\n # check that prior probabilities sum to 1\n if np.abs(1 - np.sum(prior)) > 1e-06:\n exit('In compute_posterior: The prior probabilities need to sum to 1')\n\n # check that likelihood is specified as a 2D array\n if len(likelihood.shape) != 2:\n exit('In compute_posterior: The likelihood needs to be specified as ' +\n 'a 2D array')\n\n K, M = likelihood.shape\n\n # make sure likelihood and prior agree on number of hidden states\n if len(prior) != M:\n exit('In compute_posterior: Mismatch in number of hidden states ' +\n 'according to the prior and the likelihood.')\n\n # make sure the conditional distribution given each hidden state value sums\n # to 1\n for m in range(M):\n if np.abs(1 - np.sum(likelihood[:, m])) > 1e-06:\n exit('In compute_posterior: P(Y | X = %d) does not sum to 1' % m)\n\n #\n # END OF ERROR CHECKS\n # -------------------------------------------------------------------------\n\n # -------------------------------------------------------------------------\n # YOUR CODE GOES HERE FOR PART (b)\n #\n # Place your code to compute the log of the posterior here: store it in a\n # NumPy array called `log_answer`. If you exponentiate really small\n # numbers, the result is likely to underflow (i.e., it will be so small\n # that the computer will just make it 0 rather than storing the right\n # value). You need to go to log-domain. Hint: this next line is a good\n # first step.\n log_prior = np.log(prior)\n# print(log_prior)\n# print(likelihood)\n# print(y)\n unnormal = log_prior + np.log(likelihood[y,:]).sum(axis=0)\n# print(unnormal)\n log_answer = unnormal - scipy.misc.logsumexp(unnormal)\n# print(log_answer)\n\n #\n # END OF YOUR CODE FOR PART (b)\n # -------------------------------------------------------------------------\n\n # do not exponentiate before this step\n posterior = np.exp(log_answer)\n return posterior",
"def update_posterior_probs(vars_):\n vars_.weighted_sums += np.power(vars_.dprime_map[vars_.focus],2) * vars_.visual_field\n vars_.post_probs = np.exp(vars_.weighted_sums) * vars_.prior_prob\n vars_.post_probs /= np.sum(vars_.post_probs)",
"def posterior_distr(self, y, **args):\n raise NotImplementedError",
"def posteriorDistribution(x,z,beta,sigma2):\r\n ### TODO: Write your code here\r\n\r\n # mean of posterior distribution is the MAP estimate of the weights a\r\n # tau^2(from notes) is beta\r\n\r\n extra_col = np.ones((x.shape[0], 1))\r\n x = np.append(extra_col, x, axis = 1)\r\n\r\n alpha_map = np.linalg.inv((np.transpose(x)@x + (sigma2/beta)*np.eye(2)))@(np.transpose(x)@z)\r\n mu = alpha_map\r\n\r\n Cov = np.linalg.inv((np.transpose(x)@x + (sigma2/beta)*np.eye(2)))*sigma2\r\n\r\n num_x = 100\r\n num_y = 100\r\n\r\n xvalues = np.linspace(-1, 1, num = num_x)\r\n yvalues = np.linspace(-1, 1, num = num_y)\r\n X_grid, Y_grid = np.meshgrid(xvalues, yvalues)\r\n\r\n samples = np.column_stack((X_grid.flatten(), Y_grid.flatten()))\r\n\r\n density = util.density_Gaussian(mu.squeeze(), Cov, samples)\r\n density_grid = np.reshape(density, (num_x, num_y))\r\n\r\n plt.figure(1)\r\n plt.title(\"Posterior Distribution of α Given 5 Data Points\")\r\n plt.xlabel('$α_0$')\r\n plt.ylabel('$α_1$')\r\n plt.scatter(-0.1, -0.5, c='r')\r\n plt.contour(X_grid, Y_grid, density_grid, cmap=plt.cm.winter)\r\n plt.show()\r\n\r\n return (mu,Cov)",
"def one_step_update(model, posterior_tm1, Y_t):\n prediction = model.transmat_ @ posterior_tm1\n likelihood = np.exp(model._compute_log_likelihood(Y_t))\n posterior_t = prediction * likelihood\n return posterior_t",
"def post_predictive_distribution(self, samples):\n post_pred_dist = []\n posteriors = self.posterior(samples)\n for point in range(1, self.max_val+1):\n post_pred = 0\n for concept, posterior in list(zip(self.concepts, posteriors)):\n if point in concept.extension:\n post_pred += posterior\n post_pred_dist.append(post_pred)\n return post_pred_dist",
"def prob(x):\n\treturn 1. * bivariate_normal(x, (0., 1.2), (1., 1.), .8) + \\\n\t 1.05 * bivariate_normal(x, (.6, -1.), (1.3, .7), -.6)",
"def posterior(self, samples):\n unique_samps = set(samples)\n denominator = 0\n posteriors = []\n n_samps = len(samples)\n for concept in self.concepts:\n num = 0\n if unique_samps.issubset(set(concept.extension)):\n num = concept.prior*concept.likelihood(n_samps)\n denominator += num\n posteriors.append(num)\n return np.divide(posteriors, denominator)",
"def posteriorDistribution(x,z,beta,sigma2):\r\n ### TODO: Write your code here\r\n mu = 0\r\n Cov = 0\r\n\r\n x_s = []\r\n for i in np.linspace(-1 , 1 , 150):\r\n for j in np.linspace(-1 , 1 , 150):\r\n x_s.append([i,j])\r\n x_s = np.array(x_s)\r\n\r\n X = []\r\n for i in x:\r\n j = [1,i[0]]\r\n X.append(j)\r\n X = np.array(X)\r\n\r\n common = np.matmul( X.T , X) + np.identity(2) * sigma2/beta\r\n common = np.linalg.inv(common)\r\n Cov = common * sigma2\r\n mu = np.matmul(common , np.matmul (X.T , z) )\r\n mu = mu.flatten()\r\n print(\"X.shape: \" , X.shape)\r\n print(\"z.shape: \",z.shape)\r\n print(\"Cov.shape\" , Cov.shape)\r\n print(\"mu.shape: \",mu.shape)\r\n density = util.density_Gaussian(mu , Cov , x_s).reshape(150 , 150 ).T\r\n print(\"density.shape\",density.shape)\r\n X,Y = np.meshgrid( np.linspace(-1,1,150) , np.linspace(-1,1,150) )\r\n\r\n \r\n\r\n plt.contour( X , Y , np.reshape(density , (150, 150 )))\r\n plt.plot(-0.1 , -0.5 , marker = 'o' , MarkerSize = 10 , label = 'point a')\r\n plt.xlabel('a0 ')\r\n plt.ylabel(' a1 ')\r\n plt.legend()\r\n plt.xlim = (-1,1)\r\n plt.ylim = (-1,1)\r\n plt.title('p(a|x1,z1....xn,zn) for '+ str(len(x)) +' samples')\r\n plt.show() \r\n print('-x-x-x-x-x-x-x-x-x')\r\n\r\n return (mu,Cov)",
"def predict(self, t, **kwargs):\n means, vars_ = self._get_posterior_params(t, j=kwargs.get('j', None))\n rl_post = self._R[t-1, :t]\n # Index is t-1 because we make no predictions at time t=0.\n self._pmean[t-1] = np.sum(means * rl_post)\n self._pvar[t-1] = np.sum(vars_ * rl_post)",
"def compute_rt(rs, pred_prob, h):\r\n rp = rs*pred_prob\r\n rph = rp*h\r\n # probability of change point\r\n cp_prob = rph.sum()\r\n # probability that each run grows\r\n growth_prob = rp - rph\r\n rt = np.r_[cp_prob, growth_prob]\r\n return rt/rt.sum()",
"def _getCurrentPosteriorLikelihood(self): \n likelihood = 0\n T = self.T\n K= self.K \n final_likelihood = 0\n total_log_lik = 0\n \n for n in range(1,self.N+1):\n # Compute total Likelihood for all Instances P(x1...xn / theta) \n tot_lik = 0\n tot_scale_factor = 0\n \n for i in range(1,self.K+1): \n likelihood = self.posterior_state_trellis[n][(T,i)]\n tot_lik = tot_lik + likelihood\n\n try:\n total_log_lik = math.log(tot_lik) \n except ValueError:\n ipdb.set_trace()\n \n for t in range(1,self.T):\n scale_factor = self.forward_scaling_vector[n][t] \n tot_scale_factor = tot_scale_factor + math.log(scale_factor)\n\n final_likelihood = final_likelihood + (total_log_lik - tot_scale_factor)\n\n return final_likelihood",
"def estimate_propensities(X, t):\n learner = get_default_estimator()\n learner.fit(X, t)\n return calibrate_propensities(learner.predict_proba(X)[:, 1], t)",
"def grd_posterior_gaussian(self, ) -> Tuple[np.ndarray, np.ndarray]:\n xmin, xmax = self.x_range\n ymin, ymax = self.y_range\n\n mu = np.array([0, 0])\n sigma = np.zeros((2, 2))\n\n _sample = self._sample\n _prior = self.prior\n\n def mean_x(x: float, y: float):\n return x * _sample(x, y) * _prior.eval(x, y)\n\n def mean_y(x: float, y: float):\n return y * _sample(x, y) * _prior.eval(x, y)\n\n def var_x(x: float, y: float):\n return x * mean_x(x, y)\n\n def var_y(x: float, y: float):\n return y * mean_y(x, y)\n\n # def var_xy(x: float, y: float):\n # return x * mean_y(x, y)\n\n # First moment\n (mu[0], mu[1]) = (integrate.dblquad(mean_x, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],\n integrate.dblquad(mean_y, xmin, xmax, lambda x: ymin, lambda x: ymax)[0])\n (sigma[0, 0], sigma[1, 1]) = \\\n (integrate.dblquad(var_x, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],\n integrate.dblquad(var_y, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],)\n # integrate.dblquad(var_xy, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],)\n return mu, sigma",
"def posterior_samples(self, nsamples):\n prior = self.prior_samples(nsamples)\n\n S = self.eval_S(self.kappa, self.sigma_f)\n K_chol = self.eval_K_chol(S, self.sigma_n, self.sigma_f)\n\n residue = (self.Y - prior[..., self.X]).T # shape (n, s)\n\n K_chol_inv_R = solve_triangular(K_chol,\n residue, lower=True) # shape (n, s)\n\n phi_S_phistar = (self.eigenfunctions[self.X] * S[None, :]) @ \\\n self.eigenfunctions.T # shape (n, t)\n\n K_chol_inv_phi_S_phistar = solve_triangular(K_chol,\n phi_S_phistar,\n lower=True) # shape (n, t)\n update_term = np.einsum('nt,ns->st',\n K_chol_inv_phi_S_phistar,\n K_chol_inv_R)\n\n return prior + update_term # shape (s, t)",
"def test_sample_posterior_predictive():\n df = pd.DataFrame(dict(x=[1.0, 2.0, 3.0, 4.0]))\n\n class Model(Poisson):\n dv = \"y\"\n features = dict(x=dict(transformer=lambda x: x.x, prior=dist.Normal(0, 1)))\n\n config = {\"samples\": {\"x\": onp.ones((10, 100000))}}\n model = Model.from_dict(config)\n pred = model.sample_posterior_predictive(df, rng_key=onp.array([0, 0]))\n log_pred = onp.log(pred).round(2)\n assert df.x.astype(\"float32\").equals(log_pred.astype(\"float32\"))",
"def taucurveder(self, p, x):\n y = -(p[1] * numpy.exp((p[2] + x) / p[3]) / p[3] - p[4] * numpy.exp(-(p[5] + x) / p[6]) / p[6]) / (\n p[1] * numpy.exp((p[2] + x) / p[3]) +\n p[4] * numpy.exp(-(p[5] + x) / p[6])) ** 2.0\n # print 'dy: ', y\n return y",
"def log_posterior(self, z):\n log_prior = self.prior.log_prob(z)\n log_likelihood = self.log_likelihood(z)\n assert log_prior.shape == log_likelihood.shape\n return log_prior + log_likelihood",
"def _sample_posterior(self):\n\n latent_dim = self.network_architecture['latent_dim']\n\n # Sample eps from standard Normal\n eps = tf.random_normal([self.batch_size, latent_dim], 0, 1,\n dtype=tf.float32)\n\n # Transform using Z = mean + root_cov*eps\n samp = self.rec_mean + tf.mul(tf.sqrt(tf.exp(self.rec_log_sigma_sq)),\n eps)\n return samp"
] | [
"0.65724057",
"0.6324972",
"0.61905974",
"0.60330665",
"0.6015493",
"0.59848833",
"0.5980878",
"0.59782517",
"0.59262353",
"0.5910541",
"0.5895196",
"0.58566135",
"0.5828628",
"0.57966477",
"0.5779137",
"0.5766752",
"0.5717351",
"0.56898874",
"0.56757206",
"0.5641809",
"0.5637649",
"0.5629539",
"0.5622362",
"0.5574125",
"0.55649114",
"0.55263174",
"0.55241525",
"0.55169386",
"0.5516755",
"0.5514369"
] | 0.748848 | 0 |
GitHub login (3 requests needed) | def __github_login(self, github_http_session):
self.__debugInfo("Logging into Github")
try: # 1st request (grab some data needed for the login form)
github_html_login = github_http_session.get('https://github.com/login')
sleep(GITHUB_HTTP_DELAY)
github_soup_login = BeautifulSoup(github_html_login.text, 'html.parser')
form_data_login = {
'commit': 'Sign in',
'authenticity_token': github_soup_login.find('input', {'name': 'authenticity_token'})['value'],
'login': self.github_username,
'password': self.github_password,
'webauthn-support': 'supported',
'webauthn-iuvpaa-support': 'unsupported',
github_soup_login.find('input', {'name': compile('required_field_')})['name']: '',
'timestamp': github_soup_login.find('input', {'name': 'timestamp'})['value'],
'timestamp_secret': github_soup_login.find('input', {'name': 'timestamp_secret'})['value']
}
except Exception as exception:
raise MsgException('Unable to HTTP-GET GitHub login data', exception)
try: # 2nd request (submit the login form and grab some data needed for the OTP form)
github_http_session.headers.update({'Content-Type': 'application/x-www-form-urlencoded'})
github_html_twofactor = github_http_session.post('https://github.com/session', data = urlencode(form_data_login))
sleep(GITHUB_HTTP_DELAY)
github_soup_twofactor = BeautifulSoup(github_html_twofactor.text, 'html.parser')
form_data_otp = {'authenticity_token': github_soup_twofactor.find('input', {'name': 'authenticity_token'})['value']}
except Exception as exception:
raise MsgException('Unable to log in to GitHub (credentials)', exception)
try: # 3rd request (submit the OTP form)
form_data_otp.update({'otp': TOTP(self.github_otp).now()})
github_http_session.post('https://github.com/sessions/two-factor', data = urlencode(form_data_otp))
sleep(GITHUB_HTTP_DELAY)
github_http_session.headers.pop('Content-Type')
except Exception as exception:
raise MsgException('Unable to log in to GitHub (OTP)', exception) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def github_login():\n if not github.authorized:\n flash('Access denied - please try again', 'warning')\n return redirect(url_for(\"auth.login\", local=1))\n # Get remote user data\n resp = github.get(\"/user\")\n if not resp.ok:\n flash('Unable to access GitHub data', 'danger')\n return redirect(url_for(\"auth.login\", local=1))\n resp_user = resp.json()\n if 'email' not in resp_user or 'login' not in resp_user:\n flash('Invalid GitHub data format', 'danger')\n # print(resp_user)\n return redirect(url_for(\"auth.login\", local=1))\n # Get remote profile data\n resp_emails = github.get(\"/user/emails\")\n if not resp.ok:\n flash('Unable to access GitHub e-mail data', 'danger')\n return redirect(url_for(\"auth.login\", local=1))\n for u in resp_emails.json():\n if u['primary'] and u['verified']:\n return get_or_create_sso_user(\n resp_user['id'],\n resp_user['login'],\n u['email'],\n 'https://github.com/%s' % resp_user['login']\n )\n flash('Please verify an e-mail with GitHub', 'danger')\n return redirect(url_for(\"auth.login\", local=1))",
"def log_in(self):\n\t\tpass",
"def connect_to_github():\n\n # Authentication\n from os.path import isfile\n if isfile(\"github-logins.json\"):\n with open(\"github-logins.json\", \"r\") as loginfile:\n logins = json.load(loginfile)\n gh = login(username=logins[\"username\"], password=logins[\"password\"])\n else:\n from getpass import getpass\n password = getpass()\n gh = login(username=\"yourusername\", password=password)\n\n # Connect to the repo\n repo = gh.repository(\"ghostofgoes\", \"botnet-example\")\n branch = repo.branch(\"master\")\n return gh, repo, branch",
"def login():\n scope = current_app.config.get('GITHUB_SCOPES')\n return GitHub(current_app).authorize(scope)",
"def login():",
"def login():",
"def _login(self, *args, **kwargs):\n pass",
"def github_login():\n # If a logged-in user goes to \"/login\", that user won't need to log in\n # again, and automatically go back to the home page.\n if current_user.is_authenticated:\n return redirect(url_for('main.home'))\n\n # GitHub Sign-In constants\n GITHUB_APP_CLIENT_ID = os.environ['GITHUB_APP_CLIENT_ID']\n GITHUB_APP_CLIENT_SECRET = os.environ['GITHUB_APP_CLIENT_SECRET']\n GITHUB_ACCESS_TOKEN_ENDPOINT = 'https://github.com/login/oauth/access_token'\n GITHUB_USERINFO_ENDPOINT = 'https://api.github.com/user'\n\n # After authorization and redirection back from GitHub, the authorization\n # code should be in the query parameters.\n code = request.args.get('code')\n if not code:\n raise ValueError('Need authorization code to process')\n\n # 1. Exchange the authorization code for access token\n r = requests.post(\n GITHUB_ACCESS_TOKEN_ENDPOINT,\n headers={\n 'Accept': 'application/json'\n },\n params={\n 'client_id': GITHUB_APP_CLIENT_ID,\n 'client_secret': GITHUB_APP_CLIENT_SECRET,\n 'code': code\n }\n )\n json_data = r.json()\n if 'error' in json_data:\n raise ValueError(json_data['error_description'])\n access_token = json_data['access_token']\n\n # 2. Exchange the access token for GitHub user information\n r = requests.get(\n GITHUB_USERINFO_ENDPOINT,\n headers={\n 'Authorization': f'token {access_token}'\n }\n )\n json_data = r.json()\n if 'error' in json_data:\n raise ValueError(json_data['error_description'])\n\n # 3. Successfully got the GitHub user information from GitHub\n # -> Associate a local account with that GitHub user\n # (Similar workflow as user registeration or log-in)\n return _oauth_local_login(\n oauth_username=f\"GitHub-User-{json_data['id']}\",\n email=json_data['email'],\n image_url=json_data['avatar_url']\n )",
"def login(self):\n\t\treturn",
"def login(self):",
"def login(endpoint, git, yes):\n communicator = ClickCallback()\n login_command().with_communicator(communicator).build().execute(endpoint=endpoint, git_login=git, yes=yes)\n click.secho(\"Successfully logged in.\", fg=\"green\")",
"def code_login(ui, repo, **opts):\n\tMySend(None)",
"def login():\n pass",
"def test_github_fork_and_star_repo_annonymous_user_redirects_to_login(self):\n login_url = \"https://github.com/login?return_to=%2Fsysters%2Fmailman3\"\n driver = self.driver\n driver.get(\"{0}/systers/mailman3\".format(self.base_url))\n elem = driver.find_element_by_partial_link_text(\"Fork\")\n elem.send_keys(Keys.ENTER)\n time.sleep(2)\n assert login_url == driver.current_url\n\n driver.get(\"{0}/systers/mailman3\".format(self.base_url))\n elem = driver.find_element_by_partial_link_text(\"Star\")\n elem.send_keys(Keys.ENTER)\n time.sleep(2)\n assert login_url == driver.current_url",
"def login_bot(self):\n pass",
"async def github(self, ctx):\n await ctx.send('https://github.com/nick411077/nickcan_bot')",
"def einloggen(self):\n \n self.c.login(self.username.text(), self.password.text(), \"1\")",
"def authentification():\n # To get user input, we need to test Python version :(\n if version_info[0] > 2:\n username = input('GitHub username:')\n else:\n username = raw_input('GitHub username:')\n password = getpass()\n r = requests.get('https://api.github.com/user', auth=(username, password))\n if r.status_code != 200:\n print('Incorrect username/password, please retry')\n return authentification()\n return (username, password)",
"def create_api_handler(self):\n self.github = github3.login(username=GH_USER, password=GH_PASSWORD)\n if hasattr(self.github, 'set_user_agent'):\n self.github.set_user_agent('Jonathan Reeve: http://jonreeve.com')\n self.org = self.github.organization(login='Git-Lit')\n # FIXME: logging\n print(\"ratelimit: \" + str(self.org.ratelimit_remaining))",
"def display_credentials():\n print(f\"GH_TOKEN: {GH_TOKEN}\")\n print(f\"USER: {GH_USER}\")",
"def login(self):\n with self.client.post(\"/login\", {\"username\":self.user.username,\n \"password\":MASTER_PASSWORD},\n catch_response=True) as response:\n for r_hist in response.history:\n if r_hist.cookies.get('token') is not None:\n response.success()\n return\n response.failure(\"login failed\")",
"def login(self):\n\n self.__login_if_required()",
"def login(self):\n r = self._login_token()",
"def login(self):\n #raise NotImplementedError(\"This method must be overridden\")",
"def log_in(self):\n if self.is_logged_in():\n return\n\n req_html = request.urlopen(\"https://www.linkedin.com/uas/login\").read()\n soup = BeautifulSoup(req_html)\n csrf = soup.find(id=\"loginCsrfParam-login\")['value']\n\n login_data = parse.urlencode({\n 'session_key': self.username,\n 'session_password': self.password,\n 'loginCsrfParam': csrf\n })\n\n data = login_data.encode()\n\n password_manager = request.HTTPPasswordMgrWithDefaultRealm()\n password_manager.add_password(None, \"https://www.linkedin.com/\", self.username, self.password)\n\n Registration.opener.add_handler(request.HTTPBasicAuthHandler(password_manager))\n\n response = request.urlopen(\"https://www.linkedin.com/uas/login-submit\", data)\n res_html = BeautifulSoup(response.read())\n\n Registration.jar.save(Registration.cookie_filename)\n\n return response",
"def log_in(self):\n print('-=' * 12 + \" Log in \" + '-=' * 12)\n mob_num, password = self._input_mob_num('Mobile Number :'), input(\"Password: \")\n self._user = self.auth.log_in(mob_num, password)\n if self._user:\n print(\"you are logged in, Welcome '{}'\".format(self._user.username))\n self.homepage()\n else:\n print(\"Mobile number or/and password is/are Invaild \\n\" + '-=' * 30)\n options = {1: self.log_in, 2: self.logging_page, 3: self.exit}\n print_out = \"(1) Try Again \\n (2) Back to Logging Page \\n (3) Exit\"\n self._take_option(options, print_out)",
"def log_login(sender, request, user, **kwargs):\n stracks.user(user).log(\"? has logged in\", action=stracks.login())",
"def verifyLogin():\n global HUB\n\n loginInfo = FloatingTools.userData()['Login']\n if loginInfo['username'] is None or loginInfo['password'] is None:\n FloatingTools.Dashboard.setDashboardVariable('logged_in', False)\n return False\n try:\n HUB = Github(loginInfo['username'], loginInfo['password'])\n for repo in HUB.get_user().get_repos():\n break\n FloatingTools.Dashboard.setDashboardVariable('logged_in', True)\n return True\n except BadCredentialsException:\n FloatingTools.Dashboard.setDashboardVariable('logged_in', False)\n return False",
"def github(code, input):\n syntax = 'Syntax: \\'.github <user|user/repo>\\''\n failed = 'Failed to get data from Githubs API :('\n if len(input.group(2).strip().split()) != 1:\n return code.say(syntax)\n\n spacer = ' {blue}|{c} '\n\n if '/' not in input.group(2):\n # Assume a single username\n try:\n tmp = web.json(user_api % input.group(2).strip())\n response = {}\n # Remove dem ugly nulled values. It's a dictionary so we have to\n # loop differently.\n for key, value in tmp.iteritems():\n if value != '' or len(value) != 0 or value != 'null':\n response[key] = value\n print response\n except:\n return code.say(failed)\n if 'message' in response:\n # Assume failed\n return code.say(failed)\n\n # Here is where we build the response\n output = []\n if 'name' in response:\n output.append('%s (%s)' % (response['name'], response['login']))\n else:\n output.append(response['login'])\n if 'location' in response:\n output.append(response['location'])\n if 'email' in response:\n output.append(response['email'])\n if 'public_repos' in response:\n output.append('%s Repos' % response['public_repos'])\n if 'followers' in response:\n output.append('%s Followers' % response['followers'])\n if 'following' in response:\n output.append('Following %s' % response['following'])\n if 'public_gists' in response:\n output.append('%s Gists' % response['public_gists'])\n if 'html_url' in response:\n output.append(response['html_url'])\n\n return code.say(spacer.join(output))\n\n else:\n # Assume Username/Repo\n try:\n response = jweb.json(repo_api % input.group(2).strip())\n except:\n return code.say(failed)\n if 'message' in response:\n # Assume failed\n return code.say(failed)\n # Here is where we build the response\n output = []\n output.append('%s (%s)' %\n (response['name'], response['owner']['login']))\n output.append(response['description'])\n output.append('%s %s' % (response['stargazers_count'], u'\\u2605'))\n output.append('%s %s' % (response['watchers_count'], u'\\u231A'))\n output.append('%s %s' % (response['forks_count'], u'\\u2442'))\n output.append('%s %s' % (response['open_issues_count'], u'\\u2602'))\n output.append('%s %s' % (response['network_count'], u'\\U0001F46C'))\n output.append('%s %s' % (response['subscribers_count'], u'\\u2764'))\n output.append(response['html_url'])\n return code.say(spacer.join(output))",
"def login():\n if app.testing:\n callback_url = url_for('user.authorize', _external=True)\n else:\n callback_url = 'https://codegolf.uqcs.org.au/user/authorize'\n return git_auth.authorize(callback=callback_url)"
] | [
"0.7055886",
"0.68813187",
"0.6742593",
"0.6740107",
"0.67381793",
"0.67381793",
"0.67226154",
"0.6688778",
"0.658423",
"0.6494173",
"0.6449162",
"0.64064425",
"0.64043856",
"0.6383343",
"0.6278874",
"0.62720364",
"0.62676406",
"0.6254351",
"0.6172876",
"0.60915226",
"0.6059552",
"0.59733856",
"0.59684503",
"0.59090495",
"0.5901915",
"0.5897831",
"0.58881056",
"0.58846134",
"0.5883865",
"0.58837616"
] | 0.73178905 | 0 |
github logging out (2 requests needed) | def __github_logout(self, github_http_session):
self.__debugInfo("Logging out from Github account")
try: # 1st request (grab some data needed for the logout form)
github_html_root = github_http_session.get('https://github.com')
sleep(GITHUB_HTTP_DELAY)
github_soup_root = BeautifulSoup(github_html_root.text, 'html.parser')
form_data_logout = {'authenticity_token': github_soup_root.find('input', {'name': 'authenticity_token'})['value']}
except Exception as exception:
raise MsgException('Unable to HTTP-GET GitHub logout data', exception)
try: # 2nd request (submit the logout form)
github_http_session.headers.update({'Content-Type': 'application/x-www-form-urlencoded'})
github_http_session.post('https://github.com/logout', data = urlencode(form_data_logout))
sleep(GITHUB_HTTP_DELAY)
github_http_session.headers.pop('Content-Type')
except Exception as exception:
raise MsgException('Unable to log out from GitHub', exception) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def logout():",
"def logout():\n login()",
"def logout(self):",
"def logOut(self):\n self.client.logout()",
"def logout(self):\n pass",
"def logout():\n rino.login.logout()",
"def logout(self):\r\n self._api_entrypoint.logout(self._session_token)",
"def logout(self):\n self.client.get(f\"{host}/logout\")",
"def do_logout():\n\n session['authenticated'] = False\n session['username'] = None\n session['name'] = None\n session['cpi'] = None\n session['grp_size'] = None\n\n return home()",
"def logout(self, request):\n pass",
"def logout(self):\n with self.client.post(\"/logout\", catch_response=True) as response:\n for r_hist in response.history:\n if r_hist.status_code > 200 and r_hist.status_code < 400:\n response.success()\n self.user.username = None\n # go to UnauthenticatedTasks\n self.interrupt()",
"def logout():\n return logout_user()",
"async def exit(self, ctx):\n print('Logging out...')\n await ctx.bot.logout()",
"def logout():\n update_session(\"X-GEMINI-APIKEY\", \"\")\n set_secret_key(\"\".encode())\n set_login_state(False)",
"def logout(self):\n self.auth = None",
"def _logout(self):\n self.api_query(action=\"logout\")\n self._cookiejar.clear()\n self._save_cookiejar()",
"def log_out():\n\n del session[\"user_id\"]\n # print session[\"user_id\"]\n flash('You were successfully logged out')\n return render_template('homepage.html')\n\n #Additional reference for log in/log out can be found in project tracker project",
"def logout_user():\n pass",
"def test_logout(self):\r\n self.logout()",
"def logout(self):\n self.__send_command(\"LOGOUT\")",
"async def handle_logout(request: aiohttp.web.Request) -> aiohttp.web.Response:\n log = request.app[\"Log\"]\n client = request.app[\"api_client\"]\n if not setd[\"set_session_devmode\"]:\n try:\n session = await aiohttp_session.get_session(request)\n log.info(f\"Killing session {session.identity}\")\n for project in session[\"projects\"]:\n async with client.delete(\n f\"{setd['auth_endpoint_url']}/auth/tokens\",\n headers={\n \"X-Auth-Token\": session[\"token\"],\n \"X-Subject-Token\": session[\"projects\"][project][\"token\"],\n },\n ):\n pass\n session.invalidate()\n except aiohttp.web.HTTPUnauthorized:\n log.info(\"Trying to log our an invalidated session\")\n raise aiohttp.web.HTTPUnauthorized\n response = aiohttp.web.Response(status=303)\n response.headers[\"Location\"] = \"/\"\n return response",
"def logout(self, **kwargs):\n\tself.call('logout')",
"def signout():\n session.pop('oauth2_state', None)\n session.pop('oauth2_token', None)\n session.pop('discord_user', None)\n return redirect('/')",
"def log_out(self):\n self.__is_logged_in = False",
"def __github_login(self, github_http_session):\n\t\tself.__debugInfo(\"Logging into Github\")\n\t\ttry: # 1st request (grab some data needed for the login form)\n\t\t\tgithub_html_login = github_http_session.get('https://github.com/login')\n\t\t\tsleep(GITHUB_HTTP_DELAY)\n\t\t\tgithub_soup_login = BeautifulSoup(github_html_login.text, 'html.parser')\n\t\t\tform_data_login = {\n\t\t\t\t'commit': 'Sign in',\n\t\t\t\t'authenticity_token': github_soup_login.find('input', {'name': 'authenticity_token'})['value'],\n\t\t\t\t'login': self.github_username,\n\t\t\t\t'password': self.github_password,\n\t\t\t\t'webauthn-support': 'supported',\n\t\t\t\t'webauthn-iuvpaa-support': 'unsupported',\n\t\t\t\tgithub_soup_login.find('input', {'name': compile('required_field_')})['name']: '',\n\t\t\t\t'timestamp': github_soup_login.find('input', {'name': 'timestamp'})['value'],\n\t\t\t\t'timestamp_secret': github_soup_login.find('input', {'name': 'timestamp_secret'})['value']\n\t\t\t}\n\t\texcept Exception as exception:\n\t\t\traise MsgException('Unable to HTTP-GET GitHub login data', exception)\n\n\t\ttry: # 2nd request (submit the login form and grab some data needed for the OTP form)\n\t\t\tgithub_http_session.headers.update({'Content-Type': 'application/x-www-form-urlencoded'})\n\t\t\tgithub_html_twofactor = github_http_session.post('https://github.com/session', data = urlencode(form_data_login))\n\t\t\tsleep(GITHUB_HTTP_DELAY)\n\t\t\tgithub_soup_twofactor = BeautifulSoup(github_html_twofactor.text, 'html.parser')\n\t\t\tform_data_otp = {'authenticity_token': github_soup_twofactor.find('input', {'name': 'authenticity_token'})['value']}\n\t\texcept Exception as exception:\n\t\t\traise MsgException('Unable to log in to GitHub (credentials)', exception)\n\n\t\ttry: # 3rd request (submit the OTP form)\n\t\t\tform_data_otp.update({'otp': TOTP(self.github_otp).now()})\n\t\t\tgithub_http_session.post('https://github.com/sessions/two-factor', data = urlencode(form_data_otp))\n\t\t\tsleep(GITHUB_HTTP_DELAY)\n\t\t\tgithub_http_session.headers.pop('Content-Type')\n\t\texcept Exception as exception:\n\t\t\traise MsgException('Unable to log in to GitHub (OTP)', exception)",
"def logout(self):\n url = \"https://%s/game/index.php?page=logout\" % self.server\n #\"https://s103-pt.ogame.gameforge.com/game/index.php?page=logout\"\n self.session.get(url)",
"async def _logout(self, ctx):\n self.bot.log.info('Bot logging off.')\n await self.bot.logout()",
"def _backend_logout_cleanup(self, name):\n self.log.info(\"User logged out: %s\", name)\n self.clear_login_cookie()\n self.statsd.incr('logout')",
"def logout():\n session['logged_in'] = False\n return '', 204",
"def logout(self):\n logger.info(\"Logging out\")\n self._limited_call(self._requests.get, constants.FA_ROOT + \"/logout/\")"
] | [
"0.73949134",
"0.7085794",
"0.7016977",
"0.7016463",
"0.69752824",
"0.67594814",
"0.6740951",
"0.665061",
"0.662168",
"0.660737",
"0.65762925",
"0.652898",
"0.6528519",
"0.65056723",
"0.64738286",
"0.6471477",
"0.64612675",
"0.64234346",
"0.64109135",
"0.6407844",
"0.63996345",
"0.63957214",
"0.63836426",
"0.6330605",
"0.6298518",
"0.6295753",
"0.6293764",
"0.62881577",
"0.62734973",
"0.625522"
] | 0.78040963 | 0 |
This function assumes that you have an existing database called `postgres` without any username/password required to access it. Then it creates a new database called `opportunity_youth` | def create_database():
# Depending on your local settings, you may need to specify a user and password, e.g.
# conn = psycopg2.connect(dbname="postgres", user="postgres", password="password")
conn = psycopg2.connect(dbname="postgres")
conn.autocommit = True # it seems this mode is needed to make a db
conn.set_isolation_level(0) # also this for dropping db
# un-comment this line if you already have a database called
# `opportunity_youth` and you want to drop it
# execute_sql_script(conn, "01_drop_old_database.sql")
execute_sql_script(conn, "02_create_new_database.sql")
conn.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_database():\n db_user = 'expensetracker' # define these\n db_pass = 'beta'\n db_table = 'expensetracker'\n\n local('psql -U postgres -c \"DROP ROLE IF EXISTS %s\"'%db_user)\n local('psql -U postgres -c \"CREATE USER %s WITH NOCREATEDB NOCREATEUSER ENCRYPTED PASSWORD E\\'%s\\'\"' % (db_user, db_pass))\n local('psql -U postgres -c \"DROP DATABASE IF EXISTS %s\"'%db_table)\n local('psql -U postgres -c \"CREATE DATABASE %s WITH OWNER %s\"' % (\n db_table, db_user))",
"def createDB(dbname='dds_assignment2'):\n # Connect to the default database\n con = getOpenConnection(dbname='postgres')\n con.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)\n cur = con.cursor()\n\n # Check if an existing database with the same name exists\n cur.execute('SELECT COUNT(*) FROM pg_catalog.pg_database WHERE datname=\\'%s\\'' % (dbname,))\n count = cur.fetchone()[0]\n if count == 0:\n cur.execute('CREATE DATABASE %s' % (dbname,)) # Create the database\n else:\n print('A database named {0} already exists'.format(dbname))\n\n # Clean up\n cur.close()\n con.commit()\n con.close()",
"def createDB(dbname='dds_assignment1'):\n # Connect to the default database\n con = getOpenConnection(dbname='postgres')\n con.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)\n cur = con.cursor()\n\n # Check if an existing database with the same name exists\n cur.execute('SELECT COUNT(*) FROM pg_catalog.pg_database WHERE datname=\\'%s\\'' % (dbname,))\n count = cur.fetchone()[0]\n if count == 0:\n cur.execute('CREATE DATABASE %s' % (dbname,)) # Create the database\n else:\n print('A database named {0} already exists'.format(dbname))\n\n # Clean up\n cur.close()\n con.close()",
"def createDB(dbname='ddsassignment3'):\r\n # Connect to the default database\r\n con = getOpenConnection(dbname='postgres')\r\n con.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)\r\n cur = con.cursor()\r\n\r\n # Check if an existing database with the same name exists\r\n cur.execute('SELECT COUNT(*) FROM pg_catalog.pg_database WHERE datname=\\'%s\\'' % (dbname,))\r\n count = cur.fetchone()[0]\r\n if count == 0:\r\n cur.execute('CREATE DATABASE %s' % (dbname,)) # Create the database\r\n else:\r\n print 'A database named {0} already exists'.format(dbname)\r\n\r\n # Clean up\r\n cur.close()\r\n con.commit()\r\n con.close()",
"def createDB(dbname='ddsassignment3'):\n # Connect to the default database\n con = getOpenConnection(dbname='postgres')\n con.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)\n cur = con.cursor()\n\n # Check if an existing database with the same name exists\n cur.execute('SELECT COUNT(*) FROM pg_catalog.pg_database WHERE datname=\\'%s\\'' % (dbname,))\n count = cur.fetchone()[0]\n print \"Count \",count\n if count == 0:\n cur.execute('CREATE DATABASE %s' % (dbname,)) # Create the database\n else:\n print 'A database named {0} already exists'.format(dbname)\n\n # Clean up\n cur.close()\n con.commit()\n con.close()",
"def createDB(dbname='dds_assignment'):\n # Connect to the default database\n con = getOpenConnection(dbname='postgres')\n con.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)\n\n cur = con.cursor()\n\n # Check if an existing database with the same name exists\n cur.execute('SELECT COUNT(*) FROM pg_catalog.pg_database WHERE datname=\\'%s\\'' % (dbname,))\n count = cur.fetchone()[0]\n if count == 0:\n cur.execute('CREATE DATABASE %s' % (dbname,)) # Create the database\n else:\n print('A database named ' + dbname + ' already exists')\n\n # Clean up\n cur.close()\n con.close()",
"def _create_db(db_name):\n template_conn.execute('commit')\n template_conn.execute('create database {}'.format(db_name))",
"def createDB(dbname='dds_assignment'):\n # Connect to the default database\n con = getOpenConnection(dbname='postgres')\n con.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)\n cur = con.cursor()\n\n # Check if an existing database with the same name exists\n cur.execute('SELECT COUNT(*) FROM pg_catalog.pg_database WHERE datname=\\'%s\\'' % (dbname,))\n count = cur.fetchone()[0]\n if count == 0:\n cur.execute('CREATE DATABASE %s' % (dbname,)) # Create the database\n else:\n print 'A database named {0} already exists'.format(dbname)\n\n # Clean up\n cur.close()\n con.commit()\n con.close()",
"def createDB(dbname='ddsassignment3'):\n # Connect to the default database\n con = getOpenConnection(dbname='postgres')\n con.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)\n cur = con.cursor()\n\n # Check if an existing database with the same name exists\n cur.execute('SELECT COUNT(*) FROM pg_catalog.pg_database WHERE datname=\\'%s\\'' % (dbname,))\n count = cur.fetchone()[0]\n if count == 0:\n cur.execute('CREATE DATABASE %s' % (dbname,)) # Create the database\n else:\n print 'A database named {0} already exists'.format(dbname)\n\n # Clean up\n cur.close()\n con.commit()\n con.close()",
"def createDB(dbname='dds_assignment'):\n # Connect to the default database\n con = getOpenConnection(dbname='postgres')\n con.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)\n cur = con.cursor()\n\n # Check if an existing database with the same name exists\n cur.execute('SELECT COUNT(*) FROM pg_catalog.pg_database WHERE datname=\\'%s\\'' % (dbname,))\n count = cur.fetchone()[0]\n if count == 0:\n cur.execute('CREATE DATABASE %s' % (dbname,)) # Create the database\n else:\n print 'A database named {0} already exists'.format(dbname)\n\n # Clean up\n cur.close()\n con.close()",
"def createDB(dbname='dds_assignment'):\n # Connect to the default database\n con = getOpenConnection(dbname='postgres')\n con.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)\n cur = con.cursor()\n\n # Check if an existing database with the same name exists\n cur.execute('SELECT COUNT(*) FROM pg_catalog.pg_database WHERE datname=\\'%s\\'' % (dbname,))\n count = cur.fetchone()[0]\n if count == 0:\n cur.execute('CREATE DATABASE %s' % (dbname,)) # Create the database\n else:\n print 'A database named {0} already exists'.format(dbname)\n\n # Clean up\n cur.close()\n con.close()",
"def create_db():\n\n require('environment', provided_by=env.environments)\n sudo('createdb -O %(database_user)s -T %(template_db)s %(database_name)s' % env, user='postgres')",
"def create_database(database):\n # open an existing postgres database\n with Database(database=\"postgres\") as connection:\n # set isolation level (dunno why tbqh)\n connection.db.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)\n connection.query(\"CREATE DATABASE \" + database)",
"def create_db():\n from sqlalchemy_utils import database_exists, create_database, drop_database\n if not database_exists(DB_URL):\n print('Creating database.')\n create_database(DB_URL)\n else:\n drop_database(DB_URL)\n create_database(DB_URL)\n print(\"Deleted and created new database\")\n create_tables()",
"def newDb(options, dbName, adminPswd, userPswd, viewerPswd):\n if not re.match(\"^[A-Za-z][A-Za-z0-9_]*$\", dbName):\n errorPrint(\"'%s' is not a valid database name\" % dbName)\n return\n\n adminName = dbName + \"_admin\"\n userName = dbName + \"_user\"\n viewerName = dbName + \"_viewer\"\n\n setupDictionaryDatabases(options, {\n 'databases': {\n dbName: {\n 'ownerRole': adminName,\n 'roles': {\n adminName: {\n 'password': adminPswd,\n 'role': 'admin'\n },\n userName: {\n 'password': userPswd,\n 'role': 'writer'\n },\n viewerName: {\n 'password': viewerPswd,\n 'role': 'reader'\n }\n }\n }\n }\n })",
"def create_db():\n init_postgres(current_app.config['SQLALCHEMY_DATABASE_URI'])",
"def create_database():\n with connection:\n connection.execute(CREATE_MOVIE_TABLE)\n connection.execute(CREATE_USER_TABLE)\n connection.execute(CREATE_WATCHED_TABLE)",
"def create_db(dbname):\r\n # Connect to the default database\r\n con = getopenconnection(dbname='postgres')\r\n con.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)\r\n cur = con.cursor()\r\n\r\n # Check if an existing database with the same name exists\r\n cur.execute('SELECT COUNT(*) FROM pg_catalog.pg_database WHERE datname=\\'%s\\'' % (dbname,))\r\n count = cur.fetchone()[0]\r\n if count == 0:\r\n cur.execute('CREATE DATABASE %s' % (dbname,)) # Create the database\r\n else:\r\n print 'A database named {0} already exists'.format(dbname)\r\n\r\n # Clean up\r\n cur.close()\r\n con.close()",
"def create_database():\n create_db(app)",
"def create_prod_db():\n _create_database(is_production=True)",
"def create_db(dbname):\n # Connect to the default database\n con = getopenconnection(dbname='postgres')\n con.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)\n cur = con.cursor()\n\n # Check if an existing database with the same name exists\n cur.execute('SELECT COUNT(*) FROM pg_catalog.pg_database WHERE datname=\\'%s\\'' % (dbname,))\n count = cur.fetchone()[0]\n if count == 0:\n cur.execute('CREATE DATABASE %s' % (dbname,)) # Create the database\n else:\n print ('A database named {0} already exists'.format(dbname))\n\n # Clean up\n cur.close()\n con.close()",
"def create_database():\n # connect to default database\n conn = psycopg2.connect(\"host=127.0.0.1 dbname=test user=postgres password=password1\")\n conn.set_session(autocommit=True)\n cur = conn.cursor()\n \n # create sparkify database with UTF8 encoding\n cur.execute(\"DROP DATABASE IF EXISTS sparkifydb\")\n cur.execute(\"CREATE DATABASE sparkifydb WITH ENCODING 'utf8' TEMPLATE template0\")\n\n # close connection to default database\n conn.close() \n \n # connect to sparkify database\n conn = psycopg2.connect(\"host=127.0.0.1 dbname=sparkifydb user=postgres password=password1\")\n cur = conn.cursor()\n \n return cur, conn",
"def setup_database():\n\n user = 'bard'\n password = 'STORY'\n database = 'story'\n DSN = f\"postgresql://{user}:{password}@postgres:5432/{database}\"\n engine = create_engine(DSN)\n register_tables(engine)\n return engine",
"def create_db():\n db_url = engine.url\n if not database_exists(db_url):\n create_database(db_url)\n base.metadata.create_all()",
"def create_database_stock_master():\n sql = \"\"\"\n CREATE DATABASE stock_master;\n \"\"\"\n excute_sql(sql,None)",
"def _init_db():\n c = ppc.app().config['PUBLICPRIZE']['DATABASE']\n e = os.environ.copy()\n e['PGPASSWORD'] = c['postgres_pass']\n subprocess.call(\n ['createuser', '--host=' + c['host'], '--user=postgres',\n '--no-superuser', '--no-createdb', '--no-createrole', c['user']],\n env=e)\n p = subprocess.Popen(\n ['psql', '--host=' + c['host'], '--user=postgres', 'template1'],\n env=e,\n stdin=subprocess.PIPE)\n s = u\"ALTER USER {user} WITH PASSWORD '{password}'\".format(**c)\n enc = locale.getlocale()[1]\n loc = locale.setlocale(locale.LC_ALL)\n p.communicate(input=bytes(s, enc))\n subprocess.check_call(\n ['createdb', '--host=' + c['host'], '--encoding=' + enc,\n '--locale=' + loc, '--user=postgres',\n '--template=template0',\n '--owner=' + c['user'], c['name']],\n env=e)",
"def create_db(db, db_username):\n print system(\"su -c \\\"echo \\\\\\\"create database \" + db + \" with owner \" + db_username + \";\\\\\\\" | psql \\\" postgres\")",
"def create_db(): \r\n env.db_user = prompt('DB user for %s:' % env.host, default=env.project_name)\r\n env.db_password = prompt('DB password for user %s:' % env.db_user)\r\n \r\n # -e echo-sql S no-superuser D no-createdb R no-createrole l can-login\r\n # P prompt-for-passwd -U <login role> -O <owner role> -h <hostname>\r\n # TODO find a way to use provided password! (use SQL instead of command)\r\n run('createuser -e -SDRlP -U %s -h %s %s' % (env.db_superuser, env.db_host, env.db_user)) \r\n # -U <login role> -O <owner role> -h <hostname>\r\n run('createdb -e -E UTF8 -O %s -U %s -h %s %s' % (env.db_user, env.db_superuser, env.db_host, env.project_name))",
"def create_database():\n # Build a unique URL using todays date\n dbname = 'orthomcl_{t.year}_{t.month}_{t.day}_at_{t.hour}_{t.minute}_{t.second}'.format(t=datetime.today())\n dbhost, port, user, passwd = _get_root_credentials()\n clhost = 'odose.nl' if dbhost not in ['127.0.0.1', 'localhost'] else dbhost\n db_connection = MySQLdb.connect(host=dbhost, port=port, user=user, passwd=passwd)\n cursor = db_connection.cursor()\n cursor.execute('CREATE DATABASE ' + dbname)\n cursor.execute('GRANT ALL on {0}.* TO orthomcl@\\'{1}\\' IDENTIFIED BY \\'pass\\';'.format(dbname, clhost))\n db_connection.commit()\n cursor.close()\n db_connection.close()\n log.info('Created database %s as %s on %s', dbname, user, dbhost)\n return dbname",
"def create_db():\n db.create_all()\n print(\"DB Created\")"
] | [
"0.7448531",
"0.7208092",
"0.7126366",
"0.7112931",
"0.70927596",
"0.70882756",
"0.70807326",
"0.70731986",
"0.70635045",
"0.70544",
"0.70544",
"0.69928306",
"0.69432926",
"0.6848553",
"0.68377304",
"0.6825666",
"0.6820272",
"0.6790556",
"0.6786091",
"0.6753634",
"0.6741311",
"0.6727389",
"0.6725657",
"0.66818035",
"0.66668797",
"0.6637079",
"0.65982825",
"0.6551013",
"0.65474886",
"0.6505141"
] | 0.85329705 | 0 |
Create a table for the train data | def create_train_table(conn):
execute_sql_script(conn, "03_create_train_table.sql") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_table(self):\n pass",
"def generate_table(self, rows):\n ...",
"def create_table(self, title: str, columns: List[str], data: Dict[str, str]) -> None:\n table = Table(title=title, box=box.SIMPLE)\n for column in columns:\n table.add_column(column, justify=\"right\", style=\"bright_yellow\", no_wrap=True)\n\n for model, percentage in data.items():\n table.add_row(model, percentage)\n\n console = Console()\n console.print(table)",
"def _make_table(\n self,\n epoch,\n total_loss_training,\n total_loss_validation=None,\n losses_training=None,\n losses_validation=None,\n metrics=None,\n learning_rate=None,\n ):\n col_width = 9\n multi_target = losses_training is not None and len(losses_training) > 1\n\n title = \"\\n\\n Training history\\n\"\n\n # Calculate width of table and columns\n epoch_width = 18\n if not multi_target:\n train_width = 20\n else:\n train_width = min(40, (col_width + 2) * (len(losses_training) + 1))\n\n val_width = 0\n if total_loss_validation is not None:\n val_width = 20\n if multi_target:\n val_width = min(40, (col_width + 2) * (len(losses_training) + 1))\n\n all_metrics_width = 0\n if metrics is not None:\n if not multi_target:\n metrics_width = col_width + 2\n else:\n metrics_width = len(losses_training) * (col_width + 2) + 1\n all_metrics_width = len(metrics) * metrics_width\n\n table_width = epoch_width + train_width + val_width + all_metrics_width\n\n self.table = rich.table.Table(\n expand=False, box=rich.box.SIMPLE, title=title, width=table_width, leading=0\n )\n\n self.table.add_column(\n Text(\"Epoch\", style=\"Grey\"), justify=\"center\", width=epoch_width\n )\n self.table.add_column(\n Text(\"Training loss\", style=\"red bold\"), justify=\"center\", width=train_width\n )\n if total_loss_validation is not None:\n self.table.add_column(\n Text(\"Validation loss\", style=\"blue bold\"),\n justify=\"center\",\n width=val_width,\n )\n if metrics is not None:\n for name, m in metrics.items():\n self.table.add_column(\n Text(name, style=\"purple bold\"),\n justify=\"center\",\n width=metrics_width,\n )\n\n def make_header_columns():\n # Epoch and LR\n columns = [Text(\"#\", justify=\"right\", style=\"bold\")]\n if learning_rate is not None:\n columns += [Text(\"LR\", justify=\"right\")]\n yield Columns(columns, align=\"center\", width=6)\n\n # Training losses\n text = Align(\n Text(\"Total\", justify=\"right\", style=\"bold red\"),\n width=col_width,\n align=\"center\",\n )\n if multi_target:\n columns = [text] + [\n Align(Text(n, justify=\"right\", style=\"red\"), width=col_width)\n for n in losses_training.keys()\n ]\n yield Columns(columns, align=\"center\", width=col_width)\n else:\n yield text\n\n # Validation losses\n if total_loss_validation is not None:\n text = Align(\n Text(\"Total\", justify=\"center\", style=\"bold blue\"),\n width=col_width,\n align=\"center\",\n )\n if multi_target:\n columns = [text] + [\n Align(Text(n, justify=\"center\", style=\"blue\"), width=col_width)\n for n in losses_validation.keys()\n ]\n yield Columns(columns, align=\"center\", width=col_width)\n else:\n yield text\n\n # Metrics\n if metrics is not None:\n for name, values in metrics.items():\n if isinstance(values, dict):\n columns = [\n Align(\n Text(n, justify=\"center\", style=\"purple\"),\n width=col_width,\n )\n for n in values.keys()\n ]\n yield Columns(columns, align=\"center\", width=col_width)\n else:\n yield Align(Text(\"\"), width=col_width)\n\n self.table.add_row(*make_header_columns())\n self.table.add_row()",
"def make_tables(self):\n for t in self.tables:\n self.add_table(groupname=t['groupname'],\n tablename=t['tablename'],\n description=t['description'],\n tabletitle=t['tabletitle'])",
"def setup_table(self):\n\n self.setup.create_basic_table_in_dev()\n self.setup.insert_random_records_into_dev()",
"def tabular_data(self):\n path = CFG.GRAPHS_DIR\n chdir(path)\n\n if self.experiment_count == 1:\n f = open(self.tablefile, 'w')\n f.write(self.print_border_line())\n f.write(self.table_header())\n f.write(self.print_border_line())\n f.write(self.pretty_string(\"Functions\"))\n f.write(self.pretty_string(\"Batch Size\"))\n f.write(self.pretty_string(\"Training (%)\"))\n f.write(self.pretty_string(\"Testing (%)\", True))\n f.write('\\n')\n f.write(self.print_border_line())\n f.close()\n\n f = open(self.tablefile, 'a')\n f.write(self.pretty_string(self.function_name))\n f.write(self.pretty_string(str(self.batch_size)))\n f.write(self.pretty_string(self.tr_mean_str))\n f.write(self.pretty_string(self.test_mean_str, True))\n f.write('\\n')\n f.close()",
"def make_performance_table(self):\n table = Table()\n table.add_column(\"Classifier\", ratio=25)\n table.add_column(\"Score\", ratio=10, justify=\"center\", no_wrap=True)\n table.add_column(\"Params\", ratio=25, no_wrap=False)\n table.add_column(\"Model ID\",ratio=40, no_wrap=True)\n\n for name, stuff in self.trainer.performance.items():\n score, params, hash_id = stuff\n style = \"bold green\" if name == self.trainer.best_classifier__name else \"\"\n best_one = \" ***\" if name == self.trainer.best_classifier__name else \"\"\n \n table.add_row(\n str(name),\n str(np.round(score, 3)), \n str(params), \n f\"{str(hash_id)}{best_one}\",\n style=style)\n \n return table",
"def create_table(\n self, data: Data = None, trim: bool = False, columns: List[str] = None\n ) -> Table:\n table = Table(data, columns)\n\n if trim:\n self.trim_empty_rows(table)\n self.trim_column_names(table)\n\n self.logger.info(\"Created table: %s\", table)\n notebook_table(self.table_head(table, 10))\n\n return table",
"def train_dataset():\n return TabularDataset.from_path('tests/data/dummy_tabular/train.csv', sep=',')",
"def create_table(self):\n from deployflag.models.metadata import (\n GridSearchParameter,\n ModelFramework,\n ModelPerformanceMetadata,\n )\n\n with self.connection:\n self.connection.create_tables(\n [ModelPerformanceMetadata, GridSearchParameter, ModelFramework],\n safe=True,\n )",
"def create_prediction_table():\n try:\n conn = create_connection()\n c = conn.cursor()\n c.execute(\"\"\"CREATE TABLE IF NOT EXISTS predtable\n (age NUMERIC,workclass TEXT,fnlwgt NUMERIC,education TEXT,education_num NUMERIC,marital_status TEXT,occupation TEXT,relationship TEXT,race TEXT,sex TEXT,capital_gain NUMERIC,capital_loss NUMERIC,hours_per_week NUMERIC,native_country TEXT,predicted_class NUMERIC,model_class TEXT, time_of_prediction TEXT)\"\"\")\n\n except Exception as e:\n pass",
"def create_tables():\n\tlog_msg4(\"No hay tablas para el año \" + txt_year + \". Creando\")\n\n\tcreate_table('visited')\n\tcreate_table('saved')\n\tcreate_table('actions')\n\n\tglobal new_tables_created\n\tnew_tables_created = True\n\n\tlog_msg_ok4()",
"def table_example():\n\n print(\"\\nExample making a new table from scratch:\\n\")\n # Make a new (empty) table object\n tbl = table(\"A table with random data\")\n # Add three columns called \"x\", \"x^2\" and \"1/x\"\n tbl.addcolumn(\"x\")\n tbl.addcolumn(\"x^2\")\n tbl.addcolumn(\"1/x\")\n # Add some rows of data\n for i in range(0, 10):\n row = dict()\n row[\"x\"] = i\n row[\"x^2\"] = i * i\n if i != 0:\n row[\"1/x\"] = 1.0 / float(i)\n else:\n row[\"1/x\"] = \"?\"\n tbl.add_data(row)\n # Define some graphs\n tbl.definegraph(\"Y = X(squared)\", (\"x\", \"x^2\"))\n tbl.definegraph(\"Y = 1/X\", (\"x\", \"1/x\"))\n tbl.definegraph(\"All data\", (\"x\", \"x^2\", \"1/x\"))\n # Print out the data as a simple \"table\" and in loggraph markup\n print(tbl.show())\n print(tbl.loggraph())",
"def crearTabla(self):\n mensaje = self.base.createTable()\n showinfo('Resultado', mensaje)",
"def table(self):\n\n param=self.x_param\n\n device=self.device\n\n base_params=device.get_params()\n\n data_tot=DataFrame()\n\n for i in range(len(param)):\n\n print_index=1\n\n for name in param.names:\n\n device._set_params(param(i))\n\n device.draw()\n\n df=device.export_all()\n\n if self.labels_bottom is not None:\n\n index=self.labels_bottom[i]\n\n else:\n\n index=str(i)\n\n print(\"Generating table, item {} of {}\\r\".format(print_index,len(param)),end=\"\")\n\n data_tot=data_tot.append(Series(df,name=index))\n\n device._set_params(base_params)\n\n return data_tot",
"def create_nnar_table(conn, table_name):\n sql = \"CREATE TABLE \" + table_name + \"\"\"(\n id integer PRIMARY KEY,\n fp_male real,\n fn_male real,\n fp_female real,\n fn_female real,\n test_loss real,\n test_acc real);\"\"\"\n\n try:\n c = conn.cursor()\n c.execute(sql)\n except Error as e:\n print(e)\n\n create_nnar_pred_table(conn, table_name)",
"def create_tables():\n # Depending on your local settings, you may need to specify a user and password, e.g.\n # conn = psycopg2.connect(dbname=DBNAME, user=\"postgres\", password=\"password\")\n conn = psycopg2.connect(dbname=DBNAME)\n\n create_train_table(conn)\n create_questions_table(conn)\n create_lectures_table(conn)\n create_example_test_table(conn)\n create_example_test_table(conn)\n\n conn.close()",
"def createTable(self):\n results = self.db.table_create(self.entity).run(self.r)\n time.sleep(5)\n return results",
"def construct_table(self):\n table_str = self.header_row\n row_lbls, col_lbls = self.get_idxvals()\n for r,rlbl in enumerate(row_lbls):\n row_data = [self.data[rlbl,clbl] for clbl in col_lbls]\n table_str += self.construct_row(r, row_data)\n \n return table_str",
"def basic_table_creation():\n results = {\n 'From pyarrow arrays': pa.table([\n pa.array(['Kakashi', 'Itachi', 'Shisui'], type=pa.string()),\n pa.array(['Hatake', 'Uchiha', 'Uchiha'], type=pa.string())\n ], names=['first_name', 'last_name']),\n 'From List[dict]': pa.Table.from_pylist([\n {'first_name': 'Kakashi', 'last_name': 'Hatake', },\n {'first_name': 'Itachi', 'last_name': 'Uchiha', },\n {'first_name': 'Shisui', 'last_name': 'Uchiha', },\n ]),\n 'From Dict[str, list]': pa.Table.from_pydict({\n 'first_name': ['Kakashi', 'Itachi', 'Shisui'],\n 'last_name': ['Hatake', 'Uchiha', 'Uchiha'],\n }),\n 'From pandas df': pa.Table.from_pandas(pd.DataFrame([\n {'first_name': 'Kakashi', 'last_name': 'Hatake', },\n {'first_name': 'Itachi', 'last_name': 'Uchiha', },\n {'first_name': 'Shisui', 'last_name': 'Uchiha', },\n ])),\n }\n pretty_print_result_map(results)",
"def new_table(self):\n self.c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS {table} (\n id integer primary key,\n {event} integer,\n {desc} text,\n {date} text,\n {link} text)\n \"\"\".format(\n table=TABLE,\n event=EVENT,\n desc=DESC,\n date=DATE,\n link=LINK,\n )\n )",
"def create_table():\n sql = sqlite3.connect('data.db')\n cursor = sql.cursor()\n logging.debug(\"Successfully Connected to SQLite\")\n\n cursor.execute(\n '''CREATE TABLE Status\n ([ip] text, [port] integer, [count_requests] integer, [t_start] integer, [protocol] text)'''\n )\n\n cursor.close()",
"def create_table(self):\n c = self.conn.cursor()\n c.execute(\"CREATE TABLE sensor_data (mac text, name text, temperature real, light integer, moisture real, conductivity real, battery real, ts_utc int, date_iso text, firmware text )\")",
"def create_layers_table():\n\n table_name = f\"{BQ_LAYERS_TABLE}\"",
"def setTable(self):\n if not self.outvar or self.data==None:\n return\n\n self.table.setColumnCount(len(self.data.domain.attributes) + (self.data.domain.classVar != None) + len(self.predictors))\n self.table.setRowCount(len(self.data))\n \n print self.table.rowCount(), len(self.data.domain.attributes), (self.data.domain.classVar != None), len(self.predictors)\n\n # HEADER: set the header (attribute names)\n## for col in range(len(self.data.domain.attributes)):\n## self.header.setLabel(col, self.data.domain.attributes[col].name)\n labels = [attr.name for attr in self.data.domain.variables] + [c.name for c in self.predictors.values()]\n self.table.setHorizontalHeaderLabels(labels)\n## col = len(self.data.domain.attributes)\n## if self.data.domain.classVar != None:\n## self.header.setLabel(col, self.data.domain.classVar.name)\n## col += 1\n## for (i,c) in enumerate(self.predictors.values()):\n## self.header.setLabel(col+i, c.name)\n\n # ATTRIBUTE VALUES: set the contents of the table (values of attributes), data first\n for i in range(len(self.data)):\n for j in range(len(self.data.domain.attributes)):\n## self.table.setText(i, j, str(self.data[i][j]))\n self.table.setItem(i, j, QTableWidgetItem(str(self.data[i][j])))\n col = len(self.data.domain.attributes)\n\n # TRUE CLASS: set the contents of the table (values of attributes), data first\n self.classifications = [[]] * len(self.data)\n if self.data.domain.classVar:\n for (i, d) in enumerate(self.data):\n c = d.getclass()\n item = colorItem(str(c))\n self.table.setItem(i, col, item)\n self.classifications[i] = [c]\n col += 1\n\n## for i in range(col):\n## self.table.adjustColumn(i)\n\n # include predictions, handle show/hide columns\n self.updateTableOutcomes()\n self.updateAttributes()\n self.updateTrueClass()\n self.table.show()",
"def start_table(self):\n self.result = \"<table>\\n\"",
"def autogen_dataset():\n return TabularDataset.autogen('tests/data/dummy_tabular/train.csv',\n seed=42,\n sep=',')",
"def addTable(self, document, title, data):\n #[<Job ID> | <Simulation> | Server | User | Submitted | Status | <Delete> | <Check>]\n\n headers = (\"Job ID\", \"Simulation\", \"Server\", \"User\", \"Submitted\", \"Status\", \" \", \" \")\n\n if len(data[0]) != 0:\n document.add(Paragraph(text=title, Class=\"header-h2\"))\n document.add(tableController(headers, 'jobs/index')) # temp\n document.add(tableJobs(headers, data[0], data[1], data[2], data[3]))",
"def __init__(self, *args, **kwargs):\n \n self.dense = True\n\n # Create table\n super().__init__(*args, **kwargs)"
] | [
"0.72169733",
"0.7134221",
"0.6992332",
"0.6981153",
"0.6956814",
"0.6953072",
"0.6939007",
"0.69278616",
"0.6826252",
"0.6803158",
"0.6784577",
"0.67687136",
"0.67621595",
"0.6742176",
"0.67233443",
"0.66465044",
"0.6619165",
"0.6612739",
"0.65053135",
"0.65008765",
"0.6485804",
"0.64850706",
"0.64710677",
"0.6464124",
"0.6451874",
"0.6429491",
"0.64282596",
"0.6424274",
"0.6411599",
"0.64019525"
] | 0.7581863 | 0 |
Create a table for the questions data | def create_questions_table(conn):
execute_sql_script(conn, "04_create_questions_table.sql") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def buildTable(self, questions):\n\t\tself.dir = apps_dir + self.name\n\t\tif os.path.exists(self.dir):\n\t\t\tself.dir += '2'\n\t\tos.makedirs(self.dir)\n\t\tself.html = self.dir + \"/application.html\"\n\n\t\tself.parseQuestions()\n\t\twith open(self.html, \"wt\") as table:\n\t\t\ttable.write(gen.header(self.name) )\n\t\t\tfor idx in question_array:\n\t\t\t\tif idx in title_array:\n\t\t\t\t\tif idx is not title_array[0]:\n\t\t\t\t\t\ttable.write(gen.table_close())\n\t\t\t\t\ttable.write( gen.table_header( questions[idx] ) )\n\t\t\t\telse:\n\t\t\t\t\ttable.write( gen.table(questions[idx], str(self.form[idx])) )\n\t\t\ttable.write(gen.close() )",
"def return_questions_data():\n conn = sq.connect(host='localhost', user='root',\n password='student', database='quiz')\n cursor = conn.cursor()\n \n cursor.execute(\"select * from questions\")\n data = cursor.fetchall()\n\n table = PrettyTable()\n table.field_names = ['Question', 'Answer']\n questions = {}\n for q,a in data:\n table.add_row([q,a])\n questions[q] = a\n conn.close()\n\n return table, questions",
"def data_table_creation(cursor, connection_to_db):\n\n cursor.execute(\"\"\"\n\n CREATE TABLE IF NOT EXISTS data(\n question TEXT NOT NULL,\n answer TEXT NULL,\n question_type TEXT NOT NULL,\n question_type_answers TEXT NULL,\n PRIMARY KEY(question)\n );\n\n \"\"\")\n\n connection_to_db.commit()",
"def print_mistakes_table():\n conn = sq.connect(host='localhost', user='root',\n password='student', database='quiz')\n cursor = conn.cursor()\n\n cursor.execute(\"select * from mistakes\")\n data = cursor.fetchall()\n\n table = PrettyTable()\n table.field_names = ['Question', 'Given Answer','User Given Answer']\n for row in data:\n table.add_row(row)\n conn.close()\n\n return table",
"def create_example_sample_submission_table(conn):\n execute_sql_script(conn, \"07_create_example_sample_submission_table.sql\")",
"def initialize_new_questionnaire(questionnaire, option_type, uuid):\r\n q = {}\r\n if (type(questionnaire) == dict):\r\n for key, val in questionnaire.items():\r\n if key != 'index':\r\n\r\n q[key] = [val] if type(val) != list else val\r\n questionnaire = pd.DataFrame(q)\r\n\r\n\r\n if \"_questionnaire\" not in option_type:\r\n option_type = option_type + \"_questionnaire\"\r\n\r\n option_type = option_type.lower()\r\n if 'option_type' not in questionnaire:\r\n questionnaire['option_type'] = [option_type]\r\n questionnaire['uuid'] = [uuid]\r\n questionnaire['timestamp'] = [datetime.datetime.utcnow()]\r\n print(\"this is questionaire: \", questionnaire)\r\n\r\n questionnaire=questionnaire.set_index('uuid')\r\n print(\"this is questionaire: \", questionnaire)\r\n questionnaire.to_sql(option_type, con=Database.DATABASE.engine, if_exists=\"append\", index=True)",
"def gen_questions(self, number_of_questions):",
"def create_table(self):\n pass",
"def create_table(self, title: str, columns: List[str], data: Dict[str, str]) -> None:\n table = Table(title=title, box=box.SIMPLE)\n for column in columns:\n table.add_column(column, justify=\"right\", style=\"bright_yellow\", no_wrap=True)\n\n for model, percentage in data.items():\n table.add_row(model, percentage)\n\n console = Console()\n console.print(table)",
"def generate_table(self, rows):\n ...",
"def make_tables(self):\n for t in self.tables:\n self.add_table(groupname=t['groupname'],\n tablename=t['tablename'],\n description=t['description'],\n tabletitle=t['tabletitle'])",
"def create_table(\n self, data: Data = None, trim: bool = False, columns: List[str] = None\n ) -> Table:\n table = Table(data, columns)\n\n if trim:\n self.trim_empty_rows(table)\n self.trim_column_names(table)\n\n self.logger.info(\"Created table: %s\", table)\n notebook_table(self.table_head(table, 10))\n\n return table",
"def putTestData(self):\n # print 'Not Yet implement / sample DB table create'\n tkMessageBox.showinfo(\"Message\", \"Sample DB Table Create\")",
"def crearTabla(self):\n mensaje = self.base.createTable()\n showinfo('Resultado', mensaje)",
"def test_create_questions(self):\n res = self.client().post('/questions',\n json={\n \"question\": \"What is chemical \\\n composition of water\",\n \"answer\": \"H2O\",\n \"category\": 1,\n \"difficulty\": 2\n })\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['created'])",
"def create_tables():\n # Depending on your local settings, you may need to specify a user and password, e.g.\n # conn = psycopg2.connect(dbname=DBNAME, user=\"postgres\", password=\"password\")\n conn = psycopg2.connect(dbname=DBNAME)\n\n create_train_table(conn)\n create_questions_table(conn)\n create_lectures_table(conn)\n create_example_test_table(conn)\n create_example_test_table(conn)\n\n conn.close()",
"def addTable(self, document, title, data):\n #[<Job ID> | <Simulation> | Server | User | Submitted | Status | <Delete> | <Check>]\n\n headers = (\"Job ID\", \"Simulation\", \"Server\", \"User\", \"Submitted\", \"Status\", \" \", \" \")\n\n if len(data[0]) != 0:\n document.add(Paragraph(text=title, Class=\"header-h2\"))\n document.add(tableController(headers, 'jobs/index')) # temp\n document.add(tableJobs(headers, data[0], data[1], data[2], data[3]))",
"def write_the_table(what):\n global count_row\n count_row += 1\n\n if what.get('rank') == 0:\n background_blue.append(count_row)\n\n struct = what.get('structure')\n link = what.get('link')\n exams_1 = what.get('exams_1')\n exams_2 = what.get('exams_2')\n exams_empty = [['', '', '', '', '', '', '', '', '', '', '', '']] \\\n if self.training.session_type != '1' else \\\n [['', '', '', '', '', '']]\n\n def formated(number):\n \"\"\"\n Remove trailing 0\n \"\"\"\n frac, whole = modf(number)\n if frac == 0:\n return int(whole)\n return str(number).rstrip('0')\n\n def write_exams(list_1, list_2):\n exam_table = []\n for ex_1, ex_2 in itertools.zip_longest(list_1, list_2):\n ex_1_table = [\n formated(ex_1.coefficient) if ex_1 is not None else '',\n [\n Paragraph(filter_content(ex_1.label) if ex_1 else '',\n self.styles['SmallNormal']),\n Paragraph(\n \"<para textColor=grey>\" + filter_content(ex_1.additionnal_info) \\\n if ex_1 and ex_1.additionnal_info \\\n else \"\" + \"</para\\>\",\n self.styles['SmallNormal'])\n ],\n ex_1.type_exam if ex_1 is not None else '',\n ex_1.text_duration if ex_1 is not None else '',\n '' if ex_1 is None \\\n else ex_1.convocation if not training_is_ccct \\\n else ex_1.get_type_ccct_display(),\n ex_1.eliminatory_grade if ex_1 is not None else '',\n ex_1.threshold_session_2 if ex_1 is not None else '',\n ]\n\n ex_2_table = [\n formated(ex_2.coefficient) if ex_2 is not None else '',\n [Paragraph(filter_content(ex_2.label) if ex_2 is not None else '', self.styles[\n 'SmallNormal']), Paragraph(\"<para textColor=grey\\\n >\" + ex_2.additionnal_info + \"</para\\\n >\" if ex_2.additionnal_info is not None else \"\",\n self.styles['SmallNormal'])],\n ex_2.type_exam if ex_2 is not None else '',\n ex_2.text_duration if ex_2 is not None else '',\n ex_2.eliminatory_grade if ex_2 is not None else '',\n ] if ex_2 is not None else ['', '', '', '', '']\n if self.training.session_type != '1':\n ex_1_table.extend(ex_2_table)\n else:\n ex_1_table.pop()\n exam_table.append(ex_1_table)\n exam_table = exam_table if len(exam_table) > 0 else exams_empty\n if exam_table == exams_empty:\n # TODO: calculate empty space to set rowHeights in order to\n # avoid blank in table\n pass\n inner_table = Table(\n exam_table, colWidths=width_exams, rowHeights=None)\n inner_table.setStyle(TableStyle(\n [('INNERGRID', (0, 0), (-1, -1), 0.1, colors.black),\n ('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),\n ('ALIGN', (0, 0), (-1, -1), 'CENTER'),\n ('FONTSIZE', (0, 0), (-1, -1), 8),\n # ('LEFTPADDING', (0, 0), (-1, -1), 0),\n # ('RIGHTPADDING', (0, 0), (-1, -1), 0),\n ('BOTTOMPADDING', (0, 0), (-1, -1), 0),\n ('TOPPADDING', (0, 0), (-1, -1), 0),\n ]))\n return inner_table\n\n ref_scol = struct.ref_si_scol if struct.ref_si_scol else \"\" # FIX bug with rof data\n ref_data = (\n Paragraph(struct.ROF_ref, self.styles['CenterSmall']),\n Paragraph(ref_scol, self.styles['CenterSmall'])\n ) if self.reference == 'both' \\\n else Paragraph(struct.ROF_ref, self.styles['CenterSmall']) if self.reference == 'with_rof' \\\n else Paragraph(ref_scol, self.styles['CenterSmall']) if self.reference == 'with_si' \\\n else Paragraph('', self.styles['CenterSmall'])\n\n object_line = [\n Paragraph(\n \"<para leftIndent=%s>%s</para> \" % (what.get('rank')*10, filter_content(struct.label)),\n self.styles['SmallBold'] if what.get('rank') == 0 \\\n or what.get('structure').nature == 'UE' \\\n else self.styles['SmallNormal']\n ),\n Paragraph(\n struct.get_respens_name if not struct.external_name \\\n else struct.external_name,\n 
self.styles['CenterSmall'] if not struct.external_name else \\\n self.styles['CenterSmallItalic']\n ),\n [ref_data],\n '30' if self.training.degree_type.ROF_code in self.training_types_for_which_to_display_30_ects\\\n and struct.nature == 'SE'\\\n else struct.ECTS_credit if struct.ECTS_credit else '-',\n formated(link.coefficient) if link.coefficient else '',\n link.eliminatory_grade,\n write_exams(exams_1, exams_2)\n ]\n if self.respforms:\n if self.reference == 'without':\n object_line.pop(2)\n else:\n object_line.pop(1)\n if self.reference == 'without':\n object_line.pop(1)\n\n big_table.append(object_line)\n\n for e in what.get('children'):\n write_the_table(e)",
"def setup_table(self):\n\n self.setup.create_basic_table_in_dev()\n self.setup.insert_random_records_into_dev()",
"def __create_presentations_table(self, schema=PRESENTATIONS_SCHEMA_310):\r\n log.info(\"table created\")\r\n QtSql.QSqlQuery(schema)",
"def create_tables(self):\n\n self.cur.execute('''CREATE TABLE IF NOT EXISTS my_business_entry\n (\n id SERIAL PRIMARY KEY,\n url_yes_no boolean,\n url TEXT,\n phone_yes_no boolean,\n phone TEXT,\n rating TEXT,\n nr_of_ratings TEXT,\n myBusiness boolean,\n company TEXT\n );''')\n\n self.connection.commit()",
"def _create_tables():\n from Model.DataAccessor.DbAccessor.DbOrmAccessor import db\n db.create_tables([SubjectType, SubjectRegion, Subject])",
"def create_lectures_table(conn):\n execute_sql_script(conn, \"05_create_lectures_table.sql\")",
"def basic_table_creation():\n results = {\n 'From pyarrow arrays': pa.table([\n pa.array(['Kakashi', 'Itachi', 'Shisui'], type=pa.string()),\n pa.array(['Hatake', 'Uchiha', 'Uchiha'], type=pa.string())\n ], names=['first_name', 'last_name']),\n 'From List[dict]': pa.Table.from_pylist([\n {'first_name': 'Kakashi', 'last_name': 'Hatake', },\n {'first_name': 'Itachi', 'last_name': 'Uchiha', },\n {'first_name': 'Shisui', 'last_name': 'Uchiha', },\n ]),\n 'From Dict[str, list]': pa.Table.from_pydict({\n 'first_name': ['Kakashi', 'Itachi', 'Shisui'],\n 'last_name': ['Hatake', 'Uchiha', 'Uchiha'],\n }),\n 'From pandas df': pa.Table.from_pandas(pd.DataFrame([\n {'first_name': 'Kakashi', 'last_name': 'Hatake', },\n {'first_name': 'Itachi', 'last_name': 'Uchiha', },\n {'first_name': 'Shisui', 'last_name': 'Uchiha', },\n ])),\n }\n pretty_print_result_map(results)",
"def new_table(self):\n self.c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS {table} (\n id integer primary key,\n {event} integer,\n {desc} text,\n {date} text,\n {link} text)\n \"\"\".format(\n table=TABLE,\n event=EVENT,\n desc=DESC,\n date=DATE,\n link=LINK,\n )\n )",
"def create_prediction_table():\n try:\n conn = create_connection()\n c = conn.cursor()\n c.execute(\"\"\"CREATE TABLE IF NOT EXISTS predtable\n (age NUMERIC,workclass TEXT,fnlwgt NUMERIC,education TEXT,education_num NUMERIC,marital_status TEXT,occupation TEXT,relationship TEXT,race TEXT,sex TEXT,capital_gain NUMERIC,capital_loss NUMERIC,hours_per_week NUMERIC,native_country TEXT,predicted_class NUMERIC,model_class TEXT, time_of_prediction TEXT)\"\"\")\n\n except Exception as e:\n pass",
"def create_table():\n conn, curs = conn_curs()\n create = \"\"\"\n CREATE TABLE posts(\n id SERIAL PRIMARY KEY,\n title_selftext TEXT NOT NULL,\n subreddit VARCHAR(20) NOT NULL,\n subreddit_id VARCHAR(15) NOT NULL,\n num_comments INT NOT NULL,\n upvotes INT NOT NULL,\n downvotes INT NOT NULL,\n flair VARCHAR(20) NOT NULL,\n has_vid bool NOT NULL,\n num_awards INT NOT NULL)\n \"\"\"\n curs.execute(create)\n conn.commit()\n return",
"def _store_atomic_queries_table(self):\n fieldtypes = t1s.TEST_FIELD_TYPES.numbers_list()\n fieldtype_strings = [t1s.TEST_FIELD_TYPES.to_string(ft)\n for ft in fieldtypes]\n categories = [] # a list of atomic (cat, subcat) tuples\n for cat in t1s.ATOMIC_CATEGORIES:\n if cat in t1s.SUBCATEGORIES.keys():\n for subcat in t1s.SUBCATEGORIES[cat].numbers_list():\n categories.append((cat, subcat))\n else:\n categories.append((cat, None))\n table = latex_classes.LatexTable(\n \"Supported Query Types\", \"supported_query_types\",\n [\"Query Type\", \"Subtype\"] + [\n \"on \" + fts for fts in fieldtype_strings])\n for (cat, subcat) in categories:\n cat_string = t1s.CATEGORIES.to_string(cat)\n if subcat != None:\n subcat_string = t1s.SUBCATEGORIES[cat].to_string(subcat)\n else:\n subcat_string = \"\"\n row = [cat_string, subcat_string]\n for fieldtype in fieldtypes:\n if fieldtype not in t1s.CATEGORY_TO_FIELDS[cat]:\n status = \"N/A\"\n else:\n correctness_getter = self._report_generator.get_correctness_getter(\n cat=cat, subcat=subcat, fieldtype=fieldtype)\n if correctness_getter.get_count() == 0:\n status = \"-\"\n elif correctness_getter.get_num_correct() > 0:\n status = \"pass\"\n else:\n status = \"fail\"\n row.append(status)\n table.add_content(row)\n self._outp[\"atomic_queries_table\"] = table.get_string()",
"def create_table(self):\n self.db.query(f\"\"\"\n CREATE TABLE IF NOT EXISTS {self.table} (\n id INT UNSIGNED NOT NULL AUTO_INCREMENT,\n name VARCHAR(140) NOT NULL,\n PRIMARY KEY (id)\n )\n \"\"\")\n\n self.db.query(\"\"\"\n CREATE TABLE IF NOT EXISTS product_category (\n product_id bigint unsigned,\n category_id int unsigned,\n CONSTRAINT pfk_product_2\n FOREIGN KEY (product_id)\n REFERENCES product(id),\n CONSTRAINT pfk_category_2\n FOREIGN KEY (category_id)\n REFERENCES category(id),\n PRIMARY KEY (product_id, category_id)\n )\n \"\"\")",
"def make_tables(con):\n\n with con:\n con.execute(\"\"\"\n CREATE TABLE authorpapers (\n id TEXT NOT NULL PRIMARY KEY,\n value TEXT\n );\n \"\"\")\n \n \n #write one testValue to papers\n sql = 'INSERT INTO authorpapers (id, value) values (?, ?)'\n data = [('test A Paper', 'testy_McTest')]\n \n with con:\n con.executemany(sql, data)"
] | [
"0.7502053",
"0.74530804",
"0.7006743",
"0.67116565",
"0.637581",
"0.63053846",
"0.6291374",
"0.6251102",
"0.62424546",
"0.6121951",
"0.6028198",
"0.60154396",
"0.5988935",
"0.5975908",
"0.59302306",
"0.58881867",
"0.58683085",
"0.58677876",
"0.5864087",
"0.5860241",
"0.58586377",
"0.5847888",
"0.58345586",
"0.5834341",
"0.5830402",
"0.58211243",
"0.5817892",
"0.57839704",
"0.5781435",
"0.5773956"
] | 0.7593576 | 0 |
Create a table for the lectures data | def create_lectures_table(conn):
execute_sql_script(conn, "05_create_lectures_table.sql") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_table(self, title: str, columns: List[str], data: Dict[str, str]) -> None:\n table = Table(title=title, box=box.SIMPLE)\n for column in columns:\n table.add_column(column, justify=\"right\", style=\"bright_yellow\", no_wrap=True)\n\n for model, percentage in data.items():\n table.add_row(model, percentage)\n\n console = Console()\n console.print(table)",
"def make_performance_table(self):\n table = Table()\n table.add_column(\"Classifier\", ratio=25)\n table.add_column(\"Score\", ratio=10, justify=\"center\", no_wrap=True)\n table.add_column(\"Params\", ratio=25, no_wrap=False)\n table.add_column(\"Model ID\",ratio=40, no_wrap=True)\n\n for name, stuff in self.trainer.performance.items():\n score, params, hash_id = stuff\n style = \"bold green\" if name == self.trainer.best_classifier__name else \"\"\n best_one = \" ***\" if name == self.trainer.best_classifier__name else \"\"\n \n table.add_row(\n str(name),\n str(np.round(score, 3)), \n str(params), \n f\"{str(hash_id)}{best_one}\",\n style=style)\n \n return table",
"def show_table():\n\n title_list = ('ID', 'Platform', 'Producer', 'Year', 'Elements')\n \n return table, title_list",
"def addTable(self, document, title, data):\n #[<Job ID> | <Simulation> | Server | User | Submitted | Status | <Delete> | <Check>]\n\n headers = (\"Job ID\", \"Simulation\", \"Server\", \"User\", \"Submitted\", \"Status\", \" \", \" \")\n\n if len(data[0]) != 0:\n document.add(Paragraph(text=title, Class=\"header-h2\"))\n document.add(tableController(headers, 'jobs/index')) # temp\n document.add(tableJobs(headers, data[0], data[1], data[2], data[3]))",
"def _createCoursesTable(self):\n\t\tcommand = \"\"\"CREATE TABLE courses (ID INTEGER PRIMARY KEY,\n\t\t\tname TEXT,\n\t\t\tauthor_id INTEGER,\n\t\t\tdescription TEXT\n\t\t\t);\n\"\"\"\n\n\t\tself._run_command(command)",
"def createTable(self):\n self.c.execute(\"CREATE TABLE IF NOT EXISTS Enigma(Datum INTEGER PRIMARY KEY, Walzenlage TEXT, Ringstellung TEXT, Steckerverbindungen TEXT, Kenngruppen TEXT)\")",
"def create_table(self):\n conn = self._connect_DB()\n cur = conn.cursor()\n cur.execute(\n \"CREATE TABLE IF NOT EXISTS movie_table (movie_title, people);\"\n )\n self._close_connection(conn)",
"def generate_table(self, rows):\n ...",
"def create_tables():\n\tlog_msg4(\"No hay tablas para el año \" + txt_year + \". Creando\")\n\n\tcreate_table('visited')\n\tcreate_table('saved')\n\tcreate_table('actions')\n\n\tglobal new_tables_created\n\tnew_tables_created = True\n\n\tlog_msg_ok4()",
"def write_the_table(what):\n global count_row\n count_row += 1\n\n if what.get('rank') == 0:\n background_blue.append(count_row)\n\n struct = what.get('structure')\n link = what.get('link')\n exams_1 = what.get('exams_1')\n exams_2 = what.get('exams_2')\n exams_empty = [['', '', '', '', '', '', '', '', '', '', '', '']] \\\n if self.training.session_type != '1' else \\\n [['', '', '', '', '', '']]\n\n def formated(number):\n \"\"\"\n Remove trailing 0\n \"\"\"\n frac, whole = modf(number)\n if frac == 0:\n return int(whole)\n return str(number).rstrip('0')\n\n def write_exams(list_1, list_2):\n exam_table = []\n for ex_1, ex_2 in itertools.zip_longest(list_1, list_2):\n ex_1_table = [\n formated(ex_1.coefficient) if ex_1 is not None else '',\n [\n Paragraph(filter_content(ex_1.label) if ex_1 else '',\n self.styles['SmallNormal']),\n Paragraph(\n \"<para textColor=grey>\" + filter_content(ex_1.additionnal_info) \\\n if ex_1 and ex_1.additionnal_info \\\n else \"\" + \"</para\\>\",\n self.styles['SmallNormal'])\n ],\n ex_1.type_exam if ex_1 is not None else '',\n ex_1.text_duration if ex_1 is not None else '',\n '' if ex_1 is None \\\n else ex_1.convocation if not training_is_ccct \\\n else ex_1.get_type_ccct_display(),\n ex_1.eliminatory_grade if ex_1 is not None else '',\n ex_1.threshold_session_2 if ex_1 is not None else '',\n ]\n\n ex_2_table = [\n formated(ex_2.coefficient) if ex_2 is not None else '',\n [Paragraph(filter_content(ex_2.label) if ex_2 is not None else '', self.styles[\n 'SmallNormal']), Paragraph(\"<para textColor=grey\\\n >\" + ex_2.additionnal_info + \"</para\\\n >\" if ex_2.additionnal_info is not None else \"\",\n self.styles['SmallNormal'])],\n ex_2.type_exam if ex_2 is not None else '',\n ex_2.text_duration if ex_2 is not None else '',\n ex_2.eliminatory_grade if ex_2 is not None else '',\n ] if ex_2 is not None else ['', '', '', '', '']\n if self.training.session_type != '1':\n ex_1_table.extend(ex_2_table)\n else:\n ex_1_table.pop()\n exam_table.append(ex_1_table)\n exam_table = exam_table if len(exam_table) > 0 else exams_empty\n if exam_table == exams_empty:\n # TODO: calculate empty space to set rowHeights in order to\n # avoid blank in table\n pass\n inner_table = Table(\n exam_table, colWidths=width_exams, rowHeights=None)\n inner_table.setStyle(TableStyle(\n [('INNERGRID', (0, 0), (-1, -1), 0.1, colors.black),\n ('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),\n ('ALIGN', (0, 0), (-1, -1), 'CENTER'),\n ('FONTSIZE', (0, 0), (-1, -1), 8),\n # ('LEFTPADDING', (0, 0), (-1, -1), 0),\n # ('RIGHTPADDING', (0, 0), (-1, -1), 0),\n ('BOTTOMPADDING', (0, 0), (-1, -1), 0),\n ('TOPPADDING', (0, 0), (-1, -1), 0),\n ]))\n return inner_table\n\n ref_scol = struct.ref_si_scol if struct.ref_si_scol else \"\" # FIX bug with rof data\n ref_data = (\n Paragraph(struct.ROF_ref, self.styles['CenterSmall']),\n Paragraph(ref_scol, self.styles['CenterSmall'])\n ) if self.reference == 'both' \\\n else Paragraph(struct.ROF_ref, self.styles['CenterSmall']) if self.reference == 'with_rof' \\\n else Paragraph(ref_scol, self.styles['CenterSmall']) if self.reference == 'with_si' \\\n else Paragraph('', self.styles['CenterSmall'])\n\n object_line = [\n Paragraph(\n \"<para leftIndent=%s>%s</para> \" % (what.get('rank')*10, filter_content(struct.label)),\n self.styles['SmallBold'] if what.get('rank') == 0 \\\n or what.get('structure').nature == 'UE' \\\n else self.styles['SmallNormal']\n ),\n Paragraph(\n struct.get_respens_name if not struct.external_name \\\n else struct.external_name,\n 
self.styles['CenterSmall'] if not struct.external_name else \\\n self.styles['CenterSmallItalic']\n ),\n [ref_data],\n '30' if self.training.degree_type.ROF_code in self.training_types_for_which_to_display_30_ects\\\n and struct.nature == 'SE'\\\n else struct.ECTS_credit if struct.ECTS_credit else '-',\n formated(link.coefficient) if link.coefficient else '',\n link.eliminatory_grade,\n write_exams(exams_1, exams_2)\n ]\n if self.respforms:\n if self.reference == 'without':\n object_line.pop(2)\n else:\n object_line.pop(1)\n if self.reference == 'without':\n object_line.pop(1)\n\n big_table.append(object_line)\n\n for e in what.get('children'):\n write_the_table(e)",
"def create_table():\n c.execute('CREATE TABLE IF NOT EXISTS activities(name TEXT, sort TEXT, category TEXT, estimated_time_hours REAL, '\n 'estimated_time_min REAL, '\n 'ratio REAL, date_now TEXT, date TEXT, frm TEXT, till TEXT, priority REAL, status TEXT, score TEXT, '\n 'frequency TEXT, Sunday TEXT, Monday TEXT, Tuesday TEXT, Wednesday TEXT, Thursday TEXT, Friday TEXT, '\n 'Saturday TEXT)')\n data = strainer(\"\", 'sort', 'category')\n if data == []:\n insert_category('None', 3)",
"def make_tables(self):\n for t in self.tables:\n self.add_table(groupname=t['groupname'],\n tablename=t['tablename'],\n description=t['description'],\n tabletitle=t['tabletitle'])",
"def crearTabla(self):\n mensaje = self.base.createTable()\n showinfo('Resultado', mensaje)",
"def __create_presentations_table(self, schema=PRESENTATIONS_SCHEMA_310):\r\n log.info(\"table created\")\r\n QtSql.QSqlQuery(schema)",
"def create_table(self):\n\n db = self.connection(database=\"imdb\")\n\n try:\n cur = db.cursor()\n sql = \"\"\"\n CREATE TABLE film (id INT(3) NOT NULL PRIMARY KEY AUTO_INCREMENT,\n title VARCHAR(256) NOT NULL COLLATE utf8_spanish2_ci, film_id \n VARCHAR(20), year INT(4) NOT NULL, director VARCHAR(256) NOT NULL \n COLLATE utf8_spanish2_ci, cast VARCHAR(1024) NOT NULL COLLATE \n utf8_spanish2_ci, rating FLOAT(2, 1) NOT NULL, poster_url \n VARCHAR(1024) NOT NULL);\n \"\"\"\n cur.execute(sql)\n print(\"Table created!\")\n except:\n return\n\n db.close()",
"def data_table_creation(cursor, connection_to_db):\n\n cursor.execute(\"\"\"\n\n CREATE TABLE IF NOT EXISTS data(\n question TEXT NOT NULL,\n answer TEXT NULL,\n question_type TEXT NOT NULL,\n question_type_answers TEXT NULL,\n PRIMARY KEY(question)\n );\n\n \"\"\")\n\n connection_to_db.commit()",
"def make_electrode_table(self):\n self.table = get_electrode_table()\n self.dev1 = Device(name='dev1')\n self.group = ElectrodeGroup(name='tetrode1',\n description='tetrode description',\n location='tetrode location',\n device=self.dev1)\n for i in range(4):\n self.table.add_row(location='CA1', group=self.group, group_name='tetrode1')",
"def make_electrode_table(self):\n self.table = get_electrode_table()\n self.dev1 = Device(name='dev1')\n self.group = ElectrodeGroup(name='tetrode1',\n description='tetrode description',\n location='tetrode location',\n device=self.dev1)\n for i in range(4):\n self.table.add_row(location='CA1', group=self.group, group_name='tetrode1')",
"def create_table(self):\n pass",
"def _make_table(\n self,\n epoch,\n total_loss_training,\n total_loss_validation=None,\n losses_training=None,\n losses_validation=None,\n metrics=None,\n learning_rate=None,\n ):\n col_width = 9\n multi_target = losses_training is not None and len(losses_training) > 1\n\n title = \"\\n\\n Training history\\n\"\n\n # Calculate width of table and columns\n epoch_width = 18\n if not multi_target:\n train_width = 20\n else:\n train_width = min(40, (col_width + 2) * (len(losses_training) + 1))\n\n val_width = 0\n if total_loss_validation is not None:\n val_width = 20\n if multi_target:\n val_width = min(40, (col_width + 2) * (len(losses_training) + 1))\n\n all_metrics_width = 0\n if metrics is not None:\n if not multi_target:\n metrics_width = col_width + 2\n else:\n metrics_width = len(losses_training) * (col_width + 2) + 1\n all_metrics_width = len(metrics) * metrics_width\n\n table_width = epoch_width + train_width + val_width + all_metrics_width\n\n self.table = rich.table.Table(\n expand=False, box=rich.box.SIMPLE, title=title, width=table_width, leading=0\n )\n\n self.table.add_column(\n Text(\"Epoch\", style=\"Grey\"), justify=\"center\", width=epoch_width\n )\n self.table.add_column(\n Text(\"Training loss\", style=\"red bold\"), justify=\"center\", width=train_width\n )\n if total_loss_validation is not None:\n self.table.add_column(\n Text(\"Validation loss\", style=\"blue bold\"),\n justify=\"center\",\n width=val_width,\n )\n if metrics is not None:\n for name, m in metrics.items():\n self.table.add_column(\n Text(name, style=\"purple bold\"),\n justify=\"center\",\n width=metrics_width,\n )\n\n def make_header_columns():\n # Epoch and LR\n columns = [Text(\"#\", justify=\"right\", style=\"bold\")]\n if learning_rate is not None:\n columns += [Text(\"LR\", justify=\"right\")]\n yield Columns(columns, align=\"center\", width=6)\n\n # Training losses\n text = Align(\n Text(\"Total\", justify=\"right\", style=\"bold red\"),\n width=col_width,\n align=\"center\",\n )\n if multi_target:\n columns = [text] + [\n Align(Text(n, justify=\"right\", style=\"red\"), width=col_width)\n for n in losses_training.keys()\n ]\n yield Columns(columns, align=\"center\", width=col_width)\n else:\n yield text\n\n # Validation losses\n if total_loss_validation is not None:\n text = Align(\n Text(\"Total\", justify=\"center\", style=\"bold blue\"),\n width=col_width,\n align=\"center\",\n )\n if multi_target:\n columns = [text] + [\n Align(Text(n, justify=\"center\", style=\"blue\"), width=col_width)\n for n in losses_validation.keys()\n ]\n yield Columns(columns, align=\"center\", width=col_width)\n else:\n yield text\n\n # Metrics\n if metrics is not None:\n for name, values in metrics.items():\n if isinstance(values, dict):\n columns = [\n Align(\n Text(n, justify=\"center\", style=\"purple\"),\n width=col_width,\n )\n for n in values.keys()\n ]\n yield Columns(columns, align=\"center\", width=col_width)\n else:\n yield Align(Text(\"\"), width=col_width)\n\n self.table.add_row(*make_header_columns())\n self.table.add_row()",
"def table_maker():\r\n try:\r\n off_copy = off.copy()\r\n man_copy = man.copy()\r\n exe_copy = exe.copy()\r\n ceo_copy = ceo.copy()\r\n list_of_lists = [off_copy, man_copy, exe_copy, ceo_copy]\r\n\r\n for i in list_of_lists:\r\n for j in i:\r\n if type(j) == str:\r\n continue\r\n else:\r\n raise ValueError('All elements must be strings')\r\n\r\n row_num = max(len(off_copy), len(man_copy),\r\n len(exe_copy), len(ceo_copy))\r\n for i in list_of_lists:\r\n if len(i) != row_num:\r\n diff = row_num - len(i)\r\n for j in range(diff):\r\n i.append('')\r\n\r\n t = PrettyTable(\r\n ['Office Workers', 'Managers', 'Executives', 'CEO'])\r\n for i in range(row_num):\r\n t.add_row([off_copy[i], man_copy[i], exe_copy[i], ceo_copy[i]])\r\n\r\n with open('Employee Table.txt', 'w') as f:\r\n f.write(str(t))\r\n\r\n except FileNotFoundError:\r\n print(\"Error: No file entered\")",
"def print_mistakes_table():\n conn = sq.connect(host='localhost', user='root',\n password='student', database='quiz')\n cursor = conn.cursor()\n\n cursor.execute(\"select * from mistakes\")\n data = cursor.fetchall()\n\n table = PrettyTable()\n table.field_names = ['Question', 'Given Answer','User Given Answer']\n for row in data:\n table.add_row(row)\n conn.close()\n\n return table",
"def create_tables():\r\n db = connect_database()\r\n table_wait = \"waiting\"\r\n table_helped = \"helped\"\r\n table_help = \"help\"\r\n param_name = ['cus_num', 'name', 'username', 'ru_id', 'os_platform', 'description']\r\n param_type1 = ['INTEGER PRIMARY KEY AUTOINCREMENT', 'TEXT', 'TEXT', 'TEXT', 'TEXT', 'TEXT']\r\n param_type2 = ['INTEGER PRIMARY KEY', 'TEXT', 'TEXT', 'TEXT', 'TEXT', 'TEXT']\r\n with db:\r\n create_table(db, table_wait, param_name, param_type1)\r\n create_table(db, table_helped, param_name, param_type2)\r\n create_table(db, table_help, param_name, param_type2)\r\n db.close()",
"def create_table_data(self):\n table_rows = []\n if (len(self.students)):\n table_rows = [self._create_student_row(student) for student\n in self.students]\n return table_rows",
"def prepare_table(self):\n i = 0\n for item in ['DN[-]', 'd_out[mm]', 'tl_trub[mm]', 'roztec_trub[mm]', 'delka[mm]', 'roztec_prep[mm]', 'vyska_prep[mm]']:\n self.table.insertColumn(i)\n self.table.setHorizontalHeaderItem(i, QTableWidgetItem(item))\n i += 1\n for item in ['tl_prep[mm]','pocet_prep[-]', 'pocet_trub[-]', 'TP[m/s]', 'MZP[m/s]', 'vykon [W]',\n 'tlak_ztraty[Pa]', 'hmotnost[kg]']:\n self.table.insertColumn(i)\n self.table.setHorizontalHeaderItem(i, QTableWidgetItem(item))\n i += 1",
"def _createWordsTable(self):\n\t\tcommand = \"\"\"CREATE TABLE words (ID INTEGER PRIMARY KEY,\n\t\t\tword TEXT,\n\t\t\ttranslation TEXT,\n\t\t\tlast_refresh INTEGER,\n\t\t\tlevel INTEGER,\n\t\t\tcourse INTEGER\n\t\t\t);\n\"\"\"\n\n\t\tself._run_command(command)",
"def create_table(self):\n # Connect to database\n conn = sqlite3.connect(self)\n # Create a cursor\n c = conn.cursor()\n\n # Create a Table\n c.execute(\"\"\"CREATE TABLE weather (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n sensor text,\n location text,\n temperature real,\n description text,\n time text\n )\"\"\")\n # Commit our command\n conn.commit()\n # Close our connection\n conn.close()",
"def create_table(categories:list)->str:\n\n total_spent = get_total_spent(categories)\n\n table = str()\n\n for row_num in range(11):\n row_label = 100 - row_num*10\n\n # Row label creation - ie 100| .. 90| ... 80| ...etc\n row = f\"{row_label:>3}|\"\n\n for category in categories:\n percentage = math.floor(category.total_spent/total_spent * 10) * 10\n if percentage >= row_label:\n row += ' o '\n else:\n row += ' '\n \n table += row + ' \\n'\n return table",
"def data(self):\n worksheet_type = self.options[\"worksheet_type\"].value\n\n if worksheet_type.startswith(\"student\"):\n doc, tag, text, line = Doc().ttl()\n with tag(\"h3\"):\n text(WORKSHEET_OPTIONS[worksheet_type])\n\n line(\n \"style\",\n \"\"\"\n table {\n margin-bottom: 1cm;\n }\n td {\n border-collapse: collapse;\n height: 0.5cm;\n margin: 0;\n padding: 0;\n }\n td.bordered-cell {\n border: 1px solid black;\n width: 0.5cm;\n }\n td.label-cell {\n padding-left: 0.5cm;\n font-size: 0.4cm;\n line-height: 0.4cm;\n }\n \"\"\"\n )\n\n with tag(\"p\"):\n text(WORKSHEET_INTRODUCTION_TEXT[worksheet_type])\n\n if worksheet_type == \"student-basic\":\n # Table one\n self.add_run_length_encoding_table(\n tag,\n line,\n 9,\n 18,\n row_labels=[\n \"4, 11\",\n \"4, 9, 2, 1\",\n \"4, 9, 2, 1\",\n \"4, 11\",\n \"4, 9\",\n \"4, 9\",\n \"5, 7\",\n \"0, 17\",\n \"1, 15\",\n ]\n )\n # Table two\n self.add_run_length_encoding_table(\n tag,\n line,\n 13,\n 18,\n row_labels=[\n \"6, 5, 2, 3\",\n \"4, 2, 5, 2, 3, 1\",\n \"3, 1, 9, 1, 2, 1\",\n \"3, 1, 9, 1, 1, 1\",\n \"2, 1, 11, 1\",\n \"2, 1, 10, 2\",\n \"2, 1, 9, 1, 1, 1\",\n \"2, 1, 8, 1, 2, 1\",\n \"2, 1, 7, 1, 3, 1\",\n \"1, 1, 1, 1, 4, 2, 3, 1\",\n \"0, 1, 2, 1, 2, 2, 5, 1\",\n \"0, 1, 3, 2, 5, 2\",\n \"1, 3, 2, 5 \",\n ]\n )\n # Table three\n self.add_run_length_encoding_table(\n tag,\n line,\n 17,\n 18,\n row_labels=[\n \"6, 2, 2, 2\",\n \"5, 1, 2, 2, 2, 1\",\n \"6, 6\",\n \"4, 2, 6, 2\",\n \"3, 1, 10, 1\",\n \"2, 1, 12, 1\",\n \"2, 1, 3, 1, 4, 1, 3, 1\",\n \"1, 2, 12, 2\",\n \"0, 1, 16, 1\",\n \"0, 1, 6, 1, 2, 1, 6, 1\",\n \"0, 1, 7, 2, 7, 1\",\n \"1, 1, 14, 1\",\n \"2, 1, 12, 1\",\n \"2, 1, 5, 2, 5, 1\",\n \"3, 1, 10, 1\",\n \"4, 2, 6, 2\",\n \"6, 6\",\n ]\n )\n else:\n line(\n \"style\",\n \"\"\"\n td.padding-cell {\n width: 0.5cm;\n }\n td.underline-cell {\n border-bottom: 1px solid #999;\n width: 8cm;\n }\n div.dotted-line {\n margin-top: 1cm;\n margin-bottom: 1cm;\n border-top: 1px dotted #888;\n }\n \"\"\"\n )\n self.add_run_length_encoding_table(tag, line, 16, 16, underline=True)\n line(\"div\", \"\", klass=\"dotted-line\")\n self.add_run_length_encoding_table(tag, line, 16, 16, underline=True)\n return {\"type\": \"html\", \"data\": doc.getvalue()}\n else:\n image = Image.open(\"static/img/resources/run-length-encoding/teacher-worksheet.png\")\n image = image.rotate(270, expand=True)\n return {\"type\": \"image\", \"data\": image}",
"def create_table(self) -> None:\n self._cur.execute(\"CREATE TABLE cameras (Name TEXT PRIMARY KEY)\")\n self._conn.commit()"
] | [
"0.6672511",
"0.643523",
"0.63418883",
"0.6291637",
"0.62880355",
"0.626312",
"0.6157667",
"0.6146372",
"0.6142933",
"0.61408347",
"0.6134551",
"0.61316216",
"0.6126261",
"0.6076441",
"0.60461056",
"0.6041219",
"0.603075",
"0.603075",
"0.60147893",
"0.60142946",
"0.6013143",
"0.59936166",
"0.5955618",
"0.5955214",
"0.595481",
"0.59515774",
"0.5939464",
"0.5937153",
"0.5934107",
"0.59240425"
] | 0.7030476 | 0 |