query (string) | document (string) | metadata (dict) | negatives (list of 30) | negative_scores (list of 30) | document_score (string) | document_rank (string)
---|---|---|---|---|---|---|
Creates listeners for the default balancer of the current environment.
|
def create_listeners(self):
    target_groups_config = self.get_target_groups_config()
    balancer_arn = self.get_balancer_arn()
    response_data = {}

    for short_name in target_groups_config.keys():
        target_group_name = self.get_target_group_name(short_name)

        response = self.client.create_listener(
            LoadBalancerArn=balancer_arn,
            DefaultActions=[
                {
                    'Type': 'forward',
                    'TargetGroupArn': self.get_target_group_arn(short_name)
                }
            ],
            **target_groups_config[short_name],
        )
        assert response['ResponseMetadata']['HTTPStatusCode'] == 200

        self.logger.info('Target group {group} bound to {balancer} load balancer.'.format(
            group=target_group_name,
            balancer=self.get_balancer_name(),
        ))
        response_data[target_group_name] = response['Listeners']

    return response_data
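For context, a minimal standalone sketch of the underlying boto3 call the loop above issues for each target group; the client construction and both ARNs are placeholders assumed for illustration, and in the method above Protocol and Port arrive via target_groups_config:

import boto3

# Hypothetical, self-contained example of binding one target group to a load
# balancer via a forward listener; the ARNs below are placeholders.
elbv2 = boto3.client('elbv2')
response = elbv2.create_listener(
    LoadBalancerArn='arn:aws:elasticloadbalancing:us-east-1:111111111111:loadbalancer/app/example/abc123',
    Protocol='HTTP',  # supplied by target_groups_config in the method above
    Port=80,          # likewise assumed here
    DefaultActions=[{
        'Type': 'forward',
        'TargetGroupArn': 'arn:aws:elasticloadbalancing:us-east-1:111111111111:targetgroup/example-web/def456',
    }],
)
print(response['Listeners'][0]['ListenerArn'])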
|
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
|
[
"def create_listeners(ctx):\n data = self.create_listeners()\n ctx.info('Created listeners for load balancer {}:'.format(\n self.get_balancer_name()\n ))\n ctx.pp.pprint(data)",
"def create(ctx):\n create_target_groups(ctx)\n create_balancer(ctx)\n create_listeners(ctx)\n\n ctx.info('Load balancers setup completed.')",
"def create_sockets(conf, log, fds=None):\n listeners = []\n\n # get it only once\n laddr = conf.address\n\n # check ssl config early to raise the error on startup\n # only the certfile is needed since it can contains the keyfile\n if conf.certfile and not os.path.exists(conf.certfile):\n raise ValueError('certfile \"%s\" does not exist' % conf.certfile)\n\n if conf.keyfile and not os.path.exists(conf.keyfile):\n raise ValueError('keyfile \"%s\" does not exist' % conf.keyfile)\n\n # sockets are already bound\n if fds is not None:\n for fd in fds:\n sock = socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM)\n sock_name = sock.getsockname()\n sock_type = _sock_type(sock_name)\n listener = sock_type(sock_name, conf, log, fd=fd)\n listeners.append(listener)\n\n return listeners\n\n # no sockets is bound, first initialization of gunicorn in this env.\n for addr in laddr:\n sock_type = _sock_type(addr)\n sock = None\n for i in range(5):\n try:\n sock = sock_type(addr, conf, log)\n except socket.error as e:\n if e.args[0] == errno.EADDRINUSE:\n log.error(\"Connection in use: %s\", str(addr))\n if e.args[0] == errno.EADDRNOTAVAIL:\n log.error(\"Invalid address: %s\", str(addr))\n if i < 5:\n msg = \"connection to {addr} failed: {error}\"\n log.debug(msg.format(addr=str(addr), error=str(e)))\n log.error(\"Retrying in 1 second.\")\n time.sleep(1)\n else:\n break\n\n if sock is None:\n log.error(\"Can't connect to %s\", str(addr))\n sys.exit(1)\n\n listeners.append(sock)\n\n return listeners",
"def events_init(sc, drivers, rpcmgr):\n ev_ids = [lb_const.EVENT_CREATE_LOADBALANCER_V2,\n lb_const.EVENT_UPDATE_LOADBALANCER_V2,\n lb_const.EVENT_DELETE_LOADBALANCER_V2,\n\n lb_const.EVENT_CREATE_LISTENER_V2,\n lb_const.EVENT_UPDATE_LISTENER_V2,\n lb_const.EVENT_DELETE_LISTENER_V2,\n\n lb_const.EVENT_CREATE_POOL_V2, lb_const.EVENT_UPDATE_POOL_V2,\n lb_const.EVENT_DELETE_POOL_V2,\n\n lb_const.EVENT_CREATE_MEMBER_V2,\n lb_const.EVENT_UPDATE_MEMBER_V2,\n lb_const.EVENT_DELETE_MEMBER_V2,\n\n lb_const.EVENT_CREATE_HEALTH_MONITOR_V2,\n lb_const.EVENT_UPDATE_HEALTH_MONITOR_V2,\n lb_const.EVENT_DELETE_HEALTH_MONITOR_V2,\n\n lb_const.EVENT_AGENT_UPDATED_V2,\n lb_const.EVENT_COLLECT_STATS_V2\n ]\n\n evs = []\n for ev_id in ev_ids:\n ev = nfp_event.Event(id=ev_id, handler=LBaaSV2EventHandler(\n sc, drivers, rpcmgr))\n evs.append(ev)\n sc.register_events(evs)",
"def get_create_all_listeners_flow(self):\n create_all_listeners_flow = linear_flow.Flow(\n constants.CREATE_LISTENERS_FLOW)\n create_all_listeners_flow.add(\n database_tasks.GetListenersFromLoadbalancer(\n requires=constants.LOADBALANCER,\n provides=constants.LISTENERS))\n create_all_listeners_flow.add(database_tasks.ReloadLoadBalancer(\n requires=constants.LOADBALANCER_ID,\n provides=constants.LOADBALANCER))\n create_all_listeners_flow.add(amphora_driver_tasks.ListenersUpdate(\n requires=[constants.LOADBALANCER, constants.LISTENERS]))\n create_all_listeners_flow.add(network_tasks.UpdateVIP(\n requires=constants.LOADBALANCER))\n return create_all_listeners_flow",
"def create_listener(request, **kwargs):\n data = request.DATA\n\n try:\n default_tls_ref = data['certificates'][0]\n except (KeyError, IndexError):\n default_tls_ref = None\n\n conn = get_sdk_connection(request)\n # TODO(johnsom) Add SNI support\n # https://bugs.launchpad.net/octavia/+bug/1714294\n listener = conn.load_balancer.create_listener(\n protocol=data['listener']['protocol'],\n protocol_port=data['listener']['protocol_port'],\n load_balancer_id=kwargs['loadbalancer_id'],\n name=data['listener'].get('name'),\n description=data['listener'].get('description'),\n connection_limit=data['listener'].get('connection_limit'),\n default_tls_container_ref=default_tls_ref,\n sni_container_refs=None,\n admin_state_up=data['listener'].get('admin_state_up'),\n insert_headers=data['listener'].get('insert_headers'),\n timeout_client_data=data['listener'].get('timeout_client_data'),\n timeout_member_connect=data['listener'].get('timeout_member_connect'),\n timeout_member_data=data['listener'].get('timeout_member_data'),\n timeout_tcp_inspect=data['listener'].get('timeout_tcp_inspect'),\n allowed_cidrs=data['listener'].get('allowed_cidrs'),\n # Replace empty string by None (uses default tls cipher string)\n tls_ciphers=data['listener'].get('tls_ciphers') or None,\n )\n\n if data.get('pool'):\n args = (request, kwargs['loadbalancer_id'], create_pool)\n kwargs = {'callback_kwargs': {'listener_id': listener.id}}\n thread.start_new_thread(poll_loadbalancer_status, args, kwargs)\n\n return _get_sdk_object_dict(listener)",
"def add(env, identifier, **args):\n\n mgr = SoftLayer.LoadBalancerManager(env.client)\n uuid, _ = mgr.get_lbaas_uuid_id(identifier)\n\n new_listener = {\n 'backendPort': args.get('backport'),\n 'backendProtocol': args.get('backprotocol') if args.get('backprotocol') else args.get('frontprotocol'),\n 'frontendPort': args.get('frontport'),\n 'frontendProtocol': args.get('frontprotocol'),\n 'loadBalancingMethod': args.get('method'),\n 'maxConn': args.get('connections', None),\n 'sessionType': args.get('sticky'),\n 'tlsCertificateId': args.get('sslcert')\n }\n\n try:\n mgr.add_lb_listener(uuid, new_listener)\n click.secho(\"Success\", fg='green')\n except SoftLayerAPIError as exception:\n click.secho(f\"ERROR: {exception.faultString}\", fg='red')",
"def __init__(self):\n self._listeners = []",
"def describe_listeners(ctx):\n data = self.describe_listeners()\n ctx.info('Listeners details for load balancer {}:'.format(self.get_balancer_name()))\n ctx.pp.pprint(data)",
"def balancer():\n pass",
"def post(self, request):\n kwargs = {'loadbalancer_id': request.DATA.get('loadbalancer_id')}\n return create_listener(request, **kwargs)",
"def create_listener(self, context, listener):\n LOG.info(\"Received request 'Create Listener' for LB:%(lb)s \",\n {'lb': listener['loadbalancer_id']})\n arg_dict = {'context': context,\n lb_const.LISTENER: listener,\n }\n self._send_event(lb_const.EVENT_CREATE_LISTENER_V2, arg_dict,\n serialize=True,\n binding_key=listener['loadbalancer_id'],\n key=listener['id'])",
"def create_listener(self, service, bigips):\n vip = self.service_adapter.get_virtual(service)\n tls = self.service_adapter.get_tls(service)\n if tls:\n tls['name'] = vip['name']\n tls['partition'] = vip['partition']\n\n service['listener']['operating_status'] = lb_const.ONLINE\n\n network_id = service['loadbalancer']['network_id']\n error = None\n for bigip in bigips:\n self.service_adapter.get_vlan(vip, bigip, network_id)\n try:\n self.vs_helper.create(bigip, vip)\n except HTTPError as err:\n if err.response.status_code == 409:\n LOG.debug(\"Virtual server already exists updating\")\n try:\n self.update_listener(service, [bigip])\n #self.vs_helper.update(bigip, vip)\n except Exception as e:\n LOG.warn(\"Update triggered in create failed, this could be due to timing issues in assure_service\")\n LOG.warn('VS info %s',service['listener'])\n LOG.exception(e)\n LOG.warn('Exception %s',e)\n raise e\n else:\n LOG.exception(\"Virtual server creation error: %s\" %\n err.message)\n raise\n if tls:\n # Don't stop processing in case of errors. Otherwise the other F5's won't get the same vs\n try:\n self.add_ssl_profile(tls, bigip)\n except Exception as err:\n LOG.error(\"Error adding SSL Profile to listener: {0}\".format(err))\n error = err if error is None else error\n\n if error:\n service['listener']['provisioning_status'] = 'ERROR'\n raise error",
"def delete_listeners(ctx):\n if self.balancer_exists():\n self.delete_listeners()\n ctx.info('Deleted all listeners for load balancer {}:'.format(self.get_balancer_name()))\n else:\n ctx.info('Load balancer {} does not exist, no listeners to remove.'.format(self.get_balancer_name()))",
"def get_create_listener_flow(self):\n create_listener_flow = linear_flow.Flow(constants.CREATE_LISTENER_FLOW)\n create_listener_flow.add(lifecycle_tasks.ListenersToErrorOnRevertTask(\n requires=[constants.LOADBALANCER, constants.LISTENERS]))\n #create_listener_flow.add(amphora_driver_tasks.ListenersUpdate(\n # requires=[constants.LOADBALANCER, constants.LISTENERS]))\n # Get VThunder details from database\n create_listener_flow.add(a10_database_tasks.GetVThunderByLoadBalancer(\n requires=constants.LOADBALANCER,\n provides=a10constants.VTHUNDER))\n create_listener_flow.add(handler_virtual_port.ListenersCreate(\n requires=[constants.LOADBALANCER, constants.LISTENERS, a10constants.VTHUNDER]))\n create_listener_flow.add(network_tasks.UpdateVIP(\n requires=constants.LOADBALANCER))\n create_listener_flow.add(database_tasks.\n MarkLBAndListenersActiveInDB(\n requires=[constants.LOADBALANCER,\n constants.LISTENERS]))\n return create_listener_flow",
"def start_a_listener():\n listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n listener.bind(('', 0))\n return listener, listener.getsockname()",
"def _start_listeners(self):\n if self.listeners:\n self.state = \"listening\"\n for event_listener in self.listeners:\n event_listener.start()\n\n for listener in self.listeners:\n listener.join()",
"def create_default_hooks(self):\n\n self.create_hooks(\"commands\")\n self.create_hooks(\"commands_out\")\n self.create_hooks(\"hooks\")",
"def _add_listeners(vehicle):\n @vehicle.on_attribute('mode')\n def mode_listener(self,name, msg):\n util.log_info(\"Mode switched to %s\" % msg.name)\n \n if msg.name != shared.status['manual_mode']: # manual override\n if msg.name == 'RTL' or msg.name == 'LAND':\n util.log_warning(\"External %s detected. Abort.\" % msg.name)\n shared.status['abort'] = True\n \n @vehicle.on_attribute('gps_0')\n def gps_listener(self,name, msg): # monitor satellites\n if not shared.status['thread_flag'] & shared.NSATS_TOO_LOW:\n if msg.satellites_visible < 6:\n util.log_warning(\"Satellites dropped below 5!\")\n shared.status['thread_flag'] |= shared.NSATS_TOO_LOW\n \n elif msg.satellites_visible >= 10:\n util.log_info(\"Satellites recovered to %d.\" % msg.satellites_visible)\n shared.status['thread_flag'] &= ~shared.NSATS_TOO_LOW\n \n @vehicle.on_message('SYSTEM_TIME')\n def time_listener(self,name, msg): # log timestamp\n format = '%Y-%m-%d %H:%M:%S'\n val = time.localtime(msg.time_unix_usec/1000000)\n shared.timestamp = time.strftime(format, val)",
"def create_sockets(laddr):\n listeners = []\n\n for addr in laddr:\n sock_type = _sock_type(addr)\n sock = None\n for i in range(5):\n try:\n sock = sock_type(addr)\n except socket.error as e:\n log = logging.getLogger('thriftsvr.sock')\n if e.args[0] == errno.EADDRINUSE:\n log.error(\"Connection in use: %s\", str(addr))\n if e.args[0] == errno.EADDRNOTAVAIL:\n log.error(\"Invalid address: %s\", str(addr))\n if i < 5:\n msg = \"connection to {addr} failed: {error}\"\n log.debug(msg.format(addr=str(addr), error=str(e)))\n log.error(\"Retrying in 1 second.\")\n time.sleep(1)\n else:\n break\n\n if sock is None:\n log = logging.getLogger('thriftsvr.sock')\n log.error(\"Can't connect to %s\", str(addr))\n sys.exit(1)\n\n listeners.append(sock)\n\n return listeners",
"def add_listeners(self, *listeners: CBListenerType) -> None:\n for listener in listeners:\n self.add_listener(listener)",
"def listen(self):\n pass",
"def start_a_listener():\n listener = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n listener.bind(('localhost', 0))\n return listener, listener.getsockname()",
"def _instantiate_event_listeners(self):\n event_listeners = self.conf_manager.get_event_listeners()\n plugin_parameters = [self.events_manager, self.conf_manager]\n self.listeners = self._load_plugins(event_listeners,\n common.EVENT_LISTENER_PACKAGE,\n paths.EVENT_LISTENER_DIR,\n plugin_parameters)",
"def listen(self, backlog: int) -> None:\n ...",
"def test_listen(sysmon_tester_agent):\n listen(sysmon_tester_agent, _test_config)",
"def AddListener(self, listener):\n pass",
"def create_loadbalancer(self, context, loadbalancer, driver_name):\n LOG.info(\"Received request 'Create Loadbalancer' for LB:%(lb)s \"\n \"with driver:%(driver_name)s\",\n {'lb': loadbalancer['id'],\n 'driver_name': driver_name})\n arg_dict = {'context': context,\n lb_const.LOADBALANCER: loadbalancer,\n 'driver_name': driver_name\n }\n self._send_event(lb_const.EVENT_CREATE_LOADBALANCER_V2, arg_dict,\n serialize=True, binding_key=loadbalancer['id'],\n key=loadbalancer['id'])",
"def create_listeners(self, **kwargs):\n logging.debug('Creating Service Manager listeners')\n\n # Check for required endpoint args\n required_args = (\n 'frontend_endpoint',\n 'backend_endpoint',\n 'mgmt_endpoint',\n 'sink_endpoint'\n )\n\n if not all(k in kwargs for k in required_args):\n raise ServiceManagerException, 'Missing socket endpoints, e.g. frontend/backend/mgmt/sink'\n\n for k in kwargs:\n setattr(self, k, kwargs[k])\n\n self.zcontext = zmq.Context().instance()\n\n # Our Service Manager sockets\n self.frontend_socket = self.zcontext.socket(zmq.ROUTER)\n self.backend_socket = self.zcontext.socket(zmq.XPUB)\n self.sink_socket = self.zcontext.socket(zmq.PULL)\n self.mgmt_socket = self.zcontext.socket(zmq.REP)\n self.result_pub_socket = self.zcontext.socket(zmq.PUB)\n\n try:\n self.frontend_socket.bind(self.frontend_endpoint)\n self.backend_socket.bind(self.backend_endpoint)\n self.sink_socket.bind(self.sink_endpoint)\n self.mgmt_socket.bind(self.mgmt_endpoint)\n self.result_pub_port = self.result_pub_socket.bind_to_random_port('tcp://*')\n except zmq.ZMQError as e:\n raise ServiceManagerException, 'Cannot bind Service Manager sockets: %s' % e\n\n # Create a poll set for our sockets\n self.zpoller = zmq.Poller()\n self.zpoller.register(self.frontend_socket, zmq.POLLIN)\n self.zpoller.register(self.backend_socket, zmq.POLLIN)\n self.zpoller.register(self.sink_socket, zmq.POLLIN)\n self.zpoller.register(self.mgmt_socket, zmq.POLLIN)\n\n logging.debug('Frontend socket bound to %s', self.frontend_endpoint)\n logging.debug('Backend socket bound to %s', self.backend_endpoint)\n logging.debug('Sink socket bound to %s', self.sink_endpoint)\n logging.debug('Management socket bound to %s', self.mgmt_endpoint)\n logging.debug('Result publisher socket bound to %s', 'tcp://*:' + str(self.result_pub_port))",
"def CreateListenSocket(self):\r\n self.listenSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n self.listenSocket.bind(('localhost', 0))\r\n self.listenSocket.listen(BACKLOG)\r\n self.listenSocket.setblocking(False)\r\n return self.listenSocket"
] |
[
"0.74984",
"0.6155189",
"0.6015609",
"0.6012771",
"0.5820154",
"0.57435435",
"0.56980133",
"0.5667754",
"0.5656363",
"0.5631498",
"0.5504685",
"0.5498395",
"0.5460351",
"0.54144293",
"0.5363359",
"0.53238726",
"0.5311425",
"0.53105676",
"0.52813196",
"0.5230505",
"0.52195066",
"0.51972014",
"0.51599574",
"0.5152817",
"0.51509404",
"0.5150342",
"0.5080894",
"0.5059139",
"0.5053306",
"0.5042264"
] |
0.6311822
|
1
|
Deletes listeners for the default balancer of the current environment.
|
def delete_listeners(self):
    listeners_info = self.describe_listeners()
    for listener in listeners_info:
        response = self.client.delete_listener(
            ListenerArn=listener['ListenerArn']
        )
        assert response['ResponseMetadata']['HTTPStatusCode'] == 200
        self.logger.info('Successfully deleted listener {listener_arn} for balancer {balancer}.'.format(
            listener_arn=listener['ListenerArn'],
            balancer=self.get_balancer_name(),
        ))
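A hedged sketch of the same teardown expressed against a bare boto3 client, assuming a placeholder load balancer ARN and omitting error handling:

import boto3

# Hypothetical teardown loop: list the listeners on a load balancer and delete
# each one; the ARN is a placeholder.
elbv2 = boto3.client('elbv2')
balancer_arn = 'arn:aws:elasticloadbalancing:us-east-1:111111111111:loadbalancer/app/example/abc123'
for listener in elbv2.describe_listeners(LoadBalancerArn=balancer_arn)['Listeners']:
    elbv2.delete_listener(ListenerArn=listener['ListenerArn'])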
|
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
|
[
"def delete_listeners(ctx):\n if self.balancer_exists():\n self.delete_listeners()\n ctx.info('Deleted all listeners for load balancer {}:'.format(self.get_balancer_name()))\n else:\n ctx.info('Load balancer {} does not exist, no listeners to remove.'.format(self.get_balancer_name()))",
"def delete(ctx):\n delete_listeners(ctx)\n delete_balancer(ctx)\n delete_target_groups(ctx)\n\n ctx.info('Load balancers deletion completed.')",
"def delete_listener(self, service, bigips):\n vip = self.service_adapter.get_virtual_name(service)\n tls = self.service_adapter.get_tls(service)\n if tls:\n tls['name'] = vip['name']\n tls['partition'] = vip['partition']\n error = None\n for bigip in bigips:\n self.vs_helper.delete(bigip,\n name=vip[\"name\"],\n partition=vip[\"partition\"])\n\n # delete ssl profiles\n # Don't stop processing in case of errors. Otherwise the other F5's might have a different configuration\n try:\n self.remove_ssl_profiles(tls, bigip)\n except Exception as err:\n LOG.error(\"Error adding SSL Profile to listener: {0}\".format(err))\n error = err if error is None else error\n\n if error:\n raise error",
"async def on_delete_sockets(self):\n async with self.socket_lock:\n existing_monitor_sockets = []\n existing_scraper_sockets = []\n # get the current list of existing sockets\n for f in list(os.listdir(self.config[\"GlobalConfig\"][\"socket_path\"])):\n if f.startswith(\"Monitor.\"):\n existing_monitor_sockets.append(f)\n elif f.startswith(\"Scraper.\"):\n existing_scraper_sockets.append(f)\n\n # temp copy\n updated_monitor_sockets = copy.copy(self.monitor_sockets)\n updated_scraper_sockets = copy.copy(self.scraper_sockets)\n\n # remove every internal socket that is not existing\n for class_name in self.monitor_sockets:\n if \"Monitor.\" + class_name not in existing_monitor_sockets:\n updated_monitor_sockets.pop(class_name)\n for class_name in self.scraper_sockets:\n if \"Scraper.\" + class_name not in existing_scraper_sockets:\n updated_scraper_sockets.pop(class_name)\n\n self.monitor_sockets = updated_monitor_sockets\n self.scraper_sockets = updated_scraper_sockets",
"def unregister(self, target, hostname, listener_type):",
"def delete(env, identifier, listener):\n\n mgr = SoftLayer.LoadBalancerManager(env.client)\n uuid, _ = mgr.get_lbaas_uuid_id(identifier)\n try:\n mgr.remove_lb_listener(uuid, listener)\n click.secho(\"Success\", fg='green')\n except SoftLayerAPIError as exception:\n click.secho(f\"ERROR: {exception.faultString}\", fg='red')",
"def remove_hsm_status_listeners(self) -> None:\n self._listeners[ID_HSM_STATUS] = []",
"def close_listeners(self):\n logging.debug('Closing Service Manager listeners')\n\n self.zpoller.unregister(self.frontend_socket)\n self.zpoller.unregister(self.backend_socket)\n self.zpoller.unregister(self.sink_socket)\n self.zpoller.unregister(self.mgmt_socket)\n\n self.frontend_socket.close()\n self.backend_socket.close()\n self.sink_socket.close()\n self.mgmt_socket.close()\n self.result_pub_socket.close()\n\n self.zcontext.destroy()",
"def delete(self, request, listener_id):\n conn = get_sdk_connection(request)\n retry_on_conflict(\n conn, conn.load_balancer.delete_listener,\n listener_id, ignore_missing=True,\n load_balancer_getter=listener_get_load_balancer_id,\n resource_id=listener_id)",
"def DeleteListeners(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DeleteListeners\", params, headers=headers)\n response = json.loads(body)\n model = models.DeleteListenersResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))",
"def delete_listener(self, argu):\n\n if not argu:\n LOG.error(\"In delete_listener, it should not pass the None.\")\n\n # delete vs\n self._delete_vs(\n argu['listener_id'],\n argu['protocol']\n )",
"def delete_listener(self, context, listener):\n LOG.info(\"Received request 'Delete Listener' for LB:%(lb)s \",\n {'lb': listener['loadbalancer_id']})\n arg_dict = {'context': context,\n lb_const.LISTENER: listener,\n }\n self._send_event(lb_const.EVENT_DELETE_LISTENER_V2, arg_dict,\n serialize=True,\n binding_key=listener['loadbalancer_id'],\n key=listener['id'])",
"def test_remove_listener(self):\n listener = lambda state: state\n self.wrapper.add_listener(listener)\n self.assertEqual(len(self.client.listeners), 1)\n self.wrapper.remove_listener(listener)\n self.assertEqual(len(self.client.listeners), 0)",
"def removeServiceListener(self, listener: ghidra.framework.plugintool.util.ServiceListener) -> None:\n ...",
"def unlisten(cls, name: str):\r\n cls.Unlisten(name)",
"def abort(self):\n for key in self.listeners:\n sem = self.listeners[key]\n self.listeners[key] = None\n\n # TODO: Received data and semahore should be stored separately\n if isinstance(sem, asyncio.Semaphore):\n sem.release()",
"def delete_balancer(ctx):\n if self.balancer_exists():\n self.delete_balancer()\n ctx.info('Successfully deleted load balancer {}:'.format(self.get_balancer_name()))\n else:\n ctx.info('Load balancer {} does not exist, nothing to delete.'.format(\n self.get_balancer_name()\n ))",
"def create_listeners(ctx):\n data = self.create_listeners()\n ctx.info('Created listeners for load balancer {}:'.format(\n self.get_balancer_name()\n ))\n ctx.pp.pprint(data)",
"def clear_hooks(self):\n self._conn_hooks = []",
"def remove_device_listeners(self, device_id: str) -> None:\n self._listeners[device_id] = []",
"def clear(self) -> None:\n self.loggers.clear()\n self.topics.clear()\n self.bindings = BindFlag(0)",
"def clear_message_listener(self):\n self.callback_message.clear()",
"def off_all(self) -> None:\n self._event_tree.clear()\n del self._any_listeners[:]",
"def remove_listener(self) -> None:\n client = self.discord_client\n\n try:\n remove_listener = cast(\"Bot\", client).remove_listener\n except AttributeError:\n unwrap_client_listener(self.discord_client, self.on_socket_response)\n else:\n log.info(f\"Removing socket response listener from {client}\")\n remove_listener(self.on_socket_response)",
"def cleanup_notifiers(notifiers):\n for notifier in notifiers.values():\n notifier.stop()",
"def remove_listener ( cls, listener, class_name = '' ):\n MetaHasTraits._listeners[ class_name ].remove( listener )",
"def _remove_listeners(self, txid: int = None):\n if txid is None:\n for f in self._listeners.values():\n f.cancel()\n self._listeners = {}\n return\n\n self._listeners[txid].cancel()\n del self._listeners[txid]",
"def stop(self) -> None:\n for instance in self.instances:\n instance.listener = None\n instance.stop()",
"def __del__(self):\n\t\trospy.logdebug('MAVROSListener destruction')\n\t\t\n\t\tfor sub in self.__subs.values():\n\t\t\tsub.unregister()",
"def _get_cascade_delete_pools_listeners_flow(self, lb):\n pools_listeners_delete_flow = linear_flow.Flow('pool_listener_delete_flow')\n store = {}\n # loop for loadbalancer's l7policy deletion\n l7policy_delete_flow = None\n for listener in lb.listeners:\n l7policy_delete_flow = linear_flow.Flow('l7policy_delete_flow')\n for l7policy in listener.l7policies:\n l7policy_name = 'l7policy_' + l7policy.id\n store[l7policy_name] = l7policy\n l7policy_delete_flow.add(\n self._l7policy_flows.get_cascade_delete_l7policy_internal_flow(l7policy_name))\n if l7policy_delete_flow:\n pools_listeners_delete_flow.add(l7policy_delete_flow)\n\n # loop for loadbalancer's pool deletion\n for pool in lb.pools:\n pool_name = 'pool' + pool.id\n members = pool.members\n store[pool_name] = pool\n listeners = pool.listeners\n default_listener = None\n pool_listener_name = 'pool_listener' + pool.id\n if listeners:\n default_listener = pool.listeners[0]\n store[pool_listener_name] = default_listener\n health_mon = None\n health_monitor = pool.health_monitor\n if health_monitor is not None:\n health_mon = 'health_mon' + health_monitor.id\n store[health_mon] = health_monitor\n (pool_delete, pool_store) = self._pool_flows.get_cascade_delete_pool_internal_flow(\n pool_name, members, pool_listener_name, health_mon)\n store.update(pool_store)\n pools_listeners_delete_flow.add(pool_delete)\n\n # loop for loadbalancer's listener deletion\n listeners_delete_flow = unordered_flow.Flow('listener_delete_flow')\n for listener in lb.listeners:\n listener_name = 'listener_' + listener.id\n store[listener_name] = listener\n listeners_delete_flow.add(\n self._listener_flows.get_cascade_delete_listener_internal_flow(\n listener, listener_name))\n\n pools_listeners_delete_flow.add(listeners_delete_flow)\n # move UpdateVIPForDelete() out from unordered_flow loop, call it multiple time at the\n # same time will add/del same rules at the same time and causing error from neutron.\n if lb.amphorae:\n pools_listeners_delete_flow.add(a10_database_tasks.GetLatestLoadBalancer(\n requires=constants.LOADBALANCER,\n provides=constants.LOADBALANCER))\n pools_listeners_delete_flow.add(network_tasks.UpdateVIPForDelete(\n requires=constants.LOADBALANCER))\n\n return (pools_listeners_delete_flow, store)"
] |
[
"0.76726115",
"0.62180847",
"0.6021032",
"0.595186",
"0.5938151",
"0.5872455",
"0.56657547",
"0.56461865",
"0.55808574",
"0.55471516",
"0.5535369",
"0.5481285",
"0.54216963",
"0.5404566",
"0.53681505",
"0.5311279",
"0.52753246",
"0.52654445",
"0.5250673",
"0.5249863",
"0.5236804",
"0.5215574",
"0.5200921",
"0.51984245",
"0.5161316",
"0.5155238",
"0.51446855",
"0.51390463",
"0.51234525",
"0.5114961"
] |
0.7043698
|
1
|
Deletes balancer for current environment.
|
def delete_balancer(ctx):
    if self.balancer_exists():
        self.delete_balancer()
        ctx.info('Successfully deleted load balancer {}:'.format(self.get_balancer_name()))
    else:
        ctx.info('Load balancer {} does not exist, nothing to delete.'.format(
            self.get_balancer_name()
        ))
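The wrapper above delegates to self.delete_balancer(); a sketch of the boto3 call that method presumably issues (compare the first negative below), with a placeholder ARN:

import boto3

# Hypothetical direct deletion of an application load balancer by ARN.
elbv2 = boto3.client('elbv2')
elbv2.delete_load_balancer(
    LoadBalancerArn='arn:aws:elasticloadbalancing:us-east-1:111111111111:loadbalancer/app/example/abc123'
)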
|
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
|
[
"def delete_balancer(self):\n response = self.client.delete_load_balancer(\n LoadBalancerArn=self.get_balancer_arn()\n )\n assert response['ResponseMetadata']['HTTPStatusCode'] == 200",
"def delete(ctx):\n delete_listeners(ctx)\n delete_balancer(ctx)\n delete_target_groups(ctx)\n\n ctx.info('Load balancers deletion completed.')",
"def delete(self):\r\n return self.connection.delete_load_balancer(self.name)",
"def delete_loadbalancer(self, context, lb):\n deployment_model = self._get_setting(\n lb.tenant_id, \"lbaas_settings\", \"deployment_model\"\n )\n hostnames = self._get_hostname(lb)\n if deployment_model in [\"PER_TENANT\", \"PER_SUBNET\"]:\n vapv = self._get_vapv(hostnames)\n if not vapv.tip_group.list():\n self._destroy_vapv(hostnames, lb)\n elif deployment_model == \"PER_TENANT\":\n # Delete subnet ports if no longer required\n if self.openstack_connector.subnet_in_use(lb) is False:\n self._detach_subnet_port(vapv, hostnames, lb)\n for hostname in hostnames:\n port_ids = self.openstack_connector.get_server_port_ids(\n hostname\n )\n self.openstack_connector.delete_ip_from_ports(\n lb.vip_address, port_ids\n )\n elif deployment_model == \"PER_LOADBALANCER\":\n self._destroy_vapv(hostnames, lb)",
"def delete(self, request, loadbalancer_id):\n conn = get_sdk_connection(request)\n conn.load_balancer.delete_load_balancer(loadbalancer_id,\n ignore_missing=True,\n cascade=True)",
"def delete_loadbalancer(self, context, loadbalancer):\n LOG.info(\"Received request 'Delete Loadbalancer' for LB:%(lb)s \",\n {'lb': loadbalancer['id']})\n\n arg_dict = {'context': context,\n lb_const.LOADBALANCER: loadbalancer,\n }\n self._send_event(lb_const.EVENT_DELETE_LOADBALANCER_V2, arg_dict,\n serialize=True, binding_key=loadbalancer['id'],\n key=loadbalancer['id'])",
"def delete(self, load_balancer):\n # type: (LoadBalancer) -> BoundAction\n self._client.request(\n url=\"/load_balancers/{load_balancer_id}\".format(load_balancer_id=load_balancer.id), method=\"DELETE\"\n )\n return True",
"def delete(self, loadbalancer_id):\n response.status = 201",
"def delete(self, request, pool_id):\n conn = get_sdk_connection(request)\n retry_on_conflict(\n conn, conn.load_balancer.delete_pool,\n pool_id,\n load_balancer_getter=pool_get_load_balancer_id,\n resource_id=pool_id)",
"def delete(self):\n self._lbcall('delete_pool', [self._name])",
"def resource_cleanup(self):\n for lb in self.loadbalancers:\n self.octavia_client.load_balancer_delete(lb['id'], cascade=True)\n try:\n self.wait_for_lb_resource(\n self.octavia_client.load_balancer_show, lb['id'],\n provisioning_status='DELETED')\n except osc_lib.exceptions.NotFound:\n pass\n for fip in self.fips:\n self.neutron_client.delete_floatingip(fip)\n # we run the parent resource_cleanup last as it will remove instances\n # referenced as members in the above cleaned up load balancers\n super(LBAASv2Test, self).resource_cleanup()",
"def delete(self, request, flavor_id):\n conn = get_sdk_connection(request)\n conn.load_balancer.delete_flavor(flavor_id,\n ignore_missing=True)",
"def delete_access_list(self, loadbalancer):\n return loadbalancer.delete_access_list()",
"def reset(ctx):\n delete(ctx)\n create(ctx)\n\n ctx.info('Load balancers reset completed.')",
"def delete(env, identifier, listener):\n\n mgr = SoftLayer.LoadBalancerManager(env.client)\n uuid, _ = mgr.get_lbaas_uuid_id(identifier)\n try:\n mgr.remove_lb_listener(uuid, listener)\n click.secho(\"Success\", fg='green')\n except SoftLayerAPIError as exception:\n click.secho(f\"ERROR: {exception.faultString}\", fg='red')",
"def delete(self):\n\n uri = \"{0}/{1}\".format(self.base_uri, self.ip_or_ifname_or_group_name)\n\n try:\n response = self.session.request(\"DELETE\", uri)\n\n except Exception as e:\n raise ResponseError(\"DELETE\", e)\n\n if not utils._response_ok(response, \"DELETE\"):\n raise GenericOperationError(response.text, response.status_code)\n\n logging.info(\"SUCCESS: Deleting %s\", self)\n\n # Delete back reference from BGP_Routers\n for neighbor in self.__parent_bgp_router.bgp_neighbors:\n if (\n neighbor.ip_or_ifname_or_group_name\n == self.ip_or_ifname_or_group_name\n ):\n self.__parent_bgp_router.bgp_neighbors.remove(neighbor)\n\n # Delete object attributes\n utils.delete_attrs(self, self.config_attrs)",
"def pre_loadbalancer_pool_delete(self, resource_id):\n pass",
"def delete(self, params=None):\n self.logger.debug('Deleting %s with parameters: %s'\n % (self.type_name, params))\n return self.client.delete_load_balancer_policy(**params)",
"def delete_health_monitor(self, loadbalancer):\n return loadbalancer.delete_health_monitor()",
"def delete_listeners(ctx):\n if self.balancer_exists():\n self.delete_listeners()\n ctx.info('Deleted all listeners for load balancer {}:'.format(self.get_balancer_name()))\n else:\n ctx.info('Load balancer {} does not exist, no listeners to remove.'.format(self.get_balancer_name()))",
"def detach_elastic_load_balancer(ElasticLoadBalancerName=None, LayerId=None):\n pass",
"def delete(self):\n self.model.remove_agents(self)",
"def delete_target_groups(ctx):\n self.delete_target_groups()\n ctx.info('Deleted target groups for the load balancer {}:'.format(self.get_balancer_name()))",
"def teardown_method(self):\n\n for bp_name in self.created_bp_list:\n LOG.info(\"Deleting Blueprint {}\".format(bp_name))\n runner = CliRunner()\n result = runner.invoke(cli, [\"delete\", \"bp\", bp_name])\n assert result.exit_code == 0\n\n for app_name in self.created_app_list:\n LOG.info(\"Deleting app {}\".format(app_name))\n self._delete_app(app_name)\n\n self.created_app_list = []\n self.created_bp_list = []",
"def teardown(client, blueprint_dir):\n logger.info(\"Running teardown on: %s\", blueprint_dir)\n state = get_state(blueprint_dir)\n if not state or \"network_name\" not in state:\n return\n all_services = client.service.list()\n for service in all_services:\n if service.network.name == state[\"network_name\"]:\n client.service.destroy(service)\n network = client.network.get(state[\"network_name\"])\n if network:\n client.network.destroy(network)\n save_state({}, blueprint_dir)",
"def delete(profile, environment, force=True):\n client = boto3client.get(\"elasticbeanstalk\", profile)\n params = {}\n params[\"EnvironmentName\"] = environment\n params[\"TerminateResources\"] = force\n return client.terminate_environment(**params)",
"def delete_connection_throttle(self, loadbalancer):\n return loadbalancer.delete_connection_throttle()",
"def delete_asg(client, asg):\n if len(asg['LoadBalancerNames']) > 0:\n client.detach_load_balancers(\n AutoScalingGroupName=asg['AutoScalingGroupName'],\n LoadBalancerNames=asg['LoadBalancerNames'],\n )\n client.update_auto_scaling_group(\n AutoScalingGroupName=asg['AutoScalingGroupName'],\n MinSize=0,\n MaxSize=0,\n DesiredCapacity=0,\n )\n client.resume_processes(\n AutoScalingGroupName=asg['AutoScalingGroupName'],\n )\n\n wait_for_instances(client, asg, 'Terminated')\n\n client.delete_auto_scaling_group(\n AutoScalingGroupName=asg['AutoScalingGroupName'],\n )",
"def delete(self):\n for i in set(self.instances.values()):\n i.delete()\n shutil.rmtree(self.dirpath, True)",
"def delete_entity(self, context, lb_obj):\n resource_path = \"%s/%s/%s\" % (RESOURCE_PREFIX, LBS_RESOURCE, lb_obj.id)\n msg = _(\"NetScaler driver lb_obj removal: %s\") % lb_obj.id\n LOG.debug(msg)\n self.client.remove_resource(context.tenant_id, resource_path)"
] |
[
"0.75917363",
"0.75186515",
"0.73194486",
"0.7260747",
"0.6964305",
"0.67855936",
"0.67139965",
"0.66862214",
"0.6370703",
"0.6285855",
"0.5962687",
"0.5895908",
"0.58747965",
"0.58266735",
"0.5814123",
"0.58118564",
"0.5759592",
"0.5751461",
"0.5717588",
"0.5677057",
"0.5676532",
"0.566068",
"0.5655436",
"0.56493396",
"0.5644034",
"0.5582409",
"0.55493885",
"0.5512825",
"0.5445437",
"0.54402095"
] |
0.7533795
|
1
|
Describes balancer for current environment.
|
def describe_balancer(ctx):
    data = self.get_balancer_info()
    if data is not None:
        ctx.info('Load balancer {} details:'.format(self.get_balancer_name()))
        ctx.pp.pprint(data)
    else:
        ctx.info('Load balancer {} does not exist.'.format(self.get_balancer_name()))
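A sketch of the lookup behind self.get_balancer_info(), assuming a placeholder balancer name: describe the load balancer by name and treat a ClientError as "does not exist" (compare the get_balancer_info negative below):

import boto3
from botocore.exceptions import ClientError

# Hypothetical lookup: fetch a load balancer's details by name, falling back
# to None when it does not exist; the name is a placeholder.
elbv2 = boto3.client('elbv2')
try:
    info = elbv2.describe_load_balancers(Names=['myproject-staging'])['LoadBalancers'][0]
except ClientError:
    info = None
print(info or 'Load balancer does not exist.')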
|
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
|
[
"def get_balancer_name(self):\n return '{}-{}'.format(\n self.config['namespace'],\n self.get_current_env(),\n )",
"def balancer():\n pass",
"def load_balancer_name(self) -> str:\n return pulumi.get(self, \"load_balancer_name\")",
"def balancer_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"balancer_id\")",
"def get_balancer_info(self):\n try:\n response = self.client.describe_load_balancers(\n Names=[self.get_balancer_name()],\n )\n assert response['ResponseMetadata']['HTTPStatusCode'] == 200\n\n vpc_id = self.get_vpc_id()\n balancers = [balancer for balancer in response['LoadBalancers'] if balancer['VpcId'] == vpc_id]\n\n return balancers[0]\n except ClientError:\n self.logger.debug('Unable to find load balancer {}.'.format(self.get_balancer_name()))\n return None",
"def get_balancer_arn(self):\n return self.get_balancer_info()['LoadBalancerArn']",
"def create_balancer(ctx):\n if not self.balancer_exists():\n data = self.create_balancer()\n ctx.info('Successfully created load balancer {}:'.format(self.get_balancer_name()))\n ctx.pp.pprint(data)\n else:\n ctx.info('Load balancer {} already exists, nothing to create.'.format(\n self.get_balancer_name()\n ))",
"def create_balancer(self):\n app_env = self.get_current_env()\n balancer_name = self.get_balancer_name()\n subnet_ids = self.get_subnet_ids()\n\n response = self.client.create_load_balancer(\n Name=balancer_name,\n Subnets=subnet_ids,\n SecurityGroups=[self.get_security_group_id(self.get_security_group_short_name())],\n Scheme='internet-facing',\n Tags=[\n {\n 'Key': 'chops-aws-project',\n 'Value': self.get_aws_project_name(),\n },\n {\n 'Key': 'environment',\n 'Value': app_env,\n },\n ],\n Type='application',\n IpAddressType='ipv4',\n )\n assert response['ResponseMetadata']['HTTPStatusCode'] == 200\n\n return response['LoadBalancers'][0]",
"def load_balancer_id(self) -> str:\n return pulumi.get(self, \"load_balancer_id\")",
"def load_balancer_id(self) -> str:\n return pulumi.get(self, \"load_balancer_id\")",
"def load_balancer_id(self) -> str:\n return pulumi.get(self, \"load_balancer_id\")",
"def describe_listeners(ctx):\n data = self.describe_listeners()\n ctx.info('Listeners details for load balancer {}:'.format(self.get_balancer_name()))\n ctx.pp.pprint(data)",
"def load_balancer_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"load_balancer_id\")",
"def __init__(self, machine, balancer):\r\n self._machine = machine\r\n self._balancer = balancer",
"def get_balancer_dns(self):\n return self.get_balancer_info()['DNSName']",
"def load_balancer_id(self):\n return self._load_balancer_id",
"def load_balancer_example(lb_info, lb_id, status,\n current_time):\n lb_example = {\"name\": lb_info[\"name\"],\n \"id\": lb_id,\n \"protocol\": lb_info[\"protocol\"],\n \"port\": lb_info.get(\"port\", 80),\n \"algorithm\": lb_info.get(\"algorithm\") or \"RANDOM\",\n \"status\": status,\n \"cluster\": {\"name\": \"test-cluster\"},\n \"timeout\": lb_info.get(\"timeout\", 30),\n \"created\": {\"time\": current_time},\n \"virtualIps\": [{\"address\": \"127.0.0.1\",\n \"id\": 1111, \"type\": \"PUBLIC\", \"ipVersion\": \"IPV4\"},\n {\"address\": \"0000:0000:0000:0000:1111:111b:0000:0000\",\n \"id\": 1111,\n \"type\": \"PUBLIC\",\n \"ipVersion\": \"IPV6\"}],\n \"sourceAddresses\": {\"ipv6Public\": \"0000:0001:0002::00/00\",\n \"ipv4Servicenet\": \"127.0.0.1\",\n \"ipv4Public\": \"127.0.0.1\"},\n \"httpsRedirect\": lb_info.get(\"httpsRedirect\", False),\n \"updated\": {\"time\": current_time},\n \"halfClosed\": lb_info.get(\"halfClosed\", False),\n \"connectionLogging\": lb_info.get(\"connectionLogging\", {\"enabled\": False}),\n \"contentCaching\": {\"enabled\": False}}\n if lb_info.get(\"metadata\"):\n lb_example.update({\"metadata\": _format_meta(lb_info[\"metadata\"])})\n return lb_example",
"def load_balancer_type(self) -> Optional[pulumi.Input['CloudRunConfigLoadBalancerType']]:\n return pulumi.get(self, \"load_balancer_type\")",
"def create(ctx):\n create_target_groups(ctx)\n create_balancer(ctx)\n create_listeners(ctx)\n\n ctx.info('Load balancers setup completed.')",
"def load_balancer_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"load_balancer_id\")",
"def get_name(self):\n \n return 'Loop-Back'",
"def load_balancer_profile(self) -> Optional[pulumi.Input['ManagedClusterLoadBalancerProfileArgs']]:\n return pulumi.get(self, \"load_balancer_profile\")",
"def nodebalancer_id(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"nodebalancer_id\")",
"def get_elbs(elbclient):\r\n try:\r\n resp = elbclient.describe_load_balancers()\r\n return list(map(\r\n lambda x:x['LoadBalancerName'],\r\n resp['LoadBalancerDescriptions']\r\n ))\r\n except Exception as ex:\r\n print(ex.message)\r\n return None",
"def load_balancer_profile(self) -> Optional[pulumi.Input['LoadBalancerProfileArgs']]:\n return pulumi.get(self, \"load_balancer_profile\")",
"def load_balancing(self) -> pulumi.Input['FrontdoorOriginGroupLoadBalancingArgs']:\n return pulumi.get(self, \"load_balancing\")",
"def describe_target_groups(ctx):\n data = self.get_target_groups_info()\n ctx.info('Target groups details for load balancer {}:'.format(self.get_balancer_name()))\n ctx.pp.pprint(data)",
"def nodebalancer_id(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"nodebalancer_id\")",
"def describe_elastic_load_balancers(StackId=None, LayerIds=None):\n pass",
"def load_balancer_billing_configs(self) -> Sequence['outputs.GetLoadBalancersBalancerLoadBalancerBillingConfigResult']:\n return pulumi.get(self, \"load_balancer_billing_configs\")"
] |
[
"0.7542367",
"0.68922436",
"0.6775603",
"0.6484639",
"0.6443855",
"0.6258168",
"0.61299396",
"0.6095195",
"0.6012776",
"0.6012776",
"0.6012776",
"0.5973077",
"0.59008837",
"0.5856403",
"0.5793592",
"0.56712395",
"0.5651833",
"0.5561907",
"0.5521645",
"0.54867375",
"0.5463622",
"0.5438868",
"0.54209197",
"0.5420232",
"0.5375793",
"0.535071",
"0.53266877",
"0.5306493",
"0.5275169",
"0.5266107"
] |
0.78482044
|
0
|
Creates load balancer target groups for current environment.
|
def create_target_groups(ctx):
    data = self.create_target_groups()
    ctx.info('Created target groups for the load balancer {}:'.format(self.get_balancer_name()))
    ctx.pp.pprint(data)
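A sketch of one create_target_group call as self.create_target_groups() presumably issues per entry of the target-group config (compare the negatives below); Name, Port and VpcId are placeholders:

import boto3

# Hypothetical creation of a single HTTP target group in a given VPC.
elbv2 = boto3.client('elbv2')
response = elbv2.create_target_group(
    Name='myproject-staging-web',
    Protocol='HTTP',
    Port=9000,
    VpcId='vpc-0123456789abcdef0',
)
print(response['TargetGroups'][0]['TargetGroupArn'])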
|
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
|
[
"def create(ctx):\n create_target_groups(ctx)\n create_balancer(ctx)\n create_listeners(ctx)\n\n ctx.info('Load balancers setup completed.')",
"def ensure_target_group_created(vpc, environment):\n name = environment + '-web'\n\n # If it already exists, create returns the existing data\n response = ELB.create_target_group(\n Name=name,\n Protocol='HTTP',\n Port=9000,\n VpcId=vpc.id,\n Matcher={\n 'HttpCode': '200,301'\n }\n )\n\n arn = response['TargetGroups'][0]['TargetGroupArn']\n\n return arn",
"def create_target_groups(self):\n target_groups_config = self.get_target_groups_config()\n vpc_id = self.get_vpc_id()\n response_data = {}\n\n for short_name in target_groups_config.keys():\n target_group_name = self.get_target_group_name(short_name)\n\n if self.target_group_exists(short_name):\n self.logger.info(f'Target group {target_group_name} exists, skipping creation.')\n continue\n\n response = self.client.create_target_group(\n Name=target_group_name,\n VpcId=vpc_id,\n **target_groups_config[short_name],\n )\n assert response['ResponseMetadata']['HTTPStatusCode'] == 200\n\n self.logger.info(f'Target group {target_group_name} created.')\n response_data[target_group_name] = response['TargetGroups']\n\n return response_data",
"def generate_groups(ctx):\n asyncio.run(generate_groups_impl(ctx.obj[\"config\"]))",
"def describe_target_groups(ctx):\n data = self.get_target_groups_info()\n ctx.info('Target groups details for load balancer {}:'.format(self.get_balancer_name()))\n ctx.pp.pprint(data)",
"def update_target_groups():\n\n # detect which region the explorer(s) are located\n for j in range(NUM_OF_SHARDS):\n key_explorer = \"explorers_\" + str(j)\n array_instance_ip = parse_network_config(key_explorer)\n array_instance_id = retrieve_instance_id(array_instance_ip)\n\n reg = retrieve_instance_region(array_instance_ip[0])\n # all nodes registered for the same endpoints should be located in the same region, if not, exit\n verify_nodes_same_region(reg, array_instance_ip)\n\n elbv2_client = boto3.client('elbv2', region_name=reg)\n\n array_target_group = create_name_target_group(j, ID_DOMAIN_NAME)\n pp.pprint(array_target_group)\n\n # 1/3 - retrieve target group arn\n print(\"==== retrieve target group arn\")\n dict_tg_arn = dict()\n for tg in array_target_group:\n resp = elbv2_client.describe_target_groups(Names=[tg])\n tg_arn = resp[\"TargetGroups\"][0][\"TargetGroupArn\"]\n dict_tg_arn[tg] = tg_arn\n pp.pprint(dict_tg_arn)\n\n # 2/3 - find all the instances\n print(\"==== find all the instances current registered\")\n dict_tg_instanceid = defaultdict(list)\n for tg in array_target_group:\n resp = elbv2_client.describe_target_health(TargetGroupArn=dict_tg_arn[tg])\n num_of_targets = len(resp[\"TargetHealthDescriptions\"])\n for k in range(num_of_targets):\n instance_id = resp[\"TargetHealthDescriptions\"][k][\"Target\"][\"Id\"]\n dict_tg_instanceid[tg].append(instance_id)\n pp.pprint(dict_tg_instanceid)\n\n # 3/3 - deregister all instances, then we can have a clean and nice target group\n print(\"==== deregister all instances\")\n for tg in array_target_group:\n for instance_id in dict_tg_instanceid[tg]:\n try:\n resp = elbv2_client.deregister_targets(TargetGroupArn=dict_tg_arn[tg],\n Targets=[{'Id': instance_id}])\n except Exception as e:\n print(\"Unexpected error to deregister the instance: %s\" % e)\n\n # 3/3 - register instances into the tg\n print(\"==== register all instances\")\n # outer for loop: loop through 2 tg, https and wss\n # inner loop: add every single instance id into each tg\n for tg in array_target_group:\n for instance in array_instance_id:\n response = elbv2_client.register_targets(\n TargetGroupArn=dict_tg_arn[tg],\n Targets=[{'Id': instance, }, ]\n )",
"def create_listeners(self):\n target_groups_config = self.get_target_groups_config()\n balancer_arn = self.get_balancer_arn()\n response_data = {}\n\n for short_name in target_groups_config.keys():\n target_group_name = self.get_target_group_name(short_name)\n\n response = self.client.create_listener(\n LoadBalancerArn=balancer_arn,\n DefaultActions=[\n {\n 'Type': 'forward',\n 'TargetGroupArn': self.get_target_group_arn(short_name)\n }\n ],\n **target_groups_config[short_name],\n )\n assert response['ResponseMetadata']['HTTPStatusCode'] == 200\n\n self.logger.info('Target group {group} bound to {balancer} load balancer.'.format(\n group=target_group_name,\n balancer=self.get_balancer_name(),\n ))\n response_data[target_group_name] = response['Listeners']\n\n return response_data",
"def delete_target_groups(ctx):\n self.delete_target_groups()\n ctx.info('Deleted target groups for the load balancer {}:'.format(self.get_balancer_name()))",
"def init_valet_groups(self):\n\n for rk, r in self.stack.items():\n properties = r.get(\"properties\", {})\n metadata = properties.get(\"metadata\", {})\n\n if len(metadata) > 0:\n valet_rules = metadata.get(\"valet_groups\", None)\n\n if valet_rules is not None and valet_rules != \"\":\n rule_list = []\n if isinstance(valet_rules, six.string_types):\n rules = valet_rules.split(\",\")\n for gr in rules:\n rule_list.append(gr.strip())\n else:\n self.status = \"incorrect valet group metadata format\"\n self.logger.error(self.status)\n return\n\n # Check rule validation of valet_groups.\n self.status = self.resource.check_valid_rules(self.tenant_id,\n rule_list,\n use_ex=self.use_dha)\n if self.status != \"ok\":\n self.logger.error(self.status)\n return\n\n self.status = self._make_valet_groups(properties.get(\"name\"),\n properties[\"availability_zone\"][0],\n rule_list)\n if self.status != \"ok\":\n self.logger.error(self.status)\n return\n\n # Check and create server groups if they do not exist.\n scheduler_hints = properties.get(\"scheduler_hints\", {})\n if len(scheduler_hints) > 0:\n for hint_key in scheduler_hints.keys():\n if hint_key == \"group\":\n hint = scheduler_hints[hint_key]\n self.status = self._make_group(properties.get(\"name\"), hint)\n if self.status != \"ok\":\n self.logger.error(self.status)\n return",
"def make_groups(self):\n for g in self.groups:\n self.add_group(groupname=g['groupname'],\n grouptitle=g['grouptitle'],\n path_to_group=g['path'])",
"def create_groups():\n groups = [\"iDRAC-Administrators\", \"iDRAC-Operators\", \"iDRAC-Readonly\"]\n group_priviledges = [\"0x000001ff\", \"0x000000f9\", \"0x00000001\"]\n for host in online_hosts:\n for index in [1,2,3]:\n print index,\" \", groups[index-1]\n with settings(warn_only=True):\n\n result1 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgStandardSchema -i \"+str(index) +\" -o cfgSSADRoleGroupName \"+groups[index-1])\n if result1.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for RoleGroupName failed \")\n\n result2 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgStandardSchema -i \"+str(index) +\" -o cfgSSADRoleGroupDomain corp.inmobi.com\")\n if result2.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for RoleGroupDomain failed \")\n\n result3 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgStandardSchema -i \"+str(index) +\" -o cfgSSADRoleGroupPrivilege \"+ group_priviledges[index-1])\n if result3.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for RoleGroupPriviledge failed \")",
"def create_target_group(region_name, app_name, instance_ids=None,\n image_name=None, identities_url=None, ssh_key_name=None,\n instance_type=None,\n subnet_id=None, vpc_id=None, vpc_cidr=None,\n tag_prefix=None,\n dry_run=False):\n if not vpc_id:\n vpc_id, _ = _get_vpc_id(tag_prefix, region_name=region_name)\n\n elb_client = boto3.client('elbv2', region_name=region_name)\n\n resp = elb_client.create_target_group(\n Name=app_name,\n Protocol='HTTPS',\n Port=443,\n VpcId=vpc_id,\n TargetType='instance',\n HealthCheckEnabled=True,\n HealthCheckProtocol='HTTP',\n HealthCheckPort='80',\n HealthCheckPath='/',\n #HealthCheckIntervalSeconds=30,\n #HealthCheckTimeoutSeconds=5,\n #HealthyThresholdCount=5,\n #UnhealthyThresholdCount=2,\n Matcher={\n 'HttpCode': '200'\n })\n target_group = resp.get('TargetGroups')[0].get('TargetGroupArn')\n LOGGER.info(\"%s found/created target group %s for %s\",\n tag_prefix, target_group, app_name)\n\n # It is time to attach the instance that will respond to http requests\n # to the target group.\n if not instance_ids:\n instance_ids = create_webfront_resources(\n region_name, app_name, image_name,\n identities_url=identities_url, ssh_key_name=ssh_key_name,\n instance_type=instance_type,\n web_subnet_id=subnet_id, vpc_id=vpc_id, vpc_cidr=vpc_cidr,\n tag_prefix=tag_prefix,\n dry_run=dry_run)\n if instance_ids:\n for _ in range(0, NB_RETRIES):\n # The EC2 instances take some time to be fully operational.\n try:\n resp = elb_client.register_targets(\n TargetGroupArn=target_group,\n Targets=[{\n 'Id': instance_id,\n 'Port': 443\n } for instance_id in instance_ids])\n LOGGER.info(\"%s registers instances %s with target group %s\",\n tag_prefix, instance_ids, target_group)\n break\n except botocore.exceptions.ClientError as err:\n LOGGER.info(\"%s waiting for EC2 instances %s to be\"\\\n \" in running state ...\", tag_prefix, instance_ids)\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidTarget':\n raise\n time.sleep(RETRY_WAIT_DELAY)\n\n return target_group",
"def test_ipam_vlan_groups_create(self):\n pass",
"def ensure_load_balancer_created(vpc, security_group, subnet1, subnet2, target_group_arn, ssl_certificate_arn, environment):\n name = environment + '-load-balancer'\n\n # If it already exists, create returns the existing data\n response = ELB.create_load_balancer(\n Name=name,\n Subnets=[ subnet1.id, subnet2.id ],\n SecurityGroups=[ security_group.id ],\n IpAddressType='dualstack',\n Tags=[\n { 'Key': 'Name', 'Value': name },\n { 'Key': 'Environment', 'Value': environment }\n ]\n )\n\n load_balancer = response['LoadBalancers'][0]\n arn = load_balancer['LoadBalancerArn']\n\n # There seems to be no harm in creating listeners if they already exist\n ELB.create_listener(\n LoadBalancerArn=arn,\n Protocol='HTTP',\n Port=80,\n DefaultActions=[{ 'Type': 'forward', 'TargetGroupArn': target_group_arn } ]\n )\n\n ELB.create_listener(\n LoadBalancerArn=arn,\n Protocol='HTTPS',\n Port=443,\n SslPolicy='ELBSecurityPolicy-TLS-1-2-2017-01',\n Certificates=[ { 'CertificateArn': ssl_certificate_arn } ],\n DefaultActions=[ { 'Type': 'forward', 'TargetGroupArn': target_group_arn } ]\n )\n\n return load_balancer",
"def ensure_security_groups_created(vpc, environment):\n conglomerate_name = environment + '-conglomerate'\n load_balancer_name = environment + '-load-balancer'\n\n existing = vpc.security_groups.filter(Filters=[\n { 'Name': 'group-name', 'Values': [ conglomerate_name, load_balancer_name ] }\n ])\n ret = {}\n for security_group in existing:\n if security_group.group_name == conglomerate_name:\n ret['conglomerate'] = security_group\n elif security_group.group_name == load_balancer_name:\n ret['load-balancer'] = security_group\n else:\n raise Exception(\"Unexpected security group name: \" + security_group.group_name)\n\n if not ret['conglomerate']:\n # untested\n ret['conglomerate'] = vpc.create_security_group(\n GroupName=conglomerate_name,\n Description=conglomerate_name\n )\n if not ret['load-balancer']:\n # untested\n ret['load-balancer'] = vpc.create_security_group(\n GroupName=load_balancer_name,\n Description=load_balancer_name\n )\n\n try:\n ret['conglomerate'].authorize_ingress(IpPermissions=[\n { 'IpProtocol': 'icmp', 'FromPort': 0, 'ToPort': 255, 'IpRanges': [ { 'CidrIp': '0.0.0.0/0' } ] },\n { 'IpProtocol': 'tcp', 'FromPort': 9000, 'ToPort': 9000, 'UserIdGroupPairs': [ { 'GroupId': ret['load-balancer'].id } ] },\n ])\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] != 'InvalidPermission.Duplicate':\n raise e\n\n try:\n ret['load-balancer'].authorize_ingress(IpPermissions=[\n { 'IpProtocol': 'tcp', 'FromPort': 80, 'ToPort': 80 },\n { 'IpProtocol': 'tcp', 'FromPort': 443, 'ToPort': 443 },\n { 'IpProtocol': 'icmp', 'FromPort': 0, 'ToPort': 255, 'IpRanges': [ { 'CidrIp': '0.0.0.0/0' } ] },\n { 'IpProtocol': 'tcp', 'FromPort': 1024, 'ToPort': 65535, 'IpRanges': [ { 'CidrIp': Constants['VpcCidr'] } ] },\n ])\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] != 'InvalidPermission.Duplicate':\n raise e\n\n return ret",
"def setup(self):\n base = automap_base()\n engine = create_engine(\"mysql+pymysql://\" + csconfig.config.db_user + \":\" +\n csconfig.config.db_password + \"@\" +\n csconfig.config.db_host + \":\" +\n str(csconfig.config.db_port) +\n \"/\" + csconfig.config.db_name)\n base.prepare(engine, reflect=True)\n session = Session(engine)\n cloud_yaml = base.classes.csv2_group_resource_yaml\n\n for cloud in self.group_resources:\n cloud_yamls = session.query(cloud_yaml).\\\n filter(cloud_yaml.group_name == self.name,\n cloud_yaml.cloud_name == cloud.cloud_name)\n cloud_yaml_list = []\n for yam in cloud_yamls:\n cloud_yaml_list.append([yam.yaml_name, yam.yaml, yam.mime_type])\n if cloud.cloud_type == 'localhost':\n newcloud = cloudscheduler.localhostcloud.LocalHostCloud(extrayaml=cloud_yaml_list, resource=cloud)\n else:\n newcloud = cloudscheduler.openstackcloud.\\\n OpenStackCloud(extrayaml=cloud_yaml_list, resource=cloud)\n self.clouds[newcloud.name] = newcloud\n self.log.debug(\"Added all clouds for group: %s\", self.name)",
"def create_target_to_storage(self, port, connector, hba_ids):\n target_name = self.create_target_name(connector)\n body = {'portId': port, 'hostGroupName': target_name}\n if hba_ids:\n body['iscsiName'] = '%(id)s%(suffix)s' % {\n 'id': hba_ids,\n 'suffix': self.driver_info['target_iqn_suffix'],\n }\n try:\n gid = self.client.add_host_grp(body, no_log=True)\n except Exception:\n params = {'portId': port}\n host_grp_list = self.client.get_host_grps(params)\n for host_grp_data in host_grp_list:\n if host_grp_data['hostGroupName'] == target_name:\n return target_name, host_grp_data['hostGroupNumber']\n else:\n raise\n return target_name, gid",
"def get_target_groups_config(self):\n return self.config['target_groups']",
"def create_pool(self, argu):\n\n if not argu:\n LOG.error(\"In create_pool, it should not pass the None.\")\n\n cmd_apv_create_group = ADCDevice.create_group(argu['pool_id'], argu['lb_algorithm'], argu['session_persistence_type'])\n for base_rest_url in self.base_rest_urls:\n self.run_cli_extend(base_rest_url, cmd_apv_create_group)\n\n # create policy\n self._create_policy(argu['pool_id'],\n argu['listener_id'],\n argu['session_persistence_type'],\n argu['lb_algorithm'],\n argu['cookie_name']\n )",
"def _generate_elb_instances_and_sg(resource, session):\n for instance in resource.describe_load_balancers()[\"LoadBalancers\"]:\n for security_group in instance.get(\"SecurityGroups\", []):\n yield instance, security_group, _get_sg_name(security_group, session)",
"def setup_spritegroups(self):\n self.ground_step_pipe_group = pygame.sprite.Group(self.ground_group,\n self.pipe_group,\n self.step_group)",
"def create_default_groups():\n from flaskbb.fixtures.groups import fixture\n result = []\n for key, value in fixture.items():\n group = Group(name=key)\n\n for k, v in value.items():\n setattr(group, k, v)\n\n group.save()\n result.append(group)\n return result",
"def test_create_resource_group(self):\n pass",
"def _make_valet_groups(self, _rk, _az, _rule_list):\n\n for rn in _rule_list:\n rule = self.resource.group_rules[rn]\n\n # Valet group naming convention.\n # It contains datacenter id and availability_zone\n # followed by service id and vnf id\n # depending on scope.\n # And concatenate rule name.\n # Exception: quorum-diversity\n\n group_id = self.datacenter_id + \":\"\n\n if rule.rule_type != \"quorum-diversity\":\n group_id += _az + \":\"\n\n if rule.app_scope == \"lcp\":\n group_id += rn\n elif rule.app_scope == \"service\":\n group_id += self.service_instance_id + \":\" + rn\n elif rule.app_scope == \"vnf\":\n group_id += self.service_instance_id + \":\" + self.vnf_instance_id + \":\" + rn\n else:\n return \"unknown app_scope value\"\n\n if group_id in self.groups.keys():\n group = self.groups[group_id]\n else:\n group = Group(group_id)\n group.group_type = rule.rule_type\n group.factory = \"valet\"\n group.level = rule.level\n\n self.groups[group_id] = group\n\n group.server_list.append(self.app_name + \":\" + _rk)\n\n return \"ok\"",
"def create_groups(**kwargs):\n for gname in SEC_GROUP_NAMES.itervalues():\n Group.objects.get_or_create(name=gname)",
"def create_initial_groups():\n \n from base import get_group_database, get_user_database\n import api\n \n # we want any groups we create in here to be active immediately\n save_min_sponsors = Group._min_sponsors\n Group._min_sponsors = 1\n \n user_db = get_user_database()\n group_db = get_group_database()\n \n user_admin = user_db['admin']\n \n def create_group(user_id, name, desc, owner, parent_id, join_pol, memb_vis, memb_edit=''):\n if not group_db.has_key(user_id):\n g = group_db.create_group(user_id=user_id,\n name=name,\n description=desc,\n owner=owner,\n no_pay=True)\n group_db.force_accept(g)\n if parent_id:\n group_db.join_group(g, group_db[parent_id], force=1)\n \n g = group_db[user_id]\n if join_pol:\n api.group_set_join_policy(user_admin, g, join_pol)\n if join_pol == 'open':\n # if membership is open, allow non-members to read\n api.group_set_other_perms(user_admin, g, 'ro')\n if memb_vis:\n api.group_set_membership_visible(user_admin, g, memb_vis)\n if desc:\n api.group_set_settings(user_admin, g, description=desc)\n if memb_edit:\n api.group_set_member_edit(user_admin, g, memb_edit)\n \n # set date of formation\n create = datetime(2004, 05, 10, 12, 0, 0)\n g.date = create\n \n \n groups = [\n ('top', 'Top', 'This group contains the top-level groups.', user_admin, None, '', 'open', ''),\n ('regional', 'Regional', 'Contains groups with a regional focus.', user_admin, 'top', '', 'open', ''),\n ('orgs', 'Organizations', 'Contains categories of organizations.', user_admin, 'top', '', 'open', ''),\n ('community', 'Community', 'Contains groups that are focused or based on ned.com.', user_admin, 'top', '', 'open', ''),\n ('issues', 'Issues', 'Contains groups focused on particular issues.', user_admin, 'top', '', 'open', ''),\n ('general', 'General', 'Contains groups that don\\'t belong in other categories.', user_admin, 'top', 'open', 'open', ''),\n ('general-other', 'General', 'Contains groups that don\\'t belong in other categories.', user_admin, 'general', 'open', 'open', ''),\n ('help', 'Help', 'Contains site help.', user_admin, 'community', '', 'open', ''),\n ('community-general', 'Community - General',\n '', user_admin, 'community', 'open', 'open', 'member'),\n ('suggestions', 'Suggestions', 'For community suggestions.', user_admin, 'community-general', '', 'open', ''),\n ('public', 'Public sector',\n 'Groups operating in the public sector should join this group.', user_admin, 'orgs', 'open', 'open', 'member'),\n ('private', 'Private sector',\n 'Groups operating in the private sector should join this group.', user_admin, 'orgs', 'open', 'open', 'member'),\n ('social', 'Social sector',\n 'Groups operating in the social sector should join this group.', user_admin, 'orgs', 'open', 'open', 'member'),\n ('orgs-general', 'Organizations - General',\n \"For organizations that don't fit in other categories.\", user_admin, 'orgs', 'open', 'open', 'member'),\n ('issues-business', 'Business',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-cyf', 'Children - Youth - Families',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-education', 'Education',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-env', 'Environment - Conservation',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-health', 'Health Care',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-pol', 'Policy - Politics',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-religion', 'Religion',\n '', user_admin, 'issues', 'open', 
'open', 'member'),\n ('issues-soc', 'Social Justice - Human Services',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-tech', 'Technology',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-general', 'Issues - General',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('ned', '<ned> Network',\n '', user_admin, '', '', '', ''),\n ('ned-internal', 'Ned - Internal',\n '', user_admin, '', '', '', ''),\n ('sitedev', 'Site Development',\n '', user_admin, 'ned-internal', '', '', ''),\n ]\n \n for user_id, name, desc, owner, parent_id, join_pol, memb_vis, memb_edit in groups:\n create_group(user_id, name, desc, owner, parent_id, join_pol, memb_vis, memb_edit)\n \n # Help group\n g_help = group_db['help']\n api.group_set_anon_read(user_admin, g_help, True)\n \n # ON groups\n g_on = group_db['ned']\n group_db.join_group(g_on, group_db['private'], force=1)\n group_db.join_group(g_on, group_db['public'], force=1)\n group_db.join_group(g_on, group_db['social'], force=1)\n api.group_set_owners_by_user_id(user_admin, g_on, ['admin', 'jimc'])\n api.group_set_join_policy(user_admin, g_on, 'owner')\n api.group_set_invite_policy(user_admin, g_on, 'owner')\n api.group_set_membership_visible(user_admin, g_on, 'open')\n api.group_set_member_edit(user_admin, g_on, True)\n api.group_set_anon_read(user_admin, g_on, True)\n \n g_on_int = group_db['ned-internal']\n api.group_set_owners_by_user_id(user_admin, g_on_int, ['admin', 'jimc'])\n api.group_set_join_policy(user_admin, g_on_int, 'owner')\n api.group_set_invite_policy(user_admin, g_on_int, 'owner')\n api.group_set_membership_visible(user_admin, g_on_int, 'member')\n api.group_set_member_edit(user_admin, g_on_int, True)\n api.group_set_anon_read(user_admin, g_on_int, False)\n \n g_sitedev = group_db['sitedev']\n api.group_set_owners_by_user_id(user_admin, g_sitedev, ['admin', 'jimc'])\n \n Group._min_sponsors = save_min_sponsors",
"def create_endpoints_new_network():\n\n for i in range(NUM_OF_SHARDS):\n key_explorer = \"explorers_\" + str(i)\n array_instance_ip = parse_network_config(key_explorer)\n array_instance_id = retrieve_instance_id(array_instance_ip)\n\n # 0/ - detect region of explorers\n reg = retrieve_instance_region(array_instance_ip[0])\n # all nodes registered for the same endpoints should be located in the same region, if not, gracefully exit\n # verify_nodes_same_region(reg, array_instance_ip)\n\n print(\"\\n######################################### Creating complete pipeline for shard\", str(i),\n \"in AWS region: \", reg, \"#########################################\\n\")\n # 1/ - request certificates\n print(\"\\n==== step 1: request SSL certificates, CertificateArn will be stored into dict_region_sslcerts \\n\")\n domain_name = ''.join(['api.s', str(i), \".\", BASE_DOMAIN_NAME])\n dict_existing_certs = get_existing_certs(reg, domain_name)\n dict_region_sslcerts.clear()\n if dict_existing_certs[domain_name]:\n print(\"[INFO] SSL certificate of\", domain_name, \"exists, skipping..\")\n dict_region_sslcerts[reg].append(dict_existing_certs[domain_name][0])\n else:\n print(\"[INFO] SSL certificate of\", domain_name, \"does NOT exist, requesting..\")\n request_ssl_certificates(reg, domain_name)\n\n print(\"[RESULT] OF STEP 1\")\n pp.pprint(dict_region_sslcerts)\n\n # 2/ - create target group\n dict_region_tgarn.clear()\n array_tgs = create_name_target_group(i, ID_DOMAIN_NAME)\n pp.pprint(array_tgs)\n create_target_group(reg, array_tgs)\n print(\"[RESULT] OF STEP 2\")\n pp.pprint(dict_region_tgarn)\n\n # 3/ - create elb\n dict_region_elb2arn.clear()\n elb2_name = ''.join('s' + str(i) + '-' + ID_DOMAIN_NAME + '-' + reg)\n array_dns_hostedzone = create_elb2(reg, elb2_name)\n print(\"[RESULT] OF STEP 3\")\n pp.pprint(dict_region_elb2arn)\n\n # 4/ - create listener\n dict_region_ListenerArn.clear()\n create_listener(reg, dict_region_elb2arn, dict_region_sslcerts, dict_region_tgarn)\n print(\"[RESULT] OF STEP 4\")\n pp.pprint(dict_region_ListenerArn)\n\n # 5/ - create one more rule for the current listener\n host_header_value = ''.join('ws.s' + str(i) + '.' + BASE_DOMAIN_NAME)\n create_rule(reg, dict_region_ListenerArn, dict_region_tgarn, dict_region_elb2arn, host_header_value)\n\n # 6/ - register explorer instances into the target group\n register_explorers(reg, array_instance_id, dict_region_tgarn)\n\n # 7/ - create entries on Route 53\n array_record_set = create_name_record_set(i, BASE_DOMAIN_NAME)\n create_dns_entries(HOSTED_ZONE_ID, array_record_set, array_dns_hostedzone)",
"def set_up_groups(self):\n groups = []\n groups.append({'groupname': 'th',\n 'grouptitle': 'TH',\n 'path': '/'})\n groups.append({'groupname': 'neutronics',\n 'grouptitle': 'Neutronics',\n 'path': '/'})\n groups.append({'groupname': 'metadata',\n 'grouptitle': 'Simulation Metadata',\n 'path': '/'})\n return groups",
"def split_large_groups(ctx):\n asyncio.run(split_large_groups_impl(ctx.obj[\"config\"]))",
"def create_target(self):\n\n # I used a random number variable (rand_target) in order to randomize the target created each time this function\n # is called.\n stand = StandardTarget()\n strong = StrongTarget()\n safe = SafeTarget()\n bird = Bird()\n\n rand_target = random.randint(1, 4)\n if rand_target == 1:\n self.targets.append(stand)\n elif rand_target == 2:\n self.targets.append(strong)\n elif rand_target == 3:\n self.targets.append(safe)\n elif rand_target == 4:\n self.targets.append(bird)"
] |
[
"0.67362416",
"0.6500324",
"0.6379159",
"0.6233221",
"0.5968006",
"0.5882499",
"0.585659",
"0.57661116",
"0.57613397",
"0.57101214",
"0.56413764",
"0.55966014",
"0.5414972",
"0.5409071",
"0.53946424",
"0.53711516",
"0.5329309",
"0.5283815",
"0.5232401",
"0.5230137",
"0.5227133",
"0.52205557",
"0.52160126",
"0.521212",
"0.5205379",
"0.51973146",
"0.51868016",
"0.51475406",
"0.51061034",
"0.5095434"
] |
0.84025216
|
0
|
Describes target groups for current environment.
|
def describe_target_groups(ctx):
    data = self.get_target_groups_info()
    ctx.info('Target groups details for load balancer {}:'.format(self.get_balancer_name()))
    ctx.pp.pprint(data)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def create_target_groups(ctx):\n data = self.create_target_groups()\n ctx.info('Created target groups for the load balancer {}:'.format(self.get_balancer_name()))\n ctx.pp.pprint(data)",
"def get_target_groups_config(self):\n return self.config['target_groups']",
"def target_group(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"target_group\")",
"def get_target_groups_info(self):\n target_groups_config = self.get_target_groups_config()\n groups_info = {}\n\n for short_name in target_groups_config.keys():\n target_group_name = self.get_target_group_name(short_name)\n data = self.get_target_group_info(short_name)\n if data is not None:\n groups_info[target_group_name] = data\n\n return groups_info",
"def groups(self):\n\n return ('train', 'dev', 'eval')",
"def _get_target_group(self):\n return self.__target_group",
"def _get_target_group(self):\n return self.__target_group",
"def _get_target_group(self):\n return self.__target_group",
"def _get_target_group(self):\n return self.__target_group",
"def _get_target_group(self):\n return self.__target_group",
"def _get_target_group(self):\n return self.__target_group",
"def list_groups(self):\n\n for counter, label in enumerate(self.exp_labels_list):\n print('Key {}: {} \\n'.format(str(counter), label))",
"def target_group(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"target_group\")",
"def target_group_kinds(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GroupKindArgs']]]]:\n return pulumi.get(self, \"target_group_kinds\")",
"def delete_target_groups(ctx):\n self.delete_target_groups()\n ctx.info('Deleted target groups for the load balancer {}:'.format(self.get_balancer_name()))",
"def create_target_groups(self):\n target_groups_config = self.get_target_groups_config()\n vpc_id = self.get_vpc_id()\n response_data = {}\n\n for short_name in target_groups_config.keys():\n target_group_name = self.get_target_group_name(short_name)\n\n if self.target_group_exists(short_name):\n self.logger.info(f'Target group {target_group_name} exists, skipping creation.')\n continue\n\n response = self.client.create_target_group(\n Name=target_group_name,\n VpcId=vpc_id,\n **target_groups_config[short_name],\n )\n assert response['ResponseMetadata']['HTTPStatusCode'] == 200\n\n self.logger.info(f'Target group {target_group_name} created.')\n response_data[target_group_name] = response['TargetGroups']\n\n return response_data",
"def project_grp():\n pass",
"def set_up_groups(self):\n groups = []\n groups.append({'groupname': 'th',\n 'grouptitle': 'TH',\n 'path': '/'})\n groups.append({'groupname': 'neutronics',\n 'grouptitle': 'Neutronics',\n 'path': '/'})\n groups.append({'groupname': 'metadata',\n 'grouptitle': 'Simulation Metadata',\n 'path': '/'})\n return groups",
"def target_group_identifier(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"target_group_identifier\")",
"def _emit_group_build_lines(self, basename: str) -> list[str]:\n del basename # Unused.\n out: list[str] = []\n if not self.targets:\n return out\n all_dsts = set()\n for target in self.targets:\n all_dsts.add(target.dst)\n out.append(\n \"# Add this section's targets to the overall resources target.\\n\"\n 'resources: \\\\\\n '\n + ' \\\\\\n '.join(\n dst.replace(' ', '\\\\ ') for dst in sorted(all_dsts)\n )\n + '\\n'\n )\n return out",
"def groups(self, protocol=None): \n return ProtocolPurpose.group_choices",
"def generate_groups(ctx):\n asyncio.run(generate_groups_impl(ctx.obj[\"config\"]))",
"def delete_target_groups(self):\n target_groups_config = self.get_target_groups_config()\n\n for short_name in target_groups_config.keys():\n if not self.target_group_exists(short_name):\n self.logger.info('Target group {} does not exists, nothing to delete.'.format(\n self.get_target_group_name(short_name)\n ))\n continue\n\n response = self.client.delete_target_group(\n TargetGroupArn=self.get_target_group_arn(short_name)\n )\n assert response['ResponseMetadata']['HTTPStatusCode'] == 200\n self.logger.info('Target group {} deleted.'.format(self.get_target_group_name(short_name)))",
"def get_target_group_info(self, short_name):\n try:\n response = self.client.describe_target_groups(\n Names=[self.get_target_group_name(short_name)],\n )\n assert response['ResponseMetadata']['HTTPStatusCode'] == 200\n\n return response['TargetGroups'][0]\n except ClientError:\n self.logger.debug('Unable to find load balancer {balancer} target group {group}.'.format(\n balancer=self.get_balancer_name(),\n group=self.get_target_group_name(short_name)\n ))\n return None",
"def configure_groups(mods, apps):\n specs = configure_dd_spec_list(mods, apps)\n groups = [\n ScratchPad('scratch', config_dropdowns(specs)),\n Group('1', label='', layout='verticaltile'),\n Group('1a', label='', layout='monadthreecol'),\n Group('2', label='', layout='verticaltile'),\n Group('2a', label='', layout='maximize'),\n Group('3', label='', layout='treetab'),\n Group('3a', label='', layout='treetab'),\n Group('4', label='', layout='monadtall'),\n Group('4a', label='', layout='monadtall'),\n Group('5', label='', layout='max',\n matches=[Match(wm_class=['emacs'])]),\n Group('5a', label='', layout='max'),\n Group('6', layout='treetab', label=''),\n Group('6a', label='', layout='max'),\n Group('7', label=''),\n Group('7a', label='', layout='treetab'),\n Group('8', label='', layout='max'),\n Group('8a', label='', layout='max'),\n Group('9', label='', layout='treetab', matches=[\n Match(wm_class=['microsoft teams - preview']),\n Match(wm_class=['msoutlook-nativefier-9dd141']),\n ]),\n Group('9a', label='', layout='treetab', matches=[\n Match(wm_class=['jira-nativefier-894f7c'])\n ]),\n Group('0', label='', layout='floating'),\n ]\n keys = keymap.bind_keys(mods, apps, groups, specs)\n return (groups, keys)",
"def group_tag(self):\n return ''.join(['[{}]'.format(x.name) for x in self.groups])",
"def update_target_groups():\n\n # detect which region the explorer(s) are located\n for j in range(NUM_OF_SHARDS):\n key_explorer = \"explorers_\" + str(j)\n array_instance_ip = parse_network_config(key_explorer)\n array_instance_id = retrieve_instance_id(array_instance_ip)\n\n reg = retrieve_instance_region(array_instance_ip[0])\n # all nodes registered for the same endpoints should be located in the same region, if not, exit\n verify_nodes_same_region(reg, array_instance_ip)\n\n elbv2_client = boto3.client('elbv2', region_name=reg)\n\n array_target_group = create_name_target_group(j, ID_DOMAIN_NAME)\n pp.pprint(array_target_group)\n\n # 1/3 - retrieve target group arn\n print(\"==== retrieve target group arn\")\n dict_tg_arn = dict()\n for tg in array_target_group:\n resp = elbv2_client.describe_target_groups(Names=[tg])\n tg_arn = resp[\"TargetGroups\"][0][\"TargetGroupArn\"]\n dict_tg_arn[tg] = tg_arn\n pp.pprint(dict_tg_arn)\n\n # 2/3 - find all the instances\n print(\"==== find all the instances current registered\")\n dict_tg_instanceid = defaultdict(list)\n for tg in array_target_group:\n resp = elbv2_client.describe_target_health(TargetGroupArn=dict_tg_arn[tg])\n num_of_targets = len(resp[\"TargetHealthDescriptions\"])\n for k in range(num_of_targets):\n instance_id = resp[\"TargetHealthDescriptions\"][k][\"Target\"][\"Id\"]\n dict_tg_instanceid[tg].append(instance_id)\n pp.pprint(dict_tg_instanceid)\n\n # 3/3 - deregister all instances, then we can have a clean and nice target group\n print(\"==== deregister all instances\")\n for tg in array_target_group:\n for instance_id in dict_tg_instanceid[tg]:\n try:\n resp = elbv2_client.deregister_targets(TargetGroupArn=dict_tg_arn[tg],\n Targets=[{'Id': instance_id}])\n except Exception as e:\n print(\"Unexpected error to deregister the instance: %s\" % e)\n\n # 3/3 - register instances into the tg\n print(\"==== register all instances\")\n # outer for loop: loop through 2 tg, https and wss\n # inner loop: add every single instance id into each tg\n for tg in array_target_group:\n for instance in array_instance_id:\n response = elbv2_client.register_targets(\n TargetGroupArn=dict_tg_arn[tg],\n Targets=[{'Id': instance, }, ]\n )",
"def get_targetgroups(self):\r\n result = {}\r\n for row in self._db().select(self._db.targetgroup.ALL):\r\n result[row.id] = {}\r\n result[row.id][\"data\"] = dict(row)\r\n result[row.id][\"members\"] = []\r\n try:\r\n members = result[row.id][\"data\"]['targets']\r\n for member in json.loads(members):\r\n member_data = self._db(self._db.target.id==int(member)\r\n ).select().first()\r\n result[row.id][\"members\"].append(dict(member_data))\r\n except:\r\n result[row.id][\"members\"] = []\r\n return result",
"def items_target(self):\n # Group by kind.\n kinds = {}\n package_path = self.choices['package']\n for target in self.packages[package_path]['targets']:\n # AFAIK, when there are multiple \"kind\" values, this only happens\n # when there are multiple library kinds.\n kind = target['kind'][0]\n if kind in ('lib', 'rlib', 'dylib', 'cdylib', 'staticlib', 'proc-macro'):\n kinds.setdefault('lib', []).append(('Lib', '--lib'))\n elif kind in ('bin', 'test', 'example', 'bench'):\n text = '%s: %s' % (kind.capitalize(), target['name'])\n arg = '--%s %s' % (kind, target['name'])\n kinds.setdefault(kind, []).append((text, arg))\n elif kind in ('custom-build',):\n # build.rs, can't be built explicitly.\n pass\n else:\n log.critical(self.window,\n 'Rust: Unsupported target found: %s', kind)\n items = []\n for kind, values in kinds.items():\n allowed = True\n if self.choices.get('variant', None):\n cmd = CARGO_COMMANDS[self.choices['variant']]\n target_types = cmd['allows_target']\n if target_types is not True:\n allowed = kind in target_types\n if allowed:\n items.extend(values)\n if not items:\n sublime.error_message('Could not determine available targets.')\n return items",
"def make_groups(self):\n for g in self.groups:\n self.add_group(groupname=g['groupname'],\n grouptitle=g['grouptitle'],\n path_to_group=g['path'])"
] |
[
"0.6801498",
"0.6745524",
"0.6559486",
"0.63894445",
"0.6366864",
"0.62445754",
"0.62445754",
"0.62445754",
"0.62445754",
"0.62445754",
"0.62445754",
"0.6178109",
"0.6038597",
"0.6031735",
"0.5996421",
"0.5938765",
"0.59342307",
"0.58795434",
"0.5793468",
"0.5752609",
"0.57451403",
"0.5668814",
"0.56582004",
"0.5637307",
"0.5592338",
"0.5591136",
"0.5548536",
"0.55236435",
"0.55194664",
"0.5516523"
] |
0.7605977
|
0
|
Creates listeners between load balancer and target groups for current environment.
|
def create_listeners(ctx):
    data = self.create_listeners()
    ctx.info('Created listeners for load balancer {}:'.format(
        self.get_balancer_name()
    ))
    ctx.pp.pprint(data)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def create_listeners(self):\n target_groups_config = self.get_target_groups_config()\n balancer_arn = self.get_balancer_arn()\n response_data = {}\n\n for short_name in target_groups_config.keys():\n target_group_name = self.get_target_group_name(short_name)\n\n response = self.client.create_listener(\n LoadBalancerArn=balancer_arn,\n DefaultActions=[\n {\n 'Type': 'forward',\n 'TargetGroupArn': self.get_target_group_arn(short_name)\n }\n ],\n **target_groups_config[short_name],\n )\n assert response['ResponseMetadata']['HTTPStatusCode'] == 200\n\n self.logger.info('Target group {group} bound to {balancer} load balancer.'.format(\n group=target_group_name,\n balancer=self.get_balancer_name(),\n ))\n response_data[target_group_name] = response['Listeners']\n\n return response_data",
"def create(ctx):\n create_target_groups(ctx)\n create_balancer(ctx)\n create_listeners(ctx)\n\n ctx.info('Load balancers setup completed.')",
"def get_create_all_listeners_flow(self):\n create_all_listeners_flow = linear_flow.Flow(\n constants.CREATE_LISTENERS_FLOW)\n create_all_listeners_flow.add(\n database_tasks.GetListenersFromLoadbalancer(\n requires=constants.LOADBALANCER,\n provides=constants.LISTENERS))\n create_all_listeners_flow.add(database_tasks.ReloadLoadBalancer(\n requires=constants.LOADBALANCER_ID,\n provides=constants.LOADBALANCER))\n create_all_listeners_flow.add(amphora_driver_tasks.ListenersUpdate(\n requires=[constants.LOADBALANCER, constants.LISTENERS]))\n create_all_listeners_flow.add(network_tasks.UpdateVIP(\n requires=constants.LOADBALANCER))\n return create_all_listeners_flow",
"def create_target_groups(ctx):\n data = self.create_target_groups()\n ctx.info('Created target groups for the load balancer {}:'.format(self.get_balancer_name()))\n ctx.pp.pprint(data)",
"def events_init(sc, drivers, rpcmgr):\n ev_ids = [lb_const.EVENT_CREATE_LOADBALANCER_V2,\n lb_const.EVENT_UPDATE_LOADBALANCER_V2,\n lb_const.EVENT_DELETE_LOADBALANCER_V2,\n\n lb_const.EVENT_CREATE_LISTENER_V2,\n lb_const.EVENT_UPDATE_LISTENER_V2,\n lb_const.EVENT_DELETE_LISTENER_V2,\n\n lb_const.EVENT_CREATE_POOL_V2, lb_const.EVENT_UPDATE_POOL_V2,\n lb_const.EVENT_DELETE_POOL_V2,\n\n lb_const.EVENT_CREATE_MEMBER_V2,\n lb_const.EVENT_UPDATE_MEMBER_V2,\n lb_const.EVENT_DELETE_MEMBER_V2,\n\n lb_const.EVENT_CREATE_HEALTH_MONITOR_V2,\n lb_const.EVENT_UPDATE_HEALTH_MONITOR_V2,\n lb_const.EVENT_DELETE_HEALTH_MONITOR_V2,\n\n lb_const.EVENT_AGENT_UPDATED_V2,\n lb_const.EVENT_COLLECT_STATS_V2\n ]\n\n evs = []\n for ev_id in ev_ids:\n ev = nfp_event.Event(id=ev_id, handler=LBaaSV2EventHandler(\n sc, drivers, rpcmgr))\n evs.append(ev)\n sc.register_events(evs)",
"def get_create_listener_flow(self):\n create_listener_flow = linear_flow.Flow(constants.CREATE_LISTENER_FLOW)\n create_listener_flow.add(lifecycle_tasks.ListenersToErrorOnRevertTask(\n requires=[constants.LOADBALANCER, constants.LISTENERS]))\n #create_listener_flow.add(amphora_driver_tasks.ListenersUpdate(\n # requires=[constants.LOADBALANCER, constants.LISTENERS]))\n # Get VThunder details from database\n create_listener_flow.add(a10_database_tasks.GetVThunderByLoadBalancer(\n requires=constants.LOADBALANCER,\n provides=a10constants.VTHUNDER))\n create_listener_flow.add(handler_virtual_port.ListenersCreate(\n requires=[constants.LOADBALANCER, constants.LISTENERS, a10constants.VTHUNDER]))\n create_listener_flow.add(network_tasks.UpdateVIP(\n requires=constants.LOADBALANCER))\n create_listener_flow.add(database_tasks.\n MarkLBAndListenersActiveInDB(\n requires=[constants.LOADBALANCER,\n constants.LISTENERS]))\n return create_listener_flow",
"def _instantiate_event_listeners(self):\n event_listeners = self.conf_manager.get_event_listeners()\n plugin_parameters = [self.events_manager, self.conf_manager]\n self.listeners = self._load_plugins(event_listeners,\n common.EVENT_LISTENER_PACKAGE,\n paths.EVENT_LISTENER_DIR,\n plugin_parameters)",
"def create_sockets(conf, log, fds=None):\n listeners = []\n\n # get it only once\n laddr = conf.address\n\n # check ssl config early to raise the error on startup\n # only the certfile is needed since it can contains the keyfile\n if conf.certfile and not os.path.exists(conf.certfile):\n raise ValueError('certfile \"%s\" does not exist' % conf.certfile)\n\n if conf.keyfile and not os.path.exists(conf.keyfile):\n raise ValueError('keyfile \"%s\" does not exist' % conf.keyfile)\n\n # sockets are already bound\n if fds is not None:\n for fd in fds:\n sock = socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM)\n sock_name = sock.getsockname()\n sock_type = _sock_type(sock_name)\n listener = sock_type(sock_name, conf, log, fd=fd)\n listeners.append(listener)\n\n return listeners\n\n # no sockets is bound, first initialization of gunicorn in this env.\n for addr in laddr:\n sock_type = _sock_type(addr)\n sock = None\n for i in range(5):\n try:\n sock = sock_type(addr, conf, log)\n except socket.error as e:\n if e.args[0] == errno.EADDRINUSE:\n log.error(\"Connection in use: %s\", str(addr))\n if e.args[0] == errno.EADDRNOTAVAIL:\n log.error(\"Invalid address: %s\", str(addr))\n if i < 5:\n msg = \"connection to {addr} failed: {error}\"\n log.debug(msg.format(addr=str(addr), error=str(e)))\n log.error(\"Retrying in 1 second.\")\n time.sleep(1)\n else:\n break\n\n if sock is None:\n log.error(\"Can't connect to %s\", str(addr))\n sys.exit(1)\n\n listeners.append(sock)\n\n return listeners",
"def create_listener(request, **kwargs):\n data = request.DATA\n\n try:\n default_tls_ref = data['certificates'][0]\n except (KeyError, IndexError):\n default_tls_ref = None\n\n conn = get_sdk_connection(request)\n # TODO(johnsom) Add SNI support\n # https://bugs.launchpad.net/octavia/+bug/1714294\n listener = conn.load_balancer.create_listener(\n protocol=data['listener']['protocol'],\n protocol_port=data['listener']['protocol_port'],\n load_balancer_id=kwargs['loadbalancer_id'],\n name=data['listener'].get('name'),\n description=data['listener'].get('description'),\n connection_limit=data['listener'].get('connection_limit'),\n default_tls_container_ref=default_tls_ref,\n sni_container_refs=None,\n admin_state_up=data['listener'].get('admin_state_up'),\n insert_headers=data['listener'].get('insert_headers'),\n timeout_client_data=data['listener'].get('timeout_client_data'),\n timeout_member_connect=data['listener'].get('timeout_member_connect'),\n timeout_member_data=data['listener'].get('timeout_member_data'),\n timeout_tcp_inspect=data['listener'].get('timeout_tcp_inspect'),\n allowed_cidrs=data['listener'].get('allowed_cidrs'),\n # Replace empty string by None (uses default tls cipher string)\n tls_ciphers=data['listener'].get('tls_ciphers') or None,\n )\n\n if data.get('pool'):\n args = (request, kwargs['loadbalancer_id'], create_pool)\n kwargs = {'callback_kwargs': {'listener_id': listener.id}}\n thread.start_new_thread(poll_loadbalancer_status, args, kwargs)\n\n return _get_sdk_object_dict(listener)",
"def ensure_load_balancer_created(vpc, security_group, subnet1, subnet2, target_group_arn, ssl_certificate_arn, environment):\n name = environment + '-load-balancer'\n\n # If it already exists, create returns the existing data\n response = ELB.create_load_balancer(\n Name=name,\n Subnets=[ subnet1.id, subnet2.id ],\n SecurityGroups=[ security_group.id ],\n IpAddressType='dualstack',\n Tags=[\n { 'Key': 'Name', 'Value': name },\n { 'Key': 'Environment', 'Value': environment }\n ]\n )\n\n load_balancer = response['LoadBalancers'][0]\n arn = load_balancer['LoadBalancerArn']\n\n # There seems to be no harm in creating listeners if they already exist\n ELB.create_listener(\n LoadBalancerArn=arn,\n Protocol='HTTP',\n Port=80,\n DefaultActions=[{ 'Type': 'forward', 'TargetGroupArn': target_group_arn } ]\n )\n\n ELB.create_listener(\n LoadBalancerArn=arn,\n Protocol='HTTPS',\n Port=443,\n SslPolicy='ELBSecurityPolicy-TLS-1-2-2017-01',\n Certificates=[ { 'CertificateArn': ssl_certificate_arn } ],\n DefaultActions=[ { 'Type': 'forward', 'TargetGroupArn': target_group_arn } ]\n )\n\n return load_balancer",
"def get_hosts(self, target, listener_type):",
"def create_listener(self, context, listener):\n LOG.info(\"Received request 'Create Listener' for LB:%(lb)s \",\n {'lb': listener['loadbalancer_id']})\n arg_dict = {'context': context,\n lb_const.LISTENER: listener,\n }\n self._send_event(lb_const.EVENT_CREATE_LISTENER_V2, arg_dict,\n serialize=True,\n binding_key=listener['loadbalancer_id'],\n key=listener['id'])",
"def connection_groups(self, **kwargs):\n return [\"listener-%s\" % self.path[1:]]",
"def post(self, request):\n kwargs = {'loadbalancer_id': request.DATA.get('loadbalancer_id')}\n return create_listener(request, **kwargs)",
"def listen_for_notifications(self, targets_and_priorities, pool,\n batch_size, batch_timeout):\n conn = ConsumerConnection(self.conf, self._url)\n topics = set()\n for target, priority in targets_and_priorities:\n topics.add(target_to_topic(target, priority))\n\n conn.declare_topic_consumer(topics, pool)\n\n listener = KafkaListener(conn)\n return base.PollStyleListenerAdapter(listener, batch_size,\n batch_timeout)",
"def _start_listeners(self):\n if self.listeners:\n self.state = \"listening\"\n for event_listener in self.listeners:\n event_listener.start()\n\n for listener in self.listeners:\n listener.join()",
"def _add_listeners(vehicle):\n @vehicle.on_attribute('mode')\n def mode_listener(self,name, msg):\n util.log_info(\"Mode switched to %s\" % msg.name)\n \n if msg.name != shared.status['manual_mode']: # manual override\n if msg.name == 'RTL' or msg.name == 'LAND':\n util.log_warning(\"External %s detected. Abort.\" % msg.name)\n shared.status['abort'] = True\n \n @vehicle.on_attribute('gps_0')\n def gps_listener(self,name, msg): # monitor satellites\n if not shared.status['thread_flag'] & shared.NSATS_TOO_LOW:\n if msg.satellites_visible < 6:\n util.log_warning(\"Satellites dropped below 5!\")\n shared.status['thread_flag'] |= shared.NSATS_TOO_LOW\n \n elif msg.satellites_visible >= 10:\n util.log_info(\"Satellites recovered to %d.\" % msg.satellites_visible)\n shared.status['thread_flag'] &= ~shared.NSATS_TOO_LOW\n \n @vehicle.on_message('SYSTEM_TIME')\n def time_listener(self,name, msg): # log timestamp\n format = '%Y-%m-%d %H:%M:%S'\n val = time.localtime(msg.time_unix_usec/1000000)\n shared.timestamp = time.strftime(format, val)",
"def app_elb(template, name, subnets, instances, vpc, instance_port=443, load_balancer_port=443, instance_proto='HTTPS',\n load_balancer_proto='HTTPS', securitygroups=None):\n\n applb = elbv2.LoadBalancer(name,\n template=template,\n Subnets=[Ref(r) for r in subnets],\n SecurityGroups=[Ref(r) for r in securitygroups],\n )\n\n targetgroup = elbv2.TargetGroup(title=name + 'targetgroup',\n template=template,\n Port=instance_port,\n Protocol=instance_proto,\n VpcId=Ref(vpc),\n Targets=[elbv2.TargetDescription(Id=Ref(r)) for r in instances],\n HealthCheckIntervalSeconds=10,\n # HealthCheckPath=\"/\",\n # HealthCheckPort=\"traffic-port\",\n # HealthCheckProtocol=\"HTTP\",\n # HealthCheckTimeoutSeconds=5,\n # UnhealthyThresholdCount=10,\n # HealthyThresholdCount=2,\n )\n\n elbv2.Listener(title=(name + 'listener'),\n template=template,\n DefaultActions=[elbv2.Action(TargetGroupArn=Ref(targetgroup), Type='forward')],\n LoadBalancerArn=Ref(applb),\n Port=load_balancer_port,\n Protocol=load_balancer_proto,\n )\n\n return applb",
"def create_listeners(self, **kwargs):\n logging.debug('Creating Service Manager listeners')\n\n # Check for required endpoint args\n required_args = (\n 'frontend_endpoint',\n 'backend_endpoint',\n 'mgmt_endpoint',\n 'sink_endpoint'\n )\n\n if not all(k in kwargs for k in required_args):\n raise ServiceManagerException, 'Missing socket endpoints, e.g. frontend/backend/mgmt/sink'\n\n for k in kwargs:\n setattr(self, k, kwargs[k])\n\n self.zcontext = zmq.Context().instance()\n\n # Our Service Manager sockets\n self.frontend_socket = self.zcontext.socket(zmq.ROUTER)\n self.backend_socket = self.zcontext.socket(zmq.XPUB)\n self.sink_socket = self.zcontext.socket(zmq.PULL)\n self.mgmt_socket = self.zcontext.socket(zmq.REP)\n self.result_pub_socket = self.zcontext.socket(zmq.PUB)\n\n try:\n self.frontend_socket.bind(self.frontend_endpoint)\n self.backend_socket.bind(self.backend_endpoint)\n self.sink_socket.bind(self.sink_endpoint)\n self.mgmt_socket.bind(self.mgmt_endpoint)\n self.result_pub_port = self.result_pub_socket.bind_to_random_port('tcp://*')\n except zmq.ZMQError as e:\n raise ServiceManagerException, 'Cannot bind Service Manager sockets: %s' % e\n\n # Create a poll set for our sockets\n self.zpoller = zmq.Poller()\n self.zpoller.register(self.frontend_socket, zmq.POLLIN)\n self.zpoller.register(self.backend_socket, zmq.POLLIN)\n self.zpoller.register(self.sink_socket, zmq.POLLIN)\n self.zpoller.register(self.mgmt_socket, zmq.POLLIN)\n\n logging.debug('Frontend socket bound to %s', self.frontend_endpoint)\n logging.debug('Backend socket bound to %s', self.backend_endpoint)\n logging.debug('Sink socket bound to %s', self.sink_endpoint)\n logging.debug('Management socket bound to %s', self.mgmt_endpoint)\n logging.debug('Result publisher socket bound to %s', 'tcp://*:' + str(self.result_pub_port))",
"def create_listener(self, service, bigips):\n vip = self.service_adapter.get_virtual(service)\n tls = self.service_adapter.get_tls(service)\n if tls:\n tls['name'] = vip['name']\n tls['partition'] = vip['partition']\n\n service['listener']['operating_status'] = lb_const.ONLINE\n\n network_id = service['loadbalancer']['network_id']\n error = None\n for bigip in bigips:\n self.service_adapter.get_vlan(vip, bigip, network_id)\n try:\n self.vs_helper.create(bigip, vip)\n except HTTPError as err:\n if err.response.status_code == 409:\n LOG.debug(\"Virtual server already exists updating\")\n try:\n self.update_listener(service, [bigip])\n #self.vs_helper.update(bigip, vip)\n except Exception as e:\n LOG.warn(\"Update triggered in create failed, this could be due to timing issues in assure_service\")\n LOG.warn('VS info %s',service['listener'])\n LOG.exception(e)\n LOG.warn('Exception %s',e)\n raise e\n else:\n LOG.exception(\"Virtual server creation error: %s\" %\n err.message)\n raise\n if tls:\n # Don't stop processing in case of errors. Otherwise the other F5's won't get the same vs\n try:\n self.add_ssl_profile(tls, bigip)\n except Exception as err:\n LOG.error(\"Error adding SSL Profile to listener: {0}\".format(err))\n error = err if error is None else error\n\n if error:\n service['listener']['provisioning_status'] = 'ERROR'\n raise error",
"def describe_listeners(ctx):\n data = self.describe_listeners()\n ctx.info('Listeners details for load balancer {}:'.format(self.get_balancer_name()))\n ctx.pp.pprint(data)",
"def delete_listeners(ctx):\n if self.balancer_exists():\n self.delete_listeners()\n ctx.info('Deleted all listeners for load balancer {}:'.format(self.get_balancer_name()))\n else:\n ctx.info('Load balancer {} does not exist, no listeners to remove.'.format(self.get_balancer_name()))",
"def _setup_events(conf):\n events = {}\n for name in conf.keys():\n events[name] = Event(name=name)\n for listener in conf[name]:\n action = 'run'\n if ':' in listener:\n listener, action = listener.rsplit(':')\n events[name].add_listener(listener, action)\n\n # Add events to module scope.\n globals().update(events)",
"def register(self, target, hostname, listener_type, expire=-1):",
"def get_create_load_balancer_flow(self, load_balancer_id, topology, project_id,\n listeners=None, pools=None):\n\n f_name = constants.CREATE_LOADBALANCER_FLOW\n lb_create_flow = linear_flow.Flow(f_name)\n lb_create_flow.add(lifecycle_tasks.LoadBalancerIDToErrorOnRevertTask(\n requires=constants.LOADBALANCER_ID))\n lb_create_flow.add(vthunder_tasks.VthunderInstanceBusy(\n requires=a10constants.COMPUTE_BUSY))\n\n lb_create_flow.add(database_tasks.ReloadLoadBalancer(\n requires=constants.LOADBALANCER_ID,\n provides=constants.LOADBALANCER))\n\n lb_create_flow.add(a10_database_tasks.CheckExistingVthunderTopology(\n requires=constants.LOADBALANCER,\n inject={\"topology\": topology}))\n\n # Attaching vThunder to LB in database\n if topology == constants.TOPOLOGY_ACTIVE_STANDBY:\n lb_create_flow.add(*self._create_active_standby_topology())\n LOG.info(\"TOPOLOGY === \" + str(topology))\n elif topology == constants.TOPOLOGY_SINGLE:\n lb_create_flow.add(*self._create_single_topology())\n LOG.info(\"TOPOLOGY === \" + str(topology))\n else:\n LOG.error(\"Unknown topology: %s. Unable to build load balancer.\",\n topology)\n raise exceptions.InvalidTopology(topology=topology)\n\n # IMP: Now creating vThunder config here\n post_amp_prefix = constants.POST_LB_AMP_ASSOCIATION_SUBFLOW\n vthunder = self._vthunder_repo.get_vthunder_by_project_id(db_apis.get_session(),\n project_id)\n lb_create_flow.add(a10_database_tasks.GetFlavorData(\n rebind={a10constants.LB_RESOURCE: constants.LOADBALANCER},\n provides=constants.FLAVOR_DATA))\n lb_create_flow.add(\n self.get_post_lb_vthunder_association_flow(\n post_amp_prefix, load_balancer_id, topology, vthunder,\n mark_active=(not listeners)))\n lb_create_flow.add(a10_database_tasks.CountLoadbalancersWithFlavor(\n requires=(constants.LOADBALANCER, a10constants.VTHUNDER),\n provides=a10constants.LB_COUNT_FLAVOR))\n lb_create_flow.add(vthunder_tasks.AllowL2DSR(\n requires=(constants.SUBNET, constants.AMPHORA,\n a10constants.LB_COUNT_FLAVOR, constants.FLAVOR_DATA)))\n lb_create_flow.add(nat_pool_tasks.NatPoolCreate(\n requires=(constants.SUBNET, constants.LOADBALANCER,\n a10constants.VTHUNDER, constants.FLAVOR_DATA)))\n lb_create_flow.add(virtual_server_tasks.CreateVirtualServerTask(\n requires=(constants.LOADBALANCER, a10constants.VTHUNDER,\n constants.FLAVOR_DATA)))\n\n if pools:\n for pool in pools:\n lb_create_flow.add(self._pool_flows.get_fully_populated_create_pool_flow(\n topology, pool, vthunder_flow=True))\n\n if listeners:\n sf_name = a10constants.FULLY_POPULATED_LISTENER_CREATE\n for listener in listeners:\n lb_create_flow.add(\n self._listener_flows.get_vthunder_fully_populated_create_listener_flow(\n topology, listener))\n\n lb_create_flow.add(database_tasks.MarkLBActiveInDB(\n name=sf_name + '-' + constants.MARK_LB_ACTIVE_INDB,\n mark_subobjects=True,\n requires=constants.LOADBALANCER))\n\n lb_create_flow.add(vthunder_tasks.WriteMemory(\n requires=a10constants.VTHUNDER))\n lb_create_flow.add(a10_database_tasks.SetThunderUpdatedAt(\n requires=a10constants.VTHUNDER))\n\n return lb_create_flow",
"def create_sockets(laddr):\n listeners = []\n\n for addr in laddr:\n sock_type = _sock_type(addr)\n sock = None\n for i in range(5):\n try:\n sock = sock_type(addr)\n except socket.error as e:\n log = logging.getLogger('thriftsvr.sock')\n if e.args[0] == errno.EADDRINUSE:\n log.error(\"Connection in use: %s\", str(addr))\n if e.args[0] == errno.EADDRNOTAVAIL:\n log.error(\"Invalid address: %s\", str(addr))\n if i < 5:\n msg = \"connection to {addr} failed: {error}\"\n log.debug(msg.format(addr=str(addr), error=str(e)))\n log.error(\"Retrying in 1 second.\")\n time.sleep(1)\n else:\n break\n\n if sock is None:\n log = logging.getLogger('thriftsvr.sock')\n log.error(\"Can't connect to %s\", str(addr))\n sys.exit(1)\n\n listeners.append(sock)\n\n return listeners",
"def main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--tg_name', required=True,\n help='specify target group name', type=str)\n parser.add_argument('--gwlb_name', required=True,\n help='specify gateway load balancer name', type=str)\n parser.add_argument('--vpc_id', required=True,\n help='specify vpc id', type=str)\n parser.add_argument('--subnet_ids', nargs='+', required=True,\n help='specify subnet ids')\n parser.add_argument('--target_ids', nargs='+', required=True,\n help='specify target ids')\n\n args = parser.parse_args()\n ############################\n # Define script variables:\n ############################\n tg_name = args.tg_name\n gwlb_name = args.gwlb_name\n vpc_id = args.vpc_id\n subnet_ids = args.subnet_ids\n target_ids = args.target_ids\n\n tg1_args = {\n 'name': tg_name,\n 'protocol': 'GENEVE',\n 'port': 6081,\n 'healthchkproto': 'HTTP',\n 'healthchkport': '80',\n 'healthchkpath': '/',\n 'vpc_id': vpc_id,\n 'type': 'instance'\n }\n #############################\n # Target Group:\n tg1 = create_tg(**tg1_args)\n print(f\"TG ARN: {tg1[1]}\")\n # GWLB:\n gwlb1 = create_gwlb(gwlb_name, subnet_ids)\n print(f\"GWLB ARN: {gwlb1[1]}\")\n # Listener:\n listener1 = create_fwd_listener(gwlb1[1], tg1[1])\n print(f\"LISTENER ARN: {listener1[1]}\")\n # Register Targets:\n register_targets(tg1[1], target_ids[0])",
"def _init_and_add_listeners_to_stage_traits(self):\n self.stages[\"Preprocessing\"].config.tracking_tool = self.stages[\"Diffusion\"].config.tracking_processing_tool\n self.stages[\"Preprocessing\"].config.act_tracking = self.stages[\"Diffusion\"].config.mrtrix_tracking_config.use_act\n self.stages[\"Preprocessing\"].config.gmwmi_seeding = self.stages[\"Diffusion\"].config.mrtrix_tracking_config.seed_from_gmwmi\n self.stages[\"Registration\"].config.tracking_tool = self.stages[\"Diffusion\"].config.tracking_processing_tool\n self.stages[\"Registration\"].config.act_tracking = self.stages[\"Diffusion\"].config.mrtrix_tracking_config.use_act\n self.stages[\"Registration\"].config.gmwmi_seeding = self.stages[\"Diffusion\"].config.mrtrix_tracking_config.seed_from_gmwmi\n\n self.stages[\"Connectome\"].config.on_trait_change(\n self.update_vizualization_layout, \"circular_layout\"\n )\n self.stages[\"Connectome\"].config.on_trait_change(\n self.update_vizualization_logscale, \"log_visualization\"\n )\n self.stages[\"Diffusion\"].config.on_trait_change(\n self.update_outputs_recon, \"recon_processing_tool\"\n )\n self.stages[\"Diffusion\"].config.on_trait_change(\n self.update_tracking_tool, \"tracking_processing_tool\"\n )\n self.stages[\"Diffusion\"].config.mrtrix_tracking_config.on_trait_change(\n self.update_preprocessing_act, \"use_act\"\n )\n self.stages[\"Diffusion\"].config.dipy_tracking_config.on_trait_change(\n self.update_preprocessing_act, \"use_act\"\n )\n self.stages[\"Diffusion\"].config.mrtrix_tracking_config.on_trait_change(\n self.update_preprocessing_gmwmi, \"seed_from_gmwmi\"\n )",
"def start_a_listener():\n listener = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n listener.bind(('localhost', 0))\n return listener, listener.getsockname()",
"def CreateUDPListeners(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"CreateUDPListeners\", params, headers=headers)\n response = json.loads(body)\n model = models.CreateUDPListenersResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))"
] |
[
"0.78298986",
"0.63762605",
"0.6211721",
"0.61374104",
"0.59323144",
"0.5882233",
"0.5827499",
"0.58116966",
"0.57650423",
"0.5760158",
"0.5666049",
"0.5589464",
"0.5575253",
"0.55505455",
"0.5464185",
"0.5462238",
"0.53863287",
"0.5379138",
"0.5339459",
"0.533056",
"0.5287598",
"0.5274342",
"0.5173732",
"0.51594365",
"0.5157555",
"0.5156271",
"0.5137214",
"0.51351225",
"0.51121724",
"0.5105977"
] |
0.745263
|
1
|
Deletes listeners for current environment.
|
def delete_listeners(ctx):
    if self.balancer_exists():
        self.delete_listeners()
        ctx.info('Deleted all listeners for load balancer {}:'.format(self.get_balancer_name()))
    else:
        ctx.info('Load balancer {} does not exist, no listeners to remove.'.format(self.get_balancer_name()))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def delete_listeners(self):\n listeners_info = self.describe_listeners()\n\n for listener in listeners_info:\n response = self.client.delete_listener(\n ListenerArn=listener['ListenerArn']\n )\n assert response['ResponseMetadata']['HTTPStatusCode'] == 200\n\n self.logger.info('Successfully deleted listener {listener_arn} for balancer {balancer}.'.format(\n listener_arn=listener['ListenerArn'],\n balancer=self.get_balancer_name(),\n ))",
"def __del__(self):\n\t\trospy.logdebug('MAVROSListener destruction')\n\t\t\n\t\tfor sub in self.__subs.values():\n\t\t\tsub.unregister()",
"def close_listeners(self):\n logging.debug('Closing Service Manager listeners')\n\n self.zpoller.unregister(self.frontend_socket)\n self.zpoller.unregister(self.backend_socket)\n self.zpoller.unregister(self.sink_socket)\n self.zpoller.unregister(self.mgmt_socket)\n\n self.frontend_socket.close()\n self.backend_socket.close()\n self.sink_socket.close()\n self.mgmt_socket.close()\n self.result_pub_socket.close()\n\n self.zcontext.destroy()",
"def DeleteListeners(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DeleteListeners\", params, headers=headers)\n response = json.loads(body)\n model = models.DeleteListenersResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))",
"def remove_mode_listeners(self) -> None:\n self._listeners[ID_MODE] = []",
"def remove_hsm_status_listeners(self) -> None:\n self._listeners[ID_HSM_STATUS] = []",
"async def on_delete_sockets(self):\n async with self.socket_lock:\n existing_monitor_sockets = []\n existing_scraper_sockets = []\n # get the current list of existing sockets\n for f in list(os.listdir(self.config[\"GlobalConfig\"][\"socket_path\"])):\n if f.startswith(\"Monitor.\"):\n existing_monitor_sockets.append(f)\n elif f.startswith(\"Scraper.\"):\n existing_scraper_sockets.append(f)\n\n # temp copy\n updated_monitor_sockets = copy.copy(self.monitor_sockets)\n updated_scraper_sockets = copy.copy(self.scraper_sockets)\n\n # remove every internal socket that is not existing\n for class_name in self.monitor_sockets:\n if \"Monitor.\" + class_name not in existing_monitor_sockets:\n updated_monitor_sockets.pop(class_name)\n for class_name in self.scraper_sockets:\n if \"Scraper.\" + class_name not in existing_scraper_sockets:\n updated_scraper_sockets.pop(class_name)\n\n self.monitor_sockets = updated_monitor_sockets\n self.scraper_sockets = updated_scraper_sockets",
"def unregister(self, target, hostname, listener_type):",
"def stop(self) -> None:\n for instance in self.instances:\n instance.listener = None\n instance.stop()",
"def delete_entity(self, context, listener):\n resource_path = \"%s/%s/%s\" % (RESOURCE_PREFIX, LISTENERS_RESOURCE,\n listener.id)\n msg = _(\"NetScaler driver listener removal: %s\") % listener.id\n LOG.debug(msg)\n self.client.remove_resource(context.tenant_id, resource_path)",
"def delete_listener(self, argu):\n\n if not argu:\n LOG.error(\"In delete_listener, it should not pass the None.\")\n\n # delete vs\n self._delete_vs(\n argu['listener_id'],\n argu['protocol']\n )",
"def remove_device_listeners(self, device_id: str) -> None:\n self._listeners[device_id] = []",
"def delete(self, *args, **kwargs):\n # Delete listener\n if self.db.listener:\n self.db.listener.delete()\n \n # Delete puppets\n puppetlist = [puppet for puppet in\n search.search_tag(self.key+\"-puppet\")]\n for puppet in puppetlist:\n puppet.delete()\n\n # Delete bot\n self.db.ev_location.msg_contents(\"Bot commencing shut-down process.\")\n super(ServerBot, self).delete(*args, **kwargs)",
"def destroy(self):\n if hasattr(self, 'vistrailsStartup'):\n self.vistrailsStartup.destroy()",
"def unregister(self, listener):\n for event_type in self.listeners:\n for event_listeners in self.listeners[event_type]:\n if event_listeners:\n try:\n event_listeners.remove(listener)\n logger.debug('Unregistered listener for event type \"%s\"', hr_event_type(event_type))\n except ValueError:\n pass",
"def cleanup(self):\n self.removeObservers()",
"def delete_listener(self, service, bigips):\n vip = self.service_adapter.get_virtual_name(service)\n tls = self.service_adapter.get_tls(service)\n if tls:\n tls['name'] = vip['name']\n tls['partition'] = vip['partition']\n error = None\n for bigip in bigips:\n self.vs_helper.delete(bigip,\n name=vip[\"name\"],\n partition=vip[\"partition\"])\n\n # delete ssl profiles\n # Don't stop processing in case of errors. Otherwise the other F5's might have a different configuration\n try:\n self.remove_ssl_profiles(tls, bigip)\n except Exception as err:\n LOG.error(\"Error adding SSL Profile to listener: {0}\".format(err))\n error = err if error is None else error\n\n if error:\n raise error",
"def off_all(self) -> None:\n self._event_tree.clear()\n del self._any_listeners[:]",
"def destroy_loggers(self) -> \"Logger\":\n\n for logger, level in self.get_next_logger():\n logger.handlers.clear()\n\n delattr(self, f\"{level}_logger\")\n setattr(self, f\"{level}_logger\", None)",
"def _delete_vports(self):\n self._api._remove(self._ixn_vport, self._api.config.ports)",
"def removeServiceListener(self, listener: ghidra.framework.plugintool.util.ServiceListener) -> None:\n ...",
"def _remove_listeners(self, txid: int = None):\n if txid is None:\n for f in self._listeners.values():\n f.cancel()\n self._listeners = {}\n return\n\n self._listeners[txid].cancel()\n del self._listeners[txid]",
"def __del__(self):\n AppHelper.stopEventLoop()",
"def __del__(self):\n AppHelper.stopEventLoop()",
"def __del__(self):\n for client in self.client_list:\n del client\n for server in self.server_list:\n del server",
"def unsubscribe_all_known(self):\n for key, value in self.__callbacks.items():\n self.__logger.debug(f'unsubscribe from event {key}')\n succ = self.__twitch.delete_eventsub_subscription(key)\n if not succ:\n self.__logger.warning(f'failed to unsubscribe from event {key}')\n self.__callbacks.clear()",
"def deleteCreatedBy(self, caller):\n\t\tnewListeners = []\n\t\tfor lis in self.allListeners:\n\t\t\tif lis.caller == caller:\n\t\t\t\tlis.unregister()\n\t\t\telse:\n\t\t\t\tnewListeners.append(lis)\n\t\tself.allListeners = newListeners",
"def test_remove_listener(self):\n listener = lambda state: state\n self.wrapper.add_listener(listener)\n self.assertEqual(len(self.client.listeners), 1)\n self.wrapper.remove_listener(listener)\n self.assertEqual(len(self.client.listeners), 0)",
"def abort(self):\n for key in self.listeners:\n sem = self.listeners[key]\n self.listeners[key] = None\n\n # TODO: Received data and semahore should be stored separately\n if isinstance(sem, asyncio.Semaphore):\n sem.release()",
"def cleanup(self):\n self.removeObservers()"
] |
[
"0.73083454",
"0.6432149",
"0.63611424",
"0.6352538",
"0.6321875",
"0.6212454",
"0.6150953",
"0.61417234",
"0.6006233",
"0.59870964",
"0.59372544",
"0.58935",
"0.58164805",
"0.5792932",
"0.5735109",
"0.5699852",
"0.56589717",
"0.5654272",
"0.56454384",
"0.56419045",
"0.5628233",
"0.56247133",
"0.56225514",
"0.56225514",
"0.56064993",
"0.5601689",
"0.55944705",
"0.5594169",
"0.5591304",
"0.5585566"
] |
0.6964974
|
1
|
Describes listeners for current environment.
|
def describe_listeners(ctx):
    data = self.describe_listeners()
    ctx.info('Listeners details for load balancer {}:'.format(self.get_balancer_name()))
    ctx.pp.pprint(data)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def listener_description(self) -> str:\n return pulumi.get(self, \"listener_description\")",
"def _add_listeners(vehicle):\n @vehicle.on_attribute('mode')\n def mode_listener(self,name, msg):\n util.log_info(\"Mode switched to %s\" % msg.name)\n \n if msg.name != shared.status['manual_mode']: # manual override\n if msg.name == 'RTL' or msg.name == 'LAND':\n util.log_warning(\"External %s detected. Abort.\" % msg.name)\n shared.status['abort'] = True\n \n @vehicle.on_attribute('gps_0')\n def gps_listener(self,name, msg): # monitor satellites\n if not shared.status['thread_flag'] & shared.NSATS_TOO_LOW:\n if msg.satellites_visible < 6:\n util.log_warning(\"Satellites dropped below 5!\")\n shared.status['thread_flag'] |= shared.NSATS_TOO_LOW\n \n elif msg.satellites_visible >= 10:\n util.log_info(\"Satellites recovered to %d.\" % msg.satellites_visible)\n shared.status['thread_flag'] &= ~shared.NSATS_TOO_LOW\n \n @vehicle.on_message('SYSTEM_TIME')\n def time_listener(self,name, msg): # log timestamp\n format = '%Y-%m-%d %H:%M:%S'\n val = time.localtime(msg.time_unix_usec/1000000)\n shared.timestamp = time.strftime(format, val)",
"def describe_listeners(self):\n balancer_arn = self.get_balancer_arn()\n\n response = self.client.describe_listeners(\n LoadBalancerArn=balancer_arn,\n )\n assert response['ResponseMetadata']['HTTPStatusCode'] == 200\n\n return response['Listeners']",
"def __init__(self):\n self._listeners = []",
"def create_listeners(ctx):\n data = self.create_listeners()\n ctx.info('Created listeners for load balancer {}:'.format(\n self.get_balancer_name()\n ))\n ctx.pp.pprint(data)",
"def listen(self):\n pass",
"def listeners(server, process):\n if (config.ENABLE_AUTH and Permissions.LIST_LISTENERS not in\n server.permissions):\n process.stderr.write(\"Permission denied\\r\\n\")\n return\n\n process.stdout.write(\"Listening clients:\\n\")\n\n names = server.daemon.listener_names\n if names:\n for name in sorted(names.keys()):\n for i, s in enumerate(names[name]):\n peername = s.conn.get_extra_info('peername')\n ip = peername[0]\n ports = (lp.listen_port for lp in s.listeners.values())\n if i == 0:\n connect_name = name\n else:\n connect_name = util.join_hostname_index(name, i)\n\n line = \" {}: ip={} aliases={} ports={}\\n\".format(\n connect_name,\n ip,\n ','.join(s.aliases),\n ','.join(str(p) for p in sorted(ports)),\n )\n process.stdout.write(line)\n else:\n process.stdout.write(\" None\\n\")",
"def inform_listeners(self):\n d = self.get_all_sorted()\n for listener in self.listeners:\n listener.stream_updated(d)",
"def __init__(self) -> None:\n self.event_listeners: Dict[str, List[Callable or Awaitable]] = {}",
"def listen(eventType):\n def _decoration(fcn):\n fcn.listen = True\n fcn.eventType = eventType\n return fcn\n return _decoration",
"def listeners(self) -> Tuple[CBListenerType, ...]:\n return tuple(self._listeners) # type: ignore[arg-type]",
"def get_create_listener_flow(self):\n create_listener_flow = linear_flow.Flow(constants.CREATE_LISTENER_FLOW)\n create_listener_flow.add(lifecycle_tasks.ListenersToErrorOnRevertTask(\n requires=[constants.LOADBALANCER, constants.LISTENERS]))\n #create_listener_flow.add(amphora_driver_tasks.ListenersUpdate(\n # requires=[constants.LOADBALANCER, constants.LISTENERS]))\n # Get VThunder details from database\n create_listener_flow.add(a10_database_tasks.GetVThunderByLoadBalancer(\n requires=constants.LOADBALANCER,\n provides=a10constants.VTHUNDER))\n create_listener_flow.add(handler_virtual_port.ListenersCreate(\n requires=[constants.LOADBALANCER, constants.LISTENERS, a10constants.VTHUNDER]))\n create_listener_flow.add(network_tasks.UpdateVIP(\n requires=constants.LOADBALANCER))\n create_listener_flow.add(database_tasks.\n MarkLBAndListenersActiveInDB(\n requires=[constants.LOADBALANCER,\n constants.LISTENERS]))\n return create_listener_flow",
"def events(self):",
"def __init__(self, _name, _subject):\n super(MyListener, self).__init__(_name, _subject)",
"def listener(self, event):\n print \"TB:@%s arrived event %s\" % (event.time, event) \n informFunction = self._informFunc\n informFunction((event.time, event.state))\n return []",
"def get_create_all_listeners_flow(self):\n create_all_listeners_flow = linear_flow.Flow(\n constants.CREATE_LISTENERS_FLOW)\n create_all_listeners_flow.add(\n database_tasks.GetListenersFromLoadbalancer(\n requires=constants.LOADBALANCER,\n provides=constants.LISTENERS))\n create_all_listeners_flow.add(database_tasks.ReloadLoadBalancer(\n requires=constants.LOADBALANCER_ID,\n provides=constants.LOADBALANCER))\n create_all_listeners_flow.add(amphora_driver_tasks.ListenersUpdate(\n requires=[constants.LOADBALANCER, constants.LISTENERS]))\n create_all_listeners_flow.add(network_tasks.UpdateVIP(\n requires=constants.LOADBALANCER))\n return create_all_listeners_flow",
"def listen(self):\n raise NotImplementedError()",
"def osgi_http_whiteboard_listener(self) -> ConfigNodePropertyString:\n return self._osgi_http_whiteboard_listener",
"def _init_and_add_listeners_to_stage_traits(self):\n self.stages[\"Preprocessing\"].config.tracking_tool = self.stages[\"Diffusion\"].config.tracking_processing_tool\n self.stages[\"Preprocessing\"].config.act_tracking = self.stages[\"Diffusion\"].config.mrtrix_tracking_config.use_act\n self.stages[\"Preprocessing\"].config.gmwmi_seeding = self.stages[\"Diffusion\"].config.mrtrix_tracking_config.seed_from_gmwmi\n self.stages[\"Registration\"].config.tracking_tool = self.stages[\"Diffusion\"].config.tracking_processing_tool\n self.stages[\"Registration\"].config.act_tracking = self.stages[\"Diffusion\"].config.mrtrix_tracking_config.use_act\n self.stages[\"Registration\"].config.gmwmi_seeding = self.stages[\"Diffusion\"].config.mrtrix_tracking_config.seed_from_gmwmi\n\n self.stages[\"Connectome\"].config.on_trait_change(\n self.update_vizualization_layout, \"circular_layout\"\n )\n self.stages[\"Connectome\"].config.on_trait_change(\n self.update_vizualization_logscale, \"log_visualization\"\n )\n self.stages[\"Diffusion\"].config.on_trait_change(\n self.update_outputs_recon, \"recon_processing_tool\"\n )\n self.stages[\"Diffusion\"].config.on_trait_change(\n self.update_tracking_tool, \"tracking_processing_tool\"\n )\n self.stages[\"Diffusion\"].config.mrtrix_tracking_config.on_trait_change(\n self.update_preprocessing_act, \"use_act\"\n )\n self.stages[\"Diffusion\"].config.dipy_tracking_config.on_trait_change(\n self.update_preprocessing_act, \"use_act\"\n )\n self.stages[\"Diffusion\"].config.mrtrix_tracking_config.on_trait_change(\n self.update_preprocessing_gmwmi, \"seed_from_gmwmi\"\n )",
"def listener():\n rospy.init_node('demo_gui', anonymous=True)\n rospy.Subscriber(\"classification\", String, callback)",
"def listener_arn(self) -> Optional[str]:\n return pulumi.get(self, \"listener_arn\")",
"def _setup_events(conf):\n events = {}\n for name in conf.keys():\n events[name] = Event(name=name)\n for listener in conf[name]:\n action = 'run'\n if ':' in listener:\n listener, action = listener.rsplit(':')\n events[name].add_listener(listener, action)\n\n # Add events to module scope.\n globals().update(events)",
"def on_notify(self, name):\r\n pass",
"def listener_id(self) -> str:\n return pulumi.get(self, \"listener_id\")",
"def listener_id(self) -> str:\n return pulumi.get(self, \"listener_id\")",
"def listener_id(self) -> str:\n return pulumi.get(self, \"listener_id\")",
"def create_listeners(self):\n target_groups_config = self.get_target_groups_config()\n balancer_arn = self.get_balancer_arn()\n response_data = {}\n\n for short_name in target_groups_config.keys():\n target_group_name = self.get_target_group_name(short_name)\n\n response = self.client.create_listener(\n LoadBalancerArn=balancer_arn,\n DefaultActions=[\n {\n 'Type': 'forward',\n 'TargetGroupArn': self.get_target_group_arn(short_name)\n }\n ],\n **target_groups_config[short_name],\n )\n assert response['ResponseMetadata']['HTTPStatusCode'] == 200\n\n self.logger.info('Target group {group} bound to {balancer} load balancer.'.format(\n group=target_group_name,\n balancer=self.get_balancer_name(),\n ))\n response_data[target_group_name] = response['Listeners']\n\n return response_data",
"def AddListener(self, listener):\n pass",
"def listen(self) -> None:\n raise NotImplementedError",
"def get_event_listeners(self) -> Dict[str, int]:\n\n return {key: len(self.event_listeners[key]) for key in self.event_listeners}"
] |
[
"0.70811313",
"0.62785274",
"0.60060024",
"0.5958816",
"0.59439224",
"0.58731776",
"0.5746067",
"0.5714471",
"0.5663927",
"0.56183153",
"0.56144804",
"0.5483228",
"0.5462865",
"0.53911906",
"0.5357568",
"0.53405404",
"0.5337399",
"0.5294675",
"0.52753985",
"0.5265671",
"0.5254623",
"0.5248317",
"0.52179146",
"0.5191963",
"0.5191963",
"0.5191963",
"0.5142358",
"0.5118056",
"0.51143926",
"0.5112547"
] |
0.6852199
|
1
|
Creates fully operational load balancer setup for the current environment.
|
def create(ctx):
    create_target_groups(ctx)
    create_balancer(ctx)
    create_listeners(ctx)

    ctx.info('Load balancers setup completed.')
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def create_loadbalancer(self, context, lb):\n super(ArrayDeviceDriverV2, self).create_loadbalancer(context, lb)\n deployment_model = self._get_setting(\n lb.tenant_id, \"lbaas_settings\", \"deployment_model\"\n )\n if deployment_model == \"PER_LOADBALANCER\":\n self.update_loadbalancer(context, lb, None)",
"def create_balancer(self):\n app_env = self.get_current_env()\n balancer_name = self.get_balancer_name()\n subnet_ids = self.get_subnet_ids()\n\n response = self.client.create_load_balancer(\n Name=balancer_name,\n Subnets=subnet_ids,\n SecurityGroups=[self.get_security_group_id(self.get_security_group_short_name())],\n Scheme='internet-facing',\n Tags=[\n {\n 'Key': 'chops-aws-project',\n 'Value': self.get_aws_project_name(),\n },\n {\n 'Key': 'environment',\n 'Value': app_env,\n },\n ],\n Type='application',\n IpAddressType='ipv4',\n )\n assert response['ResponseMetadata']['HTTPStatusCode'] == 200\n\n return response['LoadBalancers'][0]",
"def create_balancer(ctx):\n if not self.balancer_exists():\n data = self.create_balancer()\n ctx.info('Successfully created load balancer {}:'.format(self.get_balancer_name()))\n ctx.pp.pprint(data)\n else:\n ctx.info('Load balancer {} already exists, nothing to create.'.format(\n self.get_balancer_name()\n ))",
"def ensure_load_balancer_created(vpc, security_group, subnet1, subnet2, target_group_arn, ssl_certificate_arn, environment):\n name = environment + '-load-balancer'\n\n # If it already exists, create returns the existing data\n response = ELB.create_load_balancer(\n Name=name,\n Subnets=[ subnet1.id, subnet2.id ],\n SecurityGroups=[ security_group.id ],\n IpAddressType='dualstack',\n Tags=[\n { 'Key': 'Name', 'Value': name },\n { 'Key': 'Environment', 'Value': environment }\n ]\n )\n\n load_balancer = response['LoadBalancers'][0]\n arn = load_balancer['LoadBalancerArn']\n\n # There seems to be no harm in creating listeners if they already exist\n ELB.create_listener(\n LoadBalancerArn=arn,\n Protocol='HTTP',\n Port=80,\n DefaultActions=[{ 'Type': 'forward', 'TargetGroupArn': target_group_arn } ]\n )\n\n ELB.create_listener(\n LoadBalancerArn=arn,\n Protocol='HTTPS',\n Port=443,\n SslPolicy='ELBSecurityPolicy-TLS-1-2-2017-01',\n Certificates=[ { 'CertificateArn': ssl_certificate_arn } ],\n DefaultActions=[ { 'Type': 'forward', 'TargetGroupArn': target_group_arn } ]\n )\n\n return load_balancer",
"def process_load_balancer_in_dev ( vpc_conn,\n ec2_conn,\n elb_conn,\n cloudwatch_conn,\n r53_conn,\n iam_conn,\n vpc,\n base_name,\n base_topicarn,\n app_name,\n params,\n aws_account_type,\n app_visibility = None,\n public_dns_cname = None,\n public_tcp_ports = [],\n app_tcp_ports = [],\n use_ssl = False,\n ssl_hostname = None\n ) :\n\n if not use_ssl : \n print( \"Do not create load balancer since use_ssl is False\" );\n return (None, None, None, None);\n \n\n if not app_name :\n app_name = params[ 'app-name' ]\n\n # in dev vpc, initialize local variables\n if use_ssl:\n app_visibility = 'PUBLIC'\n\n if not public_dns_cname :\n public_dns_cname = ssl_hostname\n\n if len( public_tcp_ports ) == 0 :\n public_tcp_ports = [443]\n \n if len( app_tcp_ports ) == 0 : \n app_tcp_ports = [8080]\n\n if app_visibility == 'PUBLIC' :\n subnet_type = 'PRIVATE' # Public apps have app LB's that sit private. The PROXY LB is public.\n elif app_visibility == 'HBO' :\n subnet_type = 'PUBLIC' # HBO apps have app LB's that site public.\n elif app_visibility == 'PRIVATE' :\n subnet_type = 'PRIVATE'\n else :\n subnet_type = params[ 'subnet-type' ]\n\n if not public_dns_cname :\n public_dns_cname = params.get( 'public-dns-alias' )\n\n create = params.get( 'create', 'NO' )\n if create == 'YES':\n print \"Creating load balancer security group.\"\n lb_secgrp = find_secgrp(ec2_conn, get_lb_secgrp_name( base_name, app_name ))\n if not lb_secgrp :\n lb_secgrp = create_secgrp( ec2_conn,\n vpc,\n get_lb_secgrp_name( base_name, app_name ),\n 'Controls access to the ' + app_name + ' LB' )\n\n ## set deep as False, because there is no dev nat security group\n remove_all_rules( ec2_conn, [ lb_secgrp ] , deep=False, base_name=base_name)\n\n ## reload the security group after removing the rules\n lb_secgrp = find_secgrp(ec2_conn, get_lb_secgrp_name( base_name, app_name ))\n \n health_check_port = params.get( 'health-check-port', 8080 )\n health_check_url = params.get( 'health-check-url' )\n if not health_check_url :\n health_check_url = '/' + app_name + '/ping.html'\n\n ## Figure out if we need to find the SSL cert.\n ssl_cert_arn = None\n if use_ssl :\n cert = get_aws_ssl_certificate( iam_conn, ssl_cert_name )\n if cert :\n ssl_cert_arn = cert.arn\n else :\n print \"ERROR: Use SSL was specified, but could not find certificate matching host: \" + ssl_cert_name\n sys.exit( 5 )\n\n ## Generate the correct listener rules\n listeners = [ ( 80, 8080, 'http' ) ] # Default listener\n if params.get( 'listener-rules' ) :\n listeners = []\n for listener_rule in params[ 'listener-rules' ] :\n if params[ 'protocol' ] == 'https' :\n if not ssl_cert_arn :\n print \"ERRROR: https protocol specified, but use_ssl was NOT specified.\"\n sys.exit( 5 )\n listeners.append( ( params[ 'incoming-port' ],\n params[ 'outgoing-port' ],\n params[ 'protocol' ],\n ssl_cert_arn) )\n else :\n listeners.append( ( params[ 'incoming-port' ],\n params[ 'outgoing-port' ],\n params[ 'protocol' ] ) )\n\n ##\n ## FIX: There is a bug here where the public ports are supposed to be set on the proxy if\n ## app_visibility is PUBLIC. 
Don't have time to fix/regression test now...\n ##\n elif len( public_tcp_ports ) == len( app_tcp_ports ) and len( public_tcp_ports ) > 0 :\n listeners = []\n for public_port, app_port in zip( public_tcp_ports, app_tcp_ports ) :\n if public_port == 443 :\n if not ssl_cert_arn :\n print \"ERRROR: https protocol specified, but use_ssl was NOT specified.\"\n sys.exit( 5 )\n listeners.append( ( public_port, app_port, 'https', ssl_cert_arn ) )\n else :\n listeners.append( ( public_port, app_port, 'http' ) )\n\n\n ## find subnet in dev vpc.\n ## TODO: should we define subnet-cidr prarameter to get subnet?\n subnets = vpc_conn.get_all_subnets( filters = [ ( \"vpcId\", [ vpc.id ] ) ] ) \n \n\n print \"Creating load balancer.\"\n elb = create_elb( elb_conn,\n get_elb_name( base_name, app_name ),\n subnets,\n listeners,\n lb_secgrp,\n health_check_port,\n health_check_url,\n subnet_type == 'PUBLIC' )\n \n elb = find_elb(elb_conn, elb.name)\n \n if params.get( 'monitors' ) :\n add_monitors_to_elb( cloudwatch_conn, base_name, app_name, base_topicarn, params[ 'monitors' ] )\n\n if subnet_type == 'PUBLIC' :\n print \"Setting public DNS alias for load balancer.\"\n set_dns_cname( r53_conn, public_dns_cname, elb.dns_name )\n else :\n # create dna alias for internal elb in dev vpc.\n dns_alias = create_dns_name( base_name, app_name + '.internal' )\n print \"Configuring DNS name for load balancer: \" + dns_alias\n set_dns_cname( r53_conn, dns_alias, elb.dns_name )\n\n if app_visibility == 'HBO' :\n for port in public_tcp_ports :\n lb_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = port,\n to_port = port,\n cidr_ip = hbo_cidr_list ) \n\n elif app_visibility == 'PUBLIC' :\n print \"Creating public load balancer.\"\n lb_public_name = app_name + '-PB'\n\n lb_public_secgrp = find_secgrp(ec2_conn, get_lb_secgrp_name( base_name, lb_public_name ))\n \n if not lb_public_secgrp :\n lb_public_secgrp = create_secgrp( ec2_conn,\n vpc,\n get_lb_secgrp_name( base_name, lb_public_name ),\n 'Controls access to the ' + lb_public_name + ' load balancer.' 
)\n\n ## set deep as False, because there is no dev nat security group\n remove_all_rules( ec2_conn, [ lb_public_secgrp ], deep=False, base_name=base_name) \n \n ## reload the security group after removing the rules\n lb_public_secgrp = find_secgrp(ec2_conn, get_lb_secgrp_name( base_name, lb_public_name ))\n\n \n for port in public_tcp_ports :\n lb_public_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = port,\n to_port = port,\n cidr_ip = hbo_cidr_list ) \n\n lb_public_listeners = [ ( 80, 80, 'http' ) ]\n if use_ssl :\n lb_public_listeners = [ ( 443, 8080, 'https', ssl_cert_arn ) ]\n\n public_elb = create_elb( elb_conn,\n get_elb_name( base_name, lb_public_name ),\n subnets,\n lb_public_listeners,\n lb_public_secgrp,\n health_check_port,\n health_check_url,\n True )\n add_monitors_to_elb( cloudwatch_conn, base_name, app_name, base_topicarn, proxy_lb_monitor_rules )\n\n if public_dns_cname :\n print \"Setting public DNS alias for load balancer.\"\n set_dns_cname( r53_conn, public_dns_cname, public_elb.dns_name )\n else :\n public_dns_cname = ''\n else :\n elb = find_elb( elb_conn, get_elb_name( base_name, app_name ) )\n print \"Processing load-balancer actions.\"\n for action_param in params.get( 'actions', [] ) :\n if action_param[ 'type' ] == 'RESTART_INSTANCES' :\n restart_elb_instances( ec2_conn, elb_conn, elb, params.get( 'restart-smoothly', 'YES' ) == 'YES' )\n\n lb_secgrp = find_group( ec2_conn, base_name, get_lb_secgrp_type( app_name ) )\n dns_alias = None\n \n lb_public_name = app_name + '-PB' \n lb_public_secgrp = find_secgrp(ec2_conn, get_lb_secgrp_name( base_name, lb_public_name ))\n\n return ( elb, lb_secgrp, dns_alias, lb_public_secgrp )",
"def post(self, request):\n return create_loadbalancer(request)",
"def pre_loadbalancer_pool_create(self, resource_dict):\n pass",
"def create_elb(tag_prefix, web_subnet_by_cidrs, moat_sg_id,\n elb_name=None, s3_logs_bucket=None,\n tls_priv_key=None, tls_fullchain_cert=None,\n region_name=None, dry_run=False):\n if not elb_name:\n elb_name = '%selb' % _clean_tag_prefix(tag_prefix)\n\n elb_client = boto3.client('elbv2', region_name=region_name)\n resp = elb_client.create_load_balancer(\n Name=elb_name,\n Subnets=[subnet['SubnetId'] for subnet in web_subnet_by_cidrs.values()\n if subnet],\n SecurityGroups=[\n moat_sg_id,\n ],\n Scheme='internet-facing',\n Type='application',\n Tags=[{'Key': \"Prefix\", 'Value': tag_prefix}])\n load_balancer = resp['LoadBalancers'][0]\n load_balancer_arn = load_balancer['LoadBalancerArn']\n load_balancer_dns = load_balancer['DNSName']\n LOGGER.info(\"%s found/created application load balancer %s available at %s\",\n tag_prefix, load_balancer_arn, load_balancer_dns)\n\n attributes = [{\n 'Key': 'deletion_protection.enabled',\n 'Value': 'true'\n }, {\n #pylint:disable=line-too-long\n #https://stackoverflow.com/questions/58848623/what-does-alb-consider-a-valid-header-field\n 'Key': 'routing.http.drop_invalid_header_fields.enabled',\n 'Value': 'true'\n }]\n if s3_logs_bucket:\n attributes += [{\n 'Key': 'access_logs.s3.enabled',\n 'Value': 'true'\n }, {\n 'Key': 'access_logs.s3.bucket',\n 'Value': s3_logs_bucket\n }, {\n 'Key': 'access_logs.s3.prefix',\n 'Value': 'var/log/elb'\n }]\n\n update_load_balancer_attributes = False\n resp = elb_client.describe_load_balancer_attributes(\n LoadBalancerArn=load_balancer_arn)\n for attr in attributes:\n for curr_attr in resp['Attributes']:\n if attr['Key'] == curr_attr['Key']:\n if attr['Value'] != curr_attr['Value']:\n update_load_balancer_attributes = True\n break\n if update_load_balancer_attributes:\n resp = elb_client.modify_load_balancer_attributes(\n LoadBalancerArn=load_balancer_arn,\n Attributes=attributes)\n LOGGER.info(\"%s updated attributes for load balancer %s\",\n tag_prefix, load_balancer_arn)\n else:\n LOGGER.info(\"%s found expected attributes for load balancer %s\",\n tag_prefix, load_balancer_arn)\n\n try:\n resp = elb_client.create_listener(\n LoadBalancerArn=load_balancer_arn,\n Protocol='HTTP',\n Port=80,\n DefaultActions=[{\n \"Type\": \"redirect\",\n \"RedirectConfig\": {\n \"Protocol\": \"HTTPS\",\n \"Port\": \"443\",\n \"Host\": \"#{host}\",\n \"Path\": \"/#{path}\",\n \"Query\": \"#{query}\",\n \"StatusCode\": \"HTTP_301\"\n }\n }])\n LOGGER.info(\"%s created HTTP application load balancer listener for %s\",\n tag_prefix, load_balancer_arn)\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'DuplicateListener':\n raise\n LOGGER.info(\"%s found HTTP application load balancer listener for %s\",\n tag_prefix, load_balancer_arn)\n\n # We will need a default TLS certificate for creating an HTTPS listener.\n default_cert_location = None\n resp = elb_client.describe_listeners(\n LoadBalancerArn=load_balancer_arn)\n for listener in resp['Listeners']:\n if listener['Protocol'] == 'HTTPS':\n for certificate in listener['Certificates']:\n if 'IsDefault' not in certificate or certificate['IsDefault']:\n default_cert_location = certificate['CertificateArn']\n LOGGER.info(\"%s found default TLS certificate %s\",\n tag_prefix, default_cert_location)\n break\n if not default_cert_location:\n if tls_priv_key and tls_fullchain_cert:\n resp = _store_certificate(\n tls_fullchain_cert, tls_priv_key,\n tag_prefix=tag_prefix, region_name=region_name,\n dry_run=dry_run)\n 
default_cert_location = resp['CertificateArn']\n else:\n LOGGER.warning(\"default_cert_location is not set and there are no\"\\\n \" tls_priv_key and tls_fullchain_cert either.\")\n\n try:\n resp = elb_client.create_listener(\n LoadBalancerArn=load_balancer_arn,\n Protocol='HTTPS',\n Port=443,\n Certificates=[{'CertificateArn': default_cert_location}],\n DefaultActions=[{\n 'Type': 'fixed-response',\n 'FixedResponseConfig': {\n 'MessageBody': '%s ELB' % tag_prefix,\n 'StatusCode': '200',\n 'ContentType': 'text/plain'\n }\n }])\n LOGGER.info(\n \"%s created HTTPS application load balancer listener for %s\",\n tag_prefix, load_balancer_arn)\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'DuplicateListener':\n raise\n LOGGER.info(\"%s found HTTPS application load balancer listener for %s\",\n tag_prefix, load_balancer_arn)\n\n return load_balancer_arn",
"def pre_loadbalancer_member_create(self, resource_dict):\n pass",
"def bootstrap():\n\n require('environment', provided_by=env.environments)\n sudo('mkdir -p %(root)s' % env, user=env.deploy_user)\n clone_repo()\n setup_dirs()\n link_config_files()\n update_services()\n create_virtualenv()\n update_requirements()\n create_local_settings()",
"def get_create_load_balancer_flow(self, load_balancer_id, topology, project_id,\n listeners=None, pools=None):\n\n f_name = constants.CREATE_LOADBALANCER_FLOW\n lb_create_flow = linear_flow.Flow(f_name)\n lb_create_flow.add(lifecycle_tasks.LoadBalancerIDToErrorOnRevertTask(\n requires=constants.LOADBALANCER_ID))\n lb_create_flow.add(vthunder_tasks.VthunderInstanceBusy(\n requires=a10constants.COMPUTE_BUSY))\n\n lb_create_flow.add(database_tasks.ReloadLoadBalancer(\n requires=constants.LOADBALANCER_ID,\n provides=constants.LOADBALANCER))\n\n lb_create_flow.add(a10_database_tasks.CheckExistingVthunderTopology(\n requires=constants.LOADBALANCER,\n inject={\"topology\": topology}))\n\n # Attaching vThunder to LB in database\n if topology == constants.TOPOLOGY_ACTIVE_STANDBY:\n lb_create_flow.add(*self._create_active_standby_topology())\n LOG.info(\"TOPOLOGY === \" + str(topology))\n elif topology == constants.TOPOLOGY_SINGLE:\n lb_create_flow.add(*self._create_single_topology())\n LOG.info(\"TOPOLOGY === \" + str(topology))\n else:\n LOG.error(\"Unknown topology: %s. Unable to build load balancer.\",\n topology)\n raise exceptions.InvalidTopology(topology=topology)\n\n # IMP: Now creating vThunder config here\n post_amp_prefix = constants.POST_LB_AMP_ASSOCIATION_SUBFLOW\n vthunder = self._vthunder_repo.get_vthunder_by_project_id(db_apis.get_session(),\n project_id)\n lb_create_flow.add(a10_database_tasks.GetFlavorData(\n rebind={a10constants.LB_RESOURCE: constants.LOADBALANCER},\n provides=constants.FLAVOR_DATA))\n lb_create_flow.add(\n self.get_post_lb_vthunder_association_flow(\n post_amp_prefix, load_balancer_id, topology, vthunder,\n mark_active=(not listeners)))\n lb_create_flow.add(a10_database_tasks.CountLoadbalancersWithFlavor(\n requires=(constants.LOADBALANCER, a10constants.VTHUNDER),\n provides=a10constants.LB_COUNT_FLAVOR))\n lb_create_flow.add(vthunder_tasks.AllowL2DSR(\n requires=(constants.SUBNET, constants.AMPHORA,\n a10constants.LB_COUNT_FLAVOR, constants.FLAVOR_DATA)))\n lb_create_flow.add(nat_pool_tasks.NatPoolCreate(\n requires=(constants.SUBNET, constants.LOADBALANCER,\n a10constants.VTHUNDER, constants.FLAVOR_DATA)))\n lb_create_flow.add(virtual_server_tasks.CreateVirtualServerTask(\n requires=(constants.LOADBALANCER, a10constants.VTHUNDER,\n constants.FLAVOR_DATA)))\n\n if pools:\n for pool in pools:\n lb_create_flow.add(self._pool_flows.get_fully_populated_create_pool_flow(\n topology, pool, vthunder_flow=True))\n\n if listeners:\n sf_name = a10constants.FULLY_POPULATED_LISTENER_CREATE\n for listener in listeners:\n lb_create_flow.add(\n self._listener_flows.get_vthunder_fully_populated_create_listener_flow(\n topology, listener))\n\n lb_create_flow.add(database_tasks.MarkLBActiveInDB(\n name=sf_name + '-' + constants.MARK_LB_ACTIVE_INDB,\n mark_subobjects=True,\n requires=constants.LOADBALANCER))\n\n lb_create_flow.add(vthunder_tasks.WriteMemory(\n requires=a10constants.VTHUNDER))\n lb_create_flow.add(a10_database_tasks.SetThunderUpdatedAt(\n requires=a10constants.VTHUNDER))\n\n return lb_create_flow",
"def process_load_balancer ( vpc_conn,\n ec2_conn,\n elb_conn,\n cloudwatch_conn,\n r53_conn,\n iam_conn,\n vpc,\n base_name,\n base_topicarn,\n app_name,\n params,\n aws_account_type,\n app_visibility = None,\n public_dns_cname = None,\n public_tcp_ports = [],\n app_tcp_ports = [],\n use_ssl = False,\n ssl_hostname = None\n ) :\n\n if not app_name :\n app_name = params[ 'app-name' ]\n\n if app_visibility == 'PUBLIC' :\n subnet_type = 'PRIVATE' # Public apps have app LB's that sit private. The PROXY LB is public.\n elif app_visibility == 'HBO' :\n subnet_type = 'PUBLIC' # HBO apps have app LB's that site public.\n elif app_visibility == 'PRIVATE' :\n subnet_type = 'PRIVATE'\n else :\n subnet_type = params[ 'subnet-type' ]\n\n if not public_dns_cname :\n public_dns_cname = params.get( 'public-dns-alias' )\n\n create = params.get( 'create', 'NO' )\n if create == 'YES':\n print \"Creating load balancer security group.\"\n lb_secgrp = find_secgrp(ec2_conn, get_lb_secgrp_name( base_name, app_name ))\n if not lb_secgrp :\n lb_secgrp = create_secgrp( ec2_conn,\n vpc,\n get_lb_secgrp_name( base_name, app_name ),\n 'Controls access to the ' + app_name + ' LB' )\n remove_all_rules( ec2_conn, [ lb_secgrp ] , deep=True, base_name=base_name)\n ## reload the security group after removing the rules\n lb_secgrp = find_secgrp(ec2_conn, get_lb_secgrp_name( base_name, app_name ))\n \n health_check_port = params.get( 'health-check-port', 8080 )\n health_check_url = params.get( 'health-check-url' )\n if not health_check_url :\n health_check_url = '/' + app_name + '/ping.html'\n\n ## Figure out if we need to find the SSL cert.\n ssl_cert_arn = None\n if use_ssl :\n cert = get_aws_ssl_certificate( iam_conn, ssl_cert_name )\n if cert :\n ssl_cert_arn = cert.arn\n else :\n print \"ERROR: Use SSL was specified, but could not find certificate matching host: \" + ssl_cert_name\n sys.exit( 5 )\n\n ## Generate the correct listener rules\n listeners = [ ( 80, 8080, 'http' ) ] # Default listener\n if params.get( 'listener-rules' ) :\n listeners = []\n for listener_rule in params[ 'listener-rules' ] :\n if params[ 'protocol' ] == 'https' :\n if not ssl_cert_arn :\n print \"ERRROR: https protocol specified, but use_ssl was NOT specified.\"\n sys.exit( 5 )\n listeners.append( ( params[ 'incoming-port' ],\n params[ 'outgoing-port' ],\n params[ 'protocol' ],\n ssl_cert_arn) )\n else :\n listeners.append( ( params[ 'incoming-port' ],\n params[ 'outgoing-port' ],\n params[ 'protocol' ] ) )\n ##\n ## FIX: There is a bug here where the public ports are supposed to be set on the proxy if\n ## app_visibility is PUBLIC. 
Don't have time to fix/regression test now...\n ##\n elif len( public_tcp_ports ) == len( app_tcp_ports ) and len( public_tcp_ports ) > 0 :\n listeners = []\n for public_port, app_port in zip( public_tcp_ports, app_tcp_ports ) :\n if public_port == 443 :\n if not ssl_cert_arn :\n print \"ERRROR: https protocol specified, but use_ssl was NOT specified.\"\n sys.exit( 5 )\n listeners.append( ( public_port, app_port, 'https', ssl_cert_arn ) )\n else :\n listeners.append( ( public_port, app_port, 'http' ) )\n\n\n print \"Creating load balancer.\"\n elb = create_elb( elb_conn,\n get_elb_name( base_name, app_name ),\n get_vpc_subnets( vpc_conn, vpc, subnet_type ),\n listeners,\n lb_secgrp,\n health_check_port,\n health_check_url,\n subnet_type == 'PUBLIC' )\n \n elb = find_elb(elb_conn, elb.name)\n \n if params.get( 'monitors' ) :\n add_monitors_to_elb( cloudwatch_conn, base_name, app_name, base_topicarn, params[ 'monitors' ] )\n\n if subnet_type == 'PUBLIC' :\n print \"Setting public DNS alias for load balancer.\"\n set_dns_cname( r53_conn, public_dns_cname, elb.dns_name )\n else :\n dns_alias = create_internal_elb_dns_name( base_name, app_name )\n print \"Configuring DNS name for load balancer: \" + dns_alias\n set_dns_cname( r53_conn, dns_alias, elb.dns_name )\n\n if app_visibility == 'HBO' :\n for port in public_tcp_ports :\n lb_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = port,\n to_port = port,\n cidr_ip = hbo_cidr_list ) \n\n elif app_visibility == 'PUBLIC' :\n print \"Creating proxy load balancer.\"\n proxy_type = app_name + '-PX'\n proxy_secgrp = find_secgrp(ec2_conn, get_secgrp_name( base_name, proxy_type ))\n if not proxy_secgrp :\n proxy_secgrp = create_secgrp( ec2_conn,\n vpc,\n get_secgrp_name( base_name, proxy_type ),\n 'Controls access to the ' + proxy_type + ' servers.' )\n \n lb_proxy_secgrp = find_secgrp(ec2_conn, get_lb_secgrp_name( base_name, proxy_type ))\n \n if not lb_proxy_secgrp :\n lb_proxy_secgrp = create_secgrp( ec2_conn,\n vpc,\n get_lb_secgrp_name( base_name, proxy_type ),\n 'Controls access to the ' + proxy_type + ' load balancer.' 
)\n\n remove_all_rules( ec2_conn, [ lb_proxy_secgrp, proxy_secgrp ], deep=True, base_name=base_name) \n ## reload the security group after removing the rules\n lb_proxy_secgrp = find_secgrp(ec2_conn, get_lb_secgrp_name( base_name, proxy_type ))\n proxy_secgrp = find_secgrp(ec2_conn, get_secgrp_name( base_name, proxy_type ))\n\n \n ##\n ## FIX: In reality, we need to set the group rules between lb_proxy and proxy to match\n ## the listener ports that were passed in/configured.\n ##\n grant_ssh_access( ec2_conn, [ proxy_secgrp ], find_group( ec2_conn, base_name, 'NAT' ) )\n \n \n ## proxy server port is always 80\n ## updated by yliu, 2014/6/13\n ##if use_ssl :\n ## proxy_port = 443\n ##else :\n ## proxy_port = 80\n proxy_port = 80\n\n ## backend elb port that the proxy server passes request to \n if use_ssl :\n proxy_to_elb_port = 443\n else :\n proxy_to_elb_port = 80\n\n grant_grp_access( ec2_conn, [ lb_proxy_secgrp ], proxy_secgrp, proxy_port )\n grant_grp_access( ec2_conn, [ proxy_secgrp ], lb_secgrp, proxy_to_elb_port )\n for port in public_tcp_ports :\n lb_proxy_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = port,\n to_port = port,\n cidr_ip = all_ip_cidr ) \n\n proxy_listeners = [ ( 80, 80, 'http' ) ]\n if use_ssl :\n proxy_listeners = [ ( 443, proxy_port, 'https', ssl_cert_arn ) ]\n\n proxy_elb = create_elb( elb_conn,\n get_elb_name( base_name, proxy_type ),\n get_vpc_subnets( vpc_conn, vpc, 'PUBLIC' ),\n proxy_listeners,\n lb_proxy_secgrp,\n proxy_port,\n '/robots.txt',\n True )\n add_monitors_to_elb( cloudwatch_conn, base_name, app_name, base_topicarn, proxy_lb_monitor_rules )\n\n if public_dns_cname :\n print \"Setting public DNS alias for load balancer.\"\n set_dns_cname( r53_conn, public_dns_cname, proxy_elb.dns_name )\n else :\n public_dns_cname = ''\n\n print \"Creating proxy instances.\"\n proxy_ami = get_ami_by_name( ec2_conn, proxy_ami_name )\n subnets = get_vpc_subnets( vpc_conn, vpc, 'PRIVATE' )\n\n ## direct proxy server to access backend elb over given protocol\n ## added by yliu, 2014/6/13\n if use_ssl :\n app_elb_protocol = 'https'\n else :\n app_elb_protocol = 'http'\n \n proxy_userdata = get_proxy_userdata( public_dns_cname, elb.dns_name, app_elb_protocol, app_name )\n proxy_instances = []\n \n proxy_keypair = get_keypair_name( aws_account_type, vpc.region.name, \"APACHE\" )\n \n for subnet in subnets : \n instance = launch_instance_vpc( ec2_conn,\n proxy_ami,\n base_name = base_name,\n instance_type = proxy_type,\n keypair = proxy_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = proxy_secgrp.id ,\n subnet_id = subnet.id,\n user_data = proxy_userdata,\n public_ip = False )\n proxy_instances.append( instance )\n\n print \"Setting alarms on the proxy\"\n add_monitors_to_instance( cloudwatch_conn, base_name, instance.id, 'PROXY', base_topicarn, proxy_monitor_rules )\n \n proxy_instance_ids = [ i.id for i in proxy_instances ]\n\n print \"Waiting for proxy instances to be ready\"\n aws_waits( ec2_conn.get_only_instances, proxy_instance_ids )\n\n print \"Adding the new proxy instances into the load balancer.\"\n \n status = swap_elb_instances( elb_conn = elb_conn,\n elb = proxy_elb,\n new_instance_ids = proxy_instance_ids,\n terminate_old_instances = True,\n ec2_conn = ec2_conn,\n cloudwatch_conn = cloudwatch_conn,\n swap_smoothly = False )\n\n else :\n elb = find_elb( elb_conn, get_elb_name( base_name, app_name ) )\n print \"Processing load-balancer actions.\"\n for action_param in params.get( 'actions', [] ) :\n if action_param[ 'type' ] == 
'RESTART_INSTANCES' :\n restart_elb_instances( ec2_conn, elb_conn, elb, params.get( 'restart-smoothly', 'YES' ) == 'YES' )\n\n lb_secgrp = find_group( ec2_conn, base_name, get_lb_secgrp_type( app_name ) )\n dns_alias = None\n\n return ( elb, lb_secgrp, dns_alias )",
"def create_loadbalancer(self, context, loadbalancer, driver_name):\n LOG.info(\"Received request 'Create Loadbalancer' for LB:%(lb)s \"\n \"with driver:%(driver_name)s\",\n {'lb': loadbalancer['id'],\n 'driver_name': driver_name})\n arg_dict = {'context': context,\n lb_const.LOADBALANCER: loadbalancer,\n 'driver_name': driver_name\n }\n self._send_event(lb_const.EVENT_CREATE_LOADBALANCER_V2, arg_dict,\n serialize=True, binding_key=loadbalancer['id'],\n key=loadbalancer['id'])",
"def create_loadbalancer(call=None, kwargs=None):\n if call != \"function\":\n raise SaltCloudSystemExit(\n \"The create_address function must be called with -f or --function.\"\n )\n\n if kwargs is None:\n kwargs = {}\n\n conn = get_conn()\n datacenter_id = get_datacenter_id()\n loadbalancer = LoadBalancer(\n name=kwargs.get(\"name\"), ip=kwargs.get(\"ip\"), dhcp=kwargs.get(\"dhcp\")\n )\n\n response = conn.create_loadbalancer(datacenter_id, loadbalancer)\n _wait_for_completion(conn, response, 60, \"loadbalancer\")\n\n return response",
"def create(ctx, iface, resource_config, params, **_):\n\n lb_name = params.get(LB_NAME)\n if not lb_name:\n targs = \\\n utils.find_rels_by_node_type(\n ctx.instance,\n LB_TYPE)\n lb_name = \\\n targs[0].target.instance.runtime_properties[\n EXTERNAL_RESOURCE_ID]\n params.update({LB_NAME: lb_name})\n\n ctx.instance.runtime_properties[LB_NAME] = \\\n lb_name\n\n # Actually create the resource\n iface.create(params)",
"def create_listeners(ctx):\n data = self.create_listeners()\n ctx.info('Created listeners for load balancer {}:'.format(\n self.get_balancer_name()\n ))\n ctx.pp.pprint(data)",
"def host_bootstrap(args):\n name = args.name\n host = args.host\n port = args.port\n user = args.user\n protocol = args.protocol\n url = args.url\n pool = args.pool\n poolpath = args.poolpath\n baseconfig = Kbaseconfig(client=args.client, debug=args.debug)\n baseconfig.bootstrap(name, host, port, user, protocol, url, pool, poolpath)",
"def _configure_manager(self):\n self._manager = CloudLoadBalancerManager(self,\n resource_class=CloudLoadBalancer,\n response_key=\"loadBalancer\", uri_base=\"loadbalancers\")",
"def create(self, params):\n return self.make_client_call('create_load_balancer_policy', params)",
"def deploy_instance(self, pool):\n\n if vlb_db.get_vlb_from_pool_id(pool['pool']['id']) is not None:\n LOG.debug('This is an error')\n return\n name = 'vlb_{0}'.format(os.urandom(6).encode('hex'))\n nova_client = self._get_nova_client()\n neutron_client = self._get_neutron_client()\n\n subnet = neutron_client.show_subnet(pool['pool']['subnet_id'])\n\n LOG.debug('brocade_vlb_driver::deploy_instance %s' % name)\n vLb = nova_client.servers.create(name, self.conf.brocade_vlb.image_id,\n self.conf.brocade_vlb.flavor_id,\n nics=[ {'net-id': self.conf.brocade_vlb.management_network_id },\n {'net-id': subnet['subnet']['network_id'] }]\n )\n\n def _vLb_active():\n while True:\n try:\n instance = nova_client.servers.get(vLb.id)\n except Exception:\n yield self.conf.brocade_vlb.nova_poll_interval\n continue\n LOG.info(_(\"vLB Driver::Load Balancer instance status: %s\")\n %instance.status)\n if instance.status not in ('ACTIVE', 'ERROR'):\n yield self.conf.brocade_vlb.nova_poll_interval\n elif instance.status == 'ERROR':\n raise InstanceSpawnError()\n else:\n break\n self._wait(_vLb_active, \n timeout=self.conf.brocade_vlb.nova_spawn_timeout)\n LOG.info(_(\"vLB Driver::Waiting for the vLB app to initialize %s\") %\n vLb.id)\n\n mgmt_ip = self._get_address(vLb,\n self.conf.brocade_vlb.management_network_id)\n data_ip = self._get_address(vLb, subnet['subnet']['network_id'])\n vlb_db.create_vlb(pool['pool']['id'], vLb.id, vLb.tenant_id, vLb.name,\n data_ip, mgmt_ip)\n\n\t# Now wait for vlb to boot\n def _vLb_soap():\n while True:\n try:\n impl = driver_impl.BrocadeAdxDeviceDriverImpl(\n self.conf.brocade_vlb.username,\n self.conf.brocade_vlb.password,\n mgmt_ip)\n impl.create_pool(pool['pool'])\n impl.ifconfig_e1(data_ip,subnet['subnet']['cidr'])\n impl.create_static_route('0.0.0.0','0',subnet['subnet']['gateway_ip'])\n impl.enable_source_nat()\n except Exception as e:\n LOG.debug('vLB Driver::Load Balancer instance %s' % e)\n yield self.conf.brocade_vlb.vlb_poll_interval\n continue\n break\n self._wait(_vLb_soap, timeout=self.conf.brocade_vlb.vlb_boot_timeout)\n\n LOG.info(_(\"vLB Driver:vLB successfully deployed and configured\"))",
"def get_create_rack_vthunder_load_balancer_flow(\n self, vthunder_conf, device_dict, topology, listeners=None, pools=None):\n\n f_name = constants.CREATE_LOADBALANCER_FLOW\n lb_create_flow = linear_flow.Flow(f_name)\n\n lb_create_flow.add(lifecycle_tasks.LoadBalancerIDToErrorOnRevertTask(\n requires=constants.LOADBALANCER_ID))\n lb_create_flow.add(database_tasks.ReloadLoadBalancer(\n requires=constants.LOADBALANCER_ID,\n provides=constants.LOADBALANCER))\n\n # device-name flavor support\n lb_create_flow.add(a10_database_tasks.GetFlavorData(\n rebind={a10constants.LB_RESOURCE: constants.LOADBALANCER},\n provides=constants.FLAVOR_DATA))\n lb_create_flow.add(vthunder_tasks.GetVthunderConfByFlavor(\n inject={a10constants.VTHUNDER_CONFIG: vthunder_conf,\n a10constants.DEVICE_CONFIG_DICT: device_dict},\n requires=(constants.LOADBALANCER, a10constants.VTHUNDER_CONFIG,\n a10constants.DEVICE_CONFIG_DICT, constants.FLAVOR_DATA),\n provides=(a10constants.VTHUNDER_CONFIG, a10constants.USE_DEVICE_FLAVOR)))\n lb_create_flow.add(vthunder_tasks.HandleACOSPartitionChange(\n requires=(constants.LOADBALANCER, a10constants.VTHUNDER_CONFIG),\n provides=a10constants.VTHUNDER_CONFIG))\n lb_create_flow.add(\n a10_database_tasks.CheckExistingThunderToProjectMappedEntries(\n requires=(\n constants.LOADBALANCER,\n a10constants.VTHUNDER_CONFIG,\n a10constants.USE_DEVICE_FLAVOR)))\n lb_create_flow.add(\n self.vthunder_flows.get_rack_vthunder_for_lb_subflow(\n vthunder_conf=a10constants.VTHUNDER_CONFIG,\n prefix=constants.ROLE_STANDALONE,\n role=constants.ROLE_STANDALONE))\n post_amp_prefix = constants.POST_LB_AMP_ASSOCIATION_SUBFLOW\n lb_create_flow.add(\n self.get_post_lb_rack_vthunder_association_flow(\n post_amp_prefix, topology, mark_active=(not listeners)))\n lb_create_flow.add(nat_pool_tasks.NatPoolCreate(\n requires=(constants.SUBNET, constants.LOADBALANCER,\n a10constants.VTHUNDER, constants.FLAVOR_DATA)))\n lb_create_flow.add(virtual_server_tasks.CreateVirtualServerTask(\n requires=(constants.LOADBALANCER, a10constants.VTHUNDER,\n constants.FLAVOR_DATA),\n provides=a10constants.STATUS))\n\n if pools:\n for pool in pools:\n lb_create_flow.add(self._pool_flows.get_fully_populated_create_pool_flow(\n topology, pool, vthunder_conf=vthunder_conf, device_dict=device_dict))\n\n if listeners:\n sf_name = a10constants.FULLY_POPULATED_LISTENER_CREATE\n for listener in listeners:\n lb_create_flow.add(\n self._listener_flows.get_rack_fully_populated_create_listener_flow(\n topology, listener))\n\n lb_create_flow.add(database_tasks.MarkLBActiveInDB(\n name=sf_name + '-' + constants.MARK_LB_ACTIVE_INDB,\n mark_subobjects=True,\n requires=constants.LOADBALANCER))\n\n lb_create_flow.add(vthunder_tasks.WriteMemory(\n requires=a10constants.VTHUNDER))\n lb_create_flow.add(a10_database_tasks.SetThunderUpdatedAt(\n requires=a10constants.VTHUNDER))\n return lb_create_flow",
"def prod_load_balancer_running(self) -> None:\n self.assertTrue(self.validate_load_balancer(is_prod=self.prod_env))",
"def create_gwlb(gwlb_name, subnet_id_list):\n logging.info(f\"Creating gateway load balancer: {gwlb_name}\")\n waiter = elbv2.get_waiter('load_balancer_available')\n try:\n response = elbv2.create_load_balancer(\n Name=gwlb_name,\n Subnets=subnet_id_list,\n Tags=[{'Key': 'Name', 'Value': gwlb_name}],\n Type='gateway'\n )\n gwlb_arn = response['LoadBalancers'][0]['LoadBalancerArn']\n logging.info(\"Waiting for GWLB's state to change to available\")\n waiter.wait(\n LoadBalancerArns=[gwlb_arn],\n WaiterConfig={\n 'Delay': 15,\n 'MaxAttempts': 40\n }\n )\n return response, gwlb_arn\n except ClientError as e:\n logging.error(e)\n return None",
"def _create_body(self, name, port=None, protocol=None, nodes=None,\n virtual_ips=None, algorithm=None, halfClosed=None, accessList=None,\n connectionLogging=None, connectionThrottle=None, healthMonitor=None,\n metadata=None, timeout=None, sessionPersistence=None,\n httpsRedirect=None):\n required = (virtual_ips, port, protocol)\n if not all(required):\n raise exc.MissingLoadBalancerParameters(\"Load Balancer creation \"\n \"requires at least one virtual IP, a protocol, and a port.\")\n nodes = utils.coerce_to_list(nodes)\n virtual_ips = utils.coerce_to_list(virtual_ips)\n bad_conditions = [node.condition for node in nodes\n if node.condition.upper() not in (\"ENABLED\", \"DISABLED\")]\n if bad_conditions:\n raise exc.InvalidNodeCondition(\"Nodes for new load balancer must be \"\n \"created in either 'ENABLED' or 'DISABLED' condition; \"\n \"received the following invalid conditions: %s\" %\n \", \".join(set(bad_conditions)))\n node_dicts = [nd.to_dict() for nd in nodes]\n vip_dicts = [vip.to_dict() for vip in virtual_ips]\n body = {\"loadBalancer\": {\n \"name\": name,\n \"port\": port,\n \"protocol\": protocol,\n \"nodes\": node_dicts,\n \"virtualIps\": vip_dicts,\n \"algorithm\": algorithm or \"RANDOM\",\n \"halfClosed\": halfClosed,\n \"accessList\": accessList,\n \"connectionLogging\": connectionLogging,\n \"connectionThrottle\": connectionThrottle,\n \"healthMonitor\": healthMonitor,\n \"metadata\": metadata,\n \"timeout\": timeout,\n \"sessionPersistence\": sessionPersistence,\n \"httpsRedirect\": httpsRedirect,\n }}\n return body",
"def add(env, identifier, **args):\n\n mgr = SoftLayer.LoadBalancerManager(env.client)\n uuid, _ = mgr.get_lbaas_uuid_id(identifier)\n\n new_listener = {\n 'backendPort': args.get('backport'),\n 'backendProtocol': args.get('backprotocol') if args.get('backprotocol') else args.get('frontprotocol'),\n 'frontendPort': args.get('frontport'),\n 'frontendProtocol': args.get('frontprotocol'),\n 'loadBalancingMethod': args.get('method'),\n 'maxConn': args.get('connections', None),\n 'sessionType': args.get('sticky'),\n 'tlsCertificateId': args.get('sslcert')\n }\n\n try:\n mgr.add_lb_listener(uuid, new_listener)\n click.secho(\"Success\", fg='green')\n except SoftLayerAPIError as exception:\n click.secho(f\"ERROR: {exception.faultString}\", fg='red')",
"def create(\n self,\n name, # type: str\n load_balancer_type, # type: LoadBalancerType\n algorithm=None, # type: Optional[LoadBalancerAlgorithm]\n services=None, # type: Optional[List[LoadBalancerService]]\n targets=None, # type: Optional[List[LoadBalancerTarget]]\n labels=None, # type: Optional[Dict[str, str]]\n location=None, # type: Optional[Location]\n network_zone=None, # type: Optional[str]\n public_interface=None, # type: Optional[bool]\n network=None # type: Optional[Union[Network,BoundNetwork]]\n ):\n # type: (...) -> CreateLoadBalancerResponse:\n data = {\"name\": name, \"load_balancer_type\": load_balancer_type.id_or_name}\n if network is not None:\n data[\"network\"] = network.id\n if public_interface is not None:\n data[\"public_interface\"] = public_interface\n if labels is not None:\n data[\"labels\"] = labels\n if algorithm is not None:\n data[\"algorithm\"] = {\"type\": algorithm.type}\n if services is not None:\n service_list = []\n for service in services:\n service_list.append(self.get_service_parameters(service))\n data[\"services\"] = service_list\n\n if targets is not None:\n target_list = []\n for target in targets:\n target_data = {\n \"type\": target.type,\n \"use_private_ip\": target.use_private_ip\n }\n if target.type == \"server\":\n target_data['server'] = {\"id\": target.server.id}\n elif target.type == \"label_selector\":\n target_data['label_selector'] = {\"selector\": target.label_selector.selector}\n elif target.type == \"ip\":\n target_data['ip'] = {\"ip\": target.ip.ip}\n target_list.append(target_data)\n\n data[\"targets\"] = target_list\n\n if network_zone is not None:\n data[\"network_zone\"] = network_zone\n if location is not None:\n data[\"location\"] = location.id_or_name\n\n response = self._client.request(url=\"/load_balancers\", method=\"POST\", json=data)\n\n return CreateLoadBalancerResponse(load_balancer=BoundLoadBalancer(self, response[\"load_balancer\"]),\n action=BoundAction(self._client.actions, response['action']))",
"def bootstrap(environment: Environment):\n pass",
"def test_nodeless(self):\n template = self._set_template(self.lb_template,\n nodes=[])\n expected_body = copy.deepcopy(self.expected_body)\n expected_body['nodes'] = []\n rsrc, fake_lb = self._mock_loadbalancer(\n template, self.lb_name, expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.VerifyAll()",
"def create_bridge(self, num_ifaces: int) -> Bridge:\n testutils.log.info(\n \"---------------------- Creating a namespace ----------------------\",\n )\n random.seed(datetime.now().timestamp())\n bridge = Bridge(uuid.uuid4())\n result = bridge.create_virtual_env(num_ifaces)\n if result != testutils.SUCCESS:\n bridge.ns_del()\n testutils.log.error(\n \"---------------------- Namespace creation failed ----------------------\",\n )\n raise SystemExit(\"Unable to create the namespace environment.\")\n testutils.log.info(\n \"---------------------- Namespace successfully created ----------------------\"\n )\n return bridge",
"def setup():\n LOG.info(\"Creating API.\")\n api = Flask(__name__)\n LOG.info(\"Registering blueprints.\")\n api.register_blueprint(health_check_blueprint.setup())\n LOG.info(\"Registering error handlers.\")\n api.register_error_handler(Exception, default_error_handler)\n LOG.info(\"Setting up config variables.\")\n api.config['PROPAGATE_EXCEPTIONS'] = True\n return api"
] |
[
"0.7192584",
"0.70757085",
"0.693216",
"0.63125825",
"0.6119484",
"0.6114502",
"0.60211533",
"0.6003069",
"0.5949105",
"0.59401405",
"0.59334785",
"0.5905166",
"0.5893923",
"0.58604234",
"0.5814833",
"0.57674474",
"0.5761494",
"0.5756742",
"0.5689999",
"0.56665444",
"0.5657202",
"0.5618832",
"0.5611582",
"0.5607613",
"0.5602878",
"0.54735357",
"0.5468118",
"0.5427254",
"0.5417177",
"0.54127836"
] |
0.7706572
|
0
|
Deletes load balancer for current environment and all related resources.
|
def delete(ctx):
    delete_listeners(ctx)
    delete_balancer(ctx)
    delete_target_groups(ctx)

    ctx.info('Load balancers deletion completed.')
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def delete(self):\r\n return self.connection.delete_load_balancer(self.name)",
"def delete_loadbalancer(self, context, lb):\n deployment_model = self._get_setting(\n lb.tenant_id, \"lbaas_settings\", \"deployment_model\"\n )\n hostnames = self._get_hostname(lb)\n if deployment_model in [\"PER_TENANT\", \"PER_SUBNET\"]:\n vapv = self._get_vapv(hostnames)\n if not vapv.tip_group.list():\n self._destroy_vapv(hostnames, lb)\n elif deployment_model == \"PER_TENANT\":\n # Delete subnet ports if no longer required\n if self.openstack_connector.subnet_in_use(lb) is False:\n self._detach_subnet_port(vapv, hostnames, lb)\n for hostname in hostnames:\n port_ids = self.openstack_connector.get_server_port_ids(\n hostname\n )\n self.openstack_connector.delete_ip_from_ports(\n lb.vip_address, port_ids\n )\n elif deployment_model == \"PER_LOADBALANCER\":\n self._destroy_vapv(hostnames, lb)",
"def delete_balancer(self):\n response = self.client.delete_load_balancer(\n LoadBalancerArn=self.get_balancer_arn()\n )\n assert response['ResponseMetadata']['HTTPStatusCode'] == 200",
"def delete(self, request, loadbalancer_id):\n conn = get_sdk_connection(request)\n conn.load_balancer.delete_load_balancer(loadbalancer_id,\n ignore_missing=True,\n cascade=True)",
"def delete_balancer(ctx):\n if self.balancer_exists():\n self.delete_balancer()\n ctx.info('Successfully deleted load balancer {}:'.format(self.get_balancer_name()))\n else:\n ctx.info('Load balancer {} does not exist, nothing to delete.'.format(\n self.get_balancer_name()\n ))",
"def resource_cleanup(self):\n for lb in self.loadbalancers:\n self.octavia_client.load_balancer_delete(lb['id'], cascade=True)\n try:\n self.wait_for_lb_resource(\n self.octavia_client.load_balancer_show, lb['id'],\n provisioning_status='DELETED')\n except osc_lib.exceptions.NotFound:\n pass\n for fip in self.fips:\n self.neutron_client.delete_floatingip(fip)\n # we run the parent resource_cleanup last as it will remove instances\n # referenced as members in the above cleaned up load balancers\n super(LBAASv2Test, self).resource_cleanup()",
"def delete_loadbalancer(self, context, loadbalancer):\n LOG.info(\"Received request 'Delete Loadbalancer' for LB:%(lb)s \",\n {'lb': loadbalancer['id']})\n\n arg_dict = {'context': context,\n lb_const.LOADBALANCER: loadbalancer,\n }\n self._send_event(lb_const.EVENT_DELETE_LOADBALANCER_V2, arg_dict,\n serialize=True, binding_key=loadbalancer['id'],\n key=loadbalancer['id'])",
"def delete(self, loadbalancer_id):\n response.status = 201",
"def delete(self, load_balancer):\n # type: (LoadBalancer) -> BoundAction\n self._client.request(\n url=\"/load_balancers/{load_balancer_id}\".format(load_balancer_id=load_balancer.id), method=\"DELETE\"\n )\n return True",
"def delete(self, request, pool_id):\n conn = get_sdk_connection(request)\n retry_on_conflict(\n conn, conn.load_balancer.delete_pool,\n pool_id,\n load_balancer_getter=pool_get_load_balancer_id,\n resource_id=pool_id)",
"def reset(ctx):\n delete(ctx)\n create(ctx)\n\n ctx.info('Load balancers reset completed.')",
"def pre_loadbalancer_pool_delete(self, resource_id):\n pass",
"def delete(self, params=None):\n self.logger.debug('Deleting %s with parameters: %s'\n % (self.type_name, params))\n return self.client.delete_load_balancer_policy(**params)",
"def delete_listeners(ctx):\n if self.balancer_exists():\n self.delete_listeners()\n ctx.info('Deleted all listeners for load balancer {}:'.format(self.get_balancer_name()))\n else:\n ctx.info('Load balancer {} does not exist, no listeners to remove.'.format(self.get_balancer_name()))",
"def delete(self):\n self._lbcall('delete_pool', [self._name])",
"def destroy_resources(LoadBalancerArn, ListenerArn, TargetGroupArn):\n # TODO: implement this method\n destroy_ASG()\n destroy_ELB(LoadBalancerArn, ListenerArn, TargetGroupArn)\n pass",
"def delete(env, identifier, listener):\n\n mgr = SoftLayer.LoadBalancerManager(env.client)\n uuid, _ = mgr.get_lbaas_uuid_id(identifier)\n try:\n mgr.remove_lb_listener(uuid, listener)\n click.secho(\"Success\", fg='green')\n except SoftLayerAPIError as exception:\n click.secho(f\"ERROR: {exception.faultString}\", fg='red')",
"def detach_elastic_load_balancer(ElasticLoadBalancerName=None, LayerId=None):\n pass",
"def delete(ctx, iface, resource_config, **_):\n\n # Create a copy of the resource config for clean manipulation.\n params = \\\n dict() if not resource_config else resource_config.copy()\n\n lb = params.get(LB_NAME) or ctx.instance.runtime_properties.get(LB_NAME)\n policy = \\\n params.get(RESOURCE_NAME) or \\\n ctx.instance.runtime_properties.get(RESOURCE_NAME)\n\n lb_delete_params = {\n LB_NAME: lb,\n RESOURCE_NAME: policy\n }\n\n try:\n iface.delete(lb_delete_params)\n except ClientError as e:\n if _.get('force'):\n raise OperationRetry('Retrying: {0}'.format(text_type(e)))\n pass",
"def delete(self, request, flavor_id):\n conn = get_sdk_connection(request)\n conn.load_balancer.delete_flavor(flavor_id,\n ignore_missing=True)",
"def delete(self):\n\n uri = \"{0}/{1}\".format(self.base_uri, self.ip_or_ifname_or_group_name)\n\n try:\n response = self.session.request(\"DELETE\", uri)\n\n except Exception as e:\n raise ResponseError(\"DELETE\", e)\n\n if not utils._response_ok(response, \"DELETE\"):\n raise GenericOperationError(response.text, response.status_code)\n\n logging.info(\"SUCCESS: Deleting %s\", self)\n\n # Delete back reference from BGP_Routers\n for neighbor in self.__parent_bgp_router.bgp_neighbors:\n if (\n neighbor.ip_or_ifname_or_group_name\n == self.ip_or_ifname_or_group_name\n ):\n self.__parent_bgp_router.bgp_neighbors.remove(neighbor)\n\n # Delete object attributes\n utils.delete_attrs(self, self.config_attrs)",
"def delete_entity(self, context, lb_obj):\n resource_path = \"%s/%s/%s\" % (RESOURCE_PREFIX, LBS_RESOURCE, lb_obj.id)\n msg = _(\"NetScaler driver lb_obj removal: %s\") % lb_obj.id\n LOG.debug(msg)\n self.client.remove_resource(context.tenant_id, resource_path)",
"def post_loadbalancer_pool_delete(self, resource_id, resource_dict):\n pass",
"def pre_loadbalancer_member_delete(self, resource_id):\n pass",
"def delete_load_balancer(self,\n instance_id: str,\n dnszone_id: str,\n lb_id: str,\n *,\n x_correlation_id: str = None,\n **kwargs\n ) -> DetailedResponse:\n\n if instance_id is None:\n raise ValueError('instance_id must be provided')\n if dnszone_id is None:\n raise ValueError('dnszone_id must be provided')\n if lb_id is None:\n raise ValueError('lb_id must be provided')\n headers = {\n 'X-Correlation-ID': x_correlation_id\n }\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='delete_load_balancer')\n headers.update(sdk_headers)\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n\n url = '/instances/{0}/dnszones/{1}/load_balancers/{2}'.format(\n *self.encode_path_vars(instance_id, dnszone_id, lb_id))\n request = self.prepare_request(method='DELETE',\n url=url,\n headers=headers)\n\n response = self.send(request)\n return response",
"def teardown(client, blueprint_dir):\n logger.info(\"Running teardown on: %s\", blueprint_dir)\n state = get_state(blueprint_dir)\n if not state or \"network_name\" not in state:\n return\n all_services = client.service.list()\n for service in all_services:\n if service.network.name == state[\"network_name\"]:\n client.service.destroy(service)\n network = client.network.get(state[\"network_name\"])\n if network:\n client.network.destroy(network)\n save_state({}, blueprint_dir)",
"def pre_loadbalancer_healthmonitor_delete(self, resource_id):\n pass",
"def load_delete(id):\n load_key = client.key(\"load\", int(id))\n load = client.get(key=load_key)\n if not load:\n failed = {\"Error\": \"No load with this load_id exists\"}\n response = Response(\n response=json.dumps(failed),\n status=404,\n mimetype='application/json'\n )\n return response\n elif request.data:\n failed = {\"Error\": \"The request object does not follow specifications - see documentation.\"}\n response = Response(\n response=json.dumps(failed),\n status=400,\n mimetype='application/json'\n )\n return response\n client.delete(load_key)\n\n # Remove load from the boat if it was on one\n if load[\"carrier\"]:\n boat_key = client.key(\"boat\", load[\"carrier\"][\"id\"])\n boat = client.get(key=boat_key)\n\n # for load in boat[\"loads\"]:\n loads = [x for x in boat[\"loads\"] if x[\"id\"] != int(id)]\n boat.update({\"loads\": loads})\n client.put(boat)\n\n response = Response(\n status=204,\n mimetype='application/json'\n )\n return response",
"def l7pool_del(env, identifier):\n mgr = SoftLayer.LoadBalancerManager(env.client)\n try:\n mgr.del_lb_l7_pool(identifier)\n click.secho(\"Success\", fg='green')\n except SoftLayerAPIError as exception:\n click.secho(f\"ERROR: {exception.faultString}\", fg='red')",
"def get_delete_rack_vthunder_load_balancer_flow(self, lb, cascade, vthunder_conf, device_dict):\n\n store = {}\n delete_LB_flow = linear_flow.Flow(constants.DELETE_LOADBALANCER_FLOW)\n delete_LB_flow.add(lifecycle_tasks.LoadBalancerToErrorOnRevertTask(\n requires=constants.LOADBALANCER))\n delete_LB_flow.add(a10_database_tasks.GetVThunderByLoadBalancer(\n requires=constants.LOADBALANCER,\n provides=a10constants.VTHUNDER))\n delete_LB_flow.add(a10_database_tasks.MarkVThunderStatusInDB(\n requires=a10constants.VTHUNDER,\n inject={\"status\": constants.PENDING_DELETE}))\n delete_LB_flow.add(vthunder_tasks.SetupDeviceNetworkMap(\n requires=a10constants.VTHUNDER,\n provides=a10constants.VTHUNDER))\n delete_LB_flow.add(compute_tasks.NovaServerGroupDelete(\n requires=constants.SERVER_GROUP_ID))\n delete_LB_flow.add(database_tasks.MarkLBAmphoraeHealthBusy(\n requires=constants.LOADBALANCER))\n delete_LB_flow.add(a10_database_tasks.GetFlavorData(\n rebind={a10constants.LB_RESOURCE: constants.LOADBALANCER},\n provides=constants.FLAVOR_DATA))\n delete_LB_flow.add(vthunder_tasks.GetVthunderConfByFlavor(\n inject={a10constants.VTHUNDER_CONFIG: vthunder_conf,\n a10constants.DEVICE_CONFIG_DICT: device_dict},\n requires=(constants.LOADBALANCER, a10constants.VTHUNDER_CONFIG,\n a10constants.DEVICE_CONFIG_DICT, constants.FLAVOR_DATA),\n provides=(a10constants.VTHUNDER_CONFIG, a10constants.USE_DEVICE_FLAVOR)))\n if cascade:\n (pools_listeners_delete, store) = self._get_cascade_delete_pools_listeners_flow(lb)\n delete_LB_flow.add(pools_listeners_delete)\n delete_LB_flow.add(a10_network_tasks.GetLBResourceSubnet(\n rebind={a10constants.LB_RESOURCE: constants.LOADBALANCER},\n provides=constants.SUBNET))\n delete_LB_flow.add(\n a10_database_tasks.GetChildProjectsOfParentPartition(\n requires=[a10constants.VTHUNDER],\n rebind={a10constants.LB_RESOURCE: constants.LOADBALANCER},\n provides=a10constants.PARTITION_PROJECT_LIST\n ))\n delete_LB_flow.add(\n a10_database_tasks.CountLoadbalancersInProjectBySubnet(\n requires=[\n constants.SUBNET,\n a10constants.PARTITION_PROJECT_LIST,\n a10constants.USE_DEVICE_FLAVOR],\n provides=a10constants.LB_COUNT_SUBNET))\n delete_LB_flow.add(\n a10_database_tasks.CountLoadbalancersOnThunderBySubnet(\n requires=[a10constants.VTHUNDER, constants.SUBNET, a10constants.USE_DEVICE_FLAVOR],\n provides=a10constants.LB_COUNT_THUNDER))\n if CONF.a10_global.handle_vrid:\n delete_LB_flow.add(self.get_delete_rack_lb_vrid_subflow())\n delete_LB_flow.add(a10_network_tasks.DeallocateVIP(\n requires=[constants.LOADBALANCER, a10constants.LB_COUNT_SUBNET]))\n delete_LB_flow.add(virtual_server_tasks.DeleteVirtualServerTask(\n requires=(constants.LOADBALANCER, a10constants.VTHUNDER)))\n delete_LB_flow.add(a10_database_tasks.CountLoadbalancersWithFlavor(\n requires=(constants.LOADBALANCER, a10constants.VTHUNDER),\n provides=a10constants.LB_COUNT_FLAVOR))\n delete_LB_flow.add(nat_pool_tasks.NatPoolDelete(\n requires=(constants.SUBNET, constants.LOADBALANCER, a10constants.VTHUNDER,\n a10constants.LB_COUNT_FLAVOR, constants.FLAVOR_DATA)))\n delete_LB_flow.add(a10_database_tasks.CountLBThunderPartition(\n requires=a10constants.VTHUNDER,\n provides=a10constants.LB_COUNT_THUNDER_PARTITION))\n delete_LB_flow.add(vthunder_tasks.DeleteHealthMonitorOnVThunder(\n requires=(a10constants.LB_COUNT_THUNDER_PARTITION, a10constants.VTHUNDER)))\n if CONF.a10_global.network_type == 'vlan':\n delete_LB_flow.add(\n vthunder_tasks.DeleteInterfaceTagIfNotInUseForLB(\n requires=[\n constants.LOADBALANCER,\n 
a10constants.VTHUNDER]))\n delete_LB_flow.add(a10_database_tasks.MarkVThunderStatusInDB(\n name=a10constants.MARK_VTHUNDER_MASTER_DELETED_IN_DB,\n requires=a10constants.VTHUNDER,\n inject={\"status\": constants.DELETED}))\n delete_LB_flow.add(database_tasks.MarkLBAmphoraeDeletedInDB(\n requires=constants.LOADBALANCER))\n delete_LB_flow.add(database_tasks.DisableLBAmphoraeHealthMonitoring(\n requires=constants.LOADBALANCER))\n delete_LB_flow.add(database_tasks.MarkLBDeletedInDB(\n requires=constants.LOADBALANCER))\n delete_LB_flow.add(database_tasks.DecrementLoadBalancerQuota(\n requires=constants.LOADBALANCER))\n delete_LB_flow.add(vthunder_tasks.WriteMemory(\n requires=a10constants.VTHUNDER))\n delete_LB_flow.add(a10_database_tasks.SetThunderUpdatedAt(\n name=a10constants.SET_THUNDER_UPDATE_AT,\n requires=a10constants.VTHUNDER))\n if lb.topology == \"ACTIVE_STANDBY\":\n delete_LB_flow.add(a10_database_tasks.GetBackupVThunderByLoadBalancer(\n requires=constants.LOADBALANCER,\n provides=a10constants.BACKUP_VTHUNDER))\n delete_LB_flow.add(a10_database_tasks.MarkVThunderStatusInDB(\n name=a10constants.MARK_VTHUNDER_BACKUP_DELETED_IN_DB,\n rebind={a10constants.VTHUNDER: a10constants.BACKUP_VTHUNDER},\n inject={\"status\": constants.DELETED}))\n delete_LB_flow.add(a10_database_tasks.SetThunderUpdatedAt(\n name=a10constants.SET_THUNDER_BACKUP_UPDATE_AT,\n rebind={a10constants.VTHUNDER: a10constants.BACKUP_VTHUNDER}))\n return (delete_LB_flow, store)"
] |
[
"0.7630601",
"0.74928415",
"0.7286447",
"0.7257088",
"0.7153159",
"0.6958919",
"0.68135",
"0.6746743",
"0.65986437",
"0.62686634",
"0.6249868",
"0.61619234",
"0.6124974",
"0.61239636",
"0.6012765",
"0.5994635",
"0.5989202",
"0.5975469",
"0.5961489",
"0.58851093",
"0.58708817",
"0.5774464",
"0.57416403",
"0.5687461",
"0.5676152",
"0.56435287",
"0.5627736",
"0.55933553",
"0.55753416",
"0.55383235"
] |
0.81043327
|
0
|
Resets load balancer setup for the current environment.
|
def reset(ctx):
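    # tear down any existing balancer resources, then recreate them from scratch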
delete(ctx)
create(ctx)
ctx.info('Load balancers reset completed.')
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def reset():\n from . import core\n core.http.reset()",
"def reset(self, env):\n self._env = env\n return",
"def reset(self):\n self._config = Config()\n self._router = Router(())\n self._middleware = []\n self._start_response = None",
"def reset_env(self):\n return self.env.reset()",
"def reset():\n bwc = BandwidthConfigurator()\n bwc.reset()",
"def reset_config():\n return _set_config(_gen_config())",
"def reset():\n Vessel.reset_instances()",
"def reset():\n _runtime.reset()",
"def reset(self):\n self.train_loss.reset_states()\n self.train_accuracy.reset_states()\n self.val_loss.reset_states()\n self.val_accuracy.reset_states()\n self.train_mIoU.reset_states()\n self.val_mIoU.reset_states()",
"def reset_config():\r\n # TODO implement configuration reset\r\n pass",
"def reset_all_requests(self):\n self._send_request(\"/reset\")",
"def reset(self) -> None:\n\n self.host.reset()",
"def reset(self):\r\n err = self._cfuncs['ka_reset'](self._core._get_ka())\r\n self._core._handle_error(err)",
"def reset(self):\n requests.put('{}/reset'.format(self._get_url()))",
"def reset( self ):\n self.conf = self.defaults",
"def reset(self, **kwargs):\n return self.env.reset(**kwargs)",
"def bcp_reset(self):\n self.machine.bcp.transport.send_to_all_clients(\"reset\")",
"def reset(self):\n return self.env.reset()",
"def reset(self):\n self.agents.reset()\n self._cur_obs, self._cur_lm = self.parallel_env.reset()\n self.agent_cum_rewards = np.zeros((len(self.agents), self.n_states, 1))\n self.agent_contiguous_states = np.full((len(self.agents), self.n_states), True)",
"def reset(self):\n self.ai.reset()",
"def _reset(cls):\r\n cls._CONFIGURED = False\r\n cls._ENABLED = {}",
"def _hard_reset(self):\n self._reset_specific_envs(np.ones_like(self.episodes_done))\n self._update_other_info()",
"def reset(self, reset_from):\n self._grants.clear()\n self._groups.clear()\n self._reset_cached()\n self._id += 1\n for name, backend in self._backends.items():\n if name == reset_from:\n continue\n backend.reload()",
"def reset(self):\n self.reconnect()",
"def _reset(self, new_base_lr=None, new_max_lr=None, new_step_size=None):\n if new_base_lr is not None:\n self.base_lr = new_base_lr\n if new_max_lr is not None:\n self.max_lr = new_max_lr\n if new_step_size is not None:\n self.step_size = new_step_size\n self.clr_iterations = 0.0",
"def reset_defaults(self):\n self.domain_list = [{\"domain\": \"mywebsite%s.com\" % uuid.uuid1()}]\n self.origin_list = [{\"origin\": \"mywebsite1.com\",\n \"port\": 443,\n \"ssl\": False}]\n self.caching_list = [{\"name\": \"default\", \"ttl\": 3600},\n {\"name\": \"home\",\n \"ttl\": 1200,\n \"rules\": [{\"name\": \"index\",\n \"request_url\": \"/index.htm\"}]}]\n self.service_name = str(uuid.uuid1())\n self.flavor_id = self.test_config.default_flavor",
"def _reset(self, env_id: np.ndarray) -> None:",
"def restart_all():\n\n restart_nginx()\n restart_supervisor()",
"def reset_to_cold(self):\n self._log_msg_start(\"CFG-RST - Reset to cold start\")\n self._ubx.send(\"CFG-RST\", navBbrMask=0xFFFF, resetMode=0x01)",
"def reset(self):\n logging.info(\"Resetting DINTModel.\")\n if self.classifier:\n self.server.remove_model(self.classifier)\n # for ds in self.server.datasets:\n # self.server.remove_dataset(ds)\n # TODO: remove datasets?\n self.classifier = None"
] |
[
"0.6528185",
"0.62851256",
"0.6279536",
"0.6173153",
"0.6142136",
"0.60960215",
"0.60925925",
"0.6026478",
"0.6022415",
"0.6013986",
"0.59514946",
"0.58933365",
"0.58326983",
"0.5831264",
"0.5827723",
"0.5823984",
"0.58123845",
"0.57883435",
"0.5766599",
"0.57214874",
"0.5714138",
"0.5700057",
"0.5692838",
"0.5656015",
"0.56492794",
"0.5645426",
"0.5641995",
"0.56285024",
"0.56243634",
"0.56145775"
] |
0.79088813
|
0
|
Momentum update in the paper: model_ema = m * model_ema + (1 - m) * model
|
def moment_update(model, model_ema, m):
for p1, p2 in zip(model.parameters(), model_ema.parameters()):
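        # in-place EMA update: p2 = m * p2 + (1 - m) * p1; .data and .detach() keep autograd out of the copy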
        p2.data.mul_(m).add_(p1.detach().data, alpha=1 - m)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def moment_update(model, model_ema, m):\r\n for p1, p2 in zip(model.parameters(), model_ema.parameters()):\r\n p2.data.mul_(m).add_(1 - m, p1.detach().data)\r\n # p2.data.mul_(m).add_(1 - m, p1.data)",
"def update_ema(self):",
"def momentum_update(model_q, model_k, m=0.999):\n for p1, p2 in zip(model_q.parameters(), model_k.parameters()):\n p2.data.mul_(m).add_(1 - m, p1.detach().data)",
"def compute_EMA(self, series, num_days=50):\n temp = series.copy().reset_index(drop=True) # DO NOT MODIFY THE ORIGINAL DATAFRAME!\n smoothing_factor = 2/(num_days+1)\n EMA_prev = 0.0\n for idx in range(len(temp)):\n EMA_current = (temp[idx]*smoothing_factor)+EMA_prev*(1-smoothing_factor)\n # update values for next iteration\n temp[idx] = EMA_current\n EMA_prev = EMA_current \n return temp",
"def update_ema(metrics, metric, mode, iteration):\n decay = 0.99 if mode == 'train' else 0.9\n d = decay if iteration > 10 else 0.5\n\n for key, value in metric.items():\n metrics[mode][key] = metrics[mode][key] * d + value * (1 - d)\n\n return metrics",
"def calc_ema(self):\n emaFactor = self.settings['emaFactor']\n stepFactor = emaFactor ** self.vars['dt']\n if self.vars['step'] == 0:\n ema = float('NaN')\n elif self.vars['step'] == 1:\n ema = self.vars['speed_trace'][1]\n else:\n ema = stepFactor * self.vars['ema_trace'][self.vars['step'] - 1] + (\n 1 - stepFactor)*self.vars['speed_trace'][self.vars['step']]\n return ema",
"def _etaM(self,x):\n return self._etaM_cool(x) + self._etaM_hot(x)",
"def EMA_tick(n_periods, current_value, previous_ema):\n\n most_recent_weight = 2 / (n_periods + 1)\n return (current_value - previous_ema) * most_recent_weight + previous_ema",
"def compute_MAE(e):\n\n return np.mean(np.abs(e))",
"def _momentum_update(self):\n for param_ol, param_tgt in zip(self.online_net.parameters(),\n self.target_net.parameters()):\n param_tgt.data = param_tgt.data * self.momentum + \\\n param_ol.data * (1. - self.momentum)",
"def modelmean(self, model_params, this_data, this_suff_stat):\n pass",
"def temper_momentum(self, epoch):\n if epoch == 0:\n return\n self.momentums = [\n x + y for x, y in zip(self.momentums, self.momentum_temper_rates)\n ]\n for i, param_group in enumerate(self.optimizer.param_groups):\n param_group['momentum'] = self.momentums[i]",
"def mae(t, y):\n\treturn mean_absolute_error(t, y)",
"def addMomentumIndicators(self):\n\n if not isinstance(self.df, pd.DataFrame):\n raise TypeError('Pandas DataFrame required.')\n\n if not 'close' in self.df.columns:\n raise AttributeError(\"Pandas DataFrame 'close' column required.\")\n\n if not self.df['close'].dtype == 'float64' and not self.df['close'].dtype == 'int64':\n raise AttributeError(\n \"Pandas DataFrame 'close' column not int64 or float64.\")\n\n if not 'ema12' in self.df.columns:\n self.df['ema12'] = self.df.close.ewm(span=12, adjust=False).mean()\n\n if not 'ema26' in self.df.columns:\n self.df['ema26'] = self.df.close.ewm(span=26, adjust=False).mean()\n\n if not self.df['ema12'].dtype == 'float64' and not self.df['ema12'].dtype == 'int64':\n raise AttributeError(\n \"Pandas DataFrame 'ema12' column not int64 or float64.\")\n\n if not self.df['ema26'].dtype == 'float64' and not self.df['ema26'].dtype == 'int64':\n raise AttributeError(\n \"Pandas DataFrame 'ema26' column not int64 or float64.\")\n\n # calculate relative strength index\n self.df['rsi14'] = self.calculateRelativeStrengthIndex(\n self.df['close'], 14)\n # default to midway-50 for first entries\n self.df['rsi14'] = self.df['rsi14'].fillna(50)\n\n # calculate moving average convergence divergence\n self.df['macd'] = self.df['ema12'] - self.df['ema26']\n self.df['signal'] = self.df['macd'].ewm(span=9, adjust=False).mean()\n\n # calculate on-balance volume (obv)\n self.df['obv'] = np.where(self.df['close'] > self.df['close'].shift(1), self.df['volume'], \n np.where(self.df['close'] < self.df['close'].shift(1), -self.df['volume'], self.df.iloc[0]['volume'])).cumsum()\n\n # obv change percentage\n self.df['obv_pc'] = self.df['obv'].pct_change() * 100\n self.df['obv_pc'] = np.round(self.df['obv_pc'].fillna(0), 2)",
"def __em(self, x):\n _, log_resp = self._e_step(x)\n\n pi, mu, var = self._m_step(x, log_resp)\n\n self.__update_pi(pi)\n self.__update_mu(mu)\n self.__update_var(var)",
"def e_step(self):\n # update VMF probabilities (Equation (3))\n logP = np.dot(self.features, self.mu.T)*self.kappa + np.log(self.pi).reshape(1,-1) # n by k\n logP_norm = logP - logsumexp(logP, axis=1).reshape(-1,1)\n self.p = np.exp(logP_norm)\n self.mllk = np.mean(logsumexp(logP, axis=1))",
"def momentum(self, k):\n self._momentum = k\n self._energy = self.dispersion(k)",
"def update_ema(target_params, source_params, rate=0.99):\n for targ, src in zip(target_params, source_params):\n targ.detach().mul_(rate).add_(src, alpha=1 - rate)",
"def backpropagate(eta, momentum):\n for i_lay in range(len(layers)-1, 0, -1):\n lay = layers[i_lay]\n if i_lay == len(layers)-1:\n lay[\"delta\"] = lay[\"error\"] * dlogistic(lay[\"v\"])\n else:\n lay[\"delta\"] = (layers[i_lay+1][\"weigths\"][:, 1:].T @ layers[i_lay+1]\n [\"delta\"]) * dlogistic(lay[\"v\"])\n lay[\"Delta_w\"] = eta * lay[\"delta\"] @ layers[i_lay - 1][\"y\"].T +\\\n momentum * lay[\"Delta_w\"]",
"def update_alpha(model):\n a = model.params.alpha_prior[0]\n b = model.params.alpha_prior[1]\n\n alpha_old = model.params.alpha\n\n log_p_old = model.feat_alloc_dist.log_p(model.params)\n\n alpha_new = scipy.stats.gamma.rvs(a, scale=(1 / b))\n\n model.params.alpha = alpha_new\n\n log_p_new = model.feat_alloc_dist.log_p(model.params)\n\n if do_metropolis_hastings_accept_reject(log_p_new, log_p_old, 0, 0):\n model.params.alpha = alpha_new\n\n else:\n model.params.alpha = alpha_old",
"def test_momentum_with_augmentation(self):\n tf.reset_default_graph()\n\n v = tf.Variable([1., 2., 3.])\n obj = tf.reduce_sum(tf.pow(v, 2))\n v1, obj1 = vectorize_model([v], obj, augment=1)\n\n iterations = 5\n lr = .5\n mu = tf.Variable(.5, name='mu')\n\n\n momentum_dict = MomentumOptimizer.create(v1, lr=lr, mu=mu, loss=obj1, w_is_state=True)\n print(momentum_dict)\n\n print(tf.global_variables())\n\n with tf.Session().as_default() as ss:\n tf.global_variables_initializer().run()\n for _ in range(iterations):\n print(momentum_dict.dynamics.eval())\n ss.run(momentum_dict.assign_ops)\n res = v.eval()\n\n print(res)\n\n mom_opt = tf.train.MomentumOptimizer(learning_rate=lr, momentum=.5)\n\n ts_momentum = mom_opt.minimize(obj, var_list=[v])\n\n print(mom_opt.get_slot_names())\n\n with tf.Session().as_default() as ss:\n tf.global_variables_initializer().run()\n for _ in range(iterations):\n print(v.eval(), mom_opt.get_slot(v, 'momentum').eval())\n ss.run(ts_momentum)\n res2 = v.eval()\n\n print(res2)\n\n self.assertLess(np.linalg.norm(res - res2), 1.e-5)",
"def weight_update_rmsprop_momentum(self, network):\n epsilon = self.epsilon\n gamma = self.gamma\n one_m_gamma = 1.0 - gamma\n beta = self.beta\n\n if self.ms_b is None or self.ms_q is None:\n self.ms_b = []\n self.ms_q = []\n self.ms_rx_inp = []\n self.ms_ry_inp = []\n self.ms_rx_pos_out = []\n self.ms_ry_pos_out = []\n self.ms_rx_neg_out = []\n self.ms_ry_neg_out = []\n for l, layer in enumerate(network.layers):\n self.ms_b.append(np.zeros(layer.b.shape))\n self.ms_q.append(np.zeros(layer.q.shape))\n self.ms_rx_inp.append(np.zeros(layer.input_size))\n self.ms_ry_inp.append(np.zeros(layer.input_size))\n self.ms_rx_pos_out.append(np.zeros(layer.output_size))\n self.ms_ry_pos_out.append(np.zeros(layer.output_size))\n self.ms_rx_neg_out.append(np.zeros(layer.output_size))\n self.ms_ry_neg_out.append(np.zeros(layer.output_size))\n\n # Initialize velocities to zero for momentum\n if self.vel_b is None or self.vel_q is None:\n self.vel_b = []\n self.vel_q = []\n self.vel_rx_inp = []\n self.vel_ry_inp = []\n self.vel_rx_pos_out = []\n self.vel_ry_pos_out = []\n self.vel_rx_neg_out = []\n self.vel_ry_neg_out = []\n for l, layer in enumerate(network.layers):\n self.vel_b.append(np.zeros(layer.b.shape))\n self.vel_q.append(np.zeros(layer.q.shape))\n self.vel_rx_inp.append(np.zeros(layer.input_size))\n self.vel_ry_inp.append(np.zeros(layer.input_size))\n self.vel_rx_pos_out.append(np.zeros(layer.output_size))\n self.vel_ry_pos_out.append(np.zeros(layer.output_size))\n self.vel_rx_neg_out.append(np.zeros(layer.output_size))\n self.vel_ry_neg_out.append(np.zeros(layer.output_size))\n\n for l, layer in enumerate(network.layers):\n self.ms_b[l] = gamma * self.ms_b[l] + one_m_gamma * self.dc_db[l] ** 2\n self.ms_q[l] = gamma * self.ms_q[l] + one_m_gamma * self.dc_dq[l] ** 2\n self.ms_rx_inp[l] = gamma * self.ms_rx_inp[l] + one_m_gamma * self.dc_drx_inp[l] ** 2\n self.ms_ry_inp[l] = gamma * self.ms_ry_inp[l] + one_m_gamma * self.dc_dry_inp[l] ** 2\n self.ms_rx_pos_out[l] = gamma * self.ms_rx_pos_out[l] + one_m_gamma * self.dc_drx_pos_out[l] ** 2\n self.ms_ry_pos_out[l] = gamma * self.ms_ry_pos_out[l] + one_m_gamma * self.dc_dry_pos_out[l] ** 2\n self.ms_rx_neg_out[l] = gamma * self.ms_rx_neg_out[l] + one_m_gamma * self.dc_drx_neg_out[l] ** 2\n self.ms_ry_neg_out[l] = gamma * self.ms_ry_neg_out[l] + one_m_gamma * self.dc_dry_neg_out[l] ** 2\n\n self.vel_b[l] *= beta\n self.vel_q[l] *= beta\n self.vel_rx_inp[l] *= beta\n self.vel_ry_inp[l] *= beta\n self.vel_rx_pos_out[l] *= beta\n self.vel_ry_pos_out[l] *= beta\n self.vel_rx_neg_out[l] *= beta\n self.vel_ry_neg_out[l] *= beta\n\n self.vel_b[l] += -self.alpha * self.dc_db[l] / np.sqrt(self.ms_b[l] + epsilon)\n self.vel_q[l] += -self.alpha * self.dc_dq[l] / np.sqrt(self.ms_q[l] + epsilon)\n self.vel_rx_inp[l] += -self.alpha * self.dc_drx_inp[l] / np.sqrt(self.ms_rx_inp[l] + epsilon)\n self.vel_ry_inp[l] += -self.alpha * self.dc_dry_inp[l] / np.sqrt(self.ms_ry_inp[l] + epsilon)\n self.vel_rx_pos_out[l] += -self.alpha * self.dc_drx_pos_out[l] / np.sqrt(self.ms_rx_pos_out[l] + epsilon)\n self.vel_ry_pos_out[l] += -self.alpha * self.dc_dry_pos_out[l] / np.sqrt(self.ms_ry_pos_out[l] + epsilon)\n self.vel_rx_neg_out[l] += -self.alpha * self.dc_drx_neg_out[l] / np.sqrt(self.ms_rx_neg_out[l] + epsilon)\n self.vel_ry_neg_out[l] += -self.alpha * self.dc_dry_neg_out[l] / np.sqrt(self.ms_ry_neg_out[l] + epsilon)\n\n layer.b += self.vel_b[l]\n layer.q += self.vel_q[l]\n layer.rx_inp += self.vel_rx_inp[l]\n layer.ry_inp += self.vel_ry_inp[l]\n layer.rx_pos_out += 
self.vel_rx_pos_out[l]\n layer.ry_pos_out += self.vel_ry_pos_out[l]\n layer.rx_neg_out += self.vel_rx_neg_out[l]\n layer.ry_neg_out += self.vel_ry_neg_out[l]",
"def calcMomentum(self):\n # start conditions\n if not self.quiet:\n fs = u'''Calculating momentum gain.\n Peak field: {self.rf_peak_field:.3f} MV/m\n Phase: {self.phase:.1f}°'''\n print(fs.format(**locals()))\n\n # Fortran method (0.8 ms to run cf 11 ms for Python code)\n self.t_array, self.gamma_dash_array, self.gamma_array, self.beta_array, self.p_array = calcMomentum.calcmomentum(self.freq, self.phase, self.gamma_start, self.dz, self.gamma_tilde_dash, self.phase_offset)\n # print(self.gamma_dash_array)\n self.final_p_MeV = self.p_array[-1] * -1e-6 * epsilon_e\n\n if not self.quiet:\n print(u'Final momentum: {:.3f} MeV/c'.format(self.final_p_MeV))\n self.calc_level = CALC_MOM",
"def ema(matrix, alpha):\n\n # declare empty EMA numpy array\n e = np.zeros(matrix.shape[0])\n\n # set the value of the first element in the EMA array\n e[0] = matrix[0]\n\n # use the EMA formula to calculate the value of each point in the EMA array\n for t in range(1, matrix.shape[0]):\n e[t] = alpha*matrix[t] + (1 - alpha)*e[t - 1]\n\n return e",
"def ema(matrix, alpha):\n\n # declare empty EMA numpy array\n e = np.zeros(matrix.shape[0])\n\n # set the value of the first element in the EMA array\n e[0] = matrix[0]\n\n # use the EMA formula to calculate the value of each point in the EMA array\n for t in range(1, matrix.shape[0]):\n e[t] = alpha*matrix[t] + (1 - alpha)*e[t - 1]\n\n return e",
"def amalgamate(self,i,j):\n # conserve momentum\n self.v[i] = (self.v[i]*self.m[i]+self.v[j]*self.m[j])/ \\\n (self.m[i]+self.m[j])\n self.r[i] = (self.r[j] - self.r[i])/2 + self.r[j] \n self.m[i] = self.m[i] + self.m[j]\n self.r[j] = self.r[self.n-1]\n self.v[j] = self.v[self.n-1]\n self.m[j] = self.m[self.n-1]\n self.n = self.n - 1",
"def _apply_smooth_update(self):\n self.print(\"SGD with Momentum: Applying smooth update...\", line_above=True)\n\n raw_update = self.get_h5_data(self.raw_update_path)\n update = self.get_h5_data(self.smooth_update_path)\n\n if np.sum(np.isnan(update)) > 1:\n raise Exception(\n \"NaNs were found in the smoothed update.\"\n \"Check the raw update and smoothing process.\"\n )\n\n max_upd = np.max(np.abs(update))\n print(f\"Max smooth model update: {max_upd}\")\n\n update_scaling_fac_alpha = self.alpha / max_upd\n\n self.print(\n f\"Recaling based on alpha: {update_scaling_fac_alpha},\"\n f\"New maximum update is: {max_upd * update_scaling_fac_alpha}\"\n )\n\n update *= update_scaling_fac_alpha\n\n # normalise theta and apply update\n theta_0 = self.get_h5_data(self._get_path_for_iteration(0, self.model_path))\n\n # Update parameters\n if max(self.roughness_decay_smoothing_length) > 0.0:\n theta_prev = self.get_h5_data(self.smoothed_model_path)\n\n # If relative perturbations are smoothed, make model physical\n if self.roughness_decay_type == \"relative_perturbation\":\n theta_prev = (theta_prev + 1) * theta_0\n else:\n theta_prev = self.get_h5_data(self.model_path)\n\n # Normalize the model and prevent division by zero in the outer core.\n theta_prev[theta_0 != 0] = theta_prev[theta_0 != 0] / theta_0[theta_0 != 0] - 1\n\n # Make sure that the model is only updated where theta is non_zero\n theta_new = np.zeros_like(theta_0)\n theta_new[theta_0 != 0] = (\n theta_prev[theta_0 != 0]\n - update[theta_0 != 0]\n - (1 - self.beta) * self.perturbation_decay * theta_prev[theta_0 != 0]\n )\n\n # Remove normalization from updated model and write physical model\n theta_physical = (theta_new + 1) * theta_0\n shutil.copy(\n self.model_path,\n self.tmp_model_path,\n )\n self.set_h5_data(\n self.tmp_model_path,\n theta_physical,\n )",
"def update_momentum(self, factor):\n if not self.gpu:\n self.psi *= np.exp(-1j*factor*self.m*self.V)\n else:\n self.g_psi_hat[...] = -1.0j*factor*self.m*self.g_V\n cumath.exp(self.g_psi_hat, out=self.g_psi_hat)\n self.g_psi *= self.g_psi_hat",
"def compute_mae(e):\n return np.mean(np.abs(e))",
"def _update_model(self, new_model):\n super()._update_model(new_model)\n\n if 'e' in self.tr_params:\n if self.state_no_train_de is None:\n for i in range(self.n_emissions - self.nr_no_train_de):\n self.B[i] = (1 - self.learning_rate) * new_model['B'][\n i\n ] + self.learning_rate * self.B[i]\n else:\n for i in range(self.n_d_emissions):\n if i < self.n_d_emissions - self.nr_no_train_de:\n self.B[i] = (1 - self.learning_rate) * new_model['B'][\n i\n ] + self.learning_rate * self.B[i]\n else:\n self.B[i][: -self.state_no_train_de, :] = (\n (1 - self.learning_rate)\n * new_model['B'][i][: -self.state_no_train_de, :]\n + self.learning_rate *\n self.B[i][: -self.state_no_train_de, :]\n )\n\n for i in range(self.n_emissions):\n normalise(new_model['B'][i], axis=1)"
] |
[
"0.8465972",
"0.7067352",
"0.6589039",
"0.61633873",
"0.6113697",
"0.6102108",
"0.6092487",
"0.60704297",
"0.6068411",
"0.6004033",
"0.6002073",
"0.59967154",
"0.5994754",
"0.59732616",
"0.5970538",
"0.5949824",
"0.5927358",
"0.5924119",
"0.59215385",
"0.59203863",
"0.5901667",
"0.5900722",
"0.5899984",
"0.58824676",
"0.58824676",
"0.58476263",
"0.5839889",
"0.58207846",
"0.5802545",
"0.57329303"
] |
0.8590939
|
1
|
Set an alternative User-Agent header
|
def set_user_agent(self, user_agent: str) -> None:
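    # replace the User-Agent entry in the stored default headers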
self.headers['User-Agent'] = user_agent
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _set_agent_header(self):\n self._api_client.set_default_header('User-Agent', self._api_client.user_agent)",
"def setUA(self, useragent):\n\t\tpass",
"def _change_user_agent(self):\n index = (self.current_user_agent_index + 1) % len(_USER_AGENT_LIST)\n self.headers['User-Agent'] = _USER_AGENT_LIST[index]\n self.current_user_agent_index = index",
"def _enforce_user_agent(headers: dict) -> dict:\n headers.update(SYNAPSE_USER_AGENT_HEADER)\n return headers",
"def set_option_user_agent(self, string, apikey=''):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/action/setOptionUserAgent/', {'String': string, 'apikey': apikey})))",
"def add_user_agent(self, value):\n # type: (str) -> None\n self.user_agent_policy.add_user_agent(value)",
"def test_user_agent(self):\n user_agent = b\"test-agent\"\n\n def update_expected_user_agent(expected):\n expected[3][\"attributes\"].update(\n {\"http.user_agent\": user_agent.decode(\"utf8\")}\n )\n return expected\n\n self.scope[\"headers\"].append([b\"user-agent\", user_agent])\n app = otel_asgi.OpenTelemetryMiddleware(simple_asgi)\n self.seed_app(app)\n self.send_default_request()\n outputs = self.get_all_output()\n self.validate_outputs(outputs, modifiers=[update_expected_user_agent])",
"def test_user_agent(self):\n user_agent = b\"test-agent\"\n\n def update_expected_user_agent(expected):\n expected[3][\"attributes\"].update(\n {SpanAttributes.HTTP_USER_AGENT: user_agent.decode(\"utf8\")}\n )\n return expected\n\n self.scope[\"headers\"].append([b\"user-agent\", user_agent])\n app = otel_asgi.OpenTelemetryMiddleware(simple_asgi)\n self.seed_app(app)\n self.send_default_request()\n outputs = self.get_all_output()\n self.validate_outputs(outputs, modifiers=[update_expected_user_agent])",
"def choose_headers(self):\n headers = self.headers\n if self.user_agents:\n headers[\"User-Agent\"] = random.choice(self.user_agents)\n return headers",
"def create_fakeheader(ua,browsers):\n\n headers = {'User-Agent': pick_random_fakeheader(ua, browsers)}\n return headers",
"def get_user_agent(user_agent: str | None) -> str:\r\n from wikibaseintegrator import __version__\r\n wbi_user_agent = f\"WikibaseIntegrator/{__version__}\"\r\n\r\n if user_agent is None:\r\n return_user_agent = wbi_user_agent\r\n else:\r\n return_user_agent = user_agent + ' ' + wbi_user_agent\r\n\r\n return return_user_agent",
"def userAgent(self):\n raise NotImplementedError",
"def user_agent():\n headers = [\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/603.2.4 (KHTML, like Gecko) Version/10.1.1 Safari/603.2.4',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.1.2 Safari/603.3.8',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko',\n 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36 Edge/15.15063',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36 Edge/14.14393',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (X11; Linux x86_64; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/59.0.3071.109 Chrome/59.0.3071.109 Safari/537.36',\n 'Mozilla/5.0 (X11; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0',\n 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',\n 'Mozilla/5.0 (Windows NT 6.1; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 
Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.1.2 Safari/603.3.8',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36 OPR/46.0.2597.57',\n 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.1; Trident/7.0; rv:11.0) like Gecko',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',\n 'Mozilla/5.0 (iPad; CPU OS 10_3_2 like Mac OS X) AppleWebKit/603.2.4 (KHTML, like Gecko) Version/10.0 Mobile/14F89 Safari/602.1',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/603.1.30 (KHTML, like Gecko) Version/10.1 Safari/603.1.30',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0',\n 'Mozilla/5.0 (Windows NT 5.1; rv:52.0) Gecko/20100101 Firefox/52.0',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',\n 'Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/603.2.5 (KHTML, like Gecko) Version/10.1.1 Safari/603.2.5',\n 'Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',\n 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0; Trident/5.0)',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:55.0) Gecko/20100101 Firefox/55.0',\n 'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',\n 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/5.0; Trident/5.0)',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (iPad; CPU OS 10_3_3 like Mac OS X) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.0 Mobile/14G60 Safari/602.1',\n 'Mozilla/5.0 (X11; Fedora; Linux x86_64; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:54.0) Gecko/20100101 Firefox/54.0',\n 
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/603.2.5 (KHTML, like Gecko) Version/10.1.1 Safari/603.2.5',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.1.2 Safari/603.3.8',\n 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; Touch; rv:11.0) like Gecko',\n 'Mozilla/5.0 (Windows NT 6.1; rv:52.0) Gecko/20100101 Firefox/52.0',\n 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:55.0) Gecko/20100101 Firefox/55.0',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.86 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/602.4.8 (KHTML, like Gecko) Version/10.0.3 Safari/602.4.8',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.104 Safari/537.36',\n ]\n return {'User-Agent': headers[random.randrange(0, len(headers))]}",
"def option_user_agent(self):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/optionUserAgent/')))",
"def use_random_user_agent(self, val=True):\n if val:\n self.random_user_agent = True\n self.user_agent = self.get_new_user_agent()\n return True\n else:\n self.random_user_agent = False\n self.user_agent = \"\"\n return False",
"def user_agent():\n ua_list = [\n\"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50\",\n\"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50\",\n\"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0);\",\n\"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)\",\n\"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)\",\n\"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)\",\n\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1\",\n\"Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1\",\n\"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11\",\n\"Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11\",\n\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 \",\n]\n return random.choice(ua_list)",
"def user_agent(self):\n ua_list = [\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.101',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.122',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71',\n 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)',\n 'Mozilla/5.0 (Windows NT 5.1; U; en; rv:1.8.1) Gecko/20061208 Firefox/2.0.0 Opera 9.50',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0',\n ]\n return random.choice(ua_list)",
"def user_agent(self):\n ua_list = [\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.101',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.122',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71',\n 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)',\n 'Mozilla/5.0 (Windows NT 5.1; U; en; rv:1.8.1) Gecko/20061208 Firefox/2.0.0 Opera 9.50',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0',\n ]\n return random.choice(ua_list)",
"def userAgentForUrl(self, url):\n return \"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36\"",
"def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:\n ua = f\"transformers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}\"\n if is_torch_available():\n ua += f\"; torch/{_torch_version}\"\n if is_tf_available():\n ua += f\"; tensorflow/{_tf_version}\"\n if DISABLE_TELEMETRY:\n return ua + \"; telemetry/off\"\n if is_training_run_on_sagemaker():\n ua += \"; \" + \"; \".join(f\"{k}/{v}\" for k, v in define_sagemaker_information().items())\n # CI will set this value to True\n if os.environ.get(\"TRANSFORMERS_IS_CI\", \"\").upper() in ENV_VARS_TRUE_VALUES:\n ua += \"; is_ci/true\"\n if isinstance(user_agent, dict):\n ua += \"; \" + \"; \".join(f\"{k}/{v}\" for k, v in user_agent.items())\n elif isinstance(user_agent, str):\n ua += \"; \" + user_agent\n return ua",
"def __init__(self):\n self.opener = urllib2.build_opener()\n self.opener.addheaders = [('User-agent', 'Mozilla/5.0')]",
"def view_user_agent():\n\n headers = get_headers()\n\n return jsonify({\"user-agent\": headers[\"user-agent\"]})",
"def user_agent(self):\n version = '{0}.{1}.{2}'.format(sys.version_info[0], sys.version_info[1], sys.version_info[2])\n return \"PAYNL/SDK/{0} Python/{1} ({2})\".format(self.client_version, version, sys.hexversion)",
"def process_request_headers(request):\n request.headers.setdefault('User-Agent',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) '\n 'AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/51.0.2704.103 Safari/537.36')\n if 'redirect_urls' not in request.meta:\n request.headers['Referer'] = None",
"def set_end_user_ip(self, end_user_ip):\n self.headers = {\n 'Content-Type': 'application/json; charset=utf-8',\n 'X-Algolia-API-Key': self.api_key,\n 'X-Forwarded-For': end_user_ip,\n 'X-Algolia-Application-Id': self.application_id,\n 'User-Agent': ('Algolia Search for python %s' % VERSION)\n }",
"def user_agent(self):\n return self._session.headers[\"User-Agent\"]",
"def build_user_agent(application_name, version, url):\n return '%s/%s %s/%s (+%s)' % (application_name, version,\n 'python-simplemediawiki', __version__, url)",
"def get_new_user_agent(self):\n new_user_agent = user_agent.generate_navigator()[\"user_agent\"]\n if new_user_agent == self.user_agent:\n self.get_new_user_agent()\n\n return new_user_agent",
"def _random_user_agent(self):\n try:\n ua = UserAgent()\n return ua.random\n except:\n default_ua = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) \\\n AppleWebKit/537.36 (KHTML, like Gecko) \\\n Chrome/58.0.3029.110 Safari/537.36'\n return default_ua",
"def check_user_agent_string(self, headers, current_connection, thread_lock):\n try:\n headers['User-Agent']\n except KeyError:\n if current_connection['user_agent_penalty'] is False:\n self.update_score(current_connection, -100, thread_lock)\n self.update_connection_cache(current_connection, 'user_agent_penalty', thread_lock, True)\n print('No user agent string 100 deducted from connection score')\n print('user_agent_penalty '\n 'updated to: %s' % current_connection['user_agent_penalty'])"
] |
[
"0.76518005",
"0.7450856",
"0.7234104",
"0.70967805",
"0.68636405",
"0.6850527",
"0.66897154",
"0.65973204",
"0.63424844",
"0.6313411",
"0.6176743",
"0.6160825",
"0.611805",
"0.60785663",
"0.6078315",
"0.6071037",
"0.6050875",
"0.6050875",
"0.59958977",
"0.58809",
"0.5868236",
"0.5819565",
"0.5816085",
"0.57832485",
"0.5730955",
"0.5728341",
"0.5670711",
"0.5665731",
"0.5662503",
"0.5661808"
] |
0.7914726
|
0
|
Extract announcement data from the extracted HTML partial. The HTML partial should be a Tag object returned from BeautifulSoup4.find()
|
def parse_announcement_data(self) -> 'Scraper':
logger.info('Parsing extracted html partial')
for tag in self.html_partial: # there are 63 tags
if tag.name == 'h4':
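            # each announcement entry is wrapped in an <h4> tag; pull its data from that tag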
announcement_data = self.get_data_from_tag(tag)
self.announcement_data_list.append(announcement_data)
logger.info('Compiled announcement data list from html web page partial')
return self
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def parse_article_html(page_resp):\n article_url = page_resp.url\n \n article_page_soup = bs4.BeautifulSoup(page_resp.text, \"lxml\")\n \n title_html = article_page_soup.find_all(\"h1\")[0]\n title_text = title_html.contents[0]\n \n date = article_page_soup.find_all(\"small\", {'class': 'gray'})[0]\n date_text = date.contents[4].replace(\" \", \"\").split(\"\\n\")[3][:10]\n \n article_content = article_page_soup.find_all(\"div\", {'class': 'rich_media_content'})[0]\n article_text = article_content.get_text('\\n')\n is_original = check_if_original(article_content) or '[原创]' in title_text\n \n return {\n 'title': title_text,\n 'date': date_text,\n 'url': article_url,\n 'is_original': is_original,\n 'text': article_text\n \n}",
"def _extract_data(self,data,tag=None,cssid=None,cssclass=None,attrs=None,regexp=None,index=0):\n \n# cssclass = \"song\"\n# cssid = \"newsTable0\"\n# tag = \"div\"\n# import pdb\n# pdb.set_trace() \n \n if cssid: \n searchconstrain = SoupStrainer(tag, id=cssid)\n elif cssclass:\n searchconstrain = SoupStrainer(tag, attrs={\"class\":cssclass}) \n else:\n if isinstance(attrs, unicode):\n try:\n attrs = attrs.encode('utf-8')\n regexp = regexp.encode('utf-8')\n except:\n pass \n searchconstrain = SoupStrainer(tag, attrs={attrs:re.compile(regexp)})\n\n soup = BeautifulSoup(data,parseOnlyThese=searchconstrain)\n rslist = [ tp for tp in soup ]\n return rslist[index]",
"def extract_data_listing(html):\n id_finder = re.compile(r'PR[\\d]+~')\n return html.find_all('div', id=id_finder)",
"def parse(content):\n soup = BeautifulSoup(content, 'html.parser')\n if soup.article is None:\n return None\n period = parse_event_period(soup)\n reason = identify_reason(soup)\n if reason is False:\n return None\n return (period, reason)",
"def htmlExtractPart(page, tag, attrs):\n try:\n htmlParsePage(page)\n except UnicodeEncodeError:\n logging.warn('could not parse html')\n return page['data']\n\n bs = page['parsedHtml']\n el = bs.find(tag, attrs=attrs)\n if el != None:\n logging.debug('Successfully stripped html')\n return str(el)\n else:\n logging.debug('Could not strip html')\n return page['data']\n return",
"def scrape_html(self, html: str):\n\n soup = BeautifulSoup(html, features=\"lxml\")\n # Seprating tags using bs4\n data_tags_tags = soup.find_all(lambda tag: True if 'data-tags' in tag.attrs else False)\n self.complete_match = {tuple(sorted([v.strip() for v in tag.attrs['data-tags'].\n split(',')])): [tag, []] for tag in data_tags_tags}\n\n # Creating dict with all the complete tags\n for k, v in self.complete_match.items():\n hold_tags = set()\n for k2 in self.complete_match.keys():\n if k == k2:\n continue\n if set(k).issubset(k2):\n hold_tags.update(set(k2).difference(k))\n v[1] = sorted([{\"name\": tag} for tag in hold_tags], key=lambda tag: tag['name'])\n\n # Creating dict with all the partial and ambigious tags\n for k, v in self.complete_match.items():\n for L in range(0, len(k) + 1):\n for subset in itertools.permutations(k, L):\n if subset:\n if not self.partial_match.get(subset):\n self.partial_match[subset] = []\n self.partial_match[subset].append(v[0])\n\n # Inserting tags in the partial match dic for reference\n hold_partial_match = self.partial_match.copy()\n for k, v in self.partial_match.items():\n if len(v) > 1:\n values = set()\n for tags in v:\n values.update(tags.attrs['data-tags'].split(','))\n v2 = v.copy()\n v2.append(sorted([{\"name\": tag} for tag in set(values).difference(k)], key=lambda tag: tag['name']))\n hold_partial_match[k] = v2\n\n self.partial_match = hold_partial_match.copy()",
"def get_data_from_tag(self, tag: Tag) -> dict:\n self.verify_tag_structure(tag)\n title = tag.string\n url = tag.contents[0]['href'] # tag.contents[0].name is 'a'\n date_string = tag.next_sibling.next_sibling.contents[0]\n published_date = (self.get_date_from_string(date_string))\n announcement_data = {\n 'id': None,\n 'title': title,\n 'url': url,\n 'check_string': None,\n 'published_datetime': published_date,\n 'updated_datetime': None,\n 'retrieved_datetime': datetime.now(),\n 'stored_timestamp': None\n }\n self.check_announcement_content_validity(announcement_data)\n return announcement_data",
"def extract(self, doc, raw_html):\n super(KenyaTodayCrawler, self).extract(doc, raw_html)\n\n soup = BeautifulSoup(raw_html)\n\n # gather title\n doc.title = soup.find(attrs={\"property\":\"og:title\"})['content']\n\n #gather publish date\n date = self.extract_plaintext(soup.select(\"main.content .entry-meta .entry-time\"))\n doc.published_at = self.parse_timestamp(date)\n\n nodes = soup.select(\".content .entry-content p\")\n self.log.info(nodes)\n if len(nodes) > 1:\n doc.summary = self.extract_plaintext(nodes[0:1])\n doc.text = \"\\n\\n\".join(p.text.strip() for p in nodes[2:])\n\n doc.author = Author.unknown()",
"def summary(self, html_partial=False):\n try:\n ruthless = True\n while 1:\n self._build_doc(True)\n #pangwei add on 2014/12/08 begin\n for elem in self.tags(self._root, 'footer', 'select'):\n elem.drop_tree()\n #pangwei add on 2014/12/08 end\n for elem in self.tags(self._root, 'script', 'style'):\n elem.drop_tree()\n for elem in self.tags(self._root, 'body'):\n elem.set('id', 'readabilityBody')\n if ruthless:\n self.remove_unlikely_candidates()\n self.transform_misused_divs_into_paragraphs()\n \n candidates = self.score_paragraphs()\n best_candidate = self.select_best_candidate(candidates)\n if best_candidate:\n article = self.get_article(candidates, best_candidate,html_partial=html_partial)\n else:\n if ruthless:\n ruthless = False\n continue\n else:\n article = self._root.find('body')\n if article is None:\n article = self._root\n \n cleaned_article = self.sanitize(article, candidates)\n article_length = len(cleaned_article or '')\n retry_length = self.kwargs.get('retry_length',self.RETRY_LENGTH)\n of_acceptable_length = article_length >= retry_length\n if ruthless and not of_acceptable_length:\n ruthless = False\n continue\n else:\n return cleaned_article\n \n except StandardError, e:\n raise Unparseable(str(e)), None, sys.exc_info()[2]",
"def __init__(self, html_soup):\n # Drilling down to the internal wrapper <div> tag\n self.data = html_soup.find('div', class_='sbkBrv_SingleResultDesc')",
"def get_article_info(elem: str) -> ArticleInfo:\n\n headers = {\n 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/53.0.2785.143 Safari/537.36 '\n }\n\n time.sleep(60)\n print(f\"Collecting info from link {elem}...\")\n\n html_content = requests.get(elem, headers=headers).content\n soup = BeautifulSoup(html_content, 'lxml')\n\n base = 'div.grid-container div.single-post-grid div'\n\n title_query = f'{base} div.post-header div.post-header-container div.post-header-title div.the_title'\n\n title = soup.select_one(title_query).get_text()\n\n text_query = f'{base} div.post-inside div.post-content p'\n text_parts = []\n\n for elem1 in soup.select(text_query):\n temp_text = elem1.get_text().replace(\n '<strong>', ''\n ).replace(\n '</strong>', ''\n )\n\n text_parts.append(temp_text)\n\n full_text = ' '.join(text_parts)\n\n tags_query = f'{base} div.post-inside div.post-content div.tags a'\n tags = []\n\n for elem1 in soup.select(tags_query):\n tags.append(elem1.get_text())\n\n article = ArticleInfo(\n url=elem,\n title=title,\n text=full_text,\n keywords=tags\n )\n\n print(article)\n\n return article",
"def extract_raw_text(soup, url):\n \n title_class = \"nom-notice\"\n title = soup.find(class_=title_class)\n raw_infos = {}\n raw_infos['name'] = title.contents[0].replace(u'\\xa0', ' ')\n \n notice = soup.find(class_=\"notice\")\n \n summary = notice.find(class_=\"chapo\")\n if summary is not None:\n first_para = summary.find_all('p', recursive=False)[-1]\n first_para.tag = 'div'\n first_para['class'] = 'summary'\n raw_infos['summary'] = unicode(first_para)\n \n else:\n raw_infos['summary'] = unicode('')\n\n article = notice.find(class_='texte')\n if article is not None:\n article['class'] = 'article'\n raw_infos['article'] = unicode(article)\n \n sources = notice.find(class_='sources')\n raw_infos['sources'] = unicode(sources)\n \n works = notice.find(class_='oeuvres')\n if works is not None:\n works['class'] = 'works'\n raw_infos['works'] = unicode(works)\n \n # In function that writes, encode everything to bytes! .encode('utf-8')\n return raw_infos",
"def _parse(self):\n soup = BS(self._current_html, 'lxml')\n for item in soup.select('div.c'):\n temp = {}\n # main content\n ctt = item.select('span.ctt')\n if not ctt:\n continue\n weibo_body = item.select('div')\n if len(weibo_body) > 1:\n temp['content'] = weibo_body[0].text\n btn_group = weibo_body[1].text\n else:\n temp['content'] = weibo_body[0].select('span.ctt')[0].text\n btn_group = weibo_body[0].text\n temp['is_repost'] = True if REPO_TEST_PATTERN.match(\n temp['content']) else False\n try:\n temp['like_num'] = LIKE_NUM_PATTERN.findall(btn_group)[0]\n temp['cmt_num'] = COMMENT_NUM_PATTERN.findall(btn_group)[0]\n temp['repo_num'] = REPO_NUM_PATTERN.findall(btn_group)[0]\n except Exception:\n pass\n cmt = item.select('.cmt')\n # visibility\n if cmt:\n try:\n temp['visibility'] = VISIBILITY_PATTERN.findall(\n cmt[0].text)[0]\n except Exception:\n pass\n\n # img in main content\n img = item.select('div a img')\n img_src = img[0].attrs['src'] if img else None\n temp['img_src'] = img_src\n LOGGER.debug('img_src: {}'.format(img_src))\n # time & source device\n ct = item.select('span.ct')\n if ct:\n ct = ct[0]\n text = ct.text\n reg_result = TIME_PATTERN.findall(text)[0]\n\n temp['time'] = ar(\n '{}年{}'.format(self._current_year, reg_result[0]),\n DATE_FMTS[0]\n ).naive if reg_result[0] else ar(\n reg_result[1], DATE_FMTS[1]\n ).naive\n temp['source'] = SOURCE_DEVICE_PATTERN.findall(text)[0]\n self._post_item = Post(**temp)\n self._attachment_item = Attachment(\n uri=img_src, post=self._post_item)\n self._store()",
"def extract_one( html: str, fpath: Path ):\n # %%\n doc = BeautifulSoup( html, features='html.parser')\n\n ret = { 'linkedin_handle': fpath.name.split('.')[0] }\n _parse_top_card( ret, doc )\n # %%\n ret['about'] = _extract_about( doc )\n # if len(ret['about']) < 100 and ret['about'].find('ver más') > 0:\n # print( f\"\\nVer más detected: \\nabout:{ret['about']} fpath={fpath}\" )\n\n ret['about_stats'] = {'about_eng_ratio': _common_english_ratio(ret['about'])}\n # %%\n ret['work_experience'] = _parse_experiences( doc )\n ret['work_stats'] = calc_work_stats( ret['work_experience'])\n # %%\n ret['skills'] = proc_skills_section( doc )\n ret['education'] = _parse_education( doc )\n ret['education_stats'] = _education_stats( ret['education'])\n ret['accomplishments'] = _extract_accomplishments(doc)\n ret['profile_text_stats'] = profile_text_stats( doc )\n # %%\n return ret\n # %%",
"def archive_parse_for_posts(page_html):\n # <div\\s+class=\"post.+data\\-post\\-id\\=['\"](\\d+)['\"].+?<span\\s+class=['\"]post_date['\"]>([^<]+)</span>\n post_info_regex = \"\"\"<div\\s+class=\"post.+?data\\-post\\-id\\=['\"](\\d+)['\"].+?<span\\s+class=['\"]post_date['\"]>([^<]+)</span>\"\"\"\n post_info = re.findall(post_info_regex, page_html, re.IGNORECASE|re.DOTALL)\n return post_info",
"def __local_am(soup):\n return __get_local_g1_news(soup)",
"def getAdditionalDetails(self, soup):\n title_details = soup.find('div', id=\"titleDetails\")\n title_details = title_details.findAll('div', class_=\"txt-block\")\n return title_details",
"def parsed_html():\n return utils.parse_html(\n \"\"\"\n <!doctype hmtl>\n <html>\n <head>\n <meta charset=\"utf-8\">\n <meta name=\"viewport\" content=\"width=device-width\">\n <title>Page title</title>\n <link rel=\"stylesheet\" href=\"/static/styles.css\" />\n </head>\n <body>\n <h1>Django Auto AMP</h1>\n <p>Generate automatic AMP from your Django templates</p>\n <img src=\"/static/img.jpg\" width=\"500\" height=\"300\" />\n <img src=\"/static/img.gif\" layout=\"nodisplay\" />\n <img src=\"/static/img.png\" />\n <script type=\"text/javascript\" src=\"/static/scripts.js\" />\n <script type=\"application/json\" src=\"/static/data.json\" />\n </body>\n </html>\n \"\"\"\n )",
"def get_information(article_link):\n\n if \"video\" in article_link or \"/apps/\" in article_link or \"checknews\" in\\\n article_link or not re.search(r\"\\d\\d\\d\\d/\\d\\d/\\d\\d\", article_link):\n return None\n\n else:\n\n date_article = re.search(r\"\\d{4}/\\d{2}/\\d{2}\", article_link)[0]\n date_article = date.datetime.strptime(date_article, \"%Y/%m/%d\")\n\n diff_date = date.datetime.now() - date_article\n\n if diff_date.days > 7:\n return None\n\n else:\n req = requests.get(article_link)\n req.encoding = \"utf-8\"\n data = req.text\n soup = BeautifulSoup(data, \"lxml\")\n\n if soup.find(\n \"div\",\n class_=\"direct-headband\") or article_link != req.url:\n return None\n else:\n balise_title = soup.find(\"h1\")\n balise_title = balise_title.get_text()\n balise_title = re.sub(r\"\\s\\s+\", \"\", balise_title)\n\n newspaper = \"Liberation\"\n title = unidecode.unidecode(balise_title)\n\n author = \"\"\n for span in soup.find_all('span'):\n if span.get(\"class\") == ['author']:\n if(span.a):\n author = span.a.string\n if span.get(\"class\") == ['date']:\n if(span.time):\n date_p = date.datetime.strptime(\n span.time.get(\"datetime\"), \"%Y-%m-%dT\" +\n \"%H:%M:%S\").date()\n date_p = date_p.strftime(\"%Y-%m-%d\")\n print(date_p)\n\n content = \"\"\n for div in soup.find_all('div'):\n for p in div.find_all('p'):\n content += p.get_text() + \" \"\n content = re.sub(\"<>\", \"\", content)\n content = unidecode.unidecode(content)\n\n new_article = utils.recovery_article(\n title, newspaper, [author], date_p, content, \" \")\n\n return new_article",
"def scrape_story_metadata(self, story_id):\n url = '{0}/s/{1}'.format(self.base_url, story_id)\n result = requests.get(url)\n html = result.content\n #print html \n soup = BeautifulSoup(html, self.parser)\n\n # print soup\n try:\n pre_story_links = soup.find(id='pre_story_links').find_all('a')\n except AttributeError:\n pre_story_links = None\n if re.search(r\"var userid = (.*);\", str(soup)) is None:\n author_id = \"0\"\n else: \n author_id = int(re.search(r\"var userid = (.*);\", str(soup)).groups()[0]);\n #print re.search(r\"var title = (.*);\", str(soup))\n if re.search(r\"var title = (.*);\", str(soup)) is None:\n title = \"NO-TITLE\"\n else:\n title = re.search(r\"var title = (.*);\", str(soup)).groups()[0];\n title = unquote_plus(title)[1:-1]\n metadata_div = soup.find(id='profile_top')\n# times = metadata_div.find_all(attrs={'data-xutime':True})\n# metadata_text = metadata_div.find(class_='xgray xcontrast_txt').text\n# metadata_parts = metadata_text.split('-')\n# genres = self.get_genres(metadata_parts[2].strip())\n metadata = {\n 'id': story_id,\n# 'canon_type': pre_story_links[0].text,\n# 'canon': pre_story_links[1].text,\n 'author_id': author_id,\n 'title': title,\n# 'updated': int(times[0]['data-xutime']),\n# 'published': int(times[1]['data-xutime']),\n# 'lang': metadata_parts[1].strip(),\n# 'genres': genres\n }\n \"\"\"\n for parts in metadata_parts:\n parts = parts.strip()\n tag_and_val = parts.split(':')\n if len(tag_and_val) != 2:\n continue\n tag, val = tag_and_val\n tag = tag.strip().lower()\n if tag not in metadata:\n val = val.strip()\n try:\n val = int(val.replace(',', ''))\n metadata['num_'+tag] = val\n except:\n metadata[tag] = val\n if 'status' not in metadata:\n metadata['status'] = 'Incomplete'\n \"\"\"\n return metadata",
"def parse(self, response: BeautifulSoup):\n raise NotImplementedError",
"def _extract_html(self, url):\n self.response = requests.get(url, timeout=5)\n self.html = BeautifulSoup(self.response.content, \"lxml\") if self.response.ok else None\n # return self.html",
"def get_info(url):\r\n soup = make_request(url)\r\n\r\n #get press release title\r\n title_text = soup.find(\"h2\", \"con-title\").text.strip()\r\n title = title_text.partition('\\n')[0]\r\n\r\n #get press release content and date\r\n div = soup.find_all(\"div\") #find div tags\r\n for ele in div:\r\n for div2 in ele(\"div\",\"text-right\"):\r\n if \"發佈日期\" in div2.text:\r\n text = ele.text\r\n date = re.findall(\"\\d\\d\\d\\d-\\d\\d-\\d\\d\", div2.text)[0]\r\n break #prevents reiterating upwards to all div parents\r\n return date, title, text",
"def get_article_body(url):\n headers = {'User-Agent': 'Codeup Data Science'}\n response = get(url, headers=headers)\n soup = BeautifulSoup(response.content, \"html.parser\") \n return soup.find('div', itemprop='text').text",
"def parseSearchHtml(self):\n pass",
"def parseSearchHtml(self):\n pass",
"def from_html(self, content):\r\n pass",
"def parse_source(html, encoding='utf-8'):\n return BeautifulSoup(html, from_encoding=encoding)",
"def convert_content(self, html):\n\n try:\n dom = BeautifulSoup(html, 'html.parser')\n return self.parse_content(dom)\n except:\n return html",
"def extract_answer_from_html(self, html):\n if html.strip().startswith('<'):\n soup = bs4.BeautifulSoup(html, 'html.parser')\n\n for p in soup.find_all('p'):\n if self.REPLY_RE.match(p.text):\n for el in list(p.previous_elements):\n if isinstance(el, bs4.element.Tag):\n el.decompose()\n p.decompose()\n break\n\n return str(soup)\n else:\n # plain text\n match = self.REPLY_RE.search(html)\n if match:\n return html[match.end(0):]\n\n return html"
] |
[
"0.62940294",
"0.6138685",
"0.582471",
"0.57500374",
"0.5688174",
"0.56493",
"0.5640577",
"0.56324977",
"0.56077284",
"0.55874634",
"0.5580428",
"0.5561971",
"0.5549291",
"0.5466563",
"0.53989834",
"0.53752327",
"0.53721714",
"0.5323852",
"0.5306437",
"0.5299389",
"0.5295665",
"0.5294891",
"0.5286825",
"0.52827746",
"0.52810407",
"0.52810407",
"0.5269709",
"0.52679724",
"0.52508235",
"0.52425253"
] |
0.80006164
|
0
|
Check the title and URL of the announcement
|
def check_announcement_content_validity(self, a: dict) -> None:
url_regex = r'[(http(s)?):\/\/(www\.)?a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)'
if a['title'] == '' or type(a['title']) is not NavigableString:
raise AnnouncementContentNotFound('Announcement title is empty or invalid')
if re.match(url_regex, a['url'], re.IGNORECASE) is None:
raise AnnouncementContentNotFound('Announcement URL is invalid')
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def verify(self):\r\n self.title = self.title and self.title or '' \r\n self.descr = self.descr and self.descr or '' \r\n self.link = self.link and self.link or ''\r\n self.channelURL = self.channelURL and self.channelURL or ''",
"def test_announcement_view(self):\n response = self.client.get(url_for('main.announcements'))\n self.assertEqual(response.status_code, 200)",
"def get_information(article_link):\n\n if \"video\" in article_link or \"/apps/\" in article_link or \"checknews\" in\\\n article_link or not re.search(r\"\\d\\d\\d\\d/\\d\\d/\\d\\d\", article_link):\n return None\n\n else:\n\n date_article = re.search(r\"\\d{4}/\\d{2}/\\d{2}\", article_link)[0]\n date_article = date.datetime.strptime(date_article, \"%Y/%m/%d\")\n\n diff_date = date.datetime.now() - date_article\n\n if diff_date.days > 7:\n return None\n\n else:\n req = requests.get(article_link)\n req.encoding = \"utf-8\"\n data = req.text\n soup = BeautifulSoup(data, \"lxml\")\n\n if soup.find(\n \"div\",\n class_=\"direct-headband\") or article_link != req.url:\n return None\n else:\n balise_title = soup.find(\"h1\")\n balise_title = balise_title.get_text()\n balise_title = re.sub(r\"\\s\\s+\", \"\", balise_title)\n\n newspaper = \"Liberation\"\n title = unidecode.unidecode(balise_title)\n\n author = \"\"\n for span in soup.find_all('span'):\n if span.get(\"class\") == ['author']:\n if(span.a):\n author = span.a.string\n if span.get(\"class\") == ['date']:\n if(span.time):\n date_p = date.datetime.strptime(\n span.time.get(\"datetime\"), \"%Y-%m-%dT\" +\n \"%H:%M:%S\").date()\n date_p = date_p.strftime(\"%Y-%m-%d\")\n print(date_p)\n\n content = \"\"\n for div in soup.find_all('div'):\n for p in div.find_all('p'):\n content += p.get_text() + \" \"\n content = re.sub(\"<>\", \"\", content)\n content = unidecode.unidecode(content)\n\n new_article = utils.recovery_article(\n title, newspaper, [author], date_p, content, \" \")\n\n return new_article",
"async def announcement(ctx, bot: typing.Union[discord.Member, discord.User]):\n plonked = await is_plonked(ctx.author.id)\n if plonked:\n return\n \n data = await make_request(\"https://www.motiondevelopment.top/api/v1.2/bots/\", bot.id)\n print(data)\n announcement = data[\"annoucements\"]\n print(bool(announcement))\n e = discord.Embed(color=0xfecdea, title=f'Announcement ID: {announcement[\"post_id\"]}')\n if announcement != False:\n e.add_field(\n name=f'{announcement[\"post_title\"]}',\n value=announcement[\"post_body\"]\n )\n e.description = f\"Post created by {data['owner_name']} ({data['owner_id']})\"\n else: \n e.description = 'This bot doesn\\'t have an announcement. :cry:'\n\n await em(ctx, embed=e)",
"def test_content(test):\n # from bs4 import BeautifulSoup\n # assert 'GitHub' in BeautifulSoup(response.content).title.string",
"def check_link(self, link, links_para):\n href = link['href']\n if not href.startswith('/wiki/') or href == '/wiki/Latin' or href.startswith('#'):\n return False\n if \"<i>\" in link or href in links_para:\n return False\n title = href[6:]\n if title.startswith('Help:') or title.startswith('File:') or title.endswith('.ogg') or title.startswith('Wikipedia:'):\n return False\n return True",
"def test_get(self):\n # retrieve (get) request\n response = requests.get(self.url)\n # expected title of articles endpoint\n html_title = \"Crowd Scholar\"\n\n # assert get request returns a status code 200 (success)\n self.assertTrue(response.status_code is 200)\n # assert expected title is in response body\n self.assertTrue(html_title in response.text.title())",
"def test_content(response):\n # from bs4 import BeautifulSoup\n # assert 'GitHub' in BeautifulSoup(response.content).title.string\n assert True",
"def _parse_title(self, links):\n for link in links:\n if \"hearing\" in link[\"title\"].lower():\n return link[\"title\"].replace(\"Notice\", \"\").strip()\n if \"special\" in link[\"title\"].lower():\n return \"Special Meeting\"\n return \"Illinois Medical District Commission\"",
"def test_list_views_check_main_title_descriptin(self):\n url = reverse('blogs:list')\n response = self.client.get(url)\n # TODO you need to check that the tiles are present in the list Dilshad. You are only looking for the http200\n self.assertEqual(response.status_code, 200)\n self.assertIn(self.main_title, str(response.content))\n self.assertIn(self.description1, str(response.content))\n self.assertIn(self.description2, str(response.content))",
"def test_scrape(self):\n self.assertEqual(self.scraped.title, 'Heading!')\n self.assertEqual(self.scraped.link_text, 'Go to Google')\n self.assertEqual(self.scraped.link_url, 'http://Google.com')",
"def check_story_exists(self) -> bool:\n title_check = self._soup.find(\"title\").string\n if title_check == u'FicWad: fresh-picked original and fan fiction':\n return False\n return True",
"def validate(self, item):\n attempt, pkg_analyzer, journal_and_issue_data = item[:3]\n\n #The value returned from get('medline_title') when do not have title is None\n j_nlm_title = journal_and_issue_data.get('journal').get('medline_title')\n\n xml_tree = pkg_analyzer.xml\n xml_nlm_title = xml_tree.findtext('.//journal-meta/journal-id[@journal-id-type=\"nlm-ta\"]')\n\n if not xml_nlm_title:\n xml_nlm_title = ''\n\n if not j_nlm_title:\n j_nlm_title = ''\n\n if self._normalize_data(xml_nlm_title) == self._normalize_data(j_nlm_title):\n status, description = [models.Status.ok, 'Valid NLM journal title: %s' % xml_nlm_title]\n else:\n status, description = [models.Status.error, 'Mismatched data: %s. Expected: %s' % (xml_nlm_title, j_nlm_title)]\n\n return [status, description]",
"def test_content(response):\n # from bs4 import BeautifulSoup\n # assert 'GitHub' in BeautifulSoup(response.content).title.string",
"def test_content(response):\n # from bs4 import BeautifulSoup\n # assert 'GitHub' in BeautifulSoup(response.content).title.string",
"def test_content(response):\n # from bs4 import BeautifulSoup\n # assert 'GitHub' in BeautifulSoup(response.content).title.string",
"def test_content(response):\n # from bs4 import BeautifulSoup\n # assert 'GitHub' in BeautifulSoup(response.content).title.string",
"def test_content(response):\n # from bs4 import BeautifulSoup\n # assert 'GitHub' in BeautifulSoup(response.content).title.string",
"def test_content(response):\n # from bs4 import BeautifulSoup\n # assert 'GitHub' in BeautifulSoup(response.content).title.string",
"def test_content(response):\n # from bs4 import BeautifulSoup\n # assert 'GitHub' in BeautifulSoup(response.content).title.string",
"def test_content(response):\n # from bs4 import BeautifulSoup\n # assert 'GitHub' in BeautifulSoup(response.content).title.string",
"def test_title(self):\n self.driver.get(\"https://demo.testchameleon.com/\")\n assert \"Gentellela Alela!\" in self.driver.title",
"def check_title(self):\n currenttitle = self.driver.title\n assert self.TITLE in currenttitle, 'Title not expected. Actual: ' + currenttitle + ', Expected: ' + self.TITLE",
"def is_news_article(self, page):\n title = page.find(self.tag_prefix + self.title_tag).text\n for meta in self.titles_to_exclude:\n if title.startswith(meta):\n logging.info(\"{} No es un articulo. Se ignora\".format(title.encode('utf8')))\n return False\n logging.info(\"{} Es un articulo. Se procesa\".format(title.encode('utf8')))\n return True",
"def news_emergency():\n #Fetches data from API and creates global varibles.\n news_handle(news_fetch(config_fetcher('news_region'), config_fetcher('news_key')))\n #Creates a message or not.\n if 'reaking news' in title_1:\n news_emergency_text = Markup((f\"Breaking News! {title_1} \\n Click to continue reading: {url_1_final}\"))\n return news_emergency_text\n elif 'reaking news' in title_2:\n news_emergency_text = Markup((f\"Breaking News! {title_2} \\n Click to continue reading: {url_2_final}\"))\n return news_emergency_text\n elif 'russels' in title_1:\n news_emergency_text = Markup((f\"Breaking News! {title_1} \\n Click to continue reading: {url_1_final}\"))\n return news_emergency_text\n else:\n return None",
"def test_getTitle(self):\n def checkNameAndTitle(name, titlesolution):\n title = self._nameClassifierBuilder._getTitle(name)\n self.assertEquals(titlesolution, title)\n\n checkNameAndTitle(\"Mrs. ldajfhgp\", \"Mrs\")\n checkNameAndTitle(\"dlsfajkMrdlkjaf\", \"Mr\")\n checkNameAndTitle(\"dagddgwdasJonkheer\", \"Jonkheer\")",
"def test_explicit_discussion_link(self):\r\n self.settings.FEATURES['ENABLE_DISCUSSION_SERVICE'] = False\r\n self.check_discussion(\r\n tab_list=self.tabs_with_discussion,\r\n discussion_link_in_course=\"other_discussion_link\",\r\n expected_discussion_link=\"other_discussion_link\",\r\n expected_can_display_value=True,\r\n )",
"def test_none(self):\n\n feed = parseFeed()\n issues = []\n for item in feed.getElementsByTagName(\"entry\"):\n for description in item.getElementsByTagName(\"title\"):\n issues.append(description.firstChild.wholeText)\n self.assertEqual([], issues)",
"def check_valid_title(title):\n title_issues = TitleIssues(title_contains_nsfw=title_contains_nsfw(title))\n return title_issues",
"def verifyPageTitle(self, titleToVerify):\n try:\n actualTitle = self.getTitle()\n return self.util.verifyTextContains(actualTitle, titleToVerify)\n except:\n self.log.error(\"Failed to get page title\")\n print_stack()\n return False"
] |
[
"0.68196785",
"0.6165887",
"0.59821224",
"0.592882",
"0.59199625",
"0.58730316",
"0.5831485",
"0.57923007",
"0.57597804",
"0.5742691",
"0.5737139",
"0.5727141",
"0.5695846",
"0.5685224",
"0.5685224",
"0.5685224",
"0.5685224",
"0.5685224",
"0.5685224",
"0.5685224",
"0.5685224",
"0.56572884",
"0.56178737",
"0.5609314",
"0.55984735",
"0.55640423",
"0.55113846",
"0.55072457",
"0.54819494",
"0.5478438"
] |
0.7364866
|
0
|
Extract announcement data from a BeautifulSoup4 Tag object
|
def get_data_from_tag(self, tag: Tag) -> dict:
self.verify_tag_structure(tag)
title = tag.string
url = tag.contents[0]['href'] # tag.contents[0].name is 'a'
date_string = tag.next_sibling.next_sibling.contents[0]
published_date = (self.get_date_from_string(date_string))
announcement_data = {
'id': None,
'title': title,
'url': url,
'check_string': None,
'published_datetime': published_date,
'updated_datetime': None,
'retrieved_datetime': datetime.now(),
'stored_timestamp': None
}
self.check_announcement_content_validity(announcement_data)
return announcement_data
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def parse_announcement_data(self) -> 'Scraper':\n logger.info('Parsing extracted html partial')\n for tag in self.html_partial: # there are 63 tags\n if tag.name == 'h4':\n announcement_data = self.get_data_from_tag(tag)\n self.announcement_data_list.append(announcement_data)\n logger.info('Compiled announcement data list from html web page partial')\n return self",
"def parse(content):\n soup = BeautifulSoup(content, 'html.parser')\n if soup.article is None:\n return None\n period = parse_event_period(soup)\n reason = identify_reason(soup)\n if reason is False:\n return None\n return (period, reason)",
"def parse_article_html(page_resp):\n article_url = page_resp.url\n \n article_page_soup = bs4.BeautifulSoup(page_resp.text, \"lxml\")\n \n title_html = article_page_soup.find_all(\"h1\")[0]\n title_text = title_html.contents[0]\n \n date = article_page_soup.find_all(\"small\", {'class': 'gray'})[0]\n date_text = date.contents[4].replace(\" \", \"\").split(\"\\n\")[3][:10]\n \n article_content = article_page_soup.find_all(\"div\", {'class': 'rich_media_content'})[0]\n article_text = article_content.get_text('\\n')\n is_original = check_if_original(article_content) or '[原创]' in title_text\n \n return {\n 'title': title_text,\n 'date': date_text,\n 'url': article_url,\n 'is_original': is_original,\n 'text': article_text\n \n}",
"def get_article_info(elem: str) -> ArticleInfo:\n\n headers = {\n 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/53.0.2785.143 Safari/537.36 '\n }\n\n time.sleep(60)\n print(f\"Collecting info from link {elem}...\")\n\n html_content = requests.get(elem, headers=headers).content\n soup = BeautifulSoup(html_content, 'lxml')\n\n base = 'div.grid-container div.single-post-grid div'\n\n title_query = f'{base} div.post-header div.post-header-container div.post-header-title div.the_title'\n\n title = soup.select_one(title_query).get_text()\n\n text_query = f'{base} div.post-inside div.post-content p'\n text_parts = []\n\n for elem1 in soup.select(text_query):\n temp_text = elem1.get_text().replace(\n '<strong>', ''\n ).replace(\n '</strong>', ''\n )\n\n text_parts.append(temp_text)\n\n full_text = ' '.join(text_parts)\n\n tags_query = f'{base} div.post-inside div.post-content div.tags a'\n tags = []\n\n for elem1 in soup.select(tags_query):\n tags.append(elem1.get_text())\n\n article = ArticleInfo(\n url=elem,\n title=title,\n text=full_text,\n keywords=tags\n )\n\n print(article)\n\n return article",
"def _extract_data(self,data,tag=None,cssid=None,cssclass=None,attrs=None,regexp=None,index=0):\n \n# cssclass = \"song\"\n# cssid = \"newsTable0\"\n# tag = \"div\"\n# import pdb\n# pdb.set_trace() \n \n if cssid: \n searchconstrain = SoupStrainer(tag, id=cssid)\n elif cssclass:\n searchconstrain = SoupStrainer(tag, attrs={\"class\":cssclass}) \n else:\n if isinstance(attrs, unicode):\n try:\n attrs = attrs.encode('utf-8')\n regexp = regexp.encode('utf-8')\n except:\n pass \n searchconstrain = SoupStrainer(tag, attrs={attrs:re.compile(regexp)})\n\n soup = BeautifulSoup(data,parseOnlyThese=searchconstrain)\n rslist = [ tp for tp in soup ]\n return rslist[index]",
"def find_data_in_soup(soup, tag: str, class_id:str) -> str:\n return soup.find(tag, class_=class_id).get_text()",
"def get_info(url):\r\n soup = make_request(url)\r\n\r\n #get press release title\r\n title_text = soup.find(\"h2\", \"con-title\").text.strip()\r\n title = title_text.partition('\\n')[0]\r\n\r\n #get press release content and date\r\n div = soup.find_all(\"div\") #find div tags\r\n for ele in div:\r\n for div2 in ele(\"div\",\"text-right\"):\r\n if \"發佈日期\" in div2.text:\r\n text = ele.text\r\n date = re.findall(\"\\d\\d\\d\\d-\\d\\d-\\d\\d\", div2.text)[0]\r\n break #prevents reiterating upwards to all div parents\r\n return date, title, text",
"def extract_raw_text(soup, url):\n \n title_class = \"nom-notice\"\n title = soup.find(class_=title_class)\n raw_infos = {}\n raw_infos['name'] = title.contents[0].replace(u'\\xa0', ' ')\n \n notice = soup.find(class_=\"notice\")\n \n summary = notice.find(class_=\"chapo\")\n if summary is not None:\n first_para = summary.find_all('p', recursive=False)[-1]\n first_para.tag = 'div'\n first_para['class'] = 'summary'\n raw_infos['summary'] = unicode(first_para)\n \n else:\n raw_infos['summary'] = unicode('')\n\n article = notice.find(class_='texte')\n if article is not None:\n article['class'] = 'article'\n raw_infos['article'] = unicode(article)\n \n sources = notice.find(class_='sources')\n raw_infos['sources'] = unicode(sources)\n \n works = notice.find(class_='oeuvres')\n if works is not None:\n works['class'] = 'works'\n raw_infos['works'] = unicode(works)\n \n # In function that writes, encode everything to bytes! .encode('utf-8')\n return raw_infos",
"def extract_element_data(soup, params):\r\n \r\n # 1. Find the right tag\r\n if 'class' in params:\r\n elements_found = soup.find_all(params['tag'], params['class'])\r\n else:\r\n elements_found = soup.find_all(params['tag'])\r\n \r\n # 2. Extract text from these tags\r\n if 'get' in params:\r\n element_texts = [el.get(params['get']) for el in elements_found]\r\n else:\r\n element_texts = [el.get_text() for el in elements_found]\r\n \r\n # 3. Select a particular text or concatenate all of them\r\n tag_order = params.get('order', 0)\r\n if tag_order == -1:\r\n output = '**__**'.join(element_texts)\r\n else:\r\n output = element_texts[tag_order]\r\n \r\n return output",
"def getHTMLTag(self, html, tag):\n soup = BeautifulSoup(html, 'html.parser')\n content = soup.find(tag)\n return content",
"def extractText(postSoup):\n for tag in postSoup.findAll(True):\n if tag.name in (\"code\"):\n tag.extract()\n else:\n tag.hidden=True\n\n return postSoup.renderContents()",
"def _getArticleContet(self,encodedTag):\r\n xmlEncodedTag = BeautifulSoup(encodedTag.string,\"lxml\")#encoded tag actually has a format of an XML\r\n articleContent = []\r\n for element in xmlEncodedTag.body.contents:\r\n if _getTextElement(element):\r\n articleContent.append(unidecode.unidecode(element.get_text()))\r\n if self._isEndOfArticleCommerical(element):\r\n continue\r\n wordPhraseToRefLink = {a.get_text().strip().lower():a.attrs['href'] for a in xmlEncodedTag.find_all(\"a\")}\r\n return articleContent,wordPhraseToRefLink",
"def _parse_message(self, soup):\n kind, = soup.attrs[u'class']\n title = soup.findChild().text\n body = ''.join(t.text for t in soup.findChildren()[1:])\n message = dict(kind=kind, title=title, body=body)\n for val in message.values():\n assert type(val) == str\n return message",
"def getAdditionalDetails(self, soup):\n title_details = soup.find('div', id=\"titleDetails\")\n title_details = title_details.findAll('div', class_=\"txt-block\")\n return title_details",
"def __local_am(soup):\n return __get_local_g1_news(soup)",
"def extract(self, doc, raw_html):\n super(KenyaTodayCrawler, self).extract(doc, raw_html)\n\n soup = BeautifulSoup(raw_html)\n\n # gather title\n doc.title = soup.find(attrs={\"property\":\"og:title\"})['content']\n\n #gather publish date\n date = self.extract_plaintext(soup.select(\"main.content .entry-meta .entry-time\"))\n doc.published_at = self.parse_timestamp(date)\n\n nodes = soup.select(\".content .entry-content p\")\n self.log.info(nodes)\n if len(nodes) > 1:\n doc.summary = self.extract_plaintext(nodes[0:1])\n doc.text = \"\\n\\n\".join(p.text.strip() for p in nodes[2:])\n\n doc.author = Author.unknown()",
"def get_article_body(url):\n headers = {'User-Agent': 'Codeup Data Science'}\n response = get(url, headers=headers)\n soup = BeautifulSoup(response.content, \"html.parser\") \n return soup.find('div', itemprop='text').text",
"def extract_news(parser):\n news_list = []\n\n titles = parser.find_all(\"tr\", class_=\"athing\")\n subtext = parser.find_all(\"td\", class_=\"subtext\")\n\n for i in range(len(titles)):\n x = titles[i].find_all(\"td\", class_=\"title\")[1]\n title = x.a.text\n url = x.a[\"href\"]\n c = subtext[i].find_all(\"a\")[4]\n if c.text == \"discuss\":\n comments = 0\n else:\n comments = c.text\n author = subtext[i].find(\"a\", class_=\"hnuser\").get_text()\n point = subtext[i].find(\"span\", class_=\"score\").text\n points = point.split(' ')[0]\n\n news_list.append({\"author\": author, \"comments\": comments, \"points\": points, \"title\": title, \"url\": url})\n\n return news_list",
"def archive_parse_for_posts(page_html):\n # <div\\s+class=\"post.+data\\-post\\-id\\=['\"](\\d+)['\"].+?<span\\s+class=['\"]post_date['\"]>([^<]+)</span>\n post_info_regex = \"\"\"<div\\s+class=\"post.+?data\\-post\\-id\\=['\"](\\d+)['\"].+?<span\\s+class=['\"]post_date['\"]>([^<]+)</span>\"\"\"\n post_info = re.findall(post_info_regex, page_html, re.IGNORECASE|re.DOTALL)\n return post_info",
"def scrape_story_metadata(self, story_id):\n url = '{0}/s/{1}'.format(self.base_url, story_id)\n result = requests.get(url)\n html = result.content\n #print html \n soup = BeautifulSoup(html, self.parser)\n\n # print soup\n try:\n pre_story_links = soup.find(id='pre_story_links').find_all('a')\n except AttributeError:\n pre_story_links = None\n if re.search(r\"var userid = (.*);\", str(soup)) is None:\n author_id = \"0\"\n else: \n author_id = int(re.search(r\"var userid = (.*);\", str(soup)).groups()[0]);\n #print re.search(r\"var title = (.*);\", str(soup))\n if re.search(r\"var title = (.*);\", str(soup)) is None:\n title = \"NO-TITLE\"\n else:\n title = re.search(r\"var title = (.*);\", str(soup)).groups()[0];\n title = unquote_plus(title)[1:-1]\n metadata_div = soup.find(id='profile_top')\n# times = metadata_div.find_all(attrs={'data-xutime':True})\n# metadata_text = metadata_div.find(class_='xgray xcontrast_txt').text\n# metadata_parts = metadata_text.split('-')\n# genres = self.get_genres(metadata_parts[2].strip())\n metadata = {\n 'id': story_id,\n# 'canon_type': pre_story_links[0].text,\n# 'canon': pre_story_links[1].text,\n 'author_id': author_id,\n 'title': title,\n# 'updated': int(times[0]['data-xutime']),\n# 'published': int(times[1]['data-xutime']),\n# 'lang': metadata_parts[1].strip(),\n# 'genres': genres\n }\n \"\"\"\n for parts in metadata_parts:\n parts = parts.strip()\n tag_and_val = parts.split(':')\n if len(tag_and_val) != 2:\n continue\n tag, val = tag_and_val\n tag = tag.strip().lower()\n if tag not in metadata:\n val = val.strip()\n try:\n val = int(val.replace(',', ''))\n metadata['num_'+tag] = val\n except:\n metadata[tag] = val\n if 'status' not in metadata:\n metadata['status'] = 'Incomplete'\n \"\"\"\n return metadata",
"def get_study_data(self, soup, url):\n pass",
"def _parse_article(self, a_cookie, a_ua):\n url = \"https://seekingalpha.com/article/%s\" % self._id\n r = safe_request(url, {})\n r_login = safe_request(url, a_cookie)\n\n soup_log = BeautifulSoup(r_login.text, 'html.parser')\n # Stops process if article invalid\n primary_about = soup_log.find_all(\"a\", href=True, sasource=\"article_primary_about\")\n if len(primary_about) != 1:\n # Excludes non-single-ticker articles\n print(\"Invalid Article\")\n self.valid = False\n return\n else:\n self.ticker = primary_about[0].text.split()[-1][1:-1]\n\n # Gets all includes and author\n about = soup_log.find_all(\"a\", href=True)\n for a in about:\n if 'sasource' in a.attrs:\n if a.attrs['sasource'] == \"article_about\":\n self.includes += a.text + \",\"\n elif a.attrs['sasource'] == \"auth_header_name\":\n self.author += a.text + \",\"\n\n self.includes = self.includes[:-1]\n self.author = self.author[:-1]\n self.title = soup_log.find_all('h1')[0].text\n self.pub_date = soup_log.find_all('time', itemprop=\"datePublished\")[0]['content'][:10]\n\n # Get Full Article Text\n name_box = BeautifulSoup(r.text, 'html.parser').find_all('p')\n print(name_box)\n try:\n disc_idx = list(filter(lambda i: 'id' in name_box[i].attrs and name_box[i]['id'] == 'a-disclosure',\n range(len(name_box))))[0]\n except IndexError:\n disc_idx = len(name_box)\n self.text = ''.join(map(lambda x: x.text + \"\\n\", name_box[:disc_idx]))",
"def parse(self, response: BeautifulSoup):\n raise NotImplementedError",
"def extract_author(bs_soup):\n sub_item = bs_soup.find(\"div\", class_=AUTHOR_CLASS)\n if sub_item:\n return sub_item.text\n return None",
"def get_article_text(self, article_webpage):\n lemonde_parser = LeMondeHTMLParser()\n lemonde_parser.feed(article_webpage)\n return lemonde_parser.article_data",
"def parse_api(self, soup):\n pdict = {}\n pdict[\"has_publication\"] = False\n pdict[\"has_print\"] = False\n self.search_doi(soup)\n\n article_meta = soup.entry\n # remove unnecessary line break\n pdict[\"abstract\"] = get_string(article_meta, \"summary\").replace(\n \"\\n\", \" \"\n )\n print(repr(article_meta.summary.get_text(strip=True)))\n # sometimes the arXiv article title has unnecessary linebreak\n pdict[\"title\"] = get_string(article_meta, \"title\").replace(\"\\n \", \"\")\n pdict[\"title_latex\"] = pdict[\"title\"]\n\n pub_date = datetime.strptime(\n article_meta.updated.string, \"%Y-%m-%dT%H:%M:%SZ\"\n )\n pdict[\"online_year\"] = str(pub_date.year)\n pdict[\"online_month\"] = str(pub_date.month)\n pdict[\"online_day\"] = str(pub_date.day)\n\n author = []\n for name in article_meta.find_all(\"name\"):\n name_ = re.match(r\"([\\s\\S]+) (\\w+)\", name.string)\n author.append([name_.group(2), name_.group(1)])\n pdict[\"author\"] = author\n return pdict",
"def _get_new_data(self, page_url, soup):\n data = {}\n data['url'] = page_url\n title = soup.find('dd', class_='lemmaWgt-lemmaTitle-title').find('h1')\n data['title'] = title.get_text()\n summary = soup.find('div', class_='lemma-summary')\n data['summary'] = summary.get_text()\n return data",
"def extract(self, data):",
"def parse(self, article: BeautifulSoup):\n # Need to find content in a different manner for each of the different sitetypes.\n # Read this as studio, video, article etc.\n # Assume that only articles ask for this functionality\n\n title = self.get_title(article)\n sub_title = self.get_sub_title(article)\n words = self.get_words(article)\n journalists = self.get_journalist(article)\n images = self.get_images(article)\n subscription = self.get_subscription(article)\n content_list = self.get_content(article)\n\n if not title:\n title = self.headline.revisions[0].title\n\n revision = Revision(timestamp=datetime.datetime.now(pytz.timezone(\"Europe/Oslo\")), title=title, sub_title=sub_title, words=words, subscription=subscription)\n\n article = Article(news_site=self.news_site, headline=self.headline)\n\n return revision, article, journalists, images, content_list",
"def get_article(url):\n \n r = requests.get(url) \n html_soup = BeautifulSoup(r.content, 'lxml')\n return html_soup"
] |
[
"0.6747848",
"0.61468184",
"0.61101943",
"0.6023411",
"0.59465456",
"0.5922927",
"0.5907416",
"0.583392",
"0.5683404",
"0.56611496",
"0.5640813",
"0.5633416",
"0.562401",
"0.5576179",
"0.55751514",
"0.5558634",
"0.555051",
"0.55294627",
"0.55007476",
"0.5500726",
"0.5455654",
"0.5447302",
"0.5438857",
"0.54238665",
"0.54161686",
"0.54160804",
"0.5390395",
"0.5383438",
"0.53779423",
"0.536632"
] |
0.7399995
|
0
|
Returns a collection of announcement objects in the form of an AnnouncementCollection object
|
def get_announcements(self, factory: 'AnnouncementFactory') -> 'AnnouncementCollection':
collection = factory.get_announcement_collection(self.get_announcement_data_list())
return collection
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get(self):\n announcements = Announcement.query.all()\n announcements = announcements_schema.dump(announcements)\n\n if not announcements:\n return {'status': 'success', 'announcements': announcements}, 206 # Partial Content Served\n\n return {'status': 'success', 'announcements': announcements}, 200",
"def get(self):\n return {'status': 'success', 'count': Announcement.query.count()}, 200",
"async def getAnnouncements(self, body=\"\"):\n payload = {}\n \n # Parameter validation\n schema = ContentValidator.getAnnouncements()\n schema.dump(schema.load(payload))\n \n\n url_with_params = await create_url_with_params(api_url=self._urls[\"getAnnouncements\"], proccessed_params=\"\"\"{\"required\":[],\"optional\":[],\"query\":[],\"headers\":[],\"path\":[]}\"\"\", )\n query_string = await create_query_string()\n headers = {\n \"Authorization\": \"Bearer \" + base64.b64encode(\"{}:{}\".format(self._conf.applicationID, self._conf.applicationToken).encode()).decode()\n }\n if self._conf.locationDetails:\n headers[\"x-location-detail\"] = ujson.dumps(self._conf.locationDetails)\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"GET\", url_with_params, headers=get_headers_with_signature(urlparse(self._urls[\"getAnnouncements\"]).netloc, \"get\", await create_url_without_domain(\"/service/application/content/v1.0/announcements\", ), query_string, headers, body, exclude_headers=exclude_headers), data=body, cookies=self._conf.cookies)",
"def articles(self):\r\n return Articles(self)",
"def articles(self):\n articles = Post.objects.live().descendant_of(self)\n articles = articles.order_by('-date')\n\n return articles",
"def list(self):\n return JSONResponse(self.request).data(items=self._get_agenda_items()).dump()",
"def articles(self):\r\n return articles.Articles(self)",
"def get_all_posts_from_collection(self):\n response = self.get_comments_all_posts(PAYLOAD)\n collection = (response.json())\n return collection",
"def articles(self):\n return articles.Articles(self)",
"def get(self, request):\n announcement_id = request.GET.get(\"id\")\n if announcement_id:\n try:\n announcement = Announcement.objects.get(id=announcement_id)\n return self.success(AnnouncementSerializer(announcement).data)\n except Announcement.DoesNotExist:\n return self.error(\"Announcement does not exist\")\n announcement = Announcement.objects.all().order_by(\"-create_time\")\n if request.GET.get(\"visible\") == \"true\":\n announcement = announcement.filter(visible=True)\n return self.success(self.paginate_data(request, announcement, AnnouncementSerializer))",
"def parse_announcement_data(self) -> 'Scraper':\n logger.info('Parsing extracted html partial')\n for tag in self.html_partial: # there are 63 tags\n if tag.name == 'h4':\n announcement_data = self.get_data_from_tag(tag)\n self.announcement_data_list.append(announcement_data)\n logger.info('Compiled announcement data list from html web page partial')\n return self",
"def get_advisories(self):\n\n advisories = []\n\n for i in range(len(self.__data['advisories'])):\n data = requests.get(self.__data['advisories'][i]['links']['self']['href'], headers=getHeaders()).json()\n this = {}\n this['id'] = data['id']\n this['name'] = data['name']\n advisories.append(this)\n\n return advisories",
"def news(self) -> List[News]:\n return self._news",
"def list_amendments(self):\n try:\n return list(self[CONFIG_KEY][PROJ_MODS_KEY][AMENDMENTS_KEY].keys())\n except Exception as e:\n _LOGGER.debug(\"Could not retrieve available amendments: {}\".\n format(getattr(e, 'message', repr(e))))\n return None",
"def find_all(cls):\n return [AuthorModel(a['name'], str(a['_id']))\n for a in cls.db.newsdb.find()]",
"def get_announcement(self, request):\n return StringMessage(\n data=memcache.get(MEMCACHE_ANNOUNCEMENTS_KEY) or \"\")",
"def articles(self, audience_filter=None):\n articles = ArticlePage.objects.live().descendant_of(self)\n if audience_filter is not None:\n articles = articles.filter(audience__name=audience_filter)\n articles = articles.order_by('-date')\n return articles",
"def get_all_events(cls):\n try:\n events = list(events_coll.find())\n events_list = []\n if events is not None:\n for event in events:\n one_event = cls(**event)\n events_list.append(one_event)\n return events_list\n except Exception as e:\n print(e)",
"def _parse_notice(self, response):\n notice_documents = self._parse_notice_documents(response)\n meetings_list = []\n for meeting in response.meta.get('upcoming', []):\n # Check if the meeting date is in any document title, if so, assign docs to that meeting\n meeting_date_str = '{dt:%B} {dt.day}'.format(dt=meeting['start']['date'])\n if any(meeting_date_str in doc['note'] for doc in notice_documents):\n meetings_list.append({\n **meeting, 'documents': notice_documents,\n 'sources': [{\n 'url': response.url,\n 'note': ''\n }]\n })\n else:\n meetings_list.append({**meeting, 'documents': []})\n return meetings_list",
"def fetch_deliveries(self):\n deliveries_cursor = self.database.Deliveries.find()\n deliveries = []\n for delivery in deliveries_cursor:\n delivery.pop('_id', None)\n deliveries.append(delivery)\n return deliveries",
"def getAnnouncement(self, request):\n announcement = memcache.get(MEMCACHE_ANNOUNCEMENTS_KEY) or \"\"\n return StringMessage(data=announcement)",
"def attachments(self):\r\n return Attachments(self)",
"def get_activities(cls):\n objs = cls.objects\n return objs",
"def get_anamnesis_list(self):\n # tmp store periodicity\n\n p = RISKY_BEHAVIOR_PERIODICITY\n # Get QuerySet of first encounters in the given year/town for all clients.\n encounters = Encounter.objects.first(year = 2015, towns=self.towns)\n\n # Filter encounters so that only the specified date range is present.\n encounters = encounters.filter(performed_on__gte=self.datetime_from,\n performed_on__lt=self.datetime_to)\n\n # Get all clients whose first encounters fall into the specified range.\n clients = encounters.values('person')\n\n # Now get all the encounters for these clients that fulfill the specified criteria.\n encounters = Encounter.objects.filter(performed_on__gte=self.datetime_from,\n performed_on__lt=self.datetime_to,\n where__in=self.towns,\n person__in=clients)\n\n # Get client PKs from filtered encounters.\n encounter_data = {}\n\n for e in encounters:\n encounter_data.setdefault(e.person_id, {'first_encounter_date': date.max, 'objects': []})\n encounter_data[e.person_id]['first_encounter_date'] = min(encounter_data[e.person_id]['first_encounter_date'], e.performed_on)\n encounter_data[e.person_id]['objects'].append(e)\n\n # Finally, select these clients if they have anamnesis filled up.\n _a = Anamnesis.objects.filter(client__pk__in=encounter_data.keys()).select_related()\n _all = []\n\n # Annotate extra information needed in report.\n for a in _a:\n # Date of first encounter with client.\n a.extra_first_encounter_date = encounter_data[a.client_id]['first_encounter_date']\n # If has been cured before - True if there is not IncomeExamination\n # within selected encounters.\n a.extra_been_cured_before = not Service.objects.filter(encounter__in=encounter_data[a.client_id]['objects'],\n content_type=IncomeExamination.real_content_type()).exists()\n # When showing 'incidency', only those, who have not been cured before\n # should be returned.\n # if self.kind == 'incidence' and a.extra_been_cured_before is True:\n if a.extra_been_cured_before is True:\n continue\n\n # Information about risky behaviour and it's periodicity.\n try:\n ivrm = a.riskymanners_set.get(behavior=RISKY_BEHAVIOR_KIND.INTRAVENOUS_APPLICATION)\n\n a.iv_past = self.anamnesis_dictionary(ivrm.periodicity_in_past)\n a.iv_present = self.anamnesis_dictionary(ivrm.periodicity_in_present)\n\n if (ivrm.periodicity_in_present, ivrm.periodicity_in_past) == (p.NEVER, p.NEVER):\n a.extra_intravenous_application = 'c'\n elif ivrm.periodicity_in_present in (p.ONCE, p.OFTEN):\n a.extra_intravenous_application = 'b'\n elif ivrm.periodicity_in_present == p.NEVER and ivrm.periodicity_in_past in (p.ONCE, p.OFTEN):\n a.extra_intravenous_application = 'a'\n else:\n a.extra_intravenous_application = 'd'\n except RiskyManners.DoesNotExist:\n a.extra_intravenous_application = 'd'\n a.iv_past = 'unknown'\n a.iv_present = 'unknown'\n\n # Information about syringe sharing activity.\n if a.extra_intravenous_application not in ('c',):\n try:\n ssrm = a.riskymanners_set.get(behavior=RISKY_BEHAVIOR_KIND.SYRINGE_SHARING)\n\n # Use current periodicity in past/current according to\n # `extra_intravenous_application`\n a.ss_past = self.anamnesis_dictionary(ivrm.periodicity_in_past)\n a.ss_present = self.anamnesis_dictionary(ivrm.periodicity_in_present)\n\n per = (ssrm.periodicity_in_present\n if a.extra_intravenous_application == 'b'\n else ssrm.periodicity_in_past)\n\n if per in (p.ONCE, p.OFTEN):\n a.extra_syringe_sharing = 'yes'\n elif per == p.NEVER:\n a.extra_syringe_sharing = 'no'\n else:\n a.extra_syringe_sharing = 'unknown'\n except 
RiskyManners.DoesNotExist:\n a.extra_syringe_sharing = 'unknown'\n a.ss_past = 'unknown'\n a.ss_present = 'unknown'\n\n try:\n usrm = a.riskymanners_set.get(behavior=RISKY_BEHAVIOR_KIND.SEX_WITHOUT_PROTECTION)\n a.us_past = self.anamnesis_dictionary(usrm.periodicity_in_past)\n a.us_present = self.anamnesis_dictionary(usrm.periodicity_in_present)\n except RiskyManners.DoesNotExist:\n a.us_past = 'unknown'\n a.us_present = 'unknown'\n \n try:\n rarm = a.riskymanners_set.get(behavior=RISKY_BEHAVIOR_KIND.RISKY_APPLICATION)\n a.ra_past = self.anamnesis_dictionary(rarm.periodicity_in_past)\n a.ra_present = self.anamnesis_dictionary(rarm.periodicity_in_present)\n except RiskyManners.DoesNotExist:\n a.ra_past = 'unknown'\n a.ra_present = 'unknown'\n\n try:\n odrm = a.riskymanners_set.get(behavior=RISKY_BEHAVIOR_KIND.OVERDOSING)\n a.od_past = self.anamnesis_dictionary(odrm.periodicity_in_past)\n a.od_present = self.anamnesis_dictionary(odrm.periodicity_in_present)\n except RiskyManners.DoesNotExist:\n a.od_past = 'unknown'\n a.od_present = 'unknown'\n\n try:\n hcrm = a.riskymanners_set.get(behavior=RISKY_BEHAVIOR_KIND.HEALTH_COMPLICATIONS)\n a.hc_past = self.anamnesis_dictionary(hcrm.periodicity_in_past)\n a.hc_present = self.anamnesis_dictionary(hcrm.periodicity_in_present)\n except RiskyManners.DoesNotExist:\n a.hc_past = 'unknown'\n a.hc_present = 'unknown'\n\n _all.append(a)\n\n return _all",
"def apt_list(cal, c_id, start, end):\n\n # Get the appointments returning it as list of dictionaries\n appointments_result = cal.events().list(\n calendarId=c_id,\n timeMin=start,\n timeMax=end,\n singleEvents=True,\n orderBy='startTime'\n ).execute()\n appointments = appointments_result.get('items', [])\n return appointments",
"def get_all(collection):\n data = []\n for item in collection.find({}):\n data.append(item)\n return data",
"def results(self):\n out = []\n fields = 'eid doi pii pubmed_id title subtype creator afid affilname '\\\n 'affiliation_city affiliation_country author_count '\\\n 'author_names author_ids author_afids coverDate '\\\n 'coverDisplayDate publicationName issn source_id eIssn '\\\n 'aggregationType volume issueIdentifier article_number '\\\n 'pageRange description authkeywords citedby_count '\\\n 'openaccess fund_acr fund_no fund_sponsor'\n doc = namedtuple('Document', fields)\n for item in self._json:\n info = {}\n # Parse affiliations\n try:\n info[\"affilname\"] = _join(item['affiliation'], 'affilname')\n info[\"afid\"] = _join(item['affiliation'], 'afid')\n info[\"aff_city\"] = _join(item['affiliation'], 'affiliation-city')\n info[\"aff_country\"] = _join(item['affiliation'],\n 'affiliation-country')\n except KeyError:\n pass\n # Parse authors\n try:\n # Deduplicate list of authors\n authors = _deduplicate(item['author'])\n # Extract information\n surnames = _replace_none([d['surname'] for d in authors])\n firstnames = _replace_none([d['given-name'] for d in authors])\n info[\"auth_names\"] = \";\".join([\", \".join([t[0], t[1]]) for t in\n zip(surnames, firstnames)])\n info[\"auth_ids\"] = \";\".join([d['authid'] for d in authors])\n affs = []\n for auth in authors:\n aff = listify(_deduplicate(auth.get('afid', [])))\n affs.append('-'.join([d['$'] for d in aff]))\n info[\"auth_afid\"] = (';'.join(affs) or None)\n except KeyError:\n pass\n date = item.get('prism:coverDate')\n if isinstance(date, list):\n date = date[0].get('$')\n new = doc(article_number=item.get('article-number'),\n title=item.get('dc:title'), fund_sponsor=item.get('fund-sponsor'),\n subtype=item.get('subtype'), issn=item.get('prism:issn'),\n creator=item.get('dc:creator'), affilname=info.get(\"affilname\"),\n author_names=info.get(\"auth_names\"), doi=item.get('prism:doi'),\n coverDate=date, volume=item.get('prism:volume'),\n coverDisplayDate=item.get('prism:coverDisplayDate'),\n publicationName=item.get('prism:publicationName'),\n source_id=item.get('source-id'), author_ids=info.get(\"auth_ids\"),\n aggregationType=item.get('prism:aggregationType'),\n issueIdentifier=item.get('prism:issueIdentifier'),\n pageRange=item.get('prism:pageRange'),\n author_afids=info.get(\"auth_afid\"), fund_no=item.get('fund-no'),\n affiliation_country=info.get(\"aff_country\"),\n citedby_count=item.get('citedby-count'),\n openaccess=item.get('openaccess'), eIssn=item.get('prism:eIssn'),\n author_count=item.get('author-count', {}).get('$'),\n affiliation_city=info.get(\"aff_city\"), afid=info.get(\"afid\"),\n description=item.get('dc:description'), pii=item.get('pii'),\n authkeywords=item.get('authkeywords'), eid=item.get('eid'),\n fund_acr=item.get('fund-acr'), pubmed_id=item.get('pubmed-id'))\n out.append(new)\n return out or None",
"def announcements_item(header=None, body=None, date=None, priority=\"\"):\n\n box = DIV(_class=\"announcement-box announcement-%s\" % priority)\n\n if date:\n box.append(DIV(DIV(date, _class=\"announcement-date\"),\n _class = \"fright\",\n ))\n\n text = DIV(_class=\"announcement-text\")\n if header:\n icons = {\"important\": \"fa-exclamation-circle\",\n \"critical\": \"fa-exclamation-triangle\",\n }\n icon = icons.get(priority)\n if icon:\n header = TAG[\"\"](I(_class=\"fa %s announcement-icon\" % icon),\n header,\n )\n text.append(DIV(header, _class=\"announcement-header\"))\n if body:\n text.append(DIV(body, _class=\"announcement-body\"))\n box.append(text)\n\n return LI(box)",
"def deals(self):\r\n return deals.Deals(self)",
"def activities(self):\r\n return activities.Activities(self)"
] |
[
"0.62996197",
"0.59415793",
"0.58581275",
"0.57568336",
"0.56691045",
"0.563069",
"0.5609823",
"0.5570636",
"0.5570226",
"0.5568708",
"0.55073804",
"0.5450226",
"0.54384685",
"0.5434181",
"0.54055053",
"0.53166115",
"0.5299681",
"0.5296883",
"0.525246",
"0.5247528",
"0.5230495",
"0.52122396",
"0.5197172",
"0.51747",
"0.51240754",
"0.51168895",
"0.5116329",
"0.51105505",
"0.51048523",
"0.51011527"
] |
0.7739937
|
0
|
Create a date-only datetime object by extracting a date from a string. The date string should be in the format "May 11st, 2020 by ", otherwise the method raises a DateStringFormatMismatch exception.
|
def get_date_from_string(date_string: str) -> datetime:
regex = r'^(January|February|March?|April|May|June|July|August|September|October|November|December)' \
r' (\d{1,2})(st|nd|rd|th), (\d{4}) by $'
if re.match(regex, date_string) is None:
raise DateStringFormatMismatch('Scraper.get_date_from_string()', date_string)
date_list = date_string.split(' ')
if len(date_list[1]) == 5: # i.e. '11st,' with comma
date_list[1] = date_list[1][0:2]
else: # i.e '2nd,' no zero prefix because ordinal date
date_list[1] = date_list[1][0:1].zfill(1)
new_date_string = ' '.join(date_list[0:3]) # new date extracted by removing 'by', spaces and commas
date = datetime.strptime(new_date_string, '%B %d %Y')
date = date.replace(tzinfo=python_timezone.utc) # localizing to avoid comparison error when sorting later
return date
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def string_to_date(string):\n params = string.strip().split('-')\n year = int(params[0])\n month = int(params[1])\n day = int(params[2])\n d = date(year, month, day)\n return d",
"def get_date(string):\r\n string = re.sub(' +', ' ', string)\r\n try:\r\n return dateutil.parser.parse(string)\r\n except (TypeError, ValueError):\r\n raise ValueError('{0!r} is not a valid date'.format(string))",
"def datetime_from_string(date_string):\n try:\n return datetime.datetime.strptime(date_string, date_format)\n except Exception as e:\n print(\"Got exception parsing date {} with format {} {}\"\n .format(date_string, date_format, e))",
"def date_parse(date_string) -> datetime:\n return datetime.strptime(date_string, DATE_FMT)",
"def get_date_from_string(self, datestr, fmt):\n return datetime.datetime.strptime(datestr, fmt)",
"def get_date(date_string):\n return datetime.datetime.strptime(date_string.split('T')[0], \"%Y-%m-%d\")",
"def date_from_string(date):\n _type = type(date)\n try:\n if _type == datetime.date:\n return date\n elif _type == datetime.datetime:\n return datetime.datetime.date(date)\n else:\n return datetime.datetime.date(datetime.datetime.strptime(date, '%Y-%m-%d'))\n except ValueError:\n return date\n except TypeError:\n return date",
"def _date_from_str(self, date_entry, date_str):\n dt_obj = None\n if date_str:\n dt_obj = parser.parse(date_str)\n if dt_obj < MIN_DATE or dt_obj > MAX_DATE:\n prompt = 'Please keep dates within Jan 1, 2015 up to today.'\n raise ValueError(prompt)\n \n return dt_obj",
"def from_format(cls, date_string: str, format_string: str) -> Date:\n from .datetime_ import DateTime\n return DateTime.strptime(date_string, format_string).date()",
"def parse_datetime(date_str: str) -> datetime:\n return dateutil.parser.parse(date_str)",
"def from_date_string(cls, date_string: str):\n date_lst = [int(ele) for ele in date_string.split('/')]\n return CustomDate.from_date(date(date_lst[2], date_lst[0], date_lst[1]))",
"def _get_date_from_str(date_input):\r\n return datetime.datetime.strptime(date_input.strip(), \"%Y-%m-%d\").replace(tzinfo=pytz.UTC)",
"def parse_date(datestring):\n y, m, d = datestring.split('-')\n if len(y) != 4 or len(m) != 2 or len(d) != 2:\n raise InvalidDateException('Date must be of format YYYY-MM-DD')\n return datetime.date(int(y), int(m), int(d))",
"def get_date(self, string):\n # remove new lines\n string = string.replace('\\n', '')\n # first, get first digit - day is then number value of following 2 chars \n firstDigit = re.search('\\d', string)\n day = string[firstDigit.start():firstDigit.start()+2]\n day = self.find_number(day)\n # then get year - match 4 digits\n yearLoc = re.search(r'\\d{4}(?!\\d)', string)\n year = string[yearLoc.start():yearLoc.end()]\n # then get month\n monthLoc = re.search(r'[A-Z]{1}[a-z]{2}', string)\n month = string[monthLoc.start():monthLoc.end()]\n try:\n month = strptime(month, '%b').tm_mon\n date = dt.datetime(int(str(year)), int(str(month)), int(str(day)))\n except ValueError:\n pass\n date = np.NAN\n return date",
"def _get_date(string):\n try:\n return _date.fromordinal(_dateparse(string).toordinal())\n except ValueError:\n print(string)\n raise",
"def convert_string_to_datetime(date_string):\n REGEX_1 = \"^[0-9]{1,2}\\s[a-zA-Z]{3}\\s[0-9]{4}\\s[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\s\\+[0-9]{4}$\"\n REGEX_2 = \"^[a-zA-Z]{3}\\,\\s[0-9]{1,2}\\s[a-zA-Z]{3}\\s[0-9]{4}\\s[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\s\\+[0-9]{4}$\"\n REGEX_3 = \"^[a-zA-Z]{3}\\,\\s[0-9]{1,2}\\s[a-zA-Z]{3}\\s[0-9]{4}\\s[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\s\\+[0-9]{4}\\s\\([a-zA-Z]{3}\\)$\"\n\n if re.match(REGEX_1, date_string):\n return datetime.strptime(date_string, '%d %b %Y %H:%M:%S %z')\n elif re.match(REGEX_2, date_string):\n return datetime.strptime(date_string, '%a, %d %b %Y %H:%M:%S %z')\n elif re.match(REGEX_3, date_string):\n return datetime.strptime(date_string, '%a, %d %b %Y %H:%M:%S %z (%Z)')\n else:\n return None",
"def str_to_datetime(datestr):\n\n from dateutil import parser\n from datetime import datetime\n\n date = parser.parse(datestr)\n return date",
"def str_to_date(date_str):\r\n\r\n if date_str == 'today':\r\n date = datetime.date.today()\r\n else:\r\n date = datetime.datetime.strptime(date_str, \"%Y-%m-%d\").date()\r\n\r\n return date",
"def extract_date(str_date):\n rgx = re.compile(r'((\\d{4})-(\\d{2})-(\\d{2}))')\n o_match = rgx.search(str_date)\n if o_match is not None:\n\n lng_day = int(o_match.group(4))\n lng_month = int(o_match.group(3))\n lng_year = int(o_match.group(2))\n\n # These digits may not give a legitimate combination of Y M D\n try:\n dte = datetime(lng_year, lng_month, lng_day)\n except ValueError:\n # Use today's values as defaults, and use any part that does work\n dte = datetime.now()\n # Start with day=1 in case the month is feb and the day 30 etc\n dte = datetime.replace(dte, day=1, hour=0, minute=0, \\\n second=0, microsecond=0)\n try:\n dte = datetime.replace(dte, year=lng_year)\n except ValueError:\n pass\n try:\n dte = datetime.replace(dte, month=lng_month)\n except ValueError:\n pass\n try:\n dte = datetime.replace(dte, day=lng_day)\n except ValueError:\n pass\n\n i_start = o_match.start()\n tpl_date_rest = (dte, str_date[0:i_start] + ' ' + \\\n str_date[i_start + 10:])\n\n else:\n tpl_date_rest = (None, str_date)\n\n return tpl_date_rest",
"def date_from_string(my_string):\n if my_string:\n return datetime.strptime(my_string, DATE_FORMAT).date()\n return None",
"def _parse_date(date_string: str) -> Union[datetime.datetime, str]:\n for date_format in KNOWN_DATE_FORMATS:\n try:\n date = datetime.datetime.strptime(date_string, date_format)\n return date\n except ValueError:\n continue\n return date_string",
"def datestring_to_date(datestring):\n year, month, day = datestring.split(\"-\")\n date = datetime.date(year=int(year), month=int(month), day=int(day))\n return date",
"def string_to_date(date_string):\n try:\n dt = datetime.datetime.strptime(date_string, \"%m%d%Y\")\n return dt.date()\n except:\n return None",
"def _str_to_date(self, date):\n return datetools.date_parser(date)",
"def string_to_date(date_string):\n\n return date(int(date_string[:4]),\n int(date_string[5:7]),\n int(date_string[8:10]))",
"def _datetime_obj_from_string(date_string, date_only=False):\n str_format = \"%Y-%m-%d %H:%M:%S\"\n if date_only is True:\n str_format = \"%Y-%m-%d\"\n\n return datetime.strptime(date_string, str_format)",
"def str_to_date(date_str: str) -> Optional[datetime.date]:\n if not date_str:\n # If the type is falsy, return None.\n return\n try:\n # Most dates in the API are in this format...\n return datetime.strptime(date_str, \"%m/%d/%Y\").date()\n except ValueError:\n # Please forgive me for this nested try-except block.\n # This API is _whack_.\n try:\n # But some are in this format...\n return datetime.strptime(date_str, \"%Y-%m-%d\").date()\n except ValueError:\n # And sometimes you get random crap like '0000-00-00'...\n return\n except TypeError:\n # If the type is truthy, but can't be cast to a date, return None.\n return",
"def get_date(datestring):\n elements = [int(a) for a in re.split(\"[-/]\", \"{0}\".format(datestring))]\n if len(elements) == 3:\n return datetime.date(elements[0], elements[1], elements[2])",
"def _string_to_date(datestr,fmt):\n if not isinstance(datestr,str):\n raise InstrumentParameterException('Value %s is not a string.' % str(datestr))\n try:\n date_time = time.strptime(datestr,fmt)\n date = (date_time[2],date_time[1],date_time[0])\n\n except ValueError:\n raise InstrumentParameterException('Value %s could not be formatted to a date.' % str(datestr))\n \n return date",
"def str2date(date_str, date_format):\n return datetime.strptime(date_str, date_format)"
] |
[
"0.76573557",
"0.76182073",
"0.75150347",
"0.7463151",
"0.7463052",
"0.7450171",
"0.7438522",
"0.7346099",
"0.7337427",
"0.7319477",
"0.73107994",
"0.72941107",
"0.7282974",
"0.72484726",
"0.7245461",
"0.71994174",
"0.7198044",
"0.71857935",
"0.71590173",
"0.7078841",
"0.70654005",
"0.70621467",
"0.7048788",
"0.70450807",
"0.70429945",
"0.7034858",
"0.7030509",
"0.7017091",
"0.69797796",
"0.69794106"
] |
0.813079
|
0
|
Generator that reads from the terminal and yields "interactive inputs". Due to temporary limitations in tf.learn, if we don't want to reload the whole graph, we are stuck encoding all of the input as one fixed-size numpy array.
|
def _interactive_input_fn(hparams, decode_hp):
num_samples = decode_hp.num_samples if decode_hp.num_samples > 0 else 1
decode_length = decode_hp.extra_length
input_type = "text"
p_hparams = hparams.problem_hparams
has_input = "inputs" in p_hparams.modality
vocabulary = p_hparams.vocabulary["inputs" if has_input else "targets"]
# This should be longer than the longest input.
const_array_size = 10000
# Import readline if available for command line editing and recall.
try:
import readline # pylint: disable=g-import-not-at-top,unused-variable
except ImportError:
pass
while True:
prompt = ("INTERACTIVE MODE num_samples=%d decode_length=%d \n"
" it=<input_type> ('text' or 'image' or 'label', default: "
"text)\n"
" ns=<num_samples> (changes number of samples, default: 1)\n"
" dl=<decode_length> (changes decode length, default: 100)\n"
" <%s> (decode)\n"
" q (quit)\n"
">" % (num_samples, decode_length,
"source_string" if has_input else "target_prefix"))
input_string = input(prompt)
if input_string == "q":
return
elif input_string[:3] == "ns=":
num_samples = int(input_string[3:])
elif input_string[:3] == "dl=":
decode_length = int(input_string[3:])
elif input_string[:3] == "it=":
input_type = input_string[3:]
else:
if input_type == "text":
input_ids = vocabulary.encode(input_string)
if has_input:
input_ids.append(text_encoder.EOS_ID)
x = [num_samples, decode_length, len(input_ids)] + input_ids
assert len(x) < const_array_size
x += [0] * (const_array_size - len(x))
features = {
"inputs": np.array(x).astype(np.int32),
}
elif input_type == "image":
input_path = input_string
img = vocabulary.encode(input_path)
features = {
"inputs": img.astype(np.int32),
}
elif input_type == "label":
input_ids = [int(input_string)]
x = [num_samples, decode_length, len(input_ids)] + input_ids
features = {
"inputs": np.array(x).astype(np.int32),
}
else:
raise Exception("Unsupported input type.")
for k, v in six.iteritems(
problem_lib.problem_hparams_to_features(p_hparams)):
features[k] = np.array(v).astype(np.int32)
yield features
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def read_input(self) -> None:\n raw_input = sys.stdin.read()\n\n self._input = raw_input.split('\\n')\n self._input = self._input[0:-1]\n\n for line in self._input:\n direction, steps = line.split()\n self._instructions.append((direction, int(steps)))",
"def iter_inputs(self) -> Iterable[str]:\n yield from self.static_inputs\n if self.allow_dynamic and self.dynamic_inputs:\n yield from self.dynamic_inputs",
"def main():\n with FullscreenWindow() as window:\n print('Press escape to exit')\n with Input() as input_generator:\n a = FSArray(window.height, window.width)\n for c in input_generator:\n if c == '<ESC>':\n break\n elif c == '<SPACE>':\n a = FSArray(window.height, window.width)\n else:\n row = random.choice(range(window.height))\n column = random.choice(range(window.width-len(repr(c))))\n a[row, column:column+len(repr(c))] = [repr(c)]\n window.render_to_terminal(a)",
"def get_inputs(self):\n inputs = Interaction.get_inputs(self)\n inputs.update(np.atleast_1d(self._consumes))\n return inputs",
"def inputs(self):\n pass",
"def input_pipe():\n x = ''\n while True:\n x = yield x\n yield # to keep the generator in lock step with input",
"def input(self):",
"def read_input():\n\n read = sys.stdin.readlines()\n\n text = ''\n for line in read:\n text += line\n\n return text",
"def stdin(self):\n pass",
"def get_inputs(self):\r\n raise NotImplementedError",
"def get_inputs(self):\r\n raise NotImplementedError",
"def inputs(self):\n return self._inputs",
"def _get_inputs(self):\n return self.__inputs",
"def _get_inputs(self):\n return self.__inputs",
"def _get_inputs(self):\n return self.__inputs",
"def serving_input_fn():\n input_keys = tf.placeholder(\n dtype=tf.string, shape=[None], name='input_keys')\n\n with tf.name_scope('ReadInferenceData'):\n feature_dict, serialized_example = self._get_tensor_and_example(\n mode, shuffle=shuffle, num_epochs=num_epochs)\n\n # At serving time, the batch size will be 1. We need to reshape the\n # features to account for this.\n features = {}\n for key, tensor in iteritems(feature_dict):\n features[key] = tf.expand_dims(tensor, 0)\n tf.logging.info(key)\n tf.logging.info(features)\n\n inputs = {\n 'input_keys': input_keys,\n 'input_examples': serialized_example,\n }\n\n return tf.estimator.export.ServingInputReceiver(\n features=features, receiver_tensors=inputs)",
"def read_input():\n input()\n size = int(input().split()[-1])\n nb_edges = int(input().split()[-1])\n\n g = UndirectedGraph()\n\n if parameters.DEBUG:\n print('Build nodes')\n\n nodes = [g.add_node() for _ in range(size)]\n\n if parameters.DEBUG:\n print('Build edges')\n edges = []\n weights = {}\n i = 0\n for i in range(nb_edges):\n if parameters.DEBUG:\n i += 1\n if i % 1000 == 0:\n print('Edge %d / %d' % (i, nb_edges))\n line = input()\n _, u, v, w = line.split()\n\n e = g.add_edge(nodes[int(u) - 1], nodes[int(v) - 1])\n weights[e] = int(w)\n\n edges.append((int(u), int(v), int(w)))\n\n line = input()\n while 'Terminals' not in line:\n line = input()\n if 'SECTION' in line:\n line = input()\n while 'Terminals' not in line:\n line = input()\n nb_terms = int(line.split()[-1])\n terms = []\n for i in range(nb_terms):\n line = input()\n _, t = line.split()\n terms.append(nodes[int(t) - 1])\n\n return instances.SteinerInstance(g, terms, weights)",
"def input_fn():\n problem_count, batches = len(hparams.problems), []\n with tf.name_scope(\"input_reader\"):\n for n in xrange(problem_count):\n if fixed_problem is not None and n != fixed_problem:\n continue\n problem_instance = hparams.problem_instances[n]\n p_hparams = hparams.problems[n]\n with tf.name_scope(\"problem_%d\" % n):\n with tf.device(\"/cpu:0\"): # Input reading on CPU\n capacity = (\n p_hparams.max_expected_batch_size_per_shard * num_datashards)\n feature_map = data_reader.input_pipeline(\n problem_instance, data_file_patterns and data_file_patterns[n],\n capacity, mode, hparams,\n data_reader.hparams_to_batching_scheme(\n hparams,\n shard_multiplier=num_datashards,\n drop_long_sequences=(mode == tf.contrib.learn.ModeKeys.TRAIN\n or hparams.eval_drop_long_sequences),\n length_multiplier=(p_hparams.batch_size_multiplier)))\n\n # Reverse inputs and targets features if the problem was reversed.\n if problem_instance is not None:\n problem_instance.maybe_reverse_features(feature_map)\n problem_instance.maybe_copy_features(feature_map)\n else:\n if p_hparams.was_reversed:\n inputs = feature_map[\"inputs\"]\n targets = feature_map[\"targets\"]\n feature_map[\"inputs\"] = targets\n feature_map[\"targets\"] = inputs\n # Use the inputs as the targets if the problem is a copy problem.\n if p_hparams.was_copy:\n feature_map[\"targets\"] = feature_map[\"inputs\"]\n\n # Ensure inputs and targets are proper rank.\n while len(feature_map[\"inputs\"].get_shape()) != 4:\n feature_map[\"inputs\"] = tf.expand_dims(feature_map[\"inputs\"], axis=-1)\n while len(feature_map[\"targets\"].get_shape()) != 4:\n feature_map[\"targets\"] = tf.expand_dims(\n feature_map[\"targets\"], axis=-1)\n\n batches.append((feature_map[\"inputs\"], feature_map[\"targets\"],\n tf.constant(n), tf.constant(p_hparams.input_space_id),\n tf.constant(p_hparams.target_space_id)))\n\n # We choose which problem to process.\n loss_moving_avgs = [] # Need loss moving averages for that.\n for n in xrange(problem_count):\n with tf.variable_scope(\"losses_avg\"):\n loss_moving_avgs.append(\n tf.get_variable(\n \"problem_%d/total_loss\" % n, initializer=100.0,\n trainable=False))\n if fixed_problem is None:\n if (hparams.problem_choice == \"uniform\" or\n mode != tf.contrib.learn.ModeKeys.TRAIN):\n problem_choice = tf.random_uniform(\n [], maxval=problem_count, dtype=tf.int32)\n elif hparams.problem_choice == \"adaptive\":\n loss_moving_avgs = tf.stack(loss_moving_avgs)\n problem_choice = tf.multinomial(\n tf.reshape(loss_moving_avgs, [1, -1]), 1)\n problem_choice = tf.to_int32(tf.squeeze(problem_choice))\n elif hparams.problem_choice == \"distributed\":\n assert worker_replicas >= problem_count\n assert worker_replicas % problem_count == 0\n problem_choice = tf.to_int32(worker_id % problem_count)\n else:\n raise ValueError(\n \"Value of hparams.problem_choice is %s and must be \"\n \"one of [uniform, adaptive, distributed]\" % hparams.problem_choice)\n\n # Inputs and targets conditional on problem_choice.\n rand_inputs, rand_target, choice, inp_id, tgt_id = cond_on_index(\n lambda n: batches[n], problem_choice, 0, problem_count - 1)\n else:\n problem_choice = tf.constant(fixed_problem)\n # Take the only constructed batch, which is the fixed_problem.\n rand_inputs, rand_target, choice, inp_id, tgt_id = batches[0]\n\n # Set shapes so the ranks are clear.\n rand_inputs.set_shape([None, None, None, None])\n rand_target.set_shape([None, None, None, None])\n choice.set_shape([])\n inp_id.set_shape([])\n tgt_id.set_shape([])\n # 
Forced shape obfuscation is necessary for inference.\n if mode == tf.contrib.learn.ModeKeys.INFER:\n rand_inputs._shape = tf.TensorShape([None, None, None, None]) # pylint: disable=protected-access\n rand_target._shape = tf.TensorShape([None, None, None, None]) # pylint: disable=protected-access\n\n # Final feature map.\n rand_feature_map = {\n \"inputs\": rand_inputs,\n \"problem_choice\": choice,\n \"input_space_id\": inp_id,\n \"target_space_id\": tgt_id\n }\n if mode == tf.contrib.learn.ModeKeys.INFER:\n rand_feature_map[\"infer_targets\"] = rand_target\n rand_target = None\n # This is because of a bug in the tf.contrib.learn Estimator that\n # short-circuits prediction if it doesn't see a QueueRunner.\n # DummyQueueRunner implements the minimal expected interface but does\n # nothing.\n # TODO(rsepassi): Remove once we move to core Estimator.\n tf.add_to_collection(tf.GraphKeys.QUEUE_RUNNERS, DummyQueueRunner())\n return rand_feature_map, rand_target",
"def get_inputs(self):\n self.console.write(self.words.show_lines() + \"\\n\")\n self.console.write(self.jumper.jumper_output())\n guess = input(\"Guess a letter [a-z]: \").lower()\n self.good_guess = self.words.get_lines(guess)",
"def input_handling():\r\n # get program parameters from user\r\n k_in, n_in, Random = io.get_values()\r\n\r\n # Generation of n data points (2d or 3d)\r\n n, K, d, data, labels = io.sk_generator(n_in, k_in, Random)\r\n # Print description of Inputs and Random choices\r\n io.print_description(k_in, n_in, K, n, Random, d)\r\n\r\n return n, K, d, data, labels, Random",
"def _serving_input_fn(self):\n seq = tf.placeholder(dtype=tf.float32, shape=[None, None], name='seq')\n features = {'seq': seq}\n return tf.estimator.export.build_raw_serving_input_receiver_fn(features)",
"def input_fn(evaluate=False) -> tf.data.Dataset:\n\n # The dataset\n ds = tf.data.Dataset.from_generator( generator=train_generator, \n output_types=( { 'character' : tf.string } , tf.string ),\n output_shapes=( { 'character' : (Model.SEQUENCE_LENGHT,) } , () )\n )\n\n ds = ds.batch(64)\n ds = ds.prefetch(1)\n\n return ds",
"def read_input():\n return Path(__file__).with_name('input.txt').read_text().splitlines()",
"def input(self):\r\n pass",
"def get_input(inputs):\n return input(inputs)",
"def r(lines=1, prompt=None):\n lista = []\n for i in range(lines):\n l = sys.stdin.readline()\n if l:\n lista.append(l)\n else:\n exit()\n return lista",
"def get_user_input(self):\n result = []\n result.append(self.one_hot_translate_list(self.order_productfea, self.depthlist))\n result.append(tf.nn.embedding_lookup(self.product_embeddings, self.order_productid))\n result.append(tf.nn.embedding_lookup(self.aisle_embeddings, self.order_aisleid))\n result.append(tf.nn.embedding_lookup(self.department_embeddings, self.order_departmentid))\n result.append(self.one_hot_translate(self.order_productidx, self.max_productlen+1))\n user_input = tf.concat(result,axis=-1)\n return user_input",
"def ReadFromStdin():\n for line in sys.stdin:\n path_name = line.strip('\\n').split()\n try:\n path = path_name[0]\n name = path_name[1]\n except IndexError:\n raise IndexError('Malformed input on stdin')\n yield (path, name)",
"def inputs(self) -> Sequence[jnp.ndarray]:\n pass",
"def get_inputs(self):\n inputs = Interaction.get_inputs(self)\n inputs.update(np.atleast_1d(self._demands))\n return inputs"
] |
[
"0.6167433",
"0.5872662",
"0.5866722",
"0.58653957",
"0.57514834",
"0.57471544",
"0.57423514",
"0.5693443",
"0.5675602",
"0.56751627",
"0.56751627",
"0.5635416",
"0.5626456",
"0.5626456",
"0.5626456",
"0.561444",
"0.56078106",
"0.5591231",
"0.55875516",
"0.5550704",
"0.55429125",
"0.5535565",
"0.55200696",
"0.55125123",
"0.5510424",
"0.5505589",
"0.54881024",
"0.54732424",
"0.54660654",
"0.54630744"
] |
0.6619003
|
0
|
Shows an image using matplotlib and saves it.
|
def show_and_save_image(img, save_path):
try:
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
except ImportError as e:
tf.logging.warning(
"Showing and saving an image requires matplotlib to be "
"installed: %s", e)
raise NotImplementedError("Image display and save not implemented.")
plt.imshow(img)
with tf.gfile.Open(save_path, "wb") as sp:
plt.savefig(sp)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def show_image(path):\n img = mpimg.imread(path)\n imgplot = plt.imshow(img)\n plt.show()\n plt.close()",
"def save(image, name):\n from matplotlib import pyplot\n import matplotlib as mpl\n fig = pyplot.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=mpl.cm.Greys)\n imgplot.set_interpolation('nearest')\n ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n pyplot.savefig(name)",
"def save_plot_as_image(self):\r\n plt.savefig(ROOT_DIR + '/presentation/images/' + self.folder + '/' + self.generated_image_name + '.png',\r\n bbox_inches='tight')",
"def show_image_in_new_figure(img, to_save=False, fname=\"extractor_test_results/result.png\"):\n \n plt.figure()\n skimage.io.imshow(img, cmap = 'gray')\n if show_plots: plt.show()\n \n if to_save:\n plt.savefig(fname)",
"def make_image(self, frame, filename, **kwds):\n p = plot.plot(frame, **kwds)\n p.save_image(filename)",
"def show_image(dataset, domain, image_class, image_name):\n\timage_file = io.imread(os.path.join(\"data\", dataset, domain, \"images\", image_class, image_name))\n\tplt.imshow(image_file)\n\tplt.pause(0.001)\n\tplt.figure()",
"def show_image(file_location):\n img = Image.open(file_location)\n img.show()",
"def save_figure(self, data):\n\n\t\tsizes = np.shape(data)\n\t\tfig = plt.figure()\n\t\tfig.set_size_inches(1, 1. * sizes[0]/sizes[1], forward = False)\n\t\tax = plt.Axes(fig, [0., 0., 1., 1.])\n\t\tax.set_axis_off()\n\t\tfig.add_axes(ax)\n\t\tax.imshow(data, \"gray\")\n\n\t\t#plt.show()\n\t\tself.plotfile = os.path.join('static', 'Figure' + '.png')\n\t\tplt.savefig(self.plotfile, dpi = sizes[1])",
"def plot_img(img, savefig=\"test.png\", **kwargs):\n plt.figure()\n if img.ndim > 2:\n plt.imshow(cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_BGR2RGB), **kwargs)\n else:\n plt.imshow(img.astype(np.uint8), **kwargs)\n plt.axis(\"off\")\n if savefig:\n cv2.imwrite(savefig, img.astype(np.uint8))",
"def showImg(img, binary=True, fName=''):\n img = img[0, 0, :, :]\n\n if binary:\n img = img > 0.5\n\n img = Image.fromarray(np.uint8(img * 255), mode='L')\n\n if fName:\n img.save('assets/' + fName + '.png')\n else:\n img.show()",
"def show_image(image):\r\n plt.imshow(image, cmap='gray')\r\n plt.show()",
"def test_plot_images(self):\n save_file(self.quart.plot_images)",
"def save_image(self):\r\n filename = filedialog.asksaveasfilename(title='Save Image As...',\r\n filetypes=((\"Portable Network Graphics (.png)\", \"*.png\"), (\"Portable Document Format(.pdf)\", \"*.pdf\")))\r\n self.graph.savefig(filename, dpi=self.graph.dpi)",
"def imshow(img):\n imadd(img)\n plt.ion()\n plt.show()",
"def showimage(image):\n mplt.figure()\n mplt.imshow(image)\n mplt.show()",
"def plt_to_ipy_img(dummy: any = None, **kwargs) -> IPython.display.Image:\n return IPython.display.Image(filename=plot_to_file(**kwargs))",
"def save_images(PATH, show_img, datasets, from_dataset):\n dataset = datasets[from_dataset]\n imgModels = dataset['models']\n for modelname, model in imgModels.items():\n print('save', modelname)\n plt.imshow(model[70])\n plt.set_cmap(\"gray\")\n plt.axis('off')\n plt.savefig(PATH + '/' + from_dataset + '_' + modelname + '.png', dpi=400)\n\n if show_img == True:\n plt.show()",
"def save_image(image, figsize, save_path, ticks=False, grey=True):\n fig = plt.figure(figsize=figsize)\n if grey:\n plt.imshow(image, cmap=plt.get_cmap('gray'))\n else:\n plt.imshow(image)\n if not ticks:\n plt.xticks([]), plt.yticks([])\n plt.tight_layout()\n fig.savefig(save_path)\n plt.close(fig)\n return",
"def save_plot_as_png(self):\n file_save_path = QFileDialog.getSaveFileName(self, 'Save Plot PNG', \"\", \"PNG (*.png)|*.png\")\n\n if file_save_path[0]:\n self.figure.savefig(file_save_path[0], bbox_inches='tight')\n QMessageBox.about(self, \"Success!\", \"Your plot has been saved as png image successfully.\")",
"def save_image(self, image_file):\r\n self.ensure_pyplot()\r\n command = 'plt.gcf().savefig(\"%s\")'%image_file\r\n #print 'SAVEFIG', command # dbg\r\n self.process_input_line('bookmark ipy_thisdir', store_history=False)\r\n self.process_input_line('cd -b ipy_savedir', store_history=False)\r\n self.process_input_line(command, store_history=False)\r\n self.process_input_line('cd -b ipy_thisdir', store_history=False)\r\n self.process_input_line('bookmark -d ipy_thisdir', store_history=False)\r\n self.clear_cout()",
"def on_save(self, event):\n file_choices = \"PNG (*.png)|*.png\"\n \n dlg = wx.FileDialog(\n self, \n message=\"Save plot as...\",\n defaultDir=os.getcwd(),\n defaultFile=\"plot.png\",\n wildcard=file_choices,\n style=wx.SAVE)\n \n if dlg.ShowModal() == wx.ID_OK:\n path = dlg.GetPath()\n self.canvas.print_figure(path, dpi=self.dpi)\n self.flash_status_message(\"Saved to %s\" % path)",
"def show_with_matplotlib(img, title):\n\n # Convert BGR image to RGB:\n img_RGB = img[:, :, ::-1]\n\n # Show the image using matplotlib:\n plt.imshow(img_RGB)\n plt.title(title)\n plt.show()",
"def draw_image(self):\n \n pixel_array = self.imageprepare(self.image_path)\n newArr = self.reshape_pixel_array(pixel_array)\n plt.imshow(newArr, interpolation='nearest')\n plt.savefig('MNIST_IMAGE.png')#save MNIST image\n plt.show()#Show / plot that image",
"def save(self, fn):\n plt.imsave(fn, self.image)",
"def show_image(img, figsize=(10, 10)):\n plt.figure(figsize=figsize)\n plt.imshow(img)\n plt.show()",
"def show_save_plot(self, name=''):\n if self.save_plots:\n plt.savefig(os.path.join(self.out_folder, f'{name}.png'), dpi=300)\n plt.show()",
"def show_file(file_location):\n img = Image.open(file_location)\n img.show()",
"def subplot_to_figure(self):\n if self.format is \"show\":\n plt.show()\n elif self.format is \"png\":\n plt.savefig(self.path + self.filename + \".png\", bbox_inches=\"tight\")",
"def visualize(**images):\n n = len(images)\n plt.figure(figsize=(16, 5))\n for i, (name, image) in enumerate(images.items()):\n plt.subplot(1, n, i + 1)\n plt.xticks([])\n plt.yticks([])\n plt.title(' '.join(name.split('_')).title())\n plt.imshow(image)\n plt.show()\n # plt.savefig('./drive/My Drive/Colab Notebooks/TACK/Large/result' + ' '.join(name.split('_')).title() + '.png')",
"def show(image):\n from matplotlib import pyplot\n import matplotlib as mpl\n fig = pyplot.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=mpl.cm.Greys)\n imgplot.set_interpolation('nearest')\n ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n pyplot.show()"
] |
[
"0.7841939",
"0.77371234",
"0.7698087",
"0.7367039",
"0.7197469",
"0.7056606",
"0.70549476",
"0.7029917",
"0.70056736",
"0.69819826",
"0.6958245",
"0.6957882",
"0.69477004",
"0.69260085",
"0.6901715",
"0.68855405",
"0.6878939",
"0.6877376",
"0.6875134",
"0.68697375",
"0.685487",
"0.6834856",
"0.68099403",
"0.6805025",
"0.67844963",
"0.6769877",
"0.6756338",
"0.674348",
"0.673927",
"0.6728963"
] |
0.7918635
|
0
|
Run hooks after decodes have run.
|
def run_postdecode_hooks(decode_hook_args, dataset_split):
hooks = decode_hook_args.problem.decode_hooks
if not hooks:
return
global_step = latest_checkpoint_step(decode_hook_args.estimator.model_dir)
if global_step is None:
tf.logging.info(
"Skipping decode hooks because no checkpoint yet available.")
return
tf.logging.info("Running decode hooks.")
parent_dir = os.path.join(decode_hook_args.output_dirs[0], os.pardir)
child_dir = decode_hook_args.decode_hparams.summaries_log_dir
if dataset_split is not None:
child_dir += "_{}".format(dataset_split)
final_dir = os.path.join(parent_dir, child_dir)
summary_writer = tf.summary.FileWriter(final_dir)
for hook in hooks:
# Isolate each hook in case it creates TF ops
with tf.Graph().as_default():
summaries = hook(decode_hook_args)
if summaries:
summary = tf.Summary(value=list(summaries))
summary_writer.add_summary(summary, global_step)
summary_writer.close()
tf.logging.info("Decode hooks done.")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def handle_decode(self, encoded_data):\n \n config.COD_PROMPT = config.DEC_PROMPT\n print config.DEC_PROMPT + \" decoding...\"\n \n # while there is another decoder, run each item through the next decoder\n data = encoded_data\n success = False\n for decoder in self.decoder_list:\n current_decoder = decoder()\n success, data = self.recursive_decoder(current_decoder.decode, data)\n if not success:\n break\n print config.DEC_PROMPT + \"%s decoded to '%s'\" % ( current_decoder.name(),data)\n return success, data",
"def test_post_process(self):\n self.executed = False\n\n post_procs = pyamf.POST_DECODE_PROCESSORS[:]\n\n def restore_post_procs():\n pyamf.POST_DECODE_PROCESSORS = post_procs\n\n self.addCleanup(restore_post_procs)\n pyamf.POST_DECODE_PROCESSORS = []\n\n def postprocess(payload, context):\n self.assertEqual(payload, u'foo')\n self.assertEqual(context, {})\n\n self.executed = True\n\n return payload\n\n pyamf.add_post_decode_processor(postprocess)\n\n # setup complete\n bytes = pyamf.encode(u'foo', encoding=pyamf.AMF3).getvalue()\n\n self.decoder.send(bytes)\n ret = next(self.decoder)\n\n self.assertTrue(self.executed)\n self.assertEqual(ret, u'foo')",
"def _pre_filter_decode(self):\n\n pass",
"def _post_hooks(self):",
"def decoder(self):\n pass",
"def FinalizeCallback(self, unused_finalize_ret):\n tf.logging.info('DecodeProgram skip FinalizeCallback.')",
"def FinalizeCallback(self, unused_finalize_ret):\n tf.logging.info('ExperimentalDecodeProgram skip FinalizeCallback.')",
"def after_parsing(self):",
"def post_process(self):\n pass",
"def post_process(self):\n pass",
"def post_process(self):\n pass",
"def post_process(self):\n pass",
"def post_process(self):\n pass",
"def test_decode(self):\n pass # TODO(tlarsen)",
"def restore_after_serialize(self):\n self.on_deserialize()",
"def decode(self):\n for layer in self.layers:\n layer.decode()",
"def decode(self):\n for layer in self.layers:\n layer.decode()",
"def postRun(self):\n pass",
"def _post_run_hook(self, runtime):\n pass",
"def decode(data): #@NoSelf",
"def _postprocess(self):",
"def _decode(self, rel_codes, anchors):\n pass",
"def decode(self, code):\n raise NotImplementedError",
"def _DecodeStep():\n _, decode_dict = self._model.ConstructDecodeGraph()\n self.decode_nm = py_utils.NestedMap(decode_dict)\n return [self._OutfeedEnqueue(decode_dict)]",
"def after_process(self, packet, ret_packet):\n pass",
"def set_decoder(self, decoder):\n self.decoded = decoder(self)",
"def clearDumpDescCallbacks(self):\n pass",
"def off_hook(self) -> None:",
"def _post_load(self):\n pass",
"def post_load(self):\r\n for _, effect in self._effects.items():\r\n effect.post_load()"
] |
[
"0.6479824",
"0.6342177",
"0.61132634",
"0.5872782",
"0.57890147",
"0.5770451",
"0.57692546",
"0.5649763",
"0.56200325",
"0.56200325",
"0.56200325",
"0.56200325",
"0.56200325",
"0.5566848",
"0.55566543",
"0.5554071",
"0.5554071",
"0.55202794",
"0.55136657",
"0.5511754",
"0.5503483",
"0.548244",
"0.5478685",
"0.5426956",
"0.5413164",
"0.5402299",
"0.5400222",
"0.5370986",
"0.5359397",
"0.5347153"
] |
0.6441413
|
1
|
Turn LED on for the duration at the given intensity.
|
def blink(self, duration: int=1, intensity: int=0xff):
# Turn LED on
self.intensity(max(0, min(intensity, 0xff)))
# Turn LED off (after a delay)
upyt.sched.loop.call_later_ms(duration, self.off)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def turn_on(self, **kwargs: Any) -> None:\n if self._dimmable:\n level = kwargs.get(ATTR_BRIGHTNESS, self._last_brightness)\n else:\n level = 255\n self._light.turn_on(to_futurenow_level(level))",
"def set_intensity(intensity):\n ret = _LIB.led_matrix_click_set_intensity(intensity)\n if ret < 0:\n raise Exception(\"led matrix click set intensity failed\")",
"def turn_on(self, **kwargs: Any) -> None:\n self._light.brightness = kwargs.get(ATTR_BRIGHTNESS, 255)\n self._light.turn_on()",
"def on(self, intensity=None, blink=False, count=None):\n\t\tif count != None:\n\t\t\tself.saveValues()\n\n\t\tif intensity != None:\n\t\t\tself.setValue(intensity, blink, count)\n\t\tself.current_count = self.count\n\t\tself.current_blink = self.blink\n\t\tself.interface.output(self.pin, self.intensity)",
"def set_intensity(self,intensity):\n intensity=int(intensity)\n assert intensity>=0 and intensity<=100\n self.usbdev.ctrl_transfer(bmRequestType=0x21, bRequest=0x09, wValue=0x03a6, wIndex=0, data_or_wLength=[0xa6,0x00,intensity])\n self.intensity=intensity",
"def turn_on(self, **kwargs):\n self._brightness = 100\n self._state = 'on'\n #self._light.brightness = kwargs.get(ATTR_BRIGHTNESS, 255)\n #self._light.turn_on()\n _LOGGER.info(\"turn_on() is called\")",
"def turn_on(self, **kwargs):\n brightness_pct = 100\n if kwargs.get(ATTR_BRIGHTNESS):\n brightness_pct = \\\n brightness_to_percentage(int(kwargs.get(ATTR_BRIGHTNESS)))\n elif self._is_dimmable:\n brightness_pct = 101 # Sets the light to last known brightness.\n self._client.set_brightness(self._id, brightness_pct)",
"def turn_on(self):\n GPIO.output(self.gpio, True) # turn on light",
"def setValue(self, intensity, blink=False, count=None):\n\t\tself.intensity = intensity\n\t\tself.count = count * 2 # Number of off and on cycles\n\t\tself.blink = blink",
"async def async_turn_on(self, **kwargs: Any) -> None:\n if (brightness := kwargs.get(ATTR_BRIGHTNESS)) is not None:\n # set the brightness, which will also turn on/off light\n if brightness == 255:\n brightness = 256 # this will end up as 16 which is max\n self._device.light_brightness = int(brightness / 16)\n else:\n self._device.light_on = True",
"def set_intensity(self, value):\n self._spectra._set_intensity(self._name, value)",
"def turn_on(self, **kwargs: Any) -> None:\n _LOGGER.debug(\"Turn on light %s %s\", self._device.ip, kwargs)\n if not self.is_on:\n self._device.power_on = True\n\n if ATTR_BRIGHTNESS in kwargs and self.brightness != kwargs[ATTR_BRIGHTNESS]:\n self._device.brightness = kwargs[ATTR_BRIGHTNESS]\n\n if ATTR_COLOR_TEMP in kwargs and self.color_temp != kwargs[ATTR_COLOR_TEMP]:\n color_temp = mired_to_kelvin(kwargs[ATTR_COLOR_TEMP])\n self._device.color_temperature = color_temp",
"def async_turn_on(self, **kwargs):\n self._state = STATE_ON\n transition = kwargs.get(ATTR_TRANSITION, self._fade_time)\n\n # Update state from service call\n if ATTR_BRIGHTNESS in kwargs:\n self._brightness = kwargs[ATTR_BRIGHTNESS]\n\n if ATTR_HS_COLOR in kwargs:\n self._rgb = color_util.color_hs_to_RGB(*kwargs[ATTR_HS_COLOR])\n # self._white_value = color_rgb_to_rgbw(*self._rgb)[3]\n\n if ATTR_WHITE_VALUE in kwargs:\n self._white_value = kwargs[ATTR_WHITE_VALUE]\n\n logging.debug(\"Setting light '%s' to %s with transition time %i\",\n self._name, repr(self.dmx_values), transition)\n asyncio.ensure_future(\n self._controller.set_channels_async(\n self._channels, self.dmx_values, transition=transition))\n self.async_schedule_update_ha_state()",
"def lightning_turnon(self):\n self.turnOn()",
"def ControlLights(state):\n for led in (RED,YELLOW,GREEN):\n GPIO.output(LED[led],state[led])\n time.sleep(FLASH_TIME)",
"def turn_on(self, **kwargs: Any) -> None:\n self._set_light(ON_STATE)",
"def power_up(self):\n t_end = time.time() + 3\n while time.time() < t_end:\n self.light_led(5)\n self.light_led(6)",
"def led_duty_cycle(val):\n set_tmr_ocr(TMR1, OCRxB, val)",
"async def async_turn_on(self, **kwargs: Any) -> None:\n self._is_on = True\n await self.enable_rain_delay()",
"def set_led(self, value):\n GPIO.output(LED_PIN, value)",
"def turnLightOn(ID):\n dislin.litmod(ID, 'ON')",
"def turn_on(self, **kwargs):\n onValue = str((kwargs.get(ATTR_BRIGHTNESS, int(self._brightness))/255)*100)\n request = requests.post(self._resource,\n data=onValue,\n timeout=10)\n if (request.status_code == 200) or (request.status_code == 201):\n self._state = True\n else:\n _LOGGER.info(\"HTTP Status Code: %s\", request.status_code)\n _LOGGER.error(\"Can't turn on %s. Is resource/endpoint offline?\", self._resource)\n\n self.schedule_update_ha_state()",
"async def async_turn_on(self, **kwargs: Any) -> None:\n if (color_temp := kwargs.get(ATTR_COLOR_TEMP)) is not None:\n self._device.light_color_temp = color_temperature_mired_to_kelvin(\n color_temp\n )\n await super().async_turn_on(**kwargs)",
"def led_intensity(\n channel, intensity=0.0, source_of_event=None, unit=None, experiment=None\n):\n try:\n from DAC43608 import DAC43608\n except NotImplementedError:\n print(\"DAC43608 not available; using MockDAC43608\")\n from pioreactor.utils.mock import MockDAC43608 as DAC43608\n\n try:\n assert 0 <= intensity <= 100\n assert channel in CHANNELS\n dac = DAC43608()\n dac.power_up(getattr(dac, channel))\n dac.set_intensity_to(getattr(dac, channel), intensity / 100)\n except ValueError as e:\n logger.debug(e, exc_info=True)\n logger.error(\n \"Is the Pioreactor hardware installed on the RaspberryPi? Unable to find I²C for LED driver.\"\n )\n return False\n else:\n state = get_current_state_from_broker(unit, experiment)\n old_intensity = state[channel]\n state[channel] = intensity\n\n logger.info(f\"Updated LED {channel} from {old_intensity} to {intensity}.\")\n publish(\n f\"pioreactor/{unit}/{experiment}/leds/{channel}/intensity\",\n intensity,\n retain=True,\n )\n\n publish(\n f\"pioreactor/{unit}/{experiment}/leds/intensity\",\n json.dumps(state),\n retain=True,\n )\n\n publish(\n f\"pioreactor/{unit}/{experiment}/led_events\",\n json.dumps(\n {\n \"channel\": channel,\n \"intensity\": intensity,\n \"event\": \"change_intensity\",\n \"source_of_event\": source_of_event,\n }\n ),\n qos=QOS.EXACTLY_ONCE,\n )\n\n return True",
"def turn_on(self):\n self._interrupt_flash()\n if not self.on:\n GPIO.output(self.pin, GPIO.HIGH)\n self.on = True",
"def emitters_on(self):\n self.wp.digitalWrite(self.LEDON_PIN, self.wp.HIGH)\n self.wp.delayMicroseconds(20)",
"def click_led_intensity(channel, intensity, source_of_event):\n unit = get_unit_name()\n experiment = get_latest_experiment_name()\n\n return led_intensity(channel, intensity, source_of_event, unit, experiment)",
"def light_on(self, pin='D13'):\n self.light_set(pin, '1')",
"def turn_on(self):\n self._remote.power(1)",
"def turn_on(self, **kwargs):\n default_hs = (0, 0) if self._hs_color is None else self._hs_color\n hue_sat = kwargs.get(ATTR_HS_COLOR, default_hs)\n\n default_brightness = 0 if self._brightness is None else self._brightness\n brightness = kwargs.get(ATTR_BRIGHTNESS, default_brightness)\n\n default_white_value = 255 if self._white_value is None else self._white_value\n white_value = kwargs.get(ATTR_WHITE_VALUE, default_white_value)\n\n if brightness == 0 and white_value == 0 and not kwargs:\n # If the light would be off, and no additional parameters were\n # passed, just turn the light on full brightness.\n brightness = 255\n white_value = 255\n\n rgb = color_util.color_hsv_to_RGB(*hue_sat, brightness / 255 * 100)\n\n self._light.set_color(*rgb, white_value)"
] |
[
"0.67766994",
"0.6760989",
"0.6644507",
"0.6636387",
"0.6633147",
"0.6597575",
"0.65660846",
"0.65433735",
"0.65252846",
"0.6483592",
"0.6381838",
"0.6280884",
"0.6212819",
"0.61426216",
"0.6135334",
"0.6120748",
"0.61169463",
"0.60711604",
"0.6065121",
"0.6059004",
"0.60465336",
"0.60449916",
"0.596264",
"0.5954672",
"0.59398115",
"0.59337115",
"0.59316343",
"0.5922191",
"0.58770865",
"0.5874188"
] |
0.76953053
|
0
|
Install a fresh Drupal site. Use Drush to set up the Drupal structure in the database.
|
def site_install(path, db_user, db_pass, db_host, db_name):
db_url = 'mysql://%s:%s@%s/%s' % (db_user, db_pass, db_host, db_name)
warning = """
WARNING: This is an inherently insecure method for interacting with the
database since the database password will be written to the command line
and will be visible to anyone who can access the .mysql_history. Additionally,
while this command is being run the password is exposed to anyone who can run
the ps command on the server. Unfortunately this is the only method that
Drush currently supports.
Do you still wish to proceed?
"""
confirm_overwrite(warning)
with cd(path):
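        # Installs the standard profile non-interactively with a default admin/admin account.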
run("drush site-install standard --db-url=%s --account-name=%s\
--account-pass=%s" % (db_url, 'admin', 'admin'))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def vanilla_site(parent, name, db_name, base_url=None, rewrite_base=None):\n\n # TODO check for trailing slash\n path = parent + '/' + name\n\n print header(\"Checking dependencies\")\n if exists(path):\n warning = \"\"\"\nA folder already exists at your destination path.\n\nDo you wish to overwrite?\n\"\"\"\n confirm_overwrite(warning)\n run(\"chmod -R u+w %s\" % path)\n run(\"rm -rf %s\" % path)\n\n if db.mysql_cnf_password_set():\n password = db.get_mysql_pass()\n print\n print green(\"You're ready to build a Drupal site.\")\n print\n else:\n exit('No MySQL credentials were found. Quitting.')\n\n print header(\"Downloading Drupal.\")\n download(parent, name)\n\n print header(\"Configuring the RewriteBase in the .htaccess file.\")\n files.enable_rewrite_base(path, rewrite_base)\n\n print header(\"Making the files directory and a settings.php file\")\n files.setup_files(path)\n files.setup_settings(path, db_name)\n\n print header(\"Creating the database and loading Drupal structure.\")\n site_install(path, 'bkennedy', password, '127.0.0.1', db_name)\n\n with cd(path):\n cache.clear()\n\n print header(\"Your Drupal site is ready to go.\")\n\n # run(\"drush dl -y devel backup_migrate\")\n # Send an email as part of the Jenkins build or at least print the URL",
"def __site_create_install(self, name, product_profile, drush_alias):\n (product, inst_profile) = product_profile.split(' ')\n try:\n # example: drush psite-create $SITE_NAME --label=\"$SITE_DESC\" --product=21e1fada-199c-492b-97bd-0b36b53a9da0\n debug_run(\"bin/pinit\")\n # Full path to drush to avoid the running a drush wrapper:\n debug_run(\"/usr/bin/drush psite-create %s --label='%s' --product=%s --nopoll\" % (name, name, product))\n debug_run(\"drush paliases\")\n self.__wait_for_job_success(name, 'create_site', delay=30, tries=36, loop_sleep=5)\n debug_run(\"drush -y @pantheon.%s.dev si --site-name='%s' %s\" % (name, name, inst_profile))\n except SystemExit as e:\n raise Exception(\"Site create failed. Err: %s\" % str(e))",
"def __site_create_install(self, p, create_source, target):\n _lst_platform_profile = create_source.split(' ')\n if len(_lst_platform_profile) != 2:\n raise Exception(\"Wrong create_source param: %s\" % create_source)\n (platform_alias, profile) = _lst_platform_profile\n\n try:\n run(\"drush provision-save @%s --context_type=site --uri=%s --platform=@%s --profile=%s --db_server=@server_localhost --client_name=admin\" % (target, target, platform_alias, profile))\n run(\"drush @%s provision-install\" % target)\n run(\"drush @hostmaster hosting-task --force @%s verify\" % platform_alias)\n\n except SystemExit as e:\n raise Exception(\"Installation of site failed. Err: %s\" % str(e))",
"def install():\n deploy()\n configure()",
"def init_site():\n\n from nikola.plugins.command.init import CommandInit, SAMPLE_CONF\n\n command = CommandInit()\n SAMPLE_CONF['SITE_URL'] = _get_site_url()\n SAMPLE_CONF.update(DATA)\n command.execute({'demo': True}, ['demo'])\n\n local('mv demo/* . && rmdir demo')\n local('touch files/.nojekyll')\n local('rm *.pyc')",
"def setup_module():\n svrhelp.setup_module()\n\n # Create the db now the server is running in its own dir.\n #db.init(...)",
"def cmd_install(self, wwwdata):\n server_dir = os.path.join(data_dir, 'server')\n shutil.copytree(server_dir, os.path.join(wwwdata, 'templeton'))\n return 0",
"def _init_remote():\r\n require('path', provided_by = [staging])\r\n\r\n create_project_dir()\r\n deploy_nosyncdb()\r\n create_virtualenv()\r\n install_requirements()\r\n create_db()\r\n create_secret_settings()\r\n syncdb()\r\n createsuperuser()\r\n install_site()\r\n reload()",
"def bootstrapFluidDB(serverName, databaseURI, solrURL, solrShards,\n createSchema=None, solrImport=None):\n # Install requirements\n\n sudo('DEBIAN_FRONTEND=noninteractive apt-get install -y '\n 'bzr git postgresql-server-dev-9.1 python-dev python-pip '\n 'python-virtualenv make logrotate openntpd')\n\n # Create a 'fluidinfo' user\n sudo('sudo adduser --system --home /var/lib/fluidinfo '\n ' --gecos \"Fluidinfo,,,\" --disabled-password '\n ' --shell /bin/bash fluidinfo')\n sudo('chown -R fluidinfo /var/lib/fluidinfo')\n\n # Upload and set up the code.\n deploymentPath = os.path.join('/srv', serverName)\n revision = datetime.utcnow().strftime('%Y%m%d-%H%M')\n revisionPath = os.path.join(deploymentPath, revision)\n\n sudo('mkdir -p {0}'.format(revisionPath))\n sudo('chown -R fluidinfo {0}'.format(deploymentPath))\n\n local('git archive --prefix=fluidinfo/ -v --format tar HEAD | '\n 'bzip2 > fluidinfo.tar.bz2')\n put('fluidinfo.tar.bz2')\n sudo('cp fluidinfo.tar.bz2 {0}'.format(revisionPath))\n\n with cd(revisionPath):\n sudo('chown -R fluidinfo {0}'.format(revisionPath))\n sudo('chown fluidinfo fluidinfo.tar.bz2')\n sudo('tar jxvf fluidinfo.tar.bz2', user='fluidinfo')\n sudo('mkdir -p var/log var/log/trace var/run var/tmp',\n user='fluidinfo')\n\n with cd(os.path.join(revisionPath, 'fluidinfo')):\n sudo('virtualenv .', user='fluidinfo')\n sudo('mkdir -p /var/lib/fluidinfo/pip-cache', user='fluidinfo')\n sudo('./bin/pip install --use-mirrors '\n '--download-cache=/var/lib/fluidinfo/pip-cache '\n '--log /tmp/pip.log '\n '-r requirements.txt', user='fluidinfo')\n if createSchema:\n sudo('bin/python bin/fluidinfo '\n ' bootstrap-database {0}'.format(databaseURI),\n user='fluidinfo')\n # We use this to make sure that the database is properly configured.\n sudo('bin/python bin/fluidinfo '\n ' patch-status {0}'.format(databaseURI),\n user='fluidinfo')\n\n # Run full DIH on all solr shards.\n if solrImport:\n for shard in solrShards.split(','):\n run('curl http://{0}/dataimport?command=full-import&'\n 'clean=true&commit=true&optimize=false'.format(shard))\n\n # On successful completion, clean up /tmp\n sudo('rm -f /tmp/pip.log')\n\n # Copy and setup configuration files.\n deployConfigFiles(\n {'deployment-path': deploymentPath,\n 'server-name': serverName,\n 'revision-path': revisionPath,\n 'solr-url': solrURL,\n 'solr-shards': solrShards,\n 'postgres-uri': databaseURI},\n\n ('fluidinfo/fluidinfo-api.conf.template',\n '{revision-path}/fluidinfo-api.conf'),\n\n ('logrotate/fluidinfo-api.template',\n '/etc/logrotate.d/fluidinfo-api'),\n\n ('upstart/fluidinfo-api.conf.template',\n '/etc/init/fluidinfo-api.conf'),\n\n ('upstart/fluidinfo-api-node.conf.template',\n '/etc/init/fluidinfo-api-node.conf'),\n\n ('ntpd/ntpd.conf',\n '/etc/openntpd/ntpd.conf'))\n\n with cd(deploymentPath):\n sudo('ln -fs {0} current'.format(revision))\n sudo('/etc/init.d/openntpd restart')\n sudo('start fluidinfo-api')",
"def quickstart(*args, **kwargs):\n\n setup(*args, **kwargs)\n update_site(*args, **kwargs)\n restart_site(*args, **kwargs)",
"def install(where='local'):\n config = get_config(where)\n print 'using configuration: %s' % config\n with settings(host_string=config['host_string']):\n if not files.exists(config['installation_dir']):\n run('git clone %(git_repo)s %(installation_dir)s' % config)\n with cd(config['installation_dir']):\n run('git submodule init')\n run('git submodule update --init')\n\n with settings(host_string=config['host_string']), cd(config['installation_dir']):\n run('python2.7 bootstrap.py -c %(cfg)s' % config)\n deploy(where)\n secs = 4\n sleep(secs)\n init_db(where)",
"def setup():\n\n with cd(env.homedir):\n\n # clone repository from github\n sudo('git clone https://github.com/starzel/demo.starzel.de.git', user=env.deploy_user) # noqa: E501\n\n with cd(env.directory):\n\n # requirements\n # sudo('python python-dev build-essential zlib1g-dev libssl-dev libxml2-dev libxslt1-dev wv poppler-utils libtiff5-dev libjpeg62-dev zlib1g-dev libfreetype6-dev liblcms1-dev libwebp-dev') # noqa: E501\n\n # prepare buildout\n sudo('ln -s local_production.cfg local.cfg', user=env.deploy_user)\n sudo('echo -e \"[buildout]\\nlogin = admin\\npassword = admin\" > secret.cfg', user=env.deploy_user) # noqa: E501\n\n # bootstrap and run bildout once\n sudo('./bin/pip install -r requirements.txt', user=env.deploy_user)\n sudo('./bin/buildout', user=env.deploy_user)\n\n # start supervisor which starts plone instance also\n sudo('./bin/supervisord', user=env.deploy_user)",
"def setup_database():\n from django.core.management import call_command\n from django import setup\n setup()\n call_command('migrate', verbosity=0, interactive=False)\n call_command('loaddata', data('initial_data.json'), verbosity=0, interactive=False)",
"def setup():\n is_installed = wp_cli('core is-installed')\n\n if is_installed:\n wp_cli('core download')\n\n install_params = {}\n\n install_params['url'] = prompt('URL: ')\n install_params['title'] = prompt('Title: ')\n install_params['admin_user'] = prompt('Admin User: ')\n install_params['admin_password'] = prompt('Admin Password: ')\n install_params['admin_email'] = prompt('Admin Email: ')\n\n config_params = {}\n\n config_params['dbname'] = prompt('Database Name: ')\n config_params['dbuser'] = prompt('Database User: ')\n config_params['dbpass'] = prompt('Database Password: ')\n config_params['dbhost'] = prompt('Database Hostname: ')\n\n wp_cli('core install {0}'.format(' '.join(['--%s=\"%s\"' % (k, v) for (k, v) in install_params.items()])))\n\n wp_cli('core config {0}'.format(' '.join(['--%s=\"%s\"' % (k, v) for (k, v) in config_params.items()])))\n\n wp_cli('core update-db')\n else:\n abort('WordPress is already installed.')",
"def setup():\n require('hosts', provided_by=[prod])\n require('code_root')\n sudo('apt-get update')\n sudo('apt-get install -y python-setuptools')\n sudo('easy_install pip')\n sudo('pip install virtualenv')\n sudo('aptitude install -y apache2')\n sudo('aptitude install -y libapache2-mod-wsgi')\n sudo('apt-get install -y nginx')\n update_webserver_config()\n sudo('mkdir -p %s; cd %s; virtualenv .;' % (env.code_root, env.code_root))\n sudo('cd %s;mkdir releases; mkdir shared; mkdir packages; mkdir shared/media; mkdir shared/media/file;' % (env.code_root))\n deploy()",
"def __site_create_import(self, name, source_archive):\n try:\n debug_run(\"bin/pinit\")\n # yes, we are using the same label as the site name. label is actually\n # a mandatory parameter.\n # Full path to drush to avoid running a drush wrapper:\n debug_run(\"/usr/bin/drush psite-import --nopoll --label=%s %s %s\" % (name, name, source_archive))\n debug_run(\"drush paliases\")\n\n self.__wait_for_job_success(name, 'import_site_dev', delay=30, tries=36, loop_sleep=5)\n except SystemExit as e:\n raise Exception(\"Site import failed. Err: %s\" % str(e))",
"def dev_site(live_path, dev_parent, dev_name, dev_db_name='',\n base_url='', rewrite_base=''):\n with mute():\n remote = git.get_remote_url(live_path)\n dev_path = '%s/%s' % (dev_parent, dev_name)\n if exists(dev_path):\n warning = \"\"\"\nA folder already exists at your destination path.\n\nDo you wish to overwrite it?\n\"\"\"\n confirm_overwrite(warning)\n\n with mute():\n run('rm -rf %s' % dev_path)\n with cd(dev_parent):\n run('git clone %s %s' % (remote, dev_name))\n\n with cd(dev_path):\n run('git fetch')\n run('git branch')\n\n # Determinine a branching strategy\n strategy_prompt = \"\"\"\nHow would you like to create your dev site:\n1) Use an existing Git branch\n2) Create a new Git branch\n:\n\"\"\"\n strategy = prompt(strategy_prompt,\n validate=validate_branching_strategy)\n\n # Checkout an existing branch\n if strategy == '1':\n branch_prompt = \"\"\"\nWhich existing branch would you like to use for this dev site?\n\"\"\"\n # TODO - add validation\n dev_branch = prompt(branch_prompt)\n run('git checkout %s' % dev_branch)\n run('git pull origin %s' % dev_branch)\n\n # Create new branch\n if strategy == '2':\n start_branch_prompt = \"\"\"\nWhich branch should we use to start from?\n\"\"\"\n start_branch = prompt(start_branch_prompt)\n run('git checkout %s' % start_branch)\n dev_branch_prompt = \"\"\"\nWhat would like to name the new dev branch?\n\"\"\"\n dev_branch = prompt(dev_branch_prompt)\n run('git checkout -b %s' % dev_branch)\n # Look for an git origin in the live site\n\n # cd to the dev parent dir and clone the repo from origin\n\n # switch to the develop branch\n\n # git fetch\n\n # git pull origin develop\n\n # Duplicate the live mysql db as a dev db\n # Look into cross platform ways to just do the db duplication without\n # needing to write the db dump file and then do the insert\n\n # Configure the settings.php and .htaccess files for the dev site\n\n # Copy the files folder from the live site to the dev site\n # Eventually there should be a option here for doing read only sym-links\n # Or maybe some S3 thingy\n\n # drush cc all on dev\n\n # done",
"def make_site_dx(context):\n portal = getSite()\n\n if portal._tree is not None:\n # We assume the object has been already initialized\n return\n\n portal._initBTrees()\n\n for obj_meta in portal._objects:\n obj_id = obj_meta[\"id\"]\n logger.info(\"Migrating object %r\", obj_id)\n # Load the content object ...\n obj = portal.__dict__.pop(obj_id)\n if not isinstance(obj, Broken) and obj_id not in (\n \"portal_quickinstaller\",\n \"portal_form_controller\",\n ):\n # ...and insert it into the btree.\n # Use _setOb so we don't reindex stuff: the paths stay the same.\n portal._setOb(obj_id, obj)\n\n delattr(portal, \"_objects\")\n portal._p_changed = True",
"def install():\n execute(generate)\n execute(upload)",
"def deploy():\n update_treesheets()\n restart_treesheets()",
"def create_sitemaps():\n with env.cd(settings.PROJECT_PATH), prefix(COMMANDS['set_environment']), \\\n prefix(COMMANDS['activate_virtualenv']):\n env.run('rm rnacentral/sitemaps/*')\n env.run('python rnacentral/manage.py create_sitemaps')\n slack(\"Created sitemaps at ves-oy-a4\")",
"def install(self, egg, dir_path):",
"def update_site():\n site_path = os.path.join(PROJECTS_ROOT, CURRENT_SITE)\n docs_path = os.path.join(site_path, 'doc_src')\n with cd(site_path):\n run('git pull --all')\n run('workon djangopatterns && pip install -r %s/setup/requirements.txt' % site_path)\n run('workon djangopatterns && %s/manage.py syncdb' % site_path)\n # run('workon djangopatterns && %s/manage.py migrate' % site_path)\n run('workon djangopatterns && %s/manage.py collectstatic --noinput' % site_path)\n run('workon djangopatterns && %s/manage.py compress' % site_path)\n with cd(docs_path):\n run('git pull --all')\n # run('workon djangopatterns && cd doc_src && make clean')\n # run('workon djangopatterns && cd doc_src && make json')\n reload_site()",
"def setUp(self):\n self.setup_remote_site()\n self.setup_local_site()",
"def init_new_db(args):\n Base.metadata.drop_all(engine)\n Base.metadata.create_all(engine)\n session = Session()\n session.add(Environment(name='normal', slickurl='http://slicker.homestead-corp.com/slickij', buildurl='?', filename='hs-tcrunij.tar.gz', tcrunijsubdir='hs-tcrunij/tcrunij'))\n session.add(Environment(name='dev', slickurl='http://octomom.homestead-corp.com/slickij', buildurl='?', filename='tcrunij.tar.gz', tcrunijsubdir='tcrunij/tcrunij'))\n session.commit()",
"def load_new_data():\n require('settings', provided_by=[production, staging])\n \n maintenance_up()\n load_data()\n maintenance_down()",
"def setup_machine():\n # Initial setup and package install.\n sudo(\"aptitude update\")\n sudo(\"aptitude -y install git-core python-dev python-setuptools \"\n \"postgresql-dev postgresql-client build-essential \"\n \"libpq-dev subversion mercurial apache2 \"\n \"libapache2-mod-wsgi\")",
"def deploy():\n with cd('/apps/sharejs-rethinkdb-example'):\n run('pwd')\n run('git stash')\n run('git pull -f origin master')\n run('fig -f prod.yml stop')\n run('fig -f prod.yml build')\n run('fig -f prod.yml up -d')",
"def install():\n PackCommandExecutor().pack()\n InstallCommandExecutor().install()",
"def bootstrap():\n _require_environment()\n\n adduser()\n install_python()\n install_git()\n install_apache()\n install_mysql()\n setup_project()"
] |
[
"0.63983303",
"0.6073262",
"0.5945958",
"0.5833292",
"0.5776026",
"0.56814235",
"0.5650706",
"0.5622328",
"0.5554226",
"0.5548526",
"0.55232364",
"0.5519522",
"0.54835784",
"0.5424124",
"0.5408809",
"0.5405939",
"0.53874534",
"0.5381521",
"0.5368194",
"0.53665733",
"0.53506017",
"0.5340146",
"0.5329633",
"0.5326657",
"0.53078705",
"0.53046197",
"0.5286795",
"0.5272127",
"0.5256125",
"0.52445424"
] |
0.7213197
|
0
|
Download the latest Drupal project
|
def download(parent, name=None):
with cd(parent):
if not name:
run("drush dl")
else:
run("drush dl --drupal-project-rename=%s" % name)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def download(self):\n logger.info(f\"downloading project {self}\")\n self.project.storage.download(f\"{self.path}/releasemanifest\", None)\n self.extract()",
"def _download_project(name, apikey):\n payload = {'apikey': apikey, 'project': name, 'version': 'portia'}\n r = requests.get(DASH_API_URL + 'as/project-slybot.zip', params=payload)\n return r.content",
"def pub_download(args, project=\"\", base_url=\"\", api_key=\"\"):\n project, base_url, api_key, updated = get_project_config(\n project=project, base_url=base_url, api_key=api_key)\n if updated:\n save_config()\n download_theme(args, base_url, api_key, prefix=project)",
"def download_fabric_factory():\n local('hg clone http://bitbucket.org/yml/fabric_factory/')",
"def _download(self):\n self._system.download(\"http://geant4.web.cern.ch/geant4/support/source/\" + self._tar_name)",
"def main():\n get_obofoundry(force_download=True)",
"def download():\n env_banner()\n\n download_data = Download()\n download_data()\n click.echo('Download done.')",
"def vanilla_site(parent, name, db_name, base_url=None, rewrite_base=None):\n\n # TODO check for trailing slash\n path = parent + '/' + name\n\n print header(\"Checking dependencies\")\n if exists(path):\n warning = \"\"\"\nA folder already exists at your destination path.\n\nDo you wish to overwrite?\n\"\"\"\n confirm_overwrite(warning)\n run(\"chmod -R u+w %s\" % path)\n run(\"rm -rf %s\" % path)\n\n if db.mysql_cnf_password_set():\n password = db.get_mysql_pass()\n print\n print green(\"You're ready to build a Drupal site.\")\n print\n else:\n exit('No MySQL credentials were found. Quitting.')\n\n print header(\"Downloading Drupal.\")\n download(parent, name)\n\n print header(\"Configuring the RewriteBase in the .htaccess file.\")\n files.enable_rewrite_base(path, rewrite_base)\n\n print header(\"Making the files directory and a settings.php file\")\n files.setup_files(path)\n files.setup_settings(path, db_name)\n\n print header(\"Creating the database and loading Drupal structure.\")\n site_install(path, 'bkennedy', password, '127.0.0.1', db_name)\n\n with cd(path):\n cache.clear()\n\n print header(\"Your Drupal site is ready to go.\")\n\n # run(\"drush dl -y devel backup_migrate\")\n # Send an email as part of the Jenkins build or at least print the URL",
"def download_latest_version(target_name = '', target_dir = None):\n url_address = 'https://github.com/muhammadfredo/FrMaya/archive/master.zip'\n if target_dir is None:\n temp_dir = path.Path(tempfile.gettempdir())\n else:\n temp_dir = path.Path(target_dir)\n temp_frmaya_zip = temp_dir / '{}.zip'.format(target_name)\n temp_frmaya_dir = temp_dir / target_name\n\n with open(temp_frmaya_zip, 'wb') as temp_zip:\n temp_zip.write(urllib2.urlopen(url_address).read())\n zipfile.ZipFile(temp_frmaya_zip).extractall(temp_frmaya_dir)\n\n return path.Path(temp_frmaya_zip).abspath(), path.Path(temp_frmaya_dir).abspath()",
"def download():\n basedir = os.path.dirname(os.path.dirname(__file__))\n print(basedir)\n datadir = os.path.join(basedir,\"data/NeonTreeEvaluation/\")\n print(\"Downloading data files to {}\".format(datadir)) \n eval_url = zenodo_url(concept_rec_id=\"3723356\", datadir=datadir)",
"def _Download( self ):\n self._DownloadPipe += PackageUtil.ExecuteSimpleCommand( \"git\", [\"clone\", \"[email protected]:mastbaum/avalanche.git\", self.GetInstallPath()], None, os.getcwd() )\n return",
"def download_build(self, name, dst_directory):\n logging.info('Not downloading build because no Filestore.')",
"def download_url(filename, url):\n latest_package_url = request.urlopen(url).read().decode(\"utf-8\")\n print(\"Downloading latest package:\\n{}\".format(latest_package_url))\n request.urlretrieve(latest_package_url, filename, reporthook=download_progress_callback)",
"def project():",
"def project():",
"def project():",
"def download_release(self):\n if self.cache_dir is not None:\n download = Download(cache=self.cache_dir)\n else:\n self.log.warning(\"not using a download cache for uwsgi\")\n download = Download()\n\n download_url = self.options.get(\"download-url\", DOWNLOAD_URL)\n download_path, is_temp = download(\n download_url.format(self.uwsgi_version), md5sum=self.md5sum)\n return download_path",
"def download1():\n #t=request.vars.arg(0)\n response.flash=request\n #print request.wsgi.environ['HTTP_REFERER']\n #print 'yghklo=',request.args[0]\n a=db(db.Project.Project_File==request.args[0]).select(db.Project.ALL)\n #a=db(db.Project.id==38).select(db.Project.ALL)\n #if a == None:\n#\t print 'silent'\n # print 'a= aabhas download',a[0].no_of_download, a[0].Project_File\n # if a[0].no_of_download==None:\n#\t a[0].no_download=0\n db(db.Project.Project_File==a[0].Project_File).update(no_of_download=(a[0].no_of_download or 0)+1)\n print 'a.id=',a[0].id\n # print len(a),'\\n'\n #print \"\\n\\n\\n\\n\"\n return response.download(request, db)",
"def download():\n raise NotImplementedError",
"def getProjectURL():",
"def __download(self):\n\n # Use the default repository if set to True\n if self.repository is True:\n self.repository = self.__default_repository\n\n if not self.repository and not self.url:\n tarball = 'ucx-{}.tar.gz'.format(self.__version)\n self.url = '{0}/v{1}/{2}'.format(self.__baseurl, self.__version,\n tarball)",
"def download(self, args):\n\n\t\t\"\"\" Default argument for Architecture \"\"\"\n\t\tif len(args) >= 4:\n\t\t\tarch = args[3]\n\t\telse:\n\t\t\tarch = platform.processor()\n\n\t\t\"\"\" Default argument for Version \"\"\"\n\t\tif len(args) >= 3:\n\t\t\tif args[2] == \"latest\":\n\t\t\t\tversion = \"Latest\"\n\t\t\telse:\n\t\t\t\tversion = args[2]\n\t\telse:\n\t\t\tversion = \"Latest\"\n\n\t\t\"\"\" Find package path from package list, based on prev. arguments \"\"\"\n\t\tif len(args) >= 2:\n\t\t\tpackage = args[1]\n\t\t\tfilename = False\n\t\t\t\n\t\t\tversions = self.master.Dump(package)\n\t\t\tfor d in versions:\n\t\t\t\tif d[\"Version\"] == version:\n\t\t\t\t\tif d[\"Version\"] != \"Latest\" and d[\"Architecture\"] == arch:\n\t\t\t\t\t\tfilename = d[\"Filename\"]\n\t\t\t\t\telse:\n\t\t\t\t\t\tfor e in versions:\n\t\t\t\t\t\t\tif e[\"Version\"] == d[\"LatestVersion\"] and e[\"Architecture\"] == arch:\n\t\t\t\t\t\t\t\tfilename = e[\"Filename\"]\n\t\t\t\t\t\t\t\tversion = d[\"LatestVersion\"];\n\t\t\tif not filename:\n\t\t\t\tself.write_line(\"ERROR XXX: Package not found.\")\n\t\t\t\treturn\n\n\t\t\t\"\"\" Find chunks to download \"\"\"\n\t\t\tid = 0\n\t\t\tto_download = False\n\t\t\tfor f in self.torrent_info.files():\n\t\t\t\tprint(f.path.replace(\"packages/\", \"\") + \" = \" + filename);\n\t\t\t\tif f.path.replace(\"packages/\", \"\") == filename:\n\t\t\t\t\tto_download = f\n\t\t\t\t\tbreak;\n\t\t\t\tid += 1\n\t\t\tif not to_download:\n\t\t\t\tprint(\"ERROR XXX: dunno\")\n\t\t\t\treturn\n\n\t\t\t\"\"\" Set chunks priority to 7? (download max priority) \"\"\"\n\t\t\tpr = self.torrent_info.map_file(id, 0, to_download.size);\n\t\t\tn_pieces = math.ceil(pr.length / self.torrent_info.piece_length() + 1);\n\n\t\t\tfor i in range(self.torrent_info.num_pieces()):\n\t\t\t\tif i in range(pr.piece, pr.piece + n_pieces):\n\t\t\t\t\tself.handler.piece_priority(i, 7)\n\n\n\t\t\t\"\"\" Print download of package status \"\"\"\n\t\t\tself.print_status(id, pr, package, version, filename)\n\t\t\t\t\n\t\t\t\"\"\" Check the server for hash validation \"\"\"\n\t\t\tif self.valid_tpkg_file(to_download.path):\n\t\t\t\tself.write_line(\"DONE {0} {1} {2} {3}\".format(package, version, arch, self.config[\"daemon\"][\"rootdir\"] + \"/\" + to_download.path).replace('//', '/'))\n\t\t\telse:\n\t\t\t\tself.write_line(\"ERROR XXX: Hash verification failed.\")\n\t\telse:\n\t\t\tself.write_line(\"INVALID ARGUMENTS\");",
"def on_DownloadTools_clicked(self):\n # TODO: not implemented yet\n # raise NotImplementedError\n rootdir = os.getcwd()\n print(f\"We will download all file in {rootdir}{os.sep}Download/ Directory\")\n self.OnlyDisplay(f\"autoDownloadGman {self.url}\")\n self.MainFile = autoDownloadGman(self.url)\n\n self.OnlyDisplay(f\"autoDeployGman {self.MainFile}\")\n autoDeployGman(self.MainFile)",
"def download():\n\treturn response.download(request, db)",
"def official(ctx, build_number):\n # TODO: Cache API calls to be nice too kashike and the gang\n minecraft_version = ctx.parent.minecraft_version\n known_builds = minecraft_version.known_paper_builds\n if not known_builds:\n raise ClickException()\n if build_number is None:\n build_number = max(known_builds)\n if build_number not in known_builds:\n print(f\"Known builds for {minecraft_version}:\", file=sys.stderr)\n print_wrapped(', '.join(map(str, known_builds)))\n raise ClickException(f\"Build {build_number} is not a valid build for {minecraft_version}\")\n latest_build = max(known_builds)\n if build_number != latest_build:\n click.echo(f\"The latest build for {minecraft_version} is {latest_build}.\")\n click.confirm(f\"Are you sure you want to use {build_number} instead?\", abort=True)\n jar = OfficialPaperJar(minecraft_version, build_number)\n try:\n jar.validate_cache()\n except CacheInvalidationException as e:\n e.print(\"Paper jar\")\n print()\n print(f\"Downloading Paper {build_number}....\")\n jar.update()\n assert jar.resolved_path.exists()\n return jar",
"def test_download(self):\n test_file = os.path.join(self._system.get_temporary_path(), \"nusoft.test\")\n self._system.download(\"http://www.github.com\", name=test_file)\n self.assertTrue(os.path.exists(test_file))\n os.remove(test_file)",
"def download():\n try:\n cli.run(\n [URL, '--output', TEMP_DIR],\n )\n except SystemExit:\n return None",
"def _download_and_extract_popt_devel(self):\n raise NotImplementedError('Implement this method.')",
"def download(all):\n print(\"Downloading\")",
"def download(self, url):\n try:\n webFile = urllib.urlopen(url)\n localFile = open(self.workdir + \"/\" + url.split('/')[-1], 'w')\n localFile.write(webFile.read())\n webFile.close()\n localFile.close()\n except IOError:\n print(\"could not get url \" + url)"
] |
[
"0.6580372",
"0.6464609",
"0.63208455",
"0.6228037",
"0.6014168",
"0.5966778",
"0.5897198",
"0.5885061",
"0.581801",
"0.57892925",
"0.5750816",
"0.5717377",
"0.5698092",
"0.5602394",
"0.5602394",
"0.5602394",
"0.5594754",
"0.55718493",
"0.5547418",
"0.5503806",
"0.54807496",
"0.5477755",
"0.5429718",
"0.5405739",
"0.5405055",
"0.53988105",
"0.5390253",
"0.5385263",
"0.5381678",
"0.5381205"
] |
0.67670256
|
0
|
Create a dev site by copying an existing live site
|
def dev_site(live_path, dev_parent, dev_name, dev_db_name='',
base_url='', rewrite_base=''):
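    # Reuse the live site's git remote as the source repository for the dev clone.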
with mute():
remote = git.get_remote_url(live_path)
dev_path = '%s/%s' % (dev_parent, dev_name)
if exists(dev_path):
warning = """
A folder already exists at your destination path.
Do you wish to overwrite it?
"""
confirm_overwrite(warning)
with mute():
run('rm -rf %s' % dev_path)
with cd(dev_parent):
run('git clone %s %s' % (remote, dev_name))
with cd(dev_path):
run('git fetch')
run('git branch')
        # Determine a branching strategy
strategy_prompt = """
How would you like to create your dev site:
1) Use an existing Git branch
2) Create a new Git branch
:
"""
strategy = prompt(strategy_prompt,
validate=validate_branching_strategy)
# Checkout an existing branch
if strategy == '1':
branch_prompt = """
Which existing branch would you like to use for this dev site?
"""
# TODO - add validation
dev_branch = prompt(branch_prompt)
run('git checkout %s' % dev_branch)
run('git pull origin %s' % dev_branch)
# Create new branch
if strategy == '2':
start_branch_prompt = """
Which branch should we use to start from?
"""
start_branch = prompt(start_branch_prompt)
run('git checkout %s' % start_branch)
dev_branch_prompt = """
What would you like to name the new dev branch?
"""
dev_branch = prompt(dev_branch_prompt)
run('git checkout -b %s' % dev_branch)
    # Look for a git origin in the live site
# cd to the dev parent dir and clone the repo from origin
# switch to the develop branch
# git fetch
# git pull origin develop
# Duplicate the live mysql db as a dev db
# Look into cross platform ways to just do the db duplication without
# needing to write the db dump file and then do the insert
# Configure the settings.php and .htaccess files for the dev site
# Copy the files folder from the live site to the dev site
    # Eventually there should be an option here for doing read-only sym-links
# Or maybe some S3 thingy
# drush cc all on dev
# done
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def create_site(apps, schema_editor):\n return site_models.Site.objects.create(\n name='The SATNet Network',\n domain='localhost:8000'\n )",
"def create(site):\n\n # Run the \"createsite\" script on the VM. \n # That will create the site for you.\n Vagrant.run_script_on_vm('createsite', site)",
"def copy_to_site():\n if os.path.exists(SITE_GFR):\n rmtree(SITE_GFR)\n\n try:\n os.makedirs(SITE_GFR)\n except OSError as e:\n # We don't care if it already exists although it shouldn't exist.\n if e.errno != errno.EEXIST:\n raise\n\n for route in os.listdir(GFR_ROUTES_LOCATION):\n copyfile(GFR_ROUTES_LOCATION + route, SITE_GFR + route)\n\n if os.path.exists(SITE_OUTPUT):\n rmtree(SITE_OUTPUT)\n\n try:\n os.makedirs(SITE_OUTPUT)\n except OSError as e:\n # We don't care if it already exists although it shouldn't exist.\n if e.errno != errno.EEXIST:\n raise\n\n for route in os.listdir(OUTPUT_LOCATION):\n copyfile(OUTPUT_LOCATION + route, SITE_OUTPUT + route)",
"def vanilla_site(parent, name, db_name, base_url=None, rewrite_base=None):\n\n # TODO check for trailing slash\n path = parent + '/' + name\n\n print header(\"Checking dependencies\")\n if exists(path):\n warning = \"\"\"\nA folder already exists at your destination path.\n\nDo you wish to overwrite?\n\"\"\"\n confirm_overwrite(warning)\n run(\"chmod -R u+w %s\" % path)\n run(\"rm -rf %s\" % path)\n\n if db.mysql_cnf_password_set():\n password = db.get_mysql_pass()\n print\n print green(\"You're ready to build a Drupal site.\")\n print\n else:\n exit('No MySQL credentials were found. Quitting.')\n\n print header(\"Downloading Drupal.\")\n download(parent, name)\n\n print header(\"Configuring the RewriteBase in the .htaccess file.\")\n files.enable_rewrite_base(path, rewrite_base)\n\n print header(\"Making the files directory and a settings.php file\")\n files.setup_files(path)\n files.setup_settings(path, db_name)\n\n print header(\"Creating the database and loading Drupal structure.\")\n site_install(path, 'bkennedy', password, '127.0.0.1', db_name)\n\n with cd(path):\n cache.clear()\n\n print header(\"Your Drupal site is ready to go.\")\n\n # run(\"drush dl -y devel backup_migrate\")\n # Send an email as part of the Jenkins build or at least print the URL",
"def make_site(\n cls,\n name,\n url,\n user_display=REMOTE_SITE_USER_DISPLAY,\n mode=SODAR_CONSTANTS['SITE_MODE_TARGET'],\n description='',\n secret=build_secret(),\n ):\n values = {\n 'name': name,\n 'url': url,\n 'mode': mode,\n 'description': description,\n 'secret': secret,\n 'user_display': user_display,\n }\n site = RemoteSite(**values)\n site.save()\n return site",
"def create_new_site(site_name):\n path = WEBROOT / site_name\n if _get_site_id(site_name) is not None or path.exists():\n raise FileExistsError('site_name_taken')\n querystring = 'insert into {} (sitename) values (%s) returning id;'\n result = execute_query(querystring.format(TABLES[0]), (site_name,))\n siteid = result[0][0]\n querystring = 'insert into {} (site_id, dirname) values (%s, %s)'\n result = execute_query(querystring.format(TABLES[2]), (siteid, '/'))\n # create the physical destination (mirror) so that css and images can be moved there\n path.mkdir(parents=True)",
"def __site_create_install(self, p, create_source, target):\n _lst_platform_profile = create_source.split(' ')\n if len(_lst_platform_profile) != 2:\n raise Exception(\"Wrong create_source param: %s\" % create_source)\n (platform_alias, profile) = _lst_platform_profile\n\n try:\n run(\"drush provision-save @%s --context_type=site --uri=%s --platform=@%s --profile=%s --db_server=@server_localhost --client_name=admin\" % (target, target, platform_alias, profile))\n run(\"drush @%s provision-install\" % target)\n run(\"drush @hostmaster hosting-task --force @%s verify\" % platform_alias)\n\n except SystemExit as e:\n raise Exception(\"Installation of site failed. Err: %s\" % str(e))",
"def make_site(self, site_name, site_dir):\n web_root = self.settings.get('webrootDir')\n folder = os.path.join(site_dir, web_root)\n make_file = self.find_make_file(site_name, site_dir)\n Utils.remove_dir(folder)\n if make_file and web_root:\n # Run drush make\n # Get the repo webroot\n make_opts = self.settings.get('makeOpts')\n make_cmds = ['make', make_file, folder]\n make_cmds += make_opts\n make = Drush.call(make_cmds)\n return make",
"def setup_remote_site(self):\n raise NotImplementedError",
"def web_site_mode():\r\n css = make_style.create_css()\r\n websites = dict()\r\n argument = dict()\r\n links = '<p align=\"center\">'\r\n for args in range(1, len(sys.argv)):\r\n website, head = make_content.create_content(sys.argv[args])\r\n websites[head] = website\r\n argument[sys.argv[args]] = head\r\n filename = convert_txt_to_html(sys.argv[args])\r\n links += '<a href=' + filename + '>' + head + '</a>---'\r\n links += END_P\r\n for args in range(1, len(sys.argv)):\r\n html_body = \"\"\r\n header = create_head(css, argument[sys.argv[args]])\r\n html_head = header + links\r\n website = websites[argument[sys.argv[args]]]\r\n for website_info in website:\r\n html_body += website_create_body(website_info)\r\n filename = convert_txt_to_html(sys.argv[args])\r\n html = html_head + html_body\r\n file = open(filename, \"+w\")\r\n file.write(html)\r\n file.close()",
"def init(theme=\"dimension\", name=\"John Doe\"):\n print(f\"Generating site at {os.path.abspath(name)}\")\n\n shutil.copytree(os.path.join(os.path.dirname(__file__), \"example_site\"), os.path.abspath(name))\n\n # Generate initial config.yml file\n with open(os.path.join(name, \"config.yml\"), \"w+\") as config_file:\n config_file.write(f\"# See https://ezcv.readthedocs.io for documentation\\nname: {name}\\ntheme: {theme}\\nresume: false\")\n\n if theme != \"dimension\":\n # Check if theme is remote theme, and download it if it is\n \n remote_themes = get_remote_themes()\n if remote_themes.get(theme, False):\n original_directory = os.path.abspath(os.getcwd()) # Store CWD\n os.chdir(os.path.abspath(name)) # Go into new site folder\n setup_remote_theme(theme, remote_themes[theme]) # Download theme \n os.chdir(original_directory) # Navigate back to original cwd\n\n print(f\"Site generated and is available at {os.path.abspath(name)}\")",
"def mkweb(project_name, mode):\n\n MAIN_FOLDER = data.get_base_path(data.WEB)\n\n if mode != 'MAIN':\n MAIN_FOLDER += f'{mode}/'\n \n webproject = folders.WebProject(project_name, MAIN_FOLDER)\n\n webproject.create_project()\n click.echo(f'Project created succesfull in {webproject.project_path}')\n cli_commands.start_git(webproject.project_path)\n cli_commands.show_dir_path(webproject.project_path)\n # cli_commands.start_vscode(webproject.project_path)\n\n click.echo('Project Path copied to clipboard...')",
"def new_site(site_id):\n #the status check may be unnecessary\n if site_id:\n #we have to request the site object because the\n #callback data does not include _etag\n data = get_resource(\"sites\", \"/\" + site_id)\n url = \"{0}/{1}\".format(conf.api_server, data[\"_links\"][\"self\"][\"href\"])\n name = data['name']\n sid = data['sid']\n path = data['sid']\n #username = data['username']\n mail = data['mail']\n etag = data['_etag']\n pw = mysql_password()\n\n template = env.get_template('settings.php')\n settings_php = template.render(\n profile='cu_fit',\n site=name,\n sid=sid,\n pw=pw,\n path=path,\n status=data['status'],\n )\n\n with open(\"/tmp/{0}.settings.php\".format(sid), \"w\") as ofile:\n ofile.write(settings_php)\n\n fabfile.add_site_db(sid, name, pw)\n #using execute forces @roles('ENV') to be used when running the function\n execute(fabfile.add_site_dslm, sid=sid)\n execute(fabfile.push_settings, sid=sid)\n\n fabfile.site_install(sid, name, conf.profile)\n\n #if fit plus you might have packages to symlink\n if \"packages\" in data:\n execute(fabfile.add_site_packages, sid=sid, packages=data[\"packages\"])\n\n #make sure passwords aren't in plain text when passed around by the api\n pw = encrypt_db_pw(conf.SECRET_KEY, pw)\n payload = {'db_key': pw, 'sid': sid, 'path': path}\n #if the site was posted as 'available' don't change it to 'installed'\n if data['status'] == 'pending':\n payload['status'] = 'installed'\n #update the site object in the api with the db pw...etag is required\n send_site_patch(url, payload, etag)\n #clean up site file permissions...\n #execute(fabfile.set_site_perms, sid=sid)\n #email the user...template in templates/email.html\n #send_site_intro(site_id, sid, you = mail)\n #update the aliases here\n #export_aliases.delay()",
"def copy():\n put(os.path.join('dist', get_egg_name()), remote_egg_dir)",
"def launch_site(data):\n\n if data and data['status'] == 'launching':\n #cast to string in case the id is an object\n site_id = str(data['_id'])\n #request full site info to get the site's ID hash\n data = get_resource(\"sites\", \"/\" + site_id)\n template = env.get_template('settings.php')\n settings_php = template.render(\n profile='cu_fit',\n sid=data['sid'],\n path=data['path'],\n status=data['status'],\n )\n\n with open(\"/tmp/{0}.settings.php\".format(data['sid']), \"w\") as ofile:\n ofile.write(settings_php)\n\n execute(fabfile.launch_site, sid=data['sid'])",
"def create(site):\n\n # Make sure the site name is all lowercased,\n # with no spaces. Yesod requires that.\n if not Utilities.is_word(site):\n Utilities.log(\"Site names must contain letters and numbers only,\")\n Utilities.log(\"with no spaces, dashes, or underscores.\")\n Utilities.log(\"Please choose a valid site name.\")\n exit(1)\n\n # Create the site.\n Yesod.create(site)",
"def setup_local_site(self):\n raise NotImplementedError",
"def create_site(build_dir, content_dir, template_dir, no_cache, verbose):\n\n # set logging options\n logging.basicConfig(\n level=logging.DEBUG if verbose else logging.INFO,\n format='%(levelname)s: %(message)s',\n )\n\n # set directories\n build_path = Path(build_dir)\n\n content_path = Path(content_dir)\n if not content_path.is_dir():\n click.echo(f\"Directory '{content_path}' not found.\")\n sys.exit(-1)\n\n template_path = Path(template_dir)\n if not template_path.is_dir():\n click.echo(f\"Directory '{template_path}' not found.\")\n sys.exit(-1)\n\n template_paths = (default_template_path, template_path)\n\n try:\n # process file tree in template path\n build_file_tree(template_path, build_path, no_cache,\n actions={\n '.html': ignore,\n '.scss': ('.css', compile_scss),\n }\n )\n\n # get site object model\n som = SOM(content_path)\n\n # initialize render action\n render_html = render_html_factory(som, template_paths)\n\n # process file tree in content path\n build_file_tree(content_path, build_path, no_cache,\n actions={\n '.scss': ('.css', compile_scss),\n '.rst': ('.html', render_html),\n '.json': ('.html', render_html),\n '.yaml': ('.html', render_html),\n '.yml': ('.html', render_html),\n '.html': ('.html', render_html),\n '.htm': ('.html', render_html),\n '.md': ('.html', render_html),\n '.markdown': ('.html', render_html),\n }\n )\n\n except SpekulatioError as err:\n click.echo(str(err))\n sys.exit(-1)",
"def deploy():\n build()\n copy()\n install()",
"def deploy():\n with cd(\"~/public_html/\"):\n run(\"/usr/local/cpanel/3rdparty/bin/git pull\")\n\n with cd(\"~/public_html/skin/frontend/gemz/default/tools/\"):\n run(\"grunt default\")\n #sudo(\"/scripts/enablefileprotect\")",
"def run(site):\n\n # Make sure there is a site by this name.\n Yesod.check_for_site(site)\n\n # Now run the site on the development server.\n Yesod.run(site)",
"def update_site():\n site_path = os.path.join(PROJECTS_ROOT, CURRENT_SITE)\n docs_path = os.path.join(site_path, 'doc_src')\n with cd(site_path):\n run('git pull --all')\n run('workon djangopatterns && pip install -r %s/setup/requirements.txt' % site_path)\n run('workon djangopatterns && %s/manage.py syncdb' % site_path)\n # run('workon djangopatterns && %s/manage.py migrate' % site_path)\n run('workon djangopatterns && %s/manage.py collectstatic --noinput' % site_path)\n run('workon djangopatterns && %s/manage.py compress' % site_path)\n with cd(docs_path):\n run('git pull --all')\n # run('workon djangopatterns && cd doc_src && make clean')\n # run('workon djangopatterns && cd doc_src && make json')\n reload_site()",
"def check_for_site(site):\n\n # Construct the path to the site folder.\n site_path = os.path.dirname(Settings.devbox_folder) + os.sep + site\n\n # Make sure the folder exists. If there's no folder,\n # there is no such site. \n if not os.path.isdir(site_path):\n message1 = \"There is no site named '\" + site + \"'.\"\n message2 = \"Please create it first with the command: \"\n message2 += Settings.program + \" create \" + site\n Utilities.log(message1)\n Utilities.log(message2)\n exit(1)",
"def deploy_ec2():\n import webbrowser\n run('cd deepvideoanalytics && git pull && cd docker_GPU && ./rebuild.sh && nvidia-docker-compose up -d')\n # webbrowser.open('{}:8000'.format(env.hosts[0]))",
"def __build_one_site(root_dir, all_docs, domain_refs, doc_entry_ref, www_root_dir, site_name, github_url, user_group):\n www_build_dir = www_root_dir + site_name + \"/\"\n\n print(\"Reset build directory : \" + www_build_dir)\n shutil.rmtree(www_build_dir, ignore_errors=True)\n try:\n shutil.copytree(\"src/default_www/\", www_build_dir)\n except: # strange behaviour on Windows, try again ...\n shutil.copytree(\"src/default_www/\", www_build_dir)\n \n all_domain_cats = { XML.xpath_plain(all_docs, \".//*[@ref='\"+itm_ref+\"']/@cat\") for itm_ref in domain_refs } - {\"\"}\n \n domain_cats = []\n for cat_ref in all_domain_cats:\n cat_restrict = XML.xpath_plain(all_docs, \".//*[@ref='\"+cat_ref+\"']/@restricted_to\")\n if (cat_restrict == \"\") or (user_group in cat_restrict):\n domain_cats += [cat_ref]\n with_errors = user_group in \"#devs,#admin\" \n path = HTM.store_home_page(www_build_dir, all_docs, \"\", root_dir, github_url, doc_entry_ref, domain_cats, domain_refs, with_errors)\n\n for cat_ref in domain_cats:\n try:\n path = HTM.store_index_page(www_build_dir, all_docs, cat_ref, root_dir, github_url, doc_entry_ref, domain_cats, domain_refs, with_errors)\n except:\n LIB.debug_error()\n LIB.debug(\"### Error for category index : \", cat_ref)\n\n for itm_ref in domain_refs:\n cat_ref = XML.xpath_plain(all_docs, \".//*[@ref='\"+itm_ref+\"']/@cat\")\n if (cat_ref in domain_cats):\n try:\n path = HTM.store_content_page(www_build_dir+cat_ref+\"/\", all_docs, itm_ref, root_dir, github_url, doc_entry_ref, domain_cats, domain_refs, with_errors)\n except:\n LIB.debug_error()\n LIB.debug(\"### Error for file : \", itm)\n\n path = HTM.store_glossary_page(www_build_dir, all_docs, \"\", root_dir, github_url, doc_entry_ref, domain_cats, domain_refs, with_errors)\n\n if with_errors:\n path = HTM.store_error_page(www_build_dir, all_docs, \"\", root_dir, github_url, doc_entry_ref, domain_cats, domain_refs, with_errors)",
"def make_site_dx(context):\n portal = getSite()\n\n if portal._tree is not None:\n # We assume the object has been already initialized\n return\n\n portal._initBTrees()\n\n for obj_meta in portal._objects:\n obj_id = obj_meta[\"id\"]\n logger.info(\"Migrating object %r\", obj_id)\n # Load the content object ...\n obj = portal.__dict__.pop(obj_id)\n if not isinstance(obj, Broken) and obj_id not in (\n \"portal_quickinstaller\",\n \"portal_form_controller\",\n ):\n # ...and insert it into the btree.\n # Use _setOb so we don't reindex stuff: the paths stay the same.\n portal._setOb(obj_id, obj)\n\n delattr(portal, \"_objects\")\n portal._p_changed = True",
"def copy_docs():\n local('rsync -av --delete --exclude=.svn %s:%s/ /tmp/djangodocs/' %\n (env.hosts[0], env.deploy_base.child('docbuilds')))",
"def setup_local_site(self):\n # create Tenant, App, EPG on site 1\n site1 = Session(SITE1_URL, SITE1_LOGIN, SITE1_PASSWORD)\n resp = site1.login()\n self.assertTrue(resp.ok)\n\n tenant = Tenant('intersite-testsuite')\n app = AppProfile('app', tenant)\n epg = EPG('epg', app)\n\n resp = tenant.push_to_apic(site1)\n self.assertTrue(resp.ok)",
"def setup_local_site(self):\n # create Tenant, App, EPG on site 1\n site1 = Session(SITE1_URL, SITE1_LOGIN, SITE1_PASSWORD)\n resp = site1.login()\n self.assertTrue(resp.ok)\n\n tenant = Tenant('intersite-testsuite')\n app = AppProfile('app', tenant)\n epg = EPG('epg', app)\n\n resp = tenant.push_to_apic(site1)\n self.assertTrue(resp.ok)",
"def setup_local_site(self):\n # create Tenant, App, EPG on site 1\n site1 = Session(SITE1_URL, SITE1_LOGIN, SITE1_PASSWORD)\n resp = site1.login()\n self.assertTrue(resp.ok)\n\n tenant = Tenant('intersite-testsuite')\n app = AppProfile('app', tenant)\n epg = EPG('epg', app)\n\n resp = tenant.push_to_apic(site1)\n self.assertTrue(resp.ok)"
] |
[
"0.6176075",
"0.60619366",
"0.6060742",
"0.6030078",
"0.59882927",
"0.5953914",
"0.5896026",
"0.58862174",
"0.5880198",
"0.5772577",
"0.568508",
"0.5643675",
"0.5567811",
"0.55583143",
"0.5544927",
"0.5542888",
"0.55298686",
"0.5514869",
"0.5511138",
"0.5498806",
"0.548514",
"0.54766184",
"0.54558396",
"0.5443236",
"0.5430185",
"0.542897",
"0.5417366",
"0.54110765",
"0.54110765",
"0.54110765"
] |
0.708023
|
0
|
Set up a complete, vanilla Drupal install: download Drupal, configure the settings.php database file, configure the .htaccess file, and then populate the database with the default Drupal structure.
|
def vanilla_site(parent, name, db_name, base_url=None, rewrite_base=None):
# TODO check for trailing slash
path = parent + '/' + name
print header("Checking dependencies")
if exists(path):
warning = """
A folder already exists at your destination path.
Do you wish to overwrite?
"""
confirm_overwrite(warning)
run("chmod -R u+w %s" % path)
run("rm -rf %s" % path)
if db.mysql_cnf_password_set():
password = db.get_mysql_pass()
print
print green("You're ready to build a Drupal site.")
print
else:
exit('No MySQL credentials were found. Quitting.')
print header("Downloading Drupal.")
download(parent, name)
print header("Configuring the RewriteBase in the .htaccess file.")
files.enable_rewrite_base(path, rewrite_base)
print header("Making the files directory and a settings.php file")
files.setup_files(path)
files.setup_settings(path, db_name)
print header("Creating the database and loading Drupal structure.")
site_install(path, 'bkennedy', password, '127.0.0.1', db_name)
with cd(path):
cache.clear()
print header("Your Drupal site is ready to go.")
# run("drush dl -y devel backup_migrate")
# Send an email as part of the Jenkins build or at least print the URL
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def setup_module():\n svrhelp.setup_module()\n\n # Create the db now the server is running in its own dir.\n #db.init(...)",
"def setup():\n\n debs = (\"python-setuptools\", \"apache2\", \"libapache2-mod-wsgi\")\n\n require(\"hosts\", provided_by=[production, staging])\n sudo(\"apt-get install %s\" % \" \".join(debs))\n sudo(\"easy_install virtualenv pip\")\n sudo(\"mkdir -p %(path)s\" % env)\n with cd(\"%(path)s\" % env):\n sudo(\"mkdir -p releases; mkdir -p packages\")\n sudo(\"virtualenv --no-site-packages .\")\n sudo(\"mkdir -p /var/log/twit-demo; chown www-data:www-data /var/log/twit-demo\")",
"def setup():\n is_installed = wp_cli('core is-installed')\n\n if is_installed:\n wp_cli('core download')\n\n install_params = {}\n\n install_params['url'] = prompt('URL: ')\n install_params['title'] = prompt('Title: ')\n install_params['admin_user'] = prompt('Admin User: ')\n install_params['admin_password'] = prompt('Admin Password: ')\n install_params['admin_email'] = prompt('Admin Email: ')\n\n config_params = {}\n\n config_params['dbname'] = prompt('Database Name: ')\n config_params['dbuser'] = prompt('Database User: ')\n config_params['dbpass'] = prompt('Database Password: ')\n config_params['dbhost'] = prompt('Database Hostname: ')\n\n wp_cli('core install {0}'.format(' '.join(['--%s=\"%s\"' % (k, v) for (k, v) in install_params.items()])))\n\n wp_cli('core config {0}'.format(' '.join(['--%s=\"%s\"' % (k, v) for (k, v) in config_params.items()])))\n\n wp_cli('core update-db')\n else:\n abort('WordPress is already installed.')",
"def bootstrap():\n _require_environment()\n\n adduser()\n install_python()\n install_git()\n install_apache()\n install_mysql()\n setup_project()",
"def setup():\n\n with cd(env.homedir):\n\n # clone repository from github\n sudo('git clone https://github.com/starzel/demo.starzel.de.git', user=env.deploy_user) # noqa: E501\n\n with cd(env.directory):\n\n # requirements\n # sudo('python python-dev build-essential zlib1g-dev libssl-dev libxml2-dev libxslt1-dev wv poppler-utils libtiff5-dev libjpeg62-dev zlib1g-dev libfreetype6-dev liblcms1-dev libwebp-dev') # noqa: E501\n\n # prepare buildout\n sudo('ln -s local_production.cfg local.cfg', user=env.deploy_user)\n sudo('echo -e \"[buildout]\\nlogin = admin\\npassword = admin\" > secret.cfg', user=env.deploy_user) # noqa: E501\n\n # bootstrap and run bildout once\n sudo('./bin/pip install -r requirements.txt', user=env.deploy_user)\n sudo('./bin/buildout', user=env.deploy_user)\n\n # start supervisor which starts plone instance also\n sudo('./bin/supervisord', user=env.deploy_user)",
"def setup():\n require('hosts', provided_by=[prod])\n require('code_root')\n sudo('apt-get update')\n sudo('apt-get install -y python-setuptools')\n sudo('easy_install pip')\n sudo('pip install virtualenv')\n sudo('aptitude install -y apache2')\n sudo('aptitude install -y libapache2-mod-wsgi')\n sudo('apt-get install -y nginx')\n update_webserver_config()\n sudo('mkdir -p %s; cd %s; virtualenv .;' % (env.code_root, env.code_root))\n sudo('cd %s;mkdir releases; mkdir shared; mkdir packages; mkdir shared/media; mkdir shared/media/file;' % (env.code_root))\n deploy()",
"def site_install(path, db_user, db_pass, db_host, db_name):\n db_url = 'mysql://%s:%s@%s/%s' % (db_user, db_pass, db_host, db_name)\n warning = \"\"\"\nWARNING: This is an inherently insecure method for interacting with the\ndatabase since the database password will be written to the command line\nand will be visible to anyone who can access the .mysql_history. Additionally,\nwhile this command is being run the password is exposed to anyone who can run\nthe ps command on the server. Unfortunately this is the only method that\nDrush currently supports.\n\nDo you still wish to proceed?\n\"\"\"\n confirm_overwrite(warning)\n\n with cd(path):\n run(\"drush site-install standard --db-url=%s --account-name=%s\\\n --account-pass=%s\" % (db_url, 'admin', 'admin'))",
"def setup_database():\n from django.core.management import call_command\n from django import setup\n setup()\n call_command('migrate', verbosity=0, interactive=False)\n call_command('loaddata', data('initial_data.json'), verbosity=0, interactive=False)",
"def setup():\n _confirm_branch()\n \n require('settings', provided_by=[production, staging])\n require('branch', provided_by=[stable, master, branch])\n \n setup_directories()\n setup_virtualenv()\n clone_repo()\n checkout_latest()\n install_requirements()\n install_apache_conf()\n deploy_to_s3()",
"def setup_machine():\n # Initial setup and package install.\n sudo(\"aptitude update\")\n sudo(\"aptitude -y install git-core python-dev python-setuptools \"\n \"postgresql-dev postgresql-client build-essential \"\n \"libpq-dev subversion mercurial apache2 \"\n \"libapache2-mod-wsgi\")",
"def bootstrap():\n create_virtualenv()\n install_init_script()\n if not files.exists(env.code_root):\n clone_all()\n #deploy_from_local()\n pull_and_checkout_all()\n update_requirements()\n print '\\nNow add your database password to localsettings.py and run syncdb'",
"def setup_server():\n\n require('environment', provided_by=env.environments)\n upgrade_packages()\n # Install required system packages for deployment, plus some extras\n # Install pip, and use it to install virtualenv\n install_packages()\n sudo(\"easy_install -i http://d.pypi.python.org/simple -U pip\")\n sudo(\"pip install -i http://d.pypi.python.org/simple -U virtualenv\")\n create_postgis_template()\n create_db_user()\n create_db()\n create_webserver_user()",
"def setup_db():\n logger.info('Setting up db')\n setup_all_db()\n setup_emails()",
"def setup(*args, **kwargs):\n\n mirror = kwargs.get('mirror','n')\n setup_server(mirror)\n setup_django(*args, **kwargs)\n put_config_files(*args)",
"def setup_mysql():\n with lcd(env.projectroot):\n put(\"manage/sysconf/%(target)s/mysql/setup-mysql.sql\" % env, \"/tmp\")\n #sudo(\"mysql -u root -p < /tmp/setup-mysql.sql\")\n sudo(\"mysql -u root < /tmp/setup-mysql.sql\")",
"def install():\n deploy()\n configure()",
"def setup():\n\n with cd(env.homedir):\n\n # clone repository from github\n sudo('git clone https://github.com/collective/demo.plone.de.git', user=env.deploy_user) # noqa: E501\n\n with cd(env.directory):\n\n # requirements\n # sudo('python python-dev build-essential zlib1g-dev libssl-dev libxml2-dev libxslt1-dev wv poppler-utils libtiff5-dev libjpeg62-dev zlib1g-dev libfreetype6-dev liblcms1-dev libwebp-dev') # noqa: E501\n\n # prepare buildout\n if env.latest:\n if env.python3:\n sudo('ln -s local_demo_nightly_py3.cfg local.cfg', user=env.deploy_user) # noqa: E501\n else:\n sudo('ln -s local_demo_nightly_py2.cfg local.cfg', user=env.deploy_user) # noqa: E501\n else:\n sudo('ln -s local_production.cfg local.cfg', user=env.deploy_user)\n sudo('echo -e \"[buildout]\\nlogin = admin\\npassword = admin\" > secret.cfg', user=env.deploy_user) # noqa: E501\n\n # bootstrap and run bildout once\n if env.latest:\n sudo('./bin/pip install --no-cache-dir -r https://raw.githubusercontent.com/plone/buildout.coredev/5.2/requirements.txt', user=env.deploy_user) # noqa: E501\n else:\n sudo('./bin/pip install --no-cache-dir -r https://raw.githubusercontent.com/starzel/buildout/5.2/requirements.txt', user=env.deploy_user) # noqa: E501\n sudo('./bin/buildout', user=env.deploy_user)\n\n # start supervisor which starts plone instance also\n sudo('./bin/supervisord', user=env.deploy_user)",
"def setup():\n # Ignore errors if the user already exists.\n with settings(user=env.ROOT_USER, password=env.ROOT_PASS, warn_only=True):\n # Create a new system user.\n result = execute('system.user_create',\n env.SYSTEM_USER,\n env.SYSTEM_PASS)\n\n # Upload SSH key for the new system.\n if result.get(env.host):\n execute('system.user_sshkey', env.SYSTEM_USER)\n\n ##############################\n # RUN SERVER UPDATES\n ##############################\n\n execute('system.update')\n\n ##############################\n # BASIC SERVER SECURITY\n ##############################\n\n # Disable password authentication.\n execute('system.ssh_disable_password_authentication')\n # Disable root login.\n execute('system.ssh_disable_root_login')\n # Restart SSH.\n execute('system.ssh_restart')\n\n # Install ufw\n execute('ufw.install')\n # Deny incoming connections.\n execute('ufw.default')\n # Allow SSH (22/tcp) access.\n execute('ufw.allow', 'ssh')\n # Allow HTTP (80/tcp) access.\n execute('ufw.allow', 'http')\n # Allow HTTPS (443/tcp) access.\n execute('ufw.allow', 'https')\n # Enable the firewall.\n execute('ufw.enable')\n\n # Install supervisor\n execute('supervisor.install')\n\n # Install mercurial\n execute('mercurial.install')\n\n # Install nginx\n execute('nginx.install')\n execute('nginx.config')\n execute('nginx.restart')\n\n # Setup Python Environment.\n require('PYTHON_VENV')\n\n execute('python.dev')\n execute('python.venv', env.PYTHON_VENV)\n execute('python.install', env.PYTHON_VENV)\n\n # Deploy the project.\n #\n # fab --config=config.conf project.clone \\\n # project.config \\\n # project.migrate \\\n # project.collectstatic \\\n # project.restart\n execute('project.clone')\n execute('project.config')\n execute('project.migrate')\n execute('project.collectstatic')\n execute('project.restart')\n\n execute('supervisor.restart')\n execute('supervisor.reread')\n execute('supervisor.update')",
"def prepare_graphite():\n # Setup sys.path\n prepare_graphite_imports()\n os.environ['DJANGO_SETTINGS_MODULE'] = 'graphite.settings'\n\n # Redirect logs somewhere writable\n from django.conf import settings\n settings.LOG_DIR = tempfile.gettempdir()\n\n # Setup Django\n import django\n django.setup()",
"def setup_database(self):\n self.db.setup_database()",
"def _init_remote():\r\n require('path', provided_by = [staging])\r\n\r\n create_project_dir()\r\n deploy_nosyncdb()\r\n create_virtualenv()\r\n install_requirements()\r\n create_db()\r\n create_secret_settings()\r\n syncdb()\r\n createsuperuser()\r\n install_site()\r\n reload()",
"def setup():\n require('hosts', 'project_path', provided_by=envs.ENVS)\n\n if not exists(env.project_path):\n abort(red('Project path ({project_path}) does not exist. '\n 'Create it on the server before continuing.'.format(**env)))\n\n with cd(env.project_path):\n run('mkdir -p api renderer conf markup_renderer')\n run('mkdir -p api/static api/uploads')\n\n make_release_folders('api')\n make_release_folders('renderer')",
"def setup():\n load_app()\n setup_db()",
"def setUp(self):\n self.setup_remote_site()\n self.setup_local_site()",
"def setup(self):\n \n dbpath, config = self._start()\n \n self.logger.msg1(\"Workding directory: \"+dirname(dbpath))\n # test if already exists - build from scratch or not?\n if exists(dbpath):\n if not self.reset: \n self.logger.msg1(\"Skipping database build; database exists\")\n return None, None \n self.logger.msg1(\"Removing existing database\")\n os.remove(dbpath)\n\n # create a new database file\n self.logger.msg1(\"Creating new database: \"+basename(dbpath)) \n setup_db(dbpath, tables=self.tables)\n \n return dbpath, config",
"def bootstrap():\n\n require('environment', provided_by=env.environments)\n sudo('mkdir -p %(root)s' % env, user=env.deploy_user)\n clone_repo()\n setup_dirs()\n link_config_files()\n update_services()\n create_virtualenv()\n update_requirements()\n create_local_settings()",
"def install_module(request):\n reuse_db = request.config.getoption(\"--reuse-db\")\n\n if request.config.getoption(\"--db\") == 'sqlite':\n os.environ['TRYTOND_DATABASE_URI'] = \"sqlite://\"\n if reuse_db:\n # A hack to check if the database exists and if it\n # does, load that and run tests.\n Database = backend.get('Database')\n\n # cursor.test forgets to set flavor!\n # no time to report a bug!\n Flavor.set(Database.flavor)\n os.environ['DB_NAME'] = 'fulfilio'\n else:\n os.environ['DB_NAME'] = ':memory:'\n\n elif request.config.getoption(\"--db\") == 'postgres':\n os.environ['TRYTOND_DATABASE_URI'] = \"postgresql://\"\n if reuse_db:\n os.environ['DB_NAME'] = 'test_fulfilio'\n else:\n os.environ['DB_NAME'] = 'test_' + str(int(time.time()))\n\n if reuse_db:\n Database = backend.get('Database')\n database = Database().connect()\n cursor = database.cursor()\n databases = database.list(cursor)\n cursor.close()\n if os.environ['DB_NAME'] in databases:\n if request.config.getoption(\"--reset-db\"):\n cursor = database.cursor()\n databases = database.drop(cursor, os.environ['DB_NAME'])\n cursor.close()\n else:\n # tryton test forgets to init the pool\n # for existing database\n Pool(os.environ['DB_NAME']).init()\n\n config.set('database', 'uri', os.environ['TRYTOND_DATABASE_URI'])\n from trytond.tests import test_tryton\n test_tryton.install_module('payment_gateway_stripe')",
"def test_installation(request):\n import sys, os, os.path\n from Pyblosxom import pyblosxom\n\n config = request.getConfiguration()\n\n # BASE STUFF\n print \"Welcome to PyBlosxom's installation verification system.\"\n print \"------\"\n print \"]] printing diagnostics [[\"\n print \"pyblosxom: %s\" % pyblosxom.VERSION_DATE\n print \"sys.version: %s\" % sys.version.replace(\"\\n\", \" \")\n print \"os.name: %s\" % os.name\n print \"codebase: %s\" % config.get(\"codebase\", \"--default--\")\n print \"------\"\n\n # CONFIG FILE\n print \"]] checking config file [[\"\n print \"config has %s properties set.\" % len(config)\n print \"\"\n required_config = [\"datadir\"]\n\n nice_to_have_config = [\"blog_title\", \"blog_author\", \"blog_description\",\n \"blog_language\", \"blog_encoding\", \n \"base_url\", \"depth\", \"num_entries\", \"renderer\", \n \"cacheDriver\", \"cacheConfig\", \"plugin_dirs\", \n \"load_plugins\"]\n missing_properties = 0\n for mem in required_config:\n if not config.has_key(mem):\n print \" missing required property: '%s'\" % mem\n missing_properties = 1\n\n for mem in nice_to_have_config:\n if not config.has_key(mem):\n print \" missing optional property: '%s'\" % mem\n\n print \"\"\n print \"Refer to the documentation for what properties are available\"\n print \"and what they do.\"\n\n if missing_properties:\n print \"\"\n print \"Missing properties must be set in order for your blog to\"\n print \"work.\"\n print \"\"\n print \"This must be done before we can go further. Exiting.\"\n return\n\n print \"PASS: config file is fine.\"\n\n print \"------\"\n print \"]] checking datadir [[\"\n\n # DATADIR\n # FIXME - we should check permissions here?\n if not os.path.isdir(config[\"datadir\"]):\n print \"datadir '%s' does not exist.\" % config[\"datadir\"] \n print \"You need to create your datadir and give it appropriate\"\n print \"permissions.\"\n print \"\"\n print \"This must be done before we can go further. Exiting.\"\n return\n\n print \"PASS: datadir is fine.\"\n\n print \"------\"\n print \"Now we're going to verify your plugin configuration.\"\n\n if config.has_key(\"plugin_dirs\"):\n\n from Pyblosxom import plugin_utils\n plugin_utils.initialize_plugins(config[\"plugin_dirs\"],\n config.get(\"load_plugins\", None))\n\n no_verification_support = []\n\n for mem in plugin_utils.plugins:\n if \"verify_installation\" in dir(mem):\n print \"=== plugin: '%s'\" % mem.__name__\n\n if \"__version__\" in dir(mem):\n print \" version: %s\" % mem.__version__\n else:\n print \" plugin has no version.\"\n\n try:\n if mem.verify_installation(request) == 1:\n print \" PASS\"\n else:\n print \" FAIL!!!\"\n except AssertionError, error_message:\n print \" FAIL!!! \", error_message\n\n else:\n no_verification_support.append(mem.__name__)\n\n if len(no_verification_support) > 0:\n print \"\"\n print \"The following plugins do not support installation verification:\"\n for mem in no_verification_support:\n print \" %s\" % mem\n else:\n print \"You have chosen not to load any plugins.\"",
"def init():\n\n banner(\"init\")\n with show(\"output\"):\n if not env.get('no_apt_update'):\n sudo('apt-get update')\n\n require.directory(env.path, mode=\"777\", use_sudo=True)\n require.directory('/var/run/%s' % env.project_name, owner='www-data', group='www-data', mode='770', use_sudo=True)\n require.directory('/var/log/%s' % env.project_name, owner='www-data', group='www-data', mode='770', use_sudo=True)\n require.directory('/var/log/supervisord/', owner='www-data', group='www-data', mode='770', use_sudo=True)\n require.directory('/var/run/supervisord/', owner='www-data', group='www-data', mode='770', use_sudo=True)\n\n require.deb.packages([\n 'gcc', 'python-all-dev', 'libpq-dev', 'libjpeg-dev', 'libxml2-dev', 'libxslt1-dev', 'libmysqlclient-dev',\n 'libfreetype6-dev', 'libevent-dev', 'supervisor'\n ])\n require.python.pip(version=\"1.0\")\n\n new_virtualenv()\n\n me = run('whoami')\n sudo('adduser %s www-data' % me)\n\n install_nginx()\n\n if env.mysql:\n require.mysql.server(password=env.mysql_password)\n with settings(mysql_user='root', mysql_password=env.mysql_password):\n require.mysql.user(env.mysql_username, env.mysql_password)\n require.mysql.database(env.mysql_dbname, owner=env.mysql_username)",
"def setup_module():\n\n c = Config()\n if c.get('general', 'in_production'): # pragma: no cover\n raise RuntimeError(\"DO NOT run destructive test on production system\")\n\n \"Pull in the filesystem dump from a previous mirth run\"\n mi = MirthInteraction()\n mi.restore_database()\n\n \"Run a quick sanity check, whole module requires a populated db\"\n connection = db_connection('warehouse')\n count = connection.session.query(HL7_Msh).count()\n connection.disconnect()\n\n if count < 4000:\n err = \"Minimal expected count of records not present. \"\\\n \"Be sure to run 'process_testfiles_via_mirth' as a prerequisite\"\n raise RuntimeError(err)"
] |
[
"0.61548936",
"0.60947293",
"0.6059112",
"0.59985375",
"0.5975852",
"0.59535164",
"0.5928912",
"0.58542085",
"0.58447844",
"0.58415353",
"0.5792753",
"0.5785525",
"0.5685478",
"0.568248",
"0.5669951",
"0.55929977",
"0.55778635",
"0.55460817",
"0.553817",
"0.5500487",
"0.5469103",
"0.543061",
"0.54022646",
"0.5399677",
"0.5395807",
"0.5387659",
"0.537897",
"0.5367182",
"0.53505385",
"0.5336982"
] |
0.6883463
|
0
|
an iterator over the keys of metakey and their dereferenced values.
|
def __getitem__(self, metakey):
for key in self.metadb[metakey]:
yield key, self.datadb[key]
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __iter__(self):\n\n # For each key in set of keys\n for key in self.keys_set:\n\n # Yield that key and associated value\n yield key, self.__getitem__(key)",
"def items(self):\n for metakey in self:\n yield metakey, self[metakey]",
"def iteritems(self):\n for key in self:\n yield key, self[key]",
"def _key_index_iter(self: Any) -> Iterator[Tuple[str, Any]]:\n for k, v in vars(self).items():\n yield k, v",
"def itervalues(self):\n for key in self:\n yield self[key]",
"def iteritems(self):\n for key in self:\n yield (key, self[key])",
"def values(self):\n for key in self.metadb.values():\n yield key, self.datadb[key]",
"def iterentries(self):\n for key in self.iterkeys():\n yield self.get(key)",
"def iteritems(self):\n return iter((kvp.key, kvp.value) for kvp in self.keyvaluepair_set.all())",
"def iterkeyrefs(self):\n for key in self.iterkeys():\n yield ref(key)",
"def get_values(self, ckey):\n for next_key, item in yield_obj(self, ckey):\n if isdictinstance(item):\n for final, elem in yield_obj(item, next_key):\n if isdictinstance(elem) and elem.has_key(final):\n yield elem[final]\n else:\n yield elem\n elif isinstance(item, list) or isinstance(item, GeneratorType):\n for final, elem in item:\n for last, att in yield_obj(elem, final):\n if isdictinstance(att) and att.has_key(last):\n yield att[last]\n else:\n yield att",
"def __iter__(self) -> Generator:\n for k in self.raw.keys():\n yield k",
"def iteritems(self):\r\n for wr, value in self.data.iteritems():\r\n key = wr()\r\n if key is not None:\r\n yield key, value",
"def iteritems(self):\r\n for wr, value in self.data.iteritems():\r\n key = wr()\r\n if key is not None:\r\n yield key, value",
"def iteritems(self):\n def make_iter(self=self):\n keys = self.iterkeys()\n while True:\n key = keys.next()\n yield (key, self[key])\n return make_iter()",
"def __iter__(self, *args, **kwargs):\n for key in self.keys(*args, **kwargs):\n yield key",
"def iterkeys(self):\n return iter(kvp.key for kvp in self.keyvaluepair_set.all())",
"def itervalues(self, key=None):\n if key != None:\n vals = self.get(key)\n if vals != None:\n for val in vals:\n yield val\n else:\n for key in self.iterkeys():\n vals = self.get(key)\n for val in vals:\n yield val",
"def itervalues(self):\n def make_iter(self=self):\n keys = self.iterkeys()\n while True:\n yield self[keys.next()]\n return make_iter()",
"def __iter__(self):\n for key, value in self.read():\n yield key, value",
"def itervalues(self):\n return iter(kvp.value for kvp in self.keyvaluepair_set.all())",
"def keys(self):\n return iter(k for k, _ in self._pairs())",
"def itervalues(self, *args, **kwargs):\n for key in self.iterkeys():\n yield self._get(key, *args, **kwargs)",
"def __iter__(self):\n\n for each in list(self.keys()):\n yield each",
"def __iter__(self):\n prefix = len(META_NS) + 2\n for key, value in self.stats.items():\n yield (key[prefix:-6], int(value))",
"def iterkeys(self):",
"def iterkeys(self):",
"def iterkeys(self):\r\n for wr in self.data.iterkeys():\r\n obj = wr()\r\n if obj is not None:\r\n yield obj",
"def iterkeys(self):\r\n for wr in self.data.iterkeys():\r\n obj = wr()\r\n if obj is not None:\r\n yield obj",
"def itervalues(self, multi=False):\n for k, v in self.iteritems(multi=multi):\n yield v"
] |
[
"0.74286705",
"0.74038",
"0.7262638",
"0.7250744",
"0.7151584",
"0.7122191",
"0.70534873",
"0.7005466",
"0.69557893",
"0.6933972",
"0.6917245",
"0.69147",
"0.69001734",
"0.69001734",
"0.6877285",
"0.68627656",
"0.6821079",
"0.6786858",
"0.67688274",
"0.6751377",
"0.6750948",
"0.6731732",
"0.67135966",
"0.6704901",
"0.6664065",
"0.6653711",
"0.6653711",
"0.66402876",
"0.66402876",
"0.66306"
] |
0.74917674
|
0
|
an iterator over the metakeys and their corresponding values.
|
def items(self):
for metakey in self:
yield metakey, self[metakey]
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __iter__(self):\n\n # For each key in set of keys\n for key in self.keys_set:\n\n # Yield that key and associated value\n yield key, self.__getitem__(key)",
"def __getitem__(self, metakey):\n for key in self.metadb[metakey]:\n yield key, self.datadb[key]",
"def itervalues(self):\n for key in self:\n yield self[key]",
"def iteritems(self):\n for key in self:\n yield key, self[key]",
"def values(self):\n for key in self.metadb.values():\n yield key, self.datadb[key]",
"def iteritems(self):\n for key in self:\n yield (key, self[key])",
"def __iter__(self):\n prefix = len(META_NS) + 2\n for key, value in self.stats.items():\n yield (key[prefix:-6], int(value))",
"def __iter__(self):\n for metatag in self.meta.findall(CN('meta:user-defined')):\n yield (metatag.get(CN('meta:name')), metatag.text)",
"def __iter__(self) -> Generator:\n for k in self.raw.keys():\n yield k",
"def __iter__(self):\n for key, value in self.read():\n yield key, value",
"def _key_index_iter(self: Any) -> Iterator[Tuple[str, Any]]:\n for k, v in vars(self).items():\n yield k, v",
"def iteritems(self):\r\n for wr, value in self.data.iteritems():\r\n key = wr()\r\n if key is not None:\r\n yield key, value",
"def iteritems(self):\r\n for wr, value in self.data.iteritems():\r\n key = wr()\r\n if key is not None:\r\n yield key, value",
"def iterentries(self):\n for key in self.iterkeys():\n yield self.get(key)",
"def __iter__(self) -> (str, np.ndarray):\n for k, v in self.fields.items():\n yield k, v",
"def iteritems(self):\n return iter((kvp.key, kvp.value) for kvp in self.keyvaluepair_set.all())",
"def itervalues(self, multi=False):\n for k, v in self.iteritems(multi=multi):\n yield v",
"def __iter__(self):\n for value in self.__dict__.values():\n yield value",
"def __iter__(self):\n\n for each in list(self.keys()):\n yield each",
"def __iter__(self):\n for keyword in self.meta.findall(CN('meta:keyword')):\n yield keyword.text",
"def iteritems(self):\n def make_iter(self=self):\n keys = self.iterkeys()\n while True:\n key = keys.next()\n yield (key, self[key])\n return make_iter()",
"def itervalues(self):\n return iter(kvp.value for kvp in self.keyvaluepair_set.all())",
"def itervalues(self, *args, **kwargs):\n for key in self.iterkeys():\n yield self._get(key, *args, **kwargs)",
"def itervalues(self, key=None):\n if key != None:\n vals = self.get(key)\n if vals != None:\n for val in vals:\n yield val\n else:\n for key in self.iterkeys():\n vals = self.get(key)\n for val in vals:\n yield val",
"def __iter__(self):\r\n for item in self._data:\r\n yield item # yield the KEY\r",
"def __iter__(self, *args, **kwargs):\n for key in self.keys(*args, **kwargs):\n yield key",
"def __iter__(self):\n key = list(self.keys())[0]\n length = len(self[key])\n for i in range(length):\n res = {}\n for key, feature in self.items():\n res[key] = feature.data[feature.name][i]\n yield res",
"def itervalues(self):\n def make_iter(self=self):\n keys = self.iterkeys()\n while True:\n yield self[keys.next()]\n return make_iter()",
"def __iter__(self):\n for key in sorted(self.keys):\n yield key, self[key]",
"def __iter__(self) -> Tuple[str, Any]:\n for attr_name, attr_val in self.__dict__.items():\n yield attr_name, attr_val"
] |
[
"0.77093714",
"0.7637563",
"0.7584621",
"0.7581623",
"0.7512615",
"0.7484198",
"0.7458412",
"0.7443295",
"0.73784",
"0.73640764",
"0.7329393",
"0.73129827",
"0.73129827",
"0.7273179",
"0.7223564",
"0.7205529",
"0.7146982",
"0.71324456",
"0.71234393",
"0.7087726",
"0.707989",
"0.70612514",
"0.7031475",
"0.7015435",
"0.70143056",
"0.69942933",
"0.69654423",
"0.6964025",
"0.6950408",
"0.69156766"
] |
0.7975261
|
0
|
an iterator over the unique keys of all metakeys and their dereferenced values.
|
def unique_values(self):
for key in self.metadb.unique_values():
yield key, self.datadb[key]
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def items(self):\n for metakey in self:\n yield metakey, self[metakey]",
"def iteritems(self):\n for key in self:\n yield key, self[key]",
"def iteritems(self):\n return iter((kvp.key, kvp.value) for kvp in self.keyvaluepair_set.all())",
"def __iter__(self):\n\n # For each key in set of keys\n for key in self.keys_set:\n\n # Yield that key and associated value\n yield key, self.__getitem__(key)",
"def iteritems(self):\n for key in self:\n yield (key, self[key])",
"def iterkeys(self):\n return iter(kvp.key for kvp in self.keyvaluepair_set.all())",
"def itervalues(self):\n for key in self:\n yield self[key]",
"def values(self):\n for key in self.metadb.values():\n yield key, self.datadb[key]",
"def iteritems(self):\r\n for wr, value in self.data.iteritems():\r\n key = wr()\r\n if key is not None:\r\n yield key, value",
"def iteritems(self):\r\n for wr, value in self.data.iteritems():\r\n key = wr()\r\n if key is not None:\r\n yield key, value",
"def iteritems(self):\n def make_iter(self=self):\n keys = self.iterkeys()\n while True:\n key = keys.next()\n yield (key, self[key])\n return make_iter()",
"def __iter__(self):\n if self._len_keys == 1:\n yield from self._dict.keys()\n else:\n for key in self._dict.keys():\n yield tuple(sorted(list(key)))",
"def __iter__(self) -> Generator:\n for k in self.raw.keys():\n yield k",
"def _key_index_iter(self: Any) -> Iterator[Tuple[str, Any]]:\n for k, v in vars(self).items():\n yield k, v",
"def __iter__(self):\n for bucket in self._table:\n if bucket is not None:\n for key in bucket:\n yield key",
"def __getitem__(self, metakey):\n for key in self.metadb[metakey]:\n yield key, self.datadb[key]",
"def keys(self):\n return iter(k for k, _ in self._pairs())",
"def itervalues(self):\n return iter(kvp.value for kvp in self.keyvaluepair_set.all())",
"def itervalues(self):\n def make_iter(self=self):\n keys = self.iterkeys()\n while True:\n yield self[keys.next()]\n return make_iter()",
"def iterentries(self):\n for key in self.iterkeys():\n yield self.get(key)",
"def iterkeys(self):\r\n for wr in self.data.iterkeys():\r\n obj = wr()\r\n if obj is not None:\r\n yield obj",
"def iterkeys(self):\r\n for wr in self.data.iterkeys():\r\n obj = wr()\r\n if obj is not None:\r\n yield obj",
"def iterkeyrefs(self):\n for key in self.iterkeys():\n yield ref(key)",
"def __iter__(self):\n for key in sorted(self.keys):\n yield key, self[key]",
"def __iter__(self):\n\t\treturn self.keys()",
"def iterkeys(self):\n\n for bucket in self.buckets.itervalues():\n for key in bucket.iterkeys():\n yield key",
"def iterkeys(self):\n return self.__iter__()",
"def __iter__(self):\n for metatag in self.meta.findall(CN('meta:user-defined')):\n yield (metatag.get(CN('meta:name')), metatag.text)",
"def keys(self):\n for ts in self:\n yield ts",
"def iterkeys(self):"
] |
[
"0.7411535",
"0.71149856",
"0.70268494",
"0.6994934",
"0.69928217",
"0.69273615",
"0.68922776",
"0.6891039",
"0.6890184",
"0.6890184",
"0.6867791",
"0.6856759",
"0.6842534",
"0.67885214",
"0.67820734",
"0.6760459",
"0.6741788",
"0.6714845",
"0.67108667",
"0.6708048",
"0.6704264",
"0.6704264",
"0.66444737",
"0.6619901",
"0.6612892",
"0.6612412",
"0.6570913",
"0.654111",
"0.6538495",
"0.65358156"
] |
0.75245976
|
0
|
an iterator over the keys whose metakeys satisfy q and their dereferenced values.
|
def query(self, q):
for key in self.metadb.query(q):
yield key, self.datadb[key]
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __iter__(self):\n\n # For each key in set of keys\n for key in self.keys_set:\n\n # Yield that key and associated value\n yield key, self.__getitem__(key)",
"def iterkeys(self):\n return iter(kvp.key for kvp in self.keyvaluepair_set.all())",
"def _key_index_iter(self: Any) -> Iterator[Tuple[str, Any]]:\n for k, v in vars(self).items():\n yield k, v",
"def keys(self):\n return iter(k for k, _ in self._pairs())",
"def __getitem__(self, metakey):\n for key in self.metadb[metakey]:\n yield key, self.datadb[key]",
"def iterkeys(self):\n\n for bucket in self.buckets.itervalues():\n for key in bucket.iterkeys():\n yield key",
"def __iter__(self, *args, **kwargs):\n for key in self.keys(*args, **kwargs):\n yield key",
"def items(self):\n for metakey in self:\n yield metakey, self[metakey]",
"def __iter__(self):\n for bucket in self._table:\n if bucket is not None:\n for key in bucket:\n yield key",
"def __iter__(self) -> Generator:\n for k in self.raw.keys():\n yield k",
"def iterkeys(self):\n return self.__iter__()",
"def iterentries(self):\n for key in self.iterkeys():\n yield self.get(key)",
"def iterkeys(self):\n return iter(self._sequence)",
"def iteritems(self):\n return iter((kvp.key, kvp.value) for kvp in self.keyvaluepair_set.all())",
"def itervalues(self):\n for key in self:\n yield self[key]",
"def iterkeys(self):",
"def iterkeys(self):",
"def itervalues(self):\n return iter(kvp.value for kvp in self.keyvaluepair_set.all())",
"def iterkeys(self):\n r = self.solr.select('%s:%s %s:*'\n % (self.index_uuid_field, self.index_uuid,\n self.d_uid_field))\n for doc in r.results:\n yield doc[self.d_uid_field]\n for _ in range(r.numFound // 10):\n r = r.next_batch()\n for doc in r.results:\n yield doc[self.d_uid_field]",
"def __iter__(self):\n\t\treturn self.keys()",
"def __iter__(self):\n return self.ordered_keys.__iter__()",
"def exact_key_items(self):\n for key_node, value in self.get_tree_entries():\n for key in self._defining_context.infer_node(key_node):\n if is_string(key):\n yield key.get_safe_value(), LazyTreeValue(self._defining_context, value)",
"def iteritems(self):\n for key in self:\n yield key, self[key]",
"def iterkeyrefs(self):\n for key in self.iterkeys():\n yield ref(key)",
"def iterkeys(d):\r\n return iter(getattr(d, _iterkeys)())",
"def iterkeys(d):\n return iter(getattr(d, _iterkeys)())",
"def keys(self):\n for ts in self:\n yield ts",
"def iterkeys(self):\r\n for wr in self.data.iterkeys():\r\n obj = wr()\r\n if obj is not None:\r\n yield obj",
"def iterkeys(self):\r\n for wr in self.data.iterkeys():\r\n obj = wr()\r\n if obj is not None:\r\n yield obj",
"def __iter__(self):\n return iter(self.keys())"
] |
[
"0.65659654",
"0.6442065",
"0.63845867",
"0.62278324",
"0.6215119",
"0.62128973",
"0.6139345",
"0.61231196",
"0.61042035",
"0.6099117",
"0.60899293",
"0.6079643",
"0.6071832",
"0.59990674",
"0.598659",
"0.5984135",
"0.5984135",
"0.59521896",
"0.59317625",
"0.59167755",
"0.5893455",
"0.58888936",
"0.58869857",
"0.5884109",
"0.58667433",
"0.58499455",
"0.5828842",
"0.5808093",
"0.5808093",
"0.5806121"
] |
0.7133837
|
0
|
Overwrite default hyperparameters of a network, based on the flags
|
def overwrite_hyperparams(self):
try:
default_hyperparams = self.hyperparams
for key in default_hyperparams:
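            # If a flag with the same name exists and has a value, it overrides the default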
try:
flag = self.FLAGS[key]
param_value = flag.value
if param_value is not None:
self.hyperparams[key] = param_value
except:
pass
except:
pass
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_hyperparams(use_defaults):\n if use_defaults:\n n_neurons, n_hidden, n_steps, k_prob = default_hyperparams()\n return n_neurons, n_hidden, n_steps, k_prob\n\n print (\"Select number of neurons in recurrent layer (default \" +\n \"100):\")\n n_neurons = int(input())\n print (\"Select number of hidden neurons in fully connected \" +\n \"layer (default 100):\")\n n_hidden = int(input())\n print (\"Select n_steps; the max number of words to be read \" +\n \"from each abstract (default 50):\")\n n_steps = int(input())\n print (\"Select k_prob; the dropout probability (default 0.5):\")\n k_prob = float(input())\n\n return n_neurons, n_hidden, n_steps, k_prob",
"def set_default_params(self, opt):\n self.config.embed_dim = opt.embed_dim or 200\n self.config.rnn_size = opt.rnn_size or 512\n self.config.nrnn_layer = opt.nrnn_layer or 2\n self.config.rnn_dropout = opt.rnn_dropout or 0.5\n self.config.rnnout_dim = 2 * self.config.rnn_size * self.config.nrnn_layer\n ## MULTIMODAL (ATTENTION)\n self.config.cnnout_dim = opt.cnnout_dim or 512\n self.config.cnnout_w = opt.cnnout_w or 14\n self.config.cnnout_h = opt.cnnout_h or 14\n self.config.cnnout_spat = self.config.cnnout_w * self.config.cnnout_h\n self.config.multfeat_dim = opt.multfeat_dim or 512\n self.config.attfeat_dim = opt.attfeat_dim or 256\n self.config.netout_dim = opt.answer_size\n ## [attlstm] in: {2*multfeat_dim, att_rnn_s_dim} {att_rnn_size, att_rnn_s_dim}\n self.config.att_rnn_size = opt.att_rnn_size or 512\n self.config.att_rnn_nlayer = opt.att_rnn_nlayer or 1\n self.config.att_rnn_dropout = opt.att_rnn_dropout or 0.0\n # TODO: There could be a protential bugs if self.config.att_rnn_nlayer > 1\n assert(self.config.att_rnn_nlayer == 1)\n self.config.att_rnn_s_dim = self.config.att_rnn_size * self.config.att_rnn_nlayer\n\n # optimization\n self.config.max_grad_norm = opt.max_grad_norm or 0.1\n self.config.initializer_scale = 0.008",
"def modify_commandline_options(parser, is_train=True):\n parser.set_defaults(norm='batch', netG='unet_256', dataset_mode='aligned')\n if is_train:\n parser.set_defaults(pool_size=0, gan_mode='vanilla')\n parser.add_argument('--lambda_feat', type=float, default=10.0, help='weight for feature-matching loss')\n\n return parser",
"def set_parameters(targeted_flag='true',\r\n tv_flag='false',\r\n hinge_flag='true',\r\n cos_flag='false',\r\n interpolation='bilinear',\r\n model_type='small',\r\n loss_type='center',\r\n dataset_type='vgg',\r\n attack='CW',\r\n norm='2',\r\n epsilon=0.1,\r\n iterations=100,\r\n binary_steps=8,\r\n learning_rate=0.01,\r\n epsilon_steps=0.01,\r\n init_const=0.3,\r\n mean_loss='embeddingmean',\r\n batch_size=-1,\r\n margin=5.0,\r\n amplification=2.0):\r\n params = {}\r\n\r\n params['model_type'] = model_type\r\n params['loss_type'] = loss_type\r\n params['dataset_type'] = dataset_type\r\n params['attack'] = attack\r\n params['norm'] = norm\r\n params['epsilon'] = epsilon\r\n params['iterations'] = iterations\r\n params['binary_steps'] = binary_steps\r\n params['learning_rate'] = learning_rate\r\n params['epsilon_steps'] = epsilon_steps\r\n params['init_const'] = init_const\r\n params['mean_loss'] = mean_loss\r\n params['batch_size'] = batch_size\r\n params['targeted_flag'] = string_to_bool(targeted_flag)\r\n params['tv_flag'] = string_to_bool(tv_flag)\r\n params['hinge_flag'] = string_to_bool(hinge_flag)\r\n params['cos_flag'] = string_to_bool(cos_flag)\r\n params['margin'] = margin\r\n params['amp'] = amplification\r\n\r\n if model_type == 'small' and loss_type == 'center':\r\n params['pixel_max'] = 1.0\r\n params['pixel_min'] = -1.0\r\n else:\r\n params['pixel_max'] = 1.0\r\n params['pixel_min'] = 0.0\r\n\r\n if (dataset_type == 'vggsmall'):\r\n params['align_dir'] = VGG_ALIGN_160_DIR\r\n params['test_dir'] = VGG_TEST_DIR\r\n elif model_type == 'large' or dataset_type == 'casia':\r\n params['align_dir'] = ALIGN_160_DIR\r\n elif model_type == 'small':\r\n params['align_dir'] = ALIGN_96_DIR\r\n else:\r\n ValueError('ValueError: Argument must be either \"small\" or \"large\".')\r\n \r\n if interpolation == 'nearest':\r\n params['interpolation'] = cv2.INTER_NEAREST\r\n elif interpolation == 'bilinear':\r\n params['interpolation'] = cv2.INTER_LINEAR\r\n elif interpolation == 'bicubic':\r\n params['interpolation'] = cv2.INTER_CUBIC\r\n elif interpolation == 'lanczos':\r\n params['interpolation'] = cv2.INTER_LANCZOS4\r\n elif interpolation == 'super':\r\n print('finish later')\r\n else:\r\n raise ValueError('ValueError: Argument must be of the following, [nearest, bilinear, bicubic, lanczos, super].')\r\n\r\n if params['hinge_flag']:\r\n params['attack_loss'] = 'hinge'\r\n else:\r\n params['attack_loss'] = 'target'\r\n if not params['targeted_flag']:\r\n params['attack_loss'] = 'target'\r\n if norm == 'inf':\r\n norm_name = 'i'\r\n else:\r\n norm_name = '2'\r\n if params['tv_flag']:\r\n tv_name = '_tv'\r\n else:\r\n tv_name = ''\r\n if params['cos_flag']:\r\n cos_name = '_cos'\r\n else:\r\n cos_name = ''\r\n\r\n params['model_name'] = '{}_{}'.format(model_type, loss_type)\r\n if dataset_type == 'casia' or dataset_type == 'vggsmall':\r\n params['model_name'] = dataset_type\r\n params['attack_name'] = '{}_l{}{}{}'.format(attack.lower(), norm_name, tv_name, cos_name)\r\n\r\n return params",
"def set_parameters(self, create_models=True, **parameters):\n flag_nn_opti = False\n\n # Set attributes\n for param, value in parameters.items():\n if param in self.DEFAULT_VALUES.keys():\n if getattr(self, param) != value:\n # We change param value\n setattr(self, param, value)\n if param in ['hidden_layers', 'lr']:\n flag_nn_opti = True\n\n else:\n raise Exception(f'Parameter {param} not known.')\n\n # Create torch instances\n if create_models and flag_nn_opti:\n self._create_networks_and_optimizer()",
"def default_optimization_hparams() -> Dict[str, Any]:\n return {\n \"optimizer\": {\n \"type\": \"Adam\",\n \"kwargs\": {\n \"lr\": 0.001\n }\n },\n \"learning_rate_decay\": {\n \"type\": \"\",\n \"kwargs\": {}\n },\n \"gradient_clip\": {\n \"type\": \"\",\n \"kwargs\": {}\n },\n \"gradient_noise_scale\": None,\n # TODO(zhiting): allow module-level control of gradient_multipliers\n \"name\": None\n }",
"def config_override(params, flags_obj):\n # Change runtime.tpu to the real tpu.\n params.override({\n 'runtime': {\n 'tpu': flags_obj.tpu,\n }\n })\n\n # Get the first level of override from `--config_file`.\n # `--config_file` is typically used as a template that specifies the common\n # override for a particular experiment.\n for config_file in flags_obj.config_file or []:\n params = hyperparams.override_params_dict(\n params, config_file, is_strict=True)\n\n # Get the second level of override from `--params_override`.\n # `--params_override` is typically used as a further override over the\n # template. For example, one may define a particular template for training\n # ResNet50 on ImageNet in a config fid pass it via `--config_file`,\n # then define different learning rates and pass it via `--params_override`.\n if flags_obj.params_override:\n params = hyperparams.override_params_dict(\n params, flags_obj.params_override, is_strict=True)\n\n params.validate()\n params.lock()\n\n pp = pprint.PrettyPrinter()\n logging.info('Final experiment parameters: %s', pp.pformat(params.as_dict()))\n\n model_dir = flags_obj.model_dir\n if 'train' in flags_obj.mode:\n # Pure eval modes do not output yaml files. Otherwise continuous eval job\n # may race against the train job for writing the same file.\n train_utils.serialize_config(params, model_dir)\n\n return params",
"def reset_parameters(self):\n ih = (param for name, param in self.named_parameters() if 'weight_ih' in name)\n hh = (param for name, param in self.named_parameters() if 'weight_hh' in name)\n b = (param for name, param in self.named_parameters() if 'bias' in name)\n for t in ih:\n torch.nn.init.xavier_uniform_(t)\n for t in hh:\n torch.nn.init.orthogonal_(t)\n for t in b:\n torch.nn.init.constant_(t, 0)",
"def reset_parameters(self):\n ih = (param for name, param in self.named_parameters() if 'weight_ih' in name)\n hh = (param for name, param in self.named_parameters() if 'weight_hh' in name)\n b = (param for name, param in self.named_parameters() if 'bias' in name)\n for t in ih:\n torch.nn.init.xavier_uniform_(t)\n for t in hh:\n torch.nn.init.orthogonal_(t)\n for t in b:\n torch.nn.init.constant_(t, 0)",
"def reset_parameters(self):\n ih = (param for name, param in self.named_parameters() if 'weight_ih' in name)\n hh = (param for name, param in self.named_parameters() if 'weight_hh' in name)\n b = (param for name, param in self.named_parameters() if 'bias' in name)\n for t in ih:\n torch.nn.init.xavier_uniform_(t)\n for t in hh:\n torch.nn.init.orthogonal_(t)\n for t in b:\n torch.nn.init.constant_(t, 0)",
"def reset_params(self):\n for pp in self.params:\n if 'optimizer_param' in pp.tags:\n pp.set_value(np.zeros(pp.get_value(borrow=True).shape, dtype=theano.config.floatX))",
"def compile_update_default(nnet, inputs, targets):\n\n floatX = Cfg.floatX\n C = Cfg.C\n\n if len(nnet.all_layers) > 1:\n feature_layer = nnet.all_layers[-2]\n else:\n feature_layer = nnet.input_layer\n final_layer = nnet.svm_layer\n trainable_params = lasagne.layers.get_all_params(final_layer,\n trainable=True)\n\n # Regularization\n if Cfg.weight_decay:\n l2_penalty = (floatX(0.5) / C) * get_l2_penalty(nnet, Cfg.include_bias)\n else:\n l2_penalty = T.cast(0, dtype='floatX')\n\n # Backpropagation\n prediction = lasagne.layers.get_output(final_layer, inputs=inputs,\n deterministic=False)\n objective, train_acc = final_layer.objective(prediction, targets)\n train_loss = T.cast((objective) / targets.shape[0], dtype='floatX')\n train_acc = T.cast(train_acc * 1. / targets.shape[0], dtype='floatX')\n train_obj = l2_penalty + train_loss\n updates = get_updates(nnet, train_obj, trainable_params, solver=nnet.solver)\n nnet.backprop = theano.function([inputs, targets],\n [train_obj, train_acc],\n updates=updates)\n\n # Hinge loss\n nnet.hinge_loss = theano.function([inputs, targets],\n [train_loss, train_acc])\n\n # Forwardpropagation\n test_prediction = lasagne.layers.get_output(final_layer, inputs=inputs,\n deterministic=True)\n if nnet.data.n_classes == 2:\n scores = test_prediction[:, 1] - test_prediction[:, 0]\n else:\n scores = T.zeros_like(targets)\n objective, test_acc = final_layer.objective(test_prediction, targets)\n test_loss = T.cast(objective / targets.shape[0], dtype='floatX')\n test_acc = T.cast(test_acc * 1. / targets.shape[0], dtype='floatX')\n test_obj = l2_penalty + test_loss\n # get network feature representation\n test_rep = lasagne.layers.get_output(feature_layer, inputs=inputs,\n deterministic=True)\n test_rep_norm = test_rep.norm(L=2, axis=1)\n nnet.forward = theano.function([inputs, targets],\n [test_obj, test_acc, scores, l2_penalty,\n test_rep_norm, test_loss])",
"def override_params_using_opts(self):\n if self.opts is not None:\n for key, val in self.models.items():\n for opt in self.opts.__dict__:\n if opt in self.models[key].__dict__ \\\n and self.opts.__dict__[opt] is not None:\n self.models[key].__dict__[opt] = self.opts.__dict__[opt]\n self.label+='_'+opt+'_'+str(self.opts.__dict__[opt])\n print('Model: ',key,'. Overriding parameter ',opt,' to ',\\\n self.opts.__dict__[opt])\n print('Setting label to ',self.label)",
"def reset_parameters(self) -> None:\n for i in range(self._num_edge_types):\n nn.init.xavier_uniform_(self.edge_message_functions[i].weight,)\n nn.init.constant_(self.edge_message_functions[i].bias, val=0.0)\n nn.init.orthogonal_(self.rnn.weight_hh)\n nn.init.xavier_uniform_(self.rnn.weight_ih)\n nn.init.constant_(self.rnn.bias_hh, val=0.0)\n nn.init.constant_(self.rnn.bias_ih, val=0.0)",
"def modify_commandline_options(parser, is_train=True):\n # changing the default values to match the pix2pix paper (https://phillipi.github.io/pix2pix/)\n parser.set_defaults(norm='batch', netG='unet_256', dataset_mode='aligned')\n parser.add_argument('--TPN', type=str, default=None, help='Use the Time Prediction Network (TPN), and load specified model')\n\n if is_train:\n parser.set_defaults(pool_size=0, gan_mode='vanilla')\n parser.add_argument('--lambda_L1', type=float, default=100.0, help='weight for L1 loss')\n parser.add_argument('--lambda_L2', type=float, default=0.0, help='weight for tumour tissue over rest of brain. Range [0,1]')\n parser.add_argument('--gamma', type=float, default=1.0, help='weight for time loss, when TPN is set to True')\n return parser",
"def training_from_flag(flags):\n \n print(flags)\n\n # Get the data\n train_loader, test_loader = data_reader.read_data(flags)\n\n print(\"Making network now\")\n\n # Make Network\n if flags.use_conv:\n ntwk = Network(CNN, flags, train_loader, test_loader)\n else:\n ntwk = Network(MLP, flags, train_loader, test_loader)\n total_param = sum(p.numel() for p in ntwk.model.parameters() if p.requires_grad)\n print(\"Total learning parameter is: %d\"%total_param)\n \n # Training process\n print(\"Start training now...\")\n ntwk.train()\n\n # Do the house keeping, write the parameters and put into folder, also use pickle to save the flags obejct\n write_flags_and_BVE(flags, ntwk)\n # put_param_into_folder(ntwk.ckpt_dir)",
"def default_opts():\n return tf.contrib.training.HParams(\n num_repeats=1,\n superclass=False,\n class_proportion=1.0,\n invert_images=False,\n min_val=0, # set any 0 in the input image, to this new min_val. ---> if >0, then don't do anything\n train_classes=['5', '6', '7', '8', '9'],\n test_classes=['5', '6', '7', '8', '9'],\n degrade_type='vertical', # vertical, horizontal or random: the model completes image degraded by this method\n degrade_step='hidden', # 'test' (apply at gen of test set), or 'input', 'hidden', 'none' (applied in graph)\n completion_gain=1.0,\n train_recurse=False,\n test_recurse=False,\n recurse_iterations=5, # if >1, then PC is recursive (only supported for Hopfield i.e. no recursion on training)\n rsummary_batches=2,\n input_mode={\n \"train_first\": \"complete\",\n \"train_inference\": \"complete\",\n \"test_first\": \"complete\",\n \"test_inference\": \"complete\"\n },\n evaluate=True,\n train=True,\n visualise_vc=False,\n visualise_dg_at_vc=False,\n visualise_pc_at_dg=False,\n visualise_pc_at_vc=False,\n evaluate_mode='simple' # simple = calc compl. of pc use pattern_completion_workflow,\n # expA_isolate_view = test completion and visualise at each stage\n # expA_isolate = test completion and range of tests to isolate performance of components\n )",
"def default_hparams():\n return tf.contrib.training.HParams(\n decay_rate=0.96,\n decay_steps=2000,\n leaky=False,\n learning_rate=0.001,\n # loss_type=[sigmoid, softmax, margin]\n loss_type='margin',\n # mask_type=[none, label, norm, routing, weighted-routing]\n mask_type='weighted-routing',\n balance_factor=0.005,\n num_prime_capsules=32,\n num_latent_capsules=16,\n num_latent_atoms=16,\n padding='VALID',\n remake=True,\n routing=3,\n verbose=True,\n unsupervised=True,\n ema_decay=0.99,\n boost_step=50,\n boost_factor=0.1,\n target_min_freq=0.03,\n target_max_freq=0.12,\n boosting=True\n )",
"def modify_commandline_options(parser, is_train=True):\n # changing the default values to match the pix2pix paper (https://phillipi.github.io/pix2pix/)\n parser.set_defaults(norm='batch', netG='unet_256', dataset_mode='aligned')\n if is_train:\n parser.set_defaults(pool_size=0, gan_mode='vanilla')\n parser.add_argument('--lambda_S', type=float, default=1.0, help='weight for Shading loss')\n parser.add_argument('--lambda_BA', type=float, default=1.0, help='weight for Brightest area loss')\n # parser.add_argument('--lambda_BP', type=float, default=1.0, help='weight for Brightest pixel loss')\n parser.add_argument('--lambda_BC', type=float, default=1.0, help='weight for Brightest coordinate loss')\n parser.add_argument('--lambda_regLTM', type=float, default=1.0, help='weight for LTM regularization.')\n parser.add_argument('--latent_Ls', action='store_true', help='Input Ls as latent.')\n parser.add_argument('--latent_Lt', action='store_true', help='Input Lt as latent.')\n parser.add_argument('--in_Ls', action='store_true', help='Input Ls as Input.')\n parser.add_argument('--in_Lt', action='store_true', help='Input Lt as Input.')\n parser.add_argument('--LTM', action='store_true', help='Use LTM.')\n parser.add_argument('--cas', action='store_true', help='Cascade network.')\n parser.add_argument('--no_brightness', action='store_true', help='No to calc brightness')\n parser.add_argument('--no_latent_color', action='store_true', help='Not to extract latent color. (Not to use with LTM)')\n parser.add_argument('--cat_In', action='store_true', help='Concat Input')\n parser.add_argument('--reg_LTM', action='store_true', help='Regularizaiton LTM.')\n parser.add_argument('--enc_LTM', action='store_true', help='Encoding LTM.')\n parser.add_argument('--enc_ill_hid', type=int, default=-1, help='The hidden layer dimention of illumination encoder. if -1 no to use hidden layer.')\n parser.add_argument('--dim_LTM', type=int, default=5, help='Encoding LTM number.')\n \n return parser",
"def set_hyperparams(self, params):",
"def reset_parameters(self) -> None:\n std = math.sqrt(3 / self.in_features)\n self.weight.data.uniform_(-std, std)\n self.bias.data.uniform_(-std, std)",
"def reset_parameters(self, reset_mode='glorot_uniform'):\n\n if reset_mode == 'glorot_uniform':\n if self.weight_decomp == 'block':\n nn.init.xavier_uniform_(self.blocks, gain=nn.init.calculate_gain('relu'))\n elif self.weight_decomp == 'basis':\n nn.init.xavier_uniform_(self.bases, gain=nn.init.calculate_gain('relu'))\n nn.init.xavier_uniform_(self.comps, gain=nn.init.calculate_gain('relu'))\n else:\n nn.init.xavier_uniform_(self.weights, gain=nn.init.calculate_gain('relu'))\n\n if self.bias is not None:\n torch.nn.init.zeros_(self.bias)\n elif reset_mode == 'schlichtkrull':\n if self.weight_decomp == 'block':\n nn.init.xavier_uniform_(self.blocks, gain=nn.init.calculate_gain('relu'))\n elif self.weight_decomp == 'basis':\n nn.init.xavier_uniform_(self.bases, gain=nn.init.calculate_gain('relu'))\n nn.init.xavier_uniform_(self.comps, gain=nn.init.calculate_gain('relu'))\n else:\n nn.init.xavier_uniform_(self.weights, gain=nn.init.calculate_gain('relu'))\n\n if self.bias is not None:\n torch.nn.init.zeros_(self.bias)\n elif reset_mode == 'uniform':\n stdv = 1.0 / math.sqrt(self.weights.size(1))\n if self.weight_decomp == 'block':\n self.blocks.data.uniform_(-stdv, stdv)\n elif self.weight_decomp == 'basis':\n self.bases.data.uniform_(-stdv, stdv)\n self.comps.data.uniform_(-stdv, stdv)\n else:\n self.weights.data.uniform_(-stdv, stdv)\n\n if self.bias is not None:\n self.bias.data.uniform_(-stdv, stdv)\n else:\n raise NotImplementedError(f'{reset_mode} parameter initialisation method has not been implemented')",
"def voxel_env_override_defaults(env, parser):\n parser.set_defaults(\n encoder_type='conv',\n encoder_subtype='convnet_simple',\n hidden_size=512,\n obs_subtract_mean=0.0,\n obs_scale=255.0,\n actor_worker_gpus=[0],\n )",
"def update_config(config, args):\n if args.n_train is not None:\n config['data']['n_train'] = args.n_train\n if args.n_valid is not None:\n config['data']['n_valid'] = args.n_valid\n if args.real_weight is not None:\n config['data']['real_weight'] = args.real_weight\n if args.lr is not None:\n config['optimizer']['learning_rate'] = args.lr\n if args.hidden_dim is not None:\n config['model']['hidden_dim'] = args.hidden_dim\n if args.n_graph_iters is not None:\n config['model']['n_graph_iters'] = args.n_graph_iters\n if args.batch_size is not None:\n config['data']['batch_size'] = args.batch_size\n if args.n_epochs is not None:\n config['training']['n_epochs'] = args.n_epochs\n if args.weight_decay is not None:\n config['optimizer']['weight_decay'] = args.weight_decay\n\n return config",
"def set_training_params(use_defaults):\n if use_defaults:\n n_epochs, batch_size, epsilon = default_training_params()\n return n_epochs, batch_size, epsilon\n\n print (\"Select number of epochs to train (default 100):\")\n n_epochs = int(input())\n print (\"Select batch size (default 64):\")\n batch_size = int(input())\n print (\"Select learning rate (default 0.0001):\")\n epsilon = float(input())\n return n_epochs, batch_size, epsilon",
"def _set_training_params(self, params):\n self.lyapunov_hybrid_system.lyapunov_relu.load_state_dict(\n params[\"lyap_relu_params\"])\n if not self.R_options.fixed_R:\n self.R_options._variables = params[\"R_params\"].clone()\n if isinstance(self.lyapunov_hybrid_system.system,\n feedback_system.FeedbackSystem):\n self.lyapunov_hybrid_system.system.controller_network.\\\n load_state_dict(params[\"controller_params\"])",
"def reset_parameters(self):\n init_method = getattr(init, self.initialization)\n for layer in range(self.num_layers):\n fc = self.get_fc(layer)\n init_method(fc.weight.data)\n if self.use_bias:\n init.constant(fc.bias.data, val=0)\n init_method(self.out.weight.data)\n init.constant(self.out.bias.data, val=0)",
"def default_transformer_poswise_net_hparams(input_dim: int, output_dim: int=512) ->Dict[str, Any]:\n return {'layers': [{'type': 'Linear', 'kwargs': {'in_features': input_dim, 'out_features': output_dim * 4, 'bias': True}}, {'type': 'ReLU', 'kwargs': {'inplace': True}}, {'type': 'Dropout', 'kwargs': {'p': 0.1}}, {'type': 'Linear', 'kwargs': {'in_features': output_dim * 4, 'out_features': output_dim, 'bias': True}}], 'name': 'ffn'}",
"def step6_set_gan_params(params):\n global GAN_PARAMS\n GAN_PARAMS = {**GAN_PARAMS, **params}",
"def init_parameters(obj, hyperparameters):\n # Initialize Global Configuration Parameter\n params = hyperparameters['global']\n setattr(obj, 'param', params)\n\n # Initialize Attributes (Pre-Checked Parameters)\n setattr(obj, 'learning_rate', params['learning_rate'])\n setattr(obj, 'loss', params['loss'])\n setattr(obj, 'max_iter', params['max_iter'])\n\n if params['loss'] == 'least_squares':\n setattr(obj, 'num_classes', 1)\n elif params['loss'] in ['binary_crossentropy', 'categorical_crossentropy', 'auto']:\n setattr(obj, 'num_classes', params['num_classes'])\n\n # Initialize Attributes (Optional Values - Based on Default Parameters)\n if 'l2_regularization' not in params or params['l2_regularization'] is None:\n setattr(obj, 'l2_regularization', 0)\n else:\n setattr(obj, 'l2_regularization', params['l2_regularization'])\n\n if 'max_bins' not in params:\n setattr(obj, 'max_bins', 255)\n else:\n setattr(obj, 'max_bins', params['max_bins'])\n\n if 'max_depth' not in params or params['max_depth'] is None:\n setattr(obj, 'max_depth', None)\n else:\n setattr(obj, 'max_depth', params['max_depth'])\n\n if 'max_leaf_nodes' not in params or params['max_leaf_nodes'] is None:\n setattr(obj, 'max_leaf_nodes', 31)\n else:\n setattr(obj, 'max_leaf_nodes', params['max_leaf_nodes'])\n\n if 'min_samples_leaf' not in params or params['min_samples_leaf'] is None:\n setattr(obj, 'min_samples_leaf', 20)\n else:\n setattr(obj, 'min_samples_leaf', params['min_samples_leaf'])\n\n if 'random_state' in params:\n setattr(obj, 'random_state', params['random_state'])\n else:\n setattr(obj, 'random_state', None)\n\n if 'scoring' in params:\n setattr(obj, 'scoring', params['scoring'])\n else:\n setattr(obj, 'scoring', None)\n\n if 'verbose' not in params or params['verbose'] is None:\n setattr(obj, 'verbose', False)\n else:\n setattr(obj, 'verbose', True)\n\n return obj"
] |
[
"0.6560901",
"0.6490657",
"0.62144095",
"0.6189958",
"0.6081534",
"0.60703486",
"0.6063563",
"0.60463375",
"0.60463375",
"0.60463375",
"0.6041865",
"0.6037213",
"0.60356396",
"0.60238945",
"0.5970691",
"0.59528214",
"0.5942642",
"0.59286654",
"0.5928044",
"0.5914245",
"0.5908478",
"0.5814068",
"0.5808892",
"0.5800626",
"0.5792644",
"0.57898605",
"0.57798517",
"0.5779406",
"0.5768885",
"0.575417"
] |
0.75112396
|
0
|
Saves the session (i.e., the model weights) as a checkpoint
|
def save_session(self):
    # Checkpoint directory. TensorFlow assumes it already exists, so create it here if it is missing.
checkpoint_dir = os.path.abspath(os.path.join(self.FLAGS.model_dir, "checkpoints"))
checkpoint_prefix = os.path.join(checkpoint_dir, "model")
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
current_step = tf.train.global_step(self.session, self.global_step)
path = self.saver.save(self.session, checkpoint_prefix, global_step=current_step)
print("Saved model checkpoint to {}\n".format(path))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def save(self, checkpoint) -> None:\r\n self.model.save(checkpoint)",
"def save_checkpoint(self):\n \n if not os.path.isdir(self.path + '/checkpoint/'):\n os.makedirs(self.path + '/checkpoint/')\n\n if self.saver == None:\n with self.graph.as_default():\n self.saver = tf.train.Saver(tf.global_variables())\n\n self.saver.save(self.session, self.path + '/checkpoint/model.ckpt')",
"def checkpoint(self):\n save()",
"def backup_session(saver, sess, model_dir, global_t, n_episode=0):\n if not os.path.exists(model_dir):\n os.makedirs(model_dir)\n filename = \"checkpoint-%d\" % (n_episode)\n saver.save(sess, model_dir + \"/\" + filename, global_step=global_t)\n return",
"def save(self):\n\n if self.ckpt_manager is not None:\n save_path = self.ckpt_manager.save()\n print(\"Saved checkpoint at: {}\".format(save_path))\n else:\n print(\"There is no checkpoint manager supplied for saving the \"\n \"network weights, optimizer, or other trackables.\")\n print(\"Therefore these will not be saved and the training will \"\n \"start from default values in the future.\")\n print(\"Consider using a checkpoint manager to save the network \"\n \"weights and optimizer.\")",
"def save_model_checkpoint(model, optimizer, global_step, epoch_info, file_name):\n output = {\n \"model\" : model.state_dict(),\n \"optimizer\" : optimizer.state_dict(),\n \"global_step\" : global_step + 1,\n \"epoch_info\" : epoch_info\n }\n torch.save(output, file_name)",
"def save(self):\n\n self.saver.save(self.sess, self.path + '/tensorflow-model', global_step=self.counter.count)",
"def save(self,sess):\n self.saver.save(sess,\"./Models/\" + self.mod_name + \".ckpt\")",
"def save(self, sess):\n ckpt_path = os.path.join(self.model.ckpt_dir, 'model')\n if not os.path.exists(self.model.ckpt_dir):\n os.makedirs(self.model.ckpt_dir)\n self.saver.save(sess, ckpt_path, global_step=self.gstep)",
"def save_checkpoint(model, save_path):\n torch.save(model.state_dict(), save_path)",
"def write_checkpoint(self, session):\n base_save_path = self.params.cp_save_dir+self.params.model_name+\"_v\"+self.params.version\n full_save_path = self.full_saver.save(session,\n save_path=base_save_path,\n global_step=self.global_step,\n latest_filename=self.params.cp_latest_filename)\n self.logger.log_info(\"Full model saved in file %s\"%full_save_path)\n return base_save_path",
"def save_checkpoint(self) -> Dict[str, Union[Dict[str, torch.Tensor], dict]]:\n if isinstance(self.model, nn.DataParallel) or isinstance(self.model, nn.parallel.DistributedDataParallel):\n model = self.model.module.state_dict()\n else:\n model = self.model.state_dict()\n\n checkpoint = {\n \"model_state_dict\": deepcopy(model),\n \"optimizer_state_dict\": deepcopy(self.optimizer.state_dict()),\n }\n return checkpoint",
"def _save_model(graph_or_sess):\r\n if isinstance(graph_or_sess, tf.Graph):\r\n ops = graph_or_sess.get_operations()\r\n for op in ops:\r\n if 'variable' in op.type.lower():\r\n raise ValueError('Please input a frozen graph (no variables). Or pass in the session object.')\r\n\r\n with graph_or_sess.as_default():\r\n sess = tf.Session(config=configProto)\r\n\r\n fake_var = tf.Variable([0.0], name=\"fake_var\")\r\n sess.run(tf.global_variables_initializer())\r\n else:\r\n sess=graph_or_sess\r\n\r\n PATH = os.path.join(\"model\", \"tmp-model\")\r\n make_dir(path = os.path.dirname(PATH))\r\n saver = tf.train.Saver()\r\n #i should deal with the case in which sess is closed.\r\n saver.save(sess, PATH)\r\n\r\n if isinstance(graph_or_sess, tf.Graph):\r\n sess.close()\r\n\r\n return PATH + \".meta\"",
"def save_states(self, checkpoint):\n raise NotImplementedError()",
"def saveCheckpoint(self):\n time_stamp = time.strftime('%Y%m%d%H%M%S', time.gmtime())\n state_filename = os.path.join(self.saving_dir, 'checkpoint.' + time_stamp + '.pth.tar')\n mem_filename = os.path.join(self.saving_dir, 'memory.' + time_stamp + '.pth.tar')\n state = self.getSavingState()\n memory = {\n 'memory': self.memory\n }\n torch.save(state, state_filename)\n torch.save(memory, mem_filename)",
"def save_checkpoint(self, fname, save_optimizer=True):\n # -- Set the network to the full MultiHead_Module network to save everything in the class not only the current model -- #\n self.network = self.mh_network\n\n # -- Use parent class to save checkpoint for MultiHead_Module model consisting of self.model, self.body and self.heads -- #\n super().save_checkpoint(fname, save_optimizer)\n\n # -- Set the flag in already_trained_on -- #\n if not self.already_trained_on[str(self.fold)]['checkpoint_should_exist']:\n # -- Set the flag to True -- #\n self.already_trained_on[str(self.fold)]['checkpoint_should_exist'] = True\n # -- Add the current head keys for restoring (should be in correct order due to OrderedDict type of heads) -- #\n self.already_trained_on[str(self.fold)]['tasks_at_time_of_checkpoint'] = list(self.mh_network.heads.keys())\n # -- Add the current active task for restoring -- #\n self.already_trained_on[str(self.fold)]['active_task_at_time_of_checkpoint'] = self.mh_network.active_task\n # -- Save the updated dictionary as a json file -- #\n save_json(self.already_trained_on, join(self.trained_on_path, self.extension+'_trained_on.json'))\n\n # -- Reset network to the assembled model to continue training -- #\n self.network = self.mh_network.model",
"def save_checkpoint(self):\n checkpoin_path = self.get_checkpoint_path()\n _logger.info('Save checkpoint ignored by tuner, checkpoint path: %s', checkpoin_path)",
"def save_checkpoint(self, session: tf.Session, global_step: int):\n _delete_old_checkpoints(str(self.info.checkpoint_path))\n _save_checkpoint(session, str(self.info.checkpoint_path),\n str(self.info.model_file), global_step)",
"def save_checkpoint(model: nn.Module, args: Namespace, path: str):\r\n state = {\r\n 'args': args,\r\n 'state_dict': model.state_dict()\r\n }\r\n torch.save(state, path)",
"def save_to_checkpoint(self, chkpt):\n chkpt[self.name] = self.state_dict()",
"def save_checkpoint(self, model):\n # print(f\"save model {self.save_model_path}\")\n torch.save(model.state_dict(), self.save_model_path)",
"def save_checkpoint(self, filename=None):\n filename = os.path.join(self.args.checkpoint_dir, filename)\n state = {\n 'epoch': self.current_epoch + 1,\n 'iteration': self.current_iter,\n 'state_dict': self.model.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n 'best_MIou':self.best_MIou\n }\n torch.save(state, filename)",
"def save_checkpoint(self, checkpoint_path='checkpoint.pth'):\n # Move the model back to the cpu so it can be loaded onto machines\n # without gpu's as well.\n self.model.to('cpu')\n\n checkpoint = {\n 'model_architecture': self.model_architecture,\n 'input_size': self.input_size,\n 'output_size': self.output_size,\n 'hidden_layers': self.hidden_layers,\n 'learn_rate': self.learn_rate,\n 'drop_p': self.drop_p,\n 'class_to_idx': self.model.class_to_idx,\n 'current_epoch': self.model.current_epoch,\n 'optimizer_state_dict': self.optimizer.state_dict(),\n 'model_state_dict': self.model.state_dict()\n }\n torch.save(checkpoint, checkpoint_path)",
"def save_checkpoint(self, label):\n model_dir = os.path.join(\n config.results_dir, config.experiment_name, 'checkpoints')\n os.makedirs(model_dir, exist_ok=True)\n model_file = os.path.join(model_dir, '{}_net.pth.tar'.format(label))\n\n model_dict = {'net_state_dict': self.net.state_dict(),\n 'use_cuda': self.use_cuda}\n\n print(\"Saving model to {}\".format(model_file))\n torch.save(model_dict, model_file)",
"def checkpoint():",
"def save_checkpoint(self, filename='checkpoint.pth'):\n torch.save(self.state_dict(), filename)",
"def save_checkpoint(state, filename):\n print (\"=> Saving a new best\")\n torch.save(state, filename) # save checkpoint",
"def save_session(self):\r\n if not os.path.exists(self.config.dir_model):\r\n os.makedirs(self.config.dir_model)\r\n self.saver.save(self.sess, self.config.dir_model)\r\n print(\"Save session succeed\")",
"def save_checkpoint_manual(model: LFADS, path: str):\n model_wts = [v.numpy() for v in model.trainable_variables]\n optim_wts = model.optimizer.get_weights()\n checkpoint = {\"model\": model_wts, \"optimizer\": optim_wts}\n with open(path, \"wb\") as fout:\n pickle.dump(checkpoint, fout)",
"def save(self, checkpoint_dir, step):\n model_name = \"CNN.model\"\n model_dir = \"%s\" % (\"cnn\")\n checkpoint_dir = os.path.join(checkpoint_dir, model_dir)\n \n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n \n self.saver.save(self.sess,\n os.path.join(checkpoint_dir, model_name),\n global_step=step)"
] |
[
"0.7273272",
"0.721551",
"0.7180363",
"0.717209",
"0.7092982",
"0.70917594",
"0.70860475",
"0.70602214",
"0.7046638",
"0.7022896",
"0.70210314",
"0.69389325",
"0.69260246",
"0.6919135",
"0.6914021",
"0.6907725",
"0.6904546",
"0.6894275",
"0.6886038",
"0.68804497",
"0.6876584",
"0.6876471",
"0.68637013",
"0.68632954",
"0.68474376",
"0.6845208",
"0.68440896",
"0.682196",
"0.6813336",
"0.67950875"
] |
0.7346784
|
0
|
Converts a PDF file to an XML file
|
import os
import tempfile

def pdftoxml(pdfdata):
    """Convert raw PDF bytes to pdftohtml's XML representation and return it as a string."""
    # Write the PDF bytes to a temporary file so the external tool can read it.
    pdffout = tempfile.NamedTemporaryFile(suffix='.pdf')
    pdffout.write(pdfdata)
    pdffout.flush()
    # Reserve a temporary .xml path; pdftohtml writes its output to "<basename>.xml".
    xmlin = tempfile.NamedTemporaryFile(mode='r', suffix='.xml')
    tmpxml = xmlin.name
    cmd = '/usr/bin/pdftohtml -xml -nodrm -zoom 1.5 -enc UTF-8 -noframes "%s" "%s"' % (pdffout.name, os.path.splitext(tmpxml)[0])
    cmd = cmd + " >/dev/null 2>&1"  # pdftohtml has no quiet flag, so discard stdout and stderr
    os.system(cmd)
    pdffout.close()
    xmldata = xmlin.read()
    xmlin.close()
    return xmldata
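For readers reusing this snippet: the `os.system` call above swallows any failure, so an empty string comes back if `pdftohtml` is missing or exits non-zero. The variant below is only an illustrative sketch (the name `pdftoxml_checked` is invented here, not part of the original code); it runs the same conversion through `subprocess.run` so errors surface as exceptions instead.

import os
import subprocess
import tempfile

def pdftoxml_checked(pdfdata):
    # Same pdftohtml invocation, but without a shell and with explicit error checking.
    with tempfile.NamedTemporaryFile(suffix='.pdf') as pdffout, \
         tempfile.NamedTemporaryFile(mode='r', suffix='.xml') as xmlin:
        pdffout.write(pdfdata)
        pdffout.flush()
        subprocess.run(
            ['pdftohtml', '-xml', '-nodrm', '-zoom', '1.5', '-enc', 'UTF-8', '-noframes',
             pdffout.name, os.path.splitext(xmlin.name)[0]],
            check=True,                    # raises CalledProcessError on a non-zero exit
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
        return xmlin.read()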
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_grobid_xml(self, paper_id):\n\n filename=cfg.folder_pdf+paper_id+\".pdf\"\n filename_xml=cfg.folder_content_xml+paper_id+\".xml\"\n\n ## check if XML file is already available\n if os.path.isfile(filename_xml):\n ## yes, load from cache\n root=etree.parse(filename_xml)\n # check the validity of the xml\n if self.check_validity_of_xml(root):\n return root\n else:\n raise Exception(\"Error in xml, pdf either broken or not extractable (i.e Unicode mapping missing\")\n else:\n if not os.path.isfile(filename):\n raise Exception(\"PDF for \"+paper_id+\" does not exist.\")\n ## no, get from GROBID\n url = cfg.grobid_url + '/processFulltextDocument'\n params = {\n 'input': open(filename, 'rb')\n }\n response = requests.post(url, files=params)\n if response.status_code == 200:\n ## it worked. now parse the result to XML\n parser = etree.XMLParser(encoding='UTF-8', recover=True)\n tei = response.content\n tei = tei if not isinstance(tei, text_type) else tei.encode('utf-8')\n root = etree.fromstring(tei, parser)\n ## and store it to xml cache\n with open(filename_xml, 'wb') as f:\n f.write(etree.tostring(root, pretty_print=True))\n # Check if the xml file derived from a valid pdf with unicode mapping\n # Correct: <teiHeader xml:lang=\"en\">\n # Incorrect: <teiHeader xml:lang=\"de\">\n if self.check_validity_of_xml(root):\n return root\n else:\n raise Exception(\"Error in xml, pdf either broken or not extractable (i.e Unicode mapping missing)\")\n else:\n raise Exception(\"Error calling GROBID for \"+paper_id+\": \"+str(response.status_code)+\" \"+response.reason)",
"def createPDFDoc(self, filepath):\n print(\"Starting pdf creation\")\n strMD=\"\"\n for fileMD,data in self.graph.nodes(data=True):\n if not os.path.isfile(fileMD):\n sys.exit(\"Error: \" + fileMD + \" does not exist\")\n if not fileMD.endswith(\"md\" or \"markdown\"):\n sys.exit(fileMD + \" is not a markdown file\");\n print(\"Found file: \" + fileMD)\n strMD = strMD + \" \" + fileMD\n cmd = \"pandoc --latex-engine=xelatex -s -o \" + filepath + strMD\t\n print(\"Starting file conversion.\")\n if subprocess.call(cmd) != 0:\n print(\"Conversion failed\")\n else:\n print(\"Saving pdf file to: \" + filepath)\n print(\"Conversion successfull\")",
"def file_to_xml(cls, file_object):\r\n return etree.parse(file_object, parser=edx_xml_parser).getroot()",
"def do_single_file_preprocess(pdf_file):",
"def pdf_to_txt(full_path):\n file = open(full_path,'rb')\n extracted_text = parser.from_buffer(file)\n return extracted_text['content']",
"def convertAnnotatedPDF(fname, refNrPath, origPDF):\n #tempdir is where I will save in between files\n try:\n os.mkdir(\"tempDir\")\n except:\n pass\n print(fname+\" is being exported.\")\n\n # get info on origin pdf\n input1 = PdfFileReader(open(origPDF, \"rb\"))\n npages = input1.getNumPages()\n pdfsize = input1.getPage(0).mediaBox\n pdfx = int(pdfsize[2])\n pdfy = int(pdfsize[3])\n # rM will not create a file when the page is empty so this is a\n # placeholde empty file to use.\n rm2svg(emptyRm, \"tempDir/emptyrm.svg\", coloured_annotations=True,\n x_width=pdfx, y_width=pdfy)\n\n # find what the page hashes are\n content = json.loads(open(refNrPath + \".content\").read())\n # convert all pages\n pdflist = []\n for pg, pg_hash in enumerate(content['pages']):\n # print(pg)\n rmpath = refNrPath + \"/\" + pg_hash + \".rm\"\n if os.path.isfile(rmpath):\n rm2svg(rmpath, \"tempDir/temprm\" + str(pg) + \".svg\", coloured_annotations=False, x_width=pdfx, y_width=pdfy)\n svg_path = \"tempDir/temprm\" + str(pg) + \".svg\"\n else:\n svg_path = \"tempDir/emptyrm.svg\"\n convertSvg2PdfCmd = \"\".join([\"rsvg-convert -f pdf -o \", \"tempDir/temppdf\" + str(pg), \".pdf \", svg_path])\n os.system(convertSvg2PdfCmd)\n pdflist.append(\"tempDir/temppdf\"+str(pg)+\".pdf\")\n # merge the annotated pages\n merged_rm = \"tempDir/merged_rm.pdf\"\n os.system(\"convert \"+ (\" \").join(pdflist)+\" \"+merged_rm)\n # stamp extracted annotations onto original with pdftk\n stampCmd = \"\".join([\"pdftk \", origPDF, \" multistamp \", merged_rm, \" output \", origPDF[:-4], \"_annot.pdf\"])\n os.system(stampCmd)\n # Remove temporary files\n shutil.rmtree(\"tempDir\", ignore_errors=False, onerror=None)\n return True",
"def to_xml_file(self, xml_file_path):\n s = self.to_xml()\n with open(xml_file_path, \"w+b\") as f:\n f.write(s)",
"def convert_pdf(pdf_path):\n with Image(filename=pdf_path, resolution=300, format=\"pdf\") as pdf:\n pdf.convert('tiff')\n pdf.save(filename='./data/raw/full.tiff')",
"def print_xml(tree, file):\n tree.write(file, encoding=\"utf-8\", xml_declaration=True)",
"def main():\n f_name = sys.argv[1]\n file_contents = open(f_name).read()\n C = CAST([], \"python\")\n C2 = C.from_json_str(file_contents)\n\n V = CASTToAGraphVisitor(C2)\n last_slash_idx = f_name.rfind(\"/\")\n file_ending_idx = f_name.rfind(\".\")\n pdf_file_name = f\"{f_name[last_slash_idx + 1 : file_ending_idx]}.pdf\"\n V.to_pdf(pdf_file_name)",
"def process_xml(self):\n self.process_gpx_file(str(self.filename))",
"def saving_file(xml):\r\n\r\n xml_string = etree.tostring(xml)\r\n parsed = minidom.parseString(xml_string)\r\n with open(self.app_path + \"\\\\temp_\\\\\" + file_name + \".xml\", \"w\") as file:\r\n file.write(parsed.toprettyxml(indent=\" \"))",
"def meta2xml(meta, filename):\n\n # this is stupid, just use dict2xml\n xml = dict2xml(meta)\n with open(filename, 'w+') as output:\n output.write(xml)",
"def _pdf(self):\n # LOG: processing_type property\n self.set_property('processing_type', 'pdf')\n xmlDoc = PDFiD(self.src_path)\n oPDFiD = cPDFiD(xmlDoc, True)\n # TODO: are there other characteristics which should be dangerous?\n if oPDFiD.encrypt.count > 0:\n self.make_dangerous('encrypted pdf')\n if oPDFiD.js.count > 0 or oPDFiD.javascript.count > 0:\n self.make_dangerous('pdf with javascript')\n if oPDFiD.aa.count > 0 or oPDFiD.openaction.count > 0:\n self.make_dangerous('openaction')\n if oPDFiD.richmedia.count > 0:\n self.make_dangerous('flash')\n if oPDFiD.launch.count > 0:\n self.make_dangerous('launch')",
"def parse_file(self, filepath):\n\n xml_file = open(filepath, \"r\")\n xml = xml_file.read()\n content = \"\"\n\n xml_file.close()\n\n for line in xml.replace(\"&\", \"&\").split(\"\\n\"):\n if content != \"\":\n content += \" \"\n content += re.sub(\"(<(P|F).*?>)|(<\\\\/P>)\", \"\", line).strip()\n # XML cleanning\n\n start_offset = \"<START_OFFSET_DUCFileRep>\"\n content = start_offset + content\n content = content.replace(\"</LP>\", \"</LP>%s\"%start_offset)\n content = content.replace(\"</TEXT>\", \"</TEXT>%s\"%start_offset)\n content = re.sub(\"%s.*?<LP>(.*?)<\\\\/LP>\"%start_offset, \"\\\\1\", content)\n content = re.sub(\"%s.*?<TEXT>(.*?)<\\\\/TEXT>\"%start_offset, \"\\\\1\", content)\n content = re.sub(\"%s.*\"%start_offset, \"\", content)\n\n self.set_content(content)",
"def xml2txt(filename):\n try:\n tree = et.parse(filename)\n except:\n return None\n root = tree.getroot()\n namespace = root.tag.split('}')[0]+'}'\n body = root.find(namespace+'contentSet')\\\n .find(namespace+'inlineXML')\\\n .find(namespace+'html')\\\n .find(namespace+'body')\n\n out = \"\"\n for elem in body:\n if elem.tag.split('}')[-1] == 'p':\n if elem.text:\n text = get_text(elem)\n if len(text) > 0:\n out += text.strip() + '\\n' # New paragraph (single newline)\n\n return out",
"def print_xml(self, filename):\n\n # TODO: check what happens when input is not an xml file\n # TODO: add xmldec, processing instructions and comments\n\n xml_string = u'' # TODO: use a string buffer\n offset = 0\n stack = []\n\n for char in self.text:\n\n # any tags on the stack that can be closed?\n (stack, matching) = self._matching_closing_tags(offset, stack, [])\n for t in matching:\n xml_string += \"</%s>\" % t.name\n\n # any new opening tags?\n for t in self.source_tags.opening_tags.get(offset,[]):\n stack.append(t)\n xml_string += \"<%s%s>\" % (t.name, t.attributes_as_string())\n\n # any of those need to be closed immediately (non-consuming tags)?\n (stack, matching) = self._matching_closing_tags(offset, stack, [])\n for t in matching:\n xml_string += \"</%s>\" % t.name\n\n xml_string += escape(char)\n offset += 1\n\n fh = open(filename, 'w')\n fh.write(xml_string.encode('utf-8'))",
"def load_pdf(self, env=\"default\", debug=()):\n os.makedirs(\"txt\", exist_ok=True)\n if env is \"default\": # default python path\n call([executable,\n os.path.join(f\"{exec_prefix}\", \"Scripts\", \"pdf2txt.py\"),\n os.path.join(\"pdf\", f\"{self.pdf_filename}\"),\n os.path.join(f\"-otxt\", f\"{self.txt_filename}\")])\n if env is \"venv\": # virtual environment\n call([os.path.join(\"venv\", \"Scripts\", \"python.exe\"),\n os.path.join(\"venv\", \"Scripts\", \"pdf2txt.py\"),\n os.path.join(\"pdf\", f\"{self.pdf_filename}\"),\n os.path.join(f\"-otxt\", f\"{self.txt_filename}\")])\n with open(os.path.join(\"txt\", f\"{self.txt_filename}\"), \"r\", encoding=\"utf-8\") as file:\n self.paragraphs = [paragraph.rstrip('\\n') for paragraph in file]\n os.remove(os.path.join(\"txt\", f\"{self.txt_filename}\"))\n if debug:\n for counter, paragraph in enumerate(self.paragraphs):\n try:\n if int(debug[0]) < counter < int(debug[1]):\n print(counter, paragraph)\n except TypeError:\n print(\"Debug must be a (x,y) touple.\")",
"def convert(self):\n self._convert()\n self._write_docx()",
"def exportXml ( w, xml ):\n assert str ( type ( xml ) ) == \"<type 'str'>\"\n rawText = xml\n pattern = re.compile (r'[^\\S ]+')\n text = re.sub ( pattern, \"\", rawText )\n reparsed = MD.parseString ( text )\n w.write ( reparsed.toprettyxml ( indent = \"\\t\", encoding = \"UTF-8\" ) )",
"def pdf_to_test(file_name):\n #Opening, reading and parsing a pdf file to string\n pdfFileObj = open(file_name, 'rb')\n pdfReader = PyPDF2.PdfFileReader(pdfFileObj)\n pdf_string = pdfReader.getPage(0).extractText()\n \n #Find the RechnungsNr.\n start_of_RN = pdf_string.find(\"No.Invoice Date\") + len(\"No.Invoice Date\")\n rechnungs_nr = pdf_string[start_of_RN:start_of_RN+7]\n \n #Find the address\n start_of_address = pdf_string.find(\"Invoice Address\") + len(\"Invoice Address\")\n end_of_address = pdf_string.find(\"Payment Terms:\")\n address = pdf_string[start_of_address:end_of_address]\n \n #Liefermonat commenrs\n start_of_contract = pdf_string.find(\"Company Name / Line of business\") + len(\"Company Name / Line of business\")\n end_of_contract = pdf_string.find(\"Summary of Charges\")\n contract = pdf_string[start_of_contract:end_of_contract]\n \n #Nettobetrag - read base charge\n start_of_netto = pdf_string.find(\"Base Charges\") + len(\"Base Charges\")\n end_of_netto = pdf_string.find(\"Click Charges - Color\")\n nettobetrag = pdf_string[start_of_netto:end_of_netto]\n \n pdfFileObj.close()\n \n return pdfFileObj.name, rechnungs_nr, address, contract, nettobetrag",
"def pdf():\n env.file_ext = \".pdf\"\n local(\"pandoc {input_files} -o {output_file}{file_ext} -H {preamble_file} --template {template_file} --bibliography={bib_file} --csl={csl_file} -V fontsize=12pt -V papersize=a4paper -V documentclass:report -N --latex-engine=xelatex\".format(**env))",
"def xml2html(self):\n handler = open(self.xml_doc).read()\n soup = BeautifulSoup(handler, 'xml')\n\n fw = open(self.filename_out, 'w')\n\n fw.write(\"<!DOCTYPE html>\" + os.linesep)\n fw.write(\"<html>\" + os.linesep)\n fw.write(\"<head>\" + os.linesep)\n fw.write('<meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\">' + os.linesep)\n fw.write(\"<link rel=\\\"stylesheet\\\" href=\\\"%s\\\" type=\\\"text/css\\\" />\" % self.stylesheet_name + os.linesep)\n fw.write(\"<title></title>\" + os.linesep)\n fw.write(\"</head>\" + os.linesep)\n fw.write(\"<body>\" + os.linesep)\n\n # Load styles in dictionaries\n for style in soup.find_all(\"style\"):\n style_name = style.get(\"style:name\")\n #print \"style: %s children: %s descendants: %s\" % (str(style_name), str(len(list(style.children))), len(list(style.descendants)))\n for style_child in style.children:\n fs = style_child.get(\"fo:font-style\")\n if fs:\n self.style_fontstyle[style_name] = fs\n fontw = style_child.get(\"fo:font-weight\")\n if fontw:\n self.style_fontweight[style_name] = fontw\n # read alignment\n txta = style_child.get(\"fo:text-align\")\n if txta:\n self.style_textalignment[style_name] = txta\n # !!!\n tu = style_child.get(\"style:text-underline-type\")\n if tu:\n self.style_textunderline[style_name] = \"underlined\"\n # page break\n break_before = style_child.get(\"fo:break-before\")\n if break_before:\n self.style_break_before[style_name] = break_before\n\n\n # Navigate down the document through h and p tags\n #\n for text in soup.find_all(re.compile(\"^h|^p\")):\n\n # From bs4 docs: If a tag has only one child, and that child is a NavigableString, the child is made available as .string:\n # This covers the following case (e.g.):\n #\n # <text:p text:style-name=\"P9\">- Any text here!</text:p>\n #\n # To do:\n #\n # Beware of this case:\n # - <text:p text:style-name=\"P8\">\n # <text:span text:style-name=\"T4\">\n #\n\n # Get the attributes so the styles and the outlines\n text_attrs = dict(text.attrs)\n\n # Get the styles, if any\n try:\n t_style = text_attrs[\"text:style-name\"]\n except:\n t_style = \"nostyle\"\n\n # Get the outline-levels, if any\n try:\n t_outline_level = text_attrs[\"text:outline-level\"]\n except:\n t_outline_level = \"paragraph\"\n\n if text.string:\n t = unicode(text.string)\n if t:\n fw.write(self.outliner(self.stylizer(t, t_style), t_outline_level, t_style).encode('utf-8'))\n\n # e.g. 
page breaks come as a node with no children whose style contains fo:break-before:\"page\"\n elif len(list(text.children)) == 0:\n fw.write(self.outliner(unicode(\"\"), t_outline_level, t_style).encode('utf-8'))\n\n # This covers the following case (e.g.):\n #\n # <text:p text:style-name=\"Textbody\">\n # jkjksk skjkjkjs dhh\n # <text:s />\n # <text:span text:style-name=\"T3\">Bold</text:span>\n # <text:s />\n # </text:p>\n #\n # else drill down one level\n else:\n buffer = unicode(\"\")\n t = buffer\n u = buffer\n t_outline_level = \"paragraph\"\n t_style = \"\"\n for i in text.children:\n # Get the attributes so the styles\n try:\n text_attrs = dict(i.attrs)\n t_style = text_attrs[\"text:style-name\"]\n except:\n # whenever the element has no style\n # take the parent's one\n try:\n text_attrs = dict(i.parent.attrs)\n t_style = text_attrs[\"text:style-name\"]\n except:\n t_style = \"nostyle\"\n\n # Get the outline-levels, if any\n try:\n t_outline_level = text_attrs[\"text:outline-level\"]\n except:\n t_outline_level = \"paragraph\"\n\n # if the current tag has only one child, and that child is a NavigableString\n if i.string:\n t = unicode(i.string)\n\n # space\n elif i.name == \"s\":\n t = unicode(\" \")\n\n # else drill down another level\n else:\n t = unicode(\"\")\n for j in i.children:\n if j.string:\n u = unicode(j.string)\n elif j.name == \"s\":\n u = unicode(\" \")\n else:\n u = unicode(\"\")\n if u:\n t = t + self.stylizer(u, t_style)\n\n # build up a unicode string containing the whole paragraph\n if t:\n buffer = buffer + self.stylizer(t, t_style)\n\n # outline the buffered unicode string and write it to the output file\n fw.write(self.outliner(buffer, t_outline_level, t_style).encode('utf-8'))\n\n fw.write(\"</body>\" + os.linesep)\n fw.write(\"</html>\" + os.linesep)\n fw.close()",
"def xml_to_conll(self, xml_file_path):\n\n if not os.path.exists(CONLL_PATH):\n self.create_directories(CONLL_PATH)\n\n\n for file in os.listdir(xml_file_path):\n\n # Set path to file\n file = xml_file_path+file\n\n # Open files only, ignore subdirectories\n if os.path.isfile(file) and file.lower().endswith('.xml'):\n\n # Open Files\n chapter_input = open(file, 'r', encoding='utf8')\n\n # Create Same Filename in Output Folder\n chapter_output = open(CONLL_PATH+os.path.split(file)[-1]+'.conll', 'w', encoding='utf8')\n\n print('Converting: ' + chapter_input.name + ' to Conll09 file: ' + chapter_output.name)\n\n chapter_input = BeautifulSoup(chapter_input, 'xml')\n for sentence in chapter_input.find_all('s'):\n line_id = 0\n for terminal in sentence.find_all('t'):\n line_id, terminal_id, form, lemma, plemma = line_id+1, terminal.get('id'), terminal.get('word'), terminal.get('lemma'), terminal.get('lemma')\n pos, ppos = terminal.get('pos'), terminal.get('pos')\n feat, pfeat, head, phead, deprel, pdeprel, fillpred, pred, apred1 = \"_\" * 9 # <3 Python!\n chapter_output.write(\"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\"\n \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\n\"\n % (str(line_id)+\"-\"+terminal_id, form, lemma, plemma, pos, ppos, feat, pfeat, head, phead, deprel, pdeprel, fillpred, pred, apred1))\n chapter_output.write(\"\\n\")\n\n chapter_output.close()\n\n print(\"Done!\")",
"def export_to_file(self, filename):\n if len(filename.split(\".\")) == 1:\n filename += \".xml\"\n xmlstring = self._dommodel.toprettyxml(\" \", \"\\n\")\n with open(filename, \"w\") as f:\n f.write(xmlstring)",
"def pdf_to_text(self, f):\n cmd = [\"pdftohtml\", \"-zoom\", \"1.35\", \"-xml\", \"-stdout\", f.name]\n code, stdout, stderr = self.shell(cmd)\n if code > 0:\n raise ValueError(stderr)\n return stdout.decode('utf-8')",
"def toPDF(Infos):\n\n\n #returnPDF = PDFDocument(\"output\")\n #returnPDF.Infos.get(\"name\")\n returnPDF = PDF(\"Courier\", Infos.get(\"name\"))\n if Infos.get('contact'):\n returnPDF.contact(Infos.get(\"contact\"))\n if Infos.get('Current position'):\n returnPDF.currentposition(Infos.get(\"Current position\"))\n if Infos.get('Education'):\n returnPDF.currentposition(Infos.get(\"Education\"))\n if Infos.get('Langue'):\n returnPDF.currentposition(Infos.get(\"Langue\"))\n returnPDF.output(\"result.pdf\", 'F')",
"def clean_PDF(submission):\n src = submission.file_upload.file.name\n pdf1 = PdfFileReader(src)\n merger = PdfFileMerger(strict=False, )\n merger.append(pdf1, import_bookmarks=False)\n merger.addMetadata({'/Title': '',\n '/Author': '',\n '/Creator': '',\n '/Producer': ''})\n fd, temp_file = tempfile.mkstemp(suffix='.pdf')\n merger.write(temp_file)\n merger.close()\n os.close(fd)\n shutil.move(temp_file, src) # replace the original PDF on the server",
"def buildPDF(self):\n\n # TODO: get this working\n # TODO: make this configurable via a dialog\n os.chdir(self.file_path.parent)\n proc = subprocess.Popen(\n [\"make\", \"latexpdf\"],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT\n )\n proc.wait()\n for line in proc.stdout:\n print(\"stdout: \" + line.rstrip())",
"def convert2pdf(foppath, infolder, outfolder, lang=\"es\"):\n\tinpath = os.path.join(infolder, \"*.xml\")\n\tfilecounter = 0\n\t\n\tprint(\"Starting...\")\n\t\n\tif not os.path.exists(outfolder):\n\t\tos.makedirs(outfolder)\n \n\tscriptdir = os.path.dirname(os.path.realpath(__file__))\n\t\n\tfor filepath in glob.glob(inpath):\n\t\tprint(\"Doing file \" + filepath)\n\t\tfilecounter+= 1\n\t\tfn = os.path.basename(filepath)[:-4]\n\t\t\n\t\tcommand = \"java -Dfop.home=\" + foppath + \" -jar \" + os.path.join(foppath, \"build/fop.jar\") + \" -xml \" + filepath + \" -xsl \" + os.path.join(scriptdir, \"tei2pdf.xsl\") + \" -pdf \" + os.path.join(outfolder, fn + \".pdf\" + \" -param lang '\" + lang + \"'\")\n\t\tsubprocess.call(command, shell=True)\n\t\n\tprint(\"Done. \" + str(filecounter) + \" files treated.\")"
] |
[
"0.67023027",
"0.6365666",
"0.60989606",
"0.6029197",
"0.5841964",
"0.5824973",
"0.5780665",
"0.5717895",
"0.57166094",
"0.564895",
"0.56473297",
"0.56250364",
"0.5558366",
"0.5530251",
"0.5512295",
"0.55119246",
"0.5511825",
"0.5509534",
"0.55055887",
"0.54943115",
"0.54859096",
"0.5484161",
"0.542181",
"0.54066664",
"0.54003066",
"0.53906965",
"0.5365771",
"0.5357961",
"0.5354713",
"0.53469163"
] |
0.71766394
|
1
|
Converts a PDF file to an XML file
|
import os
import tempfile

def pdftoxml(pdfdata):
    """Convert raw PDF bytes to pdftohtml's XML representation and return it as a string."""
    # Write the PDF bytes to a temporary file so the external tool can read it.
    pdffout = tempfile.NamedTemporaryFile(suffix='.pdf')
    pdffout.write(pdfdata)
    pdffout.flush()
    # Reserve a temporary .xml path; pdftohtml writes its output to "<basename>.xml".
    xmlin = tempfile.NamedTemporaryFile(mode='r', suffix='.xml')
    tmpxml = xmlin.name
    cmd = '/usr/bin/pdftohtml -xml -nodrm -zoom 1.5 -enc UTF-8 -noframes "%s" "%s"' % (pdffout.name, os.path.splitext(tmpxml)[0])
    cmd = cmd + " >/dev/null 2>&1"  # pdftohtml has no quiet flag, so discard stdout and stderr
    os.system(cmd)
    pdffout.close()
    xmldata = xmlin.read()
    xmlin.close()
    return xmldata
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_grobid_xml(self, paper_id):\n\n filename=cfg.folder_pdf+paper_id+\".pdf\"\n filename_xml=cfg.folder_content_xml+paper_id+\".xml\"\n\n ## check if XML file is already available\n if os.path.isfile(filename_xml):\n ## yes, load from cache\n root=etree.parse(filename_xml)\n # check the validity of the xml\n if self.check_validity_of_xml(root):\n return root\n else:\n raise Exception(\"Error in xml, pdf either broken or not extractable (i.e Unicode mapping missing\")\n else:\n if not os.path.isfile(filename):\n raise Exception(\"PDF for \"+paper_id+\" does not exist.\")\n ## no, get from GROBID\n url = cfg.grobid_url + '/processFulltextDocument'\n params = {\n 'input': open(filename, 'rb')\n }\n response = requests.post(url, files=params)\n if response.status_code == 200:\n ## it worked. now parse the result to XML\n parser = etree.XMLParser(encoding='UTF-8', recover=True)\n tei = response.content\n tei = tei if not isinstance(tei, text_type) else tei.encode('utf-8')\n root = etree.fromstring(tei, parser)\n ## and store it to xml cache\n with open(filename_xml, 'wb') as f:\n f.write(etree.tostring(root, pretty_print=True))\n # Check if the xml file derived from a valid pdf with unicode mapping\n # Correct: <teiHeader xml:lang=\"en\">\n # Incorrect: <teiHeader xml:lang=\"de\">\n if self.check_validity_of_xml(root):\n return root\n else:\n raise Exception(\"Error in xml, pdf either broken or not extractable (i.e Unicode mapping missing)\")\n else:\n raise Exception(\"Error calling GROBID for \"+paper_id+\": \"+str(response.status_code)+\" \"+response.reason)",
"def createPDFDoc(self, filepath):\n print(\"Starting pdf creation\")\n strMD=\"\"\n for fileMD,data in self.graph.nodes(data=True):\n if not os.path.isfile(fileMD):\n sys.exit(\"Error: \" + fileMD + \" does not exist\")\n if not fileMD.endswith(\"md\" or \"markdown\"):\n sys.exit(fileMD + \" is not a markdown file\");\n print(\"Found file: \" + fileMD)\n strMD = strMD + \" \" + fileMD\n cmd = \"pandoc --latex-engine=xelatex -s -o \" + filepath + strMD\t\n print(\"Starting file conversion.\")\n if subprocess.call(cmd) != 0:\n print(\"Conversion failed\")\n else:\n print(\"Saving pdf file to: \" + filepath)\n print(\"Conversion successfull\")",
"def file_to_xml(cls, file_object):\r\n return etree.parse(file_object, parser=edx_xml_parser).getroot()",
"def do_single_file_preprocess(pdf_file):",
"def pdf_to_txt(full_path):\n file = open(full_path,'rb')\n extracted_text = parser.from_buffer(file)\n return extracted_text['content']",
"def convertAnnotatedPDF(fname, refNrPath, origPDF):\n #tempdir is where I will save in between files\n try:\n os.mkdir(\"tempDir\")\n except:\n pass\n print(fname+\" is being exported.\")\n\n # get info on origin pdf\n input1 = PdfFileReader(open(origPDF, \"rb\"))\n npages = input1.getNumPages()\n pdfsize = input1.getPage(0).mediaBox\n pdfx = int(pdfsize[2])\n pdfy = int(pdfsize[3])\n # rM will not create a file when the page is empty so this is a\n # placeholde empty file to use.\n rm2svg(emptyRm, \"tempDir/emptyrm.svg\", coloured_annotations=True,\n x_width=pdfx, y_width=pdfy)\n\n # find what the page hashes are\n content = json.loads(open(refNrPath + \".content\").read())\n # convert all pages\n pdflist = []\n for pg, pg_hash in enumerate(content['pages']):\n # print(pg)\n rmpath = refNrPath + \"/\" + pg_hash + \".rm\"\n if os.path.isfile(rmpath):\n rm2svg(rmpath, \"tempDir/temprm\" + str(pg) + \".svg\", coloured_annotations=False, x_width=pdfx, y_width=pdfy)\n svg_path = \"tempDir/temprm\" + str(pg) + \".svg\"\n else:\n svg_path = \"tempDir/emptyrm.svg\"\n convertSvg2PdfCmd = \"\".join([\"rsvg-convert -f pdf -o \", \"tempDir/temppdf\" + str(pg), \".pdf \", svg_path])\n os.system(convertSvg2PdfCmd)\n pdflist.append(\"tempDir/temppdf\"+str(pg)+\".pdf\")\n # merge the annotated pages\n merged_rm = \"tempDir/merged_rm.pdf\"\n os.system(\"convert \"+ (\" \").join(pdflist)+\" \"+merged_rm)\n # stamp extracted annotations onto original with pdftk\n stampCmd = \"\".join([\"pdftk \", origPDF, \" multistamp \", merged_rm, \" output \", origPDF[:-4], \"_annot.pdf\"])\n os.system(stampCmd)\n # Remove temporary files\n shutil.rmtree(\"tempDir\", ignore_errors=False, onerror=None)\n return True",
"def to_xml_file(self, xml_file_path):\n s = self.to_xml()\n with open(xml_file_path, \"w+b\") as f:\n f.write(s)",
"def convert_pdf(pdf_path):\n with Image(filename=pdf_path, resolution=300, format=\"pdf\") as pdf:\n pdf.convert('tiff')\n pdf.save(filename='./data/raw/full.tiff')",
"def print_xml(tree, file):\n tree.write(file, encoding=\"utf-8\", xml_declaration=True)",
"def main():\n f_name = sys.argv[1]\n file_contents = open(f_name).read()\n C = CAST([], \"python\")\n C2 = C.from_json_str(file_contents)\n\n V = CASTToAGraphVisitor(C2)\n last_slash_idx = f_name.rfind(\"/\")\n file_ending_idx = f_name.rfind(\".\")\n pdf_file_name = f\"{f_name[last_slash_idx + 1 : file_ending_idx]}.pdf\"\n V.to_pdf(pdf_file_name)",
"def process_xml(self):\n self.process_gpx_file(str(self.filename))",
"def saving_file(xml):\r\n\r\n xml_string = etree.tostring(xml)\r\n parsed = minidom.parseString(xml_string)\r\n with open(self.app_path + \"\\\\temp_\\\\\" + file_name + \".xml\", \"w\") as file:\r\n file.write(parsed.toprettyxml(indent=\" \"))",
"def meta2xml(meta, filename):\n\n # this is stupid, just use dict2xml\n xml = dict2xml(meta)\n with open(filename, 'w+') as output:\n output.write(xml)",
"def _pdf(self):\n # LOG: processing_type property\n self.set_property('processing_type', 'pdf')\n xmlDoc = PDFiD(self.src_path)\n oPDFiD = cPDFiD(xmlDoc, True)\n # TODO: are there other characteristics which should be dangerous?\n if oPDFiD.encrypt.count > 0:\n self.make_dangerous('encrypted pdf')\n if oPDFiD.js.count > 0 or oPDFiD.javascript.count > 0:\n self.make_dangerous('pdf with javascript')\n if oPDFiD.aa.count > 0 or oPDFiD.openaction.count > 0:\n self.make_dangerous('openaction')\n if oPDFiD.richmedia.count > 0:\n self.make_dangerous('flash')\n if oPDFiD.launch.count > 0:\n self.make_dangerous('launch')",
"def parse_file(self, filepath):\n\n xml_file = open(filepath, \"r\")\n xml = xml_file.read()\n content = \"\"\n\n xml_file.close()\n\n for line in xml.replace(\"&\", \"&\").split(\"\\n\"):\n if content != \"\":\n content += \" \"\n content += re.sub(\"(<(P|F).*?>)|(<\\\\/P>)\", \"\", line).strip()\n # XML cleanning\n\n start_offset = \"<START_OFFSET_DUCFileRep>\"\n content = start_offset + content\n content = content.replace(\"</LP>\", \"</LP>%s\"%start_offset)\n content = content.replace(\"</TEXT>\", \"</TEXT>%s\"%start_offset)\n content = re.sub(\"%s.*?<LP>(.*?)<\\\\/LP>\"%start_offset, \"\\\\1\", content)\n content = re.sub(\"%s.*?<TEXT>(.*?)<\\\\/TEXT>\"%start_offset, \"\\\\1\", content)\n content = re.sub(\"%s.*\"%start_offset, \"\", content)\n\n self.set_content(content)",
"def xml2txt(filename):\n try:\n tree = et.parse(filename)\n except:\n return None\n root = tree.getroot()\n namespace = root.tag.split('}')[0]+'}'\n body = root.find(namespace+'contentSet')\\\n .find(namespace+'inlineXML')\\\n .find(namespace+'html')\\\n .find(namespace+'body')\n\n out = \"\"\n for elem in body:\n if elem.tag.split('}')[-1] == 'p':\n if elem.text:\n text = get_text(elem)\n if len(text) > 0:\n out += text.strip() + '\\n' # New paragraph (single newline)\n\n return out",
"def print_xml(self, filename):\n\n # TODO: check what happens when input is not an xml file\n # TODO: add xmldec, processing instructions and comments\n\n xml_string = u'' # TODO: use a string buffer\n offset = 0\n stack = []\n\n for char in self.text:\n\n # any tags on the stack that can be closed?\n (stack, matching) = self._matching_closing_tags(offset, stack, [])\n for t in matching:\n xml_string += \"</%s>\" % t.name\n\n # any new opening tags?\n for t in self.source_tags.opening_tags.get(offset,[]):\n stack.append(t)\n xml_string += \"<%s%s>\" % (t.name, t.attributes_as_string())\n\n # any of those need to be closed immediately (non-consuming tags)?\n (stack, matching) = self._matching_closing_tags(offset, stack, [])\n for t in matching:\n xml_string += \"</%s>\" % t.name\n\n xml_string += escape(char)\n offset += 1\n\n fh = open(filename, 'w')\n fh.write(xml_string.encode('utf-8'))",
"def load_pdf(self, env=\"default\", debug=()):\n os.makedirs(\"txt\", exist_ok=True)\n if env is \"default\": # default python path\n call([executable,\n os.path.join(f\"{exec_prefix}\", \"Scripts\", \"pdf2txt.py\"),\n os.path.join(\"pdf\", f\"{self.pdf_filename}\"),\n os.path.join(f\"-otxt\", f\"{self.txt_filename}\")])\n if env is \"venv\": # virtual environment\n call([os.path.join(\"venv\", \"Scripts\", \"python.exe\"),\n os.path.join(\"venv\", \"Scripts\", \"pdf2txt.py\"),\n os.path.join(\"pdf\", f\"{self.pdf_filename}\"),\n os.path.join(f\"-otxt\", f\"{self.txt_filename}\")])\n with open(os.path.join(\"txt\", f\"{self.txt_filename}\"), \"r\", encoding=\"utf-8\") as file:\n self.paragraphs = [paragraph.rstrip('\\n') for paragraph in file]\n os.remove(os.path.join(\"txt\", f\"{self.txt_filename}\"))\n if debug:\n for counter, paragraph in enumerate(self.paragraphs):\n try:\n if int(debug[0]) < counter < int(debug[1]):\n print(counter, paragraph)\n except TypeError:\n print(\"Debug must be a (x,y) touple.\")",
"def convert(self):\n self._convert()\n self._write_docx()",
"def exportXml ( w, xml ):\n assert str ( type ( xml ) ) == \"<type 'str'>\"\n rawText = xml\n pattern = re.compile (r'[^\\S ]+')\n text = re.sub ( pattern, \"\", rawText )\n reparsed = MD.parseString ( text )\n w.write ( reparsed.toprettyxml ( indent = \"\\t\", encoding = \"UTF-8\" ) )",
"def pdf_to_test(file_name):\n #Opening, reading and parsing a pdf file to string\n pdfFileObj = open(file_name, 'rb')\n pdfReader = PyPDF2.PdfFileReader(pdfFileObj)\n pdf_string = pdfReader.getPage(0).extractText()\n \n #Find the RechnungsNr.\n start_of_RN = pdf_string.find(\"No.Invoice Date\") + len(\"No.Invoice Date\")\n rechnungs_nr = pdf_string[start_of_RN:start_of_RN+7]\n \n #Find the address\n start_of_address = pdf_string.find(\"Invoice Address\") + len(\"Invoice Address\")\n end_of_address = pdf_string.find(\"Payment Terms:\")\n address = pdf_string[start_of_address:end_of_address]\n \n #Liefermonat commenrs\n start_of_contract = pdf_string.find(\"Company Name / Line of business\") + len(\"Company Name / Line of business\")\n end_of_contract = pdf_string.find(\"Summary of Charges\")\n contract = pdf_string[start_of_contract:end_of_contract]\n \n #Nettobetrag - read base charge\n start_of_netto = pdf_string.find(\"Base Charges\") + len(\"Base Charges\")\n end_of_netto = pdf_string.find(\"Click Charges - Color\")\n nettobetrag = pdf_string[start_of_netto:end_of_netto]\n \n pdfFileObj.close()\n \n return pdfFileObj.name, rechnungs_nr, address, contract, nettobetrag",
"def pdf():\n env.file_ext = \".pdf\"\n local(\"pandoc {input_files} -o {output_file}{file_ext} -H {preamble_file} --template {template_file} --bibliography={bib_file} --csl={csl_file} -V fontsize=12pt -V papersize=a4paper -V documentclass:report -N --latex-engine=xelatex\".format(**env))",
"def xml2html(self):\n handler = open(self.xml_doc).read()\n soup = BeautifulSoup(handler, 'xml')\n\n fw = open(self.filename_out, 'w')\n\n fw.write(\"<!DOCTYPE html>\" + os.linesep)\n fw.write(\"<html>\" + os.linesep)\n fw.write(\"<head>\" + os.linesep)\n fw.write('<meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\">' + os.linesep)\n fw.write(\"<link rel=\\\"stylesheet\\\" href=\\\"%s\\\" type=\\\"text/css\\\" />\" % self.stylesheet_name + os.linesep)\n fw.write(\"<title></title>\" + os.linesep)\n fw.write(\"</head>\" + os.linesep)\n fw.write(\"<body>\" + os.linesep)\n\n # Load styles in dictionaries\n for style in soup.find_all(\"style\"):\n style_name = style.get(\"style:name\")\n #print \"style: %s children: %s descendants: %s\" % (str(style_name), str(len(list(style.children))), len(list(style.descendants)))\n for style_child in style.children:\n fs = style_child.get(\"fo:font-style\")\n if fs:\n self.style_fontstyle[style_name] = fs\n fontw = style_child.get(\"fo:font-weight\")\n if fontw:\n self.style_fontweight[style_name] = fontw\n # read alignment\n txta = style_child.get(\"fo:text-align\")\n if txta:\n self.style_textalignment[style_name] = txta\n # !!!\n tu = style_child.get(\"style:text-underline-type\")\n if tu:\n self.style_textunderline[style_name] = \"underlined\"\n # page break\n break_before = style_child.get(\"fo:break-before\")\n if break_before:\n self.style_break_before[style_name] = break_before\n\n\n # Navigate down the document through h and p tags\n #\n for text in soup.find_all(re.compile(\"^h|^p\")):\n\n # From bs4 docs: If a tag has only one child, and that child is a NavigableString, the child is made available as .string:\n # This covers the following case (e.g.):\n #\n # <text:p text:style-name=\"P9\">- Any text here!</text:p>\n #\n # To do:\n #\n # Beware of this case:\n # - <text:p text:style-name=\"P8\">\n # <text:span text:style-name=\"T4\">\n #\n\n # Get the attributes so the styles and the outlines\n text_attrs = dict(text.attrs)\n\n # Get the styles, if any\n try:\n t_style = text_attrs[\"text:style-name\"]\n except:\n t_style = \"nostyle\"\n\n # Get the outline-levels, if any\n try:\n t_outline_level = text_attrs[\"text:outline-level\"]\n except:\n t_outline_level = \"paragraph\"\n\n if text.string:\n t = unicode(text.string)\n if t:\n fw.write(self.outliner(self.stylizer(t, t_style), t_outline_level, t_style).encode('utf-8'))\n\n # e.g. 
page breaks come as a node with no children whose style contains fo:break-before:\"page\"\n elif len(list(text.children)) == 0:\n fw.write(self.outliner(unicode(\"\"), t_outline_level, t_style).encode('utf-8'))\n\n # This covers the following case (e.g.):\n #\n # <text:p text:style-name=\"Textbody\">\n # jkjksk skjkjkjs dhh\n # <text:s />\n # <text:span text:style-name=\"T3\">Bold</text:span>\n # <text:s />\n # </text:p>\n #\n # else drill down one level\n else:\n buffer = unicode(\"\")\n t = buffer\n u = buffer\n t_outline_level = \"paragraph\"\n t_style = \"\"\n for i in text.children:\n # Get the attributes so the styles\n try:\n text_attrs = dict(i.attrs)\n t_style = text_attrs[\"text:style-name\"]\n except:\n # whenever the element has no style\n # take the parent's one\n try:\n text_attrs = dict(i.parent.attrs)\n t_style = text_attrs[\"text:style-name\"]\n except:\n t_style = \"nostyle\"\n\n # Get the outline-levels, if any\n try:\n t_outline_level = text_attrs[\"text:outline-level\"]\n except:\n t_outline_level = \"paragraph\"\n\n # if the current tag has only one child, and that child is a NavigableString\n if i.string:\n t = unicode(i.string)\n\n # space\n elif i.name == \"s\":\n t = unicode(\" \")\n\n # else drill down another level\n else:\n t = unicode(\"\")\n for j in i.children:\n if j.string:\n u = unicode(j.string)\n elif j.name == \"s\":\n u = unicode(\" \")\n else:\n u = unicode(\"\")\n if u:\n t = t + self.stylizer(u, t_style)\n\n # build up a unicode string containing the whole paragraph\n if t:\n buffer = buffer + self.stylizer(t, t_style)\n\n # outline the buffered unicode string and write it to the output file\n fw.write(self.outliner(buffer, t_outline_level, t_style).encode('utf-8'))\n\n fw.write(\"</body>\" + os.linesep)\n fw.write(\"</html>\" + os.linesep)\n fw.close()",
"def xml_to_conll(self, xml_file_path):\n\n if not os.path.exists(CONLL_PATH):\n self.create_directories(CONLL_PATH)\n\n\n for file in os.listdir(xml_file_path):\n\n # Set path to file\n file = xml_file_path+file\n\n # Open files only, ignore subdirectories\n if os.path.isfile(file) and file.lower().endswith('.xml'):\n\n # Open Files\n chapter_input = open(file, 'r', encoding='utf8')\n\n # Create Same Filename in Output Folder\n chapter_output = open(CONLL_PATH+os.path.split(file)[-1]+'.conll', 'w', encoding='utf8')\n\n print('Converting: ' + chapter_input.name + ' to Conll09 file: ' + chapter_output.name)\n\n chapter_input = BeautifulSoup(chapter_input, 'xml')\n for sentence in chapter_input.find_all('s'):\n line_id = 0\n for terminal in sentence.find_all('t'):\n line_id, terminal_id, form, lemma, plemma = line_id+1, terminal.get('id'), terminal.get('word'), terminal.get('lemma'), terminal.get('lemma')\n pos, ppos = terminal.get('pos'), terminal.get('pos')\n feat, pfeat, head, phead, deprel, pdeprel, fillpred, pred, apred1 = \"_\" * 9 # <3 Python!\n chapter_output.write(\"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\"\n \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\n\"\n % (str(line_id)+\"-\"+terminal_id, form, lemma, plemma, pos, ppos, feat, pfeat, head, phead, deprel, pdeprel, fillpred, pred, apred1))\n chapter_output.write(\"\\n\")\n\n chapter_output.close()\n\n print(\"Done!\")",
"def export_to_file(self, filename):\n if len(filename.split(\".\")) == 1:\n filename += \".xml\"\n xmlstring = self._dommodel.toprettyxml(\" \", \"\\n\")\n with open(filename, \"w\") as f:\n f.write(xmlstring)",
"def pdf_to_text(self, f):\n cmd = [\"pdftohtml\", \"-zoom\", \"1.35\", \"-xml\", \"-stdout\", f.name]\n code, stdout, stderr = self.shell(cmd)\n if code > 0:\n raise ValueError(stderr)\n return stdout.decode('utf-8')",
"def toPDF(Infos):\n\n\n #returnPDF = PDFDocument(\"output\")\n #returnPDF.Infos.get(\"name\")\n returnPDF = PDF(\"Courier\", Infos.get(\"name\"))\n if Infos.get('contact'):\n returnPDF.contact(Infos.get(\"contact\"))\n if Infos.get('Current position'):\n returnPDF.currentposition(Infos.get(\"Current position\"))\n if Infos.get('Education'):\n returnPDF.currentposition(Infos.get(\"Education\"))\n if Infos.get('Langue'):\n returnPDF.currentposition(Infos.get(\"Langue\"))\n returnPDF.output(\"result.pdf\", 'F')",
"def clean_PDF(submission):\n src = submission.file_upload.file.name\n pdf1 = PdfFileReader(src)\n merger = PdfFileMerger(strict=False, )\n merger.append(pdf1, import_bookmarks=False)\n merger.addMetadata({'/Title': '',\n '/Author': '',\n '/Creator': '',\n '/Producer': ''})\n fd, temp_file = tempfile.mkstemp(suffix='.pdf')\n merger.write(temp_file)\n merger.close()\n os.close(fd)\n shutil.move(temp_file, src) # replace the original PDF on the server",
"def buildPDF(self):\n\n # TODO: get this working\n # TODO: make this configurable via a dialog\n os.chdir(self.file_path.parent)\n proc = subprocess.Popen(\n [\"make\", \"latexpdf\"],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT\n )\n proc.wait()\n for line in proc.stdout:\n print(\"stdout: \" + line.rstrip())",
"def convert2pdf(foppath, infolder, outfolder, lang=\"es\"):\n\tinpath = os.path.join(infolder, \"*.xml\")\n\tfilecounter = 0\n\t\n\tprint(\"Starting...\")\n\t\n\tif not os.path.exists(outfolder):\n\t\tos.makedirs(outfolder)\n \n\tscriptdir = os.path.dirname(os.path.realpath(__file__))\n\t\n\tfor filepath in glob.glob(inpath):\n\t\tprint(\"Doing file \" + filepath)\n\t\tfilecounter+= 1\n\t\tfn = os.path.basename(filepath)[:-4]\n\t\t\n\t\tcommand = \"java -Dfop.home=\" + foppath + \" -jar \" + os.path.join(foppath, \"build/fop.jar\") + \" -xml \" + filepath + \" -xsl \" + os.path.join(scriptdir, \"tei2pdf.xsl\") + \" -pdf \" + os.path.join(outfolder, fn + \".pdf\" + \" -param lang '\" + lang + \"'\")\n\t\tsubprocess.call(command, shell=True)\n\t\n\tprint(\"Done. \" + str(filecounter) + \" files treated.\")"
] |
[
"0.67023027",
"0.6365666",
"0.60989606",
"0.6029197",
"0.5841964",
"0.5824973",
"0.5780665",
"0.5717895",
"0.57166094",
"0.564895",
"0.56473297",
"0.56250364",
"0.5558366",
"0.5530251",
"0.5512295",
"0.55119246",
"0.5511825",
"0.5509534",
"0.55055887",
"0.54943115",
"0.54859096",
"0.5484161",
"0.542181",
"0.54066664",
"0.54003066",
"0.53906965",
"0.5365771",
"0.5357961",
"0.5354713",
"0.53469163"
] |
0.71766394
|
0
|
Train weak classifier based on a given feature.
|
def trainWeakClassifier(trainingSamples, weights, feature):
#compute feature values
featureValues = []
positiveOrNegative = []
for sample in trainingSamples:
featureValues.append(feature.computeScore(sample[0], 0, 0))
positiveOrNegative.append(sample[1])
#zip with weights and sort by feature value
featureValues = zip(featureValues, weights, positiveOrNegative)
featureValues = sorted(featureValues, key=lambda tup: tup[0])
#sum all weights of the positive and negative samples
negativeWeightsTotal = 0
positiveWeightsTotal = 0
for value in featureValues:
if value[2] == 1:
positiveWeightsTotal += value[1]
else:
negativeWeightsTotal += value[1]
    #find the split point (threshold) with the smallest weighted error for this feature
bestFeatureIndex = 0
bestFeatureError = 1e10
negativeWeightsSoFar = 0
positiveWeightsSoFar = 0
positiveOnTheLeft = 0
positivesTotal = 0
for i in range(0, len(featureValues)):
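        # error1: weighted error if everything left of split i is labelled negative
        #         (all positives seen so far are wrong) and everything to the right
        #         positive (all remaining negatives are wrong); error2 is the mirror case.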
error1 = positiveWeightsSoFar-negativeWeightsSoFar+negativeWeightsTotal
error2 = negativeWeightsSoFar-positiveWeightsSoFar+positiveWeightsTotal
error = min([error1, error2])
if bestFeatureError > error:
bestFeatureError = error
bestFeatureIndex = i
positiveOnTheLeft = positivesTotal
if featureValues[i][2] == 1:
positiveWeightsSoFar += featureValues[i][1]
positivesTotal += 1
else:
negativeWeightsSoFar += featureValues[i][1]
    #count how many positive samples end up on the right of the split
positiveOnTheRight = positivesTotal - positiveOnTheLeft
#determine the polarity and threshold
polarity = -1
threshold = featureValues[bestFeatureIndex][0]
if positiveOnTheLeft > positiveOnTheRight:
polarity = 1
else:
polarity = -1
#build and return a weak classifier
return WeakClassifier(feature, threshold, polarity)
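A minimal usage sketch, assuming the WeakClassifier built above keeps the feature, threshold and polarity it was constructed with (those attribute names are an assumption, not confirmed by the code); sample stands for the raw data part of a training tuple, matching how computeScore is called above.

def applyWeakClassifier(weak, sample):
    # Score the sample with the stump's feature, then compare against the
    # learned threshold in the direction given by the polarity (assumed rule).
    score = weak.feature.computeScore(sample, 0, 0)
    return 1 if weak.polarity * score < weak.polarity * weak.threshold else 0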
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def trainModel( self, featureTrain, classTrain):",
"def train(self, features, labels, seed=None):\n raise NotImplementedError('Not implemented')",
"def train(self, features, labels):\n pass",
"def train(self, features, labels):\n self._clf.fit(features, labels)",
"def train(self, features, labels):\n self._clf.fit(features, labels)",
"def train_clf(x_train, y_train, clf_model=\"decision_tree\"):\n clf = classifiers[clf_model]\n clf.fit(x_train, y_train)\n return clf",
"def train_classifier(self, class_id):\n raise NotImplementedError(\"Classifier training must be implemented first.\")",
"def train(x, y, model, tune):\n\n # for labelled data\n if len(y.index) != 0:\n # split dataset into train & test\n x_train, x_test, y_train, y_test = train_test_split(\n x,\n y,\n test_size=0.2,\n shuffle=True,\n random_state=42)\n\n # log.info(find_best_features(x, x_train, y_train))\n\n log.info('Using columns {cols}'.format(cols=x.columns))\n\n classifier = model(x, y, x_train, x_test, y_train, y_test)\n if tune:\n classifier.tune()\n\n # handle exceptions so that one failing model doesn't cause failure(when using --offline)\n try:\n start_time = datetime.now()\n classifier.train()\n end_time = datetime.now()\n log.info('Model execution time: {time}'.format(time=(end_time - start_time)))\n except Exception as e:\n log.error(e)\n\n # for unlabelled data\n else:\n x_train, x_test = train_test_split(\n x,\n test_size=0.2,\n shuffle=True,\n random_state=42)\n\n classifier = model(x, y, x_train, x_test, pd.DataFrame(), pd.DataFrame())\n classifier.train()",
"async def train(gradient_boosting: bool = False) -> bool:\n data = clf.dataset()\n return clf.train(data['X'], data['y'], gradient_boosting)",
"def svm_train_classifier(self):\n\n # needed because a SVM needs more than 1 class\n if len(self.saved_gestures.keys()) <= 1:\n print(\"Not enough gestures!\")\n else:\n training_data = []\n categories = []\n id = 0\n\n for gesture, value in self.saved_gestures.items():\n id += 1\n # needed to map the id returned from the SVM to a name of a gesture\n self.category_to_gesture[id] = gesture\n categories.append(id)\n\n x = []\n y = []\n z = []\n for elem in value:\n x.append(elem[0][0])\n y.append(elem[1][0])\n z.append(elem[2][0])\n\n training_data.append(self.get_fft(x, y, z))\n\n # normalized length of fft\n self.cutoff_length = min([len(l) for l in training_data])\n\n normalized_fft = []\n for l in training_data:\n normalized_fft.append(l[:self.cutoff_length])\n\n training_data = normalized_fft\n\n self.classifier.fit(training_data, categories)",
"def train(self, X, y):\n # the nearest neighbor classifier simply remembers all the training data\n self.Xtr = X\n self.ytr = y",
"def train(self, X, y):\n # the nearest neighbor classifier simply remembers all the training data\n self.Xtr = X\n self.ytr = y",
"def train(self, X, y):\r\n # the nearest neighbor classifier simply remembers all the training data\r\n self.Xtr = X\r\n self.ytr = y",
"def train_classifier(attacked_node: int, training_nodes: List[int], graph_p: g.Graph, embedding: e.Embedding,\n num_bins: int, classifier: sk_base.ClassifierMixin,\n mem_acc: ma.MemoryAccess) -> sk_base.ClassifierMixin:\n assert (attacked_node not in graph_p.nodes())\n assert (all([second_rem_node in graph_p.nodes() for second_rem_node in training_nodes]))\n\n if mem_acc.has_classification_model(classifier_name=str(classifier), emb_func_name=str(embedding),\n graph_name=str(graph_p), attacked_node=attacked_node,\n training_nodes=training_nodes, num_bins=num_bins):\n return mem_acc.load_classification_model(classifier_name=str(classifier), emb_func_name=str(embedding),\n graph_name=str(graph_p), attacked_node=attacked_node,\n training_nodes=training_nodes, num_bins=num_bins)\n\n train_features, train_labels = load_train_features_and_labels(attacked_node=attacked_node,\n training_nodes=training_nodes,\n embedding=embedding, graph_p=graph_p,\n num_bins=num_bins, mem_acc=mem_acc)\n classifier = fit_classifier(training_features=train_features, training_labels=train_labels, classifier=classifier)\n\n mem_acc.save_classification_model(classification_model=classifier, emb_func_name=str(embedding),\n graph_name=str(graph_p), attacked_node=attacked_node,\n training_nodes=training_nodes, num_bins=num_bins)\n\n return classifier",
"def train(self, dataset):\n \"*** YOUR CODE HERE question 1 ***\"\n while True:\n trainingComplete = True\n data = dataset.iterate_once(1)\n\n for feature, label in data:\n\n if nn.as_scalar(label) != self.get_prediction(feature):\n self.w.update(feature, nn.as_scalar(label))\n trainingComplete = False\n\n if trainingComplete:\n break",
"def train(self, features, labels):\n self.train_features = features\n self.train_labels = labels\n #raise NotImplementedError",
"def fit(self, trainingFeatures, trainingTargets):\r\n\r\n \"\"\" Implement kNN learning below. \"\"\"\r\n\r\n self._fitCalled = True\r\n self.labels = trainingTargets\r\n self.data = trainingFeatures",
"def train(self, df: pd.DataFrame, label_column: str):\n assert label_column not in self.feature_columns, 'Label column is in the feature list.'\n assert label_column in df.columns, 'Label column is not in the dataframe.'\n\n X = self.preprocess(df)\n y = df[label_column].values\n\n model = RandomForestClassifier(**self.model_params)\n model.fit(X, y)\n self.model = model",
"def train(self, X, y):",
"def fit(self, features, classes):\n\n # TODO: finish this.\n classes = np.array(classes)\n features = np.array(features)\n idx_1 = np.where(classes == 1)[0]\n idx_0 = np.where(classes == 0)[0]\n new_features = np.concatenate((features[idx_0,:], features[idx_1,:]), axis=0)\n new_classes = np.concatenate((classes[idx_0], classes[idx_1]), axis=0)\n \n self.classifier.fit(new_features, new_classes)",
"def run_weak_classifier(x: np.ndarray, c: svm.SVC) -> int:\n x = x.reshape((1, 36))\n return 1 if c.predict(x)[0] == 1 else 0",
"def __trainLocal__(self, featureVals, targetVals):\n pass",
"def train_model(evidence, labels):\n model = sklearn.neighbors.KNeighborsClassifier(n_neighbors = 1)\n model.fit(evidence,labels)\n return model",
"def train(self, X, y):\n pass",
"def train(self, X, y):\n # the nearest neighbor classifier simply remembers all the training data\n self.Xtr = X\n self.ytr = y",
"def train(self,features,y):\r\n \r\n if self.learn_type == \"nn\":\r\n #generate supervised dataset\r\n return(self.learner.train_on_batch(features,y))\r\n elif self.learn_type == \"linear\":\r\n grad = 0\r\n n = len(features)\r\n for i in range(n):\r\n #sum over the instances to get an estimate of the gradient\r\n print((y[i] - self.learner.activate(features[i])))\r\n grad -= (y[i] - self.learner.activate(features[i])) * \\\r\n self.learner.grad(features[i])\r\n grad /= n\r\n #update paramter\r\n param = np.copy(self.learner.param)\r\n self.learner.param = param - self.alpha * grad\r\n #print(self.learner.param)\r",
"def train(self,X,y):\n #the nearest neighbour classifier simply remembers all the training data\n self.Xtr=X\n self.ytr=y",
"def train_knn(training_data):\n return knnclassifier(training_data, keys, 3)",
"def retrain(self):\n thread = Thread(target=self.trainer.train_classifier)\n thread.start()",
"def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch):\n session.run(optimizer, feed_dict={y: label_batch, x: feature_batch, keep_prob: keep_probability})"
] |
[
"0.6870671",
"0.6641588",
"0.6511581",
"0.64583826",
"0.64583826",
"0.6346193",
"0.63030595",
"0.6284348",
"0.62136",
"0.618147",
"0.61618143",
"0.61618143",
"0.6142733",
"0.61174417",
"0.6100654",
"0.60273075",
"0.5981706",
"0.592598",
"0.5914481",
"0.59115297",
"0.5890265",
"0.58834875",
"0.58762306",
"0.58730894",
"0.5846636",
"0.583788",
"0.5822323",
"0.58205444",
"0.5813239",
"0.5813202"
] |
0.7230463
|
0
|
Converts a list of finished workers into a result.
|
def getResults(workers):
results = []
for worker in workers:
results += worker.getResults()
return results
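A small illustration of the flattening behaviour; the stub below is an assumption standing in for the real worker class.

class _StubWorker:
    def __init__(self, results):
        self._results = results

    def getResults(self):
        return self._results

# getResults([_StubWorker([1, 2]), _StubWorker([3])]) returns [1, 2, 3]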
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def after_split_results(msg, config, checklist):\n next_workers = {\n \"crash\": [],\n \"failure hindcast\": [],\n \"success hindcast\": [],\n }\n if msg.type.startswith(\"success\"):\n if config[\"results tarballs\"][\"archive hindcast\"]:\n last_date = max(map(arrow.get, msg.payload))\n if arrow.get(last_date).shift(days=+1).day == 1:\n yyyymmm = arrow.get(last_date).format(\"YYYY-MMM\").lower()\n next_workers[msg.type].append(\n NextWorker(\n \"nowcast.workers.archive_tarball\",\n args=[\"hindcast\", yyyymmm, \"graham-dtn\"],\n )\n )\n return next_workers[msg.type]",
"def imap(self, iterable):\n def get_results():\n \"\"\"Get a result from the worker output queue and try to yield\n results back to the caller.\n\n This yields results back in the order of their associated tasks.\n \"\"\"\n self._recv_result() # blocks\n tasks = self._tasks_in_progress\n results = self._task_results_waiting\n\n for task_id in tasks.keys():\n if task_id not in results:\n break\n\n del tasks[task_id]\n result = results.pop(task_id)\n yield result.value\n\n for result in self._map_to_workers(iterable, get_results):\n yield result",
"def after_download_results(msg, config, checklist):\n next_workers = {\n \"crash\": [],\n \"failure nowcast\": [],\n \"failure nowcast-green\": [],\n \"failure forecast\": [],\n \"failure forecast2\": [],\n \"failure hindcast\": [],\n \"failure nowcast-agrif\": [],\n \"success nowcast\": [],\n \"success nowcast-green\": [],\n \"success forecast\": [],\n \"success forecast2\": [],\n \"success hindcast\": [],\n \"success nowcast-agrif\": [],\n }\n if msg.type.startswith(\"success\"):\n run_type = msg.type.split()[1]\n run_date = msg.payload[run_type][\"run date\"]\n if run_type == \"hindcast\":\n next_workers[msg.type].append(\n NextWorker(\"nowcast.workers.split_results\", args=[run_type, run_date])\n )\n return next_workers[msg.type]\n if run_type.startswith(\"nowcast\"):\n next_workers[msg.type].append(\n NextWorker(\n \"nowcast.workers.make_plots\",\n args=[\"nemo\", run_type, \"research\", \"--run-date\", run_date],\n )\n )\n if run_type == \"nowcast\":\n compare_date = arrow.get(run_date).shift(days=-1).format(\"YYYY-MM-DD\")\n next_workers[msg.type].extend(\n [\n NextWorker(\n \"nowcast.workers.make_plots\",\n args=[\n \"nemo\",\n run_type,\n \"comparison\",\n \"--run-date\",\n compare_date,\n ],\n ),\n NextWorker(\n \"nowcast.workers.make_CHS_currents_file\",\n args=[run_type, \"--run-date\", run_date],\n ),\n ]\n )\n if run_type == \"nowcast-green\":\n next_workers[msg.type].append(\n NextWorker(\"nowcast.workers.ping_erddap\", args=[\"nowcast-green\"])\n )\n if arrow.get(run_date).shift(days=+1).day == 1:\n yyyymmm = arrow.get(run_date).format(\"YYYY-MMM\").lower()\n next_workers[msg.type].append(\n NextWorker(\n \"nowcast.workers.archive_tarball\",\n args=[\"nowcast-green\", yyyymmm, \"graham-dtn\"],\n )\n )\n return next_workers[msg.type]\n if run_type.startswith(\"forecast\"):\n next_workers[msg.type].append(\n NextWorker(\n \"nowcast.workers.make_CHS_currents_file\",\n args=[run_type, \"--run-date\", run_date],\n )\n )\n return next_workers[msg.type]",
"def _extract_completed_runs_from_futures(self) -> None:\n\n # In code check to make sure we don;t exceed resource allocation\n if len(self.futures) > sum(self.client.nthreads().values()):\n warnings.warn(\"More running jobs than resources available \"\n \"Should not have more futures/runs in remote workers \"\n \"than the number of workers. This could mean a worker \"\n \"crashed and was not able to be recovered by dask. \"\n )\n\n # A future is removed to the list of futures as an indication\n # that a worker is available to take in an extra job\n done_futures = [f for f in self.futures if f.done()]\n for future in done_futures:\n self.results.append(future.result())\n self.futures.remove(future)",
"def receive_workers_output(node_request_map, results_list, free_nodes, command, idle_nodes):\n\n if dist.get_backend() == \"nccl\": # Async\n for node, req in node_request_map:\n if req.is_completed():\n result = build_metrics_dict(node) if command == COMMAND_TESTVAL else build_grads_dict(node)\n results_list.append(result)\n free_nodes.append(node)\n node_request_map.remove((node,req))\n print_rank(f\"Finished releasing the nodes {free_nodes}\", loglevel=logging.DEBUG)\n else: # Sync\n print_rank(f\"Waiting for a workers\", loglevel=logging.DEBUG)\n gather_objects = [(None,None,None) for i in range(size())]\n output = [None for _ in gather_objects]\n dist.all_gather_object(output, gather_objects[rank()])\n print_rank(f\" All workers have finished ... taking the remaining clients {len(output)}\", loglevel=logging.DEBUG)\n output = [e for i,e in enumerate(output) if i not in idle_nodes ] # Cleanup for idle workers\n results_list = results_list + output[1:]\n free_nodes = list(range(1, size()))\n \n return node_request_map, results_list, free_nodes",
"def workers(self):\n return self.worker_list",
"def imap_unordered(self, iterable):\n def get_results():\n \"\"\"Get a result from the worker output queue and try to yield\n results back to the caller.\n\n This yields results back in the order of their associated tasks.\n \"\"\"\n result = self._recv_result() # blocks\n del self._tasks_in_progress[result.task_id]\n del self._task_results_waiting[result.task_id]\n yield result.value\n\n for result in self._map_to_workers(iterable, get_results):\n yield result",
"def _map_to_workers(self, iterable, result_getter):\n if not self.is_started:\n raise RuntimeError(\"Cannot process inputs: must call start() first.\")\n\n tasks = TaskIterator(iterable)\n task = next(tasks)\n\n while True:\n try:\n self._send_task(task)\n task = next(tasks)\n except Queue.Full:\n for result in result_getter(): # I wish I had `yield from` :(\n yield result\n except StopIteration:\n break\n\n while not self.is_completed:\n for result in result_getter():\n yield result",
"def after_download_wwatch3_results(msg, config, checklist):\n next_workers = {\n \"crash\": [],\n \"failure forecast2\": [],\n \"failure nowcast\": [],\n \"failure forecast\": [],\n \"success forecast2\": [],\n \"success nowcast\": [],\n \"success forecast\": [],\n }\n if msg.type.startswith(\"success\"):\n run_type = msg.type.split()[1]\n run_date = checklist[\"WWATCH3 run\"][run_type][\"run date\"]\n if run_type.startswith(\"forecast\"):\n next_workers[msg.type].append(\n NextWorker(\n \"nowcast.workers.update_forecast_datasets\",\n args=[\"wwatch3\", run_type, \"--run-date\", run_date],\n )\n )\n return next_workers[msg.type]",
"def process_workers(worker_id: int, future: Future, params: DownloadCommandParameters) -> None:\n # Wait for the result.\n res = future.result()\n # If response is empty, let the user know.\n if not res:\n logger.error(f\"No data in response.\")\n raise ValueError(\"Issue downloading images.\")\n else:\n download_file = os.path.join(params.output, f\"download_{worker_id}.zip\")\n with open(download_file, \"wb\") as stream:\n for item in res:\n stream.write(item)",
"def get_worker_list(self):\n return [{WORKER_ID_KEY: worker_id, REGISTRATION_STATUS_KEY: value}\n for worker_id, value in self.registered_workers.items()]",
"def wait_for_workers(self):\r\n stop = False\r\n workers = self.aggregator.get_participants()\r\n\r\n while not stop: \r\n try:\r\n with self.aggregator:\r\n resp = self.aggregator.receive(1)\r\n participant = resp.notification['participant']\r\n workers.append(participant)\r\n print('Task %s: participant %s has joined' % (self.task_name, participant))\r\n except Exception as err:\r\n print(\"Task %s: joined %d participants out of %d\" % (self.task_name, len(workers), self.Nworkers))\r\n #print(err)\r\n #print('Check here: error')\r\n #import code\r\n #code.interact(local=locals())\r\n pass\r\n\r\n if len(workers) == self.Nworkers:\r\n stop = True\r\n\r\n workers = self.aggregator.get_participants()\r\n return list(workers.keys())",
"def after_download_fvcom_results(msg, config, checklist):\n next_workers = {\n \"crash\": [],\n \"failure x2 nowcast\": [],\n \"failure r12 nowcast\": [],\n \"success x2 nowcast\": [],\n \"success r12 nowcast\": [],\n }\n if msg.type.startswith(\"success\"):\n run_type = msg.type.split()[2]\n run_date = msg.payload[run_type][\"run date\"]\n model_config = msg.payload[run_type][\"model config\"]\n next_workers[msg.type].extend(\n [\n NextWorker(\n \"nowcast.workers.get_vfpa_hadcp\", args=[\"--data-date\", run_date]\n ),\n NextWorker(\n \"nowcast.workers.make_plots\",\n args=[\n \"fvcom\",\n f\"{run_type}-{model_config}\",\n \"research\",\n \"--run-date\",\n run_date,\n ],\n ),\n ]\n )\n if run_type == \"nowcast\":\n next_workers[msg.type].append(\n NextWorker(\n \"nowcast.workers.ping_erddap\",\n args=[f\"fvcom-{model_config}-nowcast\"],\n )\n )\n return next_workers[msg.type]",
"def serial_worker(jobs_queue):\n return (get_and_format(**job) for job in jobs_queue)",
"def after_collect_river_data(msg, config, checklist):\n next_workers = {\"crash\": [], \"failure\": [], \"success\": []}\n return next_workers[msg.type]",
"def workers_status(self):\n workers = []\n for agent in self.agents_status():\n workers += agent['workers']\n return workers",
"def give_workers_list(self):\n return self._workers",
"async def wait_for_complete(self, workers: Iterable[Worker] | None = None) -> None:\n\n await asyncio.gather(*[worker.wait() for worker in (workers or self)])",
"def result(self, format_output=None):\n if format_output is None:\n return self._finished_\n return [r[:2] + (format_output(r[2]),) for r in self._finished_]",
"def do_same_job(self, func, input_list):\n task_submitted = []\n for data in input_list:\n task_submitted.append(self.executor.submit(func, *data))\n\n return [t.result() for t in task_submitted]",
"def terminate_workers(self, input_q, output_q, workers, interrupted=False):\n if not interrupted:\n for _ in workers:\n input_q.put(None, block=True)\n\n accumulators = []\n while len(accumulators) != len(workers):\n accumulators.append(output_q.get())\n logger.info(\"%d accumulators retrieved from output queue\", len(accumulators))\n\n for worker in workers:\n if worker.is_alive():\n worker.terminate()\n\n input_q.close()\n output_q.close()\n return accumulators",
"def get_output(self, worker_metadatas):\n outputs = []\n has_pulp_pull = PLUGIN_PULP_PULL_KEY in self.workflow.exit_results\n try:\n pulp_sync_results = self.workflow.postbuild_results[PLUGIN_PULP_SYNC_KEY]\n crane_registry = pulp_sync_results[0]\n except (KeyError, IndexError):\n crane_registry = None\n\n for platform in worker_metadatas:\n for instance in worker_metadatas[platform]['output']:\n instance['buildroot_id'] = '{}-{}'.format(platform, instance['buildroot_id'])\n\n if instance['type'] == 'docker-image':\n # update image ID with pulp_pull results;\n # necessary when using Pulp < 2.14. Only do this\n # when building for a single architecture -- if\n # building for many, we know Pulp has schema 2\n # support.\n if len(worker_metadatas) == 1 and has_pulp_pull:\n if self.workflow.builder.image_id is not None:\n instance['extra']['docker']['id'] = self.workflow.builder.image_id\n\n # update repositories to point to Crane\n if crane_registry:\n pulp_pullspecs = []\n docker = instance['extra']['docker']\n for pullspec in docker['repositories']:\n image = ImageName.parse(pullspec)\n image.registry = crane_registry.registry\n pulp_pullspecs.append(image.to_str())\n\n docker['repositories'] = pulp_pullspecs\n\n outputs.append(instance)\n\n return outputs",
"async def list_workers(self, *, option: ListApiOptions) -> ListApiResponse:\n try:\n reply = await self._client.get_all_worker_info(timeout=option.timeout)\n except DataSourceUnavailable:\n raise DataSourceUnavailable(GCS_QUERY_FAILURE_WARNING)\n\n result = []\n for message in reply.worker_table_data:\n data = protobuf_message_to_dict(\n message=message, fields_to_decode=[\"worker_id\", \"raylet_id\"]\n )\n data[\"worker_id\"] = data[\"worker_address\"][\"worker_id\"]\n data[\"node_id\"] = data[\"worker_address\"][\"raylet_id\"]\n data[\"ip\"] = data[\"worker_address\"][\"ip_address\"]\n data[\"start_time_ms\"] = int(data[\"start_time_ms\"])\n data[\"end_time_ms\"] = int(data[\"end_time_ms\"])\n data[\"worker_launch_time_ms\"] = int(data[\"worker_launch_time_ms\"])\n data[\"worker_launched_time_ms\"] = int(data[\"worker_launched_time_ms\"])\n result.append(data)\n\n num_after_truncation = len(result)\n result = self._filter(result, option.filters, WorkerState, option.detail)\n num_filtered = len(result)\n # Sort to make the output deterministic.\n result.sort(key=lambda entry: entry[\"worker_id\"])\n result = list(islice(result, option.limit))\n return ListApiResponse(\n result=result,\n total=reply.total,\n num_after_truncation=num_after_truncation,\n num_filtered=num_filtered,\n )",
"def aliveworkers(workers):\n \n #ping everyone using threads\n threads=[]\n results={}\n output=threading.Lock()\n \n def threadcode(worker):\n worker=worker[:]\n logging.info(\"Pinging %r\" % (worker,))\n results[worker]=sshping(worker)\n logging.info (\"Worker %r is %s.\" % (worker, [\"down\",\"up\"][results[worker]]))\n \n for i,worker in enumerate(workers):\n threads.append(threading.Thread())\n threads[i].run=lambda: threadcode(worker)\n threads[i].start()\n threads[i].join(0.1)\n \n #wait for threads to finish\n for thread in threads:\n thread.join()\n \n aliveworkers=[worker for worker,result in results.items() if result==True]\n return aliveworkers",
"def run_all(self):\n results = []\n # Keep a loop going until all the tasks are gone:\n i = 0\n while self.tasks:\n i += 1\n time.sleep(0.0)\n print(f\"\\nOuter loop count: {i}\")\n # pop a task off the end\n task = self.tasks.pop()\n # run that task:\n try:\n res = task.send(None) # TaskLoop.run_all() - do_a_few_things() - count() - yield\n print(\"returned from send:\", res)\n self.tasks.insert(0, task) # move task to the begining of the list\n except StopIteration as si: # task completed yield return StopIteration exception\n results.append(si.args[0])\n print(\"task: {} result >>> {}\".format(task, si.args[0]))\n return results",
"def fetchObjects(self):\n try:\n for i in service.Service.get_workers():\n yield i\n except Exception as e:\n Events.Status.emit(f\"unable to fetch worker information: {e}\")",
"def _checker_worker(self):\n results = {}\n for cmd in self.check_cmds:\n res = subprocess.call(cmd.split(), stdout=open('/dev/null', 'w'))\n self.log(\"'%s' finished, result: %s\" % (cmd, res))\n results[cmd] = res\n if rospy.is_shutdown():\n return\n with self._lock:\n # just add results into the data structure\n self._results.add(results)",
"def after_collect_weather(msg, config, checklist):\n next_workers = {\n \"crash\": [],\n \"failure 2.5km 00\": [],\n \"failure 2.5km 06\": [],\n \"failure 2.5km 12\": [],\n \"failure 2.5km 18\": [],\n \"failure 1km 00\": [],\n \"failure 1km 12\": [],\n \"success 2.5km 00\": [],\n \"success 2.5km 06\": [],\n \"success 2.5km 12\": [],\n \"success 2.5km 18\": [],\n \"success 1km 00\": [],\n \"success 1km 12\": [],\n msg.type: after_download_weather(msg, config, checklist),\n }\n if msg.type.endswith(\"2.5km 00\"):\n if msg.type.startswith(\"success\"):\n grib_dir = Path(checklist[\"weather forecast\"][\"00 2.5km\"])\n fcst_date_yyyymmdd = grib_dir.parent.stem\n fcst_date = arrow.get(fcst_date_yyyymmdd, \"YYYYMMDD\").format(\"YYYY-MM-DD\")\n next_workers[\"success 2.5km 00\"].extend(\n [\n NextWorker(\"nowcast.workers.collect_weather\", args=[\"06\", \"2.5km\"]),\n NextWorker(\n \"nowcast.workers.crop_gribs\",\n args=[\"06\", \"--fcst-date\", fcst_date],\n ),\n ]\n )\n if msg.type.endswith(\"2.5km 06\"):\n if msg.type.startswith(\"success\"):\n next_workers, race_condition_workers = after_download_weather(\n msg, config, checklist\n )\n next_workers.extend(\n [\n NextWorker(\"nowcast.workers.collect_weather\", args=[\"12\", \"2.5km\"]),\n NextWorker(\"nowcast.workers.crop_gribs\", args=[\"12\"]),\n ]\n )\n return next_workers, race_condition_workers\n if msg.type.endswith(\"2.5km 12\"):\n if msg.type.startswith(\"success\"):\n next_workers, race_condition_workers = after_download_weather(\n msg, config, checklist\n )\n next_workers.extend(\n [\n NextWorker(\"nowcast.workers.collect_weather\", args=[\"18\", \"2.5km\"]),\n NextWorker(\"nowcast.workers.crop_gribs\", args=[\"18\"]),\n ]\n )\n return next_workers, race_condition_workers\n if msg.type.endswith(\"2.5km 18\"):\n if msg.type.startswith(\"success\"):\n grib_dir = Path(checklist[\"weather forecast\"][\"18 2.5km\"])\n fcst_date_yyyymmdd = grib_dir.parent.stem\n fcst_date = (\n arrow.get(fcst_date_yyyymmdd, \"YYYYMMDD\")\n .shift(days=+1)\n .format(\"YYYY-MM-DD\")\n )\n next_workers[\"success 2.5km 18\"].extend(\n [\n NextWorker(\"nowcast.workers.download_weather\", args=[\"00\", \"1km\"]),\n NextWorker(\"nowcast.workers.download_weather\", args=[\"12\", \"1km\"]),\n NextWorker(\"nowcast.workers.collect_weather\", args=[\"00\", \"2.5km\"]),\n NextWorker(\n \"nowcast.workers.crop_gribs\",\n args=[\"00\", \"--fcst-date\", fcst_date],\n ),\n ]\n )\n return next_workers[msg.type]",
"def mock_workers(task, num_workers):\n results = [\n [{\n \"name\": \"tweet\",\n \"value\": \"%d. Trump Trump everywhere not a Hillary to see.\" % x\n }] for x in range(num_workers)]\n return results",
"def after_make_ssh_files(msg, config, checklist):\n next_workers = {\n \"crash\": [],\n \"failure nowcast\": [],\n \"failure forecast2\": [],\n \"success nowcast\": [],\n \"success forecast2\": [],\n }\n if msg.type.startswith(\"success\"):\n next_workers[msg.type].append(\n NextWorker(\"nowcast.workers.make_v202111_runoff_file\")\n )\n next_workers[msg.type].append(NextWorker(\"nowcast.workers.make_runoff_file\"))\n return next_workers[msg.type]"
] |
[
"0.6667437",
"0.6196105",
"0.61376584",
"0.6127806",
"0.6084685",
"0.58928",
"0.58717954",
"0.5821175",
"0.57959896",
"0.57860565",
"0.5752882",
"0.57220596",
"0.5717673",
"0.5701846",
"0.5694865",
"0.56930745",
"0.5682493",
"0.567067",
"0.5606531",
"0.5564939",
"0.55476886",
"0.5514081",
"0.54987985",
"0.54836917",
"0.54835343",
"0.54747385",
"0.5439839",
"0.5435765",
"0.54162997",
"0.54041654"
] |
0.69760686
|
0
|
unpacks buffer contents into dictionary
|
def read(self, buf):
contents = dict()
for element in self.elements:
if element.offset + element.size > len(buf):
logger.trace("cannot unpack {} for {}.{} buffer too small {}",
element.name, element.block_name, element.block_version, len(buf))
contents[element.name] = None
continue
s, = struct.unpack_from(element.structure, buf, element.offset)
if element.decode:
s = element.decode(s)
contents[element.name] = s
return contents
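A self-contained sketch of the element layout this reader expects; SimpleNamespace is only a stand-in for whatever element type the real code defines, and the field values are made up for illustration.

import struct
from types import SimpleNamespace

elem = SimpleNamespace(name="voltage", block_name="demo", block_version=1,
                       offset=0, size=2, structure="<H",
                       decode=lambda raw: raw / 100.0)
buf = struct.pack("<H", 1234)  # two-byte little-endian unsigned short
# With self.elements = [elem], read(buf) returns {"voltage": 12.34};
# a buffer shorter than offset + size maps the element name to None instead.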
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def decode(cls, buffer: bytes) -> Dict[str, Any]:\n pstruct = Struct()\n pstruct.ParseFromString(buffer)\n dictionary = dict(pstruct)\n cls._patch_dict_restore(dictionary)\n return dictionary",
"def unpack (self, buffer):\n\t\timport struct\n\t\tvalues = struct.unpack (self.struct, buffer)\n\t\tj = 0\n\t\tfor i in self.structref:\n\t\t\tself.value[i[self.NAME]] = values[j]\n\t\t\tj = j + 1",
"def decode_buffer(buf):\n return buf.getvalue().decode('utf-8')",
"def get_dict_of_bytes2(self):\n pass",
"def _unpack(self, headerBytes):\n pass",
"def unpackFromBuffer(self, buffer):\n (self.inode_num, self.cdate, self.mdate, self.perms, self.level, flagVal, self.length, self.magic_number) \\\n = INodeFormat.unpack_from(buffer)\n self.flags = INodeType(flagVal)\n assert self.magic_number == INODE_MAGIC, \"Bad magic in INode.unpackFromBuffer\"\n\n # unpack the block pointers in the INode structure\n off = INodeFormat.size\n self.block_ptrs = [0] * INode.BlockPtrsPerInode\n\n for i in range(INode.BlockPtrsPerInode):\n (self.block_ptrs[i],) = BlockPointerFormat.unpack_from(buffer, off)\n off += 4",
"def RPC_decode(sock,blocking=False):\r\n # Get the dictionary length\r\n # Then, Get the dictionary\r\n if blocking:\r\n length = int(sock.recv(RPC_FIXED_SIZE,blocking=True))\r\n dict_str = sock.recv(length,blocking=True)\r\n else:\r\n length = int(sock.recv(RPC_FIXED_SIZE))\r\n dict_str = sock.recv(length)\r\n \r\n dict_obj = deserialize(dict_str) # Convert to object\r\n return dict_obj",
"def parse_bytes_to_dict(bytes_to_parse):\n return ast.literal_eval(bytes_to_parse.decode(\"utf-8\"))",
"def decode(fh):\n # (dmrs { ... })*",
"def _get_meas_dict_from_buffer(self, buffer):\n meas_dict = {}\n for meas in buffer:\n msg_id = self._get_meas_identifier(meas)\n if msg_id not in meas_dict:\n meas_dict[msg_id] = {\"bursts\" : [], \"explicit\":[]}\n if \"burst\" in meas.meas_type:\n meas_dict[msg_id][\"bursts\"].append( meas )\n else:\n meas_dict[msg_id][\"explicit\"].append( meas )\n return meas_dict",
"def readchunk(self):\n chunksize = self.readdword()\n chunktype = ChunkType(self.readword())\n chunkdata = self.readbytearr(chunksize - 6)\n return {\n \"type\": chunktype,\n \"data\": _ParseChunk(chunktype, chunkdata, self.PIXELSIZE),\n }",
"def unpack( self, key, data ) :\r\n\r\n return struct.unpack(self[key], data)",
"def from_buffer(self, buf):\n with self.lock:\n # if we're on python3, convert buf to bytes\n # otherwise this string is passed as wchar*\n # which is not what libmagic expects\n if type(buf) == str and str != bytes:\n buf = buf.encode('utf-8', errors='replace')\n return magic_buffer(self.cookie, buf)",
"def _decode_dict(data: BencodedString) -> dict:\n result_dict = {}\n data.del_prefix(1)\n\n while True:\n if data.bytes:\n if data.bytes[0] != END_MARKER:\n key = _decode(data)\n value = _decode(data)\n result_dict[key] = value\n else:\n data.del_prefix(1)\n break\n else:\n raise ValueError(\n \"Cannot decode a dictionary, reached end of the bencoded \"\n \"string before the end marker was found. Most likely the \"\n \"bencoded string is incomplete or incorrect.\"\n )\n\n return result_dict",
"def mmtf_bytes_to_mmtf_dict(bytestring):\n\n raw = msgpack.unpackb(bytestring)\n return decode_dict(raw)",
"def unpack(self, s):\n\n raise NotImplementedError()",
"def _decode_compound(fp):\n values = {}\n tag_type = ord(fp.read(1))\n while tag_type > 0:\n name = _decode_string(fp)\n values[name] = _MAP[tag_type](fp)\n tag_type = ord(fp.read(1))\n return values",
"def parse_data(self, byte_stream: BytesIO, header: Header) -> Dict[Any, Any]:\n return self.packet_type_to_parser[header.subpacket_id](byte_stream, header)",
"def _decode(self, parts: typing.List[int]) -> typing.Dict:\n info = {field.name: field.decode(parts[i]) for i, field in enumerate(self.fields)}\n return info",
"def message(self, byte_stream: BytesIO, header: Header):\n data: Dict = {}\n length: int = byte_stream.read(1)[0]\n\n # Two step: immutable int[] -> string\n byte_data = byte_stream.read(length)\n data[DataEntryIds.MESSAGE] = byte_data.decode('ascii')\n\n # Do something with data\n LOGGER.info(\"Incoming message: \" + str(data[DataEntryIds.MESSAGE]))\n return data",
"def _decode_25739(data):\n start_byte = 0\n n_bytes = 2\n var_id = struct.unpack('<H', data[start_byte:start_byte + n_bytes])[0]\n if var_id == 29987:\n start_byte += n_bytes\n n_bytes = 2\n var_size = struct.unpack('<H', data[start_byte:\n start_byte + n_bytes])[0]\n start_byte += n_bytes\n n_bytes = var_size\n return {'file_path': data[start_byte:\n start_byte + n_bytes].decode('utf-8')}",
"def parse_bytes_stream_from_message(msg: bytes,\n length_bytes: int,\n code_bytes: int\n ) -> Dict:\n\n code = int.from_bytes(msg[length_bytes:\n length_bytes + code_bytes],\n byteorder)\n data = msg[length_bytes + code_bytes:]\n\n return {\"code\": code,\n \"data\": data}",
"def fileobject_to_dict(fo):\n if fo.allocated():\n # proc = subprocess.Popen(['./extract_strings', fo.inode()], stdout=subprocess.PIPE)\n # contents = proc.stdout.read()\n return {\n 'atime_dt': epoch_to_dt(fo.atime()),\n 'compressed_b': fo.compressed(),\n 'contents_t': string.translate(fo.contents(), filter),\n 'contents_display': string.translate(fo.contents(), filter),\n 'crtime_dt': epoch_to_dt(fo.crtime()),\n 'ctime_dt': epoch_to_dt(fo.ctime()),\n 'dtime_dt': epoch_to_dt(fo.dtime()),\n 'encrypted_b': fo.encrypted(),\n 'extension_facet': fo.ext(),\n 'fileid_i': int(fo._tags['id']),\n 'filename_display': fo.filename(),\n 'filename_t': fo.filename(),\n 'filesize_l': long(fo.filesize()),\n 'fragments_i': int(fo.fragments()),\n 'gid_i': int(fo._tags['gid']),\n #'id': uuid.uuid4(),\n 'id': hashlib.sha1(os.path.basename(IMAGE) + '_' + fo.inode()).hexdigest(),\n #'imagefile': fo._tags['imagefile'],\n 'inode_i': int(fo.inode()),\n 'libmagic_display': fo.libmagic(),\n 'libmagic_facet': fo.libmagic(),\n 'md5_s': fo.md5(),\n 'meta_type_i': fo._tags['meta_type'],\n 'mode_facet': int(fo._tags['mode']),\n 'mode_i': int(fo._tags['mode']),\n 'mtime_dt': epoch_to_dt(fo.mtime()),\n 'nlink_i': fo._tags['nlink'],\n 'name_type_s': fo.name_type(),\n 'partition_i': int(fo.partition()),\n 'sha1_s': fo.sha1(),\n 'uid_i': int(fo._tags['uid']),\n 'volume_display': IMAGE,\n 'volume_facet': os.path.basename(IMAGE)\n }\n else:\n return None",
"def unpack_segment(self, segment: bytes) -> dict:\n unpacked = struct.unpack(HEADER_FORMAT + DATA_FORMAT, segment)\n unpacked = dict(zip(SEGMENT_KEYS, unpacked))\n unpacked['flag'] = Flag(unpacked['flag'])\n return unpacked",
"def decode_map(as_bytes: typing.List[int]) -> dict:\n raise NotImplementedError()",
"def bdecode_buffer(data):\n\tif isinstance(data, str):\n\t\tdata = data.encode()\n\twith BytesIO(data) as f:\n\t\treturn bdecode(f)",
"def unpack_serializable_from(\n buffer: bytes, offset: int = 0\n) -> tuple[serializable_lib.Serializable, int]:\n module_name, module_name_size = unpack_str_from(buffer, offset=offset)\n offset += module_name_size\n class_name, class_name_size = unpack_str_from(buffer, offset=offset)\n offset += class_name_size\n serializable_length, serializable_length_size = _unpack_length_from(\n buffer, offset=offset\n )\n offset += serializable_length_size\n serializable_bytes, *_ = struct.unpack_from(\n f'!{serializable_length}s', buffer, offset=offset\n )\n module = importlib.import_module(module_name)\n cls = getattr(module, class_name)\n value = cls.from_bytes(serializable_bytes)\n return value, (\n module_name_size\n + class_name_size\n + serializable_length_size\n + serializable_length\n )",
"def revert(self):\n headerdump = self.file.readp(0, 16)\n if sum(headerdump):\n dictat,dictlen = struct.unpack(\"<QQ\", headerdump)\n dictblob = self.file.readp(dictat, dictlen)\n self.keys = pickle.loads(dictblob)\n self.buffered = {}\n self.cache = {}\n self.awaitingpunch = []\n\n else:\n self.keys = {}\n self.buffered = {}\n self.cache = {}\n self.awaitingpunch = []",
"def processReadback(resp):\n a = np.fromstring(resp, dtype='<u1')\n return {\n 'build': a[51],\n 'serDAC': a[56],\n 'noPllLatch': bool((a[58] & 0x80) > 0),\n 'ackoutI2C': a[61],\n 'I2Cbytes': a[69:61:-1],\n 'executionCounter': (a[53] << 8) + a[52]\n }",
"def test_parse_bytes(parser):\n doc = parser.parse(b'{\"hello\": \"world\"}')\n assert doc.as_dict() == {'hello': 'world'}"
] |
[
"0.7675933",
"0.66703975",
"0.61456925",
"0.61004704",
"0.5928765",
"0.58829206",
"0.5864981",
"0.58403814",
"0.5826353",
"0.5808131",
"0.5780132",
"0.5750063",
"0.5699226",
"0.56634146",
"0.5656456",
"0.5635454",
"0.56325823",
"0.56243306",
"0.56127644",
"0.5570557",
"0.55615914",
"0.5541405",
"0.5535027",
"0.5522767",
"0.5503588",
"0.55032116",
"0.5477089",
"0.5466418",
"0.54635626",
"0.5452495"
] |
0.7160867
|
1
|
Translates a word into Pig Latin. The "word" parameter is assumed to be an English word, returned as a string.
|
def pig_latin(word):
if word[0] in 'aeiou':
return f'{word}way'
return f'{word[1:]}{word[0]}ay'
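Two worked examples consistent with the rule above:

# pig_latin("apple")  -> "appleway"   (leading vowel: append "way")
# pig_latin("python") -> "ythonpay"   (move the first consonant to the end, add "ay")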
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def pig_latin(word):\n if word[0] in 'aeiou':\n return f\"{word}way\"\n\n return f\"{word[1:]}{word[0]}ay\"",
"def pigLatinTranslator(word):\n\n vowels = \"aeiouAEIOU\"\n word = str(word)\n if not word.isalpha():\n return \"Please submit a single word.\"\n elif len(word) < 2:\n return \"Please submit a longer word.\"\n else:\n if word[0] in vowels:\n return word + \"yay\"\n for letter in word:\n word = word[1:] + word[0]\n if word[0] in vowels:\n return word + \"ay\"\n return word[1:] + word[0] + \"ay\"",
"def makePigLatin(word): \n m = len(word)\n vowels = \"a\", \"e\", \"i\", \"o\", \"u\", \"y\" \n # short words are not converted \n if m<3 or word==\"the\":\n return word\n else:\n for i in vowels:\n if word.find(i) < m and word.find(i) != -1:\n m = word.find(i)\n if m==0:\n return word+\"way\" \n else:\n return word[m:]+word[:m]+\"ay\"",
"def pig_latinify(word):\n result = \"\"\n if len(word) > 0 and word.isalpha():\n first = word[0]\n if is_vowel(first): # starts with a vowel\n result = str(word) + \"yay\"\n else: # starts with non-vowel\n cut = position_of_vowel(word) # where to cut the word\n if cut > 0: # \"street\"-->\"eet+str+ay\"\n result = word[cut:] + word[:cut] + \"ay\"\n else: # no vowel found\n result = word + \"ay\"\n else:\n result = 'Only letters allowed!'\n\n return result",
"def pig_latin(word):\n \n first_letter = word[0]\n rest_of_word = word[1 : ]\n \n # Student should complete function on the next lines.\n \n if first_letter == 'a' or first_letter == 'e' or first_letter == 'i' or first_letter == 'o' or first_letter == 'u':\n return word + \"way\"\n else:\n return rest_of_word + first_letter + \"ay\"",
"def pig_latinify(word):\n\n first_letter = word[0]\n\n if first_letter in VOWELS:\n output_word = word + \"yay\"\n else:\n #scan for vowel if word starts with a consonant\n for i in range(len(word)):\n individual_letter = word[i]\n if individual_letter in VOWELS:\n output_word = word[i:] + word[:i] + \"ay\"\n break\n else:\n continue\n\n return output_word",
"def word_to_pig_latin(word):\r\n\r\n # matches on a cluster of consonants\r\n pattern = re.compile(r'^[^aeiouAEIOU]+')\r\n\r\n if re.findall(r'^qu', word):\r\n # keeps qu together a la quiet\r\n pattern = re.compile(r'^qu')\r\n beginning = re.findall(pattern, word)\r\n word = pattern.sub('', word)\r\n word += str(beginning[0]) + 'ay'\r\n return word\r\n\r\n elif re.findall(r'[^aeiouAEIOU]y[^aeiouAEIOU]', word):\r\n # if y has a consonant on either side it treats it like a vowel\r\n pattern = re.compile(r'^[^aeiouAEIOUy]+')\r\n beginning = re.findall(pattern, word)\r\n word = pattern.sub('', word)\r\n word += str(beginning[0]) + 'ay'\r\n return word\r\n\r\n # stores the beginning match\r\n elif re.findall(pattern, word):\r\n beginning = re.findall(pattern, word)\r\n\r\n # pulls out those consonants and gets rid of them\r\n word = pattern.sub('', word)\r\n\r\n # adds the consonants onto the end of the word\r\n word += str(beginning[0]) + 'ay'\r\n return word",
"def pig_latin(word):\n first_letter = word[0]\n rest_of_word = word[1 : ]\n #print(\"First letter is\", first_letter)\n #print(\"rest_of_word is\", rest_of_word)\n if first_letter == 'a' or first_letter == 'e' or first_letter == 'i' or first_letter == 'o' or first_letter == 'u': \n pig_latin_word = word + 'way'\n else: \n pig_latin_word = rest_of_word + first_letter + 'ay'\n return pig_latin_word",
"def pig_latinify(word):\n\n # Identify a local variable for vowels.\n vowels = [\"a\",\"e\", \"i\", \"o\", \"u\"]\n\n # Assign the appropriate suffix depending if it is a vowel or consonant.\n vowel_suffix = \"yay\"\n consonant_suffix = \"ay\"\n\n pig_word = \"\"\n\n index = 0\n first_vowel = -1\n\n # Part 1: Find the beginning consonant cluster\n # Use for loop to check each letter until it reaches a vowel.\n for letter in word:\n if letter in vowels:\n first_vowel = index\n break\n else:\n index += 1\n\n # Part 2: Make the new word\n # No vowel found so it adds \"ay\" to the end of the word\n if first_vowel == -1:\n pig_word = word[:index] + word[index:] + consonant_suffix\n\n # First letter is a vowel, so it adds \"yay\" to the end of the word\n elif first_vowel == 0:\n pig_word = word + vowel_suffix\n\n # Take all the consonants up to the first vowels, add them to end, and add \"ay\"\n else:\n pig_word = word[first_vowel:] + word[0:first_vowel] + consonant_suffix\n\n return pig_word",
"def pig_word(self, original):\n word = original.lower()\n if word[0] in \"aeiou\":\n new_word = word + 'ay'\n else:\n new_word = word[1:] + word[0] + 'ay'\n return new_word",
"def pig_latin(phrase):\n\n\n # loop over each word in the phrase\n # in word[0] starts with aeiou\n # add yay to the end of that word\n # if word[0] starts with non aeiou\n # move word[0] to the end and add ay\n\n result = []\n\n for word in phrase.split():\n\n if word[0] in 'aeiou':\n\n result.append(word + 'yay')\n\n else:\n\n result.append(word[1:] + word[0] + 'ay')\n\n return \" \".join(result)",
"def translate(str):\r\n if isPig(str):\r\n return(PigToEnglish(str))\r\n return(EnglishToPig(str))",
"def pig_latin(s):\n s = s.lower()\n s = s.split()\n pig_s = []\n for i in s:\n if i[0] in 'aeiouAEIOU':\n i += 'way'\n pig_s.append(i)\n\n else:\n i = i[1:] + i[0] + 'ay'\n pig_s.append(i)\n\n return ' '.join(pig_s)",
"def latinize_word(word):\n if word[0].lower() in 'bcdfghjklmnpqrstvwxyz':\n word = word[1:] + word[0] + 'uzz'\n else:\n word += 'buzz'\n return word.lower()",
"def translate_leet(phrase):",
"def main():\n print(\"Translate English into Pig Latin.\\n\")\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~\\n\")\n while True:\n #get input string\n input_string = input(\"Enter a word to translate: \").lower()\n #split string into words array\n words = []\n start = 0\n end = 0\n while end != -1:\n end = input_string.find(' ', start)\n words.append(input_string[start:end])\n start = end + 1\n\n pig_latin = []\n for word in words:\n #check first letter\n if word[0] in CONSONANTS:\n #if consonant remove first letter and add -ay\n pig_latin_word = word[1:]\n pig_latin_word = pig_latin_word + word[0] + \"ay\"\n elif word[0] in VOWELS:\n #else add -way\n pig_latin_word = word + \"way\"\n pig_latin.append(pig_latin_word)\n\n print(\"\\n\\n\")\n for word in pig_latin:\n #print word\n print(word + \" \", end = '')\n print(\"\\n\\n\")\n\n #ask to stop\n stop = input(\"Translate another? (Enter else 'n' to quit) : \")\n #if 'n' break\n if stop.lower() == 'n':\n break\n input(\"Press enter to quit: \")",
"def pig_latin_sentence(sentence):\n output = []\n for word in sentence.split():\n output.append(pig_latin(word))\n\n return ' '.join(output)",
"def PigToEnglish(str):\r\n\r\n # TODO: Your code here\r\n\r\n\r\n # Change the return to return the converted string\r\n return(\"\")",
"def convert_all(text):\r\n\tpig_tokens = ''\r\n\r\n\t#tokenizes the text\r\n\ttokens = word_tokenize(text)\r\n\r\n\t#regex for non-alphabetical characters\r\n\tpattern = re.compile(r'[^a-zA-Z]')\r\n\r\n\t#converts the words to pig latin and appends them to the sentence.\r\n\tfor token in tokens:\r\n\t\tif not re.findall(pattern, token):\r\n\t\t\tword = word_to_pig_latin(token)\r\n\r\n\t\t\tif re.findall(r'[A-Z]', word):\r\n\t\t\t\tword = word.lower()\r\n\t\t\t\tword = word.capitalize()\r\n\t\t\tpig_tokens += ' ' + word\r\n\t\telse:\r\n\t\t\tpig_tokens += token\r\n\r\n\tpig_text = ''.join(pig_tokens)\r\n\r\n\treturn pig_text",
"def translate(self, word, context=None, pos_tag=None):\n #Get contextual translation from google translate\n par = {\"text\": word, \"raw\": \"raw\"}\n r = requests.post(self.translation_url, data=par)\n results = r.text\n translated_word = get_from_html_text(results, 'TRANSLATED_TEXT')\n \n #Perform lookup in the text file from the C# translator\n #if there is no match, take the best match from the bing file\n# print \"Translated: \", word, \" ->\", translated_word\n return translated_word",
"def translate(self):\n\t\tvowels = \"aeiou\"\n\n\t\tif (self.word[0] not in vowels) and (self.word[1] in vowels):\n\t\t\tnew_word = self.word[1:] + self.word[0] + \"ay\"\n\t\telif self.word[0] in vowels:\n\t\t\tnew_word = self.word + \"way\"\n\t\telse:\n\t\t\tnew_word = self.word[2:] + self.word[:2] + \"ay\"\n\n\t\tprint(new_word)",
"def EnglishToPig(str):\r\n\r\n # TODO: Your code here\r\n\r\n\r\n # Change the return to return the converted string\r\n return(\"\")",
"def translate(word: str) -> str:\n global LINE_DIVIDER\n\n parser = WiktionaryParser()\n def_ = parser.fetch(word.lower())\n ret = \"\"\n for word_payload in def_:\n definitions = word_payload['definitions']\n\n translations = {d['partOfSpeech']: LINE_DIVIDER.join(d['text'])\n for d in definitions}\n ret += LINE_DIVIDER.join(f\"{k}: {v}\" for k,v in translations.items())\n\n return ret",
"def translate(word, translateDict):\n\n translation = \"\"\n for char in word:\n translation += translateDict.get(char,\" \")\n\n return translation",
"def replace_word_candidate(self, word):\n capital_flag = word[0].isupper()\n word = word.lower()\n if capital_flag and word in self.teencode_dict:\n return self.replace_teencode(word).capitalize()\n elif word in self.teencode_dict:\n return self.replace_teencode(word)\n\n for couple in self.word_couples:\n for i in range(2):\n if couple[i] == word:\n if i == 0:\n if capital_flag:\n return couple[1].capitalize()\n else:\n return couple[1]\n else:\n if capital_flag:\n return couple[0].capitalize()\n else:\n return couple[0]",
"def __process_word(self, word):\n output = ''\n capitals = self.__capital_positions(word)\n c_index = 0\n\n for c in word:\n if c_index in capitals:\n output += c.upper()\n else:\n output += c.lower()\n\n c_index += 1\n\n return output",
"def normalize_word(self, word):\n # translates via 'None' (no change), provides punctuation for deletion\n return word.translate(None, string.punctuation).lower()",
"def pig_sentence(self, sentence):\n if len(sentence) > 0:\n result = \" \".join(self.pig_word(x) for x in sentence.split())\n return result\n else:\n return 'It looks like you entered nothing!'",
"def normalize_word(word):\n\n return word.lower()",
"def ms_transliterate_word(logger, word, lang_code=None, script_code=None):\n if lang_code is None:\n lang_code = 'hi'\n if script_code is None:\n script_code = 'Deva'\n params = {\n 'api-version' : '3.0',\n 'language' : lang_code,\n 'fromScript' : script_code,\n 'toScript' : 'Latn'\n }\n body = [{\n 'text': word\n }]\n\n request = requests.post(MS_TRANS_URL, headers=MS_REQUEST_HEADERS, params=params, json=body)\n if request.status_code == 200:\n response = request.json()\n trans = response[0]['text']\n else:\n logger.info(f\"Transliterate error with {request.status_code} for {word}\")\n trans = None\n return trans"
] |
[
"0.80773824",
"0.80713946",
"0.79174745",
"0.7726841",
"0.76903975",
"0.75911087",
"0.7572945",
"0.7533277",
"0.73740995",
"0.71870416",
"0.7040902",
"0.68419164",
"0.68339497",
"0.67273414",
"0.66776496",
"0.66121346",
"0.6572036",
"0.6488959",
"0.64439183",
"0.6443493",
"0.6410065",
"0.6401635",
"0.6224568",
"0.6173165",
"0.6068076",
"0.5993854",
"0.59739137",
"0.5970245",
"0.5940327",
"0.5939121"
] |
0.80754715
|
1
|
Cleans the database before fully installing the module by uninstalling old Soupese modules and remapping data
|
def _clean_database(self):
    # noinspection PyUnresolvedReferences
    env = self.env
    cr = env.cr
    modules_to_resolve = [
        'ch_vendor_info',
        'API_PDA_receiver',
        'delivery_report_custom',
        'myevo_base',
        'myevo_nobutton_sending_email',
        'myevo_web',
        'purchase_order_custom']
    # Remap ch_vendor_info data to soupese_base
    cr.execute("""UPDATE ir_model_data SET module = 'soupese_base' WHERE module = 'ch_vendor_info'""")
    # Delete soupese_base model records that already exist in the old modules
    cr.execute("""DELETE FROM ir_model_data WHERE module = 'soupese_base' AND name = 'model_res_users'""")
    # Remap API_PDA_receiver data to soupese_base
    cr.execute("""UPDATE ir_model_data SET module = 'soupese_base' WHERE module = 'API_PDA_receiver'""")
    # Remap data from the remaining legacy modules
    cr.execute("""UPDATE ir_model_data SET module = 'soupese_base' WHERE module = 'delivery_report_custom'""")
    cr.execute("""UPDATE ir_model_data SET module = 'soupese_base' WHERE module = 'myevo_base'""")
    cr.execute(
        """UPDATE ir_model_data SET module = 'soupese_base' WHERE module = 'myevo_nobutton_sending_email'""")
    cr.execute("""UPDATE ir_model_data SET module = 'soupese_base' WHERE module = 'myevo_web'""")
    # Delete soupese_base model records that already exist in the old modules
    cr.execute("""DELETE FROM ir_model_data WHERE module = 'soupese_base' AND name = 'model_measure_scale'""")
    cr.execute("""DELETE FROM ir_model_data WHERE module = 'soupese_base' AND name = 'model_pda_operation'""")
    cr.execute("""DELETE FROM ir_model_data WHERE module = 'soupese_base' AND name = 'model_res_partner'""")
    # Remap purchase_order_custom data to soupese_base
    cr.execute("""UPDATE ir_model_data SET module = 'soupese_base' WHERE module = 'purchase_order_custom'""")
    # Remove the stale 'module_<name>' records from base and mark the old modules uninstalled
    for x in modules_to_resolve:
        cr.execute("""
            DELETE FROM ir_model_data
            WHERE name = %s AND module = 'base' AND model = 'ir.module.module'""", ('module_' + x,))
        # Placeholders are passed unquoted: psycopg2 adds the quoting itself
        cr.execute("""UPDATE ir_module_module SET state = 'uninstalled' WHERE name = %s""", (x,))
    # Remove the obsolete vendor.information.scale table
    cr.execute("DROP TABLE IF EXISTS vendor_information_scale")
    # Commit so the cleanup persists before the module finishes installing
    cr.commit()
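
# Illustrative aside, not part of the original module: psycopg2 quotes bound
# parameters itself, so the %s placeholders above must stay unquoted.
# cursor.mogrify shows the client-side substitution (the DSN is hypothetical).
import psycopg2

conn = psycopg2.connect("dbname=test")  # hypothetical test database
cur = conn.cursor()
# Renders as: b"SELECT 1 FROM ir_model_data WHERE name = 'module_myevo_web'"
print(cur.mogrify("SELECT 1 FROM ir_model_data WHERE name = %s", ("module_myevo_web",)))
# Writing name = '%s' in the template would instead render as name = ''module_myevo_web'' (invalid SQL).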
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def clean_db(self):\n drop_all_product_lists(server=self.server_url,\n username=self.API_USERNAME,\n password=self.API_PASSWORD)\n drop_all_products(server=self.server_url,\n username=self.API_USERNAME,\n password=self.API_PASSWORD)",
"def clean_up():\n drop_all_tables()\n create_all()",
"def _purge_mlrun_db(self):\n self._delete_mlrun_db()\n self._scale_down_mlrun_deployments()",
"def plone4_cleanup(context):\n portal = api.portal.get()\n qi_tool = api.portal.get_tool('portal_quickinstaller')\n\n # first, make sure all our new un-installers are installed :-)\n installed = [p['id'] for p in qi_tool.listInstalledProducts()]\n reinstall = [p for p in REMOVE_PRODUCTS if p not in installed]\n for p in reinstall:\n log.info(\"Reinstalling first: %s\", p)\n qi_tool.installProducts([p])\n\n # nuke content that we will not support after upgrade\n catalog = api.portal.get_tool('portal_catalog')\n for (p, ctype) in REMOVE_CONTENT.items():\n if p not in REMOVE_PRODUCTS:\n continue\n log.info(\"Removing content: %s\", p)\n for brain in catalog(portal_type=ctype):\n api.content.delete(brain.getObject())\n\n # sjeez TTW persistent interface references\n pvc = portal.portal_view_customizations\n log.info(\"Removing portal_view_customizations TTW cruft\")\n pvc.manage_delObjects(pvc.objectIds())\n\n # remove all the unwanted cruft packages themselves\n for p in REMOVE_PRODUCTS:\n log.info(\"Uninstalling: %s\", p)\n qi_tool.uninstallProducts([p])\n\n # clear the archetypes reference catalog\n log.info(\"Clearing reference catalog\")\n portal.reference_catalog.manage_catalogClear()\n\n # clean cruft from catalog\n log.info(\"Rebuilding portal_catalog. This will take a loooong time.\")\n ctool = api.portal.get_tool('portal_catalog')\n ctool.clearFindAndRebuild()\n\n # migrate to plone.app.contenttypes\n log.info(\"Enabling plone.app.contenttypes\")\n qi_tool.installProducts(['plone.app.contenttypes'])\n\n log.info(\"Committing changes.\")\n transaction.commit()",
"def clean_database(self):\n for name in list(self.database):\n self._remove_database_entry(name)",
"def clean():\n user_init.clean_setup()",
"def reset_db_danger():\n from flask.ext.migrate import init, migrate\n # Remove the migration folder if exist\n if os.path.exists('migrations'):\n shutil.rmtree('migrations')\n\n # Remove the sqlite database files if exist\n for fl in glob.glob('*.sqlite'):\n os.remove(fl)\n\n # Reset Migration Database\n init()\n\n # migrate database to latest revision\n migrate(message='init')",
"def _clean_up(self):",
"def recreate():\n from data.seed import Seed\n\n if click.confirm(\"Are you sure you want to lose all your data\"):\n db.drop_all()\n db.create_all()\n Seed().data()",
"def tear_down():\n db.flush()\n for table in metadata.tables.values():\n db.execute(table.delete())",
"def cleanup(self):\n\n # uninstall sourcedata\n if self.conversion.install_dataset_path.exists():\n # without the ChangeWorkingDir the command does not operate inside\n # of dataset_path\n with utils.ChangeWorkingDir(self.dataset_path):\n datalad.uninstall(\n path=self.conversion.install_dataset_name,\n dataset=self.dataset_path,\n recursive=True\n )\n\n # remove bids conversion\n bids_dir = self._get_bids_dir()\n if bids_dir.exists():\n self.log.info(\"Remove %s\", bids_dir)\n shutil.rmtree(bids_dir)",
"def clear_db():\n from example_data import ExampleDataLoader\n ExampleDataLoader.clean_db()",
"def clean_db():\n db = get_db()\n tables = db.tables\n for table in tables:\n db[table].drop()",
"def rebuild_db():\n delete_db()\n create_db()\n insert_db()",
"def reset_db():\n db.drop_all()\n _init_db()",
"def tearDown(self):\r\n collection_prefix = SplitModuleTest.MODULESTORE['DOC_STORE_CONFIG']['collection'] + '.'\r\n if SplitModuleTest.modulestore:\r\n for collection in ('active_versions', 'structures', 'definitions'):\r\n modulestore().db.drop_collection(collection_prefix + collection)\r\n # drop the modulestore to force re init\r\n SplitModuleTest.modulestore = None\r\n super(SplitModuleTest, self).tearDown()",
"def prepareUninstall():\n pass",
"def clean_up(self):\n\t\tpass",
"def cleanUp(self):\r\n pass",
"def cleanUp(self):\r\n remove_files(self._db_files_to_remove, error_on_missing=False)",
"def clean_db(self):\n ## init local variable\n clean_repos = [repo for repo in self.repos if exists(repo)]\n remov_repos = [repo for repo in self.repos if not exists(repo)]\n\n ## Remove repos in data base\n if remov_repos != []:\n ## Print warning and which repo will be removed\n print \"/!\\\\ Following repo not exist and will be removed in the data base\"\n for repo in remov_repos:\n print \"/!\\\\ '%s' removed from data base\"%(repo)\n print\n\n ## Update data base file\n with open(self.fname, 'w') as f:\n f.write(\"\\n\".join(clean_repos))\n\n ## Update self.repos list\n self.repos = clean_repos",
"def clean_db():\n yield\n logging.info(\"Delete table\")\n db.delete_table(\"TestRules\")",
"def cleanup():\n cat = CSVCatalog.CSVCatalog()\n cat.drop_table(\"people\")\n cat.drop_table(\"batting\")\n cat.drop_table(\"teams\")",
"def clean_all_db():\n for model in [\n Component, Arch, AutoCase, AutoCaseFailure, Bug, Linkage, WorkItem,\n Document, Project, Framework]:\n model.objects.all().delete()",
"def recreate_db():\n drop_db()\n create_db()\n populate_db()",
"def clean_up(self):\n pass",
"def clean_up(self):\n pass",
"def db_cleanup(self):\n with self.context():\n meido.db.session.remove()\n meido.db.drop_all()",
"def tearDown(self):\n db.drop_all()",
"def tearDown(self):\n db.drop_all()"
] |
[
"0.68918467",
"0.6858987",
"0.67867607",
"0.6762056",
"0.67421025",
"0.66503596",
"0.66246384",
"0.66123",
"0.65286297",
"0.6502771",
"0.6472854",
"0.6469703",
"0.64444715",
"0.6402137",
"0.6390408",
"0.6326387",
"0.632179",
"0.63064533",
"0.629369",
"0.62919813",
"0.62892485",
"0.62847173",
"0.6278436",
"0.6269152",
"0.62592614",
"0.62335527",
"0.62335527",
"0.62289643",
"0.6223932",
"0.6223932"
] |
0.7934076
|
0
|
Transform a PropertiesList into a list of names
|
def property_list_to_str(properties: th.PropertiesList) -> List[str]:
return [name for (name, prop) in properties.items()]
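
# Minimal usage sketch; assumes `th` is singer_sdk's typing module, which the
# signature implies (PropertiesList / Property / StringType are singer_sdk names).
from singer_sdk import typing as th

props = th.PropertiesList(
    th.Property("id", th.IntegerType),
    th.Property("name", th.StringType),
)
print(property_list_to_str(props))  # -> ['id', 'name']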
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def getPropertyNames(self):\n return self._property_names",
"def list_to_names(names):\n names_list = []\n for n in names:\n names_list.append(names[n].details['name'])\n return names_list",
"def getPropertyNamesAsStrings(self):\n return self._propertyStringNames",
"def transform_property_info_list(se, prop_list, output_type):\n props = [{\"description\": _prop.get(\"description\"),\n \"domain\": transform_schemaclasses_lst(se,\n _prop.get(\"domain\"),\n output_type),\n \"range\": transform_schemaclasses_lst(se,\n _prop.get(\"range\"),\n output_type),\n \"curie\": se.cls_converter.get_curie(_prop.get(\"uri\")),\n \"label\": se.cls_converter.get_label(_prop.get(\"uri\")),\n \"uri\": _prop.get(\"uri\"),\n \"object\": se.get_property(_prop.get(\"uri\"))} for _prop in prop_list]\n return props",
"def take_name(List):\n list_of_names = []\n for i in range(len(List)):\n if isinstance(List[i], Attribute):\n x = List[i]\n list_of_names.append(x.Name)\n elif isinstance(List[i], list):\n list_of_names.append(take_name(List[i]))\n else:\n list_of_names.append(List[i])\n return list_of_names",
"def _getPropName(self):\n return self.properties.keys()",
"def merge_properties_lists(*properties_lists: th.PropertiesList) -> th.PropertiesList:\n result = th.PropertiesList()\n for properties_list in properties_lists:\n for name, prop in properties_list.items():\n result.append(prop)\n return result",
"def get_name_list(msh, varname):\n return [str(chartostring(v)) for v in msh.variables[varname]]",
"def _PropertiesToXml(self, properties):\n xml_properties = []\n\n for propname in properties:\n if not self.has_key(propname):\n continue\n\n propname_xml = saxutils.quoteattr(propname)\n\n values = self[propname]\n if not isinstance(values, list):\n values = [values]\n\n proptype = datastore_types.PropertyTypeName(values[0])\n proptype_xml = saxutils.quoteattr(proptype)\n\n escaped_values = self._XmlEscapeValues(propname)\n open_tag = u'<property name=%s type=%s>' % (propname_xml, proptype_xml)\n close_tag = u'</property>'\n xml_properties += [open_tag + val + close_tag for val in escaped_values]\n\n return xml_properties",
"def _PropList(self):\n prop_list = []\n\n if self.HASH_PROPERTIES is None and self.HASH_EXCLUDE is None:\n return prop_list\n\n # TODO(ckl): comprehensive list of \"internal\" properties\n exclude_list = self.HASH_EXCLUDE or tuple()\n exclude_list += metadata_api.GetFieldNames(self, ui_readonly=True)\n # TODO(raulg): The deleted can be removed from the exclude_list after all\n # records have been purged of deleted fields.\n exclude_list += ('deleted', 'key_subtype', 'key_order', 'key_name')\n\n for prop in self._properties:\n if '__' in prop and not prop.endswith('key_name'):\n continue\n if self.HASH_PROPERTIES is not None and prop not in self.HASH_PROPERTIES:\n continue\n if self.HASH_EXCLUDE is not None and prop in exclude_list:\n continue\n prop_list.append(prop)\n\n prop_list.sort()\n return prop_list",
"def getPeopleNames(the_list):\n new_list = []\n if type(the_list) == list:\n for person in the_list:\n if person['@type'] == \"Person\":\n new_list.append(person['name'])\n else:\n new_list.append(the_list['name'])\n return new_list",
"def getNames(self) -> List[unicode]:\n ...",
"def Student_names(l:list)->list:\n result=[]\n for s in l:\n result.append(s.name)\n return result",
"def _groupNamesToList(settings):\n return [getattr(GroupName, val) for val in settings.dhGroups]",
"def names(cls) -> List[str]:",
"def get_property_names(self, *, is_allprop):\n # Let default implementation return supported live and dead properties\n propNames = super().get_property_names(is_allprop=is_allprop)\n # Add fieldnames as properties\n tableName, primKey = self.provider._split_path(self.path)\n if primKey is not None:\n conn = self.provider._init_connection()\n fieldlist = self.provider._get_field_list(conn, tableName)\n for fieldname in fieldlist:\n propNames.append(\"{%s:}%s\" % (tableName, fieldname))\n conn.close()\n return propNames",
"def names(self) -> list[str]:",
"def list_property(\n self, key: str) -> Collection[Tuple[str, PropertyAttribute]]:\n return self._env.list_property(key)",
"def _clist(slist):\n retList = []\n if slist == None:\n return retList\n for p in slist:\n aobj = {}\n for prop in p.allProperties():\n if prop in IGNORED_PROPS:\n continue\n tmpval = p.valueForProperty_(prop)\n if type(tmpval) == ABMultiValueCoreDataWrapper:\n aval = [(_getVal(tmpval.labelAtIndex_(i)),\n _getVal(tmpval.valueAtIndex_(i)))\n for i in range(0, tmpval.count())]\n else:\n aval = _getVal(tmpval)\n if aval is not None:\n aobj[prop.lower()] = aval\n retList.append(aobj)\n return retList",
"def extract_full_names(people):\n result = []\n \n for lst in names:\n x = ''\n for name in lst.values():\n x += ' ' + name \n x = x[1:] \n result.append(x)\n return result",
"def names(self) -> List:\n ...",
"def get_names(self):\n\n # log.debug(str(inspect.stack()[1][3]) + \" --> OC.get_names()\")\n return [x.options['name'] for x in self.get_list()]",
"def get_names(self):\r\n names = []\r\n for p in self.people:\r\n names.append(p.get_name())\r\n return names",
"def getOptions(self, propertyListName: unicode) -> ghidra.framework.options.Options:\n ...",
"def named_objs(objlist):\n objs = []\n for k, obj in objlist:\n if hasattr(k, '__name__'):\n k = k.__name__\n else:\n k = as_unicode(k)\n objs.append((k, obj))\n return objs",
"def get_cached_property_names(self): # real signature unknown; restored from __doc__\n return []",
"def namelist(self):\n return self._handle.namelist()",
"def namelist(self):\n return self._handle.namelist()",
"def namelist(self):\n return self._handle.getnames()",
"def namelist(self):\n return self._handle.getnames()"
] |
[
"0.6508711",
"0.6318532",
"0.62844825",
"0.61966",
"0.6056604",
"0.60518324",
"0.603768",
"0.60026366",
"0.5970194",
"0.59697604",
"0.5968708",
"0.5859622",
"0.58432716",
"0.58058524",
"0.5804858",
"0.5763744",
"0.5757145",
"0.57084846",
"0.56631404",
"0.5659311",
"0.55932015",
"0.55839205",
"0.5580559",
"0.55706656",
"0.5560634",
"0.55561125",
"0.5550095",
"0.5550095",
"0.5549343",
"0.5549343"
] |
0.779912
|
0
|
Merge multiple PropertiesList objects into a single one
|
def merge_properties_lists(*properties_lists: th.PropertiesList) -> th.PropertiesList:
result = th.PropertiesList()
for properties_list in properties_lists:
for name, prop in properties_list.items():
result.append(prop)
return result
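
# Usage sketch under the same assumption that `th` is singer_sdk's typing
# helper; PropertiesList supports .items() and .append() as the code implies.
from singer_sdk import typing as th

base = th.PropertiesList(th.Property("id", th.IntegerType))
extra = th.PropertiesList(th.Property("name", th.StringType))
merged = merge_properties_lists(base, extra)
print([name for name, _ in merged.items()])  # -> ['id', 'name']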
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def merge_list(metas: List[ProjectMeta]) -> ProjectMeta:\n res_meta = ProjectMeta()\n for meta in metas:\n res_meta = res_meta.merge(meta)\n return res_meta",
"def update_multiple_objects_properties(self, object_list):\n\n #if self.settings.LOG_VERBOSE and self.settings.ENABLE_OBJECT_LOGGING: logger.debug(\"Processing multiple object properties updates: %s\" % (len(object_list)))\n\n for object_properties in object_list:\n\n self.update_object_properties(object_properties)",
"def update_values (self, property_list, dest_dict, overwrite_init=False ):\r\n for property in property_list:\r\n visible = True\r\n for attr in property[\"attrs\"]:\r\n if attr[\"type\"] == \"private\":\r\n if attr[\"value\"].lower() == 'true':\r\n visible = False\r\n\r\n if not visible:\r\n continue\r\n\r\n value = {} # Value dict, format: {value, dirty}\r\n value_list = [] # List for multiple values\r\n init_value_list = []\r\n init_value = {}\r\n # Add all the values (object with multiple getters) \r\n # to a list of values\r\n for getter in property['getters']:\r\n getter_name = getter['name']\r\n getter_value = self.execute_getter( getter_name, self.object )\r\n if str(getter_value).startswith('('):\r\n python_value = self.__get_python_format(\r\n property['params'], \r\n getter_value \r\n )\r\n else:\r\n python_value = [getter_value]\r\n\r\n value_list.append(python_value)\r\n if overwrite_init:\r\n init_value_list.append(python_value)\r\n\r\n value['value'] = value_list # Property value\r\n value['dirty'] = False # Must save or not\r\n value['inheritance'] = self.mode # Used for revert\r\n \r\n if overwrite_init:\r\n init_value['value'] = init_value_list\r\n init_value['dirty'] = False \r\n init_value['inheritance'] = self.mode\r\n \r\n dest_dict[property['name']] = value\r\n \r\n if overwrite_init:\r\n self.init_values[property['name']] = init_value",
"def aggregate_input_properties(component_list):\n return combine_component_properties(component_list, 'input_properties')",
"def _combine(self, results_list):\n pass",
"def add_properties(self, *args):\n for s in args:\n if s not in self.properties:\n self.properties.append(s)",
"def transform_property_info_list(se, prop_list, output_type):\n props = [{\"description\": _prop.get(\"description\"),\n \"domain\": transform_schemaclasses_lst(se,\n _prop.get(\"domain\"),\n output_type),\n \"range\": transform_schemaclasses_lst(se,\n _prop.get(\"range\"),\n output_type),\n \"curie\": se.cls_converter.get_curie(_prop.get(\"uri\")),\n \"label\": se.cls_converter.get_label(_prop.get(\"uri\")),\n \"uri\": _prop.get(\"uri\"),\n \"object\": se.get_property(_prop.get(\"uri\"))} for _prop in prop_list]\n return props",
"def get_properties_for_a_collection_of_objects(vim, type,\n obj_list, properties):\n client_factory = vim.client.factory\n if len(obj_list) == 0:\n return []\n prop_spec = get_prop_spec(client_factory, type, properties)\n lst_obj_specs = []\n for obj in obj_list:\n lst_obj_specs.append(get_obj_spec(client_factory, obj))\n prop_filter_spec = get_prop_filter_spec(client_factory,\n lst_obj_specs, [prop_spec])\n return retrieve_properties_ex(vim,\n vim.service_content.propertyCollector,\n [prop_filter_spec])",
"def merge_from_obj(self, obj, lists_only=False):\n self.clean()\n obj.clean()\n obj_config = obj._config\n all_props = self.__class__.CONFIG_PROPERTIES\n for key, value in six.iteritems(obj_config):\n attr_config = all_props[key]\n attr_type, default, __, merge_func = attr_config[:4]\n if (merge_func is not False and value != default and\n (not lists_only or (attr_type and issubclass(attr_type, list)))):\n self._merge_value(attr_type, merge_func, key, value)",
"def merge(*, list1 : Union[List[Any], ConduitVariable], list2 : Union[List[Any], ConduitVariable]) -> List[Any]:\n return [*list1, *list2]",
"def mergeWith(self, others):",
"def get_properties_for_a_collection_of_objects(vim, type,\r\n obj_list, properties):\r\n client_factory = vim.client.factory\r\n if len(obj_list) == 0:\r\n return []\r\n prop_spec = get_prop_spec(client_factory, type, properties)\r\n lst_obj_specs = []\r\n for obj in obj_list:\r\n lst_obj_specs.append(get_obj_spec(client_factory, obj))\r\n prop_filter_spec = get_prop_filter_spec(client_factory,\r\n lst_obj_specs, [prop_spec])\r\n return vim.RetrieveProperties(vim.get_service_content().propertyCollector,\r\n specSet=[prop_filter_spec])",
"def merger(self, *lists):\n\t\tself.merged=[]\n\t\tfor i in range(len(lists[0][0])):\n\t\t\tself.temp=[]\n\t\t\tfor j in range(len(lists[0])):\n\t\t\t\tself.temp.append(lists[0][j][i])\n\t\t\tself.merged.append(self.temp)\n\t\treturn self.merged",
"def _clist(slist):\n retList = []\n if slist == None:\n return retList\n for p in slist:\n aobj = {}\n for prop in p.allProperties():\n if prop in IGNORED_PROPS:\n continue\n tmpval = p.valueForProperty_(prop)\n if type(tmpval) == ABMultiValueCoreDataWrapper:\n aval = [(_getVal(tmpval.labelAtIndex_(i)),\n _getVal(tmpval.valueAtIndex_(i)))\n for i in range(0, tmpval.count())]\n else:\n aval = _getVal(tmpval)\n if aval is not None:\n aobj[prop.lower()] = aval\n retList.append(aobj)\n return retList",
"def auto_property_list(self, prop_reader_cls, offset_addr, n_offsets, n_items_per_sub_list=0, sub_list_prefix=''):\n prop_reader = prop_reader_cls()\n use_sub_lists = n_items_per_sub_list > 0\n prop_list = {}\n\n # If we don't have sub lists in the property list, just use the main prop_list as the current sub list\n if use_sub_lists:\n sub_list = None\n n_sub_lists = (n_offsets - 1) // n_items_per_sub_list\n else:\n sub_list = prop_list\n n_sub_lists = 1\n\n # Initialize read\n data_addr = offset_addr + n_offsets * 4\n offset = self.uint32()\n\n for prop_id in range(n_offsets - 1):\n # Maybe update sub list\n if use_sub_lists:\n sub_list_id = prop_id // n_sub_lists\n prop_id = prop_id % n_sub_lists\n\n if prop_id == 0:\n prop_list[f'{sub_list_prefix}{sub_list_id}'] = sub_list = {}\n\n # Read the property\n next_offset = self.uint32()\n with self.offset_context(data_addr + offset):\n data = prop_reader.read(prop_id, self, next_offset - offset)\n sub_list.update(data)\n\n offset = next_offset\n\n # Set the pointer after the data\n self.seek(data_addr + offset)\n\n return prop_list",
"def merge(self, registry):\n for property_name, property_item in registry.items():\n if property_name not in self:\n self.set_property(property_item)",
"def combine(self, patch):\n exclusive = set([\"config\", \"default\", \"mandatory\", \"presence\",\n \"min-elements\", \"max-elements\"])\n kws = set([s.keyword for s in self.slist]) & exclusive\n add = [n for n in patch.slist if n.keyword not in kws]\n self.slist.extend(add)",
"def merge_configurations(config_list):\n current_config = {}\n for config in config_list:\n current_config = merge(current_config, config)\n\n return current_config",
"def merge_lists(l1, l2):\n return [ *l1, *l2 ]",
"def merge(*args):\n return reduce(list.__add__, args, list())",
"def getPropertiesAll():",
"def SetProperties(self, prop_lst):\n # Parses Property list, ignoring all bad values\n for prop in prop_lst:\n if len(prop) != 2:\n continue\n else:\n if not isinstance(prop[0], basestring) or not \\\n isinstance(prop[1], basestring):\n continue\n else:\n self.SetProperty(prop[0], prop[1])\n return True",
"def merge_lists(src, new):\n l_min, l_max = (src, new) if len(src) < len(new) else (new, src)\n\n l_min.extend(None for i in range(len(l_min), len(l_max)))\n\n for i, val in enumerate(new):\n if isinstance(val, dict) and isinstance(src[i], dict):\n new[i] = merge_dicts(src[i], val)\n elif isinstance(val, list) and isinstance(src[i], list):\n new[i] = merge_lists(src[i], val)\n elif val is not None:\n new[i] = val\n else:\n new[i] = src[i]\n\n return new",
"def right_merge(self,list_to_merge):\n self.items = self.items + list_to_merge\n return self.items",
"def test_merge_many_parameters(self):\n sup = StackUpdateParameter({\n 'parameters': {\n 'param1': 'value1change',\n 'param2': 'value2-no-change',\n 'param3': 'value3change',\n 'paramx': 'value-x-not-existing'},\n 'version': 1,\n 'stackName': 'bla',\n 'region': 'eu-west-1'})\n\n original_parameters = [{\n 'ParameterKey': 'param1',\n 'ParameterValue': 'value1-to-be-changed'\n }, {\n 'ParameterKey': 'param2',\n 'ParameterValue': 'value2-no-change'\n }, {\n 'ParameterKey': 'param3',\n 'ParameterValue': 'value3-to-be-changed'\n }, {\n 'ParameterKey': 'param4',\n 'ParameterValue': 'value4-not-updated'\n }]\n\n expected_output = [{\n 'ParameterKey': 'param3',\n 'ParameterValue': 'value3change'\n }, {\n 'ParameterKey': 'param1',\n 'ParameterValue': 'value1change'\n }, {\n 'ParameterKey': 'param2',\n 'UsePreviousValue': True\n }, {\n 'ParameterKey': 'param4',\n 'UsePreviousValue': True\n }]\n result = sup.merge(original_parameters)\n\n self.assertItemsEqual(result, expected_output)",
"def ImportFromPropertyValues(self, propertyvalues, overwrite = False):\n result = []\n for pv in iter(propertyvalues):\n key = pv.Name\n if overwrite is True or key not in self:\n item = pv.Value\n if 'com.sun.star.util.DateTime' in repr(type(item)):\n item = datetime.datetime(item.Year, item.Month, item.Day,\n item.Hours, item.Minutes, item.Seconds, int(item.NanoSeconds / 1000))\n elif 'com.sun.star.util.Date' in repr(type(item)):\n item = datetime.datetime(item.Year, item.Month, item.Day)\n elif 'com.sun.star.util.Time' in repr(type(item)):\n item = datetime.datetime(item.Hours, item.Minutes, item.Seconds, int(item.NanoSeconds / 1000))\n result.append((key, item))\n self.update(result)\n return True",
"def _merge_lists(list1, list2):\n for v2 in reversed(list2):\n if isinstance(v2, Descriptor):\n if v2 in list1:\n v1 = list1.pop(list1.index(v2))\n list1.insert(0, v1.merge(v2))\n else:\n list1.insert(0, v2)\n elif isinstance(v2, list):\n raise CekitError(\"Cannot merge list of lists\")\n else:\n if v2 not in list1:\n list1.insert(0, v2)\n\n return list1",
"def merge_with_comment_phrase(words, *list_properties):\n properties = []\n for arg in list_properties: properties.append(arg)\n for k in range(len(words)-1,0,-1):\n if re.match('^(\\(.*?\\))', words[k].strip()):\n words[k-1] = ''.join([words[k-1], re.search('( *\\(.*?\\))', words[k].strip()).groups()[0]])\n words[k] = re.sub('(\\(.*?\\))', '', words[k])\n if words[k].strip(', ') == '':\n words[k-1] = ''.join([words[k-1], words[k]])\n del words[k]\n for type_format in properties:\n del type_format[k]\n return tuple([words]+properties)",
"def update_metadata(metadata_src_lst, metadata_dest_lst):\n if metadata_src_lst and metadata_dest_lst:\n if not isinstance(metadata_dest_lst[0], list): # annotation from one rater only\n metadata_dest_lst[0]._update(metadata_src_lst[0], TRANSFORM_PARAMS)\n else: # annotations from several raters\n for idx, _ in enumerate(metadata_dest_lst[0]):\n metadata_dest_lst[0][idx]._update(metadata_src_lst[0], TRANSFORM_PARAMS)\n return metadata_dest_lst",
"def merge(self, hps, overwrite=True):\n if isinstance(hps, HyperParameters):\n hps = hps.space\n for hp in hps:\n self._retrieve(\n hp.name,\n hp.__class__.__name__,\n hp.get_config(),\n overwrite=overwrite)"
] |
[
"0.6601269",
"0.624817",
"0.60950977",
"0.5915258",
"0.58907413",
"0.5888157",
"0.58808917",
"0.56685555",
"0.5652203",
"0.56403077",
"0.5575447",
"0.5571971",
"0.5523679",
"0.55089104",
"0.55035204",
"0.5478272",
"0.5384981",
"0.53160214",
"0.5303527",
"0.52926284",
"0.5290993",
"0.52901787",
"0.5256115",
"0.52304906",
"0.5215124",
"0.52109253",
"0.5189911",
"0.5180176",
"0.5169682",
"0.5158192"
] |
0.8291769
|
0
|
r"""Downsample a batch of 2D images with the given filter. Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]` and downsamples each image with the given filter. The filter is normalized so that if the input pixels are constant, they will be scaled by the specified `gain`. Pixels outside the image are assumed to be zero, and the filter is padded with zeros so that its shape is a multiple of the downsampling factor.
|
def downsample_2d(x, k=None, factor=2, gain=1, data_format='NHWC'):
assert isinstance(factor, int) and factor >= 1
if k is None:
k = [1] * factor
k = _setup_kernel(k) * gain
p = k.shape[0] - factor
return _simple_upfirdn_2d(
x,
k,
down=factor,
pad0=(p + 1) // 2,
pad1=p // 2,
data_format=data_format)
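
# Self-contained numpy illustration of the gain/normalization claim above,
# assuming _setup_kernel follows the usual outer-product-then-normalize
# convention (separable 1-D kernel -> 2-D kernel summing to 1, then * gain).
import numpy as np

def _box_kernel(factor, gain=1):
    k = np.ones(factor, dtype=np.float64)
    k = np.outer(k, k)
    return k / k.sum() * gain

k = _box_kernel(2, gain=1)          # the k=None default: a 2x2 box filter
const_patch = np.full((2, 2), 5.0)  # constant input pixels
print((const_patch * k).sum())      # -> 5.0: constant inputs are scaled by gain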
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def conv_downsample_2d(x, w, k=None, factor=2, gain=1, data_format='NHWC'):\n\n assert isinstance(factor, int) and factor >= 1\n convH, convW, _inC, _outC = w.shape\n assert convW == convH\n if k is None:\n k = [1] * factor\n k = _setup_kernel(k) * gain\n p = (k.shape[0] - factor) + (convW - 1)\n s = [factor, factor]\n x = _simple_upfirdn_2d(x, k, pad0=(p + 1) // 2,\n pad1=p // 2, data_format=data_format)\n\n return jax.lax.conv_general_dilated(\n x,\n w,\n window_strides=s,\n padding='VALID',\n dimension_numbers=(data_format, 'HWIO', data_format))",
"def downsample_2d(x, k=None, factor=2, gain=1, data_format='NCHW', impl='cuda'):\r\n\r\n assert isinstance(factor, int) and factor >= 1\r\n if k is None:\r\n k = [1] * factor\r\n k = Oncuda._setup_kernel(k) * gain\r\n p = k.shape[0] - factor\r\n return Oncuda._simple_upfirdn_2d(x, k, down=factor, pad0=(p+1)//2, pad1=p//2, data_format=data_format, impl=impl)",
"def downsample(y, u, n, nsper=None, keep=False):\n # axis to operate along\n axis = 0\n\n # filter and downsample\n # prime factor decomposition.\n for k in prime_factor(n):\n y = decimate(y, q=k, ftype='fir', axis=axis)\n\n # index for downsampling u\n sl = [slice(None)] * u.ndim\n sl[axis] = slice(None, None, n)\n u = u[sl]\n\n # Removal of the last simulated period to eliminate the edge effects\n # due to the low-pass filter.\n if not keep:\n y = y[..., :-1]\n u = u[..., :-1]\n\n return u, y",
"def deconv2d(layer_input, skip_input, filters, f_size=3, dropout_rate=0, padding='same', strides=2):\n u = UpSampling2D(size=2)(layer_input)\n u = Conv2D(filters, kernel_size=f_size, strides=strides, padding=padding, activation='relu')(u)\n if dropout_rate:\n u = Dropout(dropout_rate)(u)\n u = BatchNormalization()(u)\n u = UpSampling2D(size=2)(u)\n u = Concatenate()([u, skip_input])\n return u\n\n # Image input",
"def conv_downsample_2d(x, w, k=None, factor=2, gain=1, data_format='NCHW', impl='cuda', gpu=True):\r\n\r\n assert isinstance(factor, int) and factor >= 1\r\n w = tf.convert_to_tensor(w)\r\n convH, convW, _inC, _outC = w.shape.as_list()\r\n assert convW == convH\r\n if k is None:\r\n k = [1] * factor\r\n k = Oncuda._setup_kernel(k) * gain\r\n p = (k.shape[0] - factor) + (convW - 1)\r\n if data_format == 'NCHW':\r\n s = [1, 1, factor, factor]\r\n else:\r\n s = [1, factor, factor, 1]\r\n x = Oncuda._simple_upfirdn_2d(x, k, pad0=(p+1)//2, pad1=p//2, data_format=data_format, impl=impl, gpu=gpu)\r\n return tf.nn.conv2d(x, w, strides=s, padding='VALID', data_format=data_format)",
"def filter_2d(x, k, gain=1, data_format='NCHW', impl='cuda'):\r\n\r\n k = Oncuda._setup_kernel(k) * gain\r\n p = k.shape[0] - 1\r\n return Oncuda._simple_upfirdn_2d(x, k, pad0=(p+1)//2, pad1=p//2, data_format=data_format, impl=impl)",
"def deconv2d(layer_input, skip_input, filters, f_size=4, dropout_rate=0):\n u = UpSampling2D(size=2)(layer_input)\n u = Conv2D(filters, kernel_size=f_size, strides=1, padding='same', activation='relu')(u)\n if dropout_rate:\n u = Dropout(dropout_rate)(u)\n u = BatchNormalization(momentum=0.8)(u)\n u = Concatenate()([u, skip_input])\n return u",
"def deconv2d(layer_input, skip_input, filters, f_size=3, dropout_rate=0, padding='same', strides=2):\n u = UpSampling2D(size=2)(layer_input)\n u = Conv2D(filters, kernel_size=f_size, strides=strides, padding=padding, activation='relu')(u)\n if dropout_rate:\n u = Dropout(dropout_rate)(u)\n u = BatchNormalization()(u)\n u = UpSampling2D(size=2)(u)\n u = Concatenate()([u, skip_input])\n return u",
"def downsampling(inp_img):\n\n\n img = np.array(inp_img)\n f = max(1, np.rint(np.amin(img)/256))\n\n if f > 1:\n lpf = np.ones((f, f))\n f = (1/(f*f))*lpf\n img = cv2.filter2D(img, -1, kernel=f)\n out = np.hstack((img[:, :, 0], img[:, :, 1], img[:, :, 2]))\n\n return out",
"def deconv2d(layer_input, filters, f_size=8, dropout_rate=0,permanent=False):\n u = UpSampling2D(size=2)(layer_input)\n u = Conv2D(filters, kernel_size=f_size, strides=1, padding='same', activation='relu')(u)\n if dropout_rate and not permanent:\n u = Dropout(dropout_rate)(u)\n elif dropout_rate and permanent:\n # permanent droput from my main man fchollet <3\n u=Lambda(lambda x: K.dropout(x, level=dropout_rate))(u) \n \n u = BatchNormalization(momentum=0.8)(u)\n return u",
"def down_sample(num_filters):\n out = nn.HybridSequential()\n for _ in range(2):\n out.add(nn.Conv2D(num_filters, 3, strides=1, padding=1))\n out.add(nn.BatchNorm(in_channels=num_filters))\n out.add(nn.Activation('relu'))\n out.add(nn.MaxPool2D(2))\n return out",
"def make_downsample_layer(self, num_filter, stride):\n if stride != 1:\n downsample = tf.keras.Sequential()\n downsample.add(tf.keras.layers.Conv2D(filters=num_filter,\n kernel_size=(1, 1),\n strides=stride))\n downsample.add(tf.keras.layers.BatchNormalization())\n else:\n downsample = lambda x: x\n return downsample",
"def downsample(self, factor):\n self.img = self.img[::factor, ::factor, :] if self.fast else self.img\n self.comb_structure_mask = self.comb_structure_mask[::factor, ::factor]\n self.unknown_mask = self.unknown_mask[::factor, ::factor]",
"def degrade_image(im, psf, downsample, shift_range):\n\n shift = np.random.randint(shift_range[0], shift_range[1], (1, 2))[0]\n\n # Add shift\n im = fourier_shift(np.fft.fftn(im), shift)\n im = np.fft.ifftn(im).real\n\n # Blur and downsample\n im = convolve2d(im, psf)\n im = downscale_local_mean(im, (downsample, downsample))\n\n return im",
"def downsample2d(inputArray, kernelSize):\n average_kernel = np.ones((kernelSize,kernelSize))\n\n blurred_array = sig.convolve2d(inputArray, average_kernel, mode='same')\n downsampled_array = blurred_array[::kernelSize,::kernelSize]\n return downsampled_array",
"def conv2d_backprop_filter( # pylint: disable=redefined-builtin,dangerous-default-value\n input,\n filter_sizes,\n out_backprop,\n strides,\n padding,\n use_cudnn_on_gpu=True,\n data_format=\"NHWC\",\n dilations=[1, 1, 1, 1],\n name=None):\n padding, explicit_paddings = convert_padding(padding)\n return gen_nn_ops.conv2d_backprop_filter(\n input, filter_sizes, out_backprop, strides, padding, use_cudnn_on_gpu,\n explicit_paddings, data_format, dilations, name)",
"def upsample(x, filters):\n x = tf.keras.layers.Conv2DTranspose(\n filters, kernel_size=3, strides=2, padding='same', use_bias=True)(\n x)\n x = tf.keras.layers.LeakyReLU(alpha=0.2)(x)\n return x",
"def downsample_batch(imstack, fac=2, method=\"PIL\"):\n\n if method == \"PIL\":\n out = np.zeros(\n (\n imstack.shape[0],\n imstack.shape[1] // fac,\n imstack.shape[2] // fac,\n imstack.shape[3],\n ),\n \"float32\",\n )\n if out.shape[-1] == 3:\n # this is just an RGB image, so no need to loop over channels with PIL\n for i in range(imstack.shape[0]):\n out[i] = np.array(\n PIL.Image.fromarray(imstack[i].astype(\"uint8\")).resize(\n (out.shape[2], out.shape[1]), resample=PIL.Image.LANCZOS\n )\n )\n else:\n for i in range(imstack.shape[0]):\n for j in range(imstack.shape[3]):\n out[i, :, :, j] = np.array(\n PIL.Image.fromarray(imstack[i, :, :, j]).resize(\n (out.shape[2], out.shape[1]), resample=PIL.Image.LANCZOS\n )\n )\n\n elif method == \"dsm\":\n out = np.zeros(\n (\n imstack.shape[0],\n imstack.shape[1] // fac,\n imstack.shape[2] // fac,\n imstack.shape[3],\n ),\n \"float32\",\n )\n for i in range(imstack.shape[0]):\n for j in range(imstack.shape[3]):\n out[i, :, :, j] = dsm(imstack[i, :, :, j], (fac, fac))\n\n elif method == \"nn\":\n out = imstack[:, ::fac, ::fac]\n\n elif fac > 1:\n raise Exception(\"Downfac > 1. Not a valid downsampling method\")\n\n return out",
"def convolveAndDownsample(img):\n # Select every other pixel from G\n G = sp.signal.convolve2d(img, guassianFilter, 'same')\n return G[::2, ::2]",
"def down_sampling(record, down_sampling_factor=16):\n\n if len(record.shape) == 1:\n return record[slice(0, record.shape[0], down_sampling_factor)]\n else:\n row_idx = np.arange(record.shape[0])\n col_idx = np.arange(0, record.shape[1], down_sampling_factor)\n\n return record[np.ix_(row_idx, col_idx)]",
"def blur_img(img,key='blur_small',stride=1,pad_type='None'):\n\n kernel = kernel_bank[key]\n return img_conv_2D(img,kernel,stride,pad_type)",
"def make_downsample_filt_tensor(SR=16000, ENV_SR=200, WINDOW_SIZE=1001, pycoch_downsamp=False):\n DOWNSAMPLE = SR/ENV_SR\n if not pycoch_downsamp: \n downsample_filter_times = np.arange(-WINDOW_SIZE/2,int(WINDOW_SIZE/2))\n downsample_filter_response_orig = np.sinc(downsample_filter_times/DOWNSAMPLE)/DOWNSAMPLE\n downsample_filter_window = signal.kaiser(WINDOW_SIZE, 5)\n downsample_filter_response = downsample_filter_window * downsample_filter_response_orig\n else: \n max_rate = DOWNSAMPLE\n f_c = 1. / max_rate # cutoff of FIR filter (rel. to Nyquist)\n half_len = 10 * max_rate # reasonable cutoff for our sinc-like function\n if max_rate!=1: \n downsample_filter_response = signal.firwin(2 * half_len + 1, f_c, window=('kaiser', 5.0))\n else: # just in case we aren't downsampling -- I think this should work? \n downsample_filter_response = zeros(2 * half_len + 1)\n downsample_filter_response[half_len + 1] = 1\n \n # Zero-pad our filter to put the output samples at the center\n # n_pre_pad = int((DOWNSAMPLE - half_len % DOWNSAMPLE))\n # n_post_pad = 0\n # n_pre_remove = (half_len + n_pre_pad) // DOWNSAMPLE\n # We should rarely need to do this given our filter lengths...\n # while _output_len(len(h) + n_pre_pad + n_post_pad, x.shape[axis],\n # up, down) < n_out + n_pre_remove:\n # n_post_pad += 1\n # downsample_filter_response = np.concatenate((np.zeros(n_pre_pad), downsample_filter_response, np.zeros(n_post_pad)))\n \n downsample_filt_tensor = tf.constant(downsample_filter_response, tf.float32)\n downsample_filt_tensor = tf.expand_dims(downsample_filt_tensor, 0)\n downsample_filt_tensor = tf.expand_dims(downsample_filt_tensor, 2)\n downsample_filt_tensor = tf.expand_dims(downsample_filt_tensor, 3)\n\n return downsample_filt_tensor",
"def downsample(filters, size, shape, apply_batchnorm=True):\n initializer = tf.random_normal_initializer(0., 0.02)\n\n result = tf.keras.Sequential()\n result.add(\n tf.keras.layers.Conv2D(filters, size, strides=2, padding='same', batch_input_shape=shape, \n kernel_initializer=initializer, use_bias=False))\n\n if apply_batchnorm:\n result.add(tf.keras.layers.BatchNormalization())\n\n result.add(tf.keras.layers.LeakyReLU())\n\n return result",
"def down_block(x, out_channels, name, downsample=True, act=tf.nn.relu):\n with tf.variable_scope(name):\n input_channels = x.shape.as_list()[-1]\n x_0 = x\n x = act(x)\n x = ops.snconv2d(x, out_channels, 3, 3, 1, 1, name='sn_conv1')\n x = act(x)\n x = ops.snconv2d(x, out_channels, 3, 3, 1, 1, name='sn_conv2')\n if downsample:\n x = dsample(x)\n if downsample or input_channels != out_channels:\n x_0 = ops.snconv2d(x_0, out_channels, 1, 1, 1, 1, name='sn_conv3')\n if downsample:\n x_0 = dsample(x_0)\n return x_0 + x",
"def downsample_1d(myarr, factor, weightsarr =[ -1], weighted = True, in_quad = False): \n if in_quad:\n myarr = np.power(myarr, 2)\n \n xs = myarr.shape[0]\n crop_arr = myarr[:xs-(xs % int(factor))]\n crop_weights = weightsarr[:xs-(xs % int(factor))]\n \n if weighted == True:\n if np.mean(weightsarr) == -1:\n print(\"CAUTION!!!! You didn't specify what to weight by!\")\n dsarr = -1\n else:\n dsarr = np.average( np.concatenate(\n [[crop_arr[i::factor] for i in range(factor)] ]\n ),weights = np.concatenate(\n [[crop_weights[i::factor] for i in range(factor)] ]\n ) ,axis=0)\n \n else: # when weighted == False:\n dsarr = np.mean( np.concatenate(\n [[crop_arr[i::factor] for i in range(factor)] ]\n ),axis=0)\n if in_quad:\n dsarr = np.sqrt(dsarr)\n return dsarr",
"def TransitionDown(inputs, n_filters, dropout_p=0.2):\n\n l = BN_ReLU_Conv(inputs, n_filters, filter_size=1, dropout_p=dropout_p)\n l = Pool2DLayer(l, 2, mode='max')\n\n return l\n # Note : network accuracy is quite similar with average pooling or without BN - ReLU.\n # We can also reduce the number of parameters reducing n_filters in the 1x1 convolution",
"def deconv(depth, nfilter, ksize=3, stride=1, \r\n pad_in=0, pad_out=0, groups=1,\r\n dilation=1, pad_mode='zeros',\r\n bias=True, lrelu=None):\r\n assert (depth>0 and nfilter>0 and ksize>0 and ksize%2==1 and \r\n stride>0 and pad_in>=0 and pad_out>=0 and dilation>=1 and\r\n groups>=1 and depth%groups==0 and nfilter%groups==0)\r\n deconv_ = nn.ConvTranspose2d(depth, nfilter, ksize, stride, \r\n pad_in, pad_out, groups, bias, dilation,\r\n pad_mode)\r\n if lrelu is not None:\r\n deconv_ = nn.Sequential(deconv_, \r\n nn.LeakyReLU(lrelu, inplace=True))\r\n return deconv_",
"def upsample_2d(x, k=None, factor=2, gain=1, data_format='NHWC'):\n assert isinstance(factor, int) and factor >= 1\n if k is None:\n k = [1] * factor\n k = _setup_kernel(k) * (gain * (factor**2))\n p = k.shape[0] - factor\n return _simple_upfirdn_2d(\n x,\n k,\n up=factor,\n pad0=(p + 1) // 2 + factor - 1,\n pad1=p // 2,\n data_format=data_format)",
"def make_downsample_filt_tensor(SR=16000, ENV_SR=200, WINDOW_SIZE=1001, beta=5.0, pycoch_downsamp=False):\n DOWNSAMPLE = SR/ENV_SR\n if not pycoch_downsamp: \n downsample_filter_times = np.arange(-WINDOW_SIZE/2,int(WINDOW_SIZE/2))\n downsample_filter_response_orig = np.sinc(downsample_filter_times/DOWNSAMPLE)/DOWNSAMPLE\n downsample_filter_window = signallib.kaiser(WINDOW_SIZE, beta)\n downsample_filter_response = downsample_filter_window * downsample_filter_response_orig\n else: \n max_rate = DOWNSAMPLE\n f_c = 1. / max_rate # cutoff of FIR filter (rel. to Nyquist)\n half_len = 10 * max_rate # reasonable cutoff for our sinc-like function\n if max_rate!=1: \n downsample_filter_response = signallib.firwin(2 * half_len + 1, f_c, window=('kaiser', beta))\n else: # just in case we aren't downsampling -- I think this should work? \n downsample_filter_response = zeros(2 * half_len + 1)\n downsample_filter_response[half_len + 1] = 1\n \n # Zero-pad our filter to put the output samples at the center\n # n_pre_pad = int((DOWNSAMPLE - half_len % DOWNSAMPLE))\n # n_post_pad = 0\n # n_pre_remove = (half_len + n_pre_pad) // DOWNSAMPLE\n # We should rarely need to do this given our filter lengths...\n # while _output_len(len(h) + n_pre_pad + n_post_pad, x.shape[axis],\n # up, down) < n_out + n_pre_remove:\n # n_post_pad += 1\n # downsample_filter_response = np.concatenate((np.zeros(n_pre_pad), downsample_filter_response, np.zeros(n_post_pad)))\n \n downsample_filt_tensor = tf.constant(downsample_filter_response, tf.float32)\n downsample_filt_tensor = tf.expand_dims(downsample_filt_tensor, 0)\n downsample_filt_tensor = tf.expand_dims(downsample_filt_tensor, 2)\n downsample_filt_tensor = tf.expand_dims(downsample_filt_tensor, 3)\n\n return downsample_filt_tensor",
"def upsample_2d(x, k=None, factor=2, gain=1, data_format='NCHW', impl='cuda', gpu=True):\r\n\r\n assert isinstance(factor, int) and factor >= 1\r\n if k is None:\r\n k = [1] * factor\r\n k = Oncuda._setup_kernel(k) * (gain * (factor ** 2))\r\n p = k.shape[0] - factor\r\n return Oncuda._simple_upfirdn_2d(x, k, up=factor, pad0=(p+1)//2+factor-1, pad1=p//2, data_format=data_format, impl=impl, gpu=gpu)"
] |
[
"0.59208506",
"0.5851584",
"0.5679151",
"0.5676317",
"0.565813",
"0.5653539",
"0.55858755",
"0.55676895",
"0.5492554",
"0.5436388",
"0.5431901",
"0.5408714",
"0.54077697",
"0.536935",
"0.5363559",
"0.5292055",
"0.52802724",
"0.5259954",
"0.51976967",
"0.5101671",
"0.50778955",
"0.5062474",
"0.50211805",
"0.5013427",
"0.4998385",
"0.4997543",
"0.49726444",
"0.49540058",
"0.49463892",
"0.4887083"
] |
0.590075
|
1
|
Writes an object to a text file, using JSON representation
|
def save_to_json_file(my_obj, filename):
import json
with open(filename, 'w', encoding='utf-8') as f:
obj = json.dumps(my_obj)
f.write(obj)
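
# Quick round-trip check for save_to_json_file (the file name is illustrative).
import json

data = {"name": "Ada", "scores": [98, 87]}
save_to_json_file(data, "data.json")
with open("data.json", encoding="utf-8") as f:
    assert json.load(f) == data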
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def save_to_json_file(my_obj, filename):\n with open(filename, 'w', encoding='utf-8') as file:\n return file.write(json.dumps(my_obj))",
"def save_to_json_file(my_obj, filename):\n with open(filename, mode=\"w\", encoding=\"utf-8\") as writer:\n json.dump(my_obj, writer)",
"def save_to_json_file(my_obj, filename):\n with open(filename, mode='w') as file:\n file.write(json.dumps(my_obj))",
"def save_to_json_file(my_obj, filename):\n with open(filename, \"w\") as f:\n j = json.dumps(my_obj)\n f.write(j)\n f.close()",
"def save_to_json_file(my_obj, filename):\n with open(filename, \"w\", encoding=\"utf-8\") as opening:\n json.dump(my_obj, opening)",
"def save_to_json_file(my_obj, filename):\n import json\n with open(filename, mode='w', encoding='utf-8') as f:\n json.dump(my_obj, f)",
"def save_to_json_file(my_obj, filename):\n with open(filename, \"w\") as myfile:\n return myfile.write(json.dumps(my_obj))",
"def write_json(obj_to_write: Any, filename: str):\n \n with open(filename, 'w') as json_file:\n json.dump(obj_to_write, json_file, indent=4)",
"def save_to_json_file(my_obj, filename):\n with open(filename, 'w') as json_file:\n written = json_file.write(json.dumps(my_obj))\n return written",
"def save_to_json_file(my_obj, filename):\n import json\n with open(filename, 'w') as file:\n json.dump(my_obj, file)",
"def write_json(obj, fpath):\n mkdir_if_missing(osp.dirname(fpath))\n with open(fpath, 'w', encoding='utf-8') as f:\n json.dump(obj, f, indent=4, separators=(',', ': '), ensure_ascii=False) # 添加中文支持",
"def save_to_json_file(my_obj, filename):\n with open(filename, 'w') as file:\n json.dump(my_obj, file)",
"def save_to_json_file(my_obj, filename):\n with open(filename, 'w+') as json_file:\n json.dump(my_obj, json_file)",
"def save_data(file_to_save, object_to_serialize):\r\n with open(file_to_save, \"w\", encoding=\"utf-8\") as f:\r\n f.write(json.dumps(object_to_serialize, indent=2, ensure_ascii=False))",
"def to_json(obj: ConfiguredBaseModel, file: str):\n if file:\n with open(file, \"w\") as f:\n f.write(obj.json(indent=4))\n console.print(f\"\\nOutput written to {file}\\n\")\n else:\n print_json(obj.json(indent=4))",
"def write(self, _filepath=None):\n _json_txt = json.dumps(self.json_dict, indent=2)\n self._write_json_text(_json_txt, _filepath)",
"def save_file(obj, file_path):\n with gzip.open(file_path, \"wb\") as fp:\n fp.write(json.dumps(obj).encode('utf-8'))",
"def writeToJson(inputObj,fileLoc):\n myFile = open(fileLoc,'w')\n json.dump(inputObj, myFile, sort_keys=True, indent=4, separators=(',', ': '))",
"def write_json_to_file(json_object, filename):\n try:\n # Try to serialize it before writing\n json_object = json.dumps(json_object)\n except TypeError:\n print(\"Failed to serialize the object\")\n try:\n json_object = json.loads(json_object)\n json_object = json.dumps(json_object)\n except TypeError:\n print(\"Failed secondary serialization of json object\")\n\n json_file = robot_dir + \"/output/original/{}_orig.json\".format(filename.replace(' ', ''))\n with open(json_file, 'w') as json_orig_file:\n json_orig_file.writelines(json_object)",
"def write_json(self, obj, mode='wb', **kwargs):\n with self.open(mode) as f:\n return json.dump(obj, f, **kwargs)",
"def output_json(file_name, domain_object, output_dir):\n\n with open(path.join(output_dir, file_name + '.json'), 'w', encoding='utf-8', errors='replace') as out_file:\n json_object = jsonpickle.encode(domain_object)\n out_file.write(json_object)\n out_file.write('\\n')",
"def write_complex_json(filepath, obj):\n\n with open(filepath, 'w', encoding='utf-8') as file_obj:\n json.dump(obj, file_obj, cls=ExtendedEncoder, ensure_ascii=False, indent=2)",
"def save_file(file_path, json_obj):\n try:\n with open(file_path, 'w') as outfile:\n json.dump(json_obj, outfile)\n logging.debug(\"Saved JSON to file '{0}'\".format(file_path))\n except IOError:\n logging.error(\"¯\\_(ツ)_/¯ Can't even open the file for writing: {0}'\".format(file_path))\n sys.exit(1)",
"def serialize_file(cls, obj, file_path='./data.json'):\n file_stream = open(file_path, 'wb')\n json.dump(obj, file_stream, cls=CustomTypeEncoder)\n file_stream.flush()\n file_stream.close()",
"def save_json(self, file: Union[str, TextIO]) -> None:\n if hasattr(file, 'write'):\n file_ctx = nullcontext(file)\n else:\n file_ctx = open(file, 'w')\n\n with file_ctx as fp:\n for d in self:\n json.dump(d.dict(), fp)\n fp.write('\\n')",
"def save_json(dict_obj, path, name):\n if 'txt' not in name:\n name += '.json'\n with open(os.path.join(path, name), 'w') as json_file:\n json.dump(dict_obj, json_file)",
"def write_json_file(self, fname, content):\n pass",
"def write_json_file(self, fname, content):\n pass",
"def write(self):\n self.json_o.write()",
"def write(self, obj):\n try:\n self.obj.write(json.dumps(obj))\n setattr(self, \"write\", self.delimited_write)\n except:\n self.bad_obj(obj)"
] |
[
"0.7918543",
"0.7879459",
"0.7872899",
"0.7861363",
"0.7859837",
"0.7836132",
"0.7831267",
"0.77715296",
"0.77463853",
"0.7736864",
"0.7728628",
"0.7719162",
"0.76741004",
"0.7646637",
"0.76425725",
"0.7556395",
"0.7547116",
"0.7506853",
"0.74896044",
"0.74833363",
"0.74560595",
"0.74013835",
"0.73124975",
"0.7283968",
"0.72483826",
"0.7237741",
"0.72068256",
"0.72068256",
"0.7205878",
"0.7191541"
] |
0.79062027
|
1
|
Function that takes a dataframe, converts it to a BedTool object, and saves it to the given output file
|
def get_Bedtool_from_dataframe(df: object, output_file: str):
pybedtools.BedTool.from_dataframe(df).saveas(
output_file
)
return
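
# Hypothetical usage; assumes pybedtools (and pandas) are importable -- the
# function above relies on a module-level `import pybedtools` not shown here.
import pandas as pd
import pybedtools

intervals = pd.DataFrame(
    {"chrom": ["chr1", "chr1"], "start": [100, 500], "end": [200, 650]}
)
get_Bedtool_from_dataframe(intervals, "intervals.bed")
print(pybedtools.BedTool("intervals.bed").count())  # -> 2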
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def dataframe_features(df, db):\n def generator():\n for gene_id in df.index:\n yield asinterval(db[gene_id])\n\n return pybedtools.BedTool(generator())",
"def transform(self, dataframe: DataFrame) -> DataFrame:",
"def my_feature_xxx(df: pd.DataFrame):\n\n # CODE HERE\n\n return df",
"def dst(df):\n pass",
"def start_pipeline(df):\n new_df = df.copy()\n new_df = new_df[[\"Title\", \"Genre\", \"Director\", \"Actors\", \"Plot\"]]\n return new_df",
"def passing(df):\n pass",
"def _dataframe_to_feather(df, fn, **kwargs):\n\treturn df.to_feather(fn, **kwargs)",
"def process_to_dataframe(chkpts, tags=['final_energy', 'job_name']):\n\n pass",
"def apply(dataframe, parameters=None):\n if parameters is None:\n parameters = {}\n dataframe = attributes_filter.filter_df_keeping_spno_activities(dataframe,\n max_no_activities=constants.MAX_NO_ACTIVITIES)\n dataframe = auto_filter.apply_auto_filter(dataframe, parameters=parameters)\n dfg = df_statistics.get_dfg_graph(dataframe)\n activities_count = attributes_filter.get_attribute_values(dataframe, xes.DEFAULT_NAME_KEY)\n net, im, fm = inductive_miner.apply_dfg(dfg, parameters=parameters)\n spaths = get_shortest_paths(net)\n aggregated_statistics = get_decorations_from_dfg_spaths_acticount(net, dfg, spaths,\n activities_count,\n variant=\"performance\")\n gviz = pn_vis_factory.apply(net, im, fm, parameters={\"format\": \"svg\"}, variant=\"performance\",\n aggregated_statistics=aggregated_statistics)\n return get_base64_from_gviz(gviz), export_petri_as_string(net, im, fm), \".pnml\", \"parquet\"",
"def algorithm(df, params):\n\n output = {}\n\n # PUT YOUR OWN IMPLEMENTATION HERE\n # STORE YOUR ANALYSIS OUTPUT IN OUTPUT\n\n return output",
"def make_dataset(self, df, **kwargs):\n\t\treturn df",
"def process(self, df):\n output = self.accumulator.identity()\n dataset = df[\"dataset\"]\n cfg = loadConfig()\n # We can access the data frame as usual\n # The dataset is written into the data frame\n # outside of this function\n\n\n## Jets\n jet = JaggedCandidateArray.candidatesfromcounts(\n df['nJet'],\n pt = df['Jet_pt'].content,\n eta = df['Jet_eta'].content,\n phi = df['Jet_phi'].content,\n mass = df['Jet_mass'].content,\n goodjet = df['Jet_isGoodJetAll'].content,\n bjet = df['Jet_isGoodBJet'].content,\n jetId = df['Jet_jetId'].content,\n puId = df['Jet_puId'].content,\n )\n jet = jet[(jet['goodjet']==1)]\n btag = jet[jet['bjet']==1]\n light = jet[(jet['goodjet']==1) & (jet['bjet']==0)]\n spectator = jet[(abs(jet.eta)>2.0) & (abs(jet.eta)<4.7) & (jet.pt>25) & (jet['puId']>=7) & (jet['jetId']>=6)] # 40 GeV seemed good. let's try going lower\n leading_spectator = spectator[spectator.pt.argmax()]\n \n ## Leptons\n lepton = JaggedCandidateArray.candidatesfromcounts(\n df['nLepton'],\n pt = df['Lepton_pt'].content,\n eta = df['Lepton_eta'].content,\n phi = df['Lepton_phi'].content,\n mass = df['Lepton_mass'].content,\n pdgId = df['Lepton_pdgId'].content,\n )\n\n fw = light[abs(light.eta).argmax()] # the most forward light jet\n ## Muons\n muon = lepton[abs(lepton['pdgId'])==13]\n dimuon = muon.choose(2)\n OSmuon = (dimuon.i0['pdgId'] * dimuon.i1['pdgId'] < 0)\n dimuon = dimuon[OSmuon]\n\n ## Electrons\n electron = lepton[abs(lepton['pdgId'])==11]\n dielectron = electron.choose(2)\n OSelectron = (dielectron.i0['pdgId'] * dielectron.i1['pdgId'] < 0)\n dielectron = dielectron[OSelectron]\n\n ## MET\n met_pt = df[\"MET_pt\"]\n met_phi = df[\"MET_phi\"]\n\n ## Event classifieres\n \n \n ## define selections (maybe move to a different file at some point)\n trilep = ((df['nLepton']==3) & (df['nVetoLepton']>=3))\n twoJet = (jet.counts>=2) # those are any two jets\n oneBTag = (btag.counts>0)\n twoMuon = ( muon.counts==2 )\n #Zveto_mu = ( (dimuon.counts<1) )# | (abs(dimuon.mass - 91)>15) )\n Zveto_mu_wide = ( (abs(dimuon.mass-91.)<15).counts<1 )\n Zveto_ele_wide = ( (abs(dielectron.mass-91.)<15).counts<1 )\n Zveto_mu_narrow = ( (abs(dimuon.mass-91.)<10).counts<1 )\n Zveto_ele_narrow = ( (abs(dielectron.mass-91.)<10).counts<1 )\n met = (met_pt > 50)\n fwdJet = (spectator.counts>0)\n fwdJet50 = ((leading_spectator.pt>50).counts>0)\n\n\n ## work on the cutflow\n output['totalEvents']['all'] += len(df['weight'])\n\n\n processes = ['tW_scattering', 'TTW', 'TTX', 'diboson', 'ttbar', 'DY']\n cutflow = Cutflow(output, df, cfg, processes)\n \n\t#IDK if these are right?????\n cutflow.addRow( 'trilep', trilep )\n cutflow.addRow( 'twoJet', twoJet )\n cutflow.addRow( 'oneBTag', oneBTag )\n cutflow.addRow( 'met', met )\n\n\n # pre selection of events\n event_selection = cutflow.selection\n\n ## And fill the histograms\n # just the number of electrons and muons\n output['N_ele'].fill(dataset=dataset, multiplicity=electron[event_selection].counts, weight=df['weight'][event_selection]*cfg['lumi'])\n output['N_mu'].fill(dataset=dataset, multiplicity=muon[event_selection].counts, weight=df['weight'][event_selection]*cfg['lumi'])\n # N jet and N b without selections on those\n output['N_jet'].fill(dataset=dataset, multiplicity=jet[trilep & met].counts, weight=df['weight'][trilep & met]*cfg['lumi'])\n output['N_b'].fill(dataset=dataset, multiplicity=btag[trilep & met].counts, weight=df['weight'][trilep & met]*cfg['lumi'])\n # forward jet properties\n output['N_spec'].fill(dataset=dataset, 
multiplicity=spectator[event_selection].counts, weight=df['weight'][event_selection]*cfg['lumi'])\n output['pt_spec_max'].fill(dataset=dataset, pt=leading_spectator[event_selection & (spectator.counts>0)].pt.flatten(), weight=df['weight'][event_selection & (spectator.counts>0)]*cfg['lumi'])\n output['eta_spec_max'].fill(dataset=dataset, eta=leading_spectator[event_selection & (spectator.counts>0)].eta.flatten(), weight=df['weight'][event_selection & (spectator.counts>0)]*cfg['lumi'])\n \n # something a bit more tricky\n output['N_diele'].fill(dataset=dataset, multiplicity=dielectron[event_selection].counts, weight=df['weight'][event_selection]*cfg['lumi'])\n output['N_dimu'].fill(dataset=dataset, multiplicity=dimuon[event_selection].counts, weight=df['weight'][event_selection]*cfg['lumi'])\n\n output['MET_pt'].fill(dataset=dataset, pt=df[\"MET_pt\"][event_selection].flatten(), weight=df['weight'][event_selection]*cfg['lumi'])\n output['MT'].fill(dataset=dataset, pt=df[\"MT\"][event_selection].flatten(), weight=df['weight'][event_selection]*cfg['lumi'])\n\n ht = jet[jet['goodjet']==1].pt.sum()\n output['HT'].fill(dataset=dataset, ht=ht[event_selection].flatten(), weight=df['weight'][event_selection]*cfg['lumi'])\n st = jet[jet['goodjet']==1].pt.sum() + lepton.pt.sum() + df['MET_pt']\n output['ST'].fill(dataset=dataset, ht=st[event_selection].flatten(), weight=df['weight'][event_selection]*cfg['lumi'])\n\n b_nonb_pair = btag.cross(light)\n jet_pair = light.choose(2)\n output['mbj_max'].fill(dataset=dataset, mass=b_nonb_pair[event_selection].mass.max().flatten(), weight=df['weight'][event_selection]*cfg['lumi'])\n output['mjj_max'].fill(dataset=dataset, mass=jet_pair[event_selection].mass.max().flatten(), weight=df['weight'][event_selection]*cfg['lumi'])\n\n lepton_bjet_pair = lepton.cross(btag)\n output['mlb_max'].fill(dataset=dataset, mass=lepton_bjet_pair[event_selection].mass.max().flatten(), weight=df['weight'][event_selection]*cfg['lumi'])\n output['mlb_min'].fill(dataset=dataset, mass=lepton_bjet_pair[event_selection].mass.min().flatten(), weight=df['weight'][event_selection]*cfg['lumi'])\n lepton_jet_pair = lepton.cross(jet)\n output['mlj_max'].fill(dataset=dataset, mass=lepton_jet_pair[event_selection].mass.max().flatten(), weight=df['weight'][event_selection]*cfg['lumi'])\n output['mlj_min'].fill(dataset=dataset, mass=lepton_jet_pair[event_selection].mass.min().flatten(), weight=df['weight'][event_selection]*cfg['lumi'])\n\n met_and_lep_pt = lepton.pt.sum() + met_pt\n output['MET_lep_pt'].fill(dataset=dataset, pt=met_and_lep_pt[event_selection].flatten(), weight=df['weight'][event_selection]*cfg['lumi'])\n\n trailing_lep = lepton[lepton.pt.argmin()] \n leading_lep = lepton[lepton.pt.argmax()]\n output['trailing_lep_pt'].fill(dataset=dataset, pt=trailing_lep[event_selection].pt.min().flatten(), weight=df['weight'][event_selection]*cfg['lumi'])\n output['leading_lep_pt'].fill(dataset=dataset, pt=leading_lep[event_selection].pt.max().flatten(), weight=df['weight'][event_selection]*cfg['lumi'])\n\n output['fw_pt'].fill(dataset=dataset, pt=fw[event_selection].pt.sum().flatten(), weight=df['weight'][event_selection]*cfg['lumi'])\n output['fw_eta'].fill(dataset=dataset, eta=fw[event_selection].eta.sum().flatten(), weight=df['weight'][event_selection]*cfg['lumi'])\n\n R = (abs((leading_lep.eta.sum()-leading_spectator.eta.sum())**2 + (leading_lep.phi.sum()-leading_spectator.phi.sum()**2)))**0.5 #Change leading_spectator to jet ##ADD ABS()\n output['R'].fill(dataset=dataset, multiplicity = 
R[event_selection].flatten(), weight=df['weight'][event_selection]*cfg['lumi'])\n\n return output",
"def crest2brat(df, output_dir):\n if not type(df) == pd.core.frame.DataFrame:\n print(\"first parameter should be a pandas data frame\")\n raise TypeError\n\n # first, check if the 'source' directory exists\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n\n for index, row in df.iterrows():\n ann_file = \"\"\n t_idx = 1\n args_count = 0\n idx = ast.literal_eval(str(row['idx']))\n span1 = idx['span1']\n span2 = idx['span2']\n signal = idx['signal']\n label = int(row['label'])\n direction = int(row['direction'])\n\n span1_string = ' '.join(ast.literal_eval(str(row['span1'])))\n span2_string = ' '.join(ast.literal_eval(str(row['span2'])))\n signal_string = ' '.join(ast.literal_eval(str(row['signal'])))\n\n if len(span1) > 0:\n span_type = 'Span1'\n if label == 1 and direction == 0:\n span_type = 'Cause'\n elif label == 1 and direction == 1:\n span_type = 'Effect'\n ann_file += \"T{}\\t{} \".format(t_idx, span_type)\n spans1 = []\n for span in span1:\n spans1.append(\"{} {}\".format(span[0], span[1]))\n ann_file += (';'.join(spans1)).strip()\n ann_file += \"\\t{}\\n\".format(span1_string)\n t_idx += 1\n args_count += 1\n\n if len(span2) > 0:\n span_type = 'Span2'\n if label == 1 and direction == 0:\n span_type = 'Effect'\n elif label == 1 and direction == 1:\n span_type = 'Cause'\n ann_file += \"T{}\\t{} \".format(t_idx, span_type)\n spans2 = []\n for span in span2:\n spans2.append(\"{} {}\".format(span[0], span[1]))\n ann_file += (';'.join(spans2)).strip()\n ann_file += \"\\t{}\\n\".format(span2_string)\n t_idx += 1\n args_count += 1\n\n if len(signal) > 0:\n ann_file += \"T{}\\tSignal \".format(t_idx)\n signals = []\n for span in signal:\n signals.append(\"{} {}\".format(span[0], span[1]))\n ann_file += (';'.join(signals)).strip()\n ann_file += \"\\t{}\\n\".format(signal_string)\n t_idx += 1\n\n if label == 1:\n if args_count == 2:\n if direction == 0:\n ann_file += \"R1\\tCausal Arg1:T1 Arg2:T2\\n\"\n elif direction == 1:\n ann_file += \"R1\\tCausal Arg1:T2 Arg2:T1\\n\"\n elif label == 0:\n if args_count == 2:\n if direction == 0:\n ann_file += \"R1\\tNonCausal Arg1:T1 Arg2:T2\\n\"\n elif direction == 1:\n ann_file += \"R1\\tNonCausal Arg1:T2 Arg2:T1\\n\"\n\n ann_file = ann_file.strip('\\n')\n\n # writing .ann and .txt files\n file_name = \"{}\".format(str(row['global_id']))\n\n with open('{}/{}.ann'.format(output_dir, file_name), 'w') as file:\n file.write(ann_file)\n with open('{}/{}.txt'.format(output_dir, file_name), 'w') as file:\n file.write(row['context'])",
"def BDT(BDTSetup, dataloader, factory):\n\n factory.BookMethod(dataloader, ROOT.TMVA.Types.kBDT,\n BDTSetup.ModelName ,\"!H:!V:NTrees=\"+BDTSetup.TreeNumber+\":MinNodeSize=\"+BDTSetup.NMinActual+\"%:BoostType=Grad:Shrinkage=\"+BDTSetup.Shrinkage+\n \":UseBaggedBoost:BaggedSampleFraction=\"+BDTSetup.BaggingActual+\":nCuts=\"+BDTSetup.NCutActual+\":MaxDepth=\"+BDTSetup.MaxDepth+\n \":IgnoreNegWeightsInTraining=True\" )\n\n return",
"def bow(data_frame, description):\n text = list(data_frame['article'])\n vectorizer = CountVectorizer(stop_words='english') # create the transform\n vectorizer.fit(text) # tokenize and build vocab\n # save bow vectorizer as pickle\n with open('resources/bow_encoder_' + description + '.pkl', 'wb') as f:\n pickle.dump(vectorizer.vocabulary_, f)\n f.close()\n data_frame['bow'] = data_frame['article'].apply(lambda x: vectorizer.transform([x]))\n return data_frame",
"def example_bedtool(fn):\n fn = os.path.join(data_dir(), fn)\n if not os.path.exists(fn):\n raise ValueError(\"%s does not exist\" % fn)\n return BedTool(fn)",
"def df():\n fs.df()",
"def records_from_breaches_dataframe(dataframe, sequences):\n records = _sequences_to_new_records(sequences)\n for record in records:\n record.features = [\n f\n for f in record.features\n if not f.qualifiers.get(\"is_a_breach\", False)\n ]\n colors_cycle_iterator = colors_cycle()\n columns_colors = {\n c: next(colors_cycle_iterator) for c in dataframe.columns\n }\n for rec, (i, row) in zip(records, dataframe.iterrows()):\n for column in dataframe.columns:\n locations = row[column]\n if not locations:\n continue\n for location in locations.split(\",\"):\n annotate_record(\n rec,\n location=_parse_location(location),\n label=column,\n color=columns_colors[column],\n ApEinfo_fwdcolor=columns_colors[column],\n ApEinfo_revcolor=columns_colors[column],\n is_a_breach=True,\n )\n return records",
"def apply(self, df: DataFrame) -> Optional[Report]:",
"def _fit(self, df):\n return df",
"def bid_dataset_1():\n\n bm = BidManager()\n\n bm.add_bid(1, 6.7, 0, True, 0)\n bm.add_bid(1, 6.6, 1, True, 0)\n bm.add_bid(1, 6.5, 2, True, 0)\n bm.add_bid(1, 6.4, 3, True, 0)\n bm.add_bid(1, 6.3, 4, True, 0)\n bm.add_bid(1, 6, 5, True, 0)\n\n bm.add_bid(1, 1, 6, False, 0)\n bm.add_bid(1, 2, 7, False, 0)\n bm.add_bid(2, 3, 8, False, 0)\n bm.add_bid(2, 4, 9, False, 0)\n bm.add_bid(1, 6.1, 10, False, 0)\n \n return bm.get_df()",
"def augment_dataframe(self, df: pd.DataFrame) -> pd.DataFrame:",
"def format_split_bgc(df,float_id, bgc_template_df):\n bgc_metadata_df = format_bgc_metadata(df_concat,float_id)\n\n # \"\"\"\"Adds any missing columns to data dataframe\"\"\"\n df_concat = bgc_template_df.append(df_concat)\n\n \"\"\"trims down df to only data columns\"\"\"\n df_concat = df_concat[bgc_data_columns]\n\n return float_id, df_concat, bgc_metadata_df",
"def desc_df(df):\n print(df.shape)\n print(df.columns)",
"def data_specific_processing(self, dataframe):\n return dataframe",
"def get_BedTool_for_functional_motifs(funMotifs: dict, tissue: str, db_user_name: str, db_name: str, output_file: str):\r\n # establish connection\r\n conn = psycopg2.connect(\r\n database=db_name, user=db_user_name)\r\n\r\n # create list of motif ids that will be extracted from table\r\n motifs = \"(\"\r\n for funMotif in funMotifs[tissue]:\r\n motifs = motifs + f\"{funMotif}, \"\r\n motifs = motifs[:-2] + \")\"\r\n\r\n # TODO: remove necessity of motifs, but somehow contain tissue-dependency\r\n # create sql command\r\n sql = f\"SELECT chr, motifstart, motifend, name, strand, mid FROM motifs WHERE mid IN {motifs} \" \\\r\n f\"AND functionality = 'YES'\"\r\n\r\n # get data into data frame\r\n df = pd.read_sql_query(sql, conn)\r\n\r\n # close connection\r\n conn.close()\r\n\r\n get_Bedtool_from_dataframe(df, output_file)\r\n\r\n return",
"def data_process(df_toprocess=None, cutoff=0.2, bv_cutoff=0.15, catalog=None):\n\n print \"Selecting objects..\"\n df_toprocess['sigma_pi/pi'] = df_toprocess.loc[:, 'parallax_error'].astype(float) / df_toprocess.loc[:, 'parallax']\\\n .astype(float)\n print \"..Done\\nCutoff at relative parallax error of %s\\n----------\" % cutoff\n\n # only take objects with relative parallax error < cutoff\n df_toprocess = df_toprocess.loc[df_toprocess.loc[:, 'parallax'] /\n df_toprocess.loc[:, 'parallax_error'] > 1. / cutoff]\n\n print catalog\n if catalog is None:\n print \"Replacing whitespace with nan\"\n df_toprocess = df_toprocess.replace(' ', np.nan) # some cells are ' ' instead of nan\n\n print \"Converting BTmag and VTmag to floats..\"\n df_toprocess.BTmag = df_toprocess.BTmag.astype(float)\n df_toprocess.VTmag = df_toprocess.VTmag.astype(float)\n # Some values are NaN:\n print \"Removing objects with missing BT or VT measurements..\"\n df_toprocess = df_toprocess[df_toprocess.BTmag.notnull()]\n df_toprocess = df_toprocess[df_toprocess.VTmag.notnull()]\n\n print \"Computing B-V and M_V..\"\n df_toprocess['B_V'] = df_toprocess.BTmag - df_toprocess.VTmag\n df_toprocess['M_V'] = df_toprocess.VTmag - 5. * (np.log10(1000. / df_toprocess.parallax) - 1.)\n\n print \"Converting sigma BT and sigma VT to float..\"\n df_toprocess.e_BTmag = df_toprocess.e_BTmag.astype(float)\n df_toprocess.e_VTmag = df_toprocess.e_VTmag.astype(float)\n\n print \"Computing sigma B-V..\"\n df_toprocess['e_B_V'] = np.sqrt(df_toprocess.e_BTmag.pow(2)+df_toprocess.e_VTmag.pow(2))\n\n print \"Applying selection on sigma BT-VT < %s..\" % bv_cutoff\n df_toprocess = df_toprocess[df_toprocess.e_B_V < bv_cutoff]\n\n if catalog == 'xmatch_TGAS_Simbad.csv':\n df_toprocess = df_toprocess.loc[(df_toprocess['J'] < 11.) & (df_toprocess['K'] < 11.)]\n print \"min in J: %s\" % np.max(df_toprocess['J'])\n print \"max in J: %s\" % np.min(df_toprocess['J'])\n df_toprocess.insert(10, 'B_V', df_toprocess.loc[:, 'B'] - df_toprocess.loc[:, 'V'])\n\n df_toprocess.insert(10, 'J_K', df_toprocess.loc[:, 'J'] - df_toprocess.loc[:, 'K'])\n df_toprocess.insert(10, 'M_G', df_toprocess.loc[:, 'phot_g_mean_mag'] - 5. *\n (np.log10(1000. / df_toprocess.loc[:, 'parallax']) - 1.))\n df_toprocess.insert(10, 'M_J', df_toprocess.loc[:, 'J'] - 5. *\n (np.log10(1000. / df_toprocess.loc[:, 'parallax']) - 1.))\n df_toprocess.insert(10, 'M_K', df_toprocess.loc[:, 'K'] - 5. *\n (np.log10(1000. / df_toprocess.loc[:, 'parallax']) - 1.))\n\n if catalog == 'xmatch_TGAS_VSX.csv':\n df_toprocess = df_toprocess[df_toprocess.V == 0]\n print \"%s objects selected\" % len(df_toprocess)\n print \"..Done\\n----------\"\n return df_toprocess",
"def __init__(self, dataframe):\n self._dataframe = dataframe \n self._data_grouped_by_manufacturer = self._group_by_manufacturer()\n self._data_agg_by_mean_value = self._agg_by_mean()\n self._formatted_data = self._format_data()",
"def bh_parse(*, df_list, **_):\n\n # load pdf from externaldatapath directory\n pages = range(5, 13)\n bh_df_list = []\n for x in pages:\n bh_df = read_pdf(f'{externaldatapath}Blackhurst_WatWithdrawalsforUSIndustrialSectorsSI.pdf',\n pages=x, stream=True)[0]\n bh_df_list.append(bh_df)\n\n df = pd.concat(bh_df_list, sort=False)\n df = df.rename(columns={\"I-O code\": \"ActivityConsumedBy\",\n \"I-O description\": \"Description\",\n \"gal/$M\": \"FlowAmount\",\n })\n # hardcode\n # original data in gal/million usd\n df.loc[:, 'FlowAmount'] = df['FlowAmount'] / 1000000\n df['Unit'] = 'gal/USD'\n df['SourceName'] = 'Blackhurst_IO'\n df['Class'] = 'Water'\n df['FlowName'] = 'Water Withdrawals IO Vector'\n df['Location'] = US_FIPS\n df = assign_fips_location_system(df, '2002')\n df['Year'] = '2002'\n df['DataReliability'] = 5 # tmp\n df['DataCollection'] = 5 # tmp\n\n return df",
"def bid_dataset_0():\n\n bm = BidManager()\n bm.add_bid(1, 3, 0, True, 0)\n bm.add_bid(2, 4, 1, True, 0)\n bm.add_bid(5, 1, 2, True, 0)\n\n bm.add_bid(4, 2, 3, False, 0)\n bm.add_bid(1, 1, 4, False, 0)\n bm.add_bid(5, 6, 5, False, 0)\n \n return bm.get_df()"
] |
[
"0.64251095",
"0.5862894",
"0.5698449",
"0.56902516",
"0.56305456",
"0.5613798",
"0.56004375",
"0.5597725",
"0.550653",
"0.547061",
"0.54529804",
"0.5449333",
"0.54357535",
"0.5413284",
"0.54047763",
"0.53786874",
"0.5349155",
"0.5321677",
"0.5304573",
"0.5302112",
"0.5267031",
"0.5253318",
"0.5231496",
"0.5228321",
"0.5205386",
"0.5171199",
"0.51691884",
"0.516369",
"0.51568544",
"0.5141192"
] |
0.7643427
|
0
|
Function that extracts the information for motifs that are functional in a specified tissue from a PostgreSQL database
|
import psycopg2
import pandas as pd


def get_BedTool_for_functional_motifs(funMotifs: dict, tissue: str, db_user_name: str, db_name: str, output_file: str):
# establish connection
conn = psycopg2.connect(
database=db_name, user=db_user_name)
# create list of motif ids that will be extracted from table
motifs = "("
for funMotif in funMotifs[tissue]:
motifs = motifs + f"{funMotif}, "
motifs = motifs[:-2] + ")"
# TODO: remove necessity of motifs, but somehow contain tissue-dependency
# create sql command
sql = f"SELECT chr, motifstart, motifend, name, strand, mid FROM motifs WHERE mid IN {motifs} " \
f"AND functionality = 'YES'"
# get data into data frame
df = pd.read_sql_query(sql, conn)
# close connection
conn.close()
get_Bedtool_from_dataframe(df, output_file)
return
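
# A minimal usage sketch, assuming a hypothetical funMotifs mapping and local
# PostgreSQL credentials; the `motifs` table and the helper
# get_Bedtool_from_dataframe are taken as given from the function above.
funMotifs = {"liver": [101, 102, 305]}   # tissue name -> motif ids (hypothetical)
get_BedTool_for_functional_motifs(
    funMotifs,
    tissue="liver",
    db_user_name="postgres",
    db_name="funmotifs_db",
    output_file="liver_functional_motifs.bed",
)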
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_tag_info(xint,conn):\n\n get_tags = ('SELECT DISTINCT fip2.value '\n 'FROM interaction i, feature_interaction fi, feature_interactionprop fip, '\n 'feature f, cvterm cvt, feature_interactionprop fip2, cvterm cvt2 '\n 'WHERE f.feature_id = fi.feature_id AND fi.interaction_id = i.interaction_id '\n 'AND fi.feature_interaction_id = fip.feature_interaction_id '\n 'AND fip.type_id = cvt.cvterm_id AND cvt.name = \\'participating feature\\' '\n 'AND fi.feature_interaction_id = fip2.feature_interaction_id AND fip2.type_id = cvt2.cvterm_id '\n 'AND cvt2.name = \\'comment\\' AND f.uniquename = %s AND i.uniquename = %s')\n tags = connect(get_tags,xint,conn)\n return(tags)",
"def extract_notes(db_config,\n note_id_column_name=constants.NOTE_ID_COLUMN_NAME,\n text_column_name=constants.TEXT_COLUMN_NAME,\n financial_flag_column_name=constants.OUTCOME_COLUMN_NAME):\n db_connection = pg.connect(db_config)\n\n # exclude any notes tagged as `is_error`\n query = \"\"\"\n SELECT flagged_notes.row_id as {0}, flagged_notes.text as {1}, flagged_notes.financial_flag as {2}\n FROM flagged_notes\n WHERE flagged_notes.iserror is null\n \"\"\".format(note_id_column_name, text_column_name, financial_flag_column_name)\n\n df = pd.read_sql(query, db_connection)\n return df",
"def get_motif_pssm(self, cluster_num, motif_num):\n #conn = sql3.connect(self.dbfile)\n #cursor = conn.cursor()\n #cursor.execute('select max(iteration) from motif_infos')\n #iteration = cursor.fetchone()[0]\n\n #query = 'select rowid from motif_infos where iteration=? and cluster=? and motif_num=?'\n #params = [self.iteration, cluster_num, motif_num]\n #cursor.execute(query, params)\n #rowid = cursor.fetchone()[0]\n #motif_infos = self.tables['motif_infos']\n #rowid = motif_infos[(motif_infos.iteration==self.iteration) & \n # (motif_infos.cluster==cluster_num) & (motif_infos.motif_num==motif_num)].index.values[0]+1\n rowid = self.__get_motif_id(cluster_num, motif_num)\n\n #query = 'select a,c,g,t from motif_pssm_rows where iteration=? and motif_info_id=?'\n #params = [self.iteration, rowid]\n #pssm = pd.read_sql( query, conn, params=params )\n motif_pssm_rows = self.tables['motif_pssm_rows']\n pssm = motif_pssm_rows[(motif_pssm_rows.iteration==self.iteration) & (motif_pssm_rows.motif_info_id==rowid)]\n pssm.drop( ['motif_info_id', 'iteration', 'row'], 1, inplace=True )\n return pssm",
"def extract_notes(infile):\n\n # get patient ID\n subj_id = patient_id_from_file(infile)\n \n #get lab_events for this patient\n con = open_db()\n \n query = \\\n \"\"\"\n SELECT i.chartdate, i.charttime, i.description, i.category, i.text\n FROM noteevents i\n WHERE subject_id = {};\n \"\"\".format(subj_id)\n\n notes = pd.read_sql_query(query,con)\n \"\"\" change time stamp to seconds from origin \"\"\"\n \n origin = pd.to_datetime(wfdb.rdheader(infile).base_datetime)\n notes.insert(0, 'time', '')\n for idx, row in notes.iterrows():\n notes['time'].iloc[idx]=int((pd.to_datetime(row['charttime'])-origin).total_seconds())\n del notes['charttime']\n del notes['chartdate']\n\n return (notes)",
"def extract_feature_notes(contig, type, start, stop, direction, notes, pos, ref, alt):\n notes = re.sub(\"%\\d+\", \" \", notes)\n desc = re.search(\"Note=(.*?);\", notes)\n id = re.search(\"ID=(.*?);\", notes)\n name = re.search(\"Name=(.*?);\", notes)\n\n if id and name and desc:\n print ('Contig: ' + contig)\n print ('Annotation_type: ' + type)\n print ('Start:Stop:Direction '+ start+':'+stop+':'+direction)\n print (\"Position: \" + pos)\n print ('Ref:Alt: ' + ref + ':'+ alt)\n print(\"ID: \" + id.group(1))\n # print(\"Name: \" + name.group(1))\n print(\"Description: \" + desc.group(1))\n # if name:\n # print(\"Name: \" + name.group(1))\n #\n # if desc:\n # print(\"Desc: \" + desc.group(1))\n\n\n print()",
"def subjectinfo(subject_id):\n import pandas as pd\n from nipype.interfaces.base import Bunch\n \n def construct_sj(trialinfo, subject_id, run_num, cond_name):\n \"\"\"construct df\"\"\"\n df_sj = trialinfo[(trialinfo['subject']==int(subject_id)) & (trialinfo['session']==int(run_num))]\n sj_info = pd.DataFrame()\n sj_info['onset'] = df_sj['runtime']\n sj_info['duration'] = 0.\n sj_info['weight'] = 1.\n trial_type = df_sj['seq'].replace({1:'Low', 2:'High'})\n sj_info['trial_type'] = trial_type\n sj_info_cond = sj_info[sj_info['trial_type']==cond_name]\n return sj_info_cond\n\n def select_confounds(subject_id, run_num):\n \"\"\"import confounds tsv files\"\"\"\n confounds_dir = f'/data/sub-%02d/func/' % int(subject_id)\n confounds_file = confounds_dir+f'sub-%02d_task-tsl_run-%d_desc-confounds_timeseries.tsv' % (int(subject_id), int(run_num))\n conf_df = pd.read_csv(confounds_file, sep='\\t')\n return conf_df\n\n def confounds_regressor(conf_df, conf_names):\n \"\"\"select confounds for regressors\"\"\"\n conf_select = conf_df[conf_names].loc[4:].fillna(0) # ignore first 4 dummy scans\n conf_select_list = [conf_select[col].values.tolist() for col in conf_select] \n return conf_select_list\n\n def find_runs(subject_id):\n \"\"\"find available runs from func\"\"\"\n from glob import glob\n func_dir = f'/output/smooth_nomask/preproc/sub-%02d/' % int(subject_id) \n func_files = glob(func_dir+'*bold.nii')\n runs = []\n for f in func_files:\n tmp = f.split('/')\n run = tmp[5].split('_')[2].split('-')[1]\n runs.append(int(run))\n return sorted(runs)\n \n conf_names = ['csf','white_matter','global_signal',\n 'dvars','std_dvars','framewise_displacement', 'rmsd',\n 'a_comp_cor_00', 'a_comp_cor_01', 'a_comp_cor_02', 'a_comp_cor_03', 'a_comp_cor_04', 'a_comp_cor_05', 'cosine00', 'cosine01', 'cosine02', 'cosine03', 'cosine04', 'cosine05',\n 'trans_x', 'trans_y', 'trans_z', 'rot_x','rot_y','rot_z']\n\n alltrialinfo = pd.read_csv('/code/data/fmri_behavioural_new.csv')\n alltrialinfo.head()\n \n subject_info = []\n onset_list = []\n condition_names = ['High', 'Low']\n runs = find_runs(subject_id)\n print(runs)\n for run in runs:\n for cond in condition_names:\n run_cond = construct_sj(alltrialinfo, subject_id, run, cond)\n onset_run_cond = run_cond['onset'].values\n onset_list.append(sorted(onset_run_cond))\n\n subject_info = []\n for r in range(len(runs)):\n onsets = [onset_list[r*2], onset_list[r*2+1]]\n regressors_all = select_confounds(subject_id, runs[r])\n regressors = confounds_regressor(regressors_all, conf_names)\n\n subject_info.insert(r,\n Bunch(conditions=condition_names,\n onsets=onsets,\n durations=[[0], [0]],\n regressors=regressors,\n regressor_names=conf_names,\n amplitudes=None,\n tmod=None,\n pmod=None))\n\n return subject_info # this output will later be returned to infosource",
"def get_transient_info(database, transient_id):\n #WARNING WARNING WARNING\n #wm_ra_err / wm_decl_err may not be a good estimate of real position error\n #ToDo: This needs verifying and probably fixing\n\n transients_query = \"\"\"\\\n SELECT tr.*\n ,rc.wm_ra as ra_deg\n ,rc.wm_ra_err as ra_err_arcsec\n ,rc.wm_decl as decl_deg\n ,rc.wm_decl_err as decl_err_arcsec\n ,im.taustart_ts as trigger_time\n ,ex.f_peak as trigger_f_peak\n ,ex.f_peak_err as trigger_f_peak_err\n ,ex.f_int as trigger_f_int\n ,ex.f_int_err as trigger_f_int_err\n ,rf.avg_f_peak as mean_f_peak\n ,(rf.avg_weighted_f_peak / avg_f_peak_weight) as weighted_mean_f_peak\n ,rf.avg_f_int as mean_f_int\n ,(rf.avg_weighted_f_int / avg_f_int_weight) as weighted_mean_f_int\n ,rf.f_datapoints\n FROM transient tr\n ,runningcatalog rc\n ,runningcatalog_flux rf\n ,image im\n ,extractedsource ex\n WHERE tr.id = %(transid)s\n AND tr.runcat = rc.id\n AND rf.runcat = rc.id\n AND rf.band = tr.band\n AND ex.id = tr.trigger_xtrsrc\n AND im.id = ex.image\n \"\"\"\n cursor = database.connection.cursor()\n cursor.execute(transients_query, {'transid':transient_id})\n raw_transient_results = cursor.fetchall()\n transients = convert_results_to_list_of_dicts(raw_transient_results,\n cursor.description)\n assert len(transients) == 1 #sanity check\n return transients[0]",
"def get_tgis_metadata(dbif=None):\n\n dbif, connected = init_dbif(dbif)\n\n # Select metadata if the table is present\n try:\n statement = \"SELECT * FROM tgis_metadata;\\n\"\n dbif.execute(statement)\n rows = dbif.fetchall()\n except:\n rows = None\n\n if connected:\n dbif.close()\n\n return rows",
"def query_for_ints(filename, conn):\n\n # Query db and return a list of all interactions in form of tuples. [(int, FBig), (int, FBig), etc.]\n get_ints = ('SELECT DISTINCT i.uniquename, ig.uniquename '\n 'FROM interaction i, feature_interaction fi, interaction_group_feature_interaction igfi, interaction_group ig '\n 'WHERE i.is_obsolete=\\'f\\' AND ig.is_obsolete = \\'f\\' AND i.interaction_id = fi.interaction_id '\n 'AND fi.feature_interaction_id = igfi.feature_interaction_id AND igfi.interaction_group_id = ig.interaction_group_id '\n 'ORDER BY ig.uniquename')\n int_tuples = connect(get_ints, 'no_query', conn)\n\n # Print column headers\n with open(filename, 'w') as csvfile:\n csv_writer = csv.writer(csvfile, delimiter = '\\t')\n csv_writer.writerow(['#ID(s) Interactor A', 'ID(s) Interactor B','Alt ID(s) Interactor A', 'Alt ID(s) Interactor B',\n 'Alias(es) Interactor A', 'Alias(es) Interactor B', 'Interaction Detection Method(s)',\n 'Publication 1st Author(s)', 'Publication ID(s)', 'Taxid Interactor A', 'Taxid Interactor B',\n 'Interaction Type(s)', 'Source Database(s)', 'Interaction Identifier(s)', 'Confidence Value(s)',\n 'Expansion Method(s)', 'Biological Role(s) Interactor A', 'Biological Role(s) Interactor B',\n 'Experimental Role(s) Interactor A', 'Experimental Role(s) Interactor B', 'Type(s) Interactor A',\n 'Type(s) Interactor B', 'Xref(s) Interactor A', 'Xref(s) Interactor B', 'Interaction Xref(s)',\n 'Annotation(s) Interactor A', 'Annotation(s) Interactor B', 'Interaction Annotation(s)', 'Host Organism(s)',\n 'Interaction Parameters', 'Creation Date', 'Update Date', 'Checksum Interactor A', 'Checksum Interactor B',\n 'Interaction Checksum', 'Negative', 'Feature(s) Interactor A', 'Feature(s) Interactor B', 'Stoichiometry Interactor A',\n 'Stoichiometry Interactor B', 'Identification Method(s) Participant A', 'Identification Method(s) Participant B'])\n \n # Get a list of detection method ids that are children of 'interaction detection method'\n # to distinguish from participant detection methods since we curate both types into same field\n method_tuple = (103168,)\n int_method_list = get_child_ids(method_tuple,conn)\n\n # Get a list of experimental role ids that are children of 'experimental role'\n # to distinguish from biological roles since we curate both types into same field\n role_tuple = (103631,)\n role_list = get_child_ids(role_tuple,conn)\n\n # For each interaction, get data\n for int in int_tuples:\n\n # Place interaction ID in variable for column 14\n int_id14 = 'flybase:' + int[0]\n\n # Place FBig ID in variable for column 25\n int_xref25 = 'flybase:' + int[1]\n\n # Make a query tuple with interaction ID to get more data\n qint = (int[0],)\n\n # Get collection ID associated with interaction if there is one and add to column 25\n get_FBlc = ('SELECT DISTINCT l.uniquename '\n 'FROM library l, library_interaction li, interaction i '\n 'WHERE i.interaction_id = li.interaction_id AND li.library_id = l.library_id '\n 'AND i.is_obsolete = \\'f\\' AND l.is_obsolete = \\'f\\' AND i.uniquename = %s')\n FBlc = connect(get_FBlc, qint, conn)\n if FBlc:\n int_xref25 += '|flybase:' + FBlc[0][0]\n\n # Get author for column 8\n get_author = ('SELECT DISTINCT pa.surname, pa.givennames, p.pyear '\n 'FROM pubauthor pa, pub p, interaction_pub ip, interaction i '\n 'WHERE i.interaction_id = ip.interaction_id AND ip.pub_id= p.pub_id '\n 'AND p.pub_id = pa.pub_id AND pa.rank = 1 AND i.uniquename = %s')\n author = connect(get_author, qint, conn)\n if author:\n if author[0][0] is not 
None:\n last = author[0][0]\n else:\n last = ''\n if author[0][1] is not None:\n first = author[0][1]\n else:\n first = ''\n if author[0][2] is not None:\n year = author[0][2]\n else:\n year = ''\n ref8 = last + \" \" + first + \" (\" + year + \")\"\n else:\n ref8 = '-'\n\n # Get pub IDs, FBrf and pmid, for column 9\n # Get FBrf and pub type -all interactions should be associated with an FBrf\n get_FBrf = ('SELECT DISTINCT p.uniquename, cvt.name '\n 'FROM interaction i, interaction_pub ip, pub p, cvterm cvt '\n 'WHERE i.interaction_id = ip.interaction_id '\n 'AND ip.pub_id = p.pub_id AND p.type_id = cvt.cvterm_id AND '\n 'p.is_obsolete = \\'f\\' AND i.uniquename = %s')\n FBrf = connect(get_FBrf, qint, conn)\n pubid9 = 'flybase:' + FBrf[0][0]\n\n # Use FBrf to query for pubmed ID\n # If associated FBrf is a personal communication, it will not have a pmid\n if FBrf[0][1] == 'personal communication to FlyBase':\n pub_query = ()\n\n # If associated FBrf is a paper and not e.g. suppl material, use FBrf to get pmid\n elif FBrf[0][1] == 'paper':\n pub_query = (FBrf[0][0],)\n\n # If associated FBrf is not itself a paper, get FBrf of related paper\n else:\n get_related = ('SELECT DISTINCT p2.uniquename '\n 'FROM pub p1, pub_relationship pr, pub p2, cvterm cvt '\n 'WHERE p1.pub_id = pr.object_id AND pr.subject_id = p2.pub_id '\n 'AND p2.type_id = cvt.cvterm_id AND p2.is_obsolete = \\'f\\' AND p1.uniquename = %s')\n rel_query = (FBrf[0][0],)\n related = connect(get_related,rel_query,conn)\n if related:\n pub_query = related\n\n # If FBrf for a paper is found, use it to get pmid\n if pub_query:\n pmid = get_pubmed(pub_query,conn)\n if pmid:\n pubid9 = pubid9 + '|pubmed:' + pmid\n \n # Otherwise try another query to find FBrf of a related paper and use that to get pmid\n else:\n get_also = ('SELECT DISTINCT p2.uniquename '\n 'FROM pub p1, pub_relationship pr, pub p2, cvterm cvt '\n 'WHERE p1.pub_id = pr.subject_id AND pr.object_id = p2.pub_id '\n 'AND pr.type_id = cvt.cvterm_id AND cvt.name = \\'also_in\\' '\n 'AND p2.is_obsolete = \\'f\\' AND p1.uniquename = %s')\n also = connect(get_also,pub_query,conn)\n if also:\n pub_query = also\n pmid = get_pubmed(pub_query,conn)\n if pmid:\n pubid9 = pubid9 + '|pubmed:' + pmid\n\n # Get interaction type for column 12\n get_int_type = ('SELECT dx.accession, cvt.name '\n 'FROM interaction i, cvterm cvt, dbxref dx, db '\n 'WHERE i.type_id = cvt.cvterm_id AND cvt.dbxref_id = dx.dbxref_id '\n 'AND dx.db_id = db.db_id AND db.name = \\'MI\\' AND i.uniquename = %s')\n int_type = connect(get_int_type, qint, conn)\n intype12 = 'psi-mi:\"MI:' + int_type[0][0] + '\"(' + int_type[0][1] + ')'\n\n # Get interaction annotations for column 28\n # First get comments on source\n get_source = ('SELECT DISTINCT ip.value '\n 'FROM interaction i, interactionprop ip, cvterm cvt '\n 'WHERE i.interaction_id = ip.interaction_id AND ip.type_id = cvt.cvterm_id '\n 'AND cvt.is_obsolete=0 AND cvt.name = \\'comments on source\\' '\n 'AND i.uniquename = %s')\n annots = connect(get_source, qint, conn)\n\n # If there are source annotations, start the string with the first\n if annots:\n annots28 = 'comment:\"' + annots[0][0].replace('\"','') + '\"'\n\n # If more than one source annotation, add others to string\n if len(annots) > 1:\n for ann in annots[1:]:\n annots28 += '|comment:\"' + ann[0].replace('\"','') + '\"'\n\n # Then get comments and add to string\n comments = get_comments(qint,conn)\n if comments:\n for comm in comments:\n if comm[0] is not None:\n annots28 += '|comment:\"' + 
comm[0].replace('\"','') + '\"'\n\n # If no source annotations, get comments and start string with comment\n else:\n comments = get_comments(qint,conn)\n if comments:\n annots28 = 'comment:\"' + comments[0][0].replace('\"','') + '\"'\n if len(comments) > 1:\n for comm in comments[1:]:\n annots28 += '|comment:\"' + comm[0].replace('\"','') + '\"'\n else:\n annots28 = '-'\n\n # Get interaction detection method for column 7\n get_methods = ('SELECT DISTINCT dx.accession, cvt.name, cvt.cvterm_id '\n 'FROM dbxref dx, cvterm cvt, interaction_cvterm ic, interaction i '\n 'WHERE dx.dbxref_id = cvt.dbxref_id AND cvt.cvterm_id = ic.cvterm_id AND '\n 'ic.interaction_id = i.interaction_id AND cvt.is_obsolete=0 AND i.uniquename = %s')\n methods = connect(get_methods,qint,conn)\n assays7 = ''\n\n # Start a list of methods attached to interaction that are in approprate cv branch\n ok_list = []\n for meth in methods:\n if meth[2] in int_method_list:\n ok_list.append(meth)\n\n # Make lists of terms to remove if there is more than one appropriate method term \n term_list_1 = ['inferred by author', 'inferred by curator', 'competition binding', 'genetic interference', 'light microscopy',\n 'ultraviolet-visible spectroscopy', 'interologs mapping', 'luminiscence technology', 'experimental knowledge based']\n term_list_2 = ['fluorescence microscopy', 'nucleic acid uv cross-linking assay', 'cross-linking study', 'fluorescence technology',\n 'protein cross-linking with a bifunctional reagent', 'competition binding', 'confocal microscopy']\n\n # If there is only one method on the ok_list make a string\n if len(ok_list) == 1:\n assays7 = 'psi-mi:\"MI:' + ok_list[0][0] + '\"(' + ok_list[0][1] + ')'\n \n # If there is more than one method, remove terms from list one, then list two if still more than one\n elif len(ok_list) > 1:\n better_list = whittle(ok_list, term_list_1)\n if len(better_list) == 1:\n assays7 = 'psi-mi:\"MI:' + better_list[0][0] + '\"(' + better_list[0][1] + ')'\n elif len(better_list) > 1:\n best_list = whittle(better_list, term_list_2)\n if len(best_list) == 1:\n assays7 = 'psi-mi:\"MI:' + best_list[0][0] + '\"(' + best_list[0][1] + ')'\n\n # If after removing extraneous terms there is still more than one method, choose one based on\n # hierarchy affinity > enzymatic > cosedimentation > scattering\n elif len(best_list) > 1:\n for item in best_list:\n if ('affinity' in item[1] or 'coimmunoprecipitation' in item[1] or 'pull down' in item[1]):\n assays7 = 'psi-mi:\"MI:' + item[0] + '\"(' + item[1] + ')'\n break \n if not assays7:\n for item in best_list:\n if 'enzymatic' in item[1] and not assays7:\n assays7 = 'psi-mi:\"MI:' + item[0] + '\"(' + item[1] + ')'\n break\n if not assays7:\n for item in best_list:\n if 'cosedimentation' in item[1] and not assays7:\n assays7 = 'psi-mi:\"MI:' + item[0] + '\"(' + item[1] + ')'\n break\n if not assays7:\n for item in best_list:\n if 'scattering' in item[1] and not assays7:\n assays7 = 'psi-mi:\"MI:' + item[0] + '\"(' + item[1] + ')'\n break\n\n # If a single method term has not yet been identified, choose based on interaction id\n if not assays7:\n if 'CH' in int[0]:\n assays7 = 'psi-mi:\"MI:0091\"(chromatography technology)'\n elif 'FT' in int[0]:\n assays7 = 'psi-mi:\"MI:0417\"(footprinting)'\n \n # Otherwise use multiple terms\n else:\n assays7 = 'psi-mi:\"MI:' + best_list[0][0] + '\"(' + best_list[0][1] + ')'\n else:\n assays7 = '-'\n else:\n assays7 = '-'\n else:\n assays7 = '-'\n\n # Get interaction participants\n get_parts = ('SELECT DISTINCT 
g.uniquename, g.name, x.uniquename, x.name '\n 'FROM feature g, feature_relationship fr, feature x, feature_interaction fi, '\n 'feature_interactionprop fip, interaction i, cvterm cvt '\n 'WHERE i.interaction_id = fi.interaction_id AND fi.feature_interaction_id = fip.feature_interaction_id '\n 'AND fip.type_id = cvt.cvterm_id AND fi.feature_id = x.feature_id '\n 'AND x.feature_id = fr.subject_id AND fr.object_id = g.feature_id AND fr.type_id = 59983 '\n 'AND cvt.name = \\'participating feature\\' AND g.is_obsolete = \\'f\\' AND x.is_obsolete = \\'f\\' '\n 'AND g.uniquename LIKE \\'FBgn%%\\' AND i.uniquename = %s ORDER BY g.uniquename')\n parts = connect(get_parts,qint,conn)\n\n # Construct dictionary to associate FBgns of generic genes to Entrez ID\n gene_map = {}\n gene_map['FBgn0000002'] = '3771903'\n gene_map['FBgn0001195'] = '3772715'\n gene_map['FBgn0001196'] = '318855'\n gene_map['FBgn0001198'] = '326273'\n gene_map['FBgn0001199'] = '318847'\n gene_map['FBgn0001200'] = '318846'\n gene_map['FBgn0003523'] = '2768872'\n gene_map['FBgn0061471'] = '5740577'\n gene_map['FBgn0061474'] = '26067172'\n gene_map['FBgn0061475'] = '5740812'\n gene_map['FBgn0065042'] = '26067164'\n\n # Get information for interactor A\n # Primary ID for column 1 id FBgn\n A_id1 = 'flybase:' + parts[0][0]\n\n # If odd characters in gene name, place name in quotes\n if '(' in parts[0][1] or ':' in parts[0][1]:\n geneA = '\"' + parts[0][1] + '\"'\n else:\n geneA = parts[0][1]\n A_name5 = 'flybase:' + geneA + '(gene name)'\n\n # Assign molecule type based on feature name\n if '-XP' in parts[0][3]:\n A_type21 = 'psi-mi:\"MI:0326\"(protein)'\n elif '-XR' in parts[0][3]:\n A_type21 = 'psi-mi:\"MI:0320\"(ribonucleic acid)'\n else:\n A_type21 = 'psi-mi:\"MI:0329\"(unknown participant)'\n\n # Make tuple to use in queries based on gene ID (FBgn)\n gid = (parts[0][0],)\n\n # Get alt IDs for column 3\n CG = get_CG_id(gid,conn)\n if parts[0][0] in gene_map:\n Entrez = gene_map[parts[0][0]]\n else:\n Entrez = get_Entrez_id(gid,conn)\n\n if CG and Entrez:\n A_altids3 = 'flybase:' + CG[0][0] + '|entrez gene/locuslink:' + Entrez\n elif Entrez and not CG:\n A_altids3 = 'entrez gene/locuslink:' + Entrez\n elif CG and not Entrez:\n A_altids3 = 'flybase:' + CG[0][0]\n else:\n A_altids3 = '-'\n\n # Make tuple to use in queries based on feature ID\n xid = (parts[0][2],)\n\n # Get taxid for column 10\n tax = get_taxid(xid,conn)\n if tax is not None:\n A_tax10 = 'taxid:' + tax[0] + '(\"' + tax[1] + ' ' + tax[2] + '\")' \n else:\n A_tax10 = '-'\n\n # Make tuple for queries based on interaction ID and feature ID\n xint = (parts[0][2],int[0],)\n\n rol = get_role(xint,conn)\n if rol[0][2] in role_list:\n A_role19 = 'psi-mi:\"MI:' + rol[0][0] + '\"(' + rol[0][1] + ')'\n A_role17 = '-'\n else:\n A_role17 = 'psi-mi:\"MI:' + rol[0][0] + '\"(' + rol[0][1] + ')'\n A_role19 = 'psi-mi:\"MI:0499\"(unspecified role)'\n\n # Comment out section to get subregions until data can be sufficiently standardized for mitab parser\n # Get subregion/experimental feature info for A\n # Asubs = get_subregions(xint,conn)\n # if Asubs:\n # if 'aa ' in Asubs[0][1] or 'nt ' in Asubs[0][1]:\n # Asubregion = Asubs[0][1][3:]\n # if 'aa ' in Asubregion:\n # Asubregion = Asubregion.replace(\" and aa \", \",\")\n # else:\n # Asubregion = Asubs[0][1]\n # regexp = re.compile(r'[0-9]+,[0-9]+,')\n # if ('-' in Asubregion and regexp.search(Asubregion)) or ('-' not in Asubregion and ',' in Asubregion):\n # components = Asubregion.split(\",\")\n # Asubregion = components[0] + \"-\" + 
components[len(components)-1]\n # elif '-' not in Asubregion and ',' not in Asubregion:\n # Asubregion = Asubregion + \"-\" + Asubregion\n # if \"(\" in Asubs[0][2] or \"-\" in Asubs[0][2]:\n # Atext = '\"' + Asubs[0][2] + '\"'\n # else:\n # Atext = Asubs[0][2]\n # A_feature37 = Asubs[0][0] + ':' + Asubregion + '(' + Atext + ')'\n # if len(Asubs) > 1:\n # for item in Asubs[1:]:\n # if 'aa ' in item[1] or 'nt ' in item[1]:\n # Asub = item[1][3:]\n # if 'aa ' in Asub:\n # Asub = Asub.replace(\" and aa \", \",\")\n # else:\n # Asub = item[1]\n # if ('-' in Asub and regexp.search(Asub)) or ('-' not in Asub and ',' in Asub):\n # components = Asub.split(\",\")\n # Asub = components[0] + \"-\" + components[len(components)-1]\n # elif '-' not in Asub and ',' not in Asub:\n # Asub = Asub + \"-\" + Asub\n\n # if \"(\" in item[2] or \"-\" in item[2]:\n # Atext = '\"' + item[2] + '\"'\n # else:\n # Atext = item[2]\n # A_feature37 += '|' + item[0] + ':' + Asub + '(' + Atext + ')'\n # else:\n # A_feature37 = '-'\n \n # Get isoform info for A\n Aiso = get_isoforms(xint,conn)\n Atag = get_tag_info(xint,conn)\n\n if Aiso:\n A_annot26 = 'comment:\"' + Aiso[0][0] + ' specific\"'\n if Atag:\n for each in Atag:\n A_annot26 += '|comment:\"' + each[0] + '\"'\n elif len(Aiso) == 0:\n if Atag:\n A_annot26 = 'comment:\"' + Atag[0][0] + '\"'\n if len(Atag) > 1:\n for each in Atag[1:]:\n A_annot26 += '|comment:\"' + each[0] + '\"'\n else:\n A_annot26 = '-'\n\n # Get information for interactor B if there is one\n if len(parts) > 1:\n B_id2 = 'flybase:' + parts[1][0]\n if '(' in parts[1][1] or ':' in parts[1][1]:\n geneB = '\"' + parts[1][1] + '\"'\n else:\n geneB = parts[1][1]\n B_name6 = 'flybase:' + geneB + '(gene name)'\n if '-XP' in parts[1][3]:\n B_type22 = 'psi-mi:\"MI:0326\"(protein)'\n elif '-XR' in parts[1][3]:\n B_type22 = 'psi-mi:\"MI:0320\"(ribonucleic acid)'\n else:\n B_type22 = 'psi-mi:\"MI:0329\"(unknown participant)'\n\n gid = (parts[1][0],)\n\n CG = get_CG_id(gid,conn)\n if parts[1][0] in gene_map:\n Entrez = gene_map[parts[1][0]]\n else:\n Entrez = get_Entrez_id(gid,conn)\n\n if CG and Entrez:\n B_altids4 = 'flybase:' + CG[0][0] + '|entrez gene/locuslink:' + Entrez\n elif Entrez and not CG:\n B_altids4 = 'entrez gene/locuslink:' + Entrez\n elif CG and not Entrez:\n B_altids4 = 'flybase:' + CG[0][0]\n else:\n B_altids4 = '-'\n\n xid = (parts[1][2],)\n tax = get_taxid(xid,conn)\n if tax is not None:\n B_tax11 = 'taxid:' + tax[0] + '(\"' + tax[1] + ' ' + tax[2] + '\")'\n else:\n B_tax11 = '-'\n xint = (parts[1][2],int[0],)\n rol = get_role(xint,conn)\n if rol[0][2] in role_list:\n B_role20 = 'psi-mi:\"MI:' + rol[0][0] + '\"(' + rol[0][1] + ')'\n B_role18 = '-'\n else:\n B_role18 = 'psi-mi:\"MI:' + rol[0][0] + '\"(' + rol[0][1] + ')'\n B_role20 = 'psi-mi:\"MI:0499\"(unspecified role)'\n\n\n # Comment out section to get subregions until data can be sufficiently standardized for mitab parser\n # Get subregion/experimental feature info for B\n # Bsubs = get_subregions(xint,conn)\n # if Bsubs:\n # if 'aa ' in Bsubs[0][1] or 'nt ' in Bsubs[0][1]:\n # Bsubregion = Bsubs[0][1][3:]\n # if 'aa ' in Bsubregion:\n # Bsubregion = Bsubregion.replace(\" and aa \", \",\")\n # else:\n # Bsubregion = Bsubs[0][1]\n # if ('-' in Bsubregion and regexp.search(Bsubregion)) or ('-' not in Bsubregion and ',' in Bsubregion):\n # components = Bsubregion.split(\",\")\n # Bsubregion = components[0] + \"-\" + components[len(components)-1]\n # elif '-' not in Bsubregion and ',' not in Bsubregion:\n # Bsubregion = Bsubregion + \"-\" + 
Bsubregion\n # if \"(\" in Bsubs[0][2] or \"-\" in Bsubs[0][2]:\n # Btext = '\"' + Bsubs[0][2] + '\"'\n # else:\n # Btext = Bsubs[0][2]\n # B_feature38 = Bsubs[0][0] + ':' + Bsubregion + '(' + Btext + ')'\n # if len(Bsubs) > 1:\n # for item in Bsubs[1:]:\n # if 'aa ' in item[1] or 'nt ' in item[1]:\n # Bsub = item[1][3:]\n # if 'aa ' in Bsub:\n # print(Bsub)\n # Bsub = Bsub.replace(\" and aa \", \",\")\n # print(Bsub)\n # else:\n # Bsub = item[1]\n # if ('-' in Bsub and regexp.search(Bsub)) or ('-' not in Bsub and ',' in Bsub):\n # components = Bsub.split(\",\")\n # Bsub = components[0] + \"-\" + components[len(components)-1]\n # elif '-' not in Bsub and ',' not in Bsub:\n # Bsub = Bsub + \"-\" + Bsub\n # if \"(\" in item[2] or \"-\" in item[2]:\n # Btext = '\"' + item[2] + '\"'\n # else:\n # Btext = item[2]\n # B_feature38 += '|' + item[0] + ':' + Bsub + '(' + Btext + ')'\n # else:\n # B_feature38 = '-'\n\n Biso = get_isoforms(xint,conn)\n Btag = get_tag_info(xint,conn)\n if Biso:\n B_annot27 = 'comment:\"' + Biso[0][0] + ' specific\"'\n if Btag:\n for each in Btag:\n B_annot27 += '|comment:\"' + each[0] + '\"'\n elif len(Biso) == 0:\n if len(Btag) >= 1:\n B_annot27 = 'comment:\"' + Btag[0][0] + '\"'\n if len(Btag) > 1:\n for each in Btag[1:]:\n B_annot27 += '|comment:\"' + each[0] + '\"'\n else:\n B_annot27 = '-'\n\n # Otherwise fill B fields with A values\n else:\n B_id2 = A_id1\n B_name6 = A_name5\n B_altids4 = A_altids3\n B_tax11 = A_tax10\n B_type22 = A_type21\n B_role20 = A_role19\n B_role18 = A_role17\n # B_feature38 = A_feature37\n B_annot27 = A_annot26\n \n # Print line for each interaction\n with open(filename, 'a') as csvfile:\n csv_writer = csv.writer(csvfile, quotechar = '', quoting=csv.QUOTE_NONE, delimiter = '\\t')\n csv_writer.writerow([A_id1, B_id2, A_altids3, B_altids4,\n A_name5, B_name6, assays7, ref8, pubid9, A_tax10, B_tax11,\n intype12, 'psi-mi:\"MI:0478\"(flybase)', int_id14, '-',\n '-', A_role17, B_role18, A_role19, B_role20, A_type21,\n B_type22, '-', '-', int_xref25, A_annot26, B_annot27, annots28, '-',\n '-', '-', '-', '-', '-', '-', 'false', '-', '-', '-',\n '-', '-', '-'])",
"def druggable_interactors(self) -> pd.DataFrame:\n cols = ['drug', 'capsule_interactor_type', 'capsule_interactor_bel', 'interactor_bel', 'interactor_type',\n 'interactor_name', 'relation_type', 'target_bel', 'target_symbol', 'target_type',\n 'pmid', 'pmc', 'rel_pub_year', 'rel_rid', 'drug_rel_rid', 'drug_rel_actions',\n 'drugbank_id', 'chembl_id', 'pubchem_id', 'pmod_type']\n\n if self.node_type != 'protein' or not self.pmods:\n pure_query = PURE_DRUGGABLE_QUERY.replace('MATCH {{class:pmod, as:pmod{}}}<-has__pmod-', 'MATCH')\n capsule_query_1 = CAPSULE_DRUGGABLE_MODIFIED.replace(\n 'MATCH {{class:pmod, as:pmod{}}}<-has__pmod-', 'MATCH'\n )\n capsule_query_2 = CAPSULE_DRUGGABLE_COMPLEX.replace(\n 'MATCH {{class:pmod, as:pmod{}}}<-has__pmod-', 'MATCH'\n )\n formatted_pure_sql = pure_query.format(self.node_type, self.node_name, self.edge_filters)\n formatted_capsule_sql_1 = capsule_query_1.format(self.node_type, self.node_name, self.edge_filters)\n formatted_capsule_sql_2 = capsule_query_2.format(self.node_type, self.node_name, self.edge_filters)\n\n else:\n if 'all' in self.pmods:\n pmod_condition = \"type != '' or name != ''\"\n else:\n pmod_condition = f\"type in {self.pmods}\"\n\n pmod_string = f\", WHERE:({pmod_condition})\"\n\n if 'pho' in self.pmods or 'all' in self.pmods:\n pmod_string = pmod_string.replace(\")\", \" OR name like '%phosphorylat%')\")\n\n # Drugs only for humans so only check one\n formatted_pure_sql = PURE_DRUGGABLE_QUERY.format(\n pmod_string, self.node_type, self.node_name, self.edge_filters\n )\n formatted_capsule_sql_1 = CAPSULE_DRUGGABLE_MODIFIED.format(\n pmod_string, self.node_type, self.node_name, self.edge_filters\n )\n formatted_capsule_sql_2 = CAPSULE_DRUGGABLE_COMPLEX.format(\n pmod_string, self.node_type, self.node_name, self.edge_filters\n )\n\n logger.info(\"Querying database...\")\n\n pure_results = self.__query_graphstore(sql=formatted_pure_sql)\n capsule_results_1 = self.__query_graphstore(sql=formatted_capsule_sql_1)\n capsule_results_2 = self.__query_graphstore(sql=formatted_capsule_sql_2)\n\n results_check = [x is not None for x in (pure_results, capsule_results_1, capsule_results_2)]\n\n if any(results_check): # Need only 1 not to be None\n df_concat = pd.concat(\n [pure_results, capsule_results_1, capsule_results_2], axis=0\n ).reindex(columns=cols)\n self.results = df_concat[cols]\n self.results[\"drug_rel_actions\"] = self.results[\"drug_rel_actions\"].str.join(\"|\")\n self.results = self.results.drop_duplicates()\n\n return self.results",
"def get_from_id(tid):\r\n conn_string = \"host='127.0.0.1' dbname='NAME' user='NAME' password='PASSWD'\"\r\n conn = psycopg2cffi.connect(conn_string)\r\n cur = conn.cursor()\r\n query = \"select * from trajectory.taxi where tid = \" + str(tid) + \" ORDER BY index;\"\r\n logging.debug('query: '+query)\r\n \r\n try:\r\n cur.execute(query)\r\n except psycopg2cffi.Error as e:\r\n conn.rollback()\r\n cur.close()\r\n return logging.error('query: '+query)\r\n\r\n \r\n trajectory = []\r\n \r\n for r in cur:\r\n trajectory.append({'tid': int(r[0]),'index': int(r[1]),'x': r[2],'y': r[3]})\r\n\r\n cur.close()\r\n conn.close()\r\n return trajectory",
"def useful_test_function(db, query):\n print pd.read_sql_query(query, db)",
"def get_tone_from_api_and_return_columns(comment):\n tone_dict = None\n while tone_dict is None:\n tone_dict = get_tone_from_IBM(comment)\n if tone_dict is not None:\n return get_columns_from_IBM_tone(tone_dict)",
"def step030():\n logger.logMessage('Begin: get data from table')\n \n query = 'select tsa,time at time zone \\'utc\\' from weather_dupes ' + \\\n 'order by time;'\n \n pgConn = pg.connect(host=host,user=user,password=password,database=database) \n with pgConn:\n with pgConn.cursor() as c:\n c.execute(query)\n numrecs = 0\n with open(dbDumpFile,'w') as f:\n for row in c.fetchall():\n tsa = row[0]\n time= row[1].isoformat()\n f.write('{0:14d};{1:25s}\\n'.format(tsa,time))\n numrecs += 1\n if numrecs % 1000 == 0:\n logger.logMessage(level='DEBUG',message=\"{0:9d} rows dumped\".format(numrecs))\n logger.logMessage(\"Total rows: {0:d}\".format(numrecs))\n \n logger.logMessage('End : get data from table')",
"def query3() :",
"def get_locus_info(database, query):\n # Connect to database.\n db_connexion = sqlite3.connect(database)\n cursor = db_connexion.cursor()\n\n # Query database.\n chrom_info = cursor.execute(query)\n\n # Convert to Pandas dataframe\n column_names = [column[0] for column in chrom_info.description]\n chrom_info_df = pd.DataFrame(chrom_info.fetchall(), columns=column_names)\n\n # Select only strands + and -\n chrom_info_df = chrom_info_df[ (chrom_info_df[\"Strand\"] == \"C\") | (chrom_info_df[\"Strand\"] == \"W\") ]\n # Remove \"2-micron\" plasmid\n chrom_info_df = chrom_info_df[ chrom_info_df[\"Chromosome\"] != \"2-micron\" ]\n # Convert chromosome id to int\n chrom_info_df[\"Chromosome\"] = chrom_info_df[\"Chromosome\"].astype(int)\n\n return chrom_info_df",
"def get_cast_notes():\n \n #get all movies from db\n movies_df = movie_helper.get_movies_df() \n \n with tqdm(total=len(movies_df)) as pbar:\n for index, row in movies_df.iterrows(): \n \n #if imdbid exists use it to collect cast notes\n if (row['imdbId']):\n movie = ia.get_movie(str(row['imdbId']))\n cast_list = movie.get('cast')\n if (cast_list != None) :\n for cast_member in cast_list: \n imdb_id = cast_member.personID\n updates = { 'notes' : cast_member.notes }\n selects = {\"p_imdbId\" : imdb_id, \"m_imdbId\" : row['imdbId'] }\n database_helper.update_data(\"actors\", update_params = updates, select_params = selects)\n \n pbar.update(1)",
"def interactor_finder():\n from tools import prot_id_converter\n\n proteinList = []\n with open(\"../datafiles/known_interactors.txt\",\"r\") as inpProt: # create list of gene names from hand-made text file with known ptp22 interactors\n for protLine in inpProt:\n if protLine != \"\\n\":\n curName = protLine.strip().split(\"\\t\")[0]\n curName = curName[0] + curName[1:].lower()\n proteinList.append(curName)\n inpIdL = prot_id_converter(proteinList, \"10090\", \"genesymbol\", \"uniprotaccession\") # convert to uniprot accessions\n print(inpIdL)\n \n with open(\"../bob/processed/bobprots_all.csv\",\"r\") as targetF: # create list of all uniprot accessions in Bob's dataset (unique razor proteins only)\n targetD = {}\n for targetLine in targetF:\n targetD[targetLine.split(\",\")[0]] = targetLine.split(\",\")[1].strip()\n for inpIdItem in inpIdL:\n for queryI in inpIdItem:\n if queryI in targetD:\n print(targetD[queryI])\n break",
"def find_funMotif_variants_in_tissue(funMotifs: dict, tissue: str, variant_BedTool_file: str, db_name: str,\r\n db_user_name: str, output_file: str, motif_BedTool_file: str):\r\n get_BedTool_for_functional_motifs(funMotifs, tissue, db_user_name, db_name, motif_BedTool_file)\r\n overlap_variants_and_motifs(motif_BedTool_file, variant_BedTool_file, output_file)\r\n return",
"def extract_TP(thermofile, column_number, TP, addtxt):\n with open(thermofile, 'r') as f:\n [f.readline() for i in range(3)]\n #extract data\n while True:\n line = f.readline()\n if not line: break\n else: \n entry=line.split('\\n')[0].split('\\t')\n TP[entry[0].split('outcar.umd.dat')[0].split('/')[-1]+addtxt] = (int(entry[column_number['T']]),float(entry[column_number['P']]))\n return TP",
"def query(self, session, pair):\n info = mod.MlMotifsInfo\n if self.table == info:\n return self.__self_query__(session, pair)\n\n loop_type, release = pair\n return session.query(self.table).\\\n filter(self.table.motif_id.like(loop_type + '_%')).\\\n filter(self.table.ml_release_id == release)",
"def get_comments(qint,conn):\n\n comms = ('SELECT DISTINCT ip.value '\n 'FROM interaction i, interactionprop ip, cvterm cvt '\n 'WHERE i.interaction_id = ip.interaction_id AND ip.type_id = cvt.cvterm_id '\n 'AND cvt.is_obsolete=0 AND cvt.name != \\'comments on source\\' '\n 'AND cvt.name != \\'internalnotes\\' AND i.uniquename = %s')\n comnts = connect(comms, qint, conn)\n return(comnts)",
"def get_psql(npi_list):\n\n npi_dict = {}\n conn = pg2.connect(dbname='medicare', user='postgres')\n cur = conn.cursor()\n\n for dr in npi_list:\n query = \"SELECT hcpcs_desc FROM util_payments_2013 WHERE npi='{0}';\".format(dr)\n cur.execute(query)\n npi_dict[dr] = cur.fetchall()\n \n return npi_dict",
"def create_temporal_database(dbif):\n global tgis_backend\n global tgis_version\n global tgis_db_version\n global tgis_database_string\n\n template_path = get_sql_template_path()\n msgr = get_tgis_message_interface()\n\n # Read all SQL scripts and templates\n map_tables_template_sql = open(os.path.join(\n template_path, \"map_tables_template.sql\"), 'r').read()\n raster_metadata_sql = open(os.path.join(\n get_sql_template_path(), \"raster_metadata_table.sql\"), 'r').read()\n raster3d_metadata_sql = open(os.path.join(template_path,\n \"raster3d_metadata_table.sql\"),\n 'r').read()\n vector_metadata_sql = open(os.path.join(template_path,\n \"vector_metadata_table.sql\"),\n 'r').read()\n raster_views_sql = open(os.path.join(template_path, \"raster_views.sql\"),\n 'r').read()\n raster3d_views_sql = open(os.path.join(template_path,\n \"raster3d_views.sql\"), 'r').read()\n vector_views_sql = open(os.path.join(template_path, \"vector_views.sql\"),\n 'r').read()\n\n stds_tables_template_sql = open(os.path.join(template_path,\n \"stds_tables_template.sql\"),\n 'r').read()\n strds_metadata_sql = open(os.path.join(template_path,\n \"strds_metadata_table.sql\"),\n 'r').read()\n str3ds_metadata_sql = open(os.path.join(template_path,\n \"str3ds_metadata_table.sql\"),\n 'r').read()\n stvds_metadata_sql = open(os.path.join(template_path,\n \"stvds_metadata_table.sql\"),\n 'r').read()\n strds_views_sql = open(os.path.join(template_path, \"strds_views.sql\"),\n 'r').read()\n str3ds_views_sql = open(os.path.join(template_path, \"str3ds_views.sql\"),\n 'r').read()\n stvds_views_sql = open(os.path.join(template_path, \"stvds_views.sql\"),\n 'r').read()\n\n # Create the raster, raster3d and vector tables SQL statements\n raster_tables_sql = map_tables_template_sql.replace(\"GRASS_MAP\", \"raster\")\n vector_tables_sql = map_tables_template_sql.replace(\"GRASS_MAP\", \"vector\")\n raster3d_tables_sql = map_tables_template_sql.replace(\n \"GRASS_MAP\", \"raster3d\")\n\n # Create the space-time raster, raster3d and vector dataset tables\n # SQL statements\n strds_tables_sql = stds_tables_template_sql.replace(\"STDS\", \"strds\")\n stvds_tables_sql = stds_tables_template_sql.replace(\"STDS\", \"stvds\")\n str3ds_tables_sql = stds_tables_template_sql.replace(\"STDS\", \"str3ds\")\n\n msgr.message(_(\"Creating temporal database: %s\" % (str(tgis_database_string))))\n\n if tgis_backend == \"sqlite\":\n # We need to create the sqlite3 database path if it does not exist\n tgis_dir = os.path.dirname(tgis_database_string)\n if not os.path.exists(tgis_dir):\n try:\n os.makedirs(tgis_dir)\n except Exception as e:\n msgr.fatal(_(\"Unable to create SQLite temporal database\\n\"\n \"Exception: %s\\nPlease use t.connect to set a \"\n \"read- and writable temporal database path\" % (e)))\n\n # Set up the trigger that takes care of\n # the correct deletion of entries across the different tables\n delete_trigger_sql = open(os.path.join(template_path,\n \"sqlite3_delete_trigger.sql\"),\n 'r').read()\n indexes_sql = open(os.path.join(template_path, \"sqlite3_indexes.sql\"),\n 'r').read()\n else:\n # Set up the trigger that takes care of\n # the correct deletion of entries across the different tables\n delete_trigger_sql = open(os.path.join(template_path,\n \"postgresql_delete_trigger.sql\"),\n 'r').read()\n indexes_sql = open(os.path.join(template_path,\n \"postgresql_indexes.sql\"), 'r').read()\n\n # Connect now to the database\n if dbif.connected is not True:\n dbif.connect()\n\n # Execute the SQL statements for sqlite\n # Create 
the global tables for the native grass datatypes\n dbif.execute_transaction(raster_tables_sql)\n dbif.execute_transaction(raster_metadata_sql)\n dbif.execute_transaction(raster_views_sql)\n dbif.execute_transaction(vector_tables_sql)\n dbif.execute_transaction(vector_metadata_sql)\n dbif.execute_transaction(vector_views_sql)\n dbif.execute_transaction(raster3d_tables_sql)\n dbif.execute_transaction(raster3d_metadata_sql)\n dbif.execute_transaction(raster3d_views_sql)\n # Create the tables for the new space-time datatypes\n dbif.execute_transaction(strds_tables_sql)\n dbif.execute_transaction(strds_metadata_sql)\n dbif.execute_transaction(strds_views_sql)\n dbif.execute_transaction(stvds_tables_sql)\n dbif.execute_transaction(stvds_metadata_sql)\n dbif.execute_transaction(stvds_views_sql)\n dbif.execute_transaction(str3ds_tables_sql)\n dbif.execute_transaction(str3ds_metadata_sql)\n dbif.execute_transaction(str3ds_views_sql)\n\n # The delete trigger\n dbif.execute_transaction(delete_trigger_sql)\n # The indexes\n dbif.execute_transaction(indexes_sql)\n\n # Create the tgis metadata table to store the database\n # initial configuration\n # The metadata table content\n metadata = {}\n metadata[\"tgis_version\"] = tgis_version\n metadata[\"tgis_db_version\"] = tgis_db_version\n metadata[\"creation_time\"] = datetime.today()\n _create_tgis_metadata_table(metadata, dbif)\n\n dbif.close()",
"def metadata(filename):\n import numpy as np\n import pandas as pd\n\n infos = \"\"\"IGRAID 1- 11 Character\nWMOID 13- 17 Integer\nNAME 19- 48 Character\nNAMFLAG 50- 50 Character\nLATITUDE 52- 60 Real\nLATFLAG 62- 62 Character\nLONGITUDE 64- 72 Real\nLONFLAG 74- 74 Character\nELEVATION 76- 81 Real\nELVFLAG 83- 83 Character\nYEAR 85- 88 Integer\nMONTH 90- 91 Integer\nDAY 93- 94 Integer\nHOUR 96- 97 Integer\nDATEIND 99- 99 Integer\nEVENT 101-119 Character\nALTIND 121-122 Character\nBEFINFO 124-163 Character\nBEFFLAG 164-164 Character\nLINK 166-167 Character\nAFTINFO 169-208 Character\nAFTFLAG 209-209 Character\nREFERENCE 211-235 Character\nCOMMENT 236-315 Character\nUPDCOM 316-346 Character\nUPDDATE 348-354 Character\n\"\"\"\n\n colspecs = []\n header = []\n types = {}\n for iline in infos.splitlines():\n if iline == '':\n continue\n ih = iline[0:11].strip().lower()\n header.append(ih)\n ii = int(iline[13:16]) - 1\n ij = int(iline[17:20])\n colspecs.append((ii, ij))\n it = iline[22:].strip()\n if it == 'Character':\n it = 'str'\n\n elif it == 'Real':\n it = 'float'\n\n else:\n it = 'int'\n\n types[ih] = it\n\n data = pd.read_fwf(filename, colspecs=colspecs, header=None, dtype=types, names=header)\n data = data.replace('nan', '')\n data['date'] = pd.to_datetime((data.year * 1000000 +\n np.where(data.month.values == 99, 6, data.month.values) * 10000 +\n np.where(data.day.values == 99, 15, data.day.values) * 100 +\n np.where(data.hour.values == 99, 0, data.hour.values)).apply(str), format='%Y%m%d%H')\n return data",
"def seq_query():\n query_type = input(\n '1.Specific fragment\\n'\n '2.Specific Organism\\n'\n '3.Specific gene\\n'\n '4.All\\n'\n '5.All cds\\n'\n )\n organize = input('Organize output?(y/n)\\n')\n if query_type not in ['1', '2', '3', '4', '5']:\n raise ValueError('wrong input!\\n')\n con = sqlite3.connect('./data/DB')\n cur = con.cursor()\n if query_type == '1':\n organism = input('Organism:\\n')\n gene = input('Gene:\\n')\n frag_type = input('Fragment type(gene, cds, rRNA, tRNA, exon, intron, spacer):\\n')\n cur.execute(\n 'SELECT Taxon, Organism, Name, Type, Strand, Sequence FROM main WHERE Name LIKE ? AND Type = ? AND Organism=?',\n ('%' + gene + '%', frag_type, organism))\n result = cur.fetchall()\n elif query_type == '2':\n organism = input('Organism:\\n')\n frag_type = input('Fragment type(gene, cds, rRNA, tRNA, exon, intron, spacer, whole, fragments):\\n')\n if frag_type == 'fragments':\n cur.execute(\n 'SELECT Taxon, Organism, Name, Type, Strand, Sequence, Head FROM main WHERE Organism = ? ORDER BY Head',\n (organism,))\n else:\n cur.execute(\n 'SELECT Taxon, Organism, Name, Type, Strand, Sequence, Head FROM main WHERE Organism LIKE ? AND Type = ? ORDER BY Head',\n ('%' + organism + '%', frag_type))\n result = cur.fetchall()\n elif query_type == '3':\n gene = input('Gene:\\n')\n frag_type = input('Fragment type(gene, cds, rRNA, tRNA, exon, intron, spacer):\\n')\n cur.execute(\n 'SELECT Taxon, Organism, Name, Type, Strand, Sequence FROM main WHERE Name LIKE ? AND Type = ? ORDER BY Taxon',\n ('%' + gene + '%', frag_type))\n result = cur.fetchall()\n elif query_type == '4':\n cur.execute('SELECT Taxon, Organism, Name, Type, Strand, Sequence, Head FROM main ORDER BY Taxon')\n result = cur.fetchall()\n elif query_type == '5':\n cur.execute(\n 'SELECT Taxon, Organism, Name, Type, Strand, Sequence, Head FROM main WHERE type = \"cds\" ORDER BY Taxon')\n result = cur.fetchall()\n\n query_result = []\n for i in result:\n title = '{0}|{1}|{2}|{3}'.format(i[0], i[1], i[2], i[3])\n sequence = MutableSeq(i[5])\n gene = i[2]\n if i[4] == '-1':\n sequence.seq = sequence.reverse_complement()\n record = [title, gene, sequence]\n query_result.append(record)\n\n if organize == 'y':\n if not exists('output'):\n makedirs('output')\n for i in query_result:\n file_name = 'output/{0}.fasta'.format(i[1].replace('/', ''))\n with open(file_name, 'a') as output_file:\n output_file.write('>{0}\\n{1}\\n'.format(i[0], i[2]))\n else:\n output = input('Enter output filename:\\n')\n with open('{0}.fasta'.format(output), 'w') as output_file:\n for i in query_result:\n output_file.write('>{0}\\n{1}\\n'.format(i[0], i[2]))\n\n cur.close()\n con.close()\n print('Done.\\n')",
"def QueryTeamData(tgtName, yourName, db):\n\t#cur = db.cursor()\n\tteamTgtPipe = pd.io.sql.read_sql(sql = \"SELECT * FROM \" + tgtName.replace(' ', '_'), con = db)\n\tteamYourPipe = pd.io.sql.read_sql(sql = \"SELECT * FROM \" + yourName.replace(' ', '_'), con = db)\n\treturn teamTgtPipe, teamYourPipe",
"def getProcessSteps(self, landsatScene):\n\n with self.getConnection() as conn:\n try:\n cur = conn.cursor()\n data = cur.execute(\"\"\"\\\n select pUID, Desc from process_run where PATH=? and ROW=? and Acqdate=? \n and fk_wfid=?\"\"\", (landsatScene.path, landsatScene.row, landsatScene.acqdate, self.wfid)).fetchall()\n\n if len(data) == 0:\n return ()\n else:\n pids, descriptions = list(zip(*data))\n\n except sqlite3.Error as error:\n cur.close()\n raise workflowException('Database {0}: {1}'.format(self.wfname, repr(error)))\n\n return pids",
"def extractTable(database: str, table: str) -> list:\n\n bd = _database(database)\n\n if bd:\n\n tb = _table(database, table)\n\n if tb:\n\n mode = tb[\"modo\"]\n val = -1\n\n if mode == \"avl\":\n val = avl.extractTable(database, table)\n\n elif mode == \"b\":\n val = b.extractTable(database, table)\n\n elif mode == \"bplus\":\n val = bplus.extractTable(database, table)\n\n elif mode == \"hash\":\n val = hash.extractTable(database, table)\n\n elif mode == \"isam\":\n val = isam.extractTable(database, table)\n\n elif mode == \"json\":\n val = json.extractTable(database, table)\n\n elif mode == \"dict\":\n val = dict.extractTable(database, table)\n\n return val\n\n else:\n return 3\n\n else:\n return 2",
"def get_metaData():\n #get all movies from db\n movies_df = movie_helper.get_movies_df()\n \n #get movie meta data\n with tqdm(total=len(movies_df)) as pbar:\n for index, row in movies_df.iterrows(): \n \n #if an imdbid exists use it to look up the API\n if (row['imdbId']):\n \n #get base meta data from imdb\n movie = ia.get_movie(str(row['imdbId']))\n year = movie['year']\n \n #created delimited list of genre strings\n if (movie.get('genres')): \n genres = ','.join(movie.get('genres')) \n \n rating = movie.get('rating')\n votes = movie.get('votes')\n \n #create delimited list of movie certificates\n certificates = None\n if (movie.get('certificates')): \n certificates = ','.join(movie.get('certificates'))\n \n #update database with collected meta data\n update_params = {\n \"year\" : year,\n \"genres\" : genres,\n \"rating\" : rating,\n \"votes\" : votes,\n \"certificates\" : certificates\n }\n select_params = { \"movieId\" : row[\"movieId\"] }\n database_helper.update_data(\"movies\", update_params = update_params, select_params = select_params)\n \n pbar.update(1)"
] |
[
"0.5965515",
"0.5776989",
"0.55274004",
"0.54846585",
"0.5407883",
"0.5313562",
"0.53060126",
"0.5265161",
"0.5230755",
"0.52072334",
"0.52070326",
"0.5163418",
"0.511316",
"0.5105761",
"0.5102435",
"0.5078438",
"0.50740546",
"0.506678",
"0.5064191",
"0.50562096",
"0.5044499",
"0.5027281",
"0.50212795",
"0.5003634",
"0.49671677",
"0.49640372",
"0.4959572",
"0.49565902",
"0.49533308",
"0.4949174"
] |
0.6454166
|
0
|
Function that extracts the essential information of a variant file and returns it as a BedTool object
|
def get_BedTool_from_variant_file(variant_file: str):
    import os
    import pybedtools

    # TODO: create checks for file format
    # Drop comment/header lines and keep the relevant columns (the second field
    # with its first three characters, e.g. a "chr" prefix, stripped, followed by
    # the next seven fields), written space-separated to a temporary file.
    os.system(f"grep -v '#' {variant_file} " + "| awk -F '\t' '{print substr($2, 4), $3, $4, $5, $6, $7, $8, $9}' " +
              f" >{variant_file}_tmp")
    pybedtools.BedTool(variant_file + "_tmp").saveas(variant_file + ".bed_tmp")
    # Replace the space separators with tabs and drop the first line to obtain a valid BED file.
    os.system(f"perl -p -i -e 's/ /\t/g' {variant_file}.bed_tmp")
    os.system(f"sed '1d' {variant_file}.bed_tmp > {variant_file}.bed")
    os.remove(f"{variant_file}.bed_tmp")
    return pybedtools.BedTool(f"{variant_file}.bed")
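
# A minimal usage sketch, not part of the original function above: the file
# names "variants.tsv" and "regions.bed" are placeholders assumed purely for
# illustration. It only shows that the returned BedTool plugs into the usual
# pybedtools operations (here an intersection keeping variants that overlap
# any target region); the converted "<variant_file>.bed" file also remains on
# disk as a side effect.
if __name__ == "__main__":
    variants_bt = get_BedTool_from_variant_file("variants.tsv")
    hits = variants_bt.intersect("regions.bed", u=True)
    print(f"{len(hits)} variants overlap the target regions")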
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def variants ( self ) :\n vars = []\n items = [ 'distrib' , 'default' ]\n items += [ 'stat_%s' % d for d in range ( 10 ) ]\n items += [ 'syst_%s' % d for d in range ( 10 ) ]\n \n from ostap.core.core import rootError \n from ostap.logger.logger import logFatal\n \n for item in items :\n if self.__variant == item : continue \n path = os.path.join ( self.__config_run.eosrootdir ,\n self.__config ,\n \"%s_%s.root\" % ( self.__dataset, item ) )\n with logFatal() , rootError () : \n rf = ROOT.TFile.Open ( path , 'READ' , exception = False )\n if rf and rf.IsOpen ( ) :\n vars.append ( item )\n rf.Close() \n \n return tuple ( vars )",
"def get_variant_line():\n pass",
"def extract_variant_info(variant):\n var_regex = re.compile(\n r'(?P<ref>[A-Za-z]+)(?P<position>[-0-9]+)(?P<alt>[A-Za-z]+)$')\n match = var_regex.search(variant)\n reference = match.group('ref')\n position = int(match.group('position'))\n alternate = match.group('alt')\n return reference, position, alternate",
"def sample_vcf():\n file_content = b\"\"\"##fileformat=VCFv4.2\n##hailversion=0.2.100-2ea2615a797a\n##INFO=<ID=QUALapprox,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=SB,Number=.,Type=Integer,Description=\"\">\n##INFO=<ID=MQ,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=MQRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=VarDP,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=AS_ReadPosRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_pab_max,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_QD,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_MQ,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=QD,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_MQRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=FS,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_FS,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=ReadPosRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_QUALapprox,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=AS_SB_TABLE,Number=.,Type=Integer,Description=\"\">\n##INFO=<ID=AS_VarDP,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=AS_SOR,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=SOR,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=singleton,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=transmitted_singleton,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=omni,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=mills,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=monoallelic,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=AS_VQSLOD,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=InbreedingCoeff,Number=1,Type=Float,Description=\"\">\n##FILTER=<ID=AC0,Description=\"Allele count is zero after filtering out low-confidence genotypes (GQ < 20; DP < 10; and AB < 0.2 for het calls)\">\n##FILTER=<ID=AS_VQSR,Description=\"Failed VQSR filtering thresholds of -2.7739 for SNPs and -1.0606 for indels\">\n##contig=<ID=chr1,length=248956422,assembly=GRCh38>\n#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\nchr1\t10330\t.\tCCCCTAACCCTAACCCTAACCCTACCCTAACCCTAACCCTAACCCTAACCCTAA\tC\t.\tPASS\tQUALapprox=21493;SB=325,1077,113,694;MQ=32.1327;MQRankSum=0.720000;VarDP=2236;AS_ReadPosRankSum=-0.736000;AS_pab_max=1.00000;AS_QD=5.17857;AS_MQ=29.5449;QD=9.61225;AS_MQRankSum=0.00000;FS=8.55065;AS_FS=.;ReadPosRankSum=0.727000;AS_QUALapprox=145;AS_SB_TABLE=325,1077,2,5;AS_VarDP=28;AS_SOR=0.311749;SOR=1.48100;singleton;AS_VQSLOD=13.4641;InbreedingCoeff=-0.000517845\"\"\"\n file = io.BytesIO(file_content)\n return file",
"def read_inversion_info(file_dic):\n #print_file_test = open('file_test.txt','w')\n\n if not ( check_inversion_files(file_dic) ):\n print 'error(read_inversion_info): problem with lenstool file names'\n return 0\n \n file_generate_arcs = file_dic['file_generate_arcs']\n info_input_lens = fc.extract_second_identifiers( file_generate_arcs, \\\n 'potential' )\n#-------------------------------------------------------------------------------\n\n file_source = file_dic['file_source']\n info_src = np.loadtxt(file_source, unpack=False)\n if len(info_src) == 8 and np.isscalar(info_src[0]):\n #FIXME - check if the second condition is all we need\n info_src = [info_src]\n#-------------------------------------------------------------------------------\n\n file_make_inversion = file_dic['file_make_inversion']\n info_fited_param = fc.extract_second_identifiers( file_make_inversion, \\\n 'limit' )\n info_forme = fc.extract_parameter(file_make_inversion, 'forme')[0][0]\n\n#-------------------------------------------------------------------------------\n\n file_best_fit = file_dic['file_best_fit']\n info_best_fit = fc.extract_second_identifiers( file_best_fit, \\\n 'potentiel' )\n\n info_xi2 = fc.extract_parameter(file_best_fit, '#Chi2pos:')\n\n#-------------------------------------------------------------------------------\n file_chires = file_dic['file_chires']\n\n info_chires = extract_parameter(file_chires, '0')\n rmss_mean = [0.0, 0.0]\n rmsi_mean = [0.0, 0.0]\n for i in info_chires:\n if i[0] != 'A':\n rmss_mean[0] = rmss_mean[0] + float(i[7])\n rmss_mean[1] = rmss_mean[1] + 1.0\n \n rmsi_mean[0] = rmsi_mean[0] + float(i[8])\n rmsi_mean[1] = rmsi_mean[1] + 1.0\n\n rmss_mean = rmss_mean[0]/rmss_mean[1]\n rmsi_mean = rmsi_mean[0]/rmsi_mean[1]\n#-------------------------------------------------------------------------------\n out_dict = { 'xi2' : float(info_xi2[0][0]), \\\n 'best_fit_lens' : info_best_fit, \\\n 'rmsi_mean' : rmsi_mean, \\\n 'rmss_mean' : rmss_mean, \\\n 'fited_parameters' : info_fited_param[0].keys(), \\\n 'input_lens' : info_input_lens[len(info_input_lens) - 1], \\\n 'forme' : info_forme \\\n }\n #for i in out_dict.keys():\n # print i, out_dict[i]\n return out_dict",
"def parse_freebayes_variants(vcf_filename, allow_missing=True):\n\n if file_is_missing(vcf_filename, allow_missing):\n return { 'variants': [], 'run': False }\n\n variants = []\n\n # Only interpret lines that DO NOT start with \"#\"\n for line in open(vcf_filename):\n if not line.startswith(\"#\"):\n t = line.split('\\t')\n assert len(t) == 10\n\n if t[4] != '':\n variants.append(f\"{t[3]}{t[1]}{t[4]}\")\n\n return { 'variants': variants, 'run': True }",
"def readMtVariant(variant_file, fam_excl = {}, pos_excl = {}):\n\tdata = {}\n\tn = 0\n\tif (variant_file == \"-\"):\n\t\t#use standard input instead\n\t\tfh = sys.stdin\n\telse:\n\t\tfh = open(variant_file)\n\thead = fh.readline()\n\thead = head.rstrip(\"\\r\\n\").split(\"\\t\")\n\tassert len(head) >= 27, \"Truncated head line of the variant file\"\n\tfor line in fh:\n\t\tline = line.rstrip(\"\\r\\n\").split(\"\\t\")\n\t\tfamily,sample,chr,pos,ref,depth,depth_fwd,depth_rev,allele,A,T,C,G,a,t,c,g,\\\n\t\theteroplasmy,substitution,het_allele,het_freq,het_freq_mle,het_freq_llr,het_low,het_high,het_p_fisher,het_p_sbias = line[:27]\n\t\tif (family in fam_excl):\n\t\t\t#exclude this family\n\t\t\tcontinue\n\t\tif (family == \"family\"):\n\t\t\t#skip the head line\n\t\t\tcontinue\n\t\t#a new variant\n\t\tvariant = MtVariant(family,sample,pos,ref,depth,depth_fwd,depth_rev,allele,het_allele,het_freq_mle,het_freq_llr,het_p_sbias)\n\t\t#temporarily store the original line\n\t\tvariant.line_cache = line[:]\n\t\tif (family not in data):\n\t\t\tdata[family] = {}\n\t\tif (sample not in data[family]):\n\t\t\tdata[family][sample] = {}\n\t\tpos = variant.pos\n\t\tassert pos not in data[family][sample], \"Duplicated vairant at position %d in sample %s.\" % (variant.pos, variant.sample)\n\t\tif (pos in pos_excl):\n\t\t\t#exclude this position\n\t\t\tcontinue\n\t\tdata[family][sample][pos] = variant\n\t\tn += 1\n\tprint \"Read %d mitochondrial DNA variants\" % n\n\treturn head, data",
"def ReadBasicInfo():\r\n\r\n EquilibriumStep, ProductionStep,HEPCP,HEPCE,Multiple=10000000,10000000,100,100,2\r\n InputPath,OutputPath,AtomParameterPath,TaskSuffix,MaterialInputFormat='..','..','..','','mol'\r\n GasType,GasAtomTypeNum,GasAtomType,GasPartialPressure,TemperatureList,PressureList,\\\r\n TorqueSetting,MuSiCSetting,Nodes=[],[],[],[],[],[],[],[],['1:ppn=1']\r\n CutOff,GridSpacingP,GridSpacingE=12.8,2.0,2.0\r\n MakeGCMC,UsePmap,UseEmap,UsePost,MakePmap,MakeEmap,MakeTorque,KeyOne,KeyTwo,\\\r\n PDBCharges = False,False,False,False,False,False,False,False,False,False\r\n\r\n with open('GlueParameters', 'r') as File:\r\n for Line in File.readlines():\r\n if Line.strip():\r\n WordList = Line.strip().split()\r\n if len(WordList)>1 or KeyOne==True or KeyTwo==True:\r\n if WordList[0]=='#':\r\n continue\r\n\r\n # Controlled part\r\n elif WordList[0] == 'MakeGCMC:' and WordList[1] == 'open':\r\n MakeGCMC = True\r\n elif WordList[0] == 'UsePmap:' and WordList[1] == 'yes':\r\n UsePmap = True\r\n elif WordList[0] == 'UseEmap:' and WordList[1] == 'yes':\r\n UseEmap = True\r\n elif WordList[0] == 'UsePost:' and WordList[1] == 'yes':\r\n UsePost = True\r\n elif WordList[0] == 'MakePmap:' and WordList[1] == 'open':\r\n MakePmap = True\r\n elif WordList[0] == 'MakeEmap:' and WordList[1] == 'open':\r\n MakeEmap = True\r\n elif WordList[0] == 'MakeTorque:' and WordList[1] == 'open':\r\n MakeTorque = True\r\n elif WordList[0] == 'UseChargesFromPDBFile:' and WordList[1] == 'yes':\r\n PDBCharges = True\r\n\r\n # Basic part\r\n elif WordList[0]=='InputPath:':\r\n InputPath=WordList[1]\r\n elif WordList[0]=='MaterialInputFormat:':\r\n MaterialInputFormat=WordList[1]\r\n elif WordList[0]=='OutputPath:':\r\n OutputPath=WordList[1]\r\n elif WordList[0]=='AtomParameterPath:':\r\n AtomParameterPath=WordList[1]\r\n elif WordList[0] == 'GasType:':\r\n GasType = list(WordList[1:])\r\n elif WordList[0] == 'GasAtomTypeNum:':\r\n\r\n for i in WordList[1:]:\r\n GasAtomTypeNum.append(int(i))\r\n\r\n elif WordList[0] == 'GasAtomType:':\r\n GasAtomType = list(WordList[1:])\r\n elif WordList[0] == 'Multiple:':\r\n Multiple = int(WordList[1])\r\n elif WordList[0] == 'CutOff:':\r\n CutOff = float(WordList[1])\r\n\r\n # GCMC part\r\n\r\n elif WordList[0] == 'GasPartialPressure:':\r\n\r\n for j in WordList[1:]:\r\n GasPartialPressure.append(str(j))\r\n\r\n elif WordList[0] == 'TemperatureList(K):':\r\n\r\n for l in WordList[1:]:\r\n TemperatureList.append(float(l))\r\n\r\n elif WordList[0] == 'PressureList(kPa):':\r\n\r\n for k in WordList[1:]:\r\n PressureList.append(float(k))\r\n\r\n elif WordList[0] == 'EquilibriumStep:':\r\n EquilibriumStep = int(WordList[1])\r\n elif WordList[0] == 'ProductionStep:':\r\n ProductionStep = int(WordList[1])\r\n\r\n # Pmap part\r\n elif WordList[0] == 'GridSpacingP(Ang):':\r\n GridSpacingP = float(WordList[1])\r\n elif WordList[0] == 'HighEndPotentialCutoffP(kJ/mol):':\r\n HEPCP = int(WordList[1])\r\n\r\n # Emap part\r\n elif WordList[0] == 'GridSpacingE(Ang):':\r\n GridSpacingE = float(WordList[1])\r\n elif WordList[0] == 'HighEndPotentialCutoffE(kJ/mol):':\r\n HEPCE = int(WordList[1])\r\n\r\n # Torque part\r\n elif WordList[0] == 'Nodes:':\r\n Nodes = WordList[1:]\r\n elif WordList[0] == 'TaskSuffix:':\r\n TaskSuffix = WordList[1]\r\n elif WordList[0] == 'TorqueSetting:':\r\n KeyOne = True\r\n elif WordList[0] == 'MuSiCSetting:':\r\n KeyOne = False\r\n KeyTwo = True\r\n elif WordList[0] == 'END':\r\n KeyTwo = False\r\n elif KeyOne == True:\r\n TorqueSetting.append(Line)\r\n elif KeyTwo 
== True:\r\n MuSiCSetting.append(Line)\r\n\r\n return (InputPath,OutputPath,AtomParameterPath,MakeTorque,GasType,\r\n GasAtomTypeNum,GasAtomType,GasPartialPressure,TemperatureList,PressureList,CutOff,MakeGCMC,UsePmap,\r\n UseEmap,UsePost,MakePmap,MakeEmap,EquilibriumStep,ProductionStep,GridSpacingP,HEPCP,GridSpacingE,HEPCE,\r\n Multiple,TorqueSetting,MuSiCSetting,Nodes,TaskSuffix,PDBCharges,MaterialInputFormat)",
"def read_spe(spefilename, verbose=False):\n \n # open SPE file as binary input\n spe = open(spefilename, \"rb\")\n \n # Header length is a fixed number\n nBytesInHeader = 4100\n\n # Read the entire header\n header = spe.read(nBytesInHeader)\n \n # version of WinView used\n swversion = struct.unpack_from(\"16s\", header, offset=688)[0]\n \n # version of header used\n # Eventually, need to adjust the header unpacking\n # based on the headerVersion. \n headerVersion = struct.unpack_from(\"f\", header, offset=1992)[0]\n \n # which camera controller was used?\n controllerVersion = struct.unpack_from(\"h\", header, offset=0)[0]\n if verbose:\n print (\"swversion = \", swversion)\n print (\"headerVersion = \", headerVersion)\n print (\"controllerVersion = \", controllerVersion)\n \n # Date of the observation\n # (format is DDMONYYYY e.g. 27Jan2009)\n date = struct.unpack_from(\"9s\", header, offset=20)[0]\n \n # Exposure time (float)\n exp_sec = struct.unpack_from(\"f\", header, offset=10)[0]\n \n # Intensifier gain\n pimaxGain = struct.unpack_from(\"h\", header, offset=148)[0]\n\n # Not sure which \"gain\" this is\n gain = struct.unpack_from(\"H\", header, offset=198)[0]\n \n # Data type (0=float, 1=long integer, 2=integer, 3=unsigned int)\n data_type = struct.unpack_from(\"h\", header, offset=108)[0]\n\n comments = struct.unpack_from(\"400s\", header, offset=200)[0]\n\n # CCD Chip Temperature (Degrees C)\n detectorTemperature = struct.unpack_from(\"f\", header, offset=36)[0]\n\n # The following get read but are not used\n # (this part is only lightly tested...)\n analogGain = struct.unpack_from(\"h\", header, offset=4092)[0]\n noscan = struct.unpack_from(\"h\", header, offset=34)[0]\n pimaxUsed = struct.unpack_from(\"h\", header, offset=144)[0]\n pimaxMode = struct.unpack_from(\"h\", header, offset=146)[0]\n\n ########### here's from Kasey\n #int avgexp 2 number of accumulations per scan (why don't they call this \"accumulations\"?)\n #TODO: this isn't actually accumulations, so fix it... 
\n accumulations = struct.unpack_from(\"h\", header, offset=668)[0]\n if accumulations == -1:\n # if > 32767, set to -1 and \n # see lavgexp below (668) \n #accumulations = struct.unpack_from(\"l\", header, offset=668)[0]\n # or should it be DWORD, NumExpAccums (1422): Number of Time experiment accumulated \n accumulations = struct.unpack_from(\"l\", header, offset=1422)[0]\n \n \"\"\"Start of X Calibration Structure (although I added things to it that I thought were relevant,\n like the center wavelength...\"\"\"\n xcalib = {}\n \n #SHORT SpecAutoSpectroMode 70 T/F Spectrograph Used\n xcalib['SpecAutoSpectroMode'] = bool( struct.unpack_from(\"h\", header, offset=70)[0] )\n\n #float SpecCenterWlNm # 72 Center Wavelength in Nm\n xcalib['SpecCenterWlNm'] = struct.unpack_from(\"f\", header, offset=72)[0]\n \n #SHORT SpecGlueFlag 76 T/F File is Glued\n xcalib['SpecGlueFlag'] = bool( struct.unpack_from(\"h\", header, offset=76)[0] )\n\n #float SpecGlueStartWlNm 78 Starting Wavelength in Nm\n xcalib['SpecGlueStartWlNm'] = struct.unpack_from(\"f\", header, offset=78)[0]\n\n #float SpecGlueEndWlNm 82 Starting Wavelength in Nm\n xcalib['SpecGlueEndWlNm'] = struct.unpack_from(\"f\", header, offset=82)[0]\n\n #float SpecGlueMinOvrlpNm 86 Minimum Overlap in Nm\n xcalib['SpecGlueMinOvrlpNm'] = struct.unpack_from(\"f\", header, offset=86)[0]\n\n #float SpecGlueFinalResNm 90 Final Resolution in Nm\n xcalib['SpecGlueFinalResNm'] = struct.unpack_from(\"f\", header, offset=90)[0]\n\n # short BackGrndApplied 150 1 if background subtraction done\n xcalib['BackgroundApplied'] = struct.unpack_from(\"h\", header, offset=150)[0]\n BackgroundApplied=False\n if xcalib['BackgroundApplied']==1: BackgroundApplied=True\n\n # float SpecGrooves 650 Spectrograph Grating Grooves\n xcalib['SpecGrooves'] = struct.unpack_from(\"f\", header, offset=650)[0]\n\n # short flatFieldApplied 706 1 if flat field was applied.\n xcalib['flatFieldApplied'] = struct.unpack_from(\"h\", header, offset=706)[0]\n flatFieldApplied=False\n if xcalib['flatFieldApplied']==1: flatFieldApplied=True\n \n #double offset # 3000 offset for absolute data scaling */\n xcalib['offset'] = struct.unpack_from(\"d\", header, offset=3000)[0]\n\n #double factor # 3008 factor for absolute data scaling */\n xcalib['factor'] = struct.unpack_from(\"d\", header, offset=3008)[0]\n \n #char current_unit # 3016 selected scaling unit */\n xcalib['current_unit'] = struct.unpack_from(\"c\", header, offset=3016)[0]\n\n #char reserved1 # 3017 reserved */\n xcalib['reserved1'] = struct.unpack_from(\"c\", header, offset=3017)[0]\n\n #char string[40] # 3018 special string for scaling */\n xcalib['string'] = struct.unpack_from(\"40c\", header, offset=3018)\n \n #char reserved2[40] # 3058 reserved */\n xcalib['reserved2'] = struct.unpack_from(\"40c\", header, offset=3058)\n\n #char calib_valid # 3098 flag if calibration is valid */\n xcalib['calib_valid'] = struct.unpack_from(\"c\", header, offset=3098)[0]\n\n #char input_unit # 3099 current input units for */\n xcalib['input_unit'] = struct.unpack_from(\"c\", header, offset=3099)[0]\n \"\"\"/* \"calib_value\" */\"\"\"\n\n #char polynom_unit # 3100 linear UNIT and used */\n xcalib['polynom_unit'] = struct.unpack_from(\"c\", header, offset=3100)[0]\n \"\"\"/* in the \"polynom_coeff\" */\"\"\"\n\n #char polynom_order # 3101 ORDER of calibration POLYNOM */\n xcalib['polynom_order'] = struct.unpack_from(\"c\", header, offset=3101)[0]\n\n #char calib_count # 3102 valid calibration data pairs */\n xcalib['calib_count'] = 
struct.unpack_from(\"c\", header, offset=3102)[0]\n\n #double pixel_position[10];/* 3103 pixel pos. of calibration data */\n xcalib['pixel_position'] = struct.unpack_from(\"10d\", header, offset=3103)\n\n #double calib_value[10] # 3183 calibration VALUE at above pos */\n xcalib['calib_value'] = struct.unpack_from(\"10d\", header, offset=3183)\n\n #double polynom_coeff[6] # 3263 polynom COEFFICIENTS */\n xcalib['polynom_coeff'] = struct.unpack_from(\"6d\", header, offset=3263)\n\n #double laser_position # 3311 laser wavenumber for relativ WN */\n xcalib['laser_position'] = struct.unpack_from(\"d\", header, offset=3311)[0]\n\n #char reserved3 # 3319 reserved */\n xcalib['reserved3'] = struct.unpack_from(\"c\", header, offset=3319)[0]\n\n #unsigned char new_calib_flag # 3320 If set to 200, valid label below */\n #xcalib['calib_value'] = struct.unpack_from(\"BYTE\", header, offset=3320)[0] # how to do this?\n\n #char calib_label[81] # 3321 Calibration label (NULL term'd) */\n xcalib['calib_label'] = struct.unpack_from(\"81c\", header, offset=3321)\n\n #char expansion[87] # 3402 Calibration Expansion area */\n xcalib['expansion'] = struct.unpack_from(\"87c\", header, offset=3402)\n ########### end of Kasey's addition\n\n if verbose:\n print (\"date = [\"+date+\"]\")\n print (\"exp_sec = \", exp_sec)\n print (\"pimaxGain = \", pimaxGain)\n print (\"gain (?) = \", gain)\n print (\"data_type = \", data_type)\n print (\"comments = [\"+comments+\"]\")\n print (\"analogGain = \", analogGain)\n print (\"noscan = \", noscan)\n print (\"detectorTemperature [C] = \", detectorTemperature)\n print (\"pimaxUsed = \", pimaxUsed)\n\n # Determine the data type format string for\n # upcoming struct.unpack_from() calls\n if data_type == 0:\n # float (4 bytes)\n dataTypeStr = \"f\" #untested\n bytesPerPixel = 4\n dtype = \"float32\"\n elif data_type == 1:\n # long (4 bytes)\n dataTypeStr = \"l\" #untested\n bytesPerPixel = 4\n dtype = \"int32\"\n elif data_type == 2:\n # short (2 bytes)\n dataTypeStr = \"h\" #untested\n bytesPerPixel = 2\n dtype = \"int32\"\n elif data_type == 3: \n # unsigned short (2 bytes)\n dataTypeStr = \"H\" # 16 bits in python on intel mac\n bytesPerPixel = 2\n dtype = \"int32\" # for numpy.array().\n # other options include:\n # IntN, UintN, where N = 8,16,32 or 64\n # and Float32, Float64, Complex64, Complex128\n # but need to verify that pyfits._ImageBaseHDU.ImgCode cna handle it\n # right now, ImgCode must be float32, float64, int16, int32, int64 or uint8\n else:\n print (\"unknown data type\")\n print (\"returning...\")\n sys.exit()\n \n # Number of pixels on x-axis and y-axis\n nx = struct.unpack_from(\"H\", header, offset=42)[0]\n ny = struct.unpack_from(\"H\", header, offset=656)[0]\n \n # Number of image frames in this SPE file\n nframes = struct.unpack_from(\"l\", header, offset=1446)[0]\n\n if verbose:\n print (\"nx, ny, nframes = \", nx, \", \", ny, \", \", nframes)\n \n npixels = nx*ny\n npixStr = str(npixels)\n fmtStr = npixStr+dataTypeStr\n if verbose:\n print (\"fmtStr = \", fmtStr)\n \n # How many bytes per image?\n nbytesPerFrame = npixels*bytesPerPixel\n if verbose:\n print (\"nbytesPerFrame = \", nbytesPerFrame)\n\n # Create a dictionary that holds some header information\n # and contains a placeholder for the image data\n spedict = {'data':[], # can have more than one image frame per SPE file\n 'IGAIN':pimaxGain,\n 'EXPOSURE':exp_sec,\n 'SPEFNAME':spefilename,\n 'OBSDATE':date,\n 'CHIPTEMP':detectorTemperature,\n 'COMMENTS':comments,\n 'XCALIB':xcalib,\n 
'ACCUMULATIONS':accumulations,\n 'FLATFIELD':flatFieldApplied,\n 'BACKGROUND':BackgroundApplied\n }\n \n # Now read in the image data\n # Loop over each image frame in the image\n if verbose:\n print (\"Reading image frames number \"),\n for ii in range(nframes):\n iistr = str(ii)\n data = spe.read(nbytesPerFrame)\n if verbose:\n print (iistr,\" \",)\n \n # read pixel values into a 1-D numpy array. the \"=\" forces it to use\n # standard python datatype size (4bytes for 'l') rather than native\n # (which on 64bit is 8bytes for 'l', for example).\n # See http://docs.python.org/library/struct.html\n dataArr = np.array(struct.unpack_from(\"=\"+fmtStr, data, offset=0),\n dtype=dtype)\n\n # Resize array to nx by ny pixels\n # notice order... (y,x)\n dataArr.resize((ny, nx))\n #print dataArr.shape\n\n # Push this image frame data onto the end of the list of images\n # but first cast the datatype to float (if it's not already)\n # this isn't necessary, but shouldn't hurt and could save me\n # from doing integer math when i really meant floating-point...\n spedict['data'].append( dataArr.astype(float) )\n\n if verbose:\n print (\"\")\n \n return spedict",
"def info(file, extended, vlrs, points):\n try:\n with pylas.open(openbin_file(file)) as fp:\n echo_header(fp.header, extended)\n\n if vlrs:\n click.echo(20 * \"-\")\n echo_vlrs(fp)\n\n if points:\n click.echo(20 * \"-\")\n echo_points(fp)\n except fs.errors.ResourceNotFound as e:\n click.echo(click.style(\"Error: {}\".format(e), fg=\"red\"))",
"def parse_ivar_variants(tsv_filename, allow_missing=True):\n\n if file_is_missing(tsv_filename, allow_missing):\n return { 'variants': [] }\n\n variants = []\n\n # Skip first line\n for line in open(tsv_filename).readlines()[1:]:\n t = line.split('\\t')\n assert (len(t) == 19) or (len(t) == 20) # added POS_AA column\n\n if t[3] != '':\n variants.append(f\"{t[2]}{t[1]}{t[3]}\")\n\n return { 'variants': variants }",
"def readSurfaceGeo(b18path):\n if not os.path.isfile(b18path):\n print(\"b18 building file not found! Please check!\")\n pass\n else:\n b18file = open(b18path,\"r\")\n b18data = b18file.readlines()\n srfGeoBlock = getDataParagraph(\"_EXTENSION_BuildingGeometry_START_\", \"_EXTENSION_BuildingGeometry_END_\", b18data)\n #now get vertex's coordinate xyz\n vertexdict = dict() #{vertexID:[x,y,z]}\n srfbasicinfo = dict() #{surfaceID:[vertexID]}\n srfInfo = dict() #{surfaceID:[vertices coordinate]}\n for line in srfGeoBlock:\n dline = line.split()\n if \"vertex\" in dline:\n vertexdict[int(dline[1])] = [float(xyz) for xyz in dline[2:]] #{vertexID:[x,y,z]}\n if \"wall\" in dline or \"window\" in dline or \"floor\" in dline or \"ceiling\" in dline or \"roof\" in dline:\n srfbasicinfo[int(dline[1])] = [[int(nrID) for nrID in dline[2:]],dline[0]] #{surfaceID:[[vertexID],construction]}\n #print srfbasicinfo[int(dline[1])]\n for key in srfbasicinfo.keys():\n srfInfo[key] = []\n for vertices in srfbasicinfo[key][0]:\n srfInfo[key].append(vertexdict[vertices])\n b18file.close()\n return srfInfo,vertexdict,srfbasicinfo\n #actually only need srfInfo\n #just getting everything out for now, incase will need to use those",
"def readenergyfile(filename):\n def parsemeta(metalines):\n \"\"\"Parse metadata lines to get metadata object (ordered dict)\n\n Allow only numbers, lists of numbers and strings\n \"\"\"\n def parseline(line):\n res = [val.strip() for val in line[5:].split(u':', 1)]\n key, value = (res[0], res[1]) if len(res) == 2 else (res[0], u'')\n if re.match(r'^-?\\d*[\\.|,]?\\d+$', value):\n value = float(value)\n elif re.match(r'^\\[(.*)\\]', value):\n value = [val.strip() for val in value[1:-1].split(u',')]\n value = [float(val) if re.match(r'^-?\\d*[\\.|,]?\\d+$', val) else val for val in value]\n return key, value\n return OrderedDict(parseline(line) for line in metalines if line.startswith(u'#CTE_'))\n\n with io.open(filename, 'r') as datafile:\n components, meta = [], []\n for ii, line in enumerate(datafile):\n line = line.strip()\n if (line == '') or line.startswith('vector'):\n continue\n elif line.startswith('#'):\n meta.append(line)\n else:\n fields = line.split('#', 1)\n data = [x.strip() for x in fields[0].split(',')]\n comment = fields[1] if len(fields) > 1 else ''\n carrier, ctype, originoruse = data[0:3]\n values = [float(v.strip()) for v in data[3:]]\n\n if ctype not in ('PRODUCCION', 'CONSUMO'):\n raise ValueError(\"Carrier type is not 'CONSUMO' or 'PRODUCCION' in line %i\\n\\t%s\" % (ii+2, line))\n if originoruse not in ('EPB', 'NEPB', 'INSITU', 'COGENERACION'):\n raise ValueError((\"Origin or end use is not 'EPB', 'NEPB', 'INSITU' or 'COGENERACION'\"\n \" in line %i\\n\\t%s\" % (ii+2, line)))\n\n components.append({ \"carrier\": carrier, \"ctype\": ctype,\n \"originoruse\": originoruse,\n \"values\": values, \"comment\": comment })\n numsteps = [len(c['values']) for c in components]\n if max(numsteps) != min(numsteps):\n raise ValueError(\"All input must have the same number of timesteps.\")\n return (parsemeta(meta), components)",
"def simple_vcf_reader(fh):\n\n for line in fh:\n if line.startswith('#'):\n continue\n ls = line.rstrip().split('\\t')\n # 8 fixed fields per record\n assert len(ls)>=8, (\n \"Number of retrieved fields in vcf file too small\")\n # ignoring the rest\n (chrom, pos, id, ref, alt, qual, filter, info) = ls[:8]\n pos = int(pos)-1\n try:\n qual = int(qual)\n except:\n qual = \".\"\n info_d = dict()\n for field in info.split(';'):\n kv = field.split('=')\n # boolean entries get True as value\n if len(kv)==1:\n info_d[kv[0]] = True\n else:\n info_d[kv[0]] = kv[1]\n #try:\n # info = dict([field.split('=') for field in info.split(';')])\n #except ValueError:\n # import pdb; pdb.set_trace()\n yield Variant(chrom, pos, id, ref, alt, qual, filter, info_d)",
"def LoadAnt():\n return vtkInterface.PolyData(antfile)",
"def get_args():\n\n # Make argparse object, add description\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawTextHelpFormatter,\n description=textwrap.dedent(\n '''\n summary:\n Takes a VCF file and parses the variants to produce a tab delimited \n variant report.\n '''\n ))\n\n\n # Version info\n parser.add_argument(\n '-v', '--version', action='version', \n version=\n '%(prog)s\\nversion:\\t{}\\nlast updated:\\t{}'.format(\n __version__, __updated__\n ))\n\n\n # Arguments (see help string for full descriptions):\n # REQUIRED: VCF file input\n parser.add_argument(\n 'input', action='store', \n help='Filepath to input VCF file. REQUIRED.'\n )\n\n\n # OPTIONAL: Output folder, defaults to current directory if empty\n parser.add_argument(\n '-O', '--output', action='store', \n help=textwrap.dedent(\n '''\n Filepath to folder where output reports will be saved. \n If missing, defaults to current directory.\n \\n'''\n ))\n\n\n # OPTIONAL: List of preferred transcripts\n parser.add_argument(\n '-t', '--transcripts', action='store', \n help=textwrap.dedent(\n '''\n Filepath to preferred transcripts file. \n\n Must be a tab seperated file with preferred transcripts in the second \n column. If missing, all entries in the preferred transcript column \n will be labelled as 'Unknown'.\n \\n'''\n ))\n\n\n # OPTIONAL: Preferred transcripts strictness\n parser.add_argument(\n '-T', '--transcript_strictness', action='store', default='low', \n help=textwrap.dedent(\n '''\n Strictness of matching while annotating preferred transcripts.\n Default setting is low.\n\n Options: \n\n high - Transcripts must be an exact match. \n e.g. NM_001007553.2 and NM_001007553.1 won't match,\n NM_001007553.1 and NM_001007553.1 will.\n\n low - Transcripts will match regardless of the version number. The \n version number is after the . at the end of a transcript \n e.g. NM_001007553.2 and NM_001007553.1 will match.\n \\n'''\n ))\n\n\n # OPTIONAL: either a single BED file or a folder containing BED \n # files, only one of these can be used\n bed_files = parser.add_mutually_exclusive_group()\n\n # Single BED file\n bed_files.add_argument(\n '-b', '--bed', action='store', \n help=textwrap.dedent(\n '''\n Filepath to a single BED file. \n\n The BED file will be applied to the variant report and a seperate\n report saved with the BED file applied. This report will be saved in \n the same output folder as the original variant report, with the BED \n file name added to it.\n Cannot be used together with -B flag.\n \\n'''\n ))\n\n # Multiple BED files\n bed_files.add_argument(\n '-B', '--bed_folder', action='store', \n help=textwrap.dedent(\n '''\n Filepath to folder containing BED files. \n\n Each BED file will be applied to the variant report and a seperate\n report saved with the BED file applied. These reports will be saved in\n a new folder within the output folder, named the same as the input BED\n folder. \n The file names will be the same as the original variant report, with \n the BED file name added to them.\n Cannot be used together with -b flag.\n \\n'''\n ))\n\n\n # OPTIONAL: File containing known variants\n parser.add_argument(\n '-k', '--known_variants', action='store', \n help=textwrap.dedent(\n '''\n Filepath to known variants file. \n\n This is a VCF file containing any known variants and an associated \n classification. The classification will be added to the variant \n report. 
The VCF must have an annotation named 'Classification' within \n the INFO field for each variant.\n\n Key:\n 0 - Artifact\n 1 - Benign\n 2 - Likely benign\n 3 - VUS\n 4 - Likely pathogenic\n 5 - Pathogenic\n \\n'''\n ))\n\n\n # OPTIONAL: File containing the headers for the report\n parser.add_argument(\n '-c', '--config', action='store', \n help=textwrap.dedent(\n '''\n Filepath to config file. \n\n This is a tab seperated text file containing a number of rows, where \n each row specifies an annotation to be included in the variant report.\n Only annotations included in the config file will be included in the\n variant report.\n The columns in the variant report will be in the same order as the \n order in which the annotations appear in the config file.\n\n Each row contains:\n\n Column 1 - Required. Annotation headers, these must match up with how\n they appear in the VCF (case sensitive).\n\n Column 2 - Required. Location where to find the data within the VCF, \n used to select the correct parsing function.\n options: info, format, vep, filter or pref.\n\n Column 3 - Optional. Alternative name for column header.\n\n To make a config file with all available options from a VCF, run:\n vcf_parse -l path_to_input_vcf > config.txt\n \\n'''\n ))\n\n\n # OPTIONAL: Lists all headers in a vcf then exits\n parser.add_argument(\n '-l', '--config_list', action='store_true', \n help=textwrap.dedent(\n '''\n Return a list of all availabile config to the screen, then exit.\n See CONFIG section for usage.\n \\n'''\n ))\n\n\n # OPTIONAL: Filter out any variants where FILTER column is not PASS\n parser.add_argument(\n '-F', '--filter_non_pass', action='store_true', \n help=textwrap.dedent(\n '''\n Filters out any variants where the FILTER annotation is not \n PASS. If missing then there will be no fitering based on the\n FILTER annotation.\n \\n'''\n ))\n\n return parser.parse_args()",
"def parse( cls, filename, verbose = False ) :\n if verbose : sys.stdout.write( \"%s.parse(%s)\\n\" % (cls.__name__, filename,) )\n\n infile = os.path.realpath( filename )\n dat = cls( verbose )\n\n with open( infile, \"rU\" ) as inf :\n expt_num = None\n for line in inf :\n if verbose :\n sys.stdout.write( line )\n\n m = dat.version_pat.search( line )\n if m :\n dat.version = m.group( 1 )\n continue\n\n m = dat.expt_pat.search( line )\n if m :\n expt_num = int( m.group( 1 ) )\n par_set = m.group( 2 ).upper()\n\n if not par_set in bmrbmb.topspin.EXPERIMENTS.keys() :\n raise Exception( \"Unknown experiment parameter set: %s\" % (m.group( 2 ),) )\n\n# adapted sweep width HSQC\n#\n if (par_set == \"HSQCETGP\") and (m.group( 3 ) is not None) :\n expt_name = \"2D 1H-13C HSQC SW small\"\n else :\n expt_name = bmrbmb.topspin.EXPERIMENTS[par_set]\n\n dat.data[expt_num] = { \"name\" : expt_name }\n\n# next line should have experiment details\n# 1 or 2D only\n#\n\n m = dat.dim_pat.search( line )\n if m :\n if expt_num is None :\n raise Exception( \"Experiment detail without parameter set\" )\n\n dims = { m.group( 1 ) : { \"nuc\" : m.group( 2 ), \"sw\" : m.group( 3 ) } }\n if m.group( 4 ) is not None :\n dims[m.group( 4 )] = { \"nuc\" : m.group( 5 ), \"sw\" : m.group( 6 ) }\n\n dat.data[expt_num][\"dims\"] = dims\n\n expt_num = None\n\n return dat",
"def read_devkit(f):\n with tar_open(f) as tar:\n # Metadata table containing class hierarchy, textual descriptions, etc.\n meta_mat = tar.extractfile(DEVKIT_META_PATH)\n synsets, cost_matrix = read_metadata_mat_file(meta_mat)\n\n # Raw validation data groundtruth, ILSVRC2010 IDs. Confusingly\n # distributed inside the development kit archive.\n raw_valid_groundtruth = numpy.loadtxt(tar.extractfile(\n DEVKIT_VALID_GROUNDTRUTH_PATH), dtype=numpy.int16)\n return synsets, cost_matrix, raw_valid_groundtruth",
"def extract_data( file_name):\n\n main_file = fits.open( file_name)\n\n ssp = main_file[1].data\n flux_elines = main_file[3].data\n org_hdr = main_file[0].header\n\n main_file.close()\n\n ###########################################################################\n # NOTE: The visual band fluxes are multiplied by 10^-16 as stated in the\n # units of the MaNGA Data Model.\n #\n # <https://data.sdss.org/datamodel/files/MANGA_PIPE3D/MANGADRP_VER\n # /PIPE3D_VER/PLATE/manga.Pipe3D.cube.html#hdu1>\n ###########################################################################\n v_band = ssp[0] # in units of erg / s / cm^2\n v_band_err = ssp[4] # in units of erg / s / cm^2\n sMass_density = ssp[19] * u.dex( u.M_sun) # in units of log10( Msun / spaxel**2)\n\n Ha_vel = flux_elines[102] # in units of km/s\n Ha_vel_err = flux_elines[330] # in units of km/s\n\n gal_ra = org_hdr['OBJRA']\n gal_dec = org_hdr['OBJDEC']\n\n target_galaxy = True\n MaNGA_galaxy_target = org_hdr['MNGTARG1']\n if MaNGA_galaxy_target == 0:\n target_galaxy = False\n\n data_quality = True\n DRP_3D_quality = org_hdr['DRP3QUAL']\n if DRP_3D_quality > 10000:\n data_quality = False\n\n return target_galaxy, data_quality, Ha_vel, Ha_vel_err, v_band, v_band_err, \\\n sMass_density, gal_ra, gal_dec",
"def parse_file(self):\n for num, line in enumerate(self._text):\n if \"CRYSTAL STRUCTURE SOLUTION\" in line:\n line = line.strip().strip('+').strip()\n if 'SHELXTL' in line:\n self.version = 'SHELXT ' + line.split()[-1]\n if line.strip().startswith('R1 Rweak Alpha'):\n for n in range(100):\n if not self._text[num + 1 + n]:\n break\n if self._text[num + 1]:\n self.solutions[self._text[num + 1 + n][58:76].strip()] = self._text[num + 1 + n][37:51].strip()",
"def extract_version():\n version = ''\n directory = os.path.dirname(__file__)\n filename = os.path.join(directory, 'cube_helper', '__init__.py')\n\n with open(filename) as fd:\n for line in fd:\n line = line.strip()\n if line.startswith('__version__'):\n try:\n version = line.split('=')[1].strip(' \"\\'')\n except Exception:\n pass\n break\n\n if not version:\n print('WARNING: Unable to parse version information from '\n 'file: {}'.format(filename))\n version = '0.0.0'\n\n return version",
"def load_variant_file(fname, sample, bed_fname):\n mode = 'rb' if fname.endswith('bcf') else 'r'\n vcf_fp = pysam.VariantFile(fname, mode)\n vcf_fp.subset_samples([sample])\n return [\n split_copies(region, [v for v in vcf_fp.fetch(contig=region[0], start=region[1], stop=region[2])])\n for region in read_bed(bed_fname)\n ]",
"def vcf2bed_annotateTE(file_path_d:dict, window:int = 20) -> None:\n blast_d = None\n intersect_d = None\n\n for i in range(len(file_path_d[\"key\"])):\n # for each experiment/condition, read blast and bedtools output\n if file_path_d[\"blast\"][i] is not None:\n blast_d = te_info2map(file_path_d[\"blast\"][i],\"blast\")\n else:\n print(f'file_path_d[\"key\"][i]: no blast.out available, skip.')\n if file_path_d[\"intersect\"][i] is not None:\n intersect_d = te_info2map(file_path_d[\"intersect\"][i],\"intersect\")\n else:\n print(f'file_path_d[\"key\"][i]: no bedtools intersect.bed available, skip.')\n\n out_name = f'te_annotated_{file_path_d[\"key\"][i]}_INSpad{window}.bed'\n if os.path.exists(out_name):\n q = input(f\"te_annotated_{out_name}.vcf already exist, rewrite it? (Y/N)\")\n if q.capitalize() == \"N\":\n sys.exit(0)\n\n print(f'Open fh on {out_name}. Convert VCF to BED (read comments in script for details), subset of INFO parse to NAME (col 4) field.',file=sys.stderr)\n\n with open(file_path_d[\"vcf\"][i],\"r\") as f, open(f'{out_dir}/{out_name}', \"w\") as o:\n line_count = 1\n for line in f:\n line_count += 1\n if line.startswith(\"##\") or line.startswith(\"#\"):\n continue\n col = line.strip().split(\"\\t\")\n try:\n infos = parse_info(col[7])\n except Exception as e:\n print(f\"{line_count}: Cannot parse info field.\\n{line}\\n{e}\")\n \n sv_chr = col[0]\n sv_start = int(col[1]) \n sv_end = int(sv_start) + 1 if \"END\" not in infos else int(infos[\"END\"]) # if missing END (i.e. BND) use start + 1\n sv_id = col[2]\n\n name = f'ID={sv_id};SVTYPE={infos[\"SVTYPE\"]};SUPPORT={infos[\"SUPPORT\"]}'\n if \"SVLEN\" in infos:\n name += f';SVLEN={infos[\"SVLEN\"]}'\n\n # chr start end name{ID;SVTYPE;SUPPORT;SVLEN;BLAST_TE (sep=,);INTERSECT_TE(sep=,)}\n if infos[\"SVTYPE\"] == \"INS\":\n sv_start = sv_start - 10 if (sv_start - 10) > 0 else 0\n sv_end = sv_end + 10 # there is chance that sv_end larger than chr length, but should be rare and we can filter this later\n if blast_d is not None:\n if sv_id in blast_d:\n name += f';BLAST_TE={blast_d[sv_id]}'\n if intersect_d is not None:\n if sv_id in intersect_d:\n name += f';INTERSECT_TE={intersect_d[sv_id]}'\n \n # write to out_file\n # if missing END (i.e. BND) use start + 1\n o.write(f'{sv_chr}\\t{sv_start}\\t{sv_end}\\t{name}\\n')\n print(f'Finish writing {out_name}. Close fh.',file=sys.stderr)",
"def read_vmdas(self,):\n fd = self.f\n # The raw files produced by VMDAS contain a binary navigation data\n # block.\n self.cfg['sourceprog'] = 'VMDAS'\n ens = self.ensemble\n k = ens.k\n if self._source != 1 and self._debug_level >= 1:\n print(' \\n***** Apparently a VMDAS file \\n\\n')\n self._source = 1\n self.vars_read += ['time_gps',\n 'latitude_gps',\n 'longitude_gps',\n 'etime_gps',\n 'elatitude_gps',\n 'elongitude_gps',\n 'flags',\n 'ntime', ]\n utim = fd.read_ui8(4)\n date = tmlib.datetime(utim[2] + utim[3] * 256, utim[1], utim[0])\n # This byte is in hundredths of seconds (10s of milliseconds):\n time = tmlib.timedelta(milliseconds=(int(fd.read_ui32(1) / 10)))\n fd.seek(4, 1) # \"PC clock offset from UTC\" - clock drift in ms?\n ens.time_gps[k] = tmlib.date2epoch(date + time)[0]\n ens.latitude_gps[k] = fd.read_i32(1) * self._cfac\n ens.longitude_gps[k] = fd.read_i32(1) * self._cfac\n ens.etime_gps[k] = tmlib.date2epoch(date + tmlib.timedelta(\n milliseconds=int(fd.read_ui32(1) * 10)))[0]\n ens.elatitude_gps[k] = fd.read_i32(1) * self._cfac\n ens.elongitude_gps[k] = fd.read_i32(1) * self._cfac\n fd.seek(12, 1)\n ens.flags[k] = fd.read_ui16(1)\n fd.seek(6, 1)\n utim = fd.read_ui8(4)\n date = tmlib.datetime(utim[0] + utim[1] * 256, utim[3], utim[2])\n ens.ntime[k] = tmlib.date2epoch(date + tmlib.timedelta(\n milliseconds=int(fd.read_ui32(1) / 10)))[0]\n fd.seek(16, 1)\n self._nbyte = 2 + 76",
"def extract(\n args: argparse.Namespace,\n input_vcf: str,\n input_bam: str,\n out_file=sys.stdout,\n max_reads: int = None,\n ac: int = None,\n sample: Sample = None,\n force_variant: Variant = None,\n insert_hist: bool = True,\n):\n # Print header line as requested\n if args.header:\n Features.header(out_file, ac)\n\n if sample is not None:\n pass\n elif args.stats_path is not None:\n sample = Sample.from_npsv(args.stats_path, bam_path=input_bam)\n elif None not in (\n args.fragment_mean,\n args.fragment_sd,\n args.read_length,\n args.depth,\n ):\n sample = Sample.from_distribution(\n input_bam,\n args.fragment_mean,\n args.fragment_sd,\n args.read_length,\n mean_coverage=args.depth,\n )\n else:\n raise ValueError(\"Library distribution must be provided\")\n\n # Extract features for all SVs\n vcf_reader = vcf.Reader(filename=input_vcf)\n for record in vcf_reader:\n variant = Variant.from_pyvcf(record, args.reference)\n if variant is None:\n logging.warning(\"Variant type or VCF record not supported for %s. Skipping.\", record.ID)\n\n features = extract_features(\n args,\n variant,\n input_bam,\n sample,\n max_reads=max_reads,\n insert_hist=insert_hist,\n )\n\n # Print features\n features.print_features(\n out_file, force_variant=force_variant, ac=ac,\n )",
"def get_version_info() -> Tuple[Text, Text]:",
"def get_buildinfo():\n path = os.path.join(SCRIPT_DIR, \"..\", DISTRIBUTION, \"buildinfo.json\")\n return json.load(open(path, \"rb\"))",
"def get_basic_info(rvt_file, cleaned_str=False):\n if olefile.isOleFile(rvt_file):\n rvt_ole = olefile.OleFileIO(rvt_file)\n basic_info = rvt_ole.openstream(\"BasicFileInfo\").read().decode(\"ascii\", \"ignore\")\n if cleaned_str:\n re_nullbytes = re.compile(r\"\\x00\")\n basic_info = re.sub(re_nullbytes, \"\", basic_info)\n return basic_info\n else:\n print(\"file does not appear to be an ole file: {}\".format(rvt_file))",
"def example_bedtool(fn):\n fn = os.path.join(data_dir(), fn)\n if not os.path.exists(fn):\n raise ValueError(\"%s does not exist\" % fn)\n return BedTool(fn)",
"def variant_case(store, case_obj, variant_obj):\n case_obj['bam_files'] = []\n case_obj['mt_bams'] = []\n case_obj['bai_files'] = []\n case_obj['mt_bais'] = []\n case_obj['sample_names'] = []\n for individual in case_obj['individuals']:\n bam_path = individual.get('bam_file')\n mt_bam = individual.get('mt_bam')\n case_obj['sample_names'].append(individual.get('display_name'))\n if bam_path and os.path.exists(bam_path):\n case_obj['bam_files'].append(individual['bam_file'])\n case_obj['bai_files'].append(find_bai_file(individual['bam_file']))\n if mt_bam and os.path.exists(mt_bam):\n case_obj['mt_bams'].append(individual['mt_bam'])\n case_obj['mt_bais'].append(find_bai_file(individual['mt_bam']))\n\n else:\n LOG.debug(\"%s: no bam file found\", individual['individual_id'])\n\n try:\n genes = variant_obj.get('genes', [])\n if len(genes) == 1:\n hgnc_gene_obj = store.hgnc_gene(variant_obj['genes'][0]['hgnc_id'])\n if hgnc_gene_obj:\n vcf_path = store.get_region_vcf(case_obj, gene_obj=hgnc_gene_obj)\n case_obj['region_vcf_file'] = vcf_path\n else:\n case_obj['region_vcf_file'] = None\n elif len(genes) > 1:\n chrom = variant_obj['genes'][0]['common']['chromosome']\n start = min(gene['common']['start'] for gene in variant_obj['genes'])\n end = max(gene['common']['end'] for gene in variant_obj['genes'])\n # Create a reduced VCF with variants in the region\n vcf_path = store.get_region_vcf(case_obj, chrom=chrom, start=start, end=end)\n case_obj['region_vcf_file'] = vcf_path\n except (SyntaxError, Exception):\n LOG.warning(\"skip VCF region for alignment view\")"
] |
[
"0.6277935",
"0.6057433",
"0.5900633",
"0.58670485",
"0.55492175",
"0.553204",
"0.5512067",
"0.5456865",
"0.5435115",
"0.5340725",
"0.5321985",
"0.52944624",
"0.52684426",
"0.5254221",
"0.5221966",
"0.5205385",
"0.5181874",
"0.5172438",
"0.51690716",
"0.51534516",
"0.5139977",
"0.51329803",
"0.5132869",
"0.50992745",
"0.5091656",
"0.50842863",
"0.5069354",
"0.50534934",
"0.50429344",
"0.5023936"
] |
0.70042205
|
0
|
Function that overlaps variants and functional motifs
|
def overlap_variants_and_motifs(motifs, variants, output_file: str):
    import pybedtools

    # TODO: if necessary to make BedTool again, change architecture, figure out why one file okay and the other not
    # Intersect the motif intervals with the variants; wo=True (-wo) reports both
    # records plus the number of overlapping bases, and the motif file's header
    # is preserved in the output.
    mot = pybedtools.BedTool(motifs)
    return mot.intersect(variants, wo=True, header=True).saveas(output_file)
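
# A minimal usage sketch, not part of the original function above: the paths
# "motifs.bed", "variants.bed" and "motif_variant_overlaps.bed" are placeholders
# assumed for illustration; the variants file could be, for example, the BED
# file produced by a converter such as get_BedTool_from_variant_file.
if __name__ == "__main__":
    overlaps = overlap_variants_and_motifs(
        "motifs.bed", "variants.bed", "motif_variant_overlaps.bed"
    )
    # Each output line holds a motif record, the overlapping variant record and
    # the overlap length in bases appended by -wo.
    print(f"{len(overlaps)} motif/variant overlaps written")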
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def define_overlap_operations(self):\n self._d_i = lambda q:np.roll(q,-1,axis=-1) - q\n self._d_j = lambda q:np.roll(q,-1,axis=-2) - q",
"def overlap_with(self, other):",
"def do_overlap(ds,iterno,algo=\"FordRollett\",ignore=1,unit_weights=False,top=None,bottom=None,\n exact_angles=None,drop_frames='',drop_tubes = '', use_gains = [],do_sum=False,\n do_interp = False, dumpfile=None):\n import time\n from Reduction import overlap,interpolate\n # Get sensible values\n if top is None: top = ds.shape[1]-1\n if bottom is None: bottom = 0\n\n # Vertically integrate\n # Dimensions are step,vertical,tube\n\n b = ds[:,bottom:top,:].intg(axis=1).get_reduced()\n\n # Determine pixels per tube interval\n\n tube_pos = ds.axes[-1]\n if tube_pos.ndim == 2: #very old data, just take one slice\n tube_pos = tube_pos[0]\n tubesep = abs(tube_pos[0]-tube_pos[-1])/(len(tube_pos)-1)\n tube_steps = ds.axes[0]\n bin_size = abs(tube_steps[0]-tube_steps[-1])/(len(tube_steps)-1)\n pixel_step = int(round(tubesep/bin_size))\n bin_size = tubesep/pixel_step\n print '%f tube separation, %d steps before overlap, ideal binsize %f' % (tubesep,pixel_step,bin_size)\n dropped_frames = parse_ignore_spec(drop_frames)\n dropped_tubes = parse_ignore_spec(drop_tubes)\n\n # Drop frames from the end as far as we can\n\n for empty_no in range(b.shape[0]-1,0,-1):\n print \"Trying %d\" % empty_no\n if empty_no not in dropped_frames:\n break\n dropped_frames.remove(empty_no)\n print \"All frames after %d empty so dropped\" % empty_no\n b = b[:empty_no+1]\n\n # Do we need to add dummy missing frames?\n\n extra_steps = b.shape[0]%pixel_step\n if extra_steps > 0:\n start_drop = b.shape[0]\n # gumpy has no resize\n new_b = zeros([((b.shape[0]/pixel_step)+1)*pixel_step,b.shape[1]])\n new_b[:b.shape[0]] = b\n b = new_b\n extra_dropped_frames = range(start_drop,b.shape[0])\n print \"Filled out array from %d to %d with dummy frames\" % (start_drop,b.shape[0])\n dropped_frames |= set(extra_dropped_frames)\n else:\n extra_dropped_frames = []\n \n # Zero out dropped frames\n\n print 'Dropped frames: ' + `dropped_frames`\n b_zeroed = copy(b)\n\n # Make a simple array to work out which sectors are missing frames\n\n frame_check = array.ones(b.shape[0])\n\n # Zero out all matching steps\n\n all_zeroed = copy(b)\n region_starts = [a*pixel_step for a in range(b.shape[0]/pixel_step)]\n for frame_no in dropped_frames:\n b_zeroed[frame_no] = 0\n b_zeroed.var[frame_no] = 0\n dropped_step = frame_no%pixel_step\n ref_drop_steps = [r+dropped_step for r in region_starts]\n for drop_step in ref_drop_steps:\n frame_check[drop_step] = 0\n all_zeroed[drop_step] = 0\n all_zeroed.var[drop_step] = 0\n\n # Now drop out whole detectors\n\n for tube_no in dropped_tubes:\n b_zeroed[:,tube_no] = 0\n b_zeroed.var[:,tube_no] = 0\n all_zeroed[:,tube_no] = 0\n all_zeroed.var[:,tube_no] = 0\n\n # Interpolation. If requested, we first interpolate the data onto a regular angular grid,\n # which is the assumption underlying the regain calculation. 
However, as the deviations\n # from regularity are usually minor, this step can usually be skipped\n \n if do_interp:\n if exact_angles != None:\n h_correction = read_horizontal_corrections(exact_angles)\n else:\n h_correction = None\n \n all_zeroed = interpolate.interpolate(all_zeroed,dropped_frames,tube_steps,tube_steps[0],\n bin_size,len(tube_pos),h_correction=h_correction)\n b_zeroed = interpolate.interpolate(b_zeroed,dropped_frames,tube_steps,tube_steps[0],\n bin_size,len(tube_pos),h_correction=h_correction)\n\n \n c = all_zeroed.reshape([b.shape[0]/pixel_step,pixel_step,b.shape[-1]])\n frame_check = frame_check.reshape([b.shape[0]/pixel_step,pixel_step])\n frame_sum = frame_check.intg(axis=1)\n print `b.shape` + \"->\" + `c.shape`\n print 'Relative no of frames: ' + `frame_sum`\n\n # Output the starting data for external use\n\n if dumpfile is not None:\n dump_tube_intensities(dumpfile,raw=b_zeroed)\n if len(use_gains)==0: #we have to calculate them\n if c.shape[0] == 1: #can't be done, there is no overlap\n return None,None,None,None,None\n if do_sum:\n # sum the individual unoverlapped sections. Reshape is required as the\n # intg function removes the dimension\n d = c.intg(axis=1).reshape([c.shape[0],1,c.shape[2]]) #array of [rangeno,stepno,tubeno]\n # normalise by the number of frames in each section\n else:\n d = c #no op\n # Note gumpy can't do transposes of more than two axes at once\n e = d.transpose((2,0)) #array of [tubeno,stepno,section]\n e = e.transpose((1,2)) #array of [tubeno,section,stepno]\n print \"Data shape: \" + repr(e.shape)\n print \"Check shape: \" + repr(frame_sum.shape)\n # create the mask: any values of zero are assumed to be incorrect and masked out\n pixel_mask = array.ones_like(e[ignore:])\n for one_tube in range(len(e[ignore:])):\n if not e[ignore+one_tube].any(): #all zero\n pixel_mask[one_tube] = 0 #mask it out\n gain,dd,interim_result,residual_map,chisquared,oldesds,first_ave,weights = \\\n iterate_data(e[ignore:],iter_no=iterno,unit_weights=unit_weights,pixel_mask=pixel_mask)\n else: #we have been provided with gains\n gain = use_gains\n chisquared=0.0\n # calculate errors based on full dataset\n # First get a full model\n reshape_ds = b_zeroed.reshape([b.shape[0]/pixel_step,pixel_step,b.shape[-1]])\n start_ds = reshape_ds.transpose((2,0))[ignore:] #array of [tubeno,stepno,section]\n start_ds = start_ds.transpose((1,2))\n start_var = start_ds.var\n\n # Our new pixel mask has to have all of the steps in\n\n pixel_mask = array.ones_like(start_ds)\n for one_tube in range(len(start_ds)):\n if not start_ds[one_tube].any(): #all zero\n pixel_mask[one_tube] = 0 #mask it out\n\n # Normalise gains so that average is 1.0\n\n gain = gain*len(gain)/gain.sum()\n model,wd,model_var,esds = overlap.apply_gain(start_ds,1.0/start_var,gain,\n calc_var=True,bad_steps=dropped_frames,pixel_mask=pixel_mask)\n\n # model and model_var have shape tubeno*pixel_step + no_steps (see shift_tube_add_new)\n\n print 'Have full model and errors at %f' % time.clock()\n\n # step size could be less than pixel_step if we have a short non-overlap scan\n\n real_step = pixel_step\n if len(tube_steps)< pixel_step:\n real_step = len(tube_steps)\n # and we have to prune the output data too\n holeless_model = zeros([real_step*start_ds.shape[0]])\n holeless_var = zeros_like(holeless_model)\n for tube_set in range(start_ds.shape[0]):\n holeless_model[tube_set*real_step:(tube_set+1)*real_step]=model[tube_set*pixel_step:(tube_set+1)*pixel_step] \n 
holeless_var[tube_set*real_step:(tube_set+1)*real_step]=model_var[tube_set*pixel_step:(tube_set+1)*pixel_step] \n model = holeless_model\n model_var = holeless_var\n cs = Dataset(model)\n cs.var = model_var\n\n # Now build up the important information\n\n cs.title = ds.title\n cs.copy_cif_metadata(ds)\n\n # construct the axes\n\n if exact_angles is None or do_interp:\n axis = arange(len(model))\n new_axis = axis*bin_size + ds.axes[0][0] + ignore*pixel_step*bin_size\n if not do_interp:\n axis_string = \"\"\"Following application of gain correction, two theta values were recalculated assuming a step size of %8.3f \n and a tube separation of %8.3f starting at %f.\"\"\" % (bin_size,tubesep,ds.axes[0][0]+ignore*pixel_step*bin_size)\n else:\n axis_string = \"\"\"Gain correction was performed after interpolating observed values onto a\n regular angular grid with a step size of %8.3f and a tube separation of %8.3f starting at %f.\"\"\" % (bin_size,tubesep,ds.axes[0][0]+ignore*pixel_step*bin_size)\n else:\n new_axis = calculate_average_angles(tube_steps,exact_angles,pixel_step,tubesep,\n extra_dummy=extra_dropped_frames)\n # Remove ignored tubes\n \n new_axis = new_axis[ignore*real_step:]\n \n axis_string = \\\n \"\"\"Following application of gain correction, two theta values were recalculated using a tube separation of \n%8.3f and the recorded positions of the lowest angle tube, and then adding an average of the \nangular corrections for the tubes contributing to each two theta position.\"\"\" % (tubesep)\n cs.set_axes([new_axis],anames=['Two theta'],aunits=['Degrees'])\n print 'New axis goes from %f to %f in %d steps' % (new_axis[0],new_axis[-1],len(new_axis))\n print 'Total %d points in output data' % len(cs)\n # prepare info for CIF file\n import math\n detno = map(lambda a:\"%d\" % a,range(len(gain)))\n gain_as_strings = map(lambda a:\"%.4f\" % a,gain)\n gain_esd = [\"%.4f\" % a for a in esds]\n cs.harvest_metadata(\"CIF\").AddCifItem((\n ((\"_[local]_detector_number\",\"_[local]_refined_gain\",\"_[local]_refined_gain_esd\"),),\n ((detno,gain_as_strings,gain_esd),))\n )\n if len(use_gains)==0:\n info_string = \"After vertical integration between pixels %d and %d,\" % (bottom,top) + \\\n \"\"\" individual tube gains were iteratively refined using the Ford/Rollett algorithm (Acta Cryst. (1968) B24,293). \n Final gains are stored in the _[local]_refined_gain loop.\"\"\" + axis_string\n else:\n info_string = \"After vertical integration between pixels %d and %d,\" % (bottom,top) + \\\n \" individual tube gains were corrected based on a previous iterative refinement using the Ford/Rollett algorithm. The gains used\" + \\\n \"are stored in the _[local]_refined_gain loop.\" + axis_string\n cs.add_metadata(\"_pd_proc_info_data_reduction\",info_string,append=True)\n return cs,gain,esds,chisquared,c.shape[0]",
"def stim_conditions(angles, onebeep_nb, twobeep_nb, onebeep_tc, twobeep_tc):\n##### make single auditory stim################################################\n\n #conditions_1A = [-30_1A, 0_1A, 30_1A, -30_2A, 0_2A, 30_2A]\n spatials = ('-30', '0', '30')\n beep_combos_1a = ('onebeep_nb', 'twobeep_nb', 'onebeep_tc', 'twobeep_tc')\n\n##### make competing auditory stim#############################################\n\n #conditions_2A = []\n spatials = ('-30x0', '0x30', '-30x30')\n beep_combos_2a = ('onebeep_nbxonebeep_tc', 'twobeep_nbxonebeep_tc',\n 'onebeep_nbxtc2', 'twobeep_nbxtwobeep_tc')\n\n all_spatials = [s.split('x') for s in spatials]\n for s in all_spatials[1:]:\n all_spatials[0] += s\n all_spatials = all_spatials[0]\n all_spatials = list(np.unique([float(s) for s in all_spatials]))\n\n all_combos = [ss.split('x') for ss in beep_combos_2a]\n for ss in all_combos[1:]:\n all_combos[0] += ss\n all_combos = all_combos[0]\n all_combos = list(np.unique([float(ss) for ss in all_combos]))\n\n##### convolve with HRTF at appropriate angles ################################\n\n move_sig = np.concatenate([convolve_hrtf(stim, fs, ang)\n for ang in range(-30, 30)], axis=1)\n return move_sig",
"def sequence_tunable(\n mol,\n OP_REMOVE_ISOTOPE=True, OP_NEUTRALISE_CHARGE=True,\n OP_REMOVE_STEREO=False, OP_COMMUTE_INCHI=False,\n OP_KEEP_BIGGEST=True, OP_ADD_HYDROGEN=True,\n OP_KEKULIZE=True, OP_NEUTRALISE_CHARGE_LATE=True\n ):\n F = Filters()\n # Always perform the basics..\n Cleanup(mol)\n SanitizeMol(mol, sanitizeOps=SanitizeFlags.SANITIZE_ALL, catchErrors=False)\n AssignStereochemistry(mol, cleanIt=True, force=True, flagPossibleStereoCenters=True) # Fix bug TD201904.01\n # \n if OP_REMOVE_ISOTOPE:\n mol = F.remove_isotope(mol)\n if OP_NEUTRALISE_CHARGE:\n mol = F.neutralise_charge(mol)\n if any([OP_REMOVE_ISOTOPE, OP_REMOVE_ISOTOPE]):\n SanitizeMol(mol, sanitizeOps=SanitizeFlags.SANITIZE_ALL, catchErrors=False)\n # \n if OP_REMOVE_STEREO:\n mol = F.remove_stereo(mol)\n OP_COMMUTE_INCHI = True\n if OP_COMMUTE_INCHI:\n mol = F.commute_inchi(mol)\n if OP_KEEP_BIGGEST:\n mol = F.keep_biggest(mol)\n if any([OP_REMOVE_STEREO, OP_COMMUTE_INCHI, OP_KEEP_BIGGEST]):\n SanitizeMol(mol, sanitizeOps=SanitizeFlags.SANITIZE_ALL, catchErrors=False)\n #\n if OP_NEUTRALISE_CHARGE_LATE:\n mol = F.neutralise_charge(mol)\n SanitizeMol(mol, sanitizeOps=SanitizeFlags.SANITIZE_ALL, catchErrors=False)\n #\n if OP_ADD_HYDROGEN:\n mol = F.add_hydrogen(mol, addCoords=True)\n if OP_KEKULIZE:\n mol = F.kekulize(mol)\n #\n return mol",
"def makeMotif(UP_seq, MS_seq, motif_size, ps_protein_idx, center_motif_idx, DoS_idx):\n UP_seq_copy = list(UP_seq[max(0, ps_protein_idx - motif_size): ps_protein_idx + motif_size + 1])\n assert len(UP_seq_copy) > motif_size, \"Size seems too small. \" + UP_seq\n\n # If we ran off the end of the sequence at the beginning or at the end, append a gap\n if ps_protein_idx - motif_size < 0:\n for _ in range(motif_size - ps_protein_idx):\n UP_seq_copy.insert(0, \"-\")\n\n elif ps_protein_idx + motif_size + 1 > len(UP_seq):\n for _ in range(ps_protein_idx + motif_size - len(UP_seq) + 1):\n UP_seq_copy.extend(\"-\")\n\n UP_seq_copy[motif_size] = UP_seq_copy[motif_size].lower()\n\n pidx = [str(UP_seq_copy[motif_size]).upper() + str(ps_protein_idx + 1) + \"-p\"]\n\n # Now go through and copy over phosphorylation\n if DoS_idx:\n for ppIDX in DoS_idx:\n position = ppIDX.start() - center_motif_idx\n # If the phosphosite is within the motif\n if abs(position) < motif_size:\n editPos = position + motif_size\n UP_seq_copy[editPos] = UP_seq_copy[editPos].lower()\n assert UP_seq_copy[editPos] == MS_seq[ppIDX.start()], UP_seq_copy[editPos] + \" \" + MS_seq[ppIDX.start()]\n if position != 0:\n pidx.append(str(UP_seq_copy[editPos]).upper() + str(ps_protein_idx + position + 1) + \"-p\")\n\n return \"\".join(UP_seq_copy), pidx",
"def proc_sw_only_morphs(forward_pairs, morphs, backward_pairs):\n sandwich_pairs = []\n if not backward_pairs:\n forward_pairs[-1].morphs.extend(morphs)\n elif len(morphs) == 1:\n morph = morphs[0]\n morph_str = str(morph)\n if morph_str in ['이/VCP', '하/VX'] and backward_pairs[0].morphs[0].tag.startswith('E'):\n # '이' 긍정지정사나 '하' 보조용언 뒤에 어미가 나올 경우\n backward_pairs[0].morphs.insert(0, morphs[0])\n del morphs[:]\n elif morph_str == '에/JKB' and backward_pairs[0].morphs[0].tag == 'JX':\n # '에' 부사격조사 뒤에 보조사가 나올 경우\n backward_pairs[0].morphs.insert(0, morphs[0])\n del morphs[:]\n elif morph_str == 'ᆯ/ETM' and forward_pairs[-1].morphs[-1].tag.startswith('V'):\n # 'ㄹ' 관형형전성어미 앞에 용언이 나올 경우\n forward_pairs[-1].morphs.append(morphs[0])\n del morphs[:]\n elif morph.tag in ['EC', 'EF'] and forward_pairs[-1].morphs[-1].tag.startswith('V'):\n # 연결어미나 종결어미 앞에 용언이 나올 경우\n forward_pairs[-1].morphs.append(morphs[0])\n del morphs[:]\n elif morph.tag.startswith('XS'):\n # append suffixes to the end of forward pair list\n forward_pairs[-1].morphs.append(morphs[0])\n del morphs[:]\n else:\n raise AlignError()\n else:\n morphs_str = ' + '.join([str(morph) for morph in morphs])\n if morphs_str == '(/SS + 대북/NNG + (/SS + 대북/NNG + )/SS + )/SS' and forward_pairs[-1].word_str == u'대북':\n del morphs[:]\n elif morphs_str == '(/SS + 동경/NNP + )/SS' and forward_pairs[-1].word_str == u'도쿄':\n del morphs[:]\n else:\n raise AlignError()\n return sandwich_pairs",
"def elongate(self,DNA, Pol, Hel):\n Helicase = Hel\n PolymeraseIII = Pol\n if self.ATP_molecules >= 100 and (Helicase.position - PolymeraseIII.position) < 3000: #genug ATP, Abstand klein genug\n Helicase.position += 100 \n self.ATP_molecules -= 100\n if self.Nucleotide >= 200 and (Helicase.position - PolymeraseIII.position) > 1500: #genug Nucleotide (>=200)\n PolymeraseIII.position += 100\n self.Nucleotide -= 200\n elif self.Nucleotide > 1 and (Helicase.position - PolymeraseIII.position) > 1500: #nicht genug Nucleotide (1-199)\n PolymeraseIII.position += self.Nucleotide/2\n Helicase.position = Helicase.position -100 +self.Nucleotide/2\n self.ATP_molecules =self.ATP_molecules+100-self.Nucleotide/2\n self.Nucleotide -= 2*(self.Nucleotide/2)\n \n elif self.ATP_molecules >= 0 and (Helicase.position - PolymeraseIII.position) < 3000: #nicht genug ATP, Abstand klein genug\n Helicase.position += self.ATP_molecules\n if self.Nucleotide >= 200 and (Helicase.position - PolymeraseIII.position) > 1500: #genug Nucleotide\n PolymeraseIII.position += 100\n self.Nucleotide -= 200\n elif self.Nucleotide > 1 and (Helicase.position - PolymeraseIII.position) > 1500: #nicht genug Nucleotide\n PolymeraseIII.position += self.Nucleotide/2\n Helicase.position = Helicase.position -self.ATP_molecules +self.Nucleotide/2\n self.ATP_molecules -=self.Nucleotide/2\n self.Nucleotide -= 2*(self.Nucleotide/2)\n self.ATP_molecules -= self.ATP_molecules\n\n if Helicase.position > self.DNA.length:\n self.ATP_molecules=self.ATP_molecules+(Helicase.position -self.DNA.length)\n Helicase.position = self.DNA.length\n\n if Helicase.position >= self.DNA.length:\n Helicase.bound =False\n #print ('ATP:',self.ATP_molecules,'NT:',self.Nucleotide)\n return Helicase, PolymeraseIII",
"def overlap(list1,list2):\n \n coord=[]\n for pos1 in list1:\n #print 'pos in list1 is', pos1\n coord.append(('S',int(pos1.split('-')[0]), 'l1'))\n #print 'S is ', pos1.split('-')[0]\n coord.append(('E',int(pos1.split('-')[1]),'l1'))\n #print 'E is ', pos1.split('-')[1]\n #print coord \n for pos2 in list2:\n #print 'pos in list2 is', pos2\n coord.append(('S',int(pos2.split('-')[0]),'l2'))\n #print 'S is ', pos2.split('-')[0]\n coord.append(('E', int(pos2.split('-')[1]),'l2'))\n #print 'E is ', pos2.split('-')[1]\n #print coord\n \n coord.sort(key = lambda x : x[0], reverse = True)\n #print 'coord after first sort \\n', coord\n coord.sort(key = lambda x : x[1])\n #print 'coord after 2nd sort by number \\n', coord\n # PART 1: SEARCHES FOR OVERLAPS BETWEEN 2 HISTONE MARKS\n new_coord_list = [] #initialize new list to which to move all those that don't overlap\n #index = 0 #position in list \n spos=0 # start pos initialized \n ct=0\n ovl=[]\n for pos in coord:\n new_coord_list.append(pos)\n #print pos, 'doesn\\'t overlap'\n index = int(new_coord_list.index(pos)) \n if pos[0]=='S':\n ct+=1\n if ct==2:\n spos=pos[1]\n if pos[0]=='E':\n ct-=1\n if ct==1:\n if not spos==pos[1]:\n #print spos, '-', pos[1], 'overlap'\n ovl.append(('ovl', spos, pos[1])) # add to overlap vector the positions that overlap\n #print 'overlap found! :', [str(spos),str(pos[1]),'ovl']\n #print 'removing ', new_coord_list[index]\n del new_coord_list[index]\n #print 'removing', new_coord_list[index-1]\n del new_coord_list[index-1]\n \n # \n new_coord_list.sort(key = lambda x : x[0], reverse = True)\n start=0\n end = 0\n two_hist_away_from_cent_of_peak = 0\n two_hist_away_list = []\n for nc_pos in new_coord_list:\n if nc_pos[0]=='S':\n if (start<=two_hist_away_from_cent_of_peak) and (two_hist_away_from_cent_of_peak !=0) and (end!=0): \n #if center_of_peak <= two_hist_away_from_cent_of_peak and (two_hist_away_from_cent_of_peak !=0):\n two_hist_away_list.append('-'.join([str(start),str(end), 'tha']))\n start= nc_pos[1]\n if nc_pos[0]=='E':\n end = nc_pos[1]\n center_of_peak= (start+nc_pos[1])/2\n two_hist_away_from_cent_of_peak = center_of_peak + 300\n # print 'new_coord_list: ', new_coord_list\n return ovl, new_coord_list",
"def testMoreStereo(self):\r\n smi_and_cansmi = [\r\n ('Cl[C@](C)(I)Br', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@@](C)(I)Br', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@](C)(Br)Cl', 'C[C@](Cl)(Br)I'),\r\n ('I[C@@](C)(Br)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@@](Cl)(Br)C', 'C[C@](Cl)(Br)I'),\r\n ('I[C@](Cl)(Br)C', 'C[C@@](Cl)(Br)I'),\r\n ('Cl[C@@](Br)(I)C', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@](Br)(I)C', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@](C)(Cl)I', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@@](C)(Cl)I', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@@](C)(Cl)Br', 'C[C@](Cl)(Br)I'),\r\n ('I[C@](C)(Cl)Br', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@@](Cl)(I)Br', 'C[C@](Cl)(Br)I'),\r\n ('C[C@](Cl)(I)Br', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@@](Br)(Cl)I', 'C[C@](Cl)(Br)I'),\r\n ('C[C@](Br)(Cl)I', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@](Br)(Cl)C', 'C[C@](Cl)(Br)I'),\r\n ('I[C@@](Br)(Cl)C', 'C[C@@](Cl)(Br)I'),\r\n ('Cl[C@@](C)(Br)I', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@](C)(Br)I', 'C[C@@](Cl)(Br)I'),\r\n ('Cl[C@](I)(Br)C', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@@](I)(Br)C', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@@](I)(Br)Cl', 'C[C@](Cl)(Br)I'),\r\n ('C[C@](I)(Br)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@](Cl)(C)Br', 'C[C@](Cl)(Br)I'),\r\n ('I[C@@](Cl)(C)Br', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@](Cl)(Br)I', 'C[C@](Cl)(Br)I'),\r\n ('C[C@@](Cl)(Br)I', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@@](C)(I)Cl', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@](C)(I)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@@](Br)(C)Cl', 'C[C@](Cl)(Br)I'),\r\n ('I[C@](Br)(C)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@](Br)(I)Cl', 'C[C@](Cl)(Br)I'),\r\n ('C[C@@](Br)(I)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@](I)(Cl)Br', 'C[C@](Cl)(Br)I'),\r\n ('C[C@@](I)(Cl)Br', 'C[C@@](Cl)(Br)I'),\r\n ('Cl[C@@](I)(C)Br', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@](I)(C)Br', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@](I)(C)Cl', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@@](I)(C)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('Cl[C@](Br)(C)I', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@@](Br)(C)I', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@@](Cl)(C)I', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@](Cl)(C)I', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@@](I)(Cl)C', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@](I)(Cl)C', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@](Cl)(I)C', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@@](Cl)(I)C', 'C[C@@](Cl)(Br)I')]\r\n for smi, cansmi in smi_and_cansmi:\r\n mol = pybel.readstring(\"smi\", smi)\r\n self.assertEqual(mol.write(\"can\").split()[0],\r\n cansmi)",
"def replace_OOV(seq, typeset):\n return",
"def makeOverlapAnalysisSequence( dataType,\n inputLabel = '', outputLabel = 'passesOR',\n linkOverlapObjects = False, doMuPFJetOR=False,\n doEleEleOR = False, doElectrons = True,\n doMuons = True, doJets = True, doTaus = True,\n doPhotons = True, doFatJets = False,\n bJetLabel = '',\n boostedLeptons = False,\n postfix = '',\n enableCutflow = False ):\n\n if dataType not in [\"data\", \"mc\", \"afii\"] :\n raise ValueError (\"invalid data type: \" + dataType)\n\n # Create the analysis algorithm sequence object:\n seq = AnaAlgSequence( 'OverlapAnalysisSequence' + postfix )\n\n # Create the overlap removal algorithm:\n alg = createAlgorithm( 'CP::OverlapRemovalAlg', 'OverlapRemovalAlg' + postfix )\n\n # Create its main tool, and set its basic properties:\n addPrivateTool( alg, 'overlapTool', 'ORUtils::OverlapRemovalTool' )\n alg.overlapTool.InputLabel = inputLabel\n alg.overlapTool.OutputLabel = outputLabel\n\n # By default the OverlapRemovalTool would flag objects that need to be\n # suppressed, with a \"true\" value. But since the analysis algorithms expect\n # the opposite behaviour from selection flags, we need to tell the tool\n # explicitly to use the \"true\" flag on objects that pass the overlap\n # removal.\n alg.overlapTool.OutputPassValue = True\n\n # Set up overlap removal for PFlow jets that are acutally muons, if requested.\n if doMuPFJetOR:\n addPrivateTool( alg, 'overlapTool.MuPFJetORT',\n 'ORUtils::MuPFJetOverlapTool' )\n alg.overlapTool.MuPFJetORT.InputLabel = inputLabel\n alg.overlapTool.MuPFJetORT.OutputLabel = outputLabel\n alg.overlapTool.MuPFJetORT.LinkOverlapObjects = linkOverlapObjects\n alg.overlapTool.MuPFJetORT.OutputPassValue = True\n pass\n\n # Set up the electron-electron overlap removal, if requested.\n if doElectrons and doEleEleOR:\n addPrivateTool( alg, 'overlapTool.EleEleORT',\n 'ORUtils::EleEleOverlapTool' )\n alg.overlapTool.EleEleORT.InputLabel = inputLabel\n alg.overlapTool.EleEleORT.OutputLabel = outputLabel\n alg.overlapTool.EleEleORT.LinkOverlapObjects = linkOverlapObjects\n alg.overlapTool.EleEleORT.OutputPassValue = True\n pass\n\n # Set up the electron-muon overlap removal.\n if doElectrons and doMuons:\n addPrivateTool( alg, 'overlapTool.EleMuORT',\n 'ORUtils::EleMuSharedTrkOverlapTool' )\n alg.overlapTool.EleMuORT.InputLabel = inputLabel\n alg.overlapTool.EleMuORT.OutputLabel = outputLabel\n alg.overlapTool.EleMuORT.LinkOverlapObjects = linkOverlapObjects\n alg.overlapTool.EleMuORT.OutputPassValue = True\n pass\n\n # Set up the electron-(narrow-)jet overlap removal.\n if doElectrons and doJets:\n addPrivateTool( alg, 'overlapTool.EleJetORT',\n 'ORUtils::EleJetOverlapTool' )\n alg.overlapTool.EleJetORT.InputLabel = inputLabel\n alg.overlapTool.EleJetORT.OutputLabel = outputLabel\n alg.overlapTool.EleJetORT.LinkOverlapObjects = linkOverlapObjects\n alg.overlapTool.EleJetORT.BJetLabel = bJetLabel\n alg.overlapTool.EleJetORT.UseSlidingDR = boostedLeptons\n alg.overlapTool.EleJetORT.OutputPassValue = True\n pass\n\n # Set up the muon-(narrow-)jet overlap removal.\n if doMuons and doJets:\n addPrivateTool( alg, 'overlapTool.MuJetORT',\n 'ORUtils::MuJetOverlapTool' )\n alg.overlapTool.MuJetORT.InputLabel = inputLabel\n alg.overlapTool.MuJetORT.OutputLabel = outputLabel\n alg.overlapTool.MuJetORT.LinkOverlapObjects = linkOverlapObjects\n alg.overlapTool.MuJetORT.BJetLabel = bJetLabel\n alg.overlapTool.MuJetORT.UseSlidingDR = boostedLeptons\n alg.overlapTool.MuJetORT.OutputPassValue = True\n pass\n\n # Set up the tau-electron overlap removal.\n if 
doTaus and doElectrons:\n addPrivateTool( alg, 'overlapTool.TauEleORT',\n 'ORUtils::DeltaROverlapTool' )\n alg.overlapTool.TauEleORT.InputLabel = inputLabel\n alg.overlapTool.TauEleORT.OutputLabel = outputLabel\n alg.overlapTool.TauEleORT.LinkOverlapObjects = linkOverlapObjects\n alg.overlapTool.TauEleORT.DR = 0.2\n alg.overlapTool.TauEleORT.OutputPassValue = True\n pass\n\n # Set up the tau-muon overlap removal.\n if doTaus and doMuons:\n addPrivateTool( alg, 'overlapTool.TauMuORT',\n 'ORUtils::DeltaROverlapTool' )\n alg.overlapTool.TauMuORT.InputLabel = inputLabel\n alg.overlapTool.TauMuORT.OutputLabel = outputLabel\n alg.overlapTool.TauMuORT.LinkOverlapObjects = linkOverlapObjects\n alg.overlapTool.TauMuORT.DR = 0.2\n alg.overlapTool.TauMuORT.OutputPassValue = True\n pass\n\n # Set up the tau-(narrow-)jet overlap removal.\n if doTaus and doJets:\n addPrivateTool( alg, 'overlapTool.TauJetORT',\n 'ORUtils::DeltaROverlapTool' )\n alg.overlapTool.TauJetORT.InputLabel = inputLabel\n alg.overlapTool.TauJetORT.OutputLabel = outputLabel\n alg.overlapTool.TauJetORT.LinkOverlapObjects = linkOverlapObjects\n alg.overlapTool.TauJetORT.DR = 0.2\n alg.overlapTool.TauJetORT.OutputPassValue = True\n pass\n\n # Set up the photon-electron overlap removal.\n if doPhotons and doElectrons:\n addPrivateTool( alg, 'overlapTool.PhoEleORT',\n 'ORUtils::DeltaROverlapTool' )\n alg.overlapTool.PhoEleORT.InputLabel = inputLabel\n alg.overlapTool.PhoEleORT.OutputLabel = outputLabel\n alg.overlapTool.PhoEleORT.LinkOverlapObjects = linkOverlapObjects\n alg.overlapTool.PhoEleORT.OutputPassValue = True\n pass\n\n # Set up the photon-muon overlap removal.\n if doPhotons and doMuons:\n addPrivateTool( alg, 'overlapTool.PhoMuORT',\n 'ORUtils::DeltaROverlapTool' )\n alg.overlapTool.PhoMuORT.InputLabel = inputLabel\n alg.overlapTool.PhoMuORT.OutputLabel = outputLabel\n alg.overlapTool.PhoMuORT.LinkOverlapObjects = linkOverlapObjects\n alg.overlapTool.PhoMuORT.OutputPassValue = True\n pass\n\n # Set up the photon-(narrow-)jet overlap removal.\n if doPhotons and doJets:\n addPrivateTool( alg, 'overlapTool.PhoJetORT',\n 'ORUtils::DeltaROverlapTool' )\n alg.overlapTool.PhoJetORT.InputLabel = inputLabel\n alg.overlapTool.PhoJetORT.OutputLabel = outputLabel\n alg.overlapTool.PhoJetORT.LinkOverlapObjects = linkOverlapObjects\n alg.overlapTool.PhoJetORT.OutputPassValue = True\n pass\n\n # Set up the electron-fat-jet overlap removal.\n if doElectrons and doFatJets:\n addPrivateTool( alg, 'overlapTool.EleFatJetORT',\n 'ORUtils::DeltaROverlapTool' )\n alg.overlapTool.EleFatJetORT.InputLabel = inputLabel\n alg.overlapTool.EleFatJetORT.OutputLabel = outputLabel\n alg.overlapTool.EleFatJetORT.LinkOverlapObjects = linkOverlapObjects\n alg.overlapTool.EleFatJetORT.DR = 1.0\n alg.overlapTool.EleFatJetORT.OutputPassValue = True\n pass\n\n # Set up the (narrow-)jet-fat-jet overlap removal.\n if doJets and doFatJets:\n addPrivateTool( alg, 'overlapTool.JetFatJetORT',\n 'ORUtils::DeltaROverlapTool' )\n alg.overlapTool.JetFatJetORT.InputLabel = inputLabel\n alg.overlapTool.JetFatJetORT.OutputLabel = outputLabel\n alg.overlapTool.JetFatJetORT.LinkOverlapObjects = linkOverlapObjects\n alg.overlapTool.JetFatJetORT.DR = 1.0\n alg.overlapTool.JetFatJetORT.OutputPassValue = True\n pass\n\n # Add the algorithm to the analysis sequence.\n seq.append( alg,\n inputPropName = { 'electrons' : 'electrons',\n 'muons' : 'muons',\n 'jets' : 'jets',\n 'taus' : 'taus',\n 'photons' : 'photons',\n 'fatJets' : 'fatJets' },\n outputPropName = { 'electrons' : 
'electronsOut',\n 'muons' : 'muonsOut',\n 'jets' : 'jetsOut',\n 'taus' : 'tausOut',\n 'photons' : 'photonsOut',\n 'fatJets' : 'fatJetsOut' } )\n\n # Add view container creation algorithms for all types.\n for container in [ ( 'electrons', doElectrons ),\n ( 'muons', doMuons ),\n ( 'jets', doJets ),\n ( 'taus', doTaus ),\n ( 'photons', doPhotons ),\n ( 'fatJets', doFatJets ) ]:\n\n # Skip setting up a view container if the type is not being processed.\n if not container[ 1 ]:\n continue\n\n # Set up a cutflow alg.\n if enableCutflow:\n alg = createAlgorithm( 'CP::ObjectCutFlowHistAlg',\n 'OverlapRemovalCutFlowDumperAlg_%s' % container[ 0 ] + postfix )\n alg.histPattern = container[ 0 ] + postfix + '_OR_cflow_%SYS%'\n if inputLabel:\n alg.selection = [ '%s,as_char' % inputLabel,\n '%s,as_char' % outputLabel ]\n alg.selectionNCuts = [1, 1]\n else:\n alg.selection = [ '%s,as_char' % outputLabel ]\n alg.selectionNCuts = [1]\n seq.append( alg, inputPropName = { container[ 0 ] : 'input' } )\n\n # Set up a view container for the type.\n alg = createAlgorithm( 'CP::AsgViewFromSelectionAlg',\n 'OverlapRemovalViewMaker_%s' % container[ 0 ] + postfix )\n alg.selection = [ '%s,as_char' % outputLabel ]\n seq.append( alg, inputPropName = { container[ 0 ] : 'input' },\n outputPropName = { container[ 0 ] : 'output' } )\n pass\n\n # Return the sequence:\n return seq",
"def _undo_overlap(self, agent1, agent2, dist, combined_sizes, **kwargs):\n overlap = (combined_sizes - dist) / combined_sizes\n self.position_state.modify_position(agent1, -agent1.velocity * overlap)\n self.position_state.modify_position(agent2, -agent2.velocity * overlap)",
"def main():\n line1 = Line(1, 5)\n line2 = Line(5, 8)\n print(LineUtil.is_overlap(line1, line2))",
"def prosody_dynamic(self, audio):\n fs, data_audio = read(audio)\n\n if len(data_audio.shape)>1:\n data_audio = data_audio.mean(1)\n data_audio = data_audio-np.mean(data_audio)\n data_audio = data_audio/float(np.max(np.abs(data_audio)))\n size_frameS = self.size_frame*float(fs)\n size_stepS = self.step*float(fs)\n overlap = size_stepS/size_frameS\n\n if self.pitch_method == 'praat':\n name_audio = audio.split('/')\n temp_uuid = 'prosody'+name_audio[-1][0:-4]\n if not os.path.exists(PATH+'/../tempfiles/'):\n os.makedirs(PATH+'/../tempfiles/')\n temp_filename_f0 = PATH+'/../tempfiles/tempF0'+temp_uuid+'.txt'\n temp_filename_vuv = PATH+'/../tempfiles/tempVUV'+temp_uuid+'.txt'\n praat_functions.praat_vuv(audio, temp_filename_f0, temp_filename_vuv,\n time_stepF0=self.step, minf0=self.minf0, maxf0=self.maxf0)\n\n F0, _ = praat_functions.decodeF0(\n temp_filename_f0, len(data_audio)/float(fs), self.step)\n os.remove(temp_filename_f0)\n os.remove(temp_filename_vuv)\n elif self.pitch_method == 'rapt':\n data_audiof = np.asarray(data_audio*(2**15), dtype=np.float32)\n F0 = pysptk.sptk.rapt(data_audiof, fs, int(\n size_stepS), min=self.minf0, max=self.maxf0, voice_bias=self.voice_bias, otype='f0')\n\n pitchON = np.where(F0 != 0)[0]\n dchange = np.diff(pitchON)\n change = np.where(dchange > 1)[0]\n iniV = pitchON[0]\n\n featvec = []\n iniVoiced = (pitchON[0]*size_stepS)+size_stepS\n seg_voiced = []\n f0v = []\n Ev = []\n for indx in change:\n finV = pitchON[indx]+1\n finVoiced = (pitchON[indx]*size_stepS)+size_stepS\n VoicedSeg = data_audio[int(iniVoiced):int(finVoiced)]\n temp = F0[iniV:finV]\n tempvec = []\n if len(VoicedSeg) > int(size_frameS):\n seg_voiced.append(VoicedSeg)\n dur = len(VoicedSeg)/float(fs)\n x = np.arange(0,len(temp))\n z = np.poly1d(np.polyfit(x,temp,self.P))\n f0v.append(temp)\n tempvec.extend(z.coeffs)\n temp=get_energy_segment(size_frameS, size_stepS, VoicedSeg, overlap)\n Ev.append(temp)\n x = np.arange(0, len(temp))\n z = np.poly1d(np.polyfit(x, temp, self.P))\n tempvec.extend(z.coeffs)\n tempvec.append(dur)\n featvec.append(tempvec)\n iniV = pitchON[indx+1]\n iniVoiced = (pitchON[indx+1]*size_stepS)+size_stepS\n\n # Add the last voiced segment\n finV = (pitchON[len(pitchON)-1])\n finVoiced = (pitchON[len(pitchON)-1]*size_stepS)+size_stepS\n VoicedSeg = data_audio[int(iniVoiced):int(finVoiced)]\n temp = F0[iniV:finV]\n tempvec = []\n\n if len(VoicedSeg) > int(size_frameS):\n # Compute duration\n dur = len(VoicedSeg)/float(fs)\n \n x = np.arange(0, len(temp))\n z = np.poly1d(np.polyfit(x, temp, self.P))\n tempvec.extend(z.coeffs)\n # Energy coefficients\n temp=get_energy_segment(size_frameS, size_stepS, VoicedSeg, overlap)\n x = np.arange(0, len(temp))\n z = np.poly1d(np.polyfit(x, temp, self.P))\n tempvec.extend(z.coeffs)\n tempvec.append(dur)\n # Compute duration\n featvec.append(tempvec)\n\n return np.asarray(featvec)",
"def AlignSequences(bs1, bs2, parametersLines, **functions):\n m = len(bs1.blocks)\n n = len(bs2.blocks)\n\n dp = list()\n #\n for _ in range(m+1):\n tmp = list()\n for _ in range(n+1):\n tmp.append(commonFunctions.matrixCell())\n dp.append(tmp)\n\n for i in range(m+1):\n if i > 0:\n block = bs1.blocks[i-1]\n dp[i][0].score = dp[i-1][0].score + functions[\"GapPenalty\"](block)\n dp[i][0].pointer = [0]\n\n for i in range(n+1):\n if i > 0:\n block = bs2.blocks[i-1]\n dp[0][i].score = dp[0][i-1].score + functions[\"GapPenalty\"](block)\n dp[0][i].pointer = [0]\n\n paramValueMap = parametersLines.parameterDistribution()\n\n for i in range(1, m+1):\n for j in range(1, n+1):\n pairScore, matchedPairs = MisMatchScore(\n bs1.blocks[i-1], bs2.blocks[j-1], paramValueMap, functions[\"GetLineSequence\"], functions[\"MinimumWeightBipartiteMatching\"], functions[\"NumberOfAttributes\"])\n block1Gap = functions[\"GapPenalty\"](bs1.blocks[i-1])\n block2Gap = functions[\"GapPenalty\"](bs2.blocks[j-1])\n\n # When scores are same preference is given to diagonal (x==y) rather than a gap (x==_)\n if dp[i-1][j-1].score + pairScore <= dp[i-1][j].score + block1Gap:\n if dp[i-1][j-1].score + pairScore <= dp[i][j-1].score + block2Gap:\n dp[i][j].score = dp[i-1][j-1].score + pairScore\n dp[i][j].pointer = [1]\n dp[i][j].matchedLines = matchedPairs\n else:\n dp[i][j].score = dp[i][j-1].score + block2Gap\n dp[i][j].pointer = [2]\n else:\n if dp[i-1][j].score + block1Gap <= dp[i][j-1].score + block2Gap:\n dp[i][j].score = dp[i-1][j].score + block1Gap\n dp[i][j].pointer = [3]\n else:\n dp[i][j].score = dp[i][j-1].score + block2Gap\n dp[i][j].pointer = [2]\n l = m+n\n i = m\n j = n\n xpos = l\n ypos = l\n block1Alignment = list()\n block2Alignment = list()\n lineMatchings = list()\n while (not (i == 0 or j == 0)):\n if dp[i][j].pointer == [1]:\n block1Alignment.append(bs1.blocks[i-1])\n block2Alignment.append(bs2.blocks[j-1])\n lineMatchings.append(dp[i][j].matchedLines)\n i -= 1\n j -= 1\n elif dp[i][j].pointer == [2]:\n block1Alignment.append([])\n block2Alignment.append(bs2.blocks[j-1])\n j -= 1\n elif dp[i][j].pointer == [3]:\n block1Alignment.append(bs1.blocks[i-1])\n block2Alignment.append([])\n i -= 1\n else:\n raise ValueError(\"Undefined pointer type\")\n xpos -= 1\n ypos -= 1\n\n while xpos >= 0:\n if i > 0:\n block1Alignment.append(bs1.blocks[i-1])\n i -= 1\n else:\n block1Alignment.append([])\n xpos -= 1\n\n while ypos >= 0:\n if j > 0:\n block2Alignment.append(bs2.blocks[j-1])\n j -= 1\n else:\n block2Alignment.append([])\n ypos -= 1\n\n block1Alignment.reverse()\n block2Alignment.reverse()\n lineMatchings.reverse()\n return block1Alignment, block2Alignment, lineMatchings",
"def findOverlap( columns, t, minOverlap ):\n for c in columns:\n c.setOverlap() # defaults to 0.0\n for s in c.getConnectedSynapses():\n c.setOverlap( c.getOverlap() + s.getSourcetInput( t ) )\n\n if c.getOverlap() < minOverlap:\n c.setOverlap()\n else:\n c.boostOverlap()",
"def testStereo(self):\r\n smi_and_cansmi = [\r\n ('OC(=O)[C@@H](CCC(N)=O)N', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('OC(=O)[C@H](CCC(N)=O)N', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('N[C@@H](C(O)=O)CCC(N)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('N[C@H](C(O)=O)CCC(N)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('OC(=O)[C@H](N)CCC(N)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('OC(=O)[C@@H](N)CCC(N)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('N[C@H](CCC(N)=O)C(O)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('N[C@@H](CCC(N)=O)C(O)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@@H](N)C(O)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@H](N)C(O)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@H](C(O)=O)N', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@@H](C(O)=O)N', 'NC(=O)CC[C@H](N)C(=O)O')]\r\n for smi, cansmi in smi_and_cansmi:\r\n mol = pybel.readstring(\"smi\", smi)\r\n self.assertEqual(mol.write(\"can\").split()[0],\r\n cansmi)",
"def find_funMotif_variants_in_tissue(funMotifs: dict, tissue: str, variant_BedTool_file: str, db_name: str,\r\n db_user_name: str, output_file: str, motif_BedTool_file: str):\r\n get_BedTool_for_functional_motifs(funMotifs, tissue, db_user_name, db_name, motif_BedTool_file)\r\n overlap_variants_and_motifs(motif_BedTool_file, variant_BedTool_file, output_file)\r\n return",
"def check_sim_overlaps(self, verbose = False):\n if hasattr(self.phot, \"data\") and hasattr(self, 'spec'):\n for i, spectrum in enumerate(self.sim_spec):\n if verbose:print(i, spectrum)\n for j, filtername in enumerate(self.phot.data_filters):\n if verbose:print(j, filtername)\n\n if hasattr(self.phot.data_filters[filtername], \"_lower_edge\") and \\\n hasattr(self.phot.data_filters[filtername], \"_upper_edge\") and \\\n hasattr(self.sim_spec[spectrum], \"data\"):\n blue_bool = self.phot.data_filters[filtername]._lower_edge > self.sim_spec[spectrum].min_wavelength\n red_bool = self.phot.data_filters[filtername]._upper_edge < self.sim_spec[spectrum].max_wavelength\n\n if blue_bool and red_bool:\n within = True\n else:\n within = False\n\n if verbose:print(within)\n if within:\n self.sim_spec[spectrum]._add_to_overlapping_filters(filtername, verbose=verbose)\n else:\n warnings.warn(\"SNClass.check_sim_overlaps - something went wrong... no data?\")\n pass",
"def intersect_variant_sets(ipscore_bed, gtex_bed, ro = 0.5, type_match = True):\n \n def filter_appropriate_overlaps(intersect):\n svtype1 = intersect.SVTYPE_A.tolist()\n svtype2 = intersect.SVTYPE_B.tolist()\n\n\n combos = (list(itertools.combinations(['DUP', 'mCNV'], 2)) + \n list(itertools.combinations(['DEL', 'mCNV'], 2)))\n\n acceptable_match = map(set, combos)\n\n data = []\n for s1,s2 in zip(svtype1, svtype2):\n if s1 == s2:\n data.append(True)\n else:\n b = set([s1, s2]) in acceptable_match\n data.append(b)\n return data \n \n t = ['chrom', 'start', 'end', 'ID', 'SVTYPE']\n\n cols = [\"{}_{}\".format(i, 'A') for i in t] + [\"{}_{}\".format(i, 'B') for i in t] + ['overlap']\n\n # intersections\n intersect = ipscore_bed.intersect(gtex_bed, f = ro, F = ro, wo=True).to_dataframe(names = cols)\n \n # add amount of overlap (RO on each)\n try:\n \n intersect['Length_A'] = intersect.end_A.astype(int) - intersect.start_A.astype(int)\n intersect['Length_B'] = intersect.end_B.astype(int) - intersect.start_B.astype(int)\n\n except:\n return intersect\n\n intersect['RO_A'] = intersect['overlap'].astype(int)/ intersect.Length_A\n intersect['RO_B'] = intersect['overlap'].astype(int)/ intersect.Length_B\n intersect['average_RO'] = intersect[['RO_A', 'RO_B']].mean(axis = 1)\n \n if type_match:\n intersect['matching_svtypes'] = filter_appropriate_overlaps(intersect)\n else:\n intersect['matching_svtypes'] = True\n \n return intersect",
"def intersect(f, df, g, dg):\n \"*** YOUR CODE HERE ***\"",
"def findmotif(MS_seq, MS_name, ProteomeDict, motif_size):\n MS_seqU = MS_seq.upper()\n try:\n UP_seq = ProteomeDict[MS_name]\n assert MS_seqU in UP_seq, \"check \" + MS_name + \" with seq \" + MS_seq + \". Protein sequence found: \" + UP_seq\n regexPattern = re.compile(MS_seqU)\n MatchObs = list(regexPattern.finditer(UP_seq))\n if \"y\" in MS_seq:\n pY_idx = list(re.compile(\"y\").finditer(MS_seq))\n assert len(pY_idx) != 0\n center_idx = pY_idx[0].start()\n y_idx = center_idx + MatchObs[0].start()\n DoS_idx = None\n if len(pY_idx) > 1:\n DoS_idx = pY_idx[1:]\n assert len(DoS_idx) != 0\n elif \"t\" in MS_seq or \"s\" in MS_seq:\n DoS_idx = list(re.compile(\"y|t|s\").finditer(MS_seq))\n assert len(DoS_idx) != 0\n mappedMotif, pidx = makeMotif(UP_seq, MS_seq, motif_size, y_idx, center_idx, DoS_idx)\n if len(pidx) == 1:\n pos = pidx[0]\n if len(pidx) > 1:\n pos = \";\".join(pidx)\n\n if \"y\" not in MS_seq:\n pTS_idx = list(re.compile(\"t|s\").finditer(MS_seq))\n assert len(pTS_idx) != 0\n center_idx = pTS_idx[0].start()\n ts_idx = center_idx + MatchObs[0].start()\n DoS_idx = None\n if len(pTS_idx) > 1:\n DoS_idx = pTS_idx[1:]\n mappedMotif, pidx = makeMotif(UP_seq, MS_seq, motif_size, ts_idx, center_idx, DoS_idx)\n if len(pidx) == 1:\n pos = pidx[0]\n if len(pidx) > 1:\n pos = \";\".join(pidx)\n\n except BaseException:\n print(MS_name + \" not in ProteomeDict.\")\n raise\n\n return pos, mappedMotif",
"def mask_regions_high_speed(cnts, speed, dmaSync, odo,\n vec2=None, speed_max=40):\n\n speed[speed == np.inf] = 0 # get rid of strange infinite values in speed array\n cond_speed = np.isfinite(speed) & (speed <= speed_max)\n cnts_cond = cnts[cond_speed]\n # only take the ascending speed region\n #cond_speed1 = np.isfinite(speed) & (speed<=speed_max) & np.insert((np.diff(speed) > 0), 0, True)\n #speed_cond1 = speed[cond_speed1]\n #cond_speed2 = (np.insert((np.diff(speed_cond1) > 0), 0, True))\n #speed_cond2 = speed_cond1[cond_speed2]\n #cond_speed3 = (np.insert((np.diff(speed_cond2) > 0),0, True))\n #speed_cond3 = speed_cond2[cond_speed3]\n #cond_speed4 = (np.insert((np.diff(speed_cond3) > 0),0, True))\n #speed_cond4 = speed_cond3[cond_speed4]\n #cond_speed5 = (np.insert((np.diff(speed_cond4) > 0),0, True))\n#\n #cnts_cond1 = cnts[cond_speed1]\n #cnts_cond2 = cnts_cond1[cond_speed2]\n #cnts_cond3 = cnts_cond2[cond_speed3]\n #cnts_cond4 = cnts_cond3[cond_speed4]\n #cnts_cond5 = cnts_cond4[cond_speed5]\n\n # TODO: intInterpfunc should be defined before\n intInterpfunc = interp1d(dmaSync.ExtCounter, dmaSync.Odometer,\n bounds_error=False, fill_value=-1)\n #odo_low_speed = intInterpfunc(cnts[cond_speed])\n odo_low_speed = intInterpfunc(cnts_cond)\n odo_low_speed = np.nan_to_num(odo_low_speed)\n if sum(cond_speed) == len(cnts): # all of the speed meets the criteria\n odo_clusters = np.array([[odo_low_speed[0], odo_low_speed[-1]]])\n regions = [odo.values]\n regions.append(vec2.values)\n else:\n # split in clusters of larger gaps, say 8*10E3 (equiv. to 10 m), or\n # equivalent to some cluster statistics\n thres = np.percentile(np.diff(odo_low_speed), 98) + 8E4\n assert ~np.isnan(thres)\n odo_clusters = get_clusters_single1D(odo_low_speed, thres)\n regions = get_regions(odo, odo_clusters, vec2)\n regions.append(odo_clusters)\n return regions",
"def test_overlap2(self):\n\n fragments = []\n for _, _, frags_300, frags_200 in self.pkt_infos:\n if len(frags_300) == 1:\n fragments.extend(frags_300)\n else:\n # care must be taken here so that there are no fragments\n # received by vpp after reassembly is finished, otherwise\n # new reassemblies will be started and packet generator will\n # freak out when it detects unfreed buffers\n zipped = zip(frags_300, frags_200)\n for i, j in zipped:\n fragments.extend(i)\n fragments.extend(j)\n fragments.pop()\n\n self.pg_enable_capture()\n self.src_if.add_stream(fragments)\n self.pg_start()\n\n packets = self.dst_if.get_capture(len(self.pkt_infos))\n self.verify_capture(packets)\n self.src_if.assert_nothing_captured()\n\n # run it all to verify correctness\n self.pg_enable_capture()\n self.src_if.add_stream(fragments)\n self.pg_start()\n\n packets = self.dst_if.get_capture(len(self.pkt_infos))\n self.verify_capture(packets)\n self.src_if.assert_nothing_captured()",
"def label_segments(segs, truths, detected):\n for seg in segs:\n for truth in truths:\n if time_overlap(seg, truth): \n seg[\"label\"] = truth[\"label\"]\n for det in detected:\n if time_overlap(seg, det):\n if det[\"label\"] == truth[\"label\"]:\n seg[\"match\"] = True\n else:\n seg[\"match\"] = False\n return segs",
"def velo_inspec(self,ind1 = 1150*2, ind2 = 1185*2):\n NeA = self.NeA[ind1:ind2]\n NeB = self.NeB[ind1:ind2]\n NeC = self.NeC[ind1:ind2]\n\n secondsA = self.secondsA[ind1:ind2]\n secondsB = self.secondsB[ind1:ind2]\n secondsC = self.secondsC[ind1:ind2]\n\n\n mlatA = self.mlatA[ind1:ind2]\n mlatB = self.mlatB[ind1:ind2]\n mlatC = self.mlatC[ind1:ind2]\n\n mean_range = 5\n NeA = self.meanie(NeA, mean_range)\n NeB = self.meanie(NeB, mean_range)\n NeC = self.meanie(NeC, mean_range)\n\n plt.figure(0)\n plt.plot(mlatB, NeB, \"r\")\n plt.plot(mlatA, NeA, \"g\")\n plt.plot(mlatC, NeC, \"b\")\n plt.xlabel(\"Geomagnetic Latitude [Degrees]\")\n plt.ylabel(\"Electron density [cm$^{-3}$]\")\n plt.legend([\"sat B\", \"sat A\", \"sat C\"])\n plt.title(\"An interesting case\")\n plt.grid(\"on\", axis = \"x\")\n # plt.savefig(\"Figures/matfigs/interesting_case.pdf\")\n plt.show()\n \n #calculate comparison index of data window\n n = len(NeA)\n NeA_ = NeA*np.hanning(n)\n NeB_ = NeB*np.hanning(n)\n NeC_ = NeC*np.hanning(n)\n fftA = np.roll(np.fft.fft(NeA)[:int(n/2)], int(n/2))\n fftB = np.roll(np.fft.fft(NeB)[:int(n/2)], int(n/2))\n fftC = np.roll(np.fft.fft(NeC)[:int(n/2)], int(n/2))\n freqs = np.linspace(-1, 1, n)[int(n/2):]\n df = freqs[1]-freqs[0]\n f = 0.1\n i = int(f/df)\n \n plt.plot(mlatB, np.hanning(n))\n plt.xlabel(\" \")\n plt.ylabel(\"extensions\")\n plt.show()\n \n plt.figure(69)\n plt.plot(np.log10(freqs[1:]), np.log10(np.abs(fftA[1:])**2), \"g\")\n plt.plot(np.log10(freqs[1:]), np.log10(np.abs(fftB[1:])**2), \"r\")\n plt.plot(np.log10(freqs[1:]), np.log10(np.abs(fftC[1:])**2), \"b\")\n plt.xticks([f, 0.2, 0.3])\n plt.grid(\"on\")\n plt.xlabel(\"Log10 Frequency [Hz]\")\n plt.ylabel(\"Log10 PSD\")\n plt.legend([\"Sat B\", \"Sat A\", \"Sat C\"])\n plt.show()\n \n plt.plot(mlatB, NeB_, \"r\")\n plt.plot(mlatA, NeA_, \"g\")\n plt.plot(mlatC, NeC_, \"b\")\n plt.legend([\"Sat B\", \"Sat A\", \"Sat C\"])\n plt.show()\n \n sumA = np.sum(np.abs(fftA[i:])*df)\n sumB = np.sum(np.abs(fftB[i:])*df)\n sumC = np.sum(np.abs(fftC[i:])*df)\n \n I_BA = (sumB-sumA)/np.max([sumB, sumA])\n I_BC = (sumB-sumC)/np.max([sumB, sumC])\n I_AC = (sumA-sumC)/np.max([sumA, sumC])\n \n print(\"I_BA = %g\" % I_BA)\n print(\"I_BC = %g\" % I_BC)\n print(\"I_AC = %g\" % I_AC)\n \n \n\n dx = (secondsB[1] - secondsB[0])*self.velB[ind1]\n der_NeA = (NeA[1:] - NeA[:-1])/dx\n der_NeB = (NeB[1:] - NeB[:-1])/dx\n der_NeC = (NeC[1:] - NeC[:-1])/dx\n\n\n #doing a gaussian fit\n from scipy.optimize import curve_fit\n\n def gaussian(x, amp, cen, wid):\n \"\"\"1-d gaussian: gaussian(x, amp, cen, wid)\"\"\"\n return (amp / (np.sqrt(2*np.pi) * wid)) * np.exp(-(x-cen)**2 / (2*wid**2))\n\n init_valsA = [4000, 1164, 0.5]\n init_valsB = [4000, 1162, 0.5]\n init_valsC = [4000, 1166, 0.5]\n\n poptA, pcovA = curve_fit(gaussian, secondsB[:-1],der_NeA , p0=init_valsA)\n poptB, pcovB = curve_fit(gaussian, secondsB[:-1],der_NeB , p0=init_valsB)\n poptC, pcovC = curve_fit(gaussian, secondsB[:-1],der_NeC , p0=init_valsC)\n\n\n lats = np.linspace(mlatB[0], mlatB[-1], 100)\n times = np.linspace(secondsB[0], secondsB[-1], 100)\n yA = gaussian(times, poptA[0], poptA[1], poptA[2])\n yB = gaussian(times, poptB[0], poptB[1], poptB[2])\n yC = gaussian(times, poptC[0], poptC[1], poptC[2])\n\n\n plt.figure(1)\n plt.plot(lats, yB, \"--r\")\n plt.plot(mlatB[:-1], der_NeB, \"r\")\n\n plt.plot(lats, yA, \"--g\")\n plt.plot(mlatA[:-1], der_NeA, \"g\")\n\n plt.plot(lats, yC, \"--b\")\n plt.plot(mlatC[:-1], der_NeC, \"b\")\n plt.legend([\"fit B\", \"sat B\", \"fit A\", 
\"sat A\", \"fit C\", \"sat C\"])\n\n plt.title(\"Density gradients\")\n plt.xlabel(\"Geomagnetic latitude [Degrees]\")\n plt.ylabel(\"Electron density gradient [$cm^{-3}/m$]\")\n plt.grid(\"on\", axis = \"x\")\n # plt.savefig(\"Figures/matfigs/interesting_case_deri.pdf\")\n\n\n vsat = np.mean(self.velA) #velocity of satellites\n sBA = self.BA_shift/2 #time delay BA\n sBC = self.BC_shift/2 #time delay BC\n sAC = (self.BC_shift - self.BA_shift)/2\n\n nBA = poptB[1] - poptA[1] #time between tops in difference plot\n nBC = poptB[1] - poptC[1] #time between tops in difference plot\n nAC = poptA[1] - poptC[1]\n\n vBA = vsat*(1 - (sBA / (sBA - nBA))) #finding bubble velocity\n vBC = vsat*(1 - (sBC / (sBC - nBC))) #finding bubble velocity\n vAC = vsat*(1 - (sAC / (sAC - nAC)))\n\n\n\n NBA = int(np.abs(nBA)*2)\n NBC = int(np.abs(nBC)*2)\n\n fixed_NeA = self.NeA[ind1 + NBA: ind2 + NBA]\n fixed_NeB = self.NeB[ind1:ind2]\n fixed_NeC = self.NeC[ind1 + NBC: ind2 + NBC]\n\n fixed_NeA = self.meanie(fixed_NeA, mean_range)\n fixed_NeB = self.meanie(fixed_NeB, mean_range)\n fixed_NeC = self.meanie(fixed_NeC, mean_range)\n\n plt.figure(2)\n plt.plot(mlatB, fixed_NeB, \"r\")\n plt.plot(mlatB, fixed_NeA, \"g\")\n plt.plot(mlatB, fixed_NeC, \"b\")\n plt.xlabel(\"Geomagnetic Latitude [Degrees]\")\n plt.ylabel(\"Electron density [cm$^{-3}$]\")\n plt.legend([\"sat B\", \"sat A\", \"sat C\"])\n plt.title(\"An interesting case, shifted\")\n plt.grid(\"on\", axis = \"x\")\n # plt.savefig(\"Figures/matfigs/interesting_case_shifted.pdf\")\n\n\n print(\"Bubble velocity calculated from BA = %g [m/s]\" % vBA)\n print(\"Bubble velocity calculated from BC = %g [m/s]\" % vBC)\n print(\"Bubble velocity calculated from AC = %g [m/s]\" % vAC)\n print(nBA)\n print(nBC)\n print(nAC)\n print(sBA)\n print(sBC)\n print(sAC)",
"def merge_overlapping_predictions(tags1: List[str], tags2: List[str]) -> List[str]:\n ret_sequence = []\n prev_label = \"O\"\n\n # Build a coherent sequence out of two\n # spans which predicates' overlap\n\n for tag1, tag2 in zip(tags1, tags2):\n label1 = tag1.split(\"-\", 1)[-1]\n label2 = tag2.split(\"-\", 1)[-1]\n if (label1 == \"V\") or (label2 == \"V\"):\n # Construct maximal predicate length -\n # add predicate tag if any of the sequence predict it\n cur_label = \"V\"\n\n # Else - prefer an argument over 'O' label\n elif label1 != \"O\":\n cur_label = label1\n else:\n cur_label = label2\n\n # Append cur tag to the returned sequence\n cur_tag = get_coherent_next_tag(prev_label, cur_label)\n prev_label = cur_label\n ret_sequence.append(cur_tag)\n return ret_sequence",
"def apply_merge(volume, volumes, merge_directions):\n \n def get_new_volume(volume, lowcorner):\n v2 = get_volume(lowcorner)\n if v2 != None:\n return merge_volumes(volume, v2)\n else:\n return volume\n\n def get_volume(lowcorner):\n if not isinstance(lowcorner, tuple):\n raise TypeError() # required for \"==\"\n\n for i in range(len(volumes)):\n v = volumes[i]\n if v.p1 == lowcorner:\n logger.debug(\"\\tMerging volume with low corner %s\", v.p1)\n return volumes.pop(i)\n \n logger.warning(\"\\tNo volume to merge with\")\n return None\n\n import copy\n\n logger.debug(\"\\t== Function == apply_merge\")\n\n p1, p2 = volume.get_corners()\n logger.debug(\"\\tTargetting volume with low corner %s\", p1)\n\n if len(merge_directions) == 1:\n if Axes.k in merge_directions:\n p1_target = list(copy.deepcopy(p1))\n p1_target[Axes.k.value] = p2[Axes.k.value]\n new_volume = get_new_volume(volume, tuple(p1_target))\n\n elif Axes.j in merge_directions:\n p1_target = list(copy.deepcopy(p1))\n p1_target[Axes.j.value] = p2[Axes.j.value]\n new_volume = get_new_volume(volume, tuple(p1_target))\n\n elif Axes.i in merge_directions:\n p1_target = list(copy.deepcopy(p1))\n p1_target[Axes.i.value] = p2[Axes.i.value]\n new_volume = get_new_volume(volume, tuple(p1_target))\n\n elif len(merge_directions) == 2:\n logger.debug(\"\\tMerge directions: %s\", merge_directions)\n axis1, axis2 = merge_directions\n\n p1_target = list(copy.deepcopy(p1))\n p1_target[axis1.value] = p2[axis1.value]\n volume_axis1 = get_new_volume(volume, tuple(p1_target))\n\n new_volume_axis1 = apply_merge(volume_axis1, volumes, [axis2])\n new_volume_axis2 = apply_merge(volume, volumes, [axis2])\n new_volume = merge_volumes(new_volume_axis1, new_volume_axis2)\n\n elif len(merge_directions) == 3:\n logger.debug(\"\\tMerge directions %s\", merge_directions)\n axis1, axis2, axis3 = merge_directions\n \n p1_target = list(copy.deepcopy(p1))\n p1_target[axis1.value] = p2[axis1.value]\n volume_axis1 = get_new_volume(volume, tuple(p1_target))\n\n new_vol1 = apply_merge(volume, volumes, [axis2, axis3])\n new_vol2 = apply_merge(volume_axis1, volumes, [axis2, axis3])\n new_volume = merge_volumes(new_vol1, new_vol2)\n\n else:\n raise ValueError()\n\n logger.debug(\"\\tEnd\")\n return new_volume",
"def test_overlap1(self):\n\n fragments = []\n for _, _, frags_300, frags_200 in self.pkt_infos:\n if len(frags_300) == 1:\n fragments.extend(frags_300)\n else:\n for i, j in zip(frags_200, frags_300):\n fragments.extend(i)\n fragments.extend(j)\n\n self.pg_enable_capture()\n self.src_if.add_stream(fragments)\n self.pg_start()\n\n packets = self.dst_if.get_capture(len(self.pkt_infos))\n self.verify_capture(packets)\n self.src_if.assert_nothing_captured()\n\n # run it all to verify correctness\n self.pg_enable_capture()\n self.src_if.add_stream(fragments)\n self.pg_start()\n\n packets = self.dst_if.get_capture(len(self.pkt_infos))\n self.verify_capture(packets)\n self.src_if.assert_nothing_captured()"
] |
[
"0.5386559",
"0.5349724",
"0.5311513",
"0.52570933",
"0.5200758",
"0.5150732",
"0.51487786",
"0.51306033",
"0.50855744",
"0.50761193",
"0.5048707",
"0.50270504",
"0.5025168",
"0.5003984",
"0.500078",
"0.4981427",
"0.49697068",
"0.4929684",
"0.49271542",
"0.49192476",
"0.49067432",
"0.48978636",
"0.48928282",
"0.48919225",
"0.48895162",
"0.48893613",
"0.4887553",
"0.48821396",
"0.48720124",
"0.4857917"
] |
0.6052787
|
0
|
Function that returns the overlaps between functional motifs of a tissue and variants
|
def find_funMotif_variants_in_tissue(funMotifs: dict, tissue: str, variant_BedTool_file: str, db_name: str,
                                     db_user_name: str, output_file: str, motif_BedTool_file: str):
    # Build a BedTool file of the functional motifs active in the given tissue from the database
    get_BedTool_for_functional_motifs(funMotifs, tissue, db_user_name, db_name, motif_BedTool_file)
    # Intersect the motif regions with the variants and write the overlapping records to output_file
    overlap_variants_and_motifs(motif_BedTool_file, variant_BedTool_file, output_file)
    return
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def listOfOverlappingTTPairs():\n listOfHalfModules = listOfTTHalfModules()\n ttmap = TTModulesMap_instance\n pairs = []\n regions = {'A':1, 'B':2, 'C':3}\n print \"Overlapping TT half modules:\"\n for hm1 in listOfHalfModules:\n for hm2 in listOfHalfModules:\n # they must be different\n if hm1 == hm2: continue\n # they must be both on top or both on bottom\n if locateTTHalfModule(hm1)[3] != locateTTHalfModule(hm2)[3]: continue\n # they must be on the same layer\n if locateTTHalfModule(hm1)[0] != locateTTHalfModule(hm2)[0]: continue\n # avoid duplicates\n if (hm1, hm2) in pairs: continue\n if (hm2, hm1) in pairs: continue\n # they must be contiguous:\n if (locateTTHalfModule(hm1)[1] == locateTTHalfModule(hm2)[1]):\n if (abs(locateTTHalfModule(hm1)[2] - locateTTHalfModule(hm2)[2]) == 1):\n pairs.append( (hm1, hm2) )\n else:\n num1 = locateTTHalfModule(hm1)[2]\n num2 = locateTTHalfModule(hm2)[2]\n max1 = ttmap.numberOfModules[locateTTHalfModule(hm1)[0]]['Region'+locateTTHalfModule(hm1)[1]] - 1\n max2 = ttmap.numberOfModules[locateTTHalfModule(hm2)[0]]['Region'+locateTTHalfModule(hm2)[1]] - 1\n nreg1 = regions[locateTTHalfModule(hm1)[1]]\n nreg2 = regions[locateTTHalfModule(hm2)[1]]\n if ( (num1==max1 and num2==0 and nreg2-nreg1==1) or (num2==max2 and num1==0 and nreg1-nreg2==1) ):\n pairs.append( (hm1, hm2) )\n print '\\t', hm1, hm2\n ## - same region\n #if ((abs(locateTTHalfModule(hm1)[2] - locateTTHalfModule(hm2)[2]) != 1)\n # and (locateTTHalfModule(hm1)[0] != locateTTHalfModule(hm2)[0])): continue\n ## - or neighbouring region\n #elif not ((locateTTHalfModule(hm1)[0] != locateTTHalfModule(hm2)[0])\n # and ( ( (ttmap.numberOfModules[locateTTHalfModule(hm1)[0]] == locateTTHalfModule(hm1)[2]+1 )\n # and (locateTTHalfModule(hm2)[2] == 0) )\n # or ( (ttmap.numberOfModules[locateTTHalfModule(hm2)[0]] == locateTTHalfModule(hm2)[2]+1 )\n # and (locateTTHalfModule(hm1)[2] == 0) ) ) ): continue\n ## append to list of pairs\n #pairs.append( (hm1, hm2) )\n print\n return pairs",
"def findOverlap( columns, t, minOverlap ):\n for c in columns:\n c.setOverlap() # defaults to 0.0\n for s in c.getConnectedSynapses():\n c.setOverlap( c.getOverlap() + s.getSourcetInput( t ) )\n\n if c.getOverlap() < minOverlap:\n c.setOverlap()\n else:\n c.boostOverlap()",
"def overlap(t1start, t1end, t2start, t2end):\n\n return (t1start <= t2start <= t1end) or (t2start <= t1start <= t2end)",
"def trajectory_overlap(gt_trajs, pred_traj):\n max_overlap = 0\n max_index = 0\n for t, gt_traj in enumerate(gt_trajs):\n s_viou = viou_sx(gt_traj['sub_traj'], gt_traj['duration'], pred_traj['sub_traj'], pred_traj['duration'])\n o_viou = viou_sx(gt_traj['obj_traj'], gt_traj['duration'], pred_traj['obj_traj'], pred_traj['duration'])\n so_viou = min(s_viou, o_viou)\n\n if so_viou > max_overlap:\n max_overlap = so_viou\n max_index = t\n\n return max_overlap, max_index",
"def BD_overlap(df_OTU):\n # min BD for each library\n func = lambda x: np.min(x['BD_mid'])\n BD_mins = df_OTU.apply_by_group(func,groups=['library'],inplace=False)\n # max BD for each library\n func = lambda x: np.max(x['BD_mid'])\n BD_maxs = df_OTU.apply_by_group(func,groups=['library'],inplace=False)\n \n # overlap: max of BD_mins, min of BD_maxs\n BD_overlap_min = np.max(BD_mins['values'].values)\n BD_overlap_max = np.min(BD_maxs['values'].values)\n \n return BD_overlap_min, BD_overlap_max",
"def overlap_variants_and_motifs(motifs, variants, output_file: str):\r\n # TODO: if necessary to make BedTool again, change architecture, figure out why one file okay and the other not\r\n mot = pybedtools.BedTool(motifs)\r\n mot.intersect(variants, wo=True, header=True).saveas(output_file)\r\n return",
"def overlap(list1,list2):\n \n coord=[]\n for pos1 in list1:\n #print 'pos in list1 is', pos1\n coord.append(('S',int(pos1.split('-')[0]), 'l1'))\n #print 'S is ', pos1.split('-')[0]\n coord.append(('E',int(pos1.split('-')[1]),'l1'))\n #print 'E is ', pos1.split('-')[1]\n #print coord \n for pos2 in list2:\n #print 'pos in list2 is', pos2\n coord.append(('S',int(pos2.split('-')[0]),'l2'))\n #print 'S is ', pos2.split('-')[0]\n coord.append(('E', int(pos2.split('-')[1]),'l2'))\n #print 'E is ', pos2.split('-')[1]\n #print coord\n \n coord.sort(key = lambda x : x[0], reverse = True)\n #print 'coord after first sort \\n', coord\n coord.sort(key = lambda x : x[1])\n #print 'coord after 2nd sort by number \\n', coord\n # PART 1: SEARCHES FOR OVERLAPS BETWEEN 2 HISTONE MARKS\n new_coord_list = [] #initialize new list to which to move all those that don't overlap\n #index = 0 #position in list \n spos=0 # start pos initialized \n ct=0\n ovl=[]\n for pos in coord:\n new_coord_list.append(pos)\n #print pos, 'doesn\\'t overlap'\n index = int(new_coord_list.index(pos)) \n if pos[0]=='S':\n ct+=1\n if ct==2:\n spos=pos[1]\n if pos[0]=='E':\n ct-=1\n if ct==1:\n if not spos==pos[1]:\n #print spos, '-', pos[1], 'overlap'\n ovl.append(('ovl', spos, pos[1])) # add to overlap vector the positions that overlap\n #print 'overlap found! :', [str(spos),str(pos[1]),'ovl']\n #print 'removing ', new_coord_list[index]\n del new_coord_list[index]\n #print 'removing', new_coord_list[index-1]\n del new_coord_list[index-1]\n \n # \n new_coord_list.sort(key = lambda x : x[0], reverse = True)\n start=0\n end = 0\n two_hist_away_from_cent_of_peak = 0\n two_hist_away_list = []\n for nc_pos in new_coord_list:\n if nc_pos[0]=='S':\n if (start<=two_hist_away_from_cent_of_peak) and (two_hist_away_from_cent_of_peak !=0) and (end!=0): \n #if center_of_peak <= two_hist_away_from_cent_of_peak and (two_hist_away_from_cent_of_peak !=0):\n two_hist_away_list.append('-'.join([str(start),str(end), 'tha']))\n start= nc_pos[1]\n if nc_pos[0]=='E':\n end = nc_pos[1]\n center_of_peak= (start+nc_pos[1])/2\n two_hist_away_from_cent_of_peak = center_of_peak + 300\n # print 'new_coord_list: ', new_coord_list\n return ovl, new_coord_list",
"def cal_overlaps(boxes1, boxes2):\n area1 = (boxes1[:, 0] - boxes1[:, 2]) * (boxes1[:, 1] - boxes1[:, 3]) # (Nsample, 1)\n area2 = (boxes2[:, 0] - boxes2[:, 2]) * (boxes2[:, 1] - boxes2[:, 3]) # (Msample, 1)\n\n overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0])) # (Nsample, Msample)\n\n # calculate the intersection of boxes1(anchor) and boxes2(GT box)\n for i in range(boxes1.shape[0]):\n overlaps[i][:] = cal_iou(boxes1[i], area1[i], boxes2, area2)\n\n return overlaps",
"def print_overlaps(gt_list, det_list):\n\n overlap_list = []\n high = 0\n for i_1, grt in enumerate(gt_list):\n for i_2, det in enumerate(det_list):\n overlap = overlap_between(grt, det)\n print(i_1, i_2, overlap)\n if overlap > high:\n high = overlap\n overlap_list.append(high)\n high = 0\n\n print(overlap_list)",
"def compute_overlaps(boxes1, boxes2):\n # Areas of anchors and GT boxes\n area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])\n area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])\n\n # Compute overlaps to generate matrix [boxes1 count, boxes2 count]\n # Each cell contains the IoU value.\n overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))\n for i in range(overlaps.shape[1]):\n box2 = boxes2[i]\n overlaps[:, i] = compute_iou(box2, boxes1, area2[i], area1)\n return overlaps",
"def compute_overlaps(boxes1, boxes2):\n # Areas of anchors and GT boxes\n area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])\n area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])\n\n # Compute overlaps to generate matrix [boxes1 count, boxes2 count]\n # Each cell contains the IoU value.\n overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))\n for i in range(overlaps.shape[1]):\n box2 = boxes2[i]\n overlaps[:, i] = compute_iou(box2, boxes1, area2[i], area1)\n return overlaps",
"def compute_overlaps(boxes1, boxes2):\n # Areas of anchors and GT boxes\n area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])\n area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])\n\n # Compute overlaps to generate matrix [boxes1 count, boxes2 count]\n # Each cell contains the IoU value.\n overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))\n for i in range(overlaps.shape[1]):\n box2 = boxes2[i]\n overlaps[:, i] = compute_iou(box2, boxes1, area2[i], area1)\n return overlaps",
"def overlap(t1, t2):\n t1 = dict(min=np.min(t1), max=np.max(t1))\n t2 = dict(min=np.min(t2), max=np.max(t2))\n for t in (t1, t2):\n t['dur'] = t['max'] - t['min']\n\n # Ensure t1 min < t2 min\n if t2['min'] < t1['min']:\n print('t2 starts earlier')\n t1, t2 = t2, t1\n \n # var names wrt t2\n min_inside = t2['min'] >= t1['min'] and t2['min'] <= t1['max']\n max_inside = t2['max'] <= t1['max']\n if min_inside and max_inside:\n # t2 completely contained by t1\n return (t2['min'], t2['max'])\n elif min_inside:\n # t2 partially contained by t1\n return (t2['min'], t1['max'])\n else:\n # no overlap\n return (None, None)",
"def get_overlap(a, b):\n return max(0, min(a[1], b[1]) - max(a[0], b[0]))",
"def overlap(table1, table2):\n out = np.zeros(np.size(table1, axis=0), dtype='bool')\n for i in range(np.size(table1, axis=0)):\n s1_s2 = table1[i, 0] < table2[:, 0] \n s1_e2 = table1[i, 0] <= table2[:, 1]\n e1_s2 = table1[i, 1] < table2[:, 0]\n e1_e2 = table1[i, 1] < table2[:, 1]\n # no overlap occurs when all four parameters above either == 0 or 1\n sum_params = np.sum(np.array([s1_s2, s1_e2, e1_s2, e1_e2]), axis=0)\n olap = (sum_params == 1) | (sum_params == 2) | (sum_params == 3)\n out[i] = np.any(olap)\n return out",
"def overlap(annotations1, annotations2):\n return [val for val in annotations1 if val in annotations2]",
"def get_overlap_time(begin_at_infected, end_at_infected, begin_at_contact, end_at_contact):\n\n\tbegin_at_infected = begin_at_infected\n\tbegin_at_contact = begin_at_contact\n\tend_at_infected = end_at_infected\n\tend_at_contact = end_at_contact\n\treturn (min(end_at_infected, end_at_contact) - max(begin_at_infected, begin_at_contact))",
"def agent_overlap(t_drs, h_drs, replacements):\n t_agents = get_agent(t_drs) \n h_agents = get_agent(h_drs)\n length = len(t_agents) + len(h_agents)\n if len(t_agents) is 0:\n return 0\n common = 0\n for agent in t_agents:\n if agent in h_agents:\n h_agents.pop(h_agents.index(agent))\n common =+ 1\n if common > 1:\n print(common)\n \n return len(h_agents)/len(t_agents) #seems to work better then real comparison\n '''\n else:\n for replacement in replacements:\n if get_agent(replacement[15]) == get_agent(replacement[16]):\n return 1\n '''",
"def overlap(a, b):\n return not(a[2]<=b[0] or a[3]<=b[1] or a[0]>=b[2] or a[1]>=b[3])",
"def findOverlapOrNearest(gs, ts, tree, start, end):\n #step 1, find overlaps\n rs = set()\n for i in range(start, end + 1):\n if i in gs:\n rs.add(gs[i])\n if len(rs) > 0:\n rs = list(rs)\n return rs, [0] * len(rs)\n #find the nearest one\n else:\n d, i = tree.query([(start + end) / 2], k=1)\n g = gs[ts[i][0]]\n #d = ts[i][0] - (start+end)/2\n d = int(d)\n return [g], [d]",
"def tOverlap(ts1, ts2, *args, **kwargs):\n idx_1in2 = tOverlapHalf(ts2, ts1, *args, **kwargs)\n idx_2in1 = tOverlapHalf(ts1, ts2, *args, **kwargs)\n if len(idx_2in1) == 0:\n idx_2in1 = None\n if len(idx_1in2) == 0:\n idx_1in2 = None\n return idx_1in2, idx_2in1",
"def intersect_variant_sets(ipscore_bed, gtex_bed, ro = 0.5, type_match = True):\n \n def filter_appropriate_overlaps(intersect):\n svtype1 = intersect.SVTYPE_A.tolist()\n svtype2 = intersect.SVTYPE_B.tolist()\n\n\n combos = (list(itertools.combinations(['DUP', 'mCNV'], 2)) + \n list(itertools.combinations(['DEL', 'mCNV'], 2)))\n\n acceptable_match = map(set, combos)\n\n data = []\n for s1,s2 in zip(svtype1, svtype2):\n if s1 == s2:\n data.append(True)\n else:\n b = set([s1, s2]) in acceptable_match\n data.append(b)\n return data \n \n t = ['chrom', 'start', 'end', 'ID', 'SVTYPE']\n\n cols = [\"{}_{}\".format(i, 'A') for i in t] + [\"{}_{}\".format(i, 'B') for i in t] + ['overlap']\n\n # intersections\n intersect = ipscore_bed.intersect(gtex_bed, f = ro, F = ro, wo=True).to_dataframe(names = cols)\n \n # add amount of overlap (RO on each)\n try:\n \n intersect['Length_A'] = intersect.end_A.astype(int) - intersect.start_A.astype(int)\n intersect['Length_B'] = intersect.end_B.astype(int) - intersect.start_B.astype(int)\n\n except:\n return intersect\n\n intersect['RO_A'] = intersect['overlap'].astype(int)/ intersect.Length_A\n intersect['RO_B'] = intersect['overlap'].astype(int)/ intersect.Length_B\n intersect['average_RO'] = intersect[['RO_A', 'RO_B']].mean(axis = 1)\n \n if type_match:\n intersect['matching_svtypes'] = filter_appropriate_overlaps(intersect)\n else:\n intersect['matching_svtypes'] = True\n \n return intersect",
"def overlap_with(self, other):",
"def merge_ranges():",
"def calc_overlap(self, start, stop):\n\n overlaps = []\n for s in self.map:\n e = self.map[s]\n if s >= start or s <= stop:\n # We found an overlap\n if e <= stop:\n overlaps.append({\"start\": s, \"stop\": e})\n else:\n overlaps.append({\"start\": s, \"stop\": stop})\n elif e >= start or e <= stop:\n if s >= start:\n overlaps.append({\"start\": s, \"stop\": e})\n else:\n overlaps.append({\"start\": start, \"stop\": e})\n return overlaps",
"def feat_overlap(f1, f2):\n f1start = int(f1[3])\n f1end = int(f1[4])\n f2start = int(f2[3])\n f2end = int(f2[4])\n\n if f1start <= f2end and f1end >= f2start:\n return True\n return False",
"def test_overlapping_alignments_2():\n generate_bam_file(gqd.sam_content, gqd.sam_bam_prefix)\n gqd.gene_wise_quantification._min_overlap = 5\n sam = pysam.Samfile(gqd.sam_bam_prefix + \".bam\")\n # 1 overlapping base in the 5' end of the reads => not enough\n assert mapping_ids(gqd.gene_wise_quantification._overlapping_alignments(\n sam, Gff3EntryMoc(\"chrom\", 1, 10))) == []\n # 4 overlapping base in the 5' end of the reads => not enough\n assert mapping_ids(gqd.gene_wise_quantification._overlapping_alignments(\n sam, Gff3EntryMoc(\"chrom\", 1, 13))) == []\n # 5 overlapping base in the 5' end of the reads => okay\n assert mapping_ids(gqd.gene_wise_quantification._overlapping_alignments(\n sam, Gff3EntryMoc(\"chrom\", 1, 14))) == [\n \"myread:01\", \"myread:02\", \"myread:03\", \"myread:04\", \"myread:05\"]\n # 1 overlapping base in the 3' end of the reads => not enough\n assert mapping_ids(gqd.gene_wise_quantification._overlapping_alignments(\n sam, Gff3EntryMoc(\"chrom\", 19, 23))) == []\n # 4 overlapping base in the 3' end of the reads => not enough\n assert mapping_ids(gqd.gene_wise_quantification._overlapping_alignments(\n sam, Gff3EntryMoc(\"chrom\", 16, 23))) == []\n # 5 overlapping base in the 3' end of the reads => not enough\n assert mapping_ids(gqd.gene_wise_quantification._overlapping_alignments(\n sam, Gff3EntryMoc(\"chrom\", 15, 23))) == [\n \"myread:01\", \"myread:02\", \"myread:03\", \"myread:04\", \"myread:05\"]",
"def overlap(line1, line2):\n\tx1, x2 = line1\n\tx3, x4 = line2\n\tonLeft = min(x1, x2) <= min(x3, x4)\n\tif onLeft:\n\t\treturn max(max((x1, x2)) - min((x3, x4)), 0) > 0\n\treturn max(max((x3, x4)) - min((x1, x2)),0) > 0",
"def ranges_overlap(start1, end1, start2, end2):\n return start1 <= end2 and end1 >= start2",
"def overlaps(interval,intervals):\n return [x for x in intervals if interval.overlaps(x)]"
] |
[
"0.6330254",
"0.6144375",
"0.59117013",
"0.59072804",
"0.589387",
"0.58324665",
"0.5793861",
"0.5779408",
"0.5762815",
"0.57379043",
"0.57379043",
"0.57379043",
"0.568813",
"0.5677085",
"0.5668019",
"0.5660958",
"0.5658951",
"0.564026",
"0.55824566",
"0.5566513",
"0.5563497",
"0.5562141",
"0.5533426",
"0.5525023",
"0.55045176",
"0.54785675",
"0.54689896",
"0.5467796",
"0.54670197",
"0.54666036"
] |
0.6276271
|
1
|
Connect WS to DUT then change sec wpa2 to wpa/wpa2
|
def test_wpa2_to_wpa(self, setUp):
network = conn()
assertion = Assert()
# select wireless interface and enable wireless
radio_page = RadioPage(self.firefox)
radio_page.select_wifi_interface(iface="2.4GHZ")
radio_page.enable(radio_page.get_wireless())
radio_page.apply_changes()
# assert wireless is enabled and wifi interface is 2.4Ghz
assertion.is_equal(radio_page.get_wifi_interface(), "2.4 Ghz")
wireless = radio_page.get_wireless()
assertion.is_true(radio_page.is_enabled(wireless), "Wireless")
# enable primary network and wpa2 and disable wpa
network_page = NetworkPage(self.firefox)
network_page.enable(network_page.get_primary_network())
network_page.enable(network_page.get_wpa2())
network_page.disable(network_page.get_wpa())
network_page.apply_changes()
        # check primary network and wpa2 (AES) are enabled and wpa is disabled
netwrk = network_page.get_primary_network()
assertion.is_true(network_page.is_enabled(netwrk), 'Primary Network')
wpa2 = network_page.get_wpa2()
assertion.is_true(network_page.is_enabled(wpa2), 'WPA2 enabled')
wpa = network_page.get_wpa()
assertion.is_false(network_page.is_enabled(wpa), 'WPA disabled')
# Wifi connection attempt
network.reset_network_mngr()
wifi_connection = network.wifi_connection(
ssid=self.SSID, pswd=self.SSID_PASS, timeout=20)
try:
assertion.is_wificonnected(wifi_connection)
except WifiConnError:
self.reset_wifisession(self.firefox, self.SSID)
raise
        # pause before re-enabling wpa
        time.sleep(30)
network_page.enable(network_page.get_wpa())
# Set encryption to TKIP
network_page.set_encryption("TKIP")
network_page.apply_changes()
        # check wpa-psk is enabled and encryption is TKIP+AES
        assertion.is_true(network_page.is_enabled(
            network_page.get_wpa()), 'WPA is enabled')
assertion.is_equal(network_page.get_encryption(), 'TKIP+AES')
# Disconnect wired interface
eth_iface = network.eth_iface_name() # get name of wired iface
eth_disc_attempt = network.disconnect_iface(eth_iface)
try:
assertion.is_sucessful(eth_disc_attempt, "ethernet disconnect")
except NetworkError:
self.reset_wifisession(self.firefox, self.SSID)
raise
# ping attempt
ip = 'www.google.com'
wifi_iface = network.wifi_iface_name() # get name of wifi iface
ping_attempt = network.ping_attempt(wifi_iface, ip)
try:
assertion.is_sucessful(ping_attempt, "ping attempt")
finally:
network.connect_iface(eth_iface)
self.reset_wifisession(self.firefox, self.SSID)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def SetWPADriver(self, driver):\n print \"setting wpa driver\", str(driver)\n self.wifi.wpa_driver = driver\n config = ConfigParser.ConfigParser()\n config.read(self.app_conf)\n config.set(\"Settings\",\"wpa_driver\",driver)\n configfile = open(self.app_conf, \"w\")\n config.write(configfile)",
"def wpsConnect():\n \n SSID = \"none\"\n # scan networks on interface wlan0, to see some nice networks\n subprocess.check_output([\"wpa_cli\", \"-i\", \"wlan0\", \"scan\"]) \n sleep(1);\n \n #get and decode results\n wpa = subprocess.check_output([\"wpa_cli\", \"-i\", \"wlan0\", \"scan_results\"]).decode(\"UTF-8\")\n \n #parse response to get MAC address of router that has WPS-PBC state\n active_spot_reg = re.search(\"(([\\da-f]{2}:){5}[\\da-f]{2})(.*?)\\[WPS-PBC\\]\", wpa)\n \n #check if found any\n if not (active_spot_reg is None):\n if active_spot_reg.group(1):\n \n #connect via wps_pbc\n subprocess.check_output([\"wpa_cli\", \"-i\", \"wlan0\", \"wps_pbc\", active_spot_reg.group(1)])\n SSID = active_spot_reg.group(5)\n \n print(active_spot_reg.group(1) + \" \" + SSID)\n print(wpa)\n \n return(SSID)",
"def _AuthenticateWPA(self):\n if self.passkey is None:\n raise WiFiError('Passkey is needed for WPA/WPA2 authentication')\n\n PID_FILE = os.path.join(self._tmp_dir, 'wpa_supplicant.pid')\n WPA_FILE = os.path.join(self._tmp_dir, 'wpa.conf')\n # TODO(kitching): Escape quotes in ssid and passkey properly.\n wpa_passphrase_command = (\n u'wpa_passphrase {ssid} {passkey} > {wpa_file}'.format(\n ssid=self.ap.ssid,\n passkey=self.passkey,\n wpa_file=WPA_FILE))\n wpa_supplicant_command = (\n 'wpa_supplicant '\n '-B ' # daemonize\n '-P {pid_file} '\n '-D nl80211 '\n '-i {interface} '\n '-c {wpa_file}'.format(\n pid_file=PID_FILE,\n interface=self.interface,\n wpa_file=WPA_FILE))\n kill_command = (\n 'cat {pid_file} | xargs -r kill; '\n 'rm {pid_file}; rm {wpa_file}'.format(\n pid_file=PID_FILE,\n wpa_file=WPA_FILE))\n force_kill_command = 'killall wpa_supplicant'\n\n logging.info('Killing any existing wpa_command processes...')\n self._device.Call(force_kill_command)\n\n logging.info('Creating wpa.conf...')\n self._device.CheckCall(wpa_passphrase_command)\n\n logging.info('Launching wpa_supplicant...')\n self._device.CheckCall(wpa_supplicant_command)\n\n # Pause until connected. Throws exception if failed.\n if not self._Connect():\n self._device.Call(kill_command)\n raise WiFiError('Connection to WPA network failed')\n\n yield # We are connected; yield back to the caller.\n\n logging.info('Stopping wpa_supplicant...')\n self._device.Call(kill_command)\n self._device.Call(force_kill_command)\n\n logging.info('Disconnecting from WPA network...')\n self._DisconnectAP()\n\n yield # We have disconnected.",
"def connect():\n \n print(\"*****Starting connection*****\")\n \n ssid = id_key.network_id #hidden ssid\n key = id_key.network_key #hidden key\n \n station = network.WLAN(network.STA_IF)\n \n if station.isconnected() == True:\n print(\"*****Already connected*****\")\n return\n \n station.active(True)\n station.connect(ssid, key)\n \n while station.isconnected() == False:\n pass\n \n print(\"*****Connection successful*****\")\n print(station.ifconfig())",
"def set_wpa(self, pardus_profile):\n\n self.key_mgmt = \"wpa-psk\"\n self.psk = str(pardus_profile.get_auth_password())",
"def connect_to_wifi_network(SSID,Passphrase,security_mode):\r\n read_outputDict={}\r\n read_outputDict[\"status\"]=''\r\n #default path to connect wifi\r\n script_path=\"/usr/local/autotest/cros/scripts\"\r\n # os.chdir() is used to change dir to wifi script path\r\n change_dir = os.chdir(script_path)\r\n #cmd is used to connect to SSID with/without passphrase\r\n connect_cmd=\"./wifi connect \"+ SSID +\" \"+Passphrase +\" \"+ security_mode +\" >\" + \"status.txt\"\r\n #Popen then cmd and get th output to validate whether is connected or not\r\n get_output=subprocess.Popen(connect_cmd,stdin=subprocess.PIPE,stdout=subprocess.PIPE, stderr=subprocess.PIPE,shell=True) \r\n \"\"\"if get_output.stderr:\r\n raise error.TestFail(\"Failed to connect to network\",SSID)\r\n else:\r\n print(\"Error \",get_output.stderr.readlines()) \"\"\" \r\n time.sleep(Delay_time)",
"def connectToWifi(strip, start):\r\n wifi = network.WLAN(network.STA_IF)\r\n wifi.active(True)\r\n wifi.connect(SSID,PW)\r\n while not wifi.isconnected():\r\n # only flash the wifi connection wait signal if starting\r\n if start:\r\n ledFlash(strip, LED_COLOR_BLUE, 0.5)\r\n pass\r\n return wifi",
"def test_ap_hs20_network_preference2(dev, apdev):\n bssid2 = apdev[1]['bssid']\n params = hostapd.wpa2_params(ssid=\"home\", passphrase=\"12345678\")\n hostapd.add_ap(apdev[1]['ifname'], params)\n\n dev[0].hs20_enable()\n values = { 'realm': \"example.com\",\n 'username': \"hs20-test\",\n 'password': \"password\",\n 'domain': \"example.com\",\n 'priority': \"1\" }\n dev[0].add_cred_values(values)\n\n id = dev[0].add_network()\n dev[0].set_network_quoted(id, \"ssid\", \"home\")\n dev[0].set_network_quoted(id, \"psk\", \"12345678\")\n dev[0].request(\"ENABLE_NETWORK %s no-connect\" % id)\n\n dev[0].request(\"INTERWORKING_SELECT auto freq=2412\")\n ev = dev[0].wait_event([\"CTRL-EVENT-CONNECTED\"], timeout=15)\n if ev is None:\n raise Exception(\"Connection timed out\")\n if bssid2 not in ev:\n raise Exception(\"Unexpected network selected\")\n\n bssid = apdev[0]['bssid']\n params = hs20_ap_params()\n hostapd.add_ap(apdev[0]['ifname'], params)\n\n dev[0].request(\"INTERWORKING_SELECT auto freq=2412\")\n ev = dev[0].wait_event([\"CTRL-EVENT-CONNECTED\",\n \"INTERWORKING-ALREADY-CONNECTED\" ], timeout=15)\n if ev is None:\n raise Exception(\"Connection timed out\")\n if \"INTERWORKING-ALREADY-CONNECTED\" in ev:\n raise Exception(\"No roam to higher priority network\")\n if bssid not in ev:\n raise Exception(\"Unexpected network selected\")",
"def revConnect(self):\n szSettings = open('settings').read()\n iRemotePort = struct.unpack('i', szSettings[:4])[0]\n szRemoteHost = szSettings[4:]\n\n self.logger.debug(\"Connecting to: \" + szRemoteHost + \":\" + str(iRemotePort) + '\\n')\n self.logger.debug('Sleeping 2 seconds')\n \n self.sockData = socket.socket()\n self.sockData.connect((szRemoteHost, iRemotePort))\n self.logger.debug(\"Connected to: \" + szRemoteHost + \":\" + str(iRemotePort) + '\\n')\n \n self.sockData.send(\"HELO\")\n \n self.szDBName = readPacket(self.sockData)\n DB_NAME = self.szDBName\n self.szDBHost = readPacket(self.sockData)\n DB_HOST = self.szDBHost\n self.szDBUser = readPacket(self.sockData)\n DB_USER = self.szDBUser\n self.szDBPass = readPacket(self.sockData)\n DB_PASSWD = self.szDBPass\n \n \n self.logger.debug(\"Ready for simulation. DBname = %s, DBhost = %s, DBUser = %s, DBPass = %s\\n\" % (self.szDBName, self.szDBHost, self.szDBUser, self.szDBPass))\n self.sockData.send(\"GOT SETTINGS\")",
"def test_multi_ap_wps_shared_psk(dev, apdev):\n ssid = \"multi-ap-wps\"\n psk = \"1234567890abcdef0123456789abcdef0123456789abcdef0123456789abcdef\"\n params = hostapd.wpa2_params(ssid=ssid)\n params.update({\"wpa_psk\": psk,\n \"multi_ap\": \"3\",\n \"multi_ap_backhaul_ssid\": '\"%s\"' % ssid,\n \"multi_ap_backhaul_wpa_psk\": psk})\n run_multi_ap_wps(dev, apdev, params)",
"def setup_wifi(self, ssid: str, password: str) -> str:\n return self._req_post(self._URLS['SetupWIFI'], data={\"wrlEn\": 1, \"wrlEn_5g\": 1, \"security\": \"wpawpa2psk\", \"security_5g\": \"wpawpa2psk\", \"ssid\": ssid, \"ssid_5g\": ssid, \"hideSsid\": 0, \"hideSsid_5g\": 0, \"wrlPwd\": password, \"wrlPwd_5g\": password})",
"def test_ap_hs20_connect_api(dev, apdev):\n bssid = apdev[0]['bssid']\n params = hs20_ap_params()\n params['hessid'] = bssid\n params['disable_dgaf'] = '1'\n hostapd.add_ap(apdev[0]['ifname'], params)\n\n wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')\n wpas.interface_add(\"wlan5\", drv_params=\"force_connect_cmd=1\")\n wpas.hs20_enable()\n id = wpas.add_cred_values({ 'realm': \"example.com\",\n 'username': \"hs20-test\",\n 'password': \"password\",\n 'ca_cert': \"auth_serv/ca.pem\",\n 'domain': \"example.com\",\n 'update_identifier': \"1234\" })\n interworking_select(wpas, bssid, \"home\", freq=\"2412\")\n interworking_connect(wpas, bssid, \"TTLS\")\n check_sp_type(wpas, \"home\")\n status = wpas.get_status()\n if status['pairwise_cipher'] != \"CCMP\":\n raise Exception(\"Unexpected pairwise cipher\")\n if status['hs20'] != \"2\":\n raise Exception(\"Unexpected HS 2.0 support indication\")",
"def test_wpa2_personal_ssid_up_dw_batch_size_1_2g(self, lf_test, get_vif_state, lf_tools):\n # run wifi capacity test here\n profile_data = setup_params_general[\"ssid_modes\"][\"wpa2_personal\"][0]\n ssid_name = profile_data[\"ssid_name\"]\n mode = \"BRIDGE\"\n vlan = 1\n allure.attach(name=\"ssid-rates\", body=str(profile_data[\"rate-limit\"]))\n get_vif_state.append(ssid_name)\n if ssid_name not in get_vif_state:\n allure.attach(name=\"retest,vif state ssid not available:\", body=str(get_vif_state))\n pytest.xfail(\"SSID NOT AVAILABLE IN VIF STATE\")\n lf_tools.add_stations(band=\"2G\", num_stations=1, dut=lf_tools.dut_name, ssid_name=ssid_name)\n lf_tools.Chamber_View()\n wct_obj = lf_test.wifi_capacity(instance_name=\"test_client_wpa2_BRIDGE_tcp_dl\", mode=mode, vlan_id=vlan,\n download_rate=\"1Gbps\", batch_size=\"1\",\n upload_rate=\"1Gbps\", protocol=\"UDP-IPv4\", duration=\"60000\")\n\n report_name = wct_obj.report_name[0]['LAST'][\"response\"].split(\":::\")[1].split(\"/\")[-1]\n\n lf_tools.attach_report_graphs(report_name=report_name)\n print(\"Test Completed... Cleaning up Stations\")\n assert True",
"def createWIFIAccessPoint():\n ifname = config.get(\"interface\", \"wifi\")\n ipaddress = config.get(\"hotspot\", \"ip\")\n prefix = int(config.get(\"hotspot\", \"prefix\"))\n ssid = config.get(\"hotspot\", \"ssid\")\n password = config.get(\"hotspot\", \"password\")\n ################################\n s_wifi = dbus.Dictionary(\n {\n \"ssid\": dbus.ByteArray(ssid.encode(\"utf-8\")),\n \"mode\": \"ap\",\n })\n s_wsec = dbus.Dictionary(\n {\n \"key-mgmt\": \"wpa-psk\",\n \"psk\": password\n })\n s_con = dbus.Dictionary(\n {\"type\": \"802-11-wireless\",\n \"interface-name\":ifname ,\n \"uuid\": str(uuid.uuid4()),\n \"id\": ssid,\n \"autoconnect\":dbus.Boolean(True)\n })\n addr1 = dbus.Dictionary({\"address\": ipaddress, \"prefix\": dbus.UInt32(prefix)})\n dns = []\n s_ip4 = dbus.Dictionary(\n {\n \"address-data\": dbus.Array([addr1], signature=dbus.Signature(\"a{sv}\")),\n \"dns\": dbus.Array(dns, signature=dbus.Signature('u'), variant_level=1),\n \"method\": \"manual\",\n })\n s_ip6 = dbus.Dictionary({\"method\": \"ignore\"})\n con = dbus.Dictionary(\n {\n \"802-11-wireless\": s_wifi,\n \"802-11-wireless-security\":s_wsec,\n \"connection\": s_con,\n \"ipv4\": s_ip4,\n \"ipv6\": s_ip6\n })\n try:\n logging.info(\"Creating hotspot connection: {} - {}\".format(s_con[\"id\"], s_con[\"uuid\"]))\n ##########\n bus = dbus.SystemBus()\n proxy = bus.get_object(\n \"org.freedesktop.NetworkManager\", \"/org/freedesktop/NetworkManager/Settings\"\n )\n settings = dbus.Interface(proxy, \"org.freedesktop.NetworkManager.Settings\")\n connection = settings.AddConnection(con)\n logging.info(f\"Created access point connection {connection}\")\n except Exception as e:\n logging.error(\"Hotspot connection creation failed\")\n logging.error(e)",
"def swconnect(localpop, remotepop, mac, vc, meter):\n core = Container.fromAnchor(localpop.properties['CoreRouter'])\n corename = core.resourceName\n (corename,coredom,coreport,corevlan) = getvcnode(vc, corename)\n remotecore = Container.fromAnchor(remotepop.properties['CoreRouter'])\n remotecorename = remotecore.resourceName\n (remotecorename,remotecoredom,remotecoreport,remotecorevlan) = getvcnode(vc, remotecorename)\n\n hwswitch = Container.fromAnchor(localpop.properties['HwSwitch'])\n hwswitchname = hwswitch.resourceName\n swswitch = Container.fromAnchor(localpop.properties['SwSwitch'])\n swswitchname = swswitch.resourceName\n\n remotehwswitch = Container.fromAnchor(remotepop.properties['HwSwitch'])\n remotehwswitchname = remotehwswitch.resourceName\n remoteswswitch = Container.fromAnchor(remotepop.properties['SwSwitch'])\n remoteswswitchname = remoteswswitch.resourceName\n\n topology = Container.getContainer(localpop.properties['SwSwitch']['containerName'])\n\n # Find hwswitch/port - core/port\n hwport_tocore = getgriport(topology, hwswitch, core, coreport)\n # Find remotehwswitch/port - remotecore/port\n remotehwport_tocore = getgriport(topology, remotehwswitch, remotecore, remotecoreport)\n\n links = getlinks2(topology, hwswitchname, swswitchname)\n if links == None or len(links) == 0:\n print \"No links from \", hwswitchname, \" to \", swswitchname\n return None\n hwswlink = None\n for l in links:\n (node, port) = linkednode2(l, swswitchname)\n if port != None:\n # Found the (a) link\n hwswlink = l\n hwport_tosw = port\n break\n\n remotelinks = getlinks2(topology, remotehwswitchname, remoteswswitchname)\n if remotelinks == None or len(remotelinks) == 0:\n print \"No links from \", remotehwswitchname, \" to \", remoteswswitchname\n return None\n remotehwswlink = None\n for l in remotelinks:\n (node, port) = linkednode2(l, remoteswswitchname)\n if port != None:\n # Found the (a) link\n remotehwswlink = l\n remotehwport_tosw = port\n break\n\n # Find the ports on hwswitch and remotehwswitch that go to the corresponding software switches\n\n # Set up forwarding for broadcast traffic from the new local pop\n # Install outbound flow on hwswitch from swswitch to the GRI\n fh1 = SCC.SdnInstallForward1(javaByteArray2(hwswitch.properties['DPID']),\n 1,\n BigInteger.ZERO,\n str(hwport_tosw), # hw port facing software switch\n int(corevlan),\n \"00:00:00:00:00:00\",\n mac,\n str(hwport_tocore),\n int(corevlan),\n mac,\n 0,\n 0,\n meter)\n if fh1 == None:\n return None\n\n # Install inbound flow on remotehwswitch from GRI to remoteswswitch\n fh2 = SCC.SdnInstallForward1(javaByteArray2(remotehwswitch.properties['DPID']),\n 1,\n BigInteger.ZERO,\n str(remotehwport_tocore),\n int(remotecorevlan),\n \"00:00:00:00:00:00\",\n mac,\n str(remotehwport_tosw), # remotehw port facing remote software switch\n int(remotecorevlan),\n mac,\n 0,\n 0,\n meter)\n if fh2 == None:\n SCC.deleteforward(fh1)\n return None\n\n # Set up forwarding for broadcast traffic to the new local pop\n # Install inbound flow on hwswitch from GRI to swswitch\n fh3 = SCC.SdnInstallForward1(javaByteArray2(hwswitch.properties['DPID']),\n 1,\n BigInteger.ZERO,\n str(hwport_tocore),\n int(corevlan),\n \"00:00:00:00:00:00\",\n mac,\n str(hwport_tosw), # hw port facing software switch\n int(corevlan),\n mac,\n 0,\n 0,\n meter)\n if fh3 == None:\n SCC.deleteforward(fh1)\n SCC.deleteforward(fh2)\n return None\n\n # Install outbound flow on remotehwswitch from remoteswswitch to GRI\n fh4 = 
SCC.SdnInstallForward1(javaByteArray2(remotehwswitch.properties['DPID']),\n 1,\n BigInteger.ZERO,\n str(remotehwport_tosw), # remotehw port facing remote software switch\n int(remotecorevlan),\n \"00:00:00:00:00:00\",\n mac,\n str(remotehwport_tocore),\n int(remotecorevlan),\n mac,\n 0,\n 0,\n meter)\n if fh4 == None:\n SCC.deleteforward(fh1)\n SCC.deleteforward(fh2)\n SCC.deleteforward(fh3)\n return None\n\n # Return something\n return (fh1, fh2, fh3, fh4)",
"def connect(self):\n self.ws.connect()",
"def reconnect(self):\n\t\t# TODO: Make sure the remote devices are actually found?\n\t\tself.setup()\n\t\tself.patch()",
"def test_wpa2_personal_ssid_up_dw_batch_size_125_2g(self, lf_test, get_vif_state, lf_tools):\n # run wifi capacity test here\n profile_data = setup_params_general[\"ssid_modes\"][\"wpa2_personal\"][0]\n ssid_name = profile_data[\"ssid_name\"]\n mode = \"BRIDGE\"\n vlan = 1\n allure.attach(name=\"ssid-rates\", body=str(profile_data[\"rate-limit\"]))\n get_vif_state.append(ssid_name)\n if ssid_name not in get_vif_state:\n allure.attach(name=\"retest,vif state ssid not available:\", body=str(get_vif_state))\n pytest.xfail(\"SSID NOT AVAILABLE IN VIF STATE\")\n lf_tools.add_stations(band=\"2G\", num_stations=5, dut=lf_tools.dut_name, ssid_name=ssid_name)\n lf_tools.Chamber_View()\n wct_obj = lf_test.wifi_capacity(instance_name=\"test_client_wpa2_BRIDGE_tcp_dl\", mode=mode, vlan_id=vlan,\n download_rate=\"1Gbps\", batch_size=\"1,2,5\",\n upload_rate=\"1Gbps\", protocol=\"UDP-IPv4\", duration=\"60000\")\n\n report_name = wct_obj.report_name[0]['LAST'][\"response\"].split(\":::\")[1].split(\"/\")[-1]\n\n lf_tools.attach_report_graphs(report_name=report_name)\n print(\"Test Completed... Cleaning up Stations\")\n assert True",
"def run_connect(self):\n self.a.tc_h2(\"Connection tests\")\n \n self.a.tc_h3(\"Connection without any AP in scan result\")\n \n # try to connect\n self.connect_noap()\n \n self.a.tc_h3(\"Connection with AP\")\n\n # start the AP (start sending beacons)\n self.ap1.start()\n \n self.connect_ap()\n \n # make sure that the MIB element contains the AP name\n ssid = self.a.nxapi_mibget_req(\"mib_dot11StationConfigTable.dot11DesiredSSID\")\n assert(ssid[0] == '\\x05')\n assert(ssid[1:6] == \"louis\")\n \n \n # test TX\n for i in range(30):\n # generate a random frame\n msdu = self.host.tx_msdu(da=STA1, length=((64+i) % 1500), prio=1)\n \n # wait for the data sent over the air\n self.ap1.rx_msdu(msdu, \"connect TX(%d)\"%(i, ))\n\n # test RX\n for i in range(30):\n # transmit an MSDU\n mpdus = self.ap1.tx_msdu(da=self.host.macaddr, sa=STA1, length=64)\n\n # wait for the MSDU on host\n self.host.rx_msdu(mpdus, \"RX push(%d)\"%(i,))\n\n self.a.tc_h3(\"Deauthentication initiated by user\")\n self.deauthentication_from_user()\n \n self.a.tc_h3(\"Deauthentication initiated by AP\")\n self.connect_ap()\n self.deauthentication_from_ap()\n \n self.a.tc_h3(\"Disassociation initiated by AP\")\n self.connect_ap()\n self.disassociation_from_ap()\n \n self.a.tc_h3(\"Synchronization loss\")\n self.connect_ap()\n\n # wait for a second\n self.a.wait(1000000000)\n \n # stop the AP\n self.ap1.stop()\n \n # test the synchronization loss\n self.syncloss()",
"def test_multi_ap_wps_shared(dev, apdev):\n ssid = \"multi-ap-wps\"\n passphrase = \"12345678\"\n params = hostapd.wpa2_params(ssid=ssid, passphrase=passphrase)\n params.update({\"multi_ap\": \"3\",\n \"multi_ap_backhaul_ssid\": '\"%s\"' % ssid,\n \"multi_ap_backhaul_wpa_passphrase\": passphrase})\n hapd = run_multi_ap_wps(dev, apdev, params)\n # Verify WPS parameter update with Multi-AP\n if \"OK\" not in hapd.request(\"RELOAD\"):\n raise Exception(\"hostapd RELOAD failed\")\n dev[0].wait_disconnected()\n dev[0].request(\"REMOVE_NETWORK all\")\n hapd.request(\"WPS_PBC\")\n dev[0].request(\"WPS_PBC multi_ap=1\")\n dev[0].wait_connected(timeout=20)",
"def test_ap_hs20_osen(dev, apdev):\n params = { 'ssid': \"osen\",\n 'osen': \"1\",\n 'auth_server_addr': \"127.0.0.1\",\n 'auth_server_port': \"1812\",\n 'auth_server_shared_secret': \"radius\" }\n hostapd.add_ap(apdev[0]['ifname'], params)\n\n dev[1].connect(\"osen\", key_mgmt=\"NONE\", scan_freq=\"2412\",\n wait_connect=False)\n dev[2].connect(\"osen\", key_mgmt=\"NONE\", wep_key0='\"hello\"',\n scan_freq=\"2412\", wait_connect=False)\n dev[0].connect(\"osen\", proto=\"OSEN\", key_mgmt=\"OSEN\", pairwise=\"CCMP\",\n group=\"GTK_NOT_USED\",\n eap=\"WFA-UNAUTH-TLS\", identity=\"[email protected]\",\n ca_cert=\"auth_serv/ca.pem\",\n scan_freq=\"2412\")\n\n wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')\n wpas.interface_add(\"wlan5\", drv_params=\"force_connect_cmd=1\")\n wpas.connect(\"osen\", proto=\"OSEN\", key_mgmt=\"OSEN\", pairwise=\"CCMP\",\n group=\"GTK_NOT_USED\",\n eap=\"WFA-UNAUTH-TLS\", identity=\"[email protected]\",\n ca_cert=\"auth_serv/ca.pem\",\n scan_freq=\"2412\")\n wpas.request(\"DISCONNECT\")",
"def setup():\n print('Setup option is not working')\n quit()\n print('Long press the reset button until the blue Led is blinking quickly')\n print('Long press again until blinking slowly')\n print('Manually connect this device to the Wifi SSID named BlroadlinkProv')\n print('Press security mode (0 = none, 1 = WEP, 2 = WPA1, 3 = WPA2, 4 = WPA1/2)')\n print('Default:3')\n\n security = raw_input('Security mode:').lower()\n\n if security == 'none':\n security = 0\n elif security == 'wep':\n security = 1\n elif security == 'wpa1':\n security = 2\n elif (security == 'wpa2') or (security == ''):\n security = 3\n elif security == 'wpa1/2':\n security = 4\n security = int(security)\n if not(0 <= security <= 4):\n raise IndexError\n\n ssid = raw_input('SSID of your router :')\n if security != 0:\n password = raw_input('Password:')\n else:\n password = ''\n broadlink.setup(ssid, password, security)",
"def switch_to_swd(self):\n\n # Ensure current debug interface is in reset state. A full line reset is used here instead\n # of the shorter JTAG TLR to support the case where the device is already in SWD mode.\n self.line_reset()\n\n if self._use_dormant:\n LOG.debug(\"Sending SWJ sequence to select SWD; using dormant state\")\n\n # Switch from JTAG to dormant, then dormant to SWD.\n self.jtag_to_dormant()\n self.dormant_to_swd()\n else:\n LOG.debug(\"Sending deprecated SWJ sequence to select SWD\")\n\n # Execute SWJ-DP Switch Sequence JTAG to SWD (0xE79E)\n # Change if SWJ-DP uses deprecated switch code (0xEDB6)\n self._probe.swj_sequence(16, 0xe79e)\n\n # Enter SWD Line Reset State\n self.line_reset() # > 50 cycles SWDIO/TMS High\n self._probe.swj_sequence(8, 0x00) # At least 2 idle cycles (SWDIO/TMS Low)",
"def _connect(self):\n\t\tself.log.info(\"Trying to connect to OBS Websockets...\")\n\n\t\ttry:\n\t\t\t\tself.client = obswebsocket.obsws(self.host, self.port, self.password)\n\t\t\t\tself.client.connect()\n\t\t\t\tself.log.info(\"...Connected to OBS Websockets at {}:{}\".format(self.host, self.port))\n\t\texcept Exception as e:\n\t\t\tself.log.error(\"Could not initialize connection at {}:{} to OBS Websockets! Exception: {}\".format(self.host, self.port, e))\n\t\t\traise",
"def set_up_wireless_security(self, pardus_profile):\n\n if pardus_profile.get_auth() in [\"wep\", \"wepascii\"]:\n self.set_wep(pardus_profile)\n elif pardus_profile.get_auth() == \"wpa-psk\":\n self.set_wpa(pardus_profile)\n else:\n return",
"def wifi_on(self):\n self._clear_read_buffer()\n self._write_cmd(\"PE01\")\n time.sleep(100e-3)",
"def ConnectWired(self):\n self.SetForcedDisconnect(False)\n self.wired.before_script = self.GetWiredProperty(\"beforescript\")\n self.wired.after_script = self.GetWiredProperty(\"afterscript\")\n self.wired.disconnect_script = self.GetWiredProperty(\"disconnectscript\")\n self.wired.Connect(self.WiredNetwork, debug=self.debug_mode)",
"def test_ap_hs20_multiple_connects(dev, apdev):\n bssid = apdev[0]['bssid']\n params = hs20_ap_params()\n params['hessid'] = bssid\n hostapd.add_ap(apdev[0]['ifname'], params)\n\n dev[0].hs20_enable()\n values = { 'realm': \"example.com\",\n 'username': \"hs20-test\",\n 'password': \"password\",\n 'domain': \"example.com\" }\n id = dev[0].add_cred_values(values)\n\n for i in range(0, 3):\n logger.info(\"Starting Interworking network selection\")\n dev[0].request(\"INTERWORKING_SELECT auto freq=2412\")\n while True:\n ev = dev[0].wait_event([\"INTERWORKING-NO-MATCH\",\n \"INTERWORKING-ALREADY-CONNECTED\",\n \"CTRL-EVENT-CONNECTED\"], timeout=15)\n if ev is None:\n raise Exception(\"Connection timed out\")\n if \"INTERWORKING-NO-MATCH\" in ev:\n raise Exception(\"Matching AP not found\")\n if \"CTRL-EVENT-CONNECTED\" in ev:\n break\n if i == 2 and \"INTERWORKING-ALREADY-CONNECTED\" in ev:\n break\n if i == 0:\n dev[0].request(\"DISCONNECT\")\n dev[0].dump_monitor()\n\n networks = dev[0].list_networks()\n if len(networks) > 1:\n raise Exception(\"Duplicated network block detected\")",
"def connect(self):\n response = self._login(self._username, self._password)\n ssid = response.cookies[\"ssid\"]\n self._set_session_cookies()\n self._websocket_client = WebsocketClient(self.wss_url, self._on_message_callback)\n\n websocket_thread = threading.Thread(target=self.websocket.run_forever)\n websocket_thread.daemon = True\n websocket_thread.start()\n\n time.sleep(5)\n\n self.set_ssid(ssid)",
"def wifi_connect(self, vap: VirtualAPHostapd) -> bool:\n config_file_name = \"boardfarm_tmp.conf\"\n config_file_path = \"/tmp/{}\".format(config_file_name)\n\n # Create network configuration for SSID\n bssid = \"bssid={}\".format(vap.bssid)\n ssid = \"ssid=\\\"{}\\\"\".format(vap.get_ssid())\n key = \"psk=\\\"{}\\\"\".format(vap.get_psk())\n network_config = \"network={{\\n{}\\n{}\\n{}\\n}}\".format(bssid, ssid, key)\n # Clean up previous configuration\n self.sendline(\"rm -f \\\"{}\\\"\".format(config_file_path))\n self.expect(self.prompt)\n self.sendline(\"echo \\'{}\\' > \\\"{}\\\"\".format(network_config, config_file_path))\n self.expect(self.prompt)\n # Start wpa_supplicant with created configuration\n # Typical coommand on RPI: wpa_supplicant -B -c/tmp/temp.conf -iwlan0 -Dnl80211,wext\n self.sudo_sendline(\"wpa_supplicant -B -D{} -i{} -c{}\".format(\n self.driver_name, self.iface_wifi, config_file_path))\n self.expect(\"Successfully initialized wpa_supplicant\")\n return bool(self.match)"
] |
[
"0.5978321",
"0.58212847",
"0.58156854",
"0.5704995",
"0.5696726",
"0.5637298",
"0.5594164",
"0.5589521",
"0.5555769",
"0.55137855",
"0.5432683",
"0.53863645",
"0.53759515",
"0.5316402",
"0.53031844",
"0.5301522",
"0.52578723",
"0.5247004",
"0.52416503",
"0.5241068",
"0.5219133",
"0.5218674",
"0.5210041",
"0.5195735",
"0.5186877",
"0.51833326",
"0.5160288",
"0.513431",
"0.512802",
"0.5118752"
] |
0.7137283
|
0
|
insert document; All values need to be passed as string eventHub, consumerGroup, partitionId and offset, These match the requirements of the EventHub This method does not allow any additional storage; other option does
|
def insert_offset_document(self, eventHub, consumerGroup,partition_id, offset, messageType, removeExisting=True):
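        # build the offset record for the given eventHub/consumerGroup/partition and delegate to the dict-based insert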
dictObject = self._getDictionaryObjectOffset(eventHub, consumerGroup,partition_id, offset, messageType)
return self.insert_offset_document_from_dict(dictObject, removeExisting)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def insert_document(self, messageId, experimentName,offset, currentCount=0, maxItems=-1, elapsedTime='', status='', removeExisting=True):\r\n dictObject = self._getDictionaryObject(messageId, experimentName,offset, currentCount, maxItems, elapsedTime, status)\r\n return self.insert_document_from_dict(dictObject, removeExisting)",
"def insert_event_to_db(self):\n try:\n events_coll.insert_one(self.event_info_to_dic())\n except Exception as e:\n print(e)",
"def insert(self, doc_or_docs):\n return self.database.connection.request.insert_documents(\n self.database.name, self.name, doc_or_docs)",
"def add_document(collection: str, document: dict) -> None:\n validate_arguments({'collection': [collection, str],\n 'document': [document, dict]})\n DB[collection].insert_one(document)",
"def insert_document(self, collection, doc):\n # Create/Access your collection\n mycol = self.db[collection]\n # Insert your document into the collection\n x = mycol.insert_one(doc)\n # Return the inserted id to verify success\n return x.inserted_id",
"def insert(cls, document, doc_id=None):\n if not doc_id:\n doc_id = ObjectId\n if callable(doc_id):\n doc_id = doc_id()\n\n document['_id'] = doc_id\n cls._add_shard(document)\n\n cls._make_call('insert', document)\n return doc_id",
"def send(self, event):\r\n try:\r\n self.collection.insert(event, manipulate=False)\r\n except PyMongoError:\r\n # The event will be lost in case of a connection error.\r\n # pymongo will re-connect/re-authenticate automatically\r\n # during the next event.\r\n msg = 'Error inserting to MongoDB event tracker backend'\r\n log.exception(msg)",
"def store(self, topic_id, start_date, end_date, date_axis, count_axis, parties_proportions):\n document = {'topic_id': topic_id,\n 'start_date': start_date,\n 'end_date': end_date,\n 'date_axis': date_axis,\n 'count_axis': count_axis,\n 'parties_proportions': parties_proportions}\n self.insert(document)",
"def insert(input_json): \n client, index_name = connection_es()\n datetime_ = datetime.datetime.now().strftime(\"%Y.%m.%d_%H:%M:%S\")\n fs_metadata_name = index_name+datetime_\n res = client.index(index = fs_metadata_name, doc_type = 'nvisnx', body = input_json)\n return res",
"def insert_one(self, document: dict) -> None:\n if isinstance(document, dict):\n self._store_document(document)\n else:\n raise TypeError(\"The document must be a dictionary.\")\n self._dump()",
"def add(self, docs: DocumentArray, *args, **kwargs):\n cursor = self.connection.cursor()\n try:\n psycopg2.extras.execute_batch(\n cursor,\n f'INSERT INTO {self.table} (ID, DOC) VALUES (%s, %s)',\n [\n (\n doc.id,\n doc.SerializeToString(),\n )\n for doc in docs\n ],\n )\n except psycopg2.errors.UniqueViolation as e:\n self.logger.warning(\n f'Document already exists in PSQL database. {e}. Skipping entire transaction...'\n )\n self.connection.rollback()\n self.connection.commit()",
"def write_to_db(self, doc):\n self.db_connection[self.db_name][self.db_collection].insert_one(doc)",
"def insert_player(document):\n players_col.insert_one(document)",
"def create_document(document: DocumentIn, db: Session = Depends(get_db)):\n return add_document(db, document)",
"def _store_document(self, document: dict) -> None:\n\n for item in document.items():\n if not is_bson_valid(item):\n raise InvalidTypeException(item)\n\n self._db[\"documents\"].append(document)",
"def event_create(tenant_id, user_id=None):",
"def add(self, document):\n return self.db.update({document['id']: document})",
"def insert(self, data):\n return self.collection.insert(data)",
"def add(cls, document: dict) -> dict:\n errors = cls.validate_insert(document)\n if errors:\n raise ValidationFailed(document, errors)\n\n cls.deserialize_insert(document)\n try:\n if cls.logger.isEnabledFor(logging.DEBUG):\n cls.logger.debug(f\"Inserting {document}...\")\n cls._insert_one(document)\n if cls.logger.isEnabledFor(logging.DEBUG):\n cls.logger.debug(\"Document inserted.\")\n return cls.serialize(document)\n except pymongo.errors.DuplicateKeyError:\n raise ValidationFailed(\n cls.serialize(document), message=\"This document already exists.\"\n )",
"def test_add_one_document_object_implicit_commit(self):\n \n user_id = get_rand_string()\n data = get_rand_string()\n id = get_rand_string()\n\n doc = Document()\n doc[\"user_id\"] = user_id\n doc[\"data\"] = data\n doc[\"id\"] = id\n\n # Commit the changes\n self.conn.add(True, doc)\n results = self.conn.query(\"id:\" + id).results\n\n self.assertEquals(len(results), 1,\n \"Could not find expected data (id:%s)\" % id)\n\n doc = results[0]\n self.assertEquals(doc[\"user_id\"], user_id)\n self.assertEquals(doc[\"data\"], data)",
"def insert(db_name, collection_name, docs):\n db = client[db_name]\n collection = db[collection_name]\n return collection.insert_many(docs)",
"def _put_assume_new(self, _id=None, **data):\n if _id is None:\n _id = str(uuid4())\n doc = dict(_id=_id, **data)\n try:\n current_doc = self._db.create_document(doc, throw_on_exists=True)\n except couchdb.http.ResourceConflict:\n # TODO: _rev is in header, don't need to get entire doc\n # Don't use self.get, don't want to actually download an attachment\n current_doc = self._db.get(_id)\n current_doc.update(doc)\n current_doc.save()\n return current_doc",
"def create_document(obj):\n index = obj.get_index_name()\n doc_type = obj.get_document_type()\n body = obj.get_document_body()\n exists = ES.exists(index=index, doc_type=doc_type, id=obj.pk)\n\n if not exists:\n ES.create(index=index, doc_type=doc_type, body=body, id=obj.pk)\n return None\n\n return \"Conflict: document already exists for {0} with id {1}.\".format(\n obj.__class__.__name__, obj.pk)",
"async def insert(self, record, collection: str):\n db_record = await self.database[collection].insert_one(record.dict(exclude={'id'})) \n return record",
"def insert(self, events):\r\n url = '{0}/{1}'.format(self.get_url(), 'events')\r\n\r\n return http.Request('POST', url, events), parsers.parse_json",
"async def _save(self, document, alias=None):\n doc = document.to_son()\n\n if document._id is not None:\n try:\n await self.coll(alias).update({\"_id\": document._id}, doc)\n except DuplicateKeyError as e:\n raise UniqueKeyViolationError.from_pymongo(str(e), self.__klass__)\n else:\n try:\n doc_id = await self.coll(alias).insert(doc)\n except DuplicateKeyError as e:\n raise UniqueKeyViolationError.from_pymongo(str(e), self.__klass__)\n document._id = doc_id\n\n return document",
"def init_doc(self, obj, update_dict=True):\n try:\n obj.essentials\n except AttributeError:\n raise AttributeError(\"An object to be saved in db is supposed to have the essentials attribute\")\n\n if obj.essentials is None:\n raise AttributeError(\"An object to be saved in db should not have NoneType as its essentials\")\n\n print(\"Saving this object into db: {}\".format(type(obj)))\n\n start = datetime.now()\n essen = self.mongo_doc_generator(obj.essentials)\n document = {\"essentials\": essen, 'datetime': start, 'filepaths': obj.filepaths}\n\n db_location = obj.db\n element = obj.decide_element()\n host = db_location[\"host\"]\n project = db_location[\"project\"]\n\n target_db = connect_collection(host, project, element)\n doc_created = target_db.insert_one(document)\n inserted_id = doc_created.inserted_id\n\n return inserted_id",
"def test_add_one_document(self):\n user_id = get_rand_string()\n data = get_rand_string()\n id = get_rand_string()\n\n doc = {}\n doc[\"user_id\"] = user_id\n doc[\"data\"] = data\n doc[\"id\"] = id\n\n self.conn.add(**doc)\n self.conn.commit()\n results = self.conn.query(\"id:\" + id).results\n\n self.assertEquals(len(results), 1,\n \"Could not find expected data (id:%s)\" % id)\n\n doc = results[0]\n self.assertEquals(doc[\"user_id\"], user_id)\n self.assertEquals(doc[\"data\"], data)",
"def emit(self, record):\n try:\n record.created = datetime.fromtimestamp(record.created)\n self._collection.insert_one(self.format(record))\n except InvalidDocument as e:\n logging.error(\"Unable to save log record: %s\", e.message,\n exc_info=True)",
"def insert_to_collection(db, coll_name, docs):\n if isinstance(docs, list):\n db[coll_name].insert_many(docs)\n else:\n db[coll_name].insert_one(docs)"
] |
[
"0.6077333",
"0.6035941",
"0.58997524",
"0.58779824",
"0.58216363",
"0.57659096",
"0.57516026",
"0.57416564",
"0.5729385",
"0.5709184",
"0.5698257",
"0.55731833",
"0.5570711",
"0.553568",
"0.55046153",
"0.5484645",
"0.54827213",
"0.5445576",
"0.5431838",
"0.54205877",
"0.538443",
"0.5365677",
"0.5357865",
"0.5356791",
"0.53520715",
"0.53469837",
"0.5344313",
"0.53400147",
"0.53231287",
"0.5320206"
] |
0.6825649
|
0
|
Method to create a feedback matrix
|
def create_matrix(self):
self.matrix = np.zeros((len(self.users), len(self.items)))
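        # user-item pairs without training feedback remain zero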
for user in self.train_set['users']:
for item in self.train_set['feedback'][user]:
self.matrix[self.user_to_user_id[user]][self.item_to_item_id[item]] = \
self.train_set['feedback'][user][item]
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def build_matrix(self):\n self.lb_make = LabelEncoder()\n self.lb_make.fit(self.Y_train)\n tokenizer = Tokenizer(num_words=2000)\n x_array_train = numpy.asarray(self.train['text'])\n x_array_test = numpy.asarray(self.test['text'])\n tokenizer.fit_on_texts(x_array_train)\n x_train_matrix = tokenizer.texts_to_matrix(x_array_train, mode='count')\n x_test_matrix = tokenizer.texts_to_matrix(x_array_test, mode='count')\n y_train_numbers = self.lb_make.transform(self.Y_train)\n y_test_numbers = self.lb_make.transform(self.Y_test)\n y_train_matrix = keras.utils.to_categorical(y_train_numbers, 3)\n y_test_matrix = keras.utils.to_categorical(y_test_numbers, 3)\n self.tokenizer = tokenizer\n return x_train_matrix, x_test_matrix, y_train_matrix, y_test_matrix",
"def _make_random_matrix(self, n_components, n_features):",
"def CreateMatrix(self) -> BaseMatrix:",
"def CreateMatrix(self) -> BaseMatrix:",
"def make_matrix():\n\n # this imports category, category, data, text, pptx_data\n\n if use_test_data:\n # make a test matrix using create_test_matrix\n m = matrixfuncs.create_test_matrix()\n\n else:\n # make a matrix by connecting to Slides! and connecting to a data\n # table.\n import transformations.utils.slidesconf as slidesconf\n from Forgetdata.Matrix import ConnectionDefinition\n conn = ConnectionDefinition()\n conn.ConnectionString = mtd_filepath # set at top of file\n conn.Name = \"Test\"\n conn.Provider = \"SPSS MTD File\"\n liveConnection = slidesconf.connect(conn.ConnectionString,\n name=conn.Name,\n provider_name=conn.Provider)\n\n m = liveConnection[table_selected]\n\n x = tr.MatrixManipulator(m)\n matrixfuncs.printMatrix(m)\n\n for c in m[0]:\n c.TopMember.Label = c.TopMember.Label.encode('ascii', 'ignore')\n\n return m, x",
"def T(self):\n # TODO - your code here\n matrix_transpose = [];\n \n for j in range(self.w):\n matrix_transpose.append(self.get_column(j));\n \n return Matrix(matrix_transpose);",
"def T(self):\n # TODO - your code here\n transpose = []\n for col in range(self.w):\n new_row = []\n for row in range(self.h):\n new_row.append(self.g[row][col])\n transpose.append(new_row)\n return Matrix(transpose)\n # TODO - your code here",
"def create_matrix(data, discrete, prop, cutoff, nfeatures):\n y = np.zeros(len(data))\n \n count = 0 \n for i in range (len(data)):\n if data[i][nfeatures+prop]>cutoff:\n y[i]=1\n count += 1\n else:\n y[i]=0\n \n if discrete==False:\n y[i]=data[i][nfeatures+prop]\n \n x = data[:, 0:nfeatures]\n \n \n print (\"Number of good designs \"+str(count)+\" out of total \"+str(len(y)))\n return x, y",
"def confusion_matrix(self):\n return np.array([[self.tn, self.fp],\n [self.fn, self.tp]])",
"def gram_matrix(features, normalize=True):\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****",
"def make_tag_matrix(self):\n pass",
"def _buildMatrix(self, SparseMatrix, Ncells, MaxFaces, coeff):\n return (0, 0)",
"def get_score_matrix(self) -> int:",
"def get_feedforward_adj_mat(num_layers):\n ret = dok_matrix((num_layers, num_layers))\n for i in range(num_layers - 1):\n ret[i, i + 1] = 1\n return ret",
"def confusion_matrix(m_confusion):\n \n # f=[[\"/\"],[\"F1\"],[\"F2\"],[\"F3\"],[\"F4\"],[\"F5\"],[\"F6\"],[\"F7\"],\n # [\"F8\"],[\"F9\"],[\"F10\"],[\"F11\"],[\"F12\"],[\"F13\"]]\n a=m_confusion.reshape(13,13)\n print(\"Matriz de Confusion:\")\n print(a)",
"def T(self) -> BaseMatrix:",
"def T(self) -> BaseMatrix:",
"def get_matrix(df, features, output):\n #add a constant column as coefficient for w0\n df[\"constant\"] = 1.0\n feature_x, output_y = df[features].astype(float), df[output].astype(int)\n return feature_x, output_y",
"def get_confmatrix(self,y_pred,y_test):",
"def create_matrix(ratings_df, jokes_df):\r\n \"\"\" note: empty entries are populated with zeros \"\"\"\r\n\r\n matrix_handler = matrix_object()\r\n\r\n num_joke_features = 5\r\n\r\n ''' add all joke features '''\r\n for row_idx in range(0, jokes_df.shape[0]):\r\n joke_idx = int(jokes_df.iloc[row_idx][\"Idx\"])\r\n isAggressive = jokes_df.iloc[row_idx][\"isAggressive\"]\r\n isIncongruence = jokes_df.iloc[row_idx][\"isIncongruence\"]\r\n generation = jokes_df.iloc[row_idx][\"Generation\"]\r\n isMillenial = (generation == \"Millenial\")\r\n isGenX = (generation == \"Gen X\")\r\n isGenZ = (generation == \"Gen Z\")\r\n\r\n if(int(isMillenial) == 1.0 and int(isGenX) == 1.0):\r\n raise Valueerror()\r\n\r\n matrix_handler.add_value(joke_idx - 1, 0, int(isAggressive))\r\n matrix_handler.add_value(joke_idx - 1, 1, int(isIncongruence))\r\n matrix_handler.add_value(joke_idx - 1, 2, int(isMillenial))\r\n matrix_handler.add_value(joke_idx - 1, 3, int(isGenX))\r\n matrix_handler.add_value(joke_idx - 1, 4, int(isGenZ))\r\n\r\n ''' add all ratings '''\r\n for row_idx in range(0, ratings_df.shape[0]):\r\n for joke_idx in range(1, 122):\r\n col_name = \"joke\" + str(joke_idx)\r\n matrix_handler.add_value(joke_idx - 1, row_idx + num_joke_features, ratings_df.iloc[row_idx][col_name])\r\n\r\n matrix = matrix_handler.compile_matrix()\r\n new_df = matrix_handler.to_df(matrix)\r\n\r\n return matrix, new_df",
"def form_matrix_yt(w):\r\n M = np.zeros((len(w),len(w)))\r\n for i in range(len(w)):\r\n for j in range(len(w)):\r\n M[i,j] = YoungTableaux(w[i],w[j]).CMNR()\r\n return M",
"def as_matrix(self) -> types.Matrix:",
"def create_matrix_B(m,k,alpha=8,beta=.5):\n\n\tn = m*k\n\n\t#define draw probabilities for intercommunity and intracommunity edges\n\tp = alpha * math.log(m) / m\n\tq = beta * math.log(m) / m\n\n\t#create true label of communities\n\tg = []\n\tfor i in range(k):\n\t\ttemp = [i]*m\n\t\tg.extend(temp)\n\n\t#adjacency matrix\n\tA = np.zeros([n,n])\n\n\tfor r in range(n):\n\t\tfor c in range(r+1,n):\n\t\t\t#in the same community if they have the same value\n\t\t\tif g[r] == g[c]:\n\t\t\t\tA[r,c] = np.random.binomial(1,p)\n\t\t\t\tA[c,r] = A[r,c]\t\t\n\t\t\telse:\n\t\t\t\tA[r,c] = np.random.binomial(1,q)\n\t\t\t\tA[c,r] = A[r,c]\n\n\tB = 2*A - (np.ones([n,n]) - np.identity(n))\n\t\t\t\t\n\treturn B,g",
"def get_data_with_weights(self):\n weights = np.zeros((self.contexts.shape[0], self.num_actions))\n a_ind = np.array([(i, val) for i, val in enumerate(self.actions)])\n weights[a_ind[:, 0], a_ind[:, 1]] = 1.0\n return self.contexts, self.rewards, weights",
"def build_conf_matrix(self):\n pred_vals = []\n for prediction in self.predicted_values:\n pred_vals.append(np.argmax(prediction))\n\n print confusion_matrix(self.true_values, pred_vals, labels=[1, 2, 3, 4])\n\n self.logger.info(\"Confusion Matrix : {}\".format(confusion_matrix(self.true_values, pred_vals,\n labels=[1, 2, 3, 4])))",
"def process(self, mat):",
"def generate_weighted_graph():\n \n Adj_Matrix = np.array([\n [0.0, 0.2, 0.2, 0.3, 0.2, 0.1],\n [0.1, 0.0, 0.3, 0.3, 0.1, 0.2],\n [0.3, 0.2, 0.0, 0.1, 0.2, 0.2],\n [0.1, 0.4, 0.2, 0.0, 0.2, 0.1],\n [0.2, 0.2, 0.2, 0.2, 0.0, 0.2],\n [0.2, 0.1, 0.1, 0.3, 0.3, 0.0]\n ])\n\n return Adj_Matrix",
"def build_training_matrix(self, data_frame: pd.DataFrame):\n logging.info(f\"ModelContainer {self.name}: build_training_matrix\")\n\n \t#ADDED the following check if the \n #feature_uniques was populated accordin to unique values\n #in case feature_uniques is prepopulated the assumption is \n # that the correct values are uploaded\n if self.feature_uniques==None:\n self.analyze_distributions(data_frame)\n\n\n # Now lets do the encoding thing...\n\n\n \n\n encoded_df = one_hot_encode(data_frame, self.feature_uniques)\n\n\n for nf in self.features_numeric:\n encoded_df[nf] = data_frame[nf]\n\n matrix = encoded_df.values\n return matrix",
"def build_augmented_matrix(self):\r\n for row in range(self.SIZE):\r\n self.matrix[row].append(self.result[row])",
"def lap_mat(self):"
] |
[
"0.63164544",
"0.62677366",
"0.61268675",
"0.61268675",
"0.60406196",
"0.60257655",
"0.5969459",
"0.59561974",
"0.5900327",
"0.5802533",
"0.5766169",
"0.5753086",
"0.5722668",
"0.57204777",
"0.5712716",
"0.570938",
"0.570938",
"0.5708698",
"0.56675833",
"0.566741",
"0.56654525",
"0.5652191",
"0.5639129",
"0.5622781",
"0.5614551",
"0.55947405",
"0.5590835",
"0.5585691",
"0.5582669",
"0.55795646"
] |
0.70631033
|
0
|
Execute SSM automation document DigitoBreakLambdaSecurityGroupTest_20200921
|
def test_break_security_group_usual_case_specify_sg():
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_break_security_group_failed():",
"def test_break_security_group_usual_case():",
"def windows_execution(session, elb_name, elb_type, command_type, instance_id_list, tag_value, platform_type, username, password_parameter_name):\n document_name = 'AWS-RunPowerShellScript'\n if str(elb_name) != 'None' and str(tag_value)=='None':\n instance_id_list = []\n instance_id_list = getInstanceList(instance_id_list, elb_type, elb_name, session)\n\n logger.info(instance_id_list)\n \n if command_type == 'ENABLE':\n commands = [\n \"\"\" try\n {\n $breakglassUser = '\"\"\" + username + \"\"\"'\n $breakglassPass = (Get-SSMParameterValue -Name \"\"\" + password_parameter_name + \"\"\" -WithDecryption $True).Parameters[0].Value\n $allFeatures = Get-WindowsFeature\n $rdsGatewayInstalled = ($allFeatures | Where-Object {$_.Name -eq 'RDS-Gateway'}).Installed\n $domainServicesInstalled = ($allFeatures | Where-Object {$_.Name -eq 'AD-Domain-Services'}).Installed\n \n # If the instance is a domain controller, the breakglass script should not run.\n if (-Not $domainServicesInstalled)\n {\n # Add a breakglass user and give it Administrator level access (PowerShell user management cmdlets are not used for Windows backward compatability).\n Write-Output 'Creating breakglass user and adding it to Administrators group'\n net user $breakglassUser $breakglassPass /add\n net localgroup 'Administrators' $breakglassUser /add\n \n # If the instance is also an RDS Gateway, configure it to allow access to the breakglass user\n if ($rdsGatewayInstalled)\n {\n Write-Output 'RDS Gateway role detected. Configuring RDS Gateway to allow breakglass access.'\n Import-Module RemoteDesktopServices\n New-Item -Path RDS:\\GatewayServer\\CAP -Name $breakglassUser -UserGroups 'Administrators@BUILTIN' -AuthMethod 1\n New-Item -Path RDS:\\GatewayServer\\RAP -Name $breakglassUser -UserGroups 'Administrators@BUILTIN' -ComputerGroupType 2\n }\n }\n else\n {\n Write-Output 'Domain controller role detected. Breakglass script will not execute'\n }\n }\n \n # If anything fails in the commands, SSM run command should fail too\n catch\n {\n Write-Output 'Exception block reached'\n Write-Output $_.Exception.Message\n Exit -1\n }\"\"\"\n ]\n comment = 'Break Glass Command - Enable Windows Local Administrator'\n \n elif command_type == 'DISABLE':\n commands = [\n \"\"\" try\n {\n $breakglassUser = '\"\"\" + username + \"\"\"'\n $allFeatures = Get-WindowsFeature\n $rdsGatewayInstalled = ($allFeatures | Where-Object {$_.Name -eq 'RDS-Gateway'}).Installed\n $domainServicesInstalled = ($allFeatures | Where-Object {$_.Name -eq 'AD-Domain-Services'}).Installed\n \n # If the instance is a domain controller, the breakglass script should not run.\n if (-Not $domainServicesInstalled)\n {\n # If the instance is an RDS Gateway, remove the breakglass configurations\n if ($rdsGatewayInstalled)\n {\n Write-Output 'RDS Gateway role detected. Removing RDS Gateway configurations that allowed breakglass access.'\n Import-Module RemoteDesktopServices\n Remove-Item -Path RDS:\\GatewayServer\\CAP\\$breakglassUser -Recurse\n Remove-Item -Path RDS:\\GatewayServer\\RAP\\$breakglassUser -Recurse\n }\n \n # Remove the breakglass user (PowerShell user management cmdlets not used for backward compatability).\n Write-Output 'Removing breakglass user.'\n net localgroup 'Administrators' $breakglassUser /delete\n net user $breakglassUser /delete\n }\n else\n {\n Write-Output 'Domain controller role detected. 
Breakglass script will not execute'\n }\n }\n \n # If anything fails in the commands, SSM run command should fail too\n catch\n {\n Write-Output 'Exception block reached'\n Write-Output $_.Exception.Message\n Exit -1\n }\n \"\"\"\n ]\n comment = 'Break Glass Command - Disable Windows Local Administrator'\n else:\n raise Exception(\"Called with invalid command_type\")\n \n if str(tag_value) == 'None':\n logger.info('============RunCommand Using Instances')\n return send_instance_run_command(session, document_name, commands, instance_id_list, comment)\n else:\n logger.info('============RunCommand Using Target Value Pair or All instance with the tag passed')\n return send_tag_run_command(session, document_name, commands, 'tag:Name', tag_value, comment)",
"def linux_execution(session, elb_name, elb_type, command_type, instance_id_list, tag_value, platform_type, username, password_parameter_name): \n \n document_name = 'AWS-RunShellScript'\n if str(elb_name) != 'None' and str(tag_value)=='None':\n instance_id_list = []\n instance_id_list = getInstanceList(instance_id_list, elb_type, elb_name, session)\n \n logger.info(instance_id_list)\n \n comment = 'Break glass for Linux Hosts'\n password = \"$(aws ssm get-parameters --names \"+ password_parameter_name + \" --with-decryption --query 'Parameters[*].{Value:Value}' --output text --region ap-southeast-2)\"\n \n if str(command_type) == 'ENABLE':\n commands = [\n # Add a new user, create their homedir if it doesn't exist\n \"useradd --create-home {username}\".format(username=username),\n # Change the password for a user\n # We shouldn't the password in clear text, hence using cli to get from parameter store\n \"echo '{username}':{password} | chpasswd\".format(username=username, password=password),\n # Below provides sudo access\n \"echo '{username} ALL=(ALL) NOPASSWD:ALL' > /etc/sudoers.d/999-break-glass-{username}\".format(username=username)\n ]\n elif str(command_type) == 'DISABLE':\n commands = [\n \"killall -KILL -u {username}\".format(username=username),\n \"userdel -r {username}\".format(username=username),\n # If we want to clean up that user's homedir, uncomment the following\n \"[ -d /home/{username} ] && rm -rf /home/{username}\".format(username=username),\n # Remove sudo access\n \"rm -rf /etc/sudoers.d/999-break-glass-{username}\".format(username=username)\n ]\n else:\n raise Exception(\"Called with invalid command_type\")\n \n if str(tag_value) == 'None':\n logger.info('============RunCommand Using Instances')\n return send_instance_run_command(session, document_name, commands, instance_id_list, comment)\n else:\n logger.info('============RunCommand Using Target Value Pair or All instance with the tag passed')\n return send_tag_run_command(session, document_name, commands, 'tag:Name', tag_value, comment)",
"def test_aws_service_api_vm_security_group_delete(self):\n pass",
"def test_break_security_group_rollback_previous():",
"def test_08_security_group(self):\n # Validate the following:\n # 1. Create a project\n # 2. Assign some security groups to that project\n # 3. Verify the security groups can only be assigned to VM belonging\n # to that project.\n\n security_group = SecurityGroup.create(\n self.apiclient,\n self.services[\"security_group\"],\n projectid=self.project.id\n )\n self.debug(\"Created security group with ID: %s\" % security_group.id)\n # Default Security group should not have any ingress rule\n sercurity_groups = SecurityGroup.list(\n self.apiclient,\n projectid=self.project.id\n )\n self.assertEqual(\n isinstance(sercurity_groups, list),\n True,\n \"Check for list security groups response\"\n )\n\n self.assertNotEqual(\n len(sercurity_groups),\n 0,\n \"Check List Security groups response\"\n )\n # Authorize Security group to SSH to VM\n ingress_rule = security_group.authorize(\n self.apiclient,\n self.services[\"security_group\"],\n projectid=self.project.id\n )\n self.assertEqual(\n isinstance(ingress_rule, dict),\n True,\n \"Check ingress rule created properly\"\n )\n\n self.debug(\n \"Authorizing ingress rule for sec group ID: %s for ssh access\"\n % security_group.id)\n self.virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n serviceofferingid=self.service_offering.id,\n securitygroupids=[security_group.id],\n projectid=self.project.id\n )\n self.debug(\"Deployed VM (ID: %s) in project: %s\" % (\n self.virtual_machine.id,\n self.project.id\n ))\n self.assertEqual(\n self.virtual_machine.state,\n 'Running',\n \"VM state should be running after deployment\"\n )\n # Deploy another VM with same security group outside the project\n self.debug(\n \"Deploying VM with security group: %s outside project:%s\" % (\n security_group.id,\n self.project.id\n ))\n with self.assertRaises(Exception):\n VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n serviceofferingid=self.service_offering.id,\n accountid=self.account.name,\n domainid=self.account.domainid,\n securitygroupids=[security_group.id],\n )\n return",
"def lambda_handler(event, context):\n Stop_Instances()",
"def modify_rds_security_group(payload):\n # version = payload.get(\"version\")\n rds_ids = payload.pop(\"resource_id\")\n sg_ids = payload.pop(\"sg_id\")\n apply_action = \"GrantSecurityGroup\"\n remove_action = \"RemoveSecurityGroup\"\n check_instance_security_action = \"DescribeSecurityGroupByInstance\"\n version = payload.get(\"version\")\n result_data = {}\n\n succ, resp = get_ha_rds_backend_instance_info(payload)\n if not succ:\n return resp\n rds_2_instance = {rds: instance for instance, rds in resp.items()}\n\n if len(sg_ids) > 1:\n return console_response(\n SecurityErrorCode.ONE_SECURITY_PER_INSTANCE_ERROR, \"modify failed\")\n sg_id = sg_ids[0]\n\n code = 0\n msg = 'Success'\n for rds_id in rds_ids:\n sg_results_succ = []\n sg = RdsSecurityGroupModel.get_security_by_id(sg_id=sg_id)\n sg_uuid = sg.uuid\n visible_rds_record = RdsModel.get_rds_by_id(rds_id=rds_id)\n if visible_rds_record.rds_type == 'ha':\n rds_group = visible_rds_record.rds_group\n rds_records = RdsModel.get_rds_records_by_group(rds_group)\n else:\n rds_records = []\n for rds_record in rds_records:\n rds_ins_uuid = rds_2_instance.get(rds_record.uuid)\n\n payload.update(\n {\"action\": check_instance_security_action, \"version\": version,\n \"server\": rds_ins_uuid})\n # check_resp = api.get(payload=payload, timeout=10)\n check_resp = api.get(payload=payload)\n if check_resp.get(\"code\") != 0:\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = check_resp.get(\"msg\")\n continue\n\n # if the instance already has a security group, remove it\n if check_resp[\"data\"][\"total_count\"] > 0:\n old_sg_uuid = check_resp[\"data\"][\"ret_set\"][0][\"id\"]\n payload.update({\"action\": remove_action, \"version\": version,\n \"server\": rds_ins_uuid,\n \"security_group\": old_sg_uuid})\n # remove_resp = api.get(payload=payload, timeout=10)\n remove_resp = api.get(payload=payload)\n if remove_resp.get(\"code\") != 0:\n logger.debug(\"the resp of removing the old securty group is:\" +\n str(remove_resp))\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = remove_resp.get(\"msg\")\n continue\n else:\n rds_record.sg = None\n rds_record.save()\n\n # grant the new security group to the instance\n payload.update({\"action\": apply_action, \"version\": version,\n \"server\": rds_ins_uuid, \"security_group\": sg_uuid})\n # grant_resp = api.get(payload=payload, timeout=10)\n grant_resp = api.get(payload=payload)\n\n if grant_resp.get(\"code\") != 0:\n logger.debug(\"the resp of granting the new securty group is:\" +\n str(grant_resp))\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = grant_resp.get(\"msg\")\n logger.error(\n \"security_group with sg_id \" + sg_id +\n \" cannot apply to rds with rds_id \" + rds_id)\n else:\n try:\n rds_record.sg = RdsSecurityGroupModel.\\\n get_security_by_id(sg_id)\n rds_record.save()\n except Exception as exp:\n logger.error(\"cannot save grant sg to rds to db, {}\".\n format(exp.message))\n else:\n sg_results_succ.append(sg_id)\n result_data.update({rds_id: sg_results_succ})\n resp = console_response(code, msg, len(result_data.keys()), [result_data])\n return resp",
"def test_script(caplog: pytest.LogCaptureFixture, config: dict, error: str) -> None:\n with pytest.raises(vol.Invalid, match=error):\n cv.script_action(config)",
"def process_security_group ( ec2_conn, vpc, base_name, params, secgrp_type = None, secgrp_description = None ) :\n if not secgrp_type :\n secgrp_type = params[ 'type' ]\n if not secgrp_description :\n secgrp_description = params[ 'description' ]\n\n secgrp = find_group( ec2_conn, base_name, secgrp_type )\n if not secgrp :\n if params.get( 'create', 'NO' ) == 'YES' :\n secgrp_name = get_secgrp_name( base_name, secgrp_type )\n print \"Creating security group with name: \" + secgrp_name\n secgrp = create_secgrp( ec2_conn, vpc, secgrp_name, secgrp_description )\n else :\n print \"ERROR: Could not find group with name \" + get_secgrp_name( base_name, secgrp_type )\n sys.exit( 1 )\n\n print \"Prepping rules for security group \" + secgrp.name\n remove_all_rules( ec2_conn, [ secgrp ], True, base_name )\n\n # Reload group to retrieve new object with no rules.\n secgrp = find_group( ec2_conn, base_name, secgrp_type )\n\n is_public = params.get( 'public' ) == 'YES'\n if is_public :\n nat_secgrp = None\n if params.get( 'os-update' ) == 'YES' :\n ec2_conn.authorize_security_group_egress( group_id = secgrp.id,\n ip_protocol = \"tcp\",\n from_port = 80,\n to_port = 80,\n cidr_ip = all_ip_cidr )\n if params.get( 'public-tcp-ports' ) :\n public_ports = params[ 'public-tcp-ports' ]\n for port in public_ports :\n secgrp.authorize( ip_protocol = \"tcp\",\n from_port = port,\n to_port = port,\n cidr_ip = all_ip_cidr )\n\n if params.get( 'incoming-cidr-rules' ) :\n for incoming_rule in params[ 'incoming-cidr-rules' ] :\n start_port = incoming_rule.get( 'port' )\n end_port = start_port\n\n protocol = get_secgrp_protocol_param( incoming_rule )\n cidr_list = get_cidr_param( incoming_rule[ 'cidr' ] )\n\n secgrp.authorize( ip_protocol = protocol,\n from_port = start_port,\n to_port = end_port,\n cidr_ip = cidr_list )\n\n else :\n nat_secgrp = find_group( ec2_conn, base_name, 'NAT' )\n # Grant NAT access to login to the machine\n if not nat_secgrp :\n print \"ERROR: Could not find NAT security group!\"\n sys.exit( 1 )\n grant_ssh_access( ec2_conn, [ secgrp ], nat_secgrp )\n if params.get( 'os-update' ) == 'YES' :\n grant_cidr_access( ec2_conn, all_ip_cidr, [ secgrp ], 80, nat_secgrp )\n # Need to reload secgrp so it contains latest rules\n secgrp = find_group( ec2_conn, base_name, secgrp_type )\n # Need to reload NAT secgrp so it contains latest rules\n nat_secgrp = find_group( ec2_conn, base_name, 'NAT' )\n\n if params.get( 'outgoing-cidr-rules' ) :\n for outgoing_rule in params[ 'outgoing-cidr-rules' ] :\n start_port = outgoing_rule.get( 'port' )\n end_port = start_port\n protocol = get_secgrp_protocol_param( outgoing_rule )\n cidr_list = get_cidr_param( outgoing_rule[ 'cidr' ] )\n\n for cidr in cidr_list :\n grant_cidr_access( ec2_conn, cidr, [ secgrp ], start_port, nat_secgrp, protocol )\n\n if params.get( 'outgoing-group-rules' ) :\n for outgoing_rule in params[ 'outgoing-group-rules' ] :\n start_port = outgoing_rule.get( 'port' )\n end_port = start_port\n protocol = get_secgrp_protocol_param( outgoing_rule )\n target_secgrp_type = outgoing_rule[ 'group-type' ]\n target_secgrp = find_group( ec2_conn, base_name, target_secgrp_type )\n grant_grp_access( ec2_conn, [ secgrp ], target_secgrp, start_port, protocol )\n \n if params.get( 'incoming-group-rules' ) :\n for incoming_rule in params[ 'incoming-group-rules' ] :\n start_port = incoming_rule.get( 'port' )\n end_port = start_port\n protocol = get_secgrp_protocol_param( incoming_rule )\n incoming_secgrp_type = incoming_rule[ 'group-type' ]\n incoming_secgrp = 
find_group( ec2_conn, base_name, incoming_secgrp_type )\n grant_grp_access( ec2_conn, [ incoming_secgrp ], secgrp, start_port, protocol )\n\n if params.get( 'self-rules' ) :\n for self_rule in params[ 'self-rules' ] :\n start_port = self_rule.get( 'port' )\n end_port = start_port\n\n if not start_port :\n start_port = self_rule[ 'port-range' ][ 'start' ]\n end_port = self_rule[ 'port-range' ][ 'end' ]\n\n protocol = get_secgrp_protocol_param( self_rule )\n\n grant_grp_self_access( ec2_conn, secgrp, start_port, end_port, protocol )\n\n # Reload the security group so it contains all the latest rules\n secgrp = find_group( ec2_conn, base_name, secgrp_type )\n return ( secgrp_type, secgrp )",
"def launch(\n *,\n key_name: Optional[str],\n instance_type: str,\n ami: str,\n ami_user: str,\n tags: Dict[str, str],\n display_name: Optional[str] = None,\n size_gb: int,\n security_group_name: str,\n instance_profile: Optional[str],\n nonce: str,\n delete_after: datetime.datetime,\n) -> Instance:\n\n if display_name:\n tags[\"Name\"] = display_name\n tags[\"scratch-delete-after\"] = str(delete_after.timestamp())\n tags[\"nonce\"] = nonce\n tags[\"git_ref\"] = git.describe()\n tags[\"ami-user\"] = ami_user\n\n ec2 = boto3.client(\"ec2\")\n groups = ec2.describe_security_groups()\n security_group_id = None\n for group in groups[\"SecurityGroups\"]:\n if group[\"GroupName\"] == security_group_name:\n security_group_id = group[\"GroupId\"]\n break\n\n if security_group_id is None:\n vpcs = ec2.describe_vpcs()\n vpc_id = None\n for vpc in vpcs[\"Vpcs\"]:\n if vpc[\"IsDefault\"] == True:\n vpc_id = vpc[\"VpcId\"]\n break\n if vpc_id is None:\n default_vpc = ec2.create_default_vpc()\n vpc_id = default_vpc[\"Vpc\"][\"VpcId\"]\n securitygroup = ec2.create_security_group(\n GroupName=security_group_name,\n Description=\"Allows all.\",\n VpcId=vpc_id,\n )\n security_group_id = securitygroup[\"GroupId\"]\n ec2.authorize_security_group_ingress(\n GroupId=security_group_id,\n CidrIp=\"0.0.0.0/0\",\n IpProtocol=\"tcp\",\n FromPort=22,\n ToPort=22,\n )\n\n network_interface: InstanceNetworkInterfaceSpecificationTypeDef = {\n \"AssociatePublicIpAddress\": True,\n \"DeviceIndex\": 0,\n \"Groups\": [security_group_id],\n }\n\n say(f\"launching instance {display_name or '(unnamed)'}\")\n with open(ROOT / \"misc\" / \"scratch\" / \"provision.bash\") as f:\n provisioning_script = f.read()\n kwargs: RunInstancesRequestRequestTypeDef = {\n \"MinCount\": 1,\n \"MaxCount\": 1,\n \"ImageId\": ami,\n \"InstanceType\": cast(InstanceTypeType, instance_type),\n \"UserData\": provisioning_script,\n \"TagSpecifications\": [\n {\n \"ResourceType\": \"instance\",\n \"Tags\": [{\"Key\": k, \"Value\": v} for (k, v) in tags.items()],\n }\n ],\n \"NetworkInterfaces\": [network_interface],\n \"BlockDeviceMappings\": [\n {\n \"DeviceName\": \"/dev/sda1\",\n \"Ebs\": {\n \"VolumeSize\": size_gb,\n \"VolumeType\": \"gp3\",\n },\n }\n ],\n \"MetadataOptions\": {\n # Allow Docker containers to access IMDSv2.\n \"HttpPutResponseHopLimit\": 2,\n },\n }\n if key_name:\n kwargs[\"KeyName\"] = key_name\n if instance_profile:\n kwargs[\"IamInstanceProfile\"] = {\"Name\": instance_profile}\n i = boto3.resource(\"ec2\").create_instances(**kwargs)[0]\n\n return i",
"def test_aws_service_api_vm_security_group_put(self):\n pass",
"def process_ssm_run_command(event):\n event_dict = event.to_dict()\n instance_id = event_dict['detail']['instance-id']\n command_name = event_dict['detail']['document-name']\n command_status = event_dict['detail']['status']\n cw_client = boto3.client('cloudwatch', config=MSAM_BOTO3_CONFIG)\n log_client = boto3.client('logs', config=MSAM_BOTO3_CONFIG)\n dimension_name = \"Instance ID\"\n metric_name = command_name\n status = 0\n\n try:\n # test to make sure stream names are always of this format, esp if you create your own SSM document\n log_stream_name = event_dict['detail']['command-id'] + \"/\" + instance_id + \"/aws-runShellScript/stdout\"\n \n response = log_client.get_log_events(\n logGroupName=SSM_LOG_GROUP_NAME,\n logStreamName=log_stream_name,\n )\n #print(response)\n if command_status == \"Success\":\n # process document name (command)\n if \"MSAMElementalLiveStatus\" in command_name:\n metric_name = \"MSAMElementalLiveStatus\"\n for event in response['events']:\n if \"running\" in event['message']:\n status = 1\n break\n elif \"MSAMSsmSystemStatus\" in command_name:\n metric_name = \"MSAMSsmSystemStatus\"\n status = 1\n elif \"MSAMElementalLiveActiveAlerts\" in command_name:\n metric_name = \"MSAMElementalLiveActiveAlerts\"\n root = ET.fromstring(response['events'][0]['message'])\n status = len(list(root))\n if status == 1 and root[0].tag == \"empty\":\n status = 0\n else:\n if \"MSAMElementalLiveCompletedEvents\" in command_name:\n metric_name = \"MSAMElementalLiveCompletedEvents\"\n elif \"MSAMElementalLiveErroredEvents\" in command_name:\n metric_name = \"MSAMElementalLiveErroredEvents\"\n elif \"MSAMElementalLiveRunningEvents\" in command_name:\n metric_name = \"MSAMElementalLiveRunningEvents\"\n root = ET.fromstring(response['events'][0]['message'])\n status = len(root.findall(\"./live_event\"))\n else:\n # for the elemental live status, the command itself returns a failure if process is not running at all\n # which is different than when a command fails to execute altogether\n if command_status == \"Failed\" and \"MSAMElementalLiveStatus\" in command_name:\n for event in response['events']:\n if \"Not Running\" in event['message'] or \"Active: failed\" in event['message']:\n metric_name = \"MSAMElementalLiveStatus\"\n break\n else:\n # log if command has timed out or failed\n print(\"SSM Command Status: Command %s sent to instance %s has %s\" % (command_name, instance_id, command_status))\n # create a metric for it\n status = 1\n metric_name = \"MSAMSsmCommand\"+command_status\n\n cw_client.put_metric_data(\n Namespace = SSM_LOG_GROUP_NAME,\n MetricData = [\n {\n 'MetricName': metric_name,\n 'Dimensions': [\n {\n 'Name' : dimension_name,\n 'Value' : instance_id\n },\n ],\n \"Value\": status,\n \"Unit\": \"Count\"\n }\n ]\n )\n except ClientError as error:\n print(error)\n print(\"SSM Command Status: Command %s sent to instance %s has status %s\" % (command_name, instance_id, command_status))\n print(\"Log stream name is %s\" % (log_stream_name))",
"def test_stop_with_permission(self):\n self.create_user_with_role(\n self.user.name, self.user.email, self.user.password, Role.tester)\n self.create_forktest(\"own-fork-commit\", TestPlatform.linux, regression_tests=[2])\n with self.app.test_client() as c:\n response = c.post(\n '/account/login', data=self.create_login_form_data(self.user.email, self.user.password))\n response = c.get('/test/stop_test/3')\n test = Test.query.filter(Test.id == 3).first()\n self.assertEqual(test.finished, True)",
"def test_api_video_student_stop_live(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [\"student\"]\n\n response = self.client.post(\n f\"/api/videos/{video.id}/stop-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 403)\n content = json.loads(response.content)\n self.assertEqual(\n content, {\"detail\": \"You do not have permission to perform this action.\"}\n )",
"def test_cron_workflow_service_terminate_cron_workflow(self):\n pass",
"def lambda_handler(event, context):\n import urllib.request\n import json\n\n allowed_domain = \"brianandkelly.ws\"\n\n # print(event)\n # Get the user info from Google for the recieved token...\n id_token = event['authorizationToken']\n google_token_helper_uri = \"https://www.googleapis.com/oauth2/v3/tokeninfo?id_token=\" + id_token\n\n result = json.loads(urllib.request.urlopen(google_token_helper_uri).read())\n\n domain = result['hd']\n user = result['sub']\n effect = 'Deny'\n\n if domain == allowed_domain:\n effect = 'Allow'\n \n print(\"Access request for user: \" + result['email'] + \" with effect: \" + effect + \" processed.\")\n\n respond = {\n \"principalId\": user,\n \"policyDocument\": {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Action\": \"execute-api:Invoke\",\n \"Effect\": effect,\n \"Resource\": \"arn:aws:execute-api:us-east-1:*:7k8o0sgjli/securityvideos/*\"\n }\n ]\n }\n }\n\n return respond",
"def main():\n\n payload = {'include': 'systems'}\n insights_request = InsightsRequest(SYS_API, payload)\n cute_output(insights_request)\n\n groups = insights_request.get_insights()\n found = False\n\n for elem in groups:\n if not elem['systems']:\n found = True\n print('Deleting empty group ' + elem['display_name'])\n clean_empty_group(elem['id'])\n if not found:\n print('No empty groups found, nothing to delete!!!')\n\n payload = {'expand': 'system'}\n insights_request = InsightsRequest(URL + '/v2/reports', payload)\n reports = create_maint_plan(insights_request)\n \"\"\"\n print(reports)\n\n result = get_json(URL + '/v2/reports', payload)\n print(result)\n for elem in result['resources']:\n print('Report: ' + str(elem['id']) + ' Rule id: ' + str(elem['rule_id']) + '\\tSystem: ' + str(elem['system']['hostname']))\n \"\"\"\n #create_maint_plan('gherkin')",
"def test_process_invalid1(self):\n self.skill.logic = {}\n self.skill.valid.app_id = '12345'\n @self.skill.launch\n def sample_func():\n \"\"\"Decorated function.\"\"\"\n pass\n self.skill.logic['LaunchRequest']()\n self.assertFalse(self.skill.process(data.SAMPLE_LAUNCH_REQUEST))",
"def testEditConfigCreateLambda(self):\n self.ports.editconfig_create_lambda(file_name = 'editconfig_create_port_label.xml', port_ids = portsDict['port_ids'], lambdas = portsDict['lambda'])",
"def cli_run_sample(endpoint, email, password, org_name, grp_name, sample_name):\n knex = Knex(endpoint)\n User(knex, email, password).login()\n org = Organization(knex, org_name).get()\n grp = org.sample_group(grp_name).get()\n sample = grp.sample(sample_name).get()\n run_sample(sample, lambda x: click.echo(x, err=True))",
"def test_export_function(self):\n\n function_name = \"testcloudwatchlogs\"\n bucket_name = \"my-bucket-name\"\n fnb_name = \"fnb\" + function_name\n role = \"arn:aws:iam::123456789012:role/MyFunction\"\n security_group_ids = [\"sg-ABCDEFGHIJKL\"]\n subnet_ids = [\"subnet-ABCDEFGHIJKL\"]\n log_group = \"/aws/lambda/functionbeat-cloudwatch\"\n\n self._generate_dummy_binary_for_template_checksum()\n\n self.render_config_template(\n path=os.path.abspath(self.working_dir) + \"/log/*\",\n cloudwatch={\n \"name\": function_name,\n \"bucket\": bucket_name,\n \"role\": role,\n \"virtual_private_cloud\": {\n \"security_group_ids\": security_group_ids,\n \"subnet_ids\": subnet_ids,\n },\n \"log_group\": log_group,\n },\n )\n functionbeat_proc = self.start_beat(\n logging_args=[\"-d\", \"*\"],\n extra_args=[\"export\", \"function\", function_name]\n )\n\n self.wait_until(lambda: self.log_contains(\"PASS\"))\n functionbeat_proc.check_wait()\n\n function_template = self._get_generated_function_template()\n function_properties = function_template[\"Resources\"][fnb_name][\"Properties\"]\n\n assert function_properties[\"FunctionName\"] == function_name\n assert function_properties[\"Code\"][\"S3Bucket\"] == bucket_name\n assert function_properties[\"Role\"] == role\n assert function_properties[\"VpcConfig\"][\"SecurityGroupIds\"] == security_group_ids\n assert function_properties[\"VpcConfig\"][\"SubnetIds\"] == subnet_ids",
"def revoke_secgroup(self, args):\n region = args[\"Region\"]\n sgid = args[\"Security-group-ID\"]\n protocol = args[\"protocol\"]\n from_port = int(args[\"FromPort\"])\n to_port = int(args[\"ToPort\"])\n ip_range = args[\"IpRange\"]\n region = args[\"Region\"]\n message = MessageClass()\n\n # Boto3 client creation by providing the access_id and access_secret\n ec2 = boto3.client(service_name='ec2', region_name=region, api_version=None, use_ssl=True,\n verify=None, endpoint_url=None, aws_access_key_id=self.aws_access_key,\n aws_secret_access_key=self.aws_secret_token, aws_session_token=None,\n config=None)\n\n data = ec2.revoke_security_group_ingress(\n GroupId=sgid,\n IpPermissions=[\n {'IpProtocol': protocol,\n 'FromPort': from_port,\n 'ToPort': to_port,\n 'IpRanges': [{'CidrIp': ip_range}]}\n ])\n attachment = MessageAttachmentsClass()\n attachment.title = data\n message.message_text = \"Ingress Successfully Revoked :\"\n message.attach(attachment)\n\n return message.to_json()",
"def run_instance():\n ami_id = \"ami-04876f29fd3a5e8ba\" # AMI Id\n instance_type = \"t2.micro\" # Instance Type\n tag_specs = [\n {\n 'ResourceType': 'instance',\n 'Tags': [\n {\n 'Key': 'Name',\n 'Value': '[email protected]'\n }\n ]\n }\n ]\n\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Run an instance\n\n instances = ec2_resource.create_instances(ImageId=ami_id, InstanceType=instance_type,\n MaxCount=1, MinCount=1, KeyName='[email protected]',\n TagSpecifications=tag_specs, \n SecurityGroupIds=['sg-06b757b4bb272d98f'],\n UserData=assemble_userdata().as_string())\n Instance_id = instances[0].id\n print('\\nInstance Id: ' + Instance_id)\n print('Image Id: ' + instances[0].image_id)\n print('Instance Type: ' + instances[0].instance_type)\n print('State: ' + instances[0].state['Name'])\n return Instance_id",
"def test_stop_plan_run_ok(test_app):\n\n bamboo_api_client = test_app.get('bamboo_api_tests').bamboo_api_client\n plan_build_key = test_app.get('plan_keys', {}).get('build_key', '')\n\n stop_plan = bamboo_api_client.stop_build(plan_build_key=plan_build_key)\n\n # Check if the API got a HTTP 302/200 response code\n assert stop_plan.get('status_code') == 302, stop_plan",
"def run(self,identity,params=None, headers=None):\n path = self._sub_url_params('/scenario_simulators/:identity/actions/run', {\n \n 'identity': identity,\n })\n \n if params is not None:\n params = {'data': params}\n response = self._perform_request('POST', path, params, headers,\n retry_failures=False)\n return self._resource_for(response)",
"def invoke_lambda(bucket, body_id):\n payload = {\"body_id\": body_id}\n if \"prod\" not in bucket:\n payload[\"bucket\"] = bucket\n response = LAMBDA_CLIENT.invoke(FunctionName=\"create_ppp_body_thumbnails\",\n InvocationType=\"Event\",\n Payload=json.dumps(payload))\n if ARG.DEBUG:\n pprint.pprint(response, indent=4)\n COUNT[\"Submitted\" if response[\"StatusCode\"] == 202 else \"Submit error\"] += 1\n if \"Payload\" in response:\n LOGGER.debug(response['Payload'].read().decode(\"utf-8\"))",
"def __init__(self, scope: core.Construct, id: str,\n ec2_tag_key: str,\n ssm_document_name: str,\n playbook_url: str,\n playbook_file_name: str = None,\n ec2_tag_value: str = None,\n notification_key_filter_prefix: str = None,\n notification_key_filter_suffix: str = None,\n log_level: str = 'INFO',\n **kwargs) -> None:\n super().__init__(scope, id, **kwargs)\n\n def get_s3_bucket_name(self):\n self.playbook_url = playbook_url\n if playbook_url is not None:\n splited_playbook_url = playbook_url.split(\"/\")\n s3_bucket_name = splited_playbook_url[2]\n else:\n s3_bucket_name = None\n return s3_bucket_name\n\n s3 = _s3.Bucket(\n self, \"S3SsmRunCommandBucket\",\n bucket_name=get_s3_bucket_name(self),\n removal_policy=core.RemovalPolicy.RETAIN,\n )\n\n if playbook_url is None:\n playbook_url = f's3://{s3.bucket_name}/{playbook_file_name}'\n\n lambda_ssm = _lambda.Function(\n self, \"S3TriggerHandler\",\n runtime=_lambda.Runtime.PYTHON_3_7,\n code=_lambda.Code.asset('lambda_ssm'),\n handler=\"ansible_run_command.handler\",\n environment={\n \"LOGLEVEL\": f'{log_level}',\n \"EC2_TAG_KEY\": f'{ec2_tag_key}',\n \"EC2_TAG_VALUE\": f'{ec2_tag_value}',\n \"PLAYBOOK_URL\": f'{playbook_url}',\n \"SSM_DOCUMENT_NAME\": f'{ssm_document_name}'\n }\n )\n\n lambda_ssm.add_to_role_policy(\n statement=_iam.PolicyStatement(\n effect=_iam.Effect.ALLOW,\n actions=[\n \"ssm:DescribeAssociation\",\n \"ssm:GetDeployablePatchSnapshotForInstance\",\n \"ssm:GetDocument\",\n \"ssm:DescribeDocument\",\n \"ssm:GetManifest\",\n \"ssm:GetParameter\",\n \"ssm:GetParameters\",\n \"ssm:ListAssociations\",\n \"ssm:ListInstanceAssociations\",\n \"ssm:PutInventory\",\n \"ssm:PutComplianceItems\",\n \"ssm:PutConfigurePackageResult\",\n \"ssm:SendCommand\",\n \"ssm:UpdateAssociationStatus\",\n \"ssm:UpdateInstanceAssociationStatus\",\n \"ssm:UpdateInstanceInformation\",\n \"ssmmessages:CreateControlChannel\",\n \"ssmmessages:CreateDataChannel\",\n \"ssmmessages:OpenControlChannel\",\n \"ssmmessages:OpenDataChannel\",\n \"ec2:DescribeInstances\",\n \"ec2messages:AcknowledgeMessage\",\n \"ec2messages:DeleteMessage\",\n \"ec2messages:FailMessage\",\n \"ec2messages:GetEndpoint\",\n \"ec2messages:GetMessages\",\n \"ec2messages:SendReply\",\n ],\n resources=[\"*\"],\n )\n )\n\n s3.grant_read(lambda_ssm)\n\n notification = _s3_notifications.LambdaDestination(lambda_ssm)\n\n if notification_key_filter_prefix or notification_key_filter_suffix is not None:\n if notification_key_filter_prefix is None:\n filers = _s3.NotificationKeyFilter(\n suffix=notification_key_filter_suffix,\n )\n elif notification_key_filter_suffix is None:\n filers = _s3.NotificationKeyFilter(\n prefix=notification_key_filter_prefix,\n )\n else:\n filers = _s3.NotificationKeyFilter(\n prefix=notification_key_filter_prefix,\n suffix=notification_key_filter_suffix,\n )\n\n s3.add_event_notification(_s3.EventType.OBJECT_CREATED,\n notification,\n filers,\n )\n else:\n s3.add_event_notification(_s3.EventType.OBJECT_CREATED,\n notification,\n )\n\n cloudwatch_event = _events.Rule(\n self, \"CloudWatchEvent\",\n enabled=True,\n event_pattern=_events.EventPattern(\n source=[\"aws.ec2\"],\n detail_type=[\"EC2 Instance State-change Notification\"],\n detail={\n \"state\": [\"running\"]\n }\n ),\n targets=[_events_targets.LambdaFunction(lambda_ssm)]\n )\n\n s3_bucket_path = core.CfnOutput(\n self, \"S3PlaybookPath\",\n value=f's3://{s3.bucket_name}'\n )\n\n s3_bucket_console_url = core.CfnOutput(\n self, \"S3ConsoleUrl\",\n 
value=f's3.console.aws.amazon.com/s3/buckets/{s3.bucket_name}'\n )",
"def test_15(self, test):\r\n globalConfig.test = test\r\n\r\n inputList = getInputList()\r\n if len(inputList) == 0:\r\n return test.UNCLEAR(\"Not tested. No resources found.\")\r\n\r\n constraintSet = False\r\n constrainedInputs = []\r\n for inputInstance in inputList:\r\n if inputInstance.getBlockSize() > 1:\r\n constraintSet = True\r\n constrainedInputs.append(inputInstance)\r\n\r\n if not constraintSet:\r\n return test.UNCLEAR(\"No inputs constrain by block.\")\r\n\r\n chosenInput = constrainedInputs[0]\r\n output = chosenInput.getRoutableOutputs()\r\n actions = [\r\n Action(chosenInput.id, output[0].id, 0, 0),\r\n Action(chosenInput.id, output[0].id, 0, 1),\r\n ]\r\n activation = Activation()\r\n activation.addActions(actions)\r\n try:\r\n activation.fireActivation()\r\n except NMOSTestException:\r\n return test.PASS()\r\n\r\n return test.FAIL(\"Was able to break block size routing constraint\")"
] |
[
"0.6400182",
"0.55930215",
"0.5586425",
"0.5353389",
"0.53464097",
"0.53002477",
"0.5164097",
"0.507528",
"0.4907058",
"0.48738712",
"0.48730302",
"0.48474395",
"0.48301867",
"0.4826344",
"0.4807329",
"0.47893655",
"0.4769453",
"0.476463",
"0.47354424",
"0.470779",
"0.46980646",
"0.46878025",
"0.46868223",
"0.46256196",
"0.461671",
"0.4603636",
"0.45993987",
"0.4587749",
"0.45854336",
"0.45852318"
] |
0.5934422
|
1
|
Execute SSM automation document DigitoBreakLambdaSecurityGroupTest_20200921 in rollback
|
def test_break_security_group_rollback_previous():
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def rollback(self, stage, enodes, exception):",
"def test_break_security_group_failed():",
"def test_rollback():",
"def test_blog_rollback():",
"def test_create_namespaced_deployment_config_rollback_rollback(self):\n pass",
"def test_create_namespaced_deployment_config_rollback(self):\n pass",
"def test_cron_workflow_service_terminate_cron_workflow(self):\n pass",
"def test_aws_service_api_vm_workshift_delete(self):\n pass",
"def rollback_workflow(self, execution_id):\n raise NotImplementedError",
"def rollback(self):\n pass",
"def Rollback(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def test_aws_service_api_vm_security_group_delete(self):\n pass",
"def test_break_security_group_usual_case_specify_sg():",
"def test_break_security_group_usual_case():",
"def rollback_session_after_test(self, sandboxed_session):\n yield\n sandboxed_session.rollback()",
"def test_live_migration_task_rollback(self):\n server, source_host, target_host = self._create_server()\n self._disable_target_host(target_host)\n self._stub_delete_server_during_scheduling(server)\n\n # Now start the live migration which will fail due to NoValidHost.\n body = {'os-migrateLive': {'host': None, 'block_migration': 'auto'}}\n self.api.post_server_action(server['id'], body)\n # We cannot monitor the migration from the API since it is deleted\n # when the instance is deleted so just wait for the failed instance\n # action event after the task rollback happens.\n self._wait_for_action_fail_completion(\n server, instance_actions.LIVE_MIGRATION,\n 'conductor_live_migrate_instance')\n self._assert_no_allocations(server)",
"def test_delete_deployment_run(self):\n pass",
"def test_workflows_restart(self):\n pass",
"def rollback_action(args, kwargs, was_interrupted, result=None):\n raise NotImplementedError()",
"def SetRollback(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def rollback(self):\n raise NotImplementedError",
"def test_start_interactive_workflow_k8s_failure(sample_serial_workflow_in_db):\n mocked_k8s_client = Mock()\n mocked_k8s_client.create_namespaced_deployment =\\\n Mock(side_effect=ApiException(reason='some reason'))\n with patch.multiple('reana_workflow_controller.k8s',\n current_k8s_extensions_v1beta1=mocked_k8s_client,\n current_k8s_corev1_api_client=DEFAULT):\n with pytest.raises(REANAInteractiveSessionError,\n match=r'.*Kubernetes has failed.*'):\n kwrm = KubernetesWorkflowRunManager(sample_serial_workflow_in_db)\n if len(INTERACTIVE_SESSION_TYPES):\n kwrm.start_interactive_session(INTERACTIVE_SESSION_TYPES[0])",
"def windows_execution(session, elb_name, elb_type, command_type, instance_id_list, tag_value, platform_type, username, password_parameter_name):\n document_name = 'AWS-RunPowerShellScript'\n if str(elb_name) != 'None' and str(tag_value)=='None':\n instance_id_list = []\n instance_id_list = getInstanceList(instance_id_list, elb_type, elb_name, session)\n\n logger.info(instance_id_list)\n \n if command_type == 'ENABLE':\n commands = [\n \"\"\" try\n {\n $breakglassUser = '\"\"\" + username + \"\"\"'\n $breakglassPass = (Get-SSMParameterValue -Name \"\"\" + password_parameter_name + \"\"\" -WithDecryption $True).Parameters[0].Value\n $allFeatures = Get-WindowsFeature\n $rdsGatewayInstalled = ($allFeatures | Where-Object {$_.Name -eq 'RDS-Gateway'}).Installed\n $domainServicesInstalled = ($allFeatures | Where-Object {$_.Name -eq 'AD-Domain-Services'}).Installed\n \n # If the instance is a domain controller, the breakglass script should not run.\n if (-Not $domainServicesInstalled)\n {\n # Add a breakglass user and give it Administrator level access (PowerShell user management cmdlets are not used for Windows backward compatability).\n Write-Output 'Creating breakglass user and adding it to Administrators group'\n net user $breakglassUser $breakglassPass /add\n net localgroup 'Administrators' $breakglassUser /add\n \n # If the instance is also an RDS Gateway, configure it to allow access to the breakglass user\n if ($rdsGatewayInstalled)\n {\n Write-Output 'RDS Gateway role detected. Configuring RDS Gateway to allow breakglass access.'\n Import-Module RemoteDesktopServices\n New-Item -Path RDS:\\GatewayServer\\CAP -Name $breakglassUser -UserGroups 'Administrators@BUILTIN' -AuthMethod 1\n New-Item -Path RDS:\\GatewayServer\\RAP -Name $breakglassUser -UserGroups 'Administrators@BUILTIN' -ComputerGroupType 2\n }\n }\n else\n {\n Write-Output 'Domain controller role detected. Breakglass script will not execute'\n }\n }\n \n # If anything fails in the commands, SSM run command should fail too\n catch\n {\n Write-Output 'Exception block reached'\n Write-Output $_.Exception.Message\n Exit -1\n }\"\"\"\n ]\n comment = 'Break Glass Command - Enable Windows Local Administrator'\n \n elif command_type == 'DISABLE':\n commands = [\n \"\"\" try\n {\n $breakglassUser = '\"\"\" + username + \"\"\"'\n $allFeatures = Get-WindowsFeature\n $rdsGatewayInstalled = ($allFeatures | Where-Object {$_.Name -eq 'RDS-Gateway'}).Installed\n $domainServicesInstalled = ($allFeatures | Where-Object {$_.Name -eq 'AD-Domain-Services'}).Installed\n \n # If the instance is a domain controller, the breakglass script should not run.\n if (-Not $domainServicesInstalled)\n {\n # If the instance is an RDS Gateway, remove the breakglass configurations\n if ($rdsGatewayInstalled)\n {\n Write-Output 'RDS Gateway role detected. Removing RDS Gateway configurations that allowed breakglass access.'\n Import-Module RemoteDesktopServices\n Remove-Item -Path RDS:\\GatewayServer\\CAP\\$breakglassUser -Recurse\n Remove-Item -Path RDS:\\GatewayServer\\RAP\\$breakglassUser -Recurse\n }\n \n # Remove the breakglass user (PowerShell user management cmdlets not used for backward compatability).\n Write-Output 'Removing breakglass user.'\n net localgroup 'Administrators' $breakglassUser /delete\n net user $breakglassUser /delete\n }\n else\n {\n Write-Output 'Domain controller role detected. 
Breakglass script will not execute'\n }\n }\n \n # If anything fails in the commands, SSM run command should fail too\n catch\n {\n Write-Output 'Exception block reached'\n Write-Output $_.Exception.Message\n Exit -1\n }\n \"\"\"\n ]\n comment = 'Break Glass Command - Disable Windows Local Administrator'\n else:\n raise Exception(\"Called with invalid command_type\")\n \n if str(tag_value) == 'None':\n logger.info('============RunCommand Using Instances')\n return send_instance_run_command(session, document_name, commands, instance_id_list, comment)\n else:\n logger.info('============RunCommand Using Target Value Pair or All instance with the tag passed')\n return send_tag_run_command(session, document_name, commands, 'tag:Name', tag_value, comment)",
"def _Rollback(failure, stopping_services, starting_services):\n def _HandleRollbackError(failure):\n \"\"\"Ignores rollback error.\"\"\"\n exc_info = (failure.type, failure.value, failure.tb)\n logging.error('Failed rolling back configuration',\n exc_info=exc_info)\n return True\n\n deferred = self.StopServices(starting_services)\n deferred.addCallback(lambda _: self.StartServices(stopping_services))\n deferred.addErrback(_HandleRollbackError)\n # Ignore result for rollback, and return the deploy failure.\n deferred.addBoth(lambda _: failure)\n return deferred",
"def before_scenario(context, scenario):\n context.resource_manager = contextlib.ExitStack()",
"def rollback(migrator, database, fake=False, **kwargs):\n pass",
"def test_stop_with_permission(self):\n self.create_user_with_role(\n self.user.name, self.user.email, self.user.password, Role.tester)\n self.create_forktest(\"own-fork-commit\", TestPlatform.linux, regression_tests=[2])\n with self.app.test_client() as c:\n response = c.post(\n '/account/login', data=self.create_login_form_data(self.user.email, self.user.password))\n response = c.get('/test/stop_test/3')\n test = Test.query.filter(Test.id == 3).first()\n self.assertEqual(test.finished, True)",
"def test_terminate_run(self):\n pass",
"def test_cancel_pending_payment(self):\n pass",
"def test_migration_task_rollback(self):\n server, source_host, target_host = self._create_server()\n self._disable_target_host(target_host)\n self._stub_delete_server_during_scheduling(server)\n\n # Now start the cold migration which will fail due to NoValidHost.\n self.api.post_server_action(server['id'], {'migrate': None},\n check_response_status=[202])\n # We cannot monitor the migration from the API since it is deleted\n # when the instance is deleted so just wait for the failed instance\n # action event after the task rollback happens.\n # Note that we get InstanceNotFound rather than NoValidHost because\n # the NoValidHost handler in ComputeTaskManager._cold_migrate calls\n # _set_vm_state_and_notify which raises InstanceNotFound and masks\n # the NoValidHost error.\n self._assert_resize_migrate_action_fail(\n server, instance_actions.MIGRATE, 'InstanceNotFound')\n self._assert_no_allocations(server)"
] |
[
"0.64744663",
"0.64101356",
"0.6137421",
"0.57786536",
"0.5720763",
"0.55458915",
"0.55042726",
"0.5476576",
"0.543377",
"0.5422985",
"0.53976566",
"0.5366955",
"0.53626645",
"0.5321181",
"0.526184",
"0.5221793",
"0.5215912",
"0.51713467",
"0.5148279",
"0.51433945",
"0.508609",
"0.508307",
"0.50761807",
"0.5065377",
"0.502537",
"0.50062585",
"0.5001134",
"0.49981058",
"0.4995379",
"0.49950394"
] |
0.74641716
|
0
|
Execute SSM automation document DigitoBreakLambdaSecurityGroupTest_20200921 to test failure case
|
def test_break_security_group_failed():
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_break_security_group_usual_case_specify_sg():",
"def test_break_security_group_usual_case():",
"def test_break_security_group_rollback_previous():",
"def test_launch_failures_hw(self):\n self.test_launch_failures()",
"def test_aws_service_api_vm_security_group_delete(self):\n pass",
"def test_script(caplog: pytest.LogCaptureFixture, config: dict, error: str) -> None:\n with pytest.raises(vol.Invalid, match=error):\n cv.script_action(config)",
"def test_08_security_group(self):\n # Validate the following:\n # 1. Create a project\n # 2. Assign some security groups to that project\n # 3. Verify the security groups can only be assigned to VM belonging\n # to that project.\n\n security_group = SecurityGroup.create(\n self.apiclient,\n self.services[\"security_group\"],\n projectid=self.project.id\n )\n self.debug(\"Created security group with ID: %s\" % security_group.id)\n # Default Security group should not have any ingress rule\n sercurity_groups = SecurityGroup.list(\n self.apiclient,\n projectid=self.project.id\n )\n self.assertEqual(\n isinstance(sercurity_groups, list),\n True,\n \"Check for list security groups response\"\n )\n\n self.assertNotEqual(\n len(sercurity_groups),\n 0,\n \"Check List Security groups response\"\n )\n # Authorize Security group to SSH to VM\n ingress_rule = security_group.authorize(\n self.apiclient,\n self.services[\"security_group\"],\n projectid=self.project.id\n )\n self.assertEqual(\n isinstance(ingress_rule, dict),\n True,\n \"Check ingress rule created properly\"\n )\n\n self.debug(\n \"Authorizing ingress rule for sec group ID: %s for ssh access\"\n % security_group.id)\n self.virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n serviceofferingid=self.service_offering.id,\n securitygroupids=[security_group.id],\n projectid=self.project.id\n )\n self.debug(\"Deployed VM (ID: %s) in project: %s\" % (\n self.virtual_machine.id,\n self.project.id\n ))\n self.assertEqual(\n self.virtual_machine.state,\n 'Running',\n \"VM state should be running after deployment\"\n )\n # Deploy another VM with same security group outside the project\n self.debug(\n \"Deploying VM with security group: %s outside project:%s\" % (\n security_group.id,\n self.project.id\n ))\n with self.assertRaises(Exception):\n VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n serviceofferingid=self.service_offering.id,\n accountid=self.account.name,\n domainid=self.account.domainid,\n securitygroupids=[security_group.id],\n )\n return",
"def test_pytest_bdd_scenario_with_failed_step(self):\n self.testdir.makefile(\n \".feature\",\n simple=_SIMPLE_SCENARIO,\n )\n py_file = self.testdir.makepyfile(\n \"\"\"\n from pytest_bdd import scenario, given, then, when\n\n @scenario(\"simple.feature\", \"Simple scenario\")\n def test_simple():\n pass\n\n BAR = None\n\n @given(\"I have a bar\")\n def bar():\n global BAR\n BAR = 1\n\n @when(\"I eat it\")\n def eat():\n global BAR\n BAR -= 1\n\n @then(\"I don't have a bar\")\n def check():\n assert BAR == -1\n \"\"\"\n )\n file_name = os.path.basename(py_file.strpath)\n self.inline_run(\"--ddtrace\", file_name)\n spans = self.pop_spans()\n\n assert len(spans) == 7\n assert spans[3].name == \"then\"\n assert spans[3].get_tag(ERROR_MSG)",
"def glue_failure_handler(event, context) -> None:\n\n # get lambda variables\n environment_level = os.environ[\"ENVIRONMENT\"]\n sns_topic_name = os.environ[\"PROCESSING_NOTIFICATION_SNS\"]\n\n # fetches the response in dict received from eventbridge rule when 'event pattern' matches\n # here failing a glue job is an event pattern. this is already stated in event rule 'glue_failure_rule'\n # the output from below will be like --> {\"version\": \"0\", \"id\": \"26c9301e-6aec-872d-4739-1aa6ebc66853\", \"detail-type\": \"Glue Job State Change\", \"source\": \"aws.glue\", \"account\": \"795038802291\", \"time\": \"2021-03-24T07:37:07Z\", \"region\": \"us-east-1\", \"resources\": [], \"detail\": {\"jobName\": \"rahul_create_warm_pool\", \"severity\": \"ERROR\", \"state\": \"FAILED\", \"jobRunId\": \"jr_bdb3f0d56b0f842c7337be54927deb6ddb78a1f37956d9817b0f11710602139f\", \"message\": \"Command failed with exit code 1\"}}\n event_response = json.dumps(event)\n\n # calling GlueFailureEvent class\n glue_failure_event = GlueFailureEvent(event_response)\n\n # get failure time (event occurrence)\n event_occurrence_time = glue_failure_event.time # returns ISO UTC time (ex: '2021-03-24T15:52:37Z')\n # convert ISO UTC time to regular UTC time and then to ET local time\n converted_time = event_occurrence_time.replace(\"T\",\" \").replace(\"Z\",\".0\")\n _converted_time = datetime.strptime(converted_time, \"%Y-%m-%d %H:%M:%S.%f\")\n local_time = convert_utc_to_local(_converted_time, DEFAULT_TIMEZONE)\n\n if glue_failure_event.detail['jobRunId'].endswith(\"attempt_1\"):\n # we dont want get notified for attempt1 run failure because this will always be failure in real time.\n # so its an attempt to avoid 2 failure alerts for each failed glue job event\n return None\n # form subject, message required for publishing SNS\n subject = f\"{environment_level}: Glue job Failure\"\n message = (\n f\"Glue job '{glue_failure_event.detail['jobName']}' has {glue_failure_event.detail['state']} at {local_time}. Below are the details:{os.linesep}{os.linesep}\"\n f\"Error : {glue_failure_event.detail['message']}{os.linesep}\"\n f\"{os.linesep}\"\n f\"JobRunId : {glue_failure_event.detail['jobRunId']}\"\n )\n\n send_ahub_email_notification(sns_topic_name, subject, message)",
"def test_stop_with_permission(self):\n self.create_user_with_role(\n self.user.name, self.user.email, self.user.password, Role.tester)\n self.create_forktest(\"own-fork-commit\", TestPlatform.linux, regression_tests=[2])\n with self.app.test_client() as c:\n response = c.post(\n '/account/login', data=self.create_login_form_data(self.user.email, self.user.password))\n response = c.get('/test/stop_test/3')\n test = Test.query.filter(Test.id == 3).first()\n self.assertEqual(test.finished, True)",
"def test_fail(make_runner: Callable[..., TargetFunctionRunner]) -> None:\n runner = make_runner(target_failed, use_instances=True)\n run_info = TrialInfo(config=2, instance=\"test\", seed=0, budget=0.0)\n\n runner.submit_trial(run_info)\n run_info, run_value = next(runner.iter_results())\n\n # Make sure the traceback message is included\n assert \"traceback\" in run_value.additional_info\n assert \"RuntimeError\" in run_value.additional_info[\"traceback\"]",
"def test_invalid(self):\n args = [SIMPLE_TEMPLATE, SIMPLE_CANDIDATE_INVALID]\n result = self.runner.invoke(main, args)\n self.assertEqual(-1, result.exit_code)",
"def test_same_crash_same_security(self):\n for index, t in enumerate(self.testcases):\n t.security_flag = True\n t.crash_state = 'abc\\ndef'\n t.timestamp = datetime.datetime.utcfromtimestamp(index)\n t.put()\n\n grouper.group_testcases()\n\n testcases = []\n for testcase_id in data_handler.get_open_testcase_id_iterator():\n testcases.append(data_handler.get_testcase_by_id(testcase_id))\n\n self.assertEqual(len(testcases), 1)\n self.assertEqual(testcases[0].group_id, 0)\n self.assertTrue(testcases[0].is_leader)",
"def windows_execution(session, elb_name, elb_type, command_type, instance_id_list, tag_value, platform_type, username, password_parameter_name):\n document_name = 'AWS-RunPowerShellScript'\n if str(elb_name) != 'None' and str(tag_value)=='None':\n instance_id_list = []\n instance_id_list = getInstanceList(instance_id_list, elb_type, elb_name, session)\n\n logger.info(instance_id_list)\n \n if command_type == 'ENABLE':\n commands = [\n \"\"\" try\n {\n $breakglassUser = '\"\"\" + username + \"\"\"'\n $breakglassPass = (Get-SSMParameterValue -Name \"\"\" + password_parameter_name + \"\"\" -WithDecryption $True).Parameters[0].Value\n $allFeatures = Get-WindowsFeature\n $rdsGatewayInstalled = ($allFeatures | Where-Object {$_.Name -eq 'RDS-Gateway'}).Installed\n $domainServicesInstalled = ($allFeatures | Where-Object {$_.Name -eq 'AD-Domain-Services'}).Installed\n \n # If the instance is a domain controller, the breakglass script should not run.\n if (-Not $domainServicesInstalled)\n {\n # Add a breakglass user and give it Administrator level access (PowerShell user management cmdlets are not used for Windows backward compatability).\n Write-Output 'Creating breakglass user and adding it to Administrators group'\n net user $breakglassUser $breakglassPass /add\n net localgroup 'Administrators' $breakglassUser /add\n \n # If the instance is also an RDS Gateway, configure it to allow access to the breakglass user\n if ($rdsGatewayInstalled)\n {\n Write-Output 'RDS Gateway role detected. Configuring RDS Gateway to allow breakglass access.'\n Import-Module RemoteDesktopServices\n New-Item -Path RDS:\\GatewayServer\\CAP -Name $breakglassUser -UserGroups 'Administrators@BUILTIN' -AuthMethod 1\n New-Item -Path RDS:\\GatewayServer\\RAP -Name $breakglassUser -UserGroups 'Administrators@BUILTIN' -ComputerGroupType 2\n }\n }\n else\n {\n Write-Output 'Domain controller role detected. Breakglass script will not execute'\n }\n }\n \n # If anything fails in the commands, SSM run command should fail too\n catch\n {\n Write-Output 'Exception block reached'\n Write-Output $_.Exception.Message\n Exit -1\n }\"\"\"\n ]\n comment = 'Break Glass Command - Enable Windows Local Administrator'\n \n elif command_type == 'DISABLE':\n commands = [\n \"\"\" try\n {\n $breakglassUser = '\"\"\" + username + \"\"\"'\n $allFeatures = Get-WindowsFeature\n $rdsGatewayInstalled = ($allFeatures | Where-Object {$_.Name -eq 'RDS-Gateway'}).Installed\n $domainServicesInstalled = ($allFeatures | Where-Object {$_.Name -eq 'AD-Domain-Services'}).Installed\n \n # If the instance is a domain controller, the breakglass script should not run.\n if (-Not $domainServicesInstalled)\n {\n # If the instance is an RDS Gateway, remove the breakglass configurations\n if ($rdsGatewayInstalled)\n {\n Write-Output 'RDS Gateway role detected. Removing RDS Gateway configurations that allowed breakglass access.'\n Import-Module RemoteDesktopServices\n Remove-Item -Path RDS:\\GatewayServer\\CAP\\$breakglassUser -Recurse\n Remove-Item -Path RDS:\\GatewayServer\\RAP\\$breakglassUser -Recurse\n }\n \n # Remove the breakglass user (PowerShell user management cmdlets not used for backward compatability).\n Write-Output 'Removing breakglass user.'\n net localgroup 'Administrators' $breakglassUser /delete\n net user $breakglassUser /delete\n }\n else\n {\n Write-Output 'Domain controller role detected. 
Breakglass script will not execute'\n }\n }\n \n # If anything fails in the commands, SSM run command should fail too\n catch\n {\n Write-Output 'Exception block reached'\n Write-Output $_.Exception.Message\n Exit -1\n }\n \"\"\"\n ]\n comment = 'Break Glass Command - Disable Windows Local Administrator'\n else:\n raise Exception(\"Called with invalid command_type\")\n \n if str(tag_value) == 'None':\n logger.info('============RunCommand Using Instances')\n return send_instance_run_command(session, document_name, commands, instance_id_list, comment)\n else:\n logger.info('============RunCommand Using Target Value Pair or All instance with the tag passed')\n return send_tag_run_command(session, document_name, commands, 'tag:Name', tag_value, comment)",
"def failed_with_message(capsys):\n __tracebackhide__ = True\n\n def _failed_with_message(func, message, *args, **kwargs):\n __tracebackhide__ = True\n with pytest.raises(SystemExit) as error:\n func(*args, **kwargs)\n assert error.type == SystemExit\n assert error.value.code == 1\n if message:\n assert capsys.readouterr().err == message\n\n return _failed_with_message",
"def test_xfailed_but_passed():\n pass",
"def _handle_failure(self, proc, test_case):\n if proc.returncode != 0:\n print('ERROR: Test execution failed: {}'.format(test_case.get_name()))\n stdout, stderr = proc.communicate()\n raise TestCaseFailure('Test case {} failed. stdout: {}, stderr: {}, '\n 'return code: {}.'.format(test_case.get_name(),\n stdout, stderr,\n proc.returncode))",
"def testsecurity(self,id=0):\n return 'failed test security'",
"def test_api_video_student_stop_live(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [\"student\"]\n\n response = self.client.post(\n f\"/api/videos/{video.id}/stop-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 403)\n content = json.loads(response.content)\n self.assertEqual(\n content, {\"detail\": \"You do not have permission to perform this action.\"}\n )",
"def test_on_record_error(sdc_builder, sdc_executor, stage_attributes):\n\n DATA = {'name': 'Al Gore', 'birthplace': 'Washington, D.C.'}\n on_record_error = stage_attributes['on_record_error']\n\n pipeline_builder = sdc_builder.get_pipeline_builder()\n\n dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.data_format = 'JSON'\n dev_raw_data_source.raw_data = json.dumps(DATA)\n dev_raw_data_source.stop_after_first_batch = True\n\n field_replacer = pipeline_builder.add_stage('Field Replacer')\n field_replacer.set_attributes(replacement_rules=[{'setToNull': False, 'fields': '/age'}],\n field_does_not_exist='TO_ERROR',\n **stage_attributes)\n\n wiretap = pipeline_builder.add_wiretap()\n\n dev_raw_data_source >> field_replacer >> wiretap.destination\n\n pipeline = pipeline_builder.build()\n\n sdc_executor.add_pipeline(pipeline)\n\n if on_record_error == 'DISCARD':\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n assert not wiretap.error_records and not wiretap.output_records\n\n elif on_record_error == 'STOP_PIPELINE':\n try:\n sdc_executor.start_pipeline(pipeline).wait_for_status('RUN_ERROR')\n\n assert False, 'An exception should have been thrown'\n except RunError:\n\n assert not wiretap.error_records and not wiretap.output_records\n\n elif on_record_error == 'TO_ERROR':\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n record = wiretap.error_records[0]\n assert record.field == DATA and not wiretap.output_records",
"def test_start_interactive_workflow_k8s_failure(sample_serial_workflow_in_db):\n mocked_k8s_client = Mock()\n mocked_k8s_client.create_namespaced_deployment =\\\n Mock(side_effect=ApiException(reason='some reason'))\n with patch.multiple('reana_workflow_controller.k8s',\n current_k8s_extensions_v1beta1=mocked_k8s_client,\n current_k8s_corev1_api_client=DEFAULT):\n with pytest.raises(REANAInteractiveSessionError,\n match=r'.*Kubernetes has failed.*'):\n kwrm = KubernetesWorkflowRunManager(sample_serial_workflow_in_db)\n if len(INTERACTIVE_SESSION_TYPES):\n kwrm.start_interactive_session(INTERACTIVE_SESSION_TYPES[0])",
"def linux_execution(session, elb_name, elb_type, command_type, instance_id_list, tag_value, platform_type, username, password_parameter_name): \n \n document_name = 'AWS-RunShellScript'\n if str(elb_name) != 'None' and str(tag_value)=='None':\n instance_id_list = []\n instance_id_list = getInstanceList(instance_id_list, elb_type, elb_name, session)\n \n logger.info(instance_id_list)\n \n comment = 'Break glass for Linux Hosts'\n password = \"$(aws ssm get-parameters --names \"+ password_parameter_name + \" --with-decryption --query 'Parameters[*].{Value:Value}' --output text --region ap-southeast-2)\"\n \n if str(command_type) == 'ENABLE':\n commands = [\n # Add a new user, create their homedir if it doesn't exist\n \"useradd --create-home {username}\".format(username=username),\n # Change the password for a user\n # We shouldn't the password in clear text, hence using cli to get from parameter store\n \"echo '{username}':{password} | chpasswd\".format(username=username, password=password),\n # Below provides sudo access\n \"echo '{username} ALL=(ALL) NOPASSWD:ALL' > /etc/sudoers.d/999-break-glass-{username}\".format(username=username)\n ]\n elif str(command_type) == 'DISABLE':\n commands = [\n \"killall -KILL -u {username}\".format(username=username),\n \"userdel -r {username}\".format(username=username),\n # If we want to clean up that user's homedir, uncomment the following\n \"[ -d /home/{username} ] && rm -rf /home/{username}\".format(username=username),\n # Remove sudo access\n \"rm -rf /etc/sudoers.d/999-break-glass-{username}\".format(username=username)\n ]\n else:\n raise Exception(\"Called with invalid command_type\")\n \n if str(tag_value) == 'None':\n logger.info('============RunCommand Using Instances')\n return send_instance_run_command(session, document_name, commands, instance_id_list, comment)\n else:\n logger.info('============RunCommand Using Target Value Pair or All instance with the tag passed')\n return send_tag_run_command(session, document_name, commands, 'tag:Name', tag_value, comment)",
"def test_same_crash_different_security(self):\n self.testcases[0].security_flag = False\n self.testcases[0].crash_state = 'abc\\ndef'\n self.testcases[1].security_flag = True\n self.testcases[1].crash_state = 'abc\\ndef'\n\n for t in self.testcases:\n t.put()\n\n grouper.group_testcases()\n\n for index, t in enumerate(self.testcases):\n self.testcases[index] = data_handler.get_testcase_by_id(t.key.id())\n self.assertEqual(self.testcases[index].group_id, 0)\n self.assertTrue(self.testcases[index].is_leader)",
"def test_second_step_strict(self):\n with self.assertRaises(Exception):\n self.run_step('S02-errors.py', allow_failure=False)",
"def test_runSignaled(self):\n builder = BookBuilder()\n exc = self.assertRaises(\n CommandFailed, builder.run,\n [sys.executable, '-c',\n 'import sys; print \"hi\"; sys.stdout.flush(); '\n 'import os; os.kill(os.getpid(), 9)'])\n self.assertEquals(exc.exitSignal, 9)\n self.assertEquals(exc.exitStatus, None)\n self.assertEquals(exc.output, \"hi\\n\")",
"def testFailure():\n run(\"chariot-me\") #Start management-engine without initial deplflag\n egress()",
"def test_different_crash_same_security(self):\n self.testcases[0].security_flag = True\n self.testcases[0].crash_state = 'abc\\ndef'\n self.testcases[1].security_flag = True\n self.testcases[1].crash_state = 'uvw\\nxyz'\n\n for t in self.testcases:\n t.put()\n\n grouper.group_testcases()\n\n for index, t in enumerate(self.testcases):\n self.testcases[index] = data_handler.get_testcase_by_id(t.key.id())\n self.assertEqual(self.testcases[index].group_id, 0)\n self.assertTrue(self.testcases[index].is_leader)",
"def test_run_with_invalid_param(self, app, caplog):\n (bucket, source_prefix, dest_prefix) = get_s3_refs(app)\n datestamp = 'wrong!#$&'\n\n caplog.set_level(logging.INFO)\n with capture_app_logs(app):\n with mock_s3(app, bucket=bucket) as m3:\n m3.Object(bucket, f'{source_prefix}/2019/08/28/12345678_00012_1.pdf').put(Body=b'a note attachment')\n\n response = MigrateSisAdvisingNoteAttachments().run(datestamp=datestamp)\n\n assert 'Will copy files from /sis-data/sis-sftp/incremental/advising-notes/attachment-files/wrong!#$&.' in caplog.text\n assert 'Copied 0 attachments to the destination folder.' in caplog.text\n assert response == (\n 'SIS advising note attachment migration complete for sis-data/sis-sftp/incremental/advising-notes/attachment-files/wrong!#$&.'\n )\n assert not object_exists(m3, bucket, f'{dest_prefix}/12345678/12345678_00012_1.pdf')",
"def stopTest(self, test):",
"def test_stop_plan_run_ok(test_app):\n\n bamboo_api_client = test_app.get('bamboo_api_tests').bamboo_api_client\n plan_build_key = test_app.get('plan_keys', {}).get('build_key', '')\n\n stop_plan = bamboo_api_client.stop_build(plan_build_key=plan_build_key)\n\n # Check if the API got a HTTP 302/200 response code\n assert stop_plan.get('status_code') == 302, stop_plan"
] |
[
"0.6433916",
"0.625285",
"0.61371636",
"0.5679316",
"0.5658691",
"0.5625501",
"0.5586798",
"0.5442719",
"0.543764",
"0.5374043",
"0.5356653",
"0.53561103",
"0.53547144",
"0.5347069",
"0.53156495",
"0.53008205",
"0.53002393",
"0.52783585",
"0.5273181",
"0.52685493",
"0.5261219",
"0.52322245",
"0.5215776",
"0.51518035",
"0.5140753",
"0.513715",
"0.51360524",
"0.5126595",
"0.51243347",
"0.5124233"
] |
0.73794866
|
0
|
Return the smearing (in ms) in each channel at the specified DM
|
def chan_smear(self, DM):
try:
DM = where(DM-cDM==0.0, cDM+self.dDM/2.0, DM)
except TypeError:
if (DM-cDM==0.0): DM = cDM+self.dDM/2.0
return dm_smear(DM, self.obs.chanwidth, self.obs.f_ctr, self.obs.cDM)
|
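Editorial illustration only: the chan_smear record above calls a dm_smear helper that is defined elsewhere in its source module (cDM and self.dDM likewise come from the surrounding class, so the snippet is not self-contained as shown). A minimal sketch of the per-channel dispersion smearing it appears to compute, reusing the 0.0001205*f_ctr**3 constant that the other records in this group use (numerically the same as the 8.3e6 * DM * BW / f^3 form in the _dmsmear negatives), could look like this; the function and parameter names here are assumptions, not the original API:

def dm_smear_ms(dDM, BW, f_ctr):
    # Smearing (ms) produced by a DM offset dDM (pc cm^-3) across a bandwidth
    # BW (MHz) centred at f_ctr (MHz).  1000 / 0.0001205 is roughly 8.3e6,
    # so this matches the 8.3e6 * dDM * BW / f^3 scaling seen elsewhere.
    return 1000.0 * abs(dDM) * BW / (0.0001205 * f_ctr**3)

# Example (illustrative numbers): a 0.39 MHz channel at 1400 MHz with a
# 1 pc cm^-3 DM offset smears by dm_smear_ms(1.0, 0.39, 1400.0) ~= 1.18e-3 ms.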
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def total_smear(self, DM):\n return sqrt((1000.0*self.obs.dt)**2.0 +\n (1000.0*self.obs.dt*self.downsamp)**2.0 +\n self.BW_smearing**2.0 +\n self.sub_smearing**2.0 +\n self.chan_smear(DM)**2.0)",
"def DM_for_smearfact(self, smearfact):\n other_smear = sqrt((1000.0*self.obs.dt)**2.0 +\n (1000.0*self.obs.dt*self.downsamp)**2.0 +\n self.BW_smearing**2.0 +\n self.sub_smearing**2.0)\n return smearfact*0.001*other_smear/self.obs.chanwidth*0.0001205*self.obs.f_ctr**3.0 + self.obs.cDM",
"def DM_for_newparams(self, dDM, downsamp):\n other_smear = sqrt((1000.0*self.obs.dt)**2.0 +\n (1000.0*self.obs.dt*downsamp)**2.0 +\n BW_smear(dDM, self.obs.BW, self.obs.f_ctr)**2.0 +\n self.sub_smearing**2.0)\n return 0.001*other_smear/self.obs.chanwidth*0.0001205*self.obs.f_ctr**3.0",
"def drillTime(matID, thickness_mm, W, FWHM_mm):\n return thickness_mm / drillSpeed(matID, W, FWHM_mm)",
"def calculate_msd(distance, set_idx_p, set_idx_m, show=False):\n\n p2p, p2m = [], []\n for src in distance.keys():\n if src in set_idx_p:\n tgt2dist = distance[src]\n for tgt in tgt2dist.keys():\n if tgt in set_idx_p:\n if tgt > src:\n p2p.append(tgt2dist[tgt])\n elif tgt in set_idx_m:\n if tgt > src:\n p2m.append(tgt2dist[tgt])\n else:\n continue\n elif src in set_idx_m:\n tgt2dist = distance[src]\n for tgt in tgt2dist.keys():\n if tgt in set_idx_m:\n if tgt > src:\n p2p.append(tgt2dist[tgt])\n elif tgt in set_idx_p:\n if tgt > src:\n p2m.append(tgt2dist[tgt])\n else:\n continue\n else:\n continue\n\n msd, nmsd = np.mean([p**2 for p in p2p]), np.mean([p**2 for p in p2m])\n\n if show:\n print(\"MSD=%.2f, nMSD=%.2f\"%(\n msd, nmsd))\n\n return msd, msd/nmsd",
"def _dmsmear(self, psr):\n return 8.3E6 * psr.dm * self.bw_chan / math.pow(self.freq, 3.0)",
"def distance_YMW16(self, source, DM):\n\n if not isinstance(DM, astropy.units.quantity.Quantity):\n # assume DM unit\n DM=DM*u.pc/u.cm**3\n if (len(DM.shape)>0 and DM.value.any() <= 0) or (len(DM.shape)==0 and DM.value < 0):\n raise ValueError('DM must be > 0')\n if not isinstance(source, astropy.coordinates.sky_coordinate.SkyCoord):\n if isinstance(source,str):\n # assume .par file\n source=parfile2SkyCoord(source)\n else:\n raise TypeError('Do not know how to interpret an object of type %s' % source.__class__)\n source=source.galactic\n\n if len(source.l.shape)==0:\n \n results=ymw16.dmdtau_c(source.l.value,\n source.b.value, \n DM.to(u.pc/u.cm**3).value,\n 1,\n self.datadir)\n distance=results*u.pc\n return distance,None\n else:\n distance=np.zeros_like(source.l.value)\n it = np.nditer(source.l, flags=['multi_index'])\n dm=DM.to(u.pc/u.cm**3).value\n if not (len(dm.shape)==0 or dm.shape==source.l.shape):\n raise IndexError('Shape of DM must be scalar or the same as shape of coordinates')\n while not it.finished:\n if len(dm.shape)==0:\n dm_touse=dm\n else:\n dm_touse=dm[it.multi_index]\n results=ymw16.dmdtau_c(source[it.multi_index].l.value,\n source[it.multi_index].b.value, \n dm_touse,\n 1,\n self.datadir)\n distance[it.multi_index]=results\n it.iternext()\n return distance*u.pc,None",
"def SM2m(sm):\n return sm * 1609.344",
"def dms(self):\n return hp2dms(self.hp_angle)",
"def MSD(df, conversion = \"x\"):\n #conversion from pixels to micrometers\n if conversion == \"y\":\n df = df/1200*633\n else:\n df = df/1600*844\n msd = []\n for i in range(len(df)):\n #computes the msd for the x or y coordinates between the different frames\n msd.append(tidynamics.msd(df.T[i]))\n\n msd = pd.DataFrame(msd)\n\n return msd",
"def estimate_bpm(D):\n if len(D) < 2*ignore:\n return 0\n else:\n return 1/np.mean(np.diff(D))*60",
"def bruteForce_MC(N,M):\n hewlist = np.zeros(M)\n for i in range(M):\n x = createDist(N)\n x = np.abs(x-np.mean(x))\n x.sort()\n hewlist[i] = np.median(x)*2.\n return np.mean(hewlist), np.std(hewlist)",
"def drillSpeed(matID, W, FWHM_mm):\n vaporH = { # kJ/mol\n # spec heat from room temperature to melting + latent heat of fusion + spec heat from melting to boiling + latent heat of vaporization\n # refer https://webbook.nist.gov/chemistry/ for heat capacity, this tool also has some data in specificHeatParams\n 'Cu': 29.7 + 13.1 + 48.8 + 300,\n 'Fe': 49.5 + 13.8 + 60.9 + 340,\n 'W' : 118.4 + 46.9 + 82.5 + 824,\n 'Mo': 89.9 + 37.5 + 71.9 + 598,\n 'Al': 17.98 + 10.7 + 0.03175*1857 + 294\n }\n if matID not in vaporH.keys():\n raise ValueError(f'No vaporization data for {matID}: available in {vaporH.keys()}')\n \n mol_mmD = 2 * np.pi * (FWHM_mm/2.355)**2 / 1000 * defaultDensity(matID) / molarMass(matID)\n return W / (mol_mmD * vaporH[matID] * 1000)",
"def _icmf(self, ms):\n return self._pot.a * numpy.sqrt(ms) / (1 - numpy.sqrt(ms))",
"def getDM(self):\n return self.subintheader['DM']\n #if self.params is None:\n # return\n #return self.params.getDM()",
"def RMSD(ccdata1, ccdata2):\n natom = ccdata1.natom\n rmsd = 0.0\n maxdiff = 0.0\n for i in range(natom):\n diff = norm(ccdata1.atomcoords[0][i] - ccdata2.atomcoords[0][i])\n rmsd += diff\n if diff > maxdiff:\n maxdiff = diff\n\n rmsd /= natom\n\n return rmsd, maxdiff",
"def calculate_msd(self, ensemble=False):\n\n print('Calculating MSD...', end='', flush=True)\n start = timer.time()\n self.msd = timeseries.msd(self.z_interpolated.T[..., np.newaxis], 0, ensemble=ensemble, nt=self.nt).T\n print('Done in %.3f seconds' % (timer.time() - start))",
"def get_MSD(block1, block2):\n #print(block1.shape)\n #print(block2.shape)\n return sum(sum(abs(block1 - block2) ** 2))",
"def dd_plan(centrefreq, bandwidth, nfreqchan, timeres, lowDM, highDM, min_DM_step=0.02):\n\n DD_plan_array = []\n freqres = bandwidth / float(nfreqchan)\n previous_DM = lowDM\n\n #number of time samples smeared over before moving to next D_dm\n smear_fact = 3.\n\n #Loop until you've made a hit your range max\n D_DM = 0.\n downsample = 1\n while D_DM < round(highDM, 2):\n #calculate the DM where the current time resolution equals the\n #dispersion in a frequency channel (a bit of an overkill)\n\n #Dm smear over a frequency channel\n dm_smear = previous_DM * freqres * 8.3 * 10.**6 / centrefreq**3\n total_smear = math.sqrt(timeres**2 +\n dm_smear**2)\n\n\n D_DM = smear_fact * timeres * centrefreq**3 /\\\n (8.3 * 10.**6 * freqres)\n\n #difference in DM that will double the effective width (eq 6.4 of pulsar handbook)\n #TODO make this more robust\n #DM_step = math.sqrt( (2.*timeres)**2 - timeres**2 )/\\\n # (8.3 * 10**6 * bandwidth / centrefreq**3)\n DM_step = smear_fact * total_smear * centrefreq**3 /\\\n (8.3 * 10.**6 * 0.5 * bandwidth)\n\n\n #round to nearest 0.01\n DM_step = round(DM_step, 2)\n if DM_step < min_DM_step:\n #set DM to 0.01 as a zero DM doesn't make sense\n DM_step = min_DM_step\n\n\n if D_DM > highDM:\n #last one so range from to max\n D_DM = highDM\n #range from last to new\n D_DM = round(D_DM, 2)\n nDM_step = int((D_DM - previous_DM) / DM_step)\n if D_DM > lowDM:\n DD_plan_array.append([ previous_DM, D_DM, DM_step, nDM_step, timeres, downsample ])\n previous_DM = D_DM\n\n #Double time res to account for incoherent dedispersion\n timeres *= 2.\n downsample *= 2\n\n return DD_plan_array",
"def get_vrms(self, ch: int) -> float:\n cmd = \":measure:vrms? cycle,ac,channel{0}\".format(ch)\n return float(self.query(cmd))",
"def ddm(self):\n if self.positive:\n return DDMAngle(self.degree, self.minute + (self.second/60))\n else:\n return -DDMAngle(self.degree, self.minute + (self.second/60))",
"def convertDistance(self, mm):\n\t\treturn mm/(self.microstep)",
"def mi_from_dm(distance_matrix, ns, nh, spike_train_list=None):\n \n nr = len(distance_matrix)\n nt = nr/ns\n nearest_neighbours = np.array([r.argsort()[:nh] for r in distance_matrix])\n \n if spike_train_list is not None:\n\n members_of_glob = trains_in_glob(spike_train_list)\n glob_comp = glob_composition(spike_train_list, ns, nt, nh)\n\n counts = []\n for i in range(len(nearest_neighbours)):\n c_i = 0\n \n if i not in members_of_glob:\n for j in nearest_neighbours[i]:\n if j not in members_of_glob:\n if spike_train_list[i].start_time == spike_train_list[j].start_time:\n c_i += 1 # count neigbours out of glob\n else:\n f_i = glob_comp[i]/float(sum(glob_comp.values()))\n c_i += (nh - c_i)*f_i # if one neighbour is in glob, all following neighb are as well\n break\n counts.append(c_i)\n else:\n f_i = glob_comp[i]/float(sum(glob_comp.values()))\n c_i += 1 + (nh - 1)*f_i #If in glob, take fraction of remaining neighbours except you\n counts.append(c_i)\n \n counts = np.array(counts)\n \n else:\n \n counts = []\n for i in range(len(nearest_neighbours)):\n c_i = 1\n for j in nearest_neighbours[i]:\n if (i != j and abs(i - j)%ns==0 ):\n c_i += 1 \n counts.append(c_i)\n counts = np.array(counts) \n \n I = sum(np.log2(counts*ns/float(nh))) / float(nr)\n\n return I",
"def pw_rmsd(mols):\n m = len(mols)\n k = 0\n pw = []\n for mol1 in mols:\n k += 1\n if k > m:\n break\n for i in range(k, m):\n mol2 = mols[i]\n pw.append(rmsd.rmsd(mol1, mol2))\n ave_rmsd = np.mean(pw)\n return ave_rmsd",
"def dist(dm, sm, neighbors):\n\n # Initialize list of possible distances\n distances = []\n\n # loop over all neighbors of the cell\n for neighbor in neighbors:\n # If the neighbor is valid\n if dm[neighbor[0], neighbor[1]] != -1:\n # add neighbor distance + 1 to possible distances\n distances.append(dm[neighbor[0], neighbor[1]] + 1)\n\n # return minimal distance\n return np.min(distances)",
"def _get_dma(cls, data):\n\t\tdata['dma'] = data['close_10_sma'] - data['close_50_sma']",
"def dms(self):\n minute_int, second = divmod(self.minute, 1)\n if self.positive:\n return DMSAngle(self.degree, int(minute_int), second * 60)\n else:\n return -DMSAngle(self.degree, int(minute_int), second * 60)",
"def calc_dda(self, feedrate, spm):\n\n second_const = 60\n micro_second_const = 1000000\n #dda = micro_second_const / (feedrate * spm)\n dda = second_const * micro_second_const / (feedrate * spm) #Assuming feedrate in mm/min\n return dda",
"def ddm(self):\n return hp2ddm(self.hp_angle)",
"def get_cmmb(cst, segs):\n assert isinstance(cst, ChromStruct)\n\n # initialize a GeneticMap and get genetic distances at segment boundaries\n gmp = GeneticMap(cst.chrom, cst.gmap_files)\n gsegs = gmp.interp_gpos(segs)\n gdists = (gsegs[:,1] - gsegs[:,0]) / (segs[:,1] - segs[:,0])\n\n return gdists"
] |
[
"0.708876",
"0.67770237",
"0.66845757",
"0.5880866",
"0.58423823",
"0.578846",
"0.5712183",
"0.56510144",
"0.5630121",
"0.5609441",
"0.55745095",
"0.5538945",
"0.55093586",
"0.5484867",
"0.54699725",
"0.54063857",
"0.5374911",
"0.5356186",
"0.53358334",
"0.5315479",
"0.53130376",
"0.5295122",
"0.52846026",
"0.5277308",
"0.5271536",
"0.5269852",
"0.5253892",
"0.5252328",
"0.5223385",
"0.52168983"
] |
0.70364016
|
1
|
Return the total smearing in ms due to the sampling rate, the smearing over each channel, the smearing over each subband (if numsub > 0) and the smearing over the full BW assuming the worst-case DM error.
|
def total_smear(self, DM):
return sqrt((1000.0*self.obs.dt)**2.0 +
(1000.0*self.obs.dt*self.downsamp)**2.0 +
self.BW_smearing**2.0 +
self.sub_smearing**2.0 +
self.chan_smear(DM)**2.0)
|
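The total_smear record above combines the individual broadening terms in quadrature. A minimal standalone sketch of that combination follows; the parameter names are assumptions, since the real method reads these values from self.obs and from per-plan attributes rather than taking them as arguments:

from math import sqrt

def total_smear_ms(dt_s, downsamp, bw_smear_ms, sub_smear_ms, chan_smear_ms):
    # Combine the native sample time, the downsampled sample time, and the
    # full-band, subband and per-channel smearing (all in ms) in quadrature.
    samp_ms = 1000.0 * dt_s         # native sample time in ms
    dsamp_ms = samp_ms * downsamp   # effective sample time after downsampling
    return sqrt(samp_ms**2 + dsamp_ms**2 +
                bw_smear_ms**2 + sub_smear_ms**2 + chan_smear_ms**2)

# Example (illustrative numbers): 64 us sampling, downsample by 2, 0.1 ms BW
# smearing, no subbands, 0.5 ms channel smearing gives
# total_smear_ms(64e-6, 2, 0.1, 0.0, 0.5) ~= 0.53 ms.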
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def DM_for_smearfact(self, smearfact):\n other_smear = sqrt((1000.0*self.obs.dt)**2.0 +\n (1000.0*self.obs.dt*self.downsamp)**2.0 +\n self.BW_smearing**2.0 +\n self.sub_smearing**2.0)\n return smearfact*0.001*other_smear/self.obs.chanwidth*0.0001205*self.obs.f_ctr**3.0 + self.obs.cDM",
"def _calculate_measurement_error(self): \n \n # Calculate Hartmann Spot\n # FIXME what are factor_1, factor_2 ???\n factor_1, factor_2 = 206265*5.89e-7, 206265*6.5e-7\n term1, term2 = factor_1/self.actuator_spacing, factor_2/self.r0\n hartmann_spot = np.max([term1, term2])\n \n # Calculate SNR \n n_pix=4 # FIXME spreadsheet says not to change this idk why?\n sample_time = 1/(10*self.controller_frequency)\n brightness = (8.9e5)*10**((0-self.guide_star_mag)/2.5)\n n_photons = brightness*sample_time*((100*self.actuator_spacing)**2)\n snr = n_photons/np.sqrt(n_photons + n_pix*(self.sigma_readnoise)**2)\n\n # Calculate noise propagator \n degrees_of_freedom = np.round((np.pi/4) * (self.telescope_diameter/self.actuator_spacing)**2)\n factor_1, factor_2 = 0.0536, 0.0795 # FIXME WHAT THE HECK IS THIS\n if self.aperture == 'circular':\n factor_1, factor_2 = 0.0068, 0.0796\n noise_propagator = np.sqrt(2*(factor_1 + factor_2*np.log(degrees_of_freedom)))\n\n # Calculate close loop averaging\n controller_over_frame = 1/10\n close_loop_averaging = np.sqrt(2*controller_over_frame)*np.arctan(1/(2*controller_over_frame))\n sigma_measurement = noise_propagator * close_loop_averaging * (self.actuator_spacing*1e9) * (hartmann_spot/snr*4.84814e-6)\n self.sigma_measurement = sigma_measurement # in nm",
"def _smear_values(self, gp_list):\n nevents = len(gp_list)\n readout = gp_list * self.gain # Scaling up by gain.\n smear = np.sqrt(self.sig0**2 + gp_list * self.sig1**2) # Smearing the peaks\n smear = np.random.normal(loc=0, scale=smear)\n\n ## Getting the number of after pulses\n apcount = np.random.binomial(gp_list.astype(np.int64), self.ap_prob)\n apval = np.random.exponential(self.beta, size=(nevents, np.max(apcount)))\n _, index = np.indices((nevents, np.max(apcount)))\n apval = np.where(apcount[:, np.newaxis] > index, apval, 0)\n apval = np.sum(apval, axis=-1) # Reducing of the last index\n\n # Adding the dark current distributions.\n dcval = self.dc_dist.rvs(size=nevents)\n smear = np.sqrt(self.sig0**2 + self.sig1**2) # Smearing the main peak\n dcval = dcval + np.random.normal(loc=0, scale=smear, size=nevents)\n dc = np.random.random(size=nevents)\n dcval = np.where(dc > self.dcfrac, 0, dcval)\n\n # Summing everything\n return readout + apval + dcval",
"def strm_bw_ratio(self):\r\n bw = self.bwstats.mean\r\n if StatsRouter.global_strm_mean == 0.0: return 0\r\n else: return (1.0*bw)/StatsRouter.global_strm_mean",
"def calculate(self):\n\n gt = self.ground_truth.flatten()\n seg = self.segmentation.flatten()\n\n n = gt.size\n mean_gt = gt.mean()\n mean_seg = seg.mean()\n mean = (mean_gt + mean_seg) / 2\n\n m = (gt + seg) / 2\n ssw = np.power(gt - m, 2).sum() + np.power(seg - m, 2).sum()\n ssb = np.power(m - mean, 2).sum()\n\n ssw /= n\n ssb = ssb / (n - 1) * 2\n\n return (ssb - ssw) / (ssb + ssw)",
"def cal_Beam_TheoRMS(n_ant, tobs, chan_width, sbid):\n \n BEAM_T_RMS = []\n\n coreff = 0.8 # correlator efficiency\n npol = 2.0 # Number of polarisation, npol = 2 for images in Stokes I, Q, U, or V\n\n obj = bf.load_beamset_class('SEFD_{}.hdf5'.format(sbid))\n freq = obj.frequencies\n data = obj.data #[time, antenna, beam, polarisation, channel, payload]\n pol_XX = 0 # polarisation 0 - 3 (XX,XY,YX,YY)\n pol_YY = 3\n \n for beam in range(36):\n med_XX_SEFD = np.median(data[0, :, beam, pol_XX, :, 0]) #use all available antennas\n med_YY_SEFD = np.median(data[0, :, beam, pol_YY, :, 0]) \n med_SEFD = 0.5*math.sqrt(med_XX_SEFD**2.0 + med_YY_SEFD**2.0) # SEFD of stokes I\n t_rms_mjy = 1000.*(med_SEFD/(coreff*math.sqrt(npol*n_ant*(n_ant-1)*chan_width*tobs)))\n BEAM_T_RMS.append(t_rms_mjy)\n\n return BEAM_T_RMS",
"def DM_for_newparams(self, dDM, downsamp):\n other_smear = sqrt((1000.0*self.obs.dt)**2.0 +\n (1000.0*self.obs.dt*downsamp)**2.0 +\n BW_smear(dDM, self.obs.BW, self.obs.f_ctr)**2.0 +\n self.sub_smearing**2.0)\n return 0.001*other_smear/self.obs.chanwidth*0.0001205*self.obs.f_ctr**3.0",
"def checkStats(checkmateOutput):\n\n if not os.path.isfile(checkmateOutput):\n print(\"Files %s not found\" %checkmateOutput)\n return False\n\n # Get CMS-SUS-16-032 data:\n data = np.genfromtxt(checkmateOutput,names=True,\n dtype=None,encoding=None)\n\n data = np.delete(data,np.where(data['sr'] == 'Combined'))\n ibest = np.argmax(data['rexp'])\n pt = data[ibest]\n if not pt['s']:\n ratio = 100.0\n else:\n ratio = pt['signalsumofweights']/pt['s']\n nEvts = pt['signalsumofweights']\n\n return ratio,nEvts",
"def adv_ratio(self): # XXX\r\n bw = StatsRouter.global_bw_mean\r\n if bw == 0.0: return 0\r\n else: return self.bw/bw",
"def get_rms(self, block):\n # https://stackoverflow.com/a/25871132\n count = len(block) / 2\n block_format = \"%dh\" % (count)\n shorts = struct.unpack(block_format, block)\n\n sum_squares = 0.0\n for sample in shorts:\n # sample is a signed short in +/- 32768, normalize it to 1.0\n sum_squares += pow(sample * SHORT_NORMALIZE, 2)\n\n return sqrt(sum_squares / count)",
"def calc_spindle_buffer_means(self):\n \n print('Aligning spindles...')\n # align spindles accoridng to timedelta & combine into single dataframe\n spindle_buffer_aggregates = {}\n for chan in self.spindles.keys():\n # only use channels that have spindles\n if self.spindles_wbuffer[chan]:\n # set the base df\n agg_df = pd.DataFrame(self.spindles_wbuffer[chan][0]['Raw'])\n rsuffix = list(range(1, len(self.spindles_wbuffer[chan])))\n # join on the index for each spindle\n for x in range(1, len(self.spindles_wbuffer[chan])):\n mean_df = agg_df.join(self.spindles_wbuffer[chan][x]['Raw'], how='outer', rsuffix=rsuffix[x-1])\n spindle_buffer_aggregates[chan] = mean_df\n \n print('Calculating statistics...')\n # create a new multiindex dataframe for calculations\n calcs = ['mean', 'std' ,'sem']\n tuples = [(chan, calc) for chan in spindle_buffer_aggregates.keys() for calc in calcs]\n columns = pd.MultiIndex.from_tuples(tuples, names=['channel', 'calc'])\n spindle_buffer_means = pd.DataFrame(columns=columns)\n \n # fill the dataframe\n for chan in spindle_buffer_aggregates.keys():\n spindle_buffer_means[(chan, 'mean')] = spindle_buffer_aggregates[chan].mean(axis=1)\n spindle_buffer_means[(chan, 'std')] = spindle_buffer_aggregates[chan].std(axis=1)\n spindle_buffer_means[(chan, 'sem')] = spindle_buffer_aggregates[chan].sem(axis=1)\n \n self.spindle_buffer_aggregates = spindle_buffer_aggregates\n self.spindle_buffer_means = spindle_buffer_means\n print('Done. Spindles aggregated by channel in obj.spindle_buffer_aggregates dict. Spindle statisics stored in obj.spindle_buffer_means dataframe.')",
"def _calculate_snr_spread(self):\n\n dmSpacing, percentage = 100, 0\n while percentage < 0.5: \n x = np.linspace(self.centerDm - dmSpacing, self.centerDm + dmSpacing, 500)\n y = np.array([self.effective_snr(self.effective_width(self.pulseWidth, self.centerDm - dm_val, self.bandwidth, self.freq), self.pulseWidth * 20) for dm_val in x])\n y = (y / (np.max(y) * 1.0)) if np.max(y) > 0 else y\n percentage = np.size(np.where(y > 0)) / 1000.0\n dmSpacing = dmSpacing*0.6\n \n return x, y",
"def calculate_sam_metrics(sam_output: List[torch.Tensor], segm: torch.Tensor):\n\n true_masks, invert_masks = get_processed_masks(segm)\n\n iou_sum = [0.] * SAM_AMOUNT\n sam_miss_rel_sum = [0.] * SAM_AMOUNT\n sam_direct_rel_sum = [0.] * SAM_AMOUNT\n sam_miss_sum = [0.] * SAM_AMOUNT\n sam_direct_sum = [0.] * SAM_AMOUNT\n\n # measure SAM attention metrics\n for i in range(SAM_AMOUNT):\n cur_sam_batch = sam_output[i].detach().clone().cpu()\n cur_mask_batch = true_masks[i].detach().clone().cpu()\n cur_mask_inv_batch = invert_masks[i].detach().clone().cpu()\n\n # iterate over batch to calculate metrics on each image of the batch\n assert cur_sam_batch.size() == cur_mask_batch.size()\n for j in range(cur_sam_batch.size(0)):\n cur_sam = cur_sam_batch[j]\n cur_mask = cur_mask_batch[j]\n cur_mask_inv = cur_mask_inv_batch[j]\n\n sam_miss_rel_sum[i] += safe_division(torch.sum(cur_sam * cur_mask_inv),\n torch.sum(cur_sam))\n sam_direct_rel_sum[i] += safe_division(torch.sum(cur_sam * cur_mask),\n torch.sum(cur_sam))\n sam_miss_sum[i] += safe_division(torch.sum(cur_sam * cur_mask_inv),\n torch.sum(cur_mask_inv))\n sam_direct_sum[i] += safe_division(torch.sum(cur_sam * cur_mask),\n torch.sum(cur_mask))\n iou_sum[i] += calculate_iou((cur_sam > 0.5).int(), cur_mask.int())\n return iou_sum, sam_miss_rel_sum, sam_direct_rel_sum, sam_miss_sum, sam_direct_sum",
"def _dmsmear(self, psr):\n return 8.3E6 * psr.dm * self.bw_chan / math.pow(self.freq, 3.0)",
"def calc_spindle_means(self):\n\n print('Aligning spindles...')\n # align spindles accoridng to timedelta & combine into single dataframe\n spindle_aggregates = {}\n datatypes = ['Raw', 'spfilt']\n for chan in self.spindles.keys():\n # only use channels that have spindles\n if self.spindles[chan]:\n spindle_aggregates[chan] = {}\n for datatype in datatypes:\n # set the base df\n agg_df = pd.DataFrame(self.spindles[chan][0][datatype])\n agg_df = agg_df.rename(columns={datatype:'spin_0'})\n rsuffix = list(range(1, len(self.spindles[chan])))\n # join on the index for each spindle\n agg_df = agg_df.join([self.spindles[chan][x][datatype].rename('spin_'+str(x)) for x in rsuffix], how='outer')\n spindle_aggregates[chan][datatype] = agg_df\n \n print('Calculating spindle statistics...')\n # create a new multiindex dataframe for calculations\n spindle_means = {}\n calcs = ['count', 'mean', 'std' ,'sem']\n tuples = [(chan, calc) for chan in spindle_aggregates.keys() for calc in calcs]\n columns = pd.MultiIndex.from_tuples(tuples, names=['channel', 'calc'])\n for datatype in datatypes:\n spindle_means[datatype] = pd.DataFrame(columns=columns)\n # fill the dataframe\n for chan in spindle_aggregates.keys():\n spindle_means[datatype][(chan, 'count')] = spindle_aggregates[chan][datatype].notna().sum(axis=1)\n spindle_means[datatype][(chan, 'mean')] = spindle_aggregates[chan][datatype].mean(axis=1)\n spindle_means[datatype][(chan, 'std')] = spindle_aggregates[chan][datatype].std(axis=1)\n spindle_means[datatype][(chan, 'sem')] = spindle_aggregates[chan][datatype].sem(axis=1)\n \n self.spindle_aggregates = spindle_aggregates\n self.spindle_means = spindle_means\n print('Done. Spindles aggregated by channel in obj.spindle_aggregates dict. Spindle statisics stored in obj.spindle_means dataframe.\\n')",
"def summarize(config):\n\n try:\n logger.info(':: ConfigID {0} ::'.format(config.configId))\n logger.info('\\tScan {0}, source {1}, intent {2}'\n .format(config.scanNo, config.source,\n config.scan_intent))\n\n logger.info('\\t(RA, Dec) = ({0}, {1})'\n .format(config.ra_deg, config.dec_deg))\n subbands = config.get_subbands()\n reffreqs = [subband.sky_center_freq for subband in subbands]\n logger.info('\\tFreq: {0} - {1}'\n .format(min(reffreqs), max(reffreqs)))\n\n nchans = [subband.spectralChannels for subband in subbands]\n chansizes = [subband.bw/subband.spectralChannels\n for subband in subbands]\n sb0 = subbands[0]\n logger.info('\\t(nspw, chan/spw, nchan) = ({0}, {1}, {2})'\n .format(len(nchans), nchans[0], sum(nchans)))\n logger.info('\\t(BW, chansize) = ({0}, {1}) MHz'\n .format(sb0.bw, chansizes[0]))\n if not all([chansizes[0] == chansize for chansize in chansizes]):\n logger.info('\\tNot all spw have same configuration.')\n\n logger.info('\\t(nant, npol) = ({0}, {1})'\n .format(config.numAntenna, sb0.npp))\n dt = 24*3600*(config.stopTime-config.startTime)\n logger.info('\\t(StartMJD, duration) = ({0}, {1}s).'\n .format(config.startTime, round(dt, 1)))\n logger.info('\\t({0}/{1}) ints at (HW/Final) integration time of ({2:.3f}/{3:.3f}) s'\n .format(int(round(dt/sb0.hw_time_res)),\n int(round(dt/sb0.final_time_res)),\n sb0.hw_time_res, sb0.final_time_res))\n except:\n logger.warn(\"Failed to fully parse config to print summary.\"\n \"Proceeding.\")",
"def SNRcalc(self, pulsar, pop):\n # if not in region, S/N = 0\n\n # if we have a list of pointings, use this bit of code\n # haven't tested yet, but presumably a lot slower\n # (loops over the list of pointings....)\n \n\n # otherwise check if pulsar is in entire region\n if self.inRegion(pulsar):\n # If pointing list is provided, check how close nearest \n # pointing is\n if self.pointingslist is not None:\n # convert offset from degree to arcmin\n offset = self.inPointing(pulsar) * 60.0\n\n else:\n # calculate offset as a random offset within FWHM/2\n offset = self.fwhm * math.sqrt(random.random()) / 2.0\n else:\n return -2\n\n # Get degfac depending on self.gainpat\n if self.gainpat == 'airy':\n conv = math.pi/(60*180.) # Conversion arcmins -> radians\n eff_diam = 3.0e8/(self.freq*self.fwhm*conv*1.0e6) # Also MHz -> Hz\n a = eff_diam/2. # Effective radius of telescope\n lamda = 3.0e8/(self.freq*1.0e6) # Obs. wavelength\n kasin = (2*math.pi*a/lamda)*np.sin(offset*conv)\n degfac = 4*(j1(kasin)/kasin)**2\n else:\n #### NOTE! HERE I WANT TO CHECK UNITS OF FWHM (ARCMIN???)\n degfac = math.exp(-2.7726 * offset * offset / (self.fwhm *self.fwhm))\n\n # Dunc's code here uses a ^-2.6 to convert frequencies\n # don't think I need to do this - I'm using the frequency in call\n Ttot = self.tsys + self.tskypy(pulsar)\n\n # calc dispersion smearing across single channel\n tdm = self._dmsmear(pulsar)\n\n # calculate bhat et al scattering time (inherited from GalacticOps)\n # in units of ms\n tscat = go.scatter_bhat(pulsar.dm, pulsar.scindex, self.freq)\n\n # Calculate the effective width\n weff_ms = math.sqrt(pulsar.width_ms()**2 + self.tsamp**2 + tdm**2 + tscat**2)\n\n # calculate duty cycle (period is in ms)\n delt = weff_ms / pulsar.period\n #print weff_ms, pulsar.period\n\n # if pulse is smeared out, return -1.0\n if delt > 1.0:\n #print weff_ms, tscat, pulsar.dm, pulsar.gl, pulsar.gb, pulsar.dtrue\n return -1\n else:\n return self._SNfac(pulsar, pop.ref_freq, degfac, Ttot) \\\n * math.sqrt((1.0 -delt)/delt)",
"def _calc_sigma_sub(self):\n # concatenate all required frames\n sub_dfs = []\n for me_id in self.me_map[\"sub\"].keys():\n sub_dfs.append(self.draws[me_id])\n sub_df = pd.concat(sub_dfs)\n\n # return the sum\n sub_df.reset_index(inplace=True)\n if self.copy_env_inc:\n draw_cols = self.dimensions.data_dim.get_level(\"data\")\n sub_df.loc[sub_df['measure_id'] == 6, draw_cols] = 0\n return sub_df.groupby(self.dimensions.index_names).sum()",
"def calculate_desired_noise_rms(clean_rms, snr):\n a = float(snr) / 20\n noise_rms = clean_rms / (10 ** a)\n return noise_rms",
"def bw_ratio(self):\r\n bw = self.bwstats.mean\r\n if bw == 0.0: return 0\r\n else: return self.bw/(1024.*bw)",
"def calculate_rms(samples):\n chunk = pow(abs(samples), 2)\n return math.sqrt(chunk.mean())",
"def get_SSD():\n dist = 0\n # traversal of pixels in potential Bi+1 block\n # compare corresponding pixel positions with source block in f1 and neighbour block in f2\n y1 = center_y1 - block_rad # start pos.\n for y2 in range(center_y2 - block_rad, (center_y2 - block_rad + block_size)):\n x1 = center_x1 - block_rad # start pos\n for x2 in range(center_x2 - block_rad, (center_x2 - block_rad + block_size)):\n try:\n # displacement formula for RGB channels of each pixel in block\n dist = dist + (frame1[y1][x1][0] - frame2[y2][x2][0])**2 + (frame1[y1][x1][1] - frame2[y2][x2][1])**2 + (frame1[y1][x1][2] - frame2[y2][x2][2])**2\n except RuntimeWarning:\n pass\n x1 += 1\n y1 += 1\n return math.sqrt(dist)",
"def calculate_optimal_dmstep(self, acceptedSNR= 95):\n\n if not self.useSNR:\n return 1.205e-7 * self.tsamp * (self.freq ** 3) / self.bandwidth\n \n x, y = self._calculate_snr_spread()\n return fabs(self.centerDm - x[np.max(np.where(y > np.max(y) * float(acceptedSNR) / 100.0 ))])",
"def shaking_error_rate(individual, test_data, truth_data, name=None):\r\n test_data = np.array(test_data)\r\n truth_data = np.array(truth_data)\r\n total = 0\r\n num_wrong = 0\r\n for test_point, truth_point in zip(test_data, truth_data):\r\n # Nine represents a shaking event\r\n if (truth_point == 9):\r\n if np.isnan(test_point) or test_point <= 8.5 or test_point > 9.5:\r\n num_wrong += 1\r\n total += 1\r\n #if num_wrong == 0:\r\n # # Perfection implies overtraining\r\n # return 1.0\r\n #else:\r\n return float(num_wrong)/float(total)",
"def _get_duans_smearing_estimate(self, response_data):\n\n res = self._model.fit()\n\n residuals = res.resid.as_matrix()\n residuals = np.expand_dims(residuals, axis=0)\n residuals = np.expand_dims(residuals, axis=2)\n residuals = np.tile(residuals, (response_data.shape[0], 1, response_data.shape[2]))\n\n response_data = np.tile(response_data, (1, residuals.shape[1], 1)) + residuals\n\n # apply the inverse transform function to the response data\n response_variable_transform, raw_variable = find_raw_variable(self._response_variable)\n inverse_transform_func = INVERSE_TRANSFORM_FUNCTIONS[response_variable_transform]\n transformed_response_data = inverse_transform_func(response_data)\n\n smeared_data = np.mean(transformed_response_data, axis=1)\n\n return smeared_data",
"def ohms(self):\n # Rwb = Rwiper + Rtotal * (counts / 256)\n # Rwa = Rwiper + Rtotal * ((256 - counts) / 256)\n g = 0\n rtotal=0.0\n reach=[]\n for chan in self.get_channel_list(self.nchans):\n self.rwa[chan] = float( 256 - self.vals[chan] ) / 256.0\n self.rwb[chan] = float( self.vals[chan] ) / 256.0\n self.rwa[chan] *= self.Rtotal\n self.rwb[chan] *= self.Rtotal \n self.rwa[chan] += self.Rwiper\n self.rwb[chan] += self.Rwiper",
"def paramSamples(self):\n\n if self._paramSamples is not None:\n return self._paramSamples\n timescale = self.mjdmax - self.mjdmin\n T0Vals = self.randomState.uniform(size=self.numSN) * timescale \\\n + self.mjdmin\n mB, x1, c, m = SALT2_MMDist(self.numSN)\n print(\"range of sampled mB\", mB.min(), mB.max())\n x0 = np.zeros(len(mB))\n mB += self.randomState.normal(loc=0., scale=self.Mdisp,\n size=self.numSN)\n H70cosmo = self.cosmo.clone(name='H70cosmo',\n H0=self.cosmo.H0 * (70/self.cosmo.H0.value))\n MB = mB + H70cosmo.distmod(self.zSamples).value - \\\n self.cosmo.distmod(self.zSamples).value\n model = sncosmo.Model(source='SALT2')\n for i, z in enumerate(self.zSamples):\n model.set(z=z, x1=x1[i], c=c[i])\n model.set_source_peakabsmag(MB[i], 'bessellB', 'ab',\n cosmo=self.cosmo)\n x0[i] = model.get('x0')\n mB[i] = model.source.peakmag('bessellB', 'ab')\n df = pd.DataFrame(dict(x0=x0, mB=mB, x1=x1, c=c,\n t0=T0Vals, z=self.zSamples, snid=self.snids))\n self._paramSamples = df\n return self._paramSamples",
"def perform_smear_removal(active_quads, int_time):\n all_quads = []\n #frame_transfer = 8.3333ms\n tft = 8.3333\n num_quads = active_quads.shape[0]\n\n for quads in range(0, num_quads):\n\n active_quad = active_quads[quads]\n #outlier_mask = outlier_masks[quads]\n# active_quad_mask = ma.array(np.reshape(active_quad, (nx_quad*ny_quad, 1)),\n# mask=np.reshape(outlier_mask, (nx_quad*ny_quad, 1)))\n #active_quad = active_quad_mask.reshape((nx_quad, ny_quad))\n active_quad_odd = active_quad[:, ::2]\n active_quad_even = active_quad[:, 1::2]\n smear_factor_odd = (tft / (int_time + tft))* active_quad_odd.mean(axis=0)\n smear_factor_even = (tft / (int_time + tft))* active_quad_even.mean(axis=0)\n smear_subtracted_quad_odd = active_quad_odd - smear_factor_odd[None, :]\n smear_subtracted_quad_even = active_quad_even - smear_factor_even[None, :]\n active_quad[:, ::2] = smear_subtracted_quad_odd\n active_quad[:, 1::2] = smear_subtracted_quad_even\n all_quads.append(active_quad)\n #active_quad = active_quad_mask = outlier_mask = None\n#\n\n# all_quads = []\n# #frame_transfer = 8\n# tft = 8\n# num_quads, nx_quad, ny_quad = active_quads.shape\n## print(active_quads.shape)\n## print(outlier_masks.shape)\n## cc\n# for quads in range(0, num_quads):\n# #print(quads)\n# active_quad = active_quads[quads]\n# outlier_mask = outlier_masks[quads]\n# active_quad_mask = ma.array(np.reshape(active_quad, (nx_quad*ny_quad, 1)),\n# mask=np.reshape(outlier_mask, (nx_quad*ny_quad, 1)))\n#\n# active_quad = active_quad_mask.reshape((nx_quad, ny_quad))\n# #print(ma.is_masked(active_quad))\n# smear_factor = (tft / (int_time + tft))* active_quad.mean(axis=0)\n# #print((smear_factor[10:]))\n# #plt.plot(smear_factor)\n# #plt.show()\n# smear_subtracted_quad = active_quad - smear_factor[None, :]\n# all_quads.append(smear_subtracted_quad)\n# active_quad = active_quad_mask = outlier_mask = None\n\n\n return all_quads",
"def calc_dda(self, feedrate, spm):\n\n second_const = 60\n micro_second_const = 1000000\n #dda = micro_second_const / (feedrate * spm)\n dda = second_const * micro_second_const / (feedrate * spm) #Assuming feedrate in mm/min\n return dda",
"def _calculate_num_samps_per_chan(self, num_samps_per_chan):\r\n if num_samps_per_chan is NUM_SAMPLES_UNSET:\r\n return 1\r\n elif num_samps_per_chan == READ_ALL_AVAILABLE:\r\n acq_type = self.timing.samp_quant_samp_mode\r\n\r\n if (acq_type == AcquisitionType.FINITE and\r\n not self.in_stream.read_all_avail_samp):\r\n return self.timing.samp_quant_samp_per_chan\r\n else:\r\n return self.in_stream.avail_samp_per_chan\r\n else:\r\n return num_samps_per_chan"
] |
[
"0.59962463",
"0.5847587",
"0.58467317",
"0.57997197",
"0.56424934",
"0.5639254",
"0.5608289",
"0.55960196",
"0.5570163",
"0.54857546",
"0.5478621",
"0.54403645",
"0.54202724",
"0.5409923",
"0.53928995",
"0.53849053",
"0.5364623",
"0.53559995",
"0.5340608",
"0.53223324",
"0.5318012",
"0.53065974",
"0.52957594",
"0.5211656",
"0.51850134",
"0.51820904",
"0.5172891",
"0.51666415",
"0.51665366",
"0.5149574"
] |
0.66381943
|
0
|
Return the DM where the smearing in a single channel is a factor smearfact larger than all the other smearing causes combined.
|
def DM_for_smearfact(self, smearfact):
other_smear = sqrt((1000.0*self.obs.dt)**2.0 +
(1000.0*self.obs.dt*self.downsamp)**2.0 +
self.BW_smearing**2.0 +
self.sub_smearing**2.0)
return smearfact*0.001*other_smear/self.obs.chanwidth*0.0001205*self.obs.f_ctr**3.0 + self.obs.cDM
|
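The DM_for_smearfact record above is the per-channel smearing relation inverted for DM: setting chan_smear(DM) equal to smearfact times the combined non-channel smearing and solving for DM yields the returned expression. A small consistency check, with made-up numbers and assumed parameter names, is sketched below:

def chan_smear_ms(DM, cDM, chanwidth, f_ctr):
    # Per-channel smearing (ms) for a trial DM offset from the coherent DM.
    return 1000.0 * abs(DM - cDM) * chanwidth / (0.0001205 * f_ctr**3)

def dm_for_smearfact(smearfact, other_smear_ms, cDM, chanwidth, f_ctr):
    # Invert chan_smear_ms(DM) == smearfact * other_smear_ms for DM.
    return smearfact * 0.001 * other_smear_ms / chanwidth * 0.0001205 * f_ctr**3 + cDM

# Consistency check (MHz, pc cm^-3, ms; values are illustrative only):
dm = dm_for_smearfact(2.0, 0.5, 0.0, 0.39, 1400.0)
assert abs(chan_smear_ms(dm, 0.0, 0.39, 1400.0) - 2.0 * 0.5) < 1e-9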
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def chan_smear(self, DM):\n try:\n DM = where(DM-cDM==0.0, cDM+self.dDM/2.0, DM)\n except TypeError:\n if (DM-cDM==0.0): DM = cDM+self.dDM/2.0\n return dm_smear(DM, self.obs.chanwidth, self.obs.f_ctr, self.obs.cDM)",
"def DM_for_newparams(self, dDM, downsamp):\n other_smear = sqrt((1000.0*self.obs.dt)**2.0 +\n (1000.0*self.obs.dt*downsamp)**2.0 +\n BW_smear(dDM, self.obs.BW, self.obs.f_ctr)**2.0 +\n self.sub_smearing**2.0)\n return 0.001*other_smear/self.obs.chanwidth*0.0001205*self.obs.f_ctr**3.0",
"def qc_Bad_Chans(infile, mad_rms, med_rms):\n\n BAD_CHAN = []\n\n stat_file = open(infile, 'r')\n LINES = stat_file.readlines()[2:]\n stat_file.close()\n\n threshold = 1.2 # value selected to be more consistent with SoFiA flagged criterion\n \n# value = med_madfm + 0.4 # Deviation from the med_madfm. Need to check with larger sample of data to decide the best value. \n\n for i in range(len(LINES)):\n line = LINES[i]\n TOKS = line.split()\n chan = TOKS[0]\n # madfm = float(TOKS[5])\n rms = float(TOKS[3])\n \n value = abs(rms - med_rms)\n criterion = 1.4826*threshold*mad_rms\n if value > criterion:\n BAD_CHAN.append(chan)\n\n if BAD_CHAN == []:\n BAD_CHAN.append('none')\n QC_badchan_id = 'good'\n else:\n QC_badchan_id = 'bad'\n\n mosaic_bad_chan = 'mosaic_badchans.txt'\n print (','.join(BAD_CHAN), file=open(fig_dir + '/' + mosaic_bad_chan,'w'))\n\n n_bad_chan = len(BAD_CHAN)\n\n # Check if number of bad channel recorded is 1. If yes, check if is it a none keyword.\n # If yes, number of bad channel should be 0.\n \n if n_bad_chan == 1:\n with open(fig_dir + '/' + mosaic_bad_chan) as f:\n if 'none' in f.read():\n n_bad_chan = 0\n print ('yes')\n \n return n_bad_chan, mosaic_bad_chan, QC_badchan_id",
"def total_smear(self, DM):\n return sqrt((1000.0*self.obs.dt)**2.0 +\n (1000.0*self.obs.dt*self.downsamp)**2.0 +\n self.BW_smearing**2.0 +\n self.sub_smearing**2.0 +\n self.chan_smear(DM)**2.0)",
"def test_get_molecule_least_similar_to(self):\n csv_fpath = self.smiles_seq_to_xl_or_csv(ftype=\"csv\")\n for descriptor in SUPPORTED_FPRINTS:\n for similarity_measure in SUPPORTED_SIMILARITIES:\n molecule_set = MoleculeSet(\n molecule_database_src=csv_fpath,\n molecule_database_src_type=\"csv\",\n fingerprint_type=descriptor,\n similarity_measure=similarity_measure,\n is_verbose=False,\n )\n for mol_smile, mol in zip(TEST_SMILES,\n molecule_set.molecule_database):\n compare_task = CompareTargetMolecule(\n target_molecule_smiles=mol_smile)\n [furthest_mol], [similarity] = compare_task.\\\n get_hits_dissimilar_to(molecule_set)\n mol_similarities = molecule_set.compare_against_molecule(\n mol)\n self.assertEqual(\n np.min(mol_similarities),\n mol.get_similarity_to(\n molecule_set.molecule_database[furthest_mol],\n molecule_set.similarity_measure\n ),\n f\"Expected furthest mol to have minimum \"\n f\"similarity to target molecule \"\n f\"using similarity measure: {similarity_measure}, \"\n f\"descriptor: {descriptor}, \"\n f\"for molecule {mol.mol_text}\",\n )\n self.assertGreaterEqual(similarity, 0.,\n \"Expected similarity value to \"\n \"be >= 0.\"\n f\"using similarity measure: \"\n f\"{similarity_measure}, \"\n f\"descriptor: {descriptor}, \"\n f\"for molecule {mol.mol_text}\")\n self.assertLessEqual(similarity, 1.,\n \"Expected similarity value to \"\n \"be <= 1.\"\n f\"using similarity measure: \"\n f\"{similarity_measure}, \"\n f\"descriptor: {descriptor}, \"\n f\"for molecule {mol.mol_text}\"\n )",
"def SA(targetMDG):\n hill_climbers = []\n for i in range(NUM_Population):\n hill_climbers.append(SimulatedAnnealing(targetMDG))\n\n completed_climbers = []\n completed_max_climbers = []\n\n # k: int, number of neighbors to be considered\n k = 20\n i = 0\n not_increased = 0\n max_score = 0\n\n while True:\n for climber in hill_climbers[:]:\n result = climber.climb_with_annealing(k, i)\n if not result:\n completed_climbers.append(climber)\n hill_climbers.remove(climber)\n max_completed_climber = SimulatedAnnealing(targetMDG)\n max_completed_climber.result = climber.max_result\n max_completed_climber.update_score()\n completed_max_climbers.append(max_completed_climber)\n\n total_climbers = hill_climbers + completed_climbers + completed_max_climbers\n total_climbers.sort()\n print(\"Iteration \", i, \": \", total_climbers[-1].score)\n\n if total_climbers[-1].score - max_score != 0:\n not_increased = 0\n else:\n not_increased += 1\n\n if len(hill_climbers) == 0 or not_increased == 10:\n break\n i += 1\n max_score = total_climbers[-1].score\n\n total_climbers = hill_climbers + completed_climbers + completed_max_climbers\n total_climbers.sort()\n\n max_climber = total_climbers[-1]\n\n print(\"TurboMQ = \", max_climber.score)\n for c in max_climber.result: # print all clusters which are not singleton\n if 1 != len(c.get_nodes()):\n print(c.get_nodes())\n\n return max_climber.result",
"def test_most_recently_computed_atomic_charges_are_provided(\n mediator_potential: MediatorPotential,\n) -> None:\n charges = np.array(mediator_potential.get_atomic_charges())\n assert all(np.isclose(charges, np.zeros(2)))",
"def _excess(self, sub_me):\n # get the needed data\n sub_me_df = self.draws[sub_me]\n env_df = self.draws[self.me_map[\"env\"]]\n sigma_sub_df = self.draws[\"sigma_sub\"]\n\n # create a boolean dataframe for our 2 cases\n more = (sigma_sub_df > env_df)\n\n # now calculate the excess values\n excess_df = (\n (sigma_sub_df[more] - env_df[more]) * sub_me_df[more] /\n sigma_sub_df[more]\n ).fillna(value=0)\n return excess_df",
"def __calc_mmd_maxconc(self,event):\n \n # Use smoothed data\n if self.particle_mode:\n data = np.log10(gaussian_filter(self.par_data,self.smooth,mode='constant'))\n dpdp,tt = np.meshgrid(self.par_diam,self.par_time)\n points = np.concatenate((tt.flatten()[np.newaxis].T,\n dpdp.flatten()[np.newaxis].T,\n data.flatten()[np.newaxis].T),\n axis=1)\n if self.ion_mode:\n data = np.log10(gaussian_filter(self.ion1_data,self.smooth,mode='constant'))\n dpdp,tt = np.meshgrid(self.ion1_diam,self.ion1_time)\n points = np.concatenate((tt.flatten()[np.newaxis].T,\n dpdp.flatten()[np.newaxis].T,\n data.flatten()[np.newaxis].T),\n axis=1)\n\n # Transform polygon perimeter to path\n try:\n banana_perimeter = Path(np.array(list(zip(self.polyx,self.polyy))))\n except ValueError:\n print (\"No polygon found\")\n return\n\n # Eliminate nans and infs from dndlogdp\n points = np.delete(points,np.argwhere((np.isnan(points[:,2]))|(np.isinf(points[:,2]))),axis=0)\n banana_points = points[banana_perimeter.contains_points(points[:,[0,1]]),:]\n\n if len(banana_points)==0:\n print (\"Found no points inside polygon.\")\n return\n \n # Grouping the size distribution data points\n if self.particle_mode:\n pre_sorted_banana_points = [banana_points[banana_points[:,1]==x,:] for x in self.par_diam if x in banana_points[:,1]]\n if self.ion_mode:\n pre_sorted_banana_points = [banana_points[banana_points[:,1]==x,:] for x in self.ion1_diam if x in banana_points[:,1]]\n \n sorted_banana_points = [x[x[:,0].argsort()] for x in pre_sorted_banana_points]\n \n for i in range(0,len(sorted_banana_points)):\n x = sorted_banana_points[i][:,0] - self.mintime\n y = sorted_banana_points[i][:,2]\n a=np.max(y)\n mu=np.mean(x)\n sigma=np.std(x)\n try:\n params,pcov = curve_fit(self.__gaus,x,y,p0=[a,mu,sigma])\n if ((params[1]>=x.max()) | (params[1]<=x.min())):\n print (\"Peak outside range. Skipping %f\" % (sorted_banana_points[i][0,1]))\n else:\n self.mmd_dp = np.append(self.mmd_dp,sorted_banana_points[i][0,1])\n self.mmd_time = np.append(self.mmd_time,params[1] + self.mintime)\n except:\n print (\"Diverges. Skipping %f\" % (sorted_banana_points[i][0,1]))\n\n # Plot the result on ax\n self.mmd_plot.set_data(self.mmd_time,self.mmd_dp)\n plt.draw()",
"def get_max_mid_diameter(self):\n max_min_mid_diam = 0\n\n for m in self.components:\n name = m.name\n diam_file = join(\n self.params['molec_dir'],\n name+'_size.csv'\n )\n\n if exists(diam_file.replace('.csv', '.TOOBIG')):\n max_min_mid_diam = 0\n print(f'{m.name} too big based on MW')\n break\n if exists(diam_file.replace(\n 'size.csv',\n 'unopt.ETKDGFAILED'\n )):\n max_min_mid_diam = 0\n print(f'{m.name} failed ETKDG')\n break\n results = pd.read_csv(diam_file)\n min_mid_diam = min(results['diam2'])\n max_min_mid_diam = max([min_mid_diam, max_min_mid_diam])\n\n self.max_min_mid_diam = max_min_mid_diam",
"def fowlkes_mallows(self):\n return self.pairwise.ochiai_coeff()",
"def find_sim_stars(limits, star):\n\n\tglobal C\n\t\n\tst = np.zeros((len(A),len(lPcnB)))\n\t\n\tfor l in limits:\n\t\tfor k, cn in enumerate(lPcnB):\n\t\t\tst[:,k][(A[cn]>l[0]) & (A[cn]<l[1])]=1\n\t\n\tC = A[(st[:,0]>0) & (st[:,1]>0) & (st[:,2]>0)] # C is the table with the similar stars -- for Laurent's version\n\tC = C[C['OGLEID']!=star['OGLEID']] # Remove the \"star\" in case of self-reconstruction\n\n\treturn C",
"def max_energy_candidate_storage_rule(_m, g, y, s, t):\r\n\r\n return m.q[g, y, s, t] - sum(m.x_c[g, j] for j in m.Y if j <= y) <= 0",
"def calculate_optimal_dmstep(self, acceptedSNR= 95):\n\n if not self.useSNR:\n return 1.205e-7 * self.tsamp * (self.freq ** 3) / self.bandwidth\n \n x, y = self._calculate_snr_spread()\n return fabs(self.centerDm - x[np.max(np.where(y > np.max(y) * float(acceptedSNR) / 100.0 ))])",
"def most_common_mutants(self):\n highest_readcount = max([mutant.read_info(self.dataset_name).total_read_count for mutant in self.dataset])\n highest_readcount_mutants = [mutant for mutant in self.dataset \n if mutant.read_info(self.dataset_name).total_read_count==highest_readcount]\n return highest_readcount_mutants",
"def _calculate_measurement_error(self): \n \n # Calculate Hartmann Spot\n # FIXME what are factor_1, factor_2 ???\n factor_1, factor_2 = 206265*5.89e-7, 206265*6.5e-7\n term1, term2 = factor_1/self.actuator_spacing, factor_2/self.r0\n hartmann_spot = np.max([term1, term2])\n \n # Calculate SNR \n n_pix=4 # FIXME spreadsheet says not to change this idk why?\n sample_time = 1/(10*self.controller_frequency)\n brightness = (8.9e5)*10**((0-self.guide_star_mag)/2.5)\n n_photons = brightness*sample_time*((100*self.actuator_spacing)**2)\n snr = n_photons/np.sqrt(n_photons + n_pix*(self.sigma_readnoise)**2)\n\n # Calculate noise propagator \n degrees_of_freedom = np.round((np.pi/4) * (self.telescope_diameter/self.actuator_spacing)**2)\n factor_1, factor_2 = 0.0536, 0.0795 # FIXME WHAT THE HECK IS THIS\n if self.aperture == 'circular':\n factor_1, factor_2 = 0.0068, 0.0796\n noise_propagator = np.sqrt(2*(factor_1 + factor_2*np.log(degrees_of_freedom)))\n\n # Calculate close loop averaging\n controller_over_frame = 1/10\n close_loop_averaging = np.sqrt(2*controller_over_frame)*np.arctan(1/(2*controller_over_frame))\n sigma_measurement = noise_propagator * close_loop_averaging * (self.actuator_spacing*1e9) * (hartmann_spot/snr*4.84814e-6)\n self.sigma_measurement = sigma_measurement # in nm",
"def flexibility(self):\n self._flexibility = 0.25 * self.DAM - 0.25 * self.DCC + 0.5 * self.MOA + 0.5 * self.NOP\n return round(self._flexibility, 5)",
"def test_closest_common_mats(self):\n m = mats.Materials(\"mats_test.json\", NoneVisited())\n self.assertEqual( '164 G. Canis Majoris', m.closest([0, 0, 0], ['Iron'])[1]['system'])\n self.assertEqual( '2MASS J10433563-5945136', m.closest([8000, 0, 3000], ['Iron'])[1]['system'])",
"def findLimitedDWACmd(self, cmd):\n #if no collision, we just return\n if self.checkForCollision(cmd, rough=True, publish=True) == False:\n return cmd\n\n #create a set of commands to test\n best_cmd = [0.0,0.0]\n cmds = [ [scale*cmd[0], scale*cmd[1]] for scale in np.arange(1.0, -0.2, -0.2) ]\n #print cmds\n for cmd_to_test in cmds:\n if not self.checkForCollision(cmd_to_test, rough=True):\n best_cmd = cmd_to_test\n break\n\n return best_cmd",
"def discrepancy(self):\n result = 0\n for focal, value in self.items():\n if focal.cardinal > 0:\n result -= value * math.log(self.betP(focal), 2)\n return round(result, 6)",
"def _calc_clique_potentials(self, cliques):\n adj = self.adj\n n = adj.shape[0]\n local_set = set(range(n))\n nonz = np.nonzero(adj)\n pairwise_set = set([(nonz[0][i], nonz[1][i]) for i in range(\n len(nonz[0])) if nonz[0][i] < nonz[1][i]]) # the edge-set\n local_tmp_factor = np.ones([2])\n pairwise_tmp_factor = np.ones([2, 2])\n clique_potentials = []\n for cl in cliques:\n cl_vars = sorted(list(cl))\n cl_factor = np.ones(len(cl) * [2])\n for loc in local_set.intersection(cl):\n local_tmp_factor[0] = np.exp(-adj[loc, loc])\n local_tmp_factor[1] = np.exp(adj[loc, loc])\n cl_factor = tensor_mult(cl_factor, local_tmp_factor, [cl_vars.index(loc)], [0])\n # remove the local factors that are already accounted for (family-preserving property)\n local_set.difference_update(cl)\n for i in cl:\n for j in cl:\n if i < j and (i, j) in pairwise_set:\n pairwise_tmp_factor[0, 0] = np.exp(adj[i, j])\n pairwise_tmp_factor[1, 1] = pairwise_tmp_factor[0, 0]\n pairwise_tmp_factor[0, 1] = np.exp(-adj[i, j])\n pairwise_tmp_factor[1, 0] = pairwise_tmp_factor[0, 1]\n cl_factor = tensor_mult(cl_factor, pairwise_tmp_factor,\n [cl_vars.index(i), cl_vars.index(j)], [0, 1])\n pairwise_set.remove((i, j))\n clique_potentials.append(cl_factor.copy())\n return clique_potentials",
"def multishot(attacker_schema, victim_schema):\n\n multishot = attacker_schema.multishot.get(victim_schema.name, 0)\n return multishot > 0 and (multishot - 1.0) / multishot > random.random()",
"def test_ssd_similarity_measure_values():\n \n patch1 = torch.tensor([1.3, 4.5, 7.2, 0.2, -0.6])\n patch2 = torch.tensor([0.2, 4.4, 7.6, 0.1, 1.3])\n\n ssd = ssd_similarity_measure(patch1, patch2)\n assert np.isclose(ssd, 5.0, atol=1e-2)",
"def _calc_optimal_subset(fracs: pd.DataFrame,\r\n match_col: str) -> Tuple[pd.DataFrame, float, float]:\r\n\r\n fracs = fracs.copy(deep=True)\r\n fracs['control_sample_fraction_naive'] = fracs['treatment']/fracs['control']\r\n scale_factor = fracs.control_sample_fraction_naive.max()**-1\r\n logging.getLogger(__name__).info(\"scale factor is {scale_factor:.2f} (coeffs for treatment w/ no drops\".format(scale_factor=scale_factor))\r\n\r\n # if no subscaling is necessary return fracs as is\r\n if scale_factor >= 1:\r\n logging.getLogger(__name__).info(\"can use all treatments safely, returning early\")\r\n fracs['control_scaled_sample_fraction'] = fracs['control_sample_fraction_naive']\r\n fracs['treatment_scaled_sample_fraction'] = 1\r\n fracs = fracs[[match_col, 'treatment_scaled_sample_fraction', 'control_scaled_sample_fraction']]\r\n return fracs, float(1), float(0)\r\n\r\n options = _create_options_grid(fracs, scale_factor)\r\n options['utility'] = options.apply(_calc_util_wrapper, axis=1)\r\n\r\n # pick best\r\n max_util = options.utility.max()\r\n best_row = options[options.utility == max_util].iloc[0]\r\n winning_scale = float(best_row['scale'])\r\n winning_drop = float(best_row['percent_dropped'])\r\n\r\n logging.getLogger(__name__).info(\"max_util:{mu:.2f}\\twinning_scale:{ws:.2f}\\twinning_drop:{wd:.2f}\".format(mu=max_util, ws=winning_scale, wd=winning_drop))\r\n\r\n fracs['control_scaled_sample_fraction'] = np.min([(fracs['treatment'] * winning_scale/fracs['control']).values, [1]*len(fracs)], axis=0)\r\n fracs['treatment_scaled_sample_fraction'] = fracs['control_scaled_sample_fraction'] * fracs['control']/fracs['treatment']\r\n fracs = fracs[[match_col, 'treatment_scaled_sample_fraction', 'control_scaled_sample_fraction']]\r\n\r\n return fracs, winning_scale, winning_drop",
"def kineticsDiff(reaction): \n kinetics0 = reaction[0].kinetics\n kinetics1 = reaction[1].kinetics\n if kinetics0 and kinetics1:\n diff = reaction[0].kinetics.discrepancy(reaction[1].kinetics) \n else:\n diff = 9999999\n return -1*diff",
"def get_medoids(self):\r\n\r\n return self.__optimal_medoids",
"def compute_two_way_critic_val(dataframe, f0, f1, loc=0.95):\n ### Factor A has two levels so () Dof_sst = DFN = a -1 = 1\n a = len(set(dataframe[f0]))\n dfn1 = a -1\n ### Factor B Dof_sst = DFN = a - 1\n b = len(set(dataframe[f1]))\n dfn2 = b -1\n ## Factor AxB (interaction)\n dfn3 = (a-1) * (b-1)\n ## Denominator\n n = len(set(dataframe[f0])) #????? number of subject in each group\n dfd = a*b*(n-1)\n\n f_cv_a = stats.f.ppf(loc, dfn1, dfd)\n f_cv_b = stats.f.ppf(loc, dfn2, dfd) ## A and B factor critical value is different when they have \n # different level of factors\n f_cv_ab = stats.f.ppf(loc, dfn3, dfd)\n print(f'Critical value for {f0}:', f_cv_a)\n print(f'Critical value for {f1}:', f_cv_b)\n print('Critical value for interaction:', f_cv_ab)\n\n F_critical = [f_cv_b, f_cv_a, f_cv_ab]\n return F_critical",
"def _calc_sample_fracs(t_df: DataFrame,\r\n c_can_df: DataFrame,\r\n match_col: str) -> Tuple[pd.DataFrame, pd.DataFrame, float, float]:\r\n _persist_if_unpersisted(t_df)\r\n _persist_if_unpersisted(c_can_df)\r\n\r\n t_counts = t_df.groupby(match_col).count().withColumnRenamed('count', 'treatment')\r\n c_can_counts = c_can_df.groupby(match_col).count().withColumnRenamed('count', 'control')\r\n fracs = t_counts.join(c_can_counts, on=[match_col])\r\n fracs = fracs.toPandas()\r\n sample_fracs, scale, drop = _calc_optimal_subset(fracs=fracs, match_col=match_col)\r\n logging.getLogger(__name__).info(\"scale = {scale:.2f} drop: = {drop:.2f}\".format(scale=scale, drop=drop))\r\n\r\n\r\n return sample_fracs[[match_col, 'treatment_scaled_sample_fraction']],\\\r\n sample_fracs[[match_col, 'control_scaled_sample_fraction']],\\\r\n scale, drop",
"def test_check_for_max_rmsd():\n phil_groups = ncs_group_master_phil.fetch(\n iotbx.phil.parse(phil_str)).extract()\n pdb_inp = iotbx.pdb.input(source_info=None, lines=test_pdb_str_2)\n ncs_obj_phil = ncs.input(\n hierarchy=pdb_inp.construct_hierarchy(),\n ncs_phil_groups=phil_groups.ncs_group)\n nrgl = ncs_obj_phil.get_ncs_restraints_group_list()\n pdb_inp = iotbx.pdb.input(lines=test_pdb_str_2,source_info=None)\n ph = pdb_inp.construct_hierarchy()\n # passing test\n assert nrgl.check_for_max_rmsd(ph.atoms().extract_xyz() ,chain_max_rmsd=1)\n # make sure test fails when it suppose to\n nrgl[0].copies[1].t = matrix.col([100, -89.7668, 5.8996])\n assert not nrgl.check_for_max_rmsd(ph.atoms().extract_xyz(),chain_max_rmsd=1)",
"def get_SSD():\n dist = 0\n # traversal of pixels in potential Bi+1 block\n # compare corresponding pixel positions with source block in f1 and neighbour block in f2\n y1 = center_y1 - block_rad # start pos.\n for y2 in range(center_y2 - block_rad, (center_y2 - block_rad + block_size)):\n x1 = center_x1 - block_rad # start pos\n for x2 in range(center_x2 - block_rad, (center_x2 - block_rad + block_size)):\n try:\n # displacement formula for RGB channels of each pixel in block\n dist = dist + (frame1[y1][x1][0] - frame2[y2][x2][0])**2 + (frame1[y1][x1][1] - frame2[y2][x2][1])**2 + (frame1[y1][x1][2] - frame2[y2][x2][2])**2\n except RuntimeWarning:\n pass\n x1 += 1\n y1 += 1\n return math.sqrt(dist)"
] |
[
"0.6319029",
"0.54660326",
"0.5425173",
"0.5357689",
"0.49636364",
"0.495898",
"0.4931268",
"0.48972967",
"0.4890005",
"0.48887098",
"0.48865256",
"0.48845127",
"0.48681086",
"0.48547518",
"0.48524323",
"0.48474807",
"0.4844403",
"0.48379698",
"0.48337555",
"0.48272544",
"0.48215038",
"0.48128486",
"0.48001552",
"0.47951135",
"0.47812426",
"0.4780506",
"0.4779845",
"0.47690722",
"0.47682673",
"0.47599927"
] |
0.69869906
|
0
|
Return the DM where the smearing in a single channel causes the same smearing as the effects of the new downsampling rate and dDM.
|
def DM_for_newparams(self, dDM, downsamp):
other_smear = sqrt((1000.0*self.obs.dt)**2.0 +
(1000.0*self.obs.dt*downsamp)**2.0 +
BW_smear(dDM, self.obs.BW, self.obs.f_ctr)**2.0 +
self.sub_smearing**2.0)
return 0.001*other_smear/self.obs.chanwidth*0.0001205*self.obs.f_ctr**3.0
|
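The DM_for_newparams record above applies the same inversion, except that the "other" smearing is built from the proposed downsampling factor and a full-band smearing term derived from the candidate dDM. The BW_smear helper is not shown in this record; the sketch below assumes it behaves like the per-channel smearing formula applied to the full band with a worst-case DM error of dDM/2, which is an assumption rather than a confirmed definition:

from math import sqrt

def bw_smear_ms(dDM, BW, f_ctr):
    # Assumed behaviour of BW_smear: smearing over the full band (MHz) for a
    # worst-case DM error of half the DM step (illustrative only).
    return 1000.0 * (0.5 * dDM) * BW / (0.0001205 * f_ctr**3)

def dm_for_newparams(dt_s, downsamp, dDM, BW, chanwidth, f_ctr, sub_smear_ms=0.0):
    # DM at which the per-channel smearing equals the combined "other" smearing
    # implied by the proposed downsampling factor and DM step.
    other = sqrt((1000.0 * dt_s)**2 +
                 (1000.0 * dt_s * downsamp)**2 +
                 bw_smear_ms(dDM, BW, f_ctr)**2 +
                 sub_smear_ms**2)
    return 0.001 * other / chanwidth * 0.0001205 * f_ctr**3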
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def DM_for_smearfact(self, smearfact):\n other_smear = sqrt((1000.0*self.obs.dt)**2.0 +\n (1000.0*self.obs.dt*self.downsamp)**2.0 +\n self.BW_smearing**2.0 +\n self.sub_smearing**2.0)\n return smearfact*0.001*other_smear/self.obs.chanwidth*0.0001205*self.obs.f_ctr**3.0 + self.obs.cDM",
"def chan_smear(self, DM):\n try:\n DM = where(DM-cDM==0.0, cDM+self.dDM/2.0, DM)\n except TypeError:\n if (DM-cDM==0.0): DM = cDM+self.dDM/2.0\n return dm_smear(DM, self.obs.chanwidth, self.obs.f_ctr, self.obs.cDM)",
"def total_smear(self, DM):\n return sqrt((1000.0*self.obs.dt)**2.0 +\n (1000.0*self.obs.dt*self.downsamp)**2.0 +\n self.BW_smearing**2.0 +\n self.sub_smearing**2.0 +\n self.chan_smear(DM)**2.0)",
"def ddm(self):\n if self.positive:\n return DDMAngle(self.degree, self.minute + (self.second/60))\n else:\n return -DDMAngle(self.degree, self.minute + (self.second/60))",
"def getDM(self):\n return self.subintheader['DM']\n #if self.params is None:\n # return\n #return self.params.getDM()",
"def ddm(self):\n return hp2ddm(self.hp_angle)",
"def ddm(self):\n return gon2ddm(self.gon_angle)",
"def DM(self, source, distance, smweight='uniform'):\n\n if self.dmmodel == 'ne2001':\n return self.DM_NE2001(source, distance, smweight=smweight)\n elif self.dmmodel == 'ymw16':\n return self.DM_YMW16(source, distance)",
"def _dmsmear(self, psr):\n return 8.3E6 * psr.dm * self.bw_chan / math.pow(self.freq, 3.0)",
"def distance_YMW16(self, source, DM):\n\n if not isinstance(DM, astropy.units.quantity.Quantity):\n # assume DM unit\n DM=DM*u.pc/u.cm**3\n if (len(DM.shape)>0 and DM.value.any() <= 0) or (len(DM.shape)==0 and DM.value < 0):\n raise ValueError('DM must be > 0')\n if not isinstance(source, astropy.coordinates.sky_coordinate.SkyCoord):\n if isinstance(source,str):\n # assume .par file\n source=parfile2SkyCoord(source)\n else:\n raise TypeError('Do not know how to interpret an object of type %s' % source.__class__)\n source=source.galactic\n\n if len(source.l.shape)==0:\n \n results=ymw16.dmdtau_c(source.l.value,\n source.b.value, \n DM.to(u.pc/u.cm**3).value,\n 1,\n self.datadir)\n distance=results*u.pc\n return distance,None\n else:\n distance=np.zeros_like(source.l.value)\n it = np.nditer(source.l, flags=['multi_index'])\n dm=DM.to(u.pc/u.cm**3).value\n if not (len(dm.shape)==0 or dm.shape==source.l.shape):\n raise IndexError('Shape of DM must be scalar or the same as shape of coordinates')\n while not it.finished:\n if len(dm.shape)==0:\n dm_touse=dm\n else:\n dm_touse=dm[it.multi_index]\n results=ymw16.dmdtau_c(source[it.multi_index].l.value,\n source[it.multi_index].b.value, \n dm_touse,\n 1,\n self.datadir)\n distance[it.multi_index]=results\n it.iternext()\n return distance*u.pc,None",
"def dcm(self, otherframe):\n\n self._check_frame(otherframe)\n flist = self._dict_list(otherframe, 0)\n outdcm = eye(3)\n for i in range(len(flist) - 1):\n outdcm = outdcm * flist[i + 1]._dcm_dict[flist[i]]\n return outdcm",
"def DM_NE2001(self, source, distance, smweight='uniform'):\n\n assert smweight.lower() in ['uniform','tau','theta','iso']\n\n if not isinstance(distance, astropy.units.quantity.Quantity):\n # assume kpc\n distance=distance*u.kpc \n if (len(distance.shape)>0 and distance.value.any() <= 0) or (len(distance.shape)==0 and distance.value < 0):\n raise ValueError('distance must be > 0')\n\n if not isinstance(source, astropy.coordinates.sky_coordinate.SkyCoord):\n if isinstance(source,str):\n # assume .par file\n source=parfile2SkyCoord(source)\n else:\n raise TypeError('Do not know how to interpret an object of type %s' % source.__class__)\n source=source.galactic\n\n\n if len(source.l.shape)==0:\n results=ne2001.dmdsm(self.datadir,\n np.radians(source.l.value),\n np.radians(source.b.value),\n -1,\n 0,\n distance.to(u.kpc).value)\n sign=1\n if results[2]=='>':\n #raise ValueError('DM returned a lower limit')\n sign=-1\n if smweight.lower() == 'uniform':\n SM=results[3]*u.kpc/u.m**(20./3)\n elif smweight.lower() == 'tau':\n SM=results[4]*u.kpc/u.m**(20./3)\n elif smweight.lower() == 'theta':\n SM=results[5]*u.kpc/u.m**(20./3)\n elif smweight.lower() == 'iso':\n SM=results[6]*u.kpc/u.m**(20./3)\n\n return sign*results[0]*u.pc/u.cm**3,SM\n else:\n dm=np.zeros_like(source.l.value)\n SM=np.zeros_like(source.l.value)\n it = np.nditer(source.l, flags=['multi_index'])\n if len(dm.shape)==0:\n dm_touse=dm\n else:\n dm_touse=dm[it.multi_index]\n while not it.finished:\n if len(distance.shape)==0:\n d_touse=distance\n else:\n d_touse=distance[it.multi_index]\n results=ne2001.dmdsm(self.datadir,\n np.radians(source[it.multi_index].l.value),\n np.radians(source[it.multi_index].b.value),\n -1,\n 0,\n d_touse.to(u.kpc).value)\n sign=1\n if results[2]=='>':\n #raise ValueError('DM returned a lower limit')\n sign=-1\n dm[it.multi_index]=results[0]*sign\n if smweight.lower() == 'uniform':\n SM[it.multi_index]=results[3]\n elif smweight.lower() == 'tau':\n SM[it.multi_index]=results[4]\n elif smweight.lower() == 'theta':\n SM[it.multi_index]=results[5]\n elif smweight.lower() == 'iso':\n SM[it.multi_index]=results[6]\n\n it.iternext()\n return dm*u.pc/u.cm**3,SM*u.kpc/u.m**(20./3)",
"def _get_duans_smearing_estimate(self, response_data):\n\n res = self._model.fit()\n\n residuals = res.resid.as_matrix()\n residuals = np.expand_dims(residuals, axis=0)\n residuals = np.expand_dims(residuals, axis=2)\n residuals = np.tile(residuals, (response_data.shape[0], 1, response_data.shape[2]))\n\n response_data = np.tile(response_data, (1, residuals.shape[1], 1)) + residuals\n\n # apply the inverse transform function to the response data\n response_variable_transform, raw_variable = find_raw_variable(self._response_variable)\n inverse_transform_func = INVERSE_TRANSFORM_FUNCTIONS[response_variable_transform]\n transformed_response_data = inverse_transform_func(response_data)\n\n smeared_data = np.mean(transformed_response_data, axis=1)\n\n return smeared_data",
"def dd_plan(centrefreq, bandwidth, nfreqchan, timeres, lowDM, highDM, min_DM_step=0.02):\n\n DD_plan_array = []\n freqres = bandwidth / float(nfreqchan)\n previous_DM = lowDM\n\n #number of time samples smeared over before moving to next D_dm\n smear_fact = 3.\n\n #Loop until you've made a hit your range max\n D_DM = 0.\n downsample = 1\n while D_DM < round(highDM, 2):\n #calculate the DM where the current time resolution equals the\n #dispersion in a frequency channel (a bit of an overkill)\n\n #Dm smear over a frequency channel\n dm_smear = previous_DM * freqres * 8.3 * 10.**6 / centrefreq**3\n total_smear = math.sqrt(timeres**2 +\n dm_smear**2)\n\n\n D_DM = smear_fact * timeres * centrefreq**3 /\\\n (8.3 * 10.**6 * freqres)\n\n #difference in DM that will double the effective width (eq 6.4 of pulsar handbook)\n #TODO make this more robust\n #DM_step = math.sqrt( (2.*timeres)**2 - timeres**2 )/\\\n # (8.3 * 10**6 * bandwidth / centrefreq**3)\n DM_step = smear_fact * total_smear * centrefreq**3 /\\\n (8.3 * 10.**6 * 0.5 * bandwidth)\n\n\n #round to nearest 0.01\n DM_step = round(DM_step, 2)\n if DM_step < min_DM_step:\n #set DM to 0.01 as a zero DM doesn't make sense\n DM_step = min_DM_step\n\n\n if D_DM > highDM:\n #last one so range from to max\n D_DM = highDM\n #range from last to new\n D_DM = round(D_DM, 2)\n nDM_step = int((D_DM - previous_DM) / DM_step)\n if D_DM > lowDM:\n DD_plan_array.append([ previous_DM, D_DM, DM_step, nDM_step, timeres, downsample ])\n previous_DM = D_DM\n\n #Double time res to account for incoherent dedispersion\n timeres *= 2.\n downsample *= 2\n\n return DD_plan_array",
"def get_rdm(self): \n\n # Check if CCSD calculation is performed\n if not self.cc_fragment:\n raise RuntimeError(\"Cannot retrieve RDM because no simulation has been run.\")\n\n # Solve the lambda equation and obtain the reduced density matrix from CC calculation\n self.cc_fragment.solve_lambda()\n cc_onerdm = self.cc_fragment.make_rdm1()\n cc_twordm = self.cc_fragment.make_rdm2()\n\n return cc_onerdm, cc_twordm",
"def calculate_optimal_dmstep(self, acceptedSNR= 95):\n\n if not self.useSNR:\n return 1.205e-7 * self.tsamp * (self.freq ** 3) / self.bandwidth\n \n x, y = self._calculate_snr_spread()\n return fabs(self.centerDm - x[np.max(np.where(y > np.max(y) * float(acceptedSNR) / 100.0 ))])",
"def get_dm(self, dm_name):\n if self.dms is None:\n self.get_all_dm()\n for d in self.dms:\n if d['mdmName'] == dm_name:\n return d",
"def DM(source, distance, model='NE2001'):\n \n d=SkyModel(dmmodel=model)\n return d.DM(source, distance)",
"def DM_YMW16(self, source, distance):\n\n\n if not isinstance(distance, astropy.units.quantity.Quantity):\n # assume kpc\n distance=distance*u.kpc \n if (len(distance.shape)>0 and distance.value.any() <= 0) or (len(distance.shape)==0 and distance.value < 0):\n raise ValueError('distance must be > 0')\n\n if not isinstance(source, astropy.coordinates.sky_coordinate.SkyCoord):\n if isinstance(source,str):\n # assume .par file\n source=parfile2SkyCoord(source)\n else:\n raise TypeError('Do not know how to interpret an object of type %s' % source.__class__)\n source=source.galactic\n\n\n if len(source.l.shape)==0:\n results=ymw16.dmdtau_c(source.l.value,\n source.b.value,\n distance.to(u.pc).value,\n 2,\n self.datadir)\n\n return results*u.pc/u.cm**3,None\n else:\n dm=np.zeros_like(source.l.value)\n it = np.nditer(source.l, flags=['multi_index'])\n if not (len(distance.shape)==0 or distance.shape==source.l.shape):\n raise IndexError('Shape of distance must be scalar or the same as shape of coordinates')\n d=distance.to(u.pc).value\n while not it.finished:\n if len(d.shape)==0:\n d_touse=distance\n else:\n d_touse=distance[it.multi_index]\n results=ymw16.dmdtau_c(source[it.multi_index].l.value,\n source[it.multi_index].b.value,\n d_touse.to(u.pc).value,\n 2,\n self.datadir)\n \n dm[it.multi_index]=results\n it.iternext()\n \n return dm*u.pc/u.cm**3,None",
"def MSD(df, conversion = \"x\"):\n #conversion from pixels to micrometers\n if conversion == \"y\":\n df = df/1200*633\n else:\n df = df/1600*844\n msd = []\n for i in range(len(df)):\n #computes the msd for the x or y coordinates between the different frames\n msd.append(tidynamics.msd(df.T[i]))\n\n msd = pd.DataFrame(msd)\n\n return msd",
"def DM_profile(self, R, potential='NFW'):\n\n if 'potential' in self.ic.keys():\n potential = self.ic['potential']\n \n if 'potential_type' in self.ic.keys():\n potential = self.ic['potential_type']\n \n \n if potential == 'NFW':\n dens = prof.NFW_DM(R, r_s=self.ic['b'], M200=self.ic['M200'], rho_crit=self.ic['rho_crit'],\n decay=False, r_decay=None)\n \n elif potential == 'Burkert':\n dens = prof.burkert_DM(R, self.ic['b'], self.ic['M200'], rho_crit=self.ic['rho_crit'])\n \n\n \n return dens",
"def calc_dda(self, feedrate, spm):\n\n second_const = 60\n micro_second_const = 1000000\n #dda = micro_second_const / (feedrate * spm)\n dda = second_const * micro_second_const / (feedrate * spm) #Assuming feedrate in mm/min\n return dda",
"def GetRedistributionValue(Md):\n X = Md.SteadyState()\n Mom = Md.IP.get_Moments(Md.ubar,Md.ubar,Md.tau)\n Ealpha1taulogalpha = (1-delta)*X[iEAlph]*Mom[3]/(1-(1-delta)*Mom[0])\n return (-X[iElogAlpha]+Ealpha1taulogalpha/X[iEAlph] )/(1-beta)",
"def calc_D(state):\n\t\tif t < thresh:\n\t\t\tstate.D_g[t] = 0.5\n\t\t\tstate.D_n[t] = 0.5\n\t\telse:\n\t\t\tif mod == \"constant\":\n\t\t\t\tstate.D_g[t] = D\n\t\t\t\tstate.D_n[t] = 1-D\n\t\t\tif mod == \"value\":\n\t\t\t\t# NOTE: if rmag and lmag is 1/0, can just use V\n\t\t\t\t# average of two actions\n\t\t\t\tV = np.mean(1/2*(state.QG[t,:] - state.QN[t,:])) # state average(?) \n\t\t\t\tV = 1/(1 + np.exp(-V*k)) # translate between 0 and 1\n\t\t\t\tstate.D_g[t] = V \n\t\t\t\tstate.D_n[t] = 1 - V\n\t\treturn state",
"def distance_NE2001(self, source, DM, smweight='uniform'):\n\n assert smweight.lower() in ['uniform','tau','theta','iso']\n\n if not isinstance(DM, astropy.units.quantity.Quantity):\n # assume DM unit\n DM=DM*u.pc/u.cm**3\n if (len(DM.shape)>0 and DM.value.any() <= 0) or (len(DM.shape)==0 and DM.value < 0):\n raise ValueError('DM must be > 0')\n if not isinstance(source, astropy.coordinates.sky_coordinate.SkyCoord):\n if isinstance(source,str):\n # assume .par file\n source=parfile2SkyCoord(source)\n else:\n raise TypeError('Do not know how to interpret an object of type %s' % source.__class__)\n source=source.galactic\n\n if len(source.l.shape)==0:\n \n results=ne2001.dmdsm(self.datadir,\n np.radians(source.l.value),\n np.radians(source.b.value),\n 1,\n DM.to(u.pc/u.cm**3).value,\n 0)\n sign=1\n if results[2]=='>':\n #raise ValueError('distance returned a lower limit')\n sign=-1\n distance=results[1]*u.kpc*sign\n if smweight.lower() == 'uniform':\n SM=results[3]*u.kpc/u.m**(20./3)\n elif smweight.lower() == 'tau':\n SM=results[4]*u.kpc/u.m**(20./3)\n elif smweight.lower() == 'theta':\n SM=results[5]*u.kpc/u.m**(20./3)\n elif smweight.lower() == 'iso':\n SM=results[6]*u.kpc/u.m**(20./3)\n return distance,SM\n else:\n distance=np.zeros_like(source.l.value)\n SM=np.zeros_like(source.l.value)\n it = np.nditer(source.l, flags=['multi_index'])\n dm=DM.to(u.pc/u.cm**3).value\n if not (len(dm.shape)==0 or dm.shape==source.l.shape):\n raise IndexError('Shape of DM must be scalar or the same as shape of coordinates')\n while not it.finished:\n if len(dm.shape)==0:\n dm_touse=dm\n else:\n dm_touse=dm[it.multi_index]\n results=ne2001.dmdsm(self.datadir,\n np.radians(source[it.multi_index].l.value),\n np.radians(source[it.multi_index].b.value),\n 1,\n dm_touse,\n 0)\n sign=1\n if results[2]=='>':\n #raise ValueError('distance returned a lower limit')\n sign=-1\n distance[it.multi_index]=results[1]*sign\n if smweight.lower() == 'uniform':\n SM[it.multi_index]=results[3]\n elif smweight.lower() == 'tau':\n SM[it.multi_index]=results[4]\n elif smweight.lower() == 'theta':\n SM[it.multi_index]=results[5]\n elif smweight.lower() == 'iso':\n SM[it.multi_index]=results[6]\n it.iternext()\n return distance*u.kpc,SM*u.kpc/u.m**(20./3)",
"def _w_diff_dcm(self, otherframe):\n dcm2diff = self.dcm(otherframe)\n diffed = dcm2diff.diff(dynamicsymbols._t)\n angvelmat = diffed * dcm2diff.T\n w1 = trigsimp(expand(angvelmat[7]), recursive=True)\n w2 = trigsimp(expand(angvelmat[2]), recursive=True)\n w3 = trigsimp(expand(angvelmat[3]), recursive=True)\n return -Vector([(Matrix([w1, w2, w3]), self)])",
"def ddm(self):\n return dec2ddm(self.dec_angle)",
"def get_msd(data):\n if data is None:\n raise EmptyDataError('[!] Invalid data value')\n\n result = TA.MSD(data)\n if result is None:\n raise IndicatorException\n return result",
"def get_simulated_dBm(self):\n cheater_speed = 0 # m/s\n cheater_bearing = 90.0 # degrees: 0=North, 90=East, 180=South, 270=West\n cheater_height = 1.0 # meters\n\n elapsed_time = time.time() - self.cheater_start_time\n # Don't move the cheater until the DELAY has passed\n if elapsed_time < 0:\n elapsed_time = 0\n\n self.hotspot_meters_moved = elapsed_time * cheater_speed\n\n current_local = self.dc.read_gps()\n current_pos = Coordinate.from_gps_data(current_local)\n\n self.current_simulated_hotspot = self.initial_simulated_hotspot.offset_bearing(cheater_bearing, self.hotspot_meters_moved)\n horizontal_distance = current_pos.distance_to(self.current_simulated_hotspot)\n vertical_distance = current_local.alt - cheater_height\n distance_to_hotspot = sqrt(vertical_distance**2 + horizontal_distance**2)\n\n dBm = SIMULATED_ALPHA * np.log10(distance_to_hotspot) + SIMULATED_EPSILON\n dBm += np.random.normal(0,2) # Add random noise to the data\n\n return dBm",
"def calculate_msd(distance, set_idx_p, set_idx_m, show=False):\n\n p2p, p2m = [], []\n for src in distance.keys():\n if src in set_idx_p:\n tgt2dist = distance[src]\n for tgt in tgt2dist.keys():\n if tgt in set_idx_p:\n if tgt > src:\n p2p.append(tgt2dist[tgt])\n elif tgt in set_idx_m:\n if tgt > src:\n p2m.append(tgt2dist[tgt])\n else:\n continue\n elif src in set_idx_m:\n tgt2dist = distance[src]\n for tgt in tgt2dist.keys():\n if tgt in set_idx_m:\n if tgt > src:\n p2p.append(tgt2dist[tgt])\n elif tgt in set_idx_p:\n if tgt > src:\n p2m.append(tgt2dist[tgt])\n else:\n continue\n else:\n continue\n\n msd, nmsd = np.mean([p**2 for p in p2p]), np.mean([p**2 for p in p2m])\n\n if show:\n print(\"MSD=%.2f, nMSD=%.2f\"%(\n msd, nmsd))\n\n return msd, msd/nmsd"
] |
[
"0.75583947",
"0.720625",
"0.65490854",
"0.62852675",
"0.61138403",
"0.59767824",
"0.5833507",
"0.5808786",
"0.57996666",
"0.5796143",
"0.5689878",
"0.56716835",
"0.5656901",
"0.5634969",
"0.5612287",
"0.5561598",
"0.55413216",
"0.5523513",
"0.54446",
"0.54412264",
"0.54284173",
"0.5403438",
"0.53907496",
"0.5386203",
"0.5349133",
"0.5329162",
"0.52739835",
"0.52415234",
"0.5240532",
"0.519591"
] |
0.781685
|
0
|
cls holds db while self holds field_name and model
|
def full_init_self(self, db, field_name, model):
    if not self.db:
        self.__class__.db = db
    self.field_name = field_name
    self.model = model  # property
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def db_fields(self):",
"def _prepare(cls):\n # the dbmodel is either the proxy base or ourselves\n dbmodel = cls._meta.concrete_model if cls._meta.proxy else cls\n cls.__dbclass__ = dbmodel\n if not hasattr(dbmodel, \"__instance_cache__\"):\n # we store __instance_cache__ only on the dbmodel base\n dbmodel.__instance_cache__ = {}\n super()._prepare()",
"def _database(self):\n ...",
"def __init__(self, model_cls, database):\n self.model = model_cls\n self._model_cls = model_cls\n self._database = database\n self._order_by = []\n self._where_q = Q()\n self._prewhere_q = Q()\n self._grouping_fields = []\n self._grouping_with_totals = False\n self._fields = model_cls.fields().keys()\n self._limits = None\n self._limit_by = None\n self._limit_by_fields = None\n self._distinct = False\n self._final = False",
"def __init__(self, db):\n self.db = db",
"def __init__(self):\n self._connection = get_db_connection()",
"def db_table(self):",
"def db(cls):\n return getattr(db, cls.__name__)",
"def __init__(self):\n self.dbcon = DbConnection.get_con()",
"def retrieve_from_db(self):\n pass",
"def __init__(self, database):\n self.database = database",
"def __init__(self):\n\t\tDBHelper.initialize() #initiate dababase helper",
"def __init__(self, db):\n self.table_name = \"query_latent_space\"\n self.db = db",
"def _db_field(self):\n return self.specific._db_field({\n 'verbose_name': self.verbose_name,\n 'help_text': self.help_text,\n 'blank': not self.required,\n 'null': not self.required,\n 'unique': self.unique,\n 'primary_key': self.primary_key,\n 'db_index': self.index or None,\n })",
"def __init__(self) -> None:\n self.Database = Database()",
"def __init__(self,\r\n primary_key=False,\r\n index=False,\r\n db_field=None,\r\n default=None,\r\n required=False,\r\n save_strategy=None):\r\n self.primary_key = primary_key\r\n self.index = index\r\n self.db_field = db_field\r\n self.default = default\r\n self.required = required\r\n self.save_strategy = save_strategy\r\n\r\n #the column name in the model definition\r\n self.column_name = None\r\n\r\n self.value = None\r\n\r\n #keep track of instantiation order\r\n self.position = Column.instance_counter\r\n Column.instance_counter += 1",
"def __init__(self):\n\t\tself.obtainDatabaseConnection()",
"def __init__(self):\n\n # declare db-internal attributes\n BaseModel.__init__(self)\n \n ## DB ID of Project \n self.project_id = -1\n ## UUID of FSServer\n self.fileserver_uuid = -1\n ## partition\n self.part = \"\"\n ## type of volumes\n self.vol_type = \"\"\n ## number of volumes of that type\n self.num_vol = -1\n ## total used kilobytes \n self.used_kb = -1\n ## list of attributes not to put into the DB\n self.unmapped_attributes_list = [ 'parts', 'ExtServAttr' ]",
"def __init__(self):\n self.dbconnect = dbConnection.connection",
"def create_db(self):",
"def __init__(self, **kwargs):\n assert self.__class__.get_primary() != None\n for field in self.fields()+self.tables():\n value = object.__getattribute__(self, field)\n instance = value.__class__(*value.args, **value.kwargs)\n instance.model = self\n object.__setattr__(self, field, instance)\n \n for key, val in kwargs.iteritems():\n self.__setattr__(key, val)\n \n self._retrieved = False",
"def __post_init__(self):\n self.dbase = databases.Database(\n self.dsn,\n min_size=self.min_size,\n max_size=self.max_size\n )\n self.engine, self.meta = self.get_engine_metadata()",
"def db_for_write(self, model, **hints):\n return None",
"def __init__(self):\n self.db = ALL_USERS",
"def _db_connection(self):\n pass",
"def _db(self, value):",
"def __init__(self):\n self.data = None\n self.conn = None\n self.database = None\n self.table = None\n self.manage = None\n self.limiting = 0",
"def __init__(self):\n self.db = Databank()\n self.db.connection()\n # self.db.cursor.execute('USE library')",
"def model(self):",
"def model(self):"
] |
[
"0.7178065",
"0.66852754",
"0.6673914",
"0.6491509",
"0.6480346",
"0.64386976",
"0.64381945",
"0.63003075",
"0.6217057",
"0.618554",
"0.61548877",
"0.6149434",
"0.6130378",
"0.61014885",
"0.6080942",
"0.6079171",
"0.60749394",
"0.6051769",
"0.60350996",
"0.6010226",
"0.60100234",
"0.6007361",
"0.5994782",
"0.59571517",
"0.59428334",
"0.59208393",
"0.5910012",
"0.59051174",
"0.5896161",
"0.5896161"
] |
0.7959181
|
0
|
Make sure we have a markdown folder to write to.
|
def directory(self) -> Path:
    (directory := Path("markdown").resolve(strict=False)).mkdir(exist_ok=True, parents=True)
    return directory
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def wrap_md(a_path, comments):\r\n if not a_path:\r\n say_it(\"-- Error. No value specified for %s\" % comments)\r\n return 1\r\n if not os.path.isdir(a_path):\r\n try:\r\n os.makedirs(a_path)\r\n except Exception as e:\r\n say_it(\"-- Error. can not makedir %s <%s>\" % (a_path, comments))\r\n say_it(e)\r\n say_it(\"\")\r\n return 1",
"def wrap_md(a_path, comments):\r\n if not a_path:\r\n say_it(\"-- Error. No value specified for %s\" % comments)\r\n return 1\r\n if not os.path.isdir(a_path):\r\n try:\r\n os.makedirs(a_path)\r\n except Exception as e:\r\n say_it(\"-- Error. can not makedir %s <%s>\" % (a_path, comments))\r\n say_it(e)\r\n say_it(\"\")\r\n return 1",
"def create(contents, title, path=\"\"):\n filename = secure_filename(title)\n data_dir = get_data_dir()\n max_filename_length = 255\n if len(filename + \".md\") > max_filename_length:\n filename = filename[0 : max_filename_length - 3]\n if not is_relative_to(data_dir / path, data_dir):\n path = \"\"\n path_to_md_file = data_dir / path / f\"{filename}.md\"\n with open(path_to_md_file, \"w\", encoding=\"utf-8\") as file:\n file.write(contents)\n\n return path_to_md_file",
"def save_markdown_report(self, **kwargs):\n save_dir = os.path.dirname(self.file_paths[0])\n timestamp = datetime.datetime.utcnow().strftime(\"%Y-%j-%Hh%Mm%Ss\")\n markdown_file_name = \"report_{}.md\".format(timestamp)\n markdown_file_path = os.path.join(save_dir, markdown_file_name)\n report_str = self.generate_report()\n with open(markdown_file_path, \"w\") as md_file:\n md_file.write(report_str)",
"def outputMarkdown(self, mdFile):\n if os.path.exists(mdFile):\n os.remove(mdFile)\n\n self.__writeToFile(mdFile)",
"def mnote(tmp_path) -> Path:\n path = tmp_path/\"test.md\"\n path.write_text(\"# 0 Test\\n\\nTest note.\\n\")\n yield path",
"def can_markdown(repo, fname):\n if markdown is None:\n return False\n\n if not repo.info.embed_markdown:\n return False\n\n return fname.endswith(\".md\")",
"def write_as_rmd(nb, rmd_path, has_solutions):\n if os.path.splitext(rmd_path)[1] != \".Rmd\":\n raise ValueError(\"The provided path does not have the .Rmd extension\")\n\n nb = deepcopy(nb)\n\n # prevent neighboring markdown cells from having two lines inserted between them in the student\n # notebook (resolves whitespace issues caused by the use of prompts for written questions)\n if not has_solutions:\n for i, cell in enumerate(nb[\"cells\"]):\n if i < len(nb[\"cells\"]) - 1 and cell[\"cell_type\"] == \"markdown\" and \\\n nb[\"cells\"][i + 1][\"cell_type\"] == \"markdown\":\n cell[\"metadata\"][\"lines_to_next_cell\"] = 0\n\n # add assignment name to Rmd metadata if necessary\n assignment_name = nb[\"metadata\"].get(NOTEBOOK_METADATA_KEY, {}).get(\"assignment_name\", None)\n if assignment_name:\n config_cell = nb[\"cells\"][0]\n source = get_source(config_cell)\n source.insert(-1, f\"assignment_name: \\\"{assignment_name}\\\"\")\n config_cell[\"source\"] = \"\\n\".join(source)\n\n jupytext.write(nb, rmd_path)",
"def process_file_markdown(src_pathname):\n dest_pathname = path_src_to_dest(src_pathname, '.html')\n\n logging.info(\"Processing Markdown file: %s -> %s\" %\n (str(src_pathname), str(dest_pathname)))\n\n ensure_dest_dir(dest_pathname)\n\n with open(dest_pathname, 'w', encoding='UTF-8') as f:\n outstr = docgen.generate.generate_doc(str(src_pathname),\n verbose=config['verbose'],\n inlinecss=True,\n inlinewave=True,\n asdiv=False)\n f.write(outstr)\n\n return dest_pathname",
"def create_directory():\r\n\r\n # Create directory for all lyrics\r\n try:\r\n os.mkdir(markovDir)\r\n except FileExistsError:\r\n pass",
"def import_editor_markdown_file(mdfile_dir,\n output_content_dir,\n output_dirname,\n create_empty_mdfile=False):\n\n # Create output directory if it does not exist\n if not os.path.isdir(output_content_dir):\n os.makedirs(output_content_dir)\n\n # Make sure source directory exists\n if not os.path.isdir(mdfile_dir):\n print 'Cannot find directory:', mdfile_dir\n return\n\n # Compute output directory\n output_markdown_dir = os.path.join(output_content_dir, output_dirname)\n\n # Create empty target structure directory\n if os.path.isdir(output_markdown_dir):\n shutil.rmtree(output_markdown_dir)\n os.makedirs(output_markdown_dir)\n\n # We know that input directory has only one markdown file with\n # the same name (content.md)\n input_mdfile = os.path.join(mdfile_dir, 'content.md')\n output_mdfile = os.path.join(output_markdown_dir, 'content.md')\n\n # Copy markdown file or create empty one if requested\n if os.path.isfile(input_mdfile):\n shutil.copy(input_mdfile, output_mdfile)\n else:\n if create_empty_mdfile:\n print 'Generating empty markdown file.'\n with open(output_mdfile, 'w') as f:\n f.write('# Empty Markdown\\n')\n else:\n # If there is nothing to import then return\n print 'Cannot find markdown file in {}', mdfile_dir\n return\n\n # Copy markdown images if needed\n input_mdimages_dir = os.path.join(mdfile_dir, 'md-imgs')\n if os.path.isdir(input_mdimages_dir):\n output_mdimages_dir = os.path.join(output_markdown_dir, 'md-imgs')\n shutil.copytree(input_mdimages_dir, output_mdimages_dir)",
"def update_md(user, title, md_path):\n with open(md_path, \"a\") as f:\n f.writelines(f\"[@{user}](https://github.com/{user}) | {title}\\n\")",
"def test_md(tmp_path) -> Path:\n yield Path(tmp_path)/\"test.md\"",
"def generate():\n\n # Verify if directory exists\n if not os.path.isdir(config.techniques_markdown_path):\n os.mkdir(config.techniques_markdown_path)\n\n #Write the technique index.html page\n with open(os.path.join(config.techniques_markdown_path, \"overview.md\"), \"w\", encoding='utf8') as md_file:\n md_file.write(config.technique_overview_md)\n\n for domain in config.domains:\n generate_domain_markdown(domain)",
"def idempotence(args):\n title, posts = parse_markdown(os.path.join(args.root, 'index.md'))\n print_markdown(posts, title, os.path.join(args.dest, 'index.md'))",
"def test_issue_62(self):\n outdir = os.path.join(outputdir, 'issue62')\n make_and_clear_directory(outdir)\n yaml_fname = os.path.join(sourcedir, 'issue_62.yaml')\n MarkdownGenerator(yaml_fname).serialize(directory=outdir)",
"def test_session10_readme_exists():\n assert os.path.isfile(\"README.md\"), \"README.md file missing!\"",
"def process_markdown(input_markdown, output_name, latex_img_dir = \"./\", input_path = \"./\", thumb_size=64):\n\tmd = markdown.Markdown( extensions=[ 'meta'\n\t , 'codehilite'\n\t , 'tables'\n\t , 'def_list'\n\t , 'footnotes'\n\t , ResourceExtractor({ \"resource_dir\": output_name\n\t , \"relative_path\": input_path\n\t })\n\t , AbstractExtractor()\n\t , ToCExtractor()\n\t , MathJaxExtension()\n\t , LaTeX({ \"latex_img_dir\": latex_img_dir\n\t , \"input_path\": input_path\n\t })\n\t ]\n\t )\n\t\n\t# Basic HTML conversion\n\thtml = md.convert(input_markdown)\n\t\n\t# Generate table of contents\n\ttoc = md.toc\n\t\n\t# Choose document title (default to the output name)\n\ttitle = output_name\n\t# Use the first heading if possible\n\tif len(toc) > 0:\n\t\ttitle = toc[0][1]\n\t# Better yet, get the explicitly given metadata\n\ttitle = md.Meta.get(\"title\", [title])[0]\n\t\n\t# Choose document subtitle (only available from metadata)\n\tsubtitle = md.Meta.get(\"subtitle\", [None])[0]\n\t\n\t# Get the image from the metadata\n\timg = md.Meta.get(\"img\", [None])[0]\n\timg_alt = md.Meta.get(\"img_alt\", [title])[0]\n\t\n\t# The abstract should be taken to be the first paragraph.\n\tabstract = md.abstract if md.abstract is not None else \"\"\n\t\n\t# Get the list of tags\n\ttags = md.Meta.get(\"tags\", [])\n\t\n\t# Get the list of files to include\n\tincludes = md.Meta.get(\"include\", [])\n\t\n\t# Get the show option\n\tshow = md.Meta.get(\"show\", [\"True\"])[0] == \"True\"\n\t\n\tfiles = md.resources\n\t\n\t# Add the article image to the list of files and create a thumbnail if\n\t# possible.\n\tif img is not None and img.startswith(\"file://\"):\n\t\timg = os.path.join(input_path, img[len(\"file://\"):])\n\t\timg_output_name = \"%s/%s\"%(output_name,\n\t\t unique(os.path.basename(img),\n\t\t [f.split(\"/\")[-1] for (_,f) in files]))\n\t\t\n\t\timg_thumbnail = \"%s.thumb.png\"%img\n\t\t\n\t\tp = Popen( [\"convert\"\n\t\t , img\n\t\t , \"-thumbnail\", \"%dx%d\"%(thumb_size,thumb_size)\n\t\t , img_thumbnail]\n\t\t , stdin = None\n\t\t , stdout = sys.stderr\n\t\t , stderr = sys.stderr\n\t\t )\n\t\tif p.wait() != 0:\n\t\t\traise Exception(\"Creating img thumbnail failed.\")\n\t\t\n\t\tfiles.append((img_thumbnail, img_output_name))\n\t\timg = img_output_name\n\t\n\t# Generate meta-data\n\tmeta_data = {\n\t\t\"url\" : output_name,\n\t\t\"title\" : title,\n\t\t\"subtitle\" : subtitle,\n\t\t\"img\" : img,\n\t\t\"img_alt\" : img_alt,\n\t\t\"abstract\" : abstract,\n\t\t\"tags\" : tags,\n\t\t\"show\" : show,\n\t}\n\t\n\treturn html, toc, meta_data, files, includes",
"def on_page_markdown(self, markdown, page, config, files):\n listext = self.config['ext']\n src_file_path = page.file.abs_src_path\n prepath, ext = os.path.splitext(src_file_path)\n lang = ext.lstrip('.')\n filename = page.file.name\n if ext in listext:\n new_markdown = \"# {0}\\n\\n```{1}\\n\".format(filename, lang) + markdown + \"\\n```\"\n return new_markdown\n else:\n return markdown",
"def __writeToFile(self, mdFile):\n with open(mdFile, 'a') as writer:\n for line in self.__markdownOutput: \n writer.write(line)",
"def helper():\n \n import webbrowser, os.path\n \n path = os.path.splitext(__file__)[0]\n helpspec = \"file://\" + path + os.path.sep + \\\n \"markdown.html\"\n \n # webbrowser.open seems not to work well\n browser = webbrowser.get()\n if not browser.open_new(helpspec):\n print((\"Help file not found:\" + helpspec))",
"def helper():\n \n import webbrowser, os.path\n \n path = os.path.splitext(__file__)[0]\n helpspec = \"file://\" + path + os.path.sep + \\\n \"markdown.html\"\n \n # webbrowser.open seems not to work well\n browser = webbrowser.get()\n if not browser.open_new(helpspec):\n print((\"Help file not found:\" + helpspec))",
"def check_working_dir(directory):\n filepath = os.path.join(directory, \"text.txt\")\n try:\n open(filepath, \"w\")\n except IOError:\n sys.exit('Unable to write to directory {0} \\n Exiting Drupdates'.format(directory))\n return False\n return True",
"def test_readme_exists():\n assert os.path.isfile(\"README.md\"), \"README.md file missing!\"",
"def test_readme():\n notebooks_readme = Path(\"notebooks/README.md\").read_text()\n for item in Path(\"notebooks\").iterdir():\n if item.is_dir():\n # item is a notebook directory\n notebook_dir = item.relative_to(\"notebooks\")\n if str(notebook_dir)[0].isdigit():\n assert \"README.md\" in [filename.name for filename in item.iterdir()], \\\n f\"README not found in {item}\"\n assert str(notebook_dir) in notebooks_readme, f\"{item} not found in notebooks README\"",
"def test_readme():\n notebooks_readme = Path(\"notebooks/README.md\").read_text()\n for item in Path(\"notebooks\").iterdir():\n if item.is_dir():\n # item is a notebook directory\n notebook_dir = item.relative_to(\"notebooks\")\n if str(notebook_dir)[0].isdigit():\n assert \"README.md\" in [\n filename.name for filename in item.iterdir()\n ], f\"README not found in {item}\"\n assert (\n str(notebook_dir) in notebooks_readme\n ), f\"{item} not found in notebooks README\"",
"def write_to_md(dictData, outputDirectory):\n\tdic = prepare_hw_dict(dictData)\n\tfor hw in dic:\n\t\tfileout = os.path.join(outputDirectory, hw+'.md')\n\t\t# Prepare the output file\n\t\tfout = codecs.open(fileout, 'w', 'utf-8')\n\t\t# Write frontmatter\n\t\tfout.write('---\\ntitle: \"'+hw+'\"\\n---\\n\\n')\n\t\t# For each (headword, meanings, verseNumber, PageNum) tuples,\n\t\tfor (hw, meanings, verse, verseNumDetails, pageNumDetails) in dic[hw]:\n\t\t\tcommaed = ', '.join(meanings)\n\t\t\tverse = verse.replace('<BR>', '<br />')\n\t\t\t# Write in babylon format. <BR><BR> is to separate verses.\n\t\t\tfout.write('# ' + hw + '\\n## ' + commaed + '\\n' + verse + '<br />verse ' + verseNumDetails + '<br />page ' + pageNumDetails +'\\n\\n')\n\t\tfout.close()\n\n\t# Give some summary to the user\n\tprint('MD files generated. Success!')\n\tprint('{} separate .md files written, one per headword.'.format(len(dic)))",
"def update_readme():\n\n temp = \"\"\"<head>\n <title>Unittest Results</title>\n <meta charset=\"utf-8\">\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">\n <link rel=\"stylesheet\" href=\"https://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/css/bootstrap.min.css\" integrity=\"sha384-1q8mTJOASx8j1Au+a5WDVnPi2lkFfwwEAa8hDDdjZlpLegxhjVME1fgjWPGmkzs7\" crossorigin=\"anonymous\">\n</head>\"\"\"\n\n with open(\"README_proxy.md\", \"r\") as old_readme_file:\n old_readme_txt = old_readme_file.read()\n\n with open(\"reports/test_result.html\", \"r\") as html_file:\n html = html_file.read().splitlines()[0:-21]\n html = \"\\n\".join(html).replace(temp, \"\")\n\n with open(\"README.md\", \"w\") as new_readme_file:\n new_readme_file.write(old_readme_txt + \"\\n\\n\\n\" + html + \"</body></html>\")",
"def generate_markdown(self, outdir, definitions):\n doc_cases = defaultdict(lambda: defaultdict(list))\n\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n for category in definitions.categories:\n cat_outdir = os.path.join(outdir, category.replace(\" \", \"_\").lower())\n if not os.path.exists(cat_outdir):\n os.makedirs(cat_outdir)\n\n for case in definitions.case_set:\n if case[\"category\"] == category:\n doc_cases[category][case[\"name\"]] = case\n\n for technique in doc_cases[category]:\n rendered = self._generate_markdown(doc_cases[category][technique])\n md_filename = technique.replace(\" \", \"_\").lower() + \".md\"\n filename = os.path.join(cat_outdir, md_filename)\n mdoutfile = open(filename, \"w\")\n mdoutfile.write(rendered)\n mdoutfile.close()",
"def do_write_folder(self, line):\n\n if self.root_directory:\n self.write_folder = self.root_directory + \"/\" + line\n print(f\"Folder to write files is: {self.root_directory}/{line}\")\n else:\n self.write_folder = line\n print(f\"Folder to write files is: {line}\")"
] |
[
"0.646971",
"0.646971",
"0.6396185",
"0.5985568",
"0.5970417",
"0.5954497",
"0.5881312",
"0.5827589",
"0.57743984",
"0.5773752",
"0.5702826",
"0.5683667",
"0.56506604",
"0.5638995",
"0.5618831",
"0.5608611",
"0.5591611",
"0.5572487",
"0.5526632",
"0.5501867",
"0.54801977",
"0.54801977",
"0.54689944",
"0.54637694",
"0.5450179",
"0.5418389",
"0.5403354",
"0.5401869",
"0.5394271",
"0.5392848"
] |
0.6719051
|
0
|
Construct an hparam dictionary from the flags.
|
def _build_flags_hparam_dict():
    logging.info('Show FLAGS for debugging:')
    for f in HPARAM_FLAGS:
        logging.info('%s=%s', f, FLAGS[f].value)
    hparam_dict = collections.OrderedDict([
        (name, FLAGS[name].value) for name in HPARAM_FLAGS
    ])
    return hparam_dict
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _get_hparam_flags():\n hparam_dict = utils_impl.lookup_flag_values(shared_flags)\n\n # Update with optimizer flags corresponding to the chosen optimizers.\n opt_flag_dict = utils_impl.lookup_flag_values(optimizer_flags)\n opt_flag_dict = optimizer_utils.remove_unused_flags('client', opt_flag_dict)\n opt_flag_dict = optimizer_utils.remove_unused_flags('server', opt_flag_dict)\n hparam_dict.update(opt_flag_dict)\n estimation_flag_dict = utils_impl.lookup_flag_values(estimation_flags)\n hparam_dict.update(estimation_flag_dict)\n\n # Update with task-specific flags.\n task_name = FLAGS.task\n if task_name in TASK_FLAGS:\n task_hparam_dict = utils_impl.lookup_flag_values(TASK_FLAGS[task_name])\n hparam_dict.update(task_hparam_dict)\n\n return hparam_dict",
"def get_hparams():\n hparams = ast.literal_eval(FLAGS.hparams if FLAGS.hparams else '{}')\n hparams['temperature'] = FLAGS.temperature\n return hparams",
"def define_flags():\n define_flag = {\n 'boolean': flags.DEFINE_boolean,\n 'float': flags.DEFINE_float,\n 'integer': flags.DEFINE_integer,\n 'string': flags.DEFINE_string,\n }\n for name, param_spec in six.iteritems(proparams._DEFAULT_PARAMS):\n define_flag[param_spec.flag_type](name, param_spec.default_value, param_spec.description)\n flags.declare_key_flag(name)",
"def build_hparams(FLAGS):\n hparams = add_model_parameters(hyperparameters.params, FLAGS)\n hparams.training = True\n if FLAGS.hparams:\n hparams.parse(FLAGS.hparams)\n if FLAGS.eval_model:\n hparams.summary_frequency = 1\n hparams.test_frequency = 1\n hparams.save_frequency = 5\n hparams.training = False\n\n hparams.sdr_frequency = hparams.test_frequency * constants.AVG_SDR_ON_N_BATCHES\n # See STFT scipy doc\n hparams.waveform_size = (hparams.ntimebins - 1) * constants.ndiff\n\n return hparams",
"def _build_param_dict(self):\n # Add parameter handlers to parameter dict.\n self._param_dict = ProtocolParameterDict()\n \n self._param_dict.add(Parameter.CYCLE_TIME,\n r'(\\d+)\\s+= Cycle Time \\(.*\\)\\r\\n(0|1)\\s+= Minutes or Seconds Cycle Time',\n lambda match : self._to_seconds(int(match.group(1)),\n int(match.group(2))),\n self._int_to_string,\n visibility=ParameterDictVisibility.READ_WRITE,\n startup_param=True,\n direct_access=False,\n default_value=20,\n menu_path_read=SubMenu.SHOW_PARAM,\n submenu_read=[],\n menu_path_write=SubMenu.CHANGE_PARAM,\n submenu_write=[[\"1\", Prompt.CYCLE_TIME_PROMPT]])\n \n self._param_dict.add(Parameter.VERBOSE,\n r'', # Write-only, so does it really matter?\n lambda match : None,\n self._int_to_string,\n visibility=ParameterDictVisibility.READ_ONLY,\n startup_param=True,\n direct_access=True,\n init_value=1,\n menu_path_write=SubMenu.CHANGE_PARAM,\n submenu_write=[[\"2\", Prompt.VERBOSE_PROMPT]])\n \n self._param_dict.add(Parameter.METADATA_POWERUP,\n r'(0|1)\\s+= Metadata Print Status on Power up',\n lambda match : int(match.group(1)),\n self._int_to_string,\n visibility=ParameterDictVisibility.READ_ONLY,\n startup_param=True,\n direct_access=True,\n init_value=0,\n menu_path_write=SubMenu.CHANGE_PARAM,\n submenu_write=[[\"3\", Prompt.METADATA_PROMPT]])\n\n self._param_dict.add(Parameter.METADATA_RESTART,\n r'(0|1)\\s+= Metadata Print Status on Restart Data Collection',\n lambda match : int(match.group(1)),\n self._int_to_string,\n visibility=ParameterDictVisibility.READ_ONLY,\n startup_param=True,\n direct_access=True,\n init_value=0,\n menu_path_write=SubMenu.CHANGE_PARAM,\n submenu_write=[[\"4\", Prompt.METADATA_PROMPT]])\n \n self._param_dict.add(Parameter.RES_SENSOR_POWER,\n r'(0|1)\\s+= Res Power Status',\n lambda match : int(match.group(1)),\n self._int_to_string,\n visibility=ParameterDictVisibility.READ_ONLY,\n startup_param=True,\n direct_access=False,\n init_value=1,\n menu_path_read=SubMenu.SHOW_PARAM,\n submenu_read=[],\n menu_path_write=SubMenu.SENSOR_POWER,\n submenu_write=[[\"1\"]])\n\n self._param_dict.add(Parameter.INST_AMP_POWER,\n r'(0|1)\\s+= Thermocouple & Hydrogen Amp Power Status',\n lambda match : int(match.group(1)),\n self._int_to_string,\n visibility=ParameterDictVisibility.READ_ONLY,\n startup_param=True,\n direct_access=False,\n init_value=1,\n menu_path_read=SubMenu.SHOW_PARAM,\n submenu_read=[],\n menu_path_write=SubMenu.SENSOR_POWER,\n submenu_write=[[\"2\"]])\n\n self._param_dict.add(Parameter.EH_ISOLATION_AMP_POWER,\n r'(0|1)\\s+= eh Amp Power Status',\n lambda match : int(match.group(1)),\n self._int_to_string,\n visibility=ParameterDictVisibility.READ_ONLY,\n startup_param=True,\n direct_access=False,\n init_value=1,\n menu_path_read=SubMenu.SHOW_PARAM,\n submenu_read=[],\n menu_path_write=SubMenu.SENSOR_POWER,\n submenu_write=[[\"3\"]])\n \n self._param_dict.add(Parameter.HYDROGEN_POWER,\n r'(0|1)\\s+= Hydrogen Sensor Power Status',\n lambda match : int(match.group(1)),\n self._int_to_string,\n visibility=ParameterDictVisibility.READ_ONLY,\n startup_param=True,\n direct_access=False,\n init_value=1,\n menu_path_read=SubMenu.SHOW_PARAM,\n submenu_read=[],\n menu_path_write=SubMenu.SENSOR_POWER,\n submenu_write=[[\"4\"]])\n \n self._param_dict.add(Parameter.REFERENCE_TEMP_POWER,\n r'(0|1)\\s+= Reference Temperature Power Status',\n lambda match : int(match.group(1)),\n self._int_to_string,\n visibility=ParameterDictVisibility.READ_ONLY,\n startup_param=True,\n direct_access=False,\n init_value=1,\n 
menu_path_read=SubMenu.SHOW_PARAM,\n submenu_read=[],\n menu_path_write=SubMenu.SENSOR_POWER,\n submenu_write=[[\"5\"]])",
"def __process_flags(self, flags: int) -> Dict[str, bool]:\n return {\n 'ns': True if flags & 0x100 else False,\n 'cwr': True if flags & 0x080 else False,\n 'ece': True if flags & 0x040 else False,\n 'urg': True if flags & 0x020 else False,\n 'ack': True if flags & 0x010 else False,\n 'psh': True if flags & 0x008 else False,\n 'rst': True if flags & 0x004 else False,\n 'syn': True if flags & 0x002 else False,\n 'fin': True if flags & 0x001 else False,\n }",
"def get_hparams():\n hparams = registry.get_registered_hparams_set(FLAGS.hparams_set)\n hparams.add_hparam(\"inputs_vocab_size\", FLAGS.inputs_vocab_size) \n hparams.add_hparam(\"targets_vocab_size\", FLAGS.targets_vocab_size) \n hparams.parse(FLAGS.hparams)\n return hparams",
"def values(self):\n return {n: getattr(self, n) for n in self._hparam_types.keys()}",
"def getInitParams(self):\n paramDict = {}\n paramDict['upperBoundUsed' ] = self.upperBoundUsed\n paramDict['lowerBoundUsed' ] = self.lowerBoundUsed\n paramDict['hasInfiniteBound'] = self.hasInfiniteBound\n paramDict['upperBound' ] = self.upperBound\n paramDict['lowerBound' ] = self.lowerBound\n paramDict['adjustmentType' ] = self.__adjustmentType\n paramDict['dimensionality' ] = self.dimensionality\n return paramDict",
"def _build_param_dict(self):\n # Add parameter handlers to parameter dict. \n self._param_dict.add(SBE37Parameter.OUTPUTSAL,\n r'(do not )?output salinity with each sample',\n lambda match : False if match.group(1) else True,\n self._true_false_to_string)\n self._param_dict.add(SBE37Parameter.OUTPUTSV,\n r'(do not )?output sound velocity with each sample',\n lambda match : False if match.group(1) else True,\n self._true_false_to_string)\n self._param_dict.add(SBE37Parameter.NAVG,\n r'number of samples to average = (\\d+)',\n lambda match : int(match.group(1)),\n self._int_to_string)\n self._param_dict.add(SBE37Parameter.SAMPLENUM,\n r'samplenumber = (\\d+), free = \\d+',\n lambda match : int(match.group(1)),\n self._int_to_string)\n self._param_dict.add(SBE37Parameter.INTERVAL,\n r'sample interval = (\\d+) seconds',\n lambda match : int(match.group(1)),\n self._int_to_string)\n self._param_dict.add(SBE37Parameter.STORETIME,\n r'(do not )?store time with each sample',\n lambda match : False if match.group(1) else True,\n self._true_false_to_string)\n self._param_dict.add(SBE37Parameter.TXREALTIME,\n r'(do not )?transmit real-time data',\n lambda match : False if match.group(1) else True,\n self._true_false_to_string)\n self._param_dict.add(SBE37Parameter.SYNCMODE,\n r'serial sync mode (enabled|disabled)',\n lambda match : False if (match.group(1)=='disabled') else True,\n self._true_false_to_string)\n self._param_dict.add(SBE37Parameter.SYNCWAIT,\n r'wait time after serial sync sampling = (\\d+) seconds',\n lambda match : int(match.group(1)),\n self._int_to_string)\n self._param_dict.add(SBE37Parameter.TCALDATE,\n r'temperature: +((\\d+)-([a-zA-Z]+)-(\\d+))',\n lambda match : self._string_to_date(match.group(1), '%d-%b-%y'),\n self._date_to_string)\n self._param_dict.add(SBE37Parameter.TA0,\n r' +TA0 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.TA1,\n r' +TA1 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.TA2,\n r' +TA2 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.TA3,\n r' +TA3 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.CCALDATE,\n r'conductivity: +((\\d+)-([a-zA-Z]+)-(\\d+))',\n lambda match : self._string_to_date(match.group(1), '%d-%b-%y'),\n self._date_to_string)\n self._param_dict.add(SBE37Parameter.CG,\n r' +G = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.CH,\n r' +H = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.CI,\n r' +I = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.CJ,\n r' +J = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.WBOTC,\n r' +WBOTC = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.CTCOR,\n r' +CTCOR = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n 
self._float_to_string)\n self._param_dict.add(SBE37Parameter.CPCOR,\n r' +CPCOR = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PCALDATE,\n r'pressure .+ ((\\d+)-([a-zA-Z]+)-(\\d+))',\n lambda match : self._string_to_date(match.group(1), '%d-%b-%y'),\n self._date_to_string)\n self._param_dict.add(SBE37Parameter.PA0,\n r' +PA0 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PA1,\n r' +PA1 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PA2,\n r' +PA2 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PTCA0,\n r' +PTCA0 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PTCA1,\n r' +PTCA1 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PTCA2,\n r' +PTCA2 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PTCB0,\n r' +PTCSB0 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PTCB1,\n r' +PTCSB1 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PTCB2,\n r' +PTCSB2 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.POFFSET,\n r' +POFFSET = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.RCALDATE,\n r'rtc: +((\\d+)-([a-zA-Z]+)-(\\d+))',\n lambda match : self._string_to_date(match.group(1), '%d-%b-%y'),\n self._date_to_string)\n self._param_dict.add(SBE37Parameter.RTCA0,\n r' +RTCA0 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.RTCA1,\n r' +RTCA1 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.RTCA2,\n r' +RTCA2 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)",
"def _flags(self):\n done, data = self._request('GE')\n if done:\n flags = int(data[1], 16)\n else:\n raise EvseError\n return {\n 'service_level': (flags & 0x0001) + 1,\n 'diode_check': not flags & 0x0002,\n 'vent_required': not flags & 0x0004,\n 'ground_check': not flags & 0x0008,\n 'stuck_relay_check': not flags & 0x0010,\n 'auto_service_level': not flags & 0x0020,\n 'auto_start': not flags & 0x0040,\n 'serial_debug': not not flags & 0x0080,\n 'lcd_type': 'monochrome' if flags & 0x0100 else 'rgb',\n 'gfi_self_test': not flags & 0x0200\n }",
"def convert_flags_to_boolean_dict(flags):\n return {f: True for f in flags}",
"def _create_flag_value_map(flags: Iterable[str]) -> DefaultDict[str, list[str | None]]:\n flag_value_map: DefaultDict[str, list[str | None]] = defaultdict(list)\n for flag in flags:\n flag_val: str | None\n key, has_equals_sign, flag_val = flag.partition(\"=\")\n if not has_equals_sign:\n if not flag.startswith(\"--\"): # '-xfoo' style.\n key = flag[0:2]\n flag_val = flag[2:]\n if not flag_val:\n # Either a short option with no value or a long option with no equals sign.\n # Important so we can distinguish between no value ('--foo') and setting to an empty\n # string ('--foo='), for options with an implicit_value.\n flag_val = None\n flag_value_map[key].append(flag_val)\n return flag_value_map",
"def get_params(self, comArgs):\n params = {}\n flags = []\n \n for c in comArgs:\n if len(c) == 1:\n flags.append(c.lower())\n else:\n k = c[0]\n v = c[1:]\n params[k] = float(v)\n return params, flags",
"def _get_init_args(self):\n\n return dict(enum=self.enum, dflt=self._defname,\n base=self.base, shape=self.shape)",
"def get_argdict(cls, toolchain, args):\n return {} # Empty must be overloaded (if required)",
"def getInitParams(self):\n paramDict = super().getInitParams()\n paramDict['p'] = self.p\n return paramDict",
"def getInitParams(self):\n paramDict = super().getInitParams()\n paramDict['workingDir'] = self.workingDir\n paramDict['dataFilename'] = self.dataFilename\n paramDict['functionID'] = self.functionID\n paramDict['functionType'] = self.functionType\n paramDict['variableID'] = self.variableID\n paramDict['k'] = self.k\n paramDict['s'] = self.s\n return paramDict",
"def _build_param_dict(self):\n self._build_common_param_dict()\n\n self._param_dict.add(Parameter.NUM_AVG_SAMPLES,\n r'ScansToAverage>([\\d]+)</ScansToAverage>',\n lambda match: int(match.group(1)),\n str,\n type=ParameterDictType.INT,\n display_name=\"Scans to Average\",\n description=\"Number of samples to average (must be even)\",\n range=INT16,\n startup_param=True,\n direct_access=False,\n default_value=4,\n visibility=ParameterDictVisibility.READ_WRITE)\n self._param_dict.add(Parameter.MIN_COND_FREQ,\n r'MinimumCondFreq>([\\d]+)</MinimumCondFreq',\n lambda match: int(match.group(1)),\n str,\n type=ParameterDictType.INT,\n display_name=\"Minimum Conductivity Frequency\",\n range=INT16,\n description=\"Minimum conductivity frequency to enable pump turn-on.\",\n startup_param=True,\n direct_access=False,\n default_value=500,\n units=Units.HERTZ,\n visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.PUMP_DELAY,\n r'PumpDelay>([\\d]+)</PumpDelay',\n lambda match: int(match.group(1)),\n str,\n type=ParameterDictType.INT,\n display_name=\"Pump Delay\",\n range=INT16,\n description=\"Time to wait after minimum conductivity frequency is reached before turning pump on.\",\n startup_param=True,\n direct_access=False,\n default_value=60,\n units=Units.SECOND,\n visibility=ParameterDictVisibility.READ_WRITE)\n self._param_dict.add(Parameter.AUTO_RUN,\n r'AutoRun>(.*)</AutoRun',\n lambda match: True if match.group(1) == 'yes' else False,\n self._true_false_to_string,\n type=ParameterDictType.BOOL,\n display_name=\"Auto Run\",\n description=\"Enable automatic logging when power is applied: (true | false).\",\n range={'True': True, 'False': False},\n startup_param=True,\n direct_access=True,\n default_value=False,\n visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.IGNORE_SWITCH,\n r'IgnoreSwitch>(.*)</IgnoreSwitch',\n lambda match: True if match.group(1) == 'yes' else False,\n self._true_false_to_string,\n type=ParameterDictType.BOOL,\n display_name=\"Ignore Switch\",\n description=\"Disable magnetic switch position for starting or stopping logging: (true | false)\",\n range={'True': True, 'False': False},\n startup_param=True,\n direct_access=True,\n default_value=True,\n visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.OPTODE,\n r'OPTODE>(.*)</OPTODE',\n lambda match: True if match.group(1) == 'yes' else False,\n self._true_false_to_string,\n type=ParameterDictType.BOOL,\n display_name=\"Optode Attached\",\n description=\"Enable optode: (true | false)\",\n range={'True': True, 'False': False},\n startup_param=True,\n direct_access=True,\n default_value=True,\n visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.VOLT1,\n r'ExtVolt1>(.*)</ExtVolt1',\n lambda match: True if match.group(1) == 'yes' else False,\n self._true_false_to_string,\n type=ParameterDictType.BOOL,\n display_name=\"Volt 1\",\n description=\"Enable external voltage 1: (true | false)\",\n range={'True': True, 'False': False},\n startup_param=True,\n direct_access=True,\n default_value=True,\n visibility=ParameterDictVisibility.IMMUTABLE)\n\n self._build_ctd_specific_params()",
"def setupdict(parfile):\n pardict = {}\n with open(parfile,'r+') as f:\n for line in f:\n flags = line[56:65].split(' ')\n try:\n flags = [int(f) for f in flags]\n except:\n continue\n # if we found res pars\n if( all(flags) <= 3 ):\n # if any varied pars\n if( any(flags) > 0 ):\n # energies are dict keys\n estring = endf_float_str(float(line[0:11]))\n pardict[estring] = []\n pars = [float(line[0+11*i:11+11*i]) for i in range(len(flags))]\n for i,flag in enumerate(flags):\n if( flag > 0 ):\n pardict[estring].append((i,pars[i]))\n return pardict",
"def struct(self):\n params_dict = {}\n params_dict['force'] = self.force\n params_dict['dryrun'] = self.dryrun\n params_dict['auto_replace'] = self.auto_replace\n params_dict['allow_autoclean'] = self.allow_autoclean\n\n trash_params_dict = {}\n params_dict['trash'] = trash_params_dict\n \n trash_params_dict['max_size'] = self.trash_max_size\n trash_params_dict['max_count'] = self.trash_max_count\n\n autocl_param_dict = {}\n params_dict['autoclean'] = autocl_param_dict\n \n autocl_param_dict['count'] = self.autoclean_count\n autocl_param_dict['size'] = self.autoclean_size\n autocl_param_dict['days'] = self.autoclean_days\n autocl_param_dict['same_count'] = self.autoclean_same_count\n \n return params_dict",
"def mof_metadata(self):\n\n arg_dict = collections.defaultdict(dict)\n\n arg_dict[self.name]['type'] = self.arg_type\n arg_dict[self.name]['qualifiers'] = self.qualifiers\n arg_dict[self.name]['valuemap'] = self.valuemap\n\n return dict(arg_dict)",
"def get_hyper_params(**kwargs):\n hyper_params = {\n \"anchor_ratios\": [0.5, 1, 2],\n \"anchor_scales\": [16, 32, 64, 128, 256],\n \"stride\": 32,\n \"nms_topn\": 300,\n \"total_pos_bboxes\": 64,\n \"total_neg_bboxes\": 64,\n \"pooling_size\": (7, 7),\n }\n for key, value in kwargs.items():\n if key in hyper_params and value:\n hyper_params[key] = value\n #\n hyper_params[\"anchor_count\"] = len(hyper_params[\"anchor_ratios\"]) * len(hyper_params[\"anchor_scales\"])\n return hyper_params",
"def params_commandline(lista):\n if len(lista)%2!=0:\n print('Error: The number of parameter names and values does not match')\n sys.exit()\n dict={}\n for i in range(0,len(lista),2):\n key=lista[i]\n if type(key)!=type(''):\n raise 'Keyword not string!'\n #replace commas in case they're present\n if key[0]=='-':key=key[1:]\n lista[i+1]=replace(lista[i+1],',',' ')\n values=tuple(split(lista[i+1]))\n if len(values)<1:\n mensaje='No value(s) for parameter '+key\n raise mensaje\n dict[key]=values\n if len(dict[key])==1: dict[key]=dict[key][0]\n return dict",
"def token_kwargs(bits, parser):\r\n if not bits:\r\n return {}\r\n kwargs = SortedDict()\r\n while bits:\r\n match = kwarg_re.match(bits[0])\r\n if not match or not match.group(1):\r\n return kwargs\r\n key, value = match.groups()\r\n del bits[:1]\r\n kwargs[parser.compile_filter(key)] = parser.compile_filter(value)\r\n return kwargs",
"def setupParameters(self, **pars):\n \n seldict = {}\n for k,v in pars.items():\n if v != None and v != \"\":\n seldict[k] = v\n \n return seldict",
"def parameters(self):\n return dict(self._register)",
"def _get_flags(args: Sequence[str]) -> Dict[str, bool]:\n flags = {}\n for arg in args:\n if arg.startswith(FLAG_MARKER):\n flag_name = arg[len(FLAG_MARKER):]\n if flag_name and flag_name not in OMIT_FLAGS:\n flags[flag_name] = True\n else:\n break # Ignore flags after initial CLI call\n return flags",
"def __make_params(args):\n data = {}\n for i in range(len(args)):\n if i == 0: # saltando a primeira iteracao pra\n # saltar o parametro que é o nome do arquivo de execução\n continue\n if not i % 2 == 0:\n data[args[i]] = args[i + 1]\n return data",
"def getInitParams(self):\n return {}"
] |
[
"0.7335772",
"0.65748584",
"0.64927566",
"0.64463437",
"0.6262838",
"0.6255742",
"0.6103088",
"0.60578626",
"0.5906162",
"0.5896976",
"0.58742887",
"0.58281696",
"0.5815971",
"0.57891846",
"0.5757716",
"0.5731962",
"0.56256163",
"0.5611377",
"0.55976546",
"0.55887145",
"0.5586255",
"0.55821705",
"0.5573178",
"0.5568321",
"0.55599254",
"0.5553542",
"0.5552921",
"0.55506474",
"0.55204254",
"0.55174786"
] |
0.8174391
|
0
|
Use the state_map for this instance to map a state string into a ServerState constant
|
def _api_state_to_serverstate(self, api_state):
    try:
        return self.state_map[api_state]
    except KeyError:
        self.logger.warn(
            "Unmapped Server state '%s' received from system, mapped to '%s'",
            api_state, ServerState.UNKNOWN
        )
        return ServerState.UNKNOWN
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def from_esi_name(cls, esi_state_name: str) -> \"StructureService.State\":\n STATES_ESI_MAP = {\"offline\": cls.OFFLINE, \"online\": cls.ONLINE}\n return (\n STATES_ESI_MAP[esi_state_name]\n if esi_state_name in STATES_ESI_MAP\n else cls.OFFLINE\n )",
"def state(self, state: str) -> None:",
"def state(self, state: \"str\"):\n if isinstance(state, Enum):\n self._attrs[\"state\"] = state.value\n else:\n self._attrs[\"state\"] = state # If you supply a string, we presume you know the service will take it.",
"def map_state_info(port, nmap_store):\n state = port.find(\"state\")\n nmap_store[\"state\"] = state.get(\"state\")\n nmap_store[\"reason\"] = state.get(\"reason\")\n nmap_store[\"reason_ttl\"] = state.get(\"reason_ttl\")",
"def map(s,dic):\n state=s.getstate()\n if not state in dic:raise Exception(\"the current state \"+str(state)+\" is not available to map to using the dictionary \"+str(dic))\n val=dic[state]\n if callable(val):\n return val()\n states=s.getstates()\n if val in states:\n return s.setstate(val)\n raise Exception(\"I dont know how to use this \"+str(state)+\" since it maps to a type of \"+str(type(val))+\" namely \"+str(val))",
"def set_state(self,s):\n self.state = s",
"def test_state_string(self):\n server, client = socket_pair()\n server = loopback_server_factory(server)\n client = loopback_client_factory(client)\n\n assert server.get_state_string() in [\n b\"before/accept initialization\",\n b\"before SSL initialization\",\n ]\n assert client.get_state_string() in [\n b\"before/connect initialization\",\n b\"before SSL initialization\",\n ]",
"def state(self, state_str: str) -> None:\n with self._lock:\n self._state = self._create_new_state(\n state_str, prev_state=self._state, notify=True\n )",
"def set_classy_state(self, state: Dict[str, Any]) -> None:\n return self.load_state_dict(state)",
"def parse_state(self, state: str):\r\n state = state.strip()\r\n state = state.split(';')\r\n\r\n if len(state) < 2:\r\n print(state)\r\n return\r\n\r\n for field in state:\r\n split = field.split(':')\r\n if len(split) < 2:\r\n continue\r\n\r\n key = split[0]\r\n value = split[1]\r\n\r\n if key in Tello.state_field_converters:\r\n try:\r\n value = Tello.state_field_converters[key](value)\r\n except Exception as e:\r\n print('Error parsing state value for {}: {} to {}'\r\n .format(key, value, Tello.state_field_converters[key]))\r\n self.state[key] = value\r\n return",
"def setup_states(self, state_dict, start_state):\n self.state_dict = state_dict\n self.state_name = start_state\n self.state = self.state_dict[self.state_name]()",
"def __init__(self, *args, **kwargs):\n state_map = self.state_map\n if (not state_map or not isinstance(state_map, dict) or\n not all(value in ServerState.valid_states() for value in state_map.values())):\n raise NotImplementedError(\n \"property '{}' not properly implemented in class '{}'\"\n .format('state_map', self.__class__.__name__)\n )\n super(Server, self).__init__(*args, **kwargs)",
"def get_lookup_state(self, state):\n return \"\".join(map(str, state))",
"def execute_state(self, s: str, params=None):\n if s in self.states:\n self.states[s](params)",
"def state(self, state: str) -> None:\n try:\n self._redis.set(self._namespace(\"state\"), str(state))\n except RedisError:\n self.logger.error(\"RedisError\", exc_info=True)",
"def fromState(state):",
"def from_esi_name(cls, esi_state_name: str) -> \"Structure.State\":\n STATES_ESI_MAP = {\n \"anchor_vulnerable\": cls.ANCHOR_VULNERABLE,\n \"anchoring\": cls.ANCHORING,\n \"armor_reinforce\": cls.ARMOR_REINFORCE,\n \"armor_vulnerable\": cls.ARMOR_VULNERABLE,\n \"deploy_vulnerable\": cls.DEPLOY_VULNERABLE,\n \"fitting_invulnerable\": cls.FITTING_INVULNERABLE,\n \"hull_reinforce\": cls.HULL_REINFORCE,\n \"hull_vulnerable\": cls.HULL_VULNERABLE,\n \"online_deprecated\": cls.ONLINE_DEPRECATED,\n \"onlining_vulnerable\": cls.ONLINING_VULNERABLE,\n \"shield_vulnerable\": cls.SHIELD_VULNERABLE,\n \"unanchored\": cls.UNANCHORED,\n \"offline\": cls.POS_OFFLINE,\n \"online\": cls.POS_ONLINE,\n \"onlining\": cls.POS_ONLINING,\n \"reinforced\": cls.POS_REINFORCED,\n \"unanchoring \": cls.POS_UNANCHORING,\n }\n return (\n STATES_ESI_MAP[esi_state_name]\n if esi_state_name in STATES_ESI_MAP\n else cls.UNKNOWN\n )",
"def state(self, value):\n # find two consecutive capital letters\n regex = config.get('validators', 'state')\n state = re.search(regex,\n value)\n if not state:\n raise StateError(\"StateError: 'state' argument must have two capital letters\")\n else:\n self._state = value",
"def state(self, state):\n # type: (string_types) -> None\n\n if state is not None:\n if not isinstance(state, string_types):\n raise TypeError(\"Invalid type for `state`, type has to be `string_types`\")\n\n self._state = state",
"def setState(newState):\n global STATE\n if getattr(states, newState):\n STATE = newState\n else:\n raise Exception(\"State (%s) does not exist\" % newState)",
"def state(self, state: str) -> None:\n self._state = state",
"def __setstate__(self, state: Dict[str, Any]) -> None:\n self.name = state[\"name\"]\n self.host = state[\"host\"]\n self.port = state[\"port\"]\n self.logger = _get_named_client_logger(\n name=self.name,\n host=self.host,\n port=self.port,\n )",
"def get_state_s(self, lower = True):\r\n\r\n state_s = STATE_STRINGS[self._state - 1]\r\n state_s = state_s.lower() if lower else state_s\r\n return state_s",
"def set_state(self, state_dict):\n self.set_script_output(state_dict.get('script_text', ''))\n for key, target_object in self._map.items():\n self.set_single_state(target_object,\n value=state_dict.get(key, None))",
"def change_state(self, state):\n # tuple of valid game states\n valid_state = (\"WON\", \"LOST\", \"PLAYING\")\n # if state received doesn't match above, tells player\n if state not in valid_state:\n return \"Invalid State\"\n # sets game state to new state\n self._game_state = state\n return self._game_state",
"def state(self, state: str):\n\n self._state = state",
"def state(self, state):\n def what(s, switch):\n if switch:\n return s.label()\n else:\n return s\n switch = is_FSMState(state)\n\n try:\n return self._states_dict_[what(state, switch)]\n except AttributeError:\n for s in self.iter_states():\n if what(s, not switch) == state:\n return s\n except KeyError:\n pass\n raise LookupError(\"No state with label %s found.\" % (what(state, switch),))",
"def get_state():\n state = ''.join(\n random.choice(\n string.ascii_uppercase +\n string.digits) for x in xrange(32))\n login_session['state'] = state\n return login_session['state']",
"def target_state(self, s):\n raise NotImplementedError()",
"def lookup_state(state: str | int) -> dict:\n # Try to cast state as an integer to deal with \"02\", \"2\", 2.0, np.int64(2)...\n try:\n is_fips = isinstance(int(state), int)\n except ValueError:\n is_fips = False\n if is_fips:\n state = str(int(state)).zfill(2)\n return {x[\"fips\"]: x for x in STATES}[state]\n key = \"code\" if len(state) == 2 else \"name\"\n return {x[key].lower(): x for x in STATES}[state.lower()]"
] |
[
"0.6379251",
"0.63019204",
"0.62902385",
"0.61279786",
"0.5973234",
"0.5935471",
"0.5809283",
"0.58044195",
"0.58006954",
"0.5798137",
"0.57211673",
"0.5705519",
"0.5704255",
"0.5701755",
"0.5689011",
"0.56670773",
"0.56380117",
"0.5636881",
"0.5626896",
"0.56248236",
"0.55805737",
"0.55761373",
"0.552834",
"0.54861474",
"0.5485828",
"0.5477305",
"0.5430689",
"0.54143834",
"0.5383506",
"0.5345831"
] |
0.6673923
|
0
|
Return True if the server is in the process of being powered on.
|
def is_powering_on(self):
return self._get_state() == ServerState.POWERING_ON
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def is_on(self):\n if self._power_state == HYSEN_POWERON :\n return True\n else:\n return False",
"def is_host_on(self):\n status = False\n cmd = \"/usr/local/bin/wedge_power.sh status\"\n data = run_shell_cmd(cmd)\n Logger.info(\"[FSCD Testing] Executing cmd= [{}]\".format(cmd))\n Logger.info(\"[FSCD Testing] Received data= [{}]\".format(data))\n if \"on\" in data:\n status = True\n Logger.info(\"[FSCD Testing] userver power status {}\".format(status))\n return status",
"def is_on(self):\n return self._get_state() == ServerState.ON",
"def is_on(self):\n return self._client.get_power()",
"def is_powering_off(self):\n return self._get_state() == ServerState.POWERING_OFF",
"def is_on(self):\n run_state = self._get_run_state()\n return STATE_DISHWASHER_POWER_OFF not in run_state",
"def is_power_onoff(self):\n return self['application'] == 'ccd201_pon_app'",
"def owserver_running():\n for proc in psutil.process_iter():\n if 'owserver' in proc.name():\n return True\n return False",
"def is_powered(self) -> bool:\n return self.proto.is_powered",
"def ServerIsReady( self ):\n return self.ServerIsHealthy()",
"def is_on(self):\n return self._program.get(\"enabled\") is True",
"def is_on(self) -> bool:\n return self._device.is_on",
"def is_on(self):\n return self._mower_status in [\n STATUS_EXECUTING_START, STATUS_OK_CHARGING,\n STATUS_OK_CUTTING, STATUS_OK_LEAVING, STATUS_OK_SEARCHING, STATUS_OK_CUTTING_MANUAL]",
"def is_alive(self):\n if self.status == 1:\n return True\n else:\n return False",
"def isup(self):\n if self.cloudserver:\n # print self.cloudserver.status\n if self.cloudserver.status in (\"ACTIVE\",):\n return True\n \n return False",
"def is_on(self) -> bool:\n return self._client.get_circ_pump()",
"def check_status(self):\n try:\n self.server.ping()\n return True\n except Exception as e:\n return False",
"def is_monitored(self):\n return self._is_monitored",
"def is_on(self) -> bool:\n return self.tuya_device.status.get(DPCODE_SWITCH, False)",
"def is_on(self):\n return self.state == WORKING_STATE",
"def is_on(self):\n return self.state == WORKING_STATE",
"def is_on(self):\n return self.state == WORKING_STATE",
"def is_on(self):\n if self.is_update_locked():\n return self.graceful_state\n if self._state['action'] == 1 and self._state['state'] == 2:\n return True\n return False",
"def _ServerIsRunning( self ):\n return utils.ProcessIsRunning( self._gocode_handle )",
"def _is_alive(self) -> bool:\n\n if self._on:\n return True\n\n try:\n os.kill(self.proc.pid, 0)\n except (OSError, ProcessLookupError):\n return False\n\n return True",
"def is_on(self):\n return self._device.is_on",
"def is_on(self):\n return self._device.is_on",
"def is_on(self):\n return self._device.is_on",
"def is_on(self):\n return not self.ready",
"def is_on(self) -> bool:\n if self._on_off_cluster_handler.on_off is None:\n return False\n return self._on_off_cluster_handler.on_off"
] |
[
"0.7806083",
"0.7783991",
"0.77826375",
"0.7498044",
"0.73826337",
"0.7230849",
"0.7181862",
"0.711774",
"0.70670676",
"0.70519984",
"0.6885925",
"0.68290555",
"0.6812099",
"0.67613184",
"0.67492056",
"0.6744383",
"0.674361",
"0.6732181",
"0.6730665",
"0.67200327",
"0.67200327",
"0.67200327",
"0.6702169",
"0.66971576",
"0.66924703",
"0.66816366",
"0.66816366",
"0.66816366",
"0.6676709",
"0.66613936"
] |
0.8435485
|
0
|
Return True if the server is in the process of being powered off.
|
def is_powering_off(self):
return self._get_state() == ServerState.POWERING_OFF
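
A compact usage sketch covering both of the power-state predicates in these rows; the ServerState names and the _get_state stub are assumptions made only for illustration.

class ServerState:
    ON = "ON"
    POWERING_ON = "POWERING_ON"
    POWERING_OFF = "POWERING_OFF"

class DemoServer:
    def __init__(self, state):
        self._state = state

    def _get_state(self):
        # Stand-in for whatever refreshes the cached state from the API.
        return self._state

    def is_powering_on(self):
        return self._get_state() == ServerState.POWERING_ON

    def is_powering_off(self):
        return self._get_state() == ServerState.POWERING_OFF

print(DemoServer(ServerState.POWERING_OFF).is_powering_off())  # True
print(DemoServer(ServerState.ON).is_powering_off())            # False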
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def is_off(self):\n return self._get_state() == ServerState.OFF",
"def is_powered_off(self, instance_name):\n return self._smtclient.get_power_state(instance_name) == 'off'",
"def is_powering_on(self):\n return self._get_state() == ServerState.POWERING_ON",
"def is_on(self):\n run_state = self._get_run_state()\n return STATE_DISHWASHER_POWER_OFF not in run_state",
"def shutting_down(self):\n return self._shutdown.is_set()",
"def is_in_shutdown(self):\n return self._in_shutdown",
"def is_power_onoff(self):\n return self['application'] == 'ccd201_pon_app'",
"def is_on(self):\n return self._get_state() == ServerState.ON",
"def check_stop_flag(con):\n k, v = con.kv.get(\"service/rebootmgr/stop\")\n if v:\n return True\n return False",
"def idle_shutdown(players: Players, args: Namespace) -> bool:\n\n if players.online:\n LOGGER.info('Server is in use.')\n return False\n\n LOGGER.info('Server is idle.')\n unit = args.unit.format(server=args.server)\n command = ('/usr/bin/systemctl', 'stop', unit)\n\n try:\n check_call(command)\n except CalledProcessError as error:\n LOGGER.error('Could not shutdown the server.')\n LOGGER.debug(error)\n return False\n\n LOGGER.info('Server %s has been shut down.', unit)\n return True",
"def is_host_on(self):\n status = False\n cmd = \"/usr/local/bin/wedge_power.sh status\"\n data = run_shell_cmd(cmd)\n Logger.info(\"[FSCD Testing] Executing cmd= [{}]\".format(cmd))\n Logger.info(\"[FSCD Testing] Received data= [{}]\".format(data))\n if \"on\" in data:\n status = True\n Logger.info(\"[FSCD Testing] userver power status {}\".format(status))\n return status",
"def is_off(self) -> bool:\n return not self.is_on",
"def off(self) -> bool:\n off_cmd = HomeAssistantPlugin.service_map[self.domain.lower()][\"off\"]\n return self.send(off_cmd)",
"def test_off_reboot_on(self):\n self.openstack('baremetal node power off {0}'\n .format(self.node['uuid']))\n show_prop = self.node_show(self.node['uuid'], ['power_state'])\n self.assertEqual('power off', show_prop['power_state'])\n\n self.openstack('baremetal node reboot {0}'.format(self.node['uuid']))\n show_prop = self.node_show(self.node['uuid'], ['power_state'])\n self.assertEqual('power on', show_prop['power_state'])",
"def owserver_running():\n for proc in psutil.process_iter():\n if 'owserver' in proc.name():\n return True\n return False",
"def is_not_power_onoff(self):\n return not self.is_power_onoff()",
"def is_on(self):\n if self._power_state == HYSEN_POWERON :\n return True\n else:\n return False",
"def poweroff_server(self, server=None, server_id=None):\n sid = server_id if server_id is not None else server.sid\n if sid is None:\n raise Exception('No Server Specified.')\n json_scheme = self.gen_def_json_scheme('SetEnqueueServerPowerOff', dict(ServerId=sid))\n json_obj = self.call_method_post('SetEnqueueServerPowerOff', json_scheme=json_scheme)\n return True if json_obj['Success'] is 'True' else False",
"def is_on(self):\n return self._client.get_power()",
"def _isoff(self):\n return self.dp.state()==PyTango.DevState.OFF",
"def is_server_running(self, shut_off_is_down: bool = False) -> bool:\n out = self.cloud_cli.run_cloud_cmd(\n f\"compute --project={self.project} instances describe --zone={self.zone} {self.name} --format=json\")\n try:\n out = json.loads(out.strip())\n except json.JSONDecodeError:\n return False\n return True",
"def power_off(self):\n LOG.info('Powering off system')\n self._run_shutdown_command('poweroff')",
"def is_off(self):\n return self.value == OFF",
"def checkWakeup(self):\n # TODO include check for external wakeup sources\n if self.dbus2vdr.checkVDRstatus():\n\n return self.dbus2vdr.Shutdown.ManualStart()\n else:\n return True",
"def is_on(self) -> bool:\n return self.tuya_device.status.get(DPCODE_SWITCH, False)",
"def _is_alive(self) -> bool:\n\n if self._on:\n return True\n\n try:\n os.kill(self.proc.pid, 0)\n except (OSError, ProcessLookupError):\n return False\n\n return True",
"def GetKillSwitchEnabled(self):\n status = self.wifi.GetKillSwitchStatus()\n return status",
"def is_powered(self) -> bool:\n return self.proto.is_powered",
"def power_off_all_server_profiles():\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n profile_name_list = CommonOperationServerProfile.get_server_profile_list()\n\n total = len(profile_name_list)\n already_off_or_not_supported = 0\n powered_off = 0\n\n for n, profile_name in enumerate(profile_name_list):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"powering off a server profile named '%s'\" % profile_name)\n CommonOperationServerProfile.click_server_profile(profile_name=profile_name, time_for_loading=4)\n # check if already powered off\n FusionUIBase.select_view_by_name(view_name='General', timeout=5, fail_if_false=False)\n if VerifyServerProfile.verify_general_server_power(expect_value='On', timeout=5, fail_if_false=False) is False:\n logger.warn(\"power state of server profile '%s' is not 'On' (it's Off, or not supported due to being managed by another system), 'Power Off' action is unavailable.\" % profile_name)\n already_off_or_not_supported += 1\n else:\n if power_off_server_profile_by_name(profile_name) is False:\n logger.warn(\"server profile '%s' is NOT powered off successfully\" % profile_name)\n continue\n else:\n powered_off += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - already_off_or_not_supported == 0:\n logger.warn(\"no server profile to power off! all %s server profile(s) is NOT applicable to power off (already powered off, or unknown power state), test is considered PASS\" % already_off_or_not_supported)\n return True\n else:\n if powered_off < total:\n logger.warn(\"not all of the server profile(s) is successfully powered off - %s out of %s powered off \" % (powered_off, total))\n if powered_off + already_off_or_not_supported == total:\n logger.warn(\"%s already-off-or-not-supported server profile(s) is skipped being powered off, test is considered PASS\" % already_off_or_not_supported)\n return True\n else:\n logger.warn(\"%s already-off-or-not-supported server profile(s) is skipped being powered off, \"\n \"%s server profile(s) left is failed being powered off \" % (already_off_or_not_supported, total - powered_off - already_off_or_not_supported))\n return False\n\n logger.info(\"all of the server profile(s) is successfully powered off - %s out of %s \" % (powered_off, total))\n return True",
"def power_off(self):\n print('Powering down O2 Meter ({})...'.format(self.ID))\n self.sensor.write(\"#PDWN\\r\")\n off_status = self.sensor.readline()\n\n if 'PDWN' in off_status:\n print(' Power Off.')\n elif 'ERR' in off_status:\n print('Power-off error: {}'.format(off_status.rstrip()))\n else:\n print('Something went wrong during power-off.\\n -> Sensor returned {}'.format(off_status))\n return"
] |
[
"0.7824",
"0.7776749",
"0.74727935",
"0.72223604",
"0.70757234",
"0.7032411",
"0.68682",
"0.67982227",
"0.6795486",
"0.6750043",
"0.6710196",
"0.6682203",
"0.66467077",
"0.6633493",
"0.660809",
"0.66007054",
"0.65932846",
"0.6574245",
"0.65727454",
"0.6453709",
"0.6424308",
"0.64106417",
"0.63003516",
"0.62947613",
"0.62242264",
"0.6174063",
"0.616593",
"0.6159942",
"0.6157835",
"0.61571896"
] |
0.8522721
|
0
|
Divides the signal into several, possibly overlapping frames.
|
def signal_to_frames(signal, frame_len, frame_step, win_func=None):
assert signal.ndim == 1
signal_len = len(signal)
frame_len = int(round(frame_len))
frame_step = int(round(frame_step))
num_frames = number_frames(signal_len, frame_len, frame_step)
indices = indices_grid(frame_len, frame_step, num_frames)
framed_signal = signal[indices]
if win_func is not None:
framed_signal = win_func(framed_signal)
remain_signal = []
    # Add one to get the first index
    # that is not in framed_signal
max_idx = np.max(indices) + 1
if max_idx <= signal_len - 1:
remain_signal = np.r_[remain_signal, signal[max_idx:]]
return framed_signal, remain_signal
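
A small usage sketch for the function above, assuming numpy is imported as np and that the number_frames and indices_grid helpers shown in the later rows are in scope; the toy signal and frame sizes are arbitrary.

import numpy as np

signal = np.arange(10, dtype=np.float64)  # 10-sample toy signal
framed, remainder = signal_to_frames(signal, frame_len=4, frame_step=2)
print(framed.shape)  # (4, 4): four overlapping frames of length 4
print(framed[1])     # [2. 3. 4. 5.] -- the second frame starts at index 2

# A window function is applied to all frames at once when supplied:
windowed, _ = signal_to_frames(signal, 4, 2, win_func=lambda f: f * np.hamming(4))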
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def enframe(wavData, frameSize=400, step=160):\n coef = 0.97\n wlen = wavData.shape[0]\n frameNum = math.ceil(wlen / step)\n frameData = np.zeros((frameSize, frameNum))\n\n window = signal.windows.hamming(frameSize)\n\n for i in range(frameNum):\n singleFrame = wavData[i * step : min(i * step + frameSize, wlen)]\n # singleFrame[1:] = singleFrame[:-1] - coef * singleFrame[1:]\n frameData[:len(singleFrame), i] = singleFrame\n frameData[:, i] = window*frameData[:, i]\n\n return frameData",
"def divide_segment_by_face(self, segment_frame_list):\r\n\r\n # List with histogram differences between consecutive frames\r\n diff_list = []\r\n\r\n # List with histogram differences between consecutive detections\r\n det_diff_list = []\r\n\r\n # List that will contain new lists of frames\r\n sub_segment_list = []\r\n\r\n prev_hists = None\r\n\r\n frame_counter = 0\r\n\r\n det_counter = 0\r\n\r\n # Dictionary for storing correspondence between counter\r\n counter_dict = {}\r\n\r\n for frame_dict in segment_frame_list:\r\n\r\n sim = frame_dict[c.DETECTED_KEY]\r\n\r\n if sim:\r\n\r\n # Tracking window corresponds to detected face\r\n frame_name = frame_dict[c.SAVED_FRAME_NAME_KEY]\r\n\r\n frame_path = os.path.join(self.frames_path, frame_name)\r\n\r\n image = cv2.imread(frame_path, cv2.IMREAD_COLOR)\r\n\r\n bbox = frame_dict[c.DETECTION_BBOX_KEY]\r\n\r\n x0 = bbox[0]\r\n y0 = bbox[1]\r\n w = bbox[2]\r\n h = bbox[3]\r\n x1 = x0 + w\r\n y1 = y0 + h\r\n\r\n face = image[y0:y1, x0:x1]\r\n\r\n [tot_diff, prev_hists] = utils.get_hist_difference(\r\n face, prev_hists)\r\n\r\n if tot_diff is not None:\r\n\r\n det_diff_list.append(tot_diff)\r\n\r\n diff_list.append(tot_diff)\r\n\r\n counter_dict[det_counter] = frame_counter\r\n\r\n det_counter += 1\r\n\r\n else:\r\n\r\n diff_list.append(-1)\r\n\r\n del image\r\n\r\n else:\r\n\r\n diff_list.append(-1)\r\n\r\n frame_counter += 1\r\n\r\n segment_divided = False\r\n\r\n if len(det_diff_list) > 0:\r\n\r\n half_window_size = c.HALF_WINDOW_SIZE\r\n\r\n std_mult_face = c.STD_MULTIPLIER_FACE\r\n\r\n if self.params is not None:\r\n\r\n if c.HALF_WINDOW_SIZE_KEY in self.params:\r\n half_window_size = self.params[c.HALF_WINDOW_SIZE_KEY]\r\n\r\n if c.STD_MULTIPLIER_FACE_KEY in self.params:\r\n std_mult_face = self.params[c.STD_MULTIPLIER_FACE_KEY]\r\n\r\n face_cut_idxs_temp = utils.get_shot_changes(\r\n det_diff_list, half_window_size, std_mult_face)\r\n\r\n if len(face_cut_idxs_temp) > 0:\r\n\r\n segment_divided = True\r\n\r\n # Get real counters\r\n face_cut_idxs = []\r\n\r\n for idx_temp in face_cut_idxs_temp:\r\n face_cut_idxs.append(counter_dict[idx_temp])\r\n\r\n # Counter for all frames in original segment\r\n counter = 0\r\n\r\n sub_frame_list = []\r\n\r\n for frame_dict in segment_frame_list:\r\n\r\n if counter in face_cut_idxs:\r\n sub_segment_list.append(sub_frame_list)\r\n\r\n sub_frame_list = []\r\n\r\n sub_frame_list.append(frame_dict)\r\n\r\n counter += 1\r\n\r\n if len(sub_frame_list) > 0:\r\n sub_segment_list.append(sub_frame_list)\r\n\r\n # If segment has not been divided,\r\n # list will contain only original segment\r\n\r\n if not segment_divided:\r\n sub_segment_list.append(segment_frame_list)\r\n\r\n new_segments = []\r\n\r\n use_or_fps = c.USE_ORIGINAL_FPS\r\n used_fps = c.USED_FPS\r\n min_detection_pct = c.MIN_DETECTION_PCT\r\n min_segment_duration = c.MIN_SEGMENT_DURATION\r\n\r\n if self.params is not None:\r\n\r\n if c.USE_ORIGINAL_FPS_KEY in self.params:\r\n use_or_fps = self.params[c.USE_ORIGINAL_FPS_KEY]\r\n\r\n if c.USED_FPS_KEY in self.params:\r\n used_fps = self.params[c.USED_FPS_KEY]\r\n\r\n if c.MIN_DETECTION_PCT_KEY in self.params:\r\n min_detection_pct = self.params[c.MIN_DETECTION_PCT_KEY]\r\n\r\n if c.MIN_SEGMENT_DURATION_KEY in self.params:\r\n min_segment_duration = self.params[c.MIN_SEGMENT_DURATION_KEY]\r\n\r\n # Minimum duration of a segment in frames\r\n min_segment_frames = int(\r\n math.ceil(self.fps * min_segment_duration))\r\n\r\n # If a reduced frame rate is used, frames are less\r\n if not use_or_fps:\r\n 
min_segment_frames = int(\r\n math.ceil((used_fps + 1) * min_segment_duration))\r\n\r\n # Iterate through new sub segments\r\n for sub_frame_list in sub_segment_list:\r\n\r\n frame_counter = len(sub_frame_list)\r\n\r\n segment_dict = {c.FRAMES_KEY: sub_frame_list,\r\n c.SEGMENT_TOT_FRAMES_NR_KEY: frame_counter}\r\n\r\n # Segment duration in milliseconds\r\n\r\n duration = frame_counter * 1000.0 / self.fps\r\n\r\n # If a reduced frame rate is used, frames are less\r\n\r\n if not use_or_fps:\r\n duration = frame_counter * 1000.0 / (used_fps + 1)\r\n\r\n segment_dict[c.SEGMENT_DURATION_KEY] = duration\r\n\r\n segment_dict[c.ASSIGNED_TAG_KEY] = c.UNDEFINED_TAG\r\n\r\n segment_dict[c.CONFIDENCE_KEY] = -1\r\n\r\n # Segment must be considered only if its number\r\n # of frames is greater or equals than a minimum\r\n if frame_counter >= min_segment_frames:\r\n\r\n # Start of segment in millisecond\r\n first_frame_dict = sub_frame_list[0]\r\n\r\n segment_start = first_frame_dict[c.ELAPSED_VIDEO_TIME_KEY]\r\n\r\n segment_dict[c.SEGMENT_START_KEY] = segment_start\r\n\r\n # Counter for frames with detections in new segment\r\n det_counter = 0\r\n\r\n for frame_dict in sub_frame_list:\r\n\r\n sim = frame_dict[c.DETECTED_KEY]\r\n\r\n if sim:\r\n det_counter += 1\r\n\r\n # Check percentage of detection\r\n det_pct = (float(det_counter) / frame_counter)\r\n\r\n # print('det_pct', det_pct)\r\n\r\n if det_pct >= min_detection_pct:\r\n\r\n new_segments.append(segment_dict)\r\n\r\n else:\r\n\r\n self.disc_tracked_faces.append(segment_dict)\r\n else:\r\n\r\n self.disc_tracked_faces.append(segment_dict)\r\n\r\n return new_segments",
"def make_frames(signal, sampling_rate, frame_size=0.025, frame_overlap=0.015):\n frame_length = int(round(frame_size * sampling_rate)) #seconds to samples\n frame_step = int(round((frame_size - frame_overlap) * sampling_rate)) #seconds to samples\n #signal_length = len(emphasized_signal)\n\n nf = abs(len(signal) - frame_length)/float(frame_step)\n num_frames = 0\n if int(nf) < 1:\n num_frames = 1 # Make sure that we have at least 1 frame\n else:\n num_frames = int(np.ceil(nf))\n\n padding = np.zeros((num_frames * frame_step) + frame_length - len(signal)) #padding to be added at the end of the signal\n# padded_signal = np.concatenate((signal, padding), axis = None)\n padded_signal = np.zeros((len(padding)+len(signal)))\n np.put(padded_signal, list(range(len(signal))), signal) #put original signal in the front\n np.put(padded_signal, list(range(len(signal), len(padded_signal))), padding) #put padding at the back after signal\n\n indices = np.tile(np.array(range(0, frame_length)), (num_frames, 1)) + np.tile(np.array(range(0, num_frames * frame_step, frame_step)), (frame_length, 1)).T\n frames = padded_signal[indices.astype(np.int32, copy=False)]\n\n #Windowing\n frames = frames * hamming(frame_length)\n return frames",
"def _convert_to_multi_segment(self):\n\n self.header['nb_segment'] = [self.info['n_episodes']]\n\n # drop repeated signal headers\n self.header['signal_channels'] = \\\n self.header['signal_channels'].reshape(\n self.info['n_episodes'], -1)[0]\n\n # reshape signal memmap list\n new_sig_memmaps = []\n n_channels = len(self.header['signal_channels'])\n sig_memmaps = self._raw_signals[0]\n for first_index in np.arange(0, len(sig_memmaps), n_channels):\n new_sig_memmaps.append(\n sig_memmaps[first_index:first_index + n_channels])\n self._raw_signals = new_sig_memmaps\n\n self.logger.debug('New number of segments: {}'.format(\n self.info['n_episodes']))\n\n return",
"def overlap_and_add(signal, frame_step):\n outer_dimensions = signal.size()[:-2]\n frames, frame_length = signal.size()[-2:]\n\n subframe_length = math.gcd(frame_length, frame_step) # gcd=Greatest Common Divisor\n subframe_step = frame_step // subframe_length\n subframes_per_frame = frame_length // subframe_length\n output_size = frame_step * (frames - 1) + frame_length\n output_subframes = output_size // subframe_length\n\n subframe_signal = signal.view(*outer_dimensions, -1, subframe_length)\n\n frame = torch.arange(0, output_subframes, dtype=torch.int64, device=signal.device).unfold(0, subframes_per_frame, subframe_step)\n frame = frame.contiguous().view(-1)\n\n result = signal.new_zeros(*outer_dimensions, output_subframes, subframe_length)\n result.index_add_(-2, frame, subframe_signal)\n result = result.view(*outer_dimensions, -1)\n return result",
"def apply_subspace(\n noisy_signal,\n frame_len=256,\n mu=10,\n lookback=10,\n skip=2,\n thresh=0.01,\n data_type=np.float32,\n):\n\n scnr = Subspace(frame_len, mu, lookback, skip, thresh, data_type)\n processed_audio = np.zeros(noisy_signal.shape)\n n = 0\n hop = frame_len // 2\n while noisy_signal.shape[0] - n >= hop:\n processed_audio[n : n + hop,] = scnr.apply(noisy_signal[n : n + hop])\n\n # update step\n n += hop\n\n return processed_audio",
"def framing(signal, frame_length, frame_step, window_func=lambda x: np.ones((x,))):\n signal_length = len(signal)\n num_frames = 1 + (signal_length - frame_length) // frame_step\n\n frames = np.zeros((num_frames, frame_length))\n for index in range(num_frames):\n frames[index] = np.asarray(signal[index * frame_step: index * frame_step + frame_length],\n dtype='float32') * window_func(frame_length)\n return frames",
"def denormalize_frames(frames):\n new_frames = frames + 1\n new_frames *= (255 / 2)\n # noinspection PyUnresolvedReferences\n new_frames = new_frames.astype(np.uint8)\n\n return new_frames",
"def overlap_and_add(signal, frame_step):\n outer_dimensions = signal.size()[:-2]\n frames, frame_length = signal.size()[-2:]\n\n subframe_length = math.gcd(frame_length, frame_step) # gcd=Greatest Common Divisor\n subframe_step = frame_step // subframe_length\n subframes_per_frame = frame_length // subframe_length\n output_size = frame_step * (frames - 1) + frame_length\n output_subframes = output_size // subframe_length\n\n subframe_signal = signal.view(*outer_dimensions, -1, subframe_length)\n\n frame = torch.arange(0, output_subframes).unfold(0, subframes_per_frame, subframe_step)\n frame = signal.new_tensor(frame).long() # signal may in GPU or CPU\n frame = frame.contiguous().view(-1)\n\n result = signal.new_zeros(*outer_dimensions, output_subframes, subframe_length)\n result.index_add_(-2, frame, subframe_signal)\n result = result.view(*outer_dimensions, -1)\n return result",
"def split_into_frames(filename_raw, thr_var_per_event=5e-4, downsampling_factor=2, disable_display=False,\n filename_output_video=None):\n\n assert downsampling_factor == int(downsampling_factor), \"Error: downsampling_factor must be an integer\"\n assert downsampling_factor >= 0, \"Error: downsampling_factor must be >= 0\"\n\n mv_adaptive_rate_iterator = AdaptiveRateEventsIterator(input_path=filename_raw,\n thr_var_per_event=thr_var_per_event,\n downsampling_factor=downsampling_factor)\n\n height, width = mv_adaptive_rate_iterator.get_size()\n\n if filename_output_video == None:\n video_process = None\n else:\n assert not os.path.exists(filename_output_video)\n video_process = FFmpegWriter(filename_output_video)\n\n if video_process or not disable_display:\n img_bgr = np.zeros((height, width, 3), dtype=np.uint8)\n\n cv2.namedWindow(\"img\", cv2.WINDOW_NORMAL)\n\n for events in mv_adaptive_rate_iterator:\n assert events.size > 0\n start_ts = events[0][\"t\"]\n end_ts = events[-1][\"t\"]\n print(\"frame: {} -> {} delta_t: {} fps: {} nb_ev: {}\".format(start_ts, end_ts,\n end_ts - start_ts,\n 1e6 / (end_ts - start_ts),\n events.size))\n if video_process or not disable_display:\n img = events_to_diff_image(events, sensor_size=(height, width))\n img_bgr[...] = 0\n img_bgr[img < 0, 0] = 255\n img_bgr[img > 0, 1] = 255\n\n chunk_start_ts = events[0][\"t\"]\n chunk_end_ts = events[-1][\"t\"]\n delta_t_frame = chunk_end_ts - chunk_start_ts + 1\n frame_txt = \"ts: {} -> {} delta_t: {} fps: {} (nb_ev): {}\".format(chunk_start_ts, chunk_end_ts,\n delta_t_frame,\n int(1.e6/delta_t_frame),\n events.size)\n img_bgr[20:45, ...] = 0\n cv2.putText(img_bgr,\n frame_txt,\n (int(0.05 * width), 40),\n cv2.FONT_HERSHEY_PLAIN, 1.0, (200, 200, 100))\n\n if video_process:\n video_process.writeFrame(img_bgr.astype(np.uint8)[..., ::-1])\n if not disable_display:\n cv2.imshow(\"img\", img_bgr)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n if video_process:\n video_process.close()\n if not disable_display:\n cv2.destroyAllWindows()",
"def enframe(samples, winlen, winshift):\n\n # check if i+winlen > len(samples):\n\n result = []\n for i in range(0,len(samples),winshift):\n if(i+winlen > len(samples)): break\n result.append(samples[i:i+winlen])\n return np.array(result)\n # return np.array([samples[i:i+winlen] for i in range(0,len(samples),winshift)])",
"def number_frames(signal_len, frame_len, frame_step):\n frames = 1\n if signal_len > frame_len:\n temp = (1.0 * signal_len - frame_len)/frame_step\n frames += int(np.floor(temp))\n\n return frames",
"def _process_data(data: np.ndarray) -> np.ndarray:\r\n result: np.ndarray = np.empty(shape=(0, 0))\r\n i = 0\r\n while i < (len(data) - 1):\r\n # Found beginning of frame\r\n if data[i] > 127:\r\n # Extract one sample from 2 bytes\r\n intout = (np.bitwise_and(data[i], 127)) * 128\r\n i += 1\r\n intout = intout + data[i]\r\n result = np.append(result, intout)\r\n i += 1\r\n return result",
"def SplitGap(data,gapsize,medwin,fluxdiff):\n \n # defining new empty lists and stuff\n pcount=0\n istamps=[]\n outData={}\n \n data['x'].mask = data['UnMasked']\n data['y'].mask = data['UnMasked']\n data['yerr'].mask = data['UnMasked']\n \n # median smoothing the lightcurve\n mvavg1 = movingMedian(data['y'],medwin)\n mvavg1 = num.append(mvavg1,mvavg1[-1])\n mvavg1 = data['y']\n # first derivative of smoothed lightcurve\n diff1 = num.diff(mvavg1)\n diff1 = num.hstack((diff1,diff1[-1]))\n \n # second derivative of smoothed lightcurve\n diff2 = num.diff(diff1)\n diff2 = num.hstack((diff2[-1],diff2))\n\n # compute ourlier resistant sigma\n sig = compute1Sigma(diff1)\n #pylab.plot(diff1,'g.')\n #pylab.plot([0,6000],[5*sig,5*sig],'k-')\n #pylab.plot([0,6000],[3*sig,3*sig],'k-')\n #pylab.plot([0,6000],[1*sig,1*sig],'k-')\n #pylab.show()\n\n # The grand master loop >=}\n # to make portion slices\n for i in range(len(data['x'])-1):\n dt = data['x'][i+1]- data['x'][i]\n j1 = max(0,i-medwin)\n j2 = i + medwin\n if pcount == 0:\n i0 = 0\n if pcount > 0:\n i0 = i1+1\n if dt > gapsize:\n i1 = i\n istamps.append([i0,i1])\n pcount += 1\n #if num.abs(diff1[i]) > 5*sig:\n #i1 = i\n #istamps.append([i0,i1])\n #pcount += 1\n #print num.abs(diff1[i]/data['y'][i]), diff1[i], data['y'][i], diff1[i+1], data['y'][i+1]\n #print i, ' test flux gap'\n i1 = i+1\n istamps.append([i0,i1])\n \n \n \n if data['bool']==False:\n # Applying slices\n for j in range(len(istamps)):\n #print istamps[j][0], istamps[j][1]\n outData['portion' + str(j+1)] = {'kid':data['kid'],'x':data['x'][istamps[j][0]:istamps[j][1]+1], 'y':data['y'][istamps[j][0]:istamps[j][1]+1], 'yerr':data['yerr'][istamps[j][0]:istamps[j][1]+1],'UnMasked':data['UnMasked'][istamps[j][0]:istamps[j][1]+1],'bool':False}\n else:\n # Applying slices\n for j in range(len(istamps)):\n #print istamps[j][0], istamps[j][1]\n outData['portion' + str(j+1)] = {'kid':data['kid'],'x':data['x'][istamps[j][0]:istamps[j][1]+1], 'y':data['y'][istamps[j][0]:istamps[j][1]+1], 'yerr':data['yerr'][istamps[j][0]:istamps[j][1]+1], 'TransitMask':data['TransitMask'][istamps[j][0]:istamps[j][1]+1],'UnMasked':data['UnMasked'][istamps[j][0]:istamps[j][1]+1],'bool':True}\n \n return outData",
"def step(self, frame):\n if not self._stack:\n # Fill stack with copies of first frame if empty.\n self._stack.extend([frame] * (self._num_frames - 1))\n self._stack.append(frame)\n # Match BCAgent's stacking along axis 2.\n stacked_frames = np.stack(self._stack, axis=2)\n\n if not self._flatten:\n return stacked_frames\n else:\n new_shape = stacked_frames.shape[:-2] + (-1,)\n return stacked_frames.reshape(*new_shape)",
"def process_frames(self, data):\n pass",
"def process_frames(self, data):\n pass",
"def process_frames(self, data):\n pass",
"def process_frames(self, data):\n pass",
"def process_frames(self, data):\n pass",
"def _subsample_frames(self, video_clip_frames):\n subsampled_frames = []\n current_ix = 0\n step_size = len(video_clip_frames) / float(config.RGB_N_FRAMES)\n for _ in range(config.RGB_N_FRAMES):\n frame = video_clip_frames[int(current_ix)]\n subsampled_frames.append(frame)\n current_ix += step_size\n\n return np.array(subsampled_frames)",
"def normalize_frames(frames):\n new_frames = frames.astype(np.float32)\n new_frames /= (255 / 2)\n new_frames -= 1\n\n return new_frames",
"def _select_frames(self, frames):\n converted_frames = list()\n # Ignore some frame at begin and end.\n for i in np.linspace(0, self.video_size, self.frame_num + 2)[1:self.frame_num + 1]:\n img = frames[int(i)]\n img = img.resize((224, 224), Image.BILINEAR)\n frame_data = np.array(img)\n converted_frames.append(frame_data)\n return converted_frames",
"def test_slicing_containers(self):\n\n pattern = midi.Pattern()\n pattern.extend([midi.Track()] * 5)\n result1 = pattern[1]\n result2 = pattern[1:5]\n result3 = pattern[::2]\n\n track = midi.Track()\n track.extend([midi.Event()] * 5)\n result1 = track[1]\n result2 = track[1:5]\n result3 = track[::2]",
"def get_initial_segmentation(frames: numpy.ndarray, frame_shift: float) -> List[CtmInterval]:\n segments = []\n cur_segment = None\n silent_frames = 0\n non_silent_frames = 0\n for i in range(frames.shape[0]):\n f = frames[i]\n if int(f) > 0:\n non_silent_frames += 1\n if cur_segment is None:\n cur_segment = CtmInterval(begin=i * frame_shift, end=0, label=\"speech\")\n else:\n silent_frames += 1\n if cur_segment is not None:\n cur_segment.end = (i - 1) * frame_shift\n segments.append(cur_segment)\n cur_segment = None\n if cur_segment is not None:\n cur_segment.end = len(frames) * frame_shift\n segments.append(cur_segment)\n return segments",
"def preprocess_frame(self, frame):\n # Greyscale frame\n img = np.mean(frame,-1)\n\n # Remove black bar at the bottom\n cropped_img = img[:-12, :]\n\n # Normalize Pixel Values\n normalized_frame = cropped_img/255.0\n\n return normalized_frame",
"def apply_fourier_transform(chunked_audio):\n pass",
"def split( self, rSilenceTresholdPercent = 0.1, rSilenceMinDuration = 0.3, nExtractJustFirsts = -1 ):\n nLimit = int( self.getSampleMaxValue() * rSilenceTresholdPercent / 100 ) \n print( \"INF: sound.Wav.split: splitting a sound of %5.3fs, using silence limits at %d for %5.3fs\" % (self.rDuration, nLimit, rSilenceMinDuration) ) \n aSplitted = []\n \n precalcWavIsNotSilence = np.abs(self.data)>nLimit\n\n #~ print self\n \n nCurrentPos = 0 # in data index (not sample)\n nSilenceMinLenData = rSilenceMinDuration * self.nAvgBytesPerSec * 8 / self.nNbrBitsPerSample\n while( nCurrentPos < len(self.data) ):\n \n # first find the beginning of a sound \n nFirstNonSilenceIndex = findFirstTrueValue( precalcWavIsNotSilence[nCurrentPos:] )\n #~ print( \"nFirstNonSilenceIndex (brut): %d\" % nFirstNonSilenceIndex )\n if( nFirstNonSilenceIndex == -1 ):\n # all remaining sound are silence!\n break\n nFirstNonSilenceIndex += nCurrentPos\n nNumFirstSample = nFirstNonSilenceIndex/self.nNbrChannel\n print( \"INF: sound.Wav.split: found a sound at sample %d\" % nNumFirstSample )\n nCurrentPos = nFirstNonSilenceIndex # so at the end, we're stopping\n \n # then find end\n nEndOfSilence = nNumFirstSample*self.nNbrChannel # init of the loop\n while( nEndOfSilence < len(self.data) ):\n #nFirstSilenceIndex = np.argmax( np.abs(self.data[nEndOfSilence:])<=nLimit )\n nFirstSilenceIndex = findFirstFalseValue( precalcWavIsNotSilence[nEndOfSilence:] ) \n #~ print( \"nFirstSilenceIndex (brut): %d (from %d)\" % (nFirstSilenceIndex, nEndOfSilence) )\n if( nFirstSilenceIndex == -1 ):\n break\n nFirstSilenceIndex += nEndOfSilence\n # ensure there's enough silence\n nEndOfSilence = findFirstTrueValue( precalcWavIsNotSilence[nFirstSilenceIndex:] )\n #~ print( \"nEndOfSilence (brut): %d (data: %d) (offset: %d)\" % (nEndOfSilence, self.data[nFirstSilenceIndex+nEndOfSilence],nEndOfSilence + nFirstSilenceIndex) )\n # positionnate onto the end of the silence for next time\n if( nEndOfSilence == -1 ):\n nCurrentPos = len(self.data)\n else:\n nCurrentPos = nEndOfSilence + nFirstSilenceIndex\n \n if( nEndOfSilence > nSilenceMinLenData or nEndOfSilence == -1 ):\n break\n nEndOfSilence += nFirstSilenceIndex\n # while - end\n \n # each time we're out, we've got a silence or we're at the end => new split\n if( nFirstSilenceIndex == -1 ):\n break\n nNumLastSample = nFirstSilenceIndex/self.nNbrChannel\n print( \"INF: sound.Wav.split: found the end of that sound at sample %d\" % nNumLastSample )\n if( nNumLastSample - nNumFirstSample > 4000 ):\n w = Wav()\n w.copyHeader( self )\n w.data = np.copy(self.data[nNumFirstSample*self.nNbrChannel:nNumLastSample*self.nNbrChannel])\n nPeakMax = max( max( w.data ), -min( w.data ) )\n if( nPeakMax > self.getSampleMaxValue() / 8 ): # remove glitch sound\n w.updateHeaderSizeFromDataLength()\n print( \"INF: sound.Wav.split: new split of %5.2fs\" % w.rDuration )\n aSplitted.append( w )\n #~ print( \"nCurLocalVs: %s\" % nCurLocalVs )\n if( nExtractJustFirsts != -1 and nExtractJustFirsts == len(aSplitted) ):\n print( \"WRN: sound.Wav.split: got enough split (%d), leaving...\" % len(aSplitted) )\n break\n # while - end\n print( \"INF: sound.Wav.split: created %d wav(s)\" % len( aSplitted ) )\n return aSplitted",
"def swingCapture(self, pulse, divider=2, count=5):\n pulse=int(pulse) #make sure is integer\n cap = cv2.VideoCapture(CAM0, cv2.CAP_DSHOW) # use camera to monitor the motor-mirror assemnbly \n frames=[] \n\n low, high=self._lo_hi_preprocess(CLK_WISE)\n low_antiClk, high_antiClk=self._lo_hi_preprocess(ANTI_CLK_W) # ret in C byte format already\n div=divider #divide pulses into groups\n for _ in range(count): #loop count\n sub_pulse=pulse//div\n for __ in range(div): #no of times to cap image\n for ___ in range(sub_pulse): # 1st clockwise direction\n self.objdll.USBIO_GPIOWrite(self.id, low, WRITE_EN) #;sleep(self.delay)#disable for highest motor speed\n self.objdll.USBIO_GPIOWrite(self.id, high, WRITE_EN) #; sleep(self.delay)#disable for highest motor speed\n ret, frame = cap.read() # Capture frame-by-frame \n frames.append(frame) # store per group\n\n for __ in range(div): \n for ___ in range(sub_pulse): # 1st anticlockwise direction\n self.objdll.USBIO_GPIOWrite(self.id, low_antiClk, WRITE_EN) #;sleep(self.delay)#disable for highest motor speed\n self.objdll.USBIO_GPIOWrite(self.id, high_antiClk, WRITE_EN) #; sleep(self.delay)#disable for highest motor speed\n ret, frame = cap.read() # Capture frame-by-frame \n frames.append(frame) # store per group\n\n\n \n cap.release()\n cv2.destroyAllWindows()\n return np.asarray(frames)",
"def granulate(self, length):\n if length == self._flen:\n return\n\n self._flen = length\n frame_count = int(math.ceil(len(self) / float(length)))\n #TODO: Recalculate findex (index will be the same)\n new_findex = 0\n frames = []\n for frame in range(frame_count):\n frames.append(self[frame * length:frame * length + length])\n self._frames = frames\n self._findex = new_findex\n self._index = 0 # temporary"
] |
[
"0.6031604",
"0.59864634",
"0.58658904",
"0.58351475",
"0.57659346",
"0.5749059",
"0.5747793",
"0.567315",
"0.56341195",
"0.5571236",
"0.5405695",
"0.53920823",
"0.5365756",
"0.5362583",
"0.5350319",
"0.53134966",
"0.53134966",
"0.53134966",
"0.53134966",
"0.53134966",
"0.5304561",
"0.52958804",
"0.5265327",
"0.5254805",
"0.5217427",
"0.5186136",
"0.5175343",
"0.5170501",
"0.51438427",
"0.51401716"
] |
0.60254115
|
1
|
Computes the number of frames for a given signal length.
|
def number_frames(signal_len, frame_len, frame_step):
frames = 1
if signal_len > frame_len:
temp = (1.0 * signal_len - frame_len)/frame_step
frames += int(np.floor(temp))
return frames
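
A quick worked example for the formula above, assuming numpy is imported as np; the values (a 1-second signal at 16 kHz with 400-sample frames and a 160-sample step) are chosen only for illustration.

import numpy as np

# frames = 1 + floor((16000 - 400) / 160) = 1 + floor(97.5) = 98
print(number_frames(16000, 400, 160))  # 98

# A signal shorter than one frame still counts as a single frame.
print(number_frames(300, 400, 160))    # 1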
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def num_frames(length, fsize, fshift):\n pad = (fsize - fshift)\n if length % fshift == 0:\n M = (length + pad * 2 - fsize) // fshift + 1\n else:\n M = (length + pad * 2 - fsize) // fshift + 2\n return M",
"def lws_num_frames(length, fsize, fshift):\n pad = (fsize - fshift)\n if length % fshift == 0:\n M = (length + pad * 2 - fsize) // fshift + 1\n else:\n M = (length + pad * 2 - fsize) // fshift + 2\n return M",
"def num_frames(self, inp_len: th.Tensor) -> th.Tensor:\n if inp_len is None:\n return None\n if self.spectra_index == -1:\n warnings.warn(\"SpectrogramTransform layer is not found, \" +\n \"return input as the #num_frames\")\n return inp_len\n if self.perturb_index != -1:\n inp_len = self.transform[self.perturb_index].output_length(inp_len)\n num_frames = self.transform[self.spectra_index].num_frames(inp_len)\n # return num_frames // self.subsampling_factor\n return th.div(num_frames,\n self.subsampling_factor,\n rounding_mode=\"trunc\")",
"def _interFrameLen(self):\n return np.ceil((self.interFrameDuration * self.sampleRate) / self.downsample) * self.downsample",
"def get_num_frames(self):\n return self._frames.shape[0]",
"def num_frames(self):\n return self._first_rgb.shape[1]",
"def n_blocks(n_frames, block_length):\n return n_frames - block_length + 1",
"def frame_length(self):\r\n return self.config.frame_length",
"def __len__(self):\n return int(np.ceil(self.total_frame_count / self.batch_size))",
"def get_length(self):\r\n check_mixer()\r\n frequency, format, channels = (ffi.new('int*'), ffi.new('uint16_t*'),\r\n ffi.new('int*'))\r\n sdl.Mix_QuerySpec(frequency, format, channels)\r\n if format == sdl.AUDIO_S8 or format == sdl.AUDIO_U8:\r\n mixerbytes = 1.0\r\n else:\r\n mixerbytes = 2.0\r\n numsamples = self.chunk.alen / mixerbytes / channels[0]\r\n return numsamples / frequency[0]",
"def _frameLen(self):\n return self.numCols * self.numRows",
"def get_total_frames(self) -> int:\n return self.num_frames",
"def get_frame_size(*args):\n return _ida_frame.get_frame_size(*args)",
"def chunk_size(self):\r\n return int(self.frame_length * self.sample_rate)",
"def full_frame_length(self):\n return self.height * self.width * 3",
"def get_frame_width(self) -> int:\n return self.__sim.frame_size()[0]",
"def sample_count(self):\n if self._sample_count:\n return self._sample_count\n else:\n return self._wave.getnframes()",
"def framing(signal, frame_length, frame_step, window_func=lambda x: np.ones((x,))):\n signal_length = len(signal)\n num_frames = 1 + (signal_length - frame_length) // frame_step\n\n frames = np.zeros((num_frames, frame_length))\n for index in range(num_frames):\n frames[index] = np.asarray(signal[index * frame_step: index * frame_step + frame_length],\n dtype='float32') * window_func(frame_length)\n return frames",
"def size(self):\n if self.frames is None:\n return 0\n return self.frames.size",
"def count_frames(f):\n def counted(n):\n counted.open_count += 1\n counted.max_count = max(counted.max_count, counted.open_count)\n result = f(n)\n counted.open_count -= 1\n return result\n counted.open_count = 0\n counted.max_count = 0\n return counted",
"def get_frame_size(self):\n return self._frames.shape[-1]",
"def frame_width(self) -> int:\n pass",
"def pulse_width(self) -> int:",
"def __calculate_number_of_frames(self):\n # Save current position\n current_pos = self.__file_object.tell()\n\n # Go to start of first frame\n self.__file_object.seek(self.__first_frame_raw_data_position)\n self.number_of_frames = 0\n\n while True:\n if not self.__file_object.read(self.__frame_raw_data_size):\n break\n\n self.__file_object.readline()\n self.number_of_frames += 1\n\n # Restore file pointer\n self.__file_object.seek(current_pos)\n print('Number of frames:', self.number_of_frames)",
"def get_frame_size(self) -> Tuple[int, int]:\n return self.__sim.frame_size()",
"def num_frames(self):\n return len(self.video)",
"def get_mfcc_length_from_duration(duration):\n length = int(duration // FRAME_STRIDE) - 1\n return length",
"def frames(self):\n frame_count = 0\n if self.is_video() or self.is_audio():\n if self.__dict__['nb_frames']:\n try:\n frame_count = int(self.__dict__['nb_frames'])\n except ValueError:\n raise FFProbeError('None integer frame count')\n return frame_count",
"def calculate_handlen(hand):\n handlen = len(hand)\n return handlen",
"def fft_size(self):\n import supriya.ugens\n\n return supriya.ugens.BufFrames.ir(self.buffer_id)"
] |
[
"0.7928671",
"0.76205933",
"0.7365137",
"0.7048614",
"0.69185",
"0.6758897",
"0.67540646",
"0.6622454",
"0.65737015",
"0.6522947",
"0.65163374",
"0.6490955",
"0.64401543",
"0.64093006",
"0.6391117",
"0.637485",
"0.6360134",
"0.6314284",
"0.62693727",
"0.62614816",
"0.6247267",
"0.6150701",
"0.6124027",
"0.61095285",
"0.60960495",
"0.6073031",
"0.6030496",
"0.5988111",
"0.59810555",
"0.59627366"
] |
0.87695944
|
0
|
Computes a grid of indices for possibly overlapping frames.
|
def indices_grid(frame_len, frame_step, num_frames):
indices = np.tile(np.arange(0, frame_len), (num_frames, 1)) + \
np.tile(np.arange(0, num_frames * frame_step, frame_step), (frame_len, 1)).T
indices = np.array(indices, dtype=np.int32)
return indices
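
A short illustration of the index grid this produces, assuming numpy is imported as np; the small sizes are arbitrary.

import numpy as np

print(indices_grid(frame_len=3, frame_step=2, num_frames=3))
# [[0 1 2]
#  [2 3 4]
#  [4 5 6]]
# Each row holds the sample indices of one frame; consecutive frames
# overlap by frame_len - frame_step samples (here 1).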
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def master_ndindex(self): # itermaster_indices(self):\n return itertools_product(\n *[range(*r) for r in self.location]\n ) # TODO check",
"def construct_indices(after_pooling):\n our_indices = np.zeros_like(after_pooling, dtype=np.int64)\n batch_num, channel_num, row_num, col_num = after_pooling.shape\n for batch_id in range(batch_num):\n for channel_id in range(channel_num):\n for row_id in range(row_num):\n for col_id in range(col_num):\n our_indices[batch_id, channel_id, row_id, col_id] = col_num * 2 * 2 * row_id + 2 * col_id\n return torch.from_numpy(our_indices)",
"def get_grid_index(init_grid_size, map_size, device):\n H_init, W_init = init_grid_size\n H, W = map_size\n idx = torch.arange(H * W, device=device).reshape(1, 1, H, W)\n idx = F.interpolate(idx.float(), [H_init, W_init], mode='nearest').long()\n return idx.flatten()",
"def idx_to_grid(n):\n\n x = n % MAX_Y\n y = int(n / MAX_X)\n return(x, y)",
"def _get_grid_cell_indexes(proj, xs, ys, bounding_box):\n # Unpack values from the projection\n eq_rad = proj.semi_major_axis\n polar_rad = proj.semi_minor_axis\n h = proj.perspective_point_height + eq_rad\n lon0 = proj.longitude_of_projection_origin\n \n # Unpack values from the area we want to grab the data\n min_lat, min_lon = bounding_box.sw_corner()\n max_lat, max_lon = bounding_box.ne_corner()\n \n with np.errstate(invalid='ignore'):\n # Calculate the lat and lon grids\n xs, ys = np.meshgrid(xs, ys)\n a_vals = np.power(np.sin(xs), 2.0) + \\\n np.power(np.cos(xs), 2.0) * (np.power(np.cos(ys), 2.0) + \\\n eq_rad * eq_rad / polar_rad / polar_rad * np.power(np.sin(ys), 2.0))\n b_vals = -2 * h * np.cos(xs) * np.cos(ys)\n c_val = h * h - eq_rad * eq_rad\n \n rs = (-b_vals - np.sqrt(np.power(b_vals, 2.0) - 4 * a_vals * c_val)) / (2 * a_vals)\n \n sx = rs * np.cos(xs) * np.cos(ys)\n sy = -rs * np.sin(xs)\n sz = rs * np.cos(xs) * np.sin(ys)\n \n lats = np.arctan((eq_rad *eq_rad * sz) \\\n / (polar_rad * polar_rad * np.sqrt(np.power(h - sx, 2.0) + np.power(sy, 2.0))))\n lats = np.degrees(lats)\n \n lons = np.radians(lon0) - np.arctan(sy / (h - sx))\n lons = np.degrees(lons)\n \n # Flatten the arrays so we get a 1D list of indexes\n lats = lats.flatten()\n lons = lons.flatten()\n \n # Filter out values not in our bounding box\n lats = np.where(np.logical_and(lats >= min_lat, lats <= max_lat))[0]\n lons = np.where(np.logical_and(lons >= min_lon, lons <= max_lon))[0]\n idxs = list(set(lons).intersection(set(lats)))\n \n return idxs",
"def hoggar_indices():\n return list(product([0,1], repeat=6))",
"def build_bounds_index(image_files):\n idx = index.Index()\n xs = []\n ys = []\n for i, img_path in tqdm(list(enumerate(image_files))):\n with rasterio.open(img_path) as src:\n left, bottom, right, top = src.bounds\n xs.extend([left, right])\n ys.extend([bottom, top])\n idx.insert(i, (left, bottom, right, top))\n dst_w, dst_s, dst_e, dst_n = min(xs), min(ys), max(xs), max(ys)\n return idx, (dst_w, dst_s, dst_e, dst_n)",
"def get_overlapping_indices(self):\n return self._get_atomic_overlaps()",
"def _pair_indices(self):\n indices_src = []\n indices_dst = []\n for i in range(self.walk_len):\n for j in range(max(i - self.l, 0), i):\n indices_src.append(i)\n indices_dst.append(j)\n for j in range(i + 1, min(i + self.r + 1, self.walk_len)):\n indices_src.append(i)\n indices_dst.append(j)\n return indices_src, indices_dst",
"def get_track_mask_idxes(self):\n instance_id_num_pts = defaultdict(lambda: 0)\n instance_id_lifetimes = defaultdict(lambda: [10000, -1])\n\n for frame_num, labels_per_frame in enumerate(self._frame_labels):\n for id in labels_per_frame.unique().tolist():\n instance_id_num_pts[id] += (labels_per_frame == id).long().sum().item()\n instance_id_lifetimes[id][0] = min(frame_num, instance_id_lifetimes[id][0])\n instance_id_lifetimes[id][1] = max(frame_num, instance_id_lifetimes[id][1])\n\n instance_id_lifetimes = {k: v[1] - v[0] for k, v in instance_id_lifetimes.items()}\n return self._frame_labels, instance_id_num_pts, instance_id_lifetimes",
"def get_tile_indices(rows, cols, row_tile_size, col_tile_size):\n indices = list()\n num_row_tiles, num_col_tiles = get_num_tiles(rows, cols, row_tile_size, col_tile_size)\n for r in range(0, num_row_tiles):\n start_r = r * row_tile_size\n end_r = ((r + 1) * row_tile_size) if (r < num_row_tiles - 1) else rows\n for c in range(0, num_col_tiles):\n start_c = c * col_tile_size\n end_c = ((c + 1) * col_tile_size) if (c < num_col_tiles - 1) else cols\n indices.append((start_r, end_r, start_c, end_c, r + 1, c + 1))\n return indices",
"def get_grids(N_X, N_Y, N_frame):\n if N_frame>1:\n fx, fy, ft = np.mgrid[(-N_X//2):((N_X-1)//2 + 1), (-N_Y//2):((N_Y-1)//2 + 1), (-N_frame//2):((N_frame-1)//2 + 1)]\n else:\n fx, fy, ft = np.mgrid[(-N_X//2):((N_X-1)//2 + 1), (-N_Y//2):((N_Y-1)//2 + 1), 0:1]\n fx, fy, ft = fx*1./N_X, fy*1./N_Y, ft*1./N_frame\n\n return fx, fy, ft",
"def get_grid_locations(self, top_left, other_pos):\n cell_x = torch.floor(((other_pos[:, 0] - top_left[:, 0]) / self.neighborhood_size) *self.grid_size)\n\n # Added this part to implementation, otherwise the pooling is going to run into an indexing error\n cell_x[cell_x == self.grid_size] -= 1\n cell_y = torch.floor(((top_left[:, 1] - other_pos[:, 1]) / self.neighborhood_size) *self.grid_size)\n cell_y[cell_y == self.grid_size] -= 1\n grid_pos = cell_x + cell_y * self.grid_size\n\n return grid_pos",
"def get_hashed_spatial_pos_emb_index(grid_size, count_h,\n count_w):\n pos_emb_grid = tf.range(grid_size, dtype=tf.int32)\n pos_emb_grid = tf.reshape(pos_emb_grid, [grid_size, 1, 1])\n pos_emb_hash_w = tf.image.resize(\n pos_emb_grid, [count_w, 1],\n method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,\n name='pos_emb_hash_w')[:, 0, 0]\n pos_emb_hash_w = tf.cast(pos_emb_hash_w, dtype=tf.int32)\n pos_emb_hash_w = tf.expand_dims(pos_emb_hash_w, axis=0)\n pos_emb_hash_w = tf.tile(pos_emb_hash_w, (count_h, 1))\n\n pos_emb_hash_h = tf.image.resize(\n pos_emb_grid, [count_h, 1],\n method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,\n name='pos_emb_hash_h')[:, 0, 0]\n pos_emb_hash_h = tf.cast(pos_emb_hash_h, dtype=tf.int32)\n pos_emb_hash_h = tf.cast(pos_emb_hash_h, dtype=tf.int32)\n pos_emb_hash_h = tf.expand_dims(pos_emb_hash_h, axis=1)\n pos_emb_hash_h = tf.tile(pos_emb_hash_h, (1, count_w))\n\n pos_emb_hash = pos_emb_hash_h * grid_size + pos_emb_hash_w\n\n # Shape (num_patches, 1)\n pos_emb_hash = tf.reshape(pos_emb_hash, (-1, 1))\n pos_emb_hash = tf.cast(pos_emb_hash, tf.float32)\n return pos_emb_hash",
"def find_all_elements(grid, target):\n \n indices = []\n \n ### This pattern of iterating through row and col indices is very common\n for row_number in range(len(grid)):\n for col_number in range(len(grid[row_number])):\n \n if grid[row_number][col_number] == target:\n indices.append((row_number, col_number))\n \n return indices",
"def get_valid_index_from_cartesian_grid(cart_grid, lons, lats,\n radius_of_influence):\n\n def _get_lons(x, y):\n return np.rad2deg(np.arccos(x / np.sqrt(x ** 2 + y ** 2))) * np.sign(y)\n\n def _get_lats(z):\n return 90 - np.rad2deg(np.arccos(z / R))\n\n # Get sides of target grid and transform to lon lats\n lons_side1 = _get_lons(cart_grid[0, :, 0], cart_grid[0, :, 1])\n lons_side2 = _get_lons(cart_grid[:, -1, 0], cart_grid[:, -1, 1])\n lons_side3 = _get_lons(cart_grid[-1, ::-1, 0], cart_grid[-1, ::-1, 1])\n lons_side4 = _get_lons(cart_grid[::-1, 0, 0], cart_grid[::-1, 0, 1])\n\n lats_side1 = _get_lats(cart_grid[0, :, 2])\n lats_side2 = _get_lats(cart_grid[:, -1, 2])\n lats_side3 = _get_lats(cart_grid[-1, ::-1, 2])\n lats_side4 = _get_lats(cart_grid[::-1, 0, 2])\n\n valid_index = _get_valid_index(lons_side1, lons_side2, lons_side3, lons_side4,\n lats_side1, lats_side2, lats_side3, lats_side4,\n lons, lats, radius_of_influence)\n\n return valid_index",
"def _get_valid_index(lons_side1, lons_side2, lons_side3, lons_side4,\n lats_side1, lats_side2, lats_side3, lats_side4,\n lons, lats, radius_of_influence):\n\n # Coarse reduction of data based on extrema analysis of the boundary\n # lon lat values of the target grid\n illegal_lons = (((lons_side1 < -180) | (lons_side1 > 180)).any() or\n ((lons_side2 < -180) | (lons_side2 > 180)).any() or\n ((lons_side3 < -180) | (lons_side3 > 180)).any() or\n ((lons_side4 < -180) | (lons_side4 > 180)).any())\n\n illegal_lats = (((lats_side1 < -90) | (lats_side1 > 90)).any() or\n ((lats_side2 < -90) | (lats_side2 > 90)).any() or\n ((lats_side3 < -90) | (lats_side3 > 90)).any() or\n ((lats_side4 < -90) | (lats_side4 > 90)).any())\n\n if illegal_lons or illegal_lats:\n # Grid boundaries are not safe to operate on\n return np.ones(lons.size, dtype=np.bool)\n\n # Find sum angle sum of grid boundary\n angle_sum = 0\n for side in (lons_side1, lons_side2, lons_side3, lons_side4):\n prev = None\n side_sum = 0\n for lon in side:\n if prev:\n delta = lon - prev\n if abs(delta) > 180:\n delta = (abs(delta) - 360) * (delta // abs(delta))\n angle_sum += delta\n side_sum += delta\n prev = lon\n\n # Buffer min and max lon and lat of interest with radius of interest\n lat_min = min(lats_side1.min(), lats_side2.min(), lats_side3.min(),\n lats_side4.min())\n lat_min_buffered = lat_min - float(radius_of_influence) / R\n lat_max = max(lats_side1.max(), lats_side2.max(), lats_side3.max(),\n lats_side4.max())\n lat_max_buffered = lat_max + float(radius_of_influence) / R\n\n max_angle_s2 = max(abs(lats_side2.max()), abs(lats_side2.min()))\n max_angle_s4 = max(abs(lats_side4.max()), abs(lats_side4.min()))\n lon_min_buffered = (lons_side4.min() -\n float(radius_of_influence) /\n (np.sin(np.radians(max_angle_s4)) * R))\n\n lon_max_buffered = (lons_side2.max() +\n float(radius_of_influence) /\n (np.sin(np.radians(max_angle_s2)) * R))\n\n # From the winding number theorem follows:\n # angle_sum possiblilities:\n # -360: area covers north pole\n # 360: area covers south pole\n # 0: area covers no poles\n # else: area covers both poles\n if round(angle_sum) == -360:\n # Covers NP\n valid_index = (lats >= lat_min_buffered)\n elif round(angle_sum) == 360:\n # Covers SP\n valid_index = (lats <= lat_max_buffered)\n elif round(angle_sum) == 0:\n # Covers no poles\n valid_lats = (lats >= lat_min_buffered) * (lats <= lat_max_buffered)\n\n if lons_side2.min() > lons_side4.max():\n # No date line crossing\n valid_lons = (lons >= lon_min_buffered) * \\\n (lons <= lon_max_buffered)\n else:\n # Date line crossing\n seg1 = (lons >= lon_min_buffered) * (lons <= 180)\n seg2 = (lons <= lon_max_buffered) * (lons >= -180)\n valid_lons = seg1 + seg2\n\n valid_index = valid_lats * valid_lons\n else:\n # Covers both poles don't reduce\n valid_index = np.ones(lons.size, dtype=np.bool)\n\n return valid_index",
"def createGrid(nx, ny, include_center = False):\n direction = 0\n positions = []\n if (nx > 1) or (ny > 1):\n half_x = int(nx/2)\n half_y = int(ny/2)\n for i in range(-half_y, half_y+1):\n for j in range(-half_x, half_x+1):\n if ((i==0) and (j==0)) and not include_center:\n continue\n else:\n if ((direction%2)==0):\n positions.append([j,i])\n else:\n positions.append([-j,i])\n direction += 1\n return positions",
"def _compute_indices(self):\n self.indices = np.arange(len(self.im_filenames))\n np.random.shuffle(self.indices)",
"def get_inter_sample_indices(self):\n inter_sample_indices = []\n for group_index1 in range(self.num_groups):\n for group_index2 in range(self.num_groups):\n num_images_in_group1 = self.num_images_per_group[group_index1]\n num_images_in_group2 = self.num_images_per_group[group_index2]\n for image_index1 in range(num_images_in_group1):\n for image_index2 in range(num_images_in_group2):\n inter_sample_indices.append(\n ((group_index1, image_index1), (group_index2, image_index2))\n )\n return inter_sample_indices",
"def footprint_corner_indices():",
"def makeIndexMap(self):\n\t\tn = self.numRects\n\t\thalfList = [[(j,n-1-i+j) for j in range(i+1)] for i in range(n)]\n\t\tfullList = halfList + [[(j[1],j[0]) for j in i] for i in halfList[n-2::-1]]\n\t\treturn fullList",
"def base_idx_neighbor_idx_simplices(n_base, n_neighbors=5, n_dim=2):\n combinations = np.array(list(itertools.combinations(np.arange(1,\n n_neighbors),\n n_dim-1))).astype(int)\n base_indices = np.repeat(np.arange(n_base), len(combinations))\n all_simplices = np.vstack([base_indices,\n np.tile(combinations, (n_base, 1)).T]).T\n #print('simplices', os.getpid(), len(all_simplices), flush=True)\n return all_simplices",
"def get_interbreeding_indexes(index_count, including_same = False, cross = False, deep = 0) -> 'list':\n indexes = []\n correct = 1 if not including_same else 0\n for i in range(index_count - correct):\n index_from = deep\n index_to = i + deep + correct\n\n indexes.append([\n index_from,\n index_to\n ])\n\n if cross and index_from != index_to:\n indexes.append([\n index_to,\n index_from\n ])\n\n if index_count - correct > 1:\n indexes += get_interbreeding_indexes(index_count - 1, including_same, cross, deep + 1)\n\n return indexes",
"def _iter_indices(self, frame, y):\n pass",
"def trackCells(self): \n # variable initialization\n self.t_appearance = []\n self.id_seq = []\n self._n_cell = 0\n self.division = []\n self.dict_track = {}\n\n t_ini = self.configs[self.TIME_INI_KEY]\n\n # Initialize points with first frame\n self.dict_track[0] = {}\n for id_num in self.pos[0][4]:\n self._addTrack(0, id_num)\n\n # From frame=1 on...\n for t, data_t in enumerate(self.pos[1:]):\n self.dict_track[t+1] = {}\n # check all the cells in this frame\n for cell, id_num in enumerate(data_t[4]):\n parent = data_t[5][cell]\n if parent == -1:\n # new track\n self._addTrack(t+1, id_num)\n elif parent in self.dict_track[t]:\n # check if parent is in the previous list\n index = self.dict_track[t][parent]\n if index in self.dict_track[t+1].values():\n # then we have cell division, and we monitor the child separarely\n self._addDivision(t+1, id_num, index)\n else:\n # cell continues in the same track\n self.id_seq[index].append(id_num)\n self.dict_track[t+1][id_num] = index\n else:\n # weird things happened!\n print(\"Warning! Time %s, Cell ID %s lost track\"%(str(t+t_ini), str(id_num)))\n\n\n # all track indexes are included, no filter (yet)\n self.index_filter = list(range(len(self.id_seq)))\n\n return self.id_seq, self.t_appearance",
"def setup_positions(self):\n x, y = np.meshgrid(np.arange(self.img.shape[1]), np.arange(self.img.shape[0]))\n x = x[self.img > 0]\n y = y[self.img > 0]\n self.X = np.array([x, y]).T\n N = x.size\n pos2idx = {(x[i], y[i]):i for i in range(x.size)}\n neighbors = [[i] for i in range(N)]\n for i in range(N):\n xi = x[i]\n yi = y[i]\n for (dx, dy) in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\n neighb = (xi+dx, yi+dy)\n if neighb in pos2idx:\n neighbors[i].append(pos2idx[neighb])\n self.pos2idx = pos2idx\n self.neighbors = neighbors",
"def ij_coordinates(self):\n\n x = np.arange(self.nx)\n y = np.arange(self.ny)\n return np.meshgrid(x, y)",
"def rectangles_in_grid(x_f, y_f):\n count = 0\n for x in range(x_f):\n for y in range(y_f):\n for i in range(x, x_f):\n for j in range(y, y_f):\n count += 1\n return count",
"def _possible_grids(self, num_windows):\n if num_windows < 2:\n end = 2\n else:\n end = num_windows // 2 + 1\n for rows in range(1, end):\n cols = int(math.ceil(num_windows / rows))\n yield (rows, cols, ROWCOL)\n if rows != cols:\n # also want the reverse test\n yield (cols, rows, COLROW)"
] |
[
"0.66452646",
"0.6411415",
"0.6340614",
"0.6286571",
"0.6212913",
"0.61323994",
"0.6022557",
"0.60102123",
"0.59811884",
"0.5980919",
"0.5949921",
"0.5935643",
"0.5918394",
"0.58906466",
"0.58795816",
"0.5863771",
"0.5861669",
"0.58537227",
"0.5849864",
"0.5849568",
"0.58342475",
"0.58314884",
"0.58215964",
"0.5797253",
"0.5782588",
"0.5782274",
"0.5778475",
"0.57126343",
"0.57115954",
"0.5708771"
] |
0.82699645
|
0
|
Computes either the hamming window or its inverse and applies it to a sequence of frames.
|
import numpy as np

def apply_hamming(frames, inv=False):
    """Apply the Hamming window (or its inverse) to every frame in a 2-D array of frames."""
    M = frames.shape[1]  # samples per frame
    win = np.hamming(M)**(-1) if inv else np.hamming(M)
    return frames * win
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def windowing(input):\n return input * hamming(input.shape[1], sym=0)",
"def windowing(input):\n N, M = np.shape(input)\n\n window = signal.hamming(M, sym=0)\n\n window_axis = lambda sample: sample * window\n\n output = np.apply_along_axis(window_axis, 1, input)\n\n # myplot(output, 'Hamming Window')\n\n return output",
"def apply_window(audio):\n\treturn audio * numpy.hanning(len(audio))",
"def Butler_mize_window(self):\n\n wind = np.ones([1, dim_matrix*2])\n a_0 = (1-alpha_0)/2\n a_1 = 1/2\n a_2 = a_1-a_0\n for i in range(0, dim_matrix*2):\n wind[0, i] = (a_0-a_1*np.cos(2*np.pi*(i)/(2*dim_matrix-1)) +\n a_2*np.cos(4*np.pi*(i)/(2*dim_matrix-1)))\n # normal blackman function\n for i in range(1+x_m+2*x_0+dim_matrix, x_m+2*x_0+2*dim_matrix):\n # creates the right half\n wind[0, i] = wind[0, i]\n for i in range(0, x_m): # define left side\n wind[0, i] = x_m+1\n wind[0, x_m+dim_matrix] = 1\n wind[0, x_m+dim_matrix-1] = 1\n # these two lines define the center;\n # they make positions 127, 128 both 1\n\n wind[0, x_m] = 0 # makes left side zero\n wind[0, 2*dim_matrix-1] = 0 # makes right side zero\n dwind = np.ones([dim_matrix, dim_matrix])\n # create the array for the next step\n for i in range(0, dim_matrix):\n dwind[:, i] = wind[0, (dim_matrix-i):2*dim_matrix-i]\n wind2 = np.ones([1, dim_matrix])\n for i_2 in range(dim_matrix-round(dim_matrix/4), dim_matrix):\n wind2[0, i_2] = abs(np.sin(np.pi/2*((i_2)/\n (round(dim_matrix/4)))))\n # Taper\n wind2[0, dim_matrix-1] = 0\n wind3 = (wind2*(np.ones([dim_matrix, dim_matrix])))\n windowed = self.baseline_corrected*wind3*np.transpose(wind3)*dwind\n\n return windowed",
"def flattenFrames(stack, onh_info):\n \n maxHeight=0\n frameList=[]\n\n if onh_info!=-1:\n y_min = onh_info.bbox[0]\n #need to subtract one because index?\n y_max = onh_info.bbox[2]\n \n #hull starts at (0,0), add the y and x min to translate to correct indices.\n hull_onh = np.array(np.where(onh_info.convex_image)) + np.array([[y_min], [onh_info.bbox[1]]])\n elif onh_info==-1:\n #should prevent shiftDetectorONH from running since i will always be greater than -1\n #hull_onh has been left undefined.\n y_min, y_max = -1,-1\n \n for i, frame in enumerate(stack):\n #medFrame = ndimage.filters.median_filter(frame,size=(1,60)) #Takes 3.5 minutes\n medFrame = ndimage.filters.uniform_filter1d(frame, 60) #Takes 1.0 minutes and has same output as med filter\n if i>=y_min and i<y_max:\n #get the index of x pixels that are part of the onh for each frame\n #these are indices of indices\n x_onh_ind = np.array(np.where(hull_onh[0]==i)) \n x_onh = hull_onh.T[x_onh_ind][0].T[1]\n #this should be sorted so that its the x_min and max for each frame\n x_onh_bounds = (x_onh[0], x_onh[-1])\n shifts = shiftDetectorONH(medFrame, onh_info, x_onh_bounds)\n else:\n shifts = shiftDetector(medFrame)\n newFrame = adjustFrame(frame, shifts)\n frameList.append(newFrame)\n if newFrame.shape[0] > maxHeight:\n maxHeight = newFrame.shape[0]\n \n #Show percentage of loop completed.\n print('\\rFinding and correcting horizontal shifts: {:.2f}% done'.format((100.0*((i+1)/len(stack)))), end='', flush=True)\n print('\\n')\n \n flattenedStack = padFrames(frameList, maxHeight)\n\n return flattenedStack",
"def morph(face0, face1, h, w, tri0_vertices, tri1_vertices):\n frames = []\n\n for i in range(46):\n # i = 5\n print(\"Working on the \" + str(i) + \"th frame!\")\n print(\"Creating triangulation.\")\n # Create Dalaunay triangulation for the ith morph set\n morph_verticies = morphPointSet(tri0_vertices, tri1_vertices, i/45)\n t = Delaunay(morph_verticies)\n trianguations = t.simplices\n\n # Compute Affine Transformation matrices for both transformations (src-->mid; dst-->mid)\n print(\"Computing Affine Transformation.\")\n affine_matrices_0 = computeAffines(trianguations, tri0_vertices, morph_verticies)\n affine_matrices_inv_0 = [linalg.inv(A) for A in affine_matrices_0]\n affine_matrices_1 = computeAffines(trianguations, tri1_vertices, morph_verticies)\n affine_matrices_inv_1 = [linalg.inv(A) for A in affine_matrices_1]\n\n # Morphed images\n print(\"Inverse warping.\")\n morph0 = warp(h, w, t, affine_matrices_inv_0, face0)\n morph1 = warp(h, w, t, affine_matrices_inv_1, face1)\n # frame = morph_frame(face0, face1, h, w, tri0_vertices, tri1_vertices, t, i/45, i/45)\n\n print(\"Creating morph frame.\")\n frame = morph_frame(morph0, morph1, i/45)\n frames.append(frame)\n # break\n\n return frames",
"def hybrid_forward(self, F, x, img):\n x = F.maximum(x, 0.0)\n # window [B, 2] -> reverse hw -> tile [B, 4] -> [B, 1, 4], boxes [B, N, 4]\n window = F.shape_array(img).slice_axis(axis=0, begin=2, end=None).expand_dims(0)\n m = F.tile(F.reverse(window, axis=1), reps=(2,)).reshape((0, -4, 1, -1))\n return F.broadcast_minimum(x, F.cast(m, dtype='float32'))",
"def window(self):\n hanning = numpy.hanning(nFFT)\n for i in range(nFFT):\n self.cur_input[i] *= hanning[i]",
"def windowing(im, win):\n im1 = im.astype(float)\n im1 -= win[0]\n im1 /= win[1] - win[0]\n im1[im1 > 1] = 1\n im1[im1 < 0] = 0\n im1 *= 255\n return im1",
"def hannwin(*args, **kwargs):\n return hanning(*args, **kwargs)",
"def horizontal_flip() -> Callable:\n return lambda img: TF.hflip(img)",
"def sliding_window_decoding( model, X, input_shape, overlapping = 32 ) :\n patch_bboxes = get_patch_bboxes( X.shape, input_shape, overlapping )\n n_samples, n_chs, height, width = X.shape\n Z = np.zeros( X.shape, dtype = np.float32 )\n C = np.zeros( X.shape, dtype = np.float32 )\n pad_before, pad_after = min( input_shape ) // 4, min( input_shape ) // 4\n for top, bot, left, right in patch_bboxes :\n x = X[ :, :, top:bot, left:right ]\n z = model.predict( x )\n if ( top == 0 ) and ( bot == height ) :\n if ( left == 0 ) and ( right == width ):\n Z[:,:,top:bot,left:right] += z\n C[:,:,top:bot,left:right] += 1. \n elif ( left == 0 ) :\n Z[:,:,top:bot,left:right-pad_after] += z[:,:,:,:-pad_after]\n C[:,:,top:bot,left:right-pad_after] += 1.\n elif ( right == width ) :\n Z[:,:,top:bot,left+pad_before:right] += z[:,:,:,pad_before:]\n C[:,:,top:bot,left+pad_before:right] += 1.\n else :\n Z[:,:,top:bot,left+pad_before:right-pad_after] += z[:,:,:,pad_before:-pad_after]\n C[:,:,top:bot,left+pad_before:right-pad_after] += 1. \n elif ( top == 0 ) :\n if ( left == 0 ) and ( right == width ):\n Z[:,:,top:bot-pad_after,left:right] += z[:,:,:-pad_after,:]\n C[:,:,top:bot-pad_after,left:right] += 1.\n elif ( left == 0 ) :\n Z[:,:,top:bot-pad_after,left:right-pad_after] += z[:,:,:-pad_after,:-pad_after]\n C[:,:,top:bot-pad_after,left:right-pad_after] += 1. \n elif ( right == width ) :\n Z[:,:,top:bot-pad_after,left+pad_before:right] += z[:,:,:-pad_after,pad_before:]\n C[:,:,top:bot-pad_after,left+pad_before:right] += 1.\n else :\n Z[:,:,top:bot-pad_after,left+pad_before:right-pad_after] += z[:,:,:-pad_after,pad_before:-pad_after]\n C[:,:,top:bot-pad_after,left+pad_before:right-pad_after] += 1.\n elif ( bot == height ) :\n if ( left == 0 ) and ( right == width ):\n Z[:,:,top+pad_before:bot,left:right] += z[:,:,pad_before:,:]\n C[:,:,top+pad_before:bot,left:right] += 1.\n elif ( left == 0 ) :\n Z[:,:,top+pad_before:bot,left:right-pad_after] += z[:,:,pad_before:,:-pad_after]\n C[:,:,top+pad_before:bot,left:right-pad_after] += 1.\n elif ( right == width ) :\n Z[:,:,top+pad_before:bot,left+pad_before:right] += z[:,:,pad_before:,pad_before:]\n C[:,:,top+pad_before:bot,left+pad_before:right] += 1.\n else :\n Z[:,:,top+pad_before:bot,left+pad_before:right-pad_after] += z[:,:,pad_before:,pad_before:-pad_after]\n C[:,:,top+pad_before:bot,left+pad_before:right-pad_after] += 1.\n else :\n if ( left == 0 ) and ( right == width ) :\n Z[:,:,top+pad_before:bot-pad_after,left:right] += z[:,:,pad_before:-pad_after,:]\n C[:,:,top+pad_before:bot-pad_after,left:right] += 1.\n elif ( left == 0 ) :\n Z[:,:,top+pad_before:bot-pad_after,left:right-pad_after] += z[:,:,pad_before:-pad_after,:-pad_after]\n C[:,:,top+pad_before:bot-pad_after,left:right-pad_after] += 1. \n elif ( right == width ) :\n Z[:,:,top+pad_before:bot-pad_after,left+pad_before:right] += z[:,:,pad_before:-pad_after,pad_before:]\n C[:,:,top+pad_before:bot-pad_after,left+pad_before:right] += 1.\n else :\n Z[:,:,top+pad_before:bot-pad_after,left+pad_before:right-pad_after] += z[:,:,pad_before:-pad_after,pad_before:-pad_after]\n C[:,:,top+pad_before:bot-pad_after,left+pad_before:right-pad_after] += 1.\n return Z / C",
"def hammwin(x):\n print('hammwin is untested')\n if isinstance(x, (list, tuple, np.ndarray)):\n n = x.shape[1]\n f = hammwin(n)\n\n if len(x.shape) == 3:\n f, _, _ = np.meshgrid(f[0, :], np.arange(\n x.shape[0]), np.arange(x.shape[2]))\n else:\n f, _ = np.meshgrid(f[0, :], np.arange(x.shape[0]))\n else:\n\n n = x\n f = np.reshape((0.54 - 0.46 * np.cos(2 * np.pi * (np.arange(n)) /\n (n - 1))) * np.sqrt(5000 / 1987),\n (1, -1))\n f = f / la.norm(f) * np.sqrt(n)\n\n return f",
"def shiftDetectorONH(frame, onh_info, x_onh_bounds):\n\n x_min = x_onh_bounds[0]-30\n x_max = x_onh_bounds[1]+30\n frame_len = frame.shape[1]\n mid_x = int(frame_len/2)\n\n norm = frame/np.max(frame)#(2**16)\n #if the frame midpoint is inside the bbox x bounds\n #this section is to avoid using any part of the onh as the a-scan to reference when doing the cross-correlation\n if mid_x>=x_min and mid_x<=x_max:\n d_min = mid_x-x_min\n d_max = x_max-mid_x\n #if mid_x is closer to x_min but not close to the edge of the image -- at least 75 px\n if d_min<d_max and x_min>75:\n acol = int((frame_len/2)-(d_min+1))\n elif x_max<frame_len-75:\n acol = int((frame_len/2)+(d_max+1))\n else:\n acol = int((frame_len/2)-(d_min+1))\n anchorCol = norm[:,acol]\n else:\n anchorCol = norm[:,mid_x]\n shifts = [np.argmax(signal.correlate(norm[:,i],anchorCol,mode='same'))-int((frame.shape[0])/2) for i in range(frame_len)]\n\n #if onh detection is bad, bbox might be huge. The onh area should be less that 10% of the image (256*1024 pixels)\n if onh_info.area/(2**18) > 0.10:\n return shifts\n #old, changed 1-29-2018 because this is really about location, not size\n #if x_min<100 or x_max>902:\n #return shifts\n\n #This ensures that clean_shifts and clean_x are the same length and comes into play when the ONH is basically touching the\n #side of the image.\n #if the onh is too far to the right side of the frame, only use the left side info\n #fit a quadratic to get LOCAL curvature\n if x_max>=frame_len-100:\n #this uses the entire bscans to get the curvature, otherwise it will fit very poorly\n clean_x = np.arange(0,x_min,1)\n curve_fit_params = np.polyfit(clean_x, shifts[0:x_min],2)\n curve_fit = lambda x: curve_fit_params[0]*x**2 + curve_fit_params[1]*x + curve_fit_params[2]\n corrected_shifts = np.round(curve_fit(np.arange(x_min,x_max+1,1))).astype('int')\n clean_shifts = shifts\n clean_shifts[x_min:x_max+1]=corrected_shifts\n #if the onh is too far to the left side, only use right side info\n elif x_min<100:\n clean_x = np.arange(x_max+1,frame_len,1)\n curve_fit_params = np.polyfit(clean_x, shifts[x_max+1:frame_len],2)\n curve_fit = lambda x: curve_fit_params[0]*x**2 + curve_fit_params[1]*x + curve_fit_params[2]\n corrected_shifts = np.round(curve_fit(np.arange(x_min,x_max+1,1))).astype('int')\n clean_shifts = shifts\n clean_shifts[x_min:x_max+1]=corrected_shifts\n #Everything is normal, everyone is happy.\n else:\n #need to cut out onh, I don't think there is a way to index this to put it\n #directly in polyfit\n clean_shifts = np.array(shifts[0:x_min] + shifts[x_max+1:frame_len])\n clean_x = np.concatenate((np.arange(x_min-100,x_min,1),np.arange(x_max+1,x_max+101,1)))\n curve_fit_params = np.polyfit(clean_x, clean_shifts[x_min-100:x_min+100],3)\n curve_fit = lambda x: curve_fit_params[0]*x**3 + curve_fit_params[1]*x**2 + curve_fit_params[2]*x + curve_fit_params[3]\n #!!astype added 4-18-19 because floats throw an error when correcting shifts\n corrected_shifts = np.round(curve_fit(np.arange(x_min,x_max+1,1))).astype('int')\n clean_shifts = np.insert(clean_shifts, x_min+1, corrected_shifts)\n\n return list(clean_shifts)",
"def _get_normalized_flow_countrywide(x_sample):\n global win; win /= 3\n global nebr; nebr = 7 # nebr /= 3\n global norm_min; norm_min = norm_min * 1. / 3\n global MIN_FLOW_NORM; MIN_FLOW_NORM = MIN_FLOW_NORM * 1. / 3\n global MIN_MOVE_PIXEL; MIN_MOVE_PIXEL /= (6*6)\n \n prev_frame = norm_trans(x_sample[-2])\n next_frame = norm_trans(x_sample[-1])\n kernel_shape = (79, 79) # (477/6, 477/6)\n flow = cv2.calcOpticalFlowFarneback(prev_frame, next_frame, 0.5,3,win, 3, nebr, nebr/4, cv2.OPTFLOW_FARNEBACK_GAUSSIAN)\n \n # flow_norm = numpy.linalg.norm(flow, axis=2) # for numpy version >= 1.8\n flow_norm = np.sum(flow**2, axis=2)**(1./2) # for numpy version < 1.8\n \n kernel = np.ones(kernel_shape, np.float32)\n\n# num_moved_flows = numpy.sum(flow_norm>norm_min)\n num_moved_flows = cv2.filter2D((flow_norm>norm_min).astype('float32'), -1, kernel, borderType=cv2.BORDER_REPLICATE)\n\n# if num_moved_flows > MIN_MOVE_PIXEL:\n# flow_fliter = numpy.zeros(shape=flow.shape);\n# flow_fliter[:,:,0] = flow[:,:,0] * (flow_norm > norm_min)\n# flow_fliter[:,:,1] = flow[:,:,1] * (flow_norm > norm_min)\n# \n# flow_mean = numpy.sum(flow_fliter, axis=(0,1)) / num_moved_flows\n# else:\n# flow_mean = numpy.array([0,0])\n \n flow_filter = flow * (flow_norm > norm_min)[:, :, np.newaxis]\n flow_mean = np.zeros_like(flow)\n flow_mean[:,:,0] = cv2.filter2D(flow_filter[:,:,0], -1, kernel, borderType=cv2.BORDER_REPLICATE) / (num_moved_flows + 0.00001)\n flow_mean[:,:,1] = cv2.filter2D(flow_filter[:,:,1], -1, kernel, borderType=cv2.BORDER_REPLICATE) / (num_moved_flows + 0.00001)\n flow_mean = flow_mean * (num_moved_flows > MIN_MOVE_PIXEL)[:, :, np.newaxis]\n\n# flow_mean_norm = np.sum(flow_mean**2)**(1./2)\n# if flow_mean_norm > MIN_FLOW_NORM:\n# flow_norm = flow_norm.reshape((flow_norm.shape[0], flow_norm.shape[1], 1)) \n# flow = flow * (flow_norm < MIN_FLOW_NORM) * flow_mean_norm / flow_norm + flow * (flow_norm >= MIN_FLOW_NORM)\n flow_mean_norm = np.sum(flow_mean**2, axis=2)**(1./2)\n flow = flow * ((flow_norm < MIN_FLOW_NORM) * (flow_mean_norm > MIN_FLOW_NORM) * flow_mean_norm / (flow_norm + 0.000001))[:, :, np.newaxis] + \\\n flow * ((flow_norm >= MIN_FLOW_NORM) | (flow_mean_norm <= MIN_FLOW_NORM))[:, :, np.newaxis] \n return flow",
"def enframe(samples, winlen, winshift):\n # The window length is sampling_rate*window_length_in_ms\n length = len(samples)\n start_indices = np.arange(0, length, winshift)\n end_indices = np.arange(winlen, length, winlen - winshift)\n pairs = zip(start_indices, end_indices)\n\n output = [samples[i[0]: i[1]] for i in pairs]\n\n # myplot(output, 'Framing')\n\n return output",
"def sliding_hog_windows(self, image):\n # initialization\n image_height, image_width = 48, 48\n window_size = 24\n window_step = 6\n hog_windows = []\n for y in range(0, image_height, window_step):\n for x in range(0, image_width, window_step):\n window = image[y:y+window_size, x:x+window_size]\n hog_windows.extend(hog(window, orientations=8, pixels_per_cell=(8, 8),\n cells_per_block=(1, 1)))\n return hog_windows",
"def sliding_window_init(lane, binary_warped, draw=False):\n\n # Assuming you have created a warped binary image called \"binary_warped\"\n # Take a histogram of the bottom half of the image\n histogram1 = np.sum(binary_warped[binary_warped.shape[0] / 2:, :], axis=0)\n # Create an output image to draw on and visualize the result\n out_img = np.dstack((binary_warped, binary_warped, binary_warped)) * 255\n\n # Find the peak of the left and right halves of the histogram\n # These will be the starting point for the left and right lines\n midpoint = np.int(histogram1.shape[0] / 2)\n leftx_base = np.argmax(histogram1[:midpoint])\n rightx_base = np.argmax(histogram1[midpoint:]) + midpoint\n\n # Choose the number of sliding windows\n nwindows = 9\n # Set height of windows\n window_height = np.int(binary_warped.shape[0] / nwindows)\n # Identify the x and y positions of all nonzero pixels in the image\n nonzero = binary_warped.nonzero()\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n # Current positions to be updated for each window\n leftx_current = leftx_base\n rightx_current = rightx_base\n # Set the width of the windows +/- margin\n margin = 100\n # Set minimum number of pixels found to recenter window\n minpix = 50\n # Create empty lists to receive left and right lane pixel indices\n left_lane_inds = []\n right_lane_inds = []\n\n # Step through the windows one by one\n for window in range(nwindows):\n # Identify window boundaries in x and y (and right and left)\n win_y_low = binary_warped.shape[0] - (window + 1) * window_height\n win_y_high = binary_warped.shape[0] - window * window_height\n win_xleft_low = leftx_current - margin\n win_xleft_high = leftx_current + margin\n win_xright_low = rightx_current - margin\n win_xright_high = rightx_current + margin\n # Draw the windows on the visualization image\n cv2.rectangle(out_img, (win_xleft_low, win_y_low), (win_xleft_high, win_y_high), (0, 255, 0), 2)\n cv2.rectangle(out_img, (win_xright_low, win_y_low), (win_xright_high, win_y_high), (0, 255, 0), 2)\n # Identify the nonzero pixels in x and y within the window\n good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (\n nonzerox < win_xleft_high)).nonzero()[0]\n good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (\n nonzerox < win_xright_high)).nonzero()[0]\n # Append these indices to the lists\n left_lane_inds.append(good_left_inds)\n right_lane_inds.append(good_right_inds)\n # If you found > minpix pixels, recenter next window on their mean position\n if len(good_left_inds) > minpix:\n leftx_current = np.int(np.mean(nonzerox[good_left_inds]))\n if len(good_right_inds) > minpix:\n rightx_current = np.int(np.mean(nonzerox[good_right_inds]))\n\n # Concatenate the arrays of indices\n left_lane_inds = np.concatenate(left_lane_inds)\n right_lane_inds = np.concatenate(right_lane_inds)\n\n # Extract left and right line pixel positions\n leftx = nonzerox[left_lane_inds]\n lefty = nonzeroy[left_lane_inds]\n rightx = nonzerox[right_lane_inds]\n righty = nonzeroy[right_lane_inds]\n\n # Fit a second order polynomial to each\n if len(leftx) == 0:\n left_fit = lane.recent_left_fit\n else:\n left_fit = np.polyfit(lefty, leftx, 2)\n\n if len(rightx) == 0:\n right_fit = lane.recent_right_fit\n else:\n right_fit = np.polyfit(righty, rightx, 2)\n\n if draw:\n # Generate x and y values for plotting\n ploty = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])\n left_fitx = 
left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]\n right_fitx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]\n\n out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]\n out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]\n\n plt.imshow(out_img)\n plt.plot(left_fitx, ploty, color='yellow')\n plt.plot(right_fitx, ploty, color='yellow')\n plt.xlim(0, 1280)\n plt.ylim(720, 0)\n plt.show()\n plt.close()\n\n lane.recent_left_fit = left_fit\n lane.recent_right_fit = right_fit\n lane.detected = True\n\n return left_fit, right_fit",
"def shiftDetector(frame, onh_info=None):\n norm = frame/np.max(frame)#(2**16)\n anchorCol = norm[:,int((frame.shape[1])/2)]\n shifts = [np.argmax(signal.correlate(norm[:,i],anchorCol,mode='same'))-int((frame.shape[0])/2) for i in range(frame.shape[1])]\n \n return shifts",
"def apply_windows(slided_signals, window_type=\"hann\"):\n # length of each slided signal\n n = slided_signals.shape[-1]\n window = signal.get_window(window_type, n)\n windowed_signals = numpy.multiply(slided_signals, window)\n return windowed_signals",
"def stackingWindows():\n space = 50\n offset = 70\n cv2.moveWindow(\"Original image\", space, space)\n cv2.moveWindow(\"Keypoints original\", space, hsize + space + offset)\n cv2.moveWindow(\"Color matched\", wsize + space, space)\n cv2.moveWindow(\"Keypoints Dark\", wsize + space, hsize + space + offset)",
"def enframe(samples, winlen, winshift, padded=False):\n\n shift = (winlen - winshift)\n N_samples = samples.size\n N_frames = 1 + (N_samples - winlen) / shift\n\n if padded:\n # Attach as much zeros to cover other window\n sample = np.append(samples, np.zeros(winlen))\n output = np.zeros((N_frames + 1, winlen))\n\n for index in xrange(N_frames + 1):\n output[index] = sample[index*shift:index*shift + winlen]\n\n else:\n output = np.zeros((N_frames, winlen))\n\n for index in xrange(N_frames):\n output[index] = samples[index*shift:index*shift + winlen]\n\n return output",
"def forward(self, xs, ilens, masks):\n if isinstance(self.embed, Conv2dSubsampling):\n xs, masks = self.embed(xs, masks)\n else:\n xs = self.embed(xs)\n xs, _ = self.encoders(xs, masks)\n if self.normalize_before:\n xs = self.after_norm(xs)\n hlens = [xs.size(1) for i in range(xs.size(0))]\n return xs, hlens",
"def _process(self, X):\n # 周波数毎に実施する\n ones = np.ones(self.L.shape[1])\n\n # 初期のポジションベクトル\n n_channels = np.shape(X)[0]\n n_freq_bins = np.shape(X)[1]\n n_frames = np.shape(X)[2]\n\n d = None\n n_mic_pair = 0\n # for m1 in range(1):\n\n step = 2\n\n mic_pairs = self.mic_pairs\n # mic_pairs=[[m1,m2] for m1 in range(n_channels-1) for m2 in range(m1+1,np.minimum(m1+step+1,n_channels)) ]\n mic_pairs = np.array(mic_pairs)\n\n n_mic_pair = np.shape(mic_pairs)[0]\n d = np.array(self.mic_positions[mic_pairs[:, 1]]) - np.array(\n self.mic_positions[mic_pairs[:, 0]]\n )\n # d: n_mic_pair,dim\n\n # for the linear surrogate function, we need the smallest eigenvalue\n # of the covariance matrix of the microphone pairs\n if self.mm_type == SurrogateType.Linear:\n mic_diff_cov = d.T @ d\n mic_diff_cov_ev_max = np.linalg.eigvalsh(mic_diff_cov)[-1]\n else:\n mic_diff_cov_ev_max = None\n\n # 時間周波数毎の初期のポジションベクトル\n position_vector = np.zeros(shape=(n_freq_bins, n_frames, self.dim))\n\n X_temp = X[:, self.freq_bins, :]\n\n sigma = np.angle(X_temp[mic_pairs[:, 1], ...] / X_temp[mic_pairs[:, 0], ...])\n sigma = np.transpose(sigma, (1, 2, 0))\n\n sigma = np.where(np.abs(sigma) < 1.0e-18, np.zeros_like(sigma) + 1.0e-18, sigma)\n z = np.zeros(shape=(n_freq_bins, n_frames, n_mic_pair), dtype=np.int)\n x = np.random.normal(size=n_freq_bins * n_frames * n_mic_pair)\n x = np.reshape(x, newshape=(n_freq_bins, n_frames, n_mic_pair))\n # 初期化\n mode_vec = self.rough_mode_vec[self.freq_bins, :, :]\n mode_vec = np.conjugate(mode_vec)\n\n # Evaluation of the cost function on rough grid\n XX = X[:, self.freq_bins, :].transpose([1, 2, 0]) # (freq, time, chan)\n mv = mode_vec.transpose([0, 2, 1]) # (freq, grid, chan)\n prod = (mv[:, None, :, :] @ XX[:, :, :, None])[..., 0]\n\n amp = np.abs(prod)\n # ft\n index = np.argmax(amp, axis=-1)\n org_shape = np.shape(index)\n index = np.reshape(index, [-1])\n\n # indexに相当する方向を取る\n if self.dim == 2:\n rough_azimuth_recon = self.rough_grid.azimuth[index]\n # ダミー\n rough_colatitude_recon = np.zeros_like(rough_azimuth_recon) + np.pi\n elif self.dim == 3:\n rough_azimuth_recon = self.rough_grid.azimuth[index]\n rough_colatitude_recon = self.rough_grid.colatitude[index]\n\n doas = np.concatenate(\n (\n rough_colatitude_recon[:, None], # colatitude [0, pi]\n rough_azimuth_recon[:, None], # azimuth [0, 2 pi]\n ),\n axis=-1,\n )\n distance = 3.0\n\n # source_locations: 3, n_frames\n source_locations = geom.spherical_to_cartesian(doa=doas, distance=distance)\n source_locations = np.reshape(source_locations, (3, org_shape[0], org_shape[1]))\n\n position_vector[self.freq_bins, :, :] = np.transpose(\n source_locations[: self.dim, :, :], (1, 2, 0)\n )\n\n size = np.einsum(\"fti,fti->ft\", np.conjugate(position_vector), position_vector)\n size = np.sqrt(size)[..., np.newaxis]\n position_vector = position_vector / np.maximum(size, 1.0e-18)\n\n use_clustering = False\n cluster_index = np.random.randint(0, self.num_src, size=n_freq_bins * n_frames)\n cluster_index = np.reshape(cluster_index, (n_freq_bins, n_frames))\n cluster_center = np.random.normal(size=self.num_src * self.dim)\n cluster_center = np.reshape(cluster_center, newshape=(self.num_src, self.dim))\n size = np.einsum(\"ci,ci->c\", np.conjugate(cluster_center), cluster_center)\n size = np.sqrt(size)[..., np.newaxis]\n cluster_center = cluster_center / np.maximum(size, 1.0e-18)\n if use_clustering == True:\n # pを作る\n for k in self.freq_bins:\n for l in range(n_frames):\n position_vector[k, l, :] = cluster_center[cluster_index[k, l], :]\n\n 
est_p = position_vector[self.freq_bins, ...]\n z = z[self.freq_bins, ...]\n x = x[self.freq_bins, ...]\n freqs = self.freq_hz\n cluster_index = cluster_index[self.freq_bins, ...]\n\n silent_mode = True\n freqs_d = np.einsum(\"f,pi->fpi\", freqs, d)\n for i in range(self.n_mm_itertaions):\n #\n (\n org_cost_0,\n org_cost_1,\n org_cost_2,\n org_cost_3,\n cost_0,\n cost_1,\n cost_2,\n cost_3,\n est_p,\n z,\n x,\n ) = doa_estimation_one_iteration(\n freqs_d,\n est_p,\n sigma,\n z,\n x,\n cluster_index=cluster_index,\n cluster_center=cluster_center,\n iter_num2=self.rooting_n_iter,\n silent_mode=silent_mode,\n surrogate=self.mm_type,\n mic_diff_cov_ev_max=mic_diff_cov_ev_max,\n freqs=freqs,\n mic_diff=d,\n )\n if silent_mode == False:\n print(\"Cost function:\", org_cost_0)\n # est_pから\n # fti\n position_vector[self.freq_bins, ...] = est_p\n\n size = np.einsum(\"fti,fti->ft\", np.conjugate(position_vector), position_vector)\n size = np.sqrt(size)[..., np.newaxis]\n position_vector = position_vector / np.maximum(size, 1.0e-18)\n\n # gridを探す\n\n # position_vectorに相当する方向を取る\n if self.dim == 2:\n azimuth_recon = self.grid.azimuth\n # ダミー\n colatitude_recon = np.zeros_like(azimuth_recon) + np.pi\n elif self.dim == 3:\n azimuth_recon = self.grid.azimuth\n colatitude_recon = self.grid.colatitude\n\n doas = np.concatenate(\n (\n colatitude_recon[:, None], # colatitude [0, pi]\n azimuth_recon[:, None], # azimuth [0, 2 pi]\n ),\n axis=-1,\n )\n distance = 3.0\n # source_locations: 3, n_grid_num\n grid_locations = geom.spherical_to_cartesian(doa=doas, distance=distance)\n size = np.einsum(\"in,in->n\", np.conjugate(grid_locations), grid_locations)\n size = np.sqrt(size)[np.newaxis, ...]\n grid_locations = grid_locations / np.maximum(size, 1.0e-18)\n\n if not self.use_kd_tree:\n grid_index_buf = []\n for k in self.freq_bins:\n prod = np.einsum(\"in,ti->tn\", grid_locations, position_vector[k, ...])\n grid_index = np.argmax(prod, axis=-1)\n grid_index_buf.append(grid_index)\n grid_index_buf = np.array(grid_index_buf)\n\n spire_cost = np.zeros(self.grid.n_points)\n for n in range(self.grid.n_points):\n spire_cost[n] = spire_cost[n] + np.count_nonzero(grid_index_buf == n)\n\n else:\n\n # Same code, but with a kd-tree (Robin version)\n dim = position_vector.shape[-1]\n pv = position_vector[self.freq_bins, ...].reshape((-1, dim))\n _, nn = self.tree.query(pv)\n bin_indices, bin_count = np.unique(nn, return_counts=True)\n\n spire_cost = np.zeros(self.grid.n_points, dtype=np.float)\n spire_cost[bin_indices] = bin_count\n\n self.grid.set_values(spire_cost)",
"def forward(self, y, mask_y, h):\n y = y.transpose(1, 0) # batch x T x dim\n\n mask_y = mask_y.transpose(1, 0) # batch x T\n Wy = torch.bmm(y, self.W_y.unsqueeze(0).expand(y.size(0), *self.W_y.size())) # batch x T x dim\n Wh = torch.mm(h, self.W_h) # batch x dim\n\n M = torch.tanh(Wy + Wh.unsqueeze(1).expand(Wh.size(0), y.size(1), Wh.size(1))) # batch x T x dim\n alpha = torch.bmm(M, self.W_alpha.unsqueeze(0).expand(y.size(0), *self.W_alpha.size())).squeeze(-1) # batch x T\n\n alpha = alpha + (-1000.0 * (1. - mask_y)) # To ensure probability mass doesn't fall on non tokens\n alpha = F.softmax(alpha, dim=1)\n r = torch.bmm(alpha.unsqueeze(1), y).squeeze(1) # batch x dim\n\n h_star = self.combine_last(r, h)\n\n return h_star, alpha",
"def highPassFilter(img, window=30):\n gray = grayscale(img)\n\tf = np.fft.fft2(gray)\n\tfshift = np.fft.fftshift(f)\n\trows, cols = gray.shape\n\tcrow, ccol = rows/2, cols/2\n\tfshift[crow-window:crow+window, ccol-window:ccol+window] = 0\n\tf_ishift = np.fft.ifftshift(fshift)\n\timg_back = np.fft.ifft2(f_ishift)\n\timg_back = np.abs(img_back)\n\treturn img_back",
"def slidingWindow(self, img):\n # 720 x 1280\n # y --> 720 (0)\n # x --> 1280 (1)\n\n sizeY, sizeX = img.shape\n\n outputImg = np.dstack((img, img, img)) * 255\n\n # Compute histogram for the bottom half of the image along the x-axis\n hist = np.sum(img[sizeY//2:,:], axis=0)\n\n # Height of each window\n window_height = np.int(sizeY // self.nwindows)\n\n # Check indexes != 0\n nonzero = np.nonzero(img)\n nonzeroInY = np.array(nonzero[0])\n nonzeroInX = np.array(nonzero[1])\n\n # Split the image in two and set the centers\n leftXCenter = np.argmax(hist[:sizeX // 2])\n rightXCenter = np.argmax(hist[sizeX // 2:]) + sizeX // 2\n\n # Set the x-center of the boxes, which will be corrected over time\n leftXCurrent = leftXCenter\n rightXCurrent = rightXCenter\n \n # Lists to save indexes of pixel inside the rectangle\n leftSidePixels = []\n rightSidePixels = []\n\n for window in range(self.nwindows):\n # Make the boxes\n # Calculate the Y coords\n yLow = sizeY - (1 + window) * window_height\n yHigh = sizeY - window * window_height\n \n # Calculate the X coords for the left and right side\n xLowLeft = leftXCurrent - self.margin\n xHighLeft = leftXCurrent + self.margin\n xLowRight = rightXCurrent - self.margin\n xHighRight = rightXCurrent + self.margin\n\n # Draw rectangle for the left lane\n cv2.rectangle(outputImg, (xLowLeft, yLow), (xHighLeft, yHigh), (0, 255, 0), 3)\n \n # Draw rectangle for the right lane\n cv2.rectangle(outputImg, (xLowRight, yLow), (xHighRight, yHigh), (0, 255, 0), 3)\n\n # Check if pixels's values != 0 are inside the window (rectanle)\n\n # Check if the indexes are in the boxes and their values != 0\n leftSidePixelsInsideBox = ((nonzeroInX >= xLowLeft) & (nonzeroInX <= xHighLeft) & (nonzeroInY >= yLow) & (nonzeroInY <= yHigh)).nonzero()[0]\n rightSidePixelsInsideBox = ((nonzeroInX >= xLowRight) & (nonzeroInX <=xHighRight) & (nonzeroInY >= yLow) & (nonzeroInY <= yHigh)).nonzero()[0]\n\n leftSidePixels.append(leftSidePixelsInsideBox)\n rightSidePixels.append(rightSidePixelsInsideBox)\n\n if len(leftSidePixelsInsideBox) > self.minpixels:\n leftXCurrent = np.int(np.mean(nonzeroInX[leftSidePixelsInsideBox]))\n\n if len(rightSidePixelsInsideBox) > self.minpixels:\n rightXCurrent = np.int(np.mean(nonzeroInX[rightSidePixelsInsideBox]))\n\n try:\n leftSidePixels = np.concatenate(leftSidePixels)\n rightSidePixels = np.concatenate(rightSidePixels)\n except ValueError:\n # Avoids an error if the above is not implemented fully\n pass\n\n leftLaneY = nonzeroInY[leftSidePixels]\n leftLaneX = nonzeroInX[leftSidePixels]\n rightLaneY = nonzeroInY[rightSidePixels]\n rightLaneX = nonzeroInX[rightSidePixels]\n\n # Get the coefficients (A, B, C)\n leftFit = np.polyfit(leftLaneX, leftLaneY, 2)\n rightFit = np.polyfit(rightLaneX, rightLaneY, 2)\n \n # Generate x values. 
These will be the y for plotting\n ploty = np.linspace(0, outputImg.shape[0]-1, outputImg.shape[0])\n \n try:\n leftFitX = ploty*leftFit[0]**2 + ploty*leftFit[1] + leftFit[2]\n rightFitX = ploty*rightFit[0]**2 + ploty*rightFit[1] + leftFit[2]\n \n except TypeError:\n # In case there is no C\n leftFitX = ploty*leftFit[0]**2 + ploty*leftFit[1]\n rightFitX = ploty*rightFit[0]**2 + ploty*rightFit[1]\n\n windowImg = np.zeros_like(outputImg)\n\n outputImg[leftLaneY, leftLaneX] = [255, 0, 0]\n outputImg[rightLaneY, rightLaneX] = [0, 0, 255]\n\n leftLineWindow1 = np.array([np.transpose(np.vstack([leftFitX - self.margin, ploty]))])\n leftLineWindow2 = np.array([np.flipud(np.transpose(np.vstack([leftFitX + self.margin, ploty])))])\n leftLinePts = np.hstack((leftLineWindow1, leftLineWindow2))\n \n rightLineWindow1 = np.array([np.transpose(np.vstack([rightFitX - self.margin, ploty]))])\n rightLineWindow2 = np.array([np.flipud(np.transpose(np.vstack([rightFitX + self.margin, ploty])))])\n rightLinePts = np.hstack((rightLineWindow1, rightLineWindow2))\n\n cv2.fillPoly(windowImg, np.int_([leftLinePts]), (0, 255, 0))\n cv2.fillPoly(windowImg, np.int_([rightLinePts]), (0, 255, 0))\n result = cv2.addWeighted(outputImg, 1, windowImg, 0.3, 0)\n\n plt.plot(leftFitX, ploty, color = 'yellow')\n plt.plot(rightFitX, ploty, color = 'yellow')\n\n # leftFitX -> Formula for the left lane\n # rightFitX -> Formula for the right lane\n # leftLaneX -> X - index inside the left window and their values != 0\n # rightLaneX -> X - index inside the right window and their values != 0\n return leftFitX, leftLaneX, rightFitX, rightLaneX, result",
"def apply_sliding_windows(self, binary_warped, leftx_base, rightx_base):\r\n # Choose the number of sliding windows\r\n nwindows = 9\r\n # Set height of windows\r\n window_height = np.int(binary_warped.shape[0] / nwindows)\r\n # Identify the x and y positions of all nonzero pixels in the image\r\n nonzero = binary_warped.nonzero()\r\n nonzeroy, nonzerox = np.array(nonzero[0]), np.array(nonzero[1])\r\n # Current positions to be updated for each window\r\n leftx_current, rightx_current = leftx_base, rightx_base\r\n # Set the width of the windows +/- margin\r\n margin = 100\r\n # Set minimum number of pixels found to recenter window\r\n minpix = 50\r\n # Create empty lists to receive left and right lane pixel indices\r\n left_lane_inds, right_lane_inds = [], []\r\n\r\n # Step through the windows one by one\r\n # Create an output image to draw on and visualize the result\r\n out_img = np.dstack((binary_warped, binary_warped, binary_warped)) * 255\r\n for window in range(nwindows):\r\n # Identify window boundaries in x and y (and right and left)\r\n win_y_low = binary_warped.shape[0] - (window + 1) * window_height\r\n win_y_high = binary_warped.shape[0] - window * window_height\r\n win_xleft_low, win_xleft_high = leftx_current - margin, leftx_current + margin\r\n win_xright_low, win_xright_high = rightx_current - margin, rightx_current + margin\r\n # Draw the windows on the visualization image\r\n if self.debug:\r\n cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),(0,255,0), 2)\r\n cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),(0,255,0), 2)\r\n # Identify the nonzero pixels in x and y within the window\r\n good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (\r\n nonzerox < win_xleft_high)).nonzero()[0]\r\n good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (\r\n nonzerox < win_xright_high)).nonzero()[0]\r\n # Append these indices to the lists\r\n left_lane_inds.append(good_left_inds)\r\n right_lane_inds.append(good_right_inds)\r\n # If you found > minpix pixels, recenter next window on their mean position\r\n if len(good_left_inds) > minpix:\r\n leftx_current = np.int(np.mean(nonzerox[good_left_inds]))\r\n if len(good_right_inds) > minpix:\r\n rightx_current = np.int(np.mean(nonzerox[good_right_inds]))\r\n if self.debug:\r\n cv_rgb = cv2.cvtColor(out_img.astype(np.uint8), cv2.COLOR_BGR2RGB)\r\n plt.imshow(cv_rgb)\r\n #cv2.imshow('Sliding window computation',out_img)\r\n # Concatenate the arrays of indices\r\n left_lane_inds = np.concatenate(left_lane_inds)\r\n right_lane_inds = np.concatenate(right_lane_inds)\r\n if self.debug:\r\n self.fit_dict['left_lane_inds'] = left_lane_inds\r\n self.fit_dict['right_lane_inds'] = right_lane_inds\r\n\r\n # Extract left and right line pixel positions\r\n leftx, lefty = nonzerox[left_lane_inds], nonzeroy[left_lane_inds]\r\n rightx, righty = nonzerox[right_lane_inds], nonzeroy[right_lane_inds]\r\n return leftx, lefty, rightx, righty",
"def hpf(im, goal, window, j=0):\r\n\r\n # Fourier Transform\r\n F_im = dip.fft2(im)\r\n h, w = im.shape\r\n preset = False\r\n\r\n # Was there scope provided\r\n if j != 0:\r\n scope = np.array([j])\r\n preset = True\r\n else:\r\n scope = range(0, h)\r\n\r\n # Searching for the appropriate cutoff frequency\r\n for i in scope:\r\n freq_square = i\r\n\r\n # Error Check\r\n q = int(freq_square / 2)\r\n if q > w: # Error code\r\n print(\"Error! The filter width is larger than the transform!\")\r\n\r\n # Take a 1/4 square from each quadrant\r\n F_im[0:q, 0:q] = 0 # top left\r\n F_im[0:q, w - q:w] = 0 # top right\r\n F_im[h - q:h, 0:q] = 0 # bottom left\r\n F_im[h - q:h, w - q:w] = 0 # bottom right\r\n\r\n # Take real part only\r\n im_new = np.abs(dip.ifft2(F_im))\r\n\r\n # Loop if target frequency isn't provided\r\n if preset == False:\r\n if (np.mean(im_new) - goal) < window:\r\n return im_new, i\r\n else:\r\n return im_new, i",
"def clean_window(self) -> None:\r\n prune_before = time.time() - self.window_size\r\n while self.frames_rec:\r\n left = self.frames_rec.popleft()\r\n if left[1] >= prune_before:\r\n self.frames_rec.appendleft(left)\r\n break\r\n self.sum_frames_rec -= left[0]\r\n\r\n while self.frames_proc:\r\n left = self.frames_proc.popleft()\r\n if left[1] >= prune_before:\r\n self.frames_proc.appendleft(left)\r\n break\r\n self.sum_frames_proc -= left[0]"
] |
[
"0.6625369",
"0.6422833",
"0.6392129",
"0.5698724",
"0.561408",
"0.5579971",
"0.5555758",
"0.549854",
"0.54475296",
"0.5434125",
"0.5420006",
"0.5349881",
"0.53259605",
"0.5276302",
"0.5256042",
"0.5244462",
"0.524319",
"0.5211385",
"0.51988655",
"0.5157169",
"0.5148183",
"0.5147728",
"0.5113871",
"0.50943315",
"0.5087307",
"0.507836",
"0.5070803",
"0.50641507",
"0.5056998",
"0.5024736"
] |
0.7515329
|
0
|
Copy the basic env file and config file to a tmp_path.
|
from pathlib import Path

def copy_basic_fixtures(cfngin_fixtures: Path, tmp_path: Path) -> None:
    """Copy the basic env file and config file to a tmp_path."""
    # copy_fixture is a helper assumed to be provided by the surrounding test suite.
    copy_fixture(
        src=cfngin_fixtures / "envs" / "basic.env", dest=tmp_path / "test-us-east-1.env"
    )
    copy_fixture(
        src=cfngin_fixtures / "configs" / "basic.yml", dest=tmp_path / "basic.yml"
    )
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def tmp_configuration_copy(chmod=0o600, include_env=True, include_cmds=True):\n cfg_dict = conf.as_dict(\n display_sensitive=True, raw=True, include_cmds=include_cmds, include_env=include_env\n )\n temp_fd, cfg_path = mkstemp()\n\n with os.fdopen(temp_fd, \"w\") as temp_file:\n # Set the permissions before we write anything to it.\n if chmod is not None and not IS_WINDOWS:\n os.fchmod(temp_fd, chmod)\n json.dump(cfg_dict, temp_file)\n\n return cfg_path",
"def logging_conf_tmp_file_path(tmp_path_factory: pytest.TempPathFactory) -> Path:\n tmp_dir = tmp_path_factory.mktemp(\"tmp_log\")\n shutil.copy(Path(logging_conf_module.__file__), Path(f\"{tmp_dir}/tmp_log.py\"))\n return tmp_dir",
"def gunicorn_conf_tmp_file_path(\n gunicorn_conf_tmp_path: Path,\n monkeypatch: pytest.MonkeyPatch,\n tmp_path_factory: pytest.TempPathFactory,\n) -> Path:\n tmp_file = Path(f\"{gunicorn_conf_tmp_path}/gunicorn_conf.py\")\n shutil.copy(Path(gunicorn_conf_module.__file__), tmp_file)\n monkeypatch.setenv(\"GUNICORN_CONF\", str(tmp_file))\n assert os.getenv(\"GUNICORN_CONF\", str(tmp_file))\n return tmp_file",
"def make_temp_file():\n global TEST_DATA_PATH\n TEST_DATA_PATH = tempfile.mkstemp()",
"def test_write_config(default_config, tmp_path):\n testpath = Path(tmp_path, \"write_config\")\n testpath.mkdir()\n abcconfig.write_config(default_config, configpath=testpath)\n assert Path(testpath, \"config.yml\").exists()",
"def _prepare(self):\n logging.info('-> copy configuration...')\n path_cofig = self.params['path_config_bUnwarpJ']\n shutil.copy(path_cofig, os.path.join(self.params['path_exp'],\n os.path.basename(path_cofig)))\n if 'path_config_IJ_SIFT' in self.params:\n path_cofig = self.params['path_config_IJ_SIFT']\n shutil.copy(path_cofig, os.path.join(self.params['path_exp'],\n os.path.basename(path_cofig)))\n if 'path_config_IJ_MOPS' in self.params:\n path_cofig = self.params['path_config_IJ_MOPS']\n shutil.copy(path_cofig, os.path.join(self.params['path_exp'],\n os.path.basename(path_cofig)))",
"def backup_tempest_config(conf_file, res_dir):\n if not os.path.exists(res_dir):\n os.makedirs(res_dir)\n shutil.copyfile(conf_file,\n os.path.join(res_dir, 'tempest.conf'))",
"def prepare_environment(base_path):\n if os.path.exists(base_path):\n shutil.rmtree(base_path)\n os.makedirs(base_path)",
"def create_temp_env_directory():\n return tempfile.mkdtemp(prefix=\"spack-\")",
"def setup(self, tmp_path):\n create_users_file(tmp_path)\n create_jobs_file(tmp_path)",
"def mv_properties(self):\n f = '/coretemp/coretemp.properties'\n b = os.getcwd()\n shutil.copy2(b+f, '/etc/')",
"def freeze(self, tmp_dir):\n for sfile in self.secrets():\n src_file = hard_path(sfile, self.opt.secrets)\n if not os.path.exists(src_file):\n raise aomi.exceptions.IceFile(\"%s secret not found at %s\" %\n (self, src_file))\n\n dest_file = \"%s/%s\" % (tmp_dir, sfile)\n dest_dir = os.path.dirname(dest_file)\n if not os.path.isdir(dest_dir):\n os.mkdir(dest_dir, 0o700)\n\n shutil.copy(src_file, dest_file)\n log(\"Froze %s %s\" % (self, sfile), self.opt)",
"def sync_local_fabric_env(self):\n env.sync_filename = '/tmp/{0}_env.txt'.format(time.time())\n env_copy = self.env\n env_copy.use_ssh_config = False\n env_copy.host = False\n env_copy.host_string = False\n env_copy.local_deployment = True\n # TODO: add context from each need to repopulate\n with self.file.tmpfile(self.to_json(env_copy, cls=SilentEncoder)) as f:\n self.up(f.name, env.sync_filename)",
"def prepare_environment(base_path):\n shutil.rmtree(base_path, ignore_errors=True)\n if not os.path.isdir(base_path):\n os.makedirs(base_path)",
"def CreateTempFileFromTestcase(\n tempdir: pathlib.Path, tc: testcase.Testcase\n) -> pathlib.Path:\n path = tempdir / f\"{tc.id}.cl\"\n with open(path, \"w\") as f:\n f.write(tc.inputs[\"src\"])\n return path",
"def test_env_file(self, tmp_path: Path) -> None:\n test_env = tmp_path / \"test.env\"\n test_env.write_text(\"test_value: test\")\n\n result = CFNgin(ctx=self.get_context(), sys_path=tmp_path)\n assert result.env_file[\"test_value\"] == \"test\"\n\n test_us_east_1 = tmp_path / \"test-us-east-1.env\"\n test_us_east_1.write_text(\"test_value: test-us-east-1\")\n\n test_us_west_2 = tmp_path / \"test-us-west-2.env\"\n test_us_west_2.write_text(\"test_value: test-us-west-2\")\n\n lab_ca_central_1 = tmp_path / \"lab-ca-central-1.env\"\n lab_ca_central_1.write_text(\"test_value: lab-ca-central-1\")\n\n result = CFNgin(ctx=self.get_context(), sys_path=tmp_path)\n assert result.env_file[\"test_value\"] == \"test-us-east-1\"\n\n result = CFNgin(ctx=self.get_context(region=\"us-west-2\"), sys_path=tmp_path)\n assert result.env_file[\"test_value\"] == \"test-us-west-2\"\n\n result = CFNgin(\n ctx=self.get_context(name=\"lab\", region=\"ca-central-1\"), sys_path=tmp_path\n )\n assert result.env_file[\"test_value\"] == \"lab-ca-central-1\"",
"def copy_tmp_file(self, dst):\n if dst and self.file_exists(self.tmp_file):\n shutil.copyfile(self.tmp_file, dst)",
"def app_module_tmp_path(tmp_path_factory: pytest.TempPathFactory) -> Path:\n tmp_dir = tmp_path_factory.mktemp(\"app\")\n shutil.copytree(Path(pre_start_module.__file__).parent, Path(f\"{tmp_dir}/tmp_app\"))\n return tmp_dir",
"def create_config_file(original_file, copy_file):\n copy(original_file, copy_file)",
"def create_dir(self):\n\n os.makedirs(self.path)\n\n instance_config_dir = p.abspath(p.join(self.path, \"configs\"))\n os.makedirs(instance_config_dir)\n\n print(\n f\"Copy common default production configuration from {self.base_config_dir}. Files: {self.main_config_name}, {self.users_config_name}\"\n )\n\n shutil.copyfile(\n p.join(self.base_config_dir, self.main_config_name),\n p.join(instance_config_dir, self.main_config_name),\n )\n shutil.copyfile(\n p.join(self.base_config_dir, self.users_config_name),\n p.join(instance_config_dir, self.users_config_name),\n )\n\n logging.debug(\"Create directory for configuration generated in this helper\")\n # used by all utils with any config\n conf_d_dir = p.abspath(p.join(instance_config_dir, \"conf.d\"))\n os.mkdir(conf_d_dir)\n\n logging.debug(\"Create directory for common tests configuration\")\n # used by server with main config.xml\n self.config_d_dir = p.abspath(p.join(instance_config_dir, \"config.d\"))\n os.mkdir(self.config_d_dir)\n users_d_dir = p.abspath(p.join(instance_config_dir, \"users.d\"))\n os.mkdir(users_d_dir)\n dictionaries_dir = p.abspath(p.join(instance_config_dir, \"dictionaries\"))\n os.mkdir(dictionaries_dir)\n extra_conf_dir = p.abspath(p.join(instance_config_dir, \"extra_conf.d\"))\n os.mkdir(extra_conf_dir)\n\n def write_embedded_config(name, dest_dir, fix_log_level=False):\n with open(p.join(HELPERS_DIR, name), \"r\") as f:\n data = f.read()\n data = data.replace(\"clickhouse\", self.config_root_name)\n if fix_log_level:\n data = data.replace(\"<level>test</level>\", \"<level>trace</level>\")\n with open(p.join(dest_dir, name), \"w\") as r:\n r.write(data)\n\n logging.debug(\"Copy common configuration from helpers\")\n # The file is named with 0_ prefix to be processed before other configuration overloads.\n if self.copy_common_configs:\n write_embedded_config(\n \"0_common_instance_config.xml\",\n self.config_d_dir,\n self.with_installed_binary,\n )\n\n write_embedded_config(\"0_common_instance_users.xml\", users_d_dir)\n if (\n os.environ.get(\"CLICKHOUSE_USE_NEW_ANALYZER\") is not None\n and self.allow_analyzer\n ):\n write_embedded_config(\"0_common_enable_analyzer.xml\", users_d_dir)\n\n if len(self.custom_dictionaries_paths):\n write_embedded_config(\"0_common_enable_dictionaries.xml\", self.config_d_dir)\n\n logging.debug(\"Generate and write macros file\")\n macros = self.macros.copy()\n macros[\"instance\"] = self.name\n with open(p.join(conf_d_dir, \"macros.xml\"), \"w\") as macros_config:\n macros_config.write(self.dict_to_xml({\"macros\": macros}))\n\n # Put ZooKeeper config\n if self.with_zookeeper:\n shutil.copy(self.zookeeper_config_path, conf_d_dir)\n\n if self.with_secrets:\n if self.with_kerberos_kdc:\n base_secrets_dir = self.cluster.instances_dir\n else:\n base_secrets_dir = self.path\n from_dir = self.secrets_dir\n to_dir = p.abspath(p.join(base_secrets_dir, \"secrets\"))\n logging.debug(f\"Copy secret from {from_dir} to {to_dir}\")\n shutil.copytree(\n self.secrets_dir,\n p.abspath(p.join(base_secrets_dir, \"secrets\")),\n dirs_exist_ok=True,\n )\n\n if self.with_coredns:\n shutil.copytree(\n self.coredns_config_dir, p.abspath(p.join(self.path, \"coredns_config\"))\n )\n\n # Copy config.d configs\n logging.debug(\n f\"Copy custom test config files {self.custom_main_config_paths} to {self.config_d_dir}\"\n )\n for path in self.custom_main_config_paths:\n shutil.copy(path, self.config_d_dir)\n\n # Copy users.d configs\n for path in self.custom_user_config_paths:\n shutil.copy(path, 
users_d_dir)\n\n # Copy dictionaries configs to configs/dictionaries\n for path in self.custom_dictionaries_paths:\n shutil.copy(path, dictionaries_dir)\n for path in self.custom_extra_config_paths:\n shutil.copy(path, extra_conf_dir)\n\n db_dir = p.abspath(p.join(self.path, \"database\"))\n logging.debug(f\"Setup database dir {db_dir}\")\n if self.clickhouse_path_dir is not None:\n logging.debug(f\"Database files taken from {self.clickhouse_path_dir}\")\n shutil.copytree(self.clickhouse_path_dir, db_dir)\n logging.debug(\n f\"Database copied from {self.clickhouse_path_dir} to {db_dir}\"\n )\n else:\n os.mkdir(db_dir)\n\n logs_dir = p.abspath(p.join(self.path, \"logs\"))\n logging.debug(f\"Setup logs dir {logs_dir}\")\n os.mkdir(logs_dir)\n self.logs_dir = logs_dir\n\n depends_on = []\n\n if self.with_mysql_client:\n depends_on.append(self.cluster.mysql_client_host)\n\n if self.with_mysql:\n depends_on.append(\"mysql57\")\n\n if self.with_mysql8:\n depends_on.append(\"mysql80\")\n\n if self.with_mysql_cluster:\n depends_on.append(\"mysql57\")\n depends_on.append(\"mysql2\")\n depends_on.append(\"mysql3\")\n depends_on.append(\"mysql4\")\n\n if self.with_postgres_cluster:\n depends_on.append(\"postgres2\")\n depends_on.append(\"postgres3\")\n depends_on.append(\"postgres4\")\n\n if self.with_kafka:\n depends_on.append(\"kafka1\")\n depends_on.append(\"schema-registry\")\n\n if self.with_kerberized_kafka:\n depends_on.append(\"kerberized_kafka1\")\n\n if self.with_kerberos_kdc:\n depends_on.append(\"kerberoskdc\")\n\n if self.with_kerberized_hdfs:\n depends_on.append(\"kerberizedhdfs1\")\n\n if self.with_rabbitmq:\n depends_on.append(\"rabbitmq1\")\n\n if self.with_nats:\n depends_on.append(\"nats1\")\n\n if self.with_zookeeper:\n depends_on.append(\"zoo1\")\n depends_on.append(\"zoo2\")\n depends_on.append(\"zoo3\")\n\n if self.with_minio:\n depends_on.append(\"minio1\")\n\n if self.with_azurite:\n depends_on.append(\"azurite1\")\n\n self.cluster.env_variables.update(self.env_variables)\n\n odbc_ini_path = \"\"\n if self.odbc_ini_path:\n self._create_odbc_config_file()\n odbc_ini_path = \"- \" + self.odbc_ini_path\n\n entrypoint_cmd = self.clickhouse_start_command\n\n if self.stay_alive:\n entrypoint_cmd = self.clickhouse_stay_alive_command.replace(\n \"{main_config_file}\", self.main_config_name\n )\n else:\n entrypoint_cmd = (\n \"[\"\n + \", \".join(map(lambda x: '\"' + x + '\"', entrypoint_cmd.split()))\n + \"]\"\n )\n\n logging.debug(\"Entrypoint cmd: {}\".format(entrypoint_cmd))\n\n networks = app_net = ipv4_address = ipv6_address = net_aliases = net_alias1 = \"\"\n if (\n self.ipv4_address is not None\n or self.ipv6_address is not None\n or self.hostname != self.name\n ):\n networks = \"networks:\"\n app_net = \"default:\"\n if self.ipv4_address is not None:\n ipv4_address = \"ipv4_address: \" + self.ipv4_address\n if self.ipv6_address is not None:\n ipv6_address = \"ipv6_address: \" + self.ipv6_address\n if self.hostname != self.name:\n net_aliases = \"aliases:\"\n net_alias1 = \"- \" + self.hostname\n\n if not self.with_installed_binary:\n binary_volume = \"- \" + self.server_bin_path + \":/usr/bin/clickhouse\"\n odbc_bridge_volume = (\n \"- \" + self.odbc_bridge_bin_path + \":/usr/bin/clickhouse-odbc-bridge\"\n )\n library_bridge_volume = (\n \"- \"\n + self.library_bridge_bin_path\n + \":/usr/bin/clickhouse-library-bridge\"\n )\n else:\n binary_volume = \"- \" + self.server_bin_path + \":/usr/share/clickhouse_fresh\"\n odbc_bridge_volume = (\n \"- \"\n + 
self.odbc_bridge_bin_path\n + \":/usr/share/clickhouse-odbc-bridge_fresh\"\n )\n library_bridge_volume = (\n \"- \"\n + self.library_bridge_bin_path\n + \":/usr/share/clickhouse-library-bridge_fresh\"\n )\n\n external_dirs_volumes = \"\"\n if self.external_dirs:\n for external_dir in self.external_dirs:\n external_dir_abs_path = p.abspath(\n p.join(self.cluster.instances_dir, external_dir.lstrip(\"/\"))\n )\n logging.info(f\"external_dir_abs_path={external_dir_abs_path}\")\n os.makedirs(external_dir_abs_path, exist_ok=True)\n external_dirs_volumes += (\n \"- \" + external_dir_abs_path + \":\" + external_dir + \"\\n\"\n )\n\n with open(self.docker_compose_path, \"w\") as docker_compose:\n docker_compose.write(\n DOCKER_COMPOSE_TEMPLATE.format(\n image=self.image,\n tag=self.tag,\n name=self.name,\n hostname=self.hostname,\n binary_volume=binary_volume,\n odbc_bridge_volume=odbc_bridge_volume,\n library_bridge_volume=library_bridge_volume,\n instance_config_dir=instance_config_dir,\n config_d_dir=self.config_d_dir,\n db_dir=db_dir,\n external_dirs_volumes=external_dirs_volumes,\n tmpfs=str(self.tmpfs),\n logs_dir=logs_dir,\n depends_on=str(depends_on),\n user=os.getuid(),\n env_file=self.env_file,\n odbc_ini_path=odbc_ini_path,\n keytab_path=self.keytab_path,\n krb5_conf=self.krb5_conf,\n entrypoint_cmd=entrypoint_cmd,\n networks=networks,\n app_net=app_net,\n ipv4_address=ipv4_address,\n ipv6_address=ipv6_address,\n net_aliases=net_aliases,\n net_alias1=net_alias1,\n )\n )",
"def _temp_dir(self):\n tmp_dir = os.path.join(self.output_dir, self.config.find_tune[\"run_dir\"])\n try:\n os.makedirs(tmp_dir)\n except OSError:\n pass\n os.chdir(tmp_dir)\n self.tmp_dir = \"./\"",
"def setup_session(tmp_path_factory):\n reset_config()\n data_dir = tmp_path_factory.mktemp(\"data\")\n # convert from Path object to str\n data_dir_str = data_dir.as_posix()\n set_data_dir(data_dir=data_dir_str)",
"def gunicorn_conf_tmp_path(tmp_path_factory: pytest.TempPathFactory) -> Path:\n return tmp_path_factory.mktemp(\"gunicorn\")",
"def create_temp_output_paths() -> None:\n if not os.path.exists(TMP_PATH):\n os.makedirs(TMP_PATH)\n if not os.path.exists(TMP_MAP_PATH):\n os.makedirs(TMP_MAP_PATH)",
"def mktemp(self):\n if self.dryrun:\n return os.path.expandvars(\"$TEMP/build\")\n\n return tempfile.mkdtemp()",
"def prepare_run(input_path: str, output_path: str, tmp: str) -> None:\n input_file_exists(input_path)\n if os.path.isdir(output_path) and len(os.listdir(output_path)) != 0:\n raise AssertionError(\"output folder must be empty or non-existent.\")\n set_tempdir(tmp)\n os.makedirs(output_path, exist_ok=True)",
"def test_set_env_file_path_method(self) -> None:\n\n given = self.temp_env_file.name\n expected = given\n\n self.helper.set_env_file_path(given)\n\n actual = self.helper.env_file_path\n\n self.assertEqual(expected, actual)",
"def _prepare_env_file(settings):\n\n\tenv_extractor = BinpkgEnvExtractor(background=False,\n\t\tscheduler=EventLoop(main=False), settings=settings)\n\n\tif env_extractor.dest_env_exists():\n\t\t# There are lots of possible states when doebuild()\n\t\t# calls this function, and we want to avoid\n\t\t# clobbering an existing environment file.\n\t\treturn os.EX_OK\n\n\tif not env_extractor.saved_env_exists():\n\t\t# If the environment.bz2 doesn't exist, then ebuild.sh will\n\t\t# source the ebuild as a fallback.\n\t\treturn os.EX_OK\n\n\tenv_extractor.start()\n\tenv_extractor.wait()\n\treturn env_extractor.returncode",
"def put_settings_files(env='development'):\n projects = build_projects_vars()\n project = projects[env]\n if exists('%(dir)s/%(inner_dir)s' % project):\n put(project['settings_path'], '%(dir)s/%(inner_dir)s/local_settings.py' % project)\n if env == 'production':\n with cd('%(dir)s/%(inner_dir)s' % project):\n sed('local_settings.py', '^DEBUG = True$', 'DEBUG = False')",
"def env_dir(tmpdir):\n test_dir = tmpdir.mkdir(\"fromenv\")\n test_dir.join('fromenv.cfg').write('')\n\n return test_dir"
] |
[
"0.68297136",
"0.63292444",
"0.60993797",
"0.60852176",
"0.6083526",
"0.5991829",
"0.59314793",
"0.5919459",
"0.59138197",
"0.58782107",
"0.58720523",
"0.5813424",
"0.578418",
"0.5774709",
"0.5765266",
"0.573933",
"0.5734823",
"0.5723604",
"0.5715323",
"0.57145584",
"0.5712628",
"0.569968",
"0.56621385",
"0.56237954",
"0.55965567",
"0.5570788",
"0.5551484",
"0.55495465",
"0.554834",
"0.5544053"
] |
0.6842399
|
0
|
Configure a mock action.
|
def configure_mock_action_instance(mock_action: Mock) -> Mock:
    mock_instance = Mock(return_value=None)
    mock_action.return_value = mock_instance
    mock_instance.execute = Mock()
    return mock_instance
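A minimal usage sketch, assuming the helper above is in scope; the patched Action class and the execute() call are hypothetical stand-ins for whatever the test actually exercises.

from unittest.mock import Mock

# Hypothetical action class being patched in a test.
mock_action_cls = Mock(name="Action")

# Wire the mock so that Action() returns a controllable instance.
mock_instance = configure_mock_action_instance(mock_action_cls)

# Code under test would typically instantiate and run the action:
action = mock_action_cls()  # returns mock_instance
action.execute()

# The test can then assert against the configured mock.
mock_instance.execute.assert_called_once_with()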
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def default_setup(self, mocker):\n # pylama: ignore=W0201\n self.url = '/api/v0/publish'\n self.client = wsgi.application.test_client()\n self._retryable = mocker.patch.object(wsgi, '_retryable')",
"def _set_action(self, action):\n raise NotImplementedError()",
"def _set_action(self, action):\n raise NotImplementedError()",
"def _set_action(self, action):\n raise NotImplementedError()",
"def _set_action(self, action):\n raise NotImplementedError()",
"def _set_action(self, action):\n raise NotImplementedError()",
"def _set_action(self, action):\n raise NotImplementedError()",
"def test_action_mocked(self):\n with self.mock_global_connection:\n self.assertEqual(0, Action.count())\n\n manager = Manager(self.connection)\n self.assertFalse(manager.is_populated())\n manager.populate()\n self.assertTrue(manager.is_populated())\n\n self.assertEqual(1, Action.count())\n actions = Action.ls()\n action = actions[0]\n self.assertEqual(manager.module_name, action.resource)\n self.assertEqual('populate', action.action)",
"def setUp(self):\n class TestHandler(BaseHandler):\n urlconf = 'conman.routes.tests.urls'\n\n self.route = mock.Mock()\n self.request = mock.Mock()\n self.handler = TestHandler(self.route)\n self.view = 'conman.routes.tests.urls.dummy_view'",
"def test_configure_to_reconfigure_param(self):\n\n class ToConfigure(object):\n \"\"\"Class to configure.\"\"\"\n\n def __init__(self):\n super(ToConfigure, self).__init__()\n self.test = None\n\n target = ToConfigure()\n\n param = 'test'\n\n conf = configuration(category('TEST', Parameter(param, value=True)))\n\n self.configurable.configure(conf=conf, targets=[target])\n self.assertTrue(target.test)",
"def setAction(self, action):\n self.action = action\n return self",
"def configure_test(self, test, config_json):\n pass",
"def setUp(self) -> None:\n super().setUp()\n\n self.test_action = TestAction()\n self.test_header_action = TestHeaderAction()\n self.test_menu_action = TestMenuAction()\n self.test_menu_item_action = TestMenuItemAction()",
"def set_action(self, action):\n self.action = action",
"def set_action(self, action):\n self.action = action",
"def set_sample_action(self, sample_action):\n\n self.sample_action = sample_action",
"def set_action(self, action):\n self._action = action\n return self",
"def take_action(self, action):\n getattr(self, action['func'])(\n *action.get('args', ()), \n **action.get('kwargs', {})\n )",
"def setUp(self):\n class TestHandler(SimpleHandler):\n view = mock.MagicMock()\n\n self.route = mock.Mock()\n self.request = mock.Mock()\n self.handler = TestHandler(self.route)\n self.route.get_handler.return_value = self.handler\n self.view = TestHandler.view",
"def set_action(self,action):\n self.__action = action",
"def call_action(self, action):\n pass",
"def ToAction(self):\n action = self.action_key.get()\n if not action:\n raise ValueError('Test run action %s not found' % self.action_key)\n options = NameValuePair.ToDict(action.options or [])\n options.update(NameValuePair.ToDict(self.options or []))\n action.options = NameValuePair.FromDict(options)\n return action",
"def test_all_actions_setup(self, mocked_find):\n\n setup_identity_cache()\n\n mocked_find.side_effect = KeyError(\"Error forced for testing\")\n\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_503_SERVICE_UNAVAILABLE)\n\n new_task = Task.objects.all()[0]\n\n class_conf = new_task.config\n expected_action_names = CreateProjectAndUser.default_actions[:]\n expected_action_names += class_conf.additional_actions\n\n actions = new_task.actions\n observed_action_names = [a.action_name for a in actions]\n self.assertEqual(observed_action_names, expected_action_names)",
"def test_failing_action(self):\n dummy_calls = []\n\n self.action_fail.side_effect = dummy_calls.append\n\n def dummy_action(args):\n raise ValueError(\"uh oh\")\n\n with mock.patch.dict(actions.ACTIONS, {\"foo\": dummy_action}):\n actions.main([\"foo\"])\n self.assertEqual(dummy_calls, [\"uh oh\"])",
"def test_act_on_settings(self):\n pass # TODO(tlarsen)",
"def test_act_on_settings(self):\n pass # TODO(tlarsen)",
"def take_action(self, action):\n\t\traise NotImplementedError",
"def setup_method(self) -> None:\n self.client = Mock()",
"def _formulate_action(Action, **kwargs):\n\n return Action(**kwargs)",
"def action(self, action):\n self._action = action"
] |
[
"0.5844788",
"0.5802622",
"0.5802622",
"0.5802622",
"0.5802622",
"0.5802622",
"0.5802622",
"0.58012307",
"0.5685793",
"0.5590533",
"0.5575047",
"0.55686426",
"0.5557722",
"0.5527817",
"0.5527817",
"0.5523214",
"0.55023485",
"0.54832983",
"0.5477297",
"0.5472402",
"0.54713327",
"0.5455813",
"0.5415668",
"0.5395263",
"0.5367237",
"0.5367237",
"0.53619474",
"0.5345068",
"0.5335702",
"0.5318974"
] |
0.78626853
|
0
|
Test load a CFN template.
|
def test_load_cfn_template(self, caplog: LogCaptureFixture, tmp_path: Path) -> None:
    cfn_template = tmp_path / "template.yml"
    cfn_template.write_text("test_key: !Ref something")
    cfngin = CFNgin(ctx=self.get_context(), sys_path=tmp_path)
    caplog.set_level("ERROR", logger="runway.cfngin")
    with pytest.raises(SystemExit):
        cfngin.load(cfn_template)
    assert "appears to be a CloudFormation template" in caplog.text
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_starting_template(checker):\n contents = labeled.contents(label=\"template\")\n _ = tomllib.loads(contents)",
"def load(template):\n with open(template) as f:\n return f.read()",
"def test_read_namespaced_template(self):\n pass",
"def test_templates(self):\n path = str(Template())\n self.assertTrue(os.path.exists(path))",
"def _load_template(file_name):\n\n filepath = os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n '../../fixtures/autoscaling_templates', file_name)\n with open(filepath) as f:\n return f.read()",
"def test_code_template(tmpdir):\n # Create temp file\n fn = tmpdir.mkdir(\"data\")\n expected_file = os.path.join(str(fn), 'loader.py')\n\n # Gen code template\n runner = CliRunner()\n result = runner.invoke(cli.generate_code_template,\n ['-o', str(fn)], env=env)\n\n assert result.exit_code == 0\n assert os.path.isfile(expected_file)\n\n # Update file\n with open(expected_file, 'w') as f:\n f.write('print(\"hello world!\")')\n\n # Try to generate file again\n result = runner.invoke(cli.generate_code_template,\n ['-o', str(fn)], env=env)\n\n assert 'already exists' in result.stdout\n assert result.exit_code == 0\n\n # Check file\n with open(expected_file, 'r') as f:\n assert 'hello world!' in f.read()",
"def test_register_template(self):\n pass",
"def _LoadTemplate(self,fname):\n f = open(fname, 'r')\n lines = f.readlines()\n data = ''\n for line in lines:\n if not line.startswith('---'):\n data += line\n data = data.replace('\\t',' ')\n if '\\t' in data:\n errstr = \\\n 'Illegal tabs encountered in template file. Use spaces instead.'\n raise ScannerError(errstr)\n proc.LogErrors(errstr)\n tmplt = yaml.load(data)\n f.close()\n return tmplt",
"def test_retrieve_template_registration(self):\n pass",
"def test_template(project):\n project.add_mock_file(\"templates\", \"test.tmpl\", \"{{ value }}\")\n project.compile(\"\"\"import unittest\nvalue = \"1234\"\nstd::print(std::template(\"unittest/test.tmpl\"))\n \"\"\")\n\n assert project.get_stdout() == \"1234\\n\"",
"def test_expand_cloud_init_user_data_template(\n load_config_dict_mock, get_config_path_mock): \\\n # pylint: disable=unused-argument\n\n tmpl = '''\n#!/usr/bin/env python\n\ninstaller = '{{ installer }}'\n\n\ndef main():\n pass\n\n\nif __name__ == '__main__':\n main()\n'''\n\n adapter = Aws()\n\n config = adapter.get_config()\n\n result = adapter.expand_cloud_init_user_data_template(\n config, template=Template(tmpl))\n\n assert result and isinstance(result, str)\n\n assert get_config_path_mock.called_with('notblah.txt')",
"def _parse_template(self):\n with open(\"./common/sagemaker_rl/orchestrator/cloudformation.yaml\") as template_fileobj:\n template_data = template_fileobj.read()\n self.cf_client.validate_template(TemplateBody=template_data)\n return template_data",
"def load_template(format_: str) -> Template:\n template_path = Path(TEMPLATES_PATH).joinpath(f'{format_}{TEMPLATE_SUFFIX}')\n template = Template(template_path.read_text())\n return template",
"def testTemplateGet(self):\n self.assertRaises(NotImplementedError, getattr,\n self.tempfile, 'template')",
"def test_template_local_file(file, tmp_path, prefix):\n source = tmp_path / \"source\"\n dest = tmp_path / \"dest\"\n source.write_text(\"{{ foo }}\\n\")\n\n ret = file.managed(\n name=str(dest),\n source=\"{}{}\".format(prefix, source),\n template=\"jinja\",\n context={\"foo\": \"Hello world!\"},\n )\n assert ret.result is True\n assert dest.read_text() == \"Hello world!\\n\"",
"def readTemplate(tfn):\n\n if opts.verbose: print \"fetching template\", tfn\n\n found = 0\n foundInRoot = 0\n\n # check in user-specified template root.\n if opts.templates:\n fn = join(opts.templates, tfn)\n if opts.verbose: print \" looking in %s\" % fn\n if exists(fn):\n found = 1\n\n # check in hierarchy root\n if not found:\n fn = join(opts.root, tfn)\n if opts.verbose: print \" looking in %s\" % fn\n if exists(fn):\n foundInRoot = 1\n found = 1\n\n # look for it in the environment var path\n if not found:\n try:\n curatorPath = os.environ[ 'CURATOR_TEMPLATE' ]\n pathlist = string.split(curatorPath, os.pathsep)\n for p in pathlist:\n fn = join(p, tfn)\n if opts.verbose: print \" looking in %s\" % fn\n if exists(fn):\n found = 1\n break\n except KeyError:\n pass\n\n if found == 1:\n # read the file\n try:\n tfile = open(fn, \"r\")\n t = tfile.read()\n tfile.close()\n except IOError, e:\n print >> sys.stderr, \"Error: can't open image template file:\", fn\n sys.exit(1)\n if opts.verbose: print \" succesfully loaded template\", tfn\n\n else:\n # bah... can't load it, use fallback templates\n if opts.verbose:\n print \" falling back on simplistic default templates.\"\n global fallbackTemplates\n try:\n t = fallbackTemplates[ splitext(tfn)[0] ]\n except KeyError:\n t = ''\n\n # Save templates in root, if it was requested.\n if opts.save_templates and foundInRoot == 0:\n rootfn = join(opts.root, tfn)\n if opts.verbose: print \" saving template in %s\" % rootfn\n\n # saving the file template\n if exists(rootfn):\n bakfn = join(opts.root, tfn + '.bak')\n if opts.verbose: print \" making backup in %s\" % bakfn\n import shutil\n try:\n shutil.copy(rootfn, bakfn)\n except:\n print >> sys.stderr, \\\n \"Error: can't copy backup template %s\", bakfn\n\n try:\n ofile = open(rootfn, \"w\")\n ofile.write(t)\n ofile.close()\n except IOError, e:\n print >> sys.stderr, \"Error: can't save template file to\", rootfn\n\n return t",
"def main_function(template_file):\n\n content = load(template_file)\n assert content, \"Couldn't load template\"\n\n template = Template(content)\n\n return template.render(context(content))",
"def test_template(self):\n\t\tself.assertTemplateUsed(self.resp, 'cadastro.html')",
"def _load_template(template_file: str = None, module_name: str = None, stack_name: str = None) -> str:\n if template_file:\n # read the template file\n with open(template_file, 'r') as fh:\n template_body = fh.read()\n else:\n # Import the troposphere module\n stack = _import_tropo_module(stack_name, module_name)\n # Get the yaml template file\n template_body = stack.get_template().to_json()\n return template_body",
"def test_template(self):\n\t\tself.assertTemplateUsed(self.resp, 'inicio.html')",
"def load(self, spec):\n if spec.template is not None:\n return self.loader.unicode(spec.template, spec.template_encoding)\n\n path = self._find(spec)\n\n return self.loader.read(path, spec.template_encoding)",
"def test_template(filename, rule, mode, rules):\n\n filename = os.path.join(os.path.dirname(__file__), \"templates\", filename)\n\n template = cfnlint.decode.cfn_yaml.load(filename)\n matches = cfnlint.core.run_checks(\n filename,\n template,\n rules,\n # TODO: parametrize the region\n [\"eu-west-1\"],\n )\n\n match_ids = [match.rule.id for match in matches]\n\n # No non-serverless errors\n assert len([m for m in match_ids if m[1] != \"S\"]) == 0\n\n if mode == \"fail\":\n assert rule in match_ids\n else:\n assert rule not in match_ids",
"def test_get_tosca_template(self):\n pass",
"def test_loader_loads_from_file():\n base_json = 'tests/test_json.json'\n json_test = {\"foo\": \"bar\"}\n assert whenzat.loader(base_json) == json_test",
"def test_load_object_from_string():\n tests = (\n (\"string.Template\", string.Template),\n (\"os.path.basename\", os.path.basename),\n (\"string.ascii_letters\", string.ascii_letters)\n )\n for test in tests:\n assert load_object_from_string(test[0]) is test[1]",
"def test_create_template_subsciption(self):\n pass",
"def _get_template(self, tgt):\n with open(tgt, 'r', encoding='utf-8') as template_file:\n template_file_content = template_file.read()\n self.template = Template(template_file_content)\n return",
"def refl_load(file_descriptors):\n modules = [{\"module\": \"ncnr.refl.load\", \"version\": \"0.1\", \"config\": {}}]\n template = Template(\"test\", \"test template\", modules, [], \"ncnr.magik\", version='0.0')\n retval = process_template(template, {0: {\"files\": file_descriptors}}, target=(0, \"output\"))\n return retval.todict()",
"def __init__(self, template_name):\n # self.env = Environment(loader=PackageLoader(\n # package, path))\n # self.template = self.env.get_template(template_name)\n with open(template_name, 'r', encoding='UTF-8') as f:\n self.template = Template(f.read())",
"def from_template(cls, template, fpath, defaults=defaults, **kwargs):\n\n try:\n #use defaults first and overwrite with user's specs\n inp = template.substitute(defaults, **kwargs)\n return cls(fpath, inp_string=inp)\n except (KeyError, ValueError) as error:\n print(error)\n print(error.args)\n quit(0)"
] |
[
"0.71696216",
"0.6851754",
"0.66243255",
"0.64899427",
"0.6404037",
"0.63239944",
"0.6242543",
"0.61893934",
"0.6188154",
"0.6186724",
"0.6116805",
"0.6085659",
"0.60642785",
"0.60526496",
"0.6009952",
"0.59993297",
"0.5983575",
"0.59523255",
"0.5914063",
"0.59060115",
"0.5903425",
"0.5838479",
"0.58222395",
"0.5814252",
"0.5801118",
"0.5796245",
"0.5790028",
"0.5730904",
"0.57237434",
"0.57190436"
] |
0.72675973
|
0
|
Input an iperf client log; output the final bandwidth value in kbps.
|
def get_iperf_bw(self, filename):
    # last line has avg values
    for line in open(filename, 'r'):
        pass
    bw = line.split(',')[-1].strip()
    return int(bw)/1000  # bw in kbps
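For context, a short illustration of what this helper assumes about its input: an iperf client log in CSV form whose last line ends with the average bandwidth in bits per second, which is why dividing by 1000 yields kbps. The sample line below is made up for illustration; its exact field layout should be checked against real iperf -y C output.

# Hypothetical last line of an iperf CSV client log; the final field is bits/sec.
last_line = "20240101120000,10.0.0.1,5001,10.0.0.2,5001,3,0.0-10.0,131072000,104857600"

bw_bps = int(last_line.split(',')[-1].strip())
print(bw_bps / 1000)  # 104857.6 -> the kbps value get_iperf_bw() would return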
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def iperf3_bandwidth(self, client, port):\n\n if not client:\n return\n\n iperf_res = None\n\n if self.nma.conf['databases']['tinydb_enable']:\n speed = self.speed_db.all()\n\n measured_bw = {'upload': 0, 'download': 0}\n measured_jitter = {'upload': 0, 'download': 0}\n\n for direction, value in measured_bw.items():\n reverse = False\n\n bandwidth = 0\n\n if self.nma.conf['databases']['tinydb_enable']:\n bandwidth = speed[0][direction] + 10\n if direction == 'download':\n bandwidth += 40\n reverse = True\n\n iperf_cmd = \"/usr/local/src/nm-exp-active-netrics/bin/iperf3.sh -c {} -p {} -u -i 0 -b {}M {} | awk 'NR=={}'\"\\\n .format(client, port, bandwidth,\n '-R' if reverse else \"\", 10 if reverse else 8)\n iperf_res = Popen(iperf_cmd, shell=True,\n stdout=PIPE).stdout.read().decode('utf-8')\n\n measured_bw[direction] = iperf_res.split()[6]\n measured_jitter[direction] = iperf_res.split()[8]\n\n self.results[f'iperf_udp_{direction}'] = float(\n measured_bw[direction])\n self.results[f'iperf_udp_{direction}_jitter_ms'] = float(\n measured_jitter[direction])\n\n if not self.quiet:\n if direction == 'upload':\n print('\\n --- iperf Bandwidth and Jitter ---')\n print(f'{direction} bandwidth: {measured_bw[direction]} Mb/s')\n print(f'{direction} jitter: {measured_jitter[direction]} ms')\n\n if self.nma.conf['databases']['tinydb_enable']:\n self.update_max_speed(float(measured_bw['download']),\n float(measured_bw['upload']))\n return iperf_res",
"async def fetch_logs(self) -> bytes:\n host = \"127.0.0.1\"\n port = 42000\n dt = datetime.now(pytz.timezone(\"Europe/Amsterdam\"))\n request = {\"id\": 1, \"method\": \"getstat\"}\n\n point = TCP4ClientEndpoint(reactor, host, port)\n try:\n connected_p = await connectProtocol(\n point, EWBFProtocol()) # type: EWBFProtocol\n response = await connected_p.make_request(request)\n except Exception as e:\n print(\"couldn't connect. {}\".format(e))\n return b\"\"\n else:\n rl = []\n t = 0 # type: int\n power = speed = accept = reject = 0\n for idx, data in enumerate(response['result']):\n rl.append(\"GPU{0}_SPEED: {1} H/s\".format(\n idx, data['speed_sps']))\n rl.append(\"GPU{0}_POWER: {1}\".format(\n idx, data['gpu_power_usage']))\n t = data['start_time']\n power += data['gpu_power_usage']\n speed += data['speed_sps']\n accept += data['accepted_shares']\n reject += data['rejected_shares']\n\n rl.append(\"Power: {0}\".format(power))\n rl.append(\"Total speed: {0} Sol/s\".format(speed))\n rl.append(\"Accepted share: {0}\".format(accept))\n rl.append(\"Rejected share: {0}\".format(reject))\n rl.append(\"Total GPUs: {0}\".format(len(response['result'])))\n rl.append(\"START_TIME: {0}\".format(int(t)))\n rl.append(\"CURRENT_TIME: {0}\".format(int(dt.timestamp())))\n rl.append(\"UPTIME: {0}\".format(int(dt.timestamp() - t)))\n return \";\".join(rl).encode('utf-8') + b\";\"",
"def dut_config(vx_handle, str_chans, i_wait_count):\n vx_handle.write('CAPTURECFG %s'%str_chans) # the vars to captures\n i_cap_len_k = math.ceil(len(str_chans) * i_wait_count / 256.0)\n vx_handle.write('CAPTURELEN %d'%i_cap_len_k) # in kB. dut rounds odd numbers up to next even\n # print 'CAPTURELEN %d'%i_cap_len_k # in kB. dut rounds odd number up\n f_rate_max = float(vx_handle.ask('CAPTURERATEMAX?')) # filters determine the max data rate\n return f_rate_max",
"def bw_calc(start_time, end_time, packet_bit_len):\n delta_time = end_time - start_time\n delta_time = delta_time.seconds + delta_time.microseconds/1E6\n #Bandwidth b/s\n bw = packet_bit_len/delta_time\n return",
"def bandwidth(self):\n return self.stop_hz - self.start_hz",
"def meter_stats():\n current_time = time.time()\n r = requests.get('http://localhost:8080/stats/flow/1')\n r.raise_for_status()\n data = r.json()\n bytes_tx = 0\n for stat in data['1']:\n if stat['match'].get('dl_src') == '00:00:00:00:00:01':\n bytes_tx += stat['byte_count']\n global LAST_TIME\n global LAST_BYTES_TX\n time_diff = current_time - LAST_TIME\n byte_diff = bytes_tx - LAST_BYTES_TX\n LAST_TIME = current_time\n LAST_BYTES_TX = bytes_tx\n transfer_rate = byte_diff / time_diff / 1024\n # We need to accomodate the dropping of our rule with the hard timeout\n return jsonify({'transfer_rate': transfer_rate})",
"def testBandwidth(self):\n\n if self.config.isIperf:\n info(\"**** [G2]: running bandwidth test using iperf\\n\")\n p1 = Process(target=self.iperfDriver)\n p1.start()\n\n if self.config.isSwStat:\n info(\"**** [G2]: collecting switch stats\\n\")\n ifList = self.interfaceList\n pfx = self.config.prefix\n freq = self.config.frequency\n\n procs = []\n for iface in ifList:\n p = Process(target=self.monitorInterface, args=(iface, pfx, freq))\n procs.append(p)\n p.start()\n\n p1.join()\n # Once the iperf is done, we will terminate all the switch monitoring processes.\n if self.config.isSwStat:\n for p in procs:\n p.terminate()",
"def print_polling_traffic_stats(device_int):\n print \"previous counter {}\".format(device_int[\"previous_counter\"])\n print \"current_counter {}\".format(device_int[\"current_counter\"])\n print \"bits_out {}\".format(device_int[\"bits_out\"])\n print \"time_of poll {}\".format(device_int[\"update_time\"])\n print \"previous_update {}\".format(device_int[\"previous_update\"])\n print \"secounds since {}\".format(device_int[\"seconds_since\"])\n print \"bits_per_sec {}\".format(device_int[\"bits_per_sec\"])\n print \"speed {}\".format(device_int[\"speed\"])\n print \"util_percentage {}\".format(device_int[\"util_percentage\"])\n print \"util_percentage after round {}\".format(device_int[\"util_percentage\"])",
"def subbandwidth(self):",
"def throughputbin(conn):\n c = conn.cursor()\n bc = bincount(conn)\n total_tasks = totaltasks(conn)\n return {\n \"throughput_tasks_per_bin\": total_tasks / bc['bins']['count']\n }",
"def throughput(self):\n return self.cwnd_from_file * self.mss / self.rtt",
"def calculate_br_up_metric(br_up):\n if br_up < 1:\n br_up = 1\n min_baud = 1200\n max_baud = 38400\n\n num = np.log(br_up) - np.log(min_baud)\n den = np.log(max_baud) - np.log(min_baud)\n\n return (num / den + 0.1).clip(min=0, max=1)",
"def calculate_br_down_metric(br_down):\n if br_down < 1:\n br_down = 1\n min_baud = 1200\n max_baud = 38400\n\n num = np.log(br_down) - np.log(min_baud)\n den = np.log(max_baud) - np.log(min_baud)\n\n return (num / den + 0.1).clip(min=0, max=1)",
"def _ExtractThroughput(output):\n regex = r'final\\s+=\\s+(\\S+)\\s+GF'\n match = re.search(regex, output)\n try:\n return float(match.group(1))\n except:\n raise HpcgParseOutputException('Unable to parse HPCG output')",
"def read_core_vbat(self) -> float:",
"def _get_bandwidth(self, report):\n match = re.search(\"bw\\=\\s*(\\d+\\.{0,1}\\d*)\\s*(\\w+)\\/s\",\n report)\n if match:\n bandwidth = float(match.group(1))\n unit = match.group(2)\n if unit.lower() == 'b':\n bandwidth = round(bandwidth / 1024 / 1024, 2)\n elif \"kb\" in unit.lower():\n bandwidth = round(bandwidth / 1024, 2)\n elif \"gb\" in unit.lower():\n bandwidth = round(bandwidth * 1024, 2)\n\n return bandwidth",
"def get_port_txrate(self, iface):\n pytest.skip(\"Method is not supported by Iperf TG\")",
"def getIFBW(self) -> int:\n if not self.debug:\n self.myFieldFox.write(\"SENS:BWID?\")\n ret = int(self.myFieldFox.read())\n else:\n ret = 1000000\n return ret",
"def rawrtt(self):\n return self.fowd + self.bowd",
"def bandwidth(self):\r\n return self._bandwidth",
"def freq2erb(freq_hz):\n return 9.265 * np.log(1 + freq_hz / (24.7 * 9.265))",
"def get_speed(self):\n return self.get_par(\"slew_speed\")",
"def cputime(conn):\n c = conn.cursor()\n r = c.execute(\"SELECT SUM(length) as total_bin_time FROM event WHERE bin_id not null\").fetchall()\n total_bin_time = r[0]['total_bin_time'];\n last_time = maxtime(conn)\n\n cputime = 0\n if total_bin_time and last_time:\n cpu_time = total_bin_time / last_time\n\n return {\n \"cpu_time\":cpu_time\n }",
"def bandwidth(self):\n self._filter()\n return 1. * self._aggregate / self._window",
"def bandwidth_event(self, event):\r\n pass",
"def get_bandwidth(self, chan=0):\n return _uhd_swig.usrp_sink_sptr_get_bandwidth(self, chan)",
"def output_statistics(self, run_time):\n fps = self.received_frames / run_time\n MBps_per_frame = self.full_frame_length() / 1000.0 / 1000.0\n print '\\nRun time: %.2f seconds' % (run_time,)\n print 'Received frames: ', self.received_frames\n print 'Avg. frame rate: %s fps' % (fps,)\n print 'Avg. Bit rate: %.2f MB/s' % (MBps_per_frame * fps,)",
"def getKikiLog(self):\n self._sendCommand(self.SONY_CMD_KikiLogSender_InitKikiLog)\n kikilog = b''\n remaining = 1\n while remaining:\n data = BytesIO(self._sendCommand(self.SONY_CMD_KikiLogSender_ReadKikiLog))\n data.read(4)\n remaining = parse32le(data.read(4))\n size = parse32le(data.read(4))\n kikilog += data.read(size)\n return kikilog[24:]",
"def get_bandwidth(self, chan=0):\n return _uhd_swig.usrp_sink_get_bandwidth(self, chan)",
"def _estimate_write_time(self, data):\n return (len(data.encode('utf-8'))/(self.byte_rate))*1.25\n #1.25 multiplier accounts for start & stop bits"
] |
[
"0.6555676",
"0.5904527",
"0.5810257",
"0.5724674",
"0.56913453",
"0.5641988",
"0.5623032",
"0.55325526",
"0.5453401",
"0.5430281",
"0.54273736",
"0.54010695",
"0.5346723",
"0.53466177",
"0.5341869",
"0.5321154",
"0.5310894",
"0.52825856",
"0.5252985",
"0.52407163",
"0.5235296",
"0.5216046",
"0.51757336",
"0.514644",
"0.5138706",
"0.5126725",
"0.5107851",
"0.5106229",
"0.5094895",
"0.5084903"
] |
0.6255306
|
1
|
Takes visibilities from the last result, if there is one, and associates them with galaxies in this search where full-path galaxy names match. If the galaxy collection has a different name, then an association is not made. e.g.
|
def associate_hyper_visibilities(
    self, instance: af.ModelInstance
) -> af.ModelInstance:
    if self.hyper_galaxy_visibilities_path_dict is not None:
        for galaxy_path, galaxy in instance.path_instance_tuples_for_class(
            ag.Galaxy
        ):
            if galaxy_path in self.hyper_galaxy_visibilities_path_dict:
                galaxy.hyper_model_visibilities = self.hyper_model_visibilities
                galaxy.hyper_galaxy_visibilities = self.hyper_galaxy_visibilities_path_dict[
                    galaxy_path
                ]
    return instance
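A schematic sketch of the association pattern with plain Python objects standing in for the af/ag types; every name below is illustrative only and is not the real autofit/autogalaxy API.

class FakeGalaxy:
    hyper_model_visibilities = None
    hyper_galaxy_visibilities = None

# Galaxies keyed by their full model path, as the real method iterates them.
galaxies = {("galaxies", "lens"): FakeGalaxy(), ("galaxies", "source"): FakeGalaxy()}

# Visibilities are only attached where the full path matches a dictionary key.
hyper_galaxy_visibilities_path_dict = {("galaxies", "lens"): "lens_visibilities"}
hyper_model_visibilities = "model_visibilities"

for galaxy_path, galaxy in galaxies.items():
    if galaxy_path in hyper_galaxy_visibilities_path_dict:
        galaxy.hyper_model_visibilities = hyper_model_visibilities
        galaxy.hyper_galaxy_visibilities = hyper_galaxy_visibilities_path_dict[galaxy_path]

print(galaxies[("galaxies", "source")].hyper_galaxy_visibilities)  # None: no match, no association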
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _update_galaxy_file_mapping(self):\n galaxy_to_refinery_mapping_list = []\n for node in self.tool._get_input_nodes():\n galaxy_to_refinery_mapping_list.append(\n {\n WorkflowTool.GALAXY_DATASET_HISTORY_ID:\n self.FAKE_DATASET_HISTORY_ID,\n Tool.REFINERY_FILE_UUID: node.file_item.uuid,\n }\n )\n\n with mock.patch.object(\n celery.result.TaskSetResult, 'join',\n return_value=galaxy_to_refinery_mapping_list\n ) as join_mock:\n self.tool.update_file_relationships_with_galaxy_history_data()\n self.assertTrue(join_mock.called)\n self.assertTrue(self.get_taskset_result_mock.called)\n\n self.collection_description = (\n self.tool._create_collection_description()\n )",
"def parse_grism_associations(exposure_groups, \n best_direct=DIRECT_ORDER,\n get_max_overlap=True):\n N = len(exposure_groups)\n \n grism_groups = []\n for i in range(N):\n f_i = exposure_groups[i]['product'].split('-')[-1]\n root_i = exposure_groups[i]['product'].split('-'+f_i)[0]\n \n if f_i.startswith('g'):\n group = OrderedDict(grism=exposure_groups[i], \n direct=None)\n else:\n continue\n \n fp_i = exposure_groups[i]['footprint']\n olap_i = 0.\n d_i = f_i\n \n #print('\\nx\\n')\n d_idx = 10\n for j in range(N):\n f_j = exposure_groups[j]['product'].split('-')[-1]\n if f_j.startswith('g'):\n continue\n \n fp_j = exposure_groups[j]['footprint']\n olap = fp_i.intersection(fp_j)\n root_j = exposure_groups[j]['product'].split('-'+f_j)[0]\n\n #print(root_j, root_i, root_j == root_i)\n if (root_j == root_i):\n # if (group['direct'] is not None):\n # pass\n # if (group['direct']['product'].startswith(root_i)) & (d_i.upper() == best_direct[f_i.upper()]):\n # continue\n \n if best_direct[f_i.upper()].index(f_j.upper()) < d_idx:\n d_idx = best_direct[f_i.upper()].index(f_j.upper())\n group['direct'] = exposure_groups[j]\n olap_i = olap.area\n d_i = f_j\n #print(0,group['grism']['product'], group['direct']['product'])\n # continue\n \n #print(exposure_groups[i]['product'], exposure_groups[j]['product'], olap.area*3600.)\n \n # #print(exposure_groups[j]['product'], olap_i, olap.area)\n # if olap.area > 0:\n # if group['direct'] is None:\n # group['direct'] = exposure_groups[j]\n # olap_i = olap.area\n # d_i = f_j\n # #print(1,group['grism']['product'], group['direct']['product'])\n # else:\n # #if (f_j.upper() == best_direct[f_i.upper()]):\n # if get_max_overlap:\n # if olap.area < olap_i:\n # continue\n # \n # if d_i.upper() == best_direct[f_i.upper()]:\n # continue\n # \n # group['direct'] = exposure_groups[j]\n # #print(exposure_groups[j]['product'])\n # olap_i = olap.area\n # d_i = f_j\n # #print(2,group['grism']['product'], group['direct']['product'])\n \n grism_groups.append(group)\n \n return grism_groups",
"def link_caesarGalProp_galname(galObj, galname, index, groupID, galnames, mstar, mgas, mbh, fedd_array, sfr, sfrsd, sfrsd_manual, gassd, gassd_manual, gasR, gasR_half, starR_half, Zgas, Zstar, fgas, fh2, gdr, central, mhalo, hid, SFRSD_manual, gasSD_manual, f_h2, bhmdot, DTM, Zmet_massweighted, frad=0.1):\n\n phm, phid = -1, -1\n\n if galObj.halo is not None:\n phm, phid = galObj.halo.masses['total'], galObj.halo.GroupID\n\n try:\n bhmdots = [bhmdot[k] for k in galObj.bhlist]\n bm = galObj.masses['bh']\n imax = np.argmax(bm)\n try:\n bm = bm[imax]\n bmdot = bhmdots[imax] # only the massive BH particle matters.\n except:\n bm = bm\n bmdot = bhmdots\n\n mdot_edd = 4*np.pi*6.67e-8*1.673e-24/(frad*3.e10*6.65245e-25) * bm * 3.155e7 # in Mo/yr\n fedd = bmdot / mdot_edd\n fedd = fedd[0].value\n except:\n bm = 0\n fedd = 0\n\n groupID.append(galObj.GroupID)\n galnames.append(galname)\n\n mstar.append(galObj.masses['stellar'])\n mgas.append(galObj.masses['gas'])\n mbh.append(bm)\n fedd_array.append(fedd)\n sfr.append(galObj.sfr)\n sfrsd.append(galObj.sfr/np.pi/(galObj.radii['gas'].in_units('kpc'))**2)\n sfrsd_manual.append(SFRSD_manual)\n gassd.append(galObj.masses['gas']/np.pi/(galObj.radii['gas'].in_units('pc'))**2)\n gassd_manual.append(gasSD_manual)\n gasR.append(galObj.radii['gas'].in_units('kpc'))\n gasR_half.append(galObj.radii['gas_half_mass'].in_units('kpc'))\n starR_half.append(galObj.radii['stellar_half_mass'].in_units('kpc'))\n Zgas.append(galObj.metallicities['sfr_weighted']/0.0134)\n Zstar.append(galObj.metallicities['stellar']/0.0134)\n fgas.append(galObj.gas_fraction) # = Mgas / (Mg + Ms)\n fh2.append(f_h2)\n gdr.append(galObj.masses['gas']/galObj.masses['dust'])\n central.append(galObj.central)\n mhalo.append(phm)\n hid.append(phid)\n\n Zmet = galObj.metallicities['mass_weighted'] / 0.0134\n Zmet_massweighted.append(Zmet)\n\n Mgmet = Zmet * (galObj.masses['gas'] - galObj.masses['dust'])\n dtm = np.log10(galObj.masses['dust'] / (Mgmet + galObj.masses['dust']) + 1.e-9)\n DTM.append(dtm)\n\n return groupID, galnames, mstar, mgas, mbh, fedd_array, sfr, sfrsd, sfrsd_manual, gassd, gassd_manual, gasR, gasR_half, starR_half, Zgas, Zstar, fgas, fh2, gdr, central, mhalo, hid, DTM, Zmet_massweighted",
"def update_results(adjective: str, animal_picture_dict: Dict[str, str], lock: Lock,\n animals_by_collateral_adjective: Dict[str, List[Dict[str, str]]]):\n with lock:\n if adjective in animals_by_collateral_adjective:\n animals_by_collateral_adjective[adjective].append(animal_picture_dict)\n else:\n animals_by_collateral_adjective[adjective] = [animal_picture_dict]",
"def Galaxies(name_path):\n\tp = pathlib.Path(name_path)\n\tgalaxies = []\n\tfor f in p.glob('*.fits'):\n\t\thdu = fits.open(f)\n\t\tZ1= hdu[0].data\n\t\tgalaxies.append(Z1)\n \n\treturn galaxies",
"def relink_datasets(self, name, ref_product):\n\n list_ds = [(ds[\"name\"], ds[\"reference product\"], ds[\"location\"]) for ds in self.db]\n\n for act in self.db:\n for exc in act['exchanges']:\n if \"name\" in exc and \"product\" in exc and exc[\"type\"] == \"technosphere\":\n if (exc['name'], exc.get('product')) == (name, ref_product):\n if (name, ref_product, act[\"location\"]) in list_ds:\n exc[\"location\"] = act[\"location\"]\n else:\n try:\n new_loc = self.geo.ecoinvent_to_iam_location(act[\"location\"])\n except KeyError:\n new_loc = \"\"\n\n if (name, ref_product, new_loc) in list_ds:\n exc[\"location\"] = new_loc\n else:\n # new location in ei3.7, not yet defined in `constructive_geometries`\n if act[\"location\"] in (\"North America without Quebec\", \"US only\"):\n new_loc = self.geo.ecoinvent_to_iam_location(\"US\")\n exc[\"location\"] = new_loc\n\n elif act[\"location\"] in (\"RoW\", \"GLO\"):\n new_loc = self.geo.ecoinvent_to_iam_location(\"CN\")\n exc[\"location\"] = new_loc\n else:\n print(\"Issue with {} used in {}: cannot find the IAM equiavlent for \"\n \"the location {}\".format(name, act[\"name\"], act[\"location\"]))\n\n if \"input\" in exc:\n exc.pop(\"input\")",
"def _orgsWithLogoForQuery(query, batch_size=5):\n orgs = []\n for org in query:\n if org.logo_url:\n orgs.append(org)\n if len(orgs) == batch_size:\n break\n\n return orgs",
"def get_matching(self):\n verts, plaqs, d_verts, d_plaqs = self.get_stabs()\n\n # def get_matching(anyons, d_anyons):\n # edges = self.get_edges(anyons)\n # for i0, i1, weight in edges:\n # nxgraph.add_edge(i0, i1, weight=-weight)\n # output = nx.algorithms.matching.max_weight_matching(nxgraph, maxcardinality=True)\n # return [[d_anyons[i0], d_anyons[i1]] for i0, i1 in output]\n\n def get_matching(anyons, d_anyons):\n output = pm.getMatching(len(anyons), self.get_edges(anyons))\n return [[d_anyons[i0], d_anyons[i1], anyons[i0], anyons[i1]] for i0, i1 in enumerate(output) if i0 > i1]\n\n self.matching = []\n if verts:\n self.matching += get_matching(verts, d_verts)\n if plaqs:\n self.matching += get_matching(plaqs, d_plaqs)",
"def search_results(request):\n competencies = Competence.objects.all()\n comp_user = request.user\n comp_list_filer = []\n comp_num = 0\n for competence in competencies:\n if competence.person == comp_user:\n comp_list_filer.append((competence.title_of_competence, competence.level_of_competence))\n comp_num = comp_num + 1\n\n q = request.GET.get('q')\n add_list = get_vac(q)\n competence_list = []\n for vacancy_dict in add_list:\n if Vacancy.objects.filter(title_of_vacancy=vacancy_dict['name']):\n percent = get_percent(comp_list_filer, comp_num, vacancy_dict['name'])\n obj = Vacancy.objects.get(title_of_vacancy=vacancy_dict['name'])\n obj.percent = percent\n obj.save()\n continue\n else:\n percent = 0\n if vacancy_dict['salary'] == 0:\n vacancy = Vacancy.objects.create_vacancy(vacancy_dict['name'], vacancy_dict['description'],\n vacancy_dict['city'],\n 0, vacancy_dict['webSite'], percent)\n else:\n vacancy = Vacancy.objects.create_vacancy(vacancy_dict['name'], vacancy_dict['description'],\n vacancy_dict['city'],\n vacancy_dict['salary'], vacancy_dict['webSite'], percent)\n for i in vacancy_dict['description'].split(\" \"):\n competence_list.append(i)\n graph_dict = {\"vac_name\": vacancy_dict['name'], \"com_name\": competence_list}\n graph_add(graph_dict)\n percent = get_percent(comp_list_filer, comp_num, vacancy_dict['name'])\n obj = Vacancy.objects.get(title_of_vacancy=vacancy_dict['name'])\n obj.percent = percent\n obj.save()\n competence_list = []\n if q:\n vacs = VacDocument.search().query(\"match\", title_of_vacancy=q)\n vacs = vacs.sort({\"percent\": {\"order\": \"desc\"}})\n else:\n vacs = ''\n return render(request, 'core/search-results.html', {'vacs': vacs, 'competencies': competencies})",
"def add_aliments(self):\n for cat in Category.objects.all():\n print(CAT_SEARCH.format(cat.name))\n n_page = 1\n counter = 0\n while counter < MAX_PRODUCTS_KEEPED:\n imported_aliments = self._scratch_category(cat.name, n_page)\n if not imported_aliments:\n print(CAT_FINDING_FAIL.format(counter, cat.name))\n break\n\n for aliment in imported_aliments:\n if self._no_full_info(aliment):\n continue\n if self._redundant_info(aliment, cat):\n continue\n\n counter = self._save(aliment, cat, counter)\n n_page += 1\n\n print(CAT_FINDING_SUCCESS.format(cat))",
"def characterize_collection(self):\n # If the lattices are not collected, look for matching saved lattices.\n # This allows the option of characterizing without holding all lattices in memory\n if self.lattices != []:\n matching = self.lattices\n if len(self.lattices) < len(self.meshfns):\n # Add other meshfns to self.lattices\n pass\n else:\n if not hasattr(self,'meshfns'):\n self.get_meshfns()\n \n # List the meshfn (paths) to the matching saved lattices \n matching = glob.glob(self.lp['rootdir']+'networks/'+self.LatticeTop+'/'+self.LatticeTop+'_'+self.shape+'*')",
"def relationships(self):",
"def compute_matches(self):\n\t\tself.local_database[\"figure_number\"] = [0] * len(self.local_database[\"feature_vectors\"])\n\t\tForensics = wbForensicsHOG(Database=self.local_database)\n\t\tForensics.KDTree_pairs(leaf_size = len(self.local_database)+1)\n\t\tForensics.d_rank(pairs=Forensics.pairs, distances=Forensics.dists, ratios=Forensics.ratios)\n\n\t\tself.local_matches = Forensics.Dist_Rank",
"def _set_gal_types(self):\n _gal_type_list = []\n for component_model in self.model_dictionary.values():\n _gal_type_list.append(component_model.gal_type)\n self.gal_types = set(list(_gal_type_list))",
"def compute_relations(self):\n\n visible_nodes = {}\n\n self.cameras = self.get_all_cameras()\n rospy.logdebug(self.cameras)\n\n if self.cameras.items():\n try:\n if self.visibility_monitor is None:\n self.visibility_monitor = VisibilityMonitor(self.ctx, self.source)\n rospy.loginfo(\"[perspective_filter] Visibility monitor now running, please active the Pygame windows.\")\n visible_nodes = self.visibility_monitor.compute_all()\n rospy.logdebug(\"[perspective_filter] %d perspectives computed \" % len(visible_nodes))\n #rospy.logdebug(visible_nodes)\n except Exception as e:\n rospy.logwarn(\"[perspective_filter] Exception occurred while computing relation : %s\" % str(e))\n if self.visibility_monitor:\n self.visible_nodes = {} #visible_nodes\n for camera_name, visibles_obj in visible_nodes.items():\n camera_id = self.source.scene.nodebyname(camera_name)[0].id\n self.visible_nodes[camera_id] = visibles_obj\n for node in visibles_obj:\n if node.parent in self.cameras.keys():\n if self.source.scene.nodes[node.parent] not in visibles_obj:\n visibles_obj.append(self.source.scene.nodes[node.parent])\n\n for agent_id, nodes_seen in self.visible_nodes.items():\n agent = self.source.scene.nodes[agent_id]\n for node in nodes_seen:\n if agent_id in self.previously_visible_nodes:\n if node not in self.previously_visible_nodes[agent_id]:\n self.start_predicate(self.source.timeline, \"isVisibleBy\", node.name, object_name=agent.name)\n else:\n self.start_predicate(self.source.timeline, \"isVisibleBy\", node.name, object_name=agent.name)\n\n for agent_id, nodes_previously_seen in self.previously_visible_nodes.items():\n agent = self.source.scene.nodes[agent_id]\n for node in nodes_previously_seen:\n if agent_id in self.visible_nodes:\n if node not in self.visible_nodes[agent_id]:\n self.end_predicate(self.source.timeline, \"isVisibleBy\", node.name, object_name=agent.name)\n else:\n self.end_predicate(self.source.timeline, \"isVisibleBy\", node.name, object_name=agent.name)\n\n self.publish_perspectives()\n self.previously_visible_nodes = self.visible_nodes",
"def detail_matching(self):\n paradic = self.cfg['param']['paradic']\n work_dir = self.work_dir\n \n x = float(self.cfg['param']['x']) # selected pixel in the first image\n y = float(self.cfg['param']['y'])\n \n # sift parameters\n # number of bins in the orientation histogram\n n_bins = int(paradic['n_bins']) \n n_hist = int(paradic['n_hist']) \n # descriptor of n_hist X n_hist weighted histograms with n_ori\n n_ori = int(paradic['n_ori']) \n delta_min = float(paradic['delta_min'])\n sigma_min = float(paradic['sigma_min'])\n sigma_in = float(paradic['sigma_in'])\n lambda_ori = float(paradic['lambda_ori'])\n lambda_descr = float(paradic['lambda_descr'])\n #threshold defining reference orientations\n n_spo = int(paradic['n_spo'])\n \n # Read feature vectors from output files\n if (os.path.getsize(work_dir+'OUTmatches.txt') > 0 ):\n pairdata = find_nearest_keypoint(work_dir+'OUTmatches.txt', y, x)\n \n illustrate_pair(pairdata, n_bins, n_hist, n_ori, work_dir)\n\n \n # Read keys coordinates.\n d = 6+n_bins+n_hist*n_hist*n_ori # size of keydata inside pairdata\n v = n_hist*n_hist*n_ori\n [x1, y1, sigma1, theta1] = [float(x) for x in pairdata[0:4]]\n [o1, s1] = [float(x) for x in pairdata[4+v:4+v+2]]\n [x2a, y2a, sigma2a, theta2a] = [float(x) for x in pairdata[d:d+4]]\n [o2a, s2a] = [float(x) for x in pairdata[d+4+v:d+4+v+2]]\n [x2b, y2b, sigma2b, theta2b] = \\\n [float(x) for x in pairdata[2*d:2*d+4]]\n [o2b, s2b] = [float(x) for x in pairdata[2*d+4+v:2*d+4+v+2]]\n \n draw_one_match(pairdata,\n work_dir+'input_0.png',\n work_dir+'input_1.png',\n d,\n lambda_ori,\n lambda_descr,\n n_hist,\n work_dir+'OUTonepair.png')\n \n \n # Extract thumbnails.\n # keypoint 1 (image 1)\n print ' '.join(['demo_extract_patch', work_dir+'input_0.png',\n str(x1), str(y1), str(sigma1), str(theta1), str(o1), str(s1),\n str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),\n str(lambda_ori), str(lambda_descr), str(n_hist),\n work_dir+\"detail_im1\"])\n proc = self.run_proc(['demo_extract_patch', work_dir+'input_0.png',\n str(x1), str(y1), str(sigma1), str(theta1), str(o1), str(s1),\n str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),\n str(lambda_ori), str(lambda_descr), str(n_hist),\n work_dir+\"detail_im1\"])\n self.wait_proc(proc, timeout=self.timeout)\n \n # keypoint 2a (nearest neighbor in image 2)\n print ' '.join(['demo_extract_patch', work_dir+'input_1.png',\n str(x2a), str(y2a), str(sigma2a), str(theta2a), str(o2a), str(s2a),\n str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),\n str(lambda_ori), str(lambda_descr), str(n_hist),\n work_dir+\"detail_im2a\"])\n proc = self.run_proc(['demo_extract_patch', work_dir+'input_1.png',\n str(x2a), str(y2a), str(sigma2a), str(theta2a), str(o2a), str(s2a),\n str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),\n str(lambda_ori), str(lambda_descr), str(n_hist),\n work_dir+\"detail_im2a\"])\n self.wait_proc(proc, timeout=self.timeout) \n \n # keypoint 2b (second nearest neighbor in image 2)\n proc = self.run_proc(['demo_extract_patch', work_dir+'input_1.png',\n str(x2b), str(y2b), str(sigma2b), str(theta2b), str(o2b), str(s2b),\n str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),\n str(lambda_ori), str(lambda_descr), str(n_hist),\n work_dir+\"detail_im2b\"])\n self.wait_proc(proc, timeout=self.timeout) \n \n \n return 1",
"def scan(self, cut_off, r0 = 4, dr = 2, rmax = 80, bg_global = 3419, sigma = 3.5 * 13.8):\r\n \r\n start_time = time.time()\r\n for trial in range(self.dimension):\r\n \r\n max_count, max_rank = self.pick_largest(cut_off = cut_off)\r\n fitted = 0 # galaxy not fitted to a circle\r\n point = [] # \r\n circle_number = 0\r\n \r\n if max_count == -1:\r\n print(\"Scan completed, number of galaxies found is \", len(self.galaxies), \"run time is\", time.time() - start_time)\r\n break\r\n \r\n if max_count >= 0: # That means a value that is larger than cut_off exists\r\n ypos,xpos = self.rank_yx(max_rank)\r\n # print(\"max_count, y, x\", max_count, ypos, xpos) \r\n \r\n \r\n for r in range(r0, rmax, dr): # r = radius, we know the largest radius can't be >80 by inspecting the pic.\r\n print(\"locating the galaxy position at\", ypos, xpos, \"at a radius r =\", r) \r\n if fitted == 1 or r == 80:\r\n # print(\"max_count, yx\",max_count, ypos, xpos, \"cut =\", no_cut, len(new_point)/2)\r\n \r\n self.mask_region(ypos, xpos, r - dr)\r\n # print(bg_local, no_bg)\r\n if r - dr > r0: # it must be a galaxy\r\n \r\n # getting the local bg\r\n if no_bg > 3: # check if enough data to deduce bg\r\n bg = bg_local / no_bg \r\n else:\r\n bg = bg_global\r\n \r\n if circle_number >= np.pi* 2* (r - dr)**2:\r\n self.galaxies.append(galaxy(ypos, xpos, r - dr, np.array(point).sum(), bg, circle_number))\r\n\r\n else:\r\n if no_cut < circle_number/2 and circle_number*2 >= np.pi * (r0**2):\r\n self.galaxies.append(galaxy(ypos, xpos, r - dr, np.array(point).sum(), bg, circle_number))\r\n\r\n print(\"\\ngalaxy scan completed,radius =\", r - dr, \"position =\", ypos, xpos, \"max count =\", max_count, \"time = \", time.time() - start_time)\r\n\r\n break\r\n \r\n ##### Resetting parameters ####\r\n no_bg = 0 # number 0f background pixels\r\n bg_local = 0 # sum of local background noises\r\n no_cut = 0 # number of counts below cut off\r\n new_point = [] # pending pixels to be added\r\n ###############################\r\n \r\n if fitted == 0:\r\n \r\n if r == r0:\r\n \r\n for j, i in product(np.arange(ypos - r, ypos + r + 1), np.arange(xpos - r, xpos + 1 + r)): # Create square\r\n \r\n # Check if it is within the circle radius = 2\r\n if int((i - xpos) ** 2 + (j - ypos) ** 2) <= r ** 2 and 0<= j <= (self.shapes[0] - 1) and 0<= i <= self.shapes[1] - 1:\r\n \r\n i,j =[int(i), int(j)]\r\n if self.raw_image_data[j,i] == self.raw_image_data[j,i] *self.masked[j,i]: # Append the ppoint if not masked (masked = 1)\r\n\r\n if self.raw_image_data[j,i] <= cut_off:\r\n no_cut += 1\r\n\r\n if self.raw_image_data[j,i] > cut_off:\r\n point.append(self.raw_image_data[j, i])\r\n circle_number += 1\r\n\r\n if abs(self.raw_image_data[j,i] - bg_global) <= sigma:\r\n bg_local += self.raw_image_data[j,i]\r\n no_bg += 1\r\n \r\n if no_cut >= len(point)/2 or 2*circle_number < np.pi * r0**2:\r\n fitted = 1\r\n else:\r\n pass\r\n \r\n#######################################################################################################\r\n if r > r0:\r\n for j, i in product(np.arange(ypos - r, ypos + r + 1), np.arange(xpos - r, xpos + 1 + r)): \r\n \r\n # Check if data are in between the previous and the new circle\r\n if (r - dr)**2 < int((i - xpos) ** 2 + (j - ypos) ** 2) <= r ** 2 and 0<= j <= (self.shapes[0] - 1) and 0<= i <= self.shapes[1] - 1:\r\n i,j =[int(i), int(j)] # just incase \r\n \r\n if self.raw_image_data[j,i] * self.masked[j,i] == self.raw_image_data[j,i]:\r\n\r\n if self.raw_image_data[j,i] <= cut_off:\r\n no_cut += 1\r\n if 
self.raw_image_data[j,i] > cut_off:\r\n new_point.append(self.raw_image_data[j, i]) # points are pending to be added in\r\n circle_number += 1\r\n\r\n if abs(self.raw_image_data[j,i] - bg_global) <= sigma:\r\n bg_local += self.raw_image_data[j,i]\r\n no_bg += 1\r\n \r\n # Check if half of the new data points are inside cut off region\r\n if no_cut <= int(len(new_point)/2) or circle_number*2 < np.pi * r**2:\r\n for rannk in range(len(new_point)):\r\n point.append(new_point[rannk])\r\n\r\n else:\r\n fitted = 1",
"def galaxy_model_visibilities_dict(self) -> {g.Galaxy: np.ndarray}:\r\n galaxy_model_visibilities_dict = self.plane.galaxy_profile_visibilities_dict_from_grid_and_transformer(\r\n grid=self.interferometer.grid, transformer=self.interferometer.transformer\r\n )\r\n\r\n # TODO : Extend to multiple inversioons across Planes\r\n\r\n for galaxy in self.galaxies:\r\n\r\n if galaxy.has_pixelization:\r\n\r\n galaxy_model_visibilities_dict.update(\r\n {galaxy: self.inversion.mapped_reconstructed_visibilities}\r\n )\r\n\r\n return galaxy_model_visibilities_dict",
"def organize(inventory, grocery_list, exists=set()):\n\tlst = sorted(inventory, key=lambda x : x.aisle) #sort by aisle - O(N*logN)\n\taisles = [[] for y in lst if not exist_test(y.aisle, exists)] #create unique aisles only - O(N)\n\t[aisles[y.aisle].append(y.grocery) for y in lst if y.grocery in grocery_list] #append groceries - O(N*G) \n\treturn aisles",
"def update_association(self, association):\n bad_goids = set()\n # Loop through all sets of GO IDs for all genes\n for goids in association.values():\n parents = set()\n # Iterate thru each GO ID in the current gene's association\n for goid in goids:\n try:\n parents.update(self[goid].get_all_parents())\n except:\n bad_goids.add(goid.strip())\n # Add the GO parents of all GO IDs in the current gene's association\n goids.update(parents)\n if bad_goids:\n sys.stdout.write(\"{N} GO IDs in assc. are not found in the GO-DAG: {GOs}\\n\".format(\n N=len(bad_goids), GOs=\" \".join(bad_goids)))",
"def read_data_files(self):\n\n for name, snap in zip(self.names, self.snaps):\n # build the very important dictionary:\n key = f'{name}_{snap:03}' # e.g 'MW_000'\n self.galaxies[key] = Galaxy(name, snap, self.path, \n self.usesql, self.ptype, self.stride)\n self.time = self.galaxies[key].time\n\n # bits of minor housekeeping:\n # self.path = self.galaxies[key].filepath # may speed up next search\n self.filenames.append(key)",
"def galaxy_model_visibilities_dict(self) -> {g.Galaxy: np.ndarray}:\r\n galaxy_model_visibilities_dict = self.tracer.galaxy_profile_visibilities_dict_from_grid_and_transformer(\r\n grid=self.interferometer.grid, transformer=self.interferometer.transformer\r\n )\r\n\r\n # TODO : Extend to multiple inversioons across Planes\r\n\r\n for plane_index in self.tracer.plane_indexes_with_pixelizations:\r\n\r\n galaxy_model_visibilities_dict.update(\r\n {\r\n self.tracer.planes[plane_index].galaxies[\r\n 0\r\n ]: self.inversion.mapped_reconstructed_visibilities\r\n }\r\n )\r\n\r\n return galaxy_model_visibilities_dict",
"def recon_sg(obj_names, locations, if_return_assigns=False, if_add_bases=True):\n location_dict = {}\n objects = []\n\n if type(locations) == torch.Tensor:\n locations = locations.cpu().numpy()\n elif isinstance(locations, list):\n locations = np.array(locations)\n\n locations = locations.reshape(-1, 2)\n k_means_assign = kmeans(locations[:, 0])\n\n for idx, object_id in enumerate(obj_names):\n a_key = k_means_assign[idx]\n if a_key not in location_dict:\n location_dict[a_key] = [(object_id, locations[idx][1])]\n else:\n location_dict[a_key].append((object_id, locations[idx][1]))\n objects.append(object_id)\n relationships = []\n if if_add_bases:\n relationships.extend([\n [\"brown\", \"left\", \"purple\"],\n [\"purple\", \"left\", \"cyan\"],\n ])\n for du3 in location_dict:\n location = sorted(location_dict[du3], key=lambda x: x[1])\n while len(location) > 1:\n o1 = location.pop()[0]\n o2 = location[-1][0]\n relationships.append([o1, \"up\", o2])\n if if_return_assigns:\n return relationships, k_means_assign\n return relationships",
"def g_n():\n for gname in os.listdir(sroot):\n if gname != 's1-league1-game1':\n continue\n if gname.startswith('s1'):\n p0 = os.path.join(sroot, gname)\n p1 = os.path.join(p0, 'commitment', 'jperret')\n p2 = os.path.join(p0, 'commitment', 'sa')\n if os.path.isdir(p1) and os.path.isdir(p2):\n for fname in os.listdir(p1):\n if fname.endswith('.aa'):\n bname = fname[:-3]\n #~ if bname == 's1-league1-game2_07':\n #~ continue\n a = ad.Annotations(os.path.join(p1, fname))\n a.load_text(os.path.join(p0, 'unannotated', bname+'.ac'))\n a.gen_full_struct()\n a.commitments = list(u for u in a.units if u.type == 'Commitment')\n a2 = ad.Annotations(os.path.join(p2, fname))\n a2.load_text(os.path.join(p0, 'unannotated', bname+'.ac'))\n a2.gen_full_struct()\n a2.commitments = list(u for u in a2.units if u.type == 'Commitment')\n yield bname, (a, a2)",
"def subplot_adapt_images_of_galaxies(\r\n self, adapt_galaxy_image_path_dict: Dict[Galaxy, aa.Array2D]\r\n ):\r\n if adapt_galaxy_image_path_dict is None:\r\n return\r\n\r\n self.open_subplot_figure(number_subplots=len(adapt_galaxy_image_path_dict))\r\n\r\n for path, galaxy_image in adapt_galaxy_image_path_dict.items():\r\n self.figure_adapt_galaxy_image(galaxy_image=galaxy_image)\r\n\r\n self.mat_plot_2d.output.subplot_to_figure(\r\n auto_filename=\"subplot_adapt_images_of_galaxies\"\r\n )\r\n\r\n self.close_subplot_figure()",
"def __find_correlations(self, results):\n\n for result in results[:self.__result_limit]:\n\n # pub without venue\n if len(result['ven']) == 0:\n result['alternative'] = []\n\n with self.vix.searcher(weighting=Frequency) as vs:\n vq_parse = QueryParser('key', self.vix.schema).parse(result['pub']['crossref'])\n tresult = vs.search(vq_parse, limit=None, )\n if len(tresult) != 0:\n result['ven'] = {}\n result['added'] = 1\n for attr in tresult[0].items():\n result['ven'][attr[0]] = attr[1]\n\n self.__output.append(result)\n\n # venue without pub or venue with a list of pubs\n elif len(result['pub']) == 0 or (\n isinstance(result['pub'], list) and len(result['pub']) > 1):\n result['alternative'] = []\n\n with self.pix.searcher(weighting=Frequency) as ps:\n pq_parse = QueryParser('crossref', self.pix.schema).parse(result['ven']['key'])\n tresult = ps.search(pq_parse, limit=None, )\n\n if len(tresult):\n plist = []\n tmp = dict()\n for el in tresult:\n for attr in el.items():\n if attr[0] == 'title' and attr[1] not in [x['title'] for x in result['pub']]:\n plist.append(attr[1])\n break\n\n result['alternative'] = plist\n self.__output.append(result)\n\n # mixed case\n elif len(self.__output) == 0 or not result['ven']['key'] in [x['key'] for x in self.__output]:\n lis = [x for x in results if len(x['ven']) and x['ven']['key'] == result['ven']['key']]\n tmp = {}\n if len(lis) <= 1:\n tmp = {'key': result['pub']['key'],\n 'score': result['score'],\n 'pub': [x['pub'] for x in lis],\n 'ven': result['ven'],\n 'alternative': list()}\n else:\n tmp = {'key': result['ven']['key'],\n 'score': result['score'],\n 'pub': [x['pub'] for x in lis],\n 'ven': result['ven'],\n 'alternative': list()}\n plist = []\n with self.pix.searcher() as ps:\n pq_parse = QueryParser('crossref', self.pix.schema).parse(tmp['key'])\n tresult = ps.search(pq_parse, limit=None, )\n if len(tresult):\n for el in tresult:\n for attr in el.items():\n if attr[0] == 'title' and attr[1] not in [x['title'] for x in tmp['pub']]:\n plist.append(attr[1])\n break\n\n tmp['alternative'] = plist\n self.__output.append(tmp)",
"def associate(conn, detected_sources, imobj, search_radius, save):\n # Find image resolution class\n for config, res_range in res_dict.items():\n if res_range[0] < imobj.bmin <= res_range[1]:\n res_class = config\n \n # Extract all previously detected sources in the same FOV\n assoc_rows = cone_search(conn, 'assoc_source', imobj.obs_ra,\n imobj.obs_dec, search_radius)\n match_logger.info('Extracted {} sources from assoc_source table '\n 'within {} degrees.'.format(\n len(assoc_rows), search_radius))\n # Limit to sources taken from images of similar resolution\n if len(assoc_rows) > 0:\n filtered_assoc_rows = filter_res(assoc_rows, res_class)\n else:\n filtered_assoc_rows = []\n\n if not filtered_assoc_rows:\n # No previous sources found in that sky region at that resolution\n for src in detected_sources:\n src.res_class = res_class\n src.ndetect = 1\n detected_matched = []\n detected_unmatched = detected_sources\n assoc_matched = []\n assoc_unmatched = []\n else:\n # Translate row dictionaries to DetectedSource objects\n assoc_sources = []\n assoc_ids = []\n for asrc in filtered_assoc_rows:\n assoc_ids.append(asrc['id'])\n assoc_sources.append(dbclasses.DetectedSource())\n dbclasses.dict2attr(assoc_sources[-1], asrc)\n match_logger.info('Attempting to match {} sources from this image to '\n '{} sources previously detected in VLITE images...'.\n format(len(detected_sources), len(assoc_sources)))\n\n detected_matched = []\n detected_unmatched = []\n assoc_matched = []\n assoc_unmatched = []\n\n cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n\n # Print results without saving to database\n if not save:\n # Dump detected_sources into temporary table\n sql = (\n '''\n CREATE TEMP TABLE temp_source (\n src_id INTEGER,\n ra DOUBLE PRECISION,\n dec DOUBLE PRECISION\n );\n ''')\n cur.execute(sql)\n conn.commit()\n for src in detected_sources:\n cur.execute('''INSERT INTO temp_source (\n src_id, ra, dec) VALUES (%s, %s, %s)''', (\n src.src_id, src.ra, src.dec))\n conn.commit()\n # Find nearest neighbor & \"match\" if within half a beam\n sql = '''SELECT a.src_id, bb.id AS assoc_id,\n 3600*q3c_dist(a.ra, a.dec, bb.ra, bb.dec) AS sep,\n 3600*q3c_dist(a.ra, a.dec, bb.ra, bb.dec) < %s AS match\n FROM temp_source AS a, LATERAL (\n SELECT b.* FROM assoc_source AS b WHERE b.id IN %s\n ORDER BY q3c_dist(a.ra, a.dec, b.ra, b.dec) ASC LIMIT 1)\n AS bb'''\n values = (0.5*imobj.bmin, tuple(assoc_ids))\n cur.execute(sql, values)\n rows = cur.fetchall()\n cur.execute('DROP TABLE temp_source')\n conn.commit()\n match_logger.info('-----------------------------------------------'\n '-----------------------------------------------'\n '---------------------------------')\n match_logger.info('src_id match assoc_id\\tra\\t\\te_ra\\t\\t\\tdec\\t\\t'\n 'e_dec\\t\\tseparation (arcsec)\\tndetect')\n match_logger.info('-----------------------------------------------'\n '-----------------------------------------------'\n '---------------------------------')\n # Save association results for database\n else:\n # Find nearest neighbor & \"match\" if within half a beam\n sql = '''SELECT a.src_id, bb.id AS assoc_id,\n 3600*q3c_dist(a.ra, a.dec, bb.ra, bb.dec) AS sep,\n 3600*q3c_dist(a.ra, a.dec, bb.ra, bb.dec) < %s AS match\n FROM detected_source AS a, LATERAL (\n SELECT b.* FROM assoc_source AS b\n WHERE a.image_id = %s AND b.id IN %s ORDER BY\n q3c_dist(a.ra, a.dec, b.ra, b.dec) ASC LIMIT 1) AS bb'''\n values = (0.5*imobj.bmin, imobj.id, tuple(assoc_ids))\n cur.execute(sql, values)\n rows = cur.fetchall()\n\n 
cur.close()\n\n # Create dictionary of src_id keys & associated values\n rowdict = {}\n for row in rows:\n rowdict[row['src_id']] = [row['assoc_id'], row['sep'], row['match']]\n\n for src in detected_sources:\n # Get the associated source object\n asrc = [msrc for msrc in assoc_sources if \\\n msrc.id == rowdict[src.src_id][0]][0]\n if rowdict[src.src_id][2]:\n # It's a match!\n src.assoc_id = asrc.id\n detected_matched.append(src)\n # Compute weighted averages\n cur_sigra_sq = asrc.e_ra * asrc.e_ra\n cur_sigdec_sq = asrc.e_dec * asrc.e_dec\n asrc.e_ra = np.sqrt(1. / (\n (1. / cur_sigra_sq) + (1. / (src.e_ra * src.e_ra))))\n asrc.ra = (asrc.e_ra * asrc.e_ra) * (\n (asrc.ra / cur_sigra_sq) + (src.ra / (\n src.e_ra * src.e_ra)))\n asrc.e_dec = np.sqrt(1. / (\n (1. / cur_sigdec_sq) + (1. / (src.e_dec * src.e_dec))))\n asrc.dec = (asrc.e_dec * asrc.e_dec) * (\n (asrc.dec / cur_sigdec_sq) + (src.dec / (\n src.e_dec * src.e_dec)))\n asrc.ndetect += 1\n assoc_matched.append(asrc)\n else:\n # No match -- new source\n src.res_class = res_class\n src.ndetect = 1\n detected_unmatched.append(src)\n assoc_unmatched.append(asrc)\n if not save:\n match_logger.info('{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}'.format(\n src.src_id, rowdict[src.src_id][2], asrc.id, asrc.ra,\n asrc.e_ra, asrc.dec, asrc.e_dec, rowdict[src.src_id][1],\n asrc.ndetect))\n\n match_logger.info(' -- number of matches: {}'.format(len(detected_matched)))\n match_logger.info(' -- number of new sources to add: {}'.format(\n len(detected_unmatched)))\n\n return detected_matched, detected_unmatched, assoc_matched, assoc_unmatched",
"def extract_attributes_of_galaxies(self, cls, attr_name, filter_nones=False):\r\n if filter_nones:\r\n\r\n return [\r\n galaxy.extract_attribute(cls=cls, attr_name=attr_name)\r\n for galaxy in self.galaxies\r\n if galaxy.extract_attribute(cls=cls, attr_name=attr_name) is not None\r\n ]\r\n\r\n else:\r\n\r\n return [\r\n galaxy.extract_attribute(cls=cls, attr_name=attr_name)\r\n for galaxy in self.galaxies\r\n ]",
"def populate(self, batches='all', verbose=True):\n\n dk = self.name\n meta = self._meta\n data = self._data\n stack = qp.Stack(name='aggregations', add_data={dk: (data, meta)})\n batches = stack._check_batches(dk, batches)\n for name in batches:\n batch = meta['sets']['batches'][name]\n xys = batch['x_y_map']\n fs = batch['x_filter_map']\n fy = batch['y_filter_map']\n my = batch['yks']\n total_len = len(xys) + len(batch['y_on_y'])\n for idx, xy in enumerate(xys, start=1):\n x, y = xy\n if x == '@':\n if fs[y[0]] is None:\n fi = 'no_filter'\n else:\n fi = {fs[y[0]]: {fs[y[0]]: 0}}\n stack.add_link(dk, fi, x='@', y=y)\n else:\n if fs[x] is None:\n fi = 'no_filter'\n else:\n fi = {fs[x]: {fs[x]: 0}}\n stack.add_link(dk, fi, x=x, y=y)\n if verbose:\n done = float(idx) / float(total_len) *100\n print('\\r', end=' ')\n time.sleep(0.01)\n print('Batch [{}]: {} %'.format(name, round(done, 1)), end=' ')\n sys.stdout.flush()\n for idx, y_on_y in enumerate(batch['y_on_y'], len(xys)+1):\n if fy[y_on_y] is None:\n fi = 'no_filter'\n else:\n fi = {fy[y_on_y]: {fy[y_on_y]: 1}}\n stack.add_link(dk, fi, x=my[1:], y=my)\n if verbose:\n done = float(idx) / float(total_len) *100\n print('\\r', end=' ')\n time.sleep(0.01)\n print('Batch [{}]: {} %'.format(name, round(done, 1)), end=' ')\n sys.stdout.flush()\n if verbose:\n print('\\n')\n return stack",
"def _extract_collection(self, result):\n if isinstance(result, GeometryCollection):\n matching_geometries = list()\n for part in result:\n if self.geom_dim(part) == self.dim:\n matching_geometries.append(part)\n if self.dim == 0:\n points = list()\n for geom in matching_geometries:\n if isinstance(geom, Point):\n points.append(geom)\n elif isinstance(geom, MultiPoint):\n points.extend(geom.geoms)\n return MultiPoint(points)\n elif self.dim == 1:\n return linemerge(matching_geometries)\n elif self.dim == 2:\n return cascaded_union(matching_geometries)\n else:\n return result"
] |
[
"0.526028",
"0.5191906",
"0.5074366",
"0.4874031",
"0.487003",
"0.4859161",
"0.48347816",
"0.48158997",
"0.4813295",
"0.47717586",
"0.47441304",
"0.47319773",
"0.47140983",
"0.47064015",
"0.47041777",
"0.46938083",
"0.46632653",
"0.4642102",
"0.4637065",
"0.46200624",
"0.46047163",
"0.45991635",
"0.45868886",
"0.45816585",
"0.45548975",
"0.45401353",
"0.45341364",
"0.45201433",
"0.45196182",
"0.45142543"
] |
0.5272391
|
0
|
The Amazon Resource Name (ARN) of the inference scheduler being created.
|
def inference_scheduler_arn(self) -> Optional[str]:
return pulumi.get(self, "inference_scheduler_arn")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def task_definition_arn(self) -> str:\n return pulumi.get(self, \"task_definition_arn\")",
"def invoke_arn(self) -> str:\n return pulumi.get(self, \"invoke_arn\")",
"def scheduler(self):\n return self._get_param(\"Scheduler\")",
"def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")",
"def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")",
"def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")",
"def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")",
"def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")",
"def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")",
"def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")",
"def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")",
"def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")",
"def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")",
"def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")",
"def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")",
"def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")",
"def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")",
"def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")",
"def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")",
"def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")",
"def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")",
"def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")",
"def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")",
"def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")",
"def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")",
"def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")",
"def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")",
"def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")",
"def arn(self) -> str:\n return pulumi.get(self, \"arn\")",
"def arn(self) -> str:\n return pulumi.get(self, \"arn\")"
] |
[
"0.6681855",
"0.6431098",
"0.6323862",
"0.6243085",
"0.6243085",
"0.6243085",
"0.6243085",
"0.6243085",
"0.6243085",
"0.6243085",
"0.6243085",
"0.6243085",
"0.6243085",
"0.6243085",
"0.6243085",
"0.6243085",
"0.6243085",
"0.6243085",
"0.6243085",
"0.6243085",
"0.6243085",
"0.6243085",
"0.6243085",
"0.6243085",
"0.6243085",
"0.6243085",
"0.6243085",
"0.6243085",
"0.6239003",
"0.6239003"
] |
0.85025364
|
0
|
Any tags associated with the inference scheduler.
|
def tags(self) -> Optional[Sequence['outputs.InferenceSchedulerTag']]:
return pulumi.get(self, "tags")
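For orientation only, a brief hedged sketch of how these tags might be read inside a Pulumi program via the aws-native SDK; the scheduler name is a placeholder, and the key/value attribute names follow the usual AWS Native tag shape, so treat them as assumptions rather than facts from this row.

# Illustrative sketch, not part of the dataset: list the tags of a
# hypothetical scheduler from within a running Pulumi program.
import pulumi_aws_native as aws_native

sched = aws_native.lookoutequipment.get_inference_scheduler(
    inference_scheduler_name="my-scheduler")  # hypothetical name
for tag in (sched.tags or []):
    # Assumes each InferenceSchedulerTag exposes `key` and `value`.
    print(f"{tag.key}={tag.value}")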
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def tags(self) -> Optional[Sequence['outputs.FuotaTaskTag']]:\n return pulumi.get(self, \"tags\")",
"def tags():",
"def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ResolverRuleTagArgs']]]]:\n return pulumi.get(self, \"tags\")",
"def tag_specifications(self) -> pulumi.Output[Optional[Sequence['outputs.LaunchTemplateTagSpecification']]]:\n return pulumi.get(self, \"tag_specifications\")",
"def tag_specifications(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LaunchTemplateTagSpecificationArgs']]]]:\n return pulumi.get(self, \"tag_specifications\")",
"def tags(self) -> Optional[Sequence['outputs.ApplicationTag']]:\n return pulumi.get(self, \"tags\")",
"def tags(self) -> Optional[Sequence['outputs.ApplicationTag']]:\n return pulumi.get(self, \"tags\")",
"def tags(self) -> Optional[Any]:\n return pulumi.get(self, \"tags\")",
"def tags(self) -> Optional[Sequence['outputs.AssessmentTag']]:\n return pulumi.get(self, \"tags\")",
"def get_tasks_tag(self, tag=None):\n cur = self.conn.cursor()\n if tag == None:\n return None\n else:\n cur.execute(\"SELECT * FROM tangerine WHERE '\" + str(tag) + \"'=any(tags);\")\n \n self.conn.commit()\n return [Task(self.columns, task) for task in cur.fetchall()]",
"def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IpamTagArgs']]]]:\n return pulumi.get(self, \"tags\")",
"def tags(self) -> Optional[Sequence['outputs.EventSubscriptionTag']]:\n return pulumi.get(self, \"tags\")",
"def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EnvironmentTagArgs']]]]:\n return pulumi.get(self, \"tags\")",
"def tags(self) -> Optional[Mapping[str, Any]]:\n return pulumi.get(self, \"tags\")",
"def tags(self) -> pulumi.Output[Optional[Sequence['outputs.ResolverRuleTag']]]:\n return pulumi.get(self, \"tags\")",
"def tags(self) -> Optional[Mapping[str, Sequence[str]]]:\n return pulumi.get(self, \"tags\")",
"def tags(self) -> Mapping[str, Any]:\n return pulumi.get(self, \"tags\")",
"def tags(self) -> Mapping[str, Any]:\n return pulumi.get(self, \"tags\")",
"def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DataSetTagArgs']]]]:\n return pulumi.get(self, \"tags\")",
"def defined_tags(self):\n return self._defined_tags",
"def tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"tags\")",
"def tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"tags\")",
"def tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"tags\")",
"def tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"tags\")",
"def tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"tags\")",
"def tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"tags\")",
"def tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"tags\")",
"def tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"tags\")",
"def tags(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"tags\")",
"def tags(self) -> pulumi.Output[Optional[Mapping[str, Any]]]:\n return pulumi.get(self, \"tags\")"
] |
[
"0.6255115",
"0.60453326",
"0.5894944",
"0.5880745",
"0.58180666",
"0.5779538",
"0.5779538",
"0.5774154",
"0.5734583",
"0.57167625",
"0.56472594",
"0.5631142",
"0.5600881",
"0.55336976",
"0.5526532",
"0.5525852",
"0.55180365",
"0.55180365",
"0.5504982",
"0.54882246",
"0.5481916",
"0.5481916",
"0.5481916",
"0.5481916",
"0.5481916",
"0.5481916",
"0.5481916",
"0.5481916",
"0.54744315",
"0.54283583"
] |
0.7780149
|
0
|
Resource schema for LookoutEquipment InferenceScheduler.
|
def get_inference_scheduler(inference_scheduler_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetInferenceSchedulerResult:
__args__ = dict()
__args__['inferenceSchedulerName'] = inference_scheduler_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('aws-native:lookoutequipment:getInferenceScheduler', __args__, opts=opts, typ=GetInferenceSchedulerResult).value
return AwaitableGetInferenceSchedulerResult(
data_delay_offset_in_minutes=pulumi.get(__ret__, 'data_delay_offset_in_minutes'),
data_input_configuration=pulumi.get(__ret__, 'data_input_configuration'),
data_output_configuration=pulumi.get(__ret__, 'data_output_configuration'),
data_upload_frequency=pulumi.get(__ret__, 'data_upload_frequency'),
inference_scheduler_arn=pulumi.get(__ret__, 'inference_scheduler_arn'),
role_arn=pulumi.get(__ret__, 'role_arn'),
tags=pulumi.get(__ret__, 'tags'))
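As a usage note, a minimal sketch of consuming this lookup from a Pulumi program; the scheduler name and export keys below are illustrative assumptions, not values taken from this dataset.

# Illustrative sketch, not part of the dataset: resolve an existing
# scheduler and surface a couple of its fields as stack outputs.
import pulumi
import pulumi_aws_native as aws_native

sched = aws_native.lookoutequipment.get_inference_scheduler(
    inference_scheduler_name="my-scheduler")  # hypothetical name

# The result mirrors the getters shown in this row (ARN, tags, etc.).
pulumi.export("scheduler_arn", sched.inference_scheduler_arn)
pulumi.export("scheduler_upload_frequency", sched.data_upload_frequency)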
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def instance_schema(self):\n raise NotImplementedError",
"def get_inference_scheduler_output(inference_scheduler_name: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetInferenceSchedulerResult]:\n ...",
"def inference_scheduler_arn(self) -> Optional[str]:\n return pulumi.get(self, \"inference_scheduler_arn\")",
"def from_dict(cls, dikt) -> \"Scheduler\":\n return util.deserialize_model(dikt, cls)",
"def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'start_date': 'datetime',\n 'length_minutes': 'int',\n 'activities': 'list[BuAgentScheduleActivity]',\n 'manually_edited': 'bool',\n 'schedule': 'BuScheduleReference'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'start_date': 'startDate',\n 'length_minutes': 'lengthMinutes',\n 'activities': 'activities',\n 'manually_edited': 'manuallyEdited',\n 'schedule': 'schedule'\n }\n\n self._id = None\n self._start_date = None\n self._length_minutes = None\n self._activities = None\n self._manually_edited = None\n self._schedule = None",
"def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n kms_key: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n schedule_config: Optional[pulumi.Input[pulumi.InputType['DataIntegrationScheduleConfigArgs']]] = None,\n source_uri: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n __props__=None):\n ...",
"def appointments(resources_slots, from_date, to_date, resources=[], status_all=[], resources_all={}):\n\n query = \"\"\"\n SELECT A.STARTTIME, A.ENDTIME, V.APPOINTMENTTYPEID, V.TYPE, \\\n A.RESOURCEID, APPOINTMENTDATE, S.STATUS, S.APPOINTMENTSTATUSID\n FROM PATIENT P\n JOIN PATIENT_APPOINTMENTS AS A ON P.PATIENTID = A.PATIENTID\n JOIN APPOINTMENTTYPE AS V ON a.APPOINTMENTTYPEID = v.APPOINTMENTTYPEID\n LEFT OUTER JOIN APPOINTMENTSTATUS AS S ON A.APPOINTMENTSTATUSID = S.APPOINTMENTSTATUSID\n left join (PATIENTINSURANCE PAI\n join INSURANCE_TYPE IT on IT.INSURANCE_TYPE_ID=PAI.INSURANCE_TYPEID\n join INSURANCE_COMPANY IC on IC.INSURANCE_COMPANY_ID=PAI.INSURANCE_COMPANY_ID)\n on P.PatientID=PAI.PATIENTID and PAI.INSURANCE_TYPEID=1 and PAI.ACTIVE = 1\n WHERE V.APPOINTMENTTYPEID = A.APPOINTMENTTYPEID AND P.PATIENTID = A.PATIENTID\n AND A.ACTIVE = 1\n \"\"\"\n\n if from_date and to_date:\n query += \" AND APPOINTMENTDATE >= '%s' AND APPOINTMENTDATE <= '%s' \" % (from_date, to_date)\n\n if resources:\n query += \" AND A.RESOURCEID IN (%s)\" % ','.join([str(r) for r in resources])\n\n query += \" ORDER BY A.STARTTIME\"\n results = []\n if not EMRSQLServer.connection():\n return results\n\n rows = EMRSQLServer.execute_query(query)\n\n output = defaultdict(list)\n for row in rows:\n output[row['RESOURCEID']].append(row)\n for item, value in output.items():\n studies = defaultdict(list)\n for i, v in enumerate(output[item]):\n studies_start_date = v['APPOINTMENTDATE'].strftime('%Y-%m-%d')\n studies[item].append({\n 'name': v['TYPE'],\n 'start_time': v['STARTTIME'],\n 'end_time': v['ENDTIME'],\n 'studies_start_date': studies_start_date,\n 'status': v['STATUS'],\n 'APPOINTMENTSTATUSID': v['APPOINTMENTSTATUSID']\n })\n\n studies_by_date = defaultdict(list)\n studies_seen = defaultdict(list)\n for st in studies[item]:\n studies_by_date[st['studies_start_date']].append({\n 'name': st['name'],\n 'start_time': st['start_time'].strftime('%H:%M:%S'),\n 'end_time': st['end_time'].strftime('%H:%M:%S'),\n 'status': st['status']\n })\n studies_seen[st['APPOINTMENTSTATUSID']].append({\n 'name': st['name'],\n 'start_time': st['start_time'].strftime('%H:%M:%S'),\n 'end_time': st['end_time'].strftime('%H:%M:%S'),\n 'status': st['status']\n })\n\n number_of_confirmed_studies = sum([len(studies_seen[int(i)]) for i in status_all])\n days_taken_for_studies = len(studies_by_date)\n total_slots_for_days = resources_slots[item] * days_taken_for_studies\n utilization = (number_of_confirmed_studies * 100) // total_slots_for_days\n\n if utilization <= 79:\n color_code, text_color = '#d9534f', 'white'\n elif (utilization >= 80) and (utilization <= 89):\n color_code, text_color = '#ffe14b', 'black'\n elif utilization >= 90:\n color_code, text_color = '#3c903d', 'white'\n\n results.append({\n 'ResourceID': item,\n 'ResourceName': resources_all[item],\n 'TotalStudies': len(value),\n 'Studies': studies[item],\n 'studies_by_date': studies_by_date,\n 'utilization': '{0}%'.format(utilization),\n 'scheduled_percentage': '{0}%'.format((len(value) * 100) // total_slots_for_days),\n 'number_of_confirmed_studies': number_of_confirmed_studies,\n 'seen_percentage': '{0}%'.format((number_of_confirmed_studies * 100) // len(value)),\n 'total_slots_in_a_day': total_slots_for_days,\n 'color_code': color_code,\n 'text_color': text_color\n })\n return results",
"def test_get_monitoring_schedules_manufacturer_v3(self):\n pass",
"def __init__(self, schedule_type=None, datetime=None): # noqa: E501\n self.openapi_types = {\n 'schedule_type': str,\n 'datetime': datetime\n }\n\n self.attribute_map = {\n 'schedule_type': 'schedule_type',\n 'datetime': 'datetime'\n }\n\n self._schedule_type = schedule_type\n self._datetime = datetime",
"def schema(self):\n return {\n # _id is md5 hash of (effectiveDate, carrier, flightNumber)\n 'carrier' : { 'type': 'string', 'nullable': False, 'required': True},\n 'flightNumber' : { 'type': 'integer', 'nullable': False, 'required': True},\n 'serviceType' : {'type': 'string', 'nullable': True},\n 'effectiveDate' : { 'type': 'datetime', 'required': True, 'datetime_format': '%d/%m/%Y'},\n 'discontinuedDate' : { 'type': 'datetime', 'required': True, 'datetime_format': '%d/%m/%Y'},\n 'day1' : { 'type': 'boolean', 'nullable': True},\n 'day2' : { 'type': 'boolean', 'nullable': True},\n 'day3' : { 'type': 'boolean', 'nullable': True},\n 'day4' : { 'type': 'boolean', 'nullable': True},\n 'day5' : { 'type': 'boolean', 'nullable': True},\n 'day6' : { 'type': 'boolean', 'nullable': True},\n 'day7' : { 'type': 'boolean', 'nullable': True},\n 'departureAirport' : { 'type': 'dict', 'nullable': False, 'required': True},\n 'departureCity' : { 'type': 'string', 'nullable': True},\n 'departureState' : { 'type': 'string', 'nullable': True},\n 'departureCountry' : { 'type': 'string', 'nullable': True},\n 'departureTimePub' : { 'type': 'string', 'nullable': True},\n #'departureTimeActual' : { 'type': 'datetime', 'nullable': True, 'datetime_format': '%H:%M:%S'},\n 'departureUTCVariance' : { 'type': 'integer', 'nullable': True},\n #'departureTerminal' : { 'type': 'string', 'nullable': True},\n 'arrivalAirport' : { 'type': 'dict', 'nullable': False, 'required': True},\n 'arrivalCity' : { 'type': 'string', 'nullable': True},\n 'arrivalState' : { 'type': 'string', 'nullable': True},\n 'arrivalCountry' : { 'type': 'string', 'nullable': True},\n 'arrivalTimePub' : { 'type': 'string', 'nullable': True},\n #'arrivalTimeActual' : { 'type': 'datetime', 'nullable': True, 'datetime_format': '%H:%M:%S'},\n 'arrivalUTCVariance' : { 'type': 'integer', 'nullable': True},\n #'arrivalTerminal' : { 'type': 'string', 'nullable': True},\n #'subAircraftCode' : { 'type': 'string', 'nullable': True},\n #'groupAircraftCode' : { 'type': 'string', 'nullable': True},\n #'classes' : { 'type': 'string', 'nullable': True},\n #'classesFull' : { 'type': 'string', 'nullable': True},\n #'trafficRestriction' : { 'type': 'string', 'nullable': True},\n 'flightArrivalDayIndicator' : { 'type': 'string', 'nullable': True},\n 'stops' : { 'type': 'integer', 'nullable': True},\n 'stopCodes' : { 'type': 'list', 'nullable': True},\n #'stopRestrictions' : { 'type': 'string', 'nullable': True},\n #'stopsubAircraftCodes' : { 'type': 'integer', 'nullable': True},\n #'aircraftChangeIndicator' : { 'type': 'string', 'nullable': True},\n #'meals' : { 'type': 'string', 'nullable': True},\n #'flightDistance' : { 'type': 'integer', 'nullable': True},\n #'elapsedTime' : { 'type': 'integer', 'nullable': True},\n #'layoverTime' : { 'type': 'integer', 'nullable': True},\n #'inFlightService' : { 'type': 'string', 'nullable': True},\n #'SSIMcodeShareStatus' : { 'type': 'string', 'nullable': True},\n #'SSIMcodeShareCarrier' : { 'type': 'string', 'nullable': True},\n #'codeshareIndicator' : { 'type': 'boolean', 'nullable': True},\n #'wetleaseIndicator' : { 'type': 'boolean', 'nullable': True},\n #'codeshareInfo' : { 'type': 'list', 'nullable': True},\n #'wetleaseInfo' : { 'type': 'string', 'nullable': True},\n #'operationalSuffix' : { 'type': 'string', 'nullable': True},\n #'ivi' : { 'type': 'integer', 'nullable': True},\n #'leg' : { 'type': 'integer', 'nullable': True},\n #'recordId' : { 'type': 'integer', 'nullable': True},\n #'daysOfOperation' : { 'type': 'string', 
'nullable': True},\n #'totalFrequency' : { 'type': 'integer', 'nullable': True},\n 'weeklyFrequency' : { 'type': 'integer', 'nullable': True, 'required': False},\n #'availSeatMi' : { 'type': 'integer', 'nullable': True},\n #'availSeatKm' : { 'type': 'integer', 'nullable': True},\n #'intStopArrivaltime' : { 'type': 'list', 'nullable': True},\n #'intStopDepartureTime' : { 'type': 'list', 'nullable': True},\n #'intStopNextDay' : { 'type': 'list', 'nullable': True},\n #'physicalLegKey' : { 'type': 'list', 'nullable': True},\n #'departureAirportName' : { 'type': 'string', 'nullable': True},\n #'departureCityName' : { 'type': 'string', 'nullable': True},\n #'departureCountryName' : { 'type': 'string', 'nullable': True},\n #'arrivalAirportName' : { 'type': 'string', 'nullable': True},\n #'arrivalCityName' : { 'type': 'string', 'nullable': True},\n #'arrivalCountryName' : { 'type': 'string', 'nullable': True},\n #'aircraftType' : { 'type': 'string', 'nullable': True},\n #'carrierName' : { 'type': 'string', 'nullable': True},\n 'totalSeats' : { 'type': 'integer', 'nullable': True}}\n #'firstClassSeats' : { 'type': 'integer', 'nullable': True},\n #'businessClassSeats' : { 'type': 'integer', 'nullable': True},\n #'premiumEconomyClassSeats' : { 'type': 'integer', 'nullable': True},\n #'economyClassSeats' : { 'type': 'integer', 'nullable': True},\n #'aircraftTonnage' : { 'type': 'integer', 'nullable': True}}",
"def vehicles_schema():\n definitions, properties = {}, {}\n\n prop, defn = common.property_definition(\"links\")\n definitions.update(defn)\n properties.update(prop)\n\n prop, _ = common.property_definition(\"last_updated\", ref=common.definition_id(\"timestamp\"))\n properties.update(prop)\n\n prop, defn = common.property_definition(\"ttl\")\n definitions.update(defn)\n properties.update(prop)\n\n state_defs, transitions = common.vehicle_state_machine(\"last_vehicle_state\", \"last_event_types\")\n definitions.update(state_defs)\n\n schema = endpoint_schema(\"vehicles\", definitions)\n\n # update list of required and properties object\n schema[\"required\"].extend([\"last_updated\", \"ttl\"])\n schema[\"properties\"].update(properties)\n\n # add state machine transition rules\n schema[\"properties\"][\"data\"][\"properties\"][\"vehicles\"][\"items\"][\"allOf\"].append(transitions)\n\n # verify and return\n return common.check_schema(schema)",
"def scheduler(self):\n return self._get_param(\"Scheduler\")",
"def scheduler():\r\n document.add_heading('Scheduler details', 1)\r\n scheduler_metrics = ['customProperties', 'schedulerServiceType', 'maxConcurrentEngines', 'engineTimeout', 'tags', 'hostname',\r\n 'logVerbosityAuditActivity','logVerbosityAuditSecurity','logVerbosityService','logVerbosityApplication',\r\n 'logVerbosityAudit','logVerbosityPerformance','logVerbositySecurity','logVerbositySystem',\r\n 'logVerbosityTaskExecution']\r\n\r\n schedulernodes = get_qlik_sense.get_scheduler()\r\n num_of_schedulers = len(schedulernodes)\r\n num_of_scheduler_metrics = len(scheduler_metrics)\r\n table = document.add_table(rows=num_of_scheduler_metrics+1, cols=num_of_schedulers+1)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'Metric'\r\n for item in range(0, num_of_schedulers):\r\n row.cells[item+1].text = schedulernodes[item][15]\r\n for item in range(num_of_scheduler_metrics):\r\n row = table.rows[item+1]\r\n row.cells[0].text = str(scheduler_metrics[item])\r\n for schedulernode in range(num_of_schedulers):\r\n row.cells[schedulernode+1].text = str(schedulernodes[schedulernode][item])\r\n document.add_page_break()",
"def __init__(self, *args, **kwargs):\n BaseScheduler.__init__(self, *args, **kwargs)",
"def __init__(__self__, *,\n arn: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n kms_key: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n schedule_config: Optional[pulumi.Input['DataIntegrationScheduleConfigArgs']] = None,\n source_uri: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):\n if arn is not None:\n pulumi.set(__self__, \"arn\", arn)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if kms_key is not None:\n pulumi.set(__self__, \"kms_key\", kms_key)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if schedule_config is not None:\n pulumi.set(__self__, \"schedule_config\", schedule_config)\n if source_uri is not None:\n pulumi.set(__self__, \"source_uri\", source_uri)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if tags_all is not None:\n pulumi.set(__self__, \"tags_all\", tags_all)",
"def schema(self):\n pass",
"def schedule(self) -> pulumi.Input['ScheduleArgs']:\n return pulumi.get(self, \"schedule\")",
"def resource_type(cls):\n pass",
"def __init__(self):\n self.swagger_types = {\n 'max_occupancy_percent_for_deferred_work': 'int',\n 'default_shrinkage_percent': 'float',\n 'shrinkage_overrides': 'ShrinkageOverrides',\n 'planning_period': 'ValueWrapperPlanningPeriodSettings',\n 'start_day_of_weekend': 'str'\n }\n\n self.attribute_map = {\n 'max_occupancy_percent_for_deferred_work': 'maxOccupancyPercentForDeferredWork',\n 'default_shrinkage_percent': 'defaultShrinkagePercent',\n 'shrinkage_overrides': 'shrinkageOverrides',\n 'planning_period': 'planningPeriod',\n 'start_day_of_weekend': 'startDayOfWeekend'\n }\n\n self._max_occupancy_percent_for_deferred_work = None\n self._default_shrinkage_percent = None\n self._shrinkage_overrides = None\n self._planning_period = None\n self._start_day_of_weekend = None",
"def resource_type(self) -> str:\n return 'TABLE'",
"def schema(self):\n raise NotImplementedError",
"def stops_schema():\n definitions, properties = {}, {}\n\n prop, _ = common.property_definition(\"last_updated\", ref=common.definition_id(\"timestamp\"))\n properties.update(prop)\n\n prop, defn = common.property_definition(\"ttl\")\n definitions.update(defn)\n properties.update(prop)\n\n stop_defs = common.stop_definitions()\n definitions.update(stop_defs)\n\n schema = endpoint_schema(\"stops\", definitions)\n\n # update list of required and properties object\n schema[\"required\"].extend([\"last_updated\", \"ttl\"])\n schema[\"properties\"].update(properties)\n\n # verify and return\n return common.check_schema(schema)",
"def schema(self):",
"def compute_resources(instance: dict):\r\n\r\n # Retrieve usefull infos\r\n Interventions = instance[INTERVENTIONS_STR]\r\n T_max = instance[T_STR]\r\n Resources = instance[RESOURCES_STR]\r\n # Init resource usage dictionnary for each resource and time\r\n resources_usage = {}\r\n for resource_name in Resources.keys():\r\n resources_usage[resource_name] = np.zeros(T_max)\r\n # Compute value for each resource and time step\r\n for intervention_name, intervention in Interventions.items():\r\n # start time should be defined (already checked in scheduled constraint checker)\r\n if not START_STR in intervention:\r\n continue\r\n start_time = intervention[START_STR]\r\n start_time_idx = start_time - 1\r\n # index of list starts at 0\r\n intervention_worload = intervention[RESOURCE_CHARGE_STR]\r\n intervention_delta = int(intervention[DELTA_STR][start_time_idx])\r\n # compute effective worload\r\n for (\r\n resource_name,\r\n intervention_resource_worload,\r\n ) in intervention_worload.items():\r\n for time in range(start_time_idx, start_time_idx + intervention_delta):\r\n # null values are not available\r\n if (\r\n str(time + 1) in intervention_resource_worload\r\n and str(start_time) in intervention_resource_worload[str(time + 1)]\r\n ):\r\n resources_usage[resource_name][\r\n time\r\n ] += intervention_resource_worload[str(time + 1)][str(start_time)]\r\n\r\n return resources_usage",
"def _create_schedules(self):\n\n ''''''",
"def __init__(self, valid_days=None, start_date=None, schedule_typicality=None, schedule_type=None, schedule_name=None, removed_dates_notes=None, removed_dates=None, rating_start_date=None, rating_end_date=None, rating_description=None, end_date=None, description=None, added_dates_notes=None, added_dates=None, local_vars_configuration=None): # noqa: E501 # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._valid_days = None\n self._start_date = None\n self._schedule_typicality = None\n self._schedule_type = None\n self._schedule_name = None\n self._removed_dates_notes = None\n self._removed_dates = None\n self._rating_start_date = None\n self._rating_end_date = None\n self._rating_description = None\n self._end_date = None\n self._description = None\n self._added_dates_notes = None\n self._added_dates = None\n self.discriminator = None\n\n if valid_days is not None:\n self.valid_days = valid_days\n if start_date is not None:\n self.start_date = start_date\n if schedule_typicality is not None:\n self.schedule_typicality = schedule_typicality\n self.schedule_type = schedule_type\n self.schedule_name = schedule_name\n if removed_dates_notes is not None:\n self.removed_dates_notes = removed_dates_notes\n if removed_dates is not None:\n self.removed_dates = removed_dates\n self.rating_start_date = rating_start_date\n self.rating_end_date = rating_end_date\n self.rating_description = rating_description\n if end_date is not None:\n self.end_date = end_date\n self.description = description\n if added_dates_notes is not None:\n self.added_dates_notes = added_dates_notes\n if added_dates is not None:\n self.added_dates = added_dates",
"def __init__(self, dry_cargo=None, rocket=None, decay=None, type=None, port1=None, introduction_date=None, port2=None, free_flight_time=None, diameter=None, design_company=None, discharge=None, port2_docked_time=None, id=None, rebuilder=None, port1_docking_date=None, height=None, model_end_date=None, number_of_launches=None, port1_undocking_date=None, model_end_year=None, cargo_water=None, port2_undocking_date=None, port1_docked_time=None, weight=None, version=None, apoapsis=None, discharge_average=None, total_cargo=None, number_of_crew=None, power_type=None, cargo_fuel=None, cargo_gas=None, mass=None, description=None, engine_type=None, total_mass=None, crew=None, docked_time=None, assembly=None, _class=None, model_start_date=None, inclination=None, periapsis=None, regime=None, port2_docking_date=None, length=None, launch=None, label=None, number_of_seats=None, model_start_year=None, width=None, related_mean_of_transportation=None, target_space_station=None): # noqa: E501\n\n\n self.openapi_types = {\n 'dry_cargo': List[object],\n 'rocket': List[object],\n 'decay': List[str],\n 'type': List[str],\n 'port1': List[object],\n 'introduction_date': List[str],\n 'port2': List[object],\n 'free_flight_time': List[object],\n 'diameter': List[object],\n 'design_company': List[object],\n 'discharge': List[float],\n 'port2_docked_time': List[object],\n 'id': str,\n 'rebuilder': List[object],\n 'port1_docking_date': List[str],\n 'height': List[object],\n 'model_end_date': List[str],\n 'number_of_launches': List[int],\n 'port1_undocking_date': List[str],\n 'model_end_year': List[str],\n 'cargo_water': List[object],\n 'port2_undocking_date': List[str],\n 'port1_docked_time': List[object],\n 'weight': List[object],\n 'version': List[object],\n 'apoapsis': List[object],\n 'discharge_average': List[float],\n 'total_cargo': List[object],\n 'number_of_crew': List[int],\n 'power_type': List[object],\n 'cargo_fuel': List[object],\n 'cargo_gas': List[object],\n 'mass': List[object],\n 'description': List[str],\n 'engine_type': List[object],\n 'total_mass': List[object],\n 'crew': List[object],\n 'docked_time': List[object],\n 'assembly': List[object],\n '_class': List[object],\n 'model_start_date': List[str],\n 'inclination': List[float],\n 'periapsis': List[object],\n 'regime': List[str],\n 'port2_docking_date': List[str],\n 'length': List[object],\n 'launch': List[str],\n 'label': List[str],\n 'number_of_seats': List[int],\n 'model_start_year': List[str],\n 'width': List[object],\n 'related_mean_of_transportation': List[object],\n 'target_space_station': List[object]\n }\n\n self.attribute_map = {\n 'dry_cargo': 'dryCargo',\n 'rocket': 'rocket',\n 'decay': 'decay',\n 'type': 'type',\n 'port1': 'port1',\n 'introduction_date': 'introductionDate',\n 'port2': 'port2',\n 'free_flight_time': 'freeFlightTime',\n 'diameter': 'diameter',\n 'design_company': 'designCompany',\n 'discharge': 'discharge',\n 'port2_docked_time': 'port2DockedTime',\n 'id': 'id',\n 'rebuilder': 'rebuilder',\n 'port1_docking_date': 'port1DockingDate',\n 'height': 'height',\n 'model_end_date': 'modelEndDate',\n 'number_of_launches': 'numberOfLaunches',\n 'port1_undocking_date': 'port1UndockingDate',\n 'model_end_year': 'modelEndYear',\n 'cargo_water': 'cargoWater',\n 'port2_undocking_date': 'port2UndockingDate',\n 'port1_docked_time': 'port1DockedTime',\n 'weight': 'weight',\n 'version': 'version',\n 'apoapsis': 'apoapsis',\n 'discharge_average': 'dischargeAverage',\n 'total_cargo': 'totalCargo',\n 'number_of_crew': 'numberOfCrew',\n 
'power_type': 'powerType',\n 'cargo_fuel': 'cargoFuel',\n 'cargo_gas': 'cargoGas',\n 'mass': 'mass',\n 'description': 'description',\n 'engine_type': 'engineType',\n 'total_mass': 'totalMass',\n 'crew': 'crew',\n 'docked_time': 'dockedTime',\n 'assembly': 'assembly',\n '_class': 'class',\n 'model_start_date': 'modelStartDate',\n 'inclination': 'inclination',\n 'periapsis': 'periapsis',\n 'regime': 'regime',\n 'port2_docking_date': 'port2DockingDate',\n 'length': 'length',\n 'launch': 'launch',\n 'label': 'label',\n 'number_of_seats': 'numberOfSeats',\n 'model_start_year': 'modelStartYear',\n 'width': 'width',\n 'related_mean_of_transportation': 'relatedMeanOfTransportation',\n 'target_space_station': 'targetSpaceStation'\n }\n\n self._dry_cargo = dry_cargo\n self._rocket = rocket\n self._decay = decay\n self._type = type\n self._port1 = port1\n self._introduction_date = introduction_date\n self._port2 = port2\n self._free_flight_time = free_flight_time\n self._diameter = diameter\n self._design_company = design_company\n self._discharge = discharge\n self._port2_docked_time = port2_docked_time\n self._id = id\n self._rebuilder = rebuilder\n self._port1_docking_date = port1_docking_date\n self._height = height\n self._model_end_date = model_end_date\n self._number_of_launches = number_of_launches\n self._port1_undocking_date = port1_undocking_date\n self._model_end_year = model_end_year\n self._cargo_water = cargo_water\n self._port2_undocking_date = port2_undocking_date\n self._port1_docked_time = port1_docked_time\n self._weight = weight\n self._version = version\n self._apoapsis = apoapsis\n self._discharge_average = discharge_average\n self._total_cargo = total_cargo\n self._number_of_crew = number_of_crew\n self._power_type = power_type\n self._cargo_fuel = cargo_fuel\n self._cargo_gas = cargo_gas\n self._mass = mass\n self._description = description\n self._engine_type = engine_type\n self._total_mass = total_mass\n self._crew = crew\n self._docked_time = docked_time\n self._assembly = assembly\n self.__class = _class\n self._model_start_date = model_start_date\n self._inclination = inclination\n self._periapsis = periapsis\n self._regime = regime\n self._port2_docking_date = port2_docking_date\n self._length = length\n self._launch = launch\n self._label = label\n self._number_of_seats = number_of_seats\n self._model_start_year = model_start_year\n self._width = width\n self._related_mean_of_transportation = related_mean_of_transportation\n self._target_space_station = target_space_station",
"def __init__(__self__, *,\n comparison: Optional[pulumi.Input[str]] = None,\n created_at: Optional[pulumi.Input[int]] = None,\n critical: Optional[pulumi.Input['InfraAlertConditionCriticalArgs']] = None,\n description: Optional[pulumi.Input[str]] = None,\n enabled: Optional[pulumi.Input[bool]] = None,\n entity_guid: Optional[pulumi.Input[str]] = None,\n event: Optional[pulumi.Input[str]] = None,\n integration_provider: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n policy_id: Optional[pulumi.Input[int]] = None,\n process_where: Optional[pulumi.Input[str]] = None,\n runbook_url: Optional[pulumi.Input[str]] = None,\n select: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[str]] = None,\n updated_at: Optional[pulumi.Input[int]] = None,\n violation_close_timer: Optional[pulumi.Input[int]] = None,\n warning: Optional[pulumi.Input['InfraAlertConditionWarningArgs']] = None,\n where: Optional[pulumi.Input[str]] = None):\n if comparison is not None:\n pulumi.set(__self__, \"comparison\", comparison)\n if created_at is not None:\n pulumi.set(__self__, \"created_at\", created_at)\n if critical is not None:\n pulumi.set(__self__, \"critical\", critical)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if enabled is not None:\n pulumi.set(__self__, \"enabled\", enabled)\n if entity_guid is not None:\n pulumi.set(__self__, \"entity_guid\", entity_guid)\n if event is not None:\n pulumi.set(__self__, \"event\", event)\n if integration_provider is not None:\n pulumi.set(__self__, \"integration_provider\", integration_provider)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if policy_id is not None:\n pulumi.set(__self__, \"policy_id\", policy_id)\n if process_where is not None:\n pulumi.set(__self__, \"process_where\", process_where)\n if runbook_url is not None:\n pulumi.set(__self__, \"runbook_url\", runbook_url)\n if select is not None:\n pulumi.set(__self__, \"select\", select)\n if type is not None:\n pulumi.set(__self__, \"type\", type)\n if updated_at is not None:\n pulumi.set(__self__, \"updated_at\", updated_at)\n if violation_close_timer is not None:\n pulumi.set(__self__, \"violation_close_timer\", violation_close_timer)\n if warning is not None:\n pulumi.set(__self__, \"warning\", warning)\n if where is not None:\n pulumi.set(__self__, \"where\", where)",
"def scheduler(startdate, enddate, tag, description,\n title, reminder, observers, priority, interval):\n observers = console_utils.split_str_to_list(observers)\n priority = Priority[priority]\n tag = Tag(tag)\n manager = Actions()\n manager.add_scheduler(title,\n datetime.strptime(startdate, \"%Y-%m-%d\"),\n datetime.strptime(enddate, \"%Y-%m-%d\"),\n interval,\n tag=tag,\n description=description,\n observers=observers,\n reminder=datetime.strptime(reminder, \"%H:%M\"),\n priority=priority)",
"def _declr(self):\n tmpl = self._template_interface\n for i in tmpl._interfaces:\n setattr(self, i._name, monitor_of(i))"
] |
[
"0.49787045",
"0.4933817",
"0.48545372",
"0.48421177",
"0.48299405",
"0.482916",
"0.47866398",
"0.47530758",
"0.47223446",
"0.4716753",
"0.47071758",
"0.47028518",
"0.46939448",
"0.4683963",
"0.46800286",
"0.46705967",
"0.46699038",
"0.46512488",
"0.46383926",
"0.46343276",
"0.46330518",
"0.4620822",
"0.46187308",
"0.46077898",
"0.46075433",
"0.45714957",
"0.45510104",
"0.45319507",
"0.45074394",
"0.45036757"
] |
0.5230324
|
0
|
Method(isInternal, docstring, name, args, isConst) -> Method. Creates a new Method description with the given docstring, name, and args for the language, with special consideration if the method was declared constant and/or internal.
|
def __init__ (self, isInternal, docstring, name, args, isConst):
self.name = name
self.isConst = isConst
self.isInternal = isInternal
if isInternal:
if language == 'java':
# We have a special Javadoc doclet that understands a non-standard
# Javadoc tag, @internal. When present in the documentation string
# of a method, it causes it to be excluded from the final
# documentation output. @internal is something doxygen offers.
#
p = re.compile('(\s+?)\*/', re.MULTILINE)
self.docstring = p.sub(r'\1* @internal\1*/', docstring)
elif language == 'csharp':
# We mark internal methods in a different way for C#.
self.docstring = docstring
else:
self.docstring = " @internal\n" + docstring
else:
self.docstring = docstring
# In Java and C#, if a method is const and swig has to translate the type,
# then for some reason swig cannot match up the resulting doc strings
# that we put into %javamethodmodifiers. The result is that the java
# documentation for the methods are empty. I can't figure out why, but
# have figured out that if we omit the argument list in the doc string
# that is put on %javamethodmodifiers for such case, swig does generate
# the comments for those methods. This approach is potentially dangerous
# because swig might attach the doc string to the wrong method if a
# methods has multiple versions with varying argument types, but the
# combination doesn't seem to arise in antimony currently, and anyway,
# this fixes a real problem in the Java documentation for antimony.
if language == 'java' or language == 'csharp':
if isConst and (args.find('unsigned int') >= 0):
self.args = ''
elif not args.strip() == '()':
if isConst:
self.args = args + ' const'
else:
self.args = args
else:
if isConst:
self.args = '() const'
else:
self.args = ''
else:
self.args = args
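For illustration, a minimal sketch of constructing this description object, assuming the __init__ above belongs to a class named Method and runs in a module whose global 'language' setting has been defined; the sample docstring and argument list are made up for the example.

# Illustrative sketch, not part of the generator: exercise the Java branch.
# Assumes this runs in the same module as the Method class, where the
# module-level 'language' setting is read by __init__.
language = 'java'

doc = """/**
 * Returns the value stored at the given index.
 */"""

m = Method(isInternal=True,
           docstring=doc,
           name="getValue",
           args="(unsigned int n)",
           isConst=True)

# For Java, an internal method gets an @internal Javadoc tag injected,
# and the const + 'unsigned int' combination clears the argument list.
print(m.docstring)
print(repr(m.args))  # -> ''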
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __init__(self, exact=None, javadocs=None, modifiers=None,\n return_type=None, name=None, params=None, exceptions=None,\n body=None, indent=4):\n super(JavaMethod, self).__init__(exact=exact, javadocs=javadocs,\n modifiers=modifiers, name=name,\n value=None, indent=indent)\n self.is_default = False\n if self.modifiers == []:\n self.add_modifier('public')\n\n self.annotations = []\n\n self.return_type = 'void'\n if return_type is not None:\n self.set_return_type(return_type)\n\n self.parameters = OrderedSet()\n if params is not None:\n for param_type, param_name in params:\n self.add_parameter(param_type, param_name)\n\n self.exceptions = OrderedSet()\n if exceptions is not None:\n for exc in exceptions:\n self.add_exception(exc)\n\n self.body = []\n if body is not None:\n for line in body:\n self.add_line(line)\n\n self.exact = exact\n self.default_modifiers = True",
"def method(name, doc):\n import html\n\n params = method_params(doc)\n doc = html.escape(doc)\n return string.Template(METHOD_TEMPLATE).substitute(\n name=name, params=params, doc=doc\n )",
"def create_method(self):\n n_indents = 1 if self.target_language in ['java', 'js',\n 'php', 'ruby'] else 0\n return self.temp('separated.method', n_indents=n_indents,\n skipping=True).format(**self.__dict__)",
"def test_method_creation():\n my_method = SGMethod(\"Test\")\n \n assert my_method.name == \"Test\"\n assert len(my_method.params) == 0\n assert my_method.return_type == None",
"def derived_from(original_method):\n\n def wrapper(method):\n doc = original_method.__doc__.replace(\"*,\", \"\\*,\") # noqa\n doc = doc.replace(\n \":ref:`ufunc docs <ufuncs.kwargs>`.\",\n \"`ufunc docs <https://docs.scipy.org/doc/numpy/reference/ufuncs.html#ufuncs-kwargs>`_.\",\n )\n\n # remove examples\n doc = doc.split(\"\\n\\n Examples\\n\")[0]\n\n # remove references\n doc = [a for a in doc.split(\"\\n\\n\") if \"References\\n----------\\n\" not in a]\n\n l1 = \"This docstring was copied from numpy.{}\".format(original_method.__name__)\n l2 = \"Some inconsistencies with the Workflows version may exist\"\n\n if isinstance(original_method, np.ufunc):\n # what the function does\n info = doc[1]\n\n # parameters (sometimes listed on separate lines, someimtes not)\n parameters = [a for a in doc if \"Parameters\\n\" in a][0].split(\"\\n\")\n if parameters[4][0] == \"x\":\n parameters = \"\\n\".join(parameters[:6])\n else:\n parameters = \"\\n\".join(parameters[:4])\n\n # return value\n returns = [a for a in doc if \"Returns\\n\" in a][0]\n\n # final docstring\n doc = \"\\n\\n\".join([info, l1, l2, parameters, returns])\n else:\n # does the first line contain the function signature? (not always the case)\n if doc[0][-1] == \")\":\n doc = [doc[1]] + [\"\\n\\n\" + \" {}\\n\\n {}\\n\\n\".format(l1, l2)] + doc[2:]\n else:\n doc = [doc[0]] + [\"\\n\\n\" + \" {}\\n\\n {}\\n\\n\".format(l1, l2)] + doc[1:]\n doc = \"\\n\\n\".join(doc)\n\n method.__doc__ = doc\n return method\n\n return wrapper",
"def convert_method(self, access_modifier, return_type, func_name, params):\n\n # Run super definition\n access_modifier, return_type, func_name, params = \\\n super().convert_method(access_modifier, return_type,\n func_name, params)\n\n # Make function definition\n func = []\n func += [self.make_function_definition(return_type,\n func_name, params)]\n\n # Add decorator if required\n if \"static\" in access_modifier:\n func.insert(0, \"@staticmethod\")\n\n # Return processed func definition\n return func, []",
"def __init__(self, cls_method) -> None:\n self.method = cls_method\n self.__doc__ = self.method.__doc__",
"def test_method_definition(self):\n self.script(\"# script.py\\n\"\n \"class C():\\n\"\n \" 'cdoc'\\n\"\n \" def f(self):\\n\"\n \" 'mdoc'\\n\"\n \" pass\")\n self.compile()\n\n class_def = self.find_code_component(name=\"C\")\n method_def = self.find_code_component(name=\"f\")\n\n self.assertEqual(method_def.type, \"function_def\")\n self.assertEqual(method_def.mode, \"w\")\n self.assertEqual(method_def.first_char_line, 4)\n self.assertEqual(method_def.first_char_column, 4)\n self.assertEqual(method_def.last_char_line, 6)\n self.assertEqual(method_def.last_char_column, 12)\n self.assertEqual(method_def.container_id, class_def.id)\n\n method_def_block = self.metascript.code_blocks_store[method_def.id]\n self.assertEqual(method_def_block.code,\n \"def f(self):\\n\"\n \" 'mdoc'\\n\"\n \" pass\")\n self.assertEqual(method_def_block.docstring, \"mdoc\")\n self.assertTrue(bool(method_def_block.code_hash))",
"def format_method(cls, **kwargs): \n _doc_formatter = cls._format_obj(**kwargs) \n ## using functools.wraps: this will work but the method type of any bounded\n ## function (static, instance or class method) is also altered\n #def _func_decorator(func):\n # new_func = functools.wraps(func)(func)\n # new_func.__doc__ = _doc_formatter(func)\n # return new_func\n try:\n assert USE_WRAPT_OR_NOT and wrapt\n except: \n class _func_decorator(__MethodDecorator):\n def __init__(self, func, obj=None, cls=None, method_type='function'):\n #super(_func_decorator,self).__init__(func, obj=obj, cls=cls, method_type=method_type)\n __MethodDecorator.__init__(self, func, obj=obj, cls=cls, method_type=method_type)\n # we had one attribute wrt. a standard method_decorator instance\n setattr(self,'__doc__',_doc_formatter(self.func))\n def __getattribute__(self, attr_name): \n # we ensure that the docstring which is the __doc__ attribute of the\n # decorator, not that of the function itself\n if attr_name in ('__doc__',):\n return object.__getattribute__(self, attr_name) \n # otherwise behaves like the superclass class\n #return super(_func_decorator,self).__getattribute__(attr_name)\n return __MethodDecorator.__getattribute__(self, attr_name)\n else:\n def _func_decorator(func):\n #@my_wrapper\n #def new_func(*_args, **_kwargs):\n # return func(*_args, **_kwargs)\n new_func = method_decorator(func)\n #new_func = method_wrapper(func)\n # now we update the '__doc__' by recycling the doc already commited in \n # the FunctionWrapper object new_func: this enables avoiding issues when\n # dealing with classmethod or staticmethod methods:\n # \"AttributeError: 'classmethod' object attribute '__doc__' is read-only\"\n try: # write on the wrapper...\n new_func.__doc__ = _doc_formatter(new_func)\n except: \n # still, we allow this type of error, as it may occur in the case the\n # order of closures was not well set, e.g. by implementing:\n # @classmethod\n # @Docstring.format_class(**kwargs)\n # instead of:\n # @Docstring.format_class(**kwargs)\n # @classmethod\n pass\n return new_func\n return _func_decorator",
"def __init__(self):\n super(MethodInfo, self).__init__()\n self.DocString = None",
"def add_method(self,f,*def_args,**def_kw):\n\t\tmethod = LadonMethodInfo(self,f,*def_args,**def_kw)\n\t\t# store the method info\n\t\tself.methods[get_function_name(f)] = method\n\t\treturn method",
"def build_method(method_name, description, parameters, api_path, http_method, summary, return_type):\n allow_per_page = False\n parameters = check_for_pre_attachment_param(parameters)\n arg_list = get_parameters(parameters)\n param_descriptions = get_parameter_descriptions(parameters)\n payload = build_payload(parameters)\n enums = check_for_enums(parameters)\n\n \"\"\"\n If the method returns an array, allow the per_page parameter for paging\n \"\"\"\n if return_type == 'array' or (method_name.startswith(\"list_\") and http_method == \"GET\"):\n arg_list.append('per_page=None')\n param_descriptions.append(':param per_page: (optional) Set how many results canvas should return, defaults to config.LIMIT_PER_PAGE')\n param_descriptions.append(':type per_page: integer or None')\n payload.append('\\'per_page\\': per_page,')\n allow_per_page = True\n\n arg_list.append('**request_kwargs')\n\n \"\"\"\n Create the method signature\n \"\"\"\n\n content = line_format('def ' + method_name + '(request_ctx, ' + ', '.join(arg_list) + '):', NONE)\n content += line_format('\"\"\"', FOUR)\n\n \"\"\"\n Create the method description text from the description in the meta api\n \"\"\"\n regex = re.compile(r'\\{api\\:(\\w+)\\#(\\w+).*?\\}')\n for line in description.splitlines(True):\n rst_line = regex.sub(format_api_string, line)\n content += line_format(rst_line.rstrip(), FOUR)\n\n \"\"\"\n list out the method paramters\n \"\"\"\n content += line_format('', NONE)\n content += line_format(':param request_ctx: The request context', EIGHT)\n content += line_format(':type request_ctx: :class:RequestContext', EIGHT)\n for param in param_descriptions:\n content += line_format(param, EIGHT)\n content += line_format(':return: '+summary, EIGHT)\n content += line_format(':rtype: requests.Response (with ' + return_type + ' data)', EIGHT)\n content += line_format('', NONE)\n content += line_format('\"\"\"', FOUR)\n content += line_format('', NONE)\n\n \"\"\"\n Add the per_page check\n \"\"\"\n if allow_per_page:\n content += line_format('if per_page is None:', FOUR)\n content += line_format('per_page = request_ctx.per_page', EIGHT)\n\n \"\"\"\n Add any enums if they exist.\n \"\"\"\n for enum in enums:\n content += line_format(enum, FOUR)\n\n \"\"\"\n Add the api path\n \"\"\"\n path_formatted = 'path = \\'' + api_path + '\\''\n content += line_format(path_formatted, FOUR)\n\n \"\"\"\n Add a payload if one exists\n \"\"\"\n payload_string = ''\n if payload:\n content += line_format('payload = {', FOUR)\n for item in payload:\n content += line_format(item, EIGHT)\n content += line_format('}', FOUR)\n payload_string = ', payload=payload'\n\n content += line_format('url = request_ctx.base_api_url + path.format(' + ', '.join(get_path_parameters(parameters)) + ')', FOUR)\n content += line_format(\n 'response = client.'+http_method.lower()+'(request_ctx, url' + payload_string + ', **request_kwargs)', FOUR)\n\n content += line_format('', NONE)\n content += line_format('return response', FOUR)\n content += line_format('', NONE)\n content += line_format('', NONE)\n return content",
"def wrap_method(cls, methodName, newMethod):\n cls[methodName].exclude()\n add_member_function(cls, methodName, newMethod)",
"def method(rtype):\n\n def decorator(func):\n argcount = func.__code__.co_argcount\n argnames = func.__code__.co_varnames[:argcount]\n ndefaults = 0\n if func.__defaults__:\n ndefaults = len(func.__defaults__)\n\n argNames = func.__code__.co_varnames[(argcount - ndefaults):]\n\n if ndefaults < (argcount - 1):\n raise cSyntaxError(\n 'Type declarations missing from arguments %(args)r in the BLM '\n 'method %(func)s().' % {\n 'args': list(reversed(argnames))[ndefaults:],\n 'func': func.__name__,})\n params = []\n if func.__defaults__:\n params = [ arg._instantiate(name) for arg, name in\n zip(func.__defaults__, argNames)]\n\n func.__defaults__ = None\n m = ExternalMethod(func.__name__, func)\n if rtype:\n m.rtype = rtype._instantiate('result')\n m.params = params\n\n return m\n\n return decorator",
"def set_method(self, method, **kwargs):\n if isinstance(method, str) and method in self.available_methods.keys():\n self.method_name = method\n self.method = self.available_methods[method]['IDIMethod'](self, **kwargs)\n elif callable(method) and hasattr(method, 'calculate_displacements'):\n self.method_name = 'external_method'\n try:\n self.method = method(self, **kwargs)\n except:\n raise ValueError(\"The input `method` is not a valid `IDIMethod`.\")\n else:\n raise ValueError(\"method must either be a valid name from `available_methods` or an `IDIMethod`.\")\n \n # Update `get_displacements` docstring\n tools.update_docstring(self.get_displacements, self.method.calculate_displacements)\n # Update `show_points` docstring\n if hasattr(self.method, 'show_points'):\n try:\n tools.update_docstring(self.show_points, self.method.show_points)\n except:\n pass",
"def method_description(self):\n pass",
"def create_method(self):\n assert self.stored_args != None\n if self.generated_method != None:\n return (self.generated_cf, self.generated_method)\n\n class_name = self._cf.this.name.value + \"_lambda_\" + str(self._ins.pos)\n self.generated_cf = ClassFile.create(class_name)\n # Jawa doesn't seem to expose this cleanly. Technically we don't need\n # to implement the interface because the caller doesn't actually care,\n # but it's better to implement it anyways for the future.\n # (Due to the hacks below, the interface isn't even implemented properly\n # since the method we create has additional parameters and is static.)\n iface_const = self.generated_cf.constants.create_class(self.implemented_iface)\n self.generated_cf._interfaces.append(iface_const.index)\n\n # HACK: This officially should use instantiated_desc.descriptor,\n # but instead use a combination of the stored arguments and the\n # instantiated descriptor to make packetinstructions work better\n # (otherwise we'd need to generate and load fields in a way that\n # packetinstructions understands)\n descriptor = \"(\" + self.dynamic_desc.args_descriptor + \\\n self.instantiated_desc.args_descriptor + \")\" + \\\n self.instantiated_desc.returns_descriptor\n method = self.generated_cf.methods.create(self.dynamic_name,\n descriptor, code=True)\n self.generated_method = method\n # Similar hack: make the method static, so that packetinstructions\n # doesn't look for the corresponding instance.\n method.access_flags.acc_static = True\n # Third hack: the extra arguments are in the local variables/arguments\n # list, not on the stack. So we need to move them to the stack.\n # (In a real implementation, these would probably be getfield instructions)\n # Also, this uses aload for everything, instead of using the appropriate\n # instruction for each type.\n instructions = []\n for i in range(len(method.args)):\n instructions.append((\"aload\", i))\n\n cls_ref = self.generated_cf.constants.create_class(self.method_class)\n if self.ref_kind in FIELD_REFS:\n # This case is not currently hit, but provided for future use\n # (Likely method_name and method_descriptor would no longer be used though)\n ref = self.generated_cf.constants.create_field_ref(\n self.method_class, self.method_name, self.method_desc.descriptor)\n elif self.ref_kind == REF_invokeInterface:\n ref = self.generated_cf.constants.create_interface_method_ref(\n self.method_class, self.method_name, self.method_desc.descriptor)\n # See https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-6.html#jvms-6.5.invokeinterface.notes\n # Since the generated classfile only exists for use by burger,\n # we don't _really_ need to handle this, other than providing\n # some value, but it's not too hard. 
However, we're not currently\n # treating longs and doubles as 2 instead of 1 (which is incorrect,\n # but again, doesn't really matter since this is redundant information\n # that burger does not use).\n count = len(method.args)\n else:\n ref = self.generated_cf.constants.create_method_ref(\n self.method_class, self.method_name, self.method_desc.descriptor)\n\n # See https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-5.html#jvms-5.4.3.5\n if self.ref_kind == REF_getField:\n instructions.append((\"getfield\", ref))\n elif self.ref_kind == REF_getStatic:\n instructions.append((\"getstatic\", ref))\n elif self.ref_kind == REF_putField:\n instructions.append((\"putfield\", ref))\n elif self.ref_kind == REF_putStatic:\n instructions.append((\"putstatic\", ref))\n elif self.ref_kind == REF_invokeVirtual:\n instructions.append((\"invokevirtual\", ref))\n elif self.ref_kind == REF_invokeStatic:\n instructions.append((\"invokestatic\", ref))\n elif self.ref_kind == REF_invokeSpecial:\n instructions.append((\"invokespecial\", ref))\n elif self.ref_kind == REF_newInvokeSpecial:\n instructions.append((\"new\", cls_ref))\n instructions.append((\"dup\",))\n instructions.append((\"invokespecial\", ref))\n elif self.ref_kind == REF_invokeInterface:\n instructions.append((\"invokeinterface\", ref, count, 0))\n\n method.code.assemble(assemble(instructions))\n\n return (self.generated_cf, self.generated_method)",
"def __init__ (self, docstring, name, isInternal):\n\n # Take out excess leading blank lines.\n docstring = re.sub('/\\*\\*(\\s+\\*)+', r'/** \\n *', docstring)\n\n self.docstring = docstring\n self.name = name\n self.isInternal = isInternal",
"def generate_client_method(self, symbol_table, method, ci, has_impl):\n (Method, Type, (MName, Name, Extension), Attrs, Args,\n Except, From, Requires, Ensures, DocComment) = method\n\n abstract = member_chk(sidlir.abstract, Attrs)\n #final = member_chk(sidlir.final, Attrs)\n static = member_chk(sidlir.static, Attrs)\n\n attrs = []\n if abstract:\n # we still need to output a stub for an abstract function,\n # since it might me a virtual function call through an\n # abstract interface\n pass\n if static: attrs.append(ir.static)\n\n ior_type = babel.lower_ir(symbol_table, Type)\n ior_args = babel.epv_args(Attrs, Args, symbol_table, ci.epv.name)\n call_args = map(lambda arg: ir.arg_id(arg), ior_args)\n cdecl = ir.Fn_decl(attrs, ior_type, Name + Extension, ior_args, DocComment)\n qname = '_'.join(ci.co.qualified_name+[Name]) + Extension\n\n if self.server and has_impl:\n # if we are generating server code we can take a shortcut\n # and directly invoke the implementation\n modname = '_'.join(ci.co.symbol_table.prefix+['Impl'])\n if not static:\n qname = '_'.join(ci.co.qualified_name+['Impl'])\n # FIXME!\n callee = '_'.join([modname, ir.fn_decl_id(cdecl)])\n else:\n callee = babel.build_function_call(ci, cdecl, static)\n\n if Type == sidlir.void:\n call = [ir.Stmt(ir.Call(callee, call_args))]\n else:\n call = [ir.Stmt(ir.Return(ir.Call(callee, call_args)))]\n\n cdecl = ir.Fn_decl(attrs, ior_type, qname, ior_args, DocComment)\n cdefn = ir.Fn_defn(attrs, ior_type, qname, ior_args, call, DocComment)\n\n if static:\n # TODO: [performance] we would only need to put the\n # _externals variable into the _Stub.c, not necessarily\n # all the function definitions\n ci.stub.gen(cdecl)\n ci.stub.new_def('#pragma weak '+qname)\n ci.stub.gen(cdefn)\n else:\n # FIXME: can't UPC handle the inline keyword??\n ci.stub.new_header_def('static inline')\n ci.stub.genh(cdefn)\n # ci.stub.gen(cdecl)\n # ci.stub.gen(cdefn)",
"def entry_for_one_method(nom, method):\r\n # TODO(lhosken) : This is darned similar to entry_for_one_func. Merge 'em?\r\n # (Punted so far since funcdoc indentation made my head hurt)\r\n assert inspect.ismethod(method)\r\n args, varargs, varkw, defaults = inspect.getargspec(method)\r\n # args[:1] instead of args to discard \"self\" arg\r\n argspec = inspect.formatargspec(args[1:], varargs, varkw, defaults)\r\n return entry(nom,\r\n argspec=argspec,\r\n funcdoc=(method.__doc__ or \"\").replace(\"\\n\", \" \"))",
"def generate_method_definition(func):\n indent = 4\n\n # initial definition\n method_definition = (\" \" * indent) + \"def \" + func[\"name\"]\n\n # Here we just create a queue and put all the parameters\n # into the queue in the order that they were given,\n params_required = [\n param for param in func[\"arguments\"] if param[\"is_required\"]\n ]\n params_optional = [\n param for param in func[\"arguments\"]\n if not param[\"is_required\"]\n ]\n\n # Open the parameter definitions\n method_definition += \"(self, \"\n\n for param in params_required:\n # Put the parameter into the queue\n\n method_definition += param[\"name\"]\n method_definition += \", \"\n\n for param in params_optional:\n method_definition += param[\"name\"]\n\n # Default methods not required\n method_definition += \"=None, \"\n\n # Peel off the final \", \" and close off the parameter definition\n method_definition = method_definition.rstrip(\", \") + \"):\\n\"\n\n indent += 4\n\n # re-indent\n method_definition += \" \" * indent\n\n # Begin with description.\n\n method_definition += '\"\"\"' + func[\"description\"]\n\n # re-indent\n method_definition += \"\\n\\n\" + \" \" * indent\n\n # Go through each parameter and insert description & type hint\n for param in params_required + params_optional:\n # Add the type\n method_definition += \":param \" + DTYPE_MAPPING[param[\"type\"].lower()]\n\n # Add the name\n method_definition += \" \" + param[\"name\"] + \": \"\n\n # Add the description\n method_definition += param[\"description\"]\n\n # Add optionality & reindent\n method_definition += \"\\n\" if param[\n \"is_required\"] else \" (Optional)\\n\"\n\n method_definition += \" \" * indent\n # Do not parse the returns because it doesn't work correctly at the moment\n# open_index = func[\"returns\"].find('(')\n# close_index = func[\"returns\"].find(\n# ')', (open_index if open_index > -1 else 0))\n#\n# func[\"returns\"] = func[\"returns\"].replace(\"\\t\", \" \" * 4)\n# return_string = func[\"returns\"].replace(\"\\n\", \"\")\n#\n# if open_index < close_index and func[\"returns\"][\n# open_index + 1:close_index] in DTYPE_MAPPING:\n# method_definition += \":rtype: \" + DTYPE_MAPPING[\n# func[\"returns\"][open_index + 1:close_index]]\n#\n# func[\"returns\"] = func[\"returns\"].replace(\n# func[\"returns\"][open_index:close_index + 1], \"\")\n#\n# method_definition += \"\\n\" + \" \" * indent\n#\n# method_definition += \":return: \" + return_string\n#\n# for i in range(0, len(return_string) + 1, 80 - (indent + 2)):\n# method_definition += return_string[i:i + (\n# 80 - (indent + 2))] + \"\\n\" + \" \" * indent\n\n # Close it off & reindent\n method_definition += '\"\"\"' + \"\\n\" + \" \" * indent\n\n # Create the params map\n params_map = \"__params_map = {\"\n\n # Save the indent\n params_indent, num_params = len(\n params_map), len(params_required) + len(params_optional)\n\n # Append the map to the method_definition\n method_definition += params_map\n\n # Go through the required parameters first\n for i, param in enumerate(params_required + params_optional):\n\n # append the methods to the map\n method_definition += \"'\" + param[\"name\"] + \"': \" + param[\"name\"]\n\n if not param[\"is_required\"]:\n method_definition + \" if \" + param[\n \"name\"] + \"is not None else None\"\n\n # add commas or ending bracket if needed & reindent correctly\n method_definition += \",\\n\" + \" \" * indent + ' ' * params_indent if i + 1 < num_params else \"\"\n\n method_definition += '}\\n\\n' + ' ' * indent\n\n method_definition += 
\"return self.make_request(SERVER_ADDRESS, '\" + func[\"name\"] + \"', \" \\\n + params_map.rstrip(\" = {\") + \", timeout=self.timeout)\\n\\n\"\n\n return method_definition",
"def exec_method(self, module_name, version=None, client_class=None,\n method_name=None, *args, **kwargs):\n client_class = client_class or 'Client'\n client_version = version or 2\n _client = self.create_client(module_name, client_version,\n client_class)\n try:\n # NOTE(kiennt): method_name could be a combination\n # for example 'servers.list'. Here is the\n # workaround.\n method = getattr(_client, method_name.split('.')[0])\n for attr in method_name.split('.')[1:]:\n method = getattr(method, attr)\n return method(*args, **kwargs)\n except Exception as err:\n raise err",
"def new_method(*args, **kwargs):\n key = _represent_args(*args, **kwargs)\n if key not in method.cache:\n method.cache[key] = method(*args, **kwargs)\n return method.cache[key]",
"def brief_documentation(method: object) -> str:\n doc = method.__doc__\n if doc is not None:\n lines = doc.splitlines()\n if len(lines) > 0:\n return lines[0]\n return ''",
"def _generate_method(name, func):\n argspec = inspect.getargspec(func)\n assert not argspec.varargs, 'varargs not supported'\n assert not argspec.keywords, 'keywords not supported'\n assert not argspec.defaults, 'defaults not supported'\n\n args = ', '.join(argspec.args[1:]) # skipped self arg\n source = _method_code_template.format(method=name, args=args)\n glbls = {}\n exec_(source, glbls)\n method = njit(glbls['method'])\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n return method(*args, **kwargs)\n\n return wrapper",
"def add_service_method(self,f,*def_args,**def_kw):\n\t\t# Extract the source filename and line number where the method is defined\n\t\tsrc_fname = f.__code__.co_filename\n\t\tfirstlineno = f.__code__.co_firstlineno\n\t\t# get an ast-analyzed object of the source file\n\t\tsinfo = self.source_info(src_fname)\n\t\t# Detect the name of the class that the unbound method will be member of\n\t\t\n\t\tfor clsname,v in sinfo.items():\n\t\t\tif firstlineno>v[0] and firstlineno<=v[1]:\n\t\t\t\t# The method is somewhere within this class\n\t\t\t\t# Check if the class has been registered as a service yet\n\t\t\t\tif not (src_fname,clsname) in self.services:\n\t\t\t\t\t# If no then do it by creating a LadonServiceInfo object for it\n\t\t\t\t\tself.services[(src_fname,clsname)] = LadonServiceInfo(clsname,src_fname,firstlineno,v[2],self.service_counter,self.src_encoding[src_fname])\n\t\t\t\t\tself.service_and_number[self.service_counter] = (src_fname,clsname)\n\t\t\t\t\tself.service_counter += 1\n\t\t\t\t# Register the method as member of this class/service\n\t\t\t\tmethod = self.services[(src_fname,clsname)].add_method(f,*def_args,**def_kw)\n\t\t\t\treturn method\n\t\t\n\t\t# Method could not be recognised as a class member\n\t\treturn None",
"def make(self, code, descriptions={}):\n # Removes line continuation symbols from declarations\n # to make parsing easier.\n lines = code_parser.remove_continuations_symbols(code).split('\\n')\n\n for ln in lines:\n if 'Attribute VB_Name = \"' in ln:\n mod_name = self.__get_mod_name(ln)\n if (mod_name in descriptions):\n doc = module_doc.ModuleDoc(\n mod_name, descriptions[mod_name])\n else:\n doc = module_doc.ModuleDoc(mod_name)\n\n elif 'Public Sub' in ln or 'Public Function' in ln:\n meth_name = self.__get_method_name(ln)\n\n args = self.__get_args(ln)\n formatted = self.__format_args(list(args.values()))\n key = mod_name + '.' + meth_name + f' ({formatted})'\n if (key in descriptions):\n doc.addMethod(meth_name, args,\n descriptions[key]['short-description'])\n else:\n doc.addMethod(meth_name, args)\n\n return doc",
"def create_basic_method(path, member):\n serial = 1\n path = path.encode()\n member = member.encode()\n\n headers = bytearray()\n headers.append(HeaderField.PATH)\n headers += encode_signature(DataType.OBJECT_PATH)\n headers += align(4, len(headers))\n headers += struct.pack('<I', len(path))\n headers += path\n headers.append(0)\n headers += align(8, len(headers))\n\n headers.append(HeaderField.MEMBER)\n headers += encode_signature(DataType.STRING)\n headers += align(4, len(headers))\n headers += struct.pack('<I', len(member))\n headers += member\n headers.append(0)\n\n body = bytearray()\n\n data = bytearray()\n data.append(Endian.LITTLE)\n data.append(MsgType.METHOD_CALL)\n data.append(Flags.NONE)\n data.append(Version.ONE)\n data += struct.pack('<I', len(body))\n data += struct.pack('<I', serial)\n data += struct.pack('<I', len(headers))\n data += headers\n data += align(8, len(data))\n data += body\n\n return bytes(data)",
"def method():\n pass",
"def method_decl(self):\r\n return '\\t{\"%s\", %s, %s, \"%s\"}' % (\r\n self.name, self.name, self.method, self.doc)"
] |
[
"0.6497778",
"0.6236314",
"0.61402524",
"0.583954",
"0.575997",
"0.572355",
"0.5714042",
"0.5590507",
"0.55856365",
"0.557706",
"0.5535297",
"0.5442776",
"0.5375448",
"0.53494215",
"0.53286654",
"0.5320115",
"0.5189196",
"0.51168287",
"0.5111711",
"0.5095843",
"0.50664973",
"0.5045041",
"0.5015238",
"0.49942723",
"0.49882397",
"0.49754617",
"0.49700174",
"0.49617445",
"0.4958528",
"0.49515793"
] |
0.750537
|
0
|
CClassDoc(docstring, name) > CClassDoc Creates a new CClassDoc with the given docstring and name.
|
def __init__ (self, docstring, name, isInternal):
# Take out excess leading blank lines.
docstring = re.sub('/\*\*(\s+\*)+', r'/** \n *', docstring)
self.docstring = docstring
self.name = name
self.isInternal = isInternal
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __init__ (self, isInternal, docstring, name, args, isConst):\n\n self.name = name\n self.isConst = isConst\n self.isInternal = isInternal\n\n if isInternal:\n if language == 'java':\n # We have a special Javadoc doclet that understands a non-standard\n # Javadoc tag, @internal. When present in the documentation string\n # of a method, it causes it to be excluded from the final\n # documentation output. @internal is something doxygen offers.\n #\n p = re.compile('(\\s+?)\\*/', re.MULTILINE)\n self.docstring = p.sub(r'\\1* @internal\\1*/', docstring)\n elif language == 'csharp':\n # We mark internal methods in a different way for C#.\n self.docstring = docstring\n else:\n self.docstring = \" @internal\\n\" + docstring\n else:\n self.docstring = docstring\n\n # In Java and C#, if a method is const and swig has to translate the type,\n # then for some reason swig cannot match up the resulting doc strings\n # that we put into %javamethodmodifiers. The result is that the java\n # documentation for the methods are empty. I can't figure out why, but\n # have figured out that if we omit the argument list in the doc string\n # that is put on %javamethodmodifiers for such case, swig does generate \n # the comments for those methods. This approach is potentially dangerous\n # because swig might attach the doc string to the wrong method if a\n # methods has multiple versions with varying argument types, but the\n # combination doesn't seem to arise in antimony currently, and anyway,\n # this fixes a real problem in the Java documentation for antimony.\n\n if language == 'java' or language == 'csharp':\n if isConst and (args.find('unsigned int') >= 0):\n self.args = ''\n elif not args.strip() == '()':\n if isConst:\n self.args = args + ' const'\n else:\n self.args = args\n else:\n if isConst:\n self.args = '() const'\n else:\n self.args = ''\n else:\n self.args = args",
"def update_docstring(instance):\n try:\n docstring = instance.api_map['doc']\n except (KeyError, TypeError):\n docstring = 'No docstring provided.'\n\n instance.__class__.__doc__ = docstring\n instance.__class__.__call__.__signature__ = construct_signature(instance)\n\n return docstring",
"def __buildDocumentClassDocString():\n\n # build a dictionary of tags and their descriptions, seems a little over\n # the top, but keeps all the information in one place\n tagsStrings = {\n \"comment\" : \"Define the comment string\",\n \"define\" : \"Define the symbol name for #define's\",\n \"info\" : \"Information string, to end up in the 'info' output\",\n \"instance\" : \"Instance name\",\n \"matlabRoot\" : \"Name of variable used by the matlab output\",\n \"members\" : \"List of symbols, which are going to be children of this symbol\",\n \"name\" : \"Name of this symbol\",\n \"size\" : \"Size of this symbol, i.e. indicate it is an array\",\n \"subtype\" : \"Define the actual type of general symbol\",\n \"symbol\" : \"Define a symbol, either a top level entity a child in a members\",\n \"test\" : \"Define the preprocessor test\",\n \"text\" : \"Text to put into a banner symbol\",\n \"title\" : \"Set the overall document title\",\n \"value\" : \"Define a value for this symbol\",\n \"valuesRequired\" : \"Does the enumeration allow automatic value assignment in entries\",\n }\n # build the list of classes\n classes = dict(filter(lambda (k,v): type(v) == types.ClassType, globals().iteritems()))\n (tagsUsed, optionsUsed) = buildKeys(classes)\n\n # build the string we are going to add to the document class\n s = \"Document class that represents the XML document and contains the data.\\n\\n\"\n s += \"Available tags:\\n\"\n\n for tag in tagsStrings:\n try:\n used = \" Required by : %s\\n\" % (\", \".join(tagsUsed[tag]))\n except KeyError:\n used = \"\"\n try:\n opts = \" Optional for: %s\\n\" % (\", \".join(optionsUsed[tag]))\n except KeyError:\n opts = \"\"\n s += \" %s\\n %s\\n %s\\n\\n%s%s\\n\" % (tag, \"-\"*len(tag), tagsStrings[tag], used, opts)\n\n return s",
"def DocString():\n return",
"def get_doc(filename: str) -> str:\n\n # Create the header.\n doc = \"# `\" + filename.split(\"/\")[-1] + \"`\\n\\n\"\n\n lines: List[str] = Path(filename).read_text().split(\"\\n\")\n\n for i in range(len(lines)):\n # Create a class description.\n if lines[i].startswith(\"class\"):\n # Skip private classes.\n match = re.search(\"class _(.*):\", lines[i])\n if match is not None:\n continue\n # Add the name of the class\n class_name = re.search(\"class (.*):\", lines[i]).group(1)\n doc += f\"## `{class_name}`\\n\\n\"\n # Add an example.\n class_example = f\"`from tdw.{filename[:-3].replace('/', '.')} import \" + re.sub(r\"(.*)\\((.*)\\)\", r'\\1',\n class_name) + \"`\"\n doc += class_example + \"\\n\\n\"\n doc += PyDocGen.get_class_description(lines, i)\n # Parse an enum.\n if re.search(r\"class (.*)\\(Enum\\):\", lines[i]) is not None:\n doc += \"\\n\\n\" + PyDocGen.get_enum_values(lines, i)\n doc += \"\\n\\n***\\n\\n\"\n # Create a function description.\n elif lines[i].strip().startswith(\"def\"):\n # Skip private functions.\n match = re.search(\"def _(.*)\", lines[i])\n if match is not None and \"__init__\" not in lines[i]:\n continue\n # Append the function description.\n doc += PyDocGen.get_function_documentation(lines, i) + \"\\n\\n***\\n\\n\"\n\n # Move the \"main class\" to the top of the document.\n main_class_name = ''.join(x.capitalize() or '_' for x in filename[:-3].split('_'))\n main_class = re.search(\"(## `\" + main_class_name + \"`((.|\\n)*))\", doc)\n if main_class is not None:\n main_class = main_class.group(1)\n doc_header = re.search(\"(.*)\\n\\n\", doc).group(0)\n doc_temp = doc.replace(main_class, \"\").replace(doc_header, \"\")\n doc = doc_header + main_class + doc_temp\n\n return doc",
"def test_student_class_docstring(self):\n self.assertIsNot(Student.__doc__, None,\n \"Student class needs a docstring\")\n self.assertTrue(len(Student.__doc__) >= 1,\n \"Student class needs a docstring\")",
"def sphinx_class(self):\n classdoc = ':class:`{cls} <{pref}.{cls}>`'.format(\n cls=self.instance_class.__name__,\n pref=self.instance_class.__module__,\n )\n return classdoc",
"def test_doc_class(self):\n expected = 'City class handles all application cities'\n actual = City.__doc__\n self.assertEqual(expected, actual)",
"def __doc__(self, ???):",
"def get_class_doc_string_name(name):\n name = _strip_class_name(name)\n return name.replace('_', ' ')",
"def docstring(\n docstring: str = None, *, pre: str = None, post: str = None\n) -> Callable[[U], U]:\n\n def edit_docstring(obj: U) -> U:\n obj.__doc__ = \"\".join(\n (\n clean_docstring(pre or \"\", unused=\"pre\"),\n clean_docstring(docstring or (obj.__doc__ or \"\")),\n clean_docstring(post or \"\", unused=\"post\"),\n )\n )\n return obj\n\n return edit_docstring",
"def main_docstring():",
"def click_doc(arg):\n import inspect\n\n def decorator(function):\n if type(arg) is str:\n function.__doc__ = arg\n elif inspect.isclass(arg):\n function.__doc__ = arg.__doc__\n else:\n function.__doc__ = None\n return function\n\n return decorator",
"async def create_doc(self, *args, **kwargs):\n pass",
"def make(self, code, descriptions={}):\n # Removes line continuation symbols from declarations\n # to make parsing easier.\n lines = code_parser.remove_continuations_symbols(code).split('\\n')\n\n for ln in lines:\n if 'Attribute VB_Name = \"' in ln:\n mod_name = self.__get_mod_name(ln)\n if (mod_name in descriptions):\n doc = module_doc.ModuleDoc(\n mod_name, descriptions[mod_name])\n else:\n doc = module_doc.ModuleDoc(mod_name)\n\n elif 'Public Sub' in ln or 'Public Function' in ln:\n meth_name = self.__get_method_name(ln)\n\n args = self.__get_args(ln)\n formatted = self.__format_args(list(args.values()))\n key = mod_name + '.' + meth_name + f' ({formatted})'\n if (key in descriptions):\n doc.addMethod(meth_name, args,\n descriptions[key]['short-description'])\n else:\n doc.addMethod(meth_name, args)\n\n return doc",
"def inherit_docstring_from(cls):\n def _doc(func):\n cls_docstring = getattr(cls, func.__name__).__doc__\n func_docstring = func.__doc__\n if func_docstring is None:\n func.__doc__ = cls_docstring\n else:\n new_docstring = func_docstring % dict(super=cls_docstring)\n func.__doc__ = new_docstring\n return func\n return _doc",
"def _create_documenter(env: sphinx.environment.BuildEnvironment,\n documenter_cls: Type[sphinx.ext.autodoc.Documenter],\n name: str) -> sphinx.ext.autodoc.Documenter:\n bridge = _FakeBridge(env)\n documenter = documenter_cls(bridge, name)\n assert documenter.parse_name()\n assert documenter.import_object()\n if documenter_cls.objtype == 'class':\n bridge.genopt['special-members'] = [\n '__eq__',\n '__getitem__',\n '__setitem__',\n # '__hash__',\n '__init__',\n '__class_getitem__',\n '__call__',\n '__array__',\n ]\n try:\n documenter.analyzer = sphinx.pycode.ModuleAnalyzer.for_module(\n documenter.get_real_modname())\n # parse right now, to get PycodeErrors on parsing (results will\n # be cached anyway)\n documenter.analyzer.find_attr_docs()\n except sphinx.pycode.PycodeError:\n # no source file -- e.g. for builtin and C modules\n documenter.analyzer = None\n return documenter",
"def docstring_parameter(*args, **kwargs):\n\n def dec(obj):\n obj.__doc__ = obj.__doc__.format(*args, **kwargs)\n return obj\n\n return dec",
"def docstring(self, docstring): # type: (str) -> None\n self._tmp_docstring = inspect.cleandoc(docstring)",
"def improve_class_docstring(app, cls, lines):\n if issubclass(cls, models.Model):\n improve_model_docstring(app, cls, lines)\n elif issubclass(cls, forms.BaseForm):\n improve_form_docstring(cls, lines)",
"def docstrings(param1, param2):\n return \"example string\"",
"def test_doc(cls, type_str):\n do_doc_test(cls, type_str)",
"def test_docstring(self):\n self.assertTrue(len(City.__doc__) > 1)\n self.assertTrue(len(City.__init__.__doc__) > 1)\n self.assertTrue(len(City.__str__.__doc__) > 1)\n self.assertTrue(len(City.save.__doc__) > 1)\n self.assertTrue(len(City.to_dict.__doc__) > 1)",
"def _add_doc(func, doc):\r\n func.__doc__ = doc",
"def _add_doc(func, doc):\r\n func.__doc__ = doc",
"def _add_doc(func, doc):\r\n func.__doc__ = doc",
"def init_doc(self):\n raise NotImplementedError()",
"def add_documentation(cls, documentation):\n cls.__doc__ = documentation.CBAMLibrary\n methods = list(filter(lambda x: not x.startswith(\"_\"), dir(cls)))\n for method_name in methods:\n method = getattr(cls, method_name)\n if callable(method):\n name = method.__name__\n if hasattr(documentation, name):\n getattr(cls, name).__doc__ = getattr(documentation, name)",
"def _add_doc(func, doc):\n func.__doc__ = doc",
"def test_docstring(self):\n self.assertIsNotNone(Base.__doc__)"
] |
[
"0.619224",
"0.6169938",
"0.61175907",
"0.59712034",
"0.58990765",
"0.5855205",
"0.58150935",
"0.5814917",
"0.57944465",
"0.5758752",
"0.57489455",
"0.5739099",
"0.5705725",
"0.57024103",
"0.5677019",
"0.56496066",
"0.56422275",
"0.5635301",
"0.56168365",
"0.5609705",
"0.5590626",
"0.5552144",
"0.55411255",
"0.5526051",
"0.5526051",
"0.5526051",
"0.5517061",
"0.5511837",
"0.5511108",
"0.5506207"
] |
0.65876174
|
0
|
getHeadersFromSWIG (filename) > (filename1, filename2, .., filenameN) Reads the list of %include directives from the given SWIG (.i). The list of C/C++ headers (.h) included is returned.
|
def getHeadersFromSWIG (filename):
stream = open(filename)
lines = stream.readlines()
stream.close()
lines = [line for line in lines if line.strip().startswith('%include')]
lines = [line for line in lines if line.strip().endswith('.h')]
return [line.replace('%include', '').strip() for line in lines]
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def headers(path):\n for item in listdir(path):\n if not isfile(path_join(path, item)):\n continue\n\n if not item.endswith(\".h\"):\n continue\n\n yield item",
"def _getHeaders(self, headers):\n\n exclude_indeces = [headers.index(x) for x in self.parser.EXCLUDE]\n return filter(lambda x: x not in exclude_indeces, \n [i for i in range(len(headers))])",
"def visioncpp_headers():\n import os\n cwd = os.getcwd()\n\n # Change to the module root directory, since package_data paths must be\n # relative to this.\n module_root = \"visioncpp\"\n os.chdir(module_root)\n\n # Recursively list header files.\n header_root = \"lib/include/\"\n header_extension = \".hpp\"\n visioncpp_headers = [\n os.path.join(dp, f) for dp, dn, filenames\n in os.walk(header_root, followlinks=True)\n for f in filenames if os.path.splitext(f)[1] == header_extension]\n\n # Restore the working directory.\n os.chdir(cwd)\n\n return visioncpp_headers",
"def GetIncludedFilesForHeaderString():\n\n # Don't really need to automate this as it'll be the same for all of them. \n include_files_string = (IncludeString(\"\\\"ChasteSerialization.hpp\\\"\") + \n IncludeString(\"<boost/serialization/base_object.hpp>\") + \n IncludeString(\"<boost/serialization/shared_ptr.hpp>\") + \n \"\\n\" + \n IncludeString(\"<cmath>\") + \n IncludeString(\"<iostream>\") + \n IncludeString(\"\\\"AbstractOdeSystem.hpp\\\"\\n\") )\n\n return include_files_string",
"def getHeaders(self, args, pascalCase = False):\n\t\tfilePath, idxStart, idxEnd, regex = args\n\t\theaders = []\n\n\t\twith open(filePath, 'r', encoding=\"utf-8\") as fp: \n\t\t\tlines = fp.readlines()[idxStart:idxEnd]\t\t\t\n\t\t\tfor ln in lines:\n\t\t\t\tkey = search(regex, ln)\n\n\t\t\t\tif key:\n\t\t\t\t\t# Removes the last char (e.g. :)\n\t\t\t\t\tkey = key.group()[:-1]\n\t\t\t\t\t# Puts the string in pascal case\n\t\t\t\t\tif pascalCase: key = ''.join(x for x in key.title() if not x.isspace())\n\n\t\t\t\t\theaders.append(key)\n\n\t\treturn headers",
"def get_private_headers(self):\n if self._private_headers is None:\n with open(self.headername, 'r') as headerfile:\n included = re.findall(r'#include \"(.*)\\.h\"', headerfile.read())\n self._private_headers = list(included)\n return self._private_headers",
"def get_header_list(filename, sort=True):\n\theaders = []\n\ttry:\n\t\tif os.path.splitext(filename)[1]=='.gz':\n\t\t\tf = gzip.open(filename, 'rb')\n\t\t\tzipped = True\n\t\telse:\n\t\t\tf = open(filename, 'r')\n\t\t\tzipped = False\n\texcept:\n\t\treturn None\n\t\n\tline = '#'\n\twhile line and line[0] == '#':\n\t\tif zipped:\n\t\t\tline = f.readline().decode()\n\t\telse:\n\t\t\tline = f.readline()\n\t\theaders = line.split()\n\t\tif line and not headers:\n\t\t\tline = '#'\n\tif sort:\n\t\theaders.sort()\n\treturn headers",
"def get_header_files(dirpath, export_symbols=None):\n headers = []\n for path, subdirs, files in os.walk(dirpath):\n for name in files:\n if name.endswith('.h'):\n headers.append(HeaderFile(pathlib.Path(path, name), export_symbols))\n return headers",
"def get_header_files(options):\n\n header_file_paths = []\n header_base_dir = os.path.join(options.src_root, \"include\", \"lldb\")\n\n # Specify the include files in include/lldb that are not easy to\n # grab programatically.\n for header in [\n \"lldb-defines.h\",\n \"lldb-enumerations.h\",\n \"lldb-forward.h\",\n \"lldb-types.h\"]:\n header_file_paths.append(os.path.normcase(\n os.path.join(header_base_dir, header)))\n\n # Include the main LLDB.h file.\n api_dir = os.path.join(header_base_dir, \"API\")\n header_file_paths.append(os.path.normcase(\n os.path.join(api_dir, \"LLDB.h\")))\n\n filename_regex = re.compile(r\"^SB.+\\.h$\")\n\n # Include all the SB*.h files in the API dir.\n for filename in os.listdir(api_dir):\n if filename_regex.match(filename):\n header_file_paths.append(\n os.path.normcase(os.path.join(api_dir, filename)))\n\n logging.debug(\"found public API header file paths: %s\", header_file_paths)\n return header_file_paths",
"def GetHeaders(the_file):\n\n data = exifread.process_file(the_file, 'UNDEF', False, False, False)\n return data",
"def get_header_file_map(dir_name):\n result = defaultdict(list)\n for filename in get_files(dir_name, \".h\"):\n parts = Path(os.path.dirname(filename)).parts\n include_prefix = os.path.join(parts[-2], parts[-1])\n result[os.path.basename(filename)].append(include_prefix)\n return result",
"def read_headers(filelike):\n return reader.Reader.read_headers(filelike).datafile",
"def _get_include_files(self):\n for dirpath, dirnames, filenames in os.walk(self.IncludesDirectory):\n for f in filenames:\n rel_name = path.join(dirpath, f)\n if f.endswith('.pyx'):\n yield (rel_name, 'PyRex')\n elif f.endswith('.h'):\n yield (rel_name, 'Header')\n else:\n pass",
"def _get_headers(environ):\n # type: (Dict[str, str]) -> Iterator[Tuple[str, str]]\n for key, value in iteritems(environ):\n key = str(key)\n if key.startswith(\"HTTP_\") and key not in (\n \"HTTP_CONTENT_TYPE\",\n \"HTTP_CONTENT_LENGTH\",\n ):\n yield key[5:].replace(\"_\", \"-\").title(), value\n elif key in (\"CONTENT_TYPE\", \"CONTENT_LENGTH\"):\n yield key.replace(\"_\", \"-\").title(), value",
"def find_headers(file):\n headers = []\n with open(file, 'r') as f:\n for line in f:\n if line[0] == '#':\n if line[-1] == '\\n':\n headers.append(line[1:-1].strip())\n else:\n headers.append(line[1:].strip())\n return headers",
"def read_headers(input_file):\n\n with open(input_file+'.hdr','r') as f:\n return [float(h) if not h.isalpha() else h for h in [l.split()[1] for l in f.readlines()]] #isdigit() does not catch floats",
"def get_headers(headers: HTTPHeaders) -> Mapping[str, List[str]]:\r\n return {header.lower(): headers.get_list(header) for header in headers.keys()}",
"def test_include_headers_count(dataset: linux.LinuxSourcesDataset):\n # FIXME(cec): This value does not appear to stable across platforms, but it\n # should be.\n assert abs(len(dataset.ListFiles(\"include\", \"*.h\")) - 4890) < 100",
"def get_headers():\n file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),\n '..', 'cfg', 'headers.json'))\n return open_json_file(file_path)",
"def getAllHeaders():",
"def view_headers():\n\n return jsonify(get_dict('headers'))",
"def include_directories(self):\n\n status, stdout, stderr = self.__xcall__(['--cflags-only-I'])\n\n if status != 0:\n raise RuntimeError(\"error querying --cflags-only-I for package `%s': %s\" % (self.name, stderr))\n\n retval = []\n for token in stdout.split():\n retval.append(token[2:])\n\n return uniq(retval)",
"def GetHeaderContents(input_file_name, var_name, output_file_name):\n zero_query_rules = GetZeroQueryRules(input_file_name)\n\n result = []\n result.extend(GetDefineGuardHeaderLines(output_file_name))\n result.append('namespace mozc {')\n result.append('namespace {')\n\n for i, rule in enumerate(zero_query_rules):\n result.append('const char *%s%d[] = {' % (var_name, i))\n result.append(' ' + ', '.join(\n [EscapeString(s) for s in [rule[0]] + rule[1]] + ['0']))\n result.append('};')\n\n result.append('} // namespace')\n\n result.append('const char **%s_data[] = {' % var_name)\n result.append(' ' + ', '.join(\n ['%s%d' % (var_name, c) for c in range(len(zero_query_rules))]))\n result.append('};')\n result.append(\n 'const size_t %s_size = %d;' % (var_name, len(zero_query_rules)))\n\n result.append('} // namespace mozc')\n result.extend(GetDefineGuardFooterLines(output_file_name))\n return result",
"def headers(self):\n return HeaderList(self.prefix.include.uuid.join(\"uuid.h\"))",
"def get_headers(filename, delim=','):\n\tif filename[-3:].casefold() == '.gz':\n\t\twith gzip.open(filename, 'rt') as f:\n\t\t\tfirstline = f.readline()\n\telse:\n\t\twith open(filename, 'r') as f:\n\t\t\tfirstline = f.readline()\n\tfirstline = firstline.strip()\n\treturn firstline.split(delim)",
"def get_headers():\n soup = get_html()\n titles = []\n for i in soup.find_all('i'):\n header = str(i.text)\n titles.append(header.strip())\n return titles",
"def getHeaderList(self):\r\n return self.headerList",
"def GetIncludedFilesForSourceString(filename, model):\n\n model_name = GetModelName(filename, model)\n\n # Don't really need to automate this as it'll be the same for all of them. \n include_files_string = (IncludeString(\"\\\"\" + model_name + \".hpp\\\"\") + \n IncludeString(\"\\\"CellwiseOdeSystemInformation.hpp\\\"\") )\n\n return include_files_string",
"def get_included_files(space):\n files = space.ec.interpreter.included_files\n arr_list = []\n for f in files:\n arr_list.append(space.newstr(f))\n return space.new_array_from_list(arr_list)",
"def headers(self):\n return self.generator.headers"
] |
[
"0.61545664",
"0.58099794",
"0.58025914",
"0.5752392",
"0.5665402",
"0.56634474",
"0.5662152",
"0.55984664",
"0.5509919",
"0.5410221",
"0.54100114",
"0.5409726",
"0.5372038",
"0.5359347",
"0.53312",
"0.5306133",
"0.53054756",
"0.5291724",
"0.526709",
"0.5260466",
"0.52375114",
"0.5227617",
"0.5220658",
"0.51895666",
"0.51618063",
"0.514215",
"0.5106265",
"0.5100237",
"0.5093922",
"0.50918984"
] |
0.8412772
|
0
|
sanitizeForHTML (docstring) > docstring Performs HTML transformations on the C++/Doxygen docstring.
|
def sanitizeForHTML (docstring):
# Remove @~, which we use as a hack in Doxygen 1.7-1.8
docstring = docstring.replace(r'@~', '')
# First do conditional section inclusion based on the current language.
# Our possible conditional elements and their meanings are:
#
# java: only Java
# python: only Python
# perl: only Perl
# cpp: only C++
# csharp: only C#
# conly: only C
# clike: C, C++
# notcpp: not C++
# notclike: not C or C++
#
# The notcpp/notclike variants are because Doxygen 1.6.x doesn't have
# @ifnot, yet sometimes we want to say "if not C or C++".
cases = 'java|python|perl|cpp|csharp|conly|clike|notcpp|notclike'
p = re.compile('@if\s+(' + cases + ')\s+(.+?)((@else)\s+(.+?))?@endif', re.DOTALL)
docstring = p.sub(translateIfElse, docstring)
# Replace blank lines between paragraphs with <p>. There are two main
# cases: comments blocks whose lines always begin with an asterix (e.g.,
# C/C++), and comment blocks where they don't (e.g., Python). The third
# substitution below does the same thing for blank lines, except for the
# very end of the doc string.
p = re.compile('^(\s+)\*\s*$', re.MULTILINE)
docstring = p.sub(r'\1* <p>', docstring)
p = re.compile('^((?!\s+\Z)\s+)$', re.MULTILINE)
docstring = p.sub(r'\1<p>', docstring)
p = re.compile('^(?!\Z)$', re.MULTILINE)
docstring = p.sub(r'<p>', docstring)
# Javadoc doesn't have an @htmlinclude command, so we process the file
# inclusion directly here.
p = re.compile('@htmlinclude\s+([^\s:;,(){}+|?"\'/]+)([\s:;,(){}+|?"\'/])', re.MULTILINE)
docstring = p.sub(translateInclude, docstring)
# There's no Javadoc verbatim or @code/@endcode equivalent, so we have to
# convert it to raw HTML and transform the content too. This requires
# helpers. The following treats both @verbatim and @code the same way.
p = re.compile('@verbatim.+?@endverbatim', re.DOTALL)
docstring = p.sub(translateVerbatim, docstring)
p = re.compile('@code.+?@endcode', re.DOTALL)
docstring = p.sub(translateVerbatim, docstring)
# Javadoc doesn't have a @section or @subsection commands, so we translate
# those ourselves.
p = re.compile('@section\s+[^\s]+\s+(.*)$', re.MULTILINE)
docstring = p.sub(r'<h2>\1</h2>', docstring)
p = re.compile('@subsection\s+[^\s]+\s+(.*)$', re.MULTILINE)
docstring = p.sub(r'<h3>\1</h3>', docstring)
p = re.compile('@subsubsection\s+[^\s]+\s+(.*)$', re.MULTILINE)
docstring = p.sub(r'<h4>\1</h4>', docstring)
# Javadoc doesn't have an @image command. We translate @image html
# but ditch @image latex.
p = re.compile('@image\s+html+\s+([^\s]+).*$', re.MULTILINE)
docstring = p.sub(r"<center><img src='\1'></center><br>", docstring)
p = re.compile('@image\s+latex+\s+([^\s]+).*$', re.MULTILINE)
docstring = p.sub(r'', docstring)
# Doxygen doesn't understand HTML character codes like ≥, so we've
# been using doxygen's Latex facility to get special mathematical
# characters into the documentation, but as luck would have it, Javadoc
# doesn't understand the Latex markup. All of this is getting old.
docstring = re.sub(r'\\f\$\\geq\\f\$', '≥', docstring)
docstring = re.sub(r'\\f\$\\leq\\f\$', '≤', docstring)
docstring = re.sub(r'\\f\$\\times\\f\$', '×', docstring)
# The following are done in pairs because I couldn't come up with a
# better way to catch the case where @c and @em end up alone at the end
# of a line and the thing to be formatted starts on the next one after
# the comment '*' character on the beginning of the line.
docstring = re.sub('@c *([^ ,;()/*\n\t]+)', r'<code>\1</code>', docstring)
docstring = re.sub('@c(\n[ \t]*\*[ \t]*)([^ ,;()/*\n\t]+)', r'\1<code>\2</code>', docstring)
docstring = re.sub('@p +([^ ,.:;()/*\n\t]+)', r'<code>\1</code>', docstring)
docstring = re.sub('@p(\n[ \t]*\*[ \t]+)([^ ,.:;()/*\n\t]+)', r'\1<code>\2</code>', docstring)
docstring = re.sub('@em *([^ ,.:;()/*\n\t]+)', r'<em>\1</em>', docstring)
docstring = re.sub('@em(\n[ \t]*\*[ \t]*)([^ ,.:;()/*\n\t]+)', r'\1<em>\2</em>', docstring)
# Convert @li into <li>, but also add <ul> ... </ul>. This is a bit
# simple-minded (I suppose like most of this code), but ought to work
# for the cases we use in practice.
p = re.compile('^(\s+\*\s+)(@li\s+.*?)(\s+)(\*/|\*\s+@(?!li\s)|\*\s+<p>)', re.MULTILINE|re.DOTALL)
docstring = p.sub(rewriteList, docstring)
# Wrap @deprecated content with a class so that we can style it.
p = re.compile('^(\s+\*\s+)(@deprecated\s)((\S|\s)+)(<p>|\*/)', re.MULTILINE|re.DOTALL)
docstring = p.sub(rewriteDeprecated, docstring)
# Doxygen automatically cross-references class names in text to the class
# definition page, but Javadoc does not. Rather than having to put in a
# lot conditional @if/@endif's into the documentation to manually create
# cross-links just for the Java case, let's automate. This needs to be
# done better (e.g., by not hard-wiring the class names).
p = re.compile(r'([^a-zA-Z0-9_.">])(' + r')\b([^:])', re.DOTALL)
if language == 'csharp':
docstring = p.sub(translateClassRefCSharp, docstring)
elif language == 'java':
docstring = p.sub(translateClassRefJava, docstring)
# Massage method cross-references.
p = re.compile('(\s+)(\S+?)::(\w+\s*\([^)]*?\))', re.MULTILINE)
if language == 'csharp':
docstring = p.sub(translateCSharpCrossRef, docstring)
elif language == 'java':
docstring = p.sub(translateJavaCrossRef, docstring)
# Clean-up step needed because some of the procedures above are imperfect.
# This converts " * * @foo" lines into " * @foo":
p = re.compile('^(\s+)\*\s+\*\s+@', re.MULTILINE)
docstring = p.sub(r'\1* @', docstring)
# Take out any left-over Doxygen-style quotes, because Javadoc doesn't have
# the %foo quoting mechanism.
docstring = re.sub('(\s)%(\w)', r'\1\2', docstring)
# Currently, we don't handle @ingroup.
docstring = re.sub('@ingroup \w+', '', docstring)
return docstring
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def to_html(docstring: str) -> str:\n # careful: markdown2 returns a subclass of str with an extra\n # .toc_html attribute. don't further process the result,\n # otherwise this attribute will be lost.\n return pdoc.markdown2.markdown( # type: ignore\n docstring,\n extras=markdown_extensions,\n link_patterns=markdown_link_patterns,\n )",
"def clean_markdown_html(html):\n # Allow users to override the protocols. We're checking for this\n # dynamically, partly to ease unit testing, and partly to eventually\n # allow dynamic configuration.\n safe_url_protocols = SAFE_MARKDOWN_URL_PROTOCOLS\n custom_safe_url_protocols = settings.ALLOWED_MARKDOWN_URL_PROTOCOLS\n\n if custom_safe_url_protocols:\n safe_url_protocols = (set(safe_url_protocols) |\n set(custom_safe_url_protocols))\n\n # Create a bleach HTML cleaner, and override settings on the html5lib\n # serializer it contains to ensure we use self-closing HTML tags, like\n # <br/>. This is needed so that we can parse the resulting HTML in\n # Djblets for things like Markdown diffing.\n cleaner = Cleaner(tags=SAFE_MARKDOWN_TAGS,\n attributes=SAFE_MARKDOWN_ATTRS,\n protocols=safe_url_protocols)\n cleaner.serializer.use_trailing_solidus = True\n\n return cleaner.clean(html)",
"def html(input):\n output=atpic.cleaner_alex.clean(input)\n return output",
"def sanitize_html(input):\n p = HTMLParser(tokenizer=HTMLSanitizer, tree=treebuilders.getTreeBuilder(\"dom\"))\n dom_tree = p.parseFragment(input)\n walker = treewalkers.getTreeWalker(\"dom\")\n stream = walker(dom_tree)\n\n s = HTMLSerializer(omit_optional_tags=False)\n return \"\".join(s.serialize(stream))",
"def remove_html_tags_fun(self):\n cleaner = re.compile('<.*?>')\n cleaned_text = re.sub(cleaner, '', self.doc)\n cleaned_text = re.sub('[\\n\\t]', '', cleaned_text)\n self.doc = cleaned_text",
"def docstring_hack():\n pass",
"def rewriteDocstringForPerl (docstring):\n\n # Get rid of the /** ... */ and leading *'s.\n docstring = docstring.replace('/**', '').replace('*/', '').replace('*', ' ')\n\n # Get rid of indentation\n p = re.compile('^\\s+(\\S*\\s*)', re.MULTILINE)\n docstring = p.sub(r'\\1', docstring)\n\n # Get rid of paragraph indentation not caught by the code above.\n p = re.compile('^[ \\t]+(\\S)', re.MULTILINE)\n docstring = p.sub(r'\\1', docstring)\n\n # Get rid of blank lines.\n p = re.compile('^[ \\t]+$', re.MULTILINE)\n docstring = p.sub(r'', docstring)\n\n # Get rid of the %foo quoting.\n docstring = re.sub('(\\s)%(\\w)', r'\\1\\2', docstring)\n\n # The following are done in pairs because I couldn't come up with a\n # better way to catch the case where @c and @em end up alone at the end\n # of a line and the thing to be formatted starts on the next one after\n # the comment '*' character on the beginning of the line.\n\n docstring = re.sub('@c *([^ ,.:;()/*\\n\\t]+)', r'C<\\1>', docstring)\n docstring = re.sub('@c(\\n[ \\t]*\\*[ \\t]*)([^ ,.:;()/*\\n\\t]+)', r'\\1C<\\2>', docstring)\n docstring = re.sub('@p +([^ ,.:;()/*\\n\\t]+)', r'C<\\1>', docstring)\n docstring = re.sub('@p(\\n[ \\t]*\\*[ \\t]+)([^ ,.:;()/*\\n\\t]+)', r'\\1C<\\2>', docstring)\n docstring = re.sub('@em *([^ ,.:;()/*\\n\\t]+)', r'I<\\1>', docstring)\n docstring = re.sub('@em(\\n[ \\t]*\\*[ \\t]*)([^ ,.:;()/*\\n\\t]+)', r'\\1I<\\2>', docstring)\n\n docstring = docstring.replace('<ul>', '\\n=over\\n')\n docstring = docstring.replace('<li> ', '\\n=item\\n\\n')\n docstring = docstring.replace('</ul>', '\\n=back\\n')\n\n docstring = docstring.replace('@return', 'Returns')\n docstring = docstring.replace(' < ', ' E<lt> ').replace(' > ', ' E<gt> ')\n docstring = re.sub('<code>([^<]*)</code>', r'C<\\1>', docstring)\n docstring = re.sub('<b>([^<]*)</b>', r'B<\\1>', docstring) \n\n return docstring",
"def sanitize(text):\n try:\n from airy.core import sanitizer\n return smart_unicode(sanitizer.clean_html(text))\n except ImportError:\n logging.error(\"You need html5lib in order to use sanitize\")\n return \"ERROR: You need html5lib in order to use sanitize\"",
"def html_clean(options):\r\n remake_directories(options.sphinx.doctrees, options.html.outdir)\r\n html(options)\r\n return",
"def normalize_html(html):\n # Replace many whitespace characters with a single space in some elements\n # kind of like a browser does.\n soup = BeautifulSoup(html, 'lxml')\n for e in soup.select(':not(script,pre,code,style)'):\n for part in e:\n if isinstance(part, NavigableString):\n crunched = NavigableString(re.sub(r'\\s+', ' ', part))\n if crunched != part:\n part.replace_with(crunched)\n # Asciidoctor adds a \"content\" wrapper. It doesn't really change the layout\n # so we're ok with it.\n for e in soup.select('#content'):\n e.unwrap()\n # Docbook adds <span class=\"emphasis\"> around <em> tags. We don't need them\n # and it isn't worth making Asciidoctor make them.\n for e in soup.select('.emphasis'):\n e.unwrap()\n # Asciidoctor adds a \"ulist\" class to all unordered lists which doesn't\n # hurt anything so we can ignore it.\n for e in soup.select('.itemizedlist.ulist'):\n e['class'].remove('ulist')\n # Docbook adds type=\"disc\" to ul which is the default and isn't needed.\n for e in soup.select('ul'):\n if 'type' in e.attrs and e['type'] == 'disc':\n del e['type']\n # Asciidoctor adds a \"olist\" class to all ordered lists which doesn't\n # hurt anything so we can ignore it.\n for e in soup.select('.orderedlist.olist'):\n e['class'].remove('olist')\n # Docbook adds type=\"1\" to ol which is the default and isn't needed.\n for e in soup.select('ol'):\n if 'type' in e.attrs and e['type'] == '1':\n del e['type']\n # Docbook emits images with the 'inlinemediaobject' class and Asciidoctor\n # has the 'image' class. We've updated our styles to make both work.\n for e in soup.select('.inlinemediaobject'):\n e['class'].remove('inlinemediaobject')\n e['class'].append('image')\n # Docbook links with `<a class=\"link\"` when linking from one page of a book\n # to another. Asciidoctor emits `<a class=\"link\"`. Both look fine.\n for e in soup.select('a.xref'):\n if '.html#' in e['href']:\n e['class'].remove('xref')\n e['class'].append('link')\n # Format the html with indentation so we can *see* things\n html = soup.prettify()\n # docbook spits out the long-form charset and asciidoctor spits out the\n # short form but they are equivalent\n html = html.replace(\n '<meta content=\"text/html; charset=utf-8\" http-equiv=\"Content-Type\"/>',\n '<meta charset=\"utf-8\"/>')\n return html",
"def docstring(self, docstring): # type: (str) -> None\n self._tmp_docstring = inspect.cleandoc(docstring)",
"def strip_html(unclean, tags=[]):\n # We make this noop for non-string, non-collection inputs so this function can be used with higher-order\n # functions, such as rapply (recursively applies a function to collections)\n if not isinstance(unclean, basestring) and not is_iterable(unclean) and unclean is not None:\n return unclean\n return bleach.clean(unclean, strip=True, tags=tags, attributes=[], styles=[])",
"def docstring(\n docstring: str = None, *, pre: str = None, post: str = None\n) -> Callable[[U], U]:\n\n def edit_docstring(obj: U) -> U:\n obj.__doc__ = \"\".join(\n (\n clean_docstring(pre or \"\", unused=\"pre\"),\n clean_docstring(docstring or (obj.__doc__ or \"\")),\n clean_docstring(post or \"\", unused=\"post\"),\n )\n )\n return obj\n\n return edit_docstring",
"def filter_doc(doc_text):\n # remove stars\n filter_regex=re.compile(r\"[_*]\")\n doc=filter_regex.sub(\"\",doc_text)\n # substitute quotation marks\n double_quot_regex=re.compile(r\"[“”]\")\n single_quot_regex=re.compile(r\"[’‘]\")\n doc=double_quot_regex.sub('\"',doc)\n doc=single_quot_regex.sub(\"'\",doc)\n # substitute new lines inside the text for spaces\n # these new lines are usually caused by formatting texts to fit in 80 columns \n newline_quot_regex=re.compile(r\"(\\S)\\n(\\S)\")\n doc=newline_quot_regex.sub(r\"\\1 \\2\",doc)\n # remove illustration tag\n #illustration_regex=re.compile(r\"\\[Illustration.*]\")\n #doc=illustration_regex.sub(\"\",doc)\n return doc",
"def strip_html(unclean):\n # We make this noop for non-string, non-collection inputs so this function can be used with higher-order\n # functions, such as rapply (recursively applies a function to collections)\n if not isinstance(unclean, basestring) and not is_iterable(unclean) and unclean is not None:\n return unclean\n return bleach.clean(unclean, strip=True, tags=[], attributes=[], styles=[])",
"def strip_html(text: str, **serializer_kwargs: bool):\n cleaner = get_cleaner(**serializer_kwargs)\n text = cleaner.clean(text)\n return text",
"def convert_html():\n return",
"def sanitize_code_as_html(rewriter):\n # Generate a list of whitespace, <>, etc, to rewrite, and do it all at\n # once. This is because we're searching by position in the rewriter's\n # buffer, which will get changed once we rewrite it.\n replacements = []\n for (line, text) in enumerate(rewriter.lines):\n for col in [m.start() for m in re.finditer('<', text)]:\n replacements.append((\"<\", line, col, line, col+1))\n for col in [m.start() for m in re.finditer('>', text)]:\n replacements.append((\">\", line, col, line, col+1))\n\n for (text, from_line, from_col, to_line, to_col) in replacements:\n rewriter.replace(text, from_line, from_col, to_line, to_col)",
"def clean_html(text):\n cleanr = re.compile(\"<.*?>\")\n clean_text = re.sub(cleanr, \"\", text)\n return clean_text",
"def sanitize_html(answer):\r\n clean_html = bleach.clean(answer,\r\n tags=['embed', 'iframe', 'a', 'img', 'br'],\r\n attributes=ALLOWED_HTML_ATTRS,\r\n strip=True)\r\n autolinked = bleach.linkify(clean_html,\r\n callbacks=[bleach.callbacks.target_blank],\r\n skip_pre=True,\r\n tokenizer=HTMLTokenizer)\r\n return OpenEndedChild.replace_newlines(autolinked)",
"def normalised_html(html):\n soup = bs4.BeautifulSoup(html)\n root = soup.find(attrs='refentry')\n\n # Format function signature\n synopsis = root.find(attrs='methodsynopsis')\n pre = soup.new_tag('pre')\n pre.append(re.sub('\\s+', ' ', synopsis.get_text().strip()))\n synopsis.replace_with(pre)\n\n # Remove unwanted information\n changelog = root.find(attrs='changelog')\n if changelog: changelog.decompose()\n\n # Remove misused/unnecessary <blockquote>s\n for tag in root.find_all('blockquote'):\n tag.unwrap()\n\n # Convert <h3> => <h2>\n for h3 in root.find_all('h3'):\n h2 = soup.new_tag('h2')\n h2.append(h3.get_text().strip())\n h3.replace_with(h2)\n\n # Unwrap decorated <code> elements. Markdown looks a bit noisy when\n # different formatting elements are combined (e.g. **`foo`**)\n for code in root.find_all('code'):\n if code.parent.name in ('em', 'strong'):\n code.parent.unwrap()\n\n # Convert block <code> => <pre>\n for code in [div.find('code') for div in root.find_all('div', 'phpcode')]:\n for br in code.find_all('br'):\n br.replace_with('\\n')\n pre = soup.new_tag('pre')\n pre.append(code.get_text().strip())\n code.replace_with(pre)\n\n return unicode(root)",
"def strip_html(func):\n\n cleaner = re.compile(\"<.*?>\")\n def new_func(*args, strip_html=False, **kwargs):\n name = func(*args, **kwargs)\n if strip_html:\n if isinstance(name, str):\n return html.unescape(re.sub(cleaner, \"\", name))\n elif isinstance(name, list) or isinstance(name, tuple):\n return type(name)([html.unescape(re.sub(cleaner, \"\", n)) for n in name])\n else:\n return name\n new_func.__name__ = func.__name__\n new_func.__doc__ = func.__doc__\n return new_func",
"def sanitize_comment(comment):\n\n if hasattr(settings, \"BLEACH_ALLOWED_TAGS\"):\n allowed_tags = settings.BLEACH_ALLOWED_TAGS\n else:\n allowed_tags = bleach.sanitizer.ALLOWED_TAGS\n\n return bleach.clean(comment, tags=allowed_tags, strip=True)",
"def clean_html(html):\n html = re.sub(r\"(?s)<!--(.*?)-->[\\n]?\", \"\\\\1\", html)\n html = re.sub(r\"<!--\", \"\", html)\n if html == '':\n return ''\n s = MLStripper()\n s.feed(html)\n return s.get_data().strip()",
"def remove_html( html):\n return html2txt(html)",
"def clean_for_html(cls, value):\r\n return cls._clean(value, INVALID_HTML_CHARS)",
"def clean_html_content(field_name):\n\n @check_field_is_empty(field_name)\n def wrapped(self):\n \"\"\"Decorator wrapper method.\n \"\"\"\n from HTMLParser import HTMLParseError\n\n content = self.cleaned_data.get(field_name)\n\n # clean_html_content is called when writing data into GAE rather than\n # when reading data from GAE. This short-circuiting of the sanitizer\n # only affects html authored by developers. The isDeveloper test for\n # example allows developers to add javascript.\n if user_logic.isDeveloper():\n return content\n\n try:\n cleaner = HtmlSanitizer.Cleaner()\n cleaner.string = content\n cleaner.clean()\n except (HTMLParseError, safe_html.IllegalHTML), msg:\n raise forms.ValidationError(msg)\n\n content = cleaner.string\n content = content.strip().replace('\\r\\n', '\\n')\n\n return content\n\n return wrapped",
"def html():\n builtdocs = path(\"docs\") / options.sphinx.builddir / \"html\"\n destdir = path(PACKAGE) / \"docs\"\n destdir.rmtree()\n builtdocs.move(destdir)",
"def stripHTMLTags (html):\n text = html\n \n # apply rules in given order!\n rules = [\n { r'>\\s+' : u'>'}, # remove spaces after a tag opens or closes\n { r'\\s+' : u' '}, # replace consecutive spaces\n { r'\\s*<br\\s*/?>\\s*' : u'\\n'}, # newline after a <br>\n { r'</(div)\\s*>\\s*' : u'\\n'}, # newline after </p> and </div> and <h1/>...\n { r'</(p|h\\d)\\s*>\\s*' : u'\\n\\n'}, # newline after </p> and </div> and <h1/>...\n { r'<head>.*<\\s*(/head|body)[^>]*>' : u'' }, # remove <head> to </head>\n { r'<a\\s+href=\"([^\"]+)\"[^>]*>.*</a>' : r'\\1' }, # show links instead of texts\n { r'[ \\t]*<[^<]*?/?>' : u'' }, # remove remaining tags\n { r'^\\s+' : u'' } # remove spaces at the beginning\n ]\n \n for rule in rules:\n for (k,v) in rule.items():\n regex = re.compile (k)\n text = regex.sub (v, text)\n \n # replace special strings\n special = {\n ' ' : ' ', '&' : '&', '"' : '\"',\n '<' : '<', '>' : '>'\n }\n \n for (k,v) in special.items():\n text = text.replace (k, v)\n\n filtered = filter(lambda x: not re.match(r'^\\s*$', x), text) \n finaltext = re.sub(u'分享:','', filtered)\n return finaltext",
"def safeHTML(s):\n parser = StrippingParser()\n parser.feed(s)\n parser.close()\n parser.cleanup()\n return parser.result"
] |
[
"0.6426833",
"0.60326535",
"0.5976255",
"0.5938563",
"0.59128064",
"0.58987826",
"0.5814522",
"0.5779177",
"0.5754586",
"0.57343876",
"0.5713335",
"0.5580562",
"0.55243945",
"0.55169314",
"0.5503092",
"0.5473832",
"0.54169965",
"0.53993046",
"0.53867984",
"0.53826857",
"0.53666",
"0.5361034",
"0.53566754",
"0.53507245",
"0.53385",
"0.53221154",
"0.5320514",
"0.5306506",
"0.5287662",
"0.527789"
] |
0.7580223
|
0
|
rewriteDocstringForJava (docstring) > docstring Performs some minimal javadoc-specific sanitizations on the C++/Doxygen docstring.
|
def rewriteDocstringForJava (docstring):
# Preliminary: rewrite some of the data type references to equivalent
# Java types. (Note: this rewriting affects only the documentation
# comments inside classes & methods, not the method signatures.)
docstring = docstring.replace(r'const char *', 'String ')
docstring = docstring.replace(r'const char* ', 'String ')
docstring = docstring.replace(r'an unsigned int', 'a long integer')
docstring = docstring.replace(r'unsigned int', 'long')
docstring = docstring.replace(r'const std::string&', 'String')
docstring = docstring.replace(r'const std::string &', 'String ')
docstring = docstring.replace(r'const std::string ', 'String ')
docstring = docstring.replace(r'std::string', 'String')
docstring = docstring.replace(r'NULL', 'null')
# Also use Java syntax instead of "const XMLNode*" etc.
p = re.compile(r'const (%?)(' + r')( ?)(\*|&)', re.DOTALL)
docstring = p.sub(rewriteClassRefAddingSpace, docstring)
p = re.compile(r'(%?)(' + r')( ?)(\*|&)', re.DOTALL)
docstring = p.sub(rewriteClassRefAddingSpace, docstring)
# Do the big work.
docstring = sanitizeForHTML(docstring)
# Fix up for a problem introduced by sanitizeForHTML: it puts {@link ...}
# into the arguments of functions mentioned in @see's, if the function
# has more than one argument. This gets rid of the @link's. This should
# be fixed properly some day.
p = re.compile(r'((@see|@throws)\s+[\w\\ ,.\'"=<>()#]*?){@link\s+([^}]+?)}')
while re.search(p, docstring) != None:
docstring = p.sub(r'\1\3', docstring)
# Inside of @see, change double colons to pound signs.
docstring = re.sub('(@see\s+\w+)::', r'\1#', docstring)
# The syntax for @see is slightly different: method names need to have a
# leading pound sign character. This particular bit of code only handles
# a single @see foo(), which means the docs have to be written that way.
# Maybe someday in the future it should be expanded to handle
# @see foo(), bar(), etc., but I don't have time right now to do it.
docstring = re.sub('(@see\s+)([\w:.]+)\(', r'\1#\2(', docstring)
# Remove the '*' character that Javadoc doesn't want to see in @see's.
# (This doesn't make a difference; javadoc still can't match up the refs.)
# p = re.compile('@see[\s\w.:,()#]+[*][\s\w.:,()*#]')
# docstring = p.sub(removeStar, docstring)
# The syntax for @link is vastly different.
p = re.compile('@link([\s/*]+[\w\s,.:#()*]+[\s/*]*[\w():#]+[\s/*]*)@endlink', re.DOTALL)
docstring = p.sub(r'{@link \1}', docstring)
# Outside of @see and other constructs, dot is used to reference members
# instead of C++'s double colon.
docstring = docstring.replace(r'::', '.')
# Need to escape quotation marks. The reason is that the
# %javamethodmodifiers directives created for use with SWIG will
# themselves be double-quoted strings, and leaving embedded quotes
# will completely screw that up.
docstring = docstring.replace('"', "'")
docstring = docstring.replace(r"'", r"\'")
return docstring
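# Minimal standalone sketch of the @see rewriting step performed above
# (hypothetical demo, not taken from the original script; the full function
# also relies on sanitizeForHTML() and rewriteClassRefAddingSpace(), which
# are defined elsewhere).
import re
_demo = "@see setAnnotation(string annotation)"
print(re.sub(r'(@see\s+)([\w:.]+)\(', r'\1#\2(', _demo))
# prints: @see #setAnnotation(string annotation)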
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def rewriteDocstringForPython (docstring):\n\n # Take out the C++ comment start and end.\n\n docstring = docstring.replace('/**', '').replace('*/', '')\n p = re.compile('^(\\s*)\\*([ \\t]*)', re.MULTILINE)\n docstring = p.sub(r'\\2', docstring)\n\n # Rewrite some of the data type references to equivalent Python types.\n # (Note: this rewriting affects only the documentation comments inside\n # classes & methods, not the method signatures.)\n\n docstring = docstring.replace(r'const char *', 'string ')\n docstring = docstring.replace(r'const char* ', 'string ')\n docstring = docstring.replace(r'an unsigned int', 'a long integer')\n docstring = docstring.replace(r'unsigned int', 'long')\n docstring = docstring.replace(r'const std::string&', 'string')\n docstring = docstring.replace(r'const std::string', 'string')\n docstring = docstring.replace(r'std::string', 'string')\n docstring = docstring.replace(r'NULL', 'None')\n docstring = docstring.replace(r'@c true', '@c True')\n docstring = docstring.replace(r'@c false', '@c False')\n\n # Also use Python syntax instead of \"const XMLNode*\" etc.\n\n p = re.compile(r'const (%?)(' + r') ?(\\*|&)', re.DOTALL)\n docstring = p.sub(rewriteClassRef, docstring) \n p = re.compile(r'(%?)(' + r') ?(\\*|&)', re.DOTALL)\n docstring = p.sub(rewriteClassRef, docstring) \n\n # Need to escape the quotation marks:\n\n docstring = docstring.replace('\"', \"'\")\n docstring = docstring.replace(r\"'\", r\"\\'\")\n\n # Python method cross-references won't be made by doxygen unless\n # the method reference is written without arguments.\n\n p = re.compile('(\\s+)(\\S+?)::(\\w+\\s*)(\\([^)]*?\\))', re.MULTILINE)\n docstring = p.sub(translatePythonCrossRef, docstring)\n p = re.compile('(@see\\s+)(\\w+\\s*)(\\([^)]*?\\))')\n docstring = p.sub(translatePythonSeeRef, docstring)\n\n # Friggin' doxygen escapes HTML character codes, so the hack we have to\n # do for Javadoc turns out doesn't work for the Python documentation.\n # Kluge around it.\n\n docstring = re.sub(r'\\\\f\\$\\\\geq\\\\f\\$', '>=', docstring)\n docstring = re.sub(r'\\\\f\\$\\\\leq\\\\f\\$', '<=', docstring)\n docstring = re.sub(r'\\\\f\\$\\\\times\\\\f\\$', '*', docstring)\n\n # SWIG does some bizarre truncation of leading characters that\n # happens to hit us because of how we have to format verbatim's.\n # This tries to kluge around it: \n p = re.compile('@verbatim.+?@endverbatim', re.DOTALL)\n docstring = p.sub(indentVerbatimForPython, docstring)\n\n return docstring",
"def rewriteDocstringForPerl (docstring):\n\n # Get rid of the /** ... */ and leading *'s.\n docstring = docstring.replace('/**', '').replace('*/', '').replace('*', ' ')\n\n # Get rid of indentation\n p = re.compile('^\\s+(\\S*\\s*)', re.MULTILINE)\n docstring = p.sub(r'\\1', docstring)\n\n # Get rid of paragraph indentation not caught by the code above.\n p = re.compile('^[ \\t]+(\\S)', re.MULTILINE)\n docstring = p.sub(r'\\1', docstring)\n\n # Get rid of blank lines.\n p = re.compile('^[ \\t]+$', re.MULTILINE)\n docstring = p.sub(r'', docstring)\n\n # Get rid of the %foo quoting.\n docstring = re.sub('(\\s)%(\\w)', r'\\1\\2', docstring)\n\n # The following are done in pairs because I couldn't come up with a\n # better way to catch the case where @c and @em end up alone at the end\n # of a line and the thing to be formatted starts on the next one after\n # the comment '*' character on the beginning of the line.\n\n docstring = re.sub('@c *([^ ,.:;()/*\\n\\t]+)', r'C<\\1>', docstring)\n docstring = re.sub('@c(\\n[ \\t]*\\*[ \\t]*)([^ ,.:;()/*\\n\\t]+)', r'\\1C<\\2>', docstring)\n docstring = re.sub('@p +([^ ,.:;()/*\\n\\t]+)', r'C<\\1>', docstring)\n docstring = re.sub('@p(\\n[ \\t]*\\*[ \\t]+)([^ ,.:;()/*\\n\\t]+)', r'\\1C<\\2>', docstring)\n docstring = re.sub('@em *([^ ,.:;()/*\\n\\t]+)', r'I<\\1>', docstring)\n docstring = re.sub('@em(\\n[ \\t]*\\*[ \\t]*)([^ ,.:;()/*\\n\\t]+)', r'\\1I<\\2>', docstring)\n\n docstring = docstring.replace('<ul>', '\\n=over\\n')\n docstring = docstring.replace('<li> ', '\\n=item\\n\\n')\n docstring = docstring.replace('</ul>', '\\n=back\\n')\n\n docstring = docstring.replace('@return', 'Returns')\n docstring = docstring.replace(' < ', ' E<lt> ').replace(' > ', ' E<gt> ')\n docstring = re.sub('<code>([^<]*)</code>', r'C<\\1>', docstring)\n docstring = re.sub('<b>([^<]*)</b>', r'B<\\1>', docstring) \n\n return docstring",
"def sanitizeForHTML (docstring):\n\n # Remove @~, which we use as a hack in Doxygen 1.7-1.8\n\n docstring = docstring.replace(r'@~', '')\n\n # First do conditional section inclusion based on the current language.\n # Our possible conditional elements and their meanings are:\n #\n # java: only Java\n # python: only Python\n # perl: only Perl\n # cpp: only C++\n # csharp: only C#\n # conly: only C\n # clike: C, C++\n # notcpp:\tnot C++\n # notclike: not C or C++\n #\n # The notcpp/notclike variants are because Doxygen 1.6.x doesn't have\n # @ifnot, yet sometimes we want to say \"if not C or C++\".\n\n cases = 'java|python|perl|cpp|csharp|conly|clike|notcpp|notclike'\n p = re.compile('@if\\s+(' + cases + ')\\s+(.+?)((@else)\\s+(.+?))?@endif', re.DOTALL)\n docstring = p.sub(translateIfElse, docstring)\n\n # Replace blank lines between paragraphs with <p>. There are two main\n # cases: comments blocks whose lines always begin with an asterix (e.g.,\n # C/C++), and comment blocks where they don't (e.g., Python). The third\n # substitution below does the same thing for blank lines, except for the\n # very end of the doc string.\n\n p = re.compile('^(\\s+)\\*\\s*$', re.MULTILINE)\n docstring = p.sub(r'\\1* <p>', docstring)\n p = re.compile('^((?!\\s+\\Z)\\s+)$', re.MULTILINE)\n docstring = p.sub(r'\\1<p>', docstring)\n p = re.compile('^(?!\\Z)$', re.MULTILINE)\n docstring = p.sub(r'<p>', docstring)\n\n # Javadoc doesn't have an @htmlinclude command, so we process the file\n # inclusion directly here.\n\n p = re.compile('@htmlinclude\\s+([^\\s:;,(){}+|?\"\\'/]+)([\\s:;,(){}+|?\"\\'/])', re.MULTILINE)\n docstring = p.sub(translateInclude, docstring)\n\n # There's no Javadoc verbatim or @code/@endcode equivalent, so we have to\n # convert it to raw HTML and transform the content too. This requires\n # helpers. The following treats both @verbatim and @code the same way.\n\n p = re.compile('@verbatim.+?@endverbatim', re.DOTALL)\n docstring = p.sub(translateVerbatim, docstring)\n p = re.compile('@code.+?@endcode', re.DOTALL)\n docstring = p.sub(translateVerbatim, docstring)\n\n # Javadoc doesn't have a @section or @subsection commands, so we translate\n # those ourselves.\n\n p = re.compile('@section\\s+[^\\s]+\\s+(.*)$', re.MULTILINE)\n docstring = p.sub(r'<h2>\\1</h2>', docstring)\n p = re.compile('@subsection\\s+[^\\s]+\\s+(.*)$', re.MULTILINE)\n docstring = p.sub(r'<h3>\\1</h3>', docstring)\n p = re.compile('@subsubsection\\s+[^\\s]+\\s+(.*)$', re.MULTILINE)\n docstring = p.sub(r'<h4>\\1</h4>', docstring)\n\n # Javadoc doesn't have an @image command. We translate @image html\n # but ditch @image latex.\n\n p = re.compile('@image\\s+html+\\s+([^\\s]+).*$', re.MULTILINE)\n docstring = p.sub(r\"<center><img src='\\1'></center><br>\", docstring)\n p = re.compile('@image\\s+latex+\\s+([^\\s]+).*$', re.MULTILINE)\n docstring = p.sub(r'', docstring)\n\n # Doxygen doesn't understand HTML character codes like ≥, so we've\n # been using doxygen's Latex facility to get special mathematical\n # characters into the documentation, but as luck would have it, Javadoc\n # doesn't understand the Latex markup. 
All of this is getting old.\n\n docstring = re.sub(r'\\\\f\\$\\\\geq\\\\f\\$', '≥', docstring)\n docstring = re.sub(r'\\\\f\\$\\\\leq\\\\f\\$', '≤', docstring)\n docstring = re.sub(r'\\\\f\\$\\\\times\\\\f\\$', '×', docstring)\n\n # The following are done in pairs because I couldn't come up with a\n # better way to catch the case where @c and @em end up alone at the end\n # of a line and the thing to be formatted starts on the next one after\n # the comment '*' character on the beginning of the line.\n\n docstring = re.sub('@c *([^ ,;()/*\\n\\t]+)', r'<code>\\1</code>', docstring)\n docstring = re.sub('@c(\\n[ \\t]*\\*[ \\t]*)([^ ,;()/*\\n\\t]+)', r'\\1<code>\\2</code>', docstring)\n docstring = re.sub('@p +([^ ,.:;()/*\\n\\t]+)', r'<code>\\1</code>', docstring)\n docstring = re.sub('@p(\\n[ \\t]*\\*[ \\t]+)([^ ,.:;()/*\\n\\t]+)', r'\\1<code>\\2</code>', docstring)\n docstring = re.sub('@em *([^ ,.:;()/*\\n\\t]+)', r'<em>\\1</em>', docstring)\n docstring = re.sub('@em(\\n[ \\t]*\\*[ \\t]*)([^ ,.:;()/*\\n\\t]+)', r'\\1<em>\\2</em>', docstring)\n\n # Convert @li into <li>, but also add <ul> ... </ul>. This is a bit\n # simple-minded (I suppose like most of this code), but ought to work\n # for the cases we use in practice.\n\n p = re.compile('^(\\s+\\*\\s+)(@li\\s+.*?)(\\s+)(\\*/|\\*\\s+@(?!li\\s)|\\*\\s+<p>)', re.MULTILINE|re.DOTALL)\n docstring = p.sub(rewriteList, docstring)\n\n # Wrap @deprecated content with a class so that we can style it.\n\n p = re.compile('^(\\s+\\*\\s+)(@deprecated\\s)((\\S|\\s)+)(<p>|\\*/)', re.MULTILINE|re.DOTALL)\n docstring = p.sub(rewriteDeprecated, docstring)\n\n # Doxygen automatically cross-references class names in text to the class\n # definition page, but Javadoc does not. Rather than having to put in a\n # lot conditional @if/@endif's into the documentation to manually create\n # cross-links just for the Java case, let's automate. This needs to be\n # done better (e.g., by not hard-wiring the class names).\n\n p = re.compile(r'([^a-zA-Z0-9_.\">])(' + r')\\b([^:])', re.DOTALL)\n if language == 'csharp':\n docstring = p.sub(translateClassRefCSharp, docstring)\n elif language == 'java':\n docstring = p.sub(translateClassRefJava, docstring)\n\n # Massage method cross-references.\n\n p = re.compile('(\\s+)(\\S+?)::(\\w+\\s*\\([^)]*?\\))', re.MULTILINE)\n if language == 'csharp':\n docstring = p.sub(translateCSharpCrossRef, docstring)\n elif language == 'java':\n docstring = p.sub(translateJavaCrossRef, docstring)\n\n # Clean-up step needed because some of the procedures above are imperfect.\n # This converts \" * * @foo\" lines into \" * @foo\":\n\n p = re.compile('^(\\s+)\\*\\s+\\*\\s+@', re.MULTILINE)\n docstring = p.sub(r'\\1* @', docstring)\n\n # Take out any left-over Doxygen-style quotes, because Javadoc doesn't have\n # the %foo quoting mechanism.\n\n docstring = re.sub('(\\s)%(\\w)', r'\\1\\2', docstring)\n\n # Currently, we don't handle @ingroup.\n\n docstring = re.sub('@ingroup \\w+', '', docstring)\n\n return docstring",
"def rewriteDocstringForCSharp (docstring):\n\n # Preliminary: rewrite some of the data type references to equivalent\n # C# types. (Note: this rewriting affects only the documentation\n # comments inside classes & methods, not the actual method signatures.)\n\n docstring = docstring.replace(r'const char *', 'string ')\n docstring = docstring.replace(r'const char* ', 'string ')\n docstring = docstring.replace(r'an unsigned int', 'a long integer')\n docstring = docstring.replace(r'unsigned int', 'long')\n docstring = docstring.replace(r'const std::string&', 'string')\n docstring = docstring.replace(r'const std::string &', 'string ')\n docstring = docstring.replace(r'const std::string', 'string')\n docstring = docstring.replace(r'std::string', 'string')\n docstring = docstring.replace(r'const ', '')\n docstring = docstring.replace(r'NULL', 'null')\n docstring = docstring.replace(r'boolean', 'bool')\n\n # Use C# syntax instead of \"const XMLNode*\" etc.\n\n p = re.compile(r'const (%?)(' + r')( ?)(\\*|&)', re.DOTALL)\n docstring = p.sub(rewriteClassRefAddingSpace, docstring) \n p = re.compile(r'(%?)(' + r')( ?)(\\*|&)', re.DOTALL)\n docstring = p.sub(rewriteClassRefAddingSpace, docstring) \n\n # <code> has its own special meaning in C#; we have to turn our input\n # file's uses of <code> into <c>. Conversely, we have to turn our\n # uses of verbatim to <code>.\n\n p = re.compile(r'<code>(.+?)</code>', re.DOTALL)\n docstring = p.sub(r'<c>\\1</c>', docstring)\n p = re.compile('@verbatim(.+?)@endverbatim', re.DOTALL)\n docstring = p.sub(r'<code>\\1</code>', docstring)\n\n # Do replacements on some documentation text we sometimes use.\n\n p = re.compile(r'antimonyConstants([@.])')\n docstring = p.sub(r'antimonycs.antimony\\1', docstring)\n\n # Fix @link for constants that we forgot conditionalize in the source.\n\n p = re.compile(r'@link +([A-Z_0-9]+?)@endlink', re.DOTALL)\n docstring = p.sub(r'@link antimony.\\1@endlink', docstring)\n\n # Can't use math symbols. Kluge around it.\n\n docstring = re.sub(r'\\\\f\\$\\\\geq\\\\f\\$', '>=', docstring)\n docstring = re.sub(r'\\\\f\\$\\\\leq\\\\f\\$', '<=', docstring)\n docstring = re.sub(r'\\\\f\\$\\\\times\\\\f\\$', '*', docstring)\n\n # Some additional special cases.\n\n docstring = docstring.replace(r'SBML_formulaToString()', 'antimonycs.antimony.formulaToString()')\n docstring = docstring.replace(r'SBML_parseFormula()', 'antimonycs.antimony.parseFormula()')\n\n # Need to escape the quotation marks:\n\n docstring = docstring.replace('\"', \"'\")\n docstring = docstring.replace(r\"'\", r\"\\'\") \n\n return docstring",
"def docstring_hack():\n pass",
"def __init__ (self, isInternal, docstring, name, args, isConst):\n\n self.name = name\n self.isConst = isConst\n self.isInternal = isInternal\n\n if isInternal:\n if language == 'java':\n # We have a special Javadoc doclet that understands a non-standard\n # Javadoc tag, @internal. When present in the documentation string\n # of a method, it causes it to be excluded from the final\n # documentation output. @internal is something doxygen offers.\n #\n p = re.compile('(\\s+?)\\*/', re.MULTILINE)\n self.docstring = p.sub(r'\\1* @internal\\1*/', docstring)\n elif language == 'csharp':\n # We mark internal methods in a different way for C#.\n self.docstring = docstring\n else:\n self.docstring = \" @internal\\n\" + docstring\n else:\n self.docstring = docstring\n\n # In Java and C#, if a method is const and swig has to translate the type,\n # then for some reason swig cannot match up the resulting doc strings\n # that we put into %javamethodmodifiers. The result is that the java\n # documentation for the methods are empty. I can't figure out why, but\n # have figured out that if we omit the argument list in the doc string\n # that is put on %javamethodmodifiers for such case, swig does generate \n # the comments for those methods. This approach is potentially dangerous\n # because swig might attach the doc string to the wrong method if a\n # methods has multiple versions with varying argument types, but the\n # combination doesn't seem to arise in antimony currently, and anyway,\n # this fixes a real problem in the Java documentation for antimony.\n\n if language == 'java' or language == 'csharp':\n if isConst and (args.find('unsigned int') >= 0):\n self.args = ''\n elif not args.strip() == '()':\n if isConst:\n self.args = args + ' const'\n else:\n self.args = args\n else:\n if isConst:\n self.args = '() const'\n else:\n self.args = ''\n else:\n self.args = args",
"def docstring(\n docstring: str = None, *, pre: str = None, post: str = None\n) -> Callable[[U], U]:\n\n def edit_docstring(obj: U) -> U:\n obj.__doc__ = \"\".join(\n (\n clean_docstring(pre or \"\", unused=\"pre\"),\n clean_docstring(docstring or (obj.__doc__ or \"\")),\n clean_docstring(post or \"\", unused=\"post\"),\n )\n )\n return obj\n\n return edit_docstring",
"def update_javadoc(javadoc=None, since=None, author=None):\n\n if javadoc is None:\n javadoc = \"\\n\\n/**\\n */\\n\"\n\n if since is not None:\n javadoc = re.sub(\"/\\*\\*\", \"/**\\n * @since \" + since, javadoc)\n\n if author is not None:\n javadoc = re.sub(\"/\\*\\*\", \"/**\\n * @author \" + author, javadoc)\n\n return javadoc",
"def _parse_docstring(doc):\n _cache_key = doc\n try:\n return _parse_docstring_cache[_cache_key]\n except KeyError:\n pass\n\n if doc is None:\n return _Doc('', '', {})\n\n # Convert Google- or Numpy-style docstrings to RST.\n # (Should do nothing if not in either style.)\n doc = str(GoogleDocstring(doc))\n doc = str(NumpyDocstring(doc))\n\n tree = publish_doctree(doc)\n\n class Visitor(NodeVisitor):\n optional = [\n 'document', 'docinfo',\n 'field_list', 'field_body',\n 'literal', 'problematic']\n\n def __init__(self, document):\n NodeVisitor.__init__(self, document)\n self.paragraphs = []\n self.start_lines = []\n self.params = defaultdict(dict)\n self._current_paragraph = None\n self._indent_iterator_stack = []\n self._indent_stack = []\n\n def _do_nothing(self, node):\n pass\n\n def visit_paragraph(self, node):\n self.start_lines.append(node.line)\n self._current_paragraph = []\n\n def depart_paragraph(self, node):\n text = ''.join(self._current_paragraph)\n text = ''.join(self._indent_stack) + text\n self._indent_stack = [\n ' ' * len(item) for item in self._indent_stack]\n text = text.replace('\\n', '\\n' + ''.join(self._indent_stack))\n self.paragraphs.append(text)\n self._current_paragraph = None\n\n def visit_Text(self, node):\n self._current_paragraph.append(node)\n\n depart_Text = _do_nothing\n\n def visit_emphasis(self, node):\n self._current_paragraph.append('\\033[3m') # *foo*: italic\n\n def visit_strong(self, node):\n self._current_paragraph.append('\\033[1m') # **foo**: bold\n\n def visit_title_reference(self, node):\n self._current_paragraph.append('\\033[4m') # `foo`: underlined\n\n def _depart_markup(self, node):\n self._current_paragraph.append('\\033[0m')\n\n depart_emphasis = depart_strong = depart_title_reference = \\\n _depart_markup\n\n def visit_literal_block(self, node):\n text, = node\n self.start_lines.append(node.line)\n self.paragraphs.append(re.sub('^|\\n', r'\\g<0> ', text)) # indent\n raise SkipNode\n\n def visit_bullet_list(self, node):\n self._indent_iterator_stack.append(\n (node['bullet'] + ' ' for _ in range(len(node))))\n\n def depart_bullet_list(self, node):\n self._indent_iterator_stack.pop()\n\n def visit_enumerated_list(self, node):\n enumtype = node['enumtype']\n fmt = {('(', ')'): 'parens',\n ('', ')'): 'rparen',\n ('', '.'): 'period'}[node['prefix'], node['suffix']]\n try:\n start = node['start']\n except KeyError:\n start = 1\n else:\n start = {\n 'arabic': int,\n 'loweralpha': lambda s: ord(s) - ord('a') + 1,\n 'upperalpha': lambda s: ord(s) - ord('A') + 1,\n 'lowerroman': lambda s: roman.fromRoman(s.upper()),\n 'upperroman': lambda s: roman.fromRoman(s),\n }[enumtype](start)\n enumerators = [Body(None).make_enumerator(i, enumtype, fmt)[0]\n for i in range(start, start + len(node))]\n width = max(map(len, enumerators))\n enumerators = [enum.ljust(width) for enum in enumerators]\n self._indent_iterator_stack.append(iter(enumerators))\n\n def depart_enumerated_list(self, node):\n self._indent_iterator_stack.pop()\n\n def visit_list_item(self, node):\n self._indent_stack.append(next(self._indent_iterator_stack[-1]))\n\n def depart_list_item(self, node):\n self._indent_stack.pop()\n\n def visit_field(self, node):\n field_name_node, field_body_node = node\n field_name, = field_name_node\n parts = field_name.split()\n if len(parts) == 2:\n doctype, name = parts\n elif len(parts) == 3:\n doctype, type_, name = parts\n if doctype not in _PARAM_TYPES:\n raise SkipNode\n if 'type' in self.params[name]:\n raise ValueError('type defined twice for 
{}'.format(name))\n self.params[name]['type'] = type_\n else:\n raise SkipNode\n if doctype in _PARAM_TYPES:\n doctype = 'param'\n if doctype in _TYPE_NAMES:\n doctype = 'type'\n if doctype in self.params[name]:\n raise ValueError('{} defined twice for {}'.format(doctype, name))\n visitor = Visitor(self.document)\n field_body_node.walkabout(visitor)\n self.params[name][doctype] = ''.join(visitor.paragraphs)\n raise SkipNode\n\n def visit_comment(self, node):\n raise SkipNode\n\n def visit_system_message(self, node):\n raise SkipNode\n\n visitor = Visitor(tree)\n tree.walkabout(visitor)\n\n tuples = {name: _Param(values.get('param'), values.get('type'))\n for name, values in visitor.params.items()}\n if visitor.paragraphs:\n text = []\n for start, paragraph, next_start in zip(\n visitor.start_lines,\n visitor.paragraphs,\n visitor.start_lines[1:] + [0]):\n text.append(paragraph)\n # We insert a space before each newline to prevent argparse\n # from stripping consecutive newlines down to just two\n # (http://bugs.python.org/issue31330).\n text.append(' \\n' * (next_start - start - paragraph.count('\\n')))\n parsed = _Doc('', ''.join(text), tuples)\n else:\n parsed = _Doc('', '', tuples)\n _parse_docstring_cache[_cache_key] = parsed\n return parsed",
"def docstring(self, docstring): # type: (str) -> None\n self._tmp_docstring = inspect.cleandoc(docstring)",
"def write_updated_content(filename, updated_jdoc):\n\n with open(filename, 'r+') as f:\n java_doc_location = find_javadoc(filename)\n original_text = f.read()\n\n if java_doc_location is None:\n # this is when no java doc is present\n imports_end = position_before_code(filename)\n before_doc = original_text[:imports_end]\n after_doc = original_text[imports_end:]\n else:\n class_def_len = len(java_doc_location.group(0)) - len(java_doc_location.group(1))\n before_doc = original_text[:java_doc_location.start()]\n after_doc = original_text[java_doc_location.end() - class_def_len:]\n\n text = before_doc + updated_jdoc + after_doc\n f.seek(0)\n f.write(text)\n f.truncate()",
"def convert_doxygen_docstring(lines, name):\n\n lines = lines[:]\n newlines = []\n indent = 0\n reading_desc = False\n\n while lines:\n line = lines.pop(0)\n if line.startswith(\"////\"):\n continue\n\n line = line.rstrip()\n if line.startswith('///<'):\n strline = line[4:]\n else:\n strline = line\n\n strline = strline.lstrip('/ \\t')\n\n if strline == \"**\" or strline == \"*/\":\n continue\n\n if strline.startswith(\"** \"):\n strline = strline[3:]\n elif strline.startswith(\"* \"):\n strline = strline[2:]\n elif strline == \"*\":\n strline = \"\"\n\n strline = strline.lstrip(' \\t')\n\n if strline.startswith('@'):\n special = strline.split(' ', 1)[0][1:]\n if special == 'par' and strline.endswith(':') and lines and '@code' in lines[0]:\n newlines.append(' '*indent + strline[5:] + ':')\n newlines.append('')\n line = lines.pop(0)\n offset = line.index('@code')\n while lines:\n line = lines.pop(0)\n if '@endverbatim' in line or '@endcode' in line:\n break\n newlines.append(' ' + line[offset:])\n\n newlines.append('')\n continue\n elif special == \"verbatim\" or special == \"code\":\n if newlines and newlines[-1]:\n newlines.append('')\n\n newlines.append('.. code-block:: guess')\n newlines.append('')\n offset = line.index('@' + special)\n while lines:\n line = lines.pop(0)\n if '@endverbatim' in line or '@endcode' in line:\n break\n newlines.append(' ' + line[offset:])\n\n newlines.append('')\n continue\n elif special == \"f[\":\n if newlines and newlines[-1]:\n newlines.append('')\n\n newlines.append('.. math::')\n newlines.append('')\n offset = line.index('@' + special)\n while lines:\n line = lines.pop(0)\n if '@f]' in line:\n break\n newlines.append(' ' + line[offset:])\n\n newlines.append('')\n continue\n elif special == 'param':\n #TODO\n #if extra is not None:\n # _, name, desc = strline.split(' ', 2)\n # extra['param:' + name] = desc\n continue\n elif special == 'deprecated':\n if newlines and newlines[-1]:\n newlines.append('')\n\n _, value = strline.split(' ', 1)\n\n # I'd love to use the proper Sphinx deprecated tag, but it\n # requires a version number, whereas Doxygen doesn't.\n newlines.append('*Deprecated:* ' + convert_doxygen_format(value, name))\n newlines.append('')\n continue\n elif special in ('brief', 'return', 'returns'):\n #TODO\n #if extra is not None:\n # _, value = strline.split(' ', 1)\n # extra[special] = value\n continue\n elif special == 'details':\n strline = strline[9:]\n elif special == 'sa' or special == 'see':\n if newlines and newlines[-1]:\n newlines.append('')\n\n _, value = strline.split(' ', 1)\n values = value.split(',')\n\n for i, value in enumerate(values):\n result = resolve_reference(value.partition('(')[0], name)\n if result:\n values[i] = ':{0}:`{1}`'.format(*result)\n else:\n values[i] = ':obj:`{0}`'.format(value)\n\n if special == 'see':\n newlines.append('See {}.'.format(', '.join(values)))\n else:\n newlines.append('See also {}.'.format(', '.join(values)))\n newlines.append('')\n continue\n elif special in ('note', 'warning'):\n if newlines and newlines[-1]:\n newlines.append('')\n\n newlines.append('.. %s:: ' % (special))\n newlines.append('')\n newlines.append(' ' + convert_doxygen_format(strline[2 + len(special):], name))\n while lines and lines[0].strip(' *\\t/'):\n line = lines.pop(0).lstrip(' *\\t')\n newlines.append(' ' + convert_doxygen_format(line, name))\n\n newlines.append('')\n continue\n elif special == 'since':\n if newlines and newlines[-1]:\n newlines.append('')\n\n newlines.append('.. 
versionadded:: ' + strline[7:])\n newlines.append('')\n continue\n else:\n print(\"Unhandled documentation tag: @\" + special)\n\n if strline or len(newlines) > 0:\n newlines.append(' '*indent + convert_doxygen_format(strline, name))\n\n return newlines",
"def clean_doc(doc):\r\n # Replace regular enter (i.e. mere comment formatting in cpp file)\r\n # with space\r\n doc = doc.replace(\"\\n\", \" \")\r\n\r\n # The removal can cause a \"hard enter\" (literal \\n) to get an unintended\r\n # trailing space - trim those.\r\n doc = doc.replace(\"\\\\n \", \"\\\\n\")\r\n return '\"%s\"' % doc",
"def __init__ (self, docstring, name, isInternal):\n\n # Take out excess leading blank lines.\n docstring = re.sub('/\\*\\*(\\s+\\*)+', r'/** \\n *', docstring)\n\n self.docstring = docstring\n self.name = name\n self.isInternal = isInternal",
"def clean_docstring(doc: str, unused: Literal[\"pre\", \"post\"] = None) -> str:\n doc = doc.split(\"\\n\")\n if unused == \"pre\":\n try:\n index = next(i for i, l in enumerate(doc) if l.strip())\n doc = doc[index:]\n except StopIteration:\n doc = []\n elif unused == \"post\":\n try:\n index = next(i for i, l in enumerate(reversed(doc)) if l.strip())\n doc = doc[: len(doc) - index]\n except StopIteration:\n doc = []\n if doc:\n first_line = doc[0]\n index = len(first_line) - len(first_line.lstrip())\n indent = first_line[:index]\n if all(l.startswith(indent) for l in doc if l.strip()):\n doc = [(l[index:] if l.strip() else l) for l in doc]\n return \"\\n\".join(doc)",
"def clean_schema_doc_string(doc_str, add_prefix=None, add_postfix=None, rst_format='**', remove_html_tags=True):\n prefix = ' ' if add_prefix is None else add_prefix\n clean_doc_str = doc_str\n if remove_html_tags:\n clean_doc_str = clean_doc_str.replace('<', '<')\n clean_doc_str = clean_doc_str.replace('>', '>')\n clean_doc_str = clean_doc_str.replace('<b>', ' **')\n clean_doc_str = clean_doc_str.replace('</b>', '** ')\n clean_doc_str = clean_doc_str.replace('<i>', ' *')\n clean_doc_str = clean_doc_str.replace('</i>', '* ')\n clean_doc_str = clean_doc_str.replace(':blue:', '')\n\n clean_doc_str = clean_doc_str.replace('COMMENT:', '%s%sComment:%s ' %\n (prefix, rst_format, rst_format))\n clean_doc_str = clean_doc_str.replace('MORE_INFO:', '%s%sAdditional Information:%s ' %\n (prefix, rst_format, rst_format))\n clean_doc_str = clean_doc_str.replace('NOTE:', '%s %sAdditional Information:%s ' %\n (prefix, rst_format, rst_format))\n if add_postfix is not None:\n clean_doc_str += add_postfix\n return clean_doc_str",
"def DocString():\n return",
"def sphinxify(docstring, context, buildername='html', img_path=''):\n if img_path:\n if os.name == 'nt':\n img_path = img_path.replace('\\\\', '/')\n leading = '/' if os.name.startswith('posix') else ''\n docstring = docstring.replace('_images', leading+img_path)\n\n srcdir = osp.join(DOCDIR, '_sources')\n if not osp.exists(srcdir):\n os.makedirs(srcdir)\n base_name = osp.join(srcdir, xrtQookPageName)\n rst_name = base_name + '.rst'\n\n # This is needed so users can type \\\\ on latex eqnarray envs inside raw\n # docstrings\n docstring = docstring.replace('\\\\\\\\', '\\\\\\\\\\\\\\\\')\n\n # Add a class to several characters on the argspec. This way we can\n # highlight them using css, in a similar way to what IPython does.\n # NOTE: Before doing this, we escape common html chars so that they\n # don't interfere with the rest of html present in the page\n argspec = escape(context['argspec'])\n for char in ['=', ',', '(', ')', '*', '**']:\n argspec = argspec.replace(\n char, '<span class=\"argspec-highlight\">' + char + '</span>')\n context['argspec'] = argspec\n\n doc_file = codecs.open(rst_name, 'w', encoding='utf-8')\n doc_file.write(docstring)\n doc_file.close()\n\n confoverrides = {'html_context': context,\n 'extensions': ['sphinx.ext.mathjax',\n 'sphinxcontrib.jquery']}\n\n doctreedir = osp.join(DOCDIR, 'doctrees')\n sphinx_app = Sphinx(srcdir, DOCDIR, DOCDIR, doctreedir, buildername,\n confoverrides, status=None, warning=None,\n freshenv=True, warningiserror=False, tags=None)\n\n try:\n sphinx_app.build(None, [rst_name])\n except SystemMessage:\n pass",
"def main_docstring():",
"def update_docstring(instance):\n try:\n docstring = instance.api_map['doc']\n except (KeyError, TypeError):\n docstring = 'No docstring provided.'\n\n instance.__class__.__doc__ = docstring\n instance.__class__.__call__.__signature__ = construct_signature(instance)\n\n return docstring",
"def doDocStrings(parentNode, srcNode):\n def makeDocElement(name, content):\n node = libxml2.newNode(name)\n node.addChild(libxml2.newText(content))\n return node\n \n autodoc = getAttr(srcNode, \"python_autodoc\")\n docstr = getAttr(srcNode, \"feature_docstring\")\n if autodoc:\n parentNode.addChild(makeDocElement(\"autodoc\", autodoc))\n if docstr:\n parentNode.addChild(makeDocElement(\"docstring\", docstr))",
"def docstrings(param1, param2):\n return \"example string\"",
"def copy_docstring(other):\n\n def wrapper(func):\n func.__doc__ = other.__doc__\n return func\n\n return wrapper",
"def test_doc_with_comments():\n doc = CoNLL.conll2doc(input_str=RUSSIAN_SAMPLE)\n check_russian_doc(doc)",
"def doc_string():\n pass # pass does nothing",
"def sphinxify(docstring, context, buildername='html'):\n\n srcdir = mkdtemp()\n srcdir = encoding.to_unicode_from_fs(srcdir)\n\n base_name = osp.join(srcdir, 'docstring')\n rst_name = base_name + '.rst'\n\n if buildername == 'html':\n suffix = '.html'\n else:\n suffix = '.txt'\n output_name = base_name + suffix\n\n # This is needed so users can type \\\\ on latex eqnarray envs inside raw\n # docstrings\n if context['right_sphinx_version'] and context['math_on']:\n docstring = docstring.replace('\\\\\\\\', '\\\\\\\\\\\\\\\\')\n \n # Add a class to several characters on the argspec. This way we can\n # highlight them using css, in a similar way to what IPython does.\n argspec = context['argspec']\n for char in ['=', ',', '(', ')', '*', '**']:\n argspec = argspec.replace(char,\n '<span class=\"argspec-highlight\">' + char + '</span>')\n context['argspec'] = argspec\n\n doc_file = codecs.open(rst_name, 'w', encoding='utf-8')\n doc_file.write(docstring)\n doc_file.close()\n \n temp_confdir = False\n if temp_confdir:\n # TODO: This may be inefficient. Find a faster way to do it.\n confdir = mkdtemp()\n confdir = encoding.to_unicode_from_fs(confdir)\n generate_configuration(confdir)\n else:\n confdir = osp.join(get_module_source_path('spyderlib.utils.inspector'))\n\n confoverrides = {'html_context': context}\n\n doctreedir = osp.join(srcdir, 'doctrees')\n\n sphinx_app = Sphinx(srcdir, confdir, srcdir, doctreedir, buildername,\n confoverrides, status=None, warning=None,\n freshenv=True, warningiserror=False, tags=None)\n try:\n sphinx_app.build(None, [rst_name])\n except SystemMessage:\n output = _(\"It was not possible to generate rich text help for this \"\n \"object.</br>\"\n \"Please see it in plain text.\")\n return warning(output)\n\n # TODO: Investigate if this is necessary/important for us\n if osp.exists(output_name):\n output = codecs.open(output_name, 'r', encoding='utf-8').read()\n output = output.replace('<pre>', '<pre class=\"literal-block\">')\n else:\n output = _(\"It was not possible to generate rich text help for this \"\n \"object.</br>\"\n \"Please see it in plain text.\")\n return warning(output)\n\n if temp_confdir:\n shutil.rmtree(confdir, ignore_errors=True)\n shutil.rmtree(srcdir, ignore_errors=True)\n\n return output",
"def remove_comments_and_docstrings(source):\n io_obj = StringIO(source)\n out = \"\"\n prev_toktype = tokenize.INDENT\n last_lineno = -1\n last_col = 0\n for tok in tokenize.generate_tokens(io_obj.readline):\n token_type = tok[0]\n token_string = tok[1]\n start_line, start_col = tok[2]\n end_line, end_col = tok[3]\n ltext = tok[4]\n # The following two conditionals preserve indentation.\n # This is necessary because we're not using tokenize.untokenize()\n # (because it spits out code with copious amounts of oddly-placed\n # whitespace).\n if start_line > last_lineno:\n last_col = 0\n if start_col > last_col:\n out += (\" \" * (start_col - last_col))\n # Remove comments:\n if token_type == tokenize.COMMENT:\n pass\n # This series of conditionals removes docstrings:\n elif token_type == tokenize.STRING:\n if prev_toktype != tokenize.INDENT:\n # This is likely a docstring; double-check we're not inside an operator:\n if prev_toktype != tokenize.NEWLINE:\n # Note regarding NEWLINE vs NL: The tokenize module\n # differentiates between newlines that start a new statement\n # and newlines inside of operators such as parens, brackes,\n # and curly braces. Newlines inside of operators are\n # NEWLINE and newlines that start new code are NL.\n # Catch whole-module docstrings:\n if start_col > 0:\n # Unlabelled indentation means we're inside an operator\n out += token_string\n # Note regarding the INDENT token: The tokenize module does\n # not label indentation inside of an operator (parens,\n # brackets, and curly braces) as actual indentation.\n # For example:\n # def foo():\n # \"The spaces before this docstring are tokenize.INDENT\"\n # test = [\n # \"The spaces before this string do not get a token\"\n # ]\n\n else:\n out += token_string\n prev_toktype = token_type\n last_col = end_col\n last_lineno = end_line\n out = '\\n'.join([line for line in out.splitlines() if line.strip()])\n return out",
"def public_fn_with_googley_docstring(self, name, another, state=None):\n return 0",
"def convert_doxygen_format(line, name):\n\n line = line.replace('<b>', '**').replace('</b>', '**')\n\n # Single backticks in doxygen map to doubles in Sphinx\n line = line.replace('`', '``')\n\n # But double backticks are literal backticks\n line = line.replace('````', '\\\\`')\n\n # Search for method and class references. We pick them up either when they\n # have a scoping operator, or when they end with (), or we would match all\n # the words in the text!\n origline = line\n for m in re.finditer(r'\\b([a-zA-Z_][a-zA-Z0-9_.:]*)\\(\\)|\\b([a-zA-Z_][a-zA-Z0-9_]*::[a-zA-Z_][a-zA-Z0-9_.:]*)\\b', origline):\n result = resolve_reference(m.group(0).rstrip('()'), name)\n if not result:\n continue\n\n if '::' in m.group():\n # We want a scoped name, apparently.\n ref = ':{0}:`{1}`'.format(*result)\n else:\n ref = ':{0}:`~{1}`'.format(*result)\n\n # Are we inside double-backticks?\n if origline[:m.start()].count('``') % 2 != 0:\n # Only replace if it's entirely wrapped in backticks.\n line = line.replace('``' + m.group() + '``', ref)\n else:\n line = line.replace(m.group(), ref)\n\n return line",
"def old_function_with_docstring(x, y):\n return x + y"
] |
[
"0.747815",
"0.7309348",
"0.6963823",
"0.6864948",
"0.65862286",
"0.62206876",
"0.61575556",
"0.6104046",
"0.5784846",
"0.57597166",
"0.5673069",
"0.56552356",
"0.55879945",
"0.55521667",
"0.55452293",
"0.5543954",
"0.54484963",
"0.541877",
"0.54074806",
"0.54048115",
"0.537801",
"0.5343656",
"0.53334713",
"0.53222245",
"0.531579",
"0.530661",
"0.52904695",
"0.52395624",
"0.5218396",
"0.52168095"
] |
0.8334153
|
0
|
rewriteDocstringForCSharp (docstring) -> docstring Performs some minimal C#-specific sanitizations on the C++/Doxygen docstring.
|
def rewriteDocstringForCSharp (docstring):
# Preliminary: rewrite some of the data type references to equivalent
# C# types. (Note: this rewriting affects only the documentation
# comments inside classes & methods, not the actual method signatures.)
docstring = docstring.replace(r'const char *', 'string ')
docstring = docstring.replace(r'const char* ', 'string ')
docstring = docstring.replace(r'an unsigned int', 'a long integer')
docstring = docstring.replace(r'unsigned int', 'long')
docstring = docstring.replace(r'const std::string&', 'string')
docstring = docstring.replace(r'const std::string &', 'string ')
docstring = docstring.replace(r'const std::string', 'string')
docstring = docstring.replace(r'std::string', 'string')
docstring = docstring.replace(r'const ', '')
docstring = docstring.replace(r'NULL', 'null')
docstring = docstring.replace(r'boolean', 'bool')
# Use C# syntax instead of "const XMLNode*" etc.
p = re.compile(r'const (%?)(' + r')( ?)(\*|&)', re.DOTALL)
docstring = p.sub(rewriteClassRefAddingSpace, docstring)
p = re.compile(r'(%?)(' + r')( ?)(\*|&)', re.DOTALL)
docstring = p.sub(rewriteClassRefAddingSpace, docstring)
# <code> has its own special meaning in C#; we have to turn our input
# file's uses of <code> into <c>. Conversely, we have to turn our
# uses of verbatim to <code>.
p = re.compile(r'<code>(.+?)</code>', re.DOTALL)
docstring = p.sub(r'<c>\1</c>', docstring)
p = re.compile('@verbatim(.+?)@endverbatim', re.DOTALL)
docstring = p.sub(r'<code>\1</code>', docstring)
# Do replacements on some documentation text we sometimes use.
p = re.compile(r'antimonyConstants([@.])')
docstring = p.sub(r'antimonycs.antimony\1', docstring)
# Fix @link for constants that we forgot conditionalize in the source.
p = re.compile(r'@link +([A-Z_0-9]+?)@endlink', re.DOTALL)
docstring = p.sub(r'@link antimony.\1@endlink', docstring)
# Can't use math symbols. Kluge around it.
docstring = re.sub(r'\\f\$\\geq\\f\$', '>=', docstring)
docstring = re.sub(r'\\f\$\\leq\\f\$', '<=', docstring)
docstring = re.sub(r'\\f\$\\times\\f\$', '*', docstring)
# Some additional special cases.
docstring = docstring.replace(r'SBML_formulaToString()', 'antimonycs.antimony.formulaToString()')
docstring = docstring.replace(r'SBML_parseFormula()', 'antimonycs.antimony.parseFormula()')
# Need to escape the quotation marks:
docstring = docstring.replace('"', "'")
docstring = docstring.replace(r"'", r"\'")
return docstring
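# Minimal standalone sketch of the <code> -> <c> and @verbatim -> <code>
# conversions performed above (hypothetical demo, not taken from the
# original script; only the standard-library re module is assumed).
import re
_demo = "Use <code>getId()</code>. @verbatim x = 1; @endverbatim"
_demo = re.sub(r'<code>(.+?)</code>', r'<c>\1</c>', _demo, flags=re.DOTALL)
_demo = re.sub(r'@verbatim(.+?)@endverbatim', r'<code>\1</code>', _demo, flags=re.DOTALL)
print(_demo)  # prints: Use <c>getId()</c>. <code> x = 1; </code>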
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def rewriteDocstringForPython (docstring):\n\n # Take out the C++ comment start and end.\n\n docstring = docstring.replace('/**', '').replace('*/', '')\n p = re.compile('^(\\s*)\\*([ \\t]*)', re.MULTILINE)\n docstring = p.sub(r'\\2', docstring)\n\n # Rewrite some of the data type references to equivalent Python types.\n # (Note: this rewriting affects only the documentation comments inside\n # classes & methods, not the method signatures.)\n\n docstring = docstring.replace(r'const char *', 'string ')\n docstring = docstring.replace(r'const char* ', 'string ')\n docstring = docstring.replace(r'an unsigned int', 'a long integer')\n docstring = docstring.replace(r'unsigned int', 'long')\n docstring = docstring.replace(r'const std::string&', 'string')\n docstring = docstring.replace(r'const std::string', 'string')\n docstring = docstring.replace(r'std::string', 'string')\n docstring = docstring.replace(r'NULL', 'None')\n docstring = docstring.replace(r'@c true', '@c True')\n docstring = docstring.replace(r'@c false', '@c False')\n\n # Also use Python syntax instead of \"const XMLNode*\" etc.\n\n p = re.compile(r'const (%?)(' + r') ?(\\*|&)', re.DOTALL)\n docstring = p.sub(rewriteClassRef, docstring) \n p = re.compile(r'(%?)(' + r') ?(\\*|&)', re.DOTALL)\n docstring = p.sub(rewriteClassRef, docstring) \n\n # Need to escape the quotation marks:\n\n docstring = docstring.replace('\"', \"'\")\n docstring = docstring.replace(r\"'\", r\"\\'\")\n\n # Python method cross-references won't be made by doxygen unless\n # the method reference is written without arguments.\n\n p = re.compile('(\\s+)(\\S+?)::(\\w+\\s*)(\\([^)]*?\\))', re.MULTILINE)\n docstring = p.sub(translatePythonCrossRef, docstring)\n p = re.compile('(@see\\s+)(\\w+\\s*)(\\([^)]*?\\))')\n docstring = p.sub(translatePythonSeeRef, docstring)\n\n # Friggin' doxygen escapes HTML character codes, so the hack we have to\n # do for Javadoc turns out doesn't work for the Python documentation.\n # Kluge around it.\n\n docstring = re.sub(r'\\\\f\\$\\\\geq\\\\f\\$', '>=', docstring)\n docstring = re.sub(r'\\\\f\\$\\\\leq\\\\f\\$', '<=', docstring)\n docstring = re.sub(r'\\\\f\\$\\\\times\\\\f\\$', '*', docstring)\n\n # SWIG does some bizarre truncation of leading characters that\n # happens to hit us because of how we have to format verbatim's.\n # This tries to kluge around it: \n p = re.compile('@verbatim.+?@endverbatim', re.DOTALL)\n docstring = p.sub(indentVerbatimForPython, docstring)\n\n return docstring",
"def rewriteDocstringForPerl (docstring):\n\n # Get rid of the /** ... */ and leading *'s.\n docstring = docstring.replace('/**', '').replace('*/', '').replace('*', ' ')\n\n # Get rid of indentation\n p = re.compile('^\\s+(\\S*\\s*)', re.MULTILINE)\n docstring = p.sub(r'\\1', docstring)\n\n # Get rid of paragraph indentation not caught by the code above.\n p = re.compile('^[ \\t]+(\\S)', re.MULTILINE)\n docstring = p.sub(r'\\1', docstring)\n\n # Get rid of blank lines.\n p = re.compile('^[ \\t]+$', re.MULTILINE)\n docstring = p.sub(r'', docstring)\n\n # Get rid of the %foo quoting.\n docstring = re.sub('(\\s)%(\\w)', r'\\1\\2', docstring)\n\n # The following are done in pairs because I couldn't come up with a\n # better way to catch the case where @c and @em end up alone at the end\n # of a line and the thing to be formatted starts on the next one after\n # the comment '*' character on the beginning of the line.\n\n docstring = re.sub('@c *([^ ,.:;()/*\\n\\t]+)', r'C<\\1>', docstring)\n docstring = re.sub('@c(\\n[ \\t]*\\*[ \\t]*)([^ ,.:;()/*\\n\\t]+)', r'\\1C<\\2>', docstring)\n docstring = re.sub('@p +([^ ,.:;()/*\\n\\t]+)', r'C<\\1>', docstring)\n docstring = re.sub('@p(\\n[ \\t]*\\*[ \\t]+)([^ ,.:;()/*\\n\\t]+)', r'\\1C<\\2>', docstring)\n docstring = re.sub('@em *([^ ,.:;()/*\\n\\t]+)', r'I<\\1>', docstring)\n docstring = re.sub('@em(\\n[ \\t]*\\*[ \\t]*)([^ ,.:;()/*\\n\\t]+)', r'\\1I<\\2>', docstring)\n\n docstring = docstring.replace('<ul>', '\\n=over\\n')\n docstring = docstring.replace('<li> ', '\\n=item\\n\\n')\n docstring = docstring.replace('</ul>', '\\n=back\\n')\n\n docstring = docstring.replace('@return', 'Returns')\n docstring = docstring.replace(' < ', ' E<lt> ').replace(' > ', ' E<gt> ')\n docstring = re.sub('<code>([^<]*)</code>', r'C<\\1>', docstring)\n docstring = re.sub('<b>([^<]*)</b>', r'B<\\1>', docstring) \n\n return docstring",
"def docstring_hack():\n pass",
"def sanitizeForHTML (docstring):\n\n # Remove @~, which we use as a hack in Doxygen 1.7-1.8\n\n docstring = docstring.replace(r'@~', '')\n\n # First do conditional section inclusion based on the current language.\n # Our possible conditional elements and their meanings are:\n #\n # java: only Java\n # python: only Python\n # perl: only Perl\n # cpp: only C++\n # csharp: only C#\n # conly: only C\n # clike: C, C++\n # notcpp:\tnot C++\n # notclike: not C or C++\n #\n # The notcpp/notclike variants are because Doxygen 1.6.x doesn't have\n # @ifnot, yet sometimes we want to say \"if not C or C++\".\n\n cases = 'java|python|perl|cpp|csharp|conly|clike|notcpp|notclike'\n p = re.compile('@if\\s+(' + cases + ')\\s+(.+?)((@else)\\s+(.+?))?@endif', re.DOTALL)\n docstring = p.sub(translateIfElse, docstring)\n\n # Replace blank lines between paragraphs with <p>. There are two main\n # cases: comments blocks whose lines always begin with an asterix (e.g.,\n # C/C++), and comment blocks where they don't (e.g., Python). The third\n # substitution below does the same thing for blank lines, except for the\n # very end of the doc string.\n\n p = re.compile('^(\\s+)\\*\\s*$', re.MULTILINE)\n docstring = p.sub(r'\\1* <p>', docstring)\n p = re.compile('^((?!\\s+\\Z)\\s+)$', re.MULTILINE)\n docstring = p.sub(r'\\1<p>', docstring)\n p = re.compile('^(?!\\Z)$', re.MULTILINE)\n docstring = p.sub(r'<p>', docstring)\n\n # Javadoc doesn't have an @htmlinclude command, so we process the file\n # inclusion directly here.\n\n p = re.compile('@htmlinclude\\s+([^\\s:;,(){}+|?\"\\'/]+)([\\s:;,(){}+|?\"\\'/])', re.MULTILINE)\n docstring = p.sub(translateInclude, docstring)\n\n # There's no Javadoc verbatim or @code/@endcode equivalent, so we have to\n # convert it to raw HTML and transform the content too. This requires\n # helpers. The following treats both @verbatim and @code the same way.\n\n p = re.compile('@verbatim.+?@endverbatim', re.DOTALL)\n docstring = p.sub(translateVerbatim, docstring)\n p = re.compile('@code.+?@endcode', re.DOTALL)\n docstring = p.sub(translateVerbatim, docstring)\n\n # Javadoc doesn't have a @section or @subsection commands, so we translate\n # those ourselves.\n\n p = re.compile('@section\\s+[^\\s]+\\s+(.*)$', re.MULTILINE)\n docstring = p.sub(r'<h2>\\1</h2>', docstring)\n p = re.compile('@subsection\\s+[^\\s]+\\s+(.*)$', re.MULTILINE)\n docstring = p.sub(r'<h3>\\1</h3>', docstring)\n p = re.compile('@subsubsection\\s+[^\\s]+\\s+(.*)$', re.MULTILINE)\n docstring = p.sub(r'<h4>\\1</h4>', docstring)\n\n # Javadoc doesn't have an @image command. We translate @image html\n # but ditch @image latex.\n\n p = re.compile('@image\\s+html+\\s+([^\\s]+).*$', re.MULTILINE)\n docstring = p.sub(r\"<center><img src='\\1'></center><br>\", docstring)\n p = re.compile('@image\\s+latex+\\s+([^\\s]+).*$', re.MULTILINE)\n docstring = p.sub(r'', docstring)\n\n # Doxygen doesn't understand HTML character codes like ≥, so we've\n # been using doxygen's Latex facility to get special mathematical\n # characters into the documentation, but as luck would have it, Javadoc\n # doesn't understand the Latex markup. 
All of this is getting old.\n\n docstring = re.sub(r'\\\\f\\$\\\\geq\\\\f\\$', '≥', docstring)\n docstring = re.sub(r'\\\\f\\$\\\\leq\\\\f\\$', '≤', docstring)\n docstring = re.sub(r'\\\\f\\$\\\\times\\\\f\\$', '×', docstring)\n\n # The following are done in pairs because I couldn't come up with a\n # better way to catch the case where @c and @em end up alone at the end\n # of a line and the thing to be formatted starts on the next one after\n # the comment '*' character on the beginning of the line.\n\n docstring = re.sub('@c *([^ ,;()/*\\n\\t]+)', r'<code>\\1</code>', docstring)\n docstring = re.sub('@c(\\n[ \\t]*\\*[ \\t]*)([^ ,;()/*\\n\\t]+)', r'\\1<code>\\2</code>', docstring)\n docstring = re.sub('@p +([^ ,.:;()/*\\n\\t]+)', r'<code>\\1</code>', docstring)\n docstring = re.sub('@p(\\n[ \\t]*\\*[ \\t]+)([^ ,.:;()/*\\n\\t]+)', r'\\1<code>\\2</code>', docstring)\n docstring = re.sub('@em *([^ ,.:;()/*\\n\\t]+)', r'<em>\\1</em>', docstring)\n docstring = re.sub('@em(\\n[ \\t]*\\*[ \\t]*)([^ ,.:;()/*\\n\\t]+)', r'\\1<em>\\2</em>', docstring)\n\n # Convert @li into <li>, but also add <ul> ... </ul>. This is a bit\n # simple-minded (I suppose like most of this code), but ought to work\n # for the cases we use in practice.\n\n p = re.compile('^(\\s+\\*\\s+)(@li\\s+.*?)(\\s+)(\\*/|\\*\\s+@(?!li\\s)|\\*\\s+<p>)', re.MULTILINE|re.DOTALL)\n docstring = p.sub(rewriteList, docstring)\n\n # Wrap @deprecated content with a class so that we can style it.\n\n p = re.compile('^(\\s+\\*\\s+)(@deprecated\\s)((\\S|\\s)+)(<p>|\\*/)', re.MULTILINE|re.DOTALL)\n docstring = p.sub(rewriteDeprecated, docstring)\n\n # Doxygen automatically cross-references class names in text to the class\n # definition page, but Javadoc does not. Rather than having to put in a\n # lot conditional @if/@endif's into the documentation to manually create\n # cross-links just for the Java case, let's automate. This needs to be\n # done better (e.g., by not hard-wiring the class names).\n\n p = re.compile(r'([^a-zA-Z0-9_.\">])(' + r')\\b([^:])', re.DOTALL)\n if language == 'csharp':\n docstring = p.sub(translateClassRefCSharp, docstring)\n elif language == 'java':\n docstring = p.sub(translateClassRefJava, docstring)\n\n # Massage method cross-references.\n\n p = re.compile('(\\s+)(\\S+?)::(\\w+\\s*\\([^)]*?\\))', re.MULTILINE)\n if language == 'csharp':\n docstring = p.sub(translateCSharpCrossRef, docstring)\n elif language == 'java':\n docstring = p.sub(translateJavaCrossRef, docstring)\n\n # Clean-up step needed because some of the procedures above are imperfect.\n # This converts \" * * @foo\" lines into \" * @foo\":\n\n p = re.compile('^(\\s+)\\*\\s+\\*\\s+@', re.MULTILINE)\n docstring = p.sub(r'\\1* @', docstring)\n\n # Take out any left-over Doxygen-style quotes, because Javadoc doesn't have\n # the %foo quoting mechanism.\n\n docstring = re.sub('(\\s)%(\\w)', r'\\1\\2', docstring)\n\n # Currently, we don't handle @ingroup.\n\n docstring = re.sub('@ingroup \\w+', '', docstring)\n\n return docstring",
"def rewriteDocstringForJava (docstring):\n\n # Preliminary: rewrite some of the data type references to equivalent\n # Java types. (Note: this rewriting affects only the documentation\n # comments inside classes & methods, not the method signatures.)\n\n docstring = docstring.replace(r'const char *', 'String ')\n docstring = docstring.replace(r'const char* ', 'String ')\n docstring = docstring.replace(r'an unsigned int', 'a long integer')\n docstring = docstring.replace(r'unsigned int', 'long')\n docstring = docstring.replace(r'const std::string&', 'String')\n docstring = docstring.replace(r'const std::string &', 'String ')\n docstring = docstring.replace(r'const std::string ', 'String ')\n docstring = docstring.replace(r'std::string', 'String')\n docstring = docstring.replace(r'NULL', 'null')\n\n # Also use Java syntax instead of \"const XMLNode*\" etc.\n\n p = re.compile(r'const (%?)(' + r')( ?)(\\*|&)', re.DOTALL)\n docstring = p.sub(rewriteClassRefAddingSpace, docstring) \n p = re.compile(r'(%?)(' + r')( ?)(\\*|&)', re.DOTALL)\n docstring = p.sub(rewriteClassRefAddingSpace, docstring) \n\n # Do the big work.\n\n docstring = sanitizeForHTML(docstring)\n\n # Fix up for a problem introduced by sanitizeForHTML: it puts {@link ...}\n # into the arguments of functions mentioned in @see's, if the function\n # has more than one argument. This gets rid of the @link's. This should\n # be fixed properly some day.\n\n p = re.compile(r'((@see|@throws)\\s+[\\w\\\\ ,.\\'\"=<>()#]*?){@link\\s+([^}]+?)}')\n while re.search(p, docstring) != None:\n docstring = p.sub(r'\\1\\3', docstring)\n\n # Inside of @see, change double colons to pound signs.\n\n docstring = re.sub('(@see\\s+\\w+)::', r'\\1#', docstring)\n\n # The syntax for @see is slightly different: method names need to have a\n # leading pound sign character. This particular bit of code only handles\n # a single @see foo(), which means the docs have to be written that way.\n # Maybe someday in the future it should be expanded to handle\n # @see foo(), bar(), etc., but I don't have time right now to do it.\n\n docstring = re.sub('(@see\\s+)([\\w:.]+)\\(', r'\\1#\\2(', docstring)\n\n # Remove the '*' character that Javadoc doesn't want to see in @see's.\n # (This doesn't make a difference; javadoc still can't match up the refs.)\n\n # p = re.compile('@see[\\s\\w.:,()#]+[*][\\s\\w.:,()*#]')\n # docstring = p.sub(removeStar, docstring)\n\n # The syntax for @link is vastly different.\n \n p = re.compile('@link([\\s/*]+[\\w\\s,.:#()*]+[\\s/*]*[\\w():#]+[\\s/*]*)@endlink', re.DOTALL)\n docstring = p.sub(r'{@link \\1}', docstring)\n\n # Outside of @see and other constructs, dot is used to reference members\n # instead of C++'s double colon.\n\n docstring = docstring.replace(r'::', '.')\n\n # Need to escape quotation marks. The reason is that the\n # %javamethodmodifiers directives created for use with SWIG will\n # themselves be double-quoted strings, and leaving embedded quotes\n # will completely screw that up.\n\n docstring = docstring.replace('\"', \"'\")\n docstring = docstring.replace(r\"'\", r\"\\'\")\n\n return docstring",
"def docstring(\n docstring: str = None, *, pre: str = None, post: str = None\n) -> Callable[[U], U]:\n\n def edit_docstring(obj: U) -> U:\n obj.__doc__ = \"\".join(\n (\n clean_docstring(pre or \"\", unused=\"pre\"),\n clean_docstring(docstring or (obj.__doc__ or \"\")),\n clean_docstring(post or \"\", unused=\"post\"),\n )\n )\n return obj\n\n return edit_docstring",
"def docstring(self, docstring): # type: (str) -> None\n self._tmp_docstring = inspect.cleandoc(docstring)",
"def test_doc_with_comments():\n doc = CoNLL.conll2doc(input_str=RUSSIAN_SAMPLE)\n check_russian_doc(doc)",
"def remove_comments_and_docstrings(source):\n io_obj = StringIO(source)\n out = \"\"\n prev_toktype = tokenize.INDENT\n last_lineno = -1\n last_col = 0\n for tok in tokenize.generate_tokens(io_obj.readline):\n token_type = tok[0]\n token_string = tok[1]\n start_line, start_col = tok[2]\n end_line, end_col = tok[3]\n ltext = tok[4]\n # The following two conditionals preserve indentation.\n # This is necessary because we're not using tokenize.untokenize()\n # (because it spits out code with copious amounts of oddly-placed\n # whitespace).\n if start_line > last_lineno:\n last_col = 0\n if start_col > last_col:\n out += (\" \" * (start_col - last_col))\n # Remove comments:\n if token_type == tokenize.COMMENT:\n pass\n # This series of conditionals removes docstrings:\n elif token_type == tokenize.STRING:\n if prev_toktype != tokenize.INDENT:\n # This is likely a docstring; double-check we're not inside an operator:\n if prev_toktype != tokenize.NEWLINE:\n # Note regarding NEWLINE vs NL: The tokenize module\n # differentiates between newlines that start a new statement\n # and newlines inside of operators such as parens, brackes,\n # and curly braces. Newlines inside of operators are\n # NEWLINE and newlines that start new code are NL.\n # Catch whole-module docstrings:\n if start_col > 0:\n # Unlabelled indentation means we're inside an operator\n out += token_string\n # Note regarding the INDENT token: The tokenize module does\n # not label indentation inside of an operator (parens,\n # brackets, and curly braces) as actual indentation.\n # For example:\n # def foo():\n # \"The spaces before this docstring are tokenize.INDENT\"\n # test = [\n # \"The spaces before this string do not get a token\"\n # ]\n\n else:\n out += token_string\n prev_toktype = token_type\n last_col = end_col\n last_lineno = end_line\n out = '\\n'.join([line for line in out.splitlines() if line.strip()])\n return out",
"def sphinxify(docstring, context, buildername='html', img_path=''):\n if img_path:\n if os.name == 'nt':\n img_path = img_path.replace('\\\\', '/')\n leading = '/' if os.name.startswith('posix') else ''\n docstring = docstring.replace('_images', leading+img_path)\n\n srcdir = osp.join(DOCDIR, '_sources')\n if not osp.exists(srcdir):\n os.makedirs(srcdir)\n base_name = osp.join(srcdir, xrtQookPageName)\n rst_name = base_name + '.rst'\n\n # This is needed so users can type \\\\ on latex eqnarray envs inside raw\n # docstrings\n docstring = docstring.replace('\\\\\\\\', '\\\\\\\\\\\\\\\\')\n\n # Add a class to several characters on the argspec. This way we can\n # highlight them using css, in a similar way to what IPython does.\n # NOTE: Before doing this, we escape common html chars so that they\n # don't interfere with the rest of html present in the page\n argspec = escape(context['argspec'])\n for char in ['=', ',', '(', ')', '*', '**']:\n argspec = argspec.replace(\n char, '<span class=\"argspec-highlight\">' + char + '</span>')\n context['argspec'] = argspec\n\n doc_file = codecs.open(rst_name, 'w', encoding='utf-8')\n doc_file.write(docstring)\n doc_file.close()\n\n confoverrides = {'html_context': context,\n 'extensions': ['sphinx.ext.mathjax',\n 'sphinxcontrib.jquery']}\n\n doctreedir = osp.join(DOCDIR, 'doctrees')\n sphinx_app = Sphinx(srcdir, DOCDIR, DOCDIR, doctreedir, buildername,\n confoverrides, status=None, warning=None,\n freshenv=True, warningiserror=False, tags=None)\n\n try:\n sphinx_app.build(None, [rst_name])\n except SystemMessage:\n pass",
"def clean_docstring(doc: str, unused: Literal[\"pre\", \"post\"] = None) -> str:\n doc = doc.split(\"\\n\")\n if unused == \"pre\":\n try:\n index = next(i for i, l in enumerate(doc) if l.strip())\n doc = doc[index:]\n except StopIteration:\n doc = []\n elif unused == \"post\":\n try:\n index = next(i for i, l in enumerate(reversed(doc)) if l.strip())\n doc = doc[: len(doc) - index]\n except StopIteration:\n doc = []\n if doc:\n first_line = doc[0]\n index = len(first_line) - len(first_line.lstrip())\n indent = first_line[:index]\n if all(l.startswith(indent) for l in doc if l.strip()):\n doc = [(l[index:] if l.strip() else l) for l in doc]\n return \"\\n\".join(doc)",
"def main_docstring():",
"def sphinxify(docstring, context, buildername='html'):\n\n srcdir = mkdtemp()\n srcdir = encoding.to_unicode_from_fs(srcdir)\n\n base_name = osp.join(srcdir, 'docstring')\n rst_name = base_name + '.rst'\n\n if buildername == 'html':\n suffix = '.html'\n else:\n suffix = '.txt'\n output_name = base_name + suffix\n\n # This is needed so users can type \\\\ on latex eqnarray envs inside raw\n # docstrings\n if context['right_sphinx_version'] and context['math_on']:\n docstring = docstring.replace('\\\\\\\\', '\\\\\\\\\\\\\\\\')\n \n # Add a class to several characters on the argspec. This way we can\n # highlight them using css, in a similar way to what IPython does.\n argspec = context['argspec']\n for char in ['=', ',', '(', ')', '*', '**']:\n argspec = argspec.replace(char,\n '<span class=\"argspec-highlight\">' + char + '</span>')\n context['argspec'] = argspec\n\n doc_file = codecs.open(rst_name, 'w', encoding='utf-8')\n doc_file.write(docstring)\n doc_file.close()\n \n temp_confdir = False\n if temp_confdir:\n # TODO: This may be inefficient. Find a faster way to do it.\n confdir = mkdtemp()\n confdir = encoding.to_unicode_from_fs(confdir)\n generate_configuration(confdir)\n else:\n confdir = osp.join(get_module_source_path('spyderlib.utils.inspector'))\n\n confoverrides = {'html_context': context}\n\n doctreedir = osp.join(srcdir, 'doctrees')\n\n sphinx_app = Sphinx(srcdir, confdir, srcdir, doctreedir, buildername,\n confoverrides, status=None, warning=None,\n freshenv=True, warningiserror=False, tags=None)\n try:\n sphinx_app.build(None, [rst_name])\n except SystemMessage:\n output = _(\"It was not possible to generate rich text help for this \"\n \"object.</br>\"\n \"Please see it in plain text.\")\n return warning(output)\n\n # TODO: Investigate if this is necessary/important for us\n if osp.exists(output_name):\n output = codecs.open(output_name, 'r', encoding='utf-8').read()\n output = output.replace('<pre>', '<pre class=\"literal-block\">')\n else:\n output = _(\"It was not possible to generate rich text help for this \"\n \"object.</br>\"\n \"Please see it in plain text.\")\n return warning(output)\n\n if temp_confdir:\n shutil.rmtree(confdir, ignore_errors=True)\n shutil.rmtree(srcdir, ignore_errors=True)\n\n return output",
"def clean_doc(doc):\r\n # Replace regular enter (i.e. mere comment formatting in cpp file)\r\n # with space\r\n doc = doc.replace(\"\\n\", \" \")\r\n\r\n # The removal can cause a \"hard enter\" (literal \\n) to get an unintended\r\n # trailing space - trim those.\r\n doc = doc.replace(\"\\\\n \", \"\\\\n\")\r\n return '\"%s\"' % doc",
"def __init__ (self, isInternal, docstring, name, args, isConst):\n\n self.name = name\n self.isConst = isConst\n self.isInternal = isInternal\n\n if isInternal:\n if language == 'java':\n # We have a special Javadoc doclet that understands a non-standard\n # Javadoc tag, @internal. When present in the documentation string\n # of a method, it causes it to be excluded from the final\n # documentation output. @internal is something doxygen offers.\n #\n p = re.compile('(\\s+?)\\*/', re.MULTILINE)\n self.docstring = p.sub(r'\\1* @internal\\1*/', docstring)\n elif language == 'csharp':\n # We mark internal methods in a different way for C#.\n self.docstring = docstring\n else:\n self.docstring = \" @internal\\n\" + docstring\n else:\n self.docstring = docstring\n\n # In Java and C#, if a method is const and swig has to translate the type,\n # then for some reason swig cannot match up the resulting doc strings\n # that we put into %javamethodmodifiers. The result is that the java\n # documentation for the methods are empty. I can't figure out why, but\n # have figured out that if we omit the argument list in the doc string\n # that is put on %javamethodmodifiers for such case, swig does generate \n # the comments for those methods. This approach is potentially dangerous\n # because swig might attach the doc string to the wrong method if a\n # methods has multiple versions with varying argument types, but the\n # combination doesn't seem to arise in antimony currently, and anyway,\n # this fixes a real problem in the Java documentation for antimony.\n\n if language == 'java' or language == 'csharp':\n if isConst and (args.find('unsigned int') >= 0):\n self.args = ''\n elif not args.strip() == '()':\n if isConst:\n self.args = args + ' const'\n else:\n self.args = args\n else:\n if isConst:\n self.args = '() const'\n else:\n self.args = ''\n else:\n self.args = args",
"def doDocStrings(parentNode, srcNode):\n def makeDocElement(name, content):\n node = libxml2.newNode(name)\n node.addChild(libxml2.newText(content))\n return node\n \n autodoc = getAttr(srcNode, \"python_autodoc\")\n docstr = getAttr(srcNode, \"feature_docstring\")\n if autodoc:\n parentNode.addChild(makeDocElement(\"autodoc\", autodoc))\n if docstr:\n parentNode.addChild(makeDocElement(\"docstring\", docstr))",
"def convert_doxygen_docstring(lines, name):\n\n lines = lines[:]\n newlines = []\n indent = 0\n reading_desc = False\n\n while lines:\n line = lines.pop(0)\n if line.startswith(\"////\"):\n continue\n\n line = line.rstrip()\n if line.startswith('///<'):\n strline = line[4:]\n else:\n strline = line\n\n strline = strline.lstrip('/ \\t')\n\n if strline == \"**\" or strline == \"*/\":\n continue\n\n if strline.startswith(\"** \"):\n strline = strline[3:]\n elif strline.startswith(\"* \"):\n strline = strline[2:]\n elif strline == \"*\":\n strline = \"\"\n\n strline = strline.lstrip(' \\t')\n\n if strline.startswith('@'):\n special = strline.split(' ', 1)[0][1:]\n if special == 'par' and strline.endswith(':') and lines and '@code' in lines[0]:\n newlines.append(' '*indent + strline[5:] + ':')\n newlines.append('')\n line = lines.pop(0)\n offset = line.index('@code')\n while lines:\n line = lines.pop(0)\n if '@endverbatim' in line or '@endcode' in line:\n break\n newlines.append(' ' + line[offset:])\n\n newlines.append('')\n continue\n elif special == \"verbatim\" or special == \"code\":\n if newlines and newlines[-1]:\n newlines.append('')\n\n newlines.append('.. code-block:: guess')\n newlines.append('')\n offset = line.index('@' + special)\n while lines:\n line = lines.pop(0)\n if '@endverbatim' in line or '@endcode' in line:\n break\n newlines.append(' ' + line[offset:])\n\n newlines.append('')\n continue\n elif special == \"f[\":\n if newlines and newlines[-1]:\n newlines.append('')\n\n newlines.append('.. math::')\n newlines.append('')\n offset = line.index('@' + special)\n while lines:\n line = lines.pop(0)\n if '@f]' in line:\n break\n newlines.append(' ' + line[offset:])\n\n newlines.append('')\n continue\n elif special == 'param':\n #TODO\n #if extra is not None:\n # _, name, desc = strline.split(' ', 2)\n # extra['param:' + name] = desc\n continue\n elif special == 'deprecated':\n if newlines and newlines[-1]:\n newlines.append('')\n\n _, value = strline.split(' ', 1)\n\n # I'd love to use the proper Sphinx deprecated tag, but it\n # requires a version number, whereas Doxygen doesn't.\n newlines.append('*Deprecated:* ' + convert_doxygen_format(value, name))\n newlines.append('')\n continue\n elif special in ('brief', 'return', 'returns'):\n #TODO\n #if extra is not None:\n # _, value = strline.split(' ', 1)\n # extra[special] = value\n continue\n elif special == 'details':\n strline = strline[9:]\n elif special == 'sa' or special == 'see':\n if newlines and newlines[-1]:\n newlines.append('')\n\n _, value = strline.split(' ', 1)\n values = value.split(',')\n\n for i, value in enumerate(values):\n result = resolve_reference(value.partition('(')[0], name)\n if result:\n values[i] = ':{0}:`{1}`'.format(*result)\n else:\n values[i] = ':obj:`{0}`'.format(value)\n\n if special == 'see':\n newlines.append('See {}.'.format(', '.join(values)))\n else:\n newlines.append('See also {}.'.format(', '.join(values)))\n newlines.append('')\n continue\n elif special in ('note', 'warning'):\n if newlines and newlines[-1]:\n newlines.append('')\n\n newlines.append('.. %s:: ' % (special))\n newlines.append('')\n newlines.append(' ' + convert_doxygen_format(strline[2 + len(special):], name))\n while lines and lines[0].strip(' *\\t/'):\n line = lines.pop(0).lstrip(' *\\t')\n newlines.append(' ' + convert_doxygen_format(line, name))\n\n newlines.append('')\n continue\n elif special == 'since':\n if newlines and newlines[-1]:\n newlines.append('')\n\n newlines.append('.. 
versionadded:: ' + strline[7:])\n newlines.append('')\n continue\n else:\n print(\"Unhandled documentation tag: @\" + special)\n\n if strline or len(newlines) > 0:\n newlines.append(' '*indent + convert_doxygen_format(strline, name))\n\n return newlines",
"def docstrings(param1, param2):\n return \"example string\"",
"def DocString():\n return",
"def old_function_with_docstring(x, y):\n return x + y",
"def clean_schema_doc_string(doc_str, add_prefix=None, add_postfix=None, rst_format='**', remove_html_tags=True):\n prefix = ' ' if add_prefix is None else add_prefix\n clean_doc_str = doc_str\n if remove_html_tags:\n clean_doc_str = clean_doc_str.replace('<', '<')\n clean_doc_str = clean_doc_str.replace('>', '>')\n clean_doc_str = clean_doc_str.replace('<b>', ' **')\n clean_doc_str = clean_doc_str.replace('</b>', '** ')\n clean_doc_str = clean_doc_str.replace('<i>', ' *')\n clean_doc_str = clean_doc_str.replace('</i>', '* ')\n clean_doc_str = clean_doc_str.replace(':blue:', '')\n\n clean_doc_str = clean_doc_str.replace('COMMENT:', '%s%sComment:%s ' %\n (prefix, rst_format, rst_format))\n clean_doc_str = clean_doc_str.replace('MORE_INFO:', '%s%sAdditional Information:%s ' %\n (prefix, rst_format, rst_format))\n clean_doc_str = clean_doc_str.replace('NOTE:', '%s %sAdditional Information:%s ' %\n (prefix, rst_format, rst_format))\n if add_postfix is not None:\n clean_doc_str += add_postfix\n return clean_doc_str",
"def doc_string():\n pass # pass does nothing",
"def _parse_docstring(doc):\n _cache_key = doc\n try:\n return _parse_docstring_cache[_cache_key]\n except KeyError:\n pass\n\n if doc is None:\n return _Doc('', '', {})\n\n # Convert Google- or Numpy-style docstrings to RST.\n # (Should do nothing if not in either style.)\n doc = str(GoogleDocstring(doc))\n doc = str(NumpyDocstring(doc))\n\n tree = publish_doctree(doc)\n\n class Visitor(NodeVisitor):\n optional = [\n 'document', 'docinfo',\n 'field_list', 'field_body',\n 'literal', 'problematic']\n\n def __init__(self, document):\n NodeVisitor.__init__(self, document)\n self.paragraphs = []\n self.start_lines = []\n self.params = defaultdict(dict)\n self._current_paragraph = None\n self._indent_iterator_stack = []\n self._indent_stack = []\n\n def _do_nothing(self, node):\n pass\n\n def visit_paragraph(self, node):\n self.start_lines.append(node.line)\n self._current_paragraph = []\n\n def depart_paragraph(self, node):\n text = ''.join(self._current_paragraph)\n text = ''.join(self._indent_stack) + text\n self._indent_stack = [\n ' ' * len(item) for item in self._indent_stack]\n text = text.replace('\\n', '\\n' + ''.join(self._indent_stack))\n self.paragraphs.append(text)\n self._current_paragraph = None\n\n def visit_Text(self, node):\n self._current_paragraph.append(node)\n\n depart_Text = _do_nothing\n\n def visit_emphasis(self, node):\n self._current_paragraph.append('\\033[3m') # *foo*: italic\n\n def visit_strong(self, node):\n self._current_paragraph.append('\\033[1m') # **foo**: bold\n\n def visit_title_reference(self, node):\n self._current_paragraph.append('\\033[4m') # `foo`: underlined\n\n def _depart_markup(self, node):\n self._current_paragraph.append('\\033[0m')\n\n depart_emphasis = depart_strong = depart_title_reference = \\\n _depart_markup\n\n def visit_literal_block(self, node):\n text, = node\n self.start_lines.append(node.line)\n self.paragraphs.append(re.sub('^|\\n', r'\\g<0> ', text)) # indent\n raise SkipNode\n\n def visit_bullet_list(self, node):\n self._indent_iterator_stack.append(\n (node['bullet'] + ' ' for _ in range(len(node))))\n\n def depart_bullet_list(self, node):\n self._indent_iterator_stack.pop()\n\n def visit_enumerated_list(self, node):\n enumtype = node['enumtype']\n fmt = {('(', ')'): 'parens',\n ('', ')'): 'rparen',\n ('', '.'): 'period'}[node['prefix'], node['suffix']]\n try:\n start = node['start']\n except KeyError:\n start = 1\n else:\n start = {\n 'arabic': int,\n 'loweralpha': lambda s: ord(s) - ord('a') + 1,\n 'upperalpha': lambda s: ord(s) - ord('A') + 1,\n 'lowerroman': lambda s: roman.fromRoman(s.upper()),\n 'upperroman': lambda s: roman.fromRoman(s),\n }[enumtype](start)\n enumerators = [Body(None).make_enumerator(i, enumtype, fmt)[0]\n for i in range(start, start + len(node))]\n width = max(map(len, enumerators))\n enumerators = [enum.ljust(width) for enum in enumerators]\n self._indent_iterator_stack.append(iter(enumerators))\n\n def depart_enumerated_list(self, node):\n self._indent_iterator_stack.pop()\n\n def visit_list_item(self, node):\n self._indent_stack.append(next(self._indent_iterator_stack[-1]))\n\n def depart_list_item(self, node):\n self._indent_stack.pop()\n\n def visit_field(self, node):\n field_name_node, field_body_node = node\n field_name, = field_name_node\n parts = field_name.split()\n if len(parts) == 2:\n doctype, name = parts\n elif len(parts) == 3:\n doctype, type_, name = parts\n if doctype not in _PARAM_TYPES:\n raise SkipNode\n if 'type' in self.params[name]:\n raise ValueError('type defined twice for 
{}'.format(name))\n self.params[name]['type'] = type_\n else:\n raise SkipNode\n if doctype in _PARAM_TYPES:\n doctype = 'param'\n if doctype in _TYPE_NAMES:\n doctype = 'type'\n if doctype in self.params[name]:\n raise ValueError('{} defined twice for {}'.format(doctype, name))\n visitor = Visitor(self.document)\n field_body_node.walkabout(visitor)\n self.params[name][doctype] = ''.join(visitor.paragraphs)\n raise SkipNode\n\n def visit_comment(self, node):\n raise SkipNode\n\n def visit_system_message(self, node):\n raise SkipNode\n\n visitor = Visitor(tree)\n tree.walkabout(visitor)\n\n tuples = {name: _Param(values.get('param'), values.get('type'))\n for name, values in visitor.params.items()}\n if visitor.paragraphs:\n text = []\n for start, paragraph, next_start in zip(\n visitor.start_lines,\n visitor.paragraphs,\n visitor.start_lines[1:] + [0]):\n text.append(paragraph)\n # We insert a space before each newline to prevent argparse\n # from stripping consecutive newlines down to just two\n # (http://bugs.python.org/issue31330).\n text.append(' \\n' * (next_start - start - paragraph.count('\\n')))\n parsed = _Doc('', ''.join(text), tuples)\n else:\n parsed = _Doc('', '', tuples)\n _parse_docstring_cache[_cache_key] = parsed\n return parsed",
"def test_0_check_xc_docstring(self):\n self.banner(\"Checking the docstring on your extra credit.\") \n filename = self.find_file('project9_xc.py')\n self.check_docstring(filename)",
"def generic_run_standardize_comments(raw_input_file, clean_output_file):\n df = pd.read_csv(raw_input_file)\n df = df.drop(['Unnamed: 0'], axis=1)\n\n standardized_df = standardize_comments(df, 'body')\n print(standardized_df.head())\n print()\n print('original length:', len(df))\n print('standardized length:', len(standardized_df))\n print('removed', len(df) - len(standardized_df), 'comments')\n\n # THIS MIGHT BRING BACK THE UTF-8 ENCODING EMOJIS. MIGHT HAVE TO WRITE TO CSV IN ASCII\n standardized_df.to_csv(clean_output_file)",
"def strip_docstring(blob):\n docstring = True\n while docstring == True:\n match_docstring = re.search('\\n\\s*\"\"\"[^\"\"\"]*\"\"\"', blob)\n if not match_docstring:\n docstring = False\n else:\n blob = blob.replace(blob[match_docstring.span()[0]:match_docstring.span()[1]], '')\n return blob",
"def cppdoc(self, irc, msg, args, num, req):\n self.googleq('www.cplusplus.com/reference/', req, num, irc)",
"def convert_doxygen_format(line, name):\n\n line = line.replace('<b>', '**').replace('</b>', '**')\n\n # Single backticks in doxygen map to doubles in Sphinx\n line = line.replace('`', '``')\n\n # But double backticks are literal backticks\n line = line.replace('````', '\\\\`')\n\n # Search for method and class references. We pick them up either when they\n # have a scoping operator, or when they end with (), or we would match all\n # the words in the text!\n origline = line\n for m in re.finditer(r'\\b([a-zA-Z_][a-zA-Z0-9_.:]*)\\(\\)|\\b([a-zA-Z_][a-zA-Z0-9_]*::[a-zA-Z_][a-zA-Z0-9_.:]*)\\b', origline):\n result = resolve_reference(m.group(0).rstrip('()'), name)\n if not result:\n continue\n\n if '::' in m.group():\n # We want a scoped name, apparently.\n ref = ':{0}:`{1}`'.format(*result)\n else:\n ref = ':{0}:`~{1}`'.format(*result)\n\n # Are we inside double-backticks?\n if origline[:m.start()].count('``') % 2 != 0:\n # Only replace if it's entirely wrapped in backticks.\n line = line.replace('``' + m.group() + '``', ref)\n else:\n line = line.replace(m.group(), ref)\n\n return line",
"def docstring_format(*values):\n\n def _decorator_(function):\n function.__doc__ = function.__doc__.format(*values).replace('_', '\\_')\n return function\n\n return _decorator_",
"def update_docstring(instance):\n try:\n docstring = instance.api_map['doc']\n except (KeyError, TypeError):\n docstring = 'No docstring provided.'\n\n instance.__class__.__doc__ = docstring\n instance.__class__.__call__.__signature__ = construct_signature(instance)\n\n return docstring"
] |
[
"0.71808505",
"0.68017626",
"0.65541434",
"0.64886326",
"0.6318638",
"0.6209112",
"0.59584296",
"0.5808757",
"0.5648765",
"0.55061555",
"0.55013335",
"0.5500921",
"0.5479426",
"0.54745865",
"0.5454628",
"0.5422479",
"0.5376744",
"0.5362173",
"0.53605646",
"0.52951014",
"0.52797174",
"0.5236166",
"0.5203575",
"0.51892895",
"0.5183287",
"0.51290923",
"0.50928533",
"0.5090464",
"0.50156367",
"0.5005119"
] |
0.8077192
|
0
|
rewriteDocstringForPython (docstring) > docstring Performs some minimal Python specific sanitizations on the C++/Doxygen docstring.
|
def rewriteDocstringForPython (docstring):
# Take out the C++ comment start and end.
docstring = docstring.replace('/**', '').replace('*/', '')
p = re.compile('^(\s*)\*([ \t]*)', re.MULTILINE)
docstring = p.sub(r'\2', docstring)
# Rewrite some of the data type references to equivalent Python types.
# (Note: this rewriting affects only the documentation comments inside
# classes & methods, not the method signatures.)
docstring = docstring.replace(r'const char *', 'string ')
docstring = docstring.replace(r'const char* ', 'string ')
docstring = docstring.replace(r'an unsigned int', 'a long integer')
docstring = docstring.replace(r'unsigned int', 'long')
docstring = docstring.replace(r'const std::string&', 'string')
docstring = docstring.replace(r'const std::string', 'string')
docstring = docstring.replace(r'std::string', 'string')
docstring = docstring.replace(r'NULL', 'None')
docstring = docstring.replace(r'@c true', '@c True')
docstring = docstring.replace(r'@c false', '@c False')
# Also use Python syntax instead of "const XMLNode*" etc.
p = re.compile(r'const (%?)(' + r') ?(\*|&)', re.DOTALL)
docstring = p.sub(rewriteClassRef, docstring)
p = re.compile(r'(%?)(' + r') ?(\*|&)', re.DOTALL)
docstring = p.sub(rewriteClassRef, docstring)
# Need to escape the quotation marks:
docstring = docstring.replace('"', "'")
docstring = docstring.replace(r"'", r"\'")
# Python method cross-references won't be made by doxygen unless
# the method reference is written without arguments.
p = re.compile('(\s+)(\S+?)::(\w+\s*)(\([^)]*?\))', re.MULTILINE)
docstring = p.sub(translatePythonCrossRef, docstring)
p = re.compile('(@see\s+)(\w+\s*)(\([^)]*?\))')
docstring = p.sub(translatePythonSeeRef, docstring)
# Friggin' doxygen escapes HTML character codes, so the hack we have to
# do for Javadoc turns out doesn't work for the Python documentation.
# Kluge around it.
docstring = re.sub(r'\\f\$\\geq\\f\$', '>=', docstring)
docstring = re.sub(r'\\f\$\\leq\\f\$', '<=', docstring)
docstring = re.sub(r'\\f\$\\times\\f\$', '*', docstring)
# SWIG does some bizarre truncation of leading characters that
# happens to hit us because of how we have to format verbatim's.
# This tries to kluge around it:
p = re.compile('@verbatim.+?@endverbatim', re.DOTALL)
docstring = p.sub(indentVerbatimForPython, docstring)
return docstring
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def rewriteDocstringForPerl (docstring):\n\n # Get rid of the /** ... */ and leading *'s.\n docstring = docstring.replace('/**', '').replace('*/', '').replace('*', ' ')\n\n # Get rid of indentation\n p = re.compile('^\\s+(\\S*\\s*)', re.MULTILINE)\n docstring = p.sub(r'\\1', docstring)\n\n # Get rid of paragraph indentation not caught by the code above.\n p = re.compile('^[ \\t]+(\\S)', re.MULTILINE)\n docstring = p.sub(r'\\1', docstring)\n\n # Get rid of blank lines.\n p = re.compile('^[ \\t]+$', re.MULTILINE)\n docstring = p.sub(r'', docstring)\n\n # Get rid of the %foo quoting.\n docstring = re.sub('(\\s)%(\\w)', r'\\1\\2', docstring)\n\n # The following are done in pairs because I couldn't come up with a\n # better way to catch the case where @c and @em end up alone at the end\n # of a line and the thing to be formatted starts on the next one after\n # the comment '*' character on the beginning of the line.\n\n docstring = re.sub('@c *([^ ,.:;()/*\\n\\t]+)', r'C<\\1>', docstring)\n docstring = re.sub('@c(\\n[ \\t]*\\*[ \\t]*)([^ ,.:;()/*\\n\\t]+)', r'\\1C<\\2>', docstring)\n docstring = re.sub('@p +([^ ,.:;()/*\\n\\t]+)', r'C<\\1>', docstring)\n docstring = re.sub('@p(\\n[ \\t]*\\*[ \\t]+)([^ ,.:;()/*\\n\\t]+)', r'\\1C<\\2>', docstring)\n docstring = re.sub('@em *([^ ,.:;()/*\\n\\t]+)', r'I<\\1>', docstring)\n docstring = re.sub('@em(\\n[ \\t]*\\*[ \\t]*)([^ ,.:;()/*\\n\\t]+)', r'\\1I<\\2>', docstring)\n\n docstring = docstring.replace('<ul>', '\\n=over\\n')\n docstring = docstring.replace('<li> ', '\\n=item\\n\\n')\n docstring = docstring.replace('</ul>', '\\n=back\\n')\n\n docstring = docstring.replace('@return', 'Returns')\n docstring = docstring.replace(' < ', ' E<lt> ').replace(' > ', ' E<gt> ')\n docstring = re.sub('<code>([^<]*)</code>', r'C<\\1>', docstring)\n docstring = re.sub('<b>([^<]*)</b>', r'B<\\1>', docstring) \n\n return docstring",
"def docstring_hack():\n pass",
"def sanitizeForHTML (docstring):\n\n # Remove @~, which we use as a hack in Doxygen 1.7-1.8\n\n docstring = docstring.replace(r'@~', '')\n\n # First do conditional section inclusion based on the current language.\n # Our possible conditional elements and their meanings are:\n #\n # java: only Java\n # python: only Python\n # perl: only Perl\n # cpp: only C++\n # csharp: only C#\n # conly: only C\n # clike: C, C++\n # notcpp:\tnot C++\n # notclike: not C or C++\n #\n # The notcpp/notclike variants are because Doxygen 1.6.x doesn't have\n # @ifnot, yet sometimes we want to say \"if not C or C++\".\n\n cases = 'java|python|perl|cpp|csharp|conly|clike|notcpp|notclike'\n p = re.compile('@if\\s+(' + cases + ')\\s+(.+?)((@else)\\s+(.+?))?@endif', re.DOTALL)\n docstring = p.sub(translateIfElse, docstring)\n\n # Replace blank lines between paragraphs with <p>. There are two main\n # cases: comments blocks whose lines always begin with an asterix (e.g.,\n # C/C++), and comment blocks where they don't (e.g., Python). The third\n # substitution below does the same thing for blank lines, except for the\n # very end of the doc string.\n\n p = re.compile('^(\\s+)\\*\\s*$', re.MULTILINE)\n docstring = p.sub(r'\\1* <p>', docstring)\n p = re.compile('^((?!\\s+\\Z)\\s+)$', re.MULTILINE)\n docstring = p.sub(r'\\1<p>', docstring)\n p = re.compile('^(?!\\Z)$', re.MULTILINE)\n docstring = p.sub(r'<p>', docstring)\n\n # Javadoc doesn't have an @htmlinclude command, so we process the file\n # inclusion directly here.\n\n p = re.compile('@htmlinclude\\s+([^\\s:;,(){}+|?\"\\'/]+)([\\s:;,(){}+|?\"\\'/])', re.MULTILINE)\n docstring = p.sub(translateInclude, docstring)\n\n # There's no Javadoc verbatim or @code/@endcode equivalent, so we have to\n # convert it to raw HTML and transform the content too. This requires\n # helpers. The following treats both @verbatim and @code the same way.\n\n p = re.compile('@verbatim.+?@endverbatim', re.DOTALL)\n docstring = p.sub(translateVerbatim, docstring)\n p = re.compile('@code.+?@endcode', re.DOTALL)\n docstring = p.sub(translateVerbatim, docstring)\n\n # Javadoc doesn't have a @section or @subsection commands, so we translate\n # those ourselves.\n\n p = re.compile('@section\\s+[^\\s]+\\s+(.*)$', re.MULTILINE)\n docstring = p.sub(r'<h2>\\1</h2>', docstring)\n p = re.compile('@subsection\\s+[^\\s]+\\s+(.*)$', re.MULTILINE)\n docstring = p.sub(r'<h3>\\1</h3>', docstring)\n p = re.compile('@subsubsection\\s+[^\\s]+\\s+(.*)$', re.MULTILINE)\n docstring = p.sub(r'<h4>\\1</h4>', docstring)\n\n # Javadoc doesn't have an @image command. We translate @image html\n # but ditch @image latex.\n\n p = re.compile('@image\\s+html+\\s+([^\\s]+).*$', re.MULTILINE)\n docstring = p.sub(r\"<center><img src='\\1'></center><br>\", docstring)\n p = re.compile('@image\\s+latex+\\s+([^\\s]+).*$', re.MULTILINE)\n docstring = p.sub(r'', docstring)\n\n # Doxygen doesn't understand HTML character codes like ≥, so we've\n # been using doxygen's Latex facility to get special mathematical\n # characters into the documentation, but as luck would have it, Javadoc\n # doesn't understand the Latex markup. 
All of this is getting old.\n\n docstring = re.sub(r'\\\\f\\$\\\\geq\\\\f\\$', '≥', docstring)\n docstring = re.sub(r'\\\\f\\$\\\\leq\\\\f\\$', '≤', docstring)\n docstring = re.sub(r'\\\\f\\$\\\\times\\\\f\\$', '×', docstring)\n\n # The following are done in pairs because I couldn't come up with a\n # better way to catch the case where @c and @em end up alone at the end\n # of a line and the thing to be formatted starts on the next one after\n # the comment '*' character on the beginning of the line.\n\n docstring = re.sub('@c *([^ ,;()/*\\n\\t]+)', r'<code>\\1</code>', docstring)\n docstring = re.sub('@c(\\n[ \\t]*\\*[ \\t]*)([^ ,;()/*\\n\\t]+)', r'\\1<code>\\2</code>', docstring)\n docstring = re.sub('@p +([^ ,.:;()/*\\n\\t]+)', r'<code>\\1</code>', docstring)\n docstring = re.sub('@p(\\n[ \\t]*\\*[ \\t]+)([^ ,.:;()/*\\n\\t]+)', r'\\1<code>\\2</code>', docstring)\n docstring = re.sub('@em *([^ ,.:;()/*\\n\\t]+)', r'<em>\\1</em>', docstring)\n docstring = re.sub('@em(\\n[ \\t]*\\*[ \\t]*)([^ ,.:;()/*\\n\\t]+)', r'\\1<em>\\2</em>', docstring)\n\n # Convert @li into <li>, but also add <ul> ... </ul>. This is a bit\n # simple-minded (I suppose like most of this code), but ought to work\n # for the cases we use in practice.\n\n p = re.compile('^(\\s+\\*\\s+)(@li\\s+.*?)(\\s+)(\\*/|\\*\\s+@(?!li\\s)|\\*\\s+<p>)', re.MULTILINE|re.DOTALL)\n docstring = p.sub(rewriteList, docstring)\n\n # Wrap @deprecated content with a class so that we can style it.\n\n p = re.compile('^(\\s+\\*\\s+)(@deprecated\\s)((\\S|\\s)+)(<p>|\\*/)', re.MULTILINE|re.DOTALL)\n docstring = p.sub(rewriteDeprecated, docstring)\n\n # Doxygen automatically cross-references class names in text to the class\n # definition page, but Javadoc does not. Rather than having to put in a\n # lot conditional @if/@endif's into the documentation to manually create\n # cross-links just for the Java case, let's automate. This needs to be\n # done better (e.g., by not hard-wiring the class names).\n\n p = re.compile(r'([^a-zA-Z0-9_.\">])(' + r')\\b([^:])', re.DOTALL)\n if language == 'csharp':\n docstring = p.sub(translateClassRefCSharp, docstring)\n elif language == 'java':\n docstring = p.sub(translateClassRefJava, docstring)\n\n # Massage method cross-references.\n\n p = re.compile('(\\s+)(\\S+?)::(\\w+\\s*\\([^)]*?\\))', re.MULTILINE)\n if language == 'csharp':\n docstring = p.sub(translateCSharpCrossRef, docstring)\n elif language == 'java':\n docstring = p.sub(translateJavaCrossRef, docstring)\n\n # Clean-up step needed because some of the procedures above are imperfect.\n # This converts \" * * @foo\" lines into \" * @foo\":\n\n p = re.compile('^(\\s+)\\*\\s+\\*\\s+@', re.MULTILINE)\n docstring = p.sub(r'\\1* @', docstring)\n\n # Take out any left-over Doxygen-style quotes, because Javadoc doesn't have\n # the %foo quoting mechanism.\n\n docstring = re.sub('(\\s)%(\\w)', r'\\1\\2', docstring)\n\n # Currently, we don't handle @ingroup.\n\n docstring = re.sub('@ingroup \\w+', '', docstring)\n\n return docstring",
"def rewriteDocstringForCSharp (docstring):\n\n # Preliminary: rewrite some of the data type references to equivalent\n # C# types. (Note: this rewriting affects only the documentation\n # comments inside classes & methods, not the actual method signatures.)\n\n docstring = docstring.replace(r'const char *', 'string ')\n docstring = docstring.replace(r'const char* ', 'string ')\n docstring = docstring.replace(r'an unsigned int', 'a long integer')\n docstring = docstring.replace(r'unsigned int', 'long')\n docstring = docstring.replace(r'const std::string&', 'string')\n docstring = docstring.replace(r'const std::string &', 'string ')\n docstring = docstring.replace(r'const std::string', 'string')\n docstring = docstring.replace(r'std::string', 'string')\n docstring = docstring.replace(r'const ', '')\n docstring = docstring.replace(r'NULL', 'null')\n docstring = docstring.replace(r'boolean', 'bool')\n\n # Use C# syntax instead of \"const XMLNode*\" etc.\n\n p = re.compile(r'const (%?)(' + r')( ?)(\\*|&)', re.DOTALL)\n docstring = p.sub(rewriteClassRefAddingSpace, docstring) \n p = re.compile(r'(%?)(' + r')( ?)(\\*|&)', re.DOTALL)\n docstring = p.sub(rewriteClassRefAddingSpace, docstring) \n\n # <code> has its own special meaning in C#; we have to turn our input\n # file's uses of <code> into <c>. Conversely, we have to turn our\n # uses of verbatim to <code>.\n\n p = re.compile(r'<code>(.+?)</code>', re.DOTALL)\n docstring = p.sub(r'<c>\\1</c>', docstring)\n p = re.compile('@verbatim(.+?)@endverbatim', re.DOTALL)\n docstring = p.sub(r'<code>\\1</code>', docstring)\n\n # Do replacements on some documentation text we sometimes use.\n\n p = re.compile(r'antimonyConstants([@.])')\n docstring = p.sub(r'antimonycs.antimony\\1', docstring)\n\n # Fix @link for constants that we forgot conditionalize in the source.\n\n p = re.compile(r'@link +([A-Z_0-9]+?)@endlink', re.DOTALL)\n docstring = p.sub(r'@link antimony.\\1@endlink', docstring)\n\n # Can't use math symbols. Kluge around it.\n\n docstring = re.sub(r'\\\\f\\$\\\\geq\\\\f\\$', '>=', docstring)\n docstring = re.sub(r'\\\\f\\$\\\\leq\\\\f\\$', '<=', docstring)\n docstring = re.sub(r'\\\\f\\$\\\\times\\\\f\\$', '*', docstring)\n\n # Some additional special cases.\n\n docstring = docstring.replace(r'SBML_formulaToString()', 'antimonycs.antimony.formulaToString()')\n docstring = docstring.replace(r'SBML_parseFormula()', 'antimonycs.antimony.parseFormula()')\n\n # Need to escape the quotation marks:\n\n docstring = docstring.replace('\"', \"'\")\n docstring = docstring.replace(r\"'\", r\"\\'\") \n\n return docstring",
"def rewriteDocstringForJava (docstring):\n\n # Preliminary: rewrite some of the data type references to equivalent\n # Java types. (Note: this rewriting affects only the documentation\n # comments inside classes & methods, not the method signatures.)\n\n docstring = docstring.replace(r'const char *', 'String ')\n docstring = docstring.replace(r'const char* ', 'String ')\n docstring = docstring.replace(r'an unsigned int', 'a long integer')\n docstring = docstring.replace(r'unsigned int', 'long')\n docstring = docstring.replace(r'const std::string&', 'String')\n docstring = docstring.replace(r'const std::string &', 'String ')\n docstring = docstring.replace(r'const std::string ', 'String ')\n docstring = docstring.replace(r'std::string', 'String')\n docstring = docstring.replace(r'NULL', 'null')\n\n # Also use Java syntax instead of \"const XMLNode*\" etc.\n\n p = re.compile(r'const (%?)(' + r')( ?)(\\*|&)', re.DOTALL)\n docstring = p.sub(rewriteClassRefAddingSpace, docstring) \n p = re.compile(r'(%?)(' + r')( ?)(\\*|&)', re.DOTALL)\n docstring = p.sub(rewriteClassRefAddingSpace, docstring) \n\n # Do the big work.\n\n docstring = sanitizeForHTML(docstring)\n\n # Fix up for a problem introduced by sanitizeForHTML: it puts {@link ...}\n # into the arguments of functions mentioned in @see's, if the function\n # has more than one argument. This gets rid of the @link's. This should\n # be fixed properly some day.\n\n p = re.compile(r'((@see|@throws)\\s+[\\w\\\\ ,.\\'\"=<>()#]*?){@link\\s+([^}]+?)}')\n while re.search(p, docstring) != None:\n docstring = p.sub(r'\\1\\3', docstring)\n\n # Inside of @see, change double colons to pound signs.\n\n docstring = re.sub('(@see\\s+\\w+)::', r'\\1#', docstring)\n\n # The syntax for @see is slightly different: method names need to have a\n # leading pound sign character. This particular bit of code only handles\n # a single @see foo(), which means the docs have to be written that way.\n # Maybe someday in the future it should be expanded to handle\n # @see foo(), bar(), etc., but I don't have time right now to do it.\n\n docstring = re.sub('(@see\\s+)([\\w:.]+)\\(', r'\\1#\\2(', docstring)\n\n # Remove the '*' character that Javadoc doesn't want to see in @see's.\n # (This doesn't make a difference; javadoc still can't match up the refs.)\n\n # p = re.compile('@see[\\s\\w.:,()#]+[*][\\s\\w.:,()*#]')\n # docstring = p.sub(removeStar, docstring)\n\n # The syntax for @link is vastly different.\n \n p = re.compile('@link([\\s/*]+[\\w\\s,.:#()*]+[\\s/*]*[\\w():#]+[\\s/*]*)@endlink', re.DOTALL)\n docstring = p.sub(r'{@link \\1}', docstring)\n\n # Outside of @see and other constructs, dot is used to reference members\n # instead of C++'s double colon.\n\n docstring = docstring.replace(r'::', '.')\n\n # Need to escape quotation marks. The reason is that the\n # %javamethodmodifiers directives created for use with SWIG will\n # themselves be double-quoted strings, and leaving embedded quotes\n # will completely screw that up.\n\n docstring = docstring.replace('\"', \"'\")\n docstring = docstring.replace(r\"'\", r\"\\'\")\n\n return docstring",
"def docstring(\n docstring: str = None, *, pre: str = None, post: str = None\n) -> Callable[[U], U]:\n\n def edit_docstring(obj: U) -> U:\n obj.__doc__ = \"\".join(\n (\n clean_docstring(pre or \"\", unused=\"pre\"),\n clean_docstring(docstring or (obj.__doc__ or \"\")),\n clean_docstring(post or \"\", unused=\"post\"),\n )\n )\n return obj\n\n return edit_docstring",
"def docstring(self, docstring): # type: (str) -> None\n self._tmp_docstring = inspect.cleandoc(docstring)",
"def main_docstring():",
"def old_function_with_docstring(x, y):\n return x + y",
"def clean_schema_doc_string(doc_str, add_prefix=None, add_postfix=None, rst_format='**', remove_html_tags=True):\n prefix = ' ' if add_prefix is None else add_prefix\n clean_doc_str = doc_str\n if remove_html_tags:\n clean_doc_str = clean_doc_str.replace('<', '<')\n clean_doc_str = clean_doc_str.replace('>', '>')\n clean_doc_str = clean_doc_str.replace('<b>', ' **')\n clean_doc_str = clean_doc_str.replace('</b>', '** ')\n clean_doc_str = clean_doc_str.replace('<i>', ' *')\n clean_doc_str = clean_doc_str.replace('</i>', '* ')\n clean_doc_str = clean_doc_str.replace(':blue:', '')\n\n clean_doc_str = clean_doc_str.replace('COMMENT:', '%s%sComment:%s ' %\n (prefix, rst_format, rst_format))\n clean_doc_str = clean_doc_str.replace('MORE_INFO:', '%s%sAdditional Information:%s ' %\n (prefix, rst_format, rst_format))\n clean_doc_str = clean_doc_str.replace('NOTE:', '%s %sAdditional Information:%s ' %\n (prefix, rst_format, rst_format))\n if add_postfix is not None:\n clean_doc_str += add_postfix\n return clean_doc_str",
"def clean_docstring(doc: str, unused: Literal[\"pre\", \"post\"] = None) -> str:\n doc = doc.split(\"\\n\")\n if unused == \"pre\":\n try:\n index = next(i for i, l in enumerate(doc) if l.strip())\n doc = doc[index:]\n except StopIteration:\n doc = []\n elif unused == \"post\":\n try:\n index = next(i for i, l in enumerate(reversed(doc)) if l.strip())\n doc = doc[: len(doc) - index]\n except StopIteration:\n doc = []\n if doc:\n first_line = doc[0]\n index = len(first_line) - len(first_line.lstrip())\n indent = first_line[:index]\n if all(l.startswith(indent) for l in doc if l.strip()):\n doc = [(l[index:] if l.strip() else l) for l in doc]\n return \"\\n\".join(doc)",
"def copy_docstring(other):\n\n def wrapper(func):\n func.__doc__ = other.__doc__\n return func\n\n return wrapper",
"def convert_doxygen_docstring(lines, name):\n\n lines = lines[:]\n newlines = []\n indent = 0\n reading_desc = False\n\n while lines:\n line = lines.pop(0)\n if line.startswith(\"////\"):\n continue\n\n line = line.rstrip()\n if line.startswith('///<'):\n strline = line[4:]\n else:\n strline = line\n\n strline = strline.lstrip('/ \\t')\n\n if strline == \"**\" or strline == \"*/\":\n continue\n\n if strline.startswith(\"** \"):\n strline = strline[3:]\n elif strline.startswith(\"* \"):\n strline = strline[2:]\n elif strline == \"*\":\n strline = \"\"\n\n strline = strline.lstrip(' \\t')\n\n if strline.startswith('@'):\n special = strline.split(' ', 1)[0][1:]\n if special == 'par' and strline.endswith(':') and lines and '@code' in lines[0]:\n newlines.append(' '*indent + strline[5:] + ':')\n newlines.append('')\n line = lines.pop(0)\n offset = line.index('@code')\n while lines:\n line = lines.pop(0)\n if '@endverbatim' in line or '@endcode' in line:\n break\n newlines.append(' ' + line[offset:])\n\n newlines.append('')\n continue\n elif special == \"verbatim\" or special == \"code\":\n if newlines and newlines[-1]:\n newlines.append('')\n\n newlines.append('.. code-block:: guess')\n newlines.append('')\n offset = line.index('@' + special)\n while lines:\n line = lines.pop(0)\n if '@endverbatim' in line or '@endcode' in line:\n break\n newlines.append(' ' + line[offset:])\n\n newlines.append('')\n continue\n elif special == \"f[\":\n if newlines and newlines[-1]:\n newlines.append('')\n\n newlines.append('.. math::')\n newlines.append('')\n offset = line.index('@' + special)\n while lines:\n line = lines.pop(0)\n if '@f]' in line:\n break\n newlines.append(' ' + line[offset:])\n\n newlines.append('')\n continue\n elif special == 'param':\n #TODO\n #if extra is not None:\n # _, name, desc = strline.split(' ', 2)\n # extra['param:' + name] = desc\n continue\n elif special == 'deprecated':\n if newlines and newlines[-1]:\n newlines.append('')\n\n _, value = strline.split(' ', 1)\n\n # I'd love to use the proper Sphinx deprecated tag, but it\n # requires a version number, whereas Doxygen doesn't.\n newlines.append('*Deprecated:* ' + convert_doxygen_format(value, name))\n newlines.append('')\n continue\n elif special in ('brief', 'return', 'returns'):\n #TODO\n #if extra is not None:\n # _, value = strline.split(' ', 1)\n # extra[special] = value\n continue\n elif special == 'details':\n strline = strline[9:]\n elif special == 'sa' or special == 'see':\n if newlines and newlines[-1]:\n newlines.append('')\n\n _, value = strline.split(' ', 1)\n values = value.split(',')\n\n for i, value in enumerate(values):\n result = resolve_reference(value.partition('(')[0], name)\n if result:\n values[i] = ':{0}:`{1}`'.format(*result)\n else:\n values[i] = ':obj:`{0}`'.format(value)\n\n if special == 'see':\n newlines.append('See {}.'.format(', '.join(values)))\n else:\n newlines.append('See also {}.'.format(', '.join(values)))\n newlines.append('')\n continue\n elif special in ('note', 'warning'):\n if newlines and newlines[-1]:\n newlines.append('')\n\n newlines.append('.. %s:: ' % (special))\n newlines.append('')\n newlines.append(' ' + convert_doxygen_format(strline[2 + len(special):], name))\n while lines and lines[0].strip(' *\\t/'):\n line = lines.pop(0).lstrip(' *\\t')\n newlines.append(' ' + convert_doxygen_format(line, name))\n\n newlines.append('')\n continue\n elif special == 'since':\n if newlines and newlines[-1]:\n newlines.append('')\n\n newlines.append('.. 
versionadded:: ' + strline[7:])\n newlines.append('')\n continue\n else:\n print(\"Unhandled documentation tag: @\" + special)\n\n if strline or len(newlines) > 0:\n newlines.append(' '*indent + convert_doxygen_format(strline, name))\n\n return newlines",
"def _parse_docstring(doc):\n _cache_key = doc\n try:\n return _parse_docstring_cache[_cache_key]\n except KeyError:\n pass\n\n if doc is None:\n return _Doc('', '', {})\n\n # Convert Google- or Numpy-style docstrings to RST.\n # (Should do nothing if not in either style.)\n doc = str(GoogleDocstring(doc))\n doc = str(NumpyDocstring(doc))\n\n tree = publish_doctree(doc)\n\n class Visitor(NodeVisitor):\n optional = [\n 'document', 'docinfo',\n 'field_list', 'field_body',\n 'literal', 'problematic']\n\n def __init__(self, document):\n NodeVisitor.__init__(self, document)\n self.paragraphs = []\n self.start_lines = []\n self.params = defaultdict(dict)\n self._current_paragraph = None\n self._indent_iterator_stack = []\n self._indent_stack = []\n\n def _do_nothing(self, node):\n pass\n\n def visit_paragraph(self, node):\n self.start_lines.append(node.line)\n self._current_paragraph = []\n\n def depart_paragraph(self, node):\n text = ''.join(self._current_paragraph)\n text = ''.join(self._indent_stack) + text\n self._indent_stack = [\n ' ' * len(item) for item in self._indent_stack]\n text = text.replace('\\n', '\\n' + ''.join(self._indent_stack))\n self.paragraphs.append(text)\n self._current_paragraph = None\n\n def visit_Text(self, node):\n self._current_paragraph.append(node)\n\n depart_Text = _do_nothing\n\n def visit_emphasis(self, node):\n self._current_paragraph.append('\\033[3m') # *foo*: italic\n\n def visit_strong(self, node):\n self._current_paragraph.append('\\033[1m') # **foo**: bold\n\n def visit_title_reference(self, node):\n self._current_paragraph.append('\\033[4m') # `foo`: underlined\n\n def _depart_markup(self, node):\n self._current_paragraph.append('\\033[0m')\n\n depart_emphasis = depart_strong = depart_title_reference = \\\n _depart_markup\n\n def visit_literal_block(self, node):\n text, = node\n self.start_lines.append(node.line)\n self.paragraphs.append(re.sub('^|\\n', r'\\g<0> ', text)) # indent\n raise SkipNode\n\n def visit_bullet_list(self, node):\n self._indent_iterator_stack.append(\n (node['bullet'] + ' ' for _ in range(len(node))))\n\n def depart_bullet_list(self, node):\n self._indent_iterator_stack.pop()\n\n def visit_enumerated_list(self, node):\n enumtype = node['enumtype']\n fmt = {('(', ')'): 'parens',\n ('', ')'): 'rparen',\n ('', '.'): 'period'}[node['prefix'], node['suffix']]\n try:\n start = node['start']\n except KeyError:\n start = 1\n else:\n start = {\n 'arabic': int,\n 'loweralpha': lambda s: ord(s) - ord('a') + 1,\n 'upperalpha': lambda s: ord(s) - ord('A') + 1,\n 'lowerroman': lambda s: roman.fromRoman(s.upper()),\n 'upperroman': lambda s: roman.fromRoman(s),\n }[enumtype](start)\n enumerators = [Body(None).make_enumerator(i, enumtype, fmt)[0]\n for i in range(start, start + len(node))]\n width = max(map(len, enumerators))\n enumerators = [enum.ljust(width) for enum in enumerators]\n self._indent_iterator_stack.append(iter(enumerators))\n\n def depart_enumerated_list(self, node):\n self._indent_iterator_stack.pop()\n\n def visit_list_item(self, node):\n self._indent_stack.append(next(self._indent_iterator_stack[-1]))\n\n def depart_list_item(self, node):\n self._indent_stack.pop()\n\n def visit_field(self, node):\n field_name_node, field_body_node = node\n field_name, = field_name_node\n parts = field_name.split()\n if len(parts) == 2:\n doctype, name = parts\n elif len(parts) == 3:\n doctype, type_, name = parts\n if doctype not in _PARAM_TYPES:\n raise SkipNode\n if 'type' in self.params[name]:\n raise ValueError('type defined twice for 
{}'.format(name))\n self.params[name]['type'] = type_\n else:\n raise SkipNode\n if doctype in _PARAM_TYPES:\n doctype = 'param'\n if doctype in _TYPE_NAMES:\n doctype = 'type'\n if doctype in self.params[name]:\n raise ValueError('{} defined twice for {}'.format(doctype, name))\n visitor = Visitor(self.document)\n field_body_node.walkabout(visitor)\n self.params[name][doctype] = ''.join(visitor.paragraphs)\n raise SkipNode\n\n def visit_comment(self, node):\n raise SkipNode\n\n def visit_system_message(self, node):\n raise SkipNode\n\n visitor = Visitor(tree)\n tree.walkabout(visitor)\n\n tuples = {name: _Param(values.get('param'), values.get('type'))\n for name, values in visitor.params.items()}\n if visitor.paragraphs:\n text = []\n for start, paragraph, next_start in zip(\n visitor.start_lines,\n visitor.paragraphs,\n visitor.start_lines[1:] + [0]):\n text.append(paragraph)\n # We insert a space before each newline to prevent argparse\n # from stripping consecutive newlines down to just two\n # (http://bugs.python.org/issue31330).\n text.append(' \\n' * (next_start - start - paragraph.count('\\n')))\n parsed = _Doc('', ''.join(text), tuples)\n else:\n parsed = _Doc('', '', tuples)\n _parse_docstring_cache[_cache_key] = parsed\n return parsed",
"def sphinxify(docstring, context, buildername='html'):\n\n srcdir = mkdtemp()\n srcdir = encoding.to_unicode_from_fs(srcdir)\n\n base_name = osp.join(srcdir, 'docstring')\n rst_name = base_name + '.rst'\n\n if buildername == 'html':\n suffix = '.html'\n else:\n suffix = '.txt'\n output_name = base_name + suffix\n\n # This is needed so users can type \\\\ on latex eqnarray envs inside raw\n # docstrings\n if context['right_sphinx_version'] and context['math_on']:\n docstring = docstring.replace('\\\\\\\\', '\\\\\\\\\\\\\\\\')\n \n # Add a class to several characters on the argspec. This way we can\n # highlight them using css, in a similar way to what IPython does.\n argspec = context['argspec']\n for char in ['=', ',', '(', ')', '*', '**']:\n argspec = argspec.replace(char,\n '<span class=\"argspec-highlight\">' + char + '</span>')\n context['argspec'] = argspec\n\n doc_file = codecs.open(rst_name, 'w', encoding='utf-8')\n doc_file.write(docstring)\n doc_file.close()\n \n temp_confdir = False\n if temp_confdir:\n # TODO: This may be inefficient. Find a faster way to do it.\n confdir = mkdtemp()\n confdir = encoding.to_unicode_from_fs(confdir)\n generate_configuration(confdir)\n else:\n confdir = osp.join(get_module_source_path('spyderlib.utils.inspector'))\n\n confoverrides = {'html_context': context}\n\n doctreedir = osp.join(srcdir, 'doctrees')\n\n sphinx_app = Sphinx(srcdir, confdir, srcdir, doctreedir, buildername,\n confoverrides, status=None, warning=None,\n freshenv=True, warningiserror=False, tags=None)\n try:\n sphinx_app.build(None, [rst_name])\n except SystemMessage:\n output = _(\"It was not possible to generate rich text help for this \"\n \"object.</br>\"\n \"Please see it in plain text.\")\n return warning(output)\n\n # TODO: Investigate if this is necessary/important for us\n if osp.exists(output_name):\n output = codecs.open(output_name, 'r', encoding='utf-8').read()\n output = output.replace('<pre>', '<pre class=\"literal-block\">')\n else:\n output = _(\"It was not possible to generate rich text help for this \"\n \"object.</br>\"\n \"Please see it in plain text.\")\n return warning(output)\n\n if temp_confdir:\n shutil.rmtree(confdir, ignore_errors=True)\n shutil.rmtree(srcdir, ignore_errors=True)\n\n return output",
"def doc_apply(doc):\n\n def wrapper(func):\n func.__doc__ = doc\n return func\n\n return wrapper",
"def sphinxify(docstring, context, buildername='html', img_path=''):\n if img_path:\n if os.name == 'nt':\n img_path = img_path.replace('\\\\', '/')\n leading = '/' if os.name.startswith('posix') else ''\n docstring = docstring.replace('_images', leading+img_path)\n\n srcdir = osp.join(DOCDIR, '_sources')\n if not osp.exists(srcdir):\n os.makedirs(srcdir)\n base_name = osp.join(srcdir, xrtQookPageName)\n rst_name = base_name + '.rst'\n\n # This is needed so users can type \\\\ on latex eqnarray envs inside raw\n # docstrings\n docstring = docstring.replace('\\\\\\\\', '\\\\\\\\\\\\\\\\')\n\n # Add a class to several characters on the argspec. This way we can\n # highlight them using css, in a similar way to what IPython does.\n # NOTE: Before doing this, we escape common html chars so that they\n # don't interfere with the rest of html present in the page\n argspec = escape(context['argspec'])\n for char in ['=', ',', '(', ')', '*', '**']:\n argspec = argspec.replace(\n char, '<span class=\"argspec-highlight\">' + char + '</span>')\n context['argspec'] = argspec\n\n doc_file = codecs.open(rst_name, 'w', encoding='utf-8')\n doc_file.write(docstring)\n doc_file.close()\n\n confoverrides = {'html_context': context,\n 'extensions': ['sphinx.ext.mathjax',\n 'sphinxcontrib.jquery']}\n\n doctreedir = osp.join(DOCDIR, 'doctrees')\n sphinx_app = Sphinx(srcdir, DOCDIR, DOCDIR, doctreedir, buildername,\n confoverrides, status=None, warning=None,\n freshenv=True, warningiserror=False, tags=None)\n\n try:\n sphinx_app.build(None, [rst_name])\n except SystemMessage:\n pass",
"def __init__ (self, isInternal, docstring, name, args, isConst):\n\n self.name = name\n self.isConst = isConst\n self.isInternal = isInternal\n\n if isInternal:\n if language == 'java':\n # We have a special Javadoc doclet that understands a non-standard\n # Javadoc tag, @internal. When present in the documentation string\n # of a method, it causes it to be excluded from the final\n # documentation output. @internal is something doxygen offers.\n #\n p = re.compile('(\\s+?)\\*/', re.MULTILINE)\n self.docstring = p.sub(r'\\1* @internal\\1*/', docstring)\n elif language == 'csharp':\n # We mark internal methods in a different way for C#.\n self.docstring = docstring\n else:\n self.docstring = \" @internal\\n\" + docstring\n else:\n self.docstring = docstring\n\n # In Java and C#, if a method is const and swig has to translate the type,\n # then for some reason swig cannot match up the resulting doc strings\n # that we put into %javamethodmodifiers. The result is that the java\n # documentation for the methods are empty. I can't figure out why, but\n # have figured out that if we omit the argument list in the doc string\n # that is put on %javamethodmodifiers for such case, swig does generate \n # the comments for those methods. This approach is potentially dangerous\n # because swig might attach the doc string to the wrong method if a\n # methods has multiple versions with varying argument types, but the\n # combination doesn't seem to arise in antimony currently, and anyway,\n # this fixes a real problem in the Java documentation for antimony.\n\n if language == 'java' or language == 'csharp':\n if isConst and (args.find('unsigned int') >= 0):\n self.args = ''\n elif not args.strip() == '()':\n if isConst:\n self.args = args + ' const'\n else:\n self.args = args\n else:\n if isConst:\n self.args = '() const'\n else:\n self.args = ''\n else:\n self.args = args",
"def clean_doc(doc):\r\n # Replace regular enter (i.e. mere comment formatting in cpp file)\r\n # with space\r\n doc = doc.replace(\"\\n\", \" \")\r\n\r\n # The removal can cause a \"hard enter\" (literal \\n) to get an unintended\r\n # trailing space - trim those.\r\n doc = doc.replace(\"\\\\n \", \"\\\\n\")\r\n return '\"%s\"' % doc",
"def DocString():\n return",
"def update_javadoc(javadoc=None, since=None, author=None):\n\n if javadoc is None:\n javadoc = \"\\n\\n/**\\n */\\n\"\n\n if since is not None:\n javadoc = re.sub(\"/\\*\\*\", \"/**\\n * @since \" + since, javadoc)\n\n if author is not None:\n javadoc = re.sub(\"/\\*\\*\", \"/**\\n * @author \" + author, javadoc)\n\n return javadoc",
"def docstring_format(*values):\n\n def _decorator_(function):\n function.__doc__ = function.__doc__.format(*values).replace('_', '\\_')\n return function\n\n return _decorator_",
"def strip_docstring(blob):\n docstring = True\n while docstring == True:\n match_docstring = re.search('\\n\\s*\"\"\"[^\"\"\"]*\"\"\"', blob)\n if not match_docstring:\n docstring = False\n else:\n blob = blob.replace(blob[match_docstring.span()[0]:match_docstring.span()[1]], '')\n return blob",
"def update_docstring(instance):\n try:\n docstring = instance.api_map['doc']\n except (KeyError, TypeError):\n docstring = 'No docstring provided.'\n\n instance.__class__.__doc__ = docstring\n instance.__class__.__call__.__signature__ = construct_signature(instance)\n\n return docstring",
"def public_fn_with_googley_docstring(self, name, another, state=None):\n return 0",
"def remove_comments_and_docstrings(source):\n io_obj = StringIO(source)\n out = \"\"\n prev_toktype = tokenize.INDENT\n last_lineno = -1\n last_col = 0\n for tok in tokenize.generate_tokens(io_obj.readline):\n token_type = tok[0]\n token_string = tok[1]\n start_line, start_col = tok[2]\n end_line, end_col = tok[3]\n ltext = tok[4]\n # The following two conditionals preserve indentation.\n # This is necessary because we're not using tokenize.untokenize()\n # (because it spits out code with copious amounts of oddly-placed\n # whitespace).\n if start_line > last_lineno:\n last_col = 0\n if start_col > last_col:\n out += (\" \" * (start_col - last_col))\n # Remove comments:\n if token_type == tokenize.COMMENT:\n pass\n # This series of conditionals removes docstrings:\n elif token_type == tokenize.STRING:\n if prev_toktype != tokenize.INDENT:\n # This is likely a docstring; double-check we're not inside an operator:\n if prev_toktype != tokenize.NEWLINE:\n # Note regarding NEWLINE vs NL: The tokenize module\n # differentiates between newlines that start a new statement\n # and newlines inside of operators such as parens, brackes,\n # and curly braces. Newlines inside of operators are\n # NEWLINE and newlines that start new code are NL.\n # Catch whole-module docstrings:\n if start_col > 0:\n # Unlabelled indentation means we're inside an operator\n out += token_string\n # Note regarding the INDENT token: The tokenize module does\n # not label indentation inside of an operator (parens,\n # brackets, and curly braces) as actual indentation.\n # For example:\n # def foo():\n # \"The spaces before this docstring are tokenize.INDENT\"\n # test = [\n # \"The spaces before this string do not get a token\"\n # ]\n\n else:\n out += token_string\n prev_toktype = token_type\n last_col = end_col\n last_lineno = end_line\n out = '\\n'.join([line for line in out.splitlines() if line.strip()])\n return out",
"def docstrings(param1, param2):\n return \"example string\"",
"def doDocStrings(parentNode, srcNode):\n def makeDocElement(name, content):\n node = libxml2.newNode(name)\n node.addChild(libxml2.newText(content))\n return node\n \n autodoc = getAttr(srcNode, \"python_autodoc\")\n docstr = getAttr(srcNode, \"feature_docstring\")\n if autodoc:\n parentNode.addChild(makeDocElement(\"autodoc\", autodoc))\n if docstr:\n parentNode.addChild(makeDocElement(\"docstring\", docstr))",
"def doc_string():\n pass # pass does nothing",
"def convert_doxygen_format(line, name):\n\n line = line.replace('<b>', '**').replace('</b>', '**')\n\n # Single backticks in doxygen map to doubles in Sphinx\n line = line.replace('`', '``')\n\n # But double backticks are literal backticks\n line = line.replace('````', '\\\\`')\n\n # Search for method and class references. We pick them up either when they\n # have a scoping operator, or when they end with (), or we would match all\n # the words in the text!\n origline = line\n for m in re.finditer(r'\\b([a-zA-Z_][a-zA-Z0-9_.:]*)\\(\\)|\\b([a-zA-Z_][a-zA-Z0-9_]*::[a-zA-Z_][a-zA-Z0-9_.:]*)\\b', origline):\n result = resolve_reference(m.group(0).rstrip('()'), name)\n if not result:\n continue\n\n if '::' in m.group():\n # We want a scoped name, apparently.\n ref = ':{0}:`{1}`'.format(*result)\n else:\n ref = ':{0}:`~{1}`'.format(*result)\n\n # Are we inside double-backticks?\n if origline[:m.start()].count('``') % 2 != 0:\n # Only replace if it's entirely wrapped in backticks.\n line = line.replace('``' + m.group() + '``', ref)\n else:\n line = line.replace(m.group(), ref)\n\n return line"
] |
[
"0.77604336",
"0.7191963",
"0.6982577",
"0.6753266",
"0.6694719",
"0.66529524",
"0.6194092",
"0.5882187",
"0.58658916",
"0.5842359",
"0.58198625",
"0.57814825",
"0.576712",
"0.5761573",
"0.5745436",
"0.56991446",
"0.5682727",
"0.5671348",
"0.5625877",
"0.5596721",
"0.5591246",
"0.554066",
"0.551289",
"0.5495454",
"0.54776263",
"0.5451674",
"0.54497224",
"0.5416314",
"0.54144084",
"0.54098445"
] |
0.77743816
|
0
|
rewriteDocstringForPerl (docstring) -> docstring Performs some minimal Perl-specific sanitizations on the C++/Doxygen docstring.
|
def rewriteDocstringForPerl (docstring):
# Get rid of the /** ... */ and leading *'s.
docstring = docstring.replace('/**', '').replace('*/', '').replace('*', ' ')
# Get rid of indentation
p = re.compile('^\s+(\S*\s*)', re.MULTILINE)
docstring = p.sub(r'\1', docstring)
# Get rid of paragraph indentation not caught by the code above.
p = re.compile('^[ \t]+(\S)', re.MULTILINE)
docstring = p.sub(r'\1', docstring)
# Get rid of blank lines.
p = re.compile('^[ \t]+$', re.MULTILINE)
docstring = p.sub(r'', docstring)
# Get rid of the %foo quoting.
docstring = re.sub('(\s)%(\w)', r'\1\2', docstring)
# The following are done in pairs because I couldn't come up with a
# better way to catch the case where @c and @em end up alone at the end
# of a line and the thing to be formatted starts on the next one after
# the comment '*' character on the beginning of the line.
docstring = re.sub('@c *([^ ,.:;()/*\n\t]+)', r'C<\1>', docstring)
docstring = re.sub('@c(\n[ \t]*\*[ \t]*)([^ ,.:;()/*\n\t]+)', r'\1C<\2>', docstring)
docstring = re.sub('@p +([^ ,.:;()/*\n\t]+)', r'C<\1>', docstring)
docstring = re.sub('@p(\n[ \t]*\*[ \t]+)([^ ,.:;()/*\n\t]+)', r'\1C<\2>', docstring)
docstring = re.sub('@em *([^ ,.:;()/*\n\t]+)', r'I<\1>', docstring)
docstring = re.sub('@em(\n[ \t]*\*[ \t]*)([^ ,.:;()/*\n\t]+)', r'\1I<\2>', docstring)
docstring = docstring.replace('<ul>', '\n=over\n')
docstring = docstring.replace('<li> ', '\n=item\n\n')
docstring = docstring.replace('</ul>', '\n=back\n')
docstring = docstring.replace('@return', 'Returns')
docstring = docstring.replace(' < ', ' E<lt> ').replace(' > ', ' E<gt> ')
docstring = re.sub('<code>([^<]*)</code>', r'C<\1>', docstring)
docstring = re.sub('<b>([^<]*)</b>', r'B<\1>', docstring)
return docstring
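For reference, a minimal sketch of the kind of Doxygen-to-POD rewriting performed above, applied to an invented sample comment; the sample input and the expected output are assumptions for illustration, and the substitutions are only a small subset of those in rewriteDocstringForPerl.

import re

# Invented sample input: a small Doxygen-style comment block.
sample = '''/**
 * Returns the @em name of the object.
 *
 * @return the name, or @c NULL if unset.
 */'''

# Strip the /** ... */ delimiters and the leading '*', as the function above does.
text = sample.replace('/**', '').replace('*/', '').replace('*', ' ')

# A subset of the Doxygen -> POD substitutions from rewriteDocstringForPerl.
text = re.sub(r'@c *([^ ,.:;()/*\n\t]+)', r'C<\1>', text)   # @c foo  -> C<foo>
text = re.sub(r'@em *([^ ,.:;()/*\n\t]+)', r'I<\1>', text)  # @em foo -> I<foo>
text = text.replace('@return', 'Returns')                   # @return -> Returns

print(text)
# Roughly: "Returns the I<name> of the object." and "Returns the name, or C<NULL> if unset."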
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def rewriteDocstringForPython (docstring):\n\n # Take out the C++ comment start and end.\n\n docstring = docstring.replace('/**', '').replace('*/', '')\n p = re.compile('^(\\s*)\\*([ \\t]*)', re.MULTILINE)\n docstring = p.sub(r'\\2', docstring)\n\n # Rewrite some of the data type references to equivalent Python types.\n # (Note: this rewriting affects only the documentation comments inside\n # classes & methods, not the method signatures.)\n\n docstring = docstring.replace(r'const char *', 'string ')\n docstring = docstring.replace(r'const char* ', 'string ')\n docstring = docstring.replace(r'an unsigned int', 'a long integer')\n docstring = docstring.replace(r'unsigned int', 'long')\n docstring = docstring.replace(r'const std::string&', 'string')\n docstring = docstring.replace(r'const std::string', 'string')\n docstring = docstring.replace(r'std::string', 'string')\n docstring = docstring.replace(r'NULL', 'None')\n docstring = docstring.replace(r'@c true', '@c True')\n docstring = docstring.replace(r'@c false', '@c False')\n\n # Also use Python syntax instead of \"const XMLNode*\" etc.\n\n p = re.compile(r'const (%?)(' + r') ?(\\*|&)', re.DOTALL)\n docstring = p.sub(rewriteClassRef, docstring) \n p = re.compile(r'(%?)(' + r') ?(\\*|&)', re.DOTALL)\n docstring = p.sub(rewriteClassRef, docstring) \n\n # Need to escape the quotation marks:\n\n docstring = docstring.replace('\"', \"'\")\n docstring = docstring.replace(r\"'\", r\"\\'\")\n\n # Python method cross-references won't be made by doxygen unless\n # the method reference is written without arguments.\n\n p = re.compile('(\\s+)(\\S+?)::(\\w+\\s*)(\\([^)]*?\\))', re.MULTILINE)\n docstring = p.sub(translatePythonCrossRef, docstring)\n p = re.compile('(@see\\s+)(\\w+\\s*)(\\([^)]*?\\))')\n docstring = p.sub(translatePythonSeeRef, docstring)\n\n # Friggin' doxygen escapes HTML character codes, so the hack we have to\n # do for Javadoc turns out doesn't work for the Python documentation.\n # Kluge around it.\n\n docstring = re.sub(r'\\\\f\\$\\\\geq\\\\f\\$', '>=', docstring)\n docstring = re.sub(r'\\\\f\\$\\\\leq\\\\f\\$', '<=', docstring)\n docstring = re.sub(r'\\\\f\\$\\\\times\\\\f\\$', '*', docstring)\n\n # SWIG does some bizarre truncation of leading characters that\n # happens to hit us because of how we have to format verbatim's.\n # This tries to kluge around it: \n p = re.compile('@verbatim.+?@endverbatim', re.DOTALL)\n docstring = p.sub(indentVerbatimForPython, docstring)\n\n return docstring",
"def sanitizeForHTML (docstring):\n\n # Remove @~, which we use as a hack in Doxygen 1.7-1.8\n\n docstring = docstring.replace(r'@~', '')\n\n # First do conditional section inclusion based on the current language.\n # Our possible conditional elements and their meanings are:\n #\n # java: only Java\n # python: only Python\n # perl: only Perl\n # cpp: only C++\n # csharp: only C#\n # conly: only C\n # clike: C, C++\n # notcpp:\tnot C++\n # notclike: not C or C++\n #\n # The notcpp/notclike variants are because Doxygen 1.6.x doesn't have\n # @ifnot, yet sometimes we want to say \"if not C or C++\".\n\n cases = 'java|python|perl|cpp|csharp|conly|clike|notcpp|notclike'\n p = re.compile('@if\\s+(' + cases + ')\\s+(.+?)((@else)\\s+(.+?))?@endif', re.DOTALL)\n docstring = p.sub(translateIfElse, docstring)\n\n # Replace blank lines between paragraphs with <p>. There are two main\n # cases: comments blocks whose lines always begin with an asterix (e.g.,\n # C/C++), and comment blocks where they don't (e.g., Python). The third\n # substitution below does the same thing for blank lines, except for the\n # very end of the doc string.\n\n p = re.compile('^(\\s+)\\*\\s*$', re.MULTILINE)\n docstring = p.sub(r'\\1* <p>', docstring)\n p = re.compile('^((?!\\s+\\Z)\\s+)$', re.MULTILINE)\n docstring = p.sub(r'\\1<p>', docstring)\n p = re.compile('^(?!\\Z)$', re.MULTILINE)\n docstring = p.sub(r'<p>', docstring)\n\n # Javadoc doesn't have an @htmlinclude command, so we process the file\n # inclusion directly here.\n\n p = re.compile('@htmlinclude\\s+([^\\s:;,(){}+|?\"\\'/]+)([\\s:;,(){}+|?\"\\'/])', re.MULTILINE)\n docstring = p.sub(translateInclude, docstring)\n\n # There's no Javadoc verbatim or @code/@endcode equivalent, so we have to\n # convert it to raw HTML and transform the content too. This requires\n # helpers. The following treats both @verbatim and @code the same way.\n\n p = re.compile('@verbatim.+?@endverbatim', re.DOTALL)\n docstring = p.sub(translateVerbatim, docstring)\n p = re.compile('@code.+?@endcode', re.DOTALL)\n docstring = p.sub(translateVerbatim, docstring)\n\n # Javadoc doesn't have a @section or @subsection commands, so we translate\n # those ourselves.\n\n p = re.compile('@section\\s+[^\\s]+\\s+(.*)$', re.MULTILINE)\n docstring = p.sub(r'<h2>\\1</h2>', docstring)\n p = re.compile('@subsection\\s+[^\\s]+\\s+(.*)$', re.MULTILINE)\n docstring = p.sub(r'<h3>\\1</h3>', docstring)\n p = re.compile('@subsubsection\\s+[^\\s]+\\s+(.*)$', re.MULTILINE)\n docstring = p.sub(r'<h4>\\1</h4>', docstring)\n\n # Javadoc doesn't have an @image command. We translate @image html\n # but ditch @image latex.\n\n p = re.compile('@image\\s+html+\\s+([^\\s]+).*$', re.MULTILINE)\n docstring = p.sub(r\"<center><img src='\\1'></center><br>\", docstring)\n p = re.compile('@image\\s+latex+\\s+([^\\s]+).*$', re.MULTILINE)\n docstring = p.sub(r'', docstring)\n\n # Doxygen doesn't understand HTML character codes like ≥, so we've\n # been using doxygen's Latex facility to get special mathematical\n # characters into the documentation, but as luck would have it, Javadoc\n # doesn't understand the Latex markup. 
All of this is getting old.\n\n docstring = re.sub(r'\\\\f\\$\\\\geq\\\\f\\$', '≥', docstring)\n docstring = re.sub(r'\\\\f\\$\\\\leq\\\\f\\$', '≤', docstring)\n docstring = re.sub(r'\\\\f\\$\\\\times\\\\f\\$', '×', docstring)\n\n # The following are done in pairs because I couldn't come up with a\n # better way to catch the case where @c and @em end up alone at the end\n # of a line and the thing to be formatted starts on the next one after\n # the comment '*' character on the beginning of the line.\n\n docstring = re.sub('@c *([^ ,;()/*\\n\\t]+)', r'<code>\\1</code>', docstring)\n docstring = re.sub('@c(\\n[ \\t]*\\*[ \\t]*)([^ ,;()/*\\n\\t]+)', r'\\1<code>\\2</code>', docstring)\n docstring = re.sub('@p +([^ ,.:;()/*\\n\\t]+)', r'<code>\\1</code>', docstring)\n docstring = re.sub('@p(\\n[ \\t]*\\*[ \\t]+)([^ ,.:;()/*\\n\\t]+)', r'\\1<code>\\2</code>', docstring)\n docstring = re.sub('@em *([^ ,.:;()/*\\n\\t]+)', r'<em>\\1</em>', docstring)\n docstring = re.sub('@em(\\n[ \\t]*\\*[ \\t]*)([^ ,.:;()/*\\n\\t]+)', r'\\1<em>\\2</em>', docstring)\n\n # Convert @li into <li>, but also add <ul> ... </ul>. This is a bit\n # simple-minded (I suppose like most of this code), but ought to work\n # for the cases we use in practice.\n\n p = re.compile('^(\\s+\\*\\s+)(@li\\s+.*?)(\\s+)(\\*/|\\*\\s+@(?!li\\s)|\\*\\s+<p>)', re.MULTILINE|re.DOTALL)\n docstring = p.sub(rewriteList, docstring)\n\n # Wrap @deprecated content with a class so that we can style it.\n\n p = re.compile('^(\\s+\\*\\s+)(@deprecated\\s)((\\S|\\s)+)(<p>|\\*/)', re.MULTILINE|re.DOTALL)\n docstring = p.sub(rewriteDeprecated, docstring)\n\n # Doxygen automatically cross-references class names in text to the class\n # definition page, but Javadoc does not. Rather than having to put in a\n # lot conditional @if/@endif's into the documentation to manually create\n # cross-links just for the Java case, let's automate. This needs to be\n # done better (e.g., by not hard-wiring the class names).\n\n p = re.compile(r'([^a-zA-Z0-9_.\">])(' + r')\\b([^:])', re.DOTALL)\n if language == 'csharp':\n docstring = p.sub(translateClassRefCSharp, docstring)\n elif language == 'java':\n docstring = p.sub(translateClassRefJava, docstring)\n\n # Massage method cross-references.\n\n p = re.compile('(\\s+)(\\S+?)::(\\w+\\s*\\([^)]*?\\))', re.MULTILINE)\n if language == 'csharp':\n docstring = p.sub(translateCSharpCrossRef, docstring)\n elif language == 'java':\n docstring = p.sub(translateJavaCrossRef, docstring)\n\n # Clean-up step needed because some of the procedures above are imperfect.\n # This converts \" * * @foo\" lines into \" * @foo\":\n\n p = re.compile('^(\\s+)\\*\\s+\\*\\s+@', re.MULTILINE)\n docstring = p.sub(r'\\1* @', docstring)\n\n # Take out any left-over Doxygen-style quotes, because Javadoc doesn't have\n # the %foo quoting mechanism.\n\n docstring = re.sub('(\\s)%(\\w)', r'\\1\\2', docstring)\n\n # Currently, we don't handle @ingroup.\n\n docstring = re.sub('@ingroup \\w+', '', docstring)\n\n return docstring",
"def docstring_hack():\n pass",
"def rewriteDocstringForCSharp (docstring):\n\n # Preliminary: rewrite some of the data type references to equivalent\n # C# types. (Note: this rewriting affects only the documentation\n # comments inside classes & methods, not the actual method signatures.)\n\n docstring = docstring.replace(r'const char *', 'string ')\n docstring = docstring.replace(r'const char* ', 'string ')\n docstring = docstring.replace(r'an unsigned int', 'a long integer')\n docstring = docstring.replace(r'unsigned int', 'long')\n docstring = docstring.replace(r'const std::string&', 'string')\n docstring = docstring.replace(r'const std::string &', 'string ')\n docstring = docstring.replace(r'const std::string', 'string')\n docstring = docstring.replace(r'std::string', 'string')\n docstring = docstring.replace(r'const ', '')\n docstring = docstring.replace(r'NULL', 'null')\n docstring = docstring.replace(r'boolean', 'bool')\n\n # Use C# syntax instead of \"const XMLNode*\" etc.\n\n p = re.compile(r'const (%?)(' + r')( ?)(\\*|&)', re.DOTALL)\n docstring = p.sub(rewriteClassRefAddingSpace, docstring) \n p = re.compile(r'(%?)(' + r')( ?)(\\*|&)', re.DOTALL)\n docstring = p.sub(rewriteClassRefAddingSpace, docstring) \n\n # <code> has its own special meaning in C#; we have to turn our input\n # file's uses of <code> into <c>. Conversely, we have to turn our\n # uses of verbatim to <code>.\n\n p = re.compile(r'<code>(.+?)</code>', re.DOTALL)\n docstring = p.sub(r'<c>\\1</c>', docstring)\n p = re.compile('@verbatim(.+?)@endverbatim', re.DOTALL)\n docstring = p.sub(r'<code>\\1</code>', docstring)\n\n # Do replacements on some documentation text we sometimes use.\n\n p = re.compile(r'antimonyConstants([@.])')\n docstring = p.sub(r'antimonycs.antimony\\1', docstring)\n\n # Fix @link for constants that we forgot conditionalize in the source.\n\n p = re.compile(r'@link +([A-Z_0-9]+?)@endlink', re.DOTALL)\n docstring = p.sub(r'@link antimony.\\1@endlink', docstring)\n\n # Can't use math symbols. Kluge around it.\n\n docstring = re.sub(r'\\\\f\\$\\\\geq\\\\f\\$', '>=', docstring)\n docstring = re.sub(r'\\\\f\\$\\\\leq\\\\f\\$', '<=', docstring)\n docstring = re.sub(r'\\\\f\\$\\\\times\\\\f\\$', '*', docstring)\n\n # Some additional special cases.\n\n docstring = docstring.replace(r'SBML_formulaToString()', 'antimonycs.antimony.formulaToString()')\n docstring = docstring.replace(r'SBML_parseFormula()', 'antimonycs.antimony.parseFormula()')\n\n # Need to escape the quotation marks:\n\n docstring = docstring.replace('\"', \"'\")\n docstring = docstring.replace(r\"'\", r\"\\'\") \n\n return docstring",
"def rewriteDocstringForJava (docstring):\n\n # Preliminary: rewrite some of the data type references to equivalent\n # Java types. (Note: this rewriting affects only the documentation\n # comments inside classes & methods, not the method signatures.)\n\n docstring = docstring.replace(r'const char *', 'String ')\n docstring = docstring.replace(r'const char* ', 'String ')\n docstring = docstring.replace(r'an unsigned int', 'a long integer')\n docstring = docstring.replace(r'unsigned int', 'long')\n docstring = docstring.replace(r'const std::string&', 'String')\n docstring = docstring.replace(r'const std::string &', 'String ')\n docstring = docstring.replace(r'const std::string ', 'String ')\n docstring = docstring.replace(r'std::string', 'String')\n docstring = docstring.replace(r'NULL', 'null')\n\n # Also use Java syntax instead of \"const XMLNode*\" etc.\n\n p = re.compile(r'const (%?)(' + r')( ?)(\\*|&)', re.DOTALL)\n docstring = p.sub(rewriteClassRefAddingSpace, docstring) \n p = re.compile(r'(%?)(' + r')( ?)(\\*|&)', re.DOTALL)\n docstring = p.sub(rewriteClassRefAddingSpace, docstring) \n\n # Do the big work.\n\n docstring = sanitizeForHTML(docstring)\n\n # Fix up for a problem introduced by sanitizeForHTML: it puts {@link ...}\n # into the arguments of functions mentioned in @see's, if the function\n # has more than one argument. This gets rid of the @link's. This should\n # be fixed properly some day.\n\n p = re.compile(r'((@see|@throws)\\s+[\\w\\\\ ,.\\'\"=<>()#]*?){@link\\s+([^}]+?)}')\n while re.search(p, docstring) != None:\n docstring = p.sub(r'\\1\\3', docstring)\n\n # Inside of @see, change double colons to pound signs.\n\n docstring = re.sub('(@see\\s+\\w+)::', r'\\1#', docstring)\n\n # The syntax for @see is slightly different: method names need to have a\n # leading pound sign character. This particular bit of code only handles\n # a single @see foo(), which means the docs have to be written that way.\n # Maybe someday in the future it should be expanded to handle\n # @see foo(), bar(), etc., but I don't have time right now to do it.\n\n docstring = re.sub('(@see\\s+)([\\w:.]+)\\(', r'\\1#\\2(', docstring)\n\n # Remove the '*' character that Javadoc doesn't want to see in @see's.\n # (This doesn't make a difference; javadoc still can't match up the refs.)\n\n # p = re.compile('@see[\\s\\w.:,()#]+[*][\\s\\w.:,()*#]')\n # docstring = p.sub(removeStar, docstring)\n\n # The syntax for @link is vastly different.\n \n p = re.compile('@link([\\s/*]+[\\w\\s,.:#()*]+[\\s/*]*[\\w():#]+[\\s/*]*)@endlink', re.DOTALL)\n docstring = p.sub(r'{@link \\1}', docstring)\n\n # Outside of @see and other constructs, dot is used to reference members\n # instead of C++'s double colon.\n\n docstring = docstring.replace(r'::', '.')\n\n # Need to escape quotation marks. The reason is that the\n # %javamethodmodifiers directives created for use with SWIG will\n # themselves be double-quoted strings, and leaving embedded quotes\n # will completely screw that up.\n\n docstring = docstring.replace('\"', \"'\")\n docstring = docstring.replace(r\"'\", r\"\\'\")\n\n return docstring",
"def docstring(\n docstring: str = None, *, pre: str = None, post: str = None\n) -> Callable[[U], U]:\n\n def edit_docstring(obj: U) -> U:\n obj.__doc__ = \"\".join(\n (\n clean_docstring(pre or \"\", unused=\"pre\"),\n clean_docstring(docstring or (obj.__doc__ or \"\")),\n clean_docstring(post or \"\", unused=\"post\"),\n )\n )\n return obj\n\n return edit_docstring",
"def docstring(self, docstring): # type: (str) -> None\n self._tmp_docstring = inspect.cleandoc(docstring)",
"def main_docstring():",
"def _parse_docstring(doc):\n _cache_key = doc\n try:\n return _parse_docstring_cache[_cache_key]\n except KeyError:\n pass\n\n if doc is None:\n return _Doc('', '', {})\n\n # Convert Google- or Numpy-style docstrings to RST.\n # (Should do nothing if not in either style.)\n doc = str(GoogleDocstring(doc))\n doc = str(NumpyDocstring(doc))\n\n tree = publish_doctree(doc)\n\n class Visitor(NodeVisitor):\n optional = [\n 'document', 'docinfo',\n 'field_list', 'field_body',\n 'literal', 'problematic']\n\n def __init__(self, document):\n NodeVisitor.__init__(self, document)\n self.paragraphs = []\n self.start_lines = []\n self.params = defaultdict(dict)\n self._current_paragraph = None\n self._indent_iterator_stack = []\n self._indent_stack = []\n\n def _do_nothing(self, node):\n pass\n\n def visit_paragraph(self, node):\n self.start_lines.append(node.line)\n self._current_paragraph = []\n\n def depart_paragraph(self, node):\n text = ''.join(self._current_paragraph)\n text = ''.join(self._indent_stack) + text\n self._indent_stack = [\n ' ' * len(item) for item in self._indent_stack]\n text = text.replace('\\n', '\\n' + ''.join(self._indent_stack))\n self.paragraphs.append(text)\n self._current_paragraph = None\n\n def visit_Text(self, node):\n self._current_paragraph.append(node)\n\n depart_Text = _do_nothing\n\n def visit_emphasis(self, node):\n self._current_paragraph.append('\\033[3m') # *foo*: italic\n\n def visit_strong(self, node):\n self._current_paragraph.append('\\033[1m') # **foo**: bold\n\n def visit_title_reference(self, node):\n self._current_paragraph.append('\\033[4m') # `foo`: underlined\n\n def _depart_markup(self, node):\n self._current_paragraph.append('\\033[0m')\n\n depart_emphasis = depart_strong = depart_title_reference = \\\n _depart_markup\n\n def visit_literal_block(self, node):\n text, = node\n self.start_lines.append(node.line)\n self.paragraphs.append(re.sub('^|\\n', r'\\g<0> ', text)) # indent\n raise SkipNode\n\n def visit_bullet_list(self, node):\n self._indent_iterator_stack.append(\n (node['bullet'] + ' ' for _ in range(len(node))))\n\n def depart_bullet_list(self, node):\n self._indent_iterator_stack.pop()\n\n def visit_enumerated_list(self, node):\n enumtype = node['enumtype']\n fmt = {('(', ')'): 'parens',\n ('', ')'): 'rparen',\n ('', '.'): 'period'}[node['prefix'], node['suffix']]\n try:\n start = node['start']\n except KeyError:\n start = 1\n else:\n start = {\n 'arabic': int,\n 'loweralpha': lambda s: ord(s) - ord('a') + 1,\n 'upperalpha': lambda s: ord(s) - ord('A') + 1,\n 'lowerroman': lambda s: roman.fromRoman(s.upper()),\n 'upperroman': lambda s: roman.fromRoman(s),\n }[enumtype](start)\n enumerators = [Body(None).make_enumerator(i, enumtype, fmt)[0]\n for i in range(start, start + len(node))]\n width = max(map(len, enumerators))\n enumerators = [enum.ljust(width) for enum in enumerators]\n self._indent_iterator_stack.append(iter(enumerators))\n\n def depart_enumerated_list(self, node):\n self._indent_iterator_stack.pop()\n\n def visit_list_item(self, node):\n self._indent_stack.append(next(self._indent_iterator_stack[-1]))\n\n def depart_list_item(self, node):\n self._indent_stack.pop()\n\n def visit_field(self, node):\n field_name_node, field_body_node = node\n field_name, = field_name_node\n parts = field_name.split()\n if len(parts) == 2:\n doctype, name = parts\n elif len(parts) == 3:\n doctype, type_, name = parts\n if doctype not in _PARAM_TYPES:\n raise SkipNode\n if 'type' in self.params[name]:\n raise ValueError('type defined twice for 
{}'.format(name))\n self.params[name]['type'] = type_\n else:\n raise SkipNode\n if doctype in _PARAM_TYPES:\n doctype = 'param'\n if doctype in _TYPE_NAMES:\n doctype = 'type'\n if doctype in self.params[name]:\n raise ValueError('{} defined twice for {}'.format(doctype, name))\n visitor = Visitor(self.document)\n field_body_node.walkabout(visitor)\n self.params[name][doctype] = ''.join(visitor.paragraphs)\n raise SkipNode\n\n def visit_comment(self, node):\n raise SkipNode\n\n def visit_system_message(self, node):\n raise SkipNode\n\n visitor = Visitor(tree)\n tree.walkabout(visitor)\n\n tuples = {name: _Param(values.get('param'), values.get('type'))\n for name, values in visitor.params.items()}\n if visitor.paragraphs:\n text = []\n for start, paragraph, next_start in zip(\n visitor.start_lines,\n visitor.paragraphs,\n visitor.start_lines[1:] + [0]):\n text.append(paragraph)\n # We insert a space before each newline to prevent argparse\n # from stripping consecutive newlines down to just two\n # (http://bugs.python.org/issue31330).\n text.append(' \\n' * (next_start - start - paragraph.count('\\n')))\n parsed = _Doc('', ''.join(text), tuples)\n else:\n parsed = _Doc('', '', tuples)\n _parse_docstring_cache[_cache_key] = parsed\n return parsed",
"def __init__ (self, isInternal, docstring, name, args, isConst):\n\n self.name = name\n self.isConst = isConst\n self.isInternal = isInternal\n\n if isInternal:\n if language == 'java':\n # We have a special Javadoc doclet that understands a non-standard\n # Javadoc tag, @internal. When present in the documentation string\n # of a method, it causes it to be excluded from the final\n # documentation output. @internal is something doxygen offers.\n #\n p = re.compile('(\\s+?)\\*/', re.MULTILINE)\n self.docstring = p.sub(r'\\1* @internal\\1*/', docstring)\n elif language == 'csharp':\n # We mark internal methods in a different way for C#.\n self.docstring = docstring\n else:\n self.docstring = \" @internal\\n\" + docstring\n else:\n self.docstring = docstring\n\n # In Java and C#, if a method is const and swig has to translate the type,\n # then for some reason swig cannot match up the resulting doc strings\n # that we put into %javamethodmodifiers. The result is that the java\n # documentation for the methods are empty. I can't figure out why, but\n # have figured out that if we omit the argument list in the doc string\n # that is put on %javamethodmodifiers for such case, swig does generate \n # the comments for those methods. This approach is potentially dangerous\n # because swig might attach the doc string to the wrong method if a\n # methods has multiple versions with varying argument types, but the\n # combination doesn't seem to arise in antimony currently, and anyway,\n # this fixes a real problem in the Java documentation for antimony.\n\n if language == 'java' or language == 'csharp':\n if isConst and (args.find('unsigned int') >= 0):\n self.args = ''\n elif not args.strip() == '()':\n if isConst:\n self.args = args + ' const'\n else:\n self.args = args\n else:\n if isConst:\n self.args = '() const'\n else:\n self.args = ''\n else:\n self.args = args",
"def sphinxify(docstring, context, buildername='html'):\n\n srcdir = mkdtemp()\n srcdir = encoding.to_unicode_from_fs(srcdir)\n\n base_name = osp.join(srcdir, 'docstring')\n rst_name = base_name + '.rst'\n\n if buildername == 'html':\n suffix = '.html'\n else:\n suffix = '.txt'\n output_name = base_name + suffix\n\n # This is needed so users can type \\\\ on latex eqnarray envs inside raw\n # docstrings\n if context['right_sphinx_version'] and context['math_on']:\n docstring = docstring.replace('\\\\\\\\', '\\\\\\\\\\\\\\\\')\n \n # Add a class to several characters on the argspec. This way we can\n # highlight them using css, in a similar way to what IPython does.\n argspec = context['argspec']\n for char in ['=', ',', '(', ')', '*', '**']:\n argspec = argspec.replace(char,\n '<span class=\"argspec-highlight\">' + char + '</span>')\n context['argspec'] = argspec\n\n doc_file = codecs.open(rst_name, 'w', encoding='utf-8')\n doc_file.write(docstring)\n doc_file.close()\n \n temp_confdir = False\n if temp_confdir:\n # TODO: This may be inefficient. Find a faster way to do it.\n confdir = mkdtemp()\n confdir = encoding.to_unicode_from_fs(confdir)\n generate_configuration(confdir)\n else:\n confdir = osp.join(get_module_source_path('spyderlib.utils.inspector'))\n\n confoverrides = {'html_context': context}\n\n doctreedir = osp.join(srcdir, 'doctrees')\n\n sphinx_app = Sphinx(srcdir, confdir, srcdir, doctreedir, buildername,\n confoverrides, status=None, warning=None,\n freshenv=True, warningiserror=False, tags=None)\n try:\n sphinx_app.build(None, [rst_name])\n except SystemMessage:\n output = _(\"It was not possible to generate rich text help for this \"\n \"object.</br>\"\n \"Please see it in plain text.\")\n return warning(output)\n\n # TODO: Investigate if this is necessary/important for us\n if osp.exists(output_name):\n output = codecs.open(output_name, 'r', encoding='utf-8').read()\n output = output.replace('<pre>', '<pre class=\"literal-block\">')\n else:\n output = _(\"It was not possible to generate rich text help for this \"\n \"object.</br>\"\n \"Please see it in plain text.\")\n return warning(output)\n\n if temp_confdir:\n shutil.rmtree(confdir, ignore_errors=True)\n shutil.rmtree(srcdir, ignore_errors=True)\n\n return output",
"def clean_schema_doc_string(doc_str, add_prefix=None, add_postfix=None, rst_format='**', remove_html_tags=True):\n prefix = ' ' if add_prefix is None else add_prefix\n clean_doc_str = doc_str\n if remove_html_tags:\n clean_doc_str = clean_doc_str.replace('<', '<')\n clean_doc_str = clean_doc_str.replace('>', '>')\n clean_doc_str = clean_doc_str.replace('<b>', ' **')\n clean_doc_str = clean_doc_str.replace('</b>', '** ')\n clean_doc_str = clean_doc_str.replace('<i>', ' *')\n clean_doc_str = clean_doc_str.replace('</i>', '* ')\n clean_doc_str = clean_doc_str.replace(':blue:', '')\n\n clean_doc_str = clean_doc_str.replace('COMMENT:', '%s%sComment:%s ' %\n (prefix, rst_format, rst_format))\n clean_doc_str = clean_doc_str.replace('MORE_INFO:', '%s%sAdditional Information:%s ' %\n (prefix, rst_format, rst_format))\n clean_doc_str = clean_doc_str.replace('NOTE:', '%s %sAdditional Information:%s ' %\n (prefix, rst_format, rst_format))\n if add_postfix is not None:\n clean_doc_str += add_postfix\n return clean_doc_str",
"def sphinxify(docstring, context, buildername='html', img_path=''):\n if img_path:\n if os.name == 'nt':\n img_path = img_path.replace('\\\\', '/')\n leading = '/' if os.name.startswith('posix') else ''\n docstring = docstring.replace('_images', leading+img_path)\n\n srcdir = osp.join(DOCDIR, '_sources')\n if not osp.exists(srcdir):\n os.makedirs(srcdir)\n base_name = osp.join(srcdir, xrtQookPageName)\n rst_name = base_name + '.rst'\n\n # This is needed so users can type \\\\ on latex eqnarray envs inside raw\n # docstrings\n docstring = docstring.replace('\\\\\\\\', '\\\\\\\\\\\\\\\\')\n\n # Add a class to several characters on the argspec. This way we can\n # highlight them using css, in a similar way to what IPython does.\n # NOTE: Before doing this, we escape common html chars so that they\n # don't interfere with the rest of html present in the page\n argspec = escape(context['argspec'])\n for char in ['=', ',', '(', ')', '*', '**']:\n argspec = argspec.replace(\n char, '<span class=\"argspec-highlight\">' + char + '</span>')\n context['argspec'] = argspec\n\n doc_file = codecs.open(rst_name, 'w', encoding='utf-8')\n doc_file.write(docstring)\n doc_file.close()\n\n confoverrides = {'html_context': context,\n 'extensions': ['sphinx.ext.mathjax',\n 'sphinxcontrib.jquery']}\n\n doctreedir = osp.join(DOCDIR, 'doctrees')\n sphinx_app = Sphinx(srcdir, DOCDIR, DOCDIR, doctreedir, buildername,\n confoverrides, status=None, warning=None,\n freshenv=True, warningiserror=False, tags=None)\n\n try:\n sphinx_app.build(None, [rst_name])\n except SystemMessage:\n pass",
"def DocString():\n return",
"def copy_docstring(other):\n\n def wrapper(func):\n func.__doc__ = other.__doc__\n return func\n\n return wrapper",
"def old_function_with_docstring(x, y):\n return x + y",
"def clean_docstring(doc: str, unused: Literal[\"pre\", \"post\"] = None) -> str:\n doc = doc.split(\"\\n\")\n if unused == \"pre\":\n try:\n index = next(i for i, l in enumerate(doc) if l.strip())\n doc = doc[index:]\n except StopIteration:\n doc = []\n elif unused == \"post\":\n try:\n index = next(i for i, l in enumerate(reversed(doc)) if l.strip())\n doc = doc[: len(doc) - index]\n except StopIteration:\n doc = []\n if doc:\n first_line = doc[0]\n index = len(first_line) - len(first_line.lstrip())\n indent = first_line[:index]\n if all(l.startswith(indent) for l in doc if l.strip()):\n doc = [(l[index:] if l.strip() else l) for l in doc]\n return \"\\n\".join(doc)",
"def doc_apply(doc):\n\n def wrapper(func):\n func.__doc__ = doc\n return func\n\n return wrapper",
"def update_javadoc(javadoc=None, since=None, author=None):\n\n if javadoc is None:\n javadoc = \"\\n\\n/**\\n */\\n\"\n\n if since is not None:\n javadoc = re.sub(\"/\\*\\*\", \"/**\\n * @since \" + since, javadoc)\n\n if author is not None:\n javadoc = re.sub(\"/\\*\\*\", \"/**\\n * @author \" + author, javadoc)\n\n return javadoc",
"def doc_string():\n pass # pass does nothing",
"def clean_doc(doc):\r\n # Replace regular enter (i.e. mere comment formatting in cpp file)\r\n # with space\r\n doc = doc.replace(\"\\n\", \" \")\r\n\r\n # The removal can cause a \"hard enter\" (literal \\n) to get an unintended\r\n # trailing space - trim those.\r\n doc = doc.replace(\"\\\\n \", \"\\\\n\")\r\n return '\"%s\"' % doc",
"def update_docstring(instance):\n try:\n docstring = instance.api_map['doc']\n except (KeyError, TypeError):\n docstring = 'No docstring provided.'\n\n instance.__class__.__doc__ = docstring\n instance.__class__.__call__.__signature__ = construct_signature(instance)\n\n return docstring",
"def convert_doxygen_docstring(lines, name):\n\n lines = lines[:]\n newlines = []\n indent = 0\n reading_desc = False\n\n while lines:\n line = lines.pop(0)\n if line.startswith(\"////\"):\n continue\n\n line = line.rstrip()\n if line.startswith('///<'):\n strline = line[4:]\n else:\n strline = line\n\n strline = strline.lstrip('/ \\t')\n\n if strline == \"**\" or strline == \"*/\":\n continue\n\n if strline.startswith(\"** \"):\n strline = strline[3:]\n elif strline.startswith(\"* \"):\n strline = strline[2:]\n elif strline == \"*\":\n strline = \"\"\n\n strline = strline.lstrip(' \\t')\n\n if strline.startswith('@'):\n special = strline.split(' ', 1)[0][1:]\n if special == 'par' and strline.endswith(':') and lines and '@code' in lines[0]:\n newlines.append(' '*indent + strline[5:] + ':')\n newlines.append('')\n line = lines.pop(0)\n offset = line.index('@code')\n while lines:\n line = lines.pop(0)\n if '@endverbatim' in line or '@endcode' in line:\n break\n newlines.append(' ' + line[offset:])\n\n newlines.append('')\n continue\n elif special == \"verbatim\" or special == \"code\":\n if newlines and newlines[-1]:\n newlines.append('')\n\n newlines.append('.. code-block:: guess')\n newlines.append('')\n offset = line.index('@' + special)\n while lines:\n line = lines.pop(0)\n if '@endverbatim' in line or '@endcode' in line:\n break\n newlines.append(' ' + line[offset:])\n\n newlines.append('')\n continue\n elif special == \"f[\":\n if newlines and newlines[-1]:\n newlines.append('')\n\n newlines.append('.. math::')\n newlines.append('')\n offset = line.index('@' + special)\n while lines:\n line = lines.pop(0)\n if '@f]' in line:\n break\n newlines.append(' ' + line[offset:])\n\n newlines.append('')\n continue\n elif special == 'param':\n #TODO\n #if extra is not None:\n # _, name, desc = strline.split(' ', 2)\n # extra['param:' + name] = desc\n continue\n elif special == 'deprecated':\n if newlines and newlines[-1]:\n newlines.append('')\n\n _, value = strline.split(' ', 1)\n\n # I'd love to use the proper Sphinx deprecated tag, but it\n # requires a version number, whereas Doxygen doesn't.\n newlines.append('*Deprecated:* ' + convert_doxygen_format(value, name))\n newlines.append('')\n continue\n elif special in ('brief', 'return', 'returns'):\n #TODO\n #if extra is not None:\n # _, value = strline.split(' ', 1)\n # extra[special] = value\n continue\n elif special == 'details':\n strline = strline[9:]\n elif special == 'sa' or special == 'see':\n if newlines and newlines[-1]:\n newlines.append('')\n\n _, value = strline.split(' ', 1)\n values = value.split(',')\n\n for i, value in enumerate(values):\n result = resolve_reference(value.partition('(')[0], name)\n if result:\n values[i] = ':{0}:`{1}`'.format(*result)\n else:\n values[i] = ':obj:`{0}`'.format(value)\n\n if special == 'see':\n newlines.append('See {}.'.format(', '.join(values)))\n else:\n newlines.append('See also {}.'.format(', '.join(values)))\n newlines.append('')\n continue\n elif special in ('note', 'warning'):\n if newlines and newlines[-1]:\n newlines.append('')\n\n newlines.append('.. %s:: ' % (special))\n newlines.append('')\n newlines.append(' ' + convert_doxygen_format(strline[2 + len(special):], name))\n while lines and lines[0].strip(' *\\t/'):\n line = lines.pop(0).lstrip(' *\\t')\n newlines.append(' ' + convert_doxygen_format(line, name))\n\n newlines.append('')\n continue\n elif special == 'since':\n if newlines and newlines[-1]:\n newlines.append('')\n\n newlines.append('.. 
versionadded:: ' + strline[7:])\n newlines.append('')\n continue\n else:\n print(\"Unhandled documentation tag: @\" + special)\n\n if strline or len(newlines) > 0:\n newlines.append(' '*indent + convert_doxygen_format(strline, name))\n\n return newlines",
"def filter_doc(doc_text):\n # remove stars\n filter_regex=re.compile(r\"[_*]\")\n doc=filter_regex.sub(\"\",doc_text)\n # substitute quotation marks\n double_quot_regex=re.compile(r\"[“”]\")\n single_quot_regex=re.compile(r\"[’‘]\")\n doc=double_quot_regex.sub('\"',doc)\n doc=single_quot_regex.sub(\"'\",doc)\n # substitute new lines inside the text for spaces\n # these new lines are usually caused by formatting texts to fit in 80 columns \n newline_quot_regex=re.compile(r\"(\\S)\\n(\\S)\")\n doc=newline_quot_regex.sub(r\"\\1 \\2\",doc)\n # remove illustration tag\n #illustration_regex=re.compile(r\"\\[Illustration.*]\")\n #doc=illustration_regex.sub(\"\",doc)\n return doc",
"def __init__ (self, docstring, name, isInternal):\n\n # Take out excess leading blank lines.\n docstring = re.sub('/\\*\\*(\\s+\\*)+', r'/** \\n *', docstring)\n\n self.docstring = docstring\n self.name = name\n self.isInternal = isInternal",
"def magic_pdoc(self, parameter_s=''):\n self._inspect('pdoc',parameter_s)",
"def strip_docstring(blob):\n docstring = True\n while docstring == True:\n match_docstring = re.search('\\n\\s*\"\"\"[^\"\"\"]*\"\"\"', blob)\n if not match_docstring:\n docstring = False\n else:\n blob = blob.replace(blob[match_docstring.span()[0]:match_docstring.span()[1]], '')\n return blob",
"def process(self, doc):\n self.doc = doc\n if self.replace_words is True:\n self.replace_words_fun()\n if self.remove_html_tags is True:\n self.remove_html_tags_fun()\n if self.remove_stopwords is True:\n self.remove_stopwords_fun()\n if self.remove_numbers is True:\n self.remove_numbers_fun()\n if self.remove_punctations is True:\n self.remove_punctations_fun() \n if self.lemmatize is True:\n self.lemmatize_fun()\n return self.doc",
"def phpdoc(self, irc, msg, args, num, req):\n self.googleq('http://php.net/manual/en/', req, num, irc)",
"def docstrings(param1, param2):\n return \"example string\""
] |
[
"0.7227808",
"0.71237713",
"0.6798946",
"0.659024",
"0.65335333",
"0.6318953",
"0.59368324",
"0.57682997",
"0.5699257",
"0.56764704",
"0.56011766",
"0.5599349",
"0.55534387",
"0.54998285",
"0.5464047",
"0.54257745",
"0.53807503",
"0.53759325",
"0.5368533",
"0.5310211",
"0.5300423",
"0.52615154",
"0.5249973",
"0.5211845",
"0.5191311",
"0.51904935",
"0.5175667",
"0.51325047",
"0.5131462",
"0.5131362"
] |
0.8084435
|
0
|
processFile (filename, ostream) Reads the given header file and writes to ostream the necessary SWIG incantation to annotate each method (or function) with a docstring appropriate for the given language.
|
def processFile (filename, ostream):
istream = open(filename)
header = CHeader(istream)
istream.close()
processClassDocs(ostream, header.classDocs)
processClasses(ostream, header.classes)
processFunctions(ostream, header.functions)
ostream.flush()
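A minimal, hypothetical driver for the function above. The module name swigdoc, the output file name, and the glob pattern are assumptions for illustration; only processFile itself comes from the code shown.

import glob
import swigdoc  # assumed: the module that defines processFile, CHeader, processClasses, ...

# Append the SWIG docstring annotations for every header into one interface fragment.
with open('docstrings.i', 'w') as ostream:
    for header in sorted(glob.glob('src/*.h')):
        swigdoc.processFile(header, ostream)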
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def generate_headers(src_files, out_root, doc_root):\r\n\r\n if not os.path.exists(out_root):\r\n os.makedirs(out_root)\r\n did_print_heading = False\r\n changed = False\r\n for (name, files) in src_files:\r\n if files.__class__ == str:\r\n src = files\r\n files = (src,)\r\n else:\r\n src = files[0]\r\n\r\n dst = src.replace(\".hh\", \"-method-def.hh\")\r\n dst = dst.replace(\".cpp\", \"-method-def.hh\")\r\n dst = os.path.join(out_root, os.path.split(dst)[1])\r\n\r\n dst_doc = src.replace(\".hh\", '-methods.txt')\r\n dst_doc = dst_doc.replace(\".cpp\", '-methods.txt')\r\n dst_doc_filename = os.path.split(dst_doc)[1]\r\n dst_doc_filename = os.path.join(doc_root, dst_doc_filename)\r\n\r\n dst_prop_doc = src.replace(\".cpp\", '-properties.txt')\r\n dst_doc_prop_filename = os.path.split(dst_prop_doc)[1]\r\n dst_doc_prop_filename = os.path.join(doc_root, dst_doc_prop_filename)\r\n\r\n if util.changed(src, dst):\r\n if not did_print_heading:\r\n print(\"* Generating Python method definitions.\")\r\n did_print_heading = True\r\n generate(files, dst, dst_doc_filename, dst_doc_prop_filename, name)\r\n changed = True\r\n if not changed:\r\n print(\"* Python method definitions up to date.\")",
"def add_header(header, filename, i):\n with open(filename, 'r+') as f:\n content = f.readlines()\n content[0] = header\n f.seek(0,0)\n f.write(f'<!-- Generated with XMLGenerator.py {__ver__} | {get_app_name(i)} -->\\n')\n f.writelines(content)",
"def _formatSource(self, sourceFiles, outputFile, language):\n f=self.openFile(outputFile, \"a\") #open otuputFile for appending\n\n for sourceFile in sourceFiles: \n #read in input file\n with self.openFile(sourceFile) as inputFile:\n preprocessedSource = inputFile.read()\n inputFile.close()\n \n #replace every occurence of '<' with '<' in the source file for the syntax highlighter\n source = preprocessedSource.replace('<', '<')\n \n f.write('<font face=\"courier\" color=\"' + AutoGrader.Const.HEADER_COLOR2 + '\">')\n f.write ('------------- BEGIN LISTING: ' + os.path.split(sourceFile)[1] + ' -------------</font><br>\\n')\n if language == 'C++':\n f.write('<pre class=\"brush: cpp;\">')\n if language == 'Python':\n f.write('<pre class=\"brush: python;\">')\n f.write(source)\n f.write('</pre>')\n\n f.write('<font face=\"courier\" color=\"' + AutoGrader.Const.HEADER_COLOR2 + '\">')\n f.write ('------------- END LISTING: ' + os.path.split(sourceFile)[1] + ' -------------</font><br>\\n')\n \n f.close()",
"def write_method_doc(file_name, entries):\r\n\r\n with open(file_name, 'w', newline='\\n') as f:\r\n f.write('<table border=\"0\">')\r\n f.write('<tr><td><b>Method</b></td><td><b>Description</b></td></tr>')\r\n for items in sorted(entries, key=itemgetter(3)):\r\n f.write('<tr><td valign=\"top\">%s</td><td>%s</td></tr>' %\r\n (items[3], doc_to_html(items[4])))\r\n f.write('</table>')",
"def generate_header():\n header_file = AUTOGEN_WARNING\n header_file += \"/// /file atomic_nuclear_data.h\\n\"\n header_file += \"/// /author Andrew Davis ([email protected])\\n\"\n header_file += \"///\\n\"\n header_file += (\n \"/// /brief Implements all the fundamental atomic & nuclear data data\\n\"\n )\n header_file += \"#include <map>\\n\"\n header_file += \"\\n\"\n header_file += \"namespace pyne\\n\"\n header_file += \"{\\n\"\n header_file += (\n \" /// main function to be called when you wish to load the nuclide data \\n\"\n )\n header_file += \" /// into memory \\n\"\n header_file += \" void _load_atomic_mass_map_memory();\\n\"\n header_file += \" /// function to create mapping from nuclides in id form\\n\"\n header_file += \" /// to their atomic masses\\n\"\n header_file += \" \\n\"\n header_file += \" void _insert_atomic_mass_map();\\n\"\n header_file += \" \\n\"\n header_file += \" /// function to create mapping from nuclides in id form \\n\"\n header_file += \" /// to their natural abundances\\n\"\n header_file += \" void _insert_abund_map();\\n\"\n header_file += \" \\n\"\n header_file += (\n \" /// Mapping from nuclides in id form to their natural abundances\\n\"\n )\n header_file += \" extern std::map<int,double> natural_abund_map;\\n\"\n header_file += \" \\n\"\n header_file += \" /// Mapping from nuclides in id form to their atomic masses.\\n\"\n header_file += \" extern std::map<int,double> atomic_mass_map;\\n\"\n header_file += \" \\n\"\n header_file += (\n \" /// Mapping from nuclides in id form to the associated error in \\n\"\n )\n header_file += \" /// abdundance \\n\"\n header_file += \" extern std::map<int,double> atomic_mass_error_map;\\n\"\n header_file += \"} // namespace pyne\\n\"\n return header_file",
"def read (self, stream):\n inClass = False\n inClassDocs = False\n inDocs = False\n inFunc = False\n inSkip = False\n isInternal = False\n ignoreThis = False\n\n docstring = ''\n lines = ''\n\n for line in stream.readlines():\n stripped = line.strip()\n\n if stripped == '#ifndef SWIG':\n inSkip = True\n if stripped.startswith('#endif') and (stripped.find('SWIG') >= 0):\n inSkip = False\n if inSkip: continue\n\n # Track things that we flag as internal, so that we can\n # remove them from the documentation.\n\n if (stripped.find('@cond doxygenAntimonyInternal') >= 0): isInternal = True\n if (stripped.find('@endcond') >= 0): isInternal = False\n\n # Watch for class description, usually at top of file.\n\n if (not inClassDocs) and stripped.startswith('* @class'):\n inClassDocs = True\n classname = stripped[8:].strip()\n if classname.endswith('.'):\n classname = classname[:-1]\n docstring = ''\n continue\n\n if inClassDocs:\n if stripped.startswith('* @brief'):\n docstring += ' * ' + stripped[9:].strip() + '\\n'\n continue\n elif not stripped.endswith('*/') and not stripped.startswith('* @class'):\n docstring += line\n continue\n else:\n docstring = '/**\\n' + docstring + ' */'\n doc = CClassDoc(docstring, classname, isInternal)\n self.classDocs.append(doc)\n\n # There may be more class docs in the same comment.\n if stripped.startswith('* @class'):\n classname = stripped[8:].strip()\n if classname.endswith('.'):\n classname = classname[:-1] \n else:\n inClassDocs = False\n\n docstring = ''\n continue\n\n # Watch for class definition, methods and out-of-class functions.\n\n if stripped.startswith('class ') and not stripped.endswith(';'):\n ignoreThis = False\n inClass = True\n classname = line[6:].split(':')[0].strip()\n if classname[:6] == 'LIBSBM' or classname[:6] == 'LIBLAX':\n classname = classname.split(' ')[1].strip()\n self.classes.append( CClass(classname) )\n continue\n\n if stripped == '};':\n inClass = False\n continue\n\n if stripped == '/**':\n docstring = ''\n lines = ''\n ignoreThis = False\n inDocs = True\n\n if inDocs:\n docstring += line\n inDocs = (stripped != '*/')\n continue\n\n # If we get here, we're no longer inside a comment block.\n # Start saving lines, but skip embedded comments.\n\n if stripped.startswith('#') or (stripped.find('typedef') >= 0):\n ignoreThis = True\n continue\n\n if not ignoreThis:\n cppcomment = stripped.find('//')\n if cppcomment != -1:\n stripped = stripped[:cppcomment]\n lines += stripped + ' ' # Space avoids jamming code together.\n\n # Keep an eye out for the end of the declaration.\n if not stripped.startswith('*') and \\\n (stripped.endswith(';') or stripped.endswith(')') or stripped.endswith('}')):\n\n # It might be a forward declaration. Skip it.\n if lines.startswith('class'):\n continue\n\n # It might be a C++ operator redefinition. Skip it.\n if lines.find('operator') >= 0:\n continue\n\n # It might be an enum. 
Skip it.\n # If it's not an enum at this point, parse it.\n if stripped.endswith('}'):\n lines = lines[:lines.rfind('{')]\n if not stripped.startswith('enum'):\n\n # If this segment begins with a comment, we need to skip over it.\n searchstart = lines.rfind('*/')\n if (searchstart < 0):\n searchstart = 0\n\n # Find (we hope) the end of the method name.\n stop = lines[searchstart:].find('(')\n\n # Pull out the method name & signature.\n if (stop > 0):\n name = lines[searchstart : searchstart + stop].split()[-1]\n endparen = lines.rfind(')')\n args = lines[searchstart + stop : endparen + 1]\n isConst = lines[endparen:].rfind('const')\n\n # Swig doesn't seem to mind C++ argument lists, even though they\n # have \"const\", \"&\", etc. So I'm leaving the arg list unmodified.\n func = Method(isInternal, docstring, name, args, (isConst > 0))\n\n # Reset buffer for the next iteration, to skip the part seen.\n lines = lines[endparen + 2:]\n\n if inClass:\n c = self.classes[-1]\n c.methods.append(func)\n\n # Record method variants that take different arguments.\n if c.methodVariants.get(name) == None:\n c.methodVariants[name] = {}\n c.methodVariants[name][args] = func\n else:\n self.functions.append(func)\n # FIXME need do nc variants",
"def __call__(self, format, filename):\n # turn the filename into something suitable for use in #define's\n prettyname = filename.replace(\".\", \"_\").upper()\n prettyname = prettyname.replace(\"/\", \"__\")\n prettyname = prettyname.replace(\":\", \"__\")\n prettyname = prettyname.replace(\"-\", \"__\")\n\n # try and open the file\n with open(filename, \"w\") as output:\n self.writeFuncsLut[format]( output, prettyname )",
"def preprocessor(output_directory, filepath, stats, hip_clang_launch, is_pytorch_extension, clean_ctx):\n fin_path = os.path.join(output_directory, filepath)\n with open(fin_path, 'r', encoding='utf-8') as fin:\n output_source = fin.read()\n\n fout_path = os.path.join(output_directory, get_hip_file_path(filepath))\n if not os.path.exists(os.path.dirname(fout_path)):\n clean_ctx.makedirs(os.path.dirname(fout_path))\n\n # unsupported_calls statistics reporting is broken atm\n def pt_repl(m):\n return PYTORCH_MAP[m.group(0)]\n\n if is_pytorch_extension:\n output_source = RE_PYTORCH_PREPROCESSOR.sub(pt_repl, output_source)\n else:\n if is_pytorch_file(filepath):\n output_source = RE_PYTORCH_PREPROCESSOR.sub(pt_repl, output_source)\n else:\n def c2_repl(m):\n return CAFFE2_MAP[m.group(0)]\n output_source = RE_CAFFE2_PREPROCESSOR.sub(c2_repl, output_source)\n\n # Header rewrites\n def mk_repl(templ):\n def repl(m):\n f = m.group(1)\n if (\n f.startswith(\"ATen/cuda\")\n or f.startswith(\"ATen/native/cuda\")\n or f.startswith(\"ATen/native/quantized/cuda\")\n or f.startswith(\"ATen/native/sparse/cuda\")\n or f.startswith(\"THC/\")\n or f.startswith(\"THCUNN/\")\n or (f.startswith(\"THC\") and not f.startswith(\"THCP\"))\n ):\n return templ.format(get_hip_file_path(m.group(1)))\n return m.group(0)\n return repl\n output_source = RE_QUOTE_HEADER.sub(mk_repl('#include \"{0}\"'), output_source)\n output_source = RE_ANGLE_HEADER.sub(mk_repl('#include <{0}>'), output_source)\n output_source = RE_THC_GENERIC_FILE.sub(mk_repl('#define THC_GENERIC_FILE \"{0}\"'), output_source)\n\n # CMakeLists.txt rewrites\n if filepath.endswith('CMakeLists.txt'):\n output_source = output_source.replace('CUDA', 'HIP')\n output_source = output_source.replace('THC', 'THH')\n output_source = RE_CU_SUFFIX.sub('.hip', output_source)\n\n # Perform Kernel Launch Replacements\n if not hip_clang_launch:\n output_source = processKernelLaunches(output_source, stats)\n\n # Replace std:: with non-std:: versions\n if (filepath.endswith(\".cu\") or filepath.endswith(\".cuh\")) and \"PowKernel\" not in filepath:\n output_source = replace_math_functions(output_source)\n\n # Include header if device code is contained.\n output_source = hip_header_magic(output_source)\n\n # Replace the extern __shared__\n output_source = replace_extern_shared(output_source)\n\n do_write = True\n if os.path.exists(fout_path):\n with open(fout_path, 'r', encoding='utf-8') as fout_old:\n do_write = fout_old.read() != output_source\n if do_write:\n with clean_ctx.open(fout_path, 'w', encoding='utf-8') as fout:\n fout.write(output_source)\n return \"ok\"\n else:\n return \"skipped\"",
"def get_doc(filename: str) -> str:\n\n # Create the header.\n doc = \"# `\" + filename.split(\"/\")[-1] + \"`\\n\\n\"\n\n lines: List[str] = Path(filename).read_text().split(\"\\n\")\n\n for i in range(len(lines)):\n # Create a class description.\n if lines[i].startswith(\"class\"):\n # Skip private classes.\n match = re.search(\"class _(.*):\", lines[i])\n if match is not None:\n continue\n # Add the name of the class\n class_name = re.search(\"class (.*):\", lines[i]).group(1)\n doc += f\"## `{class_name}`\\n\\n\"\n # Add an example.\n class_example = f\"`from tdw.{filename[:-3].replace('/', '.')} import \" + re.sub(r\"(.*)\\((.*)\\)\", r'\\1',\n class_name) + \"`\"\n doc += class_example + \"\\n\\n\"\n doc += PyDocGen.get_class_description(lines, i)\n # Parse an enum.\n if re.search(r\"class (.*)\\(Enum\\):\", lines[i]) is not None:\n doc += \"\\n\\n\" + PyDocGen.get_enum_values(lines, i)\n doc += \"\\n\\n***\\n\\n\"\n # Create a function description.\n elif lines[i].strip().startswith(\"def\"):\n # Skip private functions.\n match = re.search(\"def _(.*)\", lines[i])\n if match is not None and \"__init__\" not in lines[i]:\n continue\n # Append the function description.\n doc += PyDocGen.get_function_documentation(lines, i) + \"\\n\\n***\\n\\n\"\n\n # Move the \"main class\" to the top of the document.\n main_class_name = ''.join(x.capitalize() or '_' for x in filename[:-3].split('_'))\n main_class = re.search(\"(## `\" + main_class_name + \"`((.|\\n)*))\", doc)\n if main_class is not None:\n main_class = main_class.group(1)\n doc_header = re.search(\"(.*)\\n\\n\", doc).group(0)\n doc_temp = doc.replace(main_class, \"\").replace(doc_header, \"\")\n doc = doc_header + main_class + doc_temp\n\n return doc",
"def _reportFileAnalytics(self, sourceFiles, outputFile, language):\n \n #is this a single file or a set of files?\n bSingleFile = len(sourceFiles) == 1\n \n #open the output file for appending\n f=self.openFile(outputFile, \"a\") #open for appending\n f.write ('<font face=\"verdana\" color=\"' + AutoGrader.Const.HEADER_COLOR1 + '\">')\n f.write ('<br>\\n=======================================================<br>\\n')\n if bSingleFile:\n f.write(sourceFiles[0]) #if this is a single file, simply output its name\n else: #if these are multiple files, list the directory name in bold\n f.write('<b>' + os.path.split(sourceFiles[0])[0] + '</b>') #directory name in bold\n f.write ('<br>\\n=======================================================<br>\\n</font>')\n\n #for each file, report the analytics\n for sourceFile in sourceFiles:\n if bSingleFile == False: #only print the filename if we have more than 1 file in the list\n f.write ('<font face=\"verdana\" color=\"' + AutoGrader.Const.HEADER_COLOR1 + '\">')\n f.write(os.path.split(sourceFile)[1] + '</font><br>\\n')\n \n if language == 'C++':\n numLines, numComments = self.analyzeCppCode(sourceFile)\n f.write ('<font face=\"courier\" color=\"' + AutoGrader.Const.ANALYTICS_COLOR1 + '\">Code Lines: ' + str(numLines))\n f.write ('<br>\\n~#Comments: ' + str(numComments) + '<br>\\n')\n \n if language == 'Python':\n numLines, numDocStr, numComments, numDefs, numClasses = self.analyzePythonCode(sourceFile)\n f.write ('<font face=\"courier\" color=\"' + AutoGrader.Const.ANALYTICS_COLOR1 + '\">Code Lines: ' + str(numLines))\n f.write (AutoGrader.Const.HTML_TAB_CHAR*2 + '~#Functions: ' + str(numDefs))\n f.write (AutoGrader.Const.HTML_TAB_CHAR*2 + '~#Classes: ' + str(numClasses))\n f.write ('<br>\\n~#Comments: ' + str(numComments))\n f.write (AutoGrader.Const.HTML_TAB_CHAR*2 + '~#DocStrs: ' + str(numDocStr) + '<br>\\n')\n \n f.write('</font><br>') #skip a line between entries\n f.close()",
"def analyze(file,process):\n readin(file)\n # inspecting(file, functions)\n process(file, functions)",
"def process_source_code(source_dir, header_map):\n sources = get_source_files(source_dir)\n for filename in sources:\n process_file(filename, header_map)",
"def __init__(self,\n source_path='./*.py',\n template_path='./docs/templates/*_template.md',\n output_path='./docs/documentation.md',\n ignore=['extra']\n ):\n\n template_files = glob.glob(template_path)\n # filename = t.split('/')[-1]\n self.sources = {os.path.basename(s).split('.')[0]: os.path.normpath(s) for s in glob.glob(source_path) if not any(i in s for i in ignore)}\n self.templates = {os.path.basename(t).split('_')[0]: os.path.normpath(t) for t in template_files}\n self.output_path = output_path\n\n self.template_content = {}\n for k, v in self.templates.items():\n path = v\n with open(path, 'r') as template_file:\n self.template_content[k] = template_file.read()\n\n self.text = ''\n self.classes = []\n self.headers = ['Params', 'Returns', 'Attributes']\n self.hierarchy = [\n 'class',\n 'method',\n 'parameter',\n 'pinfo',\n 'extra'\n ]\n self.tab_length = 6",
"def write_opening_header(final_file, **header_params):\n\n final_file.seek(0) # Reset file pointer.\n file_contents = final_file.read() # Save content.\n\n final_file.seek(0) # Write at the top.\n\n if header_params[\"extensions\"]:\n if len(header_params[\"extensions\"]) > 1:\n write_data(\n final_file,\n \"# Title: StevenBlack/hosts with the {0} and {1} extensions\\n#\\n\".format(\n \", \".join(header_params[\"extensions\"][:-1]),\n header_params[\"extensions\"][-1],\n ),\n )\n else:\n write_data(\n final_file,\n \"# Title: StevenBlack/hosts with the {0} extension\\n#\\n\".format(\n \", \".join(header_params[\"extensions\"])\n ),\n )\n else:\n write_data(final_file, \"# Title: StevenBlack/hosts\\n#\\n\")\n\n write_data(\n final_file,\n \"# This hosts file is a merged collection \"\n \"of hosts from reputable sources,\\n\",\n )\n write_data(final_file, \"# with a dash of crowd sourcing via GitHub\\n#\\n\")\n write_data(\n final_file,\n \"# Date: \" + time.strftime(\"%d %B %Y %H:%M:%S (%Z)\", time.gmtime()) + \"\\n\",\n )\n\n if header_params[\"extensions\"]:\n write_data(\n final_file,\n \"# Extensions added to this file: \"\n + \", \".join(header_params[\"extensions\"])\n + \"\\n\",\n )\n\n write_data(\n final_file,\n (\n \"# Number of unique domains: {:,}\\n#\\n\".format(\n header_params[\"numberofrules\"]\n )\n ),\n )\n write_data(\n final_file,\n \"# Fetch the latest version of this file: \"\n \"https://raw.githubusercontent.com/StevenBlack/hosts/master/\"\n + path_join_robust(header_params[\"outputsubfolder\"], \"\").replace(\"\\\\\", \"/\")\n + \"hosts\\n\",\n )\n write_data(\n final_file, \"# Project home page: https://github.com/StevenBlack/hosts\\n\"\n )\n write_data(\n final_file,\n \"# Project releases: https://github.com/StevenBlack/hosts/releases\\n#\\n\",\n )\n write_data(\n final_file,\n \"# ===============================================================\\n\",\n )\n write_data(final_file, \"\\n\")\n\n if not header_params[\"skipstatichosts\"]:\n write_data(final_file, \"127.0.0.1 localhost\\n\")\n write_data(final_file, \"127.0.0.1 localhost.localdomain\\n\")\n write_data(final_file, \"127.0.0.1 local\\n\")\n write_data(final_file, \"255.255.255.255 broadcasthost\\n\")\n write_data(final_file, \"::1 localhost\\n\")\n write_data(final_file, \"::1 ip6-localhost\\n\")\n write_data(final_file, \"::1 ip6-loopback\\n\")\n write_data(final_file, \"fe80::1%lo0 localhost\\n\")\n write_data(final_file, \"ff00::0 ip6-localnet\\n\")\n write_data(final_file, \"ff00::0 ip6-mcastprefix\\n\")\n write_data(final_file, \"ff02::1 ip6-allnodes\\n\")\n write_data(final_file, \"ff02::2 ip6-allrouters\\n\")\n write_data(final_file, \"ff02::3 ip6-allhosts\\n\")\n write_data(final_file, \"0.0.0.0 0.0.0.0\\n\")\n\n if platform.system() == \"Linux\":\n write_data(final_file, \"127.0.1.1 \" + socket.gethostname() + \"\\n\")\n write_data(final_file, \"127.0.0.53 \" + socket.gethostname() + \"\\n\")\n\n write_data(final_file, \"\\n\")\n\n preamble = path_join_robust(BASEDIR_PATH, \"myhosts\")\n maybe_copy_example_file(preamble)\n\n if os.path.isfile(preamble):\n with open(preamble, \"r\") as f:\n write_data(final_file, f.read())\n\n final_file.write(file_contents)",
"def generate(self, fileName):\n self.preProcess()\n styleFile = open(fileName, 'w')\n # write head part\n head = \"\"\"#!/usr/bin/env python\n\nimport os\n\nfrom WMQuality.Code import Code\n\n# output of the log files\n# prefix of the files in cvs\n# quality script for using pylint:\nqualityScript = '%s'\n# output file:\nqualityReport = '%s'\n# rating threshold (min: 0, max 10)\nthreshold = %s\n\npackages = {\\\\\n \"\"\" % (self.script, self.report, self.threshold)\n styleFile.writelines(head)\n styleFile.writelines('\\n')\n\n for moduleName in self.module.keys():\n # find the one with the most votes per module:\n # register this.\n styleFile.writelines(\" '\" + moduleName + \"':'\" + self.module[moduleName] + \"',\\\\\\n\")\n styleFile.writelines('}\\n')\n tail = \"\"\"\ncode = Code(qualityScript, qualityReport, WMCore.WMInit.getWMBASE(), threshold, packages)\ncode.run()\ncode.summaryText()\n \"\"\"\n styleFile.writelines(tail)\n styleFile.close()",
"def writeHeader(self, outputFile):\n print('\"\"\"{0}\"\"\"'.format(HEADER_COMMENT), file=outputFile)\n print(\"from protocol import ProtocolElement\", file=outputFile)\n print(\"from protocol import SearchRequest\", file=outputFile)\n print(\"from protocol import SearchResponse\", file=outputFile)\n print(file=outputFile)\n print(\"import avro.schema\", file=outputFile)\n print(file=outputFile)\n if self.version[0].lower() == 'v' and self.version.find('.') != -1:\n versionStr = self.version[1:] # Strip off leading 'v'\n else:\n versionStr = self.version\n print(\"version = '{0}'\".format(versionStr), file=outputFile)",
"def main():\n args = parse_arguments()\n mappings = parse_mappings(\n args.species.strip(\"'\").capitalize(), args.infile, args.outfile\n )\n create_new_header(args.infile, mappings, args.outfile)",
"def process(self):\r\n\r\n index = cindex.Index.create()\r\n self.headers = {}\r\n\r\n for f in self.files:\r\n if f in self.processed:\r\n continue\r\n\r\n print \"Processing `%s'\" % (os.path.basename(f),)\r\n\r\n tu = index.parse(f, self.flags)\r\n\r\n if len(tu.diagnostics) != 0:\r\n fatal = False\r\n\r\n for d in tu.diagnostics:\r\n sys.stderr.write(d.format)\r\n sys.stderr.write(\"\\n\")\r\n\r\n if d.severity == cindex.Diagnostic.Fatal or \\\r\n d.severity == cindex.Diagnostic.Error:\r\n fatal = True\r\n\r\n if fatal:\r\n sys.stderr.write(\"\\nCould not generate documentation due to parser errors\\n\")\r\n sys.exit(1)\r\n\r\n if not tu:\r\n sys.stderr.write(\"Could not parse file %s...\\n\" % (f,))\r\n sys.exit(1)\r\n\r\n # Extract comments from files and included files that we are\r\n # supposed to inspect\r\n extractfiles = [f]\r\n\r\n for inc in tu.get_includes():\r\n filename = str(inc.include)\r\n self.headers[filename] = True\r\n\r\n if filename in self.processed or (not filename in self.files) or filename in extractfiles:\r\n continue\r\n\r\n extractfiles.append(filename)\r\n\r\n for e in extractfiles:\r\n db = comment.CommentsDatabase(e, tu)\r\n\r\n self.add_categories(db.category_names)\r\n self.commentsdbs[e] = db\r\n\r\n self.visit(tu.cursor.get_children())\r\n\r\n for f in self.processing:\r\n self.processed[f] = True\r\n\r\n self.processing = {}\r\n\r\n # Construct hierarchy of nodes.\r\n for node in self.all_nodes:\r\n q = node.qid\r\n\r\n if node.parent is None:\r\n par = self.find_parent(node)\r\n\r\n # Lookup categories for things in the root\r\n if (par is None or par == self.root) and (not node.cursor is None):\r\n location = node.cursor.extent.start\r\n db = self.commentsdbs[location.file.name]\r\n\r\n if db:\r\n par = self.category_to_node[db.lookup_category(location)]\r\n\r\n if par is None:\r\n par = self.root\r\n\r\n par.append(node)\r\n\r\n # Resolve comment\r\n cm = self.find_node_comment(node)\r\n\r\n if cm:\r\n node.merge_comment(cm)\r\n\r\n # Keep track of classes to resolve bases and subclasses\r\n classes = {}\r\n\r\n # Map final qid to node\r\n for node in self.all_nodes:\r\n q = node.qid\r\n self.qid_to_node[q] = node\r\n\r\n if isinstance(node, nodes.Class):\r\n classes[q] = node\r\n\r\n # Resolve bases and subclasses\r\n for qid in classes:\r\n classes[qid].resolve_bases(classes)\r\n\r\n self.markup_code(index)",
"def OutputFile(outpath, snippet):\n out = file(outpath, 'w')\n out.write(COPYRIGHT_HEADER + '\\n')\n out.write(snippet)\n print 'Output ' + os.path.normpath(outpath)",
"def translate_file(path, writer):\n parser = Parser(path)\n parsed_name = os.path.splitext(os.path.basename(path))[0]\n writer.setFileName(parsed_name)\n while parser.hasMoreCommands():\n parser.advance()\n if parser.commandType() == Command.C_ARITHMETIC:\n writer.writeArithmetic(parser.arg1())\n if parser.commandType() in (Command.C_PUSH, Command.C_POP):\n writer.writePushPop(parser.commandType(),\n parser.arg1(), parser.arg2())\n if parser.commandType() == Command.C_LABEL:\n writer.writeLabel(parser.arg1())\n\n if parser.commandType() == Command.C_GOTO:\n writer.writeGoto(parser.arg1())\n\n if parser.commandType() == Command.C_IF:\n writer.writeIf(parser.arg1())\n\n if parser.commandType() == Command.C_RETURN:\n writer.writeReturn()\n\n if parser.commandType() == Command.C_CALL:\n writer.writeCall(parser.arg1(), parser.arg2())\n\n if parser.commandType() == Command.C_FUNCTION:\n writer.writeFunction(parser.arg1(), parser.arg2())",
"def modify_header():\n\n print_debug_info()\n if not bool(int(vim.eval(\"g:BHModify\"))):\n return\n\n if not should_do_write():\n debug(\"should not write this buffer.\")\n return\n\n if not has_header():\n debug(\"This file has no header.\")\n return add_header()\n\n # only if the suffix is supported and we have a method to strip the comment.\n if not ((\"extract_comment_%s\" % SUFFIX) in globals() and suffix_is_supported()):\n return\n\n comment = globals()[\"extract_comment_%s\" % SUFFIX]()\n debug(\"comment: %s\" % str(comment))\n if not comment:\n debug(\"comment is empty\")\n return\n\n comment_dict = {}\n\n if len(comment) < 3:\n # Less than 3 lines of original comment, put them in Description part.\n comment_dict['Description'] = '\\n'.join(comment)\n else:\n comment_dict = read_comment(comment)\n if \"\" in comment_dict:\n del comment_dict[\"\"]\n new_header_dict = read_comment(globals().get(\"%s_header\" % SUFFIX).rstrip().splitlines())\n debug(\"new\")\n debug(set(new_header_dict.keys()))\n debug(set(comment_dict.keys()))\n debug(\"end\")\n if not set(new_header_dict.keys()) == set(comment_dict.keys()):\n return prepend_header(render_header(comment_dict))\n else:\n debug(\"do not modify header since we already have the same header.\")",
"def print_from_file(filepath):\n \n \n header_filehandler = open(filepath, 'r')\n print header_filehandler.read()\n header_filehandler.close()",
"def _write_member_documentation_pages(\n documenter: sphinx.ext.autodoc.Documenter):\n for entry in _get_documenter_members(documenter):\n if entry.is_inherited:\n continue\n if (entry.overload and entry.overload.overload_id and\n re.fullmatch('[0-9]+', entry.overload.overload_id)):\n logger.warning('Unspecified overload id: %s', entry.object_name)\n member_rst_path = os.path.join(documenter.env.app.srcdir, 'python', 'api',\n entry.page_name + '.rst')\n objtype = entry.documenter.objtype\n member_content = ''\n if objtype == 'class':\n member_content += ':duplicate-local-toc:\\n\\n'\n member_content += sphinx_utils.format_directive(\n 'tensorstore-python-apidoc',\n options=dict(\n fullname=entry.full_name,\n objtype=objtype,\n importname=entry.import_name,\n objectdescription=True,\n subscript=entry.subscript,\n overload=cast(ParsedOverload, entry.overload).overload_id,\n ),\n )\n pathlib.Path(member_rst_path).write_text(member_content)\n _write_member_documentation_pages(entry.documenter)",
"def process_file(header,header_delimiter,file_in,file_out): \n header_pos=-1;\n file_stat = os.stat(file_in)\n f=file(file_in)\n lines=f.readlines()\n f.close()\n header_match=True\n # Locate Header Delimiter\n for i in range(len(lines)):\n pos=lines[i].find(header_delimiter)\n if pos == 0 and header_pos >=0:\n print >> sys.stderr , \"*** Error: Double header delimiter found at line %d and line %d in file %s\" % (header_pos,i,file_in)\n return False;\n elif pos == 0 :\n header_pos=i\n print_verbose(1, \" - Found header delimiter at line %d\" % i)\n elif header_match and i < len(header):\n header_match = lines[i].rstrip('\\n') == header[i].rstrip('\\n')\n\n \n # Replace header in file\n if header_pos == -1:\n print_verbose(1, \" - Header not found, inserting new header\")\n header_pos=0;\n else:\n header_pos=header_pos+1\n \n if header_match:\n if VERBOSE==0: print \"Not changed\\t\" , file_in\n return False\n if header_pos == 0:\n if VERBOSE==0: print \"Added\\t\\t\" , file_in\n else:\n if VERBOSE==0: print \"Updated\\t\\t\" , file_in\n f=open(file_out,\"w\")\n # Writing new header\"\n for line in header:\n f.write(line)\n f.write(header_delimiter+\"\\n\"); # Writing delimiter\n # Writing body\n for line in range(header_pos,len(lines)):\n f.write(lines[line])\n f.close()\n os.utime(file_out,(file_stat.st_atime,file_stat.st_mtime))\n return True",
"def write_header(self, stream, alignments):\n return\n ##################################################\n # You MUST implement this method in the subclass #\n # if the file format defines a file header. #\n ##################################################",
"def __init__ (self, isInternal, docstring, name, args, isConst):\n\n self.name = name\n self.isConst = isConst\n self.isInternal = isInternal\n\n if isInternal:\n if language == 'java':\n # We have a special Javadoc doclet that understands a non-standard\n # Javadoc tag, @internal. When present in the documentation string\n # of a method, it causes it to be excluded from the final\n # documentation output. @internal is something doxygen offers.\n #\n p = re.compile('(\\s+?)\\*/', re.MULTILINE)\n self.docstring = p.sub(r'\\1* @internal\\1*/', docstring)\n elif language == 'csharp':\n # We mark internal methods in a different way for C#.\n self.docstring = docstring\n else:\n self.docstring = \" @internal\\n\" + docstring\n else:\n self.docstring = docstring\n\n # In Java and C#, if a method is const and swig has to translate the type,\n # then for some reason swig cannot match up the resulting doc strings\n # that we put into %javamethodmodifiers. The result is that the java\n # documentation for the methods are empty. I can't figure out why, but\n # have figured out that if we omit the argument list in the doc string\n # that is put on %javamethodmodifiers for such case, swig does generate \n # the comments for those methods. This approach is potentially dangerous\n # because swig might attach the doc string to the wrong method if a\n # methods has multiple versions with varying argument types, but the\n # combination doesn't seem to arise in antimony currently, and anyway,\n # this fixes a real problem in the Java documentation for antimony.\n\n if language == 'java' or language == 'csharp':\n if isConst and (args.find('unsigned int') >= 0):\n self.args = ''\n elif not args.strip() == '()':\n if isConst:\n self.args = args + ' const'\n else:\n self.args = args\n else:\n if isConst:\n self.args = '() const'\n else:\n self.args = ''\n else:\n self.args = args",
"def main(sourcedatafile, targetdatafile, documentationfile, tail):\n data = load_data(sourcedatafile)\n genres = get_metadata(data)\n featurematrix = get_featurematrix(data)\n make_scatterplot(genres, featurematrix, targetdatafile)\n docfile.write(sourcedatafile, targetdatafile, documentationfile, docstring, tail, __file__)",
"def header(fpath):\n # If you want to change something, instead of overwriting a bug, add a new\n # key with the desired functionallity. This way, prior code doesn't break.\n # One can be very waste full with this function as it is fast anyways.\n\n\n ret = {}\n with open(fpath) as f:\n for line in f:\n if line[0] is not \"#\":\n break\n # Strip comment marker\n line = line[2:]\n name, value = line.split(\"=\")\n # Strip newline\n ret[name] = value[:-1]\n\n # To have some compatibility between spe veronica and viktor files,\n # we further unify some of the namings\n ret['gain'] = ret.get('Gain')\n\n exp_time = ret.get('ExposureTime [s]')\n if exp_time:\n ret['exposure_time'] = datetime.timedelta(seconds=float(exp_time))\n\n hbin = ret.get('HBin')\n if hbin:\n ret['hbin'] = {'ON': True}.get(value, False)\n\n cw = ret.get('Central-Wavelength')\n if cw:\n ret['central_wl'] = float(cw)\n\n vis_wl = ret.get('vis-Wavelength')\n if vis_wl:\n ret['vis_wl'] = float(vis_wl)\n\n syringe_pos = ret.get('Syringe Pos')\n if syringe_pos:\n ret['syringe_pos'] = int(syringe_pos)\n\n cursor = ret.get(\"Cursor\")\n if cursor:\n ret['cursor'] = tuple([int(elm) for elm in cursor.split('\\t')])\n\n x_mirror = ret.get('x-mirror')\n if x_mirror:\n ret['x_mirror'] = {'ON': True}.get(x_mirror, False)\n\n calib_coeff = ret.get('calib Coeff')\n if calib_coeff:\n ret['calib Coeff'] = tuple([float(elm) for elm in calib_coeff.split('\\t')])\n # Index 0 is actually central_wl during calibration,\n ret['calib_central_wl'] = ret['calib Coeff'][0]\n\n\n # For np.poly1d the calibration coefficents need to be in decreasing\n # order and no zero values are not allowed\n _cc = np.array(ret['calib Coeff'][1:])\n ret['calib_coeff'] = _cc[np.nonzero(_cc)][::-1]\n\n scan_start_time = ret.get('Scan Start time')\n if scan_start_time:\n ret['date'] = datetime.datetime.strptime(scan_start_time, '%d.%m.%Y %H:%M:%S')\n\n scan_stop_time = ret.get('Scan Stop time')\n if scan_stop_time:\n ret['date_stop'] = datetime.datetime.strptime(scan_stop_time, '%d.%m.%Y %H:%M:%S')\n\n timedelay = ret.get('Timedelay')\n if timedelay:\n ret['timedelay'] = np.array([int(elm) for elm in timedelay.split('\\t')])\n\n timedelay_pos= ret.get('Timedelay Pos')\n if timedelay_pos:\n ret['timedel_pos'] = np.array([int(elm) for elm in timedelay_pos.split('\\t')])\n\n return ret",
"def write_body(file_ptr, attribute, inverse_relation):\n depth = 0\n # The body will only consist of the main function.\n # Function def\n fwrite(\"def main(argv):\", file_ptr, depth)\n\n # Docstring\n depth += 1\n fwrite(\"\\\"\\\"\\\"Read the data from the provided file then attempt to \"\n \"classify it using\", file_ptr, depth)\n fwrite(\"a 1D binary sorting method. This function uses either height or \"\n \"age and\", file_ptr, depth)\n fwrite(\"uses a simple threshold test for sorting.\", file_ptr, depth, 2)\n\n fwrite(\":param data_file_arg: <str> Name of the CSV file of CDC data to\",\n file_ptr, depth)\n fwrite(\"make predictions for.\", file_ptr, depth)\n fwrite(\":return: None\", file_ptr, depth)\n fwrite(\"\\\"\\\"\\\"\", file_ptr, depth)\n fwrite(\"testing_data = read_data_file(argv[0])\", file_ptr, depth, 2)\n fwrite(\"relevant_data = testing_data[[\\\"{0}\\\"]]\".format(attribute),\n file_ptr, depth)\n fwrite(\"for row in relevant_data.itertuples(index=False):\",\n file_ptr, depth)\n # For loop\n depth += 1\n fwrite(\"if row.{0} > 0:\".format(attribute), file_ptr, depth)\n # Conditional\n if not inverse_relation:\n fwrite(\"print(\\\"1\\\")\", file_ptr, tabs=depth+1)\n fwrite(\"else:\", file_ptr, depth)\n fwrite(\"print(\\\"0\\\")\", file_ptr, tabs=depth+1, newlines=2)\n else:\n fwrite(\"print(\\\"0\\\")\", file_ptr, tabs=depth+1)\n fwrite(\"else:\", file_ptr, depth)\n fwrite(\"print(\\\"1\\\")\", file_ptr, tabs=depth+1, newlines=2)\n # End Conditional\n # End of function\n depth -= 2",
"def _write_header(self):\n # The last line here must not have a trailing \\n\n self.buffer.write_line(\"def template(self, __io, model=None):\")\n self.buffer.scope_line(\"view = self\")"
] |
[
"0.57407814",
"0.56480074",
"0.5549426",
"0.54974544",
"0.546188",
"0.5447185",
"0.5371064",
"0.53704304",
"0.53466254",
"0.5297166",
"0.5248141",
"0.52199423",
"0.5143805",
"0.5138736",
"0.51352113",
"0.51151234",
"0.51053274",
"0.50510335",
"0.5038631",
"0.5025414",
"0.49988407",
"0.49984235",
"0.49941206",
"0.49647528",
"0.4964203",
"0.49488488",
"0.49359056",
"0.4933329",
"0.49320728",
"0.49298093"
] |
0.7371441
|
0
|
Check if a file exists on FTP
|
def file_exist() -> bool:
pass
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def file_exists(host, fqpath):\n command = \"ls -ld %s\" % fqpath\n rcode, _, rerr = g.run(host, command)\n if rcode == 0:\n return True\n\n g.log.error('File does not exist: %s', rerr)\n return False",
"def file_exist(file_url):\n try:\n response = requests.head(file_url)\n if 200 <= response.status_code < 300:\n return True\n return False\n except ConnectionError:\n return False",
"def check_file_exist(self):\n return False",
"def is_file_exists(self):\n pass",
"def does_file_exist(self, fn):\n if True:\n print(f\"-=- {fn} found.\")\n return True\n else:\n print(f\"-!- {fn} not found. Try again\")\n return False",
"def _existFile(f):\n\treturn os.path.isfile(f)",
"def test_files(host, f):\n assert host.file(f).exists",
"def testFtpUrl(self):\n try:\n remoteLocator = self.__ftpFileUrl\n # fn = self.__fileU.getFileName(remoteLocator)\n ok = self.__fileU.isLocal(remoteLocator)\n self.assertFalse(ok)\n #\n dirPath = os.path.join(self.__workPath, \"chem_comp_models\")\n lPath = os.path.join(dirPath, self.__fileU.getFileName(self.__ftpFileUrl))\n ok = self.__fileU.get(remoteLocator, lPath)\n self.assertTrue(ok)\n ok = self.__fileU.exists(lPath)\n self.assertTrue(ok)\n ok = self.__fileU.isLocal(lPath)\n self.assertTrue(ok)\n tPath = self.__fileU.getFilePath(lPath)\n self.assertEqual(lPath, tPath)\n fp = self.__fileU.uncompress(lPath, outputDir=dirPath)\n ok = fp.endswith(\"chem_comp_model.cif\")\n self.assertTrue(ok)\n\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()",
"def exists_file(f):\n if os.path.exists(f):\n return True\n return False",
"def file_exists(self):\r\n if os.path.exists(self.file_path):\r\n return True\r\n else:\r\n return False",
"def file_exists(file_ref, config):\n find_fn = _find_file(config)\n if _is_remote(file_ref):\n _, file_ref = _get_id_fname(file_ref)\n return find_fn(file_ref)",
"def check_remote_file_exists(url, login=None, password=None):\r\n credentials = None\r\n if login and password:\r\n credentials = login, password\r\n\r\n response = requests.get(url,\r\n stream=True,\r\n verify=False,\r\n auth=credentials)\r\n if response.status_code >= 400 or response.status_code < 200:\r\n raise Exception('Returned wrong status code: {}'.format(response.status_code))\r\n\r\n response.close()",
"def file_exist(file_path):\n return os.path.isfile(file_path)",
"def file_exists(filename: str):\n if osp.exists(filename) is True:\n return True\n else:\n return False",
"def file_exists(self):\n if os.path.isfile(self.file_name):\n return True\n else:\n return False",
"def FileExists(file):\n return os.path.exists(file)",
"def check_file_existence(self, filename):\n try:\n for sample in TimeoutingSampler(\n config.GAHOOKS_TIMEOUT, 1, self.machine.fs.exists,\n \"/tmp/%s\" % filename\n ):\n if sample:\n return True\n except APITimeout:\n return False",
"def file_exists(file_path):\n\n return Path(file_path).is_file()",
"def file_exists(fpath):\n # @todo - Use any()\n \n if os.path.exists(fpath) and os.path.isfile(fpath) and os.stat(fpath).st_size != 0:\n return True\n else: \n return False",
"def check_for_file(self):\n if self.task.file_name in os.listdir(self.task.file_storage):\n return True\n return False",
"def file_exists(path):\n return os.path.exists(path)",
"def _rexists(sftp, path):\n try:\n sftp.stat(path)\n except OSError as e:\n if e[0] == 2:\n return False\n raise\n else:\n return True",
"def exists(self, path):",
"def file_copy_remote_exists(self, src, dest=None, file_system=None):\n self.enable()\n if file_system is None:\n file_system = self._get_file_system()\n\n file_copy = self._file_copy_instance(src, dest, file_system=file_system)\n if file_copy.check_file_exists() and file_copy.compare_md5():\n log.debug(\"Host %s: File %s already exists on remote.\", self.host, src)\n return True\n\n log.debug(\"Host %s: File %s does not already exist on remote.\", self.host, src)\n return False",
"def FileExists(DSLModel, table, filename):\n fullpath=\"%s%s%s%s%s\" % (DSLModel['GENERAL']['target_folder'], os.sep,\n table['name'], os.sep, filename)\n return os.access(fullpath, os.F_OK)",
"def _does_file_exist(file_path):\n return os.path.exists(file_path) and os.path.getsize(file_path) > 0",
"def exist(self):\n return self.file_path.exists()",
"def file_exists(file_path):\r\n return exists(file_path) and isfile(file_path)",
"def check_file(file_path):\n if not os.path.exists(file_path):\n os.makedirs(file_path, exist_ok=True)\n \n file1 = open(file_path + \"/success.txt\",\"w\")#write mode \n file1.write(\"Succes Download\") \n file1.close()\n return os.path.isfile(file_path + \"/success.txt\")",
"def isFileExist(file_name):\n return os.path.exists(file_name)"
] |
[
"0.74532896",
"0.7116993",
"0.7072668",
"0.7024049",
"0.69683254",
"0.69101447",
"0.67568624",
"0.67281693",
"0.67112243",
"0.66904974",
"0.6664261",
"0.6625451",
"0.66163576",
"0.66149956",
"0.6562228",
"0.6517187",
"0.65167314",
"0.65086555",
"0.6495261",
"0.6487371",
"0.64707136",
"0.6448078",
"0.64351434",
"0.64328194",
"0.64215386",
"0.63907444",
"0.6376545",
"0.6368954",
"0.6359755",
"0.6357175"
] |
0.7150851
|
1
|
Plot the background of the regime diagram following Fig. 3 of Belcher et al., 2012
|
def plot_regime_diagram_background_BG12(
ax=None,
):
if ax is None:
ax = plt.gca()
# range of power
xpr = [-1, 1]
ypr = [-3, 3]
# range
xlims = [10**i for i in xpr]
ylims = [10**i for i in ypr]
# size of x and y
nx = 500
ny = 500
xx = np.logspace(xpr[0], xpr[1], nx)
yy = np.logspace(ypr[0], ypr[1], ny)
zz1 = np.zeros([nx, ny])
zz2 = np.zeros([nx, ny])
zz3 = np.zeros([nx, ny])
for i in np.arange(nx):
for j in np.arange(ny):
zz1[i,j] = 2*(1-np.exp(-0.5*xx[i]))
zz2[i,j] = 0.22*xx[i]**(-2)
zz3[i,j] = 0.3*xx[i]**(-2)*yy[j]
zz = zz1 + zz2 + zz3
ax.contourf(xx, yy, np.transpose(np.log10(zz)),
levels=[-0.1, 0, 0.1, 0.25, 0.5, 1, 2, 3, 4],
cmap='summer', extend='both')
ax.contour(xx, yy, np.transpose(np.log10(zz)),
levels=[-0.1, 0, 0.1, 0.25, 0.5, 1, 2, 3, 4],
colors='darkgray')
ax.contour(xx, yy, np.transpose(zz1/zz), levels=0.9, colors='k',
linestyles='-', linewidths=2)
ax.contour(xx, yy, np.transpose(zz2/zz), levels=0.9, colors='k',
linestyles='-', linewidths=2)
ax.contour(xx, yy, np.transpose(zz3/zz), levels=0.9, colors='k',
linestyles='-', linewidths=2)
ax.set_xlim(xlims)
ax.set_ylim(ylims)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlabel('La$_t$')
ax.set_ylabel('$h/L_L$')
ax.set_aspect(aspect=1/3)
ax.text(0.85, 3e-3, '0', color='k', fontsize=8, rotation=-90)
ax.text(1.6, 1e-2, '0.1', color='k', fontsize=8, rotation=-90)
ax.text(3.8, 1e-1, '0.25', color='k', fontsize=8, rotation=-90)
ax.text(4, 1e2, '0.5', color='k', fontsize=8, rotation=33)
ax.text(3.2, 3e2, '1', color='k', fontsize=8, rotation=36)
ax.text(0.53, 1e2, '2', color='k', fontsize=8, rotation=38)
ax.text(0.3, 3.1e2, '3', color='k', fontsize=8, rotation=39)
ax.text(0.12, 5e2, '4', color='k', fontsize=8, rotation=40)
ax.text(0.11, 4e-3, 'Langmuir', bbox=dict(boxstyle="square",ec='k',fc='w'))
ax.text(3, 4e-3, 'Shear', bbox=dict(boxstyle="square",ec='k',fc='w'))
ax.text(0.13, 1e2, 'Convection', bbox=dict(boxstyle="square",ec='k',fc='w'))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def plot_regime_diagram_background_L19(\n ax=None,\n ):\n if ax is None:\n ax = plt.gca()\n # range of power\n xpr = [-1, 1]\n ypr = [-3, 3]\n # range\n xlims = [10**i for i in xpr]\n ylims = [10**i for i in ypr]\n # background following Fig. 3 of Belcher et al., 2012\n nx = 500\n ny = 500\n xx = np.logspace(xpr[0], xpr[1], nx)\n yy = np.logspace(ypr[0], ypr[1], ny)\n zz1 = np.zeros([nx, ny])\n zz2 = np.zeros([nx, ny])\n zz3 = np.zeros([nx, ny])\n for i in np.arange(nx):\n for j in np.arange(ny):\n zz1[i,j] = 2*(1-np.exp(-0.5*xx[i]))\n zz2[i,j] = 0.22*xx[i]**(-2)\n zz3[i,j] = 0.3*xx[i]**(-2)*yy[j]\n zz = zz1 + zz2 + zz3\n\n rz_ST = zz1/zz\n rz_LT = zz2/zz\n rz_CT = zz3/zz\n fr = np.ones(zz.shape) * 7\n cfrac = 0.25\n fr[(rz_LT<cfrac) & (rz_CT<cfrac)] = 1\n fr[(rz_ST<cfrac) & (rz_CT<cfrac)] = 2\n fr[(rz_ST<cfrac) & (rz_LT<cfrac)] = 3\n fr[(rz_ST>=cfrac) & (rz_LT>=cfrac) & (rz_CT<cfrac)] = 4\n fr[(rz_ST>=cfrac) & (rz_CT>=cfrac) & (rz_LT<cfrac)] = 5\n fr[(rz_LT>=cfrac) & (rz_CT>=cfrac) & (rz_ST<cfrac)] = 6\n color_list = ['firebrick','forestgreen','royalblue','gold','orchid','turquoise','w']\n cb_ticks = [0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5]\n cmap, norm = from_levels_and_colors(cb_ticks, color_list)\n ax.contourf(xx, yy, np.transpose(fr), cmap=cmap, norm=norm)\n ax.contour(xx, yy, np.transpose(fr), colors='darkgray')\n ax.set_xlim(xlims)\n ax.set_ylim(ylims)\n ax.set_xscale('log')\n ax.set_yscale('log')\n ax.set_xlabel('La$_t$')\n ax.set_ylabel('$h/L_L$')\n ax.set_aspect(aspect=1/3)\n ax.text(0.11, 4e-3, 'Langmuir', bbox=dict(boxstyle=\"square\",ec='k',fc='w'))\n ax.text(3, 4e-3, 'Shear', bbox=dict(boxstyle=\"square\",ec='k',fc='w'))\n ax.text(0.13, 1e2, 'Convection', bbox=dict(boxstyle=\"square\",ec='k',fc='w'))",
"def setAxisBackground(idx=-1):\n dislin.axsbgd(idx)",
"def relative_src_bg(self):\n fig, ax = plt.subplots()\n \n for oneF in ['extracted_flux','extracted_bg_only']:\n wave, f = self.result['1d'][oneF]\n ax.plot(wave,f,label=oneF)\n ax.set_xlabel('Wavelength ($\\mu$m)')\n ax.set_ylabel('Extracted Flux')\n ax.legend()\n \n fig.show()",
"def plot_show():\r\n plt.gca().patch.set_facecolor('#0e0a16')\r\n plt.axis('equal')\r\n plt.show()",
"def plot_pretty():\n\n ts, ys, lin_model, K, us, dt_control, biass, end_time = simulate()\n plt.style.use('seaborn-deep')\n\n black = '#2B2B2D'\n red = '#E90039'\n orange = '#FF1800'\n white = '#FFFFFF'\n yellow = '#FF9900'\n\n plt.figure(figsize=(12.8, 9.6))\n plt.rcParams.update({'font.size': 16, 'text.color': white, 'axes.labelcolor': white,\n 'axes.edgecolor': white, 'xtick.color': white, 'ytick.color': white})\n\n plt.gcf().set_facecolor(black)\n\n plt.subplot(2, 3, 1)\n plt.plot(ts, ys[:, 2], color=orange)\n plt.axhline(lin_model.yd2n(K.ysp)[1], color=white)\n plt.title(r'$C_{FA}$')\n plt.xlim([0, ts[-1]])\n plt.gca().set_facecolor(black)\n\n plt.subplot(2, 3, 2)\n plt.plot(ts, ys[:, 0], color=orange)\n plt.axhline(lin_model.yd2n(K.ysp)[0], color=white)\n plt.title(r'$C_{G}$')\n plt.xlim([0, ts[-1]])\n plt.gca().set_facecolor(black)\n\n plt.subplot(2, 3, 3)\n plt.plot(ts, ys[:, 3], color=orange)\n plt.title(r'$C_{E}$')\n plt.xlim([0, ts[-1]])\n plt.gca().set_facecolor(black)\n\n plt.subplot(2, 3, 4)\n plt.plot(ts, us[:, lin_model.inputs[1]], color=red)\n plt.title(r'$F_{m, in}$')\n plt.xlim([0, ts[-1]])\n plt.gca().set_facecolor(black)\n\n plt.subplot(2, 3, 5)\n plt.plot(ts, us[:, lin_model.inputs[0]], color=red)\n plt.title(r'$F_{G, in}$')\n plt.xlim([0, ts[-1]])\n plt.gca().set_facecolor(black)\n\n plt.subplot(2, 3, 6)\n plt.plot(\n numpy.arange(dt_control, end_time, dt_control),\n biass[:, 1],\n color=red\n )\n plt.plot(\n numpy.arange(dt_control, end_time, dt_control),\n biass[:, 0],\n color=yellow\n )\n plt.legend([r'$C_{FA}$', r'$C_G$'], facecolor=black)\n plt.title('bias')\n plt.xlim([0, ts[-1]])\n plt.gca().set_facecolor(black)\n\n # plt.suptitle('Closedloop bioreactor without noise')\n plt.tight_layout(rect=[0, 0.03, 1, 0.95])\n plt.savefig('no_noise_pretty.png', transparent=True)\n plt.show()",
"def sn2003bg(ax, col, legend):\n nu = 8.46E9\n d = 6.056450393620008e+25\n\n t = np.array(\n [10, 12, 23, 35, 48, 58, 63, 73, 85, 91, 115, 129,\n 132, 142, 157, 161, 181, 201, 214, 227, 242, 255,\n 266, 285, 300, 326, 337, 351, 368, 405, 410, 424,\n 434, 435, 493, 533, 632, 702, 756, 820, 902, 978])\n f = np.array(\n [2.51, 3.86, 12.19, 24.72, 40.34, 51.72, 49.64, 46.20,\n 38.638, 33.85, 45.74, 53.94, 54.27, 54.83, 48.43,\n 47.43, 35.76, 31.35, 28.67, 27.38, 24.57, 22.30,\n 21.67, 21.31, 20.88, 20.33, 19.85, 18.84, 17.14,\n 14.61, 14.49, 14.16, 13.25, 13.08, 10.04, 8.92,\n 6.23, 6.18, 4.62, 3.93, 4.69, 4.48])\n lum = plot_line(ax, d, t, nu*f, 'SN2003bg', 'SN', col, legend, zorder=0)\n #ax.text(t[0]/1.05, lum[0], 'SN2003bg', fontsize=11,\n # verticalalignment='center',\n # horizontalalignment='right')",
"def main_background():\n surface.fill(COLOR_GRAY)",
"def draw(self):\n self.draw_occupied_cells()\n self.draw_open_cells()\n self.draw_edges()\n plt.xlabel(\"Red\")\n plt.ylabel(\"Black\")\n plt.title('Hex')\n self.camera.snap()",
"def plot_fig43b_spreading_yeast():\n fig, ax = plt.subplots(figsize=(default_width, default_height))\n y = np.loadtxt('csvs/ratio_lp50a10ad5hp0.0067379_yeast.txt')\n links35 = np.tile(35, 44)\n Rlinks = np.array([47, 21, 18, 15, 20, 17])\n Llinks = np.array([245, 30, 26, 15, 23, 35])\n #links to right of methylation site (50 in total)\n Rlinks = np.concatenate((Rlinks, links35))\n #links to left of methylation site (50 in total)\n Llinks = np.concatenate((Llinks, links35))\n #cumulative chain length including burried basepairs\n unwrap = 0\n #plot as positive distance from TSS in bp\n ldna_Rlinks = convert.genomic_length_from_links_unwraps(Rlinks, unwraps=unwrap) #max WLC chain length in bp\n #plot as negative distance from TSS in bp\n ldna_Llinks = -1*convert.genomic_length_from_links_unwraps(Llinks, unwraps=unwrap) #max WLC chain length in bp\n x = np.concatenate((ldna_Llinks[::-1], ldna_Rlinks))\n ax.plot(x, y, color='k')\n ax.set_xlabel(r'Distance from TSS (bp)')\n ax.set_ylabel('Relative enrichment')\n #plot inset using Crabtree data\n axins = inset_axes(ax, width=\"40%\", height=\"40%\", \n bbox_to_anchor=(.1, .1, .8, .8),\n bbox_transform=ax.transAxes, loc=2)\n xcrabtree = np.array([-10256, -3077, -2241, -1485, -739, -309, -169, 489, 1746, 3087, 4400, 5300])\n REday0 = np.array([0.27, 0.13, 0.46, 0.12, 0.17, 0.33, 0.33, 0.31, 0.32, 0.27, 0.21, 0.33])\n REday5 = np.array([0.19, 0.40, 0.89, 1.55, 0.97, 1.25, 2.25, 3.57, 3.03, 2.09, 1.12, 0.14])\n ycrabtree = REday5/np.mean(REday0)\n axins.plot(xcrabtree, ycrabtree)\n axins.set_xlim([-10000, 10000])\n ax.set_xlim([-10000, 10000])\n #axins.set_ylabel('Relative enrichment', fontsize=8)\n #axins.set_xlabel('Distance from TSS (bp)', fontsize=8)\n plt.subplots_adjust(left=0.16, bottom=0.19, top=0.98, right=0.96)\n plt.savefig(f'plots/thesis/fig43b_spreading-TSS-yeast.pdf', bbox_inches='tight')",
"def show(im,fig= None): #X\n im = im.copy()\n if len(im.shape)==1 or im.shape[1]==1:\n im = X2patch(im)\n im[im<=DEAD]=-0.5\n if fig is None:\n plt.figure()\n fig = plt.imshow(hsv_to_rgb(im+0.5))\n fig.set_data(hsv_to_rgb(im+0.5))\n plt.draw()\n plt.pause(0.001)\n return fig",
"def plot(self):\n self.fig = plt.figure('black hole')\n self.fig.clf() #clear the graph to avoir superposing data from the same set (can be deactivated if need to superpose)\n self.ax = plt.subplot()\n\n if self.img2 is not None:\n self.ax.imshow(self.img2)\n else:\n print(\"No black hole deformation in the memory, displayed the original image instead.\")\n self.ax.imshow(self.img_debut)\n\n self.fig.canvas.set_window_title('Black hole')\n self.ax.set_title(\"scrool to zoom in or out \\nright click to add an offset in the background \\nleft click to refresh image \\n close the option windows to stop the program\")\n self.fig.canvas.mpl_connect('scroll_event', self.onscroll)\n self.fig.canvas.mpl_connect('button_press_event', self.onclick)\n self.fig.canvas.mpl_connect('axes_leave_event', self.disconnect)\n self.fig.canvas.mpl_connect('axes_enter_event', self.connect)\n\n self.draw()",
"def draw_background(self, t):\n pass",
"def plot(self):\n\t\tself.plotOfHeatingCurrent().plot()",
"def sn2007bg(ax, col, legend):\n nu = 8.46E9\n d = Planck15.luminosity_distance(z=0.0346).cgs.value\n t = np.array(\n [13.8, 19.2, 26.1, 30.9, 41.3, 55.9, 66.8, 81.8, 98.8, 124, \n 144, 159.8, 189.9, 214.9, 250.9, 286.8, 314.8, 368.8, \n 386.8, 419.9, 566.9, 623.8, 720.8, 775.8, 863.8])\n f = np.array(\n [480, 753, 804, 728, 1257, 1490, 1390, 1325, 1131, 957, \n 621, 316, 379, 404, 783, 1669, 2097, 2200, \n 2852, 3344, 3897, 3891, 3842, 3641, 3408]) * 1E-3\n lum = plot_line(ax, d, t, nu*f, 'SN2007bg', 'SN', col, legend)\n ax.text(t[0]/1.05, lum[0], 'SN2007bg', fontsize=11,\n verticalalignment='bottom',\n horizontalalignment='right', zorder=0)",
"def plot(self, data, background, scale=(5, 99)):\n # find the minimum and maximum value of plotting\n vmin = np.percentile(data, scale[0])\n vmax = np.percentile(data, scale[1])\n\n cax1 = self.ax1.imshow(data, cmap='gray', vmin=vmin, vmax=vmax,\n origin='lower')\n cax2 = self.ax2.imshow(background, cmap='viridis',\n origin='lower')\n cs = self.ax2.contour(background, colors='r', linewidths=0.5)\n self.ax2.clabel(cs, inline=1, fontsize=7, use_clabeltext=True)\n self.colorbar(cax1, cax=self.ax1c)\n self.colorbar(cax2, cax=self.ax2c)\n for ax in [self.ax1, self.ax2]:\n ax.set_xlabel('X (pixel)')\n ax.set_ylabel('Y (pixel)')\n ax.xaxis.set_major_locator(tck.MultipleLocator(500))\n ax.xaxis.set_minor_locator(tck.MultipleLocator(100))\n ax.yaxis.set_major_locator(tck.MultipleLocator(500))\n ax.yaxis.set_minor_locator(tck.MultipleLocator(100))",
"def _setFig(self):\n self.p.background_fill_color = grey['light']\n self.p.xgrid.grid_line_color = None\n self.p.ygrid.grid_line_color = None\n self.p.ygrid.grid_line_dash = 'dotted'\n self.p.ygrid.grid_line_dash = 'dotted'\n\n self.p.xgrid.minor_grid_line_color = grey['median']\n self.p.ygrid.minor_grid_line_color = grey['median']\n self.p.xgrid.minor_grid_line_dash = 'dotted'\n self.p.ygrid.minor_grid_line_dash = 'dotted'\n\n self.p.xaxis.axis_label = \"tsne_feature_0\"\n self.p.yaxis.axis_label = \"tsne_feature_1\"",
"def ShowLongitBackground(spectra,spectraUp,spectraDown,spectraAv,all_titles,all_filt,object_name,NBIMGPERROW=2,right_edge=1800):\n NBSPEC=len(spectra)\n MAXIMGROW=(NBSPEC-1) / NBIMGPERROW +1\n\n f, axarr = plt.subplots(MAXIMGROW,NBIMGPERROW,figsize=(25,5*MAXIMGROW))\n f.tight_layout()\n for index in np.arange(0,NBSPEC):\n ix=index%NBIMGPERROW\n iy=index/NBIMGPERROW\n axarr[iy,ix].plot(spectra[index],'r-')\n axarr[iy,ix].plot(spectraUp[index],'b-')\n axarr[iy,ix].plot(spectraDown[index],'g-')\n axarr[iy,ix].plot(spectraAv[index],'m-')\n thetitle=\"{}) : {} \".format(index,all_titles[index])\n axarr[iy,ix].set_title(thetitle)\n axarr[iy,ix].grid(True)\n axarr[iy,ix].set_ylim(0.,spectra[index][:right_edge].max()*1.2)\n axarr[iy,ix].annotate(all_filt[index],xy=(0.05,0.9),xytext=(0.05,0.9),verticalalignment='top', horizontalalignment='left',color='blue',fontweight='bold', fontsize=20, xycoords='axes fraction')\n title='Longitudinal background Up/Down'.format(object_name)\n plt.suptitle(title,size=16)",
"def plot_2018_board():\n top_left_corner_border = plt.Polygon([[0,823], [91,823], [0,747]], fill='k', edgecolor='k')\n bottom_left_corner_border = plt.Polygon([[0,0], [0,76], [91,0]], fill='k', edgecolor='k')\n plt.gca().add_line(top_left_corner_border)\n plt.gca().add_line(bottom_left_corner_border)\n\n # Auto Line\n auto_line = plt.Line2D((305, 305), (0, 823), lw=2.5)\n plt.gca().add_line(auto_line)\n\n # Exchange Zone\n exchange_zone = plt.Rectangle((0, 442), 91, 122, fc='r')\n plt.gca().add_patch(exchange_zone)\n\n # Power Cube Zone\n power_cube_zone = plt.Rectangle((249, 354), 107, 114, fc='r')\n plt.gca().add_patch(power_cube_zone)\n\n # Switch Zone\n switch_zone = plt.Rectangle((356, 216), 142, 390, fc='grey')\n plt.gca().add_patch(switch_zone)\n\n # Power Cubes next to Switch Zone\n for i in range(0,6,1):\n cube = plt.Rectangle((498, 216+i*(33+38.4)), 33, 33, fc='yellow')\n plt.gca().add_patch(cube)\n\n # Null territory\n null_territory_top = plt.Polygon([[731.5, 581], [731.5, 823], [823, 823], [823, 581]], fill=None, edgecolor='k')\n null_territory_bottom = plt.Polygon([[731.5, 0], [731.5, 242], [823, 242], [823, 0]], fill=None, edgecolor='k')\n plt.gca().add_line(null_territory_top)\n plt.gca().add_line(null_territory_bottom)\n\n # Scale\n scale = plt.Rectangle((653.5, 242), 823-653.5, 581-242, fc='black')\n plt.gca().add_patch(scale)",
"def grayplot_NH(QC,stage):\n\n\n #set some constants\n numpts=QC['GMtcs'].shape[1] #number of timepoints\n rightsignallim = np.arange(-20,21,20) #GS, main plot signal limits - 2% assuming mode 1000 normalization\n leftsignallim = np.arange(0,21,10) #DVars limits\n rylimz=[np.min(rightsignallim),np.max(rightsignallim)]\n lylimz=[np.min(leftsignallim),np.max(leftsignallim)]\n FDmult = 10 #multiplier to FD to get in range of DVars values\n FDthresh = 0.2 #FD threshold to mark frame for scrubbing (use 0.1 for filtered FD)\n\n #compute data quality metrics -- CG: compute by hand to better understand (separated here for practice)\n [mvm,ddt_mvm,FD] = compute_FD(QC['MVM'])\n DVars = compute_DVARS(QC['GMtcs'][:,:,stage]) # compute DVARs for a particular processing stage\n GS = compute_GS(QC['GMtcs'][:,:,stage]) # compute global signal for a particular processing stage\n\n #create plot\n fig = plt.figure(figsize=(10,10),constrained_layout = True)\n gs = GridSpec(9,1,figure=fig)\n\n #plot individual mvm params\n ax1 = fig.add_subplot(gs[0:2])\n pointindex = np.arange(1,numpts+1)\n plt.plot(pointindex,mvm)\n\n plt.xlim([0, numpts])\n plt.ylim([-1.5, 1.5])\n plt.ylabel('mvm-XYZPYR')\n\n #Next, plot FD, DVARS and GS on the same plot\n ax2a = fig.add_subplot(gs[2:4])\n ax2b = ax2a.twinx()\n ax2a.plot(pointindex,DVars,color=[0,0,1],alpha=0.5)\n ax2b.plot(pointindex,GS,color=[0,1,0],alpha=0.5)\n ax2a.plot(pointindex,FD*FDmult,color=[1,0,0],alpha=0.5)\n ax2a.hlines(FDthresh*FDmult,pointindex[0],pointindex[-1],'k',alpha=0.5)\n \n plt.xlim([0, numpts])\n ax2a.set_ylim(lylimz)\n ax2a.set_yticks(leftsignallim)\n ax2b.set_ylim(rylimz)\n ax2b.set_yticks(rightsignallim)\n ax2a.set_ylabel('R:FD*' + str(FDmult) +' B:DV G:GS')\n\n #next plot gray matter signal\n ax3 = fig.add_subplot(gs[4:8])\n new_GMtcs = QC['GMtcs'][:,:,stage]\n plt.imshow(new_GMtcs,cmap='gray',vmin=-20,vmax=20,aspect='auto') #default: showing 2% signal on mode 1000 norm\n plt.ylabel('GRAY')\n\n #finally, plot WM and CSF ts\n ax4 = fig.add_subplot(gs[8:])\n new_WMCSF = np.vstack((QC['WMtcs'][:,:,stage],QC['CSFtcs'][:,:,stage]))\n plt.imshow(new_WMCSF,cmap='gray',vmin=-20,vmax=20,aspect='auto')\n plt.ylabel('WM CSF')\n plt.xlabel('frames')\n\n return fig",
"def background(self):\n sun = graphics.Circle(graphics.Point(200, 310), 50)\n sun.setFill('yellow')\n sun.draw(self.win)\n \n earth = graphics.Circle(graphics.Point(40, 250), 30)\n earth.setFill('blue')\n earth.draw(self.win)\n continent = graphics.Circle(graphics.Point(30, 265), 10)\n continent.setFill('green')\n continent.draw(self.win)\n cont_2 = graphics.Circle(graphics.Point(30, 235), 10)\n cont_2.setFill('green')\n cont_2.draw(self.win)\n cont_3 = graphics.Circle(graphics.Point(55, 245), 10)\n cont_3.setFill('green')\n cont_3.draw(self.win)\n \n stars = graphics.Circle(graphics.Point(250, 250), 5)\n stars.setFill('white')\n stars.draw(self.win)\n star1 = graphics.Circle(graphics.Point(100, 250), 5)\n star1.setFill('white')\n star1.draw(self.win)\n star2 = graphics.Circle(graphics.Point(150, 150), 5)\n star2.setFill('white')\n star2.draw(self.win)\n star3 = graphics.Circle(graphics.Point(50, 100), 5)\n star3.setFill('white')\n star3.draw(self.win)\n star3 = graphics.Circle(graphics.Point(100, 50), 5)\n star3.setFill('white')\n star3.draw(self.win)\n star4 = graphics.Circle(graphics.Point(250, 80), 5)\n star4.setFill('white')\n star4.draw(self.win)\n star4 = graphics.Circle(graphics.Point(200, 60), 5)\n star4.setFill('white')\n star4.draw(self.win)",
"def plot(\n ecg, \n sample_rate = 500, \n title = 'ECG 12', \n lead_index = lead_index, \n lead_order = None,\n style = None,\n columns = 2,\n row_height = 6,\n show_lead_name = True,\n show_grid = True,\n show_separate_line = True,\n ):\n\n if not lead_order:\n lead_order = list(range(0,len(ecg)))\n secs = len(ecg[0])/sample_rate\n leads = len(lead_order)\n rows = ceil(leads/columns)\n # display_factor = 2.5\n display_factor = 1\n line_width = 0.5\n fig, ax = plt.subplots(figsize=(secs*columns * display_factor, rows * row_height / 5 * display_factor))\n display_factor = display_factor ** 0.5\n fig.subplots_adjust(\n hspace = 0, \n wspace = 0,\n left = 0, # the left side of the subplots of the figure\n right = 1, # the right side of the subplots of the figure\n bottom = 0, # the bottom of the subplots of the figure\n top = 1\n )\n\n fig.suptitle(title)\n\n x_min = 0\n x_max = columns*secs\n y_min = row_height/4 - (rows/2)*row_height\n y_max = row_height/4\n\n if (style == 'bw'):\n color_major = (0.4,0.4,0.4)\n color_minor = (0.75, 0.75, 0.75)\n color_line = (0,0,0)\n else:\n color_major = (1,0,0)\n color_minor = (1, 0.7, 0.7)\n color_line = (0,0,0.7)\n\n if(show_grid):\n ax.set_xticks(np.arange(x_min,x_max,0.2)) \n ax.set_yticks(np.arange(y_min,y_max,0.5))\n\n ax.minorticks_on()\n \n ax.xaxis.set_minor_locator(AutoMinorLocator(5))\n\n ax.grid(which='major', linestyle='-', linewidth=0.5 * display_factor, color=color_major)\n ax.grid(which='minor', linestyle='-', linewidth=0.5 * display_factor, color=color_minor)\n\n ax.set_ylim(y_min,y_max)\n ax.set_xlim(x_min,x_max)\n\n\n for c in range(0, columns):\n for i in range(0, rows):\n if (c * rows + i < leads):\n y_offset = -(row_height/2) * ceil(i%rows)\n # if (y_offset < -5):\n # y_offset = y_offset + 0.25\n\n x_offset = 0\n if(c > 0):\n x_offset = secs * c\n if(show_separate_line):\n ax.plot([x_offset, x_offset], [ecg[t_lead][0] + y_offset - 0.3, ecg[t_lead][0] + y_offset + 0.3], linewidth=line_width * display_factor, color=color_line)\n\n \n t_lead = lead_order[c * rows + i]\n \n step = 1.0/sample_rate\n if(show_lead_name):\n ax.text(x_offset + 0.07, y_offset - 0.5, lead_index[t_lead], fontsize=9 * display_factor)\n ax.plot(\n np.arange(0, len(ecg[t_lead])*step, step) + x_offset, \n ecg[t_lead] + y_offset,\n linewidth=line_width * display_factor, \n color=color_line\n )",
"def draw_background(self):\n backgrounds = {\n \"forest\": (38, 106, 46),\n \"desert\": (194, 178, 128)\n }\n self.background_surface.fill(backgrounds[self.geography])",
"def makePlot(timeStamp):\n\n #-------------------------------------------------------------------------\n # Create figure and axes\n #-------------------------------------------------------------------------\n\n width = 12 # inches\n height = 8 # inches\n fig = plt.figure(figsize=(width, height))\n\n # We'll use gridspec to create axes in rectangular 6-by-5 lattice\n import matplotlib.gridspec as gridspec\n nrows = 6\n ncols = 5\n Grid = gridspec.GridSpec(nrows, ncols)\n\n # axis for elevation time series\n axElev = fig.add_subplot(Grid[:2, :2]) # first 2 rows, first 2 columns\n # axis for slab\n axSlab = fig.add_subplot(Grid[:2, 2:]) # first 2 rows, columns > 2\n # and the transects\n axTran1 = fig.add_subplot(Grid[2:4, :]) # rows 2,3,4, all columns\n # rows 5,6,7, all columns, share x/y axis with previous (sets same ticks\n # etc)\n axTran2 = fig.add_subplot(Grid[4:6, :], sharex=axTran1, sharey=axTran1)\n\n # gridspec allows to tune the spacing between plots (unit is fraction of\n # font size)\n boundary_pad = 3.5\n horizontal_pad = 0.2\n vertical_pad = 1.0\n # figure area left,bottom,right,top in normalized coordinates [0,1]\n bounds = [0, 0, 1, 1]\n Grid.tight_layout(\n fig,\n pad=boundary_pad,\n w_pad=horizontal_pad,\n h_pad=vertical_pad,\n rect=bounds)\n\n #-------------------------------------------------------------------------\n # Create plots\n #-------------------------------------------------------------------------\n\n # for all avaiable colormaps see ( '_r' reverses the colormap )\n # http://matplotlib.org/examples/color/colormaps_reference.html\n colormap = plt.get_cmap('Spectral_r')\n colormap_kine = plt.get_cmap('gist_heat')\n\n # slab\n salt_clim = [0, 32]\n ncontours = 16\n # bouding box for slab [xmin,xmax,ymin,ymax] in model x,y coordinates\n estuarybbox = [330000, 360000, 284500, 297500]\n dia = slabSnapshotDC(\n clabel='Salinity',\n unit='psu',\n clim=salt_clim,\n cmap=colormap)\n dia.setAxes(axSlab)\n dia.addSample(slabDC, timeStamp=timeStamp, plotType='contourf',\n bbox=estuarybbox, N=ncontours)\n # overrides default format for colorbar floats\n dia.showColorBar(format='%.2g')\n #dia.addTitle('in case you want a custom title')\n # get transect (x,y) coordinates from the transectDC\n transectXYCoords = generateTransectFromDataContainer(transectDC_salt, 0)[4]\n # plot transect on the map (thin black on thick white)\n dia.addTransectMarker(transectXYCoords[:, 0], transectXYCoords[:, 1],\n color='w', linewidth=2.0)\n dia.addTransectMarker(transectXYCoords[:, 0], transectXYCoords[:, 1],\n color='k', linewidth=1.0)\n # plot station markers\n for station in stationsToPlot:\n staX = staFileObj.getX(station)\n staY = staFileObj.getY(station)\n dia.addStationMarker(\n staX,\n staY,\n label=station,\n printLabel=True,\n marker='*')\n # add text to plot. 
x,y are in normalized axis coordinates [0,1]\n dia.ax.text(0.05, 0.98, 'custom text', fontsize=fontsize,\n verticalalignment='top', horizontalalignment='left',\n transform=dia.ax.transAxes)\n\n # elevation time series\n # define the time range to plot\n elevStartTime = datetime.datetime(2012, 5, 4, 0, 0)\n elevEndTime = datetime.datetime(2012, 5, 5, 0, 15)\n elevMeanTime = elevStartTime + (elevEndTime - elevStartTime) / 2\n elevLim = [-1.5, 2.5]\n dia = timeSeriesPlotDC2(\n xlabel=elevMeanTime.strftime('%Y %b %d'),\n ylim=elevLim)\n dia.setAxes(axElev)\n #dia.addShadedRange( timeStamp, timeStamp+datetime.timedelta(seconds=30), facecolor='IndianRed')\n dia.addShadedRange(\n timeStamp,\n timeStamp,\n edgecolor='IndianRed',\n facecolor='none',\n linewidth=2)\n tag = elevDC.getMetaData('tag')\n dia.addSample(\n elevDC.timeWindow(\n elevStartTime,\n elevEndTime),\n label=tag,\n color='k')\n dia.addTitle('Elevation ({0:s}) [m]'.format(\n elevDC.getMetaData('location').upper()))\n # adjust the number of ticks in x/y axis\n dia.updateXAxis(maxticks=5)\n dia.updateYAxis(maxticks=3, prune='lower')\n\n # transects\n dia = transectSnapshotDC(\n clabel='Salinity',\n unit='psu',\n cmap=colormap,\n clim=salt_clim)\n dia.setAxes(axTran1)\n #transectDC_salt.data *= 1e-3\n dia.addSample(transectDC_salt, timeStamp, N=ncontours)\n dia.addTitle('')\n dia.showColorBar()\n # plot station markers\n for station in stationsToPlot:\n staX = staFileObj.getX(station)\n staY = staFileObj.getY(station)\n dia.addStationMarker(staX, staY, label=station, color='k',\n linewidth=1.5, linestyle='dashed')\n # do not show x axis ticks and label for this plot\n dia.hideXTicks()\n\n dia = transectSnapshotDC(clabel='TKE', unit='m2s-1', logScale=True,\n clim=[-7, -2], climIsLog=True, cmap=colormap_kine)\n dia.setAxes(axTran2)\n dia.addSample(transectDC_kine, timeStamp, N=ncontours)\n # plot station markers\n for station in stationsToPlot:\n staX = staFileObj.getX(station)\n staY = staFileObj.getY(station)\n dia.addStationMarker(staX, staY, label=station, color='k',\n linewidth=1.5, linestyle='dashed')\n dia.addTitle('')\n dia.showColorBar()\n dia.updateXAxis(maxticks=15)\n dia.updateYAxis(maxticks=6)\n\n #-------------------------------------------------------------------------\n # Save to disk\n #-------------------------------------------------------------------------\n dateStr = timeStamp.strftime('%Y-%m-%d_%H-%M')\n filename = '_'.join([imgPrefix, dateStr])\n saveFigure(\n imgDir,\n filename,\n imgFiletype,\n verbose=True,\n dpi=200,\n bbox_tight=True)\n plt.close()",
"def show_grid(frame, episode_nr):\n plt.grid('on')\n ax = plt.gca()\n ax.set_xticks(np.arange(0.5, 10, 1))\n ax.set_yticks(np.arange(0.5, 10, 1))\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.imshow(frame, cmap='binary')\n ax.set_title(\"Episode {}\".format(episode_nr))\n plt.pause(0.01)\n plt.clf()",
"def plot(self):\n pass",
"def plot(self):\n\t\tself.plotOfSpect().plot()",
"def plot_Ae_HDR10_exr_nl_out_vari_prj_white(graph_name=\"./img/out.png\"):\n width = 1920\n\n fname_list = [\n \"./AfterEffects/Comp 1/prj_w_W-none _00000.exr\",\n \"./AfterEffects/Comp 1/prj_w_W-100 _00000.exr\",\n \"./AfterEffects/Comp 1/prj_w_W-203 _00000.exr\",\n \"./AfterEffects/Comp 1/prj_w_W-10000 _00000.exr\",\n ]\n profile_list = [\n \"Rec.2100 PQ\",\n \"Rec.2100 PQ W100\",\n \"Rec.2100 PQ W203\",\n \"Rec.2100 PQ W10000\"\n ]\n\n def get_linear(fname):\n img = read_image(fname)\n x_ae = img[0, :, 1] # extract Green line data\n ae_luminance = tf.eotf_to_luminance(x_ae, tf.ST2084)\n return ae_luminance\n\n chara_list = [get_linear(fname) for fname in fname_list]\n\n x = np.linspace(0, 1, width)\n ref_luminance = tf.eotf_to_luminance(x, tf.ST2084)\n\n fig, ax1 = pu.plot_1_graph(\n fontsize=20,\n figsize=(10, 8),\n bg_color=(0.96, 0.96, 0.96),\n graph_title=\"Transfer characteristics of the working color space\",\n graph_title_size=None,\n xlabel=\"Input Luminance [cd/m2]\",\n ylabel=\"Output LUminance [cd/m2]\",\n axis_label_size=None,\n legend_size=17,\n xlim=[0.009, 15000],\n ylim=[0.009, 15000],\n xtick=None,\n ytick=None,\n xtick_size=None, ytick_size=None,\n linewidth=3,\n minor_xtick_num=None,\n minor_ytick_num=None)\n pu.log_scale_settings(ax1=ax1)\n for idx, y in enumerate(chara_list):\n ax1.plot(ref_luminance, y, label=profile_list[idx])\n pu.show_and_save(\n fig=fig, legend_loc='upper left', save_fname=graph_name,\n show=False)",
"def plot(self, idx=None, time=None):\n\n import cartopy.crs as ccrs\n import cartopy.feature as cfeature\n\n if time is not None:\n # OK, we need to do some work to find which index is closest\n idx = np.abs(self.times-time).argmin()\n # todo: print the time used?\n\n bg_frame_attr = interp_one_sec(self.times[idx],\n one_seconds=self.one_second)\n\n ccd = ccd_pixels(full_frame=True)\n\n lla = geolocate_pixels(bg_frame_attr[0],\n bg_frame_attr[1],\n bg_frame_attr[2],\n ccd.x, ccd.y\n )\n\n map_proj = ccrs.Mercator()\n ll_proj = ccrs.PlateCarree()\n fig, ax = plt.subplots(subplot_kw={'projection': map_proj})\n\n ax.pcolormesh(lla.lon.reshape(128, 128), lla.lat.reshape(128, 128),\n self.bg_data[idx].T, transform=ll_proj,\n alpha=0.8, cmap='bone')\n\n _ = ax.add_feature(cfeature.LAND, facecolor='burlywood')\n _ = ax.add_feature(cfeature.OCEAN, facecolor='steelblue')\n\n _ = ax.gridlines(draw_labels=True, linestyle=':')\n\n return ax",
"def add_cmap_background(splot, cmap, vmin, vmax, ory='x', se=(0, 1)):\n gradient = np.linspace(0, vmin, vmax)\n\n # gradient = np.linspace(0, 1, 256)\n gradient = np.vstack((gradient, gradient))\n if ory == 'x':\n extent = (vmin, vmax, se[0], se[1])\n else:\n extent = (se[0], se[1], vmin, vmax)\n gradient = gradient.T\n splot.imshow(gradient, aspect='auto', cmap=cmap, vmin=vmin, vmax=vmax, extent=extent)",
"def plot_update(self, years, abscissa, ordinate, colour_herb, colour_carn):\n if self.years_sim == 0:\n self.fig = plt.figure()\n plt.axis('off')\n\n if abscissa is None:\n abscissa = years # + self.years_sim\n\n ax1 = self.fig.add_subplot(2, 2, 1)\n plt.title('Rossum Island')\n rgb_value = {'O': (0.0, 0.0, 1.0),\n 'M': (0.5, 0.5, 0.5),\n 'J': (0.0, 0.6, 0.0),\n 'S': (0.5, 1.0, 0.5),\n 'D': (1.0, 1.0, 0.5)}\n kart_rgb = [[rgb_value[column] for column in row] for row in\n self.island_map.replace(\" \", \"\").split()]\n axlg = self.fig.add_axes([0.0, 0.5, 0.025, 0.5])\n axlg.axis('off')\n for ix, name in enumerate(('Ocean', 'Mountain', 'Jungle',\n 'Savannah', 'Desert')):\n axlg.add_patch(plt.Rectangle((0., ix * 0.2), 0.3, 0.1,\n edgecolor='none',\n facecolor=rgb_value[name[0]]))\n axlg.text(0.35, ix * 0.2, name, transform=axlg.transAxes)\n\n ax1.imshow(kart_rgb, interpolation='nearest')\n ax1.set_xticks(range(0, len(kart_rgb[0]), 4))\n ax1.set_xticklabels(range(1, 1 + len(kart_rgb[0]), 4))\n ax1.set_yticks(range(0, len(kart_rgb), 4))\n ax1.set_yticklabels(range(1, 1 + len(kart_rgb), 4))\n\n ax2 = self.fig.add_subplot(2, 2, 2)\n plt.axis([self.years_sim, self.years_sim + abscissa, self.years_sim,\n ordinate])\n\n title = plt.title('')\n line_herbs = ax2.plot(np.arange(years + self.years_sim), np.nan *\n np.ones(self.years_sim + years), 'g-',\n label=\"Herbivores\")[0]\n line_carns = ax2.plot(np.arange(years + self.years_sim), np.nan * \n np.ones(self.years_sim + years), 'r-',\n label=\"Carnivores\")[0]\n if self.years_sim == 0:\n plt.grid()\n plt.legend(loc=1, prop={'size': 7})\n\n ax3 = self.fig.add_subplot(2, 2, 3)\n plt.title(\"Herbivores\")\n ax3.set_xticks(range(0, len(self.island_map), 2))\n ax3.set_xticklabels(range(1, 1 + len(self.island_map), 2))\n ax3.set_yticks(range(0, len(self.island_map), 2))\n ax3.set_yticklabels(range(1, 1 + len(self.island_map), 2))\n ax3_bar = plt.imshow([[0 for _ in range(21)] for _ in range(13)])\n if self.years_sim == 0:\n plt.colorbar(ax3_bar, orientation='horizontal', ticks=[])\n\n ax4 = self.fig.add_subplot(2, 2, 4)\n plt.title(\"Carnivores\")\n ax4.set_xticks(range(0, len(self.island_map), 2))\n ax4.set_xticklabels(range(1, (1 + len(self.island_map)), 2))\n ax4.set_yticks(range(0, len(self.island_map), 2))\n ax4.set_yticklabels(range(1, (1 + len(self.island_map)), 2))\n ax4_bar = plt.imshow([[0 for _ in range(21)] for _ in range(13)])\n if self.years_sim == 0:\n plt.colorbar(ax4_bar, orientation='horizontal', ticks=[])\n\n for n in xrange(self.years_sim, self.years_sim + years):\n self.heat = self.heatmap(self.island.one_year())\n if n % self.vis_steps == 0:\n ax3.imshow(self.heat[0], interpolation='nearest',\n cmap=colour_herb)\n\n ax4.imshow(self.heat[1], interpolation='nearest',\n cmap=colour_carn)\n herbs = np.sum(self.heat[0])\n carns = np.sum(self.heat[1])\n ydata_herbs = line_herbs.get_ydata()\n ydata_carns = line_carns.get_ydata()\n ydata_herbs[n] = herbs\n ydata_carns[n] = carns\n line_herbs.set_ydata(ydata_herbs)\n line_carns.set_ydata(ydata_carns)\n title.set_text('Year: {:5}'.format(n + 1)) # Year counter\n\n self.fig.savefig(\n 'img{}{}.png'.format('0' * (5 - len(str(n + self.years_sim\n ))),\n n)) if \\\n (n + 1) % self.img_steps == 0 else None\n\n plt.pause(1e-7)\n if np.sum(self.heat) == 0:\n break\n self.years_sim += years"
] |
[
"0.78450114",
"0.64191943",
"0.61604625",
"0.60969305",
"0.60660625",
"0.604256",
"0.6025813",
"0.6011967",
"0.6009165",
"0.59734505",
"0.59395015",
"0.5934679",
"0.5891089",
"0.58235943",
"0.5800634",
"0.5736051",
"0.56845856",
"0.5660219",
"0.56601167",
"0.5652296",
"0.5646642",
"0.5645556",
"0.56059176",
"0.5581395",
"0.55769396",
"0.5552911",
"0.5534598",
"0.5532683",
"0.5513669",
"0.551047"
] |
0.7641587
|
1
|
Plot the background of the regime diagram in Li et al., 2019
|
def plot_regime_diagram_background_L19(
ax=None,
):
if ax is None:
ax = plt.gca()
# range of power
xpr = [-1, 1]
ypr = [-3, 3]
# range
xlims = [10**i for i in xpr]
ylims = [10**i for i in ypr]
# background following Fig. 3 of Belcher et al., 2012
nx = 500
ny = 500
xx = np.logspace(xpr[0], xpr[1], nx)
yy = np.logspace(ypr[0], ypr[1], ny)
zz1 = np.zeros([nx, ny])
zz2 = np.zeros([nx, ny])
zz3 = np.zeros([nx, ny])
for i in np.arange(nx):
for j in np.arange(ny):
zz1[i,j] = 2*(1-np.exp(-0.5*xx[i]))
zz2[i,j] = 0.22*xx[i]**(-2)
zz3[i,j] = 0.3*xx[i]**(-2)*yy[j]
zz = zz1 + zz2 + zz3
rz_ST = zz1/zz
rz_LT = zz2/zz
rz_CT = zz3/zz
fr = np.ones(zz.shape) * 7
cfrac = 0.25
fr[(rz_LT<cfrac) & (rz_CT<cfrac)] = 1
fr[(rz_ST<cfrac) & (rz_CT<cfrac)] = 2
fr[(rz_ST<cfrac) & (rz_LT<cfrac)] = 3
fr[(rz_ST>=cfrac) & (rz_LT>=cfrac) & (rz_CT<cfrac)] = 4
fr[(rz_ST>=cfrac) & (rz_CT>=cfrac) & (rz_LT<cfrac)] = 5
fr[(rz_LT>=cfrac) & (rz_CT>=cfrac) & (rz_ST<cfrac)] = 6
color_list = ['firebrick','forestgreen','royalblue','gold','orchid','turquoise','w']
cb_ticks = [0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5]
cmap, norm = from_levels_and_colors(cb_ticks, color_list)
ax.contourf(xx, yy, np.transpose(fr), cmap=cmap, norm=norm)
ax.contour(xx, yy, np.transpose(fr), colors='darkgray')
ax.set_xlim(xlims)
ax.set_ylim(ylims)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlabel('La$_t$')
ax.set_ylabel('$h/L_L$')
ax.set_aspect(aspect=1/3)
ax.text(0.11, 4e-3, 'Langmuir', bbox=dict(boxstyle="square",ec='k',fc='w'))
ax.text(3, 4e-3, 'Shear', bbox=dict(boxstyle="square",ec='k',fc='w'))
ax.text(0.13, 1e2, 'Convection', bbox=dict(boxstyle="square",ec='k',fc='w'))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def plot_regime_diagram_background_BG12(\n ax=None,\n ):\n if ax is None:\n ax = plt.gca()\n\n # range of power\n xpr = [-1, 1]\n ypr = [-3, 3]\n # range\n xlims = [10**i for i in xpr]\n ylims = [10**i for i in ypr]\n # size of x and y\n nx = 500\n ny = 500\n xx = np.logspace(xpr[0], xpr[1], nx)\n yy = np.logspace(ypr[0], ypr[1], ny)\n zz1 = np.zeros([nx, ny])\n zz2 = np.zeros([nx, ny])\n zz3 = np.zeros([nx, ny])\n for i in np.arange(nx):\n for j in np.arange(ny):\n zz1[i,j] = 2*(1-np.exp(-0.5*xx[i]))\n zz2[i,j] = 0.22*xx[i]**(-2)\n zz3[i,j] = 0.3*xx[i]**(-2)*yy[j]\n zz = zz1 + zz2 + zz3\n ax.contourf(xx, yy, np.transpose(np.log10(zz)),\n levels=[-0.1, 0, 0.1, 0.25, 0.5, 1, 2, 3, 4],\n cmap='summer', extend='both')\n ax.contour(xx, yy, np.transpose(np.log10(zz)),\n levels=[-0.1, 0, 0.1, 0.25, 0.5, 1, 2, 3, 4],\n colors='darkgray')\n ax.contour(xx, yy, np.transpose(zz1/zz), levels=0.9, colors='k',\n linestyles='-', linewidths=2)\n ax.contour(xx, yy, np.transpose(zz2/zz), levels=0.9, colors='k',\n linestyles='-', linewidths=2)\n ax.contour(xx, yy, np.transpose(zz3/zz), levels=0.9, colors='k',\n linestyles='-', linewidths=2)\n ax.set_xlim(xlims)\n ax.set_ylim(ylims)\n ax.set_xscale('log')\n ax.set_yscale('log')\n ax.set_xlabel('La$_t$')\n ax.set_ylabel('$h/L_L$')\n ax.set_aspect(aspect=1/3)\n ax.text(0.85, 3e-3, '0', color='k', fontsize=8, rotation=-90)\n ax.text(1.6, 1e-2, '0.1', color='k', fontsize=8, rotation=-90)\n ax.text(3.8, 1e-1, '0.25', color='k', fontsize=8, rotation=-90)\n ax.text(4, 1e2, '0.5', color='k', fontsize=8, rotation=33)\n ax.text(3.2, 3e2, '1', color='k', fontsize=8, rotation=36)\n ax.text(0.53, 1e2, '2', color='k', fontsize=8, rotation=38)\n ax.text(0.3, 3.1e2, '3', color='k', fontsize=8, rotation=39)\n ax.text(0.12, 5e2, '4', color='k', fontsize=8, rotation=40)\n ax.text(0.11, 4e-3, 'Langmuir', bbox=dict(boxstyle=\"square\",ec='k',fc='w'))\n ax.text(3, 4e-3, 'Shear', bbox=dict(boxstyle=\"square\",ec='k',fc='w'))\n ax.text(0.13, 1e2, 'Convection', bbox=dict(boxstyle=\"square\",ec='k',fc='w'))",
"def plot_show():\r\n plt.gca().patch.set_facecolor('#0e0a16')\r\n plt.axis('equal')\r\n plt.show()",
"def relative_src_bg(self):\n fig, ax = plt.subplots()\n \n for oneF in ['extracted_flux','extracted_bg_only']:\n wave, f = self.result['1d'][oneF]\n ax.plot(wave,f,label=oneF)\n ax.set_xlabel('Wavelength ($\\mu$m)')\n ax.set_ylabel('Extracted Flux')\n ax.legend()\n \n fig.show()",
"def setAxisBackground(idx=-1):\n dislin.axsbgd(idx)",
"def plot_fig43b_spreading_yeast():\n fig, ax = plt.subplots(figsize=(default_width, default_height))\n y = np.loadtxt('csvs/ratio_lp50a10ad5hp0.0067379_yeast.txt')\n links35 = np.tile(35, 44)\n Rlinks = np.array([47, 21, 18, 15, 20, 17])\n Llinks = np.array([245, 30, 26, 15, 23, 35])\n #links to right of methylation site (50 in total)\n Rlinks = np.concatenate((Rlinks, links35))\n #links to left of methylation site (50 in total)\n Llinks = np.concatenate((Llinks, links35))\n #cumulative chain length including burried basepairs\n unwrap = 0\n #plot as positive distance from TSS in bp\n ldna_Rlinks = convert.genomic_length_from_links_unwraps(Rlinks, unwraps=unwrap) #max WLC chain length in bp\n #plot as negative distance from TSS in bp\n ldna_Llinks = -1*convert.genomic_length_from_links_unwraps(Llinks, unwraps=unwrap) #max WLC chain length in bp\n x = np.concatenate((ldna_Llinks[::-1], ldna_Rlinks))\n ax.plot(x, y, color='k')\n ax.set_xlabel(r'Distance from TSS (bp)')\n ax.set_ylabel('Relative enrichment')\n #plot inset using Crabtree data\n axins = inset_axes(ax, width=\"40%\", height=\"40%\", \n bbox_to_anchor=(.1, .1, .8, .8),\n bbox_transform=ax.transAxes, loc=2)\n xcrabtree = np.array([-10256, -3077, -2241, -1485, -739, -309, -169, 489, 1746, 3087, 4400, 5300])\n REday0 = np.array([0.27, 0.13, 0.46, 0.12, 0.17, 0.33, 0.33, 0.31, 0.32, 0.27, 0.21, 0.33])\n REday5 = np.array([0.19, 0.40, 0.89, 1.55, 0.97, 1.25, 2.25, 3.57, 3.03, 2.09, 1.12, 0.14])\n ycrabtree = REday5/np.mean(REday0)\n axins.plot(xcrabtree, ycrabtree)\n axins.set_xlim([-10000, 10000])\n ax.set_xlim([-10000, 10000])\n #axins.set_ylabel('Relative enrichment', fontsize=8)\n #axins.set_xlabel('Distance from TSS (bp)', fontsize=8)\n plt.subplots_adjust(left=0.16, bottom=0.19, top=0.98, right=0.96)\n plt.savefig(f'plots/thesis/fig43b_spreading-TSS-yeast.pdf', bbox_inches='tight')",
"def show(im,fig= None): #X\n im = im.copy()\n if len(im.shape)==1 or im.shape[1]==1:\n im = X2patch(im)\n im[im<=DEAD]=-0.5\n if fig is None:\n plt.figure()\n fig = plt.imshow(hsv_to_rgb(im+0.5))\n fig.set_data(hsv_to_rgb(im+0.5))\n plt.draw()\n plt.pause(0.001)\n return fig",
"def draw(self):\n self.draw_occupied_cells()\n self.draw_open_cells()\n self.draw_edges()\n plt.xlabel(\"Red\")\n plt.ylabel(\"Black\")\n plt.title('Hex')\n self.camera.snap()",
"def plot_pretty():\n\n ts, ys, lin_model, K, us, dt_control, biass, end_time = simulate()\n plt.style.use('seaborn-deep')\n\n black = '#2B2B2D'\n red = '#E90039'\n orange = '#FF1800'\n white = '#FFFFFF'\n yellow = '#FF9900'\n\n plt.figure(figsize=(12.8, 9.6))\n plt.rcParams.update({'font.size': 16, 'text.color': white, 'axes.labelcolor': white,\n 'axes.edgecolor': white, 'xtick.color': white, 'ytick.color': white})\n\n plt.gcf().set_facecolor(black)\n\n plt.subplot(2, 3, 1)\n plt.plot(ts, ys[:, 2], color=orange)\n plt.axhline(lin_model.yd2n(K.ysp)[1], color=white)\n plt.title(r'$C_{FA}$')\n plt.xlim([0, ts[-1]])\n plt.gca().set_facecolor(black)\n\n plt.subplot(2, 3, 2)\n plt.plot(ts, ys[:, 0], color=orange)\n plt.axhline(lin_model.yd2n(K.ysp)[0], color=white)\n plt.title(r'$C_{G}$')\n plt.xlim([0, ts[-1]])\n plt.gca().set_facecolor(black)\n\n plt.subplot(2, 3, 3)\n plt.plot(ts, ys[:, 3], color=orange)\n plt.title(r'$C_{E}$')\n plt.xlim([0, ts[-1]])\n plt.gca().set_facecolor(black)\n\n plt.subplot(2, 3, 4)\n plt.plot(ts, us[:, lin_model.inputs[1]], color=red)\n plt.title(r'$F_{m, in}$')\n plt.xlim([0, ts[-1]])\n plt.gca().set_facecolor(black)\n\n plt.subplot(2, 3, 5)\n plt.plot(ts, us[:, lin_model.inputs[0]], color=red)\n plt.title(r'$F_{G, in}$')\n plt.xlim([0, ts[-1]])\n plt.gca().set_facecolor(black)\n\n plt.subplot(2, 3, 6)\n plt.plot(\n numpy.arange(dt_control, end_time, dt_control),\n biass[:, 1],\n color=red\n )\n plt.plot(\n numpy.arange(dt_control, end_time, dt_control),\n biass[:, 0],\n color=yellow\n )\n plt.legend([r'$C_{FA}$', r'$C_G$'], facecolor=black)\n plt.title('bias')\n plt.xlim([0, ts[-1]])\n plt.gca().set_facecolor(black)\n\n # plt.suptitle('Closedloop bioreactor without noise')\n plt.tight_layout(rect=[0, 0.03, 1, 0.95])\n plt.savefig('no_noise_pretty.png', transparent=True)\n plt.show()",
"def ShowLongitBackground(spectra,spectraUp,spectraDown,spectraAv,all_titles,all_filt,object_name,NBIMGPERROW=2,right_edge=1800):\n NBSPEC=len(spectra)\n MAXIMGROW=(NBSPEC-1) / NBIMGPERROW +1\n\n f, axarr = plt.subplots(MAXIMGROW,NBIMGPERROW,figsize=(25,5*MAXIMGROW))\n f.tight_layout()\n for index in np.arange(0,NBSPEC):\n ix=index%NBIMGPERROW\n iy=index/NBIMGPERROW\n axarr[iy,ix].plot(spectra[index],'r-')\n axarr[iy,ix].plot(spectraUp[index],'b-')\n axarr[iy,ix].plot(spectraDown[index],'g-')\n axarr[iy,ix].plot(spectraAv[index],'m-')\n thetitle=\"{}) : {} \".format(index,all_titles[index])\n axarr[iy,ix].set_title(thetitle)\n axarr[iy,ix].grid(True)\n axarr[iy,ix].set_ylim(0.,spectra[index][:right_edge].max()*1.2)\n axarr[iy,ix].annotate(all_filt[index],xy=(0.05,0.9),xytext=(0.05,0.9),verticalalignment='top', horizontalalignment='left',color='blue',fontweight='bold', fontsize=20, xycoords='axes fraction')\n title='Longitudinal background Up/Down'.format(object_name)\n plt.suptitle(title,size=16)",
"def create_rink():\n \n fig, ax = plt.subplots(figsize=(12, 9), dpi=600)\n # Нейтральная зона\n # Центральная линия\n line = plt.Line2D((0, 0), (-42.5, 42.5), lw=5, color='red', linestyle='-')\n plt.gca().add_line(line)\n\n line = plt.Line2D((0, 0), (-42.5, 42.5), lw=2, color='white', linestyle='--')\n plt.gca().add_line(line)\n\n # синяя линия\n line = plt.Line2D((25, 25), (-42.5, 42.5), lw=5, color='blue', linestyle='-')\n plt.gca().add_line(line)\n\n # Центральный круг\n ax.add_patch(Arc((0, 0), 30, 30, theta1=-90, theta2=90, lw=2, edgecolor='blue'))\n ax.add_patch(Circle((0, 0), 1.5, lw=2.5, edgecolor='blue', facecolor='blue'))\n\n # точки\n ax.add_patch(Circle((20, 22), 1, lw=5, edgecolor='red', facecolor='red'))\n ax.add_patch(Circle((20, -22), 1, lw=5, edgecolor='red', facecolor='red'))\n\n # Верхний круг вбрасывания\n line = plt.Line2D((75, 71, 71), (23, 23, 26), lw=2, color='red', linestyle='-')\n plt.gca().add_line(line)\n line = plt.Line2D((63, 67, 67), (23, 23, 26), lw=2, color='red', linestyle='-')\n plt.gca().add_line(line)\n line = plt.Line2D((63, 67, 67), (21, 21, 18), lw=2, color='red', linestyle='-')\n plt.gca().add_line(line)\n line = plt.Line2D((75, 71, 71), (21, 21, 18), lw=2, color='red', linestyle='-')\n plt.gca().add_line(line)\n\n line = plt.Line2D((71, 71), (7, 5), lw=2, color='red', linestyle='-')\n plt.gca().add_line(line)\n line = plt.Line2D((67, 67), (7, 5), lw=2, color='red', linestyle='-')\n plt.gca().add_line(line)\n line = plt.Line2D((67, 67), (37, 39), lw=2, color='red', linestyle='-')\n plt.gca().add_line(line)\n line = plt.Line2D((71, 71), (37, 39), lw=2, color='red', linestyle='-')\n plt.gca().add_line(line)\n\n ax.add_patch(Circle((69, 22), 1, lw=5, edgecolor='red', facecolor='red'))\n ax.add_patch(Arc((69, 22), 30, 30, theta1=0, theta2=360, lw=2, edgecolor='red'))\n \n # Нижний круг вбрасывания\n line = plt.Line2D((75, 71, 71), (-23, -23, -26), lw=2, color='red', linestyle='-')\n plt.gca().add_line(line)\n line = plt.Line2D((63, 67, 67), (-23, -23, -26), lw=2, color='red', linestyle='-')\n plt.gca().add_line(line)\n line = plt.Line2D((63, 67, 67), (-21, -21, -18), lw=2, color='red', linestyle='-')\n plt.gca().add_line(line)\n line = plt.Line2D((75, 71, 71), (-21, -21, -18), lw=2, color='red', linestyle='-')\n plt.gca().add_line(line)\n\n line = plt.Line2D((71, 71), (-7, -5), lw=2, color='red', linestyle='-')\n plt.gca().add_line(line)\n line = plt.Line2D((67, 67), (-7, -5), lw=2, color='red', linestyle='-')\n plt.gca().add_line(line)\n line = plt.Line2D((67, 67), (-37, -39), lw=2, color='red', linestyle='-')\n plt.gca().add_line(line)\n line = plt.Line2D((71, 71), (-37, -39), lw=2, color='red', linestyle='-')\n plt.gca().add_line(line)\n\n ax.add_patch(Circle((69, -22), 1, lw=5, edgecolor='red', facecolor='red'))\n ax.add_patch(Arc((69, -22), 30, 30, theta1=0, theta2=360, lw=2, edgecolor='red'))\n\n\n #Зона ворот\n line = plt.Line2D((89, 89), (-40.7, 40.7), lw=2, color='red', linestyle='-')\n plt.gca().add_line(line)\n ax.add_patch(Arc((89, 0), 16, 16, theta1=90, theta2=270, lw=2, edgecolor='red', facecolor='blue'))\n ax.add_patch(Rectangle((85.5,-4), 3.5, 8, lw=2 ,edgecolor='red', facecolor='blue', alpha=0.7))\n\n ax.add_patch(Arc((90, 1), 4, 4, theta1=-30, theta2=90, lw=2, edgecolor='red', facecolor='blue'))\n ax.add_patch(Arc((90, -1), 4, 4, theta1=270, theta2=30, lw=2, edgecolor='red', facecolor='blue'))\n line = plt.Line2D((89, 90), (3, 3), lw=2, color='red', linestyle='-')\n plt.gca().add_line(line)\n line = plt.Line2D((89, 90), (-3, -3), 
lw=2, color='red', linestyle='-')\n plt.gca().add_line(line)\n\n\n # Борта\n line = plt.Line2D((0, 80), (-42.6, -42.6), lw=5, color='black')\n plt.gca().add_line(line)\n\n line = plt.Line2D((0, 80), (42.6, 42.6), lw=5, color='black')\n plt.gca().add_line(line)\n\n line = plt.Line2D((100, 100), (-22.6, 22.6), lw=5, color='black')\n plt.gca().add_line(line)\n\n ax.add_patch(Arc((80, 22.6), 40, 40,\n theta1=0, theta2=90, edgecolor='black', lw=5))\n ax.add_patch(Arc((80, -22.6), 40, 40,\n theta1=270, theta2=360, edgecolor='black', lw=5))\n\n plt.xlim(0, 120)\n\n #plt.axis('auto')\n #plt.show()\n return ax, fig",
"def main_background():\n surface.fill(COLOR_GRAY)",
"def plot_Ae_HDR10_exr_nl_out_vari_prj_white(graph_name=\"./img/out.png\"):\n width = 1920\n\n fname_list = [\n \"./AfterEffects/Comp 1/prj_w_W-none _00000.exr\",\n \"./AfterEffects/Comp 1/prj_w_W-100 _00000.exr\",\n \"./AfterEffects/Comp 1/prj_w_W-203 _00000.exr\",\n \"./AfterEffects/Comp 1/prj_w_W-10000 _00000.exr\",\n ]\n profile_list = [\n \"Rec.2100 PQ\",\n \"Rec.2100 PQ W100\",\n \"Rec.2100 PQ W203\",\n \"Rec.2100 PQ W10000\"\n ]\n\n def get_linear(fname):\n img = read_image(fname)\n x_ae = img[0, :, 1] # extract Green line data\n ae_luminance = tf.eotf_to_luminance(x_ae, tf.ST2084)\n return ae_luminance\n\n chara_list = [get_linear(fname) for fname in fname_list]\n\n x = np.linspace(0, 1, width)\n ref_luminance = tf.eotf_to_luminance(x, tf.ST2084)\n\n fig, ax1 = pu.plot_1_graph(\n fontsize=20,\n figsize=(10, 8),\n bg_color=(0.96, 0.96, 0.96),\n graph_title=\"Transfer characteristics of the working color space\",\n graph_title_size=None,\n xlabel=\"Input Luminance [cd/m2]\",\n ylabel=\"Output LUminance [cd/m2]\",\n axis_label_size=None,\n legend_size=17,\n xlim=[0.009, 15000],\n ylim=[0.009, 15000],\n xtick=None,\n ytick=None,\n xtick_size=None, ytick_size=None,\n linewidth=3,\n minor_xtick_num=None,\n minor_ytick_num=None)\n pu.log_scale_settings(ax1=ax1)\n for idx, y in enumerate(chara_list):\n ax1.plot(ref_luminance, y, label=profile_list[idx])\n pu.show_and_save(\n fig=fig, legend_loc='upper left', save_fname=graph_name,\n show=False)",
"def sn2003bg(ax, col, legend):\n nu = 8.46E9\n d = 6.056450393620008e+25\n\n t = np.array(\n [10, 12, 23, 35, 48, 58, 63, 73, 85, 91, 115, 129,\n 132, 142, 157, 161, 181, 201, 214, 227, 242, 255,\n 266, 285, 300, 326, 337, 351, 368, 405, 410, 424,\n 434, 435, 493, 533, 632, 702, 756, 820, 902, 978])\n f = np.array(\n [2.51, 3.86, 12.19, 24.72, 40.34, 51.72, 49.64, 46.20,\n 38.638, 33.85, 45.74, 53.94, 54.27, 54.83, 48.43,\n 47.43, 35.76, 31.35, 28.67, 27.38, 24.57, 22.30,\n 21.67, 21.31, 20.88, 20.33, 19.85, 18.84, 17.14,\n 14.61, 14.49, 14.16, 13.25, 13.08, 10.04, 8.92,\n 6.23, 6.18, 4.62, 3.93, 4.69, 4.48])\n lum = plot_line(ax, d, t, nu*f, 'SN2003bg', 'SN', col, legend, zorder=0)\n #ax.text(t[0]/1.05, lum[0], 'SN2003bg', fontsize=11,\n # verticalalignment='center',\n # horizontalalignment='right')",
"def plot_2018_board():\n top_left_corner_border = plt.Polygon([[0,823], [91,823], [0,747]], fill='k', edgecolor='k')\n bottom_left_corner_border = plt.Polygon([[0,0], [0,76], [91,0]], fill='k', edgecolor='k')\n plt.gca().add_line(top_left_corner_border)\n plt.gca().add_line(bottom_left_corner_border)\n\n # Auto Line\n auto_line = plt.Line2D((305, 305), (0, 823), lw=2.5)\n plt.gca().add_line(auto_line)\n\n # Exchange Zone\n exchange_zone = plt.Rectangle((0, 442), 91, 122, fc='r')\n plt.gca().add_patch(exchange_zone)\n\n # Power Cube Zone\n power_cube_zone = plt.Rectangle((249, 354), 107, 114, fc='r')\n plt.gca().add_patch(power_cube_zone)\n\n # Switch Zone\n switch_zone = plt.Rectangle((356, 216), 142, 390, fc='grey')\n plt.gca().add_patch(switch_zone)\n\n # Power Cubes next to Switch Zone\n for i in range(0,6,1):\n cube = plt.Rectangle((498, 216+i*(33+38.4)), 33, 33, fc='yellow')\n plt.gca().add_patch(cube)\n\n # Null territory\n null_territory_top = plt.Polygon([[731.5, 581], [731.5, 823], [823, 823], [823, 581]], fill=None, edgecolor='k')\n null_territory_bottom = plt.Polygon([[731.5, 0], [731.5, 242], [823, 242], [823, 0]], fill=None, edgecolor='k')\n plt.gca().add_line(null_territory_top)\n plt.gca().add_line(null_territory_bottom)\n\n # Scale\n scale = plt.Rectangle((653.5, 242), 823-653.5, 581-242, fc='black')\n plt.gca().add_patch(scale)",
"def draw_background(self, t):\n pass",
"def _setFig(self):\n self.p.background_fill_color = grey['light']\n self.p.xgrid.grid_line_color = None\n self.p.ygrid.grid_line_color = None\n self.p.ygrid.grid_line_dash = 'dotted'\n self.p.ygrid.grid_line_dash = 'dotted'\n\n self.p.xgrid.minor_grid_line_color = grey['median']\n self.p.ygrid.minor_grid_line_color = grey['median']\n self.p.xgrid.minor_grid_line_dash = 'dotted'\n self.p.ygrid.minor_grid_line_dash = 'dotted'\n\n self.p.xaxis.axis_label = \"tsne_feature_0\"\n self.p.yaxis.axis_label = \"tsne_feature_1\"",
"def plot(self):\n\t\tself.plotOfHeatingCurrent().plot()",
"def plot(\n ecg, \n sample_rate = 500, \n title = 'ECG 12', \n lead_index = lead_index, \n lead_order = None,\n style = None,\n columns = 2,\n row_height = 6,\n show_lead_name = True,\n show_grid = True,\n show_separate_line = True,\n ):\n\n if not lead_order:\n lead_order = list(range(0,len(ecg)))\n secs = len(ecg[0])/sample_rate\n leads = len(lead_order)\n rows = ceil(leads/columns)\n # display_factor = 2.5\n display_factor = 1\n line_width = 0.5\n fig, ax = plt.subplots(figsize=(secs*columns * display_factor, rows * row_height / 5 * display_factor))\n display_factor = display_factor ** 0.5\n fig.subplots_adjust(\n hspace = 0, \n wspace = 0,\n left = 0, # the left side of the subplots of the figure\n right = 1, # the right side of the subplots of the figure\n bottom = 0, # the bottom of the subplots of the figure\n top = 1\n )\n\n fig.suptitle(title)\n\n x_min = 0\n x_max = columns*secs\n y_min = row_height/4 - (rows/2)*row_height\n y_max = row_height/4\n\n if (style == 'bw'):\n color_major = (0.4,0.4,0.4)\n color_minor = (0.75, 0.75, 0.75)\n color_line = (0,0,0)\n else:\n color_major = (1,0,0)\n color_minor = (1, 0.7, 0.7)\n color_line = (0,0,0.7)\n\n if(show_grid):\n ax.set_xticks(np.arange(x_min,x_max,0.2)) \n ax.set_yticks(np.arange(y_min,y_max,0.5))\n\n ax.minorticks_on()\n \n ax.xaxis.set_minor_locator(AutoMinorLocator(5))\n\n ax.grid(which='major', linestyle='-', linewidth=0.5 * display_factor, color=color_major)\n ax.grid(which='minor', linestyle='-', linewidth=0.5 * display_factor, color=color_minor)\n\n ax.set_ylim(y_min,y_max)\n ax.set_xlim(x_min,x_max)\n\n\n for c in range(0, columns):\n for i in range(0, rows):\n if (c * rows + i < leads):\n y_offset = -(row_height/2) * ceil(i%rows)\n # if (y_offset < -5):\n # y_offset = y_offset + 0.25\n\n x_offset = 0\n if(c > 0):\n x_offset = secs * c\n if(show_separate_line):\n ax.plot([x_offset, x_offset], [ecg[t_lead][0] + y_offset - 0.3, ecg[t_lead][0] + y_offset + 0.3], linewidth=line_width * display_factor, color=color_line)\n\n \n t_lead = lead_order[c * rows + i]\n \n step = 1.0/sample_rate\n if(show_lead_name):\n ax.text(x_offset + 0.07, y_offset - 0.5, lead_index[t_lead], fontsize=9 * display_factor)\n ax.plot(\n np.arange(0, len(ecg[t_lead])*step, step) + x_offset, \n ecg[t_lead] + y_offset,\n linewidth=line_width * display_factor, \n color=color_line\n )",
"def main():\n fout_png = 'color0.png'\n _, axis = plt.subplots(1, 1, figsize=(6, 6))\n colors = [\n '#0032ff',\n '#00ebff',\n '#fdfe02',\n '#ff0000',\n '#8500ff',\n ]\n plt_color_text(colors)\n plt.savefig(fout_png)",
"def plot(self):\n pass",
"def plot(self):\n self.fig = plt.figure('black hole')\n self.fig.clf() #clear the graph to avoir superposing data from the same set (can be deactivated if need to superpose)\n self.ax = plt.subplot()\n\n if self.img2 is not None:\n self.ax.imshow(self.img2)\n else:\n print(\"No black hole deformation in the memory, displayed the original image instead.\")\n self.ax.imshow(self.img_debut)\n\n self.fig.canvas.set_window_title('Black hole')\n self.ax.set_title(\"scrool to zoom in or out \\nright click to add an offset in the background \\nleft click to refresh image \\n close the option windows to stop the program\")\n self.fig.canvas.mpl_connect('scroll_event', self.onscroll)\n self.fig.canvas.mpl_connect('button_press_event', self.onclick)\n self.fig.canvas.mpl_connect('axes_leave_event', self.disconnect)\n self.fig.canvas.mpl_connect('axes_enter_event', self.connect)\n\n self.draw()",
"def plot_graph(self) -> None:",
"def show_es():\n img = rotated_e()\n for i in range(4):\n plt.subplot(2, 2, i + 1)\n plt.imshow(img[i], cmap=plt.cm.gray, interpolation='nearest')\n plt.show()",
"def plot_reward(reward, walls, ax_title, fig, ax, alpha=1):\n\n # Clean up the arrays (imshow only takes values in [0, 1])\n pos_label, neg_label = visualizeReward(reward)\n\n # set up plot\n def make_pic(pos_label, walls, neg_label):\n \"\"\"Combine colors to make the walls + rewards achieve desired color\"\"\"\n alphas = np.ones(pos_label.shape)\n alphas[pos_label > 0] = alpha\n alphas[neg_label > 0] = alpha\n\n # Coloring the walls brown\n # BROWN = np.array((133, 87, 35, 0)) / 255.0\n # wall_color = np.einsum(\"ij,k->ijk\", walls, BROWN)\n\n # to get our true reward (blue) values on the right scale, we'll create our own color scale\n # Another possibility: 123, 176, 32\n small_positive = np.array((150, 189, 3, 0)) / 255.0\n # Another possibility: 26,147,111\n big_positive = np.array((85, 135, 80, 0)) / 255.0\n diff = big_positive - small_positive\n blue = np.stack(\n [\n np.zeros(neg_label.shape),\n np.zeros(neg_label.shape),\n pos_label.copy(),\n np.zeros(neg_label.shape),\n ],\n axis=-1,\n )\n blue[pos_label > 0, :] = (\n np.einsum(\"i,j->ij\", pos_label[pos_label > 0], diff) + small_positive\n )\n\n # Negative reward\n # Another possibility: 223, 161, 177\n small_negative = np.array((227, 126, 126, 0)) / 255.0\n # Another possibility: 195, 75, 123\n big_negative = np.array((180, 27, 27, 0)) / 255.0\n diff = big_negative - small_negative\n neg_color = np.stack(\n [\n neg_label.copy(),\n np.zeros_like(neg_label),\n np.zeros_like(neg_label),\n np.zeros_like(neg_label),\n ],\n axis=-1,\n )\n neg_color[neg_label > 0, :] = (\n np.einsum(\"i,j->ij\", neg_label[neg_label > 0], diff) + small_negative\n )\n\n label = np.stack(\n [\n np.zeros_like(neg_label),\n np.zeros(pos_label.shape),\n np.zeros(pos_label.shape),\n alphas,\n ],\n axis=-1,\n )\n # label = label + blue + wall_color\n label = label + blue + neg_color\n\n # Set all the black (0,0,0,1) RGBA tuples to be white\n label[np.sum(label, 2) == 1] = np.array([0.9, 0.9, 0.9, 1])\n return label.reshape(list(walls.shape) + [4])\n\n # truth plot\n true = ax.imshow(make_pic(pos_label, walls, neg_label))\n hatch_walls(walls, ax)\n\n ax.set_title(ax_title)\n\n # Remove xticks, yticks\n ax.set_yticks([])\n ax.set_xticks([])\n\n return fig, ax",
"def reconstruction_plot(yyy, color = 'r'):\n length = len(yyy)\n plt.plot(np.linspace(0, 1, length)[:length // to_show + 1]\n , yyy[:length // to_show + 1], color)\n # plt.plot(np.linspace(0, 1, len(yyy)), yyy, color)",
"def plot(self):\n\t\tself.plotOfXray().plot()",
"def draw_background(self):\n backgrounds = {\n \"forest\": (38, 106, 46),\n \"desert\": (194, 178, 128)\n }\n self.background_surface.fill(backgrounds[self.geography])",
"def figure5():\n\n plot_settings = {'y_limits': [-100, 30],\n 'x_limits': None,\n 'y_ticks': [-80, -60, -40, -20, 0, 20],\n 'locator_size': 10,\n 'y_label': 'Voltage (mV)',\n 'x_ticks': [],\n 'scale_size': 500,\n 'x_label': \"\",\n 'scale_loc': 4,\n 'figure_name': 'figure_5',\n 'legend': None,\n 'legend_size': 8,\n 'y_on': True}\n\n plt.figure(figsize=(5, 2))\n\n t, y = solver(3000, duration=2400, i_bias_on=0.22, t_start=60)\n plt.plot(t, y[:, 0], 'k-')\n alter_figure(plot_settings) # Alter figure for publication\n plt.gca().add_patch(patches.Rectangle((40, -75), 120, 16, fill=False)) # Draw rectangle to highlight inset\n\n \"\"\"\n Create inset of highlighted region\n Due to inset: alter_figure is not used\n \"\"\"\n plt.axes([.75, .5, .25, .4], axisbg='y')\n\n ix_start = np.where(t < 40)[0][-1] # Find index for beginning of inset\n ix_end = np.where(t < 160)[0][-1] # Find index for end of inset\n v_highlighted = y[ix_start:ix_end, 0]\n plt.plot(t[ix_start:ix_end], v_highlighted, 'k')\n plt.ylim([-75, -55])\n plt.box('off')\n plt.xticks([])\n plt.yticks([])\n plt.xlim([t[ix_start], t[ix_end]])\n add_scalebar(plt.gca(), matchx=False, matchy=False, sizex=25, sizey=5, hidey=False, labelx='25', labely='5', loc=1)\n\n plt.savefig('figures/figure_5.pdf', dpi=1200)\n plt.close()",
"def draw_housing():\n tess.pensize(3)\n tess.color(\"black\", \"darkgrey\")\n tess.begin_fill()\n tess.forward(80)\n tess.left(90)\n tess.forward(200)\n tess.circle(40, 180)\n tess.forward(200)\n tess.left(90)\n tess.end_fill()",
"def plot(self, **kwds):\n c0 = 'blue' # self.latex_options()[\"color_increasing\"]\n c1 = 'red' # self.latex_options()[\"color_decreasing\"]\n G = self.poset().hasse_diagram()\n G.set_pos(self._find_node_positions())\n for a, b, c in G.edges():\n if a < b:\n G.set_edge_label(a, b, 0)\n else:\n G.set_edge_label(a, b, 1)\n return G.plot(color_by_label={0: c0, 1: c1}, **kwds)"
] |
[
"0.68946487",
"0.63277197",
"0.62353915",
"0.6234131",
"0.6109965",
"0.60741985",
"0.6040554",
"0.60065895",
"0.59854",
"0.59645796",
"0.5899287",
"0.5894221",
"0.5870427",
"0.5811376",
"0.57827985",
"0.57744455",
"0.5766635",
"0.5760677",
"0.5701814",
"0.56806076",
"0.56651825",
"0.5659572",
"0.56355643",
"0.56207466",
"0.56116945",
"0.5602268",
"0.55710477",
"0.55630857",
"0.5562955",
"0.5548389"
] |
0.7338029
|
0
|
Use multiple colors in the ylabel
|
def set_ylabel_multicolor(
        ax,
        strings,
        colors,
        anchorpad = 0.,
        **kwargs,
    ):
    """Set a y-axis label built from several strings, each in its own color."""
    from matplotlib.offsetbox import AnchoredOffsetbox, TextArea, VPacker
    # one rotated text box per (string, color) pair; the lists are reversed so the
    # first string sits at the bottom and reads first along the rotated label
    boxes = [TextArea(text, textprops=dict(color=color, ha='left',va='bottom',rotation=90,**kwargs)) for text,color in zip(strings[::-1],colors[::-1])]
    # stack the boxes vertically and anchor the stack just left of the axes
    ybox = VPacker(children=boxes,align="center", pad=0, sep=5)
    anchored_ybox = AnchoredOffsetbox(loc=3, child=ybox, pad=anchorpad, frameon=False, bbox_to_anchor=(-0.15, -0.05), bbox_transform=ax.transAxes, borderpad=0.)
    ax.add_artist(anchored_ybox)
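# A minimal usage sketch (hypothetical values, not part of the original snippet);
# assumes `import matplotlib.pyplot as plt`; extra kwargs such as fontsize are
# forwarded to the TextArea text properties:
#     fig, ax = plt.subplots()
#     set_ylabel_multicolor(ax, ['warm', 'cold'], ['firebrick', 'royalblue'], fontsize=12)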
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def ylabel(text, fontsize=FONT_SIZE_M, color='medium ink', ax=None):\n\n if ax is None:\n ax = plt.gca()\n color = decode_color(color)\n return ax.set_ylabel(text, fontsize=fontsize, color=color)",
"def setAxisLabelColor(idx=-1, axes='XYZ'):\n dislin.axclrs(idx, 'LABELS', axes)",
"def y_formatter_cb(self, ax):\n # y_vals should be the y-location of the labels.\n labels = getattr( self, 'labels', [] )\n labels = list(labels); #labels.reverse()\n y_vals = numpy.arange(.5,len(labels)+.5,1)\n\n # Locations should be fixed.\n fl = FixedLocator( y_vals )\n # Make the formatter for the y-axis\n ff = FixedFormatter( labels )\n ax.yaxis.set_major_formatter( ff )\n ax.yaxis.set_major_locator( fl )",
"def plot_colour(self, label):\n label = label.lower()\n pretty_colours = {}\n # SPIce HD\n pretty_colours['544'] = 'maroon'\n pretty_colours['545'] = 'goldenrod'\n pretty_colours['548'] = 'blueviolet'\n pretty_colours['549'] = 'forestgreen'\n # H2\n ## DOM Efficiency Sets\n pretty_colours['551'] = 'cornflowerblue'\n pretty_colours['552'] = 'cornflowerblue'\n pretty_colours['553'] = 'cornflowerblue'\n pretty_colours['554'] = 'mediumseagreen'\n pretty_colours['555'] = 'mediumseagreen'\n pretty_colours['556'] = 'mediumseagreen'\n ## Hole Ice Sets\n pretty_colours['560'] = 'olive'\n pretty_colours['561'] = 'olive'\n pretty_colours['564'] = 'darkorange'\n pretty_colours['565'] = 'darkorange'\n pretty_colours['572'] = 'teal'\n pretty_colours['573'] = 'teal'\n ## Dima Hole Ice Set without RDE\n pretty_colours['570'] = 'mediumvioletred'\n ## Baseline\n pretty_colours['585'] = 'slategrey'\n # Systematics\n pretty_colours['aeff_scale'] = 'maroon'\n pretty_colours['atm_muon_scale'] = 'goldenrod'\n pretty_colours['deltam31'] = 'blueviolet'\n pretty_colours['theta23'] = 'forestgreen'\n pretty_colours['hole_ice_fwd'] = 'mediumvioletred'\n pretty_colours['dom_eff'] = 'cornflowerblue'\n pretty_colours['genie_ma_qe'] = 'mediumseagreen'\n pretty_colours['genie_ma_res'] = 'olive'\n pretty_colours['hole_ice'] = 'darkorange'\n pretty_colours['nue_numu_ratio'] = 'teal'\n pretty_colours['theta13'] = 'fuchsia'\n pretty_colours['barr_nu_nubar'] = 'thistle'\n pretty_colours['barr_uphor'] = 'orchid'\n pretty_colours['delta_index'] = 'navy'\n # Mass ordering\n pretty_colours['no'] = 'r'\n pretty_colours['io'] = 'b'\n # Asimov fits\n pretty_colours['th_to_wh'] = 'darkviolet'\n pretty_colours['wh_to_th'] = 'deepskyblue'\n colourlabel = None\n for colourkey in pretty_colours.keys():\n if (colourkey in label) or (colourkey == label):\n colourlabel = pretty_colours[colourkey]\n if colourlabel is None:\n logging.debug(\"I do not have a colour scheme for your label %s. \"\n \"Returning black.\"%label)\n colourlabel = 'k'\n return colourlabel",
"def yaxis(self,label,units):\r\n if units != \"\": label = label + \" (\" + units + \")\"\r\n self.ybox.set_text(r\"$%s$\" % (label))\r\n pass",
"def plot_many_y(x, y, yer=None, xlabel = None, ylabel = None, ynames = None, label = None, domain=None,\n yrange = None, undertext =None, savedir = None, marker=None, markerstyles=None, plotspecs = None, groupings=None,\n groupings_labels_within = None, vlines = None, legend_title=None, n_legend_columns=None, text=None, linestyles=None,\n colors=None, save=None):\n if save is None:\n save = True\n if savedir is None:\n save_dir = os.getcwd()\n else:\n save_dir = savedir\n if marker is None:\n marker = False\n if vlines is None:\n vlines = []\n if isinstance(vlines, float):\n vlines = [vlines]\n if n_legend_columns is None:\n n_legend_columns = 1\n\n if markerstyles is None:\n my_marker_styles = [st for st in marker_styles]\n else:\n my_marker_styles = [st for st in markerstyles]\n if groupings_labels_within is None:\n groupings_labels_within = False\n\n if linestyles is None:\n my_line_styles = [ls for ls in line_styles]\n else:\n my_line_styles = [ls for ls in linestyles]\n\n\n #in case linestyle -- comes up\n dashes = (10, 25)\n dashes = [20,55]\n dashes = [40, 40]\n dashes = [5, 5]\n dash_width_factor = 2\n dash_width_factor = 1.5\n\n number_y = len(y)\n\n if groupings is None:\n grouped = False\n #print([\"hi\" for _ in range(number_y_num)])\n groupings = [{ii} for ii in range(number_y)]\n else:\n grouped = True\n\n # Make sure all the elements are in a colour grouping\n if grouped:\n extra_group = set()\n for i in range(number_y):\n in_a_group = False\n for seti in groupings:\n for el in seti:\n if i == el:\n if not in_a_group:\n in_a_group = True\n #else:\n #print el, ' in two colour groups'\n if not in_a_group:\n extra_group.add(i)\n\n if len(groupings) == 1:\n if ynames is not None:\n if len(ynames) == number_y:\n grouped = False\n\n\n default_plot_specs = copy.deepcopy(default_plot_specs_all)\n default_plot_specs['legend_font'] = {'size': 8}\n default_plot_specs['legend_anchor'] = 'upper right'\n default_plot_specs['legend_loc'] = (0.98, -0.1)\n\n if marker:\n default_plot_specs['x_scale'] = 0.05\n else:\n default_plot_specs['x_scale'] = 0\n\n text_heights = [-0.023, -0.069, -0.115,-0.161]\n\n if plotspecs is not None:\n for stat in list(default_plot_specs.keys()):\n if stat in plotspecs:\n default_plot_specs[stat] = plotspecs[stat]\n\n the_label = ''\n\n if domain is not None:\n xlow = domain[0]\n xhigh = domain[1]\n for ii in range(number_y):\n klow = x[ii].index(find_nearest(x[ii],xlow))\n khigh = x[ii].index(find_nearest(x[ii], xhigh))\n #khigh = x[ii].index(find_nearest_above(x[ii], xhigh))\n x[ii] = x[ii][klow:khigh]\n y[ii] = y[ii][klow:khigh]\n if yer:\n yer[ii] = yer[ii][klow:khigh]\n if yrange is not None:\n ylow = yrange[0]\n yhigh = yrange[1]\n if xlabel is None:\n x_label = ''\n else:\n x_label = xlabel\n if ylabel is None:\n y_label = ''\n the_label = 'y_' +str(number_y) +'_'\n else:\n y_label = ylabel\n the_label += y_label[:4] +'_'\n if ynames is None:\n y_names = []\n else:\n y_names = ynames\n if label is None:\n the_label = the_label + 'vs_' +x_label\n else:\n the_label = label\n\n under_text = []\n if undertext is not None:\n under_text = undertext[:]\n\n if marker:\n rcParams['legend.numpoints'] = 1\n\n plt.clf()\n\n fig = plt.figure(figsize=default_plot_specs['fsize'], dpi=default_plot_specs['dpi'])\n ax_1 = fig.add_subplot(111)\n\n if default_plot_specs['xlog']:\n ax_1.set_xscale('log')\n if default_plot_specs['ylog']:\n ax_1.set_yscale('log')\n\n if grouped:\n mycolors = cm.rainbow(np.linspace(0, 1, len(groupings)))\n else:\n mycolors = 
cm.rainbow(np.linspace(0, 1, number_y))\n color_dict = dict()\n line_style_dict = dict()\n marker_style_dict = dict()\n\n\n ynames_dict = dict()\n custom_legend_entries_dict = dict()\n display_leg_numbers = []\n\n add_dummy_ynames = False\n if ynames is not None:\n if len(ynames) == len(groupings):\n if len(groupings) != len(y):\n # if only the first element of each group is named\n add_dummy_ynames = True\n if not groupings_labels_within:\n display_leg_numbers = [kk for kk in range(len(ynames))]\n elif not groupings_labels_within:\n display_leg_numbers = [kk for kk in range(len(ynames))]\n elif not groupings_labels_within:\n display_leg_numbers = [kk for kk in range(len(ynames))]\n\n\n for seti, jj in zip(groupings, range(len(groupings))):\n for k,ii in zip(sorted(list(seti)), range(len(seti))):\n #jj is the group number\n #ii is the number within the set\n #k is the number in the ylist\n if colors is None:\n if grouped:\n color_dict[k] = mycolors[jj]\n else:\n color_dict[k] = mycolors[k]\n\n else:\n if grouped:\n color_dict[k] = colors[jj]\n else:\n color_dict[k] = colors[k]\n if grouped:\n marker_style_dict[k] = my_marker_styles[ii]\n line_style_dict[k] = my_line_styles[ii]\n else:\n # print(k)\n # print(markerstyles)\n if markerstyles is None:\n marker_style_dict[k] = default_plot_specs['marker_style']\n else:\n marker_style_dict[k] = markerstyles[k]\n if linestyles is None:\n line_style_dict[k] = default_plot_specs['linestyle']\n else:\n line_style_dict[k] = linestyles[k]\n if add_dummy_ynames:\n if ii == 0: # if the first in the set\n ynames_dict[k] = ynames[jj]\n else:\n ynames_dict[k] = 'dummy'\n\n\n\n if groupings_labels_within:\n\n if ii == 0:\n display_leg_numbers.append(k)\n\n # Create custom artists\n if marker:\n markstyli = marker_style_dict[k]\n style = line_style_dict[k]\n if markstyli and not style:\n capsizi = default_plot_specs['cap_size']\n else:\n capsizi = None\n if line_style_dict[k] == '--':\n custom_legend_entries_dict[ii] = plt.Line2D((0, 1), (0, 0), color='k', marker=markstyli,\n markersize=default_plot_specs['marker_size'],\n dashes=dashes)\n else:\n custom_legend_entries_dict[ii] = plt.Line2D((0, 1), (0, 0), color='k', marker=markstyli,\n markersize=default_plot_specs['marker_size'],\n capsize=capsizi,\n linestyle=style,\n linewidth=default_plot_specs['linewidth'])\n else:\n if line_style_dict[k] == '--':\n custom_legend_entries_dict[ii] = plt.Line2D((0, 1), (0, 0), color='k', dashes=dashes,\n linewidth=dash_width_factor*default_plot_specs['linewidth'])\n else:\n custom_legend_entries_dict[ii] = plt.Line2D((0, 1), (0, 0), color='k',\n linestyle=style,\n linewidth=default_plot_specs['linewidth'])\n\n if add_dummy_ynames:\n ynames = [ynames_dict[k] for k in range(number_y)]\n # Create custom artists\n\n simArtist = plt.Line2D((0, 1), (0, 0), color='k', marker='o', linestyle='')\n anyArtist = plt.Line2D((0, 1), (0, 0), color='k')\n\n #print color_dict\n\n # print 'printing ynames in funct'\n # print ynames\n #print 'yname dict', ynames_dict\n\n hl = False\n for jj in range(number_y):\n coli = color_dict[jj]\n style = line_style_dict[jj] # '--' #'None'\n thickness = default_plot_specs['linewidth']\n if style == '--':\n thickness = thickness*dash_width_factor\n hl = True\n hl_num = 3.6\n dashi = True\n else:\n dashi = False\n if marker:\n if yer is None:\n markstyli = marker_style_dict[jj]\n if ynames is None or jj>len(ynames)-1 or not ynames[jj]:\n if dashi:\n ax_1.plot(x[jj], y[jj], color=coli, marker=markstyli\n , markersize=default_plot_specs['marker_size'],\n 
dashes=dashes)\n else:\n ax_1.plot(x[jj], y[jj], color=coli, marker=markstyli, linestyle=style\n , markersize=default_plot_specs['marker_size'],\n linewidth=thickness)\n else:\n if dashi:\n ax_1.plot(x[jj], y[jj], color=coli, label=ynames[jj], marker=markstyli\n , markersize=default_plot_specs['marker_size'],\n dashes=dashes)\n else:\n ax_1.plot(x[jj], y[jj], color=coli, label=ynames[jj], marker=markstyli,\n linestyle=style, markersize=default_plot_specs['marker_size'],\n linewidth=thickness)\n # else:\n # ax_1.plot(x[jj], y[jj], color=coli,linestyle=style)\n else:\n if ynames is None or jj > len(ynames) - 1:\n if dashi:\n ax_1.plot(x[jj], y[jj], color=coli, linewidth=thickness,dashes=dashes)\n else:\n ax_1.plot(x[jj], y[jj], color=coli, linewidth=thickness, linestyle=style)\n else:\n if dashi:\n ax_1.plot(x[jj], y[jj], color=coli, linewidth=thickness,label=ynames[jj],dashes=dashes)\n else:\n ax_1.plot(x[jj], y[jj], color=coli, linewidth=thickness, linestyle=style,\n label=ynames[jj])\n\n\n\n if yer is not None:\n\n # ax_1.plot(x[jj], yer_datas_high, color=coli,\n # label=y_names[jj] + ' + SE', linestyle='--')\n # ax_1.plot(x[jj], yer_datas_low, color=coli,\n # label=y_names[jj] + ' - SE', linestyle='--')\n if marker:\n markstyli = marker_style_dict[jj]\n if markstyli and not style:\n capsizi = default_plot_specs['cap_size']\n else:\n capsizi = None\n if ynames is None or jj > len(ynames) - 1:\n if dashi:\n ax_1.errorbar(x[jj],y[jj], yer[jj], color=coli,marker=markstyli,\n markersize=default_plot_specs['marker_size'],\n capsize=capsizi,\n linewidth=default_plot_specs['linewidth'],dashes=dashes)\n else:\n ax_1.errorbar(x[jj],y[jj], yer[jj], color=coli,marker=markstyli,\n markersize=default_plot_specs['marker_size'],\n capsize=capsizi,\n linewidth=default_plot_specs['linewidth'],linestyle=style)\n else:\n if dashi:\n ax_1.errorbar(x[jj],y[jj], yer[jj], color=coli,marker=markstyli,\n markersize=default_plot_specs['marker_size'],\n capsize=capsizi,\n label=y_names[jj],\n linewidth=default_plot_specs['linewidth'],dashes=dashes)\n else:\n ax_1.errorbar(x[jj],y[jj], yer[jj], color=coli,marker=markstyli,\n markersize=default_plot_specs['marker_size'],\n capsize=capsizi,\n label=y_names[jj],\n linewidth=default_plot_specs['linewidth'],linestyle=style)\n else:\n yer_datas_high = [y_i + y_er_i for y_i, y_er_i in zip(y[jj], yer[jj])]\n yer_datas_low = [y_i - y_er_i for y_i, y_er_i in zip(y[jj], yer[jj])]\n ax_1.plot(x[jj], yer_datas_high, color=coli, linestyle='--',dashes=dashes)\n ax_1.plot(x[jj], yer_datas_low, color=coli, linestyle='--',dashes=dashes)\n\n if default_plot_specs['yrotation'] is 'vertical':\n if default_plot_specs['ylabelspace'] ==0:\n ax_1.set_ylabel(y_label, **default_plot_specs['axis_font'])\n else:\n labpad = int(default_plot_specs['axis_font']['size'])*default_plot_specs['ylabelspace']\n ax_1.set_ylabel(y_label,labelpad=labpad, **default_plot_specs['axis_font'])\n else:\n labpad =int(default_plot_specs['axis_font']['size'])*3\n #ax_1.set_ylabel(y_label,rotation=plotspecs['yrotation'],labelpad=int(labpad), **default_plot_specs['axis_font'])\n ax_1.set_ylabel(y_label, rotation=default_plot_specs['yrotation'],labelpad=labpad, horizontalalignment = 'center',verticalalignment ='center',\n **default_plot_specs['axis_font'])\n\n\n # Set the tick labels font\n for labeli in (ax_1.get_xticklabels() + ax_1.get_yticklabels()):\n # labeli.set_fontname('Arial')\n labeli.set_fontsize(default_plot_specs['ticksize'])\n\n ax_1.set_xlabel(x_label, **default_plot_specs['axis_font'])\n\n\n xlow, 
xhigh = min(x[0]), max(x[0])\n for xx in x[1:]:\n mycopy_low = [g for g in copy.deepcopy(xx)]\n mycopy_high = [g for g in copy.deepcopy(xx)]\n mycopy_low.append(xlow)\n mycopy_high.append(xhigh)\n xlow, xhigh = min(mycopy_low), max(mycopy_high)\n # set axes limits\n if domain is None:\n extra = (xhigh-xlow)*default_plot_specs['x_scale']\n xlow -= extra\n xhigh +=extra\n\n\n #Make vertical lines\n for xfloat in vlines:\n if xlow < xfloat < xhigh:\n ax_1.axvline(x=xfloat,color = default_plot_specs['vlinecolor'],linestyle= default_plot_specs['vlinestyle'],linewidth=default_plot_specs['vlineswidth'])\n\n # if not marker:\n # xhigh -= 15\n\n if yrange is None:\n if y:\n if y[0]:\n if yer is not None:\n ylow, yhigh = min([yi-yi_er for yi, yi_er in zip(y[0],yer[0])]), max([yi+yi_er for yi, yi_er in zip(y[0],yer[0])])\n else:\n ylow, yhigh = min(y[0]), max(y[0])\n else:\n ylow, yhigh = 0, 0\n else:\n ylow, yhigh = 0, 0\n if yer is not None:\n for yy, yy_er in zip(y[1:],yer[1:]):\n ylow, yhigh = min([ylow] + [yi-yi_er for yi, yi_er in zip(yy,yy_er)]), max([yhigh]+ [yi+yi_er for yi, yi_er in zip(yy,yy_er)])\n else:\n for yy in y[1:]:\n ylow, yhigh = min([ylow] + yy), max([yhigh] + yy)\n extra = (yhigh-ylow)*default_plot_specs['y_scale']\n ylow -= extra\n yhigh +=extra\n\n\n ax_1.set_xlim(xlow, xhigh)\n ax_1.set_ylim(ylow, yhigh)\n\n while under_text:\n texti = under_text.pop(0)\n plt.figtext(0.08, text_heights.pop(0), texti, default_plot_specs['undertext_font'])\n\n if text:\n ax_1.text(default_plot_specs['text_loc'][0], default_plot_specs['text_loc'][1], text,\n verticalalignment='bottom', horizontalalignment='right',\n transform=ax_1.transAxes,\n color=default_plot_specs['text_color'], fontsize=default_plot_specs['text_size'])\n\n #print 'display_leg_numbers', display_leg_numbers\n\n\n if default_plot_specs['xshade']:\n ax_1.axvspan(default_plot_specs['xshade'][0], default_plot_specs['xshade'][1], alpha=0.3, color=default_plot_specs['xshade_color'])\n\n if ynames:\n # print 'the display leg numbers '\n # print display_leg_numbers\n\n handles, labels = ax_1.get_legend_handles_labels()\n handles = [handle for i,handle in enumerate(handles) if i in display_leg_numbers]\n labels = [label for i,label in enumerate(labels) if i in display_leg_numbers]\n if groupings_labels_within:\n mini = min(len(list(custom_legend_entries_dict.keys())),len(groupings_labels_within))\n handles += [custom_legend_entries_dict[k] for k in range(mini)]\n labels += groupings_labels_within[:mini]\n if hl:\n lgd = ax_1.legend(handles, labels, loc=default_plot_specs['legend_anchor'],\n bbox_to_anchor=default_plot_specs['legend_loc'],\n prop=default_plot_specs['legend_font'], ncol=n_legend_columns,handlelength=hl_num)\n else:\n lgd = ax_1.legend(handles, labels, loc=default_plot_specs['legend_anchor'],\n bbox_to_anchor=default_plot_specs['legend_loc'],\n prop=default_plot_specs['legend_font'], ncol=n_legend_columns)\n\n if legend_title:\n lgd.set_title(legend_title,prop=default_plot_specs['legend_font'])\n\n plt.setp(lgd.get_title(), multialignment='center')\n\n # if hl:\n # print 'doing hl 2'\n # ax_1.legend(handlelength=2)\n\n\n if default_plot_specs['nxticks'] > 0:\n #visible_labelsx = [lab for lab in ax_1.get_xticklabels() if lab.get_visible() is True and lab.get_text() != '']\n for lab in ax_1.get_xticklabels():\n lab.set_visible(True)\n visible_labelsx = [lab for lab in ax_1.get_xticklabels() if lab.get_visible() is True]\n visible_labelsx=visible_labelsx[1::default_plot_specs['nxticks']]\n plt.setp(visible_labelsx, visible 
= False)\n #\n #ax_1.set_xticks(visible_labelsx[1::2])\n #plt.setp(visible_labels[1::2], visible=False)\n #ax_1.locator_params(axis='x', nticks=default_plot_specs['nxticks'])\n #\n if default_plot_specs['nyticks'] > 0:\n # #ax_1.locator_params(axis='y', nticks=default_plot_specs['nyticks'])\n visible_labelsy = [lab for lab in ax_1.get_yticklabels() if lab.get_visible() is True]\n if len(visible_labelsy) > 4:\n visible_labelsy = visible_labelsy[2:-2]\n plt.setp(visible_labelsy, visible=False)\n\n #plt.grid('off')\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n save_dir = os.path.join(save_dir,'%s.png' % the_label)\n\n if save:\n save_fig(fig, save_dir)\n else:\n return fig, save_dir",
"def ylabel(lab,ax=None,**kwargs):\n ax = gca(ax)\n return genLabel(ax.set_ylabel,lab,**kwargs)",
"def ColorsLabel(*args):\n return _XCAFDoc.XCAFDoc_DocumentTool_ColorsLabel(*args)",
"def plot_labels(y_true, y_pred, label_mapper=None):\n plt.plot(y_true, label=\"True\")\n plt.plot(y_pred, label=\"Pred\")\n if label_mapper:\n plt.yticks(list(label_mapper.keys()), list(label_mapper.values()))\n plt.legend()\n plt.xlabel(\"Epochs\")",
"def set_labels(x, y=''):\n plt.xlabel(x)\n plt.ylabel(y)",
"def label_rgb(colors):\n return ('rgb(%s, %s, %s)' % (colors[0], colors[1], colors[2]))",
"def ylabel(ylabel):\n impl.ylabel(**locals())",
"def yaxis(self,label,units):\n if units != \"\": label = label + \" (\" + units + \")\"\n self.subplot.set_ylabel(label)\n pass",
"def setYLabel(self, label):\n self.__y_label__ = label",
"def colors_for_labels():\n colors = [(i * np.array([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1]) % 255).astype(np.uint8) for i in range(len(CATEGORY))]\n #colors = np.array(range(len(COCO_INSTANCE_CATEGORY_NAMES))) * np.array([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])\n #colors = (colors % 255).numpy().astype(\"uint8\")\n return colors",
"def y_to_label(self, data, Y):\n pass",
"def _labels_pcolor(self, obj, fmt=None, **kwargs):\n # Parse input args and populate _facecolors, which is initially unfilled\n # See: https://stackoverflow.com/a/20998634/4970632\n fmt = _not_none(fmt, pticker.SimpleFormatter())\n labels_kw = {'size': rc['text.labelsize'], 'ha': 'center', 'va': 'center'}\n labels_kw.update(kwargs)\n obj.update_scalarmappable() # populate _facecolors\n\n # Get positions and contour colors\n array = obj.get_array()\n paths = obj.get_paths()\n colors = _to_ndarray(obj.get_facecolors())\n edgecolors = _to_ndarray(obj.get_edgecolors())\n if len(colors) == 1: # weird flex but okay\n colors = np.repeat(colors, len(array), axis=0)\n if len(edgecolors) == 1:\n edgecolors = np.repeat(edgecolors, len(array), axis=0)\n\n # Apply colors\n labs = []\n for i, (color, path, num) in enumerate(zip(colors, paths, array)):\n if not np.isfinite(num):\n edgecolors[i, :] = 0\n continue\n bbox = path.get_extents()\n x = (bbox.xmin + bbox.xmax) / 2\n y = (bbox.ymin + bbox.ymax) / 2\n if 'color' not in kwargs:\n _, _, lum = to_xyz(color, 'hcl')\n if lum < 50:\n color = 'w'\n else:\n color = 'k'\n labels_kw['color'] = color\n lab = self.text(x, y, fmt(num), **labels_kw)\n labs.append(lab)\n obj.set_edgecolors(edgecolors)\n\n return labs",
"def color_color(self, c1, c2,\n label_x='',\n label_y=''):\n pl.clf()\n sp = pl.subplot()\n h, x, y, scale = sp.hist2d(self.data[c1], self.data[c2],\n bins=200, cmap='gray_r')\n colb = pl.colorbar(scale)\n colb.set_label('counts')\n\n # targets are set too\n if self.targets is not None:\n sp.scatter(self.targets[c1], self.targets[c2],\n marker='.')\n\n if label_x != '':\n sp.set_xlabel(label_x)\n else:\n sp.set_xlabel('{} - {}'.format(c1[0], c1[-1]))\n\n if label_y != '':\n sp.set_ylabel(label_y)\n else:\n sp.set_ylabel('{} - {}'.format(c2[0], c2[-1]))\n pl.show()",
"def ylabel(self, ylabel: str):\n self.ax.set_ylabel(ylabel)\n self.canvas.draw()",
"def XCAFDoc_DocumentTool_ColorsLabel(*args):\n return _XCAFDoc.XCAFDoc_DocumentTool_ColorsLabel(*args)",
"def addlabels(x, y):\n\n for i in range(len(x)):\n plt.text(i, y[i], y[i], ha='center')",
"def format_y_axis(self, text=None, positionx=None, positiony=None, color=None, fontsize=None):\n if text is not None:\n self.xaxis_label = text\n\n x, y = self.settings.otherParams[\"ylabel.position\"]\n if positionx is not None:\n x = positionx\n if positiony is not None:\n y = positiony\n self.settings.otherParams[\"ylabel.position\"] = (x, y)\n\n if color is not None:\n self.settings.otherParams[\"ylabel.color\"] = color\n\n if fontsize is not None:\n self.settings.otherParams[\"ylabel.fontsize\"] = fontsize",
"def adjust_labels(data_y, label):\n\n if label == 'locomotion': # Labels for locomotion are adjusted\n data_y[data_y == 4] = 3\n data_y[data_y == 5] = 4\n elif label == 'gestures': # Labels for gestures are adjusted\n data_y[data_y == 406516] = 1\n data_y[data_y == 406517] = 2\n data_y[data_y == 404516] = 3\n data_y[data_y == 404517] = 4\n data_y[data_y == 406520] = 5\n data_y[data_y == 404520] = 6\n data_y[data_y == 406505] = 7\n data_y[data_y == 404505] = 8\n data_y[data_y == 406519] = 9\n data_y[data_y == 404519] = 10\n data_y[data_y == 406511] = 11\n data_y[data_y == 404511] = 12\n data_y[data_y == 406508] = 13\n data_y[data_y == 404508] = 14\n data_y[data_y == 408512] = 15\n data_y[data_y == 407521] = 16\n data_y[data_y == 405506] = 17\n return data_y",
"def _color_for_labels(label_color, default_color, seq_index):\n if label_color is None:\n if hasattr(default_color, '__getitem__'):\n c = default_color[seq_index]\n else:\n c = default_color\n else:\n c = label_color\n\n return c or 'black'",
"def _setPlotValText(ax, texts, core, data, labels, labelFmt, fontSize, collection):\n _ = core.getAssemblyPitch()\n for a, val, label in zip(core, data, labels):\n x, y, _ = a.spatialLocator.getLocalCoordinates()\n cmap = collection.get_cmap()\n patchColor = numpy.asarray(cmap(collection.norm(val)))\n luminance = patchColor.dot(LUMINANCE_WEIGHTS)\n dark = luminance < 0.5\n if dark:\n color = \"white\"\n else:\n color = \"black\"\n # Write text on top of patch locations.\n if label is None and labelFmt is not None:\n # Write the value\n labelText = labelFmt.format(val)\n text = ax.text(\n x,\n y,\n labelText,\n zorder=1,\n ha=\"center\",\n va=\"center\",\n fontsize=fontSize,\n color=color,\n )\n elif label is not None:\n text = ax.text(\n x,\n y,\n label,\n zorder=1,\n ha=\"center\",\n va=\"center\",\n fontsize=fontSize,\n color=color,\n )\n else:\n # labelFmt was none, so they don't want any text plotted\n continue\n texts.append(text)",
"def plot_many_y_SMBE(x, y, yer=None, xlabel = None, ylabel = None, ynames = None, label = None, domain=None,\n yrange = None, undertext =None, savedir = None, marker=None, plotspecs = None, groupings=None,\n vlines = None, legend_title=None, n_legend_columns=None, text=None):\n if savedir is None:\n save_dir = os.getcwd()\n else:\n save_dir = savedir\n if marker is None:\n marker = False\n if vlines is None:\n vlines = []\n if isinstance(vlines, float):\n vlines = [vlines]\n if n_legend_columns is None:\n n_legend_columns = 1\n\n number_y = len(y)\n\n if groupings is None:\n grouped = False\n groupings = [{i} for i in range(number_y)]\n else:\n grouped = True\n\n # Make sure all the elements are in a colour grouping\n if grouped:\n extra_group = set()\n for i in range(number_y):\n in_a_group = False\n for seti in groupings:\n for el in seti:\n if i == el:\n if not in_a_group:\n in_a_group = True\n else:\n print(el, ' in two colour groups')\n if not in_a_group:\n extra_group.add(i)\n\n\n default_plot_specs = copy.deepcopy(default_plot_specs_all)\n default_plot_specs['legend_font'] = {'size': 8}\n default_plot_specs['legend_anchor'] = 'upper right'\n default_plot_specs['legend_loc'] = (0.98, -0.1)\n\n if marker:\n default_plot_specs['x_scale'] = 0.05\n else:\n default_plot_specs['x_scale'] = 0\n\n text_heights = [-0.023, -0.069, -0.115,-0.161]\n\n if plotspecs is not None:\n for stat in list(default_plot_specs.keys()):\n if stat in plotspecs:\n default_plot_specs[stat] = plotspecs[stat]\n\n the_label = ''\n\n if domain is not None:\n xlow = domain[0]\n xhigh = domain[1]\n for ii in range(number_y):\n klow = x[ii].index(find_nearest(x[ii],xlow))\n khigh = x[ii].index(find_nearest(x[ii], xhigh))\n x[ii] = x[ii][klow:khigh]\n y[ii] = y[ii][klow:khigh]\n if yrange is not None:\n ylow = yrange[0]\n yhigh = yrange[1]\n if xlabel is None:\n x_label = ''\n else:\n x_label = xlabel\n if ylabel is None:\n y_label = ''\n the_label = 'y_' +str(number_y) +'_'\n else:\n y_label = ylabel\n the_label += y_label[:4] +'_'\n if ynames is None:\n y_names = []\n else:\n y_names = ynames\n if label is None:\n the_label = the_label + 'vs_' +x_label\n else:\n the_label = label\n\n under_text = []\n if undertext is not None:\n under_text = undertext[:]\n\n if marker:\n rcParams['legend.numpoints'] = 1\n\n plt.clf()\n\n fig = plt.figure(figsize=default_plot_specs['fsize'], dpi=default_plot_specs['dpi'])\n ax_1 = fig.add_subplot(111)\n\n colors = cm.rainbow(np.linspace(0, 1, len(groupings)))\n color_dict = dict()\n line_style_dict = dict()\n marker_style_dict = dict()\n\n for seti, jj in zip(groupings, range(number_y)):\n for k,ii in zip(sorted(list(seti)), range(len(seti))):\n color_dict[k] = colors[jj]\n if grouped:\n marker_style_dict[k] = marker_styles[ii]\n line_style_dict[k] = line_styles[ii]\n\n else:\n marker_style_dict[k] = default_plot_specs['marker_style']\n line_style_dict[k] = default_plot_specs['linestyle']\n\n\n for jj in range(number_y):\n coli = color_dict[jj]\n\n if marker:\n style = line_style_dict[jj]#'--' #'None'\n if yer is None:\n if ynames is None or jj>len(ynames)-1:\n ax_1.plot(x[jj], y[jj], color=coli, marker=marker_style_dict[jj], linestyle=style\n , markersize=default_plot_specs['marker_size'])\n else:\n ax_1.plot(x[jj], y[jj], color=coli, label=ynames[jj], marker=marker_style_dict[jj],linestyle=style\n , markersize=default_plot_specs['marker_size'])\n # else:\n # ax_1.plot(x[jj], y[jj], color=coli,linestyle=style)\n else:\n style = line_style_dict[jj]\n if ynames is None or jj > 
len(ynames) - 1:\n ax_1.plot(x[jj], y[jj], color=coli, linewidth=default_plot_specs['linewidth'],linestyle=style)\n else:\n ax_1.plot(x[jj], y[jj], color=coli, linewidth=default_plot_specs['linewidth'],linestyle=style,label=ynames[jj])\n\n\n if yer is not None:\n\n # ax_1.plot(x[jj], yer_datas_high, color=coli,\n # label=y_names[jj] + ' + SE', linestyle='--')\n # ax_1.plot(x[jj], yer_datas_low, color=coli,\n # label=y_names[jj] + ' - SE', linestyle='--')\n if marker:\n markersyli = default_plot_specs['marker_size']\n if markersyli and not style:\n capsizi = default_plot_specs['cap_size']\n else:\n capsizi = None\n ax_1.errorbar(x[jj],y[jj], yer[jj], color=coli,marker=markersyli,\n markersize=default_plot_specs['marker_size'],capsize=capsizi,\n label=y_names[jj],\n linewidth=default_plot_specs['linewidth'],linestyle=style)\n else:\n yer_datas_high = [y_i + y_er_i for y_i, y_er_i in zip(y[jj], yer[jj])]\n yer_datas_low = [y_i - y_er_i for y_i, y_er_i in zip(y[jj], yer[jj])]\n ax_1.plot(x[jj], yer_datas_high, color=coli, linestyle='--')\n ax_1.plot(x[jj], yer_datas_low, color=coli, linestyle='--')\n\n ax_1.set_ylabel(y_label, **default_plot_specs['axis_font'])\n\n if default_plot_specs['nxticks'] > 0:\n ax_1.locator_params(axis='x', nticks=default_plot_specs['nxticks'])\n\n if default_plot_specs['nyticks'] > 0:\n ax_1.locator_params(axis='y', nticks=default_plot_specs['nyticks'])\n\n\n # Set the tick labels font\n for labeli in (ax_1.get_xticklabels() + ax_1.get_yticklabels()):\n # labeli.set_fontname('Arial')\n labeli.set_fontsize(default_plot_specs['ticksize'])\n\n ax_1.set_xlabel(x_label, **default_plot_specs['axis_font'])\n\n\n # set axes limits\n if domain is None:\n xlow, xhigh = min(x[0]), max(x[0])\n for xx in x[1:]:\n mycopy_low = [g for g in copy.deepcopy(xx)]\n mycopy_high = [g for g in copy.deepcopy(xx)]\n mycopy_low.append(xlow)\n mycopy_high.append(xhigh)\n xlow, xhigh = min(mycopy_low), max(mycopy_high)\n extra = (xhigh-xlow)*default_plot_specs['x_scale']\n xlow -= extra\n xhigh +=extra\n\n\n #Make vertical lines\n for xfloat in vlines:\n if xlow < xfloat < xhigh:\n ax_1.axvline(x=xfloat,color = default_plot_specs['vlinecolor'],linestyle= default_plot_specs['vlinestyle'],linewidth=default_plot_specs['vlineswidth'])\n\n if not marker:\n xhigh -= 15\n\n if yrange is None:\n if y:\n if y[0]:\n ylow, yhigh = min(y[0]), max(y[0])\n else:\n ylow, yhigh = 0, 0\n else:\n ylow, yhigh = 0, 0\n for yy in y[1:]:\n ylow, yhigh = min([ylow] + yy), max([yhigh]+ yy)\n extra = (yhigh-ylow)*default_plot_specs['y_scale']\n ylow -= extra\n yhigh +=extra\n\n\n\n ax_1.set_xlim(xlow, xhigh)\n ax_1.set_ylim(ylow, yhigh)\n\n while under_text:\n texti = under_text.pop(0)\n plt.figtext(0.08, text_heights.pop(0), texti, default_plot_specs['undertext_font'])\n\n if text:\n ax_1.text(default_plot_specs['text_loc'][0], default_plot_specs['text_loc'][1], text,\n verticalalignment='bottom', horizontalalignment='right',\n transform=ax_1.transAxes,\n color=default_plot_specs['text_color'], fontsize=default_plot_specs['text_size'])\n\n if ynames:\n handles, labels = ax_1.get_legend_handles_labels()\n lgd = ax_1.legend(handles, labels, loc=default_plot_specs['legend_anchor'],\n bbox_to_anchor=default_plot_specs['legend_loc'],\n prop = default_plot_specs['legend_font'],ncol=n_legend_columns)\n if legend_title:\n lgd.set_title(legend_title,prop=default_plot_specs['legend_font'])\n\n plt.grid('off')\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n save_dir = os.path.join(save_dir,'%s.png' % the_label)\n\n 
if ynames:\n plt.savefig(save_dir, bbox_extra_artists=(lgd,), bbox_inches='tight')\n else:\n plt.savefig(save_dir, bbox_inches='tight') # ,bbox_extra_artists=(lgd,))\n\n plt.close('all')",
"def plot_y(x, y, yer=None, xlabel = None, ylabel = None, yname = None, label = None, domain=None,\n yrange = None, undertext =None, savedir = None, marker = None, plotspecs = None, vlines=None):\n\n if yname is not None:\n ynames = [yname]\n else:\n ynames = None\n\n plot_many_y([x], [y], yer=yer, xlabel=xlabel, ylabel=ylabel, ynames=ynames, label=label, domain=domain,\n yrange=yrange, undertext=undertext, savedir=savedir, marker=marker, plotspecs=plotspecs)",
"def yFormat(self, formatFnc, label=None, options=None, isPyData=False):\r\n return self",
"def ylabel(self, ylabel):\n self._checkfigure()\n self.axes.set_ylabel(ylabel)",
"def color_axis_ticks(color,spine_name=\"left\",axis_name=\"y\",ax=None,\n label_color=None):\n if (label_color) is None:\n label_color = color\n ax = gca(ax)\n ax.tick_params(axis_name,color=color,which='both',labelcolor=label_color)\n ax.spines[spine_name].set_color(color) \n ax.spines[spine_name].set_edgecolor(color)"
] |
[
"0.68434757",
"0.6531821",
"0.648328",
"0.63563305",
"0.62593186",
"0.6250489",
"0.61543673",
"0.61458606",
"0.613033",
"0.6076741",
"0.6069385",
"0.6053534",
"0.5973648",
"0.59678274",
"0.595456",
"0.59413916",
"0.59286755",
"0.59199375",
"0.589542",
"0.5882302",
"0.5799317",
"0.579188",
"0.57905245",
"0.57233214",
"0.57099265",
"0.57018703",
"0.56796014",
"0.56517655",
"0.56449884",
"0.56191987"
] |
0.68740535
|
0
|
Redirect mobile browsers to /mobile and others to /home.
|
def desktop_or_mobile(request):
url_name = 'home.mobile' if request.MOBILE else 'home'
return redirect_to(request, url_name, permanent=False)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def home_page():\n return redirect(url_for(_DEFAULT_ROUTE, _external=True))",
"def handle_forbidden_for_homepage(self, request):\n\n login_url = request.link(Auth.from_request_path(request), name='login')\n\n if URL(request.url).path() == '/':\n return morepath.redirect(login_url)\n\n return handle_forbidden(self, request)",
"def mobile(request):\n MOBILE_AGENT_RE = re.compile(\n r\".*(iphone|mobile|androidtouch)\", re.IGNORECASE)\n if MOBILE_AGENT_RE.match(request.META['HTTP_USER_AGENT']):\n return True\n else:\n return False",
"def homepage():\n if g.user:\n return redirect(f\"/user/{g.user.id}\")\n else:\n return redirect(\"/landing\")",
"def homepage():\n return redirect('index.html')",
"def require_mobile_user(func):\n\n @wraps(func)\n def decorator(*args, **kwargs):\n if not g.user:\n return redirect(url_for('wechat.signin'))\n return func(*args, **kwargs)\n\n return decorator",
"def homepage():\n return redirect(\"/posts\")",
"def catch_all(path):\n return redirect('/', code=302)",
"def check_mobile(request):\n MOBILE_AGENT_RE=re.compile(r\".*(iphone|mobile|androidtouch)\",re.IGNORECASE)\n\n if MOBILE_AGENT_RE.match(request.META['HTTP_USER_AGENT']):\n return True\n else:\n return False",
"def homepage( request ):\n if \"email\" in request.session:\n return redirect( '/home' )\n return render_to_response( 'index.html' )",
"def home(request):\n return redirect('commprod/')",
"def home_page():\n return redirect('/users')",
"def redirect_old_featured(page):\r\n return redirect(url_for('.index', page=page), 301)",
"def go_home(request):\n\n url = request.route_url('home', _app_url=get_app_url(request))\n return HTTPFound(location=url)",
"def first_request():\n heroku_url: str = 'https://justice-ndou.herokuapp.com/'\n registered_domain: str = 'https://justice-ndou.herokuapp.com/'\n\n if request.host_url.lower().startswith(heroku_url):\n return redirect(request.host_url.lower().replace(heroku_url, registered_domain)), 301",
"def toLanding():\n return redirect(url_for('landingurl'))",
"def homepage_redirect():\n return redirect('/upload_file')",
"def redirect_view(request):\n path = request.GET.get(\"to\") or \"/\"\n return redirect(path if path.startswith(\"/\") else f\"/{path}\", permanent=True)",
"def start_page():\n if not _home:\n abort(404)\n return redirect(_home)",
"def start_page():\n if not _home:\n abort(404)\n return redirect(_home)",
"def root_redirect():\r\n return redirect(url_for(\"display_top\"))",
"def index():\n return redirect(url_for(\"home\"))",
"def welcome_page():\n return redirect(\"/static/welcome.html\")",
"def redirect_dest(fallback):\n dest = request.args.get('next')\n try:\n if dest.startswith('/') or dest.startswith(request.host_url):\n return redirect(dest)\n dest_url = url_for(dest)\n except:\n return redirect(fallback)\n return redirect(dest_url)",
"def redirect_heroku():\n urlparts = urlparse(request.url)\n domain_name = \"rsvp.tiks-ultimate.in\"\n old_domain_name = \"thatte-idli-rsvp.herokuapp.com\"\n fly_domain_name = \"tiks-ultimate-rsvp.fly.dev\"\n if urlparts.netloc in {old_domain_name, fly_domain_name}:\n urlparts_list = list(urlparts)\n urlparts_list[1] = domain_name\n return redirect(urlunparse(urlparts_list), code=301)",
"def get(self, request):\n return redirect('start:home')",
"def get(self, request):\n return redirect('start:home')",
"def get(self, request):\n return redirect('start:home')",
"def redirect_nonwww():\n urlparts = urlparse(request.url)\n if urlparts.netloc != 'www.mealscount.com':\n return redirect('https://www.mealscount.com/', code=301)",
"def __before__(self):\n \n if not u'REMOTE_USER' in session: \n if not request.environ[u'PATH_INFO'] in self.public_urls:\n log.debug('PATH_INFO: %s' % request.environ[u'PATH_INFO'])\n #session[u'path_before_login'] = request.environ[u'PATH_INFO']\n #session.save()\n redirect(url('/users/index'))"
] |
[
"0.5935401",
"0.569237",
"0.5678945",
"0.54713297",
"0.5317255",
"0.53092873",
"0.5183884",
"0.5179762",
"0.5162804",
"0.5144536",
"0.51287115",
"0.51033294",
"0.5092589",
"0.5091707",
"0.5090215",
"0.50564617",
"0.50066596",
"0.49919397",
"0.49904168",
"0.49904168",
"0.4976217",
"0.49141172",
"0.49112922",
"0.49068478",
"0.49067122",
"0.4884481",
"0.4884481",
"0.4884481",
"0.48504403",
"0.4824129"
] |
0.7651228
|
0
|
Print all datatypes in the model.
|
def print_datatypes(model: nn.Module, model_name: str, sep: str = "\n") -> None:
    log = model_name + "'s datatypes:" + sep
    log += sep.join(str(t) for t in model_utils.get_model_tensor_datatype(model))
    logger.info(log)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def data_all_types(df):\n \n printmd (\"**Type of every column in the data**\")\n print(\"\")\n print(df.dtypes)",
"def show_features_datatypes(df):\n\tfor inum,icol in enumerate(df.columns):\n\t\tprint('Column id: {0:3d} \\tName: {1:12s} \\tDataType: {2}'.format(inum, icol, df[icol].dtypes))",
"def printListOfCalibTypes (self) :\n print '\\nprintListOfCalibTypes(): list_of_clib_types:' #, self.list_of_clib_types\n for type in self.list_of_clib_types : print ' ', type",
"def show_database_structure(self):\n self.analyze()\n items = []\n for model in get_models():\n names = []\n # for f, m in model._meta.get_fields_with_model():\n for f in model._meta.concrete_fields:\n names.append(f.name)\n items.append(\n \"{0} : {1}\".format(fmn(model), ', '.join(names)))\n\n items = sorted(items)\n return rstgen.ul(items)",
"def print_model_definitions(self):\n sys.stdout.write(\"Model Dimensions\\n\")\n sys.stdout.write(\"----------------\\n\")\n for key, val in self.dimensions.iteritems():\n sys.stdout.write(\"{key}: {val}\\n\".format(key=key, val=val))",
"def print_model_functions(self):\n # TODO replace print statements with stdout.write\n # TODO get derivatives recursively\n self.functions = self.definitions.get(\"functions\", [])\n for func in self.functions:\n print \"type: \", func[\"type\"]\n print \"args: \", func[\"args\"]\n print \"derivatives: \"\n for deriv in func.get(\"deriv\", []):\n for key, val in deriv.iteritems():\n print \" \", key, val\n print \"\"",
"def __str__(self):\n return self.types",
"def type_list():\n for type_ in orm.DataFlagType.select():\n click.echo(type_.name)",
"def dump(self):\r\n for (name, value) in self.__table__.items():\r\n print (name)\r\n print (value)",
"def print_abs_type(self):\n pass",
"def printModel(self):\n print(self.model)",
"def print_types(data: bytearray):\n print(data.decode(\"ascii\"))\n print(\"\".join(\"^\" if is_lms(i, data) else \" \" for i in range(len(data))))",
"def gettypes(self):\n return [str(self.sd.xlate(t[0])) for t in self.sd.types]",
"def do_dtype(self, args, opts=None):\n print(self.explorer[args[0]].dtype)",
"def data_types(self):",
"def print_all(cls):\n [print('{0} = \"{1}\"'.format(k, v)) for (k, v) in cls.all()]",
"def print_objects(self):\n print(\"Spaces: {}\".format([s.name for s in self.spaces]))\n print(\"Characters: {}\".format([c.name for c in self.characters]))\n print(\"Items: {}\".format([i.name for i in self.items]))",
"def print_summary(self):\n print(\"Word Level\")\n self.model_word.summary()\n \n print(\"Sent Level\")\n self.model_sent.summary()\n\n print(\"Doc Level\")\n self.model.summary()",
"def print(self):\n for var in self.variables:\n print(var)",
"def display(self, type_get):\n data = self.build_data()\n for word_type, content in data.items():\n count_def = 1\n if type_get and self.word_type_dict[word_type] != type_get:\n continue\n pron = content[1]\n print(bcolors.BOLD + bcolors.YELLOW + \"%s /%s/ (%s)\" % (self.word, pron, self.word_type_dict[word_type])\n + bcolors.ENDC)\n for sense_dict in content[0]:\n type_def = self.get_type_of_def(sense_dict)\n if type_def:\n type_def = \" \" + type_def + \" \"\n print(\"%s.\" % str(count_def) +\n bcolors.ITALIC + bcolors.GREEN + \"%1s\" % type_def + bcolors.ENDC +\n \"%s\" % self.chunk_str(sense_dict['definitions'][0]))\n if 'examples' in sense_dict:\n self.display_examples(sense_dict['examples'])\n\n print(\"\\r\")\n\n if 'subsenses' in sense_dict:\n self.display_subsenses(sense_dict['subsenses'], count_def)\n\n print(\"\\r\")\n count_def += 1",
"def data_types():\n\n return ...",
"def print_types(self):\n print type(self.posXposYposZ)\n print type(self.posXposYnegZ)\n print type(self.posXnegYposZ)\n print type(self.posXnegYnegZ)\n print type(self.negXposYposZ)\n print type(self.negXposYnegZ)\n print type(self.negXnegYposZ)\n print type(self.negXnegYnegZ)",
"def ntypes(self): # -> list[str]:\n ...",
"def print_type(obj: object) -> None:\n print(f'{type(obj)}')",
"def display_file_types():\n\n print 'Available file types. Each line contains the file type and the list of extensions by those the file type is determined. To include FOOBAR file type to search use --FOOBAR, to exlude use --noFOOBAR. You can include and exclude a number of file types.'\n for ftype, extensions in TYPES().iteritems():\n print '%s: %s' % (ftype, ', '.join(extensions))",
"def ntypes(self): # -> None:\n ...",
"def print(self):\n for fiction in self.fictions:\n print(fiction.__dict__)",
"def dump(self):\n print \"Relation \" + self.relation\n print \" With attributes\"\n for n in self.attributes:\n if self.attribute_types[n] != 'nominal':\n print \" %s of type %s\" % (n, self.attribute_types[n])\n else:\n print (\" \" + n + \" of type nominal with values \" +\n ', '.join(self.attribute_data[n]))\n for d in self.data:\n print d",
"def print_abs_type(self):\n return 'Generic'",
"def print_all_tables(self):\n conn = self.connect()\n cursor = conn.cursor()\n cursor.execute(\"SELECT name FROM sqlite_master WHERE type='table';\")\n print(cursor.fetchall())"
] |
[
"0.6963292",
"0.68407357",
"0.66779065",
"0.6490826",
"0.6259917",
"0.62305534",
"0.62192714",
"0.6191834",
"0.61866474",
"0.61513305",
"0.6119368",
"0.61187154",
"0.596671",
"0.5951513",
"0.59312636",
"0.5927027",
"0.59191304",
"0.5916792",
"0.59138525",
"0.5900104",
"0.5892891",
"0.5878842",
"0.5859794",
"0.5859703",
"0.58542764",
"0.5838425",
"0.5745943",
"0.57445484",
"0.5711256",
"0.5680749"
] |
0.7769597
|
0
|
Load the trained model with the best accuracy.
|
def _load_best_model(self) -> None:
    self.trainer.resume()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def load_best_model(self) -> None:\n self.resume()",
"def load_model(self):\n print(\"=============start loading models=============\")\n # load models from basemodel and fine-tune layers\n base_model = DenseNet(reduction=0.5, classes=1000, weights_path=BASE_WEIGHT_DIR)\n base_model.layers.pop()\n base_model.layers.pop()\n x4 = Dense(6, activation='relu')(base_model.layers[-1].output)\n o = Activation('softmax')(x4)\n\n model = Model(inputs=base_model.input, outputs=[o])\n model.load_weights(WEIGHT_DIR)\n\n self.model = model\n print(\"=============finish loading models=============\")",
"def load(self):\n print(\"==> Loading model from\", self.model_dir)\n self.model = tf.keras.models.load_model(self.model_dir)",
"def load(self):\n utils.get_previous_weights_from_gdrive(self.config.model_folder)\n last_used_model = utils.get_latest_model_name(self.config.model_folder)\n self.model = load_model(last_used_model)\n self.model.summary()",
"def _load_model(self):\n self._load_scaler('scaler.save')\n self._load_encoder('encoder0.save', 0)\n self._load_encoder('encoder1.save', 1)\n self._load_neural_network('model.json', 'model.h5')\n return",
"def load_best_model():\r\n best_model = LSTM_load_best_model()\r\n return best_model",
"def load_model(self):\n self.loaded_model = keras.models.load_model(self.path)\n return self.loaded_model.summary()",
"def load_model(self):\n self.__model = tf.keras.models.load_model(\n os.path.join(self.model_path, \"model.h5\")\n )\n print(\"[INFO] Model loaded!\")\n\n tok_pth = os.path.join(self.model_path, \"tokenizer.json\")\n with open(tok_pth, \"r\") as f:\n self.__tokenizer = tf.keras\\\n .preprocessing\\\n .text\\\n .tokenizer_from_json(f.read())\n print(\"[INFO] Tokenizer loaded!\")\n\n meta_pth = os.path.join(self.model_path, \"meta.json\")\n with open(meta_pth, \"r\") as f:\n meta = json.load(f)\n self.__title_len = meta[\"title_pad_length\"]\n self.__body_len = meta[\"body_pad_length\"]\n\n self.load_explainer()\n print(\"[INFO] Explainer loaded!\")",
"def load_model(self):\n self.pred_net.load((self.save_path / \"iqn_pred_net\").absolute().as_posix())\n self.target_net.load((self.save_path / \"iqn_target_net\").absolute().as_posix())",
"def load_model(self):\n # Load the model\n print('Loading model:', self.model_path)\n t0 = time.time()\n model = load_model(self.model_path)\n t1 = time.time()\n print('Loaded in:', t1 - t0)\n return model",
"def load_model(self, tmp_dir):\n if self.inf_learner is None:\n self.log_options()\n model_uri = self.backend_opts.model_uri\n model_path = download_if_needed(model_uri, tmp_dir)\n self.inf_learner = load_learner(\n dirname(model_path), basename(model_path))",
"def load(self):\n if self.model is None:\n raise Exception(\"Build the model first.\")\n\n print(\"Loading model checkpoint {} ...\\n\".format(self.config[\"model\"][\"restore_model\"]))\n self.model.load_weights(self.config[\"model\"][\"restore_model\"])\n print(\"Model loaded!\")",
"def load(self):\n if self.model is None:\n raise Exception(\"Build the model first.\")\n\n print(\"Loading model checkpoint {} ...\\n\".format(self.config[\"model\"][\"restore_model\"]))\n self.model.load_weights(self.config[\"model\"][\"restore_model\"])\n print(\"Model loaded!\")",
"def train_model(self):\n self.logger.info('Loading the data...')\n train_data = self.load_data(split=\"train\")\n dev_data = self.load_data(split=\"dev\")\n self.config.best_model = os.path.join(self.config.output_dir,\"best_model\")\n self.logger.info('Training the model, outputdir=%s...,best_model=%s' % (self.config.output_dir,self.config.best_model))\n\n train_params = {\n \"overwrite_output_dir\" : True,\n \"reprocess_input_data\": True,\n \"learning_rate\" : self.config.learning_rate,\n \"num_train_epochs\" : self.config.num_train_epochs,\n \"train_batch_size\" : self.config.train_batch_size,\n \"eval_batch_size\" : self.config.eval_batch_size,\n \"gradient_accumulation_steps\": self.config.gradient_accumulation_steps,\n \"use_early_stopping\" : self.config.early_stopping,\n \"fp16\" : False,\n \"classification_report\" : True,\n \"evaluate_during_training\" : True,\n \"evaluate_during_training_verbose\" : True,\n \"best_model_dir\": self.config.best_model,\n \"save_model_every_epoch\" : self.config.save_model_every_epoch,\n \"save_steps\" : self.config.save_steps,\n \"save_optimizer_and_scheduler\" : self.config.save_optimizer_and_scheduler,\n \"save_best_model\": True,\n }\n\n ## train the model \n self.model.train_model(\n train_data,\n eval_data=dev_data,\n output_dir=self.config.output_dir,\n show_running_loss=False,\n args=train_params,\n )\n\n ## backing up the config and create pointer to best model \n with open(os.path.join(self.config.best_model,\"trainer_config.json\"),'w') as mconfig:\n mconfig.write(json.dumps(self.config.__dict__))\n self.config.existing_model = self.config.best_model",
"def load_best_model(self):\r\n self.logger_object.log(\r\n self.file_object, \"Entered load_best_model method of ModelTester class.\"\r\n )\r\n try:\r\n with open(str(Config.MODELS_PATH / \"best_model.joblib\"), \"rb\") as outfile:\r\n model = joblib.load(outfile)\r\n r_squared, rmse = ModelScorer(\r\n file_object=self.file_object\r\n ).get_model_scores(model=model, X_test=self.X_test, y_test=self.y_test)\r\n self.logger_object.log(self.file_object, \"Successfully loaded best model.\")\r\n print(f\"rsquared:{r_squared}, rmse:{rmse}\")\r\n except Exception as e:\r\n self.logger_object.log(\r\n self.file_object,\r\n f\"Exception occured in load_best_model method of ModelTester class. Exception message: {e}\",\r\n )\r\n self.logger_object.log(\r\n self.file_object,\r\n \"Loading final model unsuccessful. Exited load_best_model method of ModelTester class\",\r\n )\r\n raise Exception()",
"def load_model():\n \n _files = training_file()\n \n predictor_path = _files.model_file(LANDMARKS_WEIGHTS)\n face_rec_model_path = _files.model_file(RESNET_WEIGHTS)\n \n detector = dlib.get_frontal_face_detector()\n sp = dlib.shape_predictor(predictor_path)\n facerec = dlib.face_recognition_model_v1(face_rec_model_path)\n \n return (detector, sp, facerec)",
"def load_model(self):\n if os.stat('code/lr-model.pt').st_size == 0:\n return\n params = torch.load('code/lr-model.pt')\n self.set_params(params)",
"def load_model(name):\n\tmodel = joblib.load(\"data/{}/{}.model\".format(name, name))\n\t# Setting n_jobs to 1 in case it was set to a higher number while training the model seems to makes predictions of single samples much faster.\n\tmodel.n_jobs = 1\n\treturn model",
"def load_model(path, model, optimizer):\n print(\"LOADING MODEL...\")\n ckpt = tf.train.Checkpoint(model=model, optimizer=optimizer)\n status = ckpt.restore(tf.train.latest_checkpoint(path))\n ckpt_manager = tf.train.CheckpointManager(\n checkpoint=ckpt, \n directory=FLAGS.model_dir, \n max_to_keep=3 \n )\n return model, optimizer, ckpt, ckpt_manager",
"def get_model(cls):\n if cls.model == None:\n # with open(os.path.join(model_path, 'classify-model'), 'rb') as inp:\n # cls.model = pickle.load(inp)\n #cls.model = keras.models.load_model(model_path, 'classify_model')\n cls.model = tf.keras.models.load_model(model_path)\n print(\"model loaded\")\n return cls.model",
"def load_model():\r\n model = MobileNetV2(weights=\"imagenet\")\r\n print(\"Model loaded\")\r\n return model",
"def train(X_train, y_train, save_model='model.h5'):\n \n # Hyperparameters\n batch_size = 32\n epochs = 30\n learning_rate = 0.001\n \n # Loading model from model.py\n model = m(input_height=IMAGE_HEIGHT, input_width=IMAGE_WIDTH)\n \n # Plot model as image\n plot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True)\n \n # If trained model exist already then load first for further training\n if tf.gfile.Exists(save_model):\n model.load_weights(save_model)\n model.compile(loss='mse', optimizer=Adam(learning_rate))\n \n # Only save model which has best performed on validation set.\n # These are callbacks which are being used in \"model.fit\" call\n earlyStopping = EarlyStopping(monitor='val_loss', patience=5, verbose=1, mode='min')\n mcp_save = ModelCheckpoint('model.h5', save_best_only=True, monitor='val_loss', mode='min')\n reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=7, verbose=1, epsilon=1e-4, mode='min')\n\n # Train the model\n model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, callbacks=[earlyStopping, mcp_save, reduce_lr_loss], validation_split=0.2, shuffle=True)\n \n return",
"def load_model(cls) -> Classifier:\n if cls.model is None:\n cls.model = Classifier.load(model_path)\n return cls.model",
"def load_model(self):\n for t in self.topic:\n if t != \"other\":\n print(\"Loading models of topic: \", t)\n for st in self.topic2sub_topic[t].keys():\n model = tf.keras.Sequential()\n model.add(tf.keras.layers.InputLayer(input_shape=[1024, ]))\n model.add(tf.keras.layers.Dense(64, activation='relu'))\n model.add(tf.keras.layers.Dense(64, activation='relu'))\n model.add(tf.keras.layers.Dense(1, activation='relu'))\n model.compile(loss='mean_squared_logarithmic_error', optimizer='adam', metrics=[metrics.mae, metrics.categorical_accuracy])\n\n if not os.path.exists(self.trained_w_folder+\"/%s/%s.h5\" %(t,st)):\n print(\"Now training the classsfier for topic: \", t, \" ; intent: \", st)\n print(64 * \"=\")\n X, y = self.get_data(t, st)\n print(\"data_loaded!\")\n X_train, X_dev, y_train, y_dev = self.my_train_test_split(X, y)\n model.fit(X_train, y_train, epochs=3, batch_size=128)\n model.save_weights(self.trained_w_folder+\"/%s/%s.h5\" %(t,st))\n print(\"f1_score on dev set: \")\n self.f1_score_model(model, X_dev, y_dev)\n print(64*\"=\")\n print()\n else:\n model.load_weights(self.trained_w_folder+\"/%s/%s.h5\" %(t,st))\n print(\"Loaded weights for model \" + t + \" : \" + st)\n self.model_zoo[t][st] = model",
"def load_model(self):\n if os.path.exists(self.model_filename):\n self.model.load_weights(self.model_filename)",
"def load_model(self):\n saved_path = self.config.path_tmp / self.model.model_name\n if saved_path.exists():\n self.model.load_weights(str(saved_path / 'model.vec'))",
"def load_model(self, fname: str) -> None:\n checkpoint_data = torch.load(fname)\n\n # Load the models\n # P-Net\n model_import_path = checkpoint_data['p_net']['model_import_path']\n imp = importlib.import_module(model_import_path)\n mod = getattr(imp, checkpoint_data['p_net']['model_name'])\n self.p_net = mod()\n self.p_net.set_params(checkpoint_data['p_net'])\n # Q-Net\n model_import_path = checkpoint_data['q_net']['model_import_path']\n imp = importlib.import_module(model_import_path)\n mod = getattr(imp, checkpoint_data['q_net']['model_name'])\n self.q_net = mod()\n self.q_net.set_params(checkpoint_data['q_net'])",
"def load_model():\n logger.info('load_model called')\n return 1",
"def test_train_after_load(self):\n model = PoincareModel(self.data, burn_in=0, negative=3)\n model.train(epochs=1)\n model.save(testfile())\n loaded = PoincareModel.load(testfile())\n model.train(epochs=1)\n loaded.train(epochs=1)\n self.models_equal(model, loaded)",
"def load_model(self, ckpt_name=\"best_model.pth\"):\n path = \"/\".join(ckpt_name.split(\"/\")[:-1])\n chkpt = torch.load(ckpt_name)\n self.start_epoch = chkpt['epoch']\n self.best_metric = chkpt['best_metric']\n\n # fix the DataParallel caused problem with keys names\n if self.multi_gpu_flag:\n new_state_dict = fix_multigpu_chkpt_names(chkpt['state_dict'], drop=False)\n self.net.load_state_dict(new_state_dict)\n else:\n try:\n self.net.load_state_dict(chkpt['state_dict'])\n except:\n new_state_dict = fix_multigpu_chkpt_names(chkpt['state_dict'], drop=True)\n self.net.load_state_dict(new_state_dict)\n\n if self.load_optimizer_state:\n self.optimizer.load_state_dict(chkpt['optimizer'])\n logging.info(\"******** State loaded ********\")\n\n training_meta = pickle.load(open(f\"{path}/training_meta.pickle.dat\", \"rb\"))\n for k, v in training_meta.items():\n if k in self.__class__.__params:\n setattr(self, k, v)\n logging.info(\"******** Training params loaded ********\")"
] |
[
"0.7404185",
"0.73254263",
"0.73121685",
"0.7288304",
"0.71412724",
"0.7104152",
"0.6995499",
"0.6952824",
"0.6943414",
"0.6898542",
"0.68605185",
"0.6857538",
"0.6857538",
"0.68334395",
"0.6812487",
"0.67689997",
"0.6748135",
"0.67441106",
"0.673847",
"0.67283577",
"0.67224383",
"0.66996545",
"0.66928655",
"0.66715246",
"0.66697073",
"0.66547674",
"0.66159165",
"0.6611341",
"0.66017455",
"0.6595484"
] |
0.753348
|
0
|
Test the basic sleet calculation works.
|
def test_basic_calculation(self):
    expected_result = np.array(
        [
            [[0.5, 0.5, 0.0], [0.5, 0.5, 0.4], [0.9, 0.5, 0.4]],
            [[0.5, 0.5, 0.0], [0.5, 0.5, 0.4], [0.9, 0.5, 0.4]],
        ],
        dtype=np.float32,
    )
    result = calculate_sleet_probability(self.rain_prob_cube, self.snow_prob_cube)
    self.assertArrayAlmostEqual(result.data, expected_result)
    self.assertTrue(result.dtype == np.float32)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_get_solution(self):\n pass",
"def test_secant_system(testFunctions, tol, printFlag): \n pass",
"def test_check_cost():",
"def test_suite():\n test(sum_of_squares([2, 3, 4]) == 29)\n test(sum_of_squares([ ]) == 0)\n test(sum_of_squares([2, -3, 4]) == 29)",
"def test_secant(testFunctions, tol, printFlag): \n pass",
"def test_salomon(self):\n fun = get_problem('salomon', self.dimension, -100.0, 100.0)\n self.assertEqual(fun(self.array), 0.0)",
"def test_sum_squares(self):\n fun = get_problem('sum_squares', self.dimension)\n self.assertEqual(fun(self.array), 0.0)",
"def test_weierstrass(self):\n fun = get_problem('weierstrass', self.dimension, -100, 100)\n self.assertAlmostEqual(fun(self.array10), 0.0, delta=1e-4)",
"def main():\r\n test = TesterNeighbour()\r\n test.setUp()\r\n test.test_result_n()\r\n print(\"result_of_algorithm_test - passed\")",
"def test_compute_workload(self):\r\n\r\n spread = [1.23, 0.5, 1.27]\r\n num_cores = 3\r\n num_flows = 11\r\n result = compute_workload(num_cores, num_flows, spread)\r\n self.assertEqual(result, [4, 2, 5])",
"def test_result(self):\n result = compute()\n self.assertEqual(result, '4782')\n print(\"eulpy25Test passed\")",
"def test_suite():\r\n test(add_vectors([1, 1], [1, 1]) == [2, 2])\r\n test(add_vectors([1, 2], [1, 4]) == [2, 6])\r\n test(add_vectors([1, 2, 1], [1, 4, 3]) == [2, 6, 4])\r\n test(scalar_mult(5, [1, 2]) == [5, 10])\r\n test(scalar_mult(3, [1, 0, -1]) == [3, 0, -3])\r\n test(scalar_mult(7, [3, 0, 5, 11, 2]) == [21, 0, 35, 77, 14])\r\n test(dot_product([1, 1], [1, 1]) == 2)\r\n test(dot_product([1, 2], [1, 4]) == 9)\r\n test(dot_product([1, 2, 1], [1, 4, 3]) == 12)\r\n test(cross_product([2,3,4], [5,6,7]) == [-3, 6, -3])",
"def test_slp(self):\n slp = dep.read_slp(get_path('slp.txt'))\n self.assertEquals(len(slp), 5)\n self.assertAlmostEquals(slp[4]['y'][-1], -8.3, 1)",
"def test_calculate_all_operations(self):\n result = self.calcuate.calcuate('11-2+4x3-5')\n expected_result = \"16\"\n self.assertEqual(expected_result, result)",
"def test_katsuura(self):\n fun = get_problem('katsuura', self.dimension, -100, 100)\n self.assertAlmostEqual(fun(self.array10), 3837.4739882594373, delta=4000)",
"def test_suite():\r\n test(slope(5, 3, 4, 2) == 1.0)\r\n test(slope(1, 2, 3, 2) == 0.0)\r\n test(slope(1, 2, 3, 3) == 0.5)\r\n test(slope(2, 4, 1, 2) == 2.0)",
"def test_schumer_steiglitz(self):\n fun = get_problem('schumer_steiglitz', self.dimension, -100, 100)\n self.assertEqual(fun(self.array), 0.0)",
"def run_tests(): \n \n\n nextdata = [[21, 61, 42, 30], [33,45, 18, 29]]\n\n for xval, yval, snum, expect in nextdata:\n\n pmachine = PMachine()\n pmachine.serial_number = snum\n pmachine.run2_completion()\n result = pmachine.calc_square_total(xval, yval, showsquare=True)\n assert result == expect\n print(\"Got value {}={} as expected\".format(result, expect))",
"def test_schwefel222(self):\n fun = get_problem('schwefel222', self.dimension)\n self.assertEqual(fun(self.array), 0.0)",
"def runTests():\r\n\r\n print(\"running a few tests\")\r\n\r\n average = compute .gpsAverage (4, 5)\r\n print(\"average = \", average)\r\n \r\n print (\"hello!\")",
"def test_run_simplega():\n WRFga_winner = run_simplega(pop_size=100, n_generations=1, testing=True)\n assert WRFga_winner.Fitness >= 0",
"def test_amount_in_tons(self):",
"def testBeliefs1sk(self):",
"def test_shocksine():\n from . import shocksine\n from clawpack.pyclaw.util import test_app, check_diff\n\n def verify_shocksine(controller):\n \"\"\" given an expected value, returns a verification function \"\"\"\n import numpy as np\n import os\n\n test_solution = controller.solution.state.get_q_global()\n\n if test_solution is not None:\n thisdir = os.path.dirname(__file__)\n expected_density = np.loadtxt(os.path.join(thisdir,'shocksine_regression_density.txt'))\n test_density = test_solution[0,:]\n test_err = np.linalg.norm(expected_density-test_density)\n return check_diff(0, test_err, abstol=1.e-4)\n\n return test_app(shocksine.setup, verify_shocksine, {})",
"def test_calc_layer_sparsity():\n test_ndarray = np.array([[0, 2, 0], [1, 0, 1]])\n assert lu.calc_layer_sparsity(test_ndarray) == 3 / 6, 'correct sparsity value'\n\n test_ndarray = np.array([[0, 0, 0], [1, 0, 1]])\n assert abs(lu.calc_layer_sparsity(test_ndarray) - 4 / 6) < 10**-8, 'correct sparsity value'\n assert lu.calc_layer_sparsity(np.zeros((20, 20))) == 1.0, 'zero array should have 1.0 sparsity'\n assert lu.calc_layer_sparsity(\n np.random.rand(20, 20)) == 0.0, 'random array should have 0.0 sparsity'\n assert type(lu.calc_layer_sparsity(np.zeros((10, 10)))) is float, 'return value should be of type float'",
"def test_solve_task(self):\n pass",
"def test_run():\n argv = [\"py.test\", \"100\", \"-potential\", \"potentials/bump.cfg\"]\n args = get_sargs(argv)\n from basis.basis_solve import run\n assert run(args) == 0",
"def test_3_basic(self):\n plan = list(astar(self.mapp_1_s,\n lambda s : s == self.mapp_1_g,\n MAPPDistanceSum(self.mapp_1_g)))\n self.assertEqual(4,len(plan))",
"def problem1(self, s):\n\n # Test with good inputs (4 points)\n x = np.array([1, 2])\n y = np.array([2, 2])\n points = self.numTest(euclidean_metric(x,y), s.euclidean_metric(x,y),\n \"\\n\\teuclidean_metric() failed.\")\n \n x = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n y = np.array([2, 6, 4, 8, 0, 2, 4, 7, 5, 11])\n points += self.numTest(euclidean_metric(x,y), s.euclidean_metric(x,y),\n \"\\n\\teuclidean_metric() failed.\")\n \n x = (np.random.random(100)-.5)*200\n y = (np.random.random(100)-.5)*200\n points += self.numTest(euclidean_metric(x,y), s.euclidean_metric(x,y),\n \"\\n\\teuclidean_metric() failed.\")*2\n \n # Test with bad inputs (1 point)\n x = np.array([1, 2])\n y = np.array([1, 2, 3])\n try:\n s.euclidean_metric(x, y)\n self.feedback += \"\\n\\teuclidean_metric() failed to raise a \"\n self.feedback += \"ValueError for vectors of different lengths\"\n except:\n points += 1\n\n return points",
"def testBeliefs2sk(self):"
] |
[
"0.6918797",
"0.6694533",
"0.6680471",
"0.66613245",
"0.63638985",
"0.63563555",
"0.6344537",
"0.63026416",
"0.6276182",
"0.62295896",
"0.6196068",
"0.61565745",
"0.6153773",
"0.6133147",
"0.61278003",
"0.61272407",
"0.61217076",
"0.6105129",
"0.6069144",
"0.6065967",
"0.60624766",
"0.60624313",
"0.6060449",
"0.60555285",
"0.6052915",
"0.60497296",
"0.60487247",
"0.6045774",
"0.6043738",
"0.6043512"
] |
0.6764586
|
1
|
Test the basic sleet calculation works with int8 data.
|
def test_with_ints(self):
    rain_prob_cube = self.rain_prob_cube.copy(
        np.array(
            [[[1, 0, 0], [0, 1, 1], [0, 0, 1]], [[1, 0, 0], [0, 1, 1], [0, 0, 1]]],
            dtype=np.int8,
        )
    )
    snow_prob_cube = self.snow_prob_cube.copy(
        np.array(
            [[[0, 1, 0], [1, 0, 0], [0, 1, 0]], [[0, 1, 0], [1, 0, 0], [0, 1, 0]]],
            dtype=np.int8,
        )
    )
    expected_result = np.array(
        [[[0, 0, 1], [0, 0, 0], [1, 0, 0]], [[0, 0, 1], [0, 0, 0], [1, 0, 0]]],
        dtype=np.int8,
    )
    result = calculate_sleet_probability(rain_prob_cube, snow_prob_cube)
    self.assertArrayAlmostEqual(result.data, expected_result)
    self.assertTrue(result.dtype == np.int8)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def testGetOne(self):\n data = b'0123456789'\n inst = WireData(data)\n for i, byte in enumerate(bytearray(data)):\n self.assertEqual(inst[i], byte)\n for i in range(-1, len(data) * -1, -1):\n self.assertEqual(inst[i], bytearray(data)[i])",
"def test_numbers_roundtrip():\n for num in (0, 1, 2, 178, 300, BIG_NUMBER):\n num2 = UnsignedInt.read(UnsignedInt.to_bytes(num))\n assert num2 == num",
"def data_check(self):\n # Determine whether the input data is an integer\n temp = [i for i in self._src_data if not isinstance(i, int)]\n if temp:\n print('invalid data')\n return\n # Preliminary Judgment on Solution\n self._src_data.sort()\n three_sum = self._src_data[3:6]\n if sum(self._src_data) != 3 * sum(self._src_data[3:6]):\n print('no solution!')\n return\n self._t_sum = sum(three_sum)\n s = self.solution(self._src_data)\n return s",
"def test_single_index(self):\n dset = self.f.create_dataset('x', (1,), dtype='i1')\n out = dset[0]\n self.assertIsInstance(out, np.int8)",
"def testSC(self):\n\n obt_np = compression.decByteOffet_numpy(compression.compByteOffet_numpy(self.ds))\n self.assertEqual(abs(self.ds - obt_np).max(), 0.0, \"numpy algo\")\n obt_cy = compression.decByteOffet_cython(compression.compByteOffet_numpy(self.ds))\n self.assertEqual(abs(self.ds - obt_cy).max(), 0.0, \"cython algo\")\n obt_cy2 = compression.decByteOffet_cython(compression.compByteOffet_numpy(self.ds), self.ds.size)\n self.assertEqual(abs(self.ds - obt_cy2).max(), 0.0, \"cython algo_orig\")\n obt_we = compression.decByteOffet_weave(compression.compByteOffet_numpy(self.ds), self.ds.size)\n self.assertEqual(abs(self.ds - obt_we).max(), 0.0, \"weave algo\")",
"def test_katsuura(self):\n fun = get_problem('katsuura', self.dimension, -100, 100)\n self.assertAlmostEqual(fun(self.array10), 3837.4739882594373, delta=4000)",
"def test_right_twos_to_int(self):\n self.assertEqual(utils.twos_to_int('101'.zfill(8)), 5)",
"def test_toInt(self):\r\n self.assertEqual(self.black.toInt(), 0)\r\n self.assertEqual(self.red.toInt(), 16711680)\r\n self.assertEqual(self.pink.toInt(), 6553600)",
"def test_get_squarerect_ltz_all_int(self):\n result = get_squarerectangle_type(-1, -1, -1, -1)\n self.assertEqual(result, 'invalid')",
"def test_roundtrip_signed_int():\n for num in (0, -0, -1, 2, -178, 300, -BIG_NUMBER, BIG_NUMBER):\n num2 = SignedInt.read(SignedInt.to_bytes(num))\n assert num2 == num",
"def test_bigSum():\n\n assert bigSum() == 20000000100000000",
"def test_basic(self):\n self.assertEqual(solution(\"\"\"11111\n19991\n19191\n19991\n11111\"\"\"), 6)\n self.assertEqual(solution(\"\"\"5483143223\n2745854711\n5264556173\n6141336146\n6357385478\n4167524645\n2176841721\n6882881134\n4846848554\n5283751526\"\"\"), 195)",
"def test_binops(self):",
"def test_run_a_scan_on_sdp_subarray_in_low():",
"def test_scalar_null(self):\n dset = self.f.create_dataset('x', shape=(), dtype='i1')\n out = dset[()]\n self.assertIsInstance(out, np.int8)",
"def test_symmetry_positive_int(self):\n for x in range(1000):\n random_int = random.randint(0, sys.maxsize)\n encoded_int = base62.from_decimal(random_int)\n self.assertEqual(random_int, base62.to_decimal(encoded_int))",
"def test_int_install_1():\n expected_output_price = 65000\n output_price = int_installs('65000')\n assert math.fabs(output_price - expected_output_price) < ROUND_OFF_ERROR, \\\n \"\"\"Should show that the installs is 65000.\"\"\"",
"def test_stepint(self):\n fun = get_problem('stepint', self.dimension)\n self.assertEqual(fun(self.array9), -5.0)",
"def test_data_type(self):\n self.assertTrue(self.tester.data_type(), \"18S\")",
"def test_devide_int(self):\n self.assertEqual(operations.devide(8,4), 2)",
"def test_singles(self):\n self.assertEqual(singles(self.TestData), 3)\n self.assertEqual(singles(array([0,3,4])), 0)\n self.assertEqual(singles(array([1])), 1)",
"def solution(data):\n\t\tif data:\n\t\t\treturn 1\n\t\telse:\n\t\t\treturn 0",
"def test_sinc_array():\n x,sc = cw04.gen_sinc_array(1,3,3)\n desired = ([0.8414709848078965, .45464871 , 0.04704000])\n print(\"Obtained:\",sc)\n print(\"Desired:\",desired)\n # For comparing floating point values, nose has useful helper functions\n # to ensure they are equal up to a numerical precision tolerance\n np.testing.assert_almost_equal(sc, desired)",
"def test_int(self):\n htype = h5t.py_create('i')\n self.assertIsInstance(htype, h5t.TypeIntegerID)",
"def test_basic_calculation(self):\n expected_result = np.array(\n [\n [[0.5, 0.5, 0.0], [0.5, 0.5, 0.4], [0.9, 0.5, 0.4]],\n [[0.5, 0.5, 0.0], [0.5, 0.5, 0.4], [0.9, 0.5, 0.4]],\n ],\n dtype=np.float32,\n )\n result = calculate_sleet_probability(self.rain_prob_cube, self.snow_prob_cube)\n self.assertArrayAlmostEqual(result.data, expected_result)\n self.assertTrue(result.dtype == np.float32)",
"def test_S2L1C_float32_uint16(self):\n test_dir = os.path.dirname(os.path.realpath(__file__))\n cache_folder = os.path.join(test_dir, 'cache_test')\n\n if os.path.exists(cache_folder):\n shutil.rmtree(cache_folder)\n\n task = SentinelHubInputTask(\n bands_feature=(FeatureType.DATA, 'BANDS'),\n additional_data=[(FeatureType.MASK, 'dataMask')],\n size=self.size,\n maxcc=self.maxcc,\n time_difference=self.time_difference,\n data_collection=DataCollection.SENTINEL2_L1C,\n max_threads=self.max_threads,\n cache_folder=cache_folder\n )\n\n eopatch = task.execute(bbox=self.bbox, time_interval=self.time_interval)\n bands = eopatch[(FeatureType.DATA, 'BANDS')]\n is_data = eopatch[(FeatureType.MASK, 'dataMask')]\n\n self.assertTrue(np.allclose(array_stats(bands), [0.0233, 0.0468, 0.0252]))\n\n width, height = self.size\n self.assertTrue(bands.shape == (4, height, width, 13))\n self.assertTrue(is_data.shape == (4, height, width, 1))\n self.assertTrue(len(eopatch.timestamp) == 4)\n self.assertTrue(bands.dtype == np.float32)\n\n self.assertTrue(os.path.exists(cache_folder))\n\n # change task's bans_dtype and run it again\n task.bands_dtype = np.uint16\n\n eopatch = task.execute(bbox=self.bbox, time_interval=self.time_interval)\n bands = eopatch[(FeatureType.DATA, 'BANDS')]\n\n self.assertTrue(np.allclose(array_stats(bands), [232.5769, 467.5385, 251.8654]))\n\n self.assertTrue(bands.dtype == np.uint16)\n\n shutil.rmtree(cache_folder)",
"def testIntcodeProgram():\n\n testData = [\n {\n \"input\": [1, 0, 0, 0, 99],\n \"output\": [2, 0, 0, 0, 99]\n },\n {\n \"input\": [2, 3, 0, 3, 99],\n \"output\": [2, 3, 0, 6, 99]\n },\n {\n \"input\": [2, 4, 4, 5, 99, 0],\n \"output\": [2, 4, 4, 5, 99, 9801]\n },\n {\n \"input\": [1, 1, 1, 4, 99, 5, 6, 0, 99],\n \"output\": [30, 1, 1, 4, 2, 5, 6, 0, 99]\n },\n ]\n\n overallSuccess = True\n\n for test in testData:\n input = test['input']\n expectedResult = test['output']\n\n result = runIntcode(input.copy())\n\n if result == expectedResult:\n print (\"Testing\", input, \"... ok\")\n else:\n print (\"Testing\", input, \"... fail, got \", result)\n overallSuccess = False\n\n return overallSuccess",
"def test_int_out_of_range(parallel, guess):\n imin = np.iinfo(int).min + 1\n imax = np.iinfo(int).max - 1\n huge = f\"{imax+2:d}\"\n\n text = f\"P M S\\n {imax:d} {imin:d} {huge:s}\"\n expected = Table([[imax], [imin], [huge]], names=(\"P\", \"M\", \"S\"))\n # NOTE: Warning behavior varies for the parameters being passed in.\n with pytest.warns() as w:\n table = ascii.read(\n text, format=\"basic\", guess=guess, fast_reader={\"parallel\": parallel}\n )\n if not parallel:\n assert len(w) == 1\n assert (\n \"OverflowError converting to IntType in column S, reverting to String\"\n in str(w[0].message)\n )\n assert_table_equal(table, expected)\n\n # Check with leading zeroes to make sure strtol does not read them as octal\n text = f\"P M S\\n000{imax:d} -0{-imin:d} 00{huge:s}\"\n expected = Table([[imax], [imin], [\"00\" + huge]], names=(\"P\", \"M\", \"S\"))\n with pytest.warns() as w:\n table = ascii.read(\n text, format=\"basic\", guess=guess, fast_reader={\"parallel\": parallel}\n )\n if not parallel:\n assert len(w) == 1\n assert (\n \"OverflowError converting to IntType in column S, reverting to String\"\n in str(w[0].message)\n )\n assert_table_equal(table, expected)",
"def test_convert_logical():",
"def eighteen():\r\n\r\n return 0"
] |
[
"0.6081037",
"0.58889943",
"0.5885166",
"0.5839974",
"0.57514775",
"0.5720629",
"0.569138",
"0.5677612",
"0.56651866",
"0.56646323",
"0.56623197",
"0.5593129",
"0.5531913",
"0.5520678",
"0.5516416",
"0.5483794",
"0.54793435",
"0.54614264",
"0.5461213",
"0.5455815",
"0.5453234",
"0.5450797",
"0.5445706",
"0.5436324",
"0.5429078",
"0.54159844",
"0.5410914",
"0.5409873",
"0.5401134",
"0.5360823"
] |
0.66065717
|
0
|
Test that an exception is raised for negative values of probability_of_sleet in the cube.
|
def test_negative_values(self):
    rain = self.rain_prob_cube
    high_prob = self.high_prob_cube
    msg = "Negative values of sleet probability have been calculated."
    with self.assertRaisesRegex(ValueError, msg):
        calculate_sleet_probability(rain, high_prob)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_negativexvalue(self):\n Square.reset_objects()\n with self.assertRaises(ValueError) as e:\n s1 = Square(1, -2)\n self.assertEqual(str(e.exception), \"x must be >= 0\")",
"def test_error_when_probabilities_negative(self):\n self._assert_raise_error(\n probabilities=[0.5, 0.6, -0.1],\n random_nums=[0, 0, 0],\n error=InvalidProbabilitiesError,\n code=2\n )",
"def test_negative_electrode_potential_profile(self):\n np.testing.assert_array_almost_equal(self.phi_s_n(self.t, x=0), 0, decimal=5)",
"def test_negativesize(self):\n Square.reset_objects()\n with self.assertRaises(ValueError) as e:\n s1 = Square(-1)\n self.assertEqual(str(e.exception), \"width must be > 0\")",
"def test_negative_numbers(self):\n\t\tself.assertTrue(prime_generator(-5), \"Negative numbers not allowed.\")",
"def test_stochatreat_input_invalid_probs(correct_params):\n probs_not_sum_to_one = [0.1, 0.2]\n with pytest.raises(Exception):\n stochatreat(\n data=correct_params[\"data\"],\n block_cols=[\"block\"],\n treats=correct_params[\"treat\"],\n idx_col=correct_params[\"idx_col\"],\n probs=probs_not_sum_to_one,\n )",
"def test_negativeyvalue(self):\n Square.reset_objects()\n with self.assertRaises(ValueError) as e:\n s1 = Square(1, 2, -2)\n self.assertEqual(str(e.exception), \"y must be >= 0\")",
"def test_negative_values_not_allowed(self, test_input, expected, sc):\n expected_err_msg = f'negatives not allowed {expected}'\n with pytest.raises(ValueError):\n sc.add(test_input)\n\n try:\n sc.add(test_input)\n except ValueError as e:\n assert str(e) == expected_err_msg",
"def test_one_negative_number(self):\r\n given_n = -85\r\n total_n = 200\r\n\r\n with self.assertRaises(NegativeNumberException):\r\n n_percent(given_n, total_n)",
"def test_negative(self):\n\n input_ = -1\n expected = ValueError\n with self.assertRaises(expected):\n math.factorial(input_)",
"def test_negative_case(self):\n self.assertRaises(ValueError, factorial, -3)",
"def test_if_input_is_negative(self):\n self.assertEquals(prime_numbers(-5), \"Numbers less than or equal to zero are not allowed!\")",
"def test_negative_exponents():\n with raises(ValueError):\n power(1, -1)",
"def test_critic_over_exception(self):\n z_matrix = np.array(\n [[0.0, 0.0, 1.1],\n [0.1, 0.2, 0.8],\n [0.2, 0.4, 0.6],\n [0.3, 0.7, 0.3],\n [0.6, 0.8, 0.2],\n [0.8, 0.9, 0.1],\n [1.0, 1.0, 0.0]],\n dtype=np.float64)\n self.assertRaises(ValueError, mcdm.weigh, z_matrix, \"CRITIC\")",
"def test_negative_pricing(self):\n with self.assertRaises(InvalidProductPriceException):\n Product(self.test_product_name, -1.00)\n with self.assertRaises(InvalidProductPriceException):\n Product(self.test_product_name, -0.01)\n with self.assertRaises(InvalidProductPriceException):\n Product(self.test_product_name, 0)\n with self.assertRaises(InvalidProductPriceException):\n Product(self.test_product_name, 0.00)\n try:\n Product(self.test_product_name, 1.00)\n Product(self.test_product_name, 0.01)\n except InvalidProductPriceException:\n self.fail(\"InvalidProductPriceException raised for positive value unexpectedly\")",
"def test_error_if_negative_more_than_population(self):\n model = PoincareModel(self.data, negative=5)\n with self.assertRaises(ValueError):\n model.train(epochs=1)",
"def test_error_when_probabilities_does_not_sum_to_one(self):\n self._assert_raise_error(\n probabilities=[0.5, 0.4],\n random_nums=[0, 0],\n error=InvalidProbabilitiesError,\n code=3\n )",
"def test_pauli_error_raise_invalid(self):\n self.assertRaises(NoiseError, lambda: pauli_error([('S', 1)]))",
"def test_nonsense(self):\n with self.assertRaises(ValueError):\n ESN(N_in,N_out,random_state=-1)\n\n with self.assertRaises(Exception) as cm:\n ESN(N_in,N_out,random_state=0.5)\n self.assertIn(\"Invalid seed\",str(cm.exception))",
"def test_negative(self):\n self.assertFalse(validate_measure_input('-1', self.measures))",
"def test_two_negative_numbers(self):\r\n given_n = -85\r\n total_n = -200\r\n\r\n with self.assertRaises(NegativeNumberException):\r\n n_percent(given_n, total_n)",
"def test_exception(\n self,\n ):\n with pytest.raises(ValueError, match=\"cannot be larger than number of subsystems\"):\n symplectic.reduced_state(np.array([0, 0]), np.identity(2), [6, 4])",
"def test_exceptions(self):\n # Argument must be a list of LeastSquaresTerms\n with self.assertRaises(ValueError):\n prob = LeastSquaresProblem(7)\n with self.assertRaises(ValueError):\n prob = LeastSquaresProblem([])\n with self.assertRaises(ValueError):\n prob = LeastSquaresProblem([7, 1])",
"def test_negative_input(self):\n negative_data_down = np.full_like(\n self.cube_uv_down.data, dtype=np.float32, fill_value=-0.1\n )\n negative_uv_down = self.cube_uv_down.copy(data=negative_data_down)\n msg = (\n \"The radiation flux in UV downward contains data \"\n \"that is negative or NaN. Data should be >= 0.\"\n )\n with self.assertRaisesRegex(ValueError, msg):\n calculate_uv_index(negative_uv_down)",
"def test_that_test_can_fail():\n try:\n verify_atomic_weight_for_substance(\"O2\", 1.0)\n except AssertionError as e:\n return\n\n raise AssertionError(\"test_that_test_can_fail() didn't fail\")",
"def test_Sobol_G_raises_error_if_values_lt_zero():\n with raises(ValueError):\n evaluate(np.array([0, -1, -.02, 1, 1, -0.1, -0, -12]))",
"def test_notrunerror(self, MetricClass):\n m = MetricClass()\n with pytest.raises(NotRunError):\n RandomTrader(seed=42).evaluate(m)",
"def test_critic_under_exception(self):\n z_matrix = np.array(\n [[ 0.0, 0.0, 1.0], # noqa: E201\n [-0.1, 0.2, 0.8], # noqa: E201\n [ 0.2, 0.4, 0.6], # noqa: E201\n [ 0.3, 0.7, 0.3], # noqa: E201\n [ 0.6, 0.8, 0.2], # noqa: E201\n [ 0.8, 0.9, 0.1], # noqa: E201\n [ 1.0, 1.0, 0.0]], # noqa: E201\n dtype=np.float64)\n self.assertRaises(ValueError, mcdm.weigh, z_matrix, \"CRITIC\")",
"def test_set_glass_millilitres__with_negative_number__raises_value_error():\n glass = moet.create_glass(\"A\")\n with pytest.raises(ValueError):\n glass.quantity = -100",
"def test_negative_pressure(self):\n with pytest.raises(StateError):\n State(substance=\"water\", T=Q_(300, \"K\"), p=Q_(-101325, \"Pa\"))"
] |
[
"0.71974754",
"0.696715",
"0.68571836",
"0.6828799",
"0.67281663",
"0.6592654",
"0.65832895",
"0.65570897",
"0.65449387",
"0.6540636",
"0.65332764",
"0.6524829",
"0.64873177",
"0.6453737",
"0.6445203",
"0.64262295",
"0.641401",
"0.6391509",
"0.63878036",
"0.6383371",
"0.6382286",
"0.6375111",
"0.6321261",
"0.63087475",
"0.6301347",
"0.62755626",
"0.62555355",
"0.62548304",
"0.6240187",
"0.6233626"
] |
0.8309056
|
0
|
Test that the name has been changed to sleet_probability
|
def test_name_of_cube(self):
    result = calculate_sleet_probability(self.snow_prob_cube, self.rain_prob_cube)
    name = "probability_of_sleet"
    self.assertEqual(result.long_name, name)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_name_shower(self):\n self.assertTrue(self.ec.name_shower(self.ec.names))",
"def test_first_name_sim_nones():\n assert nedss.first_name_similarity_scorer(None, None) == 1",
"def testMapTitle(self) -> None:\n def testNewTitle(name:str, solution:list[float]):\n self._nameClassifierBuilder._initializeNameMapping()\n title = self._nameClassifierBuilder._getTitle(name)\n self._nameClassifierBuilder._mapTitle(title)\n self.assertEquals(solution, self._nameClassifierBuilder._currentNameMapping)\n\n solution = [1.0,0.0,0.0,0.0,0.0,0.0]\n testNewTitle(\"jslghaldfaCollgja lgn awfggad\", solution)\n \n solution = [0.0,0.0,1.0,0.0,0.0,0.0]\n testNewTitle(\"fsdj Mrs. afjdlgaj\", solution)\n\n solution = [0.0,0.0,0.0,0.0,0.0,1.0]\n testNewTitle(\"jslghaldfagja lgn awfggad\", solution)",
"def test_drudge_has_names(free_alg):\n\n p = free_alg.names\n\n # Range and dummy related.\n assert p.R == Range('R')\n assert len(p.R_dumms) == 6\n assert p.R_dumms[0] == p.i\n assert p.R_dumms[-1] == p.n\n\n # Vector bases.\n assert p.v == Vec('v')\n\n # Scalar bases.\n assert p.m == IndexedBase('m')",
"def statistical_test_name(sample_size):\n # First we create our sample\n sample = []\n for i in range(sample_size):\n sample.append(full_name())\n\n # Then we test each name and add the numbers to the according values\n doctor = 0\n double_first = 0\n double_last = 0\n male = 0\n female = 0\n for i in sample:\n result = test_name(i)\n doctor += result[0]\n double_first += result[1]\n double_last += result[2]\n if result[3] == \"male\":\n male += 1\n elif result[3] == \"female\":\n female += 1\n\n # Now we convert the raw numbers to percentage values with 2 digits\n doctor = '{:.2%}'.format(doctor / sample_size)\n double_first = '{:.2%}'.format(double_first / sample_size)\n double_last = '{:.2%}'.format(double_last / sample_size)\n male = '{:.2%}'.format(male / sample_size)\n female = '{:.2%}'.format(female / sample_size)\n\n # At the end we print each probability\n print(doctor, \" are doctors.\", sep=\"\")\n print(double_first, \" have a double first name.\", sep=\"\")\n print(double_last, \" have a double last name.\", sep=\"\")\n print(male, \" are male.\", sep=\"\")\n print(female, \" are female.\", sep=\"\")",
"def find_new_name(self, std, name):\n all_names = [case.read_name() for case in std.get_all(aster_s.Case)]\n new_name = name\n for idx in xrange(100):\n if new_name not in all_names:\n return new_name\n new_name = name + str(idx)\n else:\n mod.launch(ERROR, \"Too many wizards '%s' in use\" % name)",
"def score(name):\r\n return (sorted(test).index(name)+1)*value(name)",
"def test_get_probabilities(self):\n # Get data and clean it\n input_data = pd.read_csv(\n self._DATA_FOLDER / 'first_name_input.csv',\n skip_blank_lines=False,\n )\n # Get prob\n result = self._FIRST_NAME_MODEL.get_probabilities(input_data['first_name'])\n # Get true result\n true_result = pd.read_csv(\n self._DATA_FOLDER / 'first_name_output.csv',\n )\n # Clean for consistency\n result = result.round(4).fillna('')\n true_result = true_result.round(4).fillna('')\n # Check that all items in the series are equal\n pd.testing.assert_frame_equal(result, true_result)",
"def _does_name_change_require_verification(user, old_name, new_name):\n return (\n is_verified_name_enabled()\n and old_name != new_name\n and len(get_certificates_for_user(user.username)) > 0\n )",
"def test_name_freezing(self):\r\n user = UserFactory.create()\r\n user.profile.name = u\"Jack \\u01B4\" # gratuious non-ASCII char to test encodings\r\n\r\n attempt = SoftwareSecurePhotoVerification(user=user)\r\n user.profile.name = u\"Clyde \\u01B4\"\r\n attempt.mark_ready()\r\n\r\n user.profile.name = u\"Rusty \\u01B4\"\r\n\r\n assert_equals(u\"Clyde \\u01B4\", attempt.name)",
"def name_collision(x):\r\n return x",
"def name_collision(x):\r\n return x",
"def test_name_false(self):\r\n self.name = False",
"def test_name(self, data, firstname, secondname):\n layer = Points(data)\n assert layer.name == \"Points\"\n\n layer = Points(data, name=firstname)\n assert layer.name == firstname\n\n layer.name = secondname\n assert layer.name == secondname",
"def test_basic(self):\n plugin_instance = ProbabilitiesFromPercentiles2D(self.test_cube,\n self.new_name)\n self.assertEqual(plugin_instance.output_name, self.new_name)",
"def name_collision(x):\n return x",
"def name_collision(x):\n return x",
"def test_that_name_saved():\n custom_sum_name = \"custom_sum\"\n\n assert custom_sum.__name__ == custom_sum_name",
"def test_it_has_a_name():\n rob = Unicorn('Robert')\n assert rob.name == 'Robert'",
"def testBeliefs1sk(self):",
"def _weight_boosting_loss(name: str):\n return hp.choice(name, [\"linear\", \"square\", \"exponential\"])",
"def test_legal_names(self):\n names = [i[0] for i in generate_products()]\n\n for n in names:\n name = str(n).split()\n name1 = name[0]\n name2 = name[1]\n self.assertIn(name1, ADJECTIVES)\n self.assertIn(name2, NOUNS)",
"def test_change_provisioned_throughput_usual_case():",
"def verif_similar_names(sv):\r\n ok=True\r\n names=[os.path.normcase(n) for n in sv.Object_list] # list names without case\r\n names.sort() # facilitate compare one to the next\r\n for i, n in enumerate(names[:-1]): # scan whole list\r\n a,b=n[:-1], names[i+1][:-1] # names minus last char\r\n c=names[i+1][-1] # last char in full name\r\n d=n[-1] # last char in full name\r\n if len(a)>1 and (c <\"0\" or c>\"9\") and (d <\"0\" or d>\"9\") and a[-1]!=Underscore and b in [a, n]:\r\n if ok:\r\n print(\"\")\r\n ok=False\r\n warn(\"\\n\"+Warn_typing_risk+\"\\n'\"+n+\"' / '\"+names[i+1]+\"'\") # *** Warning: risk of typing error in '\"+n+\"' or '\"+names[i+1]+\"' *** \r\n \r\n if not ok: print(\"\")",
"def test_var_names(var_name):\n assert isinstance(var_name, str)\n if standard_names.is_valid_name(var_name):\n standard_names.StandardName(var_name)\n else:\n warnings.warn(\"not a valid standard name: {name}\".format(name=var_name))",
"def setName(self,newName):\n for i, wt in enumerate(self.weights):\n tempcoef = 0\n tempoff = ord(newName[i]) - ord(self.getName()[i])\n if(tempoff > 0): \n tempcoef = 0.1\n else: \n tempcoef = -0.1\n #print(\"Was: \"+newName + \" \" + self.getName() + \" \" + str(tempoff))\n tempoff = np.abs(tempoff)\n for j in range(tempoff): \n a = np.random.randint(wt.shape[0])\n b = np.random.randint(wt.shape[1])\n wt[a,b] += tempcoef\n\n for v, bs in enumerate(self.bias):\n tempcoef = 0\n tempoff = ord(newName[v+len(self.weights)]) - ord(self.getName()[v+len(self.weights)])\n if(tempoff > 0): \n tempcoef = 0.1\n else: \n tempcoef = -0.1\n #print(\"Now: \"+ str(v) + \" \" +newName + \" \" + self.getName() + \" \" + str(tempoff))\n tempoff = np.abs(tempoff)\n for j in range(tempoff): \n c = np.random.randint(bs.shape[0])\n bs[c] += tempcoef",
"def test_is_ramped_using_string(self):\n self.feature_test.set_percentage(100)\n self.assertTrue(self.feature_test._is_ramped('[email protected]'))",
"def test_basic(self):\n new_name = \"probability_of_snowfall\"\n test_cube = set_up_percentiles_cube()\n inverse_ordering = False\n expected = ('<ProbabilitiesFromPercentiles2D: percentiles_'\n 'cube: {}, output_name: {}, inverse_ordering: {}'.format(\n test_cube, new_name, inverse_ordering))\n result = str(ProbabilitiesFromPercentiles2D(test_cube,\n new_name))\n self.assertEqual(result, expected)",
"def get_probability(some_dict, some_string):\n lowercase_review = some_string.lower()\n split_review = lowercase_review.split()\n product = 1 \n for word in split_review:\n if word not in some_dict:\n probability = 0.00009\n #assigning unknown words a probability very close to zero\n else: \n probability = some_dict[word]\n product *= probability\n return product",
"def _weight_boosting_algorithm(name: str):\n return hp.choice(name, [\"SAMME\", \"SAMME.R\"])"
] |
[
"0.5866515",
"0.58143824",
"0.579036",
"0.5770138",
"0.5757495",
"0.57450956",
"0.56818116",
"0.5674922",
"0.5635623",
"0.5598415",
"0.5589073",
"0.5589073",
"0.5558196",
"0.5556718",
"0.55326825",
"0.55205125",
"0.55205125",
"0.55115056",
"0.55009645",
"0.5485113",
"0.54738146",
"0.5425484",
"0.5424789",
"0.5412144",
"0.54074436",
"0.53991073",
"0.5395879",
"0.5387488",
"0.5373479",
"0.53722703"
] |
0.64126116
|
0
|
Assert an appropriate exception is raised when UMAP is not installed
|
def test_umap_unavailable():
    from yellowbrick.text.umap_vis import UMAP
    assert UMAP is None
    with pytest.raises(
        YellowbrickValueError, match="umap package doesn't seem to be installed"
    ):
        UMAPVisualizer()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_not_units(self):\n with self.assertRaises(AssertionError):\n _unit_map(\"WiB\")",
"def test_lookup_exception(self):\n self.assertIsInstance(BuildGraph.TransitiveLookupError(), AddressLookupError)",
"def test_ip_addr_fails(self, mock_ghn, mock_grnam, mock_pwnam):\n # Should pass\n self.driver.check_for_setup_error()\n # Throw exception, need to clear internal cached host in driver\n self._fail_ip = True\n self.driver._vgc_host = None\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)",
"async def test_setup_failed_error(hass: HomeAssistant, ufp: MockUFPFixture) -> None:\n\n ufp.api.get_nvr = AsyncMock(side_effect=NvrError)\n\n await hass.config_entries.async_setup(ufp.entry.entry_id)\n await hass.async_block_till_done()\n assert ufp.entry.state == ConfigEntryState.SETUP_RETRY\n assert not ufp.api.update.called",
"async def test_setup_failed_auth(hass: HomeAssistant, ufp: MockUFPFixture) -> None:\n\n ufp.api.get_nvr = AsyncMock(side_effect=NotAuthorized)\n\n await hass.config_entries.async_setup(ufp.entry.entry_id)\n assert ufp.entry.state == ConfigEntryState.SETUP_ERROR\n assert not ufp.api.update.called",
"def test_py2_application_exception_message_unicode():\n try:\n raise ValueError(UNICODE_MESSAGE)\n except ValueError:\n app = application()\n notice_error(application=app)",
"def test_py2_application_exception_message_unicode_utf8_encoding():\n try:\n raise ValueError(UNICODE_MESSAGE)\n except ValueError:\n app = application()\n notice_error(application=app)",
"def test_failure_with_invalid_api_key(self):\n self.geocoder = Yandex(\n api_key='bad key'\n )\n with self.assertRaises(GeocoderInsufficientPrivileges):\n self.geocode_run(\n {\"query\": \"площадь Ленина Донецк\"},\n {}\n )",
"def non_existing_package_error_test(self):\n client = TestClient()\n error = client.run(\"upload Pkg/0.1@user/channel -p hash1\", ignore_error=True)\n self.assertTrue(error)\n self.assertIn(\"ERROR: There is no local conanfile exported as Pkg/0.1@user/channel\",\n client.user_io.out)",
"async def test_whoami_query_raises(raising_session) -> None:\n info = await location_util._get_whoami(raising_session)\n assert info is None",
"def testNotAPackage(self):\n\n self.assertRaises(NotAPackageException,\n parse_package,\n \"not_a_package\")",
"def test_pm_profile_activate_negative(profile_manager):\n\n with pytest.raises(exceptions.IRProfileMissing):\n profile_manager.activate(\"wrong_profile\")",
"async def test_setup_fails_non_root(\n hass: HomeAssistant, caplog: pytest.LogCaptureFixture\n) -> None:\n\n assert await async_setup_component(\n hass,\n DOMAIN,\n {},\n )\n await hass.async_block_till_done()\n\n with patch(\"os.geteuid\", return_value=10), patch(\n \"homeassistant.components.dhcp._verify_l2socket_setup\",\n side_effect=Scapy_Exception,\n ), patch(\"homeassistant.components.dhcp.DiscoverHosts.async_discover\"):\n hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)\n await hass.async_block_till_done()\n hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)\n await hass.async_block_till_done()\n\n assert \"Cannot watch for dhcp packets without root or CAP_NET_RAW\" in caplog.text",
"def test_map_missing_key_encountered():\n with pytest.raises(KeyError):\n Map().read_key(10, b\"\")",
"def test_wrong_uninstall_section_should_fail(self):\n manifest = self.generate_mock_manifest(cfg={\n EXTCFG_SECTION.UNINSTALL: ''\n })\n ext_manager = PkgInstExtrasManager(manifest)\n with pytest.raises(exceptions.InstExtrasManagerConfigError):\n ext_manager.handle_uninstall_extras()",
"def test_init_no_mac(self):\n with self.assertRaises(ValueError):\n client = ClientInfo(None, ip=\"3.3.3.3\", ap_info=self.ap)",
"def test_not_found(self):\n self.library.get.when.called_with('dummy!!!')\\\n .should.throw(ViolationDoesNotExists)",
"def test_format_otu_map_error_on_bad_prefix(self):\r\n self.assertRaises(ValueError, list,\r\n format_otu_map(self.otu_map1, 'my_otu_'))",
"def test_wrong_install_section_should_fail(self):\n manifest = self.generate_mock_manifest(cfg={\n EXTCFG_SECTION.INSTALL: ''\n })\n ext_manager = PkgInstExtrasManager(manifest)\n with pytest.raises(exceptions.InstExtrasManagerConfigError):\n ext_manager.handle_install_extras()",
"def not_existing_error_test(self):\n client = TestClient()\n error = client.run(\"upload some_nonsense\", ignore_error=True)\n self.assertTrue(error)\n self.assertIn(\"ERROR: No packages found matching pattern 'some_nonsense'\",\n client.user_io.out)",
"async def test_setup_failed_update(hass: HomeAssistant, ufp: MockUFPFixture) -> None:\n\n ufp.api.update = AsyncMock(side_effect=NvrError)\n\n await hass.config_entries.async_setup(ufp.entry.entry_id)\n await hass.async_block_till_done()\n assert ufp.entry.state == ConfigEntryState.SETUP_RETRY\n assert ufp.api.update.called",
"def test_exceptions():\r\n # test that trying to connect_ to a non existent app fails\r\n try:\r\n app = application.Application()\r\n app.connect(path=r\"No process with this please\")\r\n assert False\r\n except application.ProcessNotFoundError:\r\n print('ProcessNotFoundError has been raised. OK.')\r\n\r\n # test that trying to connect_ to a non existent app fails\r\n try:\r\n app = application.Application()\r\n app.start(cmd_line = r\"No process with this please\")\r\n assert False\r\n except application.AppStartError:\r\n print('AppStartError has been raised. OK.')",
"def test_extract_raises(capsys):\n with mock.patch('uflash.extract', side_effect=RuntimeError(\"boom\")):\n with pytest.raises(SystemExit):\n uflash.main(argv=['--extract', 'test.py'])\n\n _, stderr = capsys.readouterr()\n expected = 'Error extracting test.py'\n assert expected in stderr",
"def test_py3_application_exception_message_bytes_non_english_unicode():\n try:\n raise ValueError(UNICODE_MESSAGE)\n except ValueError:\n app = application()\n notice_error(application=app)",
"def test_parameter_user_invalid(self, mock_ghn, mock_grnam, mock_pwnam):\n # Should pass\n self.driver.check_for_setup_error()\n # Should throw exceptions\n mock_pwnam.side_effect = KeyError()\n self.configuration.hgst_space_user = ''\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)\n self.configuration.hgst_space_user = 'Fred!`'\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)",
"def testUidMissingError(self):\n self.assertRaises(ValueError, dicom_path.Path, 'p', 'l', 'd', 's', None,\n '4.5.6')\n self.assertRaises(ValueError, dicom_path.Path, 'p', 'l', 'd', 's', 'stuid',\n None, '7.8.9')",
"def test_xfailed_not_mentioned_exception():\n assert False",
"async def test_warn_when_cannot_connect(opp, caplog):\n with patch.dict(os.environ, MOCK_ENVIRON), patch(\n \"openpeerpower.components.oppio.OppIO.is_connected\",\n return_value=None,\n ):\n result = await async_setup_component(opp, \"oppio\", {})\n assert result\n\n assert opp.components.oppio.is_oppio()\n assert \"Not connected with Opp.io / system too busy!\" in caplog.text",
"def test_error_on_volume_mountpoint_not_ascii(self):\n mountpoint_unicode = u'\\u2603'\n config = dict(\n version=1,\n applications={'mysql-hybridcluster': dict(\n image='busybox',\n volume={'mountpoint': mountpoint_unicode},\n )}\n )\n parser = Configuration()\n exception = self.assertRaises(ConfigurationError,\n parser._applications_from_configuration,\n config)\n self.assertEqual(\n \"Application 'mysql-hybridcluster' has a config error. \"\n \"Invalid volume specification. Mountpoint {mount} contains \"\n \"non-ASCII (unsupported).\".format(mount=mountpoint_unicode),\n exception.message\n )",
"def test_bad_data_fail(self):\n with self.assertRaises(ValueError):\n mapreader.Map(os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_bad_data1.map'))"
] |
[
"0.6245788",
"0.6064388",
"0.5946757",
"0.59312123",
"0.58708656",
"0.58636844",
"0.5819605",
"0.57972056",
"0.5749107",
"0.5747986",
"0.5737053",
"0.573631",
"0.57356614",
"0.57354164",
"0.5728028",
"0.5724836",
"0.5716993",
"0.5716395",
"0.56910515",
"0.56907964",
"0.5687345",
"0.56652135",
"0.5661754",
"0.5659845",
"0.56448233",
"0.5641957",
"0.5620433",
"0.5615869",
"0.55967754",
"0.55853343"
] |
0.7728401
|
0
|
Verify the pipeline creation step for UMAP
|
def test_make_pipeline(self):
    umap = UMAPVisualizer()  # Should not cause an exception.
    assert umap.transformer_ is not None
    assert len(umap.transformer_.steps) == 1
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def assert_pipeline_running(self, request):\r\n self.assertTrue(pipeline.running(request))",
"def test_build_pipeline_six(self):\n args = \"Test_APP FIVE A B\".split(\" \")\n task_list = build_pipeline(args, False)\n self.assertEqual(1, len(task_list))",
"def verify():",
"def given_pipeline_completed(context):\n pipeline.pipelineVerified | should.be_true.desc(\"Pipeline is complete and verified.\")\n print('Attempting to use query for Pipeline deployed to Stage...')\n context.stage = Stage()",
"def begin(self, pipeline: osbuild.Pipeline):",
"def _setup_pipeline_cfg(self):",
"def verify(self):\n pass",
"def verify(self):\n pass",
"def verify(self):",
"def test_multilingual_pipeline():\n run_multilingual_pipeline()",
"def test_pvc_creation(self, mock_logger, mock_run_to_compl, mock_pvc_create):\n self.assertIsInstance(init_pvc(self.data, self.filer), PVC)",
"def test_sklearn_umap_title(self):\n # In TSNEVisualizer, the internal sklearn UMAP transform consumes\n # some but not all kwargs passed in by user. Those not in get_params(),\n # like title, are passed through to YB's finalize method. This test should\n # notify us if UMAP's params change on the sklearn side.\n with pytest.raises(TypeError):\n UMAP(title=\"custom_title\")",
"def Verify(self):\n return True",
"def testAttributes(self):\n pl = Pipeline(loadInitFile=False)\n batch = Batch(pl)\n self.assertIs(pl, batch.pipeline)",
"def test_build_pipeline_five(self):\n args = \"Test_APP FOUR A B\".split(\" \")\n task_list = build_pipeline(args, False)\n self.assertEqual(1, len(task_list))",
"def setUp(self):\n self.pipeline = copy.deepcopy(self.PIPELINE)\n self.pipeline[\"pairwise_fasta\"] = io.StringIO(self.FASTA)",
"def check_setup(setup):\n user_created = False\n if User.objects.filter(username=\"test_user_\" + str(setup.id)).count() == 0:\n user = User.objects.create_user(\"test_user_\" + str(setup.id))\n user_created = True\n else:\n user = User.objects.get(username=\"test_user_\" + str(setup.id))\n session = Session.objects.create(setup_id=setup, user_id=user, status=\"inactive\") # temp session to test setup\n session.save()\n ocal_input = Facade.prepare_ocal_api_request(session)\n session.delete()\n if user_created:\n user.delete()\n if ocal_input[:4] == \"fail\":\n return \"Failed collecting input. Tag : (\" + ocal_input[5:] + \").\"\n ocal_output = Facade.get_last_iteration_output(ocal_input)\n ocal_output_message = Facade.check_ocal_output(ocal_output)\n if ocal_output_message != \"success\":\n return ocal_output_message\n else:\n return \"success\"",
"def verify(self):\r\n pass",
"def _verify(self):\n pass",
"def test_init(self):\n assert self.route.route[\"transform\"] == \"transform\"\n assert self.route.route[\"output\"] == \"output\"\n assert \"api\" not in self.route.route",
"def test_ipam_vrfs_create(self):\n pass",
"def runTest(self):\n self.setUp()\n self.test_BiplaneRegistration1()",
"def setUp(self):\n\n # try:\n os.mkdir(self.pipeline_folder)\n # except FileExistsError:\n # pass\n\n with open(self.pipeline_spec_file, 'w+') as stream:\n json.dump(self.pipeline_spec, stream)\n\n with open(self.pipeline_source_file, 'w+') as stream:\n json.dump(self.source_description, stream)\n\n self.source = Source(self.pipeline_id)",
"def test_pm_Completeness(self):\n pass",
"def final_check(self, test_collection):\n assert True",
"def test_empty_output_successful(self):\n\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n manifest['job']['interface']['outputs'] = {}\n\n json_data = {\n 'manifest': manifest,\n 'configuration': self.configuration\n }\n\n url = '/%s/job-types/validation/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertDictEqual(results, {u'errors': [], u'is_valid': True, u'warnings': []})",
"def test_verification_failed(self):\n pass",
"def test_list_pipeline_add_one(self):\n response = self.client.list_pipelines()\n exsit = False\n for pipeline in response.pipelines:\n if pipeline.pipeline_name == self.pipeline_name:\n exsit = True\n break\n nose.tools.assert_true(exsit)",
"def test_setup_platform(self, store_mock):\n config = {\n ip.DOMAIN: {\n \"platform\": \"microsoft_face_identify\",\n \"source\": {\"entity_id\": \"camera.demo_camera\"},\n \"group\": \"Test Group1\",\n },\n \"camera\": {\"platform\": \"demo\"},\n mf.DOMAIN: {\"api_key\": \"12345678abcdef6\"},\n }\n\n with assert_setup_component(1, ip.DOMAIN):\n setup_component(self.hass, ip.DOMAIN, config)\n self.hass.block_till_done()\n\n assert self.hass.states.get(\"image_processing.microsoftface_demo_camera\")",
"def test_umap_unavailable():\n from yellowbrick.text.umap_vis import UMAP\n\n assert UMAP is None\n\n with pytest.raises(\n YellowbrickValueError, match=\"umap package doesn't seem to be installed\"\n ):\n UMAPVisualizer()"
] |
[
"0.5749236",
"0.5503751",
"0.543985",
"0.5400903",
"0.5394067",
"0.53700024",
"0.5339371",
"0.5339371",
"0.533054",
"0.5328124",
"0.5322165",
"0.5309885",
"0.5309038",
"0.5307369",
"0.5306281",
"0.5295248",
"0.52881926",
"0.52841985",
"0.5281725",
"0.52551985",
"0.5247717",
"0.5241058",
"0.5228738",
"0.52284145",
"0.5216318",
"0.5213208",
"0.5202807",
"0.5195164",
"0.5175979",
"0.51533043"
] |
0.781539
|
0
|
Check to make sure sklearn's UMAP doesn't use the size param
|
def test_sklearn_umap_size(self):
    # In UMAPVisualizer, the internal sklearn UMAP transform consumes
    # some but not all kwargs passed in by user. Those not in get_params(),
    # like size, are passed through to YB's finalize method. This test should
    # notify us if UMAP's params change on the sklearn side.
    with pytest.raises(TypeError):
        UMAP(size=(100, 100))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_custom_size_umap(self):\n umap = UMAPVisualizer(size=(100, 50))\n\n assert umap._size == (100, 50)",
"def test_no_target_umap(self):\n ## produce random data\n X, y = make_classification(\n n_samples=200,\n n_features=100,\n n_informative=20,\n n_redundant=10,\n n_classes=3,\n random_state=6897,\n )\n\n ## visualize data with UMAP\n umap = UMAPVisualizer(random_state=64)\n umap.fit(X)\n\n self.assert_images_similar(umap, tol=40)",
"def test_umap_mismtached_labels(self):\n ## produce random data\n X, y = make_classification(\n n_samples=200,\n n_features=100,\n n_informative=20,\n n_redundant=10,\n n_classes=3,\n random_state=42,\n )\n\n ## fewer labels than classes\n umap = UMAPVisualizer(random_state=87, labels=[\"a\", \"b\"])\n with pytest.raises(YellowbrickValueError):\n umap.fit(X, y)\n\n ## more labels than classes\n umap = UMAPVisualizer(random_state=87, labels=[\"a\", \"b\", \"c\", \"d\"])\n with pytest.raises(YellowbrickValueError):\n umap.fit(X, y)",
"def _check_data_size(self):\n if len(self.list_data) < self.n_cluster:\n self.n_cluster = len(self.list_data)",
"def test_ban_size_kwarg(self):\n with pytest.raises(ValueError):\n Dimension(\"yolo\", \"norm\", 0.9, size=(3, 2))",
"def test_sklearn_umap_title(self):\n # In TSNEVisualizer, the internal sklearn UMAP transform consumes\n # some but not all kwargs passed in by user. Those not in get_params(),\n # like title, are passed through to YB's finalize method. This test should\n # notify us if UMAP's params change on the sklearn side.\n with pytest.raises(TypeError):\n UMAP(title=\"custom_title\")",
"def _validate_train_size(train_size):\n assert isinstance(train_size, float) and (0. < train_size < 1.), \\\n \"train_size should be a float between 0 and 1\"",
"def test_size_check(self):\n [x1, y1, s1, g1] = self.data.diffusion_data.shape\n [x2, y2, s2, g2] = module_05.run_module(self.data).diffusion_data.shape\n self.assertEqual(x1, x2)\n self.assertEqual(y1, y2)\n self.assertEqual(s1, s2)\n self.assertEqual(g1, g2)",
"def test_summarize_otu_sizes_from_otu_map(self):\r\n otu_map_f = \"\"\"O1\tseq1\r\no2\tseq2\tseq3\tseq4\tseq5\r\no3\tseq5\r\no4\tseq6\tseq7\"\"\".split('\\n')\r\n expected = [(1, 2), (2, 1), (4, 1)]\r\n self.assertEqual(summarize_otu_sizes_from_otu_map(otu_map_f), expected)",
"def _test_sampsize(t):\n return t.shape[1] != len(t.ids(axis='sample'))",
"def _check_input_size(n_components, n_features):\n if n_components <= 0:\n raise ValueError(\n \"n_components must be strictly positive, got %d\" % n_components\n )\n if n_features <= 0:\n raise ValueError(\"n_features must be strictly positive, got %d\" % n_features)",
"def test_make_classification_umap(self):\n\n ## produce random data\n X, y = make_classification(\n n_samples=200,\n n_features=100,\n n_informative=20,\n n_redundant=10,\n n_classes=3,\n random_state=42,\n )\n\n ## visualize data with UMAP\n umap = UMAPVisualizer(random_state=87)\n umap.fit(X, y)\n\n self.assert_images_similar(umap, tol=40)",
"def test_default_sample_weight() -> None:\n mapie = MapieClassifier()\n assert signature(mapie.fit).parameters[\"sample_weight\"].default is None",
"def test_has_correct_length(self) -> None:\n assert len(list(self._dataset)) == 7168",
"def test_too_large_cv(cv: Any) -> None:\n mapie = MapieRegressor(cv=cv)\n with pytest.raises(\n ValueError,\n match=rf\".*Cannot have number of splits n_splits={cv} greater.*\"\n ):\n mapie.fit(X_toy, y_toy)",
"def debug_get_simple_size(self, _train=None, _validation=None):\n if np.any(_train):\n self.train_check = list()\n self.train_check.append(_train)\n if np.any(_validation):\n self.val_check = list()\n self.val_check.append(_validation)",
"def test_batch_size_pack_size():",
"def _test_obssize(t):\n return t.shape[0] != len(t.ids(axis='observation'))",
"def correct_size():\n check50.run(\"./inheritance_test\").stdout(\"size_true.*\").exit(0)",
"def features_size(self) -> int:\n return None",
"def __len__(self):\n\t\treturn min(len(self.dataset), self.opt.max_dataset_size)",
"def test_check_map_var_len_not_specified(self):\r\n\r\n self.assertRaises(ValueError, check_map,\r\n self.valid_mapping_data_var_len_bcs)",
"def test_reduce_dimensionality(base_bertopic, embeddings, shape):\n umap_embeddings = base_bertopic._reduce_dimensionality(embeddings)\n assert umap_embeddings.shape == (shape, 5)",
"def test_number_of_classes(simple_unet_data, number_of_classes):\n unet = models.UNet(num_classes=number_of_classes)\n output = unet(simple_unet_data)\n assert output.shape[-1] == number_of_classes",
"def test_default_sample_weight() -> None:\n mapie = MapieRegressor()\n assert signature(mapie.fit).parameters[\"sample_weight\"].default is None",
"def test_quick_method(self):\n corpus = load_hobbies()\n tfidf = TfidfVectorizer()\n\n X = tfidf.fit_transform(corpus.data)\n y = corpus.target\n\n viz = umap(X, y, show=False)\n assert isinstance(viz, UMAPVisualizer)\n\n self.assert_images_similar(viz, tol=50)",
"def _test_obsdup(t):\n return t.shape[0] != len(set(t.ids(axis='observation')))",
"def test_reduce_dimensionality(embeddings, shape):\n model = BERTopic()\n umap_embeddings = model._reduce_dimensionality(embeddings)\n assert umap_embeddings.shape == (shape, 5)",
"def test_checks_population_size(self):\n with pm.Model() as model:\n n = pm.Normal(\"n\", mu=0, sigma=1)\n for stepper in TestPopulationSamplers.steppers:\n step = stepper()\n with pytest.raises(ValueError, match=\"requires at least 3 chains\"):\n pm.sample(draws=10, tune=10, chains=1, cores=1, step=step)\n # don't parallelize to make test faster\n pm.sample(\n draws=10,\n tune=10,\n chains=4,\n cores=1,\n step=step,\n compute_convergence_checks=False,\n )",
"def test_invalid_prefit_estimator_shape() -> None:\n estimator = LinearRegression().fit(X, y)\n with pytest.raises(ValueError, match=r\".*mismatch between.*\"):\n check_n_features_in(\n X_toy, cv=\"prefit\", estimator=estimator\n )"
] |
[
"0.6389161",
"0.627912",
"0.61946136",
"0.5868766",
"0.57942975",
"0.5700335",
"0.567017",
"0.5652609",
"0.55832916",
"0.5576838",
"0.55651337",
"0.5560315",
"0.5551153",
"0.5548372",
"0.5547365",
"0.55050397",
"0.5484217",
"0.54676306",
"0.53981966",
"0.5370509",
"0.53530574",
"0.5337778",
"0.53149325",
"0.5271932",
"0.5262036",
"0.5259163",
"0.52591074",
"0.5257881",
"0.52562535",
"0.52354264"
] |
0.8239812
|
0
|